@@ -24,8 +24,10 @@
#include "bnxt_util.h"
#include "tf_core.h"
+#include "tfc.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
+#include "bnxt_mpc.h"
#include "bnxt_vnic.h"
/* Vendor ID */
@@ -1035,6 +1037,7 @@ struct bnxt {
struct bnxt_ring_stats_ext *prev_tx_ring_stats_ext;
struct bnxt_vnic_queue_db vnic_queue_db;
+ struct bnxt_mpc *mpc;
#define BNXT_MAX_MC_ADDRS ((bp)->max_mcast_addr)
struct rte_ether_addr *mcast_addr_list;
rte_iova_t mc_list_dma_addr;
@@ -8,6 +8,7 @@
#include <stdbool.h>
#include <rte_io.h>
+#include <rte_version.h>
#include "hsi_struct_def_dpdk.h"
struct bnxt_db_info;
@@ -15,6 +16,10 @@ struct bnxt_db_info;
#define CMP_TYPE(cmp) \
(((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)
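+/*
+ * Evaluate to true when the completion's V bit indicates the entry was
+ * written by hardware for the current pass over the ring; 'v' is the
+ * expected-phase flag that NEXT_CMPL flips on each ring wrap.
+ */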
+#define CMPL_VALID(cmp, v) \
+ (!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) & \
+ CMPL_BASE_V) == !(v))
+
/* Get completion length from completion type, in 16-byte units. */
#define CMP_LEN(cmp_type) (((cmp_type) & 1) + 1)
@@ -28,6 +33,14 @@ struct bnxt_db_info;
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
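+/*
+ * Advance completion ring index 'idx' by 'inc' entries, flipping the
+ * expected-phase flag 'v' and resetting the index when the end of the
+ * ring is reached.
+ */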
+#define NEXT_CMPL(cpr, idx, v, inc) do { \
+ (idx) += (inc); \
+ if (unlikely((idx) >= (cpr)->cp_ring_struct->ring_size)) { \
+ (v) = !(v); \
+ (idx) = 0; \
+ } \
+} while (0)
+
#define B_CP_DB_REARM(cpr, raw_cons) \
rte_write32((DB_CP_REARM_FLAGS | \
DB_RING_IDX(&((cpr)->cp_db), raw_cons)), \
@@ -74,6 +87,8 @@ struct bnxt_cp_ring_info {
uint32_t hw_stats_ctx_id;
struct bnxt_ring *cp_ring_struct;
+ bool valid;
+ uint32_t epoch;
};
#define RX_CMP_L2_ERRORS \
@@ -104,10 +119,13 @@ bool bnxt_is_recovery_enabled(struct bnxt *bp);
bool bnxt_is_primary_func(struct bnxt *bp);
void bnxt_stop_rxtx(struct rte_eth_dev *eth_dev);
+#if (RTE_VERSION_NUM(21, 8, 0, 0) < RTE_VERSION)
+void bnxt_start_rxtx(struct rte_eth_dev *eth_dev);
+#endif
/**
* Check validity of a completion ring entry. If the entry is valid, include a
- * C11 rte_memory_order_acquire fence to ensure that subsequent loads of fields in the
+ * C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
* completion are not hoisted by the compiler or by the CPU to come before the
* loading of the "valid" field.
*
@@ -124,7 +142,7 @@ void bnxt_stop_rxtx(struct rte_eth_dev *eth_dev);
static __rte_always_inline bool
bnxt_cpr_cmp_valid(const void *cmpl, uint32_t raw_cons, uint32_t ring_size)
{
- const struct cmpl_base *c = cmpl;
+ const struct cmpl_base *c = (const struct cmpl_base *)cmpl;
bool expected, valid;
expected = !(raw_cons & ring_size);
@@ -2397,7 +2397,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
return rc;
}
-static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
int rc;
struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
@@ -7477,6 +7477,49 @@ int bnxt_hwrm_config_host_mtu(struct bnxt *bp)
return rc;
}
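+/* Enable or disable the requested mid-path channels for this function
+ * via HWRM_FUNC_CFG.
+ */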
+int bnxt_hwrm_func_cfg_mpc(struct bnxt *bp, uint8_t mpc_chnls_msk, bool enable)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+ uint16_t mpc_chnls = 0;
+
+ HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
+ req.fid = rte_cpu_to_le_16(0xffff);
+ req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MPC_CHNLS);
+ if (enable) {
+ if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_TCE))
+ mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TCE_ENABLE;
+ if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_RCE))
+ mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RCE_ENABLE;
+ if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_TE_CFA))
+ mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TE_CFA_ENABLE;
+ if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_RE_CFA))
+ mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RE_CFA_ENABLE;
+ if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_PRIMATE))
+ mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_PRIMATE_ENABLE;
+ } else {
+ if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_TCE))
+ mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TCE_DISABLE;
+ if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_RCE))
+ mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RCE_DISABLE;
+ if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_TE_CFA))
+ mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TE_CFA_DISABLE;
+ if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_RE_CFA))
+ mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RE_CFA_DISABLE;
+ if (mpc_chnls_msk & (1 << BNXT_MPC_CHNL_PRIMATE))
+ mpc_chnls |= HWRM_FUNC_CFG_INPUT_MPC_CHNLS_PRIMATE_DISABLE;
+ }
+ req.mpc_chnls = rte_cpu_to_le_16(mpc_chnls);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
int
bnxt_vnic_rss_clear_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
@@ -7499,3 +7542,41 @@ bnxt_vnic_rss_clear_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
return rc;
}
+
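+/* Tunnel a TruFlow request to the firmware through the HWRM OEM command.
+ * Only supported when running as a VF; the response payload is copied
+ * back into 'out'.
+ */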
+int bnxt_hwrm_tf_oem_cmd(struct bnxt *bp,
+ uint32_t *in,
+ uint16_t in_len,
+ uint32_t *out,
+ uint16_t out_len)
+{
+ struct hwrm_oem_cmd_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_oem_cmd_input req = {0};
+ int rc = 0;
+
+ if (!BNXT_VF(bp)) {
+ PMD_DRV_LOG_LINE(DEBUG, "Not a VF. Command not supported");
+ return -ENOTSUP;
+ }
+
+ HWRM_PREP(&req, HWRM_OEM_CMD, BNXT_USE_CHIMP_MB);
+
+ req.oem_id = rte_cpu_to_le_32(0x14e4);
+ req.naming_authority =
+ HWRM_OEM_CMD_INPUT_NAMING_AUTHORITY_PCI_SIG;
+ req.message_family =
+ HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_TRUFLOW;
+ memcpy(req.oem_data, in, in_len);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+ HWRM_CHECK_RESULT();
+ if (resp->oem_id == 0x14e4 &&
+ resp->naming_authority ==
+ HWRM_OEM_CMD_INPUT_NAMING_AUTHORITY_PCI_SIG &&
+ resp->message_family ==
+ HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_TRUFLOW)
+ memcpy(out, resp->oem_data, out_len);
+ HWRM_UNLOCK();
+
+ return rc;
+}
@@ -367,6 +367,10 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
void bnxt_free_hwrm_tx_ring(struct bnxt *bp, int queue_index);
int bnxt_alloc_hwrm_tx_ring(struct bnxt *bp, int queue_index);
int bnxt_hwrm_config_host_mtu(struct bnxt *bp);
+int bnxt_hwrm_func_cfg_mpc(struct bnxt *bp,
+ uint8_t mpc_chnls_msk,
+ bool enable);
+int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
int bnxt_vnic_rss_clear_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_vnic_rss_configure_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp);
@@ -375,4 +379,9 @@ int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
int bnxt_hwrm_func_backing_store_types_count(struct bnxt *bp);
int bnxt_hwrm_func_backing_store_ctx_alloc(struct bnxt *bp, uint16_t types);
int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp);
+int bnxt_hwrm_tf_oem_cmd(struct bnxt *bp,
+ uint32_t *in,
+ uint16_t in_len,
+ uint32_t *out,
+ uint16_t out_len);
#endif
new file mode 100644
@@ -0,0 +1,772 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2020 Broadcom
+ * All rights reserved.
+ */
+
+#include <inttypes.h>
+#include <rte_malloc.h>
+#include <unistd.h>
+
+#include "bnxt.h"
+#include "bnxt_ring.h"
+#include "bnxt_mpc.h"
+#include "bnxt_hwrm.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*#define MPC_DEBUG 1*/
+
+#define BNXT_MPC_BP_SIZE 16
+
+static int bnxt_mpc_chnls_enable(struct bnxt *bp)
+{
+ struct bnxt_mpc *mpc = bp->mpc;
+ uint8_t mpc_chnl_msk = 0;
+ int i, rc;
+
+ if (!mpc)
+ return -EINVAL;
+
+ for (i = 0; i < BNXT_MPC_CHNL_MAX; i++) {
+ if (!(mpc->mpc_chnls_cap & (1 << i)))
+ continue;
+ mpc_chnl_msk |= (1 << i);
+ }
+ mpc->mpc_chnls_en = mpc_chnl_msk;
+
+ if (!BNXT_PF(bp))
+ return 0;
+
+ rc = bnxt_hwrm_func_cfg_mpc(bp, mpc_chnl_msk, true);
+ if (rc != 0) {
+ mpc->mpc_chnls_en = 0;
+ PMD_DRV_LOG_LINE(ERR, "MPC chnls enabling failed rc:%d", rc);
+ }
+
+ return rc;
+}
+
+static int bnxt_mpc_chnls_disable(struct bnxt *bp)
+{
+ struct bnxt_mpc *mpc = bp->mpc;
+ uint8_t mpc_chnl_msk = 0;
+ int i, rc;
+
+ if (!mpc)
+ return -EINVAL;
+
+ if (!BNXT_PF(bp)) {
+ mpc->mpc_chnls_en = 0;
+ return 0;
+ }
+
+ /* Build the disable mask from the currently enabled channels before
+ * clearing the bookkeeping.
+ */
+ for (i = 0; i < BNXT_MPC_CHNL_MAX; i++) {
+ if (!(mpc->mpc_chnls_en & (1 << i)))
+ continue;
+ mpc_chnl_msk |= (1 << i);
+ }
+ mpc->mpc_chnls_en = 0;
+ rc = bnxt_hwrm_func_cfg_mpc(bp, mpc_chnl_msk, false);
+ if (rc != 0)
+ PMD_DRV_LOG_LINE(ERR, "MPC chnls disabling failed rc:%d", rc);
+
+ return rc;
+}
+
+static void bnxt_mpc_queue_release_mbufs(struct bnxt_mpc_txq *mpc_queue)
+{
+ struct bnxt_sw_mpc_bd *sw_ring;
+ uint16_t i;
+
+ if (!mpc_queue)
+ return;
+
+ sw_ring = mpc_queue->mpc_ring->mpc_buf_ring;
+ if (!sw_ring)
+ return;
+
+ for (i = 0; i < mpc_queue->mpc_ring->mpc_ring_struct->ring_size; i++) {
+ if (sw_ring[i].mpc_mbuf) {
+ rte_free(sw_ring[i].mpc_mbuf);
+ sw_ring[i].mpc_mbuf = NULL;
+ }
+ }
+}
+
+static void bnxt_mpc_queue_release_one(struct bnxt_mpc_txq *mpc_queue)
+{
+ if (!mpc_queue)
+ return;
+
+ if (is_bnxt_in_error(mpc_queue->bp))
+ return;
+ /* Free MPC ring HW descriptors */
+ bnxt_mpc_queue_release_mbufs(mpc_queue);
+ bnxt_free_ring(mpc_queue->mpc_ring->mpc_ring_struct);
+ /* Free MPC completion ring HW descriptors */
+ bnxt_free_ring(mpc_queue->cp_ring->cp_ring_struct);
+
+ rte_memzone_free(mpc_queue->mz);
+ mpc_queue->mz = NULL;
+
+ rte_free(mpc_queue->free);
+ rte_free(mpc_queue);
+}
+
+static void bnxt_mpc_ring_free_one(struct bnxt_mpc_txq *mpc_queue)
+{
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_mpc_ring_info *mpr;
+ struct bnxt_ring *ring;
+
+ if (!mpc_queue)
+ return;
+
+ if (is_bnxt_in_error(mpc_queue->bp))
+ return;
+
+ mpr = mpc_queue->mpc_ring;
+ ring = mpr->mpc_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cpr = mpc_queue->cp_ring;
+ bnxt_hwrm_ring_free(mpc_queue->bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_TX,
+ cpr->cp_ring_struct->fw_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ memset(mpr->mpc_desc_ring, 0,
+ mpr->mpc_ring_struct->ring_size * sizeof(*mpr->mpc_desc_ring));
+ memset(mpr->mpc_buf_ring, 0,
+ mpr->mpc_ring_struct->ring_size * sizeof(*mpr->mpc_buf_ring));
+ mpr->raw_prod = 0;
+ mpr->raw_cons = 0;
+
+ bnxt_free_cp_ring(mpc_queue->bp, cpr);
+ bnxt_hwrm_stat_ctx_free(mpc_queue->bp, cpr);
+}
+
+int bnxt_mpc_close(struct bnxt *bp)
+{
+ int i, rc = 0;
+ struct bnxt_mpc_txq *mpc_queue;
+ struct bnxt_mpc *mpc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
+ if (!bp->mpc)
+ return 0;
+
+ mpc = bp->mpc;
+ /* free the MPC TX ring for each channel. */
+ for (i = 0; i < BNXT_MPC_CHNL_MAX; i++) {
+ if (!(mpc->mpc_chnls_en & (1 << i)))
+ continue;
+ mpc_queue = mpc->mpc_txq[i];
+ if (!mpc_queue)
+ continue;
+ bnxt_mpc_ring_free_one(mpc_queue);
+ bnxt_mpc_queue_release_one(mpc_queue);
+ mpc->mpc_txq[i] = NULL;
+ }
+
+ rc = bnxt_mpc_chnls_disable(bp);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "MPC channels disable failed rc:%d", rc);
+
+ return rc;
+}
+
+static int bnxt_init_mpc_ring_struct(struct bnxt_mpc_txq *mpc_queue,
+ unsigned int socket_id)
+{
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_mpc_ring_info *mpr;
+ struct bnxt_ring *ring;
+ int rc = 0;
+
+ mpr = rte_zmalloc_socket("bnxt_mpc_ring",
+ sizeof(struct bnxt_mpc_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (mpr == NULL)
+ return -ENOMEM;
+ mpc_queue->mpc_ring = mpr;
+
+ ring = rte_zmalloc_socket("bnxt_mpc_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "MPC ring struct alloc failed rc:%d", rc);
+ rc = -ENOMEM;
+ goto bnxt_init_mpc_ring_struct_err;
+ }
+
+ mpr->mpc_ring_struct = ring;
+ ring->ring_size = rte_align32pow2(mpc_queue->nb_mpc_desc);
+ ring->ring_mask = ring->ring_size - 1;
+ ring->bd = (void *)mpr->mpc_desc_ring;
+ ring->bd_dma = mpr->mpc_desc_mapping;
+ ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_mpc_bd);
+ ring->vmem = (void **)&mpr->mpc_buf_ring;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ cpr = rte_zmalloc_socket("bnxt_mpc_ring",
+ sizeof(struct bnxt_cp_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cpr == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "MPC cp ring alloc failed rc:%d", rc);
+ rc = -ENOMEM;
+ goto bnxt_init_mpc_ring_struct_err1;
+ }
+ mpc_queue->cp_ring = cpr;
+
+ ring = rte_zmalloc_socket("bnxt_mpc_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "MPC cp ring struct alloc failed rc:%d", rc);
+ rc = -ENOMEM;
+ goto bnxt_init_mpc_ring_struct_err2;
+ }
+ cpr->cp_ring_struct = ring;
+ ring->ring_size = mpr->mpc_ring_struct->ring_size;
+ ring->ring_mask = ring->ring_size - 1;
+ ring->bd = (void *)cpr->cp_desc_ring;
+ ring->bd_dma = cpr->cp_desc_mapping;
+ ring->vmem_size = 0;
+ ring->vmem = NULL;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ return 0;
+
+bnxt_init_mpc_ring_struct_err2:
+ rte_free(cpr);
+ mpc_queue->cp_ring = NULL;
+bnxt_init_mpc_ring_struct_err1:
+ /* 'ring' may already point at the failed cp ring allocation, so free
+ * the MPC ring struct through mpr.
+ */
+ rte_free(mpr->mpc_ring_struct);
+ mpr->mpc_ring_struct = NULL;
+bnxt_init_mpc_ring_struct_err:
+ rte_free(mpr);
+ mpc_queue->mpc_ring = NULL;
+ return rc;
+}
+
+/*
+ * For a MPC queue, allocates a completion ring with vmem and bd ring,
+ * stats mem, a TX ring with vmem and bd ring.
+ *
+ * Order in the allocation is:
+ * stats - Always non-zero length
+ * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
+ * tx vmem - Only non-zero length
+ * cp bd ring - Always non-zero length
+ * tx bd ring - Only non-zero length
+ */
+
+static int bnxt_alloc_mpc_rings(struct bnxt_mpc_txq *mpc_queue,
+ const char *suffix)
+{
+ struct bnxt_ring *cp_ring;
+ struct bnxt_cp_ring_info *cp_ring_info;
+ struct bnxt_mpc_ring_info *mpc_ring_info;
+ struct bnxt_ring *ring;
+ struct rte_pci_device *pdev;
+ const struct rte_memzone *mz = NULL;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ rte_iova_t mz_phys_addr;
+
+ if (!mpc_queue)
+ return -EINVAL;
+
+ pdev = mpc_queue->bp->pdev;
+ mpc_ring_info = mpc_queue->mpc_ring;
+ cp_ring = mpc_queue->cp_ring->cp_ring_struct;
+ cp_ring_info = mpc_queue->cp_ring;
+
+ int stats_len = BNXT_HWRM_CTX_GET_SIZE(mpc_queue->bp);
+ stats_len = RTE_CACHE_LINE_ROUNDUP(stats_len);
+ stats_len = RTE_ALIGN(stats_len, 128);
+
+ int cp_vmem_start = stats_len;
+ int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
+ cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);
+
+ int nq_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
+ nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);
+
+ int nq_vmem_start = cp_vmem_start + cp_vmem_len;
+
+ int mpc_vmem_start = nq_vmem_start + nq_vmem_len;
+ int mpc_vmem_len =
+ RTE_CACHE_LINE_ROUNDUP(mpc_ring_info->mpc_ring_struct->vmem_size);
+ mpc_vmem_len = RTE_ALIGN(mpc_vmem_len, 128);
+
+ int cp_ring_start = mpc_vmem_start + mpc_vmem_len;
+ cp_ring_start = RTE_ALIGN(cp_ring_start, 4096);
+
+ int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
+ sizeof(struct cmpl_base));
+ cp_ring_len = RTE_ALIGN(cp_ring_len, 128);
+
+ int mpc_ring_start = cp_ring_start + cp_ring_len;
+ mpc_ring_start = RTE_ALIGN(mpc_ring_start, 4096);
+ int mpc_ring_len =
+ RTE_CACHE_LINE_ROUNDUP(mpc_ring_info->mpc_ring_struct->ring_size *
+ sizeof(struct tx_bd_mp_cmd));
+ mpc_ring_len = RTE_ALIGN(mpc_ring_len, 4096);
+
+ int total_alloc_len = mpc_ring_start + mpc_ring_len;
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_" PCI_PRI_FMT "-%04x_%s", pdev->addr.domain,
+ pdev->addr.bus, pdev->addr.devid, pdev->addr.function,
+ mpc_queue->chnl_id, suffix);
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ if (!mz) {
+ mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG,
+ getpagesize());
+ if (mz == NULL || !mz->addr)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ mz_phys_addr = mz->iova;
+
+ mpc_queue->mz = mz;
+ ring = mpc_ring_info->mpc_ring_struct;
+
+ ring->bd = ((char *)mz->addr + mpc_ring_start);
+ mpc_ring_info->mpc_desc_ring = (struct tx_bd_mp_cmd *)ring->bd;
+ ring->bd_dma = mz_phys_addr + mpc_ring_start;
+ mpc_ring_info->mpc_desc_mapping = ring->bd_dma;
+ ring->mem_zone = (const void *)mz;
+
+ if (ring->vmem_size) {
+ ring->vmem = (void **)((char *)mz->addr + mpc_vmem_start);
+ mpc_ring_info->mpc_buf_ring =
+ (struct bnxt_sw_mpc_bd *)ring->vmem;
+ }
+
+ cp_ring->bd = ((char *)mz->addr + cp_ring_start);
+ cp_ring->bd_dma = mz_phys_addr + cp_ring_start;
+ cp_ring_info->cp_desc_ring = cp_ring->bd;
+ cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
+ cp_ring->mem_zone = (const void *)mz;
+
+ if (cp_ring->vmem_size)
+ *cp_ring->vmem = (char *)mz->addr + stats_len;
+
+ cp_ring_info->hw_stats = mz->addr;
+ cp_ring_info->hw_stats_map = mz_phys_addr;
+ cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+
+ return 0;
+}
+
+static void bnxt_init_one_mpc_ring(struct bnxt_mpc_txq *mpc_queue)
+{
+ struct bnxt_mpc_ring_info *mpr = mpc_queue->mpc_ring;
+ struct bnxt_cp_ring_info *cpr = mpc_queue->cp_ring;
+ struct bnxt_ring *ring = mpr->mpc_ring_struct;
+
+ mpc_queue->wake_thresh = ring->ring_size / 2;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ mpr->epoch = 0;
+ cpr->epoch = 0;
+}
+
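+/* Return the logical ring index for an MPC channel: enabled channels are
+ * numbered consecutively and offset by 'offset' (the caller passes the
+ * number of TX completion rings).
+ */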
+static uint16_t get_mpc_ring_logical_id(uint8_t mpc_cap,
+ enum bnxt_mpc_chnl chnl_id,
+ uint16_t offset)
+{
+ unsigned int i;
+ uint8_t logical_id = 0;
+
+ for (i = 0; i < BNXT_MPC_CHNL_MAX; i++) {
+ if (!(mpc_cap & (1 << i)))
+ continue;
+
+ if (i == chnl_id)
+ return logical_id + offset;
+
+ logical_id++;
+ }
+
+ return INVALID_HW_RING_ID;
+}
+
+static int bnxt_mpc_queue_setup_one(struct bnxt *bp, enum bnxt_mpc_chnl chnl_id,
+ uint16_t nb_desc, unsigned int socket_id)
+{
+ int rc = 0;
+ struct bnxt_mpc *mpc;
+ struct bnxt_mpc_txq *mpc_queue;
+
+ if (!bp || !bp->mpc)
+ return 0;
+
+ mpc = bp->mpc;
+ mpc_queue = rte_zmalloc_socket("bnxt_mpc_queue",
+ sizeof(struct bnxt_mpc_txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!mpc_queue) {
+ PMD_DRV_LOG_LINE(ERR, "bnxt_mpc_queue allocation failed!");
+ return -ENOMEM;
+ }
+
+ mpc_queue->free =
+ rte_zmalloc_socket(NULL,
+ sizeof(struct bnxt_mpc_mbuf *) * nb_desc,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!mpc_queue->free) {
+ PMD_DRV_LOG_LINE(ERR, "allocation of mpc mbuf free array failed!");
+ rc = -ENOMEM;
+ goto bnxt_mpc_queue_setup_one_err;
+ }
+ mpc_queue->bp = bp;
+ mpc_queue->nb_mpc_desc = nb_desc;
+ /* TBD: hardcoded for now and should be tuned later for perf */
+ mpc_queue->free_thresh = BNXT_MPC_DESC_THRESH;
+
+ rc = bnxt_init_mpc_ring_struct(mpc_queue, socket_id);
+ if (rc)
+ goto bnxt_mpc_queue_setup_one_err1;
+
+ mpc_queue->chnl_id = chnl_id;
+
+ /* allocate MPC TX ring hardware descriptors */
+ rc = bnxt_alloc_mpc_rings(mpc_queue, "mpc");
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "ring_dma_zone_reserve for mpc_ring failed!");
+ rc = -ENOMEM;
+ goto bnxt_mpc_queue_setup_one_err1;
+ }
+ bnxt_init_one_mpc_ring(mpc_queue);
+ mpc_queue->queue_idx = get_mpc_ring_logical_id(bp->mpc->mpc_chnls_cap,
+ chnl_id,
+ bp->tx_cp_nr_rings);
+ mpc_queue->started = true;
+ mpc->mpc_txq[chnl_id] = mpc_queue;
+
+ return 0;
+
+bnxt_mpc_queue_setup_one_err1:
+ rte_free(mpc_queue->free);
+bnxt_mpc_queue_setup_one_err:
+ rte_free(mpc_queue);
+ return rc;
+}
+
+static int bnxt_mpc_ring_alloc_one(struct bnxt *bp, enum bnxt_mpc_chnl chnl_id)
+{
+ int rc = 0;
+ struct bnxt_mpc_txq *mpc_queue;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_ring *cp_ring;
+ struct bnxt_mpc_ring_info *mpr;
+ struct bnxt_ring *ring;
+ struct bnxt_coal coal;
+ uint32_t map_index;
+
+ if (!bp || !bp->mpc)
+ return 0;
+
+ mpc_queue = bp->mpc->mpc_txq[chnl_id];
+ if (!mpc_queue)
+ return -EINVAL;
+
+ bnxt_init_dflt_coal(&coal);
+ cpr = mpc_queue->cp_ring;
+ cp_ring = cpr->cp_ring_struct;
+ map_index = mpc_queue->queue_idx;
+
+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "mpc ring %d stats alloc failed rc:%d!",
+ chnl_id, rc);
+ return rc;
+ }
+ rc = bnxt_alloc_cmpl_ring(bp, map_index, cpr);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "mpc ring %d cmpl ring alloc failed rc:%d!",
+ chnl_id, rc);
+ goto bnxt_mpc_ring_alloc_one_err;
+ }
+ mpr = mpc_queue->mpc_ring;
+ ring = mpr->mpc_ring_struct;
+ map_index = BNXT_MPC_MAP_INDEX(chnl_id, mpc_queue->queue_idx);
+
+ rc = bnxt_hwrm_ring_alloc(bp,
+ ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
+ map_index,
+ cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id,
+ MPC_HW_COS_ID);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "mpc ring %d tx ring alloc failed rc:%d!",
+ chnl_id, rc);
+ goto bnxt_mpc_ring_alloc_one_err1;
+ }
+
+ bnxt_set_db(bp, &mpr->db, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX, chnl_id,
+ ring->fw_ring_id, ring->ring_mask);
+
+ bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
+
+ return rc;
+
+bnxt_mpc_ring_alloc_one_err1:
+ bnxt_free_cp_ring(bp, cpr);
+bnxt_mpc_ring_alloc_one_err:
+ bnxt_hwrm_stat_ctx_free(bp, cpr);
+ return rc;
+}
+
+int bnxt_mpc_open(struct bnxt *bp)
+{
+ int rc = 0;
+ enum bnxt_mpc_chnl i;
+ struct bnxt_mpc *mpc;
+ unsigned int socket_id;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
+ if (!bp->mpc)
+ return 0;
+
+ /* enable the MPC channels first */
+ rc = bnxt_mpc_chnls_enable(bp);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "MPC channels enable failed rc:%d", rc);
+ return rc;
+ }
+ socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
+ mpc = bp->mpc;
+
+ /* Limit to MPC TE_CFA and RE_CFA */
+ mpc->mpc_chnls_cap &= (1 << HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_TE_CFA) |
+ (1 << HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_RE_CFA);
+
+ /* allocate one MPC TX ring for each channel. */
+ for (i = 0; i < BNXT_MPC_CHNL_MAX; i++) {
+ if (!(mpc->mpc_chnls_cap & (1 << i)))
+ continue;
+ rc = bnxt_mpc_queue_setup_one(bp, i, BNXT_MPC_NB_DESC, socket_id);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "MPC queue %d setup failed rc:%d",
+ i, rc);
+ goto bnxt_mpc_open_err;
+ }
+ rc = bnxt_mpc_ring_alloc_one(bp, i);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "MPC ring %d alloc failed rc:%d",
+ i, rc);
+ goto bnxt_mpc_open_err;
+ }
+ }
+
+ return rc;
+
+bnxt_mpc_open_err:
+ bnxt_mpc_close(bp);
+ return rc;
+}
+
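+/* Poll the MPC completion ring for a completion of the type requested in
+ * out_msg, copy the completion record(s) into out_msg->msg_data and ring
+ * the completion doorbell. Returns the number of matching completions
+ * consumed.
+ */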
+int bnxt_mpc_cmd_cmpl(struct bnxt_mpc_txq *mpc_queue, struct bnxt_mpc_mbuf *out_msg)
+{
+ struct bnxt_cp_ring_info *cpr = mpc_queue->cp_ring;
+ uint32_t raw_cons = cpr->cp_raw_cons;
+ uint32_t cons;
+ struct cmpl_base *mpc_cmpl;
+ uint32_t nb_mpc_cmds = 0;
+ struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
+ struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
+ uint32_t ring_mask = cp_ring_struct->ring_mask;
+ uint32_t idx = raw_cons;
+ uint32_t num_bds;
+ bool is_long = (out_msg->cmp_type == CMPL_BASE_TYPE_MID_PATH_LONG);
+
+ do {
+ cons = RING_CMPL(ring_mask, raw_cons);
+ mpc_cmpl = &cpr->cp_desc_ring[cons];
+
+ rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) &
+ ring_mask]);
+
+ if (!CMPL_VALID(mpc_cmpl, cpr->valid)) {
+ break;
+ } else if (is_long) {
+ uint32_t cons_tmp = cons + 1;
+ uint32_t valid;
+ struct cmpl_base *tmp_mpc_cmpl = &cp_desc_ring[cons_tmp & ring_mask];
+
+ if ((cons_tmp & ring_mask) < (cons & ring_mask))
+ valid = !cpr->valid;
+ else
+ valid = cpr->valid;
+
+ if (!CMPL_VALID(tmp_mpc_cmpl, valid))
+ break;
+ }
+
+ NEXT_CMPL(cpr,
+ cons,
+ cpr->valid,
+ (is_long ? 2 : 1));
+
+ rte_prefetch0(&cp_desc_ring[cons]);
+
+ if (likely(CMP_TYPE(mpc_cmpl) == out_msg->cmp_type)) {
+ nb_mpc_cmds++;
+ idx = raw_cons;
+ raw_cons = cons;
+ break;
+ } else {
+ PMD_DRV_LOG_LINE(DEBUG, "Unhandled CMP type %02x",
+ CMP_TYPE(mpc_cmpl));
+ }
+
+ raw_cons = cons;
+ } while (nb_mpc_cmds < ring_mask);
+
+ if (nb_mpc_cmds) {
+ memcpy(out_msg->msg_data,
+ &cpr->cp_desc_ring[idx],
+ BNXT_MPC_BP_SIZE);
+
+ if (is_long) {
+ uint32_t tidx = idx + 1;
+
+ if (tidx >= BNXT_MPC_NB_DESC)
+ tidx = 0;
+
+ memcpy(out_msg->msg_data + BNXT_MPC_BP_SIZE,
+ &cpr->cp_desc_ring[tidx],
+ BNXT_MPC_BP_SIZE);
+ }
+
+ if (is_long)
+ num_bds = 2;
+ else
+ num_bds = 1;
+
+ cpr->cp_raw_cons = idx + num_bds;
+
+ /* Handle the wrap */
+ if (cpr->cp_raw_cons >= BNXT_MPC_NB_DESC) {
+ cpr->epoch = (cpr->epoch == 0 ? 1 : 0);
+ cpr->cp_raw_cons -= BNXT_MPC_NB_DESC;
+ }
+
+ bnxt_db_mpc_cq(cpr);
+ }
+
+ return nb_mpc_cmds;
+}
+
+static int bnxt_mpc_xmit(struct bnxt_mpc_mbuf *mpc_cmd,
+ struct bnxt_mpc_txq *mpc_queue,
+ uint32_t *opaque)
+{
+ struct bnxt_mpc_ring_info *mpr = mpc_queue->mpc_ring;
+ struct bnxt_ring *ring = mpr->mpc_ring_struct;
+ unsigned short nr_bds = 0;
+ uint16_t prod;
+ struct bnxt_sw_mpc_bd *mpc_buf;
+ struct tx_bd_mp_cmd *mpc_bd;
+ uint8_t *msg_buf;
+ int i;
+
+ if (unlikely(is_bnxt_in_error(mpc_queue->bp)))
+ return -EIO;
+
+ nr_bds = (mpc_cmd->msg_size + sizeof(struct tx_bd_mp_cmd) - 1)
+ / sizeof(struct tx_bd_mp_cmd) + 1;
+
+ prod = RING_IDX(ring, mpr->raw_prod);
+ mpc_buf = &mpr->mpc_buf_ring[prod];
+ mpc_buf->mpc_mbuf = mpc_cmd;
+ mpc_buf->nr_bds = nr_bds;
+
+ mpc_bd = &mpr->mpc_desc_ring[prod];
+ memset(mpc_bd, 0, sizeof(struct tx_bd_mp_cmd));
+ mpc_bd->opaque = *opaque;
+ mpc_bd->flags_type = nr_bds << TX_BD_MP_CMD_FLAGS_BD_CNT_SFT;
+ mpc_bd->flags_type |= TX_BD_MP_CMD_TYPE_TX_BD_MP_CMD;
+ mpc_bd->len = mpc_cmd->msg_size;
+
+ /* copy the messages to the subsequent inline bds */
+ for (i = 0; i < nr_bds - 1; i++) {
+ mpr->raw_prod = RING_NEXT(mpr->raw_prod) % BNXT_MPC_NB_DESC;
+ prod = RING_IDX(ring, mpr->raw_prod);
+ mpc_bd = &mpr->mpc_desc_ring[prod];
+ msg_buf = mpc_cmd->msg_data + i * sizeof(struct tx_bd_mp_cmd);
+ memcpy(mpc_bd, msg_buf, sizeof(struct tx_bd_mp_cmd));
+ }
+
+ mpr->raw_prod = RING_NEXT(mpr->raw_prod) % BNXT_MPC_NB_DESC;
+ return 0;
+}
+
+int bnxt_mpc_send(struct bnxt *bp,
+ struct bnxt_mpc_mbuf *in_msg,
+ struct bnxt_mpc_mbuf *out_msg,
+ uint32_t *opaque,
+ bool batch)
+{
+ int rc;
+ struct bnxt_mpc_txq *mpc_queue = bp->mpc->mpc_txq[in_msg->chnl_id];
+ int retry = BNXT_MPC_RX_RETRY;
+ uint32_t pi = 0;
+
+ if (out_msg->cmp_type != CMPL_BASE_TYPE_MID_PATH_SHORT &&
+ out_msg->cmp_type != CMPL_BASE_TYPE_MID_PATH_LONG)
+ return -1;
+
+#ifdef MPC_DEBUG
+ if (mpc_queue == NULL || mpc_queue->mpc_ring == NULL)
+ return -1;
+#endif
+
+ /*
+ * Save the producer index so that if wrapping occurs
+ * it can be detected.
+ */
+ pi = mpc_queue->mpc_ring->raw_prod;
+ rc = bnxt_mpc_xmit(in_msg, mpc_queue, opaque);
+
+ if (unlikely(rc))
+ return -1;
+ /*
+ * If the producer index wraps then toggle the epoch.
+ */
+ if (mpc_queue->mpc_ring->raw_prod < pi)
+ mpc_queue->mpc_ring->epoch = (mpc_queue->mpc_ring->epoch == 0 ? 1 : 0);
+
+ /*
+ * Ring the Tx doorbell.
+ */
+ bnxt_db_mpc_write(&mpc_queue->mpc_ring->db,
+ mpc_queue->mpc_ring->raw_prod,
+ mpc_queue->mpc_ring->epoch);
+
+ if (batch)
+ return 0;
+
+ /* Wait for response */
+ do {
+ rte_delay_us_block(BNXT_MPC_RX_US_DELAY);
+
+ rc = bnxt_mpc_cmd_cmpl(mpc_queue, out_msg);
+
+ if (rc == 1)
+ return 0;
+ retry--;
+ } while (retry);
+
+ return -1;
+}
new file mode 100644
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2020 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_MPC_H_
+#define _BNXT_MPC_H_
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <rte_malloc.h>
+
+/* MPC Batch support */
+extern bool bnxt_tfc_mpc_batch;
+extern uint8_t bnxt_mpc_batch_count;
+
+#define BNXT_MPC_RX_RETRY 100000
+
+#define BNXT_MPC_NB_DESC 128
+#define BNXT_MPC_DESC_THRESH 3
+#define BNXT_MPC_CHNL_SHIFT 16
+#define BNXT_MPC_QIDX_MSK 0xFFFF
+#define BNXT_MPC_CHNL(x) ((x) >> BNXT_MPC_CHNL_SHIFT)
+#define BNXT_MPC_QIDX(x) ((x) & BNXT_MPC_QIDX_MSK)
+#define BNXT_MPC_MAP_INDEX(x, y) (((x) << BNXT_MPC_CHNL_SHIFT) | (y))
+
+#define BNXT_MPC_CHNLS_SUPPORTED 2 /* Limit to MPC TE_CFA and RE_CFA */
+
+/* BNXT_MPC_RINGS_SUPPORTED is set to 1 TE_CFA and 1 RE_CFA ring.
+ * Can be set up to tx_nr_rings * BNXT_MPC_CHNLS_SUPPORTED if needed.
+ */
+#define BNXT_MPC_RINGS_SUPPORTED (1 * BNXT_MPC_CHNLS_SUPPORTED)
+
+/* Defines the number of msgs in an MPC msg completion event.
+ * It is passed as the opaque value into the MPC msg xmit function. The
+ * completion processing uses this value to ring the doorbell correctly to
+ * signal "completion event processing complete" to the hardware.
+ */
+#define BNXT_MPC_COMP_MSG_COUNT 1
+
+/* Defines the uS delay prior to processing an MPC completion */
+#define BNXT_MPC_RX_US_DELAY 1
+
+enum bnxt_mpc_chnl {
+ BNXT_MPC_CHNL_TCE = 0,
+ BNXT_MPC_CHNL_RCE = 1,
+ BNXT_MPC_CHNL_TE_CFA = 2,
+ BNXT_MPC_CHNL_RE_CFA = 3,
+ BNXT_MPC_CHNL_PRIMATE = 4,
+ BNXT_MPC_CHNL_MAX = 5,
+};
+
+struct bnxt_sw_mpc_bd {
+ struct bnxt_mpc_mbuf *mpc_mbuf; /* mpc mbuf associated with mpc bd */
+ unsigned short nr_bds;
+};
+
+struct bnxt_mpc_ring_info {
+ uint16_t raw_prod;
+ uint16_t raw_cons;
+ struct bnxt_db_info db;
+
+ struct tx_bd_mp_cmd *mpc_desc_ring;
+ struct bnxt_sw_mpc_bd *mpc_buf_ring;
+
+ rte_iova_t mpc_desc_mapping;
+
+ uint32_t dev_state;
+
+ struct bnxt_ring *mpc_ring_struct;
+ uint32_t epoch;
+};
+
+struct bnxt_mpc_mbuf {
+ enum bnxt_mpc_chnl chnl_id;
+ uint8_t cmp_type;
+ uint8_t *msg_data;
+ /* MPC msg size in bytes, must be a multiple of 16 bytes */
+ uint16_t msg_size;
+};
+
+struct bnxt_mpc_txq {
+ enum bnxt_mpc_chnl chnl_id;
+ uint32_t queue_idx;
+ uint16_t nb_mpc_desc; /* number of MPC descriptors */
+ uint16_t free_thresh;/* minimum mpc cmds before freeing */
+ int wake_thresh;
+ uint8_t started; /* MPC queue is started */
+
+ struct bnxt *bp;
+ struct bnxt_mpc_ring_info *mpc_ring;
+ unsigned int cp_nr_rings;
+ struct bnxt_cp_ring_info *cp_ring;
+ const struct rte_memzone *mz;
+ struct bnxt_mpc_mbuf **free;
+
+ void (*cmpl_handler_cb)(struct bnxt_mpc_txq *mpc_queue,
+ uint32_t nb_mpc_cmds);
+};
+
+struct bnxt_mpc {
+ uint8_t mpc_chnls_cap;
+ uint8_t mpc_chnls_en;
+ struct bnxt_mpc_txq *mpc_txq[BNXT_MPC_CHNL_MAX];
+};
+
+int bnxt_mpc_open(struct bnxt *bp);
+int bnxt_mpc_close(struct bnxt *bp);
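+/* Example usage (illustrative sketch only: the channel, completion type,
+ * command encoding and the use of BNXT_MPC_COMP_MSG_COUNT as the opaque
+ * value are placeholders the caller must supply):
+ *
+ *   uint8_t cmd[32];   // command payload, multiple of 16 bytes
+ *   uint8_t resp[32];  // buffer for the completion record(s)
+ *   struct bnxt_mpc_mbuf in = {
+ *           .chnl_id  = BNXT_MPC_CHNL_TE_CFA,
+ *           .cmp_type = CMPL_BASE_TYPE_MID_PATH_SHORT,
+ *           .msg_data = cmd,
+ *           .msg_size = sizeof(cmd),
+ *   };
+ *   struct bnxt_mpc_mbuf out = in;
+ *   uint32_t opaque = BNXT_MPC_COMP_MSG_COUNT;
+ *
+ *   out.msg_data = resp;
+ *   if (bnxt_mpc_send(bp, &in, &out, &opaque, false))
+ *           ... handle xmit failure or completion timeout ...
+ */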
+int bnxt_mpc_send(struct bnxt *bp,
+ struct bnxt_mpc_mbuf *in_msg,
+ struct bnxt_mpc_mbuf *out_msg,
+ uint32_t *opaque,
+ bool batch);
+int bnxt_mpc_cmd_cmpl(struct bnxt_mpc_txq *mpc_queue, struct bnxt_mpc_mbuf *out_msg);
+int bnxt_mpc_poll_cmd_cmpls(struct bnxt_mpc_txq *mpc_queue);
+
+#endif
@@ -57,6 +57,7 @@ int bnxt_alloc_ring_grps(struct bnxt *bp)
/* P5 does not support ring groups.
* But we will use the array to save RSS context IDs.
*/
+ /* TODO Revisit for Thor 2 */
if (BNXT_CHIP_P5_P7(bp)) {
bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
} else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
@@ -329,7 +330,7 @@ int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
return 0;
}
-static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
+void bnxt_init_dflt_coal(struct bnxt_coal *coal)
{
/* Tick values in micro seconds.
* 1 coal_buf x bufs_per_record = 1 completion record.
@@ -347,12 +348,12 @@ static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
}
-static void bnxt_set_db(struct bnxt *bp,
- struct bnxt_db_info *db,
- uint32_t ring_type,
- uint32_t map_idx,
- uint32_t fid,
- uint32_t ring_mask)
+void bnxt_set_db(struct bnxt *bp,
+ struct bnxt_db_info *db,
+ uint32_t ring_type,
+ uint32_t map_idx,
+ uint32_t fid,
+ uint32_t ring_mask)
{
if (BNXT_CHIP_P5_P7(bp)) {
int db_offset = DB_PF_OFFSET;
@@ -400,8 +401,8 @@ static void bnxt_set_db(struct bnxt *bp,
db->db_ring_mask = ring_mask;
}
-static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
- struct bnxt_cp_ring_info *cpr)
+int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
+ struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
@@ -37,7 +37,8 @@
#define MAX_CP_DESC_CNT (16 * 1024)
#define INVALID_HW_RING_ID ((uint16_t)-1)
-#define INVALID_STATS_CTX_ID ((uint16_t)-1)
+#define INVALID_STATS_CTX_ID ((uint16_t)-1)
+#define MPC_HW_COS_ID ((uint16_t)-2)
struct bnxt_ring {
void *bd;
@@ -80,6 +81,15 @@ void bnxt_free_async_cp_ring(struct bnxt *bp);
int bnxt_alloc_async_ring_struct(struct bnxt *bp);
int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp);
void bnxt_free_rxtx_nq_ring(struct bnxt *bp);
+void bnxt_init_dflt_coal(struct bnxt_coal *coal);
+int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
+ struct bnxt_cp_ring_info *cpr);
+void bnxt_set_db(struct bnxt *bp,
+ struct bnxt_db_info *db,
+ uint32_t ring_type,
+ uint32_t map_idx,
+ uint32_t fid,
+ uint32_t ring_mask);
static inline void bnxt_db_write(struct bnxt_db_info *db, uint32_t idx)
{
@@ -98,6 +108,22 @@ static inline void bnxt_db_write(struct bnxt_db_info *db, uint32_t idx)
}
}
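+/* Ring the MPC TX ring doorbell, including the epoch bit on chips that
+ * use 64-bit doorbells.
+ */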
+static inline void bnxt_db_mpc_write(struct bnxt_db_info *db, uint32_t idx, uint32_t epoch)
+{
+ uint32_t db_idx = DB_RING_IDX(db, idx);
+ void *doorbell = db->doorbell;
+
+ if (likely(db->db_64)) {
+ uint64_t key_idx = db->db_key64 | db_idx |
+ (epoch << 24);
+ rte_write64_relaxed(key_idx, doorbell);
+ } else {
+ uint32_t key_idx = db->db_key32 | db_idx;
+
+ rte_write32_relaxed(key_idx, doorbell);
+ }
+}
+
/* Ring an NQ doorbell and disable interrupts for the ring. */
static inline void bnxt_db_nq(struct bnxt_cp_ring_info *cpr)
{
@@ -143,4 +169,25 @@ static inline void bnxt_db_cq(struct bnxt_cp_ring_info *cpr)
B_CP_DIS_DB(cpr, cp_raw_cons);
}
}
+
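+/* Ring the MPC completion queue doorbell for the current consumer index,
+ * including the epoch bit on chips that use 64-bit doorbells.
+ */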
+static inline void bnxt_db_mpc_cq(struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_db_info *db = &cpr->cp_db;
+ uint32_t idx = DB_RING_IDX(&cpr->cp_db, cpr->cp_raw_cons);
+
+ if (likely(db->db_64)) {
+ uint64_t key_idx = db->db_key64 | idx |
+ (cpr->epoch << 24);
+ void *doorbell = db->doorbell;
+
+ rte_compiler_barrier();
+ rte_write64_relaxed(key_idx, doorbell);
+ } else {
+ uint32_t cp_raw_cons = cpr->cp_raw_cons;
+
+ rte_compiler_barrier();
+ B_CP_DIS_DB(cpr, cp_raw_cons);
+ }
+}
+
#endif
@@ -3,6 +3,10 @@
* All rights reserved.
*/
+/*!
+ * \file
+ * \brief Exported functions for CFA HW programming
+ */
#ifndef _HCAPI_CFA_H_
#define _HCAPI_CFA_H_
@@ -104,18 +108,7 @@ struct hcapi_cfa_devops {
extern const size_t CFA_RM_HANDLE_DATA_SIZE;
-#if SUPPORT_CFA_HW_ALL
-extern const struct hcapi_cfa_devops cfa_p4_devops;
-extern const struct hcapi_cfa_devops cfa_p58_devops;
-
-#elif defined(SUPPORT_CFA_HW_P4) && SUPPORT_CFA_HW_P4
extern const struct hcapi_cfa_devops cfa_p4_devops;
-uint64_t hcapi_cfa_p4_key_hash(uint64_t *key_data, uint16_t bitlen);
-/* SUPPORT_CFA_HW_P4 */
-#elif defined(SUPPORT_CFA_HW_P58) && SUPPORT_CFA_HW_P58
extern const struct hcapi_cfa_devops cfa_p58_devops;
-uint64_t hcapi_cfa_p58_key_hash(uint64_t *key_data, uint16_t bitlen);
-/* SUPPORT_CFA_HW_P58 */
-#endif
#endif /* HCAPI_CFA_H_ */
@@ -1,12 +1,8 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019-2021 Broadcom
+ * Copyright(c) 2019-2023 Broadcom
* All rights reserved.
*/
-/*!
- * \file
- * \brief Exported functions for CFA HW programming
- */
#ifndef _HCAPI_CFA_DEFS_H_
#define _HCAPI_CFA_DEFS_H_
@@ -23,34 +19,24 @@
#define CFA_BITS_PER_BYTE (8)
#define CFA_BITS_PER_WORD (sizeof(uint32_t) * CFA_BITS_PER_BYTE)
#define __CFA_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#ifndef CFA_ALIGN
#define CFA_ALIGN(x, a) __CFA_ALIGN_MASK((x), (a) - 1)
+#endif
#define CFA_ALIGN_256(x) CFA_ALIGN(x, 256)
#define CFA_ALIGN_128(x) CFA_ALIGN(x, 128)
#define CFA_ALIGN_32(x) CFA_ALIGN(x, 32)
-#define NUM_WORDS_ALIGN_32BIT(x) (CFA_ALIGN_32(x) / CFA_BITS_PER_WORD)
-#define NUM_WORDS_ALIGN_128BIT(x) (CFA_ALIGN_128(x) / CFA_BITS_PER_WORD)
-#define NUM_WORDS_ALIGN_256BIT(x) (CFA_ALIGN_256(x) / CFA_BITS_PER_WORD)
-
/* TODO: redefine according to chip variant */
#define CFA_GLOBAL_CFG_DATA_SZ (100)
-#ifndef SUPPORT_CFA_HW_P4
-#define SUPPORT_CFA_HW_P4 (0)
-#endif
-
-#ifndef SUPPORT_CFA_HW_P45
-#define SUPPORT_CFA_HW_P45 (0)
-#endif
-
-#ifndef SUPPORT_CFA_HW_P58
-#define SUPPORT_CFA_HW_P58 (0)
-#endif
-
-#if SUPPORT_CFA_HW_ALL
#include "hcapi_cfa_p4.h"
#include "hcapi_cfa_p58.h"
-#endif /* SUPPORT_CFA_HW_ALL */
+
+#define CFA_PROF_L2CTXT_TCAM_MAX_FIELD_CNT CFA_P58_PROF_L2_CTXT_TCAM_MAX_FLD
+#define CFA_PROF_L2CTXT_REMAP_MAX_FIELD_CNT CFA_P58_PROF_L2_CTXT_RMP_DR_MAX_FLD
+#define CFA_PROF_MAX_KEY_CFG_SZ sizeof(struct cfa_p58_prof_key_cfg)
+#define CFA_KEY_MAX_FIELD_CNT 0
+#define CFA_ACT_MAX_TEMPLATE_SZ 0
/*
* Hashing defines
@@ -61,6 +47,7 @@
#define ucrc32(ch, crc) (crc32tbl[((crc) ^ (ch)) & 0xff] ^ ((crc) >> 8))
#define crc32(x, y) crc32i(~0, x, y)
+
/**
* CFA HW version definition
*/
@@ -68,7 +55,8 @@ enum hcapi_cfa_ver {
HCAPI_CFA_P40 = 0, /**< CFA phase 4.0 */
HCAPI_CFA_P45 = 1, /**< CFA phase 4.5 */
HCAPI_CFA_P58 = 2, /**< CFA phase 5.8 */
- HCAPI_CFA_PMAX = 3
+ HCAPI_CFA_P59 = 3, /**< CFA phase 5.9 */
+ HCAPI_CFA_PMAX = 4
};
/**
@@ -100,7 +88,7 @@ enum hcapi_cfa_hwops {
* operation is also undo the add operation
* performed by the HCAPI_CFA_HWOPS_ADD op.
*/
- HCAPI_CFA_HWOPS_EVICT, /*< This operation is used to evict entries from
+ HCAPI_CFA_HWOPS_EVICT, /*< This operation is used to evict entries from
* CFA cache memories. This operation is only
* applicable to tables that use CFA caches.
*/
@@ -116,6 +104,44 @@ enum hcapi_cfa_key_ctrlops {
HCAPI_CFA_KEY_CTRLOPS_MAX
};
+/**
+ * CFA HW field structure definition
+ */
+struct hcapi_cfa_field {
+ /** [in] Starting bit position pf the HW field within a HW table
+ * entry.
+ */
+ uint16_t bitpos;
+ /** [in] Number of bits for the HW field. */
+ uint16_t bitlen;
+};
+
+/**
+ * CFA HW table entry layout structure definition
+ */
+struct hcapi_cfa_layout {
+ /** [out] Bit order of layout */
+ bool is_msb_order;
+ /** [out] Size in bits of entry */
+ uint32_t total_sz_in_bits;
+ /** [out] data pointer of the HW layout fields array */
+ struct hcapi_cfa_field *field_array;
+ /** [out] number of HW field entries in the HW layout field array */
+ uint32_t array_sz;
+ /** [out] layout_id - layout id associated with the layout */
+ uint16_t layout_id;
+};
+
+/**
+ * CFA HW data object definition
+ */
+struct hcapi_cfa_data_obj {
+ /** [in] HW field identifier. Used as an index to a HW table layout */
+ uint16_t field_id;
+ /** [in] Value of the HW field */
+ uint64_t val;
+};
+
/**
* CFA HW definition
*/
@@ -294,6 +320,91 @@ struct hcapi_cfa_key_loc {
uint32_t mem_idx;
};
+/**
+ * CFA HW layout table definition
+ */
+struct hcapi_cfa_layout_tbl {
+ /** [out] data pointer to an array of fix formatted layouts supported.
+ * The index to the array is the CFA HW table ID
+ */
+ const struct hcapi_cfa_layout *tbl;
+ /** [out] number of fix formatted layouts in the layout array */
+ uint16_t num_layouts;
+};
+
+/**
+ * Key template consists of key fields that can be enabled/disabled
+ * individually.
+ */
+struct hcapi_cfa_key_template {
+ /** [in] key field enable array; set the corresponding field enable
+ * to 1 to make a field valid
+ */
+ uint8_t field_en[CFA_KEY_MAX_FIELD_CNT];
+ /** [in] Identify if the key template is for TCAM. If false, the
+ * key template is for EM. This field is mandatory for devices that
+ * only support fixed key formats.
+ */
+ bool is_wc_tcam_key;
+ /** [in] Identify if the key template will be used for IPv6 keys.
+ *
+ */
+ bool is_ipv6_key;
+};
+
+/**
+ * key layout consist of field array, key bitlen, key ID, and other meta data
+ * pertain to a key
+ */
+struct hcapi_cfa_key_layout {
+ /** [out] key layout data */
+ struct hcapi_cfa_layout *layout;
+ /** [out] actual key size in number of bits */
+ uint16_t bitlen;
+ /** [out] key identifier; this field is only valid for devices
+ * that support fixed key formats
+ */
+ uint16_t id;
+ /** [out] Identified the key layout is WC TCAM key */
+ bool is_wc_tcam_key;
+ /** [out] Identify if the key template will be used for IPv6 keys.
+ *
+ */
+ bool is_ipv6_key;
+ /** [out] total slices size, valid for WC TCAM key only. It can be
+ * used by the user to determine the total size of WC TCAM key slices
+ * in bytes.
+ */
+ uint16_t slices_size;
+};
+
+/**
+ * key layout memory contents
+ */
+struct hcapi_cfa_key_layout_contents {
+ /** key layouts */
+ struct hcapi_cfa_key_layout key_layout;
+
+ /** layout */
+ struct hcapi_cfa_layout layout;
+
+ /** fields */
+ struct hcapi_cfa_field field_array[CFA_KEY_MAX_FIELD_CNT];
+};
+
+/**
+ * Action template consists of action fields that can be enabled/disabled
+ * individually.
+ */
+struct hcapi_cfa_action_template {
+ /** [in] CFA version for the action template */
+ enum hcapi_cfa_ver hw_ver;
+ /** [in] action field enable array; set the corresponding field enable
+ * to 1 to make a field valid
+ */
+ uint8_t data[CFA_ACT_MAX_TEMPLATE_SZ];
+};
+
/**
* Action record info
*/
@@ -332,6 +443,421 @@ struct hcapi_cfa_action_obj {
struct hcapi_cfa_action_layout *layout;
};
+/**
+ * action layout consist of field array, action wordlen and action format ID
+ */
+struct hcapi_cfa_action_layout {
+ /** [in] action identifier */
+ uint16_t id;
+ /** [out] action layout data */
+ struct hcapi_cfa_layout *layout;
+ /** [out] actual action record size in number of bits */
+ uint16_t bitlen;
+};
+
+/**
+ * CFA backing store type definition
+ */
+enum hcapi_cfa_bs_type {
+ HCAPI_CFA_BS_TYPE_LKUP, /**< EM LKUP backing store type */
+ HCAPI_CFA_BS_TYPE_ACT, /**< Action backing store type */
+ HCAPI_CFA_BS_TYPE_MAX
+};
+
+/**
+ * CFA backing store configuration data object
+ */
+struct hcapi_cfa_bs_cfg {
+ enum hcapi_cfa_bs_type type;
+ uint16_t tbl_scope;
+ struct hcapi_cfa_bs_db *bs_db;
+};
+
+/**
+ * CFA backing store data base object
+ */
+struct hcapi_cfa_bs_db {
+ /** [in] memory manager database signature */
+ uint32_t signature;
+#define HCAPI_CFA_BS_SIGNATURE 0xCFA0B300
+ /** [in] memory manager database base pointer (VA) */
+ void *mgmt_db;
+ /** [in] memory manager database size in bytes */
+ uint32_t mgmt_db_sz;
+ /** [in] Backing store memory pool base pointer
+ * (VA, backed by an IOVA that is DMA accessible)
+ */
+ void *bs_ptr;
+ /** [in] bs_offset - byte offset to the section of the
+ * backing store memory managed by the backing store
+ * memory manager.
+ * For EM backing store, this is the starting byte
+ * offset to the EM record memory.
+ * For Action backing store, this offset is 0.
+ */
+ uint32_t offset;
+ /** [in] backing store memory pool size in bytes
+ */
+ uint32_t bs_sz;
+};
+
+/**
+ * \defgroup CFA_HCAPI_PUT_API
+ * HCAPI used for writing to the hardware
+ * @{
+ */
+
+/**
+ * This API provides the functionality to program a specified value to a
+ * HW field based on the provided programming layout.
+ *
+ * @param[in,out] data_buf
+ * A data pointer to a CFA HW key/mask data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in] field_id
+ * ID of the HW field to be programmed
+ *
+ * @param[in] val
+ * Value of the HW field to be programmed
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int hcapi_cfa_put_field(uint64_t *data_buf,
+ const struct hcapi_cfa_layout *layout,
+ uint16_t field_id, uint64_t val);
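+/* Illustrative sketch only: real layouts come from the device-specific
+ * layout tables and real field ids from the matching enums; the two-field
+ * layout below is made up to show the calling pattern.
+ *
+ *   struct hcapi_cfa_field fields[] = {
+ *           { .bitpos = 0,  .bitlen = 12 },  // field id 0
+ *           { .bitpos = 12, .bitlen = 20 },  // field id 1
+ *   };
+ *   struct hcapi_cfa_layout layout = {
+ *           .is_msb_order = false,
+ *           .total_sz_in_bits = 32,
+ *           .field_array = fields,
+ *           .array_sz = 2,
+ *   };
+ *   uint64_t obj_data[1] = { 0 };
+ *
+ *   if (hcapi_cfa_put_field(obj_data, &layout, 1, 0xABCDE))
+ *           ... handle error ...
+ */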
+
+/**
+ * This API provides the functionality to program an array of field values
+ * with corresponding field IDs to a number of profiler sub-block fields
+ * based on the fixed profiler sub-block hardware programming layout.
+ *
+ * @param[in, out] obj_data
+ * A pointer to a CFA profiler key/mask object data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in] field_tbl
+ * A pointer to an array that consists of the object field
+ * ID/value pairs
+ *
+ * @param[in] field_tbl_sz
+ * Number of entries in the table
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int hcapi_cfa_put_fields(uint64_t *obj_data,
+ const struct hcapi_cfa_layout *layout,
+ struct hcapi_cfa_data_obj *field_tbl,
+ uint16_t field_tbl_sz);
+/**
+ * This API provides the functionality to program an array of field values
+ * with corresponding field IDs to a number of profiler sub-block fields
+ * based on the fixed profiler sub-block hardware programming layout. This
+ * API will swap the n byte blocks before programming the field array.
+ *
+ * @param[in, out] obj_data
+ * A pointer to a CFA profiler key/mask object data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in] field_tbl
+ * A pointer to an array that consists of the object field
+ * ID/value pairs
+ *
+ * @param[in] field_tbl_sz
+ * Number of entries in the table
+ *
+ * @param[in] data_size
+ * size of the data in bytes
+ *
+ * @param[in] n
+ * block size in bytes
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int hcapi_cfa_put_fields_swap(uint64_t *obj_data,
+ const struct hcapi_cfa_layout *layout,
+ struct hcapi_cfa_data_obj *field_tbl,
+ uint16_t field_tbl_sz, uint16_t data_size,
+ uint16_t n);
+/**
+ * This API provides the functionality to write a value to a
+ * field within the bit position and bit length of a HW data
+ * object based on a provided programming layout.
+ *
+ * @param[in,out] obj_data
+ * A pointer to the data object to be written
+ *
+ * @param[in] layout
+ * A pointer of the programming layout
+ *
+ * @param[in] field_id
+ * Identifier of the HW field
+ *
+ * @param[in] bitpos_adj
+ * Bit position adjustment value
+ *
+ * @param[in] bitlen_adj
+ * Bit length adjustment value
+ *
+ * @param[in] val
+ * HW field value to be programmed
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int hcapi_cfa_put_field_rel(uint64_t *obj_data,
+ const struct hcapi_cfa_layout *layout,
+ uint16_t field_id, int16_t bitpos_adj,
+ int16_t bitlen_adj, uint64_t val);
+
+/*@}*/
+
+/**
+ * \defgroup CFA_HCAPI_GET_API
+ * HCAPI used for reading from the hardware
+ * @{
+ */
+
+/**
+ * This API provides the functionality to get the word length of
+ * a layout object.
+ *
+ * @param[in] layout
+ * A pointer of the HW layout
+ *
+ * @return
+ * Word length of the layout object
+ */
+uint16_t hcapi_cfa_get_wordlen(const struct hcapi_cfa_layout *layout);
+
+/**
+ * The API provides the functionality to get bit offset and bit
+ * length information of a field from a programming layout.
+ *
+ * @param[in] layout
+ * A pointer of the action layout
+ *
+ * @param[out] slice
+ * A pointer to the action offset info data structure
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int hcapi_cfa_get_slice(const struct hcapi_cfa_layout *layout,
+ uint16_t field_id, struct hcapi_cfa_field *slice);
+
+/**
+ * This API provides the functionality to read the value of a
+ * CFA HW field from CFA HW data object based on the hardware
+ * programming layout.
+ *
+ * @param[in] obj_data
+ * A pointer to a CFA HW key/mask object data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in] field_id
+ * ID of the HW field to be programmed
+ *
+ * @param[out] val
+ * Value of the HW field
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int hcapi_cfa_get_field(uint64_t *obj_data,
+ const struct hcapi_cfa_layout *layout,
+ uint16_t field_id, uint64_t *val);
+
+/**
+ * This API provides the functionality to read 128-bit value of
+ * a CFA HW field from CFA HW data object based on the hardware
+ * programming layout.
+ *
+ * @param[in] obj_data
+ * A pointer to a CFA HW key/mask object data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in] field_id
+ * ID of the HW field to be programmed
+ *
+ * @param[out] val_msb
+ * Msb value of the HW field
+ *
+ * @param[out] val_lsb
+ * Lsb value of the HW field
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int hcapi_cfa_get128_field(uint64_t *obj_data,
+ const struct hcapi_cfa_layout *layout,
+ uint16_t field_id, uint64_t *val_msb,
+ uint64_t *val_lsb);
+
+/**
+ * This API provides the functionality to read a number of
+ * HW fields from a CFA HW data object based on the hardware
+ * programming layout.
+ *
+ * @param[in] obj_data
+ * A pointer to a CFA profiler key/mask object data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in, out] field_tbl
+ * A pointer to an array that consists of the object field
+ * ID/value pairs
+ *
+ * @param[in] field_tbl_sz
+ * Number of entries in the table
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int hcapi_cfa_get_fields(uint64_t *obj_data,
+ const struct hcapi_cfa_layout *layout,
+ struct hcapi_cfa_data_obj *field_tbl,
+ uint16_t field_tbl_sz);
+
+/**
+ * This API provides the functionality to read a number of
+ * HW fields from a CFA HW data object based on the hardware
+ * programming layout. This API will swap the n byte blocks before
+ * retrieving the field array.
+ *
+ * @param[in] obj_data
+ * A pointer to a CFA profiler key/mask object data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in, out] field_tbl
+ * A pointer to an array that consists of the object field
+ * ID/value pairs
+ *
+ * @param[in] field_tbl_sz
+ * Number of entries in the table
+ *
+ * @param[in] data_size
+ * size of the data in bytes
+ *
+ * @param[in] n
+ * block size in bytes
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int hcapi_cfa_get_fields_swap(uint64_t *obj_data,
+ const struct hcapi_cfa_layout *layout,
+ struct hcapi_cfa_data_obj *field_tbl,
+ uint16_t field_tbl_sz, uint16_t data_size,
+ uint16_t n);
+
+/**
+ * Get a value to a specific location relative to a HW field
+ *
+ * This API provides the functionality to read HW field from
+ * a section of a HW data object identified by the bit position
+ * and bit length from a given programming layout in order to avoid
+ * reading the entire HW data object.
+ *
+ * @param[in] obj_data
+ * A pointer of the data object to read from
+ *
+ * @param[in] layout
+ * A pointer of the programming layout
+ *
+ * @param[in] field_id
+ * Identifier of the HW field
+ *
+ * @param[in] bitpos_adj
+ * Bit position adjustment value
+ *
+ * @param[in] bitlen_adj
+ * Bit length adjustment value
+ *
+ * @param[out] val
+ * Value of the HW field
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int hcapi_cfa_get_field_rel(uint64_t *obj_data,
+ const struct hcapi_cfa_layout *layout,
+ uint16_t field_id, int16_t bitpos_adj,
+ int16_t bitlen_adj, uint64_t *val);
+
+/**
+ * Get the length of the layout in words
+ *
+ * @param[in] layout
+ * A pointer to the layout to determine the number of words
+ * required
+ *
+ * @return
+ * number of words needed for the given layout
+ */
+uint16_t cfa_hw_get_wordlen(const struct hcapi_cfa_layout *layout);
+
+/**
+ * This function is used to initialize a layout_contents structure
+ *
+ * The struct hcapi_cfa_key_layout is complex as there are three
+ * layers of abstraction. Each of those layers needs to be properly
+ * initialized.
+ *
+ * @param[in] contents
+ * A pointer of the layout contents to initialize
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int hcapi_cfa_init_key_contents(struct hcapi_cfa_key_layout_contents *contents);
+
+/**
+ * This function is used to validate a key template
+ *
+ * The struct hcapi_cfa_key_template is complex as there are three
+ * layers of abstraction. Each of those layers needs to be properly
+ * validated.
+ *
+ * @param[in] key_template
+ * A pointer of the key template contents to validate
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int hcapi_cfa_is_valid_key_template(struct hcapi_cfa_key_template *key_template);
+
+/**
+ * This function is used to validate a key layout
+ *
+ * The struct hcapi_cfa_key_layout is complex as there are three
+ * layers of abstraction. Each of those layers needs to be properly
+ * validated.
+ *
+ * @param[in] key_layout
+ * A pointer of the key layout contents to validate
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int hcapi_cfa_is_valid_key_layout(struct hcapi_cfa_key_layout *key_layout);
+
/**
* This function is used to hash E/EM keys
*
new file mode 100644
@@ -0,0 +1,92 @@
+#
+# Copyright(c) 2019-2021 Broadcom Limited, all rights reserved
+# Contains proprietary and confidential information.
+#
+# This source file is the property of Broadcom Limited, and
+# may not be copied or distributed in any isomorphic form without
+# the prior written consent of Broadcom Limited.
+#
+
+# Needed for compilation with chip-specific regdef.h
+add_definitions(-DFIRMWARE_VIEW=1)
+
+# Platform specific defines
+if (cfa_p70)
+ add_definitions(-DSUPPORT_CFA_HW_P70=1)
+ set(PXX_FOLDER p70)
+ set (tsm_needed 1)
+ set (mm_needed 1)
+endif ()
+
+if (cfa_p80)
+ add_definitions(-DSUPPORT_CFA_HW_P80=1)
+ set(PXX_FOLDER p80)
+ set (tcm_needed 1)
+endif ()
+
+# Reset Doc dir variables
+set(CFA_API_DOC_DIRS "" CACHE INTERNAL "")
+set(CFA_DESIGN_DOC_DIRS "" CACHE INTERNAL "")
+set(CFA_UT_DOC_DIRS "" CACHE INTERNAL "")
+
+# Include sub directories
+
+if (idm_needed)
+ add_subdirectory(idm)
+ set(idm_libs cfa-idm-lib cfa-idm-lib-ut)
+endif ()
+
+if (tbm_needed)
+ add_subdirectory(tbm)
+ set(tbm_libs cfa-tbm-lib cfa-tbm-lib-ut)
+endif ()
+
+if (gim_needed)
+ add_subdirectory(gim)
+ set(gim_libs cfa-gim-lib cfa-gim-lib-ut)
+endif ()
+
+if (mm_needed)
+ add_subdirectory(mm)
+ set(mm_libs cfa-mm-lib cfa-mm-lib-ut)
+endif ()
+
+if (tsm_needed)
+ add_subdirectory(tpm)
+ add_subdirectory(tim)
+ set(tim_libs cfa-tim-lib cfa-tim-lib-ut)
+ set(tpm_libs cfa-tpm-lib cfa-tpm-lib-ut)
+endif ()
+
+if (tcm_needed)
+ add_subdirectory(tcm)
+ set(tcm_libs cfa-tcm-lib cfa-tcm-lib-ut)
+endif ()
+
+if (rdm_needed)
+ add_subdirectory(rdm)
+ set(rdm_libs cfa-rdm-lib cfa-rdm-lib-ut)
+endif ()
+
+# Update Doxygen dirs for api documentation
+#set(CFA_API_DOC_DIRS ${CFA_API_DOC_DIRS}
+# ${CMAKE_CURRENT_SOURCE_DIR}/include
+# CACHE INTERNAL "")
+
+# Update Doxygen dirs for design documentation
+#set(CFA_DESIGN_DOC_DIRS ${CFA_DESIGN_DOC_DIRS}
+# ${CMAKE_CURRENT_SOURCE_DIR}/include
+# CACHE INTERNAL "")
+
+# Include docs
+#if (DOXYGEN_FOUND)
+# add_subdirectory(docs)
+# add_custom_target(cfa-v3-docs
+# DEPENDS hcapi-cfa-api-docs
+# hcapi-cfa-design-docs
+# hcapi-cfa-ut-docs
+# )
+#endif (DOXYGEN_FOUND)
+
+add_custom_target(cfa-v3-libs
+ ALL
+ DEPENDS ${tpm_libs} ${tim_libs} ${mm_libs})
new file mode 100644
@@ -0,0 +1,42 @@
+/****************************************************************************
+ * Copyright(c) 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld_mpc.c
+ *
+ * @brief CFA Builder MPC binding api
+ */
+
+#include <errno.h>
+#include "cfa_bld.h"
+#include "host/cfa_bld_mpcops.h"
+
+#if SUPPORT_CFA_HW_P70
+#include "cfa_bld_p70_mpcops.h"
+#endif
+
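+/* Bind the MPC builder operations for the given chip revision into
+ * '*mpcinfo'.
+ *
+ * Illustrative usage (sketch; error handling is up to the caller):
+ *
+ *   struct cfa_bld_mpcinfo mpcinfo;
+ *
+ *   if (cfa_bld_mpc_bind(CFA_P70, &mpcinfo))
+ *           ... chip revision not supported or invalid argument ...
+ */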
+int cfa_bld_mpc_bind(enum cfa_ver hw_ver, struct cfa_bld_mpcinfo *mpcinfo)
+{
+ if (!mpcinfo)
+ return -EINVAL;
+
+ switch (hw_ver) {
+ case CFA_P40:
+ case CFA_P45:
+ case CFA_P58:
+ case CFA_P59:
+ return -ENOTSUP;
+ case CFA_P70:
+#if SUPPORT_CFA_HW_P70
+ return cfa_bld_p70_mpc_bind(hw_ver, mpcinfo);
+#else
+ return -ENOTSUP;
+#endif
+ default:
+ return -EINVAL;
+ }
+}
new file mode 100644
@@ -0,0 +1,578 @@
+/****************************************************************************
+ * Copyright(c) 2021 - 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld_defs.h
+ *
+ * @brief CFA Builder library structure definitions and API
+ */
+
+#ifndef _CFA_BLD_DEFS_H_
+#define _CFA_BLD_DEFS_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "cfa_resources.h"
+#include "cfa_types.h"
+
+/**
+ * @addtogroup CFA_BLD CFA Builder Library
+ * \ingroup CFA_V3
+ * The CFA builder library is a set of APIs that provide the following services:
+ *
+ * 1. Provide users a generic put service to convert software programming data
+ *    into a hardware data bit stream according to a HW layout representation,
+ *    and a generic get service to extract the value of a field, or the values
+ *    of a number of fields, from the raw hardware data bit stream according
+ *    to a HW layout.
+ *
+ * - Software programming data is represented as a {field_idx, val}
+ * structure.
+ * - A HW layout is represented as an array of CFA field structures with
+ * {bitpos, bitlen} and is identified by a layout id corresponding to a CFA
+ * HW table.
+ * - A HW data bit stream is a sequence of bits formatted according to a HW
+ * layout representation.
+ *
+ * 2. Provide EM/WC key and action related service APIs to compile layout,
+ * init, and manipulate key and action data objects.
+ *
+ * 3. Provide CFA mid-path message building APIs. (TBD)
+ *
+ * The CFA builder library is designed to run in the primate firmware and also
+ * as part of the following host-based diagnostic software:
+ * - Lcdiag
+ * - Truflow CLI
+ * - coredump decoder
+ *
+ * @{
+ */
+
+/** @name CFA Builder Common Definition
+ * CFA builder common structures and enumerations
+ */
+
+/**@{*/
+/**
+ * CFA HW KEY CONTROL OPCODE definition
+ */
+enum cfa_key_ctrlops {
+ CFA_KEY_CTRLOPS_INSERT, /**< insert WC control bits */
+ CFA_KEY_CTRLOPS_STRIP, /**< strip WC control bits */
+ CFA_KEY_CTRLOPS_SWAP, /**< swap EM cache lines */
+ CFA_KEY_CTRLOPS_MAX
+};
+
+/**
+ * CFA HW field structure definition
+ */
+struct cfa_field {
+ /** [in] Starting bit position of the HW field within a HW table
+ * entry.
+ */
+ uint16_t bitpos;
+ /** [in] Number of bits for the HW field. */
+ uint16_t bitlen;
+};
+
+/**
+ * CFA HW table entry layout structure definition
+ */
+struct cfa_layout {
+ /** [out] Bit order of the layout.
+ * If swap_order_bitpos is non-zero, the bit order of the layout
+ * is swapped after that bit; swap_order_bitpos must be a
+ * multiple of 64. This is currently only used for inlined action
+ * records where the AR is lsb and the following inlined actions
+ * must be msb.
+ */
+ bool is_msb_order;
+ /** [out] Reverse is_msb_order after this bit if non-zero */
+ uint16_t swap_order_bitpos;
+ /** [out] Size in bits of entry */
+ uint32_t total_sz_in_bits;
+ /** [in/out] data pointer of the HW layout fields array */
+ struct cfa_field *field_array;
+ /** [out] number of HW field entries in the HW layout field array */
+ uint32_t array_sz;
+ /** [out] layout_id - layout id associated with the layout */
+ uint16_t layout_id;
+};
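+
+/*
+ * Illustrative sketch (hypothetical two-field table): how a fixed layout ties
+ * field ids to bit positions within a HW entry. Real layouts are generated
+ * per device and retrieved via the devops layout hooks.
+ *
+ *     static struct cfa_field my_tbl_fields[] = {
+ *             { .bitpos = 0, .bitlen = 1 },     field id 0, e.g. a VALID bit
+ *             { .bitpos = 1, .bitlen = 10 },    field id 1
+ *     };
+ *     static const struct cfa_layout my_tbl_layout = {
+ *             .is_msb_order = false,
+ *             .total_sz_in_bits = 11,
+ *             .field_array = my_tbl_fields,
+ *             .array_sz = 2,
+ *     };
+ */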
+
+/**
+ * CFA HW data object definition
+ */
+struct cfa_data_obj {
+ /** [in] HW field identifier. Used as an index to a HW table layout */
+ uint16_t field_id;
+ /** [in] Value of the HW field */
+ uint64_t val;
+};
+
+/**
+ * CFA HW key buffer definition
+ */
+struct cfa_key_obj {
+ /** [in] pointer to the key data buffer */
+ uint32_t *data;
+ /** [in] buffer len in bytes */
+ uint32_t data_len_bytes;
+ /** [out] Data length in bits
+ * When cfa_key_obj is passed as an output parameter, the updated
+ * key length (if the key length changes) is returned in this field by the
+ * key processing API (e.g. cfa_bld_key_transform).
+ * When cfa_key_obj is passed as an input parameter, this field is unused
+ * and need not be initialized by the caller.
+ */
+ uint32_t data_len_bits;
+ /** [in] Pointer to the key layout */
+ struct cfa_key_layout *layout;
+};
+
+/**
+ * CFA HW layout table definition
+ */
+struct cfa_layout_tbl {
+ /** [out] data pointer to an array of fixed-format layouts supported.
+ * The index to the array is either the CFA resource subtype or
+ * remap table ID
+ */
+ const struct cfa_layout *layouts;
+ /** [out] number of fixed-format layouts in the layout array */
+ uint16_t num_layouts;
+};
+
+/**
+ * A key layout consists of a field array, key bitlen, key ID, and other
+ * metadata pertaining to a key
+ */
+struct cfa_key_layout {
+ /** [in/out] key layout data */
+ struct cfa_layout *layout;
+ /** [out] actual key size in number of bits */
+ uint16_t bitlen;
+ /** [out] key identifier; this field is only valid for devices
+ * that support fixed key formats
+ */
+ uint16_t id;
+ /** [out] Identifies whether the key layout is a WC TCAM key */
+ bool is_wc_tcam_key;
+ /** [out] Identifies whether the key template will be used for IPv6 keys.
+ *
+ * Note: This is important for Thor2 as the field length for the FlowId
+ * is dependent on the L3 flow type. For Thor2 for IPv4 Keys, the Flow
+ * Id field is 16 bits, for all other types (IPv6, ARP, PTP, EAP, RoCE,
+ * FCoE, UPAR), the Flow Id field length is 20 bits.
+ */
+ bool is_ipv6_key;
+ /** [out] total number of slices, valid for WC TCAM key only. It can be
+ * used by the user to pass in the num_slices to write to the hardware.
+ */
+ uint16_t num_slices;
+};
+
+/**
+ * CFA HW key table definition
+ *
+ * Applicable to EEM and on-chip EM table only.
+ */
+struct cfa_key_tbl {
+ /** [in] For EEM, this is the KEY0 base mem pointer. For off-chip EM,
+ * this is the base mem pointer of the key table.
+ */
+ uint8_t *base0;
+ /** [in] total size of the key table in bytes. For EEM, this size is
+ * same for both KEY0 and KEY1 table.
+ */
+ uint32_t size;
+ /** [in] number of key buckets, applicable for newer chips */
+ uint32_t num_buckets;
+ /** [in] For EEM, this is the KEY1 base mem pointer. For on-chip EM,
+ * this is the key record memory base pointer within the key table,
+ * applicable for newer chips
+ */
+ uint8_t *base1;
+ /** [in] Optional - If the table is managed by a Backing Store
+ * database, then this object can be used to configure the EM Key.
+ */
+ struct cfa_bs_db *bs_db;
+ /** [in] Page size for EEM tables */
+ uint32_t page_size;
+};
+
+/**
+ * CFA HW key data definition
+ */
+struct cfa_key_data {
+ /** [in] For on-chip key table, it is the offset in unit of smallest
+ * key. For off-chip key table, it is the byte offset relative
+ * to the key record memory base and adjusted for page and entry size.
+ */
+ uint32_t offset;
+ /** [in] HW key data buffer pointer */
+ uint8_t *data;
+ /** [in] size of the key in bytes */
+ uint16_t size;
+ /** [in] optional table scope ID */
+ uint8_t tbl_scope;
+ /** [in] the fid owner of the key; it is stored with the bucket and
+ * can be used by the caller to retrieve the key later via the GET
+ * HW OP.
+ */
+ uint64_t metadata;
+};
+
+/**
+ * CFA HW key location definition
+ */
+struct cfa_key_loc {
+ /** [out] on-chip EM bucket offset or off-chip EM bucket mem pointer */
+ uint64_t bucket_mem_ptr;
+ /** [out] off-chip EM key offset mem pointer */
+ uint64_t mem_ptr;
+ /** [out] index within the array of the EM buckets */
+ uint32_t bucket_mem_idx;
+ /** [out] index within the EM bucket */
+ uint8_t bucket_idx;
+ /** [out] index within the EM records */
+ uint32_t mem_idx;
+};
+
+/**
+ * Action record info
+ */
+struct cfa_action_addr {
+ /** [in] action SRAM block ID for on-chip action records or table
+ * scope of the action backing store
+ */
+ uint16_t blk_id;
+ /** [in] ar_id or cache line aligned address offset for the action
+ * record
+ */
+ uint32_t offset;
+};
+
+/**
+ * Action object definition
+ */
+struct cfa_action_obj {
+ /** [in] pointer to the action data buffer */
+ uint64_t *data;
+ /** [in] buffer len in bytes */
+ uint32_t len;
+ /** [in] pointer to the action layout */
+ struct cfa_action_layout *layout;
+};
+
+/**
+ * An action layout consists of a field array, action wordlen and action format ID
+ */
+struct cfa_action_layout {
+ /** [in] action identifier */
+ uint16_t id;
+ /** [out] action layout data */
+ struct cfa_layout *layout;
+ /** [out] actual action record size in number of bits */
+ uint16_t bitlen;
+};
+
+/**@}*/
+
+/** @name CFA Builder PUT_FIELD APIs
+ * CFA Manager APIs used for generating HW layout specific data objects that
+ * can be programmed to the hardware
+ */
+
+/**@{*/
+/**
+ * @brief This API provides the functionality to program a specified value to a
+ * HW field based on the provided programming layout.
+ *
+ * @param[in,out] data_buf
+ * A data pointer to a CFA HW key/mask data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in] field_id
+ * ID of the HW field to be programmed
+ *
+ * @param[in] val
+ * Value of the HW field to be programmed
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int cfa_put_field(uint64_t *data_buf, const struct cfa_layout *layout,
+ uint16_t field_id, uint64_t val);
+
+/**
+ * @brief This API provides the functionality to program an array of field
+ * values with corresponding field IDs to a number of profiler sub-block fields
+ * based on the fixed profiler sub-block hardware programming layout.
+ *
+ * @param[in, out] obj_data
+ * A pointer to a CFA profiler key/mask object data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in] field_tbl
+ * A pointer to an array that consists of the object field
+ * ID/value pairs
+ *
+ * @param[in] field_tbl_sz
+ * Number of entries in the table
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int cfa_put_fields(uint64_t *obj_data, const struct cfa_layout *layout,
+ struct cfa_data_obj *field_tbl, uint16_t field_tbl_sz);
+
+/**
+ * @brief This API provides the functionality to program an array of field
+ * values with corresponding field IDs to a number of profiler sub-block fields
+ * based on the fixed profiler sub-block hardware programming layout. This
+ * API will swap the n byte blocks before programming the field array.
+ *
+ * @param[in, out] obj_data
+ * A pointer to a CFA profiler key/mask object data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in] field_tbl
+ * A pointer to an array that consists of the object field
+ * ID/value pairs
+ *
+ * @param[in] field_tbl_sz
+ * Number of entries in the table
+ *
+ * @param[in] data_size
+ * size of the data in bytes
+ *
+ * @param[in] n
+ * block size in bytes
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int cfa_put_fields_swap(uint64_t *obj_data, const struct cfa_layout *layout,
+ struct cfa_data_obj *field_tbl, uint16_t field_tbl_sz,
+ uint16_t data_size, uint16_t n);
+
+/**
+ * @brief This API provides the functionality to write a value to a
+ * field within the bit position and bit length of a HW data
+ * object based on a provided programming layout.
+ *
+ * @param[in, out] obj_data
+ * A pointer of the action object to be initialized
+ *
+ * @param[in] layout
+ * A pointer of the programming layout
+ *
+ * @param[in] field_id
+ * Identifier of the HW field
+ *
+ * @param[in] bitpos_adj
+ * Bit position adjustment value
+ *
+ * @param[in] bitlen_adj
+ * Bit length adjustment value
+ *
+ * @param[in] val
+ * HW field value to be programmed
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int cfa_put_field_rel(uint64_t *obj_data, const struct cfa_layout *layout,
+ uint16_t field_id, int16_t bitpos_adj, int16_t bitlen_adj,
+ uint64_t val);
+
+/**@}*/
+
+/** @name CFA Builder GET_FIELD APIs
+ * CFA Manager APIs used to extract HW layout specific fields from CFA HW
+ * data objects
+ */
+
+/**@{*/
+/**
+ * @brief The API provides the functionality to get bit offset and bit
+ * length information of a field from a programming layout.
+ *
+ * @param[in] layout
+ * A pointer of the action layout
+ *
+ * @param[in] field_id
+ * The field for which to retrieve the slice
+ *
+ * @param[out] slice
+ * A pointer to the action offset info data structure
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int cfa_get_slice(const struct cfa_layout *layout, uint16_t field_id,
+ struct cfa_field *slice);
+
+/**
+ * @brief This API provides the functionality to read the value of a
+ * CFA HW field from CFA HW data object based on the hardware
+ * programming layout.
+ *
+ * @param[in] obj_data
+ * A pointer to a CFA HW key/mask object data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in] field_id
+ * ID of the HW field to be read
+ *
+ * @param[out] val
+ * Value of the HW field
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int cfa_get_field(uint64_t *obj_data, const struct cfa_layout *layout,
+ uint16_t field_id, uint64_t *val);
+
+/**
+ * @brief This API provides the functionality to read the 128-bit value of
+ * a CFA HW field from CFA HW data object based on the hardware
+ * programming layout.
+ *
+ * @param[in] obj_data
+ * A pointer to a CFA HW key/mask object data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in] field_id
+ * ID of the HW field to be read
+ *
+ * @param[out] val_msb
+ * Msb value of the HW field
+ *
+ * @param[out] val_lsb
+ * Lsb value of the HW field
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int cfa_get128_field(uint64_t *obj_data, const struct cfa_layout *layout,
+ uint16_t field_id, uint64_t *val_msb, uint64_t *val_lsb);
+
+/**
+ * @brief This API provides the functionality to read a number of
+ * HW fields from a CFA HW data object based on the hardware
+ * programming layout.
+ *
+ * @param[in] obj_data
+ * A pointer to a CFA profiler key/mask object data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in, out] field_tbl
+ * A pointer to an array that consists of the object field
+ * ID/value pairs
+ *
+ * @param[in] field_tbl_sz
+ * Number of entries in the table
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int cfa_get_fields(uint64_t *obj_data, const struct cfa_layout *layout,
+ struct cfa_data_obj *field_tbl, uint16_t field_tbl_sz);
+
+/**
+ * @brief This API provides the functionality to read a number of
+ * HW fields from a CFA HW data object based on the hardware
+ * programming layout. This API will swap the n byte blocks before
+ * retrieving the field array.
+ *
+ * @param[in] obj_data
+ * A pointer to a CFA profiler key/mask object data
+ *
+ * @param[in] layout
+ * A pointer to CFA HW programming layout
+ *
+ * @param[in, out] field_tbl
+ * A pointer to an array that consists of the object field
+ * ID/value pairs
+ *
+ * @param[in] field_tbl_sz
+ * Number of entries in the table
+ *
+ * @param[in] data_size
+ * size of the data in bytes
+ *
+ * @param[in] n
+ * block size in bytes
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int cfa_get_fields_swap(uint64_t *obj_data, const struct cfa_layout *layout,
+ struct cfa_data_obj *field_tbl, uint16_t field_tbl_sz,
+ uint16_t data_size, uint16_t n);
+
+/**
+ * @brief Get a value from a specific location relative to a HW field.
+ * This API provides the functionality to read a HW field from
+ * a section of a HW data object identified by the bit position
+ * and bit length from a given programming layout in order to avoid
+ * reading the entire HW data object.
+ *
+ * @param[in] obj_data
+ * A pointer of the data object to read from
+ *
+ * @param[in] layout
+ * A pointer of the programming layout
+ *
+ * @param[in] field_id
+ * Identifier of the HW field
+ *
+ * @param[in] bitpos_adj
+ * Bit position adjustment value
+ *
+ * @param[in] bitlen_adj
+ * Bit length adjustment value
+ *
+ * @param[out] val
+ * Value of the HW field
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int cfa_get_field_rel(uint64_t *obj_data, const struct cfa_layout *layout,
+ uint16_t field_id, int16_t bitpos_adj, int16_t bitlen_adj,
+ uint64_t *val);
+
+/**
+ * @brief Get the length of the layout in words
+ *
+ * @param[in] layout
+ * A pointer to the layout to determine the number of words
+ * required
+ *
+ * @return
+ * number of words needed for the given layout
+ */
+uint16_t cfa_get_wordlen(const struct cfa_layout *layout);
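+
+/*
+ * Illustrative sketch: sizing a scratch buffer for a layout before filling it
+ * with the put APIs above ('lyt' is assumed to be a previously obtained
+ * layout pointer).
+ *
+ *     uint16_t nwords = cfa_get_wordlen(lyt);
+ *     uint64_t *obj = calloc(nwords, sizeof(uint64_t));
+ */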
+
+/**@}*/
+
+/**@}*/
+#endif /* _CFA_BLD_DEFS_H_*/
new file mode 100644
@@ -0,0 +1,524 @@
+/****************************************************************************
+ * Copyright(c) 2021 - 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld.h
+ *
+ * @brief CFA HW independent Builder library public api header
+ */
+
+#ifndef _CFA_BLD_H_
+#define _CFA_BLD_H_
+
+#include "sys_util.h"
+#include "cfa_bld_defs.h"
+#include "cfa_bld_field_ids.h"
+
+/**
+ * @addtogroup CFA_BLD CFA Builder Library
+ * \ingroup CFA_V3
+ * @{
+ */
+
+/**
+ * Maximum key array size
+ */
+#define CFA_V3_KEY_MAX_FIELD_CNT \
+ MAX((uint16_t)CFA_BLD_EM_KEY_LAYOUT_MAX_FLD, \
+ (uint16_t)CFA_BLD_WC_TCAM_FKB_MAX_FLD)
+#define CFA_V3_ACT_MAX_TEMPLATE_SZ sizeof(struct cfa_bld_action_template)
+
+/** @name CFA Builder Templates
+ * CFA builder action and key templates definition and enumerations
+ */
+
+/**@{*/
+enum action_type {
+ /** Select this type to build a Full Action Record Object
+ */
+ CFA_BLD_ACT_OBJ_TYPE_FULL_ACT,
+ /** Select this type to build a Compact Action Record Object
+ */
+ CFA_BLD_ACT_OBJ_TYPE_COMPACT_ACT,
+ /** Select this type to build an MCG Action Record Object
+ */
+ CFA_BLD_ACT_OBJ_TYPE_MCG_ACT,
+ /** Select this type to build Standalone Modify Action Record Object */
+ CFA_BLD_ACT_OBJ_TYPE_MODIFY,
+ /** Select this type to build Standalone Stat Action Record Object */
+ CFA_BLD_ACT_OBJ_TYPE_STAT,
+ /** Select this type to build Standalone Source Action Record Object */
+ CFA_BLD_ACT_OBJ_TYPE_SRC_PROP,
+ /** Select this type to build Standalone Encap Action Record Object */
+ CFA_BLD_ACT_OBJ_TYPE_ENCAP,
+};
+
+enum stat_op {
+ /** Set to count statistics at ingress to CFA
+ */
+ CFA_BLD_STAT_OP_INGRESS = 0,
+ /** Set to count statistics at egress from CFA
+ */
+ CFA_BLD_STAT_OP_EGRESS = 1,
+};
+
+enum stat_type {
+ /** Set to statistic to Forward packet count(64b)/Forward byte
+ * count(64b)
+ */
+ CFA_BLD_STAT_COUNTER_SIZE_16B = 0,
+ /** Set to statistic to Forward packet count(64b)/Forward byte
+ * count(64b)/ TCP Flags(16b)/Timestamp(32b)
+ */
+ CFA_BLD_STAT_COUNTER_SIZE_24B = 1,
+ /** Set to statistic to Forward packet count(64b)/Forward byte
+ * count(64b)/Meter(drop or red) packet count(64b)/Meter(drop
+ * or red) byte count(64b)
+ */
+ CFA_BLD_STAT_COUNTER_SIZE_32B = 2,
+ /** Set to statistic to Forward packet count(64b)/Forward byte
+ * count(64b)/Meter(drop or red) packet count(38b)/Meter(drop
+ * or red) byte count(42b)/TCP Flags(16b)/Timestamp(32b)
+ */
+ CFA_BLD_STAT_COUNTER_SIZE_32B_ALL = 3,
+};
+
+enum encap_vtag {
+ CFA_BLD_ACT_ENCAP_VTAGS_PUSH_0 = 0,
+ CFA_BLD_ACT_ENCAP_VTAGS_PUSH_1,
+ CFA_BLD_ACT_ENCAP_VTAGS_PUSH_2
+};
+
+enum encap_l3 {
+ /** Set to disable any L3 encapsulation
+ * processing, default
+ */
+ CFA_BLD_ACT_ENCAP_L3_NONE = 0,
+ /** Set to enable L3 IPv4 encapsulation
+ */
+ CFA_BLD_ACT_ENCAP_L3_IPV4 = 4,
+ /** Set to enable L3 IPv6 encapsulation
+ */
+ CFA_BLD_ACT_ENCAP_L3_IPV6 = 5,
+ /** Set to enable L3 MPLS 8847 encapsulation
+ */
+ CFA_BLD_ACT_ENCAP_L3_MPLS_8847 = 6,
+ /** Set to enable L3 MPLS 8848 encapsulation
+ */
+ CFA_BLD_ACT_ENCAP_L3_MPLS_8848 = 7
+};
+
+enum encap_tunnel {
+ /** Set to disable Tunnel header encapsulation
+ * processing, default
+ */
+ CFA_BLD_ACT_ENCAP_TNL_NONE = 0,
+ /** Set to enable Tunnel Generic Full header
+ * encapsulation
+ */
+ CFA_BLD_ACT_ENCAP_TNL_GENERIC_FULL,
+ /** Set to enable VXLAN header encapsulation
+ */
+ CFA_BLD_ACT_ENCAP_TNL_VXLAN,
+ /** Set to enable NGE (VXLAN2) header encapsulation
+ */
+ CFA_BLD_ACT_ENCAP_TNL_NGE,
+ /** Set to enable NVGRE header encapsulation
+ */
+ CFA_BLD_ACT_ENCAP_TNL_NVGRE,
+ /** Set to enable GRE header encapsulation
+ */
+ CFA_BLD_ACT_ENCAP_TNL_GRE,
+ /** Set to enable Generic header after Tunnel
+ * L4 encapsulation
+ */
+ CFA_BLD_ACT_ENCAP_TNL_GENERIC_AFTER_TL4,
+ /** Set to enable Generic header after Tunnel
+ * encapsulation
+ */
+ CFA_BLD_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
+};
+
+enum source_rec_type {
+ /** Set to Source MAC Address
+ */
+ CFA_BLD_SOURCE_MAC = 0,
+ /** Set to Source MAC and IPv4 Addresses
+ */
+ CFA_BLD_SOURCE_MAC_IPV4 = 1,
+ /** Set to Source MAC and IPv6 Addresses
+ */
+ CFA_BLD_SOURCE_MAC_IPV6 = 2,
+};
+
+/**
+ * From CFA phase 7.0 onwards, setting the modify vector bit
+ * 'ACT_MODIFY_TUNNEL_MODIFY' requires corresponding data fields to be
+ * set. This enum defines the parameters that determine the
+ * layout of this associated data fields. This structure
+ * is not used for versions older than CFA Phase 7.0 and setting
+ * the 'ACT_MODIFY_TUNNEL_MODIFY' bit will just delete the internal tunnel
+ */
+enum tunnel_modify_mode {
+ /* No change to tunnel protocol */
+ CFA_BLD_ACT_MOD_TNL_NO_PROTO_CHANGE = 0,
+ /* 8-bit tunnel protocol change */
+ CFA_BLD_ACT_MOD_TNL_8B_PROTO_CHANGE = 1,
+ /* 16-bit tunnel protocol change */
+ CFA_BLD_ACT_MOD_TNL_16B_PROTO_CHANGE = 2,
+ CFA_BLD_ACT_MOD_TNL_MAX
+};
+
+/**
+ * Action object template structure
+ *
+ * Template structure presents data fields that are necessary to know
+ * at the beginning of Action Builder (AB) processing, i.e. before the
+ * AB compilation. One such example could be a template that is
+ * flexible in size (Encap Record) and the presence of these fields
+ * allows for determining the template size as well as where the
+ * fields are located in the record.
+ *
+ * The template may also present fields that are not made visible to
+ * the caller by way of the action fields.
+ *
+ * Template fields also allow for additional checking on user visible
+ * fields. One such example could be the encap pointer behavior on a
+ * CFA_BLD_ACT_OBJ_TYPE_ACT or CFA_BLD_ACT_OBJ_TYPE_ACT_SRAM.
+ */
+struct cfa_bld_action_template {
+ /** Action Object type
+ *
+ * Controls the type of the Action Template
+ */
+ enum action_type obj_type;
+
+ /** Action Control
+ *
+ * Controls the internals of the Action Template
+ *
+ * act is valid when:
+ * ((obj_type == CFA_BLD_ACT_OBJ_TYPE_FULL_ACT)
+ * ||
+ * (obj_type == CFA_BLD_ACT_OBJ_TYPE_COMPACT_ACT))
+ *
+ * Specifies whether each action is to be in-line or not.
+ */
+ struct {
+ /** Set to true to enable statistics
+ */
+ uint8_t stat_enable;
+ /** Set to true to enable statistics to be inlined
+ */
+ uint8_t stat_inline;
+ /** Set to true to enable statistics 1
+ */
+ uint8_t stat1_enable;
+ /** Set to true to enable statistics 1 to be inlined
+ */
+ uint8_t stat1_inline;
+ /** Set to true to enable encapsulation
+ */
+ uint8_t encap_enable;
+ /** Set to true to enable encapsulation to be inlined
+ */
+ uint8_t encap_inline;
+ /** Set to true to align the encap record to cache
+ * line
+ */
+ uint8_t encap_align;
+ /** Set to true to enable source
+ */
+ uint8_t source_enable;
+ /** Set to true to enable source to be inlined
+ */
+ uint8_t source_inline;
+ /** Set to true to enable modification
+ */
+ uint8_t mod_enable;
+ /** Set to true to enable modify to be inlined
+ */
+ uint8_t mod_inline;
+ /** Set to true to enable subsequent MCGs
+ */
+ uint8_t mcg_subseq_enable;
+ } act;
+
+ /** Statistic Control
+ * Controls the type of statistic the template is describing
+ *
+ * stat is valid when:
+ * ((obj_type == CFA_BLD_ACT_OBJ_TYPE_FULL_ACT) ||
+ * (obj_type == CFA_BLD_ACT_OBJ_TYPE_COMPACT_ACT)) &&
+ * (act.stat_enable || act.stat_inline)
+ */
+ struct {
+ enum stat_op op;
+ enum stat_type type;
+ } stat;
+
+ /** Encap Control
+ * Controls the type of encapsulation the template is
+ * describing
+ *
+ * encap is valid when:
+ * ((obj_type == CFA_BLD_ACT_OBJ_TYPE_FULL_ACT) ||
+ * (obj_type == CFA_BLD_ACT_OBJ_TYPE_COMPACT_ACT)) &&
+ * (act.encap_enable || act.encap_inline)
+ */
+ struct {
+ /** Set to true to enable L2 capability in the
+ * template
+ */
+ uint8_t l2_enable;
+ /** vtag controls the Encap Vector - VTAG Encoding, 4 bits
+ *
+ * <ul>
+ * <li> CFA_BLD_ACT_ENCAP_VTAGS_PUSH_0, default, no VLAN
+ * Tags applied
+ * <li> CFA_BLD_ACT_ENCAP_VTAGS_PUSH_1, adds capability to
+ * set 1 VLAN Tag. Action Template compile adds
+ * the following field to the action object
+ * TF_ER_VLAN1
+ * <li> CFA_BLD_ACT_ENCAP_VTAGS_PUSH_2, adds capability to
+ * set 2 VLAN Tags. Action Template compile adds
+ * the following fields to the action object
+ * TF_ER_VLAN1 and TF_ER_VLAN2
+ * </ul>
+ */
+ enum encap_vtag vtag;
+
+ /*
+ * The remaining fields are NOT supported when
+ * direction is RX and ((obj_type ==
+ * CFA_BLD_ACT_OBJ_TYPE_ACT) && act.encap_enable).
+ * cfa_bld_devops.act_compile_layout will perform the
+ * checking and skip remaining fields.
+ */
+ /** L3 Encap controls the Encap Vector - L3 Encoding,
+ * 3 bits. Defines the type of L3 Encapsulation the
+ * template is describing.
+ * <ul>
+ * <li> CFA_BLD_ACT_ENCAP_L3_NONE, default, no L3
+ * Encapsulation processing.
+ * <li> CFA_BLD_ACT_ENCAP_L3_IPV4, enables L3 IPv4
+ * Encapsulation.
+ * <li> CFA_BLD_ACT_ENCAP_L3_IPV6, enables L3 IPv6
+ * Encapsulation.
+ * <li> CFA_BLD_ACT_ENCAP_L3_MPLS_8847, enables L3 MPLS
+ * 8847 Encapsulation.
+ * <li> CFA_BLD_ACT_ENCAP_L3_MPLS_8848, enables L3 MPLS
+ * 8848 Encapsulation.
+ * </ul>
+ */
+ enum encap_l3 l3;
+
+#define CFA_BLD_ACT_ENCAP_MAX_MPLS_LABELS 8
+ /** 1-8 labels, valid when
+ * (l3 == CFA_BLD_ACT_ENCAP_L3_MPLS_8847) ||
+ * (l3 == CFA_BLD_ACT_ENCAP_L3_MPLS_8848)
+ *
+ * MAX number of MPLS Labels 8.
+ */
+ uint8_t l3_num_mpls_labels;
+
+ /** Set to true to enable L4 capability in the
+ * template.
+ *
+ * true adds TF_EN_UDP_SRC_PORT and
+ * TF_EN_UDP_DST_PORT to the template.
+ */
+ uint8_t l4_enable;
+
+ /** Tunnel Encap controls the Encap Vector - Tunnel
+ * Encap, 3 bits. Defines the type of Tunnel
+ * encapsulation the template is describing
+ * <ul>
+ * <li> CFA_BLD_ACT_ENCAP_TNL_NONE, default, no Tunnel
+ * Encapsulation processing.
+ * <li> CFA_BLD_ACT_ENCAP_TNL_GENERIC_FULL
+ * <li> CFA_BLD_ACT_ENCAP_TNL_VXLAN. NOTE: Expects
+ * l4_enable set to true;
+ * <li> CFA_BLD_ACT_ENCAP_TNL_NGE. NOTE: Expects l4_enable
+ * set to true;
+ * <li> CFA_BLD_ACT_ENCAP_TNL_NVGRE. NOTE: only valid if
+ * l4_enable set to false.
+ * <li> CFA_BLD_ACT_ENCAP_TNL_GRE.NOTE: only valid if
+ * l4_enable set to false.
+ * <li> CFA_BLD_ACT_ENCAP_TNL_GENERIC_AFTER_TL4
+ * <li> CFA_BLD_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
+ * </ul>
+ */
+ enum encap_tunnel tnl;
+
+#define CFA_BLD_ACT_ENCAP_MAX_TUNNEL_GENERIC_SIZE 128
+ /** Number of bytes of generic tunnel header,
+ * valid when
+ * (tnl == CFA_BLD_ACT_ENCAP_TNL_GENERIC_FULL) ||
+ * (tnl == CFA_BLD_ACT_ENCAP_TNL_GENERIC_AFTER_TL4) ||
+ * (tnl == CFA_BLD_ACT_ENCAP_TNL_GENERIC_AFTER_TNL)
+ */
+ uint8_t tnl_generic_size;
+
+#define CFA_BLD_ACT_ENCAP_MAX_OPLEN 15
+ /** Number of 32b words of nge options,
+ * valid when
+ * (tnl == CFA_BLD_ACT_ENCAP_TNL_NGE)
+ */
+ uint8_t tnl_nge_op_len;
+
+ /** Set to true to enable SPDNIC tunnel
+ * template,
+ * valid when
+ * (tnl == CFA_BLD_ACT_ENCAP_TNL_GENERIC_FULL)
+ */
+ uint8_t spdnic_enable;
+
+ /** SPDNIC flags field,
+ * valid when
+ * (tnl == CFA_BLD_ACT_ENCAP_TNL_GENERIC_FULL)
+ */
+ uint8_t tnl_spdnic_flags;
+
+ /** Set to true to enable MAC/VLAN/IP/TNL overrides in the
+ * template
+ */
+ bool encap_override;
+ /* Currently not planned */
+ /* Custom Header */
+ /* uint8_t custom_enable; */
+ } encap;
+
+ /** Modify Control
+ *
+ * Controls the type of the Modify Action the template is
+ * describing
+ *
+ * modify is valid when:
+ * ((obj_type == CFA_BLD_ACT_OBJ_TYPE_FULL_ACT) ||
+ * (obj_type == CFA_BLD_ACT_OBJ_TYPE_COMPACT_ACT)) &&
+ * (act.mod_enable || act.mod_inline)
+ */
+/** Set to enable Modify of Metadata
+ */
+#define CFA_BLD_ACT_MODIFY_META 0x1
+/** Set to enable Delete of Outer VLAN
+ */
+#define CFA_BLD_ACT_MODIFY_DEL_OVLAN 0x2
+/** Set to enable Delete of Inner VLAN
+ */
+#define CFA_BLD_ACT_MODIFY_DEL_IVLAN 0x4
+/** Set to enable Replace or Add of Outer VLAN
+ */
+#define CFA_BLD_ACT_MODIFY_REPL_ADD_OVLAN 0x8
+/** Set to enable Replace or Add of Inner VLAN
+ */
+#define CFA_BLD_ACT_MODIFY_REPL_ADD_IVLAN 0x10
+/** Set to enable Modify of TTL
+ */
+#define CFA_BLD_ACT_MODIFY_TTL_UPDATE 0x20
+/** Set to enable delete of INT Tunnel
+ */
+#define CFA_BLD_ACT_MODIFY_DEL_INT_TNL 0x40
+/** For phase 7.0 this bit can be used to modify the tunnel
+ * protocol in addition to deleting internal or outer tunnel
+ */
+#define CFA_BLD_ACT_MODIFY_TUNNEL_MODIFY CFA_BLD_ACT_MODIFY_DEL_INT_TNL
+/** Set to enable Modify of Field
+ */
+#define CFA_BLD_ACT_MODIFY_FIELD 0x80
+/** Set to enable Modify of Destination MAC
+ */
+#define CFA_BLD_ACT_MODIFY_DMAC 0x100
+/** Set to enable Modify of Source MAC
+ */
+#define CFA_BLD_ACT_MODIFY_SMAC 0x200
+/** Set to enable Modify of Source IPv6 Address
+ */
+#define CFA_BLD_ACT_MODIFY_SRC_IPV6 0x400
+/** Set to enable Modify of Destination IPv6 Address
+ */
+#define CFA_BLD_ACT_MODIFY_DST_IPV6 0x800
+/** Set to enable Modify of Source IPv4 Address
+ */
+#define CFA_BLD_ACT_MODIFY_SRC_IPV4 0x1000
+/** Set to enable Modify of Destination IPv4 Address
+ */
+#define CFA_BLD_ACT_MODIFY_DST_IPV4 0x2000
+/** Set to enable Modify of L4 Source Port
+ */
+#define CFA_BLD_ACT_MODIFY_SRC_PORT 0x4000
+/** Set to enable Modify of L4 Destination Port
+ */
+#define CFA_BLD_ACT_MODIFY_DST_PORT 0x8000
+ uint16_t modify;
+
+/** Set to enable Modify of KID
+ */
+#define CFA_BLD_ACT_MODIFY_FIELD_KID 0x1
+
+ /* Valid for phase 7.0 or higher */
+ uint16_t field_modify;
+
+ /* Valid for phase 7.0 or higher */
+ enum tunnel_modify_mode tnl_mod_mode;
+
+ /** Source Control
+ *
+ * Controls the type of the Source Action the template is
+ * describing
+ *
+ * source is valid when:
+ * ((obj_type == CFA_BLD_ACT_OBJ_TYPE_FULL_ACT) ||
+ * (obj_type == CFA_BLD_ACT_OBJ_TYPE_COMPACT_ACT)) &&
+ * (act.source_enable || act.source_inline)
+ */
+ enum source_rec_type source;
+};
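+
+/*
+ * Illustrative sketch (hypothetical configuration): a minimal full-action
+ * template describing an inline 16B ingress counter and no encap, modify or
+ * source actions. The template would then be compiled into an action layout
+ * via the devops act_compile_layout hook (see cfa_bld_devops.h).
+ *
+ *     struct cfa_bld_action_template at = { 0 };
+ *
+ *     at.obj_type = CFA_BLD_ACT_OBJ_TYPE_FULL_ACT;
+ *     at.act.stat_enable = 1;
+ *     at.act.stat_inline = 1;
+ *     at.stat.op = CFA_BLD_STAT_OP_INGRESS;
+ *     at.stat.type = CFA_BLD_STAT_COUNTER_SIZE_16B;
+ */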
+
+/**
+ * Key template consists of key fields that can be enabled/disabled
+ * individually.
+ */
+struct cfa_key_template {
+ /** [in] Identifies whether the key template is for TCAM. If false, the
+ * key template is for EM. This field is mandatory for devices that
+ * only support fixed key formats.
+ */
+ bool is_wc_tcam_key;
+ /** [in] Identifies whether the key template will be used for IPv6 keys.
+ *
+ * Note: This is important for THOR2 as the field length for the FlowId
+ * is dependent on the L3 flow type. For THOR2 for IPv4 Keys, the Flow
+ * Id field is 16 bits, for all other types (IPv6, ARP, PTP, EAP, RoCE,
+ * FCoE, UPAR), the Flow Id field length is 20 bits.
+ */
+ bool is_ipv6_key;
+ /** [in] key field enable array; set the corresponding entry to 1
+ * to make a field valid
+ */
+ uint8_t field_en[CFA_V3_KEY_MAX_FIELD_CNT];
+};
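+
+/*
+ * Illustrative sketch (field ids are hypothetical): enabling a couple of
+ * fields of an EM key template before compiling it into a key layout with
+ * the devops key_compile_layout hook.
+ *
+ *     struct cfa_key_template kt = { 0 };
+ *
+ *     kt.is_wc_tcam_key = false;        EM key
+ *     kt.is_ipv6_key = false;
+ *     kt.field_en[2] = 1;               enable hypothetical field id 2
+ *     kt.field_en[5] = 1;               enable hypothetical field id 5
+ */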
+
+/**
+ * Action template consists of action fields that can be enabled/disabled
+ * individually.
+ */
+struct cfa_action_template {
+ /** [in] CFA version for the action template */
+ enum cfa_ver hw_ver;
+ /** [in] action field enable array; set the corresponding entry to 1
+ * to make a field valid
+ */
+ uint8_t data[CFA_V3_ACT_MAX_TEMPLATE_SZ];
+};
+
+/**@}*/
+
+/**@}*/
+
+#endif /* _CFA_BLD_H_ */
new file mode 100644
@@ -0,0 +1,297 @@
+/****************************************************************************
+ * Copyright(c) 2021 - 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld_devops.h
+ *
+ * @brief CFA Builder devops interface for host applications
+ */
+
+#ifndef _CFA_BLD_DEVOPS_H_
+#define _CFA_BLD_DEVOPS_H_
+
+#include <stdio.h>
+
+#include "cfa_bld.h"
+#include "cfa_bld_defs.h"
+
+struct cfa_bld_devops;
+
+/**
+ * @addtogroup CFA_BLD CFA Builder Library
+ * \ingroup CFA_V3
+ * @{
+ */
+
+/**
+ * CFA device information
+ */
+struct cfa_bld_devinfo {
+ /** [out] CFA Builder operations function pointer table */
+ const struct cfa_bld_devops *devops;
+};
+
+/**
+ * @name CFA_BLD CFA Builder Host Device OPS API
+ * CFA builder host-specific API used by a host CFA application to bind
+ * to different CFA devices and access the device by using device OPS.
+ */
+
+/**@{*/
+/** CFA bind builder API
+ *
+ * This API retrieves the CFA global device configuration. This API should be
+ * called first, before doing any operations on CFA through the API. The returned
+ * global device information should be referenced throughout the lifetime of
+ * the CFA application.
+ *
+ * @param[in] hw_ver
+ * hardware version of the CFA
+ *
+ * @param[out] dev_info
+ * CFA global device information
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int cfa_bld_bind(enum cfa_ver hw_ver, struct cfa_bld_devinfo *dev_info);
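+
+/*
+ * Illustrative usage sketch (variable names are hypothetical): bind the
+ * builder for the device's CFA version and use the returned devops table,
+ * for example to compile a previously populated key template 'kt'.
+ *
+ *     struct cfa_bld_devinfo info = { 0 };
+ *     struct cfa_key_layout kl = { 0 };
+ *     int rc;
+ *
+ *     rc = cfa_bld_bind(CFA_P70, &info);
+ *     if (!rc)
+ *             rc = info.devops->cfa_bld_key_compile_layout(&kt, &kl);
+ */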
+
+/** CFA device specific function hooks structure
+ *
+ * The following device hooks can be defined; unless noted otherwise, they are
+ * optional and can be filled with a null pointer. The purpose of these hooks
+ * is to support CFA device operations for different device variants.
+ */
+struct cfa_bld_devops {
+ /** Get CFA layout for hw fix format tables
+ *
+ * This API returns the CFA layout for a given resource type,
+ * resource subtype and CFA direction.
+ *
+ * @param[in] rtype
+ * CFA HW resource type. Valid values are CFA_RTYPE_XXX
+ *
+ * @param[in] rsubtype
+ * CFA HW resource sub type for the given resource type 'rtype'
+ * Valid values are CFA_RSUBTYPE_XXX_YYY, where XXX is the resource
+ * type
+ *
+ * @param[in] dir
+ * CFA direction. RX/TX. Note that the returned layout is different
+ * for RX and TX only for the VEB and VSPT tables. For all other
+ * tables, the layout is the same for both directions.
+ *
+ * @param[out] layout
+ * Pointer to the table layout to be returned
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ * @note example usage: To get L2 context TCAM table, use
+ * struct cfa_layout *l2ctxt_tcam_layout;
+ * devops->cfa_bld_get_table_layout(CFA_RTYPE_TCAM,
+ * CFA_RSUBTYPE_TCAM_L2CTX,
+ * CFA_TX,
+ * &l2ctxt_tcam_layout);
+ */
+ int (*cfa_bld_get_table_layout)(enum cfa_resource_type rtype,
+ uint8_t rsubtype, enum cfa_dir dir,
+ struct cfa_layout **layout);
+
+ /** Get CFA layout for HW remap tables
+ *
+ * This API returns the CFA remap layout for a given TCAM
+ * resource sub type, remap type and CFA direction.
+ *
+ * @param[in] st
+ * CFA TCAM table sub types. Valid values are CFA_RSUBTYPE_TCAM_XXX
+ *
+ * @param[in] rmp_tt
+ * CFA Remap table type. See enum cfa_remap_tbl_type
+ *
+ * @param[in] dir
+ * CFA direction. RX/TX.
+ *
+ * @param[out] layout
+ * Pointer to the remap table layout to be returned
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ * @note example usage: To get Profiler TCAM Remap bypass table, use
+ * struct cfa_layout *prof_tcam_rmp_byp_layout;
+ * devops->cfa_bld_get_remap_table_layout(CFA_RSUBTYPE_TCAM_PROF_TCAM,
+ * CFA_REMAP_TBL_TYPE_BYPASS,
+ * CFA_TX,
+ * &prof_tcam_rmp_byp_layout);
+ */
+ int (*cfa_bld_get_remap_table_layout)(enum cfa_resource_subtype_tcam st,
+ enum cfa_remap_tbl_type rmp_tt,
+ enum cfa_dir dir,
+ struct cfa_layout **layout);
+
+ /** build key layout
+ *
+ * This API takes the user provided key template as input and
+ * compiles it into a key layout supported by the hardware.
+ * It is intended that an application will only compile a
+ * key layout once for the provided key template and then
+ * reference the key layout throughout the lifetime of that
+ * key template.
+ *
+ * @param[in] key_template
+ * A pointer to the key template
+ *
+ * @param[in,out] layout
+ * A pointer of the key layout
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+ int (*cfa_bld_key_compile_layout)(struct cfa_key_template *t,
+ struct cfa_key_layout *l);
+
+ /** Print formatted key object
+ *
+ * This API prints in human readable form the data in a key
+ * object based upon the key layout provided. It also provides the
+ * option to provide a raw byte output.
+ *
+ * @param[in] stream
+ * Generally set to stdout (stderr possible)
+ *
+ * @param[in] key_obj
+ * A pointer to the key_obj to be displayed
+ *
+ * @param[in] key_layout
+ * A pointer to the key_layout indicating the key format
+ *
+ * @param[in] decode
+ * If set, decode the fields, if clear provide raw byte output.
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+ int (*cfa_bld_key_print_obj)(FILE *stream, struct cfa_key_obj *key_obj,
+ struct cfa_key_layout *key_layout,
+ bool decode);
+
+ /** Transform key data with device specific control information
+ *
+ * This API inserts or strips device specific control information
+ * to/from a key object.
+ *
+ * @param[in] op
+ * Specifies the key transform operation.
+ *
+ * @param[in] key_obj
+ * A pointer of the key object to be transformed
+ *
+ * @param[out] key_obj_out
+ * A pointer of the transformed key data object.
+ * The updated bitlen for the transformed key is returned
+ * in the data_len_bits field of this object.
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+ int (*cfa_bld_key_transform)(enum cfa_key_ctrlops op,
+ struct cfa_key_obj *key_obj,
+ struct cfa_key_obj *key_obj_out);
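+
+ /*
+ * Illustrative sketch (buffers are hypothetical): inserting WC
+ * control bits into a previously built key before writing it to
+ * hardware.
+ *
+ *     rc = devops->cfa_bld_key_transform(CFA_KEY_CTRLOPS_INSERT,
+ *                                        &in_key, &out_key);
+ *
+ * out_key.data_len_bits then holds the transformed key length.
+ */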
+
+ /** build action layout
+ *
+ * This API takes the user provided action template as input and
+ * compiles it into an action layout supported by the hardware.
+ * It is intended that an application will only compile an
+ * action layout once for the provided action template and then
+ * reference the action layout throughout the lifetime of that
+ * action template.
+ *
+ * @param[in] act_template
+ * A pointer to the action template
+ *
+ * @param[in,out] layout
+ * A pointer of the action layout
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+ int (*cfa_bld_act_compile_layout)(struct cfa_action_template *t,
+ struct cfa_action_layout *l);
+
+ /** initialize action private fields
+ *
+ * This API provides the functionality to zero out the action
+ * object data fields and set pre-initialized private fields
+ * based on the layout. Any action object must be initialized
+ * using this API before any put and get APIs can be executed
+ * for an action object.
+ *
+ * @param[in,out] act_obj
+ * A pointer of the action object to be initialized
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+ int (*cfa_bld_action_init_obj)(struct cfa_action_obj *act_obj);
+
+ /** compute inline action object pointers/offsets
+ *
+ * This API provides the functionality to compute and set
+ * pointers/offset to the inlined actions in an action record.
+ * This API is applicable only to the action object type that
+ * support inline actions.
+ *
+ * @param[in,out] act_obj
+ * A pointer of the action object to be initialized
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+ int (*cfa_bld_action_compute_ptr)(struct cfa_action_obj *obj);
+
+ /** Print action object
+ *
+ * This API presents the action object in human readable
+ * format.
+ *
+ *
+ * @param[in] stream
+ * Generally set to stdout (stderr possible)
+ *
+ * @param[in] act_obj
+ * A pointer of the action object to be displayed
+ *
+ * @param[in] decode
+ * If set, decode the fields, if clear provide raw byte output.
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+ int (*cfa_bld_action_print_obj)(FILE *stream,
+ struct cfa_action_obj *obj,
+ bool decode);
+
+ /** Print field object
+ *
+ * This API prints out the raw field output
+ *
+ * @param[in] fld_obj
+ * A pointer to the fld_obj to be displayed
+ *
+ * @param[in] fld_layout
+ * A pointer to the cfa_layout indicating the field format
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+ int (*cfa_bld_fld_print_obj)(uint64_t *fld_obj,
+ struct cfa_layout *layout);
+};
+
+/**@}*/
+
+/**@}*/
+#endif /* _CFA_BLD_DEVOPS_H_ */
new file mode 100644
@@ -0,0 +1,1542 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld_field_ids.h
+ *
+ * @brief Enumerations definitions for CFA HW table fields, Action record
+ * fields and Lookup Key (EM/WC-TCAM) fields.
+ *
+ * This file is independent of the CFA HW version and defines the
+ * superset of the enumeration values for table, action and EM/WC-TCAM
+ * bit fields. This file is meant for use by host applications that
+ * support multiple devices with different CFA Hw versions.
+ *
+ * These enum definitions should be updated whenever any of the
+ * definitions in the auto-generated header 'cfa_bld_pxx_field_ids.h'
+ * file gets any new enum values.
+ */
+
+#ifndef _CFA_BLD_FIELD_IDS_H_
+#define _CFA_BLD_FIELD_IDS_H_
+
+/**
+ * Lookup Field Range Check Range Memory Fields:
+ */
+enum cfa_bld_lkup_frc_profile_flds {
+ CFA_BLD_LKUP_FRC_PROFILE_FIELD_SEL_1_FLD = 0,
+ CFA_BLD_LKUP_FRC_PROFILE_RANGE_CHECK_1_FLD = 1,
+ CFA_BLD_LKUP_FRC_PROFILE_FIELD_SEL_0_FLD = 2,
+ CFA_BLD_LKUP_FRC_PROFILE_RANGE_CHECK_0_FLD = 3,
+ CFA_BLD_LKUP_FRC_PROFILE_MAX_FLD
+};
+
+/**
+ * Lookup Connection Tracking State Memory Fields:
+ */
+enum cfa_bld_lkup_ct_state_flds {
+ CFA_BLD_LKUP_CT_STATE_NOTIFY_FLD = 0,
+ CFA_BLD_LKUP_CT_STATE_NOTIFY_STATE_FLD = 1,
+ CFA_BLD_LKUP_CT_STATE_ACTION_FLD = 2,
+ CFA_BLD_LKUP_CT_STATE_TIMER_SELECT_FLD = 3,
+ CFA_BLD_LKUP_CT_STATE_TIMER_PRELOAD_FLD = 4,
+ CFA_BLD_LKUP_CT_STATE_MAX_FLD
+};
+
+/**
+ * Lookup Connection Tracking State Machine Rule Memory Fields:
+ */
+enum cfa_bld_lkup_ct_rule_flds {
+ CFA_BLD_LKUP_CT_RULE_VALID_FLD = 0,
+ CFA_BLD_LKUP_CT_RULE_MASK_FLD = 1,
+ CFA_BLD_LKUP_CT_RULE_PKT_NOT_BG_FLD = 2,
+ CFA_BLD_LKUP_CT_RULE_STATE_FLD = 3,
+ CFA_BLD_LKUP_CT_RULE_TCP_FLAGS_FLD = 4,
+ CFA_BLD_LKUP_CT_RULE_PROT_IS_TCP_FLD = 5,
+ CFA_BLD_LKUP_CT_RULE_MSB_UPDT_FLD = 6,
+ CFA_BLD_LKUP_CT_RULE_FLAGS_FAILED_FLD = 7,
+ CFA_BLD_LKUP_CT_RULE_WIN_FAILED_FLD = 8,
+ CFA_BLD_LKUP_CT_RULE_MAX_FLD
+};
+
+/**
+ * Lookup Connection Tracking State Machine Rule Record Memory Fields:
+ */
+enum cfa_bld_lkup_ct_rule_record_flds {
+ CFA_BLD_LKUP_CT_RULE_RECORD_ACTION_FLD = 0,
+ CFA_BLD_LKUP_CT_RULE_RECORD_NEXT_STATE_FLD = 1,
+ CFA_BLD_LKUP_CT_RULE_RECORD_SEND_FLD = 2,
+ CFA_BLD_LKUP_CT_RULE_RECORD_MAX_FLD
+};
+
+/**
+ * VEB Destination Bitmap Remap Table. Fields:
+ */
+enum cfa_bld_act_veb_rmp_flds {
+ CFA_BLD_ACT_VEB_RMP_MODE_FLD = 0,
+ CFA_BLD_ACT_VEB_RMP_ENABLE_FLD = 1,
+ CFA_BLD_ACT_VEB_RMP_BITMAP_FLD = 2,
+ CFA_BLD_ACT_VEB_RMP_MAX_FLD
+};
+
+/**
+ * Lookup Field Range Check Range Memory Fields:
+ */
+enum cfa_bld_lkup_frc_range_flds {
+ CFA_BLD_LKUP_FRC_RANGE_RANGE_LO_FLD = 0,
+ CFA_BLD_LKUP_FRC_RANGE_RANGE_HI_FLD = 1,
+ CFA_BLD_LKUP_FRC_RANGE_MAX_FLD
+};
+
+/**
+ * L2 Context TCAM. Fields:
+ */
+enum cfa_bld_prof_l2_ctxt_tcam_flds {
+ CFA_BLD_PROF_L2_CTXT_TCAM_VALID_FLD = 0,
+ CFA_BLD_PROF_L2_CTXT_TCAM_SPARE_FLD = 1,
+ CFA_BLD_PROF_L2_CTXT_TCAM_MPASS_CNT_FLD = 2,
+ CFA_BLD_PROF_L2_CTXT_TCAM_RCYC_FLD = 3,
+ CFA_BLD_PROF_L2_CTXT_TCAM_LOOPBACK_FLD = 4,
+ CFA_BLD_PROF_L2_CTXT_TCAM_SPIF_FLD = 5,
+ CFA_BLD_PROF_L2_CTXT_TCAM_PARIF_FLD = 6,
+ CFA_BLD_PROF_L2_CTXT_TCAM_SVIF_FLD = 7,
+ CFA_BLD_PROF_L2_CTXT_TCAM_METADATA_FLD = 8,
+ CFA_BLD_PROF_L2_CTXT_TCAM_L2_FUNC_FLD = 9,
+ CFA_BLD_PROF_L2_CTXT_TCAM_ROCE_FLD = 10,
+ CFA_BLD_PROF_L2_CTXT_TCAM_PURE_LLC_FLD = 11,
+ CFA_BLD_PROF_L2_CTXT_TCAM_OT_HDR_TYPE_FLD = 12,
+ CFA_BLD_PROF_L2_CTXT_TCAM_T_HDR_TYPE_FLD = 13,
+ CFA_BLD_PROF_L2_CTXT_TCAM_ID_CTXT_FLD = 14,
+ CFA_BLD_PROF_L2_CTXT_TCAM_MAC0_FLD = 15,
+ CFA_BLD_PROF_L2_CTXT_TCAM_MAC1_FLD = 16,
+ CFA_BLD_PROF_L2_CTXT_TCAM_VTAG_PRESENT_FLD = 17,
+ CFA_BLD_PROF_L2_CTXT_TCAM_TWO_VTAGS_FLD = 18,
+ CFA_BLD_PROF_L2_CTXT_TCAM_OVLAN_VID_FLD = 19,
+ CFA_BLD_PROF_L2_CTXT_TCAM_OVLAN_TPID_SEL_FLD = 20,
+ CFA_BLD_PROF_L2_CTXT_TCAM_IVLAN_VID_FLD = 21,
+ CFA_BLD_PROF_L2_CTXT_TCAM_IVLAN_TPID_SEL_FLD = 22,
+ CFA_BLD_PROF_L2_CTXT_TCAM_ETYPE_FLD = 23,
+ CFA_BLD_PROF_L2_CTXT_TCAM_MAX_FLD
+};
+
+/**
+ * Profiler Profile Lookup TCAM Fields:
+ */
+enum cfa_bld_prof_profile_tcam_flds {
+ CFA_BLD_PROF_PROFILE_TCAM_VALID_FLD = 0,
+ CFA_BLD_PROF_PROFILE_TCAM_SPARE_FLD = 1,
+ CFA_BLD_PROF_PROFILE_TCAM_LOOPBACK_FLD = 2,
+ CFA_BLD_PROF_PROFILE_TCAM_PKT_TYPE_FLD = 3,
+ CFA_BLD_PROF_PROFILE_TCAM_RCYC_FLD = 4,
+ CFA_BLD_PROF_PROFILE_TCAM_METADATA_FLD = 5,
+ CFA_BLD_PROF_PROFILE_TCAM_AGG_ERROR_FLD = 6,
+ CFA_BLD_PROF_PROFILE_TCAM_L2_FUNC_FLD = 7,
+ CFA_BLD_PROF_PROFILE_TCAM_PROF_FUNC_FLD = 8,
+ CFA_BLD_PROF_PROFILE_TCAM_HREC_NEXT_FLD = 9,
+ CFA_BLD_PROF_PROFILE_TCAM_INT_HDR_TYPE_FLD = 10,
+ CFA_BLD_PROF_PROFILE_TCAM_INT_HDR_GROUP_FLD = 11,
+ CFA_BLD_PROF_PROFILE_TCAM_INT_IFA_TAIL_FLD = 12,
+ CFA_BLD_PROF_PROFILE_TCAM_OTL2_HDR_VALID_FLD = 13,
+ CFA_BLD_PROF_PROFILE_TCAM_OTL2_HDR_TYPE_FLD = 14,
+ CFA_BLD_PROF_PROFILE_TCAM_OTL2_UC_MC_BC_FLD = 15,
+ CFA_BLD_PROF_PROFILE_TCAM_OTL2_VTAG_PRESENT_FLD = 16,
+ CFA_BLD_PROF_PROFILE_TCAM_OTL2_TWO_VTAGS_FLD = 17,
+ CFA_BLD_PROF_PROFILE_TCAM_OTL3_HDR_VALID_FLD = 18,
+ CFA_BLD_PROF_PROFILE_TCAM_OTL3_HDR_ERROR_FLD = 19,
+ CFA_BLD_PROF_PROFILE_TCAM_OTL3_HDR_TYPE_FLD = 20,
+ CFA_BLD_PROF_PROFILE_TCAM_OTL3_HDR_ISIP_FLD = 21,
+ CFA_BLD_PROF_PROFILE_TCAM_OTL4_HDR_VALID_FLD = 22,
+ CFA_BLD_PROF_PROFILE_TCAM_OTL4_HDR_ERROR_FLD = 23,
+ CFA_BLD_PROF_PROFILE_TCAM_OTL4_HDR_TYPE_FLD = 24,
+ CFA_BLD_PROF_PROFILE_TCAM_OTL4_HDR_IS_UDP_TCP_FLD = 25,
+ CFA_BLD_PROF_PROFILE_TCAM_OT_HDR_VALID_FLD = 26,
+ CFA_BLD_PROF_PROFILE_TCAM_OT_HDR_ERROR_FLD = 27,
+ CFA_BLD_PROF_PROFILE_TCAM_OT_HDR_TYPE_FLD = 28,
+ CFA_BLD_PROF_PROFILE_TCAM_OT_HDR_FLAGS_FLD = 29,
+ CFA_BLD_PROF_PROFILE_TCAM_TL2_HDR_VALID_FLD = 30,
+ CFA_BLD_PROF_PROFILE_TCAM_TL2_HDR_TYPE_FLD = 31,
+ CFA_BLD_PROF_PROFILE_TCAM_TL2_UC_MC_BC_FLD = 32,
+ CFA_BLD_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_FLD = 33,
+ CFA_BLD_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_FLD = 34,
+ CFA_BLD_PROF_PROFILE_TCAM_TL3_HDR_VALID_FLD = 35,
+ CFA_BLD_PROF_PROFILE_TCAM_TL3_HDR_ERROR_FLD = 36,
+ CFA_BLD_PROF_PROFILE_TCAM_TL3_HDR_TYPE_FLD = 37,
+ CFA_BLD_PROF_PROFILE_TCAM_TL3_HDR_ISIP_FLD = 38,
+ CFA_BLD_PROF_PROFILE_TCAM_TL4_HDR_VALID_FLD = 39,
+ CFA_BLD_PROF_PROFILE_TCAM_TL4_HDR_ERROR_FLD = 40,
+ CFA_BLD_PROF_PROFILE_TCAM_TL4_HDR_TYPE_FLD = 41,
+ CFA_BLD_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_FLD = 42,
+ CFA_BLD_PROF_PROFILE_TCAM_TUN_HDR_VALID_FLD = 43,
+ CFA_BLD_PROF_PROFILE_TCAM_TUN_HDR_ERROR_FLD = 44,
+ CFA_BLD_PROF_PROFILE_TCAM_TUN_HDR_TYPE_FLD = 45,
+ CFA_BLD_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_FLD = 46,
+ CFA_BLD_PROF_PROFILE_TCAM_L2_HDR_VALID_FLD = 47,
+ CFA_BLD_PROF_PROFILE_TCAM_L2_HDR_ERROR_FLD = 48,
+ CFA_BLD_PROF_PROFILE_TCAM_L2_HDR_TYPE_FLD = 49,
+ CFA_BLD_PROF_PROFILE_TCAM_L2_UC_MC_BC_FLD = 50,
+ CFA_BLD_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_FLD = 51,
+ CFA_BLD_PROF_PROFILE_TCAM_L2_TWO_VTAGS_FLD = 52,
+ CFA_BLD_PROF_PROFILE_TCAM_L3_HDR_VALID_FLD = 53,
+ CFA_BLD_PROF_PROFILE_TCAM_L3_HDR_ERROR_FLD = 54,
+ CFA_BLD_PROF_PROFILE_TCAM_L3_HDR_TYPE_FLD = 55,
+ CFA_BLD_PROF_PROFILE_TCAM_L3_HDR_ISIP_FLD = 56,
+ CFA_BLD_PROF_PROFILE_TCAM_L3_PROT_FLD = 57,
+ CFA_BLD_PROF_PROFILE_TCAM_L4_HDR_VALID_FLD = 58,
+ CFA_BLD_PROF_PROFILE_TCAM_L4_HDR_ERROR_FLD = 59,
+ CFA_BLD_PROF_PROFILE_TCAM_L4_HDR_TYPE_FLD = 60,
+ CFA_BLD_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_FLD = 61,
+ CFA_BLD_PROF_PROFILE_TCAM_L4_HDR_SUBTYPE_FLD = 62,
+ CFA_BLD_PROF_PROFILE_TCAM_L4_HDR_FLAGS_FLD = 63,
+ CFA_BLD_PROF_PROFILE_TCAM_L4_DCN_PRESENT_FLD = 64,
+ CFA_BLD_PROF_PROFILE_TCAM_MAX_FLD
+};
+
+/**
+ * Action VEB TCAM. TX Fields (VEB Remap Mode):
+ */
+enum cfa_bld_act_veb_tcam_tx_flds {
+ CFA_BLD_ACT_VEB_TCAM_TX_VALID_FLD = 0,
+ CFA_BLD_ACT_VEB_TCAM_TX_PARIF_IN_FLD = 1,
+ CFA_BLD_ACT_VEB_TCAM_TX_NUM_VTAGS_FLD = 2,
+ CFA_BLD_ACT_VEB_TCAM_TX_DMAC_FLD = 3,
+ CFA_BLD_ACT_VEB_TCAM_TX_OVID_FLD = 4,
+ CFA_BLD_ACT_VEB_TCAM_TX_IVID_FLD = 5,
+ CFA_BLD_ACT_VEB_TCAM_TX_MAX_FLD
+};
+
+/**
+ * RX Fields (Source Knockout Mode):
+ */
+enum cfa_bld_act_veb_tcam_rx_flds {
+ CFA_BLD_ACT_VEB_TCAM_RX_VALID_FLD = 0,
+ CFA_BLD_ACT_VEB_TCAM_RX_SPARE_FLD = 1,
+ CFA_BLD_ACT_VEB_TCAM_RX_PADDING_FLD = 2,
+ CFA_BLD_ACT_VEB_TCAM_RX_UNICAST_FLD = 3,
+ CFA_BLD_ACT_VEB_TCAM_RX_MULTICAST_FLD = 4,
+ CFA_BLD_ACT_VEB_TCAM_RX_BROADCAST_FLD = 5,
+ CFA_BLD_ACT_VEB_TCAM_RX_PFID_FLD = 6,
+ CFA_BLD_ACT_VEB_TCAM_RX_VFID_FLD = 7,
+ CFA_BLD_ACT_VEB_TCAM_RX_SMAC_FLD = 8,
+ CFA_BLD_ACT_VEB_TCAM_RX_MAX_FLD
+};
+
+/**
+ * Action Feature Chaining TCAM.
+ */
+enum cfa_bld_act_fc_tcam_flds {
+ CFA_BLD_ACT_FC_TCAM_FC_VALID_FLD = 0,
+ CFA_BLD_ACT_FC_TCAM_FC_RSVD_FLD = 1,
+ CFA_BLD_ACT_FC_TCAM_FC_METADATA_FLD = 2,
+ CFA_BLD_ACT_FC_TCAM_MAX_FLD
+};
+
+/**
+ * Feature Chaining TCAM Remap Table Fields:
+ */
+enum cfa_bld_act_fc_rmp_dr_flds {
+ CFA_BLD_ACT_FC_RMP_DR_METADATA_FLD = 0,
+ CFA_BLD_ACT_FC_RMP_DR_METAMASK_FLD = 1,
+ CFA_BLD_ACT_FC_RMP_DR_L2_FUNC_FLD = 2,
+ CFA_BLD_ACT_FC_RMP_DR_RSVD_FLD = 3,
+ CFA_BLD_ACT_FC_RMP_DR_MAX_FLD
+};
+
+/**
+ * Profile Input Lookup Table Memory Fields:
+ */
+enum cfa_bld_prof_ilt_dr_flds {
+ CFA_BLD_PROF_ILT_DR_ILT_META_EN_FLD = 0,
+ CFA_BLD_PROF_ILT_DR_META_PROF_FLD = 1,
+ CFA_BLD_PROF_ILT_DR_METADATA_FLD = 2,
+ CFA_BLD_PROF_ILT_DR_PARIF_FLD = 3,
+ CFA_BLD_PROF_ILT_DR_L2_FUNC_FLD = 4,
+ CFA_BLD_PROF_ILT_DR_EN_BD_META_FLD = 5,
+ CFA_BLD_PROF_ILT_DR_EN_BD_ACTION_FLD = 6,
+ CFA_BLD_PROF_ILT_DR_EN_ILT_DEST_FLD = 7,
+ CFA_BLD_PROF_ILT_DR_ILT_FWD_OP_FLD = 8,
+ CFA_BLD_PROF_ILT_DR_ILT_ACT_HINT_FLD = 9,
+ CFA_BLD_PROF_ILT_DR_ILT_SCOPE_FLD = 10,
+ CFA_BLD_PROF_ILT_DR_ILT_ACT_REC_PTR_FLD = 11,
+ CFA_BLD_PROF_ILT_DR_ILT_DESTINATION_FLD = 12,
+ CFA_BLD_PROF_ILT_DR_MAX_FLD
+};
+
+/**
+ * Profile Lookup TCAM Remap Table Fields:
+ */
+enum cfa_bld_prof_profile_rmp_dr_flds {
+ CFA_BLD_PROF_PROFILE_RMP_DR_PL_BYP_LKUP_EN_FLD = 0,
+ CFA_BLD_PROF_PROFILE_RMP_DR_EM_SEARCH_EN_FLD = 1,
+ CFA_BLD_PROF_PROFILE_RMP_DR_EM_PROFILE_ID_FLD = 2,
+ CFA_BLD_PROF_PROFILE_RMP_DR_EM_KEY_ID_FLD = 3,
+ CFA_BLD_PROF_PROFILE_RMP_DR_EM_SCOPE_FLD = 4,
+ CFA_BLD_PROF_PROFILE_RMP_DR_TCAM_SEARCH_EN_FLD = 5,
+ CFA_BLD_PROF_PROFILE_RMP_DR_TCAM_PROFILE_ID_FLD = 6,
+ CFA_BLD_PROF_PROFILE_RMP_DR_TCAM_KEY_ID_FLD = 7,
+ CFA_BLD_PROF_PROFILE_RMP_DR_TCAM_SCOPE_FLD = 8,
+ CFA_BLD_PROF_PROFILE_RMP_DR_MAX_FLD
+};
+
+/**
+ * PROF_PROFILE_RMP_DR_BYP
+ */
+enum cfa_bld_prof_profile_rmp_dr_byp_flds {
+ CFA_BLD_PROF_PROFILE_RMP_DR_BYP_PL_BYP_LKUP_EN_FLD = 0,
+ CFA_BLD_PROF_PROFILE_RMP_DR_BYP_RESERVED_FLD = 1,
+ CFA_BLD_PROF_PROFILE_RMP_DR_BYP_BYPASS_OP_FLD = 2,
+ CFA_BLD_PROF_PROFILE_RMP_DR_BYP_PL_ACT_HINT_FLD = 3,
+ CFA_BLD_PROF_PROFILE_RMP_DR_BYP_PL_SCOPE_FLD = 4,
+ CFA_BLD_PROF_PROFILE_RMP_DR_BYP_PL_ACT_REC_PTR_FLD = 5,
+ CFA_BLD_PROF_PROFILE_RMP_DR_BYP_MAX_FLD
+};
+
+/**
+ * VNIC-SVIF Properties Table Fields: TX SVIF Properties Table
+ */
+enum cfa_bld_act_vspt_dr_tx_flds {
+ CFA_BLD_ACT_VSPT_DR_TX_TPID_AS_CTL_FLD = 0,
+ CFA_BLD_ACT_VSPT_DR_TX_ALWD_TPID_FLD = 1,
+ CFA_BLD_ACT_VSPT_DR_TX_DFLT_TPID_FLD = 2,
+ CFA_BLD_ACT_VSPT_DR_TX_PRI_AS_CTL_FLD = 3,
+ CFA_BLD_ACT_VSPT_DR_TX_ALWD_PRI_FLD = 4,
+ CFA_BLD_ACT_VSPT_DR_TX_DFLT_PRI_FLD = 5,
+ CFA_BLD_ACT_VSPT_DR_TX_MIR_FLD = 6,
+ CFA_BLD_ACT_VSPT_DR_TX_MAX_FLD
+};
+
+/**
+ * RX VNIC Properties Table
+ */
+enum cfa_bld_act_vspt_dr_rx_flds {
+ CFA_BLD_ACT_VSPT_DR_RX_RSVD_FLD = 0,
+ CFA_BLD_ACT_VSPT_DR_RX_METAFMT_FLD = 1,
+ CFA_BLD_ACT_VSPT_DR_RX_FID_FLD = 2,
+ CFA_BLD_ACT_VSPT_DR_RX_MIR_FLD = 3,
+ CFA_BLD_ACT_VSPT_DR_RX_MAX_FLD
+};
+
+/**
+ * LAG ID Balance Table Fields:
+ */
+enum cfa_bld_act_lbt_dr_flds {
+ CFA_BLD_ACT_LBT_DR_DST_BMP_FLD = 0,
+ CFA_BLD_ACT_LBT_DR_MAX_FLD
+};
+
+/**
+ * L2 Context Lookup Remap Table Fields:
+ */
+enum cfa_bld_prof_l2_ctxt_rmp_dr_flds {
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_PRSV_PARIF_FLD = 0,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_PARIF_FLD = 1,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_PRSV_L2IP_CTXT_FLD = 2,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_L2IP_CTXT_FLD = 3,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_PRSV_PROF_FUNC_FLD = 4,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_PROF_FUNC_FLD = 5,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_CTXT_OPCODE_FLD = 6,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_L2IP_META_ENB_FLD = 7,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_L2IP_META_FLD = 8,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_L2IP_ACT_ENB_FLD = 9,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_L2IP_ACT_DATA_FLD = 10,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_L2IP_RFS_ENB_FLD = 11,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_L2IP_RFS_DATA_FLD = 12,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_L2IP_DEST_ENB_FLD = 13,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_L2IP_DEST_DATA_FLD = 14,
+ CFA_BLD_PROF_L2_CTXT_RMP_DR_MAX_FLD
+};
+
+/**
+ * Multi Field Register.
+ */
+enum cfa_bld_act_fc_tcam_result_flds {
+ CFA_BLD_ACT_FC_TCAM_RESULT_SEARCH_RESULT_FLD = 0,
+ CFA_BLD_ACT_FC_TCAM_RESULT_UNUSED_0_FLD = 1,
+ CFA_BLD_ACT_FC_TCAM_RESULT_SEARCH_HIT_FLD = 2,
+ CFA_BLD_ACT_FC_TCAM_RESULT_MAX_FLD
+};
+
+/**
+ * Multi Field Register.
+ */
+enum cfa_bld_act_mirror_flds {
+ CFA_BLD_ACT_MIRROR_UNUSED_0_FLD = 0,
+ CFA_BLD_ACT_MIRROR_RELATIVE_FLD = 1,
+ CFA_BLD_ACT_MIRROR_HINT_FLD = 2,
+ CFA_BLD_ACT_MIRROR_SAMP_FLD = 3,
+ CFA_BLD_ACT_MIRROR_TRUNC_FLD = 4,
+ CFA_BLD_ACT_MIRROR_IGN_DROP_FLD = 5,
+ CFA_BLD_ACT_MIRROR_MODE_FLD = 6,
+ CFA_BLD_ACT_MIRROR_COND_FLD = 7,
+ CFA_BLD_ACT_MIRROR_AR_PTR_FLD = 8,
+ CFA_BLD_ACT_MIRROR_SAMP_CFG_FLD = 9,
+ CFA_BLD_ACT_MIRROR_MAX_FLD
+};
+
+/**
+ * WC LREC Lookup Record
+ */
+enum cfa_bld_wc_lrec_flds {
+ CFA_BLD_WC_LREC_METADATA_FLD = 0,
+ CFA_BLD_WC_LREC_META_PROF_FLD = 1,
+ CFA_BLD_WC_LREC_PROF_FUNC_FLD = 2,
+ CFA_BLD_WC_LREC_RECYCLE_DEST_FLD = 3,
+ CFA_BLD_WC_LREC_FC_PTR_FLD = 4,
+ CFA_BLD_WC_LREC_FC_TYPE_FLD = 5,
+ CFA_BLD_WC_LREC_FC_OP_FLD = 6,
+ CFA_BLD_WC_LREC_PATHS_M1_FLD = 7,
+ CFA_BLD_WC_LREC_ACT_REC_SIZE_FLD = 8,
+ CFA_BLD_WC_LREC_RING_TABLE_IDX_FLD = 9,
+ CFA_BLD_WC_LREC_DESTINATION_FLD = 10,
+ CFA_BLD_WC_LREC_ACT_REC_PTR_FLD = 11,
+ CFA_BLD_WC_LREC_ACT_HINT_FLD = 12,
+ CFA_BLD_WC_LREC_STRENGTH_FLD = 13,
+ CFA_BLD_WC_LREC_OPCODE_FLD = 14,
+ CFA_BLD_WC_LREC_EPOCH1_FLD = 15,
+ CFA_BLD_WC_LREC_EPOCH0_FLD = 16,
+ CFA_BLD_WC_LREC_REC_SIZE_FLD = 17,
+ CFA_BLD_WC_LREC_VALID_FLD = 18,
+ CFA_BLD_WC_LREC_MAX_FLD
+};
+
+/**
+ * EM LREC Lookup Record
+ */
+enum cfa_bld_em_lrec_flds {
+ CFA_BLD_EM_LREC_RANGE_IDX_FLD = 0,
+ CFA_BLD_EM_LREC_RANGE_PROFILE_FLD = 1,
+ CFA_BLD_EM_LREC_CREC_TIMER_VALUE_FLD = 2,
+ CFA_BLD_EM_LREC_CREC_STATE_FLD = 3,
+ CFA_BLD_EM_LREC_CREC_TCP_MSB_OPP_INIT_FLD = 4,
+ CFA_BLD_EM_LREC_CREC_TCP_MSB_OPP_FLD = 5,
+ CFA_BLD_EM_LREC_CREC_TCP_MSB_LOC_FLD = 6,
+ CFA_BLD_EM_LREC_CREC_TCP_WIN_FLD = 7,
+ CFA_BLD_EM_LREC_CREC_TCP_UPDT_EN_FLD = 8,
+ CFA_BLD_EM_LREC_CREC_TCP_DIR_FLD = 9,
+ CFA_BLD_EM_LREC_METADATA_FLD = 10,
+ CFA_BLD_EM_LREC_PROF_FUNC_FLD = 11,
+ CFA_BLD_EM_LREC_META_PROF_FLD = 12,
+ CFA_BLD_EM_LREC_RECYCLE_DEST_FLD = 13,
+ CFA_BLD_EM_LREC_FC_PTR_FLD = 14,
+ CFA_BLD_EM_LREC_FC_TYPE_FLD = 15,
+ CFA_BLD_EM_LREC_FC_OP_FLD = 16,
+ CFA_BLD_EM_LREC_PATHS_M1_FLD = 17,
+ CFA_BLD_EM_LREC_ACT_REC_SIZE_FLD = 18,
+ CFA_BLD_EM_LREC_RING_TABLE_IDX_FLD = 19,
+ CFA_BLD_EM_LREC_DESTINATION_FLD = 20,
+ CFA_BLD_EM_LREC_ACT_REC_PTR_FLD = 21,
+ CFA_BLD_EM_LREC_ACT_HINT_FLD = 22,
+ CFA_BLD_EM_LREC_STRENGTH_FLD = 23,
+ CFA_BLD_EM_LREC_OPCODE_FLD = 24,
+ CFA_BLD_EM_LREC_EPOCH1_FLD = 25,
+ CFA_BLD_EM_LREC_EPOCH0_FLD = 26,
+ CFA_BLD_EM_LREC_REC_SIZE_FLD = 27,
+ CFA_BLD_EM_LREC_VALID_FLD = 28,
+ CFA_BLD_EM_LREC_MAX_FLD
+};
+
+/**
+ * EM Lookup Bucket Format
+ */
+enum cfa_bld_em_bucket_flds {
+ CFA_BLD_EM_BUCKET_BIN0_ENTRY_FLD = 0,
+ CFA_BLD_EM_BUCKET_BIN0_HASH_MSBS_FLD = 1,
+ CFA_BLD_EM_BUCKET_BIN1_ENTRY_FLD = 2,
+ CFA_BLD_EM_BUCKET_BIN1_HASH_MSBS_FLD = 3,
+ CFA_BLD_EM_BUCKET_BIN2_ENTRY_FLD = 4,
+ CFA_BLD_EM_BUCKET_BIN2_HASH_MSBS_FLD = 5,
+ CFA_BLD_EM_BUCKET_BIN3_ENTRY_FLD = 6,
+ CFA_BLD_EM_BUCKET_BIN3_HASH_MSBS_FLD = 7,
+ CFA_BLD_EM_BUCKET_BIN4_ENTRY_FLD = 8,
+ CFA_BLD_EM_BUCKET_BIN4_HASH_MSBS_FLD = 9,
+ CFA_BLD_EM_BUCKET_BIN5_ENTRY_FLD = 10,
+ CFA_BLD_EM_BUCKET_BIN5_HASH_MSBS_FLD = 11,
+ CFA_BLD_EM_BUCKET_CHAIN_POINTER_FLD = 12,
+ CFA_BLD_EM_BUCKET_CHAIN_VALID_FLD = 13,
+ CFA_BLD_EM_BUCKET_MAX_FLD
+};
+
+/**
+ * Compact Action Record. The compact action record uses relative
+ * pointers to access needed data. This keeps the compact action record
+ * down to 64b.
+ */
+enum cfa_bld_compact_action_flds {
+ CFA_BLD_COMPACT_ACTION_TYPE_FLD = 0,
+ CFA_BLD_COMPACT_ACTION_DROP_FLD = 1,
+ CFA_BLD_COMPACT_ACTION_VLAN_DELETE_FLD = 2,
+ CFA_BLD_COMPACT_ACTION_DEST_FLD = 3,
+ CFA_BLD_COMPACT_ACTION_DEST_OP_FLD = 4,
+ CFA_BLD_COMPACT_ACTION_DECAP_FLD = 5,
+ CFA_BLD_COMPACT_ACTION_MIRRORING_FLD = 6,
+ CFA_BLD_COMPACT_ACTION_METER_PTR_FLD = 7,
+ CFA_BLD_COMPACT_ACTION_STAT0_OFF_FLD = 8,
+ CFA_BLD_COMPACT_ACTION_STAT0_OP_FLD = 9,
+ CFA_BLD_COMPACT_ACTION_STAT0_CTR_TYPE_FLD = 10,
+ CFA_BLD_COMPACT_ACTION_MOD_OFF_FLD = 11,
+ CFA_BLD_COMPACT_ACTION_ENC_OFF_FLD = 12,
+ CFA_BLD_COMPACT_ACTION_SRC_OFF_FLD = 13,
+ CFA_BLD_COMPACT_ACTION_UNUSED_0_FLD = 14,
+ CFA_BLD_COMPACT_ACTION_MAX_FLD
+};
+
+/**
+ * Full Action Record. The full action record uses full pointers to
+ * access needed data. It also allows access to all the action features.
+ * The Full Action record is 192b.
+ */
+enum cfa_bld_full_action_flds {
+ CFA_BLD_FULL_ACTION_TYPE_FLD = 0,
+ CFA_BLD_FULL_ACTION_DROP_FLD = 1,
+ CFA_BLD_FULL_ACTION_VLAN_DELETE_FLD = 2,
+ CFA_BLD_FULL_ACTION_DEST_FLD = 3,
+ CFA_BLD_FULL_ACTION_DEST_OP_FLD = 4,
+ CFA_BLD_FULL_ACTION_DECAP_FLD = 5,
+ CFA_BLD_FULL_ACTION_MIRRORING_FLD = 6,
+ CFA_BLD_FULL_ACTION_METER_PTR_FLD = 7,
+ CFA_BLD_FULL_ACTION_STAT0_PTR_FLD = 8,
+ CFA_BLD_FULL_ACTION_STAT0_OP_FLD = 9,
+ CFA_BLD_FULL_ACTION_STAT0_CTR_TYPE_FLD = 10,
+ CFA_BLD_FULL_ACTION_STAT1_PTR_FLD = 11,
+ CFA_BLD_FULL_ACTION_STAT1_OP_FLD = 12,
+ CFA_BLD_FULL_ACTION_STAT1_CTR_TYPE_FLD = 13,
+ CFA_BLD_FULL_ACTION_MOD_PTR_FLD = 14,
+ CFA_BLD_FULL_ACTION_ENC_PTR_FLD = 15,
+ CFA_BLD_FULL_ACTION_SRC_PTR_FLD = 16,
+ CFA_BLD_FULL_ACTION_UNUSED_0_FLD = 17,
+ CFA_BLD_FULL_ACTION_MAX_FLD
+};
+
+/**
+ * Multicast Group Action Record. This action is used to send the packet
+ * to multiple destinations. The MCG Action record is 256b.
+ */
+enum cfa_bld_mcg_action_flds {
+ CFA_BLD_MCG_ACTION_TYPE_FLD = 0,
+ CFA_BLD_MCG_ACTION_SRC_KO_EN_FLD = 1,
+ CFA_BLD_MCG_ACTION_UNUSED_0_FLD = 2,
+ CFA_BLD_MCG_ACTION_NEXT_PTR_FLD = 3,
+ CFA_BLD_MCG_ACTION_PTR0_ACT_HINT_FLD = 4,
+ CFA_BLD_MCG_ACTION_PTR0_ACT_REC_PTR_FLD = 5,
+ CFA_BLD_MCG_ACTION_PTR1_ACT_HINT_FLD = 6,
+ CFA_BLD_MCG_ACTION_PTR1_ACT_REC_PTR_FLD = 7,
+ CFA_BLD_MCG_ACTION_PTR2_ACT_HINT_FLD = 8,
+ CFA_BLD_MCG_ACTION_PTR2_ACT_REC_PTR_FLD = 9,
+ CFA_BLD_MCG_ACTION_PTR3_ACT_HINT_FLD = 10,
+ CFA_BLD_MCG_ACTION_PTR3_ACT_REC_PTR_FLD = 11,
+ CFA_BLD_MCG_ACTION_PTR4_ACT_HINT_FLD = 12,
+ CFA_BLD_MCG_ACTION_PTR4_ACT_REC_PTR_FLD = 13,
+ CFA_BLD_MCG_ACTION_PTR5_ACT_HINT_FLD = 14,
+ CFA_BLD_MCG_ACTION_PTR5_ACT_REC_PTR_FLD = 15,
+ CFA_BLD_MCG_ACTION_PTR6_ACT_HINT_FLD = 16,
+ CFA_BLD_MCG_ACTION_PTR6_ACT_REC_PTR_FLD = 17,
+ CFA_BLD_MCG_ACTION_PTR7_ACT_HINT_FLD = 18,
+ CFA_BLD_MCG_ACTION_PTR7_ACT_REC_PTR_FLD = 19,
+ CFA_BLD_MCG_ACTION_MAX_FLD
+};
+
+/**
+ * Subsequent Multicast Group Action Record. This record continues the
+ * destination list of a preceding MCG record (linked through its
+ * NEXT_PTR field). The MCG Action record is 256b.
+ */
+enum cfa_bld_mcg_subseq_action_flds {
+ CFA_BLD_MCG_SUBSEQ_ACTION_TYPE_FLD = 0,
+ CFA_BLD_MCG_SUBSEQ_ACTION_UNUSED_0_FLD = 1,
+ CFA_BLD_MCG_SUBSEQ_ACTION_NEXT_PTR_FLD = 2,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR0_ACT_HINT_FLD = 3,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR0_ACT_REC_PTR_FLD = 4,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR1_ACT_HINT_FLD = 5,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR1_ACT_REC_PTR_FLD = 6,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR2_ACT_HINT_FLD = 7,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR2_ACT_REC_PTR_FLD = 8,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR3_ACT_HINT_FLD = 9,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR3_ACT_REC_PTR_FLD = 10,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR4_ACT_HINT_FLD = 11,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR4_ACT_REC_PTR_FLD = 12,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR5_ACT_HINT_FLD = 13,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR5_ACT_REC_PTR_FLD = 14,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR6_ACT_HINT_FLD = 15,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR6_ACT_REC_PTR_FLD = 16,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR7_ACT_HINT_FLD = 17,
+ CFA_BLD_MCG_SUBSEQ_ACTION_PTR7_ACT_REC_PTR_FLD = 18,
+ CFA_BLD_MCG_SUBSEQ_ACTION_MAX_FLD
+};
+
+/**
+ * Action Meter Formats
+ */
+enum cfa_bld_meters_flds {
+ CFA_BLD_METERS_BKT_C_FLD = 0,
+ CFA_BLD_METERS_BKT_E_FLD = 1,
+ CFA_BLD_METERS_FLAGS_MTR_VAL_FLD = 2,
+ CFA_BLD_METERS_FLAGS_ECN_RMP_EN_FLD = 3,
+ CFA_BLD_METERS_FLAGS_CF_FLD = 4,
+ CFA_BLD_METERS_FLAGS_PM_FLD = 5,
+ CFA_BLD_METERS_FLAGS_RFC2698_FLD = 6,
+ CFA_BLD_METERS_FLAGS_CBSM_FLD = 7,
+ CFA_BLD_METERS_FLAGS_EBSM_FLD = 8,
+ CFA_BLD_METERS_FLAGS_CBND_FLD = 9,
+ CFA_BLD_METERS_FLAGS_EBND_FLD = 10,
+ CFA_BLD_METERS_CBS_FLD = 11,
+ CFA_BLD_METERS_EBS_FLD = 12,
+ CFA_BLD_METERS_CIR_FLD = 13,
+ CFA_BLD_METERS_EIR_FLD = 14,
+ CFA_BLD_METERS_PROTECTION_SCOPE_FLD = 15,
+ CFA_BLD_METERS_PROTECTION_RSVD_FLD = 16,
+ CFA_BLD_METERS_PROTECTION_ENABLE_FLD = 17,
+ CFA_BLD_METERS_MAX_FLD
+};
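
These meter fields map naturally onto a two-rate policer. As a rough illustration only, here is a minimal sketch of flattening a DPDK rte_mtr RFC 2698 profile into (field id, value) pairs. The cfa_fld_val pair type, the EIR/EBS taking the profile's PIR/PBS, the assumption that rates and burst sizes need no unit conversion, and the include name (inferred from the header guard) are all assumptions made for the example, not statements about the hardware encoding.

#include <errno.h>
#include <stdint.h>
#include <rte_mtr.h>
#include "cfa_bld_field_ids.h"	/* name inferred from the guard above */

/* Illustrative (field id, value) pair keyed by enum cfa_bld_meters_flds. */
struct cfa_fld_val {
	uint16_t id;
	uint64_t val;
};

static int
meter_profile_to_flds(const struct rte_mtr_meter_profile *prof,
		      struct cfa_fld_val *flds)
{
	if (prof->alg != RTE_MTR_TRTCM_RFC2698)
		return -ENOTSUP;	/* sketch handles trTCM RFC 2698 only */

	flds[0] = (struct cfa_fld_val){ CFA_BLD_METERS_FLAGS_RFC2698_FLD, 1 };
	flds[1] = (struct cfa_fld_val){ CFA_BLD_METERS_CIR_FLD,
					prof->trtcm_rfc2698.cir };
	flds[2] = (struct cfa_fld_val){ CFA_BLD_METERS_EIR_FLD,
					prof->trtcm_rfc2698.pir };
	flds[3] = (struct cfa_fld_val){ CFA_BLD_METERS_CBS_FLD,
					prof->trtcm_rfc2698.cbs };
	flds[4] = (struct cfa_fld_val){ CFA_BLD_METERS_EBS_FLD,
					prof->trtcm_rfc2698.pbs };
	/* Mark the meter entry valid. */
	flds[5] = (struct cfa_fld_val){ CFA_BLD_METERS_FLAGS_MTR_VAL_FLD, 1 };
	return 6;
}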
+
+/**
+ * Enumeration for fkb
+ */
+enum cfa_bld_fkb_flds {
+ CFA_BLD_FKB_PROF_ID_FLD = 0,
+ CFA_BLD_FKB_L2CTXT_FLD = 1,
+ CFA_BLD_FKB_L2FUNC_FLD = 2,
+ CFA_BLD_FKB_PARIF_FLD = 3,
+ CFA_BLD_FKB_SPIF_FLD = 4,
+ CFA_BLD_FKB_SVIF_FLD = 5,
+ CFA_BLD_FKB_LCOS_FLD = 6,
+ CFA_BLD_FKB_META_HI_FLD = 7,
+ CFA_BLD_FKB_META_LO_FLD = 8,
+ CFA_BLD_FKB_RCYC_CNT_FLD = 9,
+ CFA_BLD_FKB_LOOPBACK_FLD = 10,
+ CFA_BLD_FKB_OTL2_TYPE_FLD = 11,
+ CFA_BLD_FKB_OTL2_DMAC_FLD = 12,
+ CFA_BLD_FKB_OTL2_SMAC_FLD = 13,
+ CFA_BLD_FKB_OTL2_DT_FLD = 14,
+ CFA_BLD_FKB_OTL2_SA_FLD = 15,
+ CFA_BLD_FKB_OTL2_NVT_FLD = 16,
+ CFA_BLD_FKB_OTL2_OVP_FLD = 17,
+ CFA_BLD_FKB_OTL2_OVD_FLD = 18,
+ CFA_BLD_FKB_OTL2_OVV_FLD = 19,
+ CFA_BLD_FKB_OTL2_OVT_FLD = 20,
+ CFA_BLD_FKB_OTL2_IVP_FLD = 21,
+ CFA_BLD_FKB_OTL2_IVD_FLD = 22,
+ CFA_BLD_FKB_OTL2_IVV_FLD = 23,
+ CFA_BLD_FKB_OTL2_IVT_FLD = 24,
+ CFA_BLD_FKB_OTL2_ETYPE_FLD = 25,
+ CFA_BLD_FKB_OTL3_TYPE_FLD = 26,
+ CFA_BLD_FKB_OTL3_SIP3_FLD = 27,
+ CFA_BLD_FKB_OTL3_SIP2_FLD = 28,
+ CFA_BLD_FKB_OTL3_SIP1_FLD = 29,
+ CFA_BLD_FKB_OTL3_SIP0_FLD = 30,
+ CFA_BLD_FKB_OTL3_DIP3_FLD = 31,
+ CFA_BLD_FKB_OTL3_DIP2_FLD = 32,
+ CFA_BLD_FKB_OTL3_DIP1_FLD = 33,
+ CFA_BLD_FKB_OTL3_DIP0_FLD = 34,
+ CFA_BLD_FKB_OTL3_TTL_FLD = 35,
+ CFA_BLD_FKB_OTL3_PROT_FLD = 36,
+ CFA_BLD_FKB_OTL3_FID_FLD = 37,
+ CFA_BLD_FKB_OTL3_QOS_FLD = 38,
+ CFA_BLD_FKB_OTL3_IEH_NONEXT_FLD = 39,
+ CFA_BLD_FKB_OTL3_IEH_SEP_FLD = 40,
+ CFA_BLD_FKB_OTL3_IEH_AUTH_FLD = 41,
+ CFA_BLD_FKB_OTL3_IEH_DEST_FLD = 42,
+ CFA_BLD_FKB_OTL3_IEH_FRAG_FLD = 43,
+ CFA_BLD_FKB_OTL3_IEH_RTHDR_FLD = 44,
+ CFA_BLD_FKB_OTL3_IEH_HOP_FLD = 45,
+ CFA_BLD_FKB_OTL3_IEH_1FRAG_FLD = 46,
+ CFA_BLD_FKB_OTL3_DF_FLD = 47,
+ CFA_BLD_FKB_OTL3_L3ERR_FLD = 48,
+ CFA_BLD_FKB_OTL4_TYPE_FLD = 49,
+ CFA_BLD_FKB_OTL4_SRC_FLD = 50,
+ CFA_BLD_FKB_OTL4_DST_FLD = 51,
+ CFA_BLD_FKB_OTL4_FLAGS_FLD = 52,
+ CFA_BLD_FKB_OTL4_SEQ_FLD = 53,
+ CFA_BLD_FKB_OTL4_PA_FLD = 54,
+ CFA_BLD_FKB_OTL4_OPT_FLD = 55,
+ CFA_BLD_FKB_OTL4_TCPTS_FLD = 56,
+ CFA_BLD_FKB_OTL4_ERR_FLD = 57,
+ CFA_BLD_FKB_OT_TYPE_FLD = 58,
+ CFA_BLD_FKB_OT_FLAGS_FLD = 59,
+ CFA_BLD_FKB_OT_IDS_FLD = 60,
+ CFA_BLD_FKB_OT_ID_FLD = 61,
+ CFA_BLD_FKB_OT_CTXTS_FLD = 62,
+ CFA_BLD_FKB_OT_CTXT_FLD = 63,
+ CFA_BLD_FKB_OT_QOS_FLD = 64,
+ CFA_BLD_FKB_OT_ERR_FLD = 65,
+ CFA_BLD_FKB_TL2_TYPE_FLD = 66,
+ CFA_BLD_FKB_TL2_DMAC_FLD = 67,
+ CFA_BLD_FKB_TL2_SMAC_FLD = 68,
+ CFA_BLD_FKB_TL2_DT_FLD = 69,
+ CFA_BLD_FKB_TL2_SA_FLD = 70,
+ CFA_BLD_FKB_TL2_NVT_FLD = 71,
+ CFA_BLD_FKB_TL2_OVP_FLD = 72,
+ CFA_BLD_FKB_TL2_OVD_FLD = 73,
+ CFA_BLD_FKB_TL2_OVV_FLD = 74,
+ CFA_BLD_FKB_TL2_OVT_FLD = 75,
+ CFA_BLD_FKB_TL2_IVP_FLD = 76,
+ CFA_BLD_FKB_TL2_IVD_FLD = 77,
+ CFA_BLD_FKB_TL2_IVV_FLD = 78,
+ CFA_BLD_FKB_TL2_IVT_FLD = 79,
+ CFA_BLD_FKB_TL2_ETYPE_FLD = 80,
+ CFA_BLD_FKB_TL3_TYPE_FLD = 81,
+ CFA_BLD_FKB_TL3_SIP3_FLD = 82,
+ CFA_BLD_FKB_TL3_SIP2_FLD = 83,
+ CFA_BLD_FKB_TL3_SIP1_FLD = 84,
+ CFA_BLD_FKB_TL3_SIP0_FLD = 85,
+ CFA_BLD_FKB_TL3_DIP3_FLD = 86,
+ CFA_BLD_FKB_TL3_DIP2_FLD = 87,
+ CFA_BLD_FKB_TL3_DIP1_FLD = 88,
+ CFA_BLD_FKB_TL3_DIP0_FLD = 89,
+ CFA_BLD_FKB_TL3_TTL_FLD = 90,
+ CFA_BLD_FKB_TL3_PROT_FLD = 91,
+ CFA_BLD_FKB_TL3_FID_FLD = 92,
+ CFA_BLD_FKB_TL3_QOS_FLD = 93,
+ CFA_BLD_FKB_TL3_IEH_NONEXT_FLD = 94,
+ CFA_BLD_FKB_TL3_IEH_SEP_FLD = 95,
+ CFA_BLD_FKB_TL3_IEH_AUTH_FLD = 96,
+ CFA_BLD_FKB_TL3_IEH_DEST_FLD = 97,
+ CFA_BLD_FKB_TL3_IEH_FRAG_FLD = 98,
+ CFA_BLD_FKB_TL3_IEH_RTHDR_FLD = 99,
+ CFA_BLD_FKB_TL3_IEH_HOP_FLD = 100,
+ CFA_BLD_FKB_TL3_IEH_1FRAG_FLD = 101,
+ CFA_BLD_FKB_TL3_DF_FLD = 102,
+ CFA_BLD_FKB_TL3_L3ERR_FLD = 103,
+ CFA_BLD_FKB_TL4_TYPE_FLD = 104,
+ CFA_BLD_FKB_TL4_SRC_FLD = 105,
+ CFA_BLD_FKB_TL4_DST_FLD = 106,
+ CFA_BLD_FKB_TL4_FLAGS_FLD = 107,
+ CFA_BLD_FKB_TL4_SEQ_FLD = 108,
+ CFA_BLD_FKB_TL4_PA_FLD = 109,
+ CFA_BLD_FKB_TL4_OPT_FLD = 110,
+ CFA_BLD_FKB_TL4_TCPTS_FLD = 111,
+ CFA_BLD_FKB_TL4_ERR_FLD = 112,
+ CFA_BLD_FKB_T_TYPE_FLD = 113,
+ CFA_BLD_FKB_T_FLAGS_FLD = 114,
+ CFA_BLD_FKB_T_IDS_FLD = 115,
+ CFA_BLD_FKB_T_ID_FLD = 116,
+ CFA_BLD_FKB_T_CTXTS_FLD = 117,
+ CFA_BLD_FKB_T_CTXT_FLD = 118,
+ CFA_BLD_FKB_T_QOS_FLD = 119,
+ CFA_BLD_FKB_T_ERR_FLD = 120,
+ CFA_BLD_FKB_L2_TYPE_FLD = 121,
+ CFA_BLD_FKB_L2_DMAC_FLD = 122,
+ CFA_BLD_FKB_L2_SMAC_FLD = 123,
+ CFA_BLD_FKB_L2_DT_FLD = 124,
+ CFA_BLD_FKB_L2_SA_FLD = 125,
+ CFA_BLD_FKB_L2_NVT_FLD = 126,
+ CFA_BLD_FKB_L2_OVP_FLD = 127,
+ CFA_BLD_FKB_L2_OVD_FLD = 128,
+ CFA_BLD_FKB_L2_OVV_FLD = 129,
+ CFA_BLD_FKB_L2_OVT_FLD = 130,
+ CFA_BLD_FKB_L2_IVP_FLD = 131,
+ CFA_BLD_FKB_L2_IVD_FLD = 132,
+ CFA_BLD_FKB_L2_IVV_FLD = 133,
+ CFA_BLD_FKB_L2_IVT_FLD = 134,
+ CFA_BLD_FKB_L2_ETYPE_FLD = 135,
+ CFA_BLD_FKB_L3_TYPE_FLD = 136,
+ CFA_BLD_FKB_L3_SIP3_FLD = 137,
+ CFA_BLD_FKB_L3_SIP2_FLD = 138,
+ CFA_BLD_FKB_L3_SIP1_FLD = 139,
+ CFA_BLD_FKB_L3_SIP0_FLD = 140,
+ CFA_BLD_FKB_L3_DIP3_FLD = 141,
+ CFA_BLD_FKB_L3_DIP2_FLD = 142,
+ CFA_BLD_FKB_L3_DIP1_FLD = 143,
+ CFA_BLD_FKB_L3_DIP0_FLD = 144,
+ CFA_BLD_FKB_L3_TTL_FLD = 145,
+ CFA_BLD_FKB_L3_PROT_FLD = 146,
+ CFA_BLD_FKB_L3_FID_FLD = 147,
+ CFA_BLD_FKB_L3_QOS_FLD = 148,
+ CFA_BLD_FKB_L3_IEH_NONEXT_FLD = 149,
+ CFA_BLD_FKB_L3_IEH_SEP_FLD = 150,
+ CFA_BLD_FKB_L3_IEH_AUTH_FLD = 151,
+ CFA_BLD_FKB_L3_IEH_DEST_FLD = 152,
+ CFA_BLD_FKB_L3_IEH_FRAG_FLD = 153,
+ CFA_BLD_FKB_L3_IEH_RTHDR_FLD = 154,
+ CFA_BLD_FKB_L3_IEH_HOP_FLD = 155,
+ CFA_BLD_FKB_L3_IEH_1FRAG_FLD = 156,
+ CFA_BLD_FKB_L3_DF_FLD = 157,
+ CFA_BLD_FKB_L3_L3ERR_FLD = 158,
+ CFA_BLD_FKB_L4_TYPE_FLD = 159,
+ CFA_BLD_FKB_L4_SRC_FLD = 160,
+ CFA_BLD_FKB_L4_DST_FLD = 161,
+ CFA_BLD_FKB_L4_FLAGS_FLD = 162,
+ CFA_BLD_FKB_L4_SEQ_FLD = 163,
+ CFA_BLD_FKB_L4_ACK_FLD = 164,
+ CFA_BLD_FKB_L4_WIN_FLD = 165,
+ CFA_BLD_FKB_L4_PA_FLD = 166,
+ CFA_BLD_FKB_L4_OPT_FLD = 167,
+ CFA_BLD_FKB_L4_TCPTS_FLD = 168,
+ CFA_BLD_FKB_L4_TSVAL_FLD = 169,
+ CFA_BLD_FKB_L4_TXECR_FLD = 170,
+ CFA_BLD_FKB_L4_ERR_FLD = 171,
+ CFA_BLD_FKB_MAX_FLD
+};
+
+/**
+ * Enumeration for wc tcam fkb
+ */
+enum cfa_bld_wc_tcam_fkb_flds {
+ CFA_BLD_WC_TCAM_FKB_PROF_ID_FLD = 0,
+ CFA_BLD_WC_TCAM_FKB_L2CTXT_FLD = 1,
+ CFA_BLD_WC_TCAM_FKB_L2FUNC_FLD = 2,
+ CFA_BLD_WC_TCAM_FKB_PARIF_FLD = 3,
+ CFA_BLD_WC_TCAM_FKB_SPIF_FLD = 4,
+ CFA_BLD_WC_TCAM_FKB_SVIF_FLD = 5,
+ CFA_BLD_WC_TCAM_FKB_LCOS_FLD = 6,
+ CFA_BLD_WC_TCAM_FKB_META_HI_FLD = 7,
+ CFA_BLD_WC_TCAM_FKB_META_LO_FLD = 8,
+ CFA_BLD_WC_TCAM_FKB_RCYC_CNT_FLD = 9,
+ CFA_BLD_WC_TCAM_FKB_LOOPBACK_FLD = 10,
+ CFA_BLD_WC_TCAM_FKB_OTL2_TYPE_FLD = 11,
+ CFA_BLD_WC_TCAM_FKB_OTL2_DMAC_FLD = 12,
+ CFA_BLD_WC_TCAM_FKB_OTL2_SMAC_FLD = 13,
+ CFA_BLD_WC_TCAM_FKB_OTL2_DT_FLD = 14,
+ CFA_BLD_WC_TCAM_FKB_OTL2_SA_FLD = 15,
+ CFA_BLD_WC_TCAM_FKB_OTL2_NVT_FLD = 16,
+ CFA_BLD_WC_TCAM_FKB_OTL2_OVP_FLD = 17,
+ CFA_BLD_WC_TCAM_FKB_OTL2_OVD_FLD = 18,
+ CFA_BLD_WC_TCAM_FKB_OTL2_OVV_FLD = 19,
+ CFA_BLD_WC_TCAM_FKB_OTL2_OVT_FLD = 20,
+ CFA_BLD_WC_TCAM_FKB_OTL2_IVP_FLD = 21,
+ CFA_BLD_WC_TCAM_FKB_OTL2_IVD_FLD = 22,
+ CFA_BLD_WC_TCAM_FKB_OTL2_IVV_FLD = 23,
+ CFA_BLD_WC_TCAM_FKB_OTL2_IVT_FLD = 24,
+ CFA_BLD_WC_TCAM_FKB_OTL2_ETYPE_FLD = 25,
+ CFA_BLD_WC_TCAM_FKB_OTL3_TYPE_FLD = 26,
+ CFA_BLD_WC_TCAM_FKB_OTL3_SIP3_FLD = 27,
+ CFA_BLD_WC_TCAM_FKB_OTL3_SIP2_FLD = 28,
+ CFA_BLD_WC_TCAM_FKB_OTL3_SIP1_FLD = 29,
+ CFA_BLD_WC_TCAM_FKB_OTL3_SIP0_FLD = 30,
+ CFA_BLD_WC_TCAM_FKB_OTL3_DIP3_FLD = 31,
+ CFA_BLD_WC_TCAM_FKB_OTL3_DIP2_FLD = 32,
+ CFA_BLD_WC_TCAM_FKB_OTL3_DIP1_FLD = 33,
+ CFA_BLD_WC_TCAM_FKB_OTL3_DIP0_FLD = 34,
+ CFA_BLD_WC_TCAM_FKB_OTL3_TTL_FLD = 35,
+ CFA_BLD_WC_TCAM_FKB_OTL3_PROT_FLD = 36,
+ CFA_BLD_WC_TCAM_FKB_OTL3_FID_FLD = 37,
+ CFA_BLD_WC_TCAM_FKB_OTL3_QOS_FLD = 38,
+ CFA_BLD_WC_TCAM_FKB_OTL3_IEH_NONEXT_FLD = 39,
+ CFA_BLD_WC_TCAM_FKB_OTL3_IEH_SEP_FLD = 40,
+ CFA_BLD_WC_TCAM_FKB_OTL3_IEH_AUTH_FLD = 41,
+ CFA_BLD_WC_TCAM_FKB_OTL3_IEH_DEST_FLD = 42,
+ CFA_BLD_WC_TCAM_FKB_OTL3_IEH_FRAG_FLD = 43,
+ CFA_BLD_WC_TCAM_FKB_OTL3_IEH_RTHDR_FLD = 44,
+ CFA_BLD_WC_TCAM_FKB_OTL3_IEH_HOP_FLD = 45,
+ CFA_BLD_WC_TCAM_FKB_OTL3_IEH_1FRAG_FLD = 46,
+ CFA_BLD_WC_TCAM_FKB_OTL3_DF_FLD = 47,
+ CFA_BLD_WC_TCAM_FKB_OTL3_L3ERR_FLD = 48,
+ CFA_BLD_WC_TCAM_FKB_OTL4_TYPE_FLD = 49,
+ CFA_BLD_WC_TCAM_FKB_OTL4_SRC_FLD = 50,
+ CFA_BLD_WC_TCAM_FKB_OTL4_DST_FLD = 51,
+ CFA_BLD_WC_TCAM_FKB_OTL4_FLAGS_FLD = 52,
+ CFA_BLD_WC_TCAM_FKB_OTL4_SEQ_FLD = 53,
+ CFA_BLD_WC_TCAM_FKB_OTL4_PA_FLD = 54,
+ CFA_BLD_WC_TCAM_FKB_OTL4_OPT_FLD = 55,
+ CFA_BLD_WC_TCAM_FKB_OTL4_TCPTS_FLD = 56,
+ CFA_BLD_WC_TCAM_FKB_OTL4_ERR_FLD = 57,
+ CFA_BLD_WC_TCAM_FKB_OT_TYPE_FLD = 58,
+ CFA_BLD_WC_TCAM_FKB_OT_FLAGS_FLD = 59,
+ CFA_BLD_WC_TCAM_FKB_OT_IDS_FLD = 60,
+ CFA_BLD_WC_TCAM_FKB_OT_ID_FLD = 61,
+ CFA_BLD_WC_TCAM_FKB_OT_CTXTS_FLD = 62,
+ CFA_BLD_WC_TCAM_FKB_OT_CTXT_FLD = 63,
+ CFA_BLD_WC_TCAM_FKB_OT_QOS_FLD = 64,
+ CFA_BLD_WC_TCAM_FKB_OT_ERR_FLD = 65,
+ CFA_BLD_WC_TCAM_FKB_TL2_TYPE_FLD = 66,
+ CFA_BLD_WC_TCAM_FKB_TL2_DMAC_FLD = 67,
+ CFA_BLD_WC_TCAM_FKB_TL2_SMAC_FLD = 68,
+ CFA_BLD_WC_TCAM_FKB_TL2_DT_FLD = 69,
+ CFA_BLD_WC_TCAM_FKB_TL2_SA_FLD = 70,
+ CFA_BLD_WC_TCAM_FKB_TL2_NVT_FLD = 71,
+ CFA_BLD_WC_TCAM_FKB_TL2_OVP_FLD = 72,
+ CFA_BLD_WC_TCAM_FKB_TL2_OVD_FLD = 73,
+ CFA_BLD_WC_TCAM_FKB_TL2_OVV_FLD = 74,
+ CFA_BLD_WC_TCAM_FKB_TL2_OVT_FLD = 75,
+ CFA_BLD_WC_TCAM_FKB_TL2_IVP_FLD = 76,
+ CFA_BLD_WC_TCAM_FKB_TL2_IVD_FLD = 77,
+ CFA_BLD_WC_TCAM_FKB_TL2_IVV_FLD = 78,
+ CFA_BLD_WC_TCAM_FKB_TL2_IVT_FLD = 79,
+ CFA_BLD_WC_TCAM_FKB_TL2_ETYPE_FLD = 80,
+ CFA_BLD_WC_TCAM_FKB_TL3_TYPE_FLD = 81,
+ CFA_BLD_WC_TCAM_FKB_TL3_SIP3_FLD = 82,
+ CFA_BLD_WC_TCAM_FKB_TL3_SIP2_FLD = 83,
+ CFA_BLD_WC_TCAM_FKB_TL3_SIP1_FLD = 84,
+ CFA_BLD_WC_TCAM_FKB_TL3_SIP0_FLD = 85,
+ CFA_BLD_WC_TCAM_FKB_TL3_DIP3_FLD = 86,
+ CFA_BLD_WC_TCAM_FKB_TL3_DIP2_FLD = 87,
+ CFA_BLD_WC_TCAM_FKB_TL3_DIP1_FLD = 88,
+ CFA_BLD_WC_TCAM_FKB_TL3_DIP0_FLD = 89,
+ CFA_BLD_WC_TCAM_FKB_TL3_TTL_FLD = 90,
+ CFA_BLD_WC_TCAM_FKB_TL3_PROT_FLD = 91,
+ CFA_BLD_WC_TCAM_FKB_TL3_FID_FLD = 92,
+ CFA_BLD_WC_TCAM_FKB_TL3_QOS_FLD = 93,
+ CFA_BLD_WC_TCAM_FKB_TL3_IEH_NONEXT_FLD = 94,
+ CFA_BLD_WC_TCAM_FKB_TL3_IEH_SEP_FLD = 95,
+ CFA_BLD_WC_TCAM_FKB_TL3_IEH_AUTH_FLD = 96,
+ CFA_BLD_WC_TCAM_FKB_TL3_IEH_DEST_FLD = 97,
+ CFA_BLD_WC_TCAM_FKB_TL3_IEH_FRAG_FLD = 98,
+ CFA_BLD_WC_TCAM_FKB_TL3_IEH_RTHDR_FLD = 99,
+ CFA_BLD_WC_TCAM_FKB_TL3_IEH_HOP_FLD = 100,
+ CFA_BLD_WC_TCAM_FKB_TL3_IEH_1FRAG_FLD = 101,
+ CFA_BLD_WC_TCAM_FKB_TL3_DF_FLD = 102,
+ CFA_BLD_WC_TCAM_FKB_TL3_L3ERR_FLD = 103,
+ CFA_BLD_WC_TCAM_FKB_TL4_TYPE_FLD = 104,
+ CFA_BLD_WC_TCAM_FKB_TL4_SRC_FLD = 105,
+ CFA_BLD_WC_TCAM_FKB_TL4_DST_FLD = 106,
+ CFA_BLD_WC_TCAM_FKB_TL4_FLAGS_FLD = 107,
+ CFA_BLD_WC_TCAM_FKB_TL4_SEQ_FLD = 108,
+ CFA_BLD_WC_TCAM_FKB_TL4_PA_FLD = 109,
+ CFA_BLD_WC_TCAM_FKB_TL4_OPT_FLD = 110,
+ CFA_BLD_WC_TCAM_FKB_TL4_TCPTS_FLD = 111,
+ CFA_BLD_WC_TCAM_FKB_TL4_ERR_FLD = 112,
+ CFA_BLD_WC_TCAM_FKB_T_TYPE_FLD = 113,
+ CFA_BLD_WC_TCAM_FKB_T_FLAGS_FLD = 114,
+ CFA_BLD_WC_TCAM_FKB_T_IDS_FLD = 115,
+ CFA_BLD_WC_TCAM_FKB_T_ID_FLD = 116,
+ CFA_BLD_WC_TCAM_FKB_T_CTXTS_FLD = 117,
+ CFA_BLD_WC_TCAM_FKB_T_CTXT_FLD = 118,
+ CFA_BLD_WC_TCAM_FKB_T_QOS_FLD = 119,
+ CFA_BLD_WC_TCAM_FKB_T_ERR_FLD = 120,
+ CFA_BLD_WC_TCAM_FKB_L2_TYPE_FLD = 121,
+ CFA_BLD_WC_TCAM_FKB_L2_DMAC_FLD = 122,
+ CFA_BLD_WC_TCAM_FKB_L2_SMAC_FLD = 123,
+ CFA_BLD_WC_TCAM_FKB_L2_DT_FLD = 124,
+ CFA_BLD_WC_TCAM_FKB_L2_SA_FLD = 125,
+ CFA_BLD_WC_TCAM_FKB_L2_NVT_FLD = 126,
+ CFA_BLD_WC_TCAM_FKB_L2_OVP_FLD = 127,
+ CFA_BLD_WC_TCAM_FKB_L2_OVD_FLD = 128,
+ CFA_BLD_WC_TCAM_FKB_L2_OVV_FLD = 129,
+ CFA_BLD_WC_TCAM_FKB_L2_OVT_FLD = 130,
+ CFA_BLD_WC_TCAM_FKB_L2_IVP_FLD = 131,
+ CFA_BLD_WC_TCAM_FKB_L2_IVD_FLD = 132,
+ CFA_BLD_WC_TCAM_FKB_L2_IVV_FLD = 133,
+ CFA_BLD_WC_TCAM_FKB_L2_IVT_FLD = 134,
+ CFA_BLD_WC_TCAM_FKB_L2_ETYPE_FLD = 135,
+ CFA_BLD_WC_TCAM_FKB_L3_TYPE_FLD = 136,
+ CFA_BLD_WC_TCAM_FKB_L3_SIP3_FLD = 137,
+ CFA_BLD_WC_TCAM_FKB_L3_SIP2_FLD = 138,
+ CFA_BLD_WC_TCAM_FKB_L3_SIP1_FLD = 139,
+ CFA_BLD_WC_TCAM_FKB_L3_SIP0_FLD = 140,
+ CFA_BLD_WC_TCAM_FKB_L3_DIP3_FLD = 141,
+ CFA_BLD_WC_TCAM_FKB_L3_DIP2_FLD = 142,
+ CFA_BLD_WC_TCAM_FKB_L3_DIP1_FLD = 143,
+ CFA_BLD_WC_TCAM_FKB_L3_DIP0_FLD = 144,
+ CFA_BLD_WC_TCAM_FKB_L3_TTL_FLD = 145,
+ CFA_BLD_WC_TCAM_FKB_L3_PROT_FLD = 146,
+ CFA_BLD_WC_TCAM_FKB_L3_FID_FLD = 147,
+ CFA_BLD_WC_TCAM_FKB_L3_QOS_FLD = 148,
+ CFA_BLD_WC_TCAM_FKB_L3_IEH_NONEXT_FLD = 149,
+ CFA_BLD_WC_TCAM_FKB_L3_IEH_SEP_FLD = 150,
+ CFA_BLD_WC_TCAM_FKB_L3_IEH_AUTH_FLD = 151,
+ CFA_BLD_WC_TCAM_FKB_L3_IEH_DEST_FLD = 152,
+ CFA_BLD_WC_TCAM_FKB_L3_IEH_FRAG_FLD = 153,
+ CFA_BLD_WC_TCAM_FKB_L3_IEH_RTHDR_FLD = 154,
+ CFA_BLD_WC_TCAM_FKB_L3_IEH_HOP_FLD = 155,
+ CFA_BLD_WC_TCAM_FKB_L3_IEH_1FRAG_FLD = 156,
+ CFA_BLD_WC_TCAM_FKB_L3_DF_FLD = 157,
+ CFA_BLD_WC_TCAM_FKB_L3_L3ERR_FLD = 158,
+ CFA_BLD_WC_TCAM_FKB_L4_TYPE_FLD = 159,
+ CFA_BLD_WC_TCAM_FKB_L4_SRC_FLD = 160,
+ CFA_BLD_WC_TCAM_FKB_L4_DST_FLD = 161,
+ CFA_BLD_WC_TCAM_FKB_L4_FLAGS_FLD = 162,
+ CFA_BLD_WC_TCAM_FKB_L4_SEQ_FLD = 163,
+ CFA_BLD_WC_TCAM_FKB_L4_ACK_FLD = 164,
+ CFA_BLD_WC_TCAM_FKB_L4_WIN_FLD = 165,
+ CFA_BLD_WC_TCAM_FKB_L4_PA_FLD = 166,
+ CFA_BLD_WC_TCAM_FKB_L4_OPT_FLD = 167,
+ CFA_BLD_WC_TCAM_FKB_L4_TCPTS_FLD = 168,
+ CFA_BLD_WC_TCAM_FKB_L4_TSVAL_FLD = 169,
+ CFA_BLD_WC_TCAM_FKB_L4_TXECR_FLD = 170,
+ CFA_BLD_WC_TCAM_FKB_L4_ERR_FLD = 171,
+ CFA_BLD_WC_TCAM_FKB_MAX_FLD
+};
+
+/**
+ * Enumeration for em fkb
+ */
+enum cfa_bld_em_fkb_flds {
+ CFA_BLD_EM_FKB_PROF_ID_FLD = 0,
+ CFA_BLD_EM_FKB_L2CTXT_FLD = 1,
+ CFA_BLD_EM_FKB_L2FUNC_FLD = 2,
+ CFA_BLD_EM_FKB_PARIF_FLD = 3,
+ CFA_BLD_EM_FKB_SPIF_FLD = 4,
+ CFA_BLD_EM_FKB_SVIF_FLD = 5,
+ CFA_BLD_EM_FKB_LCOS_FLD = 6,
+ CFA_BLD_EM_FKB_META_HI_FLD = 7,
+ CFA_BLD_EM_FKB_META_LO_FLD = 8,
+ CFA_BLD_EM_FKB_RCYC_CNT_FLD = 9,
+ CFA_BLD_EM_FKB_LOOPBACK_FLD = 10,
+ CFA_BLD_EM_FKB_OTL2_TYPE_FLD = 11,
+ CFA_BLD_EM_FKB_OTL2_DMAC_FLD = 12,
+ CFA_BLD_EM_FKB_OTL2_SMAC_FLD = 13,
+ CFA_BLD_EM_FKB_OTL2_DT_FLD = 14,
+ CFA_BLD_EM_FKB_OTL2_SA_FLD = 15,
+ CFA_BLD_EM_FKB_OTL2_NVT_FLD = 16,
+ CFA_BLD_EM_FKB_OTL2_OVP_FLD = 17,
+ CFA_BLD_EM_FKB_OTL2_OVD_FLD = 18,
+ CFA_BLD_EM_FKB_OTL2_OVV_FLD = 19,
+ CFA_BLD_EM_FKB_OTL2_OVT_FLD = 20,
+ CFA_BLD_EM_FKB_OTL2_IVP_FLD = 21,
+ CFA_BLD_EM_FKB_OTL2_IVD_FLD = 22,
+ CFA_BLD_EM_FKB_OTL2_IVV_FLD = 23,
+ CFA_BLD_EM_FKB_OTL2_IVT_FLD = 24,
+ CFA_BLD_EM_FKB_OTL2_ETYPE_FLD = 25,
+ CFA_BLD_EM_FKB_OTL3_TYPE_FLD = 26,
+ CFA_BLD_EM_FKB_OTL3_SIP3_FLD = 27,
+ CFA_BLD_EM_FKB_OTL3_SIP2_FLD = 28,
+ CFA_BLD_EM_FKB_OTL3_SIP1_FLD = 29,
+ CFA_BLD_EM_FKB_OTL3_SIP0_FLD = 30,
+ CFA_BLD_EM_FKB_OTL3_DIP3_FLD = 31,
+ CFA_BLD_EM_FKB_OTL3_DIP2_FLD = 32,
+ CFA_BLD_EM_FKB_OTL3_DIP1_FLD = 33,
+ CFA_BLD_EM_FKB_OTL3_DIP0_FLD = 34,
+ CFA_BLD_EM_FKB_OTL3_TTL_FLD = 35,
+ CFA_BLD_EM_FKB_OTL3_PROT_FLD = 36,
+ CFA_BLD_EM_FKB_OTL3_FID_FLD = 37,
+ CFA_BLD_EM_FKB_OTL3_QOS_FLD = 38,
+ CFA_BLD_EM_FKB_OTL3_IEH_NONEXT_FLD = 39,
+ CFA_BLD_EM_FKB_OTL3_IEH_SEP_FLD = 40,
+ CFA_BLD_EM_FKB_OTL3_IEH_AUTH_FLD = 41,
+ CFA_BLD_EM_FKB_OTL3_IEH_DEST_FLD = 42,
+ CFA_BLD_EM_FKB_OTL3_IEH_FRAG_FLD = 43,
+ CFA_BLD_EM_FKB_OTL3_IEH_RTHDR_FLD = 44,
+ CFA_BLD_EM_FKB_OTL3_IEH_HOP_FLD = 45,
+ CFA_BLD_EM_FKB_OTL3_IEH_1FRAG_FLD = 46,
+ CFA_BLD_EM_FKB_OTL3_DF_FLD = 47,
+ CFA_BLD_EM_FKB_OTL3_L3ERR_FLD = 48,
+ CFA_BLD_EM_FKB_OTL4_TYPE_FLD = 49,
+ CFA_BLD_EM_FKB_OTL4_SRC_FLD = 50,
+ CFA_BLD_EM_FKB_OTL4_DST_FLD = 51,
+ CFA_BLD_EM_FKB_OTL4_FLAGS_FLD = 52,
+ CFA_BLD_EM_FKB_OTL4_SEQ_FLD = 53,
+ CFA_BLD_EM_FKB_OTL4_PA_FLD = 54,
+ CFA_BLD_EM_FKB_OTL4_OPT_FLD = 55,
+ CFA_BLD_EM_FKB_OTL4_TCPTS_FLD = 56,
+ CFA_BLD_EM_FKB_OTL4_ERR_FLD = 57,
+ CFA_BLD_EM_FKB_OT_TYPE_FLD = 58,
+ CFA_BLD_EM_FKB_OT_FLAGS_FLD = 59,
+ CFA_BLD_EM_FKB_OT_IDS_FLD = 60,
+ CFA_BLD_EM_FKB_OT_ID_FLD = 61,
+ CFA_BLD_EM_FKB_OT_CTXTS_FLD = 62,
+ CFA_BLD_EM_FKB_OT_CTXT_FLD = 63,
+ CFA_BLD_EM_FKB_OT_QOS_FLD = 64,
+ CFA_BLD_EM_FKB_OT_ERR_FLD = 65,
+ CFA_BLD_EM_FKB_TL2_TYPE_FLD = 66,
+ CFA_BLD_EM_FKB_TL2_DMAC_FLD = 67,
+ CFA_BLD_EM_FKB_TL2_SMAC_FLD = 68,
+ CFA_BLD_EM_FKB_TL2_DT_FLD = 69,
+ CFA_BLD_EM_FKB_TL2_SA_FLD = 70,
+ CFA_BLD_EM_FKB_TL2_NVT_FLD = 71,
+ CFA_BLD_EM_FKB_TL2_OVP_FLD = 72,
+ CFA_BLD_EM_FKB_TL2_OVD_FLD = 73,
+ CFA_BLD_EM_FKB_TL2_OVV_FLD = 74,
+ CFA_BLD_EM_FKB_TL2_OVT_FLD = 75,
+ CFA_BLD_EM_FKB_TL2_IVP_FLD = 76,
+ CFA_BLD_EM_FKB_TL2_IVD_FLD = 77,
+ CFA_BLD_EM_FKB_TL2_IVV_FLD = 78,
+ CFA_BLD_EM_FKB_TL2_IVT_FLD = 79,
+ CFA_BLD_EM_FKB_TL2_ETYPE_FLD = 80,
+ CFA_BLD_EM_FKB_TL3_TYPE_FLD = 81,
+ CFA_BLD_EM_FKB_TL3_SIP3_FLD = 82,
+ CFA_BLD_EM_FKB_TL3_SIP2_FLD = 83,
+ CFA_BLD_EM_FKB_TL3_SIP1_FLD = 84,
+ CFA_BLD_EM_FKB_TL3_SIP0_FLD = 85,
+ CFA_BLD_EM_FKB_TL3_DIP3_FLD = 86,
+ CFA_BLD_EM_FKB_TL3_DIP2_FLD = 87,
+ CFA_BLD_EM_FKB_TL3_DIP1_FLD = 88,
+ CFA_BLD_EM_FKB_TL3_DIP0_FLD = 89,
+ CFA_BLD_EM_FKB_TL3_TTL_FLD = 90,
+ CFA_BLD_EM_FKB_TL3_PROT_FLD = 91,
+ CFA_BLD_EM_FKB_TL3_FID_FLD = 92,
+ CFA_BLD_EM_FKB_TL3_QOS_FLD = 93,
+ CFA_BLD_EM_FKB_TL3_IEH_NONEXT_FLD = 94,
+ CFA_BLD_EM_FKB_TL3_IEH_SEP_FLD = 95,
+ CFA_BLD_EM_FKB_TL3_IEH_AUTH_FLD = 96,
+ CFA_BLD_EM_FKB_TL3_IEH_DEST_FLD = 97,
+ CFA_BLD_EM_FKB_TL3_IEH_FRAG_FLD = 98,
+ CFA_BLD_EM_FKB_TL3_IEH_RTHDR_FLD = 99,
+ CFA_BLD_EM_FKB_TL3_IEH_HOP_FLD = 100,
+ CFA_BLD_EM_FKB_TL3_IEH_1FRAG_FLD = 101,
+ CFA_BLD_EM_FKB_TL3_DF_FLD = 102,
+ CFA_BLD_EM_FKB_TL3_L3ERR_FLD = 103,
+ CFA_BLD_EM_FKB_TL4_TYPE_FLD = 104,
+ CFA_BLD_EM_FKB_TL4_SRC_FLD = 105,
+ CFA_BLD_EM_FKB_TL4_DST_FLD = 106,
+ CFA_BLD_EM_FKB_TL4_FLAGS_FLD = 107,
+ CFA_BLD_EM_FKB_TL4_SEQ_FLD = 108,
+ CFA_BLD_EM_FKB_TL4_PA_FLD = 109,
+ CFA_BLD_EM_FKB_TL4_OPT_FLD = 110,
+ CFA_BLD_EM_FKB_TL4_TCPTS_FLD = 111,
+ CFA_BLD_EM_FKB_TL4_ERR_FLD = 112,
+ CFA_BLD_EM_FKB_T_TYPE_FLD = 113,
+ CFA_BLD_EM_FKB_T_FLAGS_FLD = 114,
+ CFA_BLD_EM_FKB_T_IDS_FLD = 115,
+ CFA_BLD_EM_FKB_T_ID_FLD = 116,
+ CFA_BLD_EM_FKB_T_CTXTS_FLD = 117,
+ CFA_BLD_EM_FKB_T_CTXT_FLD = 118,
+ CFA_BLD_EM_FKB_T_QOS_FLD = 119,
+ CFA_BLD_EM_FKB_T_ERR_FLD = 120,
+ CFA_BLD_EM_FKB_L2_TYPE_FLD = 121,
+ CFA_BLD_EM_FKB_L2_DMAC_FLD = 122,
+ CFA_BLD_EM_FKB_L2_SMAC_FLD = 123,
+ CFA_BLD_EM_FKB_L2_DT_FLD = 124,
+ CFA_BLD_EM_FKB_L2_SA_FLD = 125,
+ CFA_BLD_EM_FKB_L2_NVT_FLD = 126,
+ CFA_BLD_EM_FKB_L2_OVP_FLD = 127,
+ CFA_BLD_EM_FKB_L2_OVD_FLD = 128,
+ CFA_BLD_EM_FKB_L2_OVV_FLD = 129,
+ CFA_BLD_EM_FKB_L2_OVT_FLD = 130,
+ CFA_BLD_EM_FKB_L2_IVP_FLD = 131,
+ CFA_BLD_EM_FKB_L2_IVD_FLD = 132,
+ CFA_BLD_EM_FKB_L2_IVV_FLD = 133,
+ CFA_BLD_EM_FKB_L2_IVT_FLD = 134,
+ CFA_BLD_EM_FKB_L2_ETYPE_FLD = 135,
+ CFA_BLD_EM_FKB_L3_TYPE_FLD = 136,
+ CFA_BLD_EM_FKB_L3_SIP3_FLD = 137,
+ CFA_BLD_EM_FKB_L3_SIP2_FLD = 138,
+ CFA_BLD_EM_FKB_L3_SIP1_FLD = 139,
+ CFA_BLD_EM_FKB_L3_SIP0_FLD = 140,
+ CFA_BLD_EM_FKB_L3_DIP3_FLD = 141,
+ CFA_BLD_EM_FKB_L3_DIP2_FLD = 142,
+ CFA_BLD_EM_FKB_L3_DIP1_FLD = 143,
+ CFA_BLD_EM_FKB_L3_DIP0_FLD = 144,
+ CFA_BLD_EM_FKB_L3_TTL_FLD = 145,
+ CFA_BLD_EM_FKB_L3_PROT_FLD = 146,
+ CFA_BLD_EM_FKB_L3_FID_FLD = 147,
+ CFA_BLD_EM_FKB_L3_QOS_FLD = 148,
+ CFA_BLD_EM_FKB_L3_IEH_NONEXT_FLD = 149,
+ CFA_BLD_EM_FKB_L3_IEH_SEP_FLD = 150,
+ CFA_BLD_EM_FKB_L3_IEH_AUTH_FLD = 151,
+ CFA_BLD_EM_FKB_L3_IEH_DEST_FLD = 152,
+ CFA_BLD_EM_FKB_L3_IEH_FRAG_FLD = 153,
+ CFA_BLD_EM_FKB_L3_IEH_RTHDR_FLD = 154,
+ CFA_BLD_EM_FKB_L3_IEH_HOP_FLD = 155,
+ CFA_BLD_EM_FKB_L3_IEH_1FRAG_FLD = 156,
+ CFA_BLD_EM_FKB_L3_DF_FLD = 157,
+ CFA_BLD_EM_FKB_L3_L3ERR_FLD = 158,
+ CFA_BLD_EM_FKB_L4_TYPE_FLD = 159,
+ CFA_BLD_EM_FKB_L4_SRC_FLD = 160,
+ CFA_BLD_EM_FKB_L4_DST_FLD = 161,
+ CFA_BLD_EM_FKB_L4_FLAGS_FLD = 162,
+ CFA_BLD_EM_FKB_L4_SEQ_FLD = 163,
+ CFA_BLD_EM_FKB_L4_ACK_FLD = 164,
+ CFA_BLD_EM_FKB_L4_WIN_FLD = 165,
+ CFA_BLD_EM_FKB_L4_PA_FLD = 166,
+ CFA_BLD_EM_FKB_L4_OPT_FLD = 167,
+ CFA_BLD_EM_FKB_L4_TCPTS_FLD = 168,
+ CFA_BLD_EM_FKB_L4_TSVAL_FLD = 169,
+ CFA_BLD_EM_FKB_L4_TXECR_FLD = 170,
+ CFA_BLD_EM_FKB_L4_ERR_FLD = 171,
+ CFA_BLD_EM_FKB_MAX_FLD
+};
+
+/**
+ * Enumeration for em key layout
+ */
+enum cfa_bld_em_key_layout_flds {
+ CFA_BLD_EM_KL_RANGE_IDX_FLD = 0,
+ CFA_BLD_EM_KL_RANGE_PROFILE_FLD = 1,
+ CFA_BLD_EM_KL_CREC_TIMER_VALUE_FLD = 2,
+ CFA_BLD_EM_KL_CREC_STATE_FLD = 3,
+ CFA_BLD_EM_KL_CREC_TCP_MSB_OPP_INIT_FLD = 4,
+ CFA_BLD_EM_KL_CREC_TCP_MSB_OPP_FLD = 5,
+ CFA_BLD_EM_KL_CREC_TCP_MSB_LOC_FLD = 6,
+ CFA_BLD_EM_KL_CREC_TCP_WIN_FLD = 7,
+ CFA_BLD_EM_KL_CREC_TCP_UPDT_EN_FLD = 8,
+ CFA_BLD_EM_KL_CREC_TCP_DIR_FLD = 9,
+ CFA_BLD_EM_KL_METADATA_FLD = 10,
+ CFA_BLD_EM_KL_PROF_FUNC_FLD = 11,
+ CFA_BLD_EM_KL_META_PROF_FLD = 12,
+ CFA_BLD_EM_KL_RECYCLE_DEST_FLD = 13,
+ CFA_BLD_EM_KL_FC_PTR_FLD = 14,
+ CFA_BLD_EM_KL_FC_TYPE_FLD = 15,
+ CFA_BLD_EM_KL_FC_OP_FLD = 16,
+ CFA_BLD_EM_KL_PATHS_M1_FLD = 17,
+ CFA_BLD_EM_KL_ACT_REC_SIZE_FLD = 18,
+ CFA_BLD_EM_KL_RING_TABLE_IDX_FLD = 19,
+ CFA_BLD_EM_KL_DESTINATION_FLD = 20,
+ CFA_BLD_EM_KL_ACT_REC_PTR_FLD = 21,
+ CFA_BLD_EM_KL_ACT_HINT_FLD = 22,
+ CFA_BLD_EM_KL_STRENGTH_FLD = 23,
+ CFA_BLD_EM_KL_OPCODE_FLD = 24,
+ CFA_BLD_EM_KL_EPOCH1_FLD = 25,
+ CFA_BLD_EM_KL_EPOCH0_FLD = 26,
+ CFA_BLD_EM_KL_REC_SIZE_FLD = 27,
+ CFA_BLD_EM_KL_VALID_FLD = 28,
+ CFA_BLD_EM_KL_PROF_ID_FLD = 29,
+ CFA_BLD_EM_KL_L2CTXT_FLD = 30,
+ CFA_BLD_EM_KL_L2FUNC_FLD = 31,
+ CFA_BLD_EM_KL_PARIF_FLD = 32,
+ CFA_BLD_EM_KL_SPIF_FLD = 33,
+ CFA_BLD_EM_KL_SVIF_FLD = 34,
+ CFA_BLD_EM_KL_LCOS_FLD = 35,
+ CFA_BLD_EM_KL_META_HI_FLD = 36,
+ CFA_BLD_EM_KL_META_LO_FLD = 37,
+ CFA_BLD_EM_KL_RCYC_CNT_FLD = 38,
+ CFA_BLD_EM_KL_LOOPBACK_FLD = 39,
+ CFA_BLD_EM_KL_OTL2_TYPE_FLD = 40,
+ CFA_BLD_EM_KL_OTL2_DMAC_FLD = 41,
+ CFA_BLD_EM_KL_OTL2_SMAC_FLD = 42,
+ CFA_BLD_EM_KL_OTL2_DT_FLD = 43,
+ CFA_BLD_EM_KL_OTL2_SA_FLD = 44,
+ CFA_BLD_EM_KL_OTL2_NVT_FLD = 45,
+ CFA_BLD_EM_KL_OTL2_OVP_FLD = 46,
+ CFA_BLD_EM_KL_OTL2_OVD_FLD = 47,
+ CFA_BLD_EM_KL_OTL2_OVV_FLD = 48,
+ CFA_BLD_EM_KL_OTL2_OVT_FLD = 49,
+ CFA_BLD_EM_KL_OTL2_IVP_FLD = 50,
+ CFA_BLD_EM_KL_OTL2_IVD_FLD = 51,
+ CFA_BLD_EM_KL_OTL2_IVV_FLD = 52,
+ CFA_BLD_EM_KL_OTL2_IVT_FLD = 53,
+ CFA_BLD_EM_KL_OTL2_ETYPE_FLD = 54,
+ CFA_BLD_EM_KL_OTL3_TYPE_FLD = 55,
+ CFA_BLD_EM_KL_OTL3_SIP3_FLD = 56,
+ CFA_BLD_EM_KL_OTL3_SIP2_FLD = 57,
+ CFA_BLD_EM_KL_OTL3_SIP1_FLD = 58,
+ CFA_BLD_EM_KL_OTL3_SIP0_FLD = 59,
+ CFA_BLD_EM_KL_OTL3_DIP3_FLD = 60,
+ CFA_BLD_EM_KL_OTL3_DIP2_FLD = 61,
+ CFA_BLD_EM_KL_OTL3_DIP1_FLD = 62,
+ CFA_BLD_EM_KL_OTL3_DIP0_FLD = 63,
+ CFA_BLD_EM_KL_OTL3_TTL_FLD = 64,
+ CFA_BLD_EM_KL_OTL3_PROT_FLD = 65,
+ CFA_BLD_EM_KL_OTL3_FID_FLD = 66,
+ CFA_BLD_EM_KL_OTL3_QOS_FLD = 67,
+ CFA_BLD_EM_KL_OTL3_IEH_NONEXT_FLD = 68,
+ CFA_BLD_EM_KL_OTL3_IEH_SEP_FLD = 69,
+ CFA_BLD_EM_KL_OTL3_IEH_AUTH_FLD = 70,
+ CFA_BLD_EM_KL_OTL3_IEH_DEST_FLD = 71,
+ CFA_BLD_EM_KL_OTL3_IEH_FRAG_FLD = 72,
+ CFA_BLD_EM_KL_OTL3_IEH_RTHDR_FLD = 73,
+ CFA_BLD_EM_KL_OTL3_IEH_HOP_FLD = 74,
+ CFA_BLD_EM_KL_OTL3_IEH_1FRAG_FLD = 75,
+ CFA_BLD_EM_KL_OTL3_DF_FLD = 76,
+ CFA_BLD_EM_KL_OTL3_L3ERR_FLD = 77,
+ CFA_BLD_EM_KL_OTL4_TYPE_FLD = 78,
+ CFA_BLD_EM_KL_OTL4_SRC_FLD = 79,
+ CFA_BLD_EM_KL_OTL4_DST_FLD = 80,
+ CFA_BLD_EM_KL_OTL4_FLAGS_FLD = 81,
+ CFA_BLD_EM_KL_OTL4_SEQ_FLD = 82,
+ CFA_BLD_EM_KL_OTL4_PA_FLD = 83,
+ CFA_BLD_EM_KL_OTL4_OPT_FLD = 84,
+ CFA_BLD_EM_KL_OTL4_TCPTS_FLD = 85,
+ CFA_BLD_EM_KL_OTL4_ERR_FLD = 86,
+ CFA_BLD_EM_KL_OT_TYPE_FLD = 87,
+ CFA_BLD_EM_KL_OT_FLAGS_FLD = 88,
+ CFA_BLD_EM_KL_OT_IDS_FLD = 89,
+ CFA_BLD_EM_KL_OT_ID_FLD = 90,
+ CFA_BLD_EM_KL_OT_CTXTS_FLD = 91,
+ CFA_BLD_EM_KL_OT_CTXT_FLD = 92,
+ CFA_BLD_EM_KL_OT_QOS_FLD = 93,
+ CFA_BLD_EM_KL_OT_ERR_FLD = 94,
+ CFA_BLD_EM_KL_TL2_TYPE_FLD = 95,
+ CFA_BLD_EM_KL_TL2_DMAC_FLD = 96,
+ CFA_BLD_EM_KL_TL2_SMAC_FLD = 97,
+ CFA_BLD_EM_KL_TL2_DT_FLD = 98,
+ CFA_BLD_EM_KL_TL2_SA_FLD = 99,
+ CFA_BLD_EM_KL_TL2_NVT_FLD = 100,
+ CFA_BLD_EM_KL_TL2_OVP_FLD = 101,
+ CFA_BLD_EM_KL_TL2_OVD_FLD = 102,
+ CFA_BLD_EM_KL_TL2_OVV_FLD = 103,
+ CFA_BLD_EM_KL_TL2_OVT_FLD = 104,
+ CFA_BLD_EM_KL_TL2_IVP_FLD = 105,
+ CFA_BLD_EM_KL_TL2_IVD_FLD = 106,
+ CFA_BLD_EM_KL_TL2_IVV_FLD = 107,
+ CFA_BLD_EM_KL_TL2_IVT_FLD = 108,
+ CFA_BLD_EM_KL_TL2_ETYPE_FLD = 109,
+ CFA_BLD_EM_KL_TL3_TYPE_FLD = 110,
+ CFA_BLD_EM_KL_TL3_SIP3_FLD = 111,
+ CFA_BLD_EM_KL_TL3_SIP2_FLD = 112,
+ CFA_BLD_EM_KL_TL3_SIP1_FLD = 113,
+ CFA_BLD_EM_KL_TL3_SIP0_FLD = 114,
+ CFA_BLD_EM_KL_TL3_DIP3_FLD = 115,
+ CFA_BLD_EM_KL_TL3_DIP2_FLD = 116,
+ CFA_BLD_EM_KL_TL3_DIP1_FLD = 117,
+ CFA_BLD_EM_KL_TL3_DIP0_FLD = 118,
+ CFA_BLD_EM_KL_TL3_TTL_FLD = 119,
+ CFA_BLD_EM_KL_TL3_PROT_FLD = 120,
+ CFA_BLD_EM_KL_TL3_FID_FLD = 121,
+ CFA_BLD_EM_KL_TL3_QOS_FLD = 122,
+ CFA_BLD_EM_KL_TL3_IEH_NONEXT_FLD = 123,
+ CFA_BLD_EM_KL_TL3_IEH_SEP_FLD = 124,
+ CFA_BLD_EM_KL_TL3_IEH_AUTH_FLD = 125,
+ CFA_BLD_EM_KL_TL3_IEH_DEST_FLD = 126,
+ CFA_BLD_EM_KL_TL3_IEH_FRAG_FLD = 127,
+ CFA_BLD_EM_KL_TL3_IEH_RTHDR_FLD = 128,
+ CFA_BLD_EM_KL_TL3_IEH_HOP_FLD = 129,
+ CFA_BLD_EM_KL_TL3_IEH_1FRAG_FLD = 130,
+ CFA_BLD_EM_KL_TL3_DF_FLD = 131,
+ CFA_BLD_EM_KL_TL3_L3ERR_FLD = 132,
+ CFA_BLD_EM_KL_TL4_TYPE_FLD = 133,
+ CFA_BLD_EM_KL_TL4_SRC_FLD = 134,
+ CFA_BLD_EM_KL_TL4_DST_FLD = 135,
+ CFA_BLD_EM_KL_TL4_FLAGS_FLD = 136,
+ CFA_BLD_EM_KL_TL4_SEQ_FLD = 137,
+ CFA_BLD_EM_KL_TL4_PA_FLD = 138,
+ CFA_BLD_EM_KL_TL4_OPT_FLD = 139,
+ CFA_BLD_EM_KL_TL4_TCPTS_FLD = 140,
+ CFA_BLD_EM_KL_TL4_ERR_FLD = 141,
+ CFA_BLD_EM_KL_T_TYPE_FLD = 142,
+ CFA_BLD_EM_KL_T_FLAGS_FLD = 143,
+ CFA_BLD_EM_KL_T_IDS_FLD = 144,
+ CFA_BLD_EM_KL_T_ID_FLD = 145,
+ CFA_BLD_EM_KL_T_CTXTS_FLD = 146,
+ CFA_BLD_EM_KL_T_CTXT_FLD = 147,
+ CFA_BLD_EM_KL_T_QOS_FLD = 148,
+ CFA_BLD_EM_KL_T_ERR_FLD = 149,
+ CFA_BLD_EM_KL_L2_TYPE_FLD = 150,
+ CFA_BLD_EM_KL_L2_DMAC_FLD = 151,
+ CFA_BLD_EM_KL_L2_SMAC_FLD = 152,
+ CFA_BLD_EM_KL_L2_DT_FLD = 153,
+ CFA_BLD_EM_KL_L2_SA_FLD = 154,
+ CFA_BLD_EM_KL_L2_NVT_FLD = 155,
+ CFA_BLD_EM_KL_L2_OVP_FLD = 156,
+ CFA_BLD_EM_KL_L2_OVD_FLD = 157,
+ CFA_BLD_EM_KL_L2_OVV_FLD = 158,
+ CFA_BLD_EM_KL_L2_OVT_FLD = 159,
+ CFA_BLD_EM_KL_L2_IVP_FLD = 160,
+ CFA_BLD_EM_KL_L2_IVD_FLD = 161,
+ CFA_BLD_EM_KL_L2_IVV_FLD = 162,
+ CFA_BLD_EM_KL_L2_IVT_FLD = 163,
+ CFA_BLD_EM_KL_L2_ETYPE_FLD = 164,
+ CFA_BLD_EM_KL_L3_TYPE_FLD = 165,
+ CFA_BLD_EM_KL_L3_SIP3_FLD = 166,
+ CFA_BLD_EM_KL_L3_SIP2_FLD = 167,
+ CFA_BLD_EM_KL_L3_SIP1_FLD = 168,
+ CFA_BLD_EM_KL_L3_SIP0_FLD = 169,
+ CFA_BLD_EM_KL_L3_DIP3_FLD = 170,
+ CFA_BLD_EM_KL_L3_DIP2_FLD = 171,
+ CFA_BLD_EM_KL_L3_DIP1_FLD = 172,
+ CFA_BLD_EM_KL_L3_DIP0_FLD = 173,
+ CFA_BLD_EM_KL_L3_TTL_FLD = 174,
+ CFA_BLD_EM_KL_L3_PROT_FLD = 175,
+ CFA_BLD_EM_KL_L3_FID_FLD = 176,
+ CFA_BLD_EM_KL_L3_QOS_FLD = 177,
+ CFA_BLD_EM_KL_L3_IEH_NONEXT_FLD = 178,
+ CFA_BLD_EM_KL_L3_IEH_SEP_FLD = 179,
+ CFA_BLD_EM_KL_L3_IEH_AUTH_FLD = 180,
+ CFA_BLD_EM_KL_L3_IEH_DEST_FLD = 181,
+ CFA_BLD_EM_KL_L3_IEH_FRAG_FLD = 182,
+ CFA_BLD_EM_KL_L3_IEH_RTHDR_FLD = 183,
+ CFA_BLD_EM_KL_L3_IEH_HOP_FLD = 184,
+ CFA_BLD_EM_KL_L3_IEH_1FRAG_FLD = 185,
+ CFA_BLD_EM_KL_L3_DF_FLD = 186,
+ CFA_BLD_EM_KL_L3_L3ERR_FLD = 187,
+ CFA_BLD_EM_KL_L4_TYPE_FLD = 188,
+ CFA_BLD_EM_KL_L4_SRC_FLD = 189,
+ CFA_BLD_EM_KL_L4_DST_FLD = 190,
+ CFA_BLD_EM_KL_L4_FLAGS_FLD = 191,
+ CFA_BLD_EM_KL_L4_SEQ_FLD = 192,
+ CFA_BLD_EM_KL_L4_ACK_FLD = 193,
+ CFA_BLD_EM_KL_L4_WIN_FLD = 194,
+ CFA_BLD_EM_KL_L4_PA_FLD = 195,
+ CFA_BLD_EM_KL_L4_OPT_FLD = 196,
+ CFA_BLD_EM_KL_L4_TCPTS_FLD = 197,
+ CFA_BLD_EM_KL_L4_TSVAL_FLD = 198,
+ CFA_BLD_EM_KL_L4_TXECR_FLD = 199,
+ CFA_BLD_EM_KL_L4_ERR_FLD = 200,
+ CFA_BLD_EM_KEY_LAYOUT_MAX_FLD = 201,
+};
+
+/**
+ * Enumeration for action
+ */
+enum cfa_bld_action_flds {
+ CFA_BLD_ACT_TYPE_FLD = 0,
+ CFA_BLD_ACT_DROP_FLD = 1,
+ CFA_BLD_ACT_VLAN_DELETE_FLD = 2,
+ CFA_BLD_ACT_DEST_FLD = 3,
+ CFA_BLD_ACT_DEST_OP_FLD = 4,
+ CFA_BLD_ACT_DECAP_FLD = 5,
+ CFA_BLD_ACT_MIRRORING_FLD = 6,
+ CFA_BLD_ACT_METER_PTR_FLD = 7,
+ CFA_BLD_ACT_STAT0_OFF_FLD = 8,
+ CFA_BLD_ACT_STAT0_OP_FLD = 9,
+ CFA_BLD_ACT_STAT0_CTR_TYPE_FLD = 10,
+ CFA_BLD_ACT_MOD_OFF_FLD = 11,
+ CFA_BLD_ACT_ENC_OFF_FLD = 12,
+ CFA_BLD_ACT_SRC_OFF_FLD = 13,
+ CFA_BLD_ACT_COMPACT_RSVD_0_FLD = 14,
+ CFA_BLD_ACT_STAT0_PTR_FLD = 15,
+ CFA_BLD_ACT_STAT1_PTR_FLD = 16,
+ CFA_BLD_ACT_STAT1_OP_FLD = 17,
+ CFA_BLD_ACT_STAT1_CTR_TYPE_FLD = 18,
+ CFA_BLD_ACT_MOD_PTR_FLD = 19,
+ CFA_BLD_ACT_ENC_PTR_FLD = 20,
+ CFA_BLD_ACT_SRC_PTR_FLD = 21,
+ CFA_BLD_ACT_FULL_RSVD_0_FLD = 22,
+ CFA_BLD_ACT_SRC_KO_EN_FLD = 23,
+ CFA_BLD_ACT_MCG_RSVD_0_FLD = 24,
+ CFA_BLD_ACT_NEXT_PTR_FLD = 25,
+ CFA_BLD_ACT_PTR0_ACT_HINT_FLD = 26,
+ CFA_BLD_ACT_PTR0_ACT_REC_PTR_FLD = 27,
+ CFA_BLD_ACT_PTR1_ACT_HINT_FLD = 28,
+ CFA_BLD_ACT_PTR1_ACT_REC_PTR_FLD = 29,
+ CFA_BLD_ACT_PTR2_ACT_HINT_FLD = 30,
+ CFA_BLD_ACT_PTR2_ACT_REC_PTR_FLD = 31,
+ CFA_BLD_ACT_PTR3_ACT_HINT_FLD = 32,
+ CFA_BLD_ACT_PTR3_ACT_REC_PTR_FLD = 33,
+ CFA_BLD_ACT_PTR4_ACT_HINT_FLD = 34,
+ CFA_BLD_ACT_PTR4_ACT_REC_PTR_FLD = 35,
+ CFA_BLD_ACT_PTR5_ACT_HINT_FLD = 36,
+ CFA_BLD_ACT_PTR5_ACT_REC_PTR_FLD = 37,
+ CFA_BLD_ACT_PTR6_ACT_HINT_FLD = 38,
+ CFA_BLD_ACT_PTR6_ACT_REC_PTR_FLD = 39,
+ CFA_BLD_ACT_PTR7_ACT_HINT_FLD = 40,
+ CFA_BLD_ACT_PTR7_ACT_REC_PTR_FLD = 41,
+ CFA_BLD_ACT_MCG_SUBSEQ_RSVD_0_FLD = 42,
+ CFA_BLD_ACT_MOD_MODIFY_ACT_HDR_FLD = 43,
+ CFA_BLD_ACT_MOD_MD_UPDT_DATA_FLD = 44,
+ CFA_BLD_ACT_MOD_MD_UPDT_PROF_FLD = 45,
+ CFA_BLD_ACT_MOD_MD_UPDT_OP_FLD = 46,
+ CFA_BLD_ACT_MOD_MD_UPDT_RSVD_0_FLD = 47,
+ CFA_BLD_ACT_MOD_MD_UPDT_TOP_FLD = 48,
+ CFA_BLD_ACT_MOD_RM_OVLAN_FLD = 49,
+ CFA_BLD_ACT_MOD_RM_IVLAN_FLD = 50,
+ CFA_BLD_ACT_MOD_RPL_IVLAN_FLD = 51,
+ CFA_BLD_ACT_MOD_RPL_OVLAN_FLD = 52,
+ CFA_BLD_ACT_MOD_TTL_UPDT_OP_FLD = 53,
+ CFA_BLD_ACT_MOD_TTL_UPDT_ALT_VID_FLD = 54,
+ CFA_BLD_ACT_MOD_TTL_UPDT_ALT_PFID_FLD = 55,
+ CFA_BLD_ACT_MOD_TTL_UPDT_TOP_FLD = 56,
+ CFA_BLD_ACT_MOD_TNL_MODIFY_DEL_FLD = 57,
+ CFA_BLD_ACT_MOD_TNL_MODIFY_8B_NEW_PROT_FLD = 58,
+ CFA_BLD_ACT_MOD_TNL_MODIFY_8B_EXIST_PROT_FLD = 59,
+ CFA_BLD_ACT_MOD_TNL_MODIFY_8B_VEC_FLD = 60,
+ CFA_BLD_ACT_MOD_TNL_MODIFY_8B_TOP_FLD = 61,
+ CFA_BLD_ACT_MOD_TNL_MODIFY_16B_NEW_PROT_FLD = 62,
+ CFA_BLD_ACT_MOD_TNL_MODIFY_16B_EXIST_PROT_FLD = 63,
+ CFA_BLD_ACT_MOD_TNL_MODIFY_16B_VEC_FLD = 64,
+ CFA_BLD_ACT_MOD_TNL_MODIFY_16B_TOP_FLD = 65,
+ CFA_BLD_ACT_MOD_UPDT_FIELD_DATA0_FLD = 66,
+ CFA_BLD_ACT_MOD_UPDT_FIELD_VEC_RSVD_FLD = 67,
+ CFA_BLD_ACT_MOD_UPDT_FIELD_VEC_KID_FLD = 68,
+ CFA_BLD_ACT_MOD_UPDT_FIELD_TOP_FLD = 69,
+ CFA_BLD_ACT_MOD_SMAC_FLD = 70,
+ CFA_BLD_ACT_MOD_DMAC_FLD = 71,
+ CFA_BLD_ACT_MOD_SIPV6_FLD = 72,
+ CFA_BLD_ACT_MOD_DIPV6_FLD = 73,
+ CFA_BLD_ACT_MOD_SIPV4_FLD = 74,
+ CFA_BLD_ACT_MOD_DIPV4_FLD = 75,
+ CFA_BLD_ACT_MOD_SPORT_FLD = 76,
+ CFA_BLD_ACT_MOD_DPORT_FLD = 77,
+ CFA_BLD_ACT_ENC_ECV_TNL_FLD = 78,
+ CFA_BLD_ACT_ENC_ECV_L4_FLD = 79,
+ CFA_BLD_ACT_ENC_ECV_L3_FLD = 80,
+ CFA_BLD_ACT_ENC_ECV_L2_FLD = 81,
+ CFA_BLD_ACT_ENC_ECV_VTAG_FLD = 82,
+ CFA_BLD_ACT_ENC_ECV_EC_FLD = 83,
+ CFA_BLD_ACT_ENC_ECV_VALID_FLD = 84,
+ CFA_BLD_ACT_ENC_EC_IP_TTL_IH_FLD = 85,
+ CFA_BLD_ACT_ENC_EC_IP_TOS_IH_FLD = 86,
+ CFA_BLD_ACT_ENC_EC_TUN_QOS_FLD = 87,
+ CFA_BLD_ACT_ENC_EC_GRE_SET_K_FLD = 88,
+ CFA_BLD_ACT_ENC_EC_DMAC_OVR_FLD = 89,
+ CFA_BLD_ACT_ENC_EC_VLAN_OVR_FLD = 90,
+ CFA_BLD_ACT_ENC_EC_SMAC_OVR_FLD = 91,
+ CFA_BLD_ACT_ENC_EC_IPV4_ID_CTRL_FLD = 92,
+ CFA_BLD_ACT_ENC_L2_DMAC_FLD = 93,
+ CFA_BLD_ACT_ENC_VLAN1_TAG_VID_FLD = 94,
+ CFA_BLD_ACT_ENC_VLAN1_TAG_DE_FLD = 95,
+ CFA_BLD_ACT_ENC_VLAN1_TAG_PRI_FLD = 96,
+ CFA_BLD_ACT_ENC_VLAN1_TAG_TPID_FLD = 97,
+ CFA_BLD_ACT_ENC_VLAN2_IT_VID_FLD = 98,
+ CFA_BLD_ACT_ENC_VLAN2_IT_DE_FLD = 99,
+ CFA_BLD_ACT_ENC_VLAN2_IT_PRI_FLD = 100,
+ CFA_BLD_ACT_ENC_VLAN2_IT_TPID_FLD = 101,
+ CFA_BLD_ACT_ENC_VLAN2_OT_VID_FLD = 102,
+ CFA_BLD_ACT_ENC_VLAN2_OT_DE_FLD = 103,
+ CFA_BLD_ACT_ENC_VLAN2_OT_PRI_FLD = 104,
+ CFA_BLD_ACT_ENC_VLAN2_OT_TPID_FLD = 105,
+ CFA_BLD_ACT_ENC_IPV4_ID_FLD = 106,
+ CFA_BLD_ACT_ENC_IPV4_TOS_FLD = 107,
+ CFA_BLD_ACT_ENC_IPV4_HLEN_FLD = 108,
+ CFA_BLD_ACT_ENC_IPV4_VER_FLD = 109,
+ CFA_BLD_ACT_ENC_IPV4_PROT_FLD = 110,
+ CFA_BLD_ACT_ENC_IPV4_TTL_FLD = 111,
+ CFA_BLD_ACT_ENC_IPV4_FRAG_FLD = 112,
+ CFA_BLD_ACT_ENC_IPV4_FLAGS_FLD = 113,
+ CFA_BLD_ACT_ENC_IPV4_DEST_FLD = 114,
+ CFA_BLD_ACT_ENC_IPV6_FLOW_LABEL_FLD = 115,
+ CFA_BLD_ACT_ENC_IPV6_TRAFFIC_CLASS_FLD = 116,
+ CFA_BLD_ACT_ENC_IPV6_VER_FLD = 117,
+ CFA_BLD_ACT_ENC_IPV6_HOP_LIMIT_FLD = 118,
+ CFA_BLD_ACT_ENC_IPV6_NEXT_HEADER_FLD = 119,
+ CFA_BLD_ACT_ENC_IPV6_PAYLOAD_LENGTH_FLD = 120,
+ CFA_BLD_ACT_ENC_IPV6_DEST_FLD = 121,
+ CFA_BLD_ACT_ENC_MPLS_TAG1_FLD = 122,
+ CFA_BLD_ACT_ENC_MPLS_TAG2_FLD = 123,
+ CFA_BLD_ACT_ENC_MPLS_TAG3_FLD = 124,
+ CFA_BLD_ACT_ENC_MPLS_TAG4_FLD = 125,
+ CFA_BLD_ACT_ENC_MPLS_TAG5_FLD = 126,
+ CFA_BLD_ACT_ENC_MPLS_TAG6_FLD = 127,
+ CFA_BLD_ACT_ENC_MPLS_TAG7_FLD = 128,
+ CFA_BLD_ACT_ENC_MPLS_TAG8_FLD = 129,
+ CFA_BLD_ACT_ENC_L4_DEST_PORT_FLD = 130,
+ CFA_BLD_ACT_ENC_L4_SRC_PORT_FLD = 131,
+ CFA_BLD_ACT_ENC_TNL_VXLAN_NEXT_PROT_FLD = 132,
+ CFA_BLD_ACT_ENC_TNL_VXLAN_RSVD_0_FLD = 133,
+ CFA_BLD_ACT_ENC_TNL_VXLAN_FLAGS_FLD = 134,
+ CFA_BLD_ACT_ENC_TNL_VXLAN_RSVD_1_FLD = 135,
+ CFA_BLD_ACT_ENC_TNL_VXLAN_VNI_FLD = 136,
+ CFA_BLD_ACT_ENC_TNL_NGE_PROT_TYPE_FLD = 137,
+ CFA_BLD_ACT_ENC_TNL_NGE_RSVD_0_FLD = 138,
+ CFA_BLD_ACT_ENC_TNL_NGE_FLAGS_C_FLD = 139,
+ CFA_BLD_ACT_ENC_TNL_NGE_FLAGS_O_FLD = 140,
+ CFA_BLD_ACT_ENC_TNL_NGE_FLAGS_OPT_LEN_FLD = 141,
+ CFA_BLD_ACT_ENC_TNL_NGE_FLAGS_VER_FLD = 142,
+ CFA_BLD_ACT_ENC_TNL_NGE_RSVD_1_FLD = 143,
+ CFA_BLD_ACT_ENC_TNL_NGE_VNI_FLD = 144,
+ CFA_BLD_ACT_ENC_TNL_NGE_OPTIONS_FLD = 145,
+ CFA_BLD_ACT_ENC_TNL_NVGRE_FLOW_ID_FLD = 146,
+ CFA_BLD_ACT_ENC_TNL_NVGRE_VSID_FLD = 147,
+ CFA_BLD_ACT_ENC_TNL_GRE_KEY_FLD = 148,
+ CFA_BLD_ACT_ENC_TNL_GENERIC_TID_FLD = 149,
+ CFA_BLD_ACT_ENC_TNL_GENERIC_LENGTH_FLD = 150,
+ CFA_BLD_ACT_ENC_TNL_GENERIC_HEADER_FLD = 151,
+ CFA_BLD_ACT_ENC_SPDNIC_SIZE_FLD = 152,
+ CFA_BLD_ACT_ENC_SPDNIC_TID_FLD = 153,
+ CFA_BLD_ACT_ENC_SPDNIC_FLAGS_FLD = 154,
+ CFA_BLD_ACT_ENC_SPDNIC_RSVD_FLD = 155,
+ CFA_BLD_ACT_SRC_MAC_FLD = 156,
+ CFA_BLD_ACT_SRC_IPV4_ADDR_FLD = 157,
+ CFA_BLD_ACT_SRC_IPV6_ADDR_FLD = 158,
+ CFA_BLD_ACT_STAT0_B16_FPC_FLD = 159,
+ CFA_BLD_ACT_STAT1_B16_FPC_FLD = 160,
+ CFA_BLD_ACT_STAT0_B16_FBC_FLD = 161,
+ CFA_BLD_ACT_STAT1_B16_FBC_FLD = 162,
+ CFA_BLD_ACT_STAT0_B24_FPC_FLD = 163,
+ CFA_BLD_ACT_STAT1_B24_FPC_FLD = 164,
+ CFA_BLD_ACT_STAT0_B24_FBC_FLD = 165,
+ CFA_BLD_ACT_STAT1_B24_FBC_FLD = 166,
+ CFA_BLD_ACT_STAT0_B24_TIMESTAMP_FLD = 167,
+ CFA_BLD_ACT_STAT1_B24_TIMESTAMP_FLD = 168,
+ CFA_BLD_ACT_STAT0_B24_TCP_FLAGS_FLD = 169,
+ CFA_BLD_ACT_STAT1_B24_TCP_FLAGS_FLD = 170,
+ CFA_BLD_ACT_STAT0_B24_UNUSED_0_FLD = 171,
+ CFA_BLD_ACT_STAT1_B24_UNUSED_0_FLD = 172,
+ CFA_BLD_ACT_STAT0_B32A_FPC_FLD = 173,
+ CFA_BLD_ACT_STAT1_B32A_FPC_FLD = 174,
+ CFA_BLD_ACT_STAT0_B32A_FBC_FLD = 175,
+ CFA_BLD_ACT_STAT1_B32A_FBC_FLD = 176,
+ CFA_BLD_ACT_STAT0_B32A_MPC_FLD = 177,
+ CFA_BLD_ACT_STAT1_B32A_MPC_FLD = 178,
+ CFA_BLD_ACT_STAT0_B32A_MBC_FLD = 179,
+ CFA_BLD_ACT_STAT1_B32A_MBC_FLD = 180,
+ CFA_BLD_ACT_STAT0_B32B_FPC_FLD = 181,
+ CFA_BLD_ACT_STAT1_B32B_FPC_FLD = 182,
+ CFA_BLD_ACT_STAT0_B32B_FBC_FLD = 183,
+ CFA_BLD_ACT_STAT1_B32B_FBC_FLD = 184,
+ CFA_BLD_ACT_STAT0_B32B_TIMESTAMP_FLD = 185,
+ CFA_BLD_ACT_STAT1_B32B_TIMESTAMP_FLD = 186,
+ CFA_BLD_ACT_STAT0_B32B_TCP_FLAGS_FLD = 187,
+ CFA_BLD_ACT_STAT1_B32B_TCP_FLAGS_FLD = 188,
+ CFA_BLD_ACT_STAT0_B32B_UNUSED_0_FLD = 189,
+ CFA_BLD_ACT_STAT1_B32B_UNUSED_0_FLD = 190,
+ CFA_BLD_ACT_STAT0_B32B_MPC15_0_FLD = 191,
+ CFA_BLD_ACT_STAT1_B32B_MPC15_0_FLD = 192,
+ CFA_BLD_ACT_STAT0_B32B_MPC37_16_FLD = 193,
+ CFA_BLD_ACT_STAT1_B32B_MPC37_16_FLD = 194,
+ CFA_BLD_ACT_STAT0_B32B_MBC_FLD = 195,
+ CFA_BLD_ACT_STAT1_B32B_MBC_FLD = 196,
+ CFA_BLD_ACTION_MAX_FLD = 197,
+};
+
+#endif /* _CFA_BLD_FIELD_IDS_H_ */
new file mode 100644
@@ -0,0 +1,1286 @@
+/****************************************************************************
+ * Copyright(c) 2001-2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * Name: cfa_bld_mpc_field_ids.h
+ *
+ * Description: Enumeration definitions for the MPC command/response fields
+ * across multiple hw versions of CFA.
+ *
+ * This file is independent of the CFA HW version and defines the
+ * superset of the enumeration values for MPC command/response
+ * structure fields. This file is meant for use by host applications
+ * that support multiple devices with different CFA Hw versions.
+ *
+ * These enum definitions should be updated whenever any of the
+ * definitions in the auto-generated header 'cfa_bld_pxx_field_ids.h'
+ * file gets any new enum values.
+ *
+ ****************************************************************************/
+#ifndef _CFA_BLD_MPC_FIELD_IDS_H_
+#define _CFA_BLD_MPC_FIELD_IDS_H_
+
+/**
+ * CFA Hardware Cache Table Type
+ */
+enum cfa_bld_mpc_hw_table_type {
+ CFA_BLD_MPC_HW_TABLE_TYPE_ACTION, /**< CFA Action Record Table */
+ CFA_BLD_MPC_HW_TABLE_TYPE_LOOKUP, /**< CFA EM Lookup Record Table */
+ CFA_BLD_MPC_HW_TABLE_TYPE_MAX
+};
+
+/**
+ * CFA MPC Cache access reading mode
+ * To be used as a value for CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD
+ */
+enum cfa_bld_mpc_read_mode {
+ CFA_BLD_MPC_RD_NORMAL, /**< Normal read mode */
+ CFA_BLD_MPC_RD_EVICT, /**< Read the cache and evict the cache line */
+ CFA_BLD_MPC_RD_DEBUG_LINE, /**< Debug read line mode */
+ CFA_BLD_MPC_RD_DEBUG_TAG, /**< Debug read tag mode */
+ CFA_BLD_MPC_RD_MODE_MAX
+};
+
+/**
+ * CFA MPC Cache access writing mode
+ * To be used as a value for CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD
+ */
+enum cfa_bld_mpc_write_mode {
+ CFA_BLD_MPC_WR_WRITE_THRU, /**< Write to cache in Write through mode */
+ CFA_BLD_MPC_WR_WRITE_BACK, /**< Write to cache in Write back mode */
+ CFA_BLD_MPC_WR_MODE_MAX
+};
+
+/**
+ * CFA MPC Cache access eviction mode
+ * To be used as a value for CFA_BLD_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD
+ */
+enum cfa_bld_mpc_evict_mode {
+ /**
+	 * Line evict: These modes evict a single cache line.
+	 * In these modes, the eviction occurs regardless of the cache line
+	 * state (CLEAN/CLEAN_FAST_EVICT/DIRTY).
+ */
+ /* Cache line addressed by set/way is evicted */
+ CFA_BLD_MPC_EV_EVICT_LINE,
+ /* Cache line hit with the table scope/address tuple is evicted */
+ CFA_BLD_MPC_EV_EVICT_SCOPE_ADDRESS,
+
+ /**
+ * Set Evict: These modes evict cache lines that meet certain criteria
+ * from the entire cache set.
+ */
+ /*
+ * Cache lines only in CLEAN state are evicted from the set
+ * derived from the address
+ */
+ CFA_BLD_MPC_EV_EVICT_CLEAN_LINES,
+ /*
+ * Cache lines only in CLEAN_FAST_EVICT state are evicted from
+ * the set derived from the address
+ */
+ CFA_BLD_MPC_EV_EVICT_CLEAN_FAST_EVICT_LINES,
+ /*
+ * Cache lines in both CLEAN and CLEAN_FAST_EVICT states are
+ * evicted from the set derived from the address
+ */
+ CFA_BLD_MPC_EV_EVICT_CLEAN_AND_CLEAN_FAST_EVICT_LINES,
+ /*
+ * All Cache lines in the set identified by the address and
+ * belonging to the table scope are evicted.
+ */
+ CFA_BLD_MPC_EV_EVICT_TABLE_SCOPE,
+ CFA_BLD_MPC_EV_MODE_MAX,
+};
+
+/**
+ * MPC CFA Command completion status
+ */
+enum cfa_bld_mpc_cmpl_status {
+ /* Command success */
+ CFA_BLD_MPC_OK,
+ /* Unsupported CFA opcode */
+ CFA_BLD_MPC_UNSPRT_ERR,
+ /* CFA command format error */
+ CFA_BLD_MPC_FMT_ERR,
+ /* SVIF-Table Scope error */
+ CFA_BLD_MPC_SCOPE_ERR,
+ /* Address error: Only used if EM command or TABLE_TYPE=EM */
+ CFA_BLD_MPC_ADDR_ERR,
+ /* Cache operation error */
+ CFA_BLD_MPC_CACHE_ERR,
+ /* EM_SEARCH or EM_DELETE did not find a matching EM entry */
+ CFA_BLD_MPC_EM_MISS,
+ /* EM_INSERT found a matching EM entry and REPLACE=0 in the command */
+ CFA_BLD_MPC_EM_DUPLICATE,
+	/* EM_EVENT_COLLECTION_FAIL: no events to return */
+ CFA_BLD_MPC_EM_EVENT_COLLECTION_FAIL,
+ /*
+ * EM_INSERT required a dynamic bucket to be added to the chain
+ * to successfully insert the EM entry, but the entry provided
+ * for use as dynamic bucket was invalid. (bucket_idx == 0)
+ */
+ CFA_BLD_MPC_EM_ABORT,
+};
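
These completion codes mix genuine errors with expected lookup outcomes (EM_MISS on a failed search or delete, EM_DUPLICATE on an insert with REPLACE=0), so a caller typically folds them into errno-style return values. The following is a minimal sketch; the specific errno choices are an assumption for illustration, not something this header prescribes.

#include <errno.h>
#include "cfa_bld_mpc_field_ids.h"

/* Map an MPC completion status to a negative errno (illustrative only). */
static int cfa_bld_mpc_status_to_errno(enum cfa_bld_mpc_cmpl_status st)
{
	switch (st) {
	case CFA_BLD_MPC_OK:
		return 0;
	case CFA_BLD_MPC_EM_MISS:
		return -ENOENT;		/* no matching EM entry found */
	case CFA_BLD_MPC_EM_DUPLICATE:
		return -EEXIST;		/* insert blocked because REPLACE=0 */
	case CFA_BLD_MPC_UNSPRT_ERR:
		return -ENOTSUP;	/* unsupported CFA opcode */
	case CFA_BLD_MPC_FMT_ERR:
		return -EINVAL;		/* malformed command */
	default:
		return -EIO;		/* scope/address/cache errors, EM_ABORT */
	}
}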
+
+/**
+ * Field IDS for READ_CMD: This command reads 1-4 consecutive 32B words
+ * from the specified address within a table scope.
+ */
+enum cfa_bld_mpc_read_cmd_fields {
+ CFA_BLD_MPC_READ_CMD_OPAQUE_FLD = 0,
+ /* This value selects the table type to be acted upon. */
+ CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD = 1,
+ /* Table scope to access. */
+ CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD = 2,
+ /*
+ * Number of 32B units in access. If value is outside the range [1, 4],
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD = 3,
+ /*
+ * Test field for CFA MPC builder validation, added to introduce
+	 * a hole in the field mapping array
+ */
+ CFA_BLD_MPC_READ_CMD_RANDOM_TEST_FLD = 4,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD = 5,
+ /*
+ * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE):
+ */
+ CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD = 6,
+ /*
+ * The 64-bit host address to which to write the DMA data returned in
+ * the completion. The data will be written to the same function as the
+ * one that owns the SQ this command is read from. DATA_SIZE determines
+ * the maximum size of the data written. If HOST_ADDRESS[1:0] is not 0,
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD = 7,
+ CFA_BLD_MPC_READ_CMD_MAX_FLD = 8,
+};
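
To make the intended use of these IDs concrete, here is a minimal sketch of laying out a READ command as (field id, value) pairs before handing it to a builder. The struct mpc_field pair type and the fill_read_cmd() helper are invented for illustration; the actual MPC builder that encodes the fields into the command buffer is not part of this header.

#include <stdint.h>
#include "cfa_bld_mpc_field_ids.h"

/* Illustrative (field id, value) pair; not a structure from this patch. */
struct mpc_field {
	uint16_t id;
	uint64_t val;
};

static void
fill_read_cmd(struct mpc_field *f, uint32_t tsid, uint32_t index,
	      uint64_t host_dma_addr)
{
	f[0] = (struct mpc_field){ CFA_BLD_MPC_READ_CMD_OPAQUE_FLD, 0x1234 };
	f[1] = (struct mpc_field){ CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD,
				   CFA_BLD_MPC_HW_TABLE_TYPE_ACTION };
	f[2] = (struct mpc_field){ CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD, tsid };
	/* 1-4 x 32B words; anything else completes with FMT_ERR. */
	f[3] = (struct mpc_field){ CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD, 1 };
	f[4] = (struct mpc_field){ CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD,
				   CFA_BLD_MPC_RD_NORMAL };
	f[5] = (struct mpc_field){ CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD, index };
	/* HOST_ADDRESS[1:0] must be 0, so mask the low DMA address bits. */
	f[6] = (struct mpc_field){ CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD,
				   host_dma_addr & ~0x3ULL };
}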
+
+/**
+ * Field IDS for WRITE_CMD: This command writes 1-4 consecutive 32B
+ * words to the specified address within a table scope.
+ */
+enum cfa_bld_mpc_write_cmd_fields {
+ CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD = 0,
+ /* This value selects the table type to be acted upon. */
+ CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD = 1,
+ /*
+ * Sets the OPTION field on the cache interface to use write-through for
+ * EM entry writes while processing EM_INSERT commands. For all other
+	 * cases (including EM_INSERT bucket writes), the OPTION field is set by
+ * the CACHE_OPTION and CACHE_OPTION2 fields.
+ */
+ CFA_BLD_MPC_WRITE_CMD_WRITE_THROUGH_FLD = 2,
+ /* Table scope to access. */
+ CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD = 3,
+ /*
+ * Number of 32B units in access. If value is outside the range [1, 4],
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD = 4,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD = 5,
+ /*
+ * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE):
+ */
+ CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD = 6,
+ CFA_BLD_MPC_WRITE_CMD_MAX_FLD = 7,
+};
+
+/**
+ * Field IDS for READ_CLR_CMD: This command performs a read-modify-write
+ * to the specified 32B address using a 16b mask that specifies up to 16
+ * 16b words to clear before writing the data back. It returns the 32B
+ * data word read from cache (not the value written after the clear
+ * operation).
+ */
+enum cfa_bld_mpc_read_clr_cmd_fields {
+ CFA_BLD_MPC_READ_CLR_CMD_OPAQUE_FLD = 0,
+ /* This value selects the table type to be acted upon. */
+ CFA_BLD_MPC_READ_CLR_CMD_TABLE_TYPE_FLD = 1,
+ /* Table scope to access. */
+ CFA_BLD_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD = 2,
+ /*
+ * This field is no longer used. The READ_CLR command always reads (and
+ * does a mask-clear) on a single cache line. This field was added for
+ * SR2 A0 to avoid an ADDR_ERR when TABLE_INDEX=0 and TABLE_TYPE=EM (see
+ * CUMULUS-17872). That issue was fixed in SR2 B0.
+ */
+ CFA_BLD_MPC_READ_CLR_CMD_DATA_SIZE_FLD = 3,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_BLD_MPC_READ_CLR_CMD_CACHE_OPTION_FLD = 4,
+ /*
+ * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE):
+ */
+ CFA_BLD_MPC_READ_CLR_CMD_TABLE_INDEX_FLD = 5,
+ /*
+ * The 64-bit host address to which to write the DMA data returned in
+ * the completion. The data will be written to the same function as the
+ * one that owns the SQ this command is read from. DATA_SIZE determines
+ * the maximum size of the data written. If HOST_ADDRESS[1:0] is not 0,
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ CFA_BLD_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD = 6,
+ /*
+ * Specifies bits in 32B data word to clear. For x=0..15, when
+ * clear_mask[x]=1, data[x*16+15:x*16] is set to 0.
+ */
+ CFA_BLD_MPC_READ_CLR_CMD_CLEAR_MASK_FLD = 7,
+ CFA_BLD_MPC_READ_CLR_CMD_MAX_FLD = 8,
+};
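
The CLEAR_MASK encoding above (bit x clears data[x*16+15:x*16]) is easy to get wrong, so a small helper sketch follows. It only builds a mask covering a contiguous run of 16-bit words within the 32B line and is illustrative rather than part of the builder.

#include <stdint.h>

/*
 * Build a READ_CLR clear mask covering 'count' 16-bit words starting at
 * 16-bit word 'first' of the 32B data word (16 words per line).
 */
static inline uint16_t clear_16b_words(unsigned int first, unsigned int count)
{
	if (first >= 16 || count == 0 || count > 16 - first)
		return 0;
	return (uint16_t)(((1u << count) - 1) << first);
}

/* Example: clear_16b_words(0, 2) == 0x0003, which clears data[31:0]. */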
+
+/**
+ * Field IDS for INVALIDATE_CMD: This command forces an explicit evict
+ * of 1-4 consecutive cache lines such that the next time the structure
+ * is used it will be re-read from its backing store location.
+ */
+enum cfa_bld_mpc_invalidate_cmd_fields {
+ CFA_BLD_MPC_INVALIDATE_CMD_OPAQUE_FLD = 0,
+ /* This value selects the table type to be acted upon. */
+ CFA_BLD_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD = 1,
+ /* Table scope to access. */
+ CFA_BLD_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD = 2,
+ /*
+ * This value identifies the number of cache lines to invalidate. A
+ * FMT_ERR is reported if the value is not in the range of [1, 4].
+ */
+ CFA_BLD_MPC_INVALIDATE_CMD_DATA_SIZE_FLD = 3,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_BLD_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD = 4,
+ /*
+ * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE):
+ */
+ CFA_BLD_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD = 5,
+ CFA_BLD_MPC_INVALIDATE_CMD_MAX_FLD = 6,
+};
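
Tying this command back to enum cfa_bld_mpc_evict_mode defined earlier, the sketch below describes an INVALIDATE that evicts every line of a table scope from the set addressed by TABLE_INDEX. It reuses the illustrative struct mpc_field from the READ sketch above and is not the driver's actual builder code.

static void
fill_invalidate_cmd(struct mpc_field *f, uint32_t tsid, uint32_t index)
{
	f[0] = (struct mpc_field){ CFA_BLD_MPC_INVALIDATE_CMD_OPAQUE_FLD, 0 };
	f[1] = (struct mpc_field){ CFA_BLD_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD,
				   CFA_BLD_MPC_HW_TABLE_TYPE_LOOKUP };
	f[2] = (struct mpc_field){ CFA_BLD_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD,
				   tsid };
	/* Number of cache lines to invalidate; must stay within [1, 4]. */
	f[3] = (struct mpc_field){ CFA_BLD_MPC_INVALIDATE_CMD_DATA_SIZE_FLD, 1 };
	/* The evict mode doubles as the CACHE_OPTION value here. */
	f[4] = (struct mpc_field){ CFA_BLD_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD,
				   CFA_BLD_MPC_EV_EVICT_TABLE_SCOPE };
	f[5] = (struct mpc_field){ CFA_BLD_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD,
				   index };
}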
+
+/**
+ * Field IDS for EM_SEARCH_CMD: This command supplies an exact match
+ * entry of 1-4 32B words to search for in the exact match table. CFA
+ * first computes the hash value of the key in the entry, and determines
+ * the static bucket address to search from the hash and the
+ * (EM_BUCKETS, EM_SIZE) for TABLE_SCOPE. It then searches that static
+ * bucket chain for an entry with a matching key (the LREC in the
+ * command entry is ignored). If a matching entry is found, CFA reports
+ * OK status in the completion. Otherwise, assuming no errors abort the
+ * search before it completes, it reports EM_MISS status.
+ */
+enum cfa_bld_mpc_em_search_cmd_fields {
+ CFA_BLD_MPC_EM_SEARCH_CMD_OPAQUE_FLD = 0,
+ /* Table scope to access. */
+ CFA_BLD_MPC_EM_SEARCH_CMD_TABLE_SCOPE_FLD = 1,
+ /*
+ * Number of 32B units in access. If value is outside the range [1, 4],
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ CFA_BLD_MPC_EM_SEARCH_CMD_DATA_SIZE_FLD = 2,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_BLD_MPC_EM_SEARCH_CMD_CACHE_OPTION_FLD = 3,
+ CFA_BLD_MPC_EM_SEARCH_CMD_MAX_FLD = 4,
+};
+
+/**
+ * Field IDS for EM_INSERT_CMD: This command supplies an exact match
+ * entry of 1-4 32B words to insert in the exact match table. CFA first
+ * computes the hash value of the key in the entry, and determines the
+ * static bucket address to search from the hash and the (EM_BUCKETS,
+ * EM_SIZE) for TABLE_SCOPE. It then writes the 1-4 32B words of the
+ * exact match entry starting at the TABLE_INDEX location in the
+ * command. When the entry write completes, it searches the static
+ * bucket chain for an existing entry with a key matching the key in the
+ * insert entry (the LREC does not need to match). If a matching entry
+ * is found: * If REPLACE=0, the CFA aborts the insert and returns
+ * EM_DUPLICATE status. * If REPLACE=1, the CFA overwrites the matching
+ * entry with the new entry. REPLACED_ENTRY=1 in the completion in this
+ * case to signal that an entry was replaced. The location of the entry
+ * is provided in the completion. If no match is found, CFA adds the new
+ * entry to the lowest unused entry in the tail bucket. If the current
+ * tail bucket is full, this requires adding a new bucket to the tail.
+ * The entry is then inserted at entry number 0. TABLE_INDEX2 provides
+ * the address of the new tail bucket, if needed. If set to 0, the
+ * insert is aborted and returns EM_ABORT status instead of adding a new
+ * bucket to the tail. CHAIN_UPD in the completion indicates whether a
+ * new bucket was added (1) or not (0). For locked scopes, if the read
+ * of the static bucket gives a locked scope miss error, indicating that
+ * the address is not in the cache, the static bucket is assumed empty.
+ * In this case, TAI creates a new bucket, setting entry 0 to the new
+ * entry fields and initializing all other fields to 0. It writes this
+ * new bucket to the static bucket address, which installs it in the
+ * cache.
+ */
+enum cfa_bld_mpc_em_insert_cmd_fields {
+ CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD = 0,
+ /*
+ * Sets the OPTION field on the cache interface to use write-through for
+ * EM entry writes while processing EM_INSERT commands. For all other
+	 * cases (including EM_INSERT bucket writes), the OPTION field is set by
+ * the CACHE_OPTION and CACHE_OPTION2 fields.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMD_WRITE_THROUGH_FLD = 1,
+ /* Table scope to access. */
+ CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD = 2,
+ /*
+ * Number of 32B units in access. If value is outside the range [1, 4],
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD = 3,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMD_CACHE_OPTION_FLD = 4,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Starting
+ * address to write exact match entry being inserted.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD = 5,
+ /*
+ * Determines setting of OPTION field for all cache write requests for
+ * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support
+ * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMD_CACHE_OPTION2_FLD = 6,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Only used
+ * when no duplicate entry is found and the tail bucket in the chain
+ * searched has no unused entries. In this case, TABLE_INDEX2 provides
+ * the index to the 32B dynamic bucket to add to the tail of the chain
+ * (it is the new tail bucket). In this case, the CFA first writes
+ * TABLE_INDEX2 with a new bucket: * Entry 0 of the bucket sets the
+ * HASH_MSBS computed from the hash and ENTRY_PTR to TABLE_INDEX. *
+ * Entries 1-5 of the bucket set HASH_MSBS and ENTRY_PTR to 0. * CHAIN=0
+	 * and CHAIN_PTR is set to CHAIN_PTR from the original tail bucket to
+ * maintain the background chaining. CFA then sets CHAIN=1 and
+ * CHAIN_PTR=TABLE_INDEX2 in the original tail bucket to link the new
+ * bucket to the chain. CHAIN_UPD=1 in the completion to signal that the
+ * new bucket at TABLE_INDEX2 was added to the tail of the chain.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD = 7,
+ /*
+ * Only used if an entry is found whose key matches the exact match
+ * entry key in the command: * REPLACE=0: The insert is aborted and
+ * EM_DUPLICATE status is returned, signaling that the insert failed.
+ * The index of the matching entry that blocked the insertion is
+ * returned in the completion. * REPLACE=1: The matching entry is
+ * replaced with that from the command (ENTRY_PTR in the bucket is
+ * overwritten with TABLE_INDEX from the command). HASH_MSBS for the
+ * entry number never changes in this case since it had to match the new
+ * entry key HASH_MSBS to match. When an entry is replaced,
+ * REPLACED_ENTRY=1 in the completion and the index of the matching
+ * entry is returned in the completion so that software can de-allocate
+ * the entry.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD = 8,
+ CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD = 9,
+};
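
Putting the description above into code form, here is a hedged sketch of the command-side setup for EM_INSERT: the caller names the entry location (TABLE_INDEX), a spare dynamic bucket for chain growth (TABLE_INDEX2, where 0 means fail with EM_ABORT instead of growing the chain), and the REPLACE policy. fill_em_insert_cmd() and struct mpc_field are the same illustrative constructs used in the earlier sketches.

#include <stdbool.h>

static void
fill_em_insert_cmd(struct mpc_field *f, uint32_t tsid, uint32_t entry_idx,
		   uint32_t spare_bucket_idx, bool replace)
{
	f[0] = (struct mpc_field){ CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD, 0 };
	f[1] = (struct mpc_field){ CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD,
				   tsid };
	/* Entry size in 32B units, here a 2 x 32B exact match entry. */
	f[2] = (struct mpc_field){ CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD, 2 };
	f[3] = (struct mpc_field){ CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD,
				   entry_idx };
	/* 0 here means "return EM_ABORT rather than add a tail bucket". */
	f[4] = (struct mpc_field){ CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD,
				   spare_bucket_idx };
	/* With REPLACE=0 a duplicate key completes with EM_DUPLICATE. */
	f[5] = (struct mpc_field){ CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD,
				   replace ? 1 : 0 };
}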
+
+/**
+ * Field IDS for EM_DELETE_CMD: This command searches for an exact match
+ * entry index in the static bucket chain and deletes it if found.
+ * TABLE_INDEX give the entry index to delete and TABLE_INDEX2 gives the
+ * static bucket index. If a matching entry is found: * If the matching
+ * entry is the last valid entry in the tail bucket, its entry fields
+ * (HASH_MSBS and ENTRY_PTR) are set to 0 to delete the entry. * If the
+ * matching entry is not the last valid entry in the tail bucket, the
+ * entry fields from that last entry are moved to the matching entry,
+ * and the fields of that last entry are set to 0. * If any of the
+ * previous processing results in the tail bucket not having any valid
+ * entries, the tail bucket is the static bucket, the scope is a locked
+ * scope, and CHAIN_PTR=0, hardware evicts the static bucket from the
+ * cache and the completion signals this case with CHAIN_UPD=1. * If any
+ * of the previous processing results in the tail bucket not having any
+ * valid entries, and the tail bucket is not the static bucket, the tail
+ * bucket is removed from the chain. In this case, the penultimate
+ * bucket in the chain becomes the tail bucket. It has CHAIN set to 0 to
+ * unlink the tail bucket, and CHAIN_PTR set to that from the original
+ * tail bucket to preserve background chaining. The completion signals
+ * this case with CHAIN_UPD=1 and returns the index to the bucket
+ * removed so that software can de-allocate it. CFA returns OK status if
+ * the entry was successfully deleted. Otherwise, it returns EM_MISS
+ * status assuming there were no errors that caused processing to be
+ * aborted.
+ */
+enum cfa_bld_mpc_em_delete_cmd_fields {
+ CFA_BLD_MPC_EM_DELETE_CMD_OPAQUE_FLD = 0,
+ /*
+ * Sets the OPTION field on the cache interface to use write-through for
+ * EM entry writes while processing EM_INSERT commands. For all other
+ * cases (including EM_INSERT bucket writes), the OPTION field is set by
+ * the CACHE_OPTION and CACHE_OPTION2 fields.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMD_WRITE_THROUGH_FLD = 1,
+ /* Table scope to access. */
+ CFA_BLD_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD = 2,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMD_CACHE_OPTION_FLD = 3,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Entry index
+ * to delete.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD = 4,
+ /*
+ * Determines setting of OPTION field for all cache write requests for
+ * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support
+ * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMD_CACHE_OPTION2_FLD = 5,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Static
+ * bucket address for bucket chain.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD = 6,
+ CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD = 7,
+};
+
+/**
+ * Field IDS for EM_CHAIN_CMD: This command updates CHAIN_PTR in the
+ * tail bucket of a static bucket chain, supplying both the static
+ * bucket and the new CHAIN_PTR value. TABLE_INDEX is the new CHAIN_PTR
+ * value and TABLE_INDEX2[23:0] is the static bucket. This command
+ * provides software a means to update background chaining coherently
+ * with other bucket updates. The value of CHAIN is unaffected (stays at
+ * 0). For locked scopes, if the static bucket is the tail bucket, it is
+ * empty (all of its ENTRY_PTR values are 0), and TABLE_INDEX=0 (the
+ * CHAIN_PTR is being set to 0), instead of updating the static bucket
+ * it is evicted from the cache. In this case, CHAIN_UPD=1 in the
+ * completion.
+ */
+enum cfa_bld_mpc_em_chain_cmd_fields {
+ CFA_BLD_MPC_EM_CHAIN_CMD_OPAQUE_FLD = 0,
+ /*
+ * Sets the OPTION field on the cache interface to use write-through for
+ * EM entry writes while processing EM_INSERT commands. For all other
+ * cases (including EM_INSERT bucket writes), the OPTION field is set by
+ * the CACHE_OPTION and CACHE_OPTION2 fields.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMD_WRITE_THROUGH_FLD = 1,
+ /* Table scope to access. */
+ CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_SCOPE_FLD = 2,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMD_CACHE_OPTION_FLD = 3,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. New
+ * CHAIN_PTR to write to tail bucket.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_INDEX_FLD = 4,
+ /*
+ * Determines setting of OPTION field for all cache write requests for
+ * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support
+ * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMD_CACHE_OPTION2_FLD = 5,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Static
+ * bucket address for bucket chain.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_INDEX2_FLD = 6,
+ CFA_BLD_MPC_EM_CHAIN_CMD_MAX_FLD = 7,
+};
+
+/**
+ * Field IDS for READ_CMP: When no errors, returns 1-4 consecutive 32B
+ * words from the TABLE_INDEX within the TABLE_SCOPE specified in the
+ * command, writing them to HOST_ADDRESS from the command.
+ */
+enum cfa_bld_mpc_read_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_BLD_MPC_READ_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_BLD_MPC_READ_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_BLD_MPC_READ_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_BLD_MPC_READ_CMP_OPCODE_FLD = 3,
+ /*
+ * The length of the DMA that accompanies the completion in units of
+ * DWORDs (32b). Valid values are [0, 128]. A value of zero indicates
+ * that there is no DMA that accompanies the completion.
+ */
+ CFA_BLD_MPC_READ_CMP_DMA_LENGTH_FLD = 4,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_BLD_MPC_READ_CMP_OPAQUE_FLD = 5,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_BLD_MPC_READ_CMP_V_FLD = 6,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_BLD_MPC_READ_CMP_HASH_MSB_FLD = 7,
+ /* TABLE_TYPE from the command. */
+ CFA_BLD_MPC_READ_CMP_TABLE_TYPE_FLD = 8,
+ /* TABLE_SCOPE from the command. */
+ CFA_BLD_MPC_READ_CMP_TABLE_SCOPE_FLD = 9,
+ /* TABLE_INDEX from the command. */
+ CFA_BLD_MPC_READ_CMP_TABLE_INDEX_FLD = 10,
+ CFA_BLD_MPC_READ_CMP_MAX_FLD = 11,
+};
+
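+/*
+ * Illustrative sketch: once a parse op such as cfa_bld_mpc_parse_cache_read()
+ * (see cfa_bld_mpcops.h) has filled a 'fields' array indexed by the enum
+ * above, a caller could check the completion roughly as follows. Treating a
+ * zero STATUS as success is an assumption made here for illustration; the
+ * actual status encodings are device specific. A DMA_LENGTH of zero means
+ * no read data accompanies the completion.
+ *
+ *   if (fields[CFA_BLD_MPC_READ_CMP_STATUS_FLD].val != 0)
+ *       return -1;
+ *
+ *   if (fields[CFA_BLD_MPC_READ_CMP_DMA_LENGTH_FLD].val == 0)
+ *       return 0;
+ */
+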
+/**
+ * Field IDS for WRITE_CMP: Returns status of the write of 1-4
+ * consecutive 32B words starting at TABLE_INDEX in the table specified
+ * by (TABLE_TYPE, TABLE_SCOPE).
+ */
+enum cfa_bld_mpc_write_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_BLD_MPC_WRITE_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_BLD_MPC_WRITE_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_BLD_MPC_WRITE_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_BLD_MPC_WRITE_CMP_OPCODE_FLD = 3,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_BLD_MPC_WRITE_CMP_OPAQUE_FLD = 4,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_BLD_MPC_WRITE_CMP_V_FLD = 5,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_BLD_MPC_WRITE_CMP_HASH_MSB_FLD = 6,
+ /* TABLE_TYPE from the command. */
+ CFA_BLD_MPC_WRITE_CMP_TABLE_TYPE_FLD = 7,
+ /* TABLE_SCOPE from the command. */
+ CFA_BLD_MPC_WRITE_CMP_TABLE_SCOPE_FLD = 8,
+ /* TABLE_INDEX from the command. */
+ CFA_BLD_MPC_WRITE_CMP_TABLE_INDEX_FLD = 9,
+ CFA_BLD_MPC_WRITE_CMP_MAX_FLD = 10,
+};
+
+/**
+ * Field IDS for READ_CLR_CMP: When no errors, returns 1 32B word from
+ * TABLE_INDEX in the table specified by (TABLE_TYPE, TABLE_SCOPE). The
+ * data returned is the value prior to the clear.
+ */
+enum cfa_bld_mpc_read_clr_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_BLD_MPC_READ_CLR_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_BLD_MPC_READ_CLR_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_BLD_MPC_READ_CLR_CMP_OPCODE_FLD = 3,
+ /*
+ * The length of the DMA that accompanies the completion in units of
+ * DWORDs (32b). Valid values are [0, 128]. A value of zero indicates
+ * that there is no DMA that accompanies the completion.
+ */
+ CFA_BLD_MPC_READ_CLR_CMP_DMA_LENGTH_FLD = 4,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_BLD_MPC_READ_CLR_CMP_OPAQUE_FLD = 5,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_BLD_MPC_READ_CLR_CMP_V_FLD = 6,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_BLD_MPC_READ_CLR_CMP_HASH_MSB_FLD = 7,
+ /* TABLE_TYPE from the command. */
+ CFA_BLD_MPC_READ_CLR_CMP_TABLE_TYPE_FLD = 8,
+ /* TABLE_SCOPE from the command. */
+ CFA_BLD_MPC_READ_CLR_CMP_TABLE_SCOPE_FLD = 9,
+ /* TABLE_INDEX from the command. */
+ CFA_BLD_MPC_READ_CLR_CMP_TABLE_INDEX_FLD = 10,
+ CFA_BLD_MPC_READ_CLR_CMP_MAX_FLD = 11,
+};
+
+/**
+ * Field IDS for INVALIDATE_CMP: Returns status for INVALIDATE commands.
+ */
+enum cfa_bld_mpc_invalidate_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_BLD_MPC_INVALIDATE_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_BLD_MPC_INVALIDATE_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_BLD_MPC_INVALIDATE_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_BLD_MPC_INVALIDATE_CMP_OPCODE_FLD = 3,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_BLD_MPC_INVALIDATE_CMP_OPAQUE_FLD = 4,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_BLD_MPC_INVALIDATE_CMP_V_FLD = 5,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_BLD_MPC_INVALIDATE_CMP_HASH_MSB_FLD = 6,
+ /* TABLE_TYPE from the command. */
+ CFA_BLD_MPC_INVALIDATE_CMP_TABLE_TYPE_FLD = 7,
+ /* TABLE_SCOPE from the command. */
+ CFA_BLD_MPC_INVALIDATE_CMP_TABLE_SCOPE_FLD = 8,
+ /* TABLE_INDEX from the command. */
+ CFA_BLD_MPC_INVALIDATE_CMP_TABLE_INDEX_FLD = 9,
+ CFA_BLD_MPC_INVALIDATE_CMP_MAX_FLD = 10,
+};
+
+/**
+ * Field IDS for EM_SEARCH_CMP: For OK status, returns the index of the
+ * matching entry found for the EM key supplied in the command. Returns
+ * EM_MISS status if no match was found.
+ */
+enum cfa_bld_mpc_em_search_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_BLD_MPC_EM_SEARCH_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_BLD_MPC_EM_SEARCH_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_BLD_MPC_EM_SEARCH_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_BLD_MPC_EM_SEARCH_CMP_OPCODE_FLD = 3,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_BLD_MPC_EM_SEARCH_CMP_OPAQUE_FLD = 4,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_BLD_MPC_EM_SEARCH_CMP_V1_FLD = 5,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_BLD_MPC_EM_SEARCH_CMP_HASH_MSB_FLD = 6,
+ /* TABLE_SCOPE from the command. */
+ CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_SCOPE_FLD = 7,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. For OK
+ * status, gives ENTRY_PTR[25:0] of the matching entry found. Otherwise,
+ * set to 0.
+ */
+ CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_INDEX_FLD = 8,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. If the hash
+ * is computed (no errors during initial processing of the command),
+ * TABLE_INDEX2[23:0] is the static bucket address determined from the
+ * hash of the exact match entry key in the command and the (EM_SIZE,
+ * EM_BUCKETS) configuration for TABLE_SCOPE of the command. Bits 25:24
+ * in this case are set to 0. For any other status, it is always 0.
+ */
+ CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_INDEX2_FLD = 9,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_BLD_MPC_EM_SEARCH_CMP_V2_FLD = 10,
+ /*
+ * BKT_NUM is the bucket number in chain of the tail bucket after
+ * finishing processing the command, except when the command stops
+ * processing before the tail bucket. NUM_ENTRIES is the number of valid
+ * entries in the BKT_NUM bucket. The following describes the cases
+ * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after
+ * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR,
+ * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. *
+ * For CACHE_ERR completion status, BKT_NUM will be set to the bucket
+ * number that was last read without error. If ERR=1 in the response to
+ * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The
+ * static bucket is number 0, BKT_NUM increments for each new bucket in
+ * the chain, and saturates at 255. Therefore, if the value is 255,
+ * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES
+ * will still be the correct value as described above for the bucket.
+ */
+ CFA_BLD_MPC_EM_SEARCH_CMP_BKT_NUM_FLD = 11,
+ /* See BKT_NUM description. */
+ CFA_BLD_MPC_EM_SEARCH_CMP_NUM_ENTRIES_FLD = 12,
+ CFA_BLD_MPC_EM_SEARCH_CMP_MAX_FLD = 13,
+};
+
+/**
+ * Field IDS for EM_INSERT_CMP: OK status indicates that the exact match
+ * entry from the command was successfully inserted. EM_DUPLICATE status
+ * indicates that the insert was aborted because an entry with the same
+ * exact match key was found and REPLACE=0 in the command. EM_ABORT
+ * status indicates that no duplicate was found, the tail bucket in the
+ * chain was full, and TABLE_INDEX2=0. No changes are made to the
+ * database in this case. TABLE_INDEX is the starting address at which
+ * to insert the exact match entry (from the command). TABLE_INDEX2 is
+ * the address at which to insert a new bucket at the tail of the static
+ * bucket chain if needed (from the command). CHAIN_UPD=1 if a new
+ * bucket was added at this address. TABLE_INDEX3 is the static bucket
+ * address for the chain, determined from hashing the exact match entry.
+ * Software needs this address and TABLE_INDEX in order to delete the
+ * entry using an EM_DELETE command. TABLE_INDEX4 is the index of an
+ * entry found that had a matching exact match key to the command entry
+ * key. If no matching entry was found, it is set to 0. There are two
+ * cases when there is a matching entry, depending on REPLACE from the
+ * command: * REPLACE=0: EM_DUPLICATE status is reported and the insert
+ * is aborted. Software can use the static bucket address
+ * (TABLE_INDEX3[23:0]) and the matching entry (TABLE_INDEX4) in an
+ * EM_DELETE command if it wishes to explicitly delete the matching
+ * entry. * REPLACE=1: REPLACED_ENTRY=1 to signal that the entry at
+ * TABLE_INDEX4 was replaced by the insert entry. REPLACED_ENTRY will
+ * only be 1 if reporting OK status in this case. Software can de-
+ * allocate the entry at TABLE_INDEX4.
+ */
+enum cfa_bld_mpc_em_insert_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_BLD_MPC_EM_INSERT_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_BLD_MPC_EM_INSERT_CMP_OPCODE_FLD = 3,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMP_OPAQUE_FLD = 4,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMP_V1_FLD = 5,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_BLD_MPC_EM_INSERT_CMP_HASH_MSB_FLD = 6,
+ /* TABLE_SCOPE from the command. */
+ CFA_BLD_MPC_EM_INSERT_CMP_TABLE_SCOPE_FLD = 7,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX
+ * from the command, which is the starting address at which to insert
+ * the exact match entry.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX_FLD = 8,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2
+ * from the command, which is the index for the new tail bucket to add
+ * if needed (CHAIN_UPD=1 if it was used).
+ */
+ CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX2_FLD = 9,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. If the hash
+ * is computed (no errors during initial processing of the command),
+ * TABLE_INDEX2[23:0] is the static bucket address determined from the
+ * hash of the exact match entry key in the command and the (EM_SIZE,
+ * EM_BUCKETS) configuration for TABLE_SCOPE of the command. Bits 25:24
+ * in this case are set to 0. For any other status, it is always 0.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD = 10,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMP_V2_FLD = 11,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. ENTRY_PTR of
+ * matching entry found. Set to 0 if no matching entry found. If
+ * REPLACED_ENTRY=1, that indicates a matching entry was found and
+ * REPLACE=1 in the command. In this case, the matching entry was
+ * replaced by the new entry in the command and this index can therefore
+ * be de-allocated.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX4_FLD = 12,
+ /*
+ * BKT_NUM is the bucket number in chain of the tail bucket after
+ * finishing processing the command, except when the command stops
+ * processing before the tail bucket. NUM_ENTRIES is the number of valid
+ * entries in the BKT_NUM bucket. The following describes the cases
+ * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after
+ * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR,
+ * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. *
+ * For CACHE_ERR completion status, BKT_NUM will be set to the bucket
+ * number that was last read without error. If ERR=1 in the response to
+ * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The
+ * static bucket is number 0, BKT_NUM increments for each new bucket in
+ * the chain, and saturates at 255. Therefore, if the value is 255,
+ * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES
+ * will still be the correct value as described above for the bucket.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMP_BKT_NUM_FLD = 13,
+ /* See BKT_NUM description. */
+ CFA_BLD_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD = 14,
+ /*
+ * Specifies if the chain was updated while processing the command: Set
+ * to 1 when a new bucket is added to the tail of the static bucket
+ * chain at TABLE_INDEX2. This occurs if and only if the insert requires
+ * adding a new entry and the tail bucket is full. If set to 0,
+ * TABLE_INDEX2 was not used and is therefore still free.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD = 15,
+ /*
+ * Set to 1 if a matching entry was found and REPLACE=1 in the command. In
+ * that case, the entry starting at TABLE_INDEX4 was replaced and can
+ * therefore be de-allocated. Otherwise, this flag is set to 0.
+ */
+ CFA_BLD_MPC_EM_INSERT_CMP_REPLACED_ENTRY_FLD = 16,
+ CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD = 17,
+};
+
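+/*
+ * Illustrative sketch: one way software might act on a parsed EM_INSERT
+ * completion ('fields' filled by cfa_bld_mpc_parse_em_insert(), see
+ * cfa_bld_mpcops.h). free_em_entry() and free_em_bucket() are hypothetical
+ * caller-side allocator hooks, not part of this interface.
+ *
+ *   Matching entry replaced (REPLACE=1 in the command): free its index.
+ *
+ *   if (fields[CFA_BLD_MPC_EM_INSERT_CMP_REPLACED_ENTRY_FLD].val)
+ *       free_em_entry(fields[CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX4_FLD].val);
+ *
+ *   CHAIN_UPD=0: the spare bucket at TABLE_INDEX2 was not consumed.
+ *
+ *   if (!fields[CFA_BLD_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD].val)
+ *       free_em_bucket(fields[CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX2_FLD].val);
+ */
+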
+/**
+ * Field IDS for EM_DELETE_CMP: OK status indicates that an ENTRY_PTR
+ * matching TABLE_INDEX was found in the static bucket chain specified
+ * and was therefore deleted. EM_MISS status indicates that no match was
+ * found. TABLE_INDEX is from the command. It is the index of the entry
+ * to delete. TABLE_INDEX2 is from the command. It is the static bucket
+ * address. TABLE_INDEX3 is the index of the tail bucket of the static
+ * bucket chain prior to processing the command. TABLE_INDEX4 is the
+ * index of the tail bucket of the static bucket chain after processing
+ * the command. If CHAIN_UPD=1 and TABLE_INDEX4==TABLE_INDEX2, the
+ * static bucket was the tail bucket, it became empty after the delete,
+ * the scope is a locked scope, and CHAIN_PTR was 0. In this case, the
+ * static bucket has been evicted from the cache. Otherwise, if
+ * CHAIN_UPD=1, the original tail bucket given by TABLE_INDEX3 was
+ * removed from the chain because it went empty. It can therefore be de-
+ * allocated.
+ */
+enum cfa_bld_mpc_em_delete_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_BLD_MPC_EM_DELETE_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_BLD_MPC_EM_DELETE_CMP_OPCODE_FLD = 3,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMP_OPAQUE_FLD = 4,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMP_V1_FLD = 5,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_BLD_MPC_EM_DELETE_CMP_HASH_MSB_FLD = 6,
+ /* TABLE_SCOPE from the command. */
+ CFA_BLD_MPC_EM_DELETE_CMP_TABLE_SCOPE_FLD = 7,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX
+ * from the command, which is the index of the entry to delete.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX_FLD = 8,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2
+ * from the command.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX2_FLD = 9,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. For OK or
+ * EM_MISS status, the index of the tail bucket of the chain prior to
+ * processing the command. If CHAIN_UPD=1, the bucket was removed and
+ * this index can be de-allocated. For other status values, it is set to
+ * 0.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD = 10,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMP_V2_FLD = 11,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. For OK or
+ * EM_MISS status, the index of the tail bucket of the chain after
+ * processing the command. If CHAIN_UPD=0 (always for EM_MISS status), it is
+ * always equal to TABLE_INDEX3 as the chain was not updated. For other
+ * status values, it is set to 0.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX4_FLD = 12,
+ /*
+ * BKT_NUM is the bucket number in chain of the tail bucket after
+ * finishing processing the command, except when the command stops
+ * processing before the tail bucket. NUM_ENTRIES is the number of valid
+ * entries in the BKT_NUM bucket. The following describes the cases
+ * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after
+ * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR,
+ * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. *
+ * For CACHE_ERR completion status, BKT_NUM will be set to the bucket
+ * number that was last read without error. If ERR=1 in the response to
+ * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The
+ * static bucket is number 0, BKT_NUM increments for each new bucket in
+ * the chain, and saturates at 255. Therefore, if the value is 255,
+ * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES
+ * will still be the correct value as described above for the bucket.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMP_BKT_NUM_FLD = 13,
+ /* See BKT_NUM description. */
+ CFA_BLD_MPC_EM_DELETE_CMP_NUM_ENTRIES_FLD = 14,
+ /*
+ * Specifies if the chain was updated while processing the command: Set
+ * to 1 when a bucket is removed from the static bucket chain. This
+ * occurs if after the delete, the tail bucket is a dynamic bucket and
+ * no longer has any valid entries. In this case, software should de-
+ * allocate the dynamic bucket at TABLE_INDEX3. It is also set to 1 when
+ * the static bucket is evicted, which only occurs for locked scopes.
+ * See the EM_DELETE command description for details.
+ */
+ CFA_BLD_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD = 15,
+ CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD = 16,
+};
+
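+/*
+ * Illustrative sketch: one way software might act on a parsed EM_DELETE
+ * completion ('fields' filled by cfa_bld_mpc_parse_em_delete(), see
+ * cfa_bld_mpcops.h). free_em_bucket() is a hypothetical caller-side
+ * allocator hook. When CHAIN_UPD=1 and TABLE_INDEX4 differs from
+ * TABLE_INDEX2, the dynamic tail bucket at TABLE_INDEX3 was unlinked and
+ * can be returned to the free pool; otherwise the static bucket was
+ * evicted and nothing needs to be freed.
+ *
+ *   uint64_t t2 = fields[CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX2_FLD].val;
+ *   uint64_t t3 = fields[CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD].val;
+ *   uint64_t t4 = fields[CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX4_FLD].val;
+ *
+ *   if (fields[CFA_BLD_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD].val && t4 != t2)
+ *       free_em_bucket(t3);
+ */
+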
+/**
+ * Field IDS for EM_CHAIN_CMP: OK status indicates that the CHAIN_PTR of
+ * the tail bucket was successfully updated. TABLE_INDEX is from the
+ * command. It is the value of the new CHAIN_PTR. TABLE_INDEX2 is from
+ * the command. TABLE_INDEX3 is the index of the tail bucket of the
+ * static bucket chain.
+ */
+enum cfa_bld_mpc_em_chain_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_BLD_MPC_EM_CHAIN_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_BLD_MPC_EM_CHAIN_CMP_OPCODE_FLD = 3,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMP_OPAQUE_FLD = 4,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMP_V1_FLD = 5,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMP_HASH_MSB_FLD = 6,
+ /* TABLE_SCOPE from the command. */
+ CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_SCOPE_FLD = 7,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX
+ * from the command, which is the new CHAIN_PTR for the tail bucket of
+ * the static bucket chain.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX_FLD = 8,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2
+ * from the command.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX2_FLD = 9,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. For OK
+ * status, the index of the tail bucket of the chain. Otherwise, set to
+ * 0.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX3_FLD = 10,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMP_V2_FLD = 11,
+ /*
+ * BKT_NUM is the bucket number in chain of the tail bucket after
+ * finishing processing the command, except when the command stops
+ * processing before the tail bucket. NUM_ENTRIES is the number of valid
+ * entries in the BKT_NUM bucket. The following describes the cases
+ * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after
+ * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR,
+ * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. *
+ * For CACHE_ERR completion status, BKT_NUM will be set to the bucket
+ * number that was last read without error. If ERR=1 in the response to
+ * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The
+ * static bucket is number 0, BKT_NUM increments for each new bucket in
+ * the chain, and saturates at 255. Therefore, if the value is 255,
+ * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES
+ * will still be the correct value as described above for the bucket.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMP_BKT_NUM_FLD = 12,
+ /* See BKT_NUM description. */
+ CFA_BLD_MPC_EM_CHAIN_CMP_NUM_ENTRIES_FLD = 13,
+ /*
+ * Set to 1 when the scope is a locked scope, the tail bucket is the
+ * static bucket, the bucket is empty (all of its ENTRY_PTR values are
+ * 0), and TABLE_INDEX=0 in the command. In this case, the static bucket
+ * is evicted. For all other cases, it is set to 0.
+ */
+ CFA_BLD_MPC_EM_CHAIN_CMP_CHAIN_UPD_FLD = 14,
+ CFA_BLD_MPC_EM_CHAIN_CMP_MAX_FLD = 15,
+};
+
+#endif /* _CFA_BLD_MPC_FIELD_IDS_H_ */
new file mode 100644
@@ -0,0 +1,598 @@
+/****************************************************************************
+ * Copyright(c) 2021 - 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld_mpcops.h
+ *
+ * @brief CFA Builder MPC ops interface for host applications
+ */
+
+#ifndef _CFA_BLD_MPCOPS_H_
+#define _CFA_BLD_MPCOPS_H_
+
+#include <stdio.h>
+#include "cfa_types.h"
+
+/**
+ * CFA HW data object definition
+ */
+struct cfa_mpc_data_obj {
+ /** [in] MPC field identifier */
+ uint16_t field_id;
+ /** [in] Value of the HW field */
+ uint64_t val;
+};
+
+struct cfa_bld_mpcops;
+
+/**
+ * @addtogroup CFA_BLD CFA Builder Library
+ * \ingroup CFA_V3
+ * @{
+ */
+
+/**
+ * CFA MPC ops interface
+ */
+struct cfa_bld_mpcinfo {
+ /** [out] CFA MPC Builder operations function pointer table */
+ const struct cfa_bld_mpcops *mpcops;
+};
+
+/**
+ * @name CFA_BLD_MPC CFA Builder Host MPC OPS API
+ * CFA builder host specific API used by host CFA application to bind
+ * to different CFA devices and access device by using MPC OPS.
+ */
+
+/**@{*/
+/** CFA builder MPC bind API
+ *
+ * This API retrieves the CFA global MPC configuration.
+ *
+ * @param[in] hw_ver
+ * hardware version of the CFA
+ *
+ * @param[out] mpc_info
+ * CFA MPC interface
+ *
+ * @return
+ * 0 for SUCCESS, negative value for FAILURE
+ */
+int cfa_bld_mpc_bind(enum cfa_ver hw_ver, struct cfa_bld_mpcinfo *mpc_info);
+
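+/*
+ * Illustrative usage sketch. It assumes a hardware version enumerator such
+ * as CFA_P70 exists in enum cfa_ver (cfa_types.h), that INVALID_U16 is the
+ * unused-field sentinel described below, and that a 128-byte command buffer
+ * is large enough; all three are assumptions for illustration only.
+ *
+ *   struct cfa_bld_mpcinfo mpc_info;
+ *   struct cfa_mpc_data_obj fields[CFA_BLD_MPC_READ_CMD_MAX_FLD];
+ *   uint8_t cmd[128];
+ *   uint32_t cmd_len = sizeof(cmd);
+ *   int i, rc;
+ *
+ *   rc = cfa_bld_mpc_bind(CFA_P70, &mpc_info);
+ *   if (rc)
+ *       return rc;
+ *
+ *   for (i = 0; i < CFA_BLD_MPC_READ_CMD_MAX_FLD; i++)
+ *       fields[i].field_id = INVALID_U16;
+ *
+ *   fields[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].field_id =
+ *       CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD;
+ *   fields[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].val = CFA_HW_TABLE_LOOKUP;
+ *
+ *   rc = mpc_info.mpcops->cfa_bld_mpc_build_cache_read(cmd, &cmd_len,
+ *                                                      fields);
+ */
+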
+/** CFA device specific function hooks for CFA MPC command composition
+ * and response parsing
+ *
+ * The following device hooks can be defined; unless noted otherwise, they are
+ * optional and can be filled with a null pointer. The purpose of these hooks
+ * is to support CFA device operations for different device variants.
+ */
+struct cfa_bld_mpcops {
+ /** Build MPC Cache read command
+ *
+ * This API composes the MPC cache read command given the list
+ * of read parameters specified as an array of cfa_mpc_data_obj objects.
+ *
+ * @param[in] cmd
+ * MPC command buffer to compose the cache read command into.
+ *
+ * @param[in,out] cmd_buff_len
+ * Pointer to command buffer length variable. The caller sets this
+ * to the size of the 'cmd' buffer in bytes. The api updates this to
+ * the actual size of the composed command. If the buffer length
+ * passed is not large enough to hold the composed command, an error
+ * is returned by the api.
+ *
+ * @param[in] fields
+ * Array of CFA data objects indexed by CFA_BLD_MPC_READ_CMD_XXX_FLD
+ * enum values. The size of this array shall be
+ * CFA_BLD_MPC_READ_CMD_MAX_FLD. If the caller intends to set a
+ * specific field in the MPC command, the caller should set the
+ * field_id in cfa_mpc_data_obj to the array index itself (See example
+ * below). Otherwise set the field_id to INVALID_U16. If the caller
+ * sets the field_id for a field that is not valid for the device
+ * an error is returned.
+ *
+ * To set the table type to EM:
+ * fields[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].field_id =
+ * CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD;
+ * fields[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].val =
+ * CFA_HW_TABLE_LOOKUP;
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_build_cache_read)(uint8_t *cmd,
+ uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+ /** Build MPC Cache Write command
+ *
+ * This API composes the MPC cache write command given the list
+ * of write parameters specified as an array of cfa_mpc_data_obj
+ * objects.
+ *
+ * @param[in] cmd
+ * MPC command buffer to compose the cache write command into.
+ *
+ * @param[in,out] cmd_buff_len
+ * Pointer to command buffer length variable. The caller sets this
+ * to the size of the 'cmd' buffer in bytes. The api updates this to
+ * the actual size of the composed command. If the buffer length
+ * passed is not large enough to hold the composed command, an error
+ * is returned by the api.
+ *
+ * @param[in] data
+ * Pointer to the data to be written. Note that this data is just
+ * copied at the right offset into the command buffer. The actual MPC
+ * write happens when the command is issued over the MPC interface.
+ *
+ * @param[in] fields
+ * Array of CFA data objects indexed by CFA_BLD_MPC_WRITE_CMD_XXX_FLD
+ * enum values. The size of this array shall be
+ * CFA_BLD_MPC_WRITE_CMD_MAX_FLD. If the caller intends to set a
+ * specific field in the MPC command, the caller should set the
+ * field_id in cfa_mpc_data_obj to the array index itself. Otherwise
+ * set the field_id to INVALID_U16. If the caller sets the field_id for
+ * a field that is not valid for the device an error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_build_cache_write)(uint8_t *cmd,
+ uint32_t *cmd_buff_len,
+ const uint8_t *data,
+ struct cfa_mpc_data_obj *fields);
+
+ /** Build MPC Cache Invalidate (Evict) command
+ *
+ * This API composes the MPC cache evict command given the list
+ * of evict parameters specified as an array of cfa_mpc_data_obj
+ * objects.
+ *
+ * @param[in] cmd
+ * MPC command buffer to compose the cache evict command into.
+ *
+ * @param[in,out] cmd_buff_len
+ * Pointer to command buffer length variable. The caller sets this
+ * to the size of the 'cmd' buffer in bytes. The api updates this to
+ * the actual size of the composed command. If the buffer length
+ * passed is not large enough to hold the composed command, an error
+ * is returned by the api.
+ *
+ * @param[in] fields
+ * Array of cfa_mpc_data_obj indexed by
+ * CFA_BLD_MPC_INVALIDATE_CMD_XXX_FLD enum values. The size of this
+ * array shall be CFA_BLD_MPC_INVALIDATE_CMD_MAX_FLD. If the caller
+ * intends to set a specific field in the MPC command, the caller
+ * should set the field_id in cfa_mpc_data_obj to the array index
+ * itself. Otherwise set the field_id to INVALID_U16. If the caller
+ * sets the field_id for a field that is not valid for the device an
+ * error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_build_cache_evict)(uint8_t *cmd,
+ uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+ /** Build MPC Cache read and clear command
+ *
+ * This API composes the MPC cache read-n-clear command given the list
+ * of read parameters specified as an array of cfa_mpc_data_obj objects.
+ *
+ * @param[in] cmd
+ * MPC command buffer to compose the cache read-n-clear command into.
+ *
+ * @param[in,out] cmd_buff_len
+ * Pointer to command buffer length variable. The caller sets this
+ * to the size of the 'cmd' buffer in bytes. The api updates this to
+ * the actual size of the composed command. If the buffer length
+ * passed is not large enough to hold the composed command, an error
+ * is returned by the api.
+ *
+ * @param[in] fields
+ * Array of cfa_mpc_data_obj indexed by
+ * CFA_BLD_MPC_READ_CLR_CMD_XXX_FLD enum values. The size of this
+ * array shall be CFA_BLD_MPC_READ_CLR_CMD_MAX_FLD. If the caller
+ * intends to set a specific field in the MPC command, the caller
+ * should set the field_id in cfa_mpc_data_obj to the array index
+ * itself. Otherwise set the field_id to INVALID_U16. If the caller
+ * sets the field_id for a field that is not valid for the device
+ * an error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_build_cache_read_clr)(uint8_t *cmd,
+ uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+ /** Build MPC EM search command
+ *
+ * This API composes the MPC EM search command given the list
+ * of EM search parameters specified as an array of cfa_mpc_data_obj
+ * objects
+ *
+ * @param[in] cmd
+ * MPC command buffer to compose the EM search command into.
+ *
+ * @param[in,out] cmd_buff_len
+ * Pointer to command buffer length variable. The caller sets this
+ * to the size of the 'cmd' buffer in bytes. The api updates this to
+ * the actual size of the composed command. If the buffer length
+ * passed is not large enough to hold the composed command, an error
+ * is returned by the api.
+ *
+ * @param[in] em_entry
+ * Pointer to the em_entry to be searched.
+ *
+ * @param[in] fields
+ * Array of cfa_mpc_data_obj indexed by
+ * CFA_BLD_MPC_EM_SEARCH_CMD_XXX_FLD enum values. The size of this
+ * array shall be CFA_BLD_MPC_EM_SEARCH_CMD_MAX_FLD. If the caller
+ * intends to set a specific field in the MPC command, the caller
+ * should set the field_id in cfa_mpc_data_obj to the array index
+ * itself. Otherwise set the field_id to INVALID_U16. If the caller
+ * sets the field_id for a field that is not valid for the device an
+ * error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_build_em_search)(uint8_t *cmd, uint32_t *cmd_buff_len,
+ uint8_t *em_entry,
+ struct cfa_mpc_data_obj *fields);
+
+ /** Build MPC EM insert command
+ *
+ * This API composes the MPC EM insert command given the list
+ * of EM insert parameters specified as an array of cfa_mpc_data_obj objects
+ *
+ * @param[in] cmd
+ * MPC command buffer to compose the EM insert command into.
+ *
+ * @param[in,out] cmd_buff_len
+ * Pointer to command buffer length variable. The caller sets this
+ * to the size of the 'cmd' buffer in bytes. The api updates this to
+ * the actual size of the composed command. If the buffer length
+ * passed is not large enough to hold the composed command, an error
+ * is returned by the api.
+ *
+ * @param[in] em_entry
+ * Pointer to the em_entry to be inserted.
+ *
+ * @param[in] fields
+ * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_INSERT_CMD_XXX_FLD
+ * enum values. The size of this array shall be
+ * CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD. If the caller intends to set a
+ * specific field in the MPC command, the caller should set the
+ * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set
+ * the field_id to INVALID_U16. If the caller sets the field_id for a
+ * field that is not valid for the device an error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_build_em_insert)(uint8_t *cmd, uint32_t *cmd_buff_len,
+ const uint8_t *em_entry,
+ struct cfa_mpc_data_obj *fields);
+
+ /** Build MPC EM delete command
+ *
+ * This API composes the MPC EM delete command given the list
+ * of EM delete parameters specified as an array of cfa_mpc_data_obj objects
+ *
+ * @param[in] cmd
+ * MPC command buffer to compose the EM delete command into.
+ *
+ * @param[in,out] cmd_buff_len
+ * Pointer to command buffer length variable. The caller sets this
+ * to the size of the 'cmd' buffer in bytes. The api updates this to
+ * the actual size of the composed command. If the buffer length
+ * passed is not large enough to hold the composed command, an error
+ * is returned by the api.
+ *
+ * @param[in] fields
+ * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_DELETE_CMD_XXX_FLD
+ * enum values. The size of this array shall be
+ * CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD. If the caller intends to set a
+ * specific field in the MPC command, the caller should set the
+ * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set
+ * the field_id to INVALID_U16. If the caller sets the field_id for a
+ * field that is not valid for the device an error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_build_em_delete)(uint8_t *cmd, uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+ /** Build MPC EM chain command
+ *
+ * This API composes the MPC EM chain command given the list
+ * of EM chain parameters specified as an array of cfa_mpc_data_obj objects
+ *
+ * @param[in] cmd
+ * MPC command buffer to compose the EM chain command into.
+ *
+ * @param[in,out] cmd_buff_len
+ * Pointer to command buffer length variable. The caller sets this
+ * to the size of the 'cmd' buffer in bytes. The api updates this to
+ * the actual size of the composed command. If the buffer length
+ * passed is not large enough to hold the composed command, an error
+ * is returned by the api.
+ *
+ * @param[in] fields
+ * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_CHAIN_CMD_XXX_FLD
+ * enum values. The size of this array shall be
+ * CFA_BLD_MPC_EM_CHAIN_CMD_MAX_FLD. If the caller intends to set a
+ * specific field in the MPC command, the caller should set the
+ * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set
+ * the field_id to INVALID_U16. If the caller sets the field_id for a
+ * field that is not valid for the device an error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_build_em_chain)(uint8_t *cmd, uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+ /** Parse MPC Cache read response
+ *
+ * This API parses the MPC cache read response message and returns
+ * the read parameters as an array of cfa_mpc_data_obj objects.
+ *
+ * @param[in] resp
+ * MPC response buffer containing the cache read response.
+ *
+ * @param[in] resp_buff_len
+ * Response buffer length in bytes
+ *
+ * @param[in] rd_data
+ * Buffer to copy the MPC read data into
+ *
+ * @param[in] rd_data_len
+ * Size of the rd_data buffer in bytes
+ *
+ * @param[out] fields
+ * Array of CFA data objects indexed by CFA_BLD_MPC_READ_CMP_XXX_FLD
+ * enum values. The size of this array shall be
+ * CFA_BLD_MPC_READ_CMP_MAX_FLD. If the caller intends to retrieve a
+ * specific field in the MPC response, the caller should set the
+ * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set
+ * the field_id to INVALID_U16. If the caller sets the field_id for a
+ * field that is not valid for the device an error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_parse_cache_read)(uint8_t *resp,
+ uint32_t resp_buff_len,
+ uint8_t *rd_data,
+ uint32_t rd_data_len,
+ struct cfa_mpc_data_obj *fields);
+
+ /** Parse MPC Cache Write response
+ *
+ * This API parses the MPC cache write response message and returns
+ * the write response fields as an array of cfa_mpc_data_obj objects.
+ *
+ * @param[in] resp
+ * MPC response buffer containing the cache write response.
+ *
+ * @param[in] resp_buff_len
+ * Response buffer length in bytes
+ *
+ * @param[out] fields
+ * Array of CFA data objects indexed by CFA_BLD_MPC_WRITE_CMP_XXX_FLD
+ * enum values. The size of this array shall be
+ * CFA_BLD_MPC_WRITE_CMP_MAX_FLD. If the caller intends to retrieve a
+ * specific field in the MPC response, the caller should set the
+ * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set
+ * the field_id to INVALID_U16. If the caller sets the field_id for a
+ * field that is not valid for the device an error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_parse_cache_write)(uint8_t *resp,
+ uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+ /** Parse MPC Cache Invalidate (Evict) response
+ *
+ * This API parses the MPC cache evict response message and returns
+ * the evict response fields as an array of cfa_mpc_data_obj objects.
+ *
+ * @param[in] resp
+ * MPC response buffer containing the cache evict response.
+ *
+ * @param[in] resp_buff_len
+ * Response buffer length in bytes
+ *
+ * @param[out] fields
+ * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_INVALIDATE_CMP_XXX_FLD
+ * enum values. The size of this array shall be
+ * CFA_BLD_MPC_INVALIDATE_CMP_MAX_FLD. If the caller intends to get a
+ * specific field in the MPC response, the caller should set the
+ * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set
+ * the field_id to INVALID_U16. If the caller sets the field_id for a
+ * field that is not valid for the device an error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_parse_cache_evict)(uint8_t *resp,
+ uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+ /* clang-format off */
+ /** Parse MPC Cache read and clear response
+ *
+ * This API parses the MPC cache read-n-clear response message and
+ * returns the read response fields as an array of cfa_mpc_data_obj objects.
+ *
+ * @param[in] resp
+ * MPC response buffer containing the cache read-n-clear response.
+ *
+ * @param[in] resp_buff_len
+ * Response buffer length in bytes
+ *
+ * @param[in] rd_data
+ * Buffer to copy the MPC read data into
+ *
+ * @param[in] rd_data_len
+ * Size of the rd_data buffer in bytes
+ *
+ * @param[out] fields
+ * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_READ_CLR_CMP_XXX_FLD
+ * enum values. The size of this array shall be
+ * CFA_BLD_MPC_READ_CLR_CMP_MAX_FLD. If the caller intends to get a
+ * specific field in the MPC response, the caller should set the
+ * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set
+ * the field_id to INVALID_U16. If the caller sets the field_id for a
+ * field that is not valid for the device an error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_parse_cache_read_clr)(uint8_t *resp,
+ uint32_t resp_buff_len, uint8_t *rd_data,
+ uint32_t rd_data_len, struct cfa_mpc_data_obj *fields);
+
+ /* clang-format on */
+ /** Parse MPC EM search response
+ *
+ * This API parses the MPC EM search response message and returns
+ * the EM search response fields as an array of cfa_mpc_data_obj objects
+ *
+ * @param[in] resp
+ * MPC response buffer containing the EM search response.
+ *
+ * @param[in] resp_buff_len
+ * Response buffer length in bytes
+ *
+ * @param[out] fields
+ * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_SEARCH_CMP_XXX_FLD
+ * enum values. The size of this array shall be
+ * CFA_BLD_MPC_EM_SEARCH_CMP_MAX_FLD. If the caller intends to get a
+ * specific field in the MPC response, the caller should set the
+ * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set
+ * the field_id to INVALID_U16. If the caller sets the field_id for a
+ * field that is not valid for the device an error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_parse_em_search)(uint8_t *resp,
+ uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+ /** Parse MPC EM insert response
+ *
+ * This API parses the MPC EM insert response message and returns
+ * the EM insert response fields as an array of cfa_mpc_data_obj objects
+ *
+ * @param[in] resp
+ * MPC response buffer containing the EM insert response.
+ *
+ * @param[in] resp_buff_len
+ * Response buffer length in bytes
+ *
+ * @param[out] fields
+ * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_INSERT_CMP_XXX_FLD
+ * enum values. The size of this array shall be
+ * CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD. If the caller intends to get a
+ * specific field in the MPC response, the caller should set the
+ * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set
+ * the field_id to INVALID_U16. If the caller sets the field_id for a
+ * field that is not valid for the device an error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_parse_em_insert)(uint8_t *resp,
+ uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+ /** Parse MPC EM delete response
+ *
+ * This API parses the MPC EM delete response message and returns
+ * the EM delete response fields as an array of cfa_mpc_data_obj objects
+ *
+ * @param[in] resp
+ * MPC response buffer containing the EM delete response.
+ *
+ * @param[in] resp_buff_len
+ * Response buffer length in bytes
+ *
+ * @param[out] fields
+ * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_DELETE_CMP_XXX_FLD
+ * enum values. The size of this array shall be
+ * CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD. If the caller intends to get a
+ * specific field in the MPC response, the caller should set the
+ * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set
+ * the field_id to INVALID_U16. If the caller sets the field_id for a
+ * field that is not valid for the device an error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_parse_em_delete)(uint8_t *resp,
+ uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+ /** Parse MPC EM chain response
+ *
+ * This API parses the MPC EM chain response message and returns
+ * the EM chain response fields as an array of cfa_mpc_data_obj objects
+ *
+ * @param[in] resp
+ * MPC response buffer containing the EM chain response.
+ *
+ * @param[in] resp_buff_len
+ * Response buffer length in bytes
+ *
+ * @param[out] fields
+ * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_CHAIN_CMP_XXX_FLD
+ * enum values. The size of this array shall be
+ * CFA_BLD_MPC_EM_CHAIN_CMP_MAX_FLD. If the caller intends to get a
+ * specific field in the MPC response, the caller should set the
+ * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set
+ * the field_id to INVALID_U16. If the caller sets the field_id for a
+ * field that is not valid for the device an error is returned.
+ *
+ * @return
+ * 0 for SUCCESS, negative errno for FAILURE
+ *
+ */
+ int (*cfa_bld_mpc_parse_em_chain)(uint8_t *resp, uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields);
+};
+
+/**@}*/
+
+/**@}*/
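+
+/*
+ * Usage sketch (illustrative only, not part of the generated interface):
+ * extracting a single field from an EM search completion through the
+ * parse devops above. "ops", "resp" and "resp_len" are assumed to come
+ * from the caller's MPC completion handling, and "want_fld" stands in
+ * for whichever CFA_BLD_MPC_EM_SEARCH_CMP_XXX_FLD value is of interest.
+ *
+ *    struct cfa_mpc_data_obj fields[CFA_BLD_MPC_EM_SEARCH_CMP_MAX_FLD];
+ *    uint16_t i;
+ *    int rc;
+ *
+ *    for (i = 0; i < CFA_BLD_MPC_EM_SEARCH_CMP_MAX_FLD; i++)
+ *        fields[i].field_id = INVALID_U16;
+ *    fields[want_fld].field_id = want_fld;
+ *
+ *    rc = ops->cfa_bld_mpc_parse_em_search(resp, resp_len, fields);
+ *    if (rc)
+ *        return rc;
+ *
+ * On success, fields[want_fld] carries the parsed value for that field;
+ * all other entries are skipped because they were left at INVALID_U16.
+ */
+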
+#endif /* _CFA_BLD_DEVOPS_H_ */
new file mode 100644
@@ -0,0 +1,543 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld_p70.h
+ *
+ * @brief CFA Phase 7.0 specific Builder public definitions
+ */
+
+#ifndef _CFA_BLD_P70_H_
+#define _CFA_BLD_P70_H_
+
+#include "sys_util.h"
+#include "cfa_bld_defs.h"
+#include "cfa_bld_p70_field_ids.h"
+
+/**
+ * Maximum key array size
+ */
+#define CFA_P70_KEY_MAX_FIELD_CNT \
+ MAX((uint16_t)CFA_P70_EM_KEY_LAYOUT_MAX_FLD, \
+ (uint16_t)CFA_P70_WC_TCAM_FKB_MAX_FLD)
+#define CFA_P70_ACT_MAX_TEMPLATE_SZ sizeof(struct cfa_bld_p70_action_template)
+
+#define CFA_P70_PROF_MAX_KEYS 4
+enum cfa_p70_mac_sel_mode {
+ CFA_P70_MAC_SEL_MODE_FIRST = 0,
+ CFA_P70_MAC_SEL_MODE_LOWEST = 1
+};
+
+struct cfa_p70_prof_key_cfg {
+ uint8_t mac_sel[CFA_P70_PROF_MAX_KEYS];
+#define CFA_PROF_P70_MAC_SEL_DMAC0 (1 << 0)
+#define CFA_PROF_P70_MAC_SEL_T_MAC0 (1 << 1)
+#define CFA_PROF_P70_MAC_SEL_OUTERMOST_MAC0 (1 << 2)
+#define CFA_PROF_P70_MAC_SEL_DMAC1 (1 << 3)
+#define CFA_PROF_P70_MAC_SEL_T_MAC1 (1 << 4)
+#define CFA_PROF_P70_MAC_OUTERMOST_MAC1 (1 << 5)
+ uint8_t vlan_sel[CFA_P70_PROF_MAX_KEYS];
+#define CFA_PROF_P70_VLAN_SEL_INNER_HDR 0
+#define CFA_PROF_P70_VLAN_SEL_TUNNEL_HDR 1
+#define CFA_PROF_P70_VLAN_SEL_OUTERMOST_HDR 2
+ uint8_t pass_cnt;
+ enum cfa_p70_mac_sel_mode mode;
+};
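+
+/*
+ * Illustrative sketch (assumed usage, not mandated by this header): a
+ * single-pass profile key configuration that keys on the outermost DMAC
+ * and the outermost VLAN header, letting the device pick the first
+ * matching MAC.
+ *
+ *    struct cfa_p70_prof_key_cfg cfg = { 0 };
+ *
+ *    cfg.mac_sel[0] = CFA_PROF_P70_MAC_SEL_OUTERMOST_MAC0;
+ *    cfg.vlan_sel[0] = CFA_PROF_P70_VLAN_SEL_OUTERMOST_HDR;
+ *    cfg.pass_cnt = 1;
+ *    cfg.mode = CFA_P70_MAC_SEL_MODE_FIRST;
+ */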
+
+/*
+ * Field id remap function pointer. Passed by cfa-v3 caller
+ * to builder apis if the caller requires the apis to remap
+ * the field ids before using them to update key/action layout
+ * objects. An example of one such api is action_compute_ptr()
+ * which updates the offsets for the modify/encap/source/stat
+ * records in the action record. If the caller is remapping
+ * the field ids (to save memory in fw builds for example), then
+ * this remap api is required to be passed. If passed as NULL,
+ * the field ids are not remapped and used directly to index
+ * into the layout.
+ *
+ * @param Input field id
+ *
+ * @return Remapped field id on success, UINT16_MAX on failure.
+ */
+typedef uint16_t(cfa_fld_remap)(uint16_t);
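+
+/*
+ * Minimal sketch of a remap callback, for illustration only; the table
+ * name and size below (my_remap_tbl, MY_REMAP_TBL_SZ) are hypothetical,
+ * caller-owned symbols. Returning UINT16_MAX reports that the field id
+ * has no entry in the caller's remap table.
+ *
+ *    static uint16_t my_fld_remap(uint16_t fid)
+ *    {
+ *        if (fid >= MY_REMAP_TBL_SZ)
+ *            return UINT16_MAX;
+ *        return my_remap_tbl[fid];
+ *    }
+ */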
+
+/**
+ * CFA P70 action layout definition
+ */
+
+enum action_type_p70 {
+ /** Select this type to build a Full Action Record Object
+ */
+ CFA_P70_ACT_OBJ_TYPE_FULL_ACT,
+ /** Select this type to build a Compact Action Record Object
+ */
+ CFA_P70_ACT_OBJ_TYPE_COMPACT_ACT,
+ /** Select this type to build an MCG Action Record Object
+ */
+ CFA_P70_ACT_OBJ_TYPE_MCG_ACT,
+ /** Select this type to build Standalone Modify Action Record Object */
+ CFA_P70_ACT_OBJ_TYPE_MODIFY,
+ /** Select this type to build Standalone Stat Action Record Object */
+ CFA_P70_ACT_OBJ_TYPE_STAT,
+ /** Select this type to build Standalone Source Action Record Object */
+ CFA_P70_ACT_OBJ_TYPE_SRC_PROP,
+ /** Select this type to build Standalone Encap Action Record Object */
+ CFA_P70_ACT_OBJ_TYPE_ENCAP,
+};
+
+enum stat_op_p70 {
+ /** Count the statistic on ingress to CFA
+ */
+ CFA_P70_STAT_OP_INGRESS = 0,
+ /** Count the statistic on egress from CFA
+ */
+ CFA_P70_STAT_OP_EGRESS = 1,
+};
+
+enum stat_type_p70 {
+ /** Statistic is Forward packet count(64b)/Forward byte
+ * count(64b)
+ */
+ CFA_P70_STAT_COUNTER_SIZE_16B = 0,
+ /** Statistic is Forward packet count(64b)/Forward byte
+ * count(64b)/TCP Flags(16b)/Timestamp(32b)
+ */
+ CFA_P70_STAT_COUNTER_SIZE_24B = 1,
+ /** Statistic is Forward packet count(64b)/Forward byte
+ * count(64b)/Meter(drop or red) packet count(64b)/Meter(drop
+ * or red) byte count(64b)
+ */
+ CFA_P70_STAT_COUNTER_SIZE_32B = 2,
+ /** Statistic is Forward packet count(64b)/Forward byte
+ * count(64b)/Meter(drop or red) packet count(38b)/Meter(drop
+ * or red) byte count(42b)/TCP Flags(16b)/Timestamp(32b)
+ */
+ CFA_P70_STAT_COUNTER_SIZE_32B_ALL = 3,
+};
+
+enum encap_vtag_p70 {
+ CFA_P70_ACT_ENCAP_VTAGS_PUSH_0 = 0,
+ CFA_P70_ACT_ENCAP_VTAGS_PUSH_1,
+ CFA_P70_ACT_ENCAP_VTAGS_PUSH_2
+};
+
+enum encap_l3_p70 {
+ /** Set to disable any L3 encapsulation
+ * processing, default
+ */
+ CFA_P70_ACT_ENCAP_L3_NONE = 0,
+ /** Set to enable L3 IPv4 encapsulation
+ */
+ CFA_P70_ACT_ENCAP_L3_IPV4 = 4,
+ /** Set to enable L3 IPv6 encapsulation
+ */
+ CFA_P70_ACT_ENCAP_L3_IPV6 = 5,
+ /** Set to enable L3 MPLS 8847 encapsulation
+ */
+ CFA_P70_ACT_ENCAP_L3_MPLS_8847 = 6,
+ /** Set to enable L3 MPLS 8848 encapsulation
+ */
+ CFA_P70_ACT_ENCAP_L3_MPLS_8848 = 7
+};
+
+enum encap_tunnel_p70 {
+ /** Set to disable Tunnel header encapsulation
+ * processing, default
+ */
+ CFA_P70_ACT_ENCAP_TNL_NONE = 0,
+ /** Set to enable Tunnel Generic Full header
+ * encapsulation
+ */
+ CFA_P70_ACT_ENCAP_TNL_GENERIC_FULL,
+ /** Set to enable VXLAN header encapsulation
+ */
+ CFA_P70_ACT_ENCAP_TNL_VXLAN,
+ /** Set to enable NGE (VXLAN2) header encapsulation
+ */
+ CFA_P70_ACT_ENCAP_TNL_NGE,
+ /** Set to enable NVGRE header encapsulation
+ */
+ CFA_P70_ACT_ENCAP_TNL_NVGRE,
+ /** Set to enable GRE header encapsulation
+ */
+ CFA_P70_ACT_ENCAP_TNL_GRE,
+ /** Set to enable Generic header after Tunnel
+ * L4 encapsulation
+ */
+ CFA_P70_ACT_ENCAP_TNL_GENERIC_AFTER_TL4,
+ /** Set to enable Generic header after Tunnel
+ * encapsulation
+ */
+ CFA_P70_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
+};
+
+enum source_rec_type_p70 {
+ /** Set to Source MAC Address
+ */
+ CFA_P70_SOURCE_MAC = 0,
+ /** Set to Source MAC and IPv4 Addresses
+ */
+ CFA_P70_SOURCE_MAC_IPV4 = 1,
+ /** Set to Source MAC and IPv6 Addresses
+ */
+ CFA_P70_SOURCE_MAC_IPV6 = 2,
+};
+
+/**
+ * From CFA phase 7.0 onwards, setting the modify vector bit
+ * 'ACT_MODIFY_TUNNEL_MODIFY' requires corresponding data fields to be
+ * set. This enum defines the parameters that determine the
+ * layout of these associated data fields. This enum
+ * is not used for versions older than CFA Phase 7.0, where setting
+ * the 'ACT_MODIFY_TUNNEL_MODIFY' bit will just delete the internal tunnel.
+ */
+enum tunnel_modify_mode_p70 {
+ /* No change to tunnel protocol */
+ CFA_P70_ACT_MOD_TNL_NO_PROTO_CHANGE = 0,
+ /* 8-bit tunnel protocol change */
+ CFA_P70_ACT_MOD_TNL_8B_PROTO_CHANGE = 1,
+ /* 16-bit tunnel protocol change */
+ CFA_P70_ACT_MOD_TNL_16B_PROTO_CHANGE = 2,
+ CFA_P70_ACT_MOD_TNL_MAX
+};
+
+/**
+ * Action object template structure
+ *
+ * Template structure presents data fields that are necessary to know
+ * at the beginning of Action Builder (AB) processing, i.e. before
+ * the AB compilation. One such example could be a template that is
+ * flexible in size (Encap Record) and the presence of these fields
+ * allows for determining the template size as well as where the
+ * fields are located in the record.
+ *
+ * The template may also present fields that are not made visible to
+ * the caller by way of the action fields.
+ *
+ * Template fields also allow for additional checking on user visible
+ * fields. One such example could be the encap pointer behavior on a
+ * CFA_P70_ACT_OBJ_TYPE_ACT or CFA_P70_ACT_OBJ_TYPE_ACT_SRAM.
+ */
+struct cfa_bld_p70_action_template {
+ /** Action Object type
+ *
+ * Controls the type of the Action Template
+ */
+ enum action_type_p70 obj_type;
+
+ /** Action Control
+ *
+ * Controls the internals of the Action Template
+ *
+ * act is valid when:
+ * ((obj_type == CFA_P70_ACT_OBJ_TYPE_FULL_ACT)
+ * ||
+ * (obj_type == CFA_P70_ACT_OBJ_TYPE_COMPACT_ACT))
+ *
+ * Specifies whether each action is to be in-line or not.
+ */
+ struct {
+ /** Set to true to enable statistics
+ */
+ uint8_t stat_enable;
+ /** Set to true to enable statistics to be inlined
+ */
+ uint8_t stat_inline;
+ /** Set to true to enable statistics 1
+ */
+ uint8_t stat1_enable;
+ /** Set to true to enable statistics 1 to be inlined
+ */
+ uint8_t stat1_inline;
+ /** Set to true to enable encapsulation
+ */
+ uint8_t encap_enable;
+ /** Set to true to enable encapsulation to be inlined
+ */
+ uint8_t encap_inline;
+ /** Set to true to align the encap record to cache
+ * line
+ */
+ uint8_t encap_align;
+ /** Set to true to enable source
+ */
+ uint8_t source_enable;
+ /** Set to true to enable source to be inlined
+ */
+ uint8_t source_inline;
+ /** Set to true to enable modification
+ */
+ uint8_t mod_enable;
+ /** Set to true to enable modify to be inlined
+ */
+ uint8_t mod_inline;
+ /** Set to true to enable subsequent MCGs
+ */
+ uint8_t mcg_subseq_enable;
+ } act;
+
+ /** Statistic Control
+ * Controls the type of statistic the template is describing
+ *
+ * stat is valid when:
+ * ((obj_type == CFA_P70_ACT_OBJ_TYPE_FULL_ACT) ||
+ * (obj_type == CFA_P70_ACT_OBJ_TYPE_COMPACT_ACT)) &&
+ * (act.stat_enable || act.stat_inline)
+ */
+ struct {
+ enum stat_op_p70 op;
+ enum stat_type_p70 type;
+ } stat;
+
+ /** Encap Control
+ * Controls the type of encapsulation the template is
+ * describing
+ *
+ * encap is valid when:
+ * ((obj_type == CFA_P70_ACT_OBJ_TYPE_FULL_ACT) ||
+ * (obj_type == CFA_P70_ACT_OBJ_TYPE_COMPACT_ACT)) &&
+ * (act.encap_enable || act.encap_inline)
+ */
+ struct {
+ /** Set to true to enable L2 capability in the
+ * template
+ */
+ uint8_t l2_enable;
+ /** vtag controls the Encap Vector - VTAG Encoding, 4 bits
+ *
+ * <ul>
+ * <li> CFA_P70_ACT_ENCAP_VTAGS_PUSH_0, default, no VLAN
+ * Tags applied
+ * <li> CFA_P70_ACT_ENCAP_VTAGS_PUSH_1, adds capability to
+ * set 1 VLAN Tag. Action Template compile adds
+ * the following field to the action object
+ * TF_ER_VLAN1
+ * <li> CFA_P70_ACT_ENCAP_VTAGS_PUSH_2, adds capability to
+ * set 2 VLAN Tags. Action Template compile adds
+ * the following fields to the action object
+ * TF_ER_VLAN1 and TF_ER_VLAN2
+ * </ul>
+ */
+ enum encap_vtag_p70 vtag;
+
+ /*
+ * The remaining fields are NOT supported when
+ * direction is RX and ((obj_type ==
+ * CFA_P70_ACT_OBJ_TYPE_ACT) && act.encap_enable).
+ * cfa_bld_p70_action_compile_layout will perform the
+ * checking and skip remaining fields.
+ */
+ /** L3 Encap controls the Encap Vector - L3 Encoding,
+ * 3 bits. Defines the type of L3 Encapsulation the
+ * template is describing.
+ * <ul>
+ * <li> CFA_P70_ACT_ENCAP_L3_NONE, default, no L3
+ * Encapsulation processing.
+ * <li> CFA_P70_ACT_ENCAP_L3_IPV4, enables L3 IPv4
+ * Encapsulation.
+ * <li> CFA_P70_ACT_ENCAP_L3_IPV6, enables L3 IPv6
+ * Encapsulation.
+ * <li> CFA_P70_ACT_ENCAP_L3_MPLS_8847, enables L3 MPLS
+ * 8847 Encapsulation.
+ * <li> CFA_P70_ACT_ENCAP_L3_MPLS_8848, enables L3 MPLS
+ * 8848 Encapsulation.
+ * </ul>
+ */
+ enum encap_l3_p70 l3;
+
+#define CFA_P70_ACT_ENCAP_MAX_MPLS_LABELS 8
+ /** 1-8 labels, valid when
+ * (l3 == CFA_P70_ACT_ENCAP_L3_MPLS_8847) ||
+ * (l3 == CFA_P70_ACT_ENCAP_L3_MPLS_8848)
+ *
+ * MAX number of MPLS Labels 8.
+ */
+ uint8_t l3_num_mpls_labels;
+
+ /** Set to true to enable L4 capability in the
+ * template.
+ *
+ * true adds TF_EN_UDP_SRC_PORT and
+ * TF_EN_UDP_DST_PORT to the template.
+ */
+ uint8_t l4_enable;
+
+ /** Tunnel Encap controls the Encap Vector - Tunnel
+ * Encap, 3 bits. Defines the type of Tunnel
+ * encapsulation the template is describing
+ * <ul>
+ * <li> CFA_P70_ACT_ENCAP_TNL_NONE, default, no Tunnel
+ * Encapsulation processing.
+ * <li> CFA_P70_ACT_ENCAP_TNL_GENERIC_FULL
+ * <li> CFA_P70_ACT_ENCAP_TNL_VXLAN. NOTE: Expects
+ * l4_enable set to true;
+ * <li> CFA_P70_ACT_ENCAP_TNL_NGE. NOTE: Expects l4_enable
+ * set to true;
+ * <li> CFA_P70_ACT_ENCAP_TNL_NVGRE. NOTE: only valid if
+ * l4_enable set to false.
+ * <li> CFA_P70_ACT_ENCAP_TNL_GRE. NOTE: only valid if
+ * l4_enable set to false.
+ * <li> CFA_P70_ACT_ENCAP_TNL_GENERIC_AFTER_TL4
+ * <li> CFA_P70_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
+ * </ul>
+ */
+ enum encap_tunnel_p70 tnl;
+
+#define CFA_P70_ACT_ENCAP_MAX_TUNNEL_GENERIC_SIZE 128
+ /** Number of bytes of generic tunnel header,
+ * valid when
+ * (tnl == CFA_P70_ACT_ENCAP_TNL_GENERIC_FULL) ||
+ * (tnl == CFA_P70_ACT_ENCAP_TNL_GENERIC_AFTER_TL4) ||
+ * (tnl == CFA_P70_ACT_ENCAP_TNL_GENERIC_AFTER_TNL)
+ */
+ uint8_t tnl_generic_size;
+
+#define CFA_P70_ACT_ENCAP_MAX_OPLEN 15
+ /** Number of 32b words of nge options,
+ * valid when
+ * (tnl == CFA_P70_ACT_ENCAP_TNL_NGE)
+ */
+ uint8_t tnl_nge_op_len;
+
+ /** Set to true to enable MAC/VLAN/IP/TNL overrides in the
+ * template
+ */
+ bool encap_override;
+ /* Currently not planned */
+ /* Custom Header */
+ /* uint8_t custom_enable; */
+ } encap;
+
+ /** Modify Control
+ *
+ * Controls the type of the Modify Action the template is
+ * describing
+ *
+ * modify is valid when:
+ * ((obj_type == CFA_P70_ACT_OBJ_TYPE_FULL_ACT) ||
+ * (obj_type == CFA_P70_ACT_OBJ_TYPE_COMPACT_ACT)) &&
+ * (act.mod_enable || act.mod_inline)
+ */
+/** Set to enable Modify of Metadata
+ */
+#define CFA_P70_ACT_MODIFY_META 0x1
+/** Set to enable Delete of Outer VLAN
+ */
+#define CFA_P70_ACT_MODIFY_DEL_OVLAN 0x2
+/** Set to enable Delete of Inner VLAN
+ */
+#define CFA_P70_ACT_MODIFY_DEL_IVLAN 0x4
+/** Set to enable Replace or Add of Outer VLAN
+ */
+#define CFA_P70_ACT_MODIFY_REPL_ADD_OVLAN 0x8
+/** Set to enable Replace or Add of Inner VLAN
+ */
+#define CFA_P70_ACT_MODIFY_REPL_ADD_IVLAN 0x10
+/** Set to enable Modify of TTL
+ */
+#define CFA_P70_ACT_MODIFY_TTL_UPDATE 0x20
+/** Set to enable delete of INT Tunnel
+ */
+#define CFA_P70_ACT_MODIFY_DEL_INT_TNL 0x40
+/** For phase 7.0 this bit can be used to modify the
+ * tunnel protocol in addition to deleting internal
+ * or outer tunnel
+ */
+#define CFA_P70_ACT_MODIFY_TUNNEL_MODIFY CFA_P70_ACT_MODIFY_DEL_INT_TNL
+/** Set to enable Modify of Field
+ */
+#define CFA_P70_ACT_MODIFY_FIELD 0x80
+/** Set to enable Modify of Destination MAC
+ */
+#define CFA_P70_ACT_MODIFY_DMAC 0x100
+/** Set to enable Modify of Source MAC
+ */
+#define CFA_P70_ACT_MODIFY_SMAC 0x200
+/** Set to enable Modify of Source IPv6 Address
+ */
+#define CFA_P70_ACT_MODIFY_SRC_IPV6 0x400
+/** Set to enable Modify of Destination IPv6 Address
+ */
+#define CFA_P70_ACT_MODIFY_DST_IPV6 0x800
+/** Set to enable Modify of Source IPv4 Address
+ */
+#define CFA_P70_ACT_MODIFY_SRC_IPV4 0x1000
+/** Set to enable Modify of Destination IPv4 Address
+ */
+#define CFA_P70_ACT_MODIFY_DST_IPV4 0x2000
+/** Set to enable Modify of L4 Source Port
+ */
+#define CFA_P70_ACT_MODIFY_SRC_PORT 0x4000
+/** Set to enable Modify of L4 Destination Port
+ */
+#define CFA_P70_ACT_MODIFY_DST_PORT 0x8000
+ uint16_t modify;
+
+/** Set to enable Modify of KID
+ */
+#define CFA_P70_ACT_MODIFY_FIELD_KID 0x1
+ uint16_t field_modify;
+
+ /* Valid for phase 7.0 or higher */
+ enum tunnel_modify_mode_p70 tnl_mod_mode;
+
+ /** Source Control
+ *
+ * Controls the type of the Source Action the template is
+ * describing
+ *
+ * source is valid when:
+ * ((obj_type == CFA_P70_ACT_OBJ_TYPE_FULL_ACT) ||
+ * (obj_type == CFA_P70_ACT_OBJ_TYPE_COMPACT_ACT)) &&
+ * (act.source_enable || act.source_inline)
+ */
+ enum source_rec_type_p70 source;
+};
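+
+/*
+ * Illustrative sketch (assumed usage): describing a full action record
+ * with an inline 16B ingress counter and a modify record that replaces
+ * the outer VLAN and rewrites the tunnel protocol by 16 bits. How the
+ * template is compiled into a layout is outside the scope of this file.
+ *
+ *    struct cfa_bld_p70_action_template at = { 0 };
+ *
+ *    at.obj_type = CFA_P70_ACT_OBJ_TYPE_FULL_ACT;
+ *    at.act.stat_enable = 1;
+ *    at.act.stat_inline = 1;
+ *    at.stat.op = CFA_P70_STAT_OP_INGRESS;
+ *    at.stat.type = CFA_P70_STAT_COUNTER_SIZE_16B;
+ *    at.act.mod_enable = 1;
+ *    at.modify = CFA_P70_ACT_MODIFY_REPL_ADD_OVLAN |
+ *                CFA_P70_ACT_MODIFY_TUNNEL_MODIFY;
+ *    at.tnl_mod_mode = CFA_P70_ACT_MOD_TNL_16B_PROTO_CHANGE;
+ */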
+
+/**
+ * Key template consists of key fields that can be enabled/disabled
+ * individually.
+ */
+struct cfa_p70_key_template {
+ /** [in] Identify if the key template is for TCAM. If false, the
+ * key template is for EM. This field is mandatory for devices that
+ * only support fixed key formats.
+ */
+ bool is_wc_tcam_key;
+ /** [in] Identify if the key template will be used for IPv6 keys.
+ *
+ * Note: This is important for SR2 as the field length for the Flow Id
+ * is dependent on the L3 flow type. For SR2 for IPv4 Keys, the Flow
+ * Id field is 16 bits, for all other types (IPv6, ARP, PTP, EAP, RoCE,
+ * FCoE, UPAR), the Flow Id field length is 20 bits.
+ */
+ bool is_ipv6_key;
+ /** [in] key field enable array; set the corresponding entry to 1
+ * to make a field valid
+ */
+ uint8_t field_en[CFA_P70_KEY_MAX_FIELD_CNT];
+};
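+
+/*
+ * Illustrative sketch (assumed usage): an EM key template that enables
+ * the profile id plus an inner 4-tuple. The indices are taken from
+ * enum cfa_p70_em_fkb_flds in cfa_bld_p70_field_ids.h, assumed here to
+ * be the relevant index space for EM keys.
+ *
+ *    struct cfa_p70_key_template kt = { 0 };
+ *
+ *    kt.is_wc_tcam_key = false;
+ *    kt.is_ipv6_key = false;
+ *    kt.field_en[CFA_P70_EM_FKB_PROF_ID_FLD] = 1;
+ *    kt.field_en[CFA_P70_EM_FKB_L3_SIP0_FLD] = 1;
+ *    kt.field_en[CFA_P70_EM_FKB_L3_DIP0_FLD] = 1;
+ *    kt.field_en[CFA_P70_EM_FKB_L3_PROT_FLD] = 1;
+ *    kt.field_en[CFA_P70_EM_FKB_L4_SRC_FLD] = 1;
+ *    kt.field_en[CFA_P70_EM_FKB_L4_DST_FLD] = 1;
+ */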
+
+/**
+ * Action template consists of action fields that can be enabled/disabled
+ * individually.
+ */
+struct cfa_p70_action_template {
+ /** [in] CFA version for the action template */
+ enum cfa_ver hw_ver;
+ /** [in] action field enable array; set the corresponding entry to 1
+ * to make a field valid
+ */
+ uint8_t data[CFA_P70_ACT_MAX_TEMPLATE_SZ];
+};
+
+#define CFA_PROF_L2CTXT_TCAM_MAX_FIELD_CNT CFA_P70_PROF_L2_CTXT_TCAM_MAX_FLD
+#define CFA_PROF_L2CTXT_REMAP_MAX_FIELD_CNT CFA_P70_PROF_L2_CTXT_RMP_DR_MAX_FLD
+#define CFA_PROF_MAX_KEY_CFG_SZ sizeof(struct cfa_p70_prof_key_cfg)
+
+#endif /* _CFA_BLD_P70_H_ */
new file mode 100644
@@ -0,0 +1,1542 @@
+/****************************************************************************
+ * Copyright(c) 2001-2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * Name: cfa_bld_p70_field_ids.h
+ *
+ * Description: Enumerations definitions for CFA phase 7.0 HW table fields
+ * Action record fields and Lookup Key (EM/WC-TCAM) fields.
+ *
+ * Date: 09/29/22 11:50:37
+ *
+ * Note: This file is script generated by ./cfa_header_gen.py.
+ * DO NOT modify this file manually !!!!
+ *
+ ****************************************************************************/
+#ifndef _CFA_BLD_P70_FIELD_IDS_H_
+#define _CFA_BLD_P70_FIELD_IDS_H_
+
+/* clang-format off */
+
+/**
+ * Lookup Field Range Check Range Memory Fields:
+ */
+enum cfa_p70_lkup_frc_profile_flds {
+ CFA_P70_LKUP_FRC_PROFILE_FIELD_SEL_1_FLD = 0,
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_1_FLD = 1,
+ CFA_P70_LKUP_FRC_PROFILE_FIELD_SEL_0_FLD = 2,
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_FLD = 3,
+ CFA_P70_LKUP_FRC_PROFILE_MAX_FLD
+};
+
+/**
+ * Lookup Connection Tracking State Memory Fields:
+ */
+enum cfa_p70_lkup_ct_state_flds {
+ CFA_P70_LKUP_CT_STATE_NOTIFY_FLD = 0,
+ CFA_P70_LKUP_CT_STATE_NOTIFY_STATE_FLD = 1,
+ CFA_P70_LKUP_CT_STATE_ACTION_FLD = 2,
+ CFA_P70_LKUP_CT_STATE_TIMER_SELECT_FLD = 3,
+ CFA_P70_LKUP_CT_STATE_TIMER_PRELOAD_FLD = 4,
+ CFA_P70_LKUP_CT_STATE_MAX_FLD
+};
+
+/**
+ * Lookup Connection Tracking State Machine Rule Memory Fields:
+ */
+enum cfa_p70_lkup_ct_rule_flds {
+ CFA_P70_LKUP_CT_RULE_VALID_FLD = 0,
+ CFA_P70_LKUP_CT_RULE_MASK_FLD = 1,
+ CFA_P70_LKUP_CT_RULE_PKT_NOT_BG_FLD = 2,
+ CFA_P70_LKUP_CT_RULE_STATE_FLD = 3,
+ CFA_P70_LKUP_CT_RULE_TCP_FLAGS_FLD = 4,
+ CFA_P70_LKUP_CT_RULE_PROT_IS_TCP_FLD = 5,
+ CFA_P70_LKUP_CT_RULE_MSB_UPDT_FLD = 6,
+ CFA_P70_LKUP_CT_RULE_FLAGS_FAILED_FLD = 7,
+ CFA_P70_LKUP_CT_RULE_WIN_FAILED_FLD = 8,
+ CFA_P70_LKUP_CT_RULE_MAX_FLD
+};
+
+/**
+ * Lookup Connection Tracking State Machine Rule Record Memory Fields:
+ */
+enum cfa_p70_lkup_ct_rule_record_flds {
+ CFA_P70_LKUP_CT_RULE_RECORD_ACTION_FLD = 0,
+ CFA_P70_LKUP_CT_RULE_RECORD_NEXT_STATE_FLD = 1,
+ CFA_P70_LKUP_CT_RULE_RECORD_SEND_FLD = 2,
+ CFA_P70_LKUP_CT_RULE_RECORD_MAX_FLD
+};
+
+/**
+ * VEB Destination Bitmap Remap Table. Fields:
+ */
+enum cfa_p70_act_veb_rmp_flds {
+ CFA_P70_ACT_VEB_RMP_MODE_FLD = 0,
+ CFA_P70_ACT_VEB_RMP_ENABLE_FLD = 1,
+ CFA_P70_ACT_VEB_RMP_BITMAP_FLD = 2,
+ CFA_P70_ACT_VEB_RMP_MAX_FLD
+};
+
+/**
+ * Lookup Field Range Check Range Memory Fields:
+ */
+enum cfa_p70_lkup_frc_range_flds {
+ CFA_P70_LKUP_FRC_RANGE_RANGE_LO_FLD = 0,
+ CFA_P70_LKUP_FRC_RANGE_RANGE_HI_FLD = 1,
+ CFA_P70_LKUP_FRC_RANGE_MAX_FLD
+};
+
+/**
+ * L2 Context TCAM. Fields:
+ */
+enum cfa_p70_prof_l2_ctxt_tcam_flds {
+ CFA_P70_PROF_L2_CTXT_TCAM_VALID_FLD = 0,
+ CFA_P70_PROF_L2_CTXT_TCAM_SPARE_FLD = 1,
+ CFA_P70_PROF_L2_CTXT_TCAM_MPASS_CNT_FLD = 2,
+ CFA_P70_PROF_L2_CTXT_TCAM_RCYC_FLD = 3,
+ CFA_P70_PROF_L2_CTXT_TCAM_LOOPBACK_FLD = 4,
+ CFA_P70_PROF_L2_CTXT_TCAM_SPIF_FLD = 5,
+ CFA_P70_PROF_L2_CTXT_TCAM_PARIF_FLD = 6,
+ CFA_P70_PROF_L2_CTXT_TCAM_SVIF_FLD = 7,
+ CFA_P70_PROF_L2_CTXT_TCAM_METADATA_FLD = 8,
+ CFA_P70_PROF_L2_CTXT_TCAM_L2_FUNC_FLD = 9,
+ CFA_P70_PROF_L2_CTXT_TCAM_ROCE_FLD = 10,
+ CFA_P70_PROF_L2_CTXT_TCAM_PURE_LLC_FLD = 11,
+ CFA_P70_PROF_L2_CTXT_TCAM_OT_HDR_TYPE_FLD = 12,
+ CFA_P70_PROF_L2_CTXT_TCAM_T_HDR_TYPE_FLD = 13,
+ CFA_P70_PROF_L2_CTXT_TCAM_ID_CTXT_FLD = 14,
+ CFA_P70_PROF_L2_CTXT_TCAM_MAC0_FLD = 15,
+ CFA_P70_PROF_L2_CTXT_TCAM_MAC1_FLD = 16,
+ CFA_P70_PROF_L2_CTXT_TCAM_VTAG_PRESENT_FLD = 17,
+ CFA_P70_PROF_L2_CTXT_TCAM_TWO_VTAGS_FLD = 18,
+ CFA_P70_PROF_L2_CTXT_TCAM_OVLAN_VID_FLD = 19,
+ CFA_P70_PROF_L2_CTXT_TCAM_OVLAN_TPID_SEL_FLD = 20,
+ CFA_P70_PROF_L2_CTXT_TCAM_IVLAN_VID_FLD = 21,
+ CFA_P70_PROF_L2_CTXT_TCAM_IVLAN_TPID_SEL_FLD = 22,
+ CFA_P70_PROF_L2_CTXT_TCAM_ETYPE_FLD = 23,
+ CFA_P70_PROF_L2_CTXT_TCAM_MAX_FLD
+};
+
+/**
+ * Profiler Profile Lookup TCAM Fields:
+ */
+enum cfa_p70_prof_profile_tcam_flds {
+ CFA_P70_PROF_PROFILE_TCAM_VALID_FLD = 0,
+ CFA_P70_PROF_PROFILE_TCAM_SPARE_FLD = 1,
+ CFA_P70_PROF_PROFILE_TCAM_LOOPBACK_FLD = 2,
+ CFA_P70_PROF_PROFILE_TCAM_PKT_TYPE_FLD = 3,
+ CFA_P70_PROF_PROFILE_TCAM_RCYC_FLD = 4,
+ CFA_P70_PROF_PROFILE_TCAM_METADATA_FLD = 5,
+ CFA_P70_PROF_PROFILE_TCAM_AGG_ERROR_FLD = 6,
+ CFA_P70_PROF_PROFILE_TCAM_L2_FUNC_FLD = 7,
+ CFA_P70_PROF_PROFILE_TCAM_PROF_FUNC_FLD = 8,
+ CFA_P70_PROF_PROFILE_TCAM_HREC_NEXT_FLD = 9,
+ CFA_P70_PROF_PROFILE_TCAM_INT_HDR_TYPE_FLD = 10,
+ CFA_P70_PROF_PROFILE_TCAM_INT_HDR_GROUP_FLD = 11,
+ CFA_P70_PROF_PROFILE_TCAM_INT_IFA_TAIL_FLD = 12,
+ CFA_P70_PROF_PROFILE_TCAM_OTL2_HDR_VALID_FLD = 13,
+ CFA_P70_PROF_PROFILE_TCAM_OTL2_HDR_TYPE_FLD = 14,
+ CFA_P70_PROF_PROFILE_TCAM_OTL2_UC_MC_BC_FLD = 15,
+ CFA_P70_PROF_PROFILE_TCAM_OTL2_VTAG_PRESENT_FLD = 16,
+ CFA_P70_PROF_PROFILE_TCAM_OTL2_TWO_VTAGS_FLD = 17,
+ CFA_P70_PROF_PROFILE_TCAM_OTL3_HDR_VALID_FLD = 18,
+ CFA_P70_PROF_PROFILE_TCAM_OTL3_HDR_ERROR_FLD = 19,
+ CFA_P70_PROF_PROFILE_TCAM_OTL3_HDR_TYPE_FLD = 20,
+ CFA_P70_PROF_PROFILE_TCAM_OTL3_HDR_ISIP_FLD = 21,
+ CFA_P70_PROF_PROFILE_TCAM_OTL4_HDR_VALID_FLD = 22,
+ CFA_P70_PROF_PROFILE_TCAM_OTL4_HDR_ERROR_FLD = 23,
+ CFA_P70_PROF_PROFILE_TCAM_OTL4_HDR_TYPE_FLD = 24,
+ CFA_P70_PROF_PROFILE_TCAM_OTL4_HDR_IS_UDP_TCP_FLD = 25,
+ CFA_P70_PROF_PROFILE_TCAM_OT_HDR_VALID_FLD = 26,
+ CFA_P70_PROF_PROFILE_TCAM_OT_HDR_ERROR_FLD = 27,
+ CFA_P70_PROF_PROFILE_TCAM_OT_HDR_TYPE_FLD = 28,
+ CFA_P70_PROF_PROFILE_TCAM_OT_HDR_FLAGS_FLD = 29,
+ CFA_P70_PROF_PROFILE_TCAM_TL2_HDR_VALID_FLD = 30,
+ CFA_P70_PROF_PROFILE_TCAM_TL2_HDR_TYPE_FLD = 31,
+ CFA_P70_PROF_PROFILE_TCAM_TL2_UC_MC_BC_FLD = 32,
+ CFA_P70_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_FLD = 33,
+ CFA_P70_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_FLD = 34,
+ CFA_P70_PROF_PROFILE_TCAM_TL3_HDR_VALID_FLD = 35,
+ CFA_P70_PROF_PROFILE_TCAM_TL3_HDR_ERROR_FLD = 36,
+ CFA_P70_PROF_PROFILE_TCAM_TL3_HDR_TYPE_FLD = 37,
+ CFA_P70_PROF_PROFILE_TCAM_TL3_HDR_ISIP_FLD = 38,
+ CFA_P70_PROF_PROFILE_TCAM_TL4_HDR_VALID_FLD = 39,
+ CFA_P70_PROF_PROFILE_TCAM_TL4_HDR_ERROR_FLD = 40,
+ CFA_P70_PROF_PROFILE_TCAM_TL4_HDR_TYPE_FLD = 41,
+ CFA_P70_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_FLD = 42,
+ CFA_P70_PROF_PROFILE_TCAM_TUN_HDR_VALID_FLD = 43,
+ CFA_P70_PROF_PROFILE_TCAM_TUN_HDR_ERROR_FLD = 44,
+ CFA_P70_PROF_PROFILE_TCAM_TUN_HDR_TYPE_FLD = 45,
+ CFA_P70_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_FLD = 46,
+ CFA_P70_PROF_PROFILE_TCAM_L2_HDR_VALID_FLD = 47,
+ CFA_P70_PROF_PROFILE_TCAM_L2_HDR_ERROR_FLD = 48,
+ CFA_P70_PROF_PROFILE_TCAM_L2_HDR_TYPE_FLD = 49,
+ CFA_P70_PROF_PROFILE_TCAM_L2_UC_MC_BC_FLD = 50,
+ CFA_P70_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_FLD = 51,
+ CFA_P70_PROF_PROFILE_TCAM_L2_TWO_VTAGS_FLD = 52,
+ CFA_P70_PROF_PROFILE_TCAM_L3_HDR_VALID_FLD = 53,
+ CFA_P70_PROF_PROFILE_TCAM_L3_HDR_ERROR_FLD = 54,
+ CFA_P70_PROF_PROFILE_TCAM_L3_HDR_TYPE_FLD = 55,
+ CFA_P70_PROF_PROFILE_TCAM_L3_HDR_ISIP_FLD = 56,
+ CFA_P70_PROF_PROFILE_TCAM_L3_PROT_FLD = 57,
+ CFA_P70_PROF_PROFILE_TCAM_L4_HDR_VALID_FLD = 58,
+ CFA_P70_PROF_PROFILE_TCAM_L4_HDR_ERROR_FLD = 59,
+ CFA_P70_PROF_PROFILE_TCAM_L4_HDR_TYPE_FLD = 60,
+ CFA_P70_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_FLD = 61,
+ CFA_P70_PROF_PROFILE_TCAM_L4_HDR_SUBTYPE_FLD = 62,
+ CFA_P70_PROF_PROFILE_TCAM_L4_HDR_FLAGS_FLD = 63,
+ CFA_P70_PROF_PROFILE_TCAM_L4_DCN_PRESENT_FLD = 64,
+ CFA_P70_PROF_PROFILE_TCAM_MAX_FLD
+};
+
+/**
+ * Action VEB TCAM. TX Fields (VEB Remap Mode):
+ */
+enum cfa_p70_act_veb_tcam_tx_flds {
+ CFA_P70_ACT_VEB_TCAM_TX_VALID_FLD = 0,
+ CFA_P70_ACT_VEB_TCAM_TX_PARIF_IN_FLD = 1,
+ CFA_P70_ACT_VEB_TCAM_TX_NUM_VTAGS_FLD = 2,
+ CFA_P70_ACT_VEB_TCAM_TX_DMAC_FLD = 3,
+ CFA_P70_ACT_VEB_TCAM_TX_OVID_FLD = 4,
+ CFA_P70_ACT_VEB_TCAM_TX_IVID_FLD = 5,
+ CFA_P70_ACT_VEB_TCAM_TX_MAX_FLD
+};
+
+/**
+ * RX Fields (Source Knockout Mode):
+ */
+enum cfa_p70_act_veb_tcam_rx_flds {
+ CFA_P70_ACT_VEB_TCAM_RX_VALID_FLD = 0,
+ CFA_P70_ACT_VEB_TCAM_RX_SPARE_FLD = 1,
+ CFA_P70_ACT_VEB_TCAM_RX_PADDING_FLD = 2,
+ CFA_P70_ACT_VEB_TCAM_RX_UNICAST_FLD = 3,
+ CFA_P70_ACT_VEB_TCAM_RX_MULTICAST_FLD = 4,
+ CFA_P70_ACT_VEB_TCAM_RX_BROADCAST_FLD = 5,
+ CFA_P70_ACT_VEB_TCAM_RX_PFID_FLD = 6,
+ CFA_P70_ACT_VEB_TCAM_RX_VFID_FLD = 7,
+ CFA_P70_ACT_VEB_TCAM_RX_SMAC_FLD = 8,
+ CFA_P70_ACT_VEB_TCAM_RX_MAX_FLD
+};
+
+/**
+ * Action Feature Chaining TCAM.
+ */
+enum cfa_p70_act_fc_tcam_flds {
+ CFA_P70_ACT_FC_TCAM_FC_VALID_FLD = 0,
+ CFA_P70_ACT_FC_TCAM_FC_RSVD_FLD = 1,
+ CFA_P70_ACT_FC_TCAM_FC_METADATA_FLD = 2,
+ CFA_P70_ACT_FC_TCAM_MAX_FLD
+};
+
+/**
+ * Feature Chaining TCAM Remap Table Fields:
+ */
+enum cfa_p70_act_fc_rmp_dr_flds {
+ CFA_P70_ACT_FC_RMP_DR_METADATA_FLD = 0,
+ CFA_P70_ACT_FC_RMP_DR_METAMASK_FLD = 1,
+ CFA_P70_ACT_FC_RMP_DR_L2_FUNC_FLD = 2,
+ CFA_P70_ACT_FC_RMP_DR_MAX_FLD
+};
+
+/**
+ * Profile Input Lookup Table Memory Fields:
+ */
+enum cfa_p70_prof_ilt_dr_flds {
+ CFA_P70_PROF_ILT_DR_ILT_META_EN_FLD = 0,
+ CFA_P70_PROF_ILT_DR_META_PROF_FLD = 1,
+ CFA_P70_PROF_ILT_DR_METADATA_FLD = 2,
+ CFA_P70_PROF_ILT_DR_PARIF_FLD = 3,
+ CFA_P70_PROF_ILT_DR_L2_FUNC_FLD = 4,
+ CFA_P70_PROF_ILT_DR_EN_BD_META_FLD = 5,
+ CFA_P70_PROF_ILT_DR_EN_BD_ACTION_FLD = 6,
+ CFA_P70_PROF_ILT_DR_EN_ILT_DEST_FLD = 7,
+ CFA_P70_PROF_ILT_DR_ILT_FWD_OP_FLD = 8,
+ CFA_P70_PROF_ILT_DR_ILT_ACT_HINT_FLD = 9,
+ CFA_P70_PROF_ILT_DR_ILT_SCOPE_FLD = 10,
+ CFA_P70_PROF_ILT_DR_ILT_ACT_REC_PTR_FLD = 11,
+ CFA_P70_PROF_ILT_DR_ILT_DESTINATION_FLD = 12,
+ CFA_P70_PROF_ILT_DR_MAX_FLD
+};
+
+/**
+ * Profile Lookup TCAM Remap Table Fields:
+ */
+enum cfa_p70_prof_profile_rmp_dr_flds {
+ CFA_P70_PROF_PROFILE_RMP_DR_PL_BYP_LKUP_EN_FLD = 0,
+ CFA_P70_PROF_PROFILE_RMP_DR_EM_SEARCH_EN_FLD = 1,
+ CFA_P70_PROF_PROFILE_RMP_DR_EM_PROFILE_ID_FLD = 2,
+ CFA_P70_PROF_PROFILE_RMP_DR_EM_KEY_ID_FLD = 3,
+ CFA_P70_PROF_PROFILE_RMP_DR_EM_SCOPE_FLD = 4,
+ CFA_P70_PROF_PROFILE_RMP_DR_TCAM_SEARCH_EN_FLD = 5,
+ CFA_P70_PROF_PROFILE_RMP_DR_TCAM_PROFILE_ID_FLD = 6,
+ CFA_P70_PROF_PROFILE_RMP_DR_TCAM_KEY_ID_FLD = 7,
+ CFA_P70_PROF_PROFILE_RMP_DR_TCAM_SCOPE_FLD = 8,
+ CFA_P70_PROF_PROFILE_RMP_DR_MAX_FLD
+};
+
+/**
+ * PROF_PROFILE_RMP_DR_BYP
+ */
+enum cfa_p70_prof_profile_rmp_dr_byp_flds {
+ CFA_P70_PROF_PROFILE_RMP_DR_BYP_PL_BYP_LKUP_EN_FLD = 0,
+ CFA_P70_PROF_PROFILE_RMP_DR_BYP_RESERVED_FLD = 1,
+ CFA_P70_PROF_PROFILE_RMP_DR_BYP_BYPASS_OP_FLD = 2,
+ CFA_P70_PROF_PROFILE_RMP_DR_BYP_PL_ACT_HINT_FLD = 3,
+ CFA_P70_PROF_PROFILE_RMP_DR_BYP_PL_SCOPE_FLD = 4,
+ CFA_P70_PROF_PROFILE_RMP_DR_BYP_PL_ACT_REC_PTR_FLD = 5,
+ CFA_P70_PROF_PROFILE_RMP_DR_BYP_MAX_FLD
+};
+
+/**
+ * VNIC-SVIF Properties Table Fields: TX SVIF Properties Table
+ */
+enum cfa_p70_act_vspt_dr_tx_flds {
+ CFA_P70_ACT_VSPT_DR_TX_TPID_AS_CTL_FLD = 0,
+ CFA_P70_ACT_VSPT_DR_TX_ALWD_TPID_FLD = 1,
+ CFA_P70_ACT_VSPT_DR_TX_DFLT_TPID_FLD = 2,
+ CFA_P70_ACT_VSPT_DR_TX_PRI_AS_CTL_FLD = 3,
+ CFA_P70_ACT_VSPT_DR_TX_ALWD_PRI_FLD = 4,
+ CFA_P70_ACT_VSPT_DR_TX_DFLT_PRI_FLD = 5,
+ CFA_P70_ACT_VSPT_DR_TX_MIR_FLD = 6,
+ CFA_P70_ACT_VSPT_DR_TX_MAX_FLD
+};
+
+/**
+ * RX VNIC Properties Table
+ */
+enum cfa_p70_act_vspt_dr_rx_flds {
+ CFA_P70_ACT_VSPT_DR_RX_RSVD_FLD = 0,
+ CFA_P70_ACT_VSPT_DR_RX_METAFMT_FLD = 1,
+ CFA_P70_ACT_VSPT_DR_RX_FID_FLD = 2,
+ CFA_P70_ACT_VSPT_DR_RX_MIR_FLD = 3,
+ CFA_P70_ACT_VSPT_DR_RX_MAX_FLD
+};
+
+/**
+ * LAG ID Balance Table Fields:
+ */
+enum cfa_p70_act_lbt_dr_flds {
+ CFA_P70_ACT_LBT_DR_DST_BMP_FLD = 0,
+ CFA_P70_ACT_LBT_DR_MAX_FLD
+};
+
+/**
+ * L2 Context Lookup Remap Table Fields:
+ */
+enum cfa_p70_prof_l2_ctxt_rmp_dr_flds {
+ CFA_P70_PROF_L2_CTXT_RMP_DR_PRSV_PARIF_FLD = 0,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_PARIF_FLD = 1,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_PRSV_L2IP_CTXT_FLD = 2,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_CTXT_FLD = 3,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_PRSV_PROF_FUNC_FLD = 4,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_PROF_FUNC_FLD = 5,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_CTXT_OPCODE_FLD = 6,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_META_ENB_FLD = 7,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_META_FLD = 8,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_ACT_ENB_FLD = 9,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_ACT_DATA_FLD = 10,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_RFS_ENB_FLD = 11,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_RFS_DATA_FLD = 12,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_DEST_ENB_FLD = 13,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_DEST_DATA_FLD = 14,
+ CFA_P70_PROF_L2_CTXT_RMP_DR_MAX_FLD
+};
+
+/**
+ * Multi Field Register.
+ */
+enum cfa_p70_act_fc_tcam_result_flds {
+ CFA_P70_ACT_FC_TCAM_RESULT_SEARCH_RESULT_FLD = 0,
+ CFA_P70_ACT_FC_TCAM_RESULT_UNUSED_0_FLD = 1,
+ CFA_P70_ACT_FC_TCAM_RESULT_SEARCH_HIT_FLD = 2,
+ CFA_P70_ACT_FC_TCAM_RESULT_MAX_FLD
+};
+
+/**
+ * Multi Field Register.
+ */
+enum cfa_p70_act_mirror_flds {
+ CFA_P70_ACT_MIRROR_UNUSED_0_FLD = 0,
+ CFA_P70_ACT_MIRROR_RELATIVE_FLD = 1,
+ CFA_P70_ACT_MIRROR_HINT_FLD = 2,
+ CFA_P70_ACT_MIRROR_SAMP_FLD = 3,
+ CFA_P70_ACT_MIRROR_TRUNC_FLD = 4,
+ CFA_P70_ACT_MIRROR_IGN_DROP_FLD = 5,
+ CFA_P70_ACT_MIRROR_MODE_FLD = 6,
+ CFA_P70_ACT_MIRROR_COND_FLD = 7,
+ CFA_P70_ACT_MIRROR_AR_PTR_FLD = 8,
+ CFA_P70_ACT_MIRROR_SAMP_CFG_FLD = 9,
+ CFA_P70_ACT_MIRROR_MAX_FLD
+};
+
+/**
+ * WC LREC Lookup Record
+ */
+enum cfa_p70_wc_lrec_flds {
+ CFA_P70_WC_LREC_METADATA_FLD = 0,
+ CFA_P70_WC_LREC_META_PROF_FLD = 1,
+ CFA_P70_WC_LREC_PROF_FUNC_FLD = 2,
+ CFA_P70_WC_LREC_RECYCLE_DEST_FLD = 3,
+ CFA_P70_WC_LREC_FC_PTR_FLD = 4,
+ CFA_P70_WC_LREC_FC_TYPE_FLD = 5,
+ CFA_P70_WC_LREC_FC_OP_FLD = 6,
+ CFA_P70_WC_LREC_PATHS_M1_FLD = 7,
+ CFA_P70_WC_LREC_ACT_REC_SIZE_FLD = 8,
+ CFA_P70_WC_LREC_RING_TABLE_IDX_FLD = 9,
+ CFA_P70_WC_LREC_DESTINATION_FLD = 10,
+ CFA_P70_WC_LREC_ACT_REC_PTR_FLD = 11,
+ CFA_P70_WC_LREC_ACT_HINT_FLD = 12,
+ CFA_P70_WC_LREC_STRENGTH_FLD = 13,
+ CFA_P70_WC_LREC_OPCODE_FLD = 14,
+ CFA_P70_WC_LREC_EPOCH1_FLD = 15,
+ CFA_P70_WC_LREC_EPOCH0_FLD = 16,
+ CFA_P70_WC_LREC_REC_SIZE_FLD = 17,
+ CFA_P70_WC_LREC_VALID_FLD = 18,
+ CFA_P70_WC_LREC_MAX_FLD
+};
+
+/**
+ * EM LREC Lookup Record
+ */
+enum cfa_p70_em_lrec_flds {
+ CFA_P70_EM_LREC_RANGE_IDX_FLD = 0,
+ CFA_P70_EM_LREC_RANGE_PROFILE_FLD = 1,
+ CFA_P70_EM_LREC_CREC_TIMER_VALUE_FLD = 2,
+ CFA_P70_EM_LREC_CREC_STATE_FLD = 3,
+ CFA_P70_EM_LREC_CREC_TCP_MSB_OPP_INIT_FLD = 4,
+ CFA_P70_EM_LREC_CREC_TCP_MSB_OPP_FLD = 5,
+ CFA_P70_EM_LREC_CREC_TCP_MSB_LOC_FLD = 6,
+ CFA_P70_EM_LREC_CREC_TCP_WIN_FLD = 7,
+ CFA_P70_EM_LREC_CREC_TCP_UPDT_EN_FLD = 8,
+ CFA_P70_EM_LREC_CREC_TCP_DIR_FLD = 9,
+ CFA_P70_EM_LREC_METADATA_FLD = 10,
+ CFA_P70_EM_LREC_PROF_FUNC_FLD = 11,
+ CFA_P70_EM_LREC_META_PROF_FLD = 12,
+ CFA_P70_EM_LREC_RECYCLE_DEST_FLD = 13,
+ CFA_P70_EM_LREC_FC_PTR_FLD = 14,
+ CFA_P70_EM_LREC_FC_TYPE_FLD = 15,
+ CFA_P70_EM_LREC_FC_OP_FLD = 16,
+ CFA_P70_EM_LREC_PATHS_M1_FLD = 17,
+ CFA_P70_EM_LREC_ACT_REC_SIZE_FLD = 18,
+ CFA_P70_EM_LREC_RING_TABLE_IDX_FLD = 19,
+ CFA_P70_EM_LREC_DESTINATION_FLD = 20,
+ CFA_P70_EM_LREC_ACT_REC_PTR_FLD = 21,
+ CFA_P70_EM_LREC_ACT_HINT_FLD = 22,
+ CFA_P70_EM_LREC_STRENGTH_FLD = 23,
+ CFA_P70_EM_LREC_OPCODE_FLD = 24,
+ CFA_P70_EM_LREC_EPOCH1_FLD = 25,
+ CFA_P70_EM_LREC_EPOCH0_FLD = 26,
+ CFA_P70_EM_LREC_REC_SIZE_FLD = 27,
+ CFA_P70_EM_LREC_VALID_FLD = 28,
+ CFA_P70_EM_LREC_MAX_FLD
+};
+
+/**
+ * EM Lookup Bucket Format
+ */
+enum cfa_p70_em_bucket_flds {
+ CFA_P70_EM_BUCKET_BIN0_ENTRY_FLD = 0,
+ CFA_P70_EM_BUCKET_BIN0_HASH_MSBS_FLD = 1,
+ CFA_P70_EM_BUCKET_BIN1_ENTRY_FLD = 2,
+ CFA_P70_EM_BUCKET_BIN1_HASH_MSBS_FLD = 3,
+ CFA_P70_EM_BUCKET_BIN2_ENTRY_FLD = 4,
+ CFA_P70_EM_BUCKET_BIN2_HASH_MSBS_FLD = 5,
+ CFA_P70_EM_BUCKET_BIN3_ENTRY_FLD = 6,
+ CFA_P70_EM_BUCKET_BIN3_HASH_MSBS_FLD = 7,
+ CFA_P70_EM_BUCKET_BIN4_ENTRY_FLD = 8,
+ CFA_P70_EM_BUCKET_BIN4_HASH_MSBS_FLD = 9,
+ CFA_P70_EM_BUCKET_BIN5_ENTRY_FLD = 10,
+ CFA_P70_EM_BUCKET_BIN5_HASH_MSBS_FLD = 11,
+ CFA_P70_EM_BUCKET_CHAIN_POINTER_FLD = 12,
+ CFA_P70_EM_BUCKET_CHAIN_VALID_FLD = 13,
+ CFA_P70_EM_BUCKET_MAX_FLD
+};
+
+/**
+ * Compact Action Record. The compact action record uses relative
+ * pointers to access needed data. This keeps the compact action record
+ * down to 64b.
+ */
+enum cfa_p70_compact_action_flds {
+ CFA_P70_COMPACT_ACTION_TYPE_FLD = 0,
+ CFA_P70_COMPACT_ACTION_DROP_FLD = 1,
+ CFA_P70_COMPACT_ACTION_VLAN_DELETE_FLD = 2,
+ CFA_P70_COMPACT_ACTION_DEST_FLD = 3,
+ CFA_P70_COMPACT_ACTION_DEST_OP_FLD = 4,
+ CFA_P70_COMPACT_ACTION_DECAP_FLD = 5,
+ CFA_P70_COMPACT_ACTION_MIRRORING_FLD = 6,
+ CFA_P70_COMPACT_ACTION_METER_PTR_FLD = 7,
+ CFA_P70_COMPACT_ACTION_STAT0_OFF_FLD = 8,
+ CFA_P70_COMPACT_ACTION_STAT0_OP_FLD = 9,
+ CFA_P70_COMPACT_ACTION_STAT0_CTR_TYPE_FLD = 10,
+ CFA_P70_COMPACT_ACTION_MOD_OFF_FLD = 11,
+ CFA_P70_COMPACT_ACTION_ENC_OFF_FLD = 12,
+ CFA_P70_COMPACT_ACTION_SRC_OFF_FLD = 13,
+ CFA_P70_COMPACT_ACTION_UNUSED_0_FLD = 14,
+ CFA_P70_COMPACT_ACTION_MAX_FLD
+};
+
+/**
+ * Full Action Record. The full action record uses full pointers to
+ * access needed data. It also allows access to all the action features.
+ * The Full Action record is 192b.
+ */
+enum cfa_p70_full_action_flds {
+ CFA_P70_FULL_ACTION_TYPE_FLD = 0,
+ CFA_P70_FULL_ACTION_DROP_FLD = 1,
+ CFA_P70_FULL_ACTION_VLAN_DELETE_FLD = 2,
+ CFA_P70_FULL_ACTION_DEST_FLD = 3,
+ CFA_P70_FULL_ACTION_DEST_OP_FLD = 4,
+ CFA_P70_FULL_ACTION_DECAP_FLD = 5,
+ CFA_P70_FULL_ACTION_MIRRORING_FLD = 6,
+ CFA_P70_FULL_ACTION_METER_PTR_FLD = 7,
+ CFA_P70_FULL_ACTION_STAT0_PTR_FLD = 8,
+ CFA_P70_FULL_ACTION_STAT0_OP_FLD = 9,
+ CFA_P70_FULL_ACTION_STAT0_CTR_TYPE_FLD = 10,
+ CFA_P70_FULL_ACTION_STAT1_PTR_FLD = 11,
+ CFA_P70_FULL_ACTION_STAT1_OP_FLD = 12,
+ CFA_P70_FULL_ACTION_STAT1_CTR_TYPE_FLD = 13,
+ CFA_P70_FULL_ACTION_MOD_PTR_FLD = 14,
+ CFA_P70_FULL_ACTION_ENC_PTR_FLD = 15,
+ CFA_P70_FULL_ACTION_SRC_PTR_FLD = 16,
+ CFA_P70_FULL_ACTION_UNUSED_0_FLD = 17,
+ CFA_P70_FULL_ACTION_MAX_FLD
+};
+
+/**
+ * Multicast Group Action Record. This action is used to send the packet
+ * to multiple destinations. The MGC Action record is 256b.
+ */
+enum cfa_p70_mcg_action_flds {
+ CFA_P70_MCG_ACTION_TYPE_FLD = 0,
+ CFA_P70_MCG_ACTION_SRC_KO_EN_FLD = 1,
+ CFA_P70_MCG_ACTION_UNUSED_0_FLD = 2,
+ CFA_P70_MCG_ACTION_NEXT_PTR_FLD = 3,
+ CFA_P70_MCG_ACTION_PTR0_ACT_HINT_FLD = 4,
+ CFA_P70_MCG_ACTION_PTR0_ACT_REC_PTR_FLD = 5,
+ CFA_P70_MCG_ACTION_PTR1_ACT_HINT_FLD = 6,
+ CFA_P70_MCG_ACTION_PTR1_ACT_REC_PTR_FLD = 7,
+ CFA_P70_MCG_ACTION_PTR2_ACT_HINT_FLD = 8,
+ CFA_P70_MCG_ACTION_PTR2_ACT_REC_PTR_FLD = 9,
+ CFA_P70_MCG_ACTION_PTR3_ACT_HINT_FLD = 10,
+ CFA_P70_MCG_ACTION_PTR3_ACT_REC_PTR_FLD = 11,
+ CFA_P70_MCG_ACTION_PTR4_ACT_HINT_FLD = 12,
+ CFA_P70_MCG_ACTION_PTR4_ACT_REC_PTR_FLD = 13,
+ CFA_P70_MCG_ACTION_PTR5_ACT_HINT_FLD = 14,
+ CFA_P70_MCG_ACTION_PTR5_ACT_REC_PTR_FLD = 15,
+ CFA_P70_MCG_ACTION_PTR6_ACT_HINT_FLD = 16,
+ CFA_P70_MCG_ACTION_PTR6_ACT_REC_PTR_FLD = 17,
+ CFA_P70_MCG_ACTION_PTR7_ACT_HINT_FLD = 18,
+ CFA_P70_MCG_ACTION_PTR7_ACT_REC_PTR_FLD = 19,
+ CFA_P70_MCG_ACTION_MAX_FLD
+};
+
+/**
+ * Multicast Group Action Record. This action is used to send the packet
+ * to multiple destinations. The MGC Action record is 256b.
+ */
+enum cfa_p70_mcg_subseq_action_flds {
+ CFA_P70_MCG_SUBSEQ_ACTION_TYPE_FLD = 0,
+ CFA_P70_MCG_SUBSEQ_ACTION_UNUSED_0_FLD = 1,
+ CFA_P70_MCG_SUBSEQ_ACTION_NEXT_PTR_FLD = 2,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR0_ACT_HINT_FLD = 3,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR0_ACT_REC_PTR_FLD = 4,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR1_ACT_HINT_FLD = 5,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR1_ACT_REC_PTR_FLD = 6,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR2_ACT_HINT_FLD = 7,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR2_ACT_REC_PTR_FLD = 8,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR3_ACT_HINT_FLD = 9,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR3_ACT_REC_PTR_FLD = 10,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR4_ACT_HINT_FLD = 11,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR4_ACT_REC_PTR_FLD = 12,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR5_ACT_HINT_FLD = 13,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR5_ACT_REC_PTR_FLD = 14,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR6_ACT_HINT_FLD = 15,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR6_ACT_REC_PTR_FLD = 16,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR7_ACT_HINT_FLD = 17,
+ CFA_P70_MCG_SUBSEQ_ACTION_PTR7_ACT_REC_PTR_FLD = 18,
+ CFA_P70_MCG_SUBSEQ_ACTION_MAX_FLD
+};
+
+/**
+ * Action Meter Formats
+ */
+enum cfa_p70_meters_flds {
+ CFA_P70_METERS_BKT_C_FLD = 0,
+ CFA_P70_METERS_BKT_E_FLD = 1,
+ CFA_P70_METERS_FLAGS_MTR_VAL_FLD = 2,
+ CFA_P70_METERS_FLAGS_ECN_RMP_EN_FLD = 3,
+ CFA_P70_METERS_FLAGS_CF_FLD = 4,
+ CFA_P70_METERS_FLAGS_PM_FLD = 5,
+ CFA_P70_METERS_FLAGS_RFC2698_FLD = 6,
+ CFA_P70_METERS_FLAGS_CBSM_FLD = 7,
+ CFA_P70_METERS_FLAGS_EBSM_FLD = 8,
+ CFA_P70_METERS_FLAGS_CBND_FLD = 9,
+ CFA_P70_METERS_FLAGS_EBND_FLD = 10,
+ CFA_P70_METERS_CBS_FLD = 11,
+ CFA_P70_METERS_EBS_FLD = 12,
+ CFA_P70_METERS_CIR_FLD = 13,
+ CFA_P70_METERS_EIR_FLD = 14,
+ CFA_P70_METERS_PROTECTION_SCOPE_FLD = 15,
+ CFA_P70_METERS_PROTECTION_RSVD_FLD = 16,
+ CFA_P70_METERS_PROTECTION_ENABLE_FLD = 17,
+ CFA_P70_METERS_MAX_FLD
+};
+
+/**
+ * Enumeration for fkb
+ */
+enum cfa_p70_fkb_flds {
+ CFA_P70_FKB_PROF_ID_FLD = 0,
+ CFA_P70_FKB_L2CTXT_FLD = 1,
+ CFA_P70_FKB_L2FUNC_FLD = 2,
+ CFA_P70_FKB_PARIF_FLD = 3,
+ CFA_P70_FKB_SPIF_FLD = 4,
+ CFA_P70_FKB_SVIF_FLD = 5,
+ CFA_P70_FKB_LCOS_FLD = 6,
+ CFA_P70_FKB_META_HI_FLD = 7,
+ CFA_P70_FKB_META_LO_FLD = 8,
+ CFA_P70_FKB_RCYC_CNT_FLD = 9,
+ CFA_P70_FKB_LOOPBACK_FLD = 10,
+ CFA_P70_FKB_OTL2_TYPE_FLD = 11,
+ CFA_P70_FKB_OTL2_DMAC_FLD = 12,
+ CFA_P70_FKB_OTL2_SMAC_FLD = 13,
+ CFA_P70_FKB_OTL2_DT_FLD = 14,
+ CFA_P70_FKB_OTL2_SA_FLD = 15,
+ CFA_P70_FKB_OTL2_NVT_FLD = 16,
+ CFA_P70_FKB_OTL2_OVP_FLD = 17,
+ CFA_P70_FKB_OTL2_OVD_FLD = 18,
+ CFA_P70_FKB_OTL2_OVV_FLD = 19,
+ CFA_P70_FKB_OTL2_OVT_FLD = 20,
+ CFA_P70_FKB_OTL2_IVP_FLD = 21,
+ CFA_P70_FKB_OTL2_IVD_FLD = 22,
+ CFA_P70_FKB_OTL2_IVV_FLD = 23,
+ CFA_P70_FKB_OTL2_IVT_FLD = 24,
+ CFA_P70_FKB_OTL2_ETYPE_FLD = 25,
+ CFA_P70_FKB_OTL3_TYPE_FLD = 26,
+ CFA_P70_FKB_OTL3_SIP3_FLD = 27,
+ CFA_P70_FKB_OTL3_SIP2_FLD = 28,
+ CFA_P70_FKB_OTL3_SIP1_FLD = 29,
+ CFA_P70_FKB_OTL3_SIP0_FLD = 30,
+ CFA_P70_FKB_OTL3_DIP3_FLD = 31,
+ CFA_P70_FKB_OTL3_DIP2_FLD = 32,
+ CFA_P70_FKB_OTL3_DIP1_FLD = 33,
+ CFA_P70_FKB_OTL3_DIP0_FLD = 34,
+ CFA_P70_FKB_OTL3_TTL_FLD = 35,
+ CFA_P70_FKB_OTL3_PROT_FLD = 36,
+ CFA_P70_FKB_OTL3_FID_FLD = 37,
+ CFA_P70_FKB_OTL3_QOS_FLD = 38,
+ CFA_P70_FKB_OTL3_IEH_NONEXT_FLD = 39,
+ CFA_P70_FKB_OTL3_IEH_SEP_FLD = 40,
+ CFA_P70_FKB_OTL3_IEH_AUTH_FLD = 41,
+ CFA_P70_FKB_OTL3_IEH_DEST_FLD = 42,
+ CFA_P70_FKB_OTL3_IEH_FRAG_FLD = 43,
+ CFA_P70_FKB_OTL3_IEH_RTHDR_FLD = 44,
+ CFA_P70_FKB_OTL3_IEH_HOP_FLD = 45,
+ CFA_P70_FKB_OTL3_IEH_1FRAG_FLD = 46,
+ CFA_P70_FKB_OTL3_DF_FLD = 47,
+ CFA_P70_FKB_OTL3_L3ERR_FLD = 48,
+ CFA_P70_FKB_OTL4_TYPE_FLD = 49,
+ CFA_P70_FKB_OTL4_SRC_FLD = 50,
+ CFA_P70_FKB_OTL4_DST_FLD = 51,
+ CFA_P70_FKB_OTL4_FLAGS_FLD = 52,
+ CFA_P70_FKB_OTL4_SEQ_FLD = 53,
+ CFA_P70_FKB_OTL4_PA_FLD = 54,
+ CFA_P70_FKB_OTL4_OPT_FLD = 55,
+ CFA_P70_FKB_OTL4_TCPTS_FLD = 56,
+ CFA_P70_FKB_OTL4_ERR_FLD = 57,
+ CFA_P70_FKB_OT_TYPE_FLD = 58,
+ CFA_P70_FKB_OT_FLAGS_FLD = 59,
+ CFA_P70_FKB_OT_IDS_FLD = 60,
+ CFA_P70_FKB_OT_ID_FLD = 61,
+ CFA_P70_FKB_OT_CTXTS_FLD = 62,
+ CFA_P70_FKB_OT_CTXT_FLD = 63,
+ CFA_P70_FKB_OT_QOS_FLD = 64,
+ CFA_P70_FKB_OT_ERR_FLD = 65,
+ CFA_P70_FKB_TL2_TYPE_FLD = 66,
+ CFA_P70_FKB_TL2_DMAC_FLD = 67,
+ CFA_P70_FKB_TL2_SMAC_FLD = 68,
+ CFA_P70_FKB_TL2_DT_FLD = 69,
+ CFA_P70_FKB_TL2_SA_FLD = 70,
+ CFA_P70_FKB_TL2_NVT_FLD = 71,
+ CFA_P70_FKB_TL2_OVP_FLD = 72,
+ CFA_P70_FKB_TL2_OVD_FLD = 73,
+ CFA_P70_FKB_TL2_OVV_FLD = 74,
+ CFA_P70_FKB_TL2_OVT_FLD = 75,
+ CFA_P70_FKB_TL2_IVP_FLD = 76,
+ CFA_P70_FKB_TL2_IVD_FLD = 77,
+ CFA_P70_FKB_TL2_IVV_FLD = 78,
+ CFA_P70_FKB_TL2_IVT_FLD = 79,
+ CFA_P70_FKB_TL2_ETYPE_FLD = 80,
+ CFA_P70_FKB_TL3_TYPE_FLD = 81,
+ CFA_P70_FKB_TL3_SIP3_FLD = 82,
+ CFA_P70_FKB_TL3_SIP2_FLD = 83,
+ CFA_P70_FKB_TL3_SIP1_FLD = 84,
+ CFA_P70_FKB_TL3_SIP0_FLD = 85,
+ CFA_P70_FKB_TL3_DIP3_FLD = 86,
+ CFA_P70_FKB_TL3_DIP2_FLD = 87,
+ CFA_P70_FKB_TL3_DIP1_FLD = 88,
+ CFA_P70_FKB_TL3_DIP0_FLD = 89,
+ CFA_P70_FKB_TL3_TTL_FLD = 90,
+ CFA_P70_FKB_TL3_PROT_FLD = 91,
+ CFA_P70_FKB_TL3_FID_FLD = 92,
+ CFA_P70_FKB_TL3_QOS_FLD = 93,
+ CFA_P70_FKB_TL3_IEH_NONEXT_FLD = 94,
+ CFA_P70_FKB_TL3_IEH_SEP_FLD = 95,
+ CFA_P70_FKB_TL3_IEH_AUTH_FLD = 96,
+ CFA_P70_FKB_TL3_IEH_DEST_FLD = 97,
+ CFA_P70_FKB_TL3_IEH_FRAG_FLD = 98,
+ CFA_P70_FKB_TL3_IEH_RTHDR_FLD = 99,
+ CFA_P70_FKB_TL3_IEH_HOP_FLD = 100,
+ CFA_P70_FKB_TL3_IEH_1FRAG_FLD = 101,
+ CFA_P70_FKB_TL3_DF_FLD = 102,
+ CFA_P70_FKB_TL3_L3ERR_FLD = 103,
+ CFA_P70_FKB_TL4_TYPE_FLD = 104,
+ CFA_P70_FKB_TL4_SRC_FLD = 105,
+ CFA_P70_FKB_TL4_DST_FLD = 106,
+ CFA_P70_FKB_TL4_FLAGS_FLD = 107,
+ CFA_P70_FKB_TL4_SEQ_FLD = 108,
+ CFA_P70_FKB_TL4_PA_FLD = 109,
+ CFA_P70_FKB_TL4_OPT_FLD = 110,
+ CFA_P70_FKB_TL4_TCPTS_FLD = 111,
+ CFA_P70_FKB_TL4_ERR_FLD = 112,
+ CFA_P70_FKB_T_TYPE_FLD = 113,
+ CFA_P70_FKB_T_FLAGS_FLD = 114,
+ CFA_P70_FKB_T_IDS_FLD = 115,
+ CFA_P70_FKB_T_ID_FLD = 116,
+ CFA_P70_FKB_T_CTXTS_FLD = 117,
+ CFA_P70_FKB_T_CTXT_FLD = 118,
+ CFA_P70_FKB_T_QOS_FLD = 119,
+ CFA_P70_FKB_T_ERR_FLD = 120,
+ CFA_P70_FKB_L2_TYPE_FLD = 121,
+ CFA_P70_FKB_L2_DMAC_FLD = 122,
+ CFA_P70_FKB_L2_SMAC_FLD = 123,
+ CFA_P70_FKB_L2_DT_FLD = 124,
+ CFA_P70_FKB_L2_SA_FLD = 125,
+ CFA_P70_FKB_L2_NVT_FLD = 126,
+ CFA_P70_FKB_L2_OVP_FLD = 127,
+ CFA_P70_FKB_L2_OVD_FLD = 128,
+ CFA_P70_FKB_L2_OVV_FLD = 129,
+ CFA_P70_FKB_L2_OVT_FLD = 130,
+ CFA_P70_FKB_L2_IVP_FLD = 131,
+ CFA_P70_FKB_L2_IVD_FLD = 132,
+ CFA_P70_FKB_L2_IVV_FLD = 133,
+ CFA_P70_FKB_L2_IVT_FLD = 134,
+ CFA_P70_FKB_L2_ETYPE_FLD = 135,
+ CFA_P70_FKB_L3_TYPE_FLD = 136,
+ CFA_P70_FKB_L3_SIP3_FLD = 137,
+ CFA_P70_FKB_L3_SIP2_FLD = 138,
+ CFA_P70_FKB_L3_SIP1_FLD = 139,
+ CFA_P70_FKB_L3_SIP0_FLD = 140,
+ CFA_P70_FKB_L3_DIP3_FLD = 141,
+ CFA_P70_FKB_L3_DIP2_FLD = 142,
+ CFA_P70_FKB_L3_DIP1_FLD = 143,
+ CFA_P70_FKB_L3_DIP0_FLD = 144,
+ CFA_P70_FKB_L3_TTL_FLD = 145,
+ CFA_P70_FKB_L3_PROT_FLD = 146,
+ CFA_P70_FKB_L3_FID_FLD = 147,
+ CFA_P70_FKB_L3_QOS_FLD = 148,
+ CFA_P70_FKB_L3_IEH_NONEXT_FLD = 149,
+ CFA_P70_FKB_L3_IEH_SEP_FLD = 150,
+ CFA_P70_FKB_L3_IEH_AUTH_FLD = 151,
+ CFA_P70_FKB_L3_IEH_DEST_FLD = 152,
+ CFA_P70_FKB_L3_IEH_FRAG_FLD = 153,
+ CFA_P70_FKB_L3_IEH_RTHDR_FLD = 154,
+ CFA_P70_FKB_L3_IEH_HOP_FLD = 155,
+ CFA_P70_FKB_L3_IEH_1FRAG_FLD = 156,
+ CFA_P70_FKB_L3_DF_FLD = 157,
+ CFA_P70_FKB_L3_L3ERR_FLD = 158,
+ CFA_P70_FKB_L4_TYPE_FLD = 159,
+ CFA_P70_FKB_L4_SRC_FLD = 160,
+ CFA_P70_FKB_L4_DST_FLD = 161,
+ CFA_P70_FKB_L4_FLAGS_FLD = 162,
+ CFA_P70_FKB_L4_SEQ_FLD = 163,
+ CFA_P70_FKB_L4_ACK_FLD = 164,
+ CFA_P70_FKB_L4_WIN_FLD = 165,
+ CFA_P70_FKB_L4_PA_FLD = 166,
+ CFA_P70_FKB_L4_OPT_FLD = 167,
+ CFA_P70_FKB_L4_TCPTS_FLD = 168,
+ CFA_P70_FKB_L4_TSVAL_FLD = 169,
+ CFA_P70_FKB_L4_TXECR_FLD = 170,
+ CFA_P70_FKB_L4_ERR_FLD = 171,
+ CFA_P70_FKB_MAX_FLD = 172,
+};
+
+/**
+ * Enumeration for wc tcam fkb
+ */
+enum cfa_p70_wc_tcam_fkb_flds {
+ CFA_P70_WC_TCAM_FKB_PROF_ID_FLD = 0,
+ CFA_P70_WC_TCAM_FKB_L2CTXT_FLD = 1,
+ CFA_P70_WC_TCAM_FKB_L2FUNC_FLD = 2,
+ CFA_P70_WC_TCAM_FKB_PARIF_FLD = 3,
+ CFA_P70_WC_TCAM_FKB_SPIF_FLD = 4,
+ CFA_P70_WC_TCAM_FKB_SVIF_FLD = 5,
+ CFA_P70_WC_TCAM_FKB_LCOS_FLD = 6,
+ CFA_P70_WC_TCAM_FKB_META_HI_FLD = 7,
+ CFA_P70_WC_TCAM_FKB_META_LO_FLD = 8,
+ CFA_P70_WC_TCAM_FKB_RCYC_CNT_FLD = 9,
+ CFA_P70_WC_TCAM_FKB_LOOPBACK_FLD = 10,
+ CFA_P70_WC_TCAM_FKB_OTL2_TYPE_FLD = 11,
+ CFA_P70_WC_TCAM_FKB_OTL2_DMAC_FLD = 12,
+ CFA_P70_WC_TCAM_FKB_OTL2_SMAC_FLD = 13,
+ CFA_P70_WC_TCAM_FKB_OTL2_DT_FLD = 14,
+ CFA_P70_WC_TCAM_FKB_OTL2_SA_FLD = 15,
+ CFA_P70_WC_TCAM_FKB_OTL2_NVT_FLD = 16,
+ CFA_P70_WC_TCAM_FKB_OTL2_OVP_FLD = 17,
+ CFA_P70_WC_TCAM_FKB_OTL2_OVD_FLD = 18,
+ CFA_P70_WC_TCAM_FKB_OTL2_OVV_FLD = 19,
+ CFA_P70_WC_TCAM_FKB_OTL2_OVT_FLD = 20,
+ CFA_P70_WC_TCAM_FKB_OTL2_IVP_FLD = 21,
+ CFA_P70_WC_TCAM_FKB_OTL2_IVD_FLD = 22,
+ CFA_P70_WC_TCAM_FKB_OTL2_IVV_FLD = 23,
+ CFA_P70_WC_TCAM_FKB_OTL2_IVT_FLD = 24,
+ CFA_P70_WC_TCAM_FKB_OTL2_ETYPE_FLD = 25,
+ CFA_P70_WC_TCAM_FKB_OTL3_TYPE_FLD = 26,
+ CFA_P70_WC_TCAM_FKB_OTL3_SIP3_FLD = 27,
+ CFA_P70_WC_TCAM_FKB_OTL3_SIP2_FLD = 28,
+ CFA_P70_WC_TCAM_FKB_OTL3_SIP1_FLD = 29,
+ CFA_P70_WC_TCAM_FKB_OTL3_SIP0_FLD = 30,
+ CFA_P70_WC_TCAM_FKB_OTL3_DIP3_FLD = 31,
+ CFA_P70_WC_TCAM_FKB_OTL3_DIP2_FLD = 32,
+ CFA_P70_WC_TCAM_FKB_OTL3_DIP1_FLD = 33,
+ CFA_P70_WC_TCAM_FKB_OTL3_DIP0_FLD = 34,
+ CFA_P70_WC_TCAM_FKB_OTL3_TTL_FLD = 35,
+ CFA_P70_WC_TCAM_FKB_OTL3_PROT_FLD = 36,
+ CFA_P70_WC_TCAM_FKB_OTL3_FID_FLD = 37,
+ CFA_P70_WC_TCAM_FKB_OTL3_QOS_FLD = 38,
+ CFA_P70_WC_TCAM_FKB_OTL3_IEH_NONEXT_FLD = 39,
+ CFA_P70_WC_TCAM_FKB_OTL3_IEH_SEP_FLD = 40,
+ CFA_P70_WC_TCAM_FKB_OTL3_IEH_AUTH_FLD = 41,
+ CFA_P70_WC_TCAM_FKB_OTL3_IEH_DEST_FLD = 42,
+ CFA_P70_WC_TCAM_FKB_OTL3_IEH_FRAG_FLD = 43,
+ CFA_P70_WC_TCAM_FKB_OTL3_IEH_RTHDR_FLD = 44,
+ CFA_P70_WC_TCAM_FKB_OTL3_IEH_HOP_FLD = 45,
+ CFA_P70_WC_TCAM_FKB_OTL3_IEH_1FRAG_FLD = 46,
+ CFA_P70_WC_TCAM_FKB_OTL3_DF_FLD = 47,
+ CFA_P70_WC_TCAM_FKB_OTL3_L3ERR_FLD = 48,
+ CFA_P70_WC_TCAM_FKB_OTL4_TYPE_FLD = 49,
+ CFA_P70_WC_TCAM_FKB_OTL4_SRC_FLD = 50,
+ CFA_P70_WC_TCAM_FKB_OTL4_DST_FLD = 51,
+ CFA_P70_WC_TCAM_FKB_OTL4_FLAGS_FLD = 52,
+ CFA_P70_WC_TCAM_FKB_OTL4_SEQ_FLD = 53,
+ CFA_P70_WC_TCAM_FKB_OTL4_PA_FLD = 54,
+ CFA_P70_WC_TCAM_FKB_OTL4_OPT_FLD = 55,
+ CFA_P70_WC_TCAM_FKB_OTL4_TCPTS_FLD = 56,
+ CFA_P70_WC_TCAM_FKB_OTL4_ERR_FLD = 57,
+ CFA_P70_WC_TCAM_FKB_OT_TYPE_FLD = 58,
+ CFA_P70_WC_TCAM_FKB_OT_FLAGS_FLD = 59,
+ CFA_P70_WC_TCAM_FKB_OT_IDS_FLD = 60,
+ CFA_P70_WC_TCAM_FKB_OT_ID_FLD = 61,
+ CFA_P70_WC_TCAM_FKB_OT_CTXTS_FLD = 62,
+ CFA_P70_WC_TCAM_FKB_OT_CTXT_FLD = 63,
+ CFA_P70_WC_TCAM_FKB_OT_QOS_FLD = 64,
+ CFA_P70_WC_TCAM_FKB_OT_ERR_FLD = 65,
+ CFA_P70_WC_TCAM_FKB_TL2_TYPE_FLD = 66,
+ CFA_P70_WC_TCAM_FKB_TL2_DMAC_FLD = 67,
+ CFA_P70_WC_TCAM_FKB_TL2_SMAC_FLD = 68,
+ CFA_P70_WC_TCAM_FKB_TL2_DT_FLD = 69,
+ CFA_P70_WC_TCAM_FKB_TL2_SA_FLD = 70,
+ CFA_P70_WC_TCAM_FKB_TL2_NVT_FLD = 71,
+ CFA_P70_WC_TCAM_FKB_TL2_OVP_FLD = 72,
+ CFA_P70_WC_TCAM_FKB_TL2_OVD_FLD = 73,
+ CFA_P70_WC_TCAM_FKB_TL2_OVV_FLD = 74,
+ CFA_P70_WC_TCAM_FKB_TL2_OVT_FLD = 75,
+ CFA_P70_WC_TCAM_FKB_TL2_IVP_FLD = 76,
+ CFA_P70_WC_TCAM_FKB_TL2_IVD_FLD = 77,
+ CFA_P70_WC_TCAM_FKB_TL2_IVV_FLD = 78,
+ CFA_P70_WC_TCAM_FKB_TL2_IVT_FLD = 79,
+ CFA_P70_WC_TCAM_FKB_TL2_ETYPE_FLD = 80,
+ CFA_P70_WC_TCAM_FKB_TL3_TYPE_FLD = 81,
+ CFA_P70_WC_TCAM_FKB_TL3_SIP3_FLD = 82,
+ CFA_P70_WC_TCAM_FKB_TL3_SIP2_FLD = 83,
+ CFA_P70_WC_TCAM_FKB_TL3_SIP1_FLD = 84,
+ CFA_P70_WC_TCAM_FKB_TL3_SIP0_FLD = 85,
+ CFA_P70_WC_TCAM_FKB_TL3_DIP3_FLD = 86,
+ CFA_P70_WC_TCAM_FKB_TL3_DIP2_FLD = 87,
+ CFA_P70_WC_TCAM_FKB_TL3_DIP1_FLD = 88,
+ CFA_P70_WC_TCAM_FKB_TL3_DIP0_FLD = 89,
+ CFA_P70_WC_TCAM_FKB_TL3_TTL_FLD = 90,
+ CFA_P70_WC_TCAM_FKB_TL3_PROT_FLD = 91,
+ CFA_P70_WC_TCAM_FKB_TL3_FID_FLD = 92,
+ CFA_P70_WC_TCAM_FKB_TL3_QOS_FLD = 93,
+ CFA_P70_WC_TCAM_FKB_TL3_IEH_NONEXT_FLD = 94,
+ CFA_P70_WC_TCAM_FKB_TL3_IEH_SEP_FLD = 95,
+ CFA_P70_WC_TCAM_FKB_TL3_IEH_AUTH_FLD = 96,
+ CFA_P70_WC_TCAM_FKB_TL3_IEH_DEST_FLD = 97,
+ CFA_P70_WC_TCAM_FKB_TL3_IEH_FRAG_FLD = 98,
+ CFA_P70_WC_TCAM_FKB_TL3_IEH_RTHDR_FLD = 99,
+ CFA_P70_WC_TCAM_FKB_TL3_IEH_HOP_FLD = 100,
+ CFA_P70_WC_TCAM_FKB_TL3_IEH_1FRAG_FLD = 101,
+ CFA_P70_WC_TCAM_FKB_TL3_DF_FLD = 102,
+ CFA_P70_WC_TCAM_FKB_TL3_L3ERR_FLD = 103,
+ CFA_P70_WC_TCAM_FKB_TL4_TYPE_FLD = 104,
+ CFA_P70_WC_TCAM_FKB_TL4_SRC_FLD = 105,
+ CFA_P70_WC_TCAM_FKB_TL4_DST_FLD = 106,
+ CFA_P70_WC_TCAM_FKB_TL4_FLAGS_FLD = 107,
+ CFA_P70_WC_TCAM_FKB_TL4_SEQ_FLD = 108,
+ CFA_P70_WC_TCAM_FKB_TL4_PA_FLD = 109,
+ CFA_P70_WC_TCAM_FKB_TL4_OPT_FLD = 110,
+ CFA_P70_WC_TCAM_FKB_TL4_TCPTS_FLD = 111,
+ CFA_P70_WC_TCAM_FKB_TL4_ERR_FLD = 112,
+ CFA_P70_WC_TCAM_FKB_T_TYPE_FLD = 113,
+ CFA_P70_WC_TCAM_FKB_T_FLAGS_FLD = 114,
+ CFA_P70_WC_TCAM_FKB_T_IDS_FLD = 115,
+ CFA_P70_WC_TCAM_FKB_T_ID_FLD = 116,
+ CFA_P70_WC_TCAM_FKB_T_CTXTS_FLD = 117,
+ CFA_P70_WC_TCAM_FKB_T_CTXT_FLD = 118,
+ CFA_P70_WC_TCAM_FKB_T_QOS_FLD = 119,
+ CFA_P70_WC_TCAM_FKB_T_ERR_FLD = 120,
+ CFA_P70_WC_TCAM_FKB_L2_TYPE_FLD = 121,
+ CFA_P70_WC_TCAM_FKB_L2_DMAC_FLD = 122,
+ CFA_P70_WC_TCAM_FKB_L2_SMAC_FLD = 123,
+ CFA_P70_WC_TCAM_FKB_L2_DT_FLD = 124,
+ CFA_P70_WC_TCAM_FKB_L2_SA_FLD = 125,
+ CFA_P70_WC_TCAM_FKB_L2_NVT_FLD = 126,
+ CFA_P70_WC_TCAM_FKB_L2_OVP_FLD = 127,
+ CFA_P70_WC_TCAM_FKB_L2_OVD_FLD = 128,
+ CFA_P70_WC_TCAM_FKB_L2_OVV_FLD = 129,
+ CFA_P70_WC_TCAM_FKB_L2_OVT_FLD = 130,
+ CFA_P70_WC_TCAM_FKB_L2_IVP_FLD = 131,
+ CFA_P70_WC_TCAM_FKB_L2_IVD_FLD = 132,
+ CFA_P70_WC_TCAM_FKB_L2_IVV_FLD = 133,
+ CFA_P70_WC_TCAM_FKB_L2_IVT_FLD = 134,
+ CFA_P70_WC_TCAM_FKB_L2_ETYPE_FLD = 135,
+ CFA_P70_WC_TCAM_FKB_L3_TYPE_FLD = 136,
+ CFA_P70_WC_TCAM_FKB_L3_SIP3_FLD = 137,
+ CFA_P70_WC_TCAM_FKB_L3_SIP2_FLD = 138,
+ CFA_P70_WC_TCAM_FKB_L3_SIP1_FLD = 139,
+ CFA_P70_WC_TCAM_FKB_L3_SIP0_FLD = 140,
+ CFA_P70_WC_TCAM_FKB_L3_DIP3_FLD = 141,
+ CFA_P70_WC_TCAM_FKB_L3_DIP2_FLD = 142,
+ CFA_P70_WC_TCAM_FKB_L3_DIP1_FLD = 143,
+ CFA_P70_WC_TCAM_FKB_L3_DIP0_FLD = 144,
+ CFA_P70_WC_TCAM_FKB_L3_TTL_FLD = 145,
+ CFA_P70_WC_TCAM_FKB_L3_PROT_FLD = 146,
+ CFA_P70_WC_TCAM_FKB_L3_FID_FLD = 147,
+ CFA_P70_WC_TCAM_FKB_L3_QOS_FLD = 148,
+ CFA_P70_WC_TCAM_FKB_L3_IEH_NONEXT_FLD = 149,
+ CFA_P70_WC_TCAM_FKB_L3_IEH_SEP_FLD = 150,
+ CFA_P70_WC_TCAM_FKB_L3_IEH_AUTH_FLD = 151,
+ CFA_P70_WC_TCAM_FKB_L3_IEH_DEST_FLD = 152,
+ CFA_P70_WC_TCAM_FKB_L3_IEH_FRAG_FLD = 153,
+ CFA_P70_WC_TCAM_FKB_L3_IEH_RTHDR_FLD = 154,
+ CFA_P70_WC_TCAM_FKB_L3_IEH_HOP_FLD = 155,
+ CFA_P70_WC_TCAM_FKB_L3_IEH_1FRAG_FLD = 156,
+ CFA_P70_WC_TCAM_FKB_L3_DF_FLD = 157,
+ CFA_P70_WC_TCAM_FKB_L3_L3ERR_FLD = 158,
+ CFA_P70_WC_TCAM_FKB_L4_TYPE_FLD = 159,
+ CFA_P70_WC_TCAM_FKB_L4_SRC_FLD = 160,
+ CFA_P70_WC_TCAM_FKB_L4_DST_FLD = 161,
+ CFA_P70_WC_TCAM_FKB_L4_FLAGS_FLD = 162,
+ CFA_P70_WC_TCAM_FKB_L4_SEQ_FLD = 163,
+ CFA_P70_WC_TCAM_FKB_L4_ACK_FLD = 164,
+ CFA_P70_WC_TCAM_FKB_L4_WIN_FLD = 165,
+ CFA_P70_WC_TCAM_FKB_L4_PA_FLD = 166,
+ CFA_P70_WC_TCAM_FKB_L4_OPT_FLD = 167,
+ CFA_P70_WC_TCAM_FKB_L4_TCPTS_FLD = 168,
+ CFA_P70_WC_TCAM_FKB_L4_TSVAL_FLD = 169,
+ CFA_P70_WC_TCAM_FKB_L4_TXECR_FLD = 170,
+ CFA_P70_WC_TCAM_FKB_L4_ERR_FLD = 171,
+ CFA_P70_WC_TCAM_FKB_MAX_FLD = 172,
+};
+
+/**
+ * Enumeration for em fkb
+ */
+enum cfa_p70_em_fkb_flds {
+ CFA_P70_EM_FKB_PROF_ID_FLD = 0,
+ CFA_P70_EM_FKB_L2CTXT_FLD = 1,
+ CFA_P70_EM_FKB_L2FUNC_FLD = 2,
+ CFA_P70_EM_FKB_PARIF_FLD = 3,
+ CFA_P70_EM_FKB_SPIF_FLD = 4,
+ CFA_P70_EM_FKB_SVIF_FLD = 5,
+ CFA_P70_EM_FKB_LCOS_FLD = 6,
+ CFA_P70_EM_FKB_META_HI_FLD = 7,
+ CFA_P70_EM_FKB_META_LO_FLD = 8,
+ CFA_P70_EM_FKB_RCYC_CNT_FLD = 9,
+ CFA_P70_EM_FKB_LOOPBACK_FLD = 10,
+ CFA_P70_EM_FKB_OTL2_TYPE_FLD = 11,
+ CFA_P70_EM_FKB_OTL2_DMAC_FLD = 12,
+ CFA_P70_EM_FKB_OTL2_SMAC_FLD = 13,
+ CFA_P70_EM_FKB_OTL2_DT_FLD = 14,
+ CFA_P70_EM_FKB_OTL2_SA_FLD = 15,
+ CFA_P70_EM_FKB_OTL2_NVT_FLD = 16,
+ CFA_P70_EM_FKB_OTL2_OVP_FLD = 17,
+ CFA_P70_EM_FKB_OTL2_OVD_FLD = 18,
+ CFA_P70_EM_FKB_OTL2_OVV_FLD = 19,
+ CFA_P70_EM_FKB_OTL2_OVT_FLD = 20,
+ CFA_P70_EM_FKB_OTL2_IVP_FLD = 21,
+ CFA_P70_EM_FKB_OTL2_IVD_FLD = 22,
+ CFA_P70_EM_FKB_OTL2_IVV_FLD = 23,
+ CFA_P70_EM_FKB_OTL2_IVT_FLD = 24,
+ CFA_P70_EM_FKB_OTL2_ETYPE_FLD = 25,
+ CFA_P70_EM_FKB_OTL3_TYPE_FLD = 26,
+ CFA_P70_EM_FKB_OTL3_SIP3_FLD = 27,
+ CFA_P70_EM_FKB_OTL3_SIP2_FLD = 28,
+ CFA_P70_EM_FKB_OTL3_SIP1_FLD = 29,
+ CFA_P70_EM_FKB_OTL3_SIP0_FLD = 30,
+ CFA_P70_EM_FKB_OTL3_DIP3_FLD = 31,
+ CFA_P70_EM_FKB_OTL3_DIP2_FLD = 32,
+ CFA_P70_EM_FKB_OTL3_DIP1_FLD = 33,
+ CFA_P70_EM_FKB_OTL3_DIP0_FLD = 34,
+ CFA_P70_EM_FKB_OTL3_TTL_FLD = 35,
+ CFA_P70_EM_FKB_OTL3_PROT_FLD = 36,
+ CFA_P70_EM_FKB_OTL3_FID_FLD = 37,
+ CFA_P70_EM_FKB_OTL3_QOS_FLD = 38,
+ CFA_P70_EM_FKB_OTL3_IEH_NONEXT_FLD = 39,
+ CFA_P70_EM_FKB_OTL3_IEH_SEP_FLD = 40,
+ CFA_P70_EM_FKB_OTL3_IEH_AUTH_FLD = 41,
+ CFA_P70_EM_FKB_OTL3_IEH_DEST_FLD = 42,
+ CFA_P70_EM_FKB_OTL3_IEH_FRAG_FLD = 43,
+ CFA_P70_EM_FKB_OTL3_IEH_RTHDR_FLD = 44,
+ CFA_P70_EM_FKB_OTL3_IEH_HOP_FLD = 45,
+ CFA_P70_EM_FKB_OTL3_IEH_1FRAG_FLD = 46,
+ CFA_P70_EM_FKB_OTL3_DF_FLD = 47,
+ CFA_P70_EM_FKB_OTL3_L3ERR_FLD = 48,
+ CFA_P70_EM_FKB_OTL4_TYPE_FLD = 49,
+ CFA_P70_EM_FKB_OTL4_SRC_FLD = 50,
+ CFA_P70_EM_FKB_OTL4_DST_FLD = 51,
+ CFA_P70_EM_FKB_OTL4_FLAGS_FLD = 52,
+ CFA_P70_EM_FKB_OTL4_SEQ_FLD = 53,
+ CFA_P70_EM_FKB_OTL4_PA_FLD = 54,
+ CFA_P70_EM_FKB_OTL4_OPT_FLD = 55,
+ CFA_P70_EM_FKB_OTL4_TCPTS_FLD = 56,
+ CFA_P70_EM_FKB_OTL4_ERR_FLD = 57,
+ CFA_P70_EM_FKB_OT_TYPE_FLD = 58,
+ CFA_P70_EM_FKB_OT_FLAGS_FLD = 59,
+ CFA_P70_EM_FKB_OT_IDS_FLD = 60,
+ CFA_P70_EM_FKB_OT_ID_FLD = 61,
+ CFA_P70_EM_FKB_OT_CTXTS_FLD = 62,
+ CFA_P70_EM_FKB_OT_CTXT_FLD = 63,
+ CFA_P70_EM_FKB_OT_QOS_FLD = 64,
+ CFA_P70_EM_FKB_OT_ERR_FLD = 65,
+ CFA_P70_EM_FKB_TL2_TYPE_FLD = 66,
+ CFA_P70_EM_FKB_TL2_DMAC_FLD = 67,
+ CFA_P70_EM_FKB_TL2_SMAC_FLD = 68,
+ CFA_P70_EM_FKB_TL2_DT_FLD = 69,
+ CFA_P70_EM_FKB_TL2_SA_FLD = 70,
+ CFA_P70_EM_FKB_TL2_NVT_FLD = 71,
+ CFA_P70_EM_FKB_TL2_OVP_FLD = 72,
+ CFA_P70_EM_FKB_TL2_OVD_FLD = 73,
+ CFA_P70_EM_FKB_TL2_OVV_FLD = 74,
+ CFA_P70_EM_FKB_TL2_OVT_FLD = 75,
+ CFA_P70_EM_FKB_TL2_IVP_FLD = 76,
+ CFA_P70_EM_FKB_TL2_IVD_FLD = 77,
+ CFA_P70_EM_FKB_TL2_IVV_FLD = 78,
+ CFA_P70_EM_FKB_TL2_IVT_FLD = 79,
+ CFA_P70_EM_FKB_TL2_ETYPE_FLD = 80,
+ CFA_P70_EM_FKB_TL3_TYPE_FLD = 81,
+ CFA_P70_EM_FKB_TL3_SIP3_FLD = 82,
+ CFA_P70_EM_FKB_TL3_SIP2_FLD = 83,
+ CFA_P70_EM_FKB_TL3_SIP1_FLD = 84,
+ CFA_P70_EM_FKB_TL3_SIP0_FLD = 85,
+ CFA_P70_EM_FKB_TL3_DIP3_FLD = 86,
+ CFA_P70_EM_FKB_TL3_DIP2_FLD = 87,
+ CFA_P70_EM_FKB_TL3_DIP1_FLD = 88,
+ CFA_P70_EM_FKB_TL3_DIP0_FLD = 89,
+ CFA_P70_EM_FKB_TL3_TTL_FLD = 90,
+ CFA_P70_EM_FKB_TL3_PROT_FLD = 91,
+ CFA_P70_EM_FKB_TL3_FID_FLD = 92,
+ CFA_P70_EM_FKB_TL3_QOS_FLD = 93,
+ CFA_P70_EM_FKB_TL3_IEH_NONEXT_FLD = 94,
+ CFA_P70_EM_FKB_TL3_IEH_SEP_FLD = 95,
+ CFA_P70_EM_FKB_TL3_IEH_AUTH_FLD = 96,
+ CFA_P70_EM_FKB_TL3_IEH_DEST_FLD = 97,
+ CFA_P70_EM_FKB_TL3_IEH_FRAG_FLD = 98,
+ CFA_P70_EM_FKB_TL3_IEH_RTHDR_FLD = 99,
+ CFA_P70_EM_FKB_TL3_IEH_HOP_FLD = 100,
+ CFA_P70_EM_FKB_TL3_IEH_1FRAG_FLD = 101,
+ CFA_P70_EM_FKB_TL3_DF_FLD = 102,
+ CFA_P70_EM_FKB_TL3_L3ERR_FLD = 103,
+ CFA_P70_EM_FKB_TL4_TYPE_FLD = 104,
+ CFA_P70_EM_FKB_TL4_SRC_FLD = 105,
+ CFA_P70_EM_FKB_TL4_DST_FLD = 106,
+ CFA_P70_EM_FKB_TL4_FLAGS_FLD = 107,
+ CFA_P70_EM_FKB_TL4_SEQ_FLD = 108,
+ CFA_P70_EM_FKB_TL4_PA_FLD = 109,
+ CFA_P70_EM_FKB_TL4_OPT_FLD = 110,
+ CFA_P70_EM_FKB_TL4_TCPTS_FLD = 111,
+ CFA_P70_EM_FKB_TL4_ERR_FLD = 112,
+ CFA_P70_EM_FKB_T_TYPE_FLD = 113,
+ CFA_P70_EM_FKB_T_FLAGS_FLD = 114,
+ CFA_P70_EM_FKB_T_IDS_FLD = 115,
+ CFA_P70_EM_FKB_T_ID_FLD = 116,
+ CFA_P70_EM_FKB_T_CTXTS_FLD = 117,
+ CFA_P70_EM_FKB_T_CTXT_FLD = 118,
+ CFA_P70_EM_FKB_T_QOS_FLD = 119,
+ CFA_P70_EM_FKB_T_ERR_FLD = 120,
+ CFA_P70_EM_FKB_L2_TYPE_FLD = 121,
+ CFA_P70_EM_FKB_L2_DMAC_FLD = 122,
+ CFA_P70_EM_FKB_L2_SMAC_FLD = 123,
+ CFA_P70_EM_FKB_L2_DT_FLD = 124,
+ CFA_P70_EM_FKB_L2_SA_FLD = 125,
+ CFA_P70_EM_FKB_L2_NVT_FLD = 126,
+ CFA_P70_EM_FKB_L2_OVP_FLD = 127,
+ CFA_P70_EM_FKB_L2_OVD_FLD = 128,
+ CFA_P70_EM_FKB_L2_OVV_FLD = 129,
+ CFA_P70_EM_FKB_L2_OVT_FLD = 130,
+ CFA_P70_EM_FKB_L2_IVP_FLD = 131,
+ CFA_P70_EM_FKB_L2_IVD_FLD = 132,
+ CFA_P70_EM_FKB_L2_IVV_FLD = 133,
+ CFA_P70_EM_FKB_L2_IVT_FLD = 134,
+ CFA_P70_EM_FKB_L2_ETYPE_FLD = 135,
+ CFA_P70_EM_FKB_L3_TYPE_FLD = 136,
+ CFA_P70_EM_FKB_L3_SIP3_FLD = 137,
+ CFA_P70_EM_FKB_L3_SIP2_FLD = 138,
+ CFA_P70_EM_FKB_L3_SIP1_FLD = 139,
+ CFA_P70_EM_FKB_L3_SIP0_FLD = 140,
+ CFA_P70_EM_FKB_L3_DIP3_FLD = 141,
+ CFA_P70_EM_FKB_L3_DIP2_FLD = 142,
+ CFA_P70_EM_FKB_L3_DIP1_FLD = 143,
+ CFA_P70_EM_FKB_L3_DIP0_FLD = 144,
+ CFA_P70_EM_FKB_L3_TTL_FLD = 145,
+ CFA_P70_EM_FKB_L3_PROT_FLD = 146,
+ CFA_P70_EM_FKB_L3_FID_FLD = 147,
+ CFA_P70_EM_FKB_L3_QOS_FLD = 148,
+ CFA_P70_EM_FKB_L3_IEH_NONEXT_FLD = 149,
+ CFA_P70_EM_FKB_L3_IEH_SEP_FLD = 150,
+ CFA_P70_EM_FKB_L3_IEH_AUTH_FLD = 151,
+ CFA_P70_EM_FKB_L3_IEH_DEST_FLD = 152,
+ CFA_P70_EM_FKB_L3_IEH_FRAG_FLD = 153,
+ CFA_P70_EM_FKB_L3_IEH_RTHDR_FLD = 154,
+ CFA_P70_EM_FKB_L3_IEH_HOP_FLD = 155,
+ CFA_P70_EM_FKB_L3_IEH_1FRAG_FLD = 156,
+ CFA_P70_EM_FKB_L3_DF_FLD = 157,
+ CFA_P70_EM_FKB_L3_L3ERR_FLD = 158,
+ CFA_P70_EM_FKB_L4_TYPE_FLD = 159,
+ CFA_P70_EM_FKB_L4_SRC_FLD = 160,
+ CFA_P70_EM_FKB_L4_DST_FLD = 161,
+ CFA_P70_EM_FKB_L4_FLAGS_FLD = 162,
+ CFA_P70_EM_FKB_L4_SEQ_FLD = 163,
+ CFA_P70_EM_FKB_L4_ACK_FLD = 164,
+ CFA_P70_EM_FKB_L4_WIN_FLD = 165,
+ CFA_P70_EM_FKB_L4_PA_FLD = 166,
+ CFA_P70_EM_FKB_L4_OPT_FLD = 167,
+ CFA_P70_EM_FKB_L4_TCPTS_FLD = 168,
+ CFA_P70_EM_FKB_L4_TSVAL_FLD = 169,
+ CFA_P70_EM_FKB_L4_TXECR_FLD = 170,
+ CFA_P70_EM_FKB_L4_ERR_FLD = 171,
+ CFA_P70_EM_FKB_MAX_FLD = 172,
+};
+
+/**
+ * Enumeration for em key layout
+ */
+enum cfa_p70_em_key_layout_flds {
+ CFA_P70_EM_KL_RANGE_IDX_FLD = 0,
+ CFA_P70_EM_KL_RANGE_PROFILE_FLD = 1,
+ CFA_P70_EM_KL_CREC_TIMER_VALUE_FLD = 2,
+ CFA_P70_EM_KL_CREC_STATE_FLD = 3,
+ CFA_P70_EM_KL_CREC_TCP_MSB_OPP_INIT_FLD = 4,
+ CFA_P70_EM_KL_CREC_TCP_MSB_OPP_FLD = 5,
+ CFA_P70_EM_KL_CREC_TCP_MSB_LOC_FLD = 6,
+ CFA_P70_EM_KL_CREC_TCP_WIN_FLD = 7,
+ CFA_P70_EM_KL_CREC_TCP_UPDT_EN_FLD = 8,
+ CFA_P70_EM_KL_CREC_TCP_DIR_FLD = 9,
+ CFA_P70_EM_KL_METADATA_FLD = 10,
+ CFA_P70_EM_KL_PROF_FUNC_FLD = 11,
+ CFA_P70_EM_KL_META_PROF_FLD = 12,
+ CFA_P70_EM_KL_RECYCLE_DEST_FLD = 13,
+ CFA_P70_EM_KL_FC_PTR_FLD = 14,
+ CFA_P70_EM_KL_FC_TYPE_FLD = 15,
+ CFA_P70_EM_KL_FC_OP_FLD = 16,
+ CFA_P70_EM_KL_PATHS_M1_FLD = 17,
+ CFA_P70_EM_KL_ACT_REC_SIZE_FLD = 18,
+ CFA_P70_EM_KL_RING_TABLE_IDX_FLD = 19,
+ CFA_P70_EM_KL_DESTINATION_FLD = 20,
+ CFA_P70_EM_KL_ACT_REC_PTR_FLD = 21,
+ CFA_P70_EM_KL_ACT_HINT_FLD = 22,
+ CFA_P70_EM_KL_STRENGTH_FLD = 23,
+ CFA_P70_EM_KL_OPCODE_FLD = 24,
+ CFA_P70_EM_KL_EPOCH1_FLD = 25,
+ CFA_P70_EM_KL_EPOCH0_FLD = 26,
+ CFA_P70_EM_KL_REC_SIZE_FLD = 27,
+ CFA_P70_EM_KL_VALID_FLD = 28,
+ CFA_P70_EM_KL_PROF_ID_FLD = 29,
+ CFA_P70_EM_KL_L2CTXT_FLD = 30,
+ CFA_P70_EM_KL_L2FUNC_FLD = 31,
+ CFA_P70_EM_KL_PARIF_FLD = 32,
+ CFA_P70_EM_KL_SPIF_FLD = 33,
+ CFA_P70_EM_KL_SVIF_FLD = 34,
+ CFA_P70_EM_KL_LCOS_FLD = 35,
+ CFA_P70_EM_KL_META_HI_FLD = 36,
+ CFA_P70_EM_KL_META_LO_FLD = 37,
+ CFA_P70_EM_KL_RCYC_CNT_FLD = 38,
+ CFA_P70_EM_KL_LOOPBACK_FLD = 39,
+ CFA_P70_EM_KL_OTL2_TYPE_FLD = 40,
+ CFA_P70_EM_KL_OTL2_DMAC_FLD = 41,
+ CFA_P70_EM_KL_OTL2_SMAC_FLD = 42,
+ CFA_P70_EM_KL_OTL2_DT_FLD = 43,
+ CFA_P70_EM_KL_OTL2_SA_FLD = 44,
+ CFA_P70_EM_KL_OTL2_NVT_FLD = 45,
+ CFA_P70_EM_KL_OTL2_OVP_FLD = 46,
+ CFA_P70_EM_KL_OTL2_OVD_FLD = 47,
+ CFA_P70_EM_KL_OTL2_OVV_FLD = 48,
+ CFA_P70_EM_KL_OTL2_OVT_FLD = 49,
+ CFA_P70_EM_KL_OTL2_IVP_FLD = 50,
+ CFA_P70_EM_KL_OTL2_IVD_FLD = 51,
+ CFA_P70_EM_KL_OTL2_IVV_FLD = 52,
+ CFA_P70_EM_KL_OTL2_IVT_FLD = 53,
+ CFA_P70_EM_KL_OTL2_ETYPE_FLD = 54,
+ CFA_P70_EM_KL_OTL3_TYPE_FLD = 55,
+ CFA_P70_EM_KL_OTL3_SIP3_FLD = 56,
+ CFA_P70_EM_KL_OTL3_SIP2_FLD = 57,
+ CFA_P70_EM_KL_OTL3_SIP1_FLD = 58,
+ CFA_P70_EM_KL_OTL3_SIP0_FLD = 59,
+ CFA_P70_EM_KL_OTL3_DIP3_FLD = 60,
+ CFA_P70_EM_KL_OTL3_DIP2_FLD = 61,
+ CFA_P70_EM_KL_OTL3_DIP1_FLD = 62,
+ CFA_P70_EM_KL_OTL3_DIP0_FLD = 63,
+ CFA_P70_EM_KL_OTL3_TTL_FLD = 64,
+ CFA_P70_EM_KL_OTL3_PROT_FLD = 65,
+ CFA_P70_EM_KL_OTL3_FID_FLD = 66,
+ CFA_P70_EM_KL_OTL3_QOS_FLD = 67,
+ CFA_P70_EM_KL_OTL3_IEH_NONEXT_FLD = 68,
+ CFA_P70_EM_KL_OTL3_IEH_SEP_FLD = 69,
+ CFA_P70_EM_KL_OTL3_IEH_AUTH_FLD = 70,
+ CFA_P70_EM_KL_OTL3_IEH_DEST_FLD = 71,
+ CFA_P70_EM_KL_OTL3_IEH_FRAG_FLD = 72,
+ CFA_P70_EM_KL_OTL3_IEH_RTHDR_FLD = 73,
+ CFA_P70_EM_KL_OTL3_IEH_HOP_FLD = 74,
+ CFA_P70_EM_KL_OTL3_IEH_1FRAG_FLD = 75,
+ CFA_P70_EM_KL_OTL3_DF_FLD = 76,
+ CFA_P70_EM_KL_OTL3_L3ERR_FLD = 77,
+ CFA_P70_EM_KL_OTL4_TYPE_FLD = 78,
+ CFA_P70_EM_KL_OTL4_SRC_FLD = 79,
+ CFA_P70_EM_KL_OTL4_DST_FLD = 80,
+ CFA_P70_EM_KL_OTL4_FLAGS_FLD = 81,
+ CFA_P70_EM_KL_OTL4_SEQ_FLD = 82,
+ CFA_P70_EM_KL_OTL4_PA_FLD = 83,
+ CFA_P70_EM_KL_OTL4_OPT_FLD = 84,
+ CFA_P70_EM_KL_OTL4_TCPTS_FLD = 85,
+ CFA_P70_EM_KL_OTL4_ERR_FLD = 86,
+ CFA_P70_EM_KL_OT_TYPE_FLD = 87,
+ CFA_P70_EM_KL_OT_FLAGS_FLD = 88,
+ CFA_P70_EM_KL_OT_IDS_FLD = 89,
+ CFA_P70_EM_KL_OT_ID_FLD = 90,
+ CFA_P70_EM_KL_OT_CTXTS_FLD = 91,
+ CFA_P70_EM_KL_OT_CTXT_FLD = 92,
+ CFA_P70_EM_KL_OT_QOS_FLD = 93,
+ CFA_P70_EM_KL_OT_ERR_FLD = 94,
+ CFA_P70_EM_KL_TL2_TYPE_FLD = 95,
+ CFA_P70_EM_KL_TL2_DMAC_FLD = 96,
+ CFA_P70_EM_KL_TL2_SMAC_FLD = 97,
+ CFA_P70_EM_KL_TL2_DT_FLD = 98,
+ CFA_P70_EM_KL_TL2_SA_FLD = 99,
+ CFA_P70_EM_KL_TL2_NVT_FLD = 100,
+ CFA_P70_EM_KL_TL2_OVP_FLD = 101,
+ CFA_P70_EM_KL_TL2_OVD_FLD = 102,
+ CFA_P70_EM_KL_TL2_OVV_FLD = 103,
+ CFA_P70_EM_KL_TL2_OVT_FLD = 104,
+ CFA_P70_EM_KL_TL2_IVP_FLD = 105,
+ CFA_P70_EM_KL_TL2_IVD_FLD = 106,
+ CFA_P70_EM_KL_TL2_IVV_FLD = 107,
+ CFA_P70_EM_KL_TL2_IVT_FLD = 108,
+ CFA_P70_EM_KL_TL2_ETYPE_FLD = 109,
+ CFA_P70_EM_KL_TL3_TYPE_FLD = 110,
+ CFA_P70_EM_KL_TL3_SIP3_FLD = 111,
+ CFA_P70_EM_KL_TL3_SIP2_FLD = 112,
+ CFA_P70_EM_KL_TL3_SIP1_FLD = 113,
+ CFA_P70_EM_KL_TL3_SIP0_FLD = 114,
+ CFA_P70_EM_KL_TL3_DIP3_FLD = 115,
+ CFA_P70_EM_KL_TL3_DIP2_FLD = 116,
+ CFA_P70_EM_KL_TL3_DIP1_FLD = 117,
+ CFA_P70_EM_KL_TL3_DIP0_FLD = 118,
+ CFA_P70_EM_KL_TL3_TTL_FLD = 119,
+ CFA_P70_EM_KL_TL3_PROT_FLD = 120,
+ CFA_P70_EM_KL_TL3_FID_FLD = 121,
+ CFA_P70_EM_KL_TL3_QOS_FLD = 122,
+ CFA_P70_EM_KL_TL3_IEH_NONEXT_FLD = 123,
+ CFA_P70_EM_KL_TL3_IEH_SEP_FLD = 124,
+ CFA_P70_EM_KL_TL3_IEH_AUTH_FLD = 125,
+ CFA_P70_EM_KL_TL3_IEH_DEST_FLD = 126,
+ CFA_P70_EM_KL_TL3_IEH_FRAG_FLD = 127,
+ CFA_P70_EM_KL_TL3_IEH_RTHDR_FLD = 128,
+ CFA_P70_EM_KL_TL3_IEH_HOP_FLD = 129,
+ CFA_P70_EM_KL_TL3_IEH_1FRAG_FLD = 130,
+ CFA_P70_EM_KL_TL3_DF_FLD = 131,
+ CFA_P70_EM_KL_TL3_L3ERR_FLD = 132,
+ CFA_P70_EM_KL_TL4_TYPE_FLD = 133,
+ CFA_P70_EM_KL_TL4_SRC_FLD = 134,
+ CFA_P70_EM_KL_TL4_DST_FLD = 135,
+ CFA_P70_EM_KL_TL4_FLAGS_FLD = 136,
+ CFA_P70_EM_KL_TL4_SEQ_FLD = 137,
+ CFA_P70_EM_KL_TL4_PA_FLD = 138,
+ CFA_P70_EM_KL_TL4_OPT_FLD = 139,
+ CFA_P70_EM_KL_TL4_TCPTS_FLD = 140,
+ CFA_P70_EM_KL_TL4_ERR_FLD = 141,
+ CFA_P70_EM_KL_T_TYPE_FLD = 142,
+ CFA_P70_EM_KL_T_FLAGS_FLD = 143,
+ CFA_P70_EM_KL_T_IDS_FLD = 144,
+ CFA_P70_EM_KL_T_ID_FLD = 145,
+ CFA_P70_EM_KL_T_CTXTS_FLD = 146,
+ CFA_P70_EM_KL_T_CTXT_FLD = 147,
+ CFA_P70_EM_KL_T_QOS_FLD = 148,
+ CFA_P70_EM_KL_T_ERR_FLD = 149,
+ CFA_P70_EM_KL_L2_TYPE_FLD = 150,
+ CFA_P70_EM_KL_L2_DMAC_FLD = 151,
+ CFA_P70_EM_KL_L2_SMAC_FLD = 152,
+ CFA_P70_EM_KL_L2_DT_FLD = 153,
+ CFA_P70_EM_KL_L2_SA_FLD = 154,
+ CFA_P70_EM_KL_L2_NVT_FLD = 155,
+ CFA_P70_EM_KL_L2_OVP_FLD = 156,
+ CFA_P70_EM_KL_L2_OVD_FLD = 157,
+ CFA_P70_EM_KL_L2_OVV_FLD = 158,
+ CFA_P70_EM_KL_L2_OVT_FLD = 159,
+ CFA_P70_EM_KL_L2_IVP_FLD = 160,
+ CFA_P70_EM_KL_L2_IVD_FLD = 161,
+ CFA_P70_EM_KL_L2_IVV_FLD = 162,
+ CFA_P70_EM_KL_L2_IVT_FLD = 163,
+ CFA_P70_EM_KL_L2_ETYPE_FLD = 164,
+ CFA_P70_EM_KL_L3_TYPE_FLD = 165,
+ CFA_P70_EM_KL_L3_SIP3_FLD = 166,
+ CFA_P70_EM_KL_L3_SIP2_FLD = 167,
+ CFA_P70_EM_KL_L3_SIP1_FLD = 168,
+ CFA_P70_EM_KL_L3_SIP0_FLD = 169,
+ CFA_P70_EM_KL_L3_DIP3_FLD = 170,
+ CFA_P70_EM_KL_L3_DIP2_FLD = 171,
+ CFA_P70_EM_KL_L3_DIP1_FLD = 172,
+ CFA_P70_EM_KL_L3_DIP0_FLD = 173,
+ CFA_P70_EM_KL_L3_TTL_FLD = 174,
+ CFA_P70_EM_KL_L3_PROT_FLD = 175,
+ CFA_P70_EM_KL_L3_FID_FLD = 176,
+ CFA_P70_EM_KL_L3_QOS_FLD = 177,
+ CFA_P70_EM_KL_L3_IEH_NONEXT_FLD = 178,
+ CFA_P70_EM_KL_L3_IEH_SEP_FLD = 179,
+ CFA_P70_EM_KL_L3_IEH_AUTH_FLD = 180,
+ CFA_P70_EM_KL_L3_IEH_DEST_FLD = 181,
+ CFA_P70_EM_KL_L3_IEH_FRAG_FLD = 182,
+ CFA_P70_EM_KL_L3_IEH_RTHDR_FLD = 183,
+ CFA_P70_EM_KL_L3_IEH_HOP_FLD = 184,
+ CFA_P70_EM_KL_L3_IEH_1FRAG_FLD = 185,
+ CFA_P70_EM_KL_L3_DF_FLD = 186,
+ CFA_P70_EM_KL_L3_L3ERR_FLD = 187,
+ CFA_P70_EM_KL_L4_TYPE_FLD = 188,
+ CFA_P70_EM_KL_L4_SRC_FLD = 189,
+ CFA_P70_EM_KL_L4_DST_FLD = 190,
+ CFA_P70_EM_KL_L4_FLAGS_FLD = 191,
+ CFA_P70_EM_KL_L4_SEQ_FLD = 192,
+ CFA_P70_EM_KL_L4_ACK_FLD = 193,
+ CFA_P70_EM_KL_L4_WIN_FLD = 194,
+ CFA_P70_EM_KL_L4_PA_FLD = 195,
+ CFA_P70_EM_KL_L4_OPT_FLD = 196,
+ CFA_P70_EM_KL_L4_TCPTS_FLD = 197,
+ CFA_P70_EM_KL_L4_TSVAL_FLD = 198,
+ CFA_P70_EM_KL_L4_TXECR_FLD = 199,
+ CFA_P70_EM_KL_L4_ERR_FLD = 200,
+ CFA_P70_EM_KEY_LAYOUT_MAX_FLD = 201,
+ CFA_P70_EM_KL_MAX_FLD = CFA_P70_EM_KEY_LAYOUT_MAX_FLD,
+};
+
+/**
+ * Enumeration for action
+ */
+enum cfa_p70_action_flds {
+ CFA_P70_ACT_TYPE_FLD = 0,
+ CFA_P70_ACT_DROP_FLD = 1,
+ CFA_P70_ACT_VLAN_DELETE_FLD = 2,
+ CFA_P70_ACT_DEST_FLD = 3,
+ CFA_P70_ACT_DEST_OP_FLD = 4,
+ CFA_P70_ACT_DECAP_FLD = 5,
+ CFA_P70_ACT_MIRRORING_FLD = 6,
+ CFA_P70_ACT_METER_PTR_FLD = 7,
+ CFA_P70_ACT_STAT0_OFF_FLD = 8,
+ CFA_P70_ACT_STAT0_OP_FLD = 9,
+ CFA_P70_ACT_STAT0_CTR_TYPE_FLD = 10,
+ CFA_P70_ACT_MOD_OFF_FLD = 11,
+ CFA_P70_ACT_ENC_OFF_FLD = 12,
+ CFA_P70_ACT_SRC_OFF_FLD = 13,
+ CFA_P70_ACT_COMPACT_RSVD_0_FLD = 14,
+ CFA_P70_ACT_STAT0_PTR_FLD = 15,
+ CFA_P70_ACT_STAT1_PTR_FLD = 16,
+ CFA_P70_ACT_STAT1_OP_FLD = 17,
+ CFA_P70_ACT_STAT1_CTR_TYPE_FLD = 18,
+ CFA_P70_ACT_MOD_PTR_FLD = 19,
+ CFA_P70_ACT_ENC_PTR_FLD = 20,
+ CFA_P70_ACT_SRC_PTR_FLD = 21,
+ CFA_P70_ACT_FULL_RSVD_0_FLD = 22,
+ CFA_P70_ACT_SRC_KO_EN_FLD = 23,
+ CFA_P70_ACT_MCG_RSVD_0_FLD = 24,
+ CFA_P70_ACT_NEXT_PTR_FLD = 25,
+ CFA_P70_ACT_PTR0_ACT_HINT_FLD = 26,
+ CFA_P70_ACT_PTR0_ACT_REC_PTR_FLD = 27,
+ CFA_P70_ACT_PTR1_ACT_HINT_FLD = 28,
+ CFA_P70_ACT_PTR1_ACT_REC_PTR_FLD = 29,
+ CFA_P70_ACT_PTR2_ACT_HINT_FLD = 30,
+ CFA_P70_ACT_PTR2_ACT_REC_PTR_FLD = 31,
+ CFA_P70_ACT_PTR3_ACT_HINT_FLD = 32,
+ CFA_P70_ACT_PTR3_ACT_REC_PTR_FLD = 33,
+ CFA_P70_ACT_PTR4_ACT_HINT_FLD = 34,
+ CFA_P70_ACT_PTR4_ACT_REC_PTR_FLD = 35,
+ CFA_P70_ACT_PTR5_ACT_HINT_FLD = 36,
+ CFA_P70_ACT_PTR5_ACT_REC_PTR_FLD = 37,
+ CFA_P70_ACT_PTR6_ACT_HINT_FLD = 38,
+ CFA_P70_ACT_PTR6_ACT_REC_PTR_FLD = 39,
+ CFA_P70_ACT_PTR7_ACT_HINT_FLD = 40,
+ CFA_P70_ACT_PTR7_ACT_REC_PTR_FLD = 41,
+ CFA_P70_ACT_MCG_SUBSEQ_RSVD_0_FLD = 42,
+ CFA_P70_ACT_MOD_MODIFY_ACT_HDR_FLD = 43,
+ CFA_P70_ACT_MOD_MD_UPDT_DATA_FLD = 44,
+ CFA_P70_ACT_MOD_MD_UPDT_PROF_FLD = 45,
+ CFA_P70_ACT_MOD_MD_UPDT_OP_FLD = 46,
+ CFA_P70_ACT_MOD_MD_UPDT_RSVD_0_FLD = 47,
+ CFA_P70_ACT_MOD_MD_UPDT_TOP_FLD = 48,
+ CFA_P70_ACT_MOD_RM_OVLAN_FLD = 49,
+ CFA_P70_ACT_MOD_RM_IVLAN_FLD = 50,
+ CFA_P70_ACT_MOD_RPL_IVLAN_FLD = 51,
+ CFA_P70_ACT_MOD_RPL_OVLAN_FLD = 52,
+ CFA_P70_ACT_MOD_TTL_UPDT_OP_FLD = 53,
+ CFA_P70_ACT_MOD_TTL_UPDT_ALT_VID_FLD = 54,
+ CFA_P70_ACT_MOD_TTL_UPDT_ALT_PFID_FLD = 55,
+ CFA_P70_ACT_MOD_TTL_UPDT_TOP_FLD = 56,
+ CFA_P70_ACT_MOD_TNL_MODIFY_DEL_FLD = 57,
+ CFA_P70_ACT_MOD_TNL_MODIFY_8B_NEW_PROT_FLD = 58,
+ CFA_P70_ACT_MOD_TNL_MODIFY_8B_EXIST_PROT_FLD = 59,
+ CFA_P70_ACT_MOD_TNL_MODIFY_8B_VEC_FLD = 60,
+ CFA_P70_ACT_MOD_TNL_MODIFY_8B_TOP_FLD = 61,
+ CFA_P70_ACT_MOD_TNL_MODIFY_16B_NEW_PROT_FLD = 62,
+ CFA_P70_ACT_MOD_TNL_MODIFY_16B_EXIST_PROT_FLD = 63,
+ CFA_P70_ACT_MOD_TNL_MODIFY_16B_VEC_FLD = 64,
+ CFA_P70_ACT_MOD_TNL_MODIFY_16B_TOP_FLD = 65,
+ CFA_P70_ACT_MOD_UPDT_FIELD_DATA0_FLD = 66,
+ CFA_P70_ACT_MOD_UPDT_FIELD_VEC_RSVD_FLD = 67,
+ CFA_P70_ACT_MOD_UPDT_FIELD_VEC_KID_FLD = 68,
+ CFA_P70_ACT_MOD_UPDT_FIELD_TOP_FLD = 69,
+ CFA_P70_ACT_MOD_SMAC_FLD = 70,
+ CFA_P70_ACT_MOD_DMAC_FLD = 71,
+ CFA_P70_ACT_MOD_SIPV6_FLD = 72,
+ CFA_P70_ACT_MOD_DIPV6_FLD = 73,
+ CFA_P70_ACT_MOD_SIPV4_FLD = 74,
+ CFA_P70_ACT_MOD_DIPV4_FLD = 75,
+ CFA_P70_ACT_MOD_SPORT_FLD = 76,
+ CFA_P70_ACT_MOD_DPORT_FLD = 77,
+ CFA_P70_ACT_ENC_ECV_TNL_FLD = 78,
+ CFA_P70_ACT_ENC_ECV_L4_FLD = 79,
+ CFA_P70_ACT_ENC_ECV_L3_FLD = 80,
+ CFA_P70_ACT_ENC_ECV_L2_FLD = 81,
+ CFA_P70_ACT_ENC_ECV_VTAG_FLD = 82,
+ CFA_P70_ACT_ENC_ECV_EC_FLD = 83,
+ CFA_P70_ACT_ENC_ECV_VALID_FLD = 84,
+ CFA_P70_ACT_ENC_EC_IP_TTL_IH_FLD = 85,
+ CFA_P70_ACT_ENC_EC_IP_TOS_IH_FLD = 86,
+ CFA_P70_ACT_ENC_EC_TUN_QOS_FLD = 87,
+ CFA_P70_ACT_ENC_EC_GRE_SET_K_FLD = 88,
+ CFA_P70_ACT_ENC_EC_DMAC_OVR_FLD = 89,
+ CFA_P70_ACT_ENC_EC_VLAN_OVR_FLD = 90,
+ CFA_P70_ACT_ENC_EC_SMAC_OVR_FLD = 91,
+ CFA_P70_ACT_ENC_EC_IPV4_ID_CTRL_FLD = 92,
+ CFA_P70_ACT_ENC_L2_DMAC_FLD = 93,
+ CFA_P70_ACT_ENC_VLAN1_TAG_VID_FLD = 94,
+ CFA_P70_ACT_ENC_VLAN1_TAG_DE_FLD = 95,
+ CFA_P70_ACT_ENC_VLAN1_TAG_PRI_FLD = 96,
+ CFA_P70_ACT_ENC_VLAN1_TAG_TPID_FLD = 97,
+ CFA_P70_ACT_ENC_VLAN2_IT_VID_FLD = 98,
+ CFA_P70_ACT_ENC_VLAN2_IT_DE_FLD = 99,
+ CFA_P70_ACT_ENC_VLAN2_IT_PRI_FLD = 100,
+ CFA_P70_ACT_ENC_VLAN2_IT_TPID_FLD = 101,
+ CFA_P70_ACT_ENC_VLAN2_OT_VID_FLD = 102,
+ CFA_P70_ACT_ENC_VLAN2_OT_DE_FLD = 103,
+ CFA_P70_ACT_ENC_VLAN2_OT_PRI_FLD = 104,
+ CFA_P70_ACT_ENC_VLAN2_OT_TPID_FLD = 105,
+ CFA_P70_ACT_ENC_IPV4_ID_FLD = 106,
+ CFA_P70_ACT_ENC_IPV4_TOS_FLD = 107,
+ CFA_P70_ACT_ENC_IPV4_HLEN_FLD = 108,
+ CFA_P70_ACT_ENC_IPV4_VER_FLD = 109,
+ CFA_P70_ACT_ENC_IPV4_PROT_FLD = 110,
+ CFA_P70_ACT_ENC_IPV4_TTL_FLD = 111,
+ CFA_P70_ACT_ENC_IPV4_FRAG_FLD = 112,
+ CFA_P70_ACT_ENC_IPV4_FLAGS_FLD = 113,
+ CFA_P70_ACT_ENC_IPV4_DEST_FLD = 114,
+ CFA_P70_ACT_ENC_IPV6_FLOW_LABEL_FLD = 115,
+ CFA_P70_ACT_ENC_IPV6_TRAFFIC_CLASS_FLD = 116,
+ CFA_P70_ACT_ENC_IPV6_VER_FLD = 117,
+ CFA_P70_ACT_ENC_IPV6_HOP_LIMIT_FLD = 118,
+ CFA_P70_ACT_ENC_IPV6_NEXT_HEADER_FLD = 119,
+ CFA_P70_ACT_ENC_IPV6_PAYLOAD_LENGTH_FLD = 120,
+ CFA_P70_ACT_ENC_IPV6_DEST_FLD = 121,
+ CFA_P70_ACT_ENC_MPLS_TAG1_FLD = 122,
+ CFA_P70_ACT_ENC_MPLS_TAG2_FLD = 123,
+ CFA_P70_ACT_ENC_MPLS_TAG3_FLD = 124,
+ CFA_P70_ACT_ENC_MPLS_TAG4_FLD = 125,
+ CFA_P70_ACT_ENC_MPLS_TAG5_FLD = 126,
+ CFA_P70_ACT_ENC_MPLS_TAG6_FLD = 127,
+ CFA_P70_ACT_ENC_MPLS_TAG7_FLD = 128,
+ CFA_P70_ACT_ENC_MPLS_TAG8_FLD = 129,
+ CFA_P70_ACT_ENC_L4_DEST_PORT_FLD = 130,
+ CFA_P70_ACT_ENC_L4_SRC_PORT_FLD = 131,
+ CFA_P70_ACT_ENC_TNL_VXLAN_NEXT_PROT_FLD = 132,
+ CFA_P70_ACT_ENC_TNL_VXLAN_RSVD_0_FLD = 133,
+ CFA_P70_ACT_ENC_TNL_VXLAN_FLAGS_FLD = 134,
+ CFA_P70_ACT_ENC_TNL_VXLAN_RSVD_1_FLD = 135,
+ CFA_P70_ACT_ENC_TNL_VXLAN_VNI_FLD = 136,
+ CFA_P70_ACT_ENC_TNL_NGE_PROT_TYPE_FLD = 137,
+ CFA_P70_ACT_ENC_TNL_NGE_RSVD_0_FLD = 138,
+ CFA_P70_ACT_ENC_TNL_NGE_FLAGS_C_FLD = 139,
+ CFA_P70_ACT_ENC_TNL_NGE_FLAGS_O_FLD = 140,
+ CFA_P70_ACT_ENC_TNL_NGE_FLAGS_OPT_LEN_FLD = 141,
+ CFA_P70_ACT_ENC_TNL_NGE_FLAGS_VER_FLD = 142,
+ CFA_P70_ACT_ENC_TNL_NGE_RSVD_1_FLD = 143,
+ CFA_P70_ACT_ENC_TNL_NGE_VNI_FLD = 144,
+ CFA_P70_ACT_ENC_TNL_NGE_OPTIONS_FLD = 145,
+ CFA_P70_ACT_ENC_TNL_NVGRE_FLOW_ID_FLD = 146,
+ CFA_P70_ACT_ENC_TNL_NVGRE_VSID_FLD = 147,
+ CFA_P70_ACT_ENC_TNL_GRE_KEY_FLD = 148,
+ CFA_P70_ACT_ENC_TNL_GENERIC_TID_FLD = 149,
+ CFA_P70_ACT_ENC_TNL_GENERIC_LENGTH_FLD = 150,
+ CFA_P70_ACT_ENC_TNL_GENERIC_HEADER_FLD = 151,
+ CFA_P70_ACT_SRC_MAC_FLD = 152,
+ CFA_P70_ACT_SRC_IPV4_ADDR_FLD = 153,
+ CFA_P70_ACT_SRC_IPV6_ADDR_FLD = 154,
+ CFA_P70_ACT_STAT0_B16_FPC_FLD = 155,
+ CFA_P70_ACT_STAT1_B16_FPC_FLD = 156,
+ CFA_P70_ACT_STAT0_B16_FBC_FLD = 157,
+ CFA_P70_ACT_STAT1_B16_FBC_FLD = 158,
+ CFA_P70_ACT_STAT0_B24_FPC_FLD = 159,
+ CFA_P70_ACT_STAT1_B24_FPC_FLD = 160,
+ CFA_P70_ACT_STAT0_B24_FBC_FLD = 161,
+ CFA_P70_ACT_STAT1_B24_FBC_FLD = 162,
+ CFA_P70_ACT_STAT0_B24_TIMESTAMP_FLD = 163,
+ CFA_P70_ACT_STAT1_B24_TIMESTAMP_FLD = 164,
+ CFA_P70_ACT_STAT0_B24_TCP_FLAGS_FLD = 165,
+ CFA_P70_ACT_STAT1_B24_TCP_FLAGS_FLD = 166,
+ CFA_P70_ACT_STAT0_B24_UNUSED_0_FLD = 167,
+ CFA_P70_ACT_STAT1_B24_UNUSED_0_FLD = 168,
+ CFA_P70_ACT_STAT0_B32A_FPC_FLD = 169,
+ CFA_P70_ACT_STAT1_B32A_FPC_FLD = 170,
+ CFA_P70_ACT_STAT0_B32A_FBC_FLD = 171,
+ CFA_P70_ACT_STAT1_B32A_FBC_FLD = 172,
+ CFA_P70_ACT_STAT0_B32A_MPC_FLD = 173,
+ CFA_P70_ACT_STAT1_B32A_MPC_FLD = 174,
+ CFA_P70_ACT_STAT0_B32A_MBC_FLD = 175,
+ CFA_P70_ACT_STAT1_B32A_MBC_FLD = 176,
+ CFA_P70_ACT_STAT0_B32B_FPC_FLD = 177,
+ CFA_P70_ACT_STAT1_B32B_FPC_FLD = 178,
+ CFA_P70_ACT_STAT0_B32B_FBC_FLD = 179,
+ CFA_P70_ACT_STAT1_B32B_FBC_FLD = 180,
+ CFA_P70_ACT_STAT0_B32B_TIMESTAMP_FLD = 181,
+ CFA_P70_ACT_STAT1_B32B_TIMESTAMP_FLD = 182,
+ CFA_P70_ACT_STAT0_B32B_TCP_FLAGS_FLD = 183,
+ CFA_P70_ACT_STAT1_B32B_TCP_FLAGS_FLD = 184,
+ CFA_P70_ACT_STAT0_B32B_UNUSED_0_FLD = 185,
+ CFA_P70_ACT_STAT1_B32B_UNUSED_0_FLD = 186,
+ CFA_P70_ACT_STAT0_B32B_MPC15_0_FLD = 187,
+ CFA_P70_ACT_STAT1_B32B_MPC15_0_FLD = 188,
+ CFA_P70_ACT_STAT0_B32B_MPC37_16_FLD = 189,
+ CFA_P70_ACT_STAT1_B32B_MPC37_16_FLD = 190,
+ CFA_P70_ACT_STAT0_B32B_MBC_FLD = 191,
+ CFA_P70_ACT_STAT1_B32B_MBC_FLD = 192,
+ CFA_P70_ACTION_MAX_FLD = 193,
+ CFA_P70_ACT_MAX_FLD = CFA_P70_ACTION_MAX_FLD,
+};
+
+#define CFA_P70_EM_KEY_LAYOUT_2_BASE_FLD(FLD) \
+ ((FLD) - CFA_P70_EM_LREC_MAX_FLD)
+
+/* clang-format on */
+
+#endif /* _CFA_BLD_P70_FIELD_IDS_H_ */
new file mode 100644
@@ -0,0 +1,548 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld_p70_mpc.h
+ *
+ * @brief CFA 7.0 Public api definitions to build CFA Mid-path commands and
+ * Parse CFA Mid-path Command completions
+ */
+
+#ifndef _CFA_BLD_P70_MPC_H_
+#define _CFA_BLD_P70_MPC_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+/**
+ * CFA Mid-Path Command (MPC) opcodes. The MPC CFA operations
+ * are divided into 2 sub groups. Cache access operations
+ * and EM update operations.
+ */
+enum cfa_mpc_opcode {
+ /**
+ * MPC Cache access commands
+ */
+ /* MPC Command to read Action/Lookup cache (up to 4 lines) */
+ CFA_MPC_READ,
+ /* MPC Command to write to Action/Lookup cache (up to 4 lines) */
+ CFA_MPC_WRITE,
+ /* MPC Cmd to Read and Clear Action/Lookup cache line (max 1 line) */
+ CFA_MPC_READ_CLR,
+ /* MPC Cmd to Invalidate Action/Lkup cache lines (up to 4 lines) */
+ CFA_MPC_INVALIDATE,
+
+ /**
+ * MPC EM update commands
+ */
+ /**
+ * MPC Command to search for an EM entry by its key in the
+ * EM bucket chain
+ */
+ CFA_MPC_EM_SEARCH,
+ /* MPC command to insert a new EM entry to the EM bucket chain */
+ CFA_MPC_EM_INSERT,
+ /* MPC Command to delete an EM entry from the EM bucket chain */
+ CFA_MPC_EM_DELETE,
+ /* MPC Command to add an EM bucket to the tail of EM bucket chain */
+ CFA_MPC_EM_CHAIN,
+ CFA_MPC_OPC_MAX,
+};
+
+/**
+ * CFA MPC Cache access reading mode
+ */
+enum cfa_mpc_read_mode {
+ CFA_MPC_RD_NORMAL, /**< Normal read mode */
+ CFA_MPC_RD_EVICT, /**< Read the cache and evict the cache line */
+ CFA_MPC_RD_DEBUG_LINE, /**< Debug read mode line */
+ CFA_MPC_RD_DEBUG_TAG, /**< Debug read mode tag */
+ CFA_MPC_RD_MODE_MAX
+};
+
+/**
+ * CFA MPC Cache access writing mode
+ */
+enum cfa_mpc_write_mode {
+ CFA_MPC_WR_WRITE_THRU, /**< Write to cache in Write through mode */
+ CFA_MPC_WR_WRITE_BACK, /**< Write to cache in Write back mode */
+ CFA_MPC_WR_MODE_MAX
+};
+
+/**
+ * CFA MPC Cache access eviction mode
+ */
+enum cfa_mpc_evict_mode {
+ /**
+ * Line evict: These modes evict a single cache line
+ * In these modes, the eviction occurs regardless of the cache line
+ * state (CLEAN/CLEAN_FAST_EVICT/DIRTY)
+ */
+ /* Cache line addressed by set/way is evicted */
+ CFA_MPC_EV_EVICT_LINE,
+ /* Cache line hit with the table scope/address tuple is evicted */
+ CFA_MPC_EV_EVICT_SCOPE_ADDRESS,
+
+ /**
+ * Set Evict: These modes evict cache lines that meet certain criteria
+ * from the entire cache set.
+ */
+ /*
+ * Cache lines only in CLEAN state are evicted from the set
+ * derived from the address
+ */
+ CFA_MPC_EV_EVICT_CLEAN_LINES,
+ /*
+ * Cache lines only in CLEAN_FAST_EVICT state are evicted from
+ * the set derived from the address
+ */
+ CFA_MPC_EV_EVICT_CLEAN_FAST_EVICT_LINES,
+ /*
+ * Cache lines in both CLEAN and CLEAN_FAST_EVICT states are
+ * evicted from the set derived from the address
+ */
+ CFA_MPC_EV_EVICT_CLEAN_AND_CLEAN_FAST_EVICT_LINES,
+ /*
+ * All Cache lines in the set identified by the address and
+ * belonging to the table scope are evicted.
+ */
+ CFA_MPC_EV_EVICT_TABLE_SCOPE,
+ CFA_MPC_EV_MODE_MAX,
+};
+
+/**
+ * CFA Hardware Cache Table Type
+ */
+enum cfa_hw_table_type {
+ CFA_HW_TABLE_ACTION, /**< CFA Action Record Table */
+ CFA_HW_TABLE_LOOKUP, /**< CFA EM Lookup Record Table */
+ CFA_HW_TABLE_MAX
+};
+
+/**
+ * MPC Command parameters specific to Cache read operations
+ */
+struct cfa_mpc_cache_read_params {
+ /* Specifies the cache option for reading the cache lines */
+ enum cfa_mpc_read_mode mode;
+ /**
+ * Clear mask to use for the Read-Clear operation
+ * Each bit in the mask corresponds to 2 bytes in the
+ * cache line. Setting a mask bit clears the
+ * corresponding data bytes in the cache line AFTER
+ * the read. This field is ignored for the Read command.
+ */
+ uint16_t clear_mask;
+ /**
+ * External host memory address
+ *
+ * The 64-bit IOVA host address to which to write the DMA data returned
+ * in the completion. The data will be written to the same function as
+ * the one that owns the queue this command is read from. Address must
+ * be 4 byte aligned.
+ */
+ uint64_t host_address;
+};
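
A note on clear_mask, since the 2-bytes-per-bit encoding is easy to get wrong: the sketch below shows one way a caller might derive the mask, assuming bit 0 covers the first two bytes of the 32B line (check the EAS for the exact bit ordering). The helper and the example values are illustrative only and are not part of this patch.

/* Sketch: derive a clear_mask for a Read-Clear. Each mask bit covers
 * 2 bytes of the 32B cache line, so a full line needs 16 bits and
 * bit i is assumed to map to bytes [2*i, 2*i + 1].
 */
static inline uint16_t cfa_mpc_clear_mask(unsigned int byte_off,
					   unsigned int len)
{
	unsigned int first = byte_off / 2;	/* first 2B chunk   */
	unsigned int count = len / 2;		/* number of chunks */

	return (uint16_t)(((1u << count) - 1) << first);
}

/* e.g. clearing bytes 16..31 after the read: cfa_mpc_clear_mask(16, 16)
 * evaluates to 0xff00.
 */
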
+
+/**
+ * MPC Command parameters specific to Cache write operation
+ */
+struct cfa_mpc_cache_write_params {
+ /* Specifies the cache option for the write access */
+ enum cfa_mpc_write_mode mode;
+ /* Pointer to data to be written to cache */
+ const uint8_t *data_ptr;
+};
+
+/**
+ * MPC Command parameters specific to Cache evict/invalidate operation
+ */
+struct cfa_mpc_cache_evict_params {
+ /* Specifies the cache option for Invalidation operation */
+ enum cfa_mpc_evict_mode mode;
+};
+
+/**
+ * MPC CFA Command parameters for cache related operations
+ */
+struct cfa_mpc_cache_axs_params {
+ /** Common parameters for cache operations */
+ /*
+ * Opaque value that will be returned in the MPC CFA
+ * Completion message. This can be used by the caller to associate
+ * completions with commands.
+ */
+ uint32_t opaque;
+ /*
+ * Table Scope to address the cache line. For Thor2,
+ * the table scope ranges from 0 to 31.
+ */
+ uint8_t tbl_scope;
+ /*
+ * Table Index to address the cache line. Note that
+ * this is the offset to the 32B record in the table
+ * scope backing store, expressed in 32B units.
+ */
+ uint32_t tbl_index;
+ /*
+ * Number of cache lines (32B words) in the access.
+ * This should be set to 1 for the READ-CLEAR command and between 1 and
+ * 4 for all other cache access commands (READ/WRITE/INVALIDATE)
+ */
+ uint8_t data_size;
+ /* CFA table type for which this Host IF hw operation is intended */
+ enum cfa_hw_table_type tbl_type;
+
+ /* Cache operation specific params */
+ union {
+ /** Read and Read clear specific parameters */
+ struct cfa_mpc_cache_read_params read;
+ /** Cache write specific parameters */
+ struct cfa_mpc_cache_write_params write;
+ /** Cache invalidate operation specific parameters */
+ struct cfa_mpc_cache_evict_params evict;
+ };
+};
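
For orientation, here is a hedged sketch of how these parameters might be filled for a single-line action-record write; the scope, index, and buffer below are placeholders, not values taken from this patch.

/* Sketch: cache access params for writing one 32B action record in
 * write-through mode. "act_rec" and the numeric values are
 * hypothetical placeholders.
 */
uint8_t act_rec[32] = { 0 };

struct cfa_mpc_cache_axs_params wr_parms = {
	.opaque	   = 0x1234,		/* echoed back in the completion */
	.tbl_scope = 1,			/* Thor2 scope id, 0..31 */
	.tbl_index = 0x100,		/* offset in 32B units */
	.data_size = 1,			/* one 32B cache line */
	.tbl_type  = CFA_HW_TABLE_ACTION,
	.write = {
		.mode	  = CFA_MPC_WR_WRITE_THRU,
		.data_ptr = act_rec,
	},
};
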
+
+/**
+ * MPC CFA command parameters specific to EM insert operation
+ */
+struct cfa_mpc_em_insert_params {
+ /*
+ * Pointer to the Exact Match entry to insert. The
+ * EM Key in the entry is also used for the search.
+ */
+ const uint8_t *em_entry;
+ /* Size of the EM entry in 32B words (1-4) */
+ uint8_t data_size;
+ /* Flag to indicate if a matching entry (if found) should be replaced */
+ bool replace;
+ /* Table index to write the EM entry being inserted */
+ uint32_t entry_idx;
+ /*
+ * Table index to the EM record that can be used to
+ * create a new EM bucket, if the insertion results
+ * in an EM bucket chain's tail update.
+ */
+ uint32_t bucket_idx;
+};
+
+/**
+ * MPC CFA command parameters specific to EM search operation
+ */
+struct cfa_mpc_em_search_params {
+ /*
+ * Pointer to the Exact Match entry to search. The
+ * EM Key in the entry is used for the search.
+ */
+ uint8_t *em_entry;
+ /* Size of the EM entry in 32B words (1-4) */
+ uint8_t data_size;
+};
+
+/**
+ * MPC CFA command parameters specific to EM delete operation
+ */
+struct cfa_mpc_em_delete_params {
+ /* Table index to the EM record to delete */
+ uint32_t entry_idx;
+ /*
+ * Table index to the static bucket for the EM bucket chain.
+ * As part of EM Delete processing, the hw walks the EM bucket
+ * chain to determine if the entry_idx is part of the chain.
+ * If the entry_idx is found to be a part of the chain, it is
+ * deleted from the chain and the EM bucket is repacked. If the
+ * tail of the bucket has only one valid entry, then the delete
+ * operation results in a tail update and one free EM entry
+ */
+ uint32_t bucket_idx;
+};
+
+/**
+ * MPC CFA command parameters specific to EM chain operation
+ */
+struct cfa_mpc_em_chain_params {
+ /*
+ * Table index that will form the chain
+ * pointer to the tail bucket in the EM bucket chain
+ */
+ uint32_t entry_idx;
+ /*
+ * Table index to the static bucket for
+ * EM bucket chain to be updated.
+ */
+ uint32_t bucket_idx;
+};
+
+/**
+ * MPC CFA Command parameters for EM operations
+ */
+struct cfa_mpc_em_op_params {
+ /** Common parameters for EM update operations */
+ /*
+ * Opaque value that will be returned in the MPC CFA
+ * Completion message. This can be used by the caller to associate
+ * completions with commands.
+ */
+ uint32_t opaque;
+ /*
+ * Table Scope to address the cache line. For Thor2,
+ * the table scope ranges from 0 to 31.
+ */
+ uint8_t tbl_scope;
+ /** EM update operation specific params */
+ union {
+ /** EM Search operation params */
+ struct cfa_mpc_em_search_params search;
+ /** EM Insert operation params */
+ struct cfa_mpc_em_insert_params insert;
+ /** EM Delete operation params */
+ struct cfa_mpc_em_delete_params del;
+ /** EM Chain operation params */
+ struct cfa_mpc_em_chain_params chain;
+ };
+};
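
As a counterpart to the cache example above, a hedged sketch of the delete variant, since bucket_idx means something different for each EM opcode; the index values are placeholders.

/* Sketch: EM delete parameters. entry_idx is the EM record to remove,
 * bucket_idx is the static bucket heading its chain. Index values are
 * hypothetical placeholders.
 */
struct cfa_mpc_em_op_params del_parms = {
	.opaque	   = 0x5678,
	.tbl_scope = 1,
	.del = {
		.entry_idx  = 0x2000,	/* EM record to delete */
		.bucket_idx = 0x40,	/* static bucket of its chain */
	},
};
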
+
+/**
+ * MPC CFA Command completion status
+ */
+enum cfa_mpc_cmpl_status {
+ /* Command success */
+ CFA_MPC_OK = 0,
+ /* Unsupported CFA opcode */
+ CFA_MPC_UNSPRT_ERR = 1,
+ /* CFA command format error */
+ CFA_MPC_FMT_ERR = 2,
+ /* SVIF-Table Scope error */
+ CFA_MPC_SCOPE_ERR = 3,
+ /* Address error: Only used if EM command or TABLE_TYPE=EM */
+ CFA_MPC_ADDR_ERR = 4,
+ /* Cache operation error */
+ CFA_MPC_CACHE_ERR = 5,
+ /* EM_SEARCH or EM_DELETE did not find a matching EM entry */
+ CFA_MPC_EM_MISS = 6,
+ /* EM_INSERT found a matching EM entry and REPLACE=0 in the command */
+ CFA_MPC_EM_DUPLICATE = 7,
+ /* EM_EVENT_COLLECTION_FAIL no events to return */
+ CFA_MPC_EM_EVENT_COLLECTION_FAIL = 8,
+ /*
+ * EM_INSERT required a dynamic bucket to be added to the chain
+ * to successfully insert the EM entry, but the entry provided
+ * for use as dynamic bucket was invalid. (bucket_idx == 0)
+ */
+ CFA_MPC_EM_ABORT = 9,
+};
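
Callers usually fold these completion codes into errno-style returns. The mapping below is only one plausible policy sketch; neither this header nor the patch defines such a mapping.

#include <errno.h>

/* Sketch: one possible caller-side mapping of MPC completion status
 * codes to negative errno values; the errno chosen per status is a
 * local policy decision.
 */
static int cfa_mpc_status_to_errno(enum cfa_mpc_cmpl_status status)
{
	switch (status) {
	case CFA_MPC_OK:
		return 0;
	case CFA_MPC_EM_MISS:
		return -ENOENT;
	case CFA_MPC_EM_DUPLICATE:
		return -EEXIST;
	case CFA_MPC_UNSPRT_ERR:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
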
+
+/**
+ * MPC Cache access command completion result
+ */
+struct cfa_mpc_cache_axs_result {
+ /*
+ * Opaque value returned in the completion message. This can
+ * be used by the caller to associate completions with commands.
+ */
+ uint32_t opaque;
+ /* MPC Command completion status code */
+ enum cfa_mpc_cmpl_status status;
+ /*
+ * Additional error information
+ * when status code is one of FMT, SCOPE, ADDR or CACHE error
+ */
+ uint32_t error_data;
+ /*
+ * Pointer to buffer to copy read data to.
+ * Needs to be valid for READ, READ-CLEAR operations
+ * Not set for write and evict operations
+ */
+ uint8_t *rd_data;
+ /*
+ * Size of the data buffer in Bytes. Should be at least
+ * be data_size * 32 for MPC cache reads
+ */
+ uint16_t data_len;
+};
+
+/**
+ * MPC EM search operation result
+ */
+struct cfa_mpc_em_search_result {
+ uint32_t bucket_num; /**< See CFA EAS */
+ uint32_t num_entries; /**< See CFA EAS */
+ /* Set to HASH[35:24] of the hash computed from the EM entry key. */
+ uint32_t hash_msb;
+ /*
+ * If a match is found, this field is set
+ * to the table index of the matching EM entry
+ */
+ uint32_t match_idx;
+ /*
+ * Table index to the static bucket determined by hashing the EM entry
+ * key
+ */
+ uint32_t bucket_idx;
+};
+
+/**
+ * MPC EM insert operation result
+ */
+struct cfa_mpc_em_insert_result {
+ uint32_t bucket_num; /**< See CFA EAS */
+ uint32_t num_entries; /**< See CFA EAS */
+ /* Set to HASH[35:24] of the hash computed from the EM entry key. */
+ uint32_t hash_msb;
+ /*
+ * If replace = 1 and a matching entry is found, this field is
+ * updated with the table index of the replaced entry. This table
+ * index is therefore free for use.
+ */
+ uint32_t match_idx;
+ /*
+ * Table index to the static bucket determined by hashing the EM entry
+ * key
+ */
+ uint32_t bucket_idx;
+ /* Flag: Matching entry was found and replaced */
+ uint8_t replaced:1;
+ /* Flag: EM bucket chain was updated */
+ uint8_t chain_update:1;
+};
+
+/**
+ * MPC EM delete operation result
+ */
+struct cfa_mpc_em_delete_result {
+ uint32_t bucket_num; /**< See CFA EAS */
+ uint32_t num_entries; /**< See CFA EAS */
+ /*
+ * Table index to EM bucket tail BEFORE the delete command
+ * was processed with an OK or EM_MISS status. If chain_update = 1,
+ * then this bucket can be freed.
+ */
+ uint32_t prev_tail;
+ /*
+ * Table index to EM bucket tail AFTER the delete command
+ * was processed with an OK or EM_MISS status. Same as prev_tail
+ * if chain_update = 0.
+ */
+ uint32_t new_tail;
+ /* Flag: EM bucket chain was updated */
+ uint8_t chain_update:1;
+};
+
+/**
+ * MPC EM chain operation result
+ */
+struct cfa_mpc_em_chain_result {
+ uint32_t bucket_num; /**< See CFA EAS */
+ uint32_t num_entries; /**< See CFA EAS */
+};
+
+/**
+ * MPC EM operation completion result
+ */
+struct cfa_mpc_em_op_result {
+ /*
+ * Opaque value returned in the completion message. This can
+ * be used by the caller to associate completions with commands.
+ */
+ uint32_t opaque;
+ /* MPC Command completion status code */
+ enum cfa_mpc_cmpl_status status;
+ /*
+ * Additional error information
+ * when status code is one of FMT, SCOPE, ADDR or CACHE error
+ */
+ uint32_t error_data;
+ union {
+ /** EM Search specific results */
+ struct cfa_mpc_em_search_result search;
+ /** EM Insert specific results */
+ struct cfa_mpc_em_insert_result insert;
+ /** EM Delete specific results */
+ struct cfa_mpc_em_delete_result del;
+ /** EM Chain specific results */
+ struct cfa_mpc_em_chain_result chain;
+ };
+};
+
+/**
+ * Build MPC CFA Cache access command
+ *
+ * @param [in] opc MPC opcode
+ *
+ * @param [out] cmd_buff Command data buffer to write the command to
+ *
+ * @param [in/out] cmd_buff_len Pointer to command buffer size param
+ * Set by caller to indicate the input cmd_buff size.
+ * Set to the actual size of the command generated by the API.
+ *
+ * @param [in] parms Pointer to MPC cache access command parameters
+ *
+ * @return 0 on Success, negative errno on failure
+ */
+int cfa_mpc_build_cache_axs_cmd(enum cfa_mpc_opcode opc, uint8_t *cmd_buff,
+ uint32_t *cmd_buff_len,
+ struct cfa_mpc_cache_axs_params *parms);
+
+/**
+ * Parse MPC CFA Cache access command completion result
+ *
+ * @param [in] opc MPC cache access opcode
+ *
+ * @param [in] resp_buff Data buffer containing the response to parse
+ *
+ * @param [in] resp_buff_len Response buffer size
+ *
+ * @param [out] result Pointer to MPC cache access result object. This
+ * object will contain the fields parsed and extracted from the
+ * response buffer.
+ *
+ * @return 0 on Success, negative errno on failure
+ */
+int cfa_mpc_parse_cache_axs_resp(enum cfa_mpc_opcode opc, uint8_t *resp_buff,
+ uint32_t resp_buff_len,
+ struct cfa_mpc_cache_axs_result *result);
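
A minimal end-to-end sketch of the build/parse pair for a one-line read. Posting the command on the MPC TX queue and collecting the completion are driver-specific and only indicated by a comment; the buffer sizes and index values are assumptions, not taken from the patch.

/* Sketch: build a 1-line cache READ and parse its completion. CMD/RESP
 * buffer sizes are assumed upper bounds; ring submission is elided.
 */
static int mpc_read_one_line(uint8_t *rd_buf, uint64_t host_iova)
{
	uint8_t cmd[64], resp[128];
	uint32_t cmd_len = sizeof(cmd);
	struct cfa_mpc_cache_axs_result res = { 0 };
	struct cfa_mpc_cache_axs_params parms = {
		.opaque	   = 42,
		.tbl_scope = 1,
		.tbl_index = 0x100,		/* 32B units */
		.data_size = 1,			/* one cache line */
		.tbl_type  = CFA_HW_TABLE_LOOKUP,
		.read = {
			.mode	      = CFA_MPC_RD_NORMAL,
			.host_address = host_iova,
		},
	};
	int rc;

	rc = cfa_mpc_build_cache_axs_cmd(CFA_MPC_READ, cmd, &cmd_len, &parms);
	if (rc)
		return rc;

	/* ... post 'cmd' (cmd_len bytes) on the MPC TX queue and wait for
	 * the matching completion to arrive in 'resp' ...
	 */

	res.rd_data = rd_buf;
	res.data_len = 32;		/* data_size * 32 */
	return cfa_mpc_parse_cache_axs_resp(CFA_MPC_READ, resp, sizeof(resp),
					    &res);
}
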
+
+/**
+ * Build MPC CFA EM operation command
+ *
+ * @param [in] opc MPC EM opcode
+ *
+ * @param [out] cmd_buff Command data buffer to write the command to
+ *
+ * @param [in/out] cmd_buff_len Pointer to command buffer size param
+ * Set by caller to indicate the input cmd_buff size.
+ * Set to the actual size of the command generated by the API.
+ *
+ * @param [in] parms Pointer to MPC EM operation command parameters
+ *
+ * @return 0 on Success, negative errno on failure
+ */
+int cfa_mpc_build_em_op_cmd(enum cfa_mpc_opcode opc, uint8_t *cmd_buff,
+ uint32_t *cmd_buff_len,
+ struct cfa_mpc_em_op_params *parms);
+
+/**
+ * Parse MPC CFA EM operation command completion result
+ *
+ * @param [in] opc MPC EM operation opcode
+ *
+ * @param [in] resp_buff Data buffer containing the response to parse
+ *
+ * @param [in] resp_buff_len Response buffer size
+ *
+ * @param [out] result Pointer to MPC EM operation result object. This
+ * object will contain the fields parsed and extracted from the
+ * response buffer.
+ *
+ * @return 0 on Success, negative errno on failure
+ */
+int cfa_mpc_parse_em_op_resp(enum cfa_mpc_opcode opc, uint8_t *resp_buff,
+ uint32_t resp_buff_len,
+ struct cfa_mpc_em_op_result *result);
+
+#endif /* _CFA_BLD_P70_MPC_H_ */
new file mode 100644
@@ -0,0 +1,164 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file
+ *
+ * @brief
+ */
+
+#ifndef _CFA_P70_H_
+#define _CFA_P70_H_
+
+#include "sys_util.h"
+#include "cfa_p70_hw.h"
+
+#define BITS_TO_BYTES(n) (((n) + 7) / 8)
+#define BYTES_TO_WORDS(n) (((n) + 3) / 4)
+
+/* EM Lrec size */
+#define CFA_P70_EM_LREC_SZ CFA_P70_EM_LREC_TOTAL_NUM_BITS
+/* Encap header length */
+#define CFA_P70_ACT_ENCAP_MIN_HDR_LEN 64
+/* Max AR pointers per MCG record */
+#define CFA_P70_ACT_MCG_MAX_AR_PTR 8
+/* Max Key fields */
+#define CFA_P70_KEY_FLD_ID_MAX CFA_P70_EM_KEY_LAYOUT_MAX_FLD
+
+/* profiler ILT, l2ctxt remap, and profile remap are 32-bit accessed */
+#define CFA_PROF_P7P0_ILT_NUM_WORDS \
+ NUM_WORDS_ALIGN_32BIT(CFA_P70_PROF_ILT_DR_TOTAL_NUM_BITS)
+#define CFA_PROF_P7P0_L2_CTXT_RMP_NUM_WORDS \
+ NUM_WORDS_ALIGN_32BIT(CFA_P70_PROF_L2_CTXT_RMP_DR_TOTAL_NUM_BITS)
+#define CFA_PROF_P7P0_PROFILE_RMP_NUM_WORDS \
+ NUM_WORDS_ALIGN_32BIT(CFA_P70_PROF_PROFILE_RMP_DR_TOTAL_NUM_BITS)
+/* profiler TCAM and L2 ctxt TCAM are accessed via Wide-bus */
+#define CFA_PROF_P7P0_PROFILE_TCAM_NUM_WORDS \
+ NUM_WORDS_ALIGN_128BIT(CFA_P70_PROF_PROFILE_TCAM_TOTAL_NUM_BITS)
+#define CFA_PROF_P7P0_L2_CTXT_TCAM_NUM_WORDS \
+ NUM_WORDS_ALIGN_128BIT(CFA_P70_PROF_L2_CTXT_TCAM_TOTAL_NUM_BITS)
+/* FKB are accessed via Wide-bus */
+#define CFA_P70_EM_FKB_NUM_WORDS NUM_WORDS_ALIGN_128BIT(CFA_P70_EM_FKB_MAX_FLD)
+#define CFA_P70_EM_FKB_NUM_ENTRIES 128
+
+/* EM FKB Mask */
+/* EM_FKB_MASK total num bits defined in CFA EAS section 3.3.9.2.2 EM Key */
+#define CFA_P70_EM_FKB_MASK_TOTAL_NUM_BITS 896
+#define CFA_P70_EM_FKB_MASK_NUM_WORDS \
+ NUM_WORDS_ALIGN_128BIT(CFA_P70_EM_FKB_MASK_TOTAL_NUM_BITS)
+#define CFA_P70_EM_FKB_MASK_NUM_ENTRIES 128
+
+#define CFA_P70_WC_TCAM_FKB_NUM_WORDS \
+ NUM_WORDS_ALIGN_128BIT(CFA_P70_WC_TCAM_FKB_MAX_FLD)
+#define CFA_P70_WC_TCAM_FKB_NUM_ENTRIES 128
+/* VNIC-SVIF Properties Table is accessed via Wide-bus */
+#define CFA_ACT_P7P0_VSPT_NUM_WORDS \
+ NUM_WORDS_ALIGN_32BIT(CFA_P70_ACT_VSPT_DR_TX_TOTAL_NUM_BITS)
+#define CFA_P70_ACT_VEB_TCAM_NUM_WORDS \
+ NUM_WORDS_ALIGN_128BIT(CFA_P70_ACT_VEB_TCAM_RX_TOTAL_NUM_BITS)
+#define CFA_P70_ACT_MIRROR_NUM_WORDS \
+ NUM_WORDS_ALIGN_128BIT(CFA_P70_ACT_MIRROR_TOTAL_NUM_BITS)
+#define CFA_P7P0_ACT_VEB_RMP_NUM_WORDS \
+ NUM_WORDS_ALIGN_32BIT(CFA_P70_ACT_VEB_RMP_TOTAL_NUM_BITS)
+#define CFA_P7P0_ACT_LBT_NUM_WORDS \
+ NUM_WORDS_ALIGN_32BIT(CFA_P70_ACT_LBT_DR_TOTAL_NUM_BITS)
+#define CFA_P70_LKUP_EM_ENTRY_SIZE_IN_BITS 256
+#define CFA_P70_LKUP_EM_MAX_ENTRIES 4
+#define CFA_P70_LKUP_EM_MAX_ENTRY_SIZE_IN_BITS \
+ (CFA_P70_LKUP_EM_ENTRY_SIZE_IN_BITS * CFA_P70_LKUP_EM_MAX_ENTRIES)
+/* Maximum EM key size in bits */
+#define CFA_P70_LKUP_EM_DATA_SIZE_IN_BITS \
+ (CFA_P70_LKUP_EM_MAX_ENTRY_SIZE_IN_BITS - CFA_P70_EM_LREC_SZ)
+#define CFA_P70_LKUP_WC_DATA_SIZE_IN_BITS 688
+#define CFA_P70_LKUP_WC_DATA_SIZE_WITH_CTRL_INFO_IN_BITS 700
+#define CFA_P70_LKUP_WC_DATA_SIZE \
+ (BITS_TO_BYTES(CFA_P70_LKUP_WC_DATA_SIZE_IN_BITS))
+#define CFA_P70_LKUP_WC_MAX_DATA_SIZE \
+ (BITS_TO_BYTES(CFA_P70_LKUP_WC_DATA_SIZE_WITH_CTRL_INFO_IN_BITS))
+#define CFA_P70_LKUP_WC_NUM_WORDS (BYTES_TO_WORDS(CFA_P70_LKUP_WC_DATA_SIZE))
+#define CFA_P70_LKUP_WC_NUM_WORDS_PER_BANK (CFA_P70_LKUP_WC_NUM_WORDS / 2)
+#define CFA_P70_LKUP_WC_LREC_DATA_SIZE \
+ (BITS_TO_BYTES(CFA_P70_WC_LREC_TOTAL_NUM_BITS))
+#define CFA_P70_LKUP_WC_LREC_NUM_WORDS \
+ (BYTES_TO_WORDS(CFA_P70_LKUP_WC_LREC_DATA_SIZE))
+#define CFA_P70_LKUP_WC_SLICE_LEN_WITH_CTRL_INFO 175
+#define CFA_P70_LKUP_WC_SLICE_LEN 172
+#define CFA_P70_LKUP_WC_TCAM_IDX_MASK 0x1fff
+#define CFA_P70_LKUP_WC_ROW_IDX_SFT 2
+#define CFA_P70_LKUP_WC_SLICE_IDX_MASK 0x3
+#define CFA_P70_LKUP_WC_NUM_SLICES 4
+#define CFA_P70_LKUP_WC_NUM_SLICES_PER_BANK 2
+#define CFA_P70_LKUP_WC_TCAM_CTRL_172B_KEY 0
+#define CFA_P70_LKUP_WC_TCAM_CTRL_344B_KEY 1
+#define CFA_P70_LKUP_WC_TCAM_CTRL_688B_KEY 2
+#define CFA_P70_LKUP_WC_TCAM_CTRL_MODE_SFT 29
+#define CFA_P70_LKUP_WC_TCAM_CTRL_MODE_MASK 0x3
+#define CFA_P70_LKUP_WC_TCAM_CTRL_VALID_SFT 31
+#define CFA_P70_LKUP_WC_TCAM_CTRL_VALID_MASK 0x1
+#define CFA_P70_LKUP_WC_TCAM_CTRL_NUM_BITS 3
+#define CFA_P70_LKUP_WC_TCAM_CTRL_MODE_NUM_BITS 2
+#define GET_NUM_SLICES_FROM_MODE(mode) (1 << (mode))
+#define CFA_P70_LKUP_WC_SLICE_NUM_BYTES \
+ (BITS_TO_BYTES(CFA_P70_LKUP_WC_SLICE_LEN_WITH_CTRL_INFO))
+#define CFA_P70_LKUP_WC_SLICE_NUM_WORDS \
+ (BYTES_TO_WORDS(CFA_P70_LKUP_WC_SLICE_NUM_BYTES))
+#define CFA_P70_WC_TCAM_GET_NUM_SLICES_FROM_NUM_BYTES(n) \
+ ((((n) << 3) + CFA_P70_LKUP_WC_SLICE_LEN_WITH_CTRL_INFO - 1) / \
+ CFA_P70_LKUP_WC_SLICE_LEN_WITH_CTRL_INFO)
+#define CFA_MASK32(N) (((N) < 32) ? ((1U << (N)) - 1) : 0xffffffff)
+#define CFA_P70_ECV_VTAG_ADD0_IMMED CFA_P70_ECV_VTAG_ADD0_IMMED_PRI0
+#define CFA_P70_ECV_VTAG_PRI_MASK \
+ (~CFA_P70_ECV_VTAG_ADD0_IMMED & \
+ CFA_MASK32(CFA_P70_ACT_ENC_ECV_VTAG_NUM_BITS))
+
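
Since the slice-count and mask helpers above are pure arithmetic, a few compile-time checks make the intended values explicit. These asserts are only an illustration (assuming a C11 toolchain; DPDK's RTE_BUILD_BUG_ON would serve the same purpose inside a .c file) and are not part of the patch.

/* Illustrative compile-time checks; the expected values follow directly
 * from the macro definitions and the *_TCAM_CTRL_*_KEY encodings above.
 */
_Static_assert(GET_NUM_SLICES_FROM_MODE(CFA_P70_LKUP_WC_TCAM_CTRL_172B_KEY) == 1,
	       "172-bit key mode uses one slice");
_Static_assert(GET_NUM_SLICES_FROM_MODE(CFA_P70_LKUP_WC_TCAM_CTRL_344B_KEY) == 2,
	       "344-bit key mode uses two slices");
_Static_assert(GET_NUM_SLICES_FROM_MODE(CFA_P70_LKUP_WC_TCAM_CTRL_688B_KEY) == 4,
	       "688-bit key mode uses four slices");
_Static_assert(CFA_MASK32(13) == CFA_P70_LKUP_WC_TCAM_IDX_MASK,
	       "13-bit mask matches the WC TCAM index mask");
_Static_assert(CFA_MASK32(32) == 0xffffffff,
	       "the N == 32 special case avoids an undefined shift");
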
+#define CFA_P70_LKUP_EPOCH0_NUM_WORDS 1
+#define CFA_P70_LKUP_EPOCH1_NUM_WORDS 1
+#define CFA_P70_LKUP_EPOCH0_ENTRIES 4096
+#define CFA_P70_LKUP_EPOCH1_ENTRIES 256
+
+/* Field range check table register widths */
+#define CFA_P70_FRC_PROF_NUM_WORDS \
+ NUM_WORDS_ALIGN_32BIT(CFA_P70_LKUP_FRC_PROFILE_TOTAL_NUM_BITS)
+#define CFA_P70_FRC_ENTRY_NUM_WORDS \
+ NUM_WORDS_ALIGN_32BIT(CFA_P70_LKUP_FRC_RANGE_TOTAL_NUM_BITS)
+
+/* Connection tracking table register widths */
+#define CFA_P70_CT_STATE_NUM_WORDS \
+ NUM_WORDS_ALIGN_32BIT(CFA_P70_LKUP_CT_STATE_TOTAL_NUM_BITS)
+#define CFA_P70_CT_RULE_TCAM_NUM_WORDS \
+ NUM_WORDS_ALIGN_32BIT(CFA_P70_LKUP_CT_RULE_TOTAL_NUM_BITS)
+#define CFA_P70_CT_RULE_TCAM_RMP_NUM_WORDS \
+ NUM_WORDS_ALIGN_32BIT(CFA_P70_LKUP_CT_RULE_RECORD_TOTAL_NUM_BITS)
+
+/* Feature Chain table register widths */
+#define CFA_P70_ACT_FC_TCAM_NUM_WORDS \
+ NUM_WORDS_ALIGN_32BIT(CFA_P70_ACT_FC_TCAM_TOTAL_NUM_BITS)
+#define CFA_P70_ACT_FC_TCAM_RMP_NUM_WORDS \
+ NUM_WORDS_ALIGN_32BIT(CFA_P70_ACT_FC_TCAM_RESULT_TOTAL_NUM_BITS)
+/* Feature Context table register width */
+#define CFA_P70_ACT_FC_NUM_WORDS \
+ NUM_WORDS_ALIGN_128BIT(CFA_P70_ACT_FC_RMP_DR_TOTAL_NUM_BITS)
+
+/* Meter instance table register width */
+#define CFA_P70_ACT_METER_NUM_WORDS \
+ NUM_WORDS_ALIGN_128BIT(CFA_P70_METERS_TOTAL_NUM_BITS)
+
+/* Metadata Mask table register widths */
+#define CFA_P70_METAMASK_PROF_NUM_WORDS 1
+#define CFA_P70_METAMASK_LKUP_NUM_WORDS 1
+#define CFA_P70_METAMASK_ACT_NUM_WORDS 1
+#define MAX_METAMASK_PROF(chip_cfg) 8
+#define MAX_METAMASK_LKUP(chip_cfg) 8
+#define MAX_METAMASK_ACT(chip_cfg) 16
+
+#define CFA_P70_VEB_TCAM_NUM_SLICES 1
+#define CFA_P70_CT_TCAM_NUM_SLICES 1
+#define CFA_P70_FC_TCAM_NUM_SLICES 1
+#define CFA_P70_L2CTXT_TCAM_NUM_SLICES 1
+#define CFA_P70_PROF_TCAM_NUM_SLICES 1
+
+#endif /* _CFA_P70_H_ */
new file mode 100644
@@ -0,0 +1,4286 @@
+/****************************************************************************
+ * Copyright(c) 2001-2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * Name: cfa_p70_hw.h
+ *
+ * Description: CFA HW table layout field position/length definitions
+ *
+ * Date: 09/29/22 11:50:37
+ *
+ * Note: This file is script generated by ./cfa_header_gen.py.
+ * DO NOT modify this file manually !!!!
+ *
+ ****************************************************************************/
+#ifndef _CFA_P70_HW_H_
+#define _CFA_P70_HW_H_
+
+/* clang-format off */
+#include "cfa_bld_p70_field_ids.h"
+
+
+/**
+ * Field code selection 1 for range checking (for idx 1 ...)
+ */
+#define CFA_P70_LKUP_FRC_PROFILE_FIELD_SEL_1_BITPOS 36
+#define CFA_P70_LKUP_FRC_PROFILE_FIELD_SEL_1_NUM_BITS 4
+
+/**
+ * Mask of ranges to check against FIELD_SEL_1
+ */
+#define CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_1_BITPOS 20
+#define CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_1_NUM_BITS 16
+
+/**
+ * Field code selection 0 for range checking
+ */
+#define CFA_P70_LKUP_FRC_PROFILE_FIELD_SEL_0_BITPOS 16
+#define CFA_P70_LKUP_FRC_PROFILE_FIELD_SEL_0_NUM_BITS 4
+
+/**
+ * Mask of ranges to check against FIELD_SEL_0. The following shows
+ * the FIELD_SEL code points:
+ */
+#define CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_BITPOS 0
+#define CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_NUM_BITS 16
+/**
+ * Mask of ranges to check against FIELD_SEL_0. The following shows
+ * the FIELD_SEL code points:
+ */
+enum cfa_p70_lkup_frc_profile_range_check_0 {
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_TL2_OVLAN_VID = 0,
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_TL2_IVLAN_VID = 1,
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_TL4_SRC = 2,
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_TL4_DEST = 3,
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_L2_OVLAN_VID = 4,
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_L2_IVLAN_VID = 5,
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_IP_LENGTH = 6,
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_L4_SRC = 7,
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_L4_DEST = 8,
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_TUN_ID = 9,
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_TUN_CTXT = 10,
+ CFA_P70_LKUP_FRC_PROFILE_RANGE_CHECK_0_0 = 15,
+};
+
+/**
+ * Total number of bits for LKUP_FRC_PROFILE
+ */
+#define CFA_P70_LKUP_FRC_PROFILE_TOTAL_NUM_BITS 40
+
+/**
+ * When 1, block rule searches and do host notify during background
+ * visit
+ */
+#define CFA_P70_LKUP_CT_STATE_NOTIFY_BITPOS 13
+#define CFA_P70_LKUP_CT_STATE_NOTIFY_NUM_BITS 1
+
+/**
+ * Next state to go to after host notify (only used when NOTIFY=1)
+ */
+#define CFA_P70_LKUP_CT_STATE_NOTIFY_STATE_BITPOS 8
+#define CFA_P70_LKUP_CT_STATE_NOTIFY_STATE_NUM_BITS 5
+
+/**
+ * Default forwarding action (0=fwd, 1=miss, 2/3=copy)
+ */
+#define CFA_P70_LKUP_CT_STATE_ACTION_BITPOS 6
+#define CFA_P70_LKUP_CT_STATE_ACTION_NUM_BITS 2
+
+/**
+ * Specifies timer (0=disabled, 1-3=timers 1-3)
+ */
+#define CFA_P70_LKUP_CT_STATE_TIMER_SELECT_BITPOS 4
+#define CFA_P70_LKUP_CT_STATE_TIMER_SELECT_NUM_BITS 2
+
+/**
+ * Timer preload value for connections in this state
+ */
+#define CFA_P70_LKUP_CT_STATE_TIMER_PRELOAD_BITPOS 0
+#define CFA_P70_LKUP_CT_STATE_TIMER_PRELOAD_NUM_BITS 4
+
+/**
+ * Total number of bits for LKUP_CT_STATE
+ */
+#define CFA_P70_LKUP_CT_STATE_TOTAL_NUM_BITS 14
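
All of the *_BITPOS/*_NUM_BITS pairs in this generated header are meant to be fed into generic get/put-field helpers rather than hand-coded shifts. The sketch below shows one such helper applied to the CT state ACTION field; the helper itself is hypothetical (the driver's real bit-packing utilities live elsewhere), and only the BITPOS/NUM_BITS macros come from this file. It handles fields that live within a single 32-bit word; wide TCAM entries need a byte-array variant.

/* Sketch: generic field extraction from a register word using the
 * generated BITPOS/NUM_BITS pairs. The 64-bit shift keeps a 32-bit
 * wide field (num_bits == 32) well defined.
 */
static inline uint32_t cfa_get_field(uint32_t val, uint16_t bitpos,
				     uint16_t num_bits)
{
	uint32_t mask = (uint32_t)((1ull << num_bits) - 1);

	return (val >> bitpos) & mask;
}

/* Example: pull the default forwarding action (0=fwd, 1=miss, 2/3=copy)
 * out of a 14-bit LKUP_CT_STATE word.
 */
static inline uint32_t cfa_p70_ct_state_action(uint32_t ct_state_word)
{
	return cfa_get_field(ct_state_word,
			     CFA_P70_LKUP_CT_STATE_ACTION_BITPOS,
			     CFA_P70_LKUP_CT_STATE_ACTION_NUM_BITS);
}
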
+
+/**
+ * Rule only used if VALID=1 (for idx 1 ...)
+ */
+#define CFA_P70_LKUP_CT_RULE_VALID_BITPOS 38
+#define CFA_P70_LKUP_CT_RULE_VALID_NUM_BITS 1
+
+/**
+ * Mask
+ */
+#define CFA_P70_LKUP_CT_RULE_MASK_BITPOS 19
+#define CFA_P70_LKUP_CT_RULE_MASK_NUM_BITS 19
+
+/**
+ * Rule for packet (1) or background (0)
+ */
+#define CFA_P70_LKUP_CT_RULE_PKT_NOT_BG_BITPOS 18
+#define CFA_P70_LKUP_CT_RULE_PKT_NOT_BG_NUM_BITS 1
+
+/**
+ * Current connection state
+ */
+#define CFA_P70_LKUP_CT_RULE_STATE_BITPOS 13
+#define CFA_P70_LKUP_CT_RULE_STATE_NUM_BITS 5
+
+/**
+ * TCP packet flags
+ */
+#define CFA_P70_LKUP_CT_RULE_TCP_FLAGS_BITPOS 4
+#define CFA_P70_LKUP_CT_RULE_TCP_FLAGS_NUM_BITS 9
+
+/**
+ * Packet protocol is TCP
+ */
+#define CFA_P70_LKUP_CT_RULE_PROT_IS_TCP_BITPOS 3
+#define CFA_P70_LKUP_CT_RULE_PROT_IS_TCP_NUM_BITS 1
+
+/**
+ * Updating tcp_msb_loc
+ */
+#define CFA_P70_LKUP_CT_RULE_MSB_UPDT_BITPOS 2
+#define CFA_P70_LKUP_CT_RULE_MSB_UPDT_NUM_BITS 1
+
+/**
+ * Packet flag error
+ */
+#define CFA_P70_LKUP_CT_RULE_FLAGS_FAILED_BITPOS 1
+#define CFA_P70_LKUP_CT_RULE_FLAGS_FAILED_NUM_BITS 1
+
+/**
+ * Packet failed TCP window check. If VALID=0, the rule is ignored during
+ * searches. When VALID=1, MASK[18:0] provides a mask for bits 18:0. If
+ * the mask bit is set to 0, the corresponding bit is ignored during
+ * searches (does not need to match for the rule to match). During
+ * background updates, all fields in the search key other than STATE are
+ * always 0 (PKT_NOT_BG=0 and the other fields are unused). During
+ * packet updates when PROT_IS_TCP=0, PKT_NOT_BG=1 and STATE is set to
+ * the current state but the other fields will always be 0. If there is
+ * a matching rule found, the record in LKUP_CT_RULE_RECORD for that
+ * rule number is used.
+ */
+#define CFA_P70_LKUP_CT_RULE_WIN_FAILED_BITPOS 0
+#define CFA_P70_LKUP_CT_RULE_WIN_FAILED_NUM_BITS 1
+
+/**
+ * Total number of bits for LKUP_CT_RULE
+ */
+#define CFA_P70_LKUP_CT_RULE_TOTAL_NUM_BITS 39
+
+/**
+ * Forward action (packet only): 0=fwd, 1=miss, 2/3=copy
+ */
+#define CFA_P70_LKUP_CT_RULE_RECORD_ACTION_BITPOS 7
+#define CFA_P70_LKUP_CT_RULE_RECORD_ACTION_NUM_BITS 2
+
+/**
+ * Next state for the connection
+ */
+#define CFA_P70_LKUP_CT_RULE_RECORD_NEXT_STATE_BITPOS 2
+#define CFA_P70_LKUP_CT_RULE_RECORD_NEXT_STATE_NUM_BITS 5
+
+/**
+ * Signals whether to send message to other CFA. When SEND=0, no
+ * message is sent. Otherwise, SEND[1] indicates that TCP_MSB_LOC in the
+ * message is valid and SEND[0] that STATE is valid.
+ */
+#define CFA_P70_LKUP_CT_RULE_RECORD_SEND_BITPOS 0
+#define CFA_P70_LKUP_CT_RULE_RECORD_SEND_NUM_BITS 2
+
+/**
+ * Total number of bits for LKUP_CT_RULE_RECORD
+ */
+#define CFA_P70_LKUP_CT_RULE_RECORD_TOTAL_NUM_BITS 9
+
+/**
+ * destination remap mode when enabled
+ */
+#define CFA_P70_ACT_VEB_RMP_MODE_BITPOS 6
+#define CFA_P70_ACT_VEB_RMP_MODE_NUM_BITS 1
+/**
+ * destination remap mode when enabled
+ */
+enum cfa_p70_act_veb_rmp_mode {
+ /* overwrite existing bitmap with entry */
+ CFA_P70_ACT_VEB_RMP_MODE_OVRWRT = 0,
+ /* OR the entry bitmap with the existing bitmap */
+ CFA_P70_ACT_VEB_RMP_MODE_ORTGTHR = 1,
+};
+
+/**
+ * enable remap the bitmap
+ */
+#define CFA_P70_ACT_VEB_RMP_ENABLE_BITPOS 5
+#define CFA_P70_ACT_VEB_RMP_ENABLE_NUM_BITS 1
+
+/**
+ * destination bitmap #CAS_SW_REF
+ * Action.CFA.VEB.Remap.tx.veb.remap.entry
+ */
+#define CFA_P70_ACT_VEB_RMP_BITMAP_BITPOS 0
+#define CFA_P70_ACT_VEB_RMP_BITMAP_NUM_BITS 5
+
+/**
+ * Total number of bits for ACT_VEB_RMP
+ */
+#define CFA_P70_ACT_VEB_RMP_TOTAL_NUM_BITS 7
+
+/**
+ * Range low
+ */
+#define CFA_P70_LKUP_FRC_RANGE_RANGE_LO_BITPOS 16
+#define CFA_P70_LKUP_FRC_RANGE_RANGE_LO_NUM_BITS 16
+
+/**
+ * Range high Field matches range when in [range_lo, range_hi]
+ * (inclusive). A read/write to this register causes a read/write to the
+ * LKUP_FRC_RANGE memory at address LKUP_FRC_RANGE_ADDR.
+ */
+#define CFA_P70_LKUP_FRC_RANGE_RANGE_HI_BITPOS 0
+#define CFA_P70_LKUP_FRC_RANGE_RANGE_HI_NUM_BITS 16
+
+/**
+ * Total number of bits for LKUP_FRC_RANGE
+ */
+#define CFA_P70_LKUP_FRC_RANGE_TOTAL_NUM_BITS 32
+
+/**
+ * TCAM entry is valid (for idx 7 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_VALID_BITPOS 255
+#define CFA_P70_PROF_L2_CTXT_TCAM_VALID_NUM_BITS 1
+
+/**
+ * spare bits (for idx 7 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_SPARE_BITPOS 253
+#define CFA_P70_PROF_L2_CTXT_TCAM_SPARE_NUM_BITS 2
+
+/**
+ * Multi-pass cycle count (for idx 7 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_MPASS_CNT_BITPOS 251
+#define CFA_P70_PROF_L2_CTXT_TCAM_MPASS_CNT_NUM_BITS 2
+
+/**
+ * Recycle count from prof_in (for idx 7 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_RCYC_BITPOS 247
+#define CFA_P70_PROF_L2_CTXT_TCAM_RCYC_NUM_BITS 4
+
+/**
+ * loopback input from prof_in (for idx 7 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_LOOPBACK_BITPOS 246
+#define CFA_P70_PROF_L2_CTXT_TCAM_LOOPBACK_NUM_BITS 1
+
+/**
+ * Source network port from prof_in (for idx 7 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_SPIF_BITPOS 244
+#define CFA_P70_PROF_L2_CTXT_TCAM_SPIF_NUM_BITS 2
+
+/**
+ * Partition provided by input block (for idx 7 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_PARIF_BITPOS 239
+#define CFA_P70_PROF_L2_CTXT_TCAM_PARIF_NUM_BITS 5
+
+/**
+ * Source network port or vnic (for idx 7 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_SVIF_BITPOS 228
+#define CFA_P70_PROF_L2_CTXT_TCAM_SVIF_NUM_BITS 11
+
+/**
+ * Metadata provided by Input block
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_METADATA_BITPOS 196
+#define CFA_P70_PROF_L2_CTXT_TCAM_METADATA_NUM_BITS 32
+
+/**
+ * L2 function
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_L2_FUNC_BITPOS 188
+#define CFA_P70_PROF_L2_CTXT_TCAM_L2_FUNC_NUM_BITS 8
+
+/**
+ * ROCE Packet detected by the Parser (for idx 5 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_ROCE_BITPOS 187
+#define CFA_P70_PROF_L2_CTXT_TCAM_ROCE_NUM_BITS 1
+
+/**
+ * Pure LLC Packet detected by the Parser. (for idx 5 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_PURE_LLC_BITPOS 186
+#define CFA_P70_PROF_L2_CTXT_TCAM_PURE_LLC_NUM_BITS 1
+
+/**
+ * 5b enc Outer Tunnel Type (for idx 5 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_OT_HDR_TYPE_BITPOS 181
+#define CFA_P70_PROF_L2_CTXT_TCAM_OT_HDR_TYPE_NUM_BITS 5
+
+/**
+ * 5b enc Tunnel Type. The id_ctxt field is tunnel id or tunnel context
+ * selected from outer tunnel header or tunnel header. (for idx 5 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_T_HDR_TYPE_BITPOS 176
+#define CFA_P70_PROF_L2_CTXT_TCAM_T_HDR_TYPE_NUM_BITS 5
+
+/**
+ * FLDS Tunnel Status ID or Context. Each of these fields are from the
+ * selected outer tunnel, tunnel, inner, or outermost L2 header
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_ID_CTXT_BITPOS 144
+#define CFA_P70_PROF_L2_CTXT_TCAM_ID_CTXT_NUM_BITS 32
+
+/**
+ * Selected DMAC/SMAC
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_MAC0_BITPOS 96
+#define CFA_P70_PROF_L2_CTXT_TCAM_MAC0_NUM_BITS 48
+
+/**
+ * Selected DMAC/SMAC
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_MAC1_BITPOS 48
+#define CFA_P70_PROF_L2_CTXT_TCAM_MAC1_NUM_BITS 48
+
+/**
+ * 1+ VLAN tags present (for idx 1 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_VTAG_PRESENT_BITPOS 47
+#define CFA_P70_PROF_L2_CTXT_TCAM_VTAG_PRESENT_NUM_BITS 1
+
+/**
+ * 2 VLAN tags present (for idx 1 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_TWO_VTAGS_BITPOS 46
+#define CFA_P70_PROF_L2_CTXT_TCAM_TWO_VTAGS_NUM_BITS 1
+
+/**
+ * Outer VLAN VID (for idx 1 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_OVLAN_VID_BITPOS 34
+#define CFA_P70_PROF_L2_CTXT_TCAM_OVLAN_VID_NUM_BITS 12
+
+/**
+ * Outer VLAN TPID, 3b encoded
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_OVLAN_TPID_SEL_BITPOS 31
+#define CFA_P70_PROF_L2_CTXT_TCAM_OVLAN_TPID_SEL_NUM_BITS 3
+
+/**
+ * Inner VLAN VID
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_IVLAN_VID_BITPOS 19
+#define CFA_P70_PROF_L2_CTXT_TCAM_IVLAN_VID_NUM_BITS 12
+
+/**
+ * Inner VLAN TPID, 3b encoded
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_IVLAN_TPID_SEL_BITPOS 16
+#define CFA_P70_PROF_L2_CTXT_TCAM_IVLAN_TPID_SEL_NUM_BITS 3
+
+/**
+ * Ethertype. #CAS_SW_REF Profiler.l2ip.context.tcam.key #CAS_SW_REF
+ * Profiler.l2ip.context.ipv6.tcam.key
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_ETYPE_BITPOS 0
+#define CFA_P70_PROF_L2_CTXT_TCAM_ETYPE_NUM_BITS 16
+
+/**
+ * Total number of bits for PROF_L2_CTXT_TCAM
+ */
+#define CFA_P70_PROF_L2_CTXT_TCAM_TOTAL_NUM_BITS 256
+
+/**
+ * Valid(1)/Invalid(0) TCAM entry. (for idx 5 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_VALID_BITPOS 183
+#define CFA_P70_PROF_PROFILE_TCAM_VALID_NUM_BITS 1
+
+/**
+ * spare bits. (for idx 5 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_SPARE_BITPOS 181
+#define CFA_P70_PROF_PROFILE_TCAM_SPARE_NUM_BITS 2
+
+/**
+ * Loopback bit. (for idx 5 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_LOOPBACK_BITPOS 180
+#define CFA_P70_PROF_PROFILE_TCAM_LOOPBACK_NUM_BITS 1
+
+/**
+ * Packet type directly from prof_in (for idx 5 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_PKT_TYPE_BITPOS 176
+#define CFA_P70_PROF_PROFILE_TCAM_PKT_TYPE_NUM_BITS 4
+
+/**
+ * Recycle count from prof_in (for idx 5 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_RCYC_BITPOS 172
+#define CFA_P70_PROF_PROFILE_TCAM_RCYC_NUM_BITS 4
+
+/**
+ * From L2 Context Lookup stage.
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_METADATA_BITPOS 140
+#define CFA_P70_PROF_PROFILE_TCAM_METADATA_NUM_BITS 32
+
+/**
+ * Aggregate error flag from Input stage. (for idx 4 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_AGG_ERROR_BITPOS 139
+#define CFA_P70_PROF_PROFILE_TCAM_AGG_ERROR_NUM_BITS 1
+
+/**
+ * L2 function (for idx 4 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L2_FUNC_BITPOS 131
+#define CFA_P70_PROF_PROFILE_TCAM_L2_FUNC_NUM_BITS 8
+
+/**
+ * Profile function from L2 Context Lookup stage.
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_PROF_FUNC_BITPOS 123
+#define CFA_P70_PROF_PROFILE_TCAM_PROF_FUNC_NUM_BITS 8
+
+/**
+ * From FLDS Input General Status tunnel(1)/no tunnel(0) (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_HREC_NEXT_BITPOS 121
+#define CFA_P70_PROF_PROFILE_TCAM_HREC_NEXT_NUM_BITS 2
+
+/**
+ * INT header type. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_INT_HDR_TYPE_BITPOS 119
+#define CFA_P70_PROF_PROFILE_TCAM_INT_HDR_TYPE_NUM_BITS 2
+
+/**
+ * INT header group. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_INT_HDR_GROUP_BITPOS 117
+#define CFA_P70_PROF_PROFILE_TCAM_INT_HDR_GROUP_NUM_BITS 2
+
+/**
+ * INT metadata is tail stamp. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_INT_IFA_TAIL_BITPOS 116
+#define CFA_P70_PROF_PROFILE_TCAM_INT_IFA_TAIL_NUM_BITS 1
+
+/**
+ * resolved flds_otl2_hdr_valid. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OTL2_HDR_VALID_BITPOS 115
+#define CFA_P70_PROF_PROFILE_TCAM_OTL2_HDR_VALID_NUM_BITS 1
+
+/**
+ * Outer Tunnel L2 header type. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OTL2_HDR_TYPE_BITPOS 113
+#define CFA_P70_PROF_PROFILE_TCAM_OTL2_HDR_TYPE_NUM_BITS 2
+
+/**
+ * flds_otl2_dst_type remapped: UC(0)/MC(2)/BC(3) (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OTL2_UC_MC_BC_BITPOS 111
+#define CFA_P70_PROF_PROFILE_TCAM_OTL2_UC_MC_BC_NUM_BITS 2
+
+/**
+ * 1+ VLAN tags present in Outer Tunnel L2 header (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OTL2_VTAG_PRESENT_BITPOS 110
+#define CFA_P70_PROF_PROFILE_TCAM_OTL2_VTAG_PRESENT_NUM_BITS 1
+
+/**
+ * 2 VLAN tags present in Outer Tunnel L2 header (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OTL2_TWO_VTAGS_BITPOS 109
+#define CFA_P70_PROF_PROFILE_TCAM_OTL2_TWO_VTAGS_NUM_BITS 1
+
+/**
+ * resolved flds_otl3_hdr_valid. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OTL3_HDR_VALID_BITPOS 108
+#define CFA_P70_PROF_PROFILE_TCAM_OTL3_HDR_VALID_NUM_BITS 1
+
+/**
+ * flds_otl3_hdr_valid is stop_w_error. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OTL3_HDR_ERROR_BITPOS 107
+#define CFA_P70_PROF_PROFILE_TCAM_OTL3_HDR_ERROR_NUM_BITS 1
+
+/**
+ * Outer Tunnel L3 header type directly from FLDS. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OTL3_HDR_TYPE_BITPOS 103
+#define CFA_P70_PROF_PROFILE_TCAM_OTL3_HDR_TYPE_NUM_BITS 4
+
+/**
+ * Outer Tunnel L3 header is IPV4 or IPV6. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OTL3_HDR_ISIP_BITPOS 102
+#define CFA_P70_PROF_PROFILE_TCAM_OTL3_HDR_ISIP_NUM_BITS 1
+
+/**
+ * resolved flds_otl4_hdr_valid. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OTL4_HDR_VALID_BITPOS 101
+#define CFA_P70_PROF_PROFILE_TCAM_OTL4_HDR_VALID_NUM_BITS 1
+
+/**
+ * flds_otl4_hdr_valid is stop_w_error. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OTL4_HDR_ERROR_BITPOS 100
+#define CFA_P70_PROF_PROFILE_TCAM_OTL4_HDR_ERROR_NUM_BITS 1
+
+/**
+ * Outer Tunnel L4 header type. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OTL4_HDR_TYPE_BITPOS 96
+#define CFA_P70_PROF_PROFILE_TCAM_OTL4_HDR_TYPE_NUM_BITS 4
+
+/**
+ * OTL4 header is UDP or TCP. (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OTL4_HDR_IS_UDP_TCP_BITPOS 95
+#define CFA_P70_PROF_PROFILE_TCAM_OTL4_HDR_IS_UDP_TCP_NUM_BITS 1
+
+/**
+ * resolved flds_ot_hdr_valid. (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OT_HDR_VALID_BITPOS 94
+#define CFA_P70_PROF_PROFILE_TCAM_OT_HDR_VALID_NUM_BITS 1
+
+/**
+ * flds_ot_hdr_valid is stop_w_error. (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OT_HDR_ERROR_BITPOS 93
+#define CFA_P70_PROF_PROFILE_TCAM_OT_HDR_ERROR_NUM_BITS 1
+
+/**
+ * Outer Tunnel header type. (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OT_HDR_TYPE_BITPOS 88
+#define CFA_P70_PROF_PROFILE_TCAM_OT_HDR_TYPE_NUM_BITS 5
+
+/**
+ * Outer Tunnel header flags. (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_OT_HDR_FLAGS_BITPOS 80
+#define CFA_P70_PROF_PROFILE_TCAM_OT_HDR_FLAGS_NUM_BITS 8
+
+/**
+ * resolved flds_tl2_hdr_valid. (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TL2_HDR_VALID_BITPOS 79
+#define CFA_P70_PROF_PROFILE_TCAM_TL2_HDR_VALID_NUM_BITS 1
+
+/**
+ * Tunnel L2 header type directly from FLDS. (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TL2_HDR_TYPE_BITPOS 77
+#define CFA_P70_PROF_PROFILE_TCAM_TL2_HDR_TYPE_NUM_BITS 2
+
+/**
+ * flds_tl2_dst_type remapped: UC(0)/MC(2)/BC(3) (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TL2_UC_MC_BC_BITPOS 75
+#define CFA_P70_PROF_PROFILE_TCAM_TL2_UC_MC_BC_NUM_BITS 2
+
+/**
+ * 1+ VLAN tags present in Tunnel L2 header (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_BITPOS 74
+#define CFA_P70_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_NUM_BITS 1
+
+/**
+ * 2 VLAN tags present in Tunnel L2 header (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_BITPOS 73
+#define CFA_P70_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_NUM_BITS 1
+
+/**
+ * resolved flds_tl3_hdr_valid. (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TL3_HDR_VALID_BITPOS 72
+#define CFA_P70_PROF_PROFILE_TCAM_TL3_HDR_VALID_NUM_BITS 1
+
+/**
+ * flds_tl3_hdr_valid is stop_w_error. (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TL3_HDR_ERROR_BITPOS 71
+#define CFA_P70_PROF_PROFILE_TCAM_TL3_HDR_ERROR_NUM_BITS 1
+
+/**
+ * Tunnel L3 header type directly from FLDS. (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TL3_HDR_TYPE_BITPOS 67
+#define CFA_P70_PROF_PROFILE_TCAM_TL3_HDR_TYPE_NUM_BITS 4
+
+/**
+ * Tunnel L3 header is IPV4 or IPV6. (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TL3_HDR_ISIP_BITPOS 66
+#define CFA_P70_PROF_PROFILE_TCAM_TL3_HDR_ISIP_NUM_BITS 1
+
+/**
+ * resolved flds_tl4_hdr_valid. (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TL4_HDR_VALID_BITPOS 65
+#define CFA_P70_PROF_PROFILE_TCAM_TL4_HDR_VALID_NUM_BITS 1
+
+/**
+ * flds_tl4_hdr_valid is stop_w_error. (for idx 2 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TL4_HDR_ERROR_BITPOS 64
+#define CFA_P70_PROF_PROFILE_TCAM_TL4_HDR_ERROR_NUM_BITS 1
+
+/**
+ * Tunnel L4 header type directly from FLDS. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TL4_HDR_TYPE_BITPOS 60
+#define CFA_P70_PROF_PROFILE_TCAM_TL4_HDR_TYPE_NUM_BITS 4
+
+/**
+ * TL4 header is UDP or TCP. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_BITPOS 59
+#define CFA_P70_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_NUM_BITS 1
+
+/**
+ * resolved flds_tun_hdr_valid. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TUN_HDR_VALID_BITPOS 58
+#define CFA_P70_PROF_PROFILE_TCAM_TUN_HDR_VALID_NUM_BITS 1
+
+/**
+ * flds_tun_hdr_valid is stop_w_error. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TUN_HDR_ERROR_BITPOS 57
+#define CFA_P70_PROF_PROFILE_TCAM_TUN_HDR_ERROR_NUM_BITS 1
+
+/**
+ * Tunnel header type directly from FLDS. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TUN_HDR_TYPE_BITPOS 52
+#define CFA_P70_PROF_PROFILE_TCAM_TUN_HDR_TYPE_NUM_BITS 5
+
+/**
+ * Tunnel header flags directly from FLDS. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_BITPOS 44
+#define CFA_P70_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_NUM_BITS 8
+
+/**
+ * resolved flds_l2_hdr_valid. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L2_HDR_VALID_BITPOS 43
+#define CFA_P70_PROF_PROFILE_TCAM_L2_HDR_VALID_NUM_BITS 1
+
+/**
+ * flds_l2_hdr_valid is stop_w_error. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L2_HDR_ERROR_BITPOS 42
+#define CFA_P70_PROF_PROFILE_TCAM_L2_HDR_ERROR_NUM_BITS 1
+
+/**
+ * L2 header type directly from FLDS. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L2_HDR_TYPE_BITPOS 40
+#define CFA_P70_PROF_PROFILE_TCAM_L2_HDR_TYPE_NUM_BITS 2
+
+/**
+ * flds_l2_dst_type remapped: UC(0)/MC(2)/BC(3). (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L2_UC_MC_BC_BITPOS 38
+#define CFA_P70_PROF_PROFILE_TCAM_L2_UC_MC_BC_NUM_BITS 2
+
+/**
+ * 1+ VLAN tags present in inner L2 header. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_BITPOS 37
+#define CFA_P70_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_NUM_BITS 1
+
+/**
+ * 2 VLAN tags present in inner L2 header. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L2_TWO_VTAGS_BITPOS 36
+#define CFA_P70_PROF_PROFILE_TCAM_L2_TWO_VTAGS_NUM_BITS 1
+
+/**
+ * resolved flds_l3_hdr_valid. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L3_HDR_VALID_BITPOS 35
+#define CFA_P70_PROF_PROFILE_TCAM_L3_HDR_VALID_NUM_BITS 1
+
+/**
+ * flds_l3_hdr_valid is stop_w_error. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L3_HDR_ERROR_BITPOS 34
+#define CFA_P70_PROF_PROFILE_TCAM_L3_HDR_ERROR_NUM_BITS 1
+
+/**
+ * L3 header type directly from FLDS.
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L3_HDR_TYPE_BITPOS 30
+#define CFA_P70_PROF_PROFILE_TCAM_L3_HDR_TYPE_NUM_BITS 4
+
+/**
+ * L3 header is IPV4 or IPV6.
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L3_HDR_ISIP_BITPOS 29
+#define CFA_P70_PROF_PROFILE_TCAM_L3_HDR_ISIP_NUM_BITS 1
+
+/**
+ * L3 header next protocol directly from FLDS.
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L3_PROT_BITPOS 21
+#define CFA_P70_PROF_PROFILE_TCAM_L3_PROT_NUM_BITS 8
+
+/**
+ * resolved flds_l4_hdr_valid.
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L4_HDR_VALID_BITPOS 20
+#define CFA_P70_PROF_PROFILE_TCAM_L4_HDR_VALID_NUM_BITS 1
+
+/**
+ * flds_l4_hdr_valid is stop_w_error.
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L4_HDR_ERROR_BITPOS 19
+#define CFA_P70_PROF_PROFILE_TCAM_L4_HDR_ERROR_NUM_BITS 1
+
+/**
+ * L4 header type directly from FLDS.
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L4_HDR_TYPE_BITPOS 15
+#define CFA_P70_PROF_PROFILE_TCAM_L4_HDR_TYPE_NUM_BITS 4
+
+/**
+ * L4 header is UDP or TCP.
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_BITPOS 14
+#define CFA_P70_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_NUM_BITS 1
+
+/**
+ * L4 header subtype directly from FLDS.
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L4_HDR_SUBTYPE_BITPOS 11
+#define CFA_P70_PROF_PROFILE_TCAM_L4_HDR_SUBTYPE_NUM_BITS 3
+
+/**
+ * L4 header flags directly from FLDS.
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L4_HDR_FLAGS_BITPOS 2
+#define CFA_P70_PROF_PROFILE_TCAM_L4_HDR_FLAGS_NUM_BITS 9
+
+/**
+ * DCN present bits directly from FLDS. #CAS_SW_REF
+ * Profiler.profile.lookup.tcam.key
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_L4_DCN_PRESENT_BITPOS 0
+#define CFA_P70_PROF_PROFILE_TCAM_L4_DCN_PRESENT_NUM_BITS 2
+
+/**
+ * Total number of bits for PROF_PROFILE_TCAM
+ */
+#define CFA_P70_PROF_PROFILE_TCAM_TOTAL_NUM_BITS 184
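+
+/**
+ * Illustrative sketch only (not part of the generated field layout): one way
+ * to drive the *_BITPOS/*_NUM_BITS pairs above when building a key image.
+ * Assumes the key is held as a little-endian bit vector in a byte array and
+ * that fixed-width integer types are available; the helper name is a
+ * hypothetical example, not an existing API.
+ */
+static inline void
+cfa_p70_key_put(uint8_t *key, uint16_t bitpos, uint16_t num_bits, uint64_t val)
+{
+	uint16_t i;
+
+	for (i = 0; i < num_bits; i++) {
+		uint16_t bit = bitpos + i;
+
+		if ((val >> i) & 1)
+			key[bit / 8] |= (uint8_t)(1 << (bit % 8));
+		else
+			key[bit / 8] &= (uint8_t)~(1 << (bit % 8));
+	}
+}
+
+/*
+ * Usage example: mark the inner L3 header as IP in a PROF_PROFILE_TCAM key:
+ * cfa_p70_key_put(key, CFA_P70_PROF_PROFILE_TCAM_L3_HDR_ISIP_BITPOS,
+ *		   CFA_P70_PROF_PROFILE_TCAM_L3_HDR_ISIP_NUM_BITS, 1);
+ */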
+
+/**
+ * Valid entry (for idx 2 ...)
+ */
+#define CFA_P70_ACT_VEB_TCAM_TX_VALID_BITPOS 79
+#define CFA_P70_ACT_VEB_TCAM_TX_VALID_NUM_BITS 1
+
+/**
+ * PF Parif Number (for idx 2 ...)
+ */
+#define CFA_P70_ACT_VEB_TCAM_TX_PARIF_IN_BITPOS 74
+#define CFA_P70_ACT_VEB_TCAM_TX_PARIF_IN_NUM_BITS 5
+
+/**
+ * Number of VLAN Tags. (for idx 2 ...)
+ */
+#define CFA_P70_ACT_VEB_TCAM_TX_NUM_VTAGS_BITPOS 72
+#define CFA_P70_ACT_VEB_TCAM_TX_NUM_VTAGS_NUM_BITS 2
+
+/**
+ * Dest. MAC Address
+ */
+#define CFA_P70_ACT_VEB_TCAM_TX_DMAC_BITPOS 24
+#define CFA_P70_ACT_VEB_TCAM_TX_DMAC_NUM_BITS 48
+
+/**
+ * Outer VLAN Tag ID
+ */
+#define CFA_P70_ACT_VEB_TCAM_TX_OVID_BITPOS 12
+#define CFA_P70_ACT_VEB_TCAM_TX_OVID_NUM_BITS 12
+
+/**
+ * Inner VLAN Tag ID #CAS_SW_REF Action.CFA.VEB.TCAM.tx.veb.tcam.entry
+ */
+#define CFA_P70_ACT_VEB_TCAM_TX_IVID_BITPOS 0
+#define CFA_P70_ACT_VEB_TCAM_TX_IVID_NUM_BITS 12
+
+/**
+ * Total number of bits for ACT_VEB_TCAM_TX
+ */
+#define CFA_P70_ACT_VEB_TCAM_TX_TOTAL_NUM_BITS 80
+
+/**
+ * Valid entry (for idx 2 ...)
+ */
+#define CFA_P70_ACT_VEB_TCAM_RX_VALID_BITPOS 79
+#define CFA_P70_ACT_VEB_TCAM_RX_VALID_NUM_BITS 1
+
+/**
+ * spare (for idx 2 ...)
+ */
+#define CFA_P70_ACT_VEB_TCAM_RX_SPARE_BITPOS 78
+#define CFA_P70_ACT_VEB_TCAM_RX_SPARE_NUM_BITS 1
+
+/**
+ * program to zero (for idx 2 ...)
+ */
+#define CFA_P70_ACT_VEB_TCAM_RX_PADDING_BITPOS 68
+#define CFA_P70_ACT_VEB_TCAM_RX_PADDING_NUM_BITS 10
+
+/**
+ * DMAC is unicast address (for idx 2 ...)
+ */
+#define CFA_P70_ACT_VEB_TCAM_RX_UNICAST_BITPOS 67
+#define CFA_P70_ACT_VEB_TCAM_RX_UNICAST_NUM_BITS 1
+
+/**
+ * DMAC is multicast address (for idx 2 ...)
+ */
+#define CFA_P70_ACT_VEB_TCAM_RX_MULTICAST_BITPOS 66
+#define CFA_P70_ACT_VEB_TCAM_RX_MULTICAST_NUM_BITS 1
+
+/**
+ * DMAC is broadcast address (for idx 2 ...)
+ */
+#define CFA_P70_ACT_VEB_TCAM_RX_BROADCAST_BITPOS 65
+#define CFA_P70_ACT_VEB_TCAM_RX_BROADCAST_NUM_BITS 1
+
+/**
+ * pfid
+ */
+#define CFA_P70_ACT_VEB_TCAM_RX_PFID_BITPOS 60
+#define CFA_P70_ACT_VEB_TCAM_RX_PFID_NUM_BITS 5
+
+/**
+ * vfid (for idx 1 ...)
+ */
+#define CFA_P70_ACT_VEB_TCAM_RX_VFID_BITPOS 48
+#define CFA_P70_ACT_VEB_TCAM_RX_VFID_NUM_BITS 12
+
+/**
+ * source mac #CAS_SW_REF Action.CFA.VEB.TCAM.rx.veb.tcam.entry
+ */
+#define CFA_P70_ACT_VEB_TCAM_RX_SMAC_BITPOS 0
+#define CFA_P70_ACT_VEB_TCAM_RX_SMAC_NUM_BITS 48
+
+/**
+ * Total number of bits for ACT_VEB_TCAM_RX
+ */
+#define CFA_P70_ACT_VEB_TCAM_RX_TOTAL_NUM_BITS 80
+
+/**
+ * Valid entry (for idx 1 ...)
+ */
+#define CFA_P70_ACT_FC_TCAM_FC_VALID_BITPOS 33
+#define CFA_P70_ACT_FC_TCAM_FC_VALID_NUM_BITS 1
+
+/**
+ * Reserved (for idx 1 ...)
+ */
+#define CFA_P70_ACT_FC_TCAM_FC_RSVD_BITPOS 32
+#define CFA_P70_ACT_FC_TCAM_FC_RSVD_NUM_BITS 1
+
+/**
+ * Updated metadata. #CAS_SW_REF Action.CFA.FC.TCAM.fc.tcam.meta.entry
+ * #CAS_SW_REF Action.CFA.FC.TCAM.fc.tcam.l2ip.func.entry #CAS_SW_REF
+ * Action.CFA.FC.TCAM.fc.tcam.l2.ctxt.entry #CAS_SW_REF
+ * Action.CFA.FC.TCAM.fc.tcam.l2ipf.ctxt.entry
+ */
+#define CFA_P70_ACT_FC_TCAM_FC_METADATA_BITPOS 0
+#define CFA_P70_ACT_FC_TCAM_FC_METADATA_NUM_BITS 32
+
+/**
+ * Total number of bits for ACT_FC_TCAM
+ */
+#define CFA_P70_ACT_FC_TCAM_TOTAL_NUM_BITS 34
+
+/**
+ * New metadata.
+ */
+#define CFA_P70_ACT_FC_RMP_DR_METADATA_BITPOS 40
+#define CFA_P70_ACT_FC_RMP_DR_METADATA_NUM_BITS 32
+
+/**
+ * Metadata merge control mask.
+ */
+#define CFA_P70_ACT_FC_RMP_DR_METAMASK_BITPOS 8
+#define CFA_P70_ACT_FC_RMP_DR_METAMASK_NUM_BITS 32
+
+/**
+ * New L2 function. #CAS_SW_REF Action.CFA.FC.Remap.fc.remap.entry
+ */
+#define CFA_P70_ACT_FC_RMP_DR_L2_FUNC_BITPOS 0
+#define CFA_P70_ACT_FC_RMP_DR_L2_FUNC_NUM_BITS 8
+
+/**
+ * Total number of bits for ACT_FC_RMP_DR
+ */
+#define CFA_P70_ACT_FC_RMP_DR_TOTAL_NUM_BITS 72
+
+/**
+ * enables ilt metadata (for idx 3 ...)
+ */
+#define CFA_P70_PROF_ILT_DR_ILT_META_EN_BITPOS 104
+#define CFA_P70_PROF_ILT_DR_ILT_META_EN_NUM_BITS 1
+
+/**
+ * meta profile register index (for idx 3 ...)
+ */
+#define CFA_P70_PROF_ILT_DR_META_PROF_BITPOS 101
+#define CFA_P70_PROF_ILT_DR_META_PROF_NUM_BITS 3
+
+/**
+ * ilt metadata, used when ilt_meta_en is set
+ */
+#define CFA_P70_PROF_ILT_DR_METADATA_BITPOS 69
+#define CFA_P70_PROF_ILT_DR_METADATA_NUM_BITS 32
+
+/**
+ * Partition (for idx 2 ...)
+ */
+#define CFA_P70_PROF_ILT_DR_PARIF_BITPOS 64
+#define CFA_P70_PROF_ILT_DR_PARIF_NUM_BITS 5
+
+/**
+ * L2 function (for idx 1 ...)
+ */
+#define CFA_P70_PROF_ILT_DR_L2_FUNC_BITPOS 56
+#define CFA_P70_PROF_ILT_DR_L2_FUNC_NUM_BITS 8
+
+/**
+ * When set cfa_meta opcode is allowed (for idx 1 ...)
+ */
+#define CFA_P70_PROF_ILT_DR_EN_BD_META_BITPOS 55
+#define CFA_P70_PROF_ILT_DR_EN_BD_META_NUM_BITS 1
+
+/**
+ * When set, act_rec_ptr is set to cfa_action if it is non-zero;
+ * otherwise act_rec_ptr is taken from this table. (for idx
+ * 1 ...)
+ */
+#define CFA_P70_PROF_ILT_DR_EN_BD_ACTION_BITPOS 54
+#define CFA_P70_PROF_ILT_DR_EN_BD_ACTION_NUM_BITS 1
+
+/**
+ * When set, the destination is taken from this table; otherwise it is
+ * set to est_dest. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_ILT_DR_EN_ILT_DEST_BITPOS 53
+#define CFA_P70_PROF_ILT_DR_EN_ILT_DEST_NUM_BITS 1
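+
+/**
+ * Illustrative sketch of the EN_BD_ACTION/EN_ILT_DEST selection described in
+ * the two comments above; the helper and its parameter names are hypothetical
+ * stand-ins for the per-packet cfa_action/est_dest inputs and the ILT fields.
+ */
+static inline void
+cfa_p70_ilt_resolve(int en_bd_action, uint32_t cfa_action,
+		    uint32_t tbl_act_rec_ptr, int en_ilt_dest,
+		    uint32_t tbl_destination, uint32_t est_dest,
+		    uint32_t *act_rec_ptr, uint32_t *destination)
+{
+	/* act_rec_ptr comes from cfa_action when enabled and non-zero. */
+	if (en_bd_action && cfa_action != 0)
+		*act_rec_ptr = cfa_action;
+	else
+		*act_rec_ptr = tbl_act_rec_ptr;
+
+	/* destination comes from this table when enabled, else est_dest. */
+	*destination = en_ilt_dest ? tbl_destination : est_dest;
+}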
+
+/**
+ * ILT opcode (for idx 1 ...)
+ */
+#define CFA_P70_PROF_ILT_DR_ILT_FWD_OP_BITPOS 50
+#define CFA_P70_PROF_ILT_DR_ILT_FWD_OP_NUM_BITS 3
+/**
+ * ILT opcode (for idx 1 ...)
+ */
+enum cfa_p70_prof_ilt_dr_ilt_fwd_op {
+ /* cfa is bypassed */
+ CFA_P70_PROF_ILT_DR_ILT_FWD_OP_BYPASS_CFA = 0,
+ /* cfa is bypassed if packet is ROCE */
+ CFA_P70_PROF_ILT_DR_ILT_FWD_OP_BYPASS_CFA_ROCE = 1,
+ /* profiler and lookup blocks are bypassed */
+ CFA_P70_PROF_ILT_DR_ILT_FWD_OP_BYPASS_LKUP = 2,
+ /* packet proceeds to L2 Context Stage */
+ CFA_P70_PROF_ILT_DR_ILT_FWD_OP_NORMAL_FLOW = 3,
+ /* mark packet for drop */
+ CFA_P70_PROF_ILT_DR_ILT_FWD_OP_DROP = 4,
+};
+
+/**
+ * action hint used with act_rec_ptr (for idx 1 ...)
+ */
+#define CFA_P70_PROF_ILT_DR_ILT_ACT_HINT_BITPOS 48
+#define CFA_P70_PROF_ILT_DR_ILT_ACT_HINT_NUM_BITS 2
+
+/**
+ * table scope used with act_rec_ptr (for idx 1 ...)
+ */
+#define CFA_P70_PROF_ILT_DR_ILT_SCOPE_BITPOS 43
+#define CFA_P70_PROF_ILT_DR_ILT_SCOPE_NUM_BITS 5
+
+/**
+ * Default act_rec_ptr or explicit on Lookup Bypass.
+ */
+#define CFA_P70_PROF_ILT_DR_ILT_ACT_REC_PTR_BITPOS 17
+#define CFA_P70_PROF_ILT_DR_ILT_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * used for destination #CAS_SW_REF Profiler.input.lookup.table.entry
+ */
+#define CFA_P70_PROF_ILT_DR_ILT_DESTINATION_BITPOS 0
+#define CFA_P70_PROF_ILT_DR_ILT_DESTINATION_NUM_BITS 17
+
+/**
+ * Total number of bits for PROF_ILT_DR
+ */
+#define CFA_P70_PROF_ILT_DR_TOTAL_NUM_BITS 105
+
+/**
+ * Normal operation. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_PL_BYP_LKUP_EN_BITPOS 42
+#define CFA_P70_PROF_PROFILE_RMP_DR_PL_BYP_LKUP_EN_NUM_BITS 1
+
+/**
+ * Enable search in EM database. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_EM_SEARCH_EN_BITPOS 41
+#define CFA_P70_PROF_PROFILE_RMP_DR_EM_SEARCH_EN_NUM_BITS 1
+
+/**
+ * ID to differentiate common EM keys. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_EM_PROFILE_ID_BITPOS 33
+#define CFA_P70_PROF_PROFILE_RMP_DR_EM_PROFILE_ID_NUM_BITS 8
+
+/**
+ * Exact match key template select.
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_EM_KEY_ID_BITPOS 26
+#define CFA_P70_PROF_PROFILE_RMP_DR_EM_KEY_ID_NUM_BITS 7
+
+/**
+ * Exact Match Lookup table scope.
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_EM_SCOPE_BITPOS 21
+#define CFA_P70_PROF_PROFILE_RMP_DR_EM_SCOPE_NUM_BITS 5
+
+/**
+ * Enable search in TCAM database.
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_TCAM_SEARCH_EN_BITPOS 20
+#define CFA_P70_PROF_PROFILE_RMP_DR_TCAM_SEARCH_EN_NUM_BITS 1
+
+/**
+ * ID to differentiate common TCAM keys.
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_TCAM_PROFILE_ID_BITPOS 12
+#define CFA_P70_PROF_PROFILE_RMP_DR_TCAM_PROFILE_ID_NUM_BITS 8
+
+/**
+ * TCAM key template select.
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_TCAM_KEY_ID_BITPOS 5
+#define CFA_P70_PROF_PROFILE_RMP_DR_TCAM_KEY_ID_NUM_BITS 7
+
+/**
+ * Wild-card TCAM Lookup table scope.
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_TCAM_SCOPE_BITPOS 0
+#define CFA_P70_PROF_PROFILE_RMP_DR_TCAM_SCOPE_NUM_BITS 5
+
+/**
+ * Total number of bits for PROF_PROFILE_RMP_DR
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_TOTAL_NUM_BITS 43
+
+/**
+ * Bypass operation. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_BYP_PL_BYP_LKUP_EN_BITPOS 42
+#define CFA_P70_PROF_PROFILE_RMP_DR_BYP_PL_BYP_LKUP_EN_NUM_BITS 1
+
+/**
+ * Reserved for future use. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_BYP_RESERVED_BITPOS 36
+#define CFA_P70_PROF_PROFILE_RMP_DR_BYP_RESERVED_NUM_BITS 6
+
+/**
+ * Bypass operations. (for idx 1 ...)
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_BYP_BYPASS_OP_BITPOS 33
+#define CFA_P70_PROF_PROFILE_RMP_DR_BYP_BYPASS_OP_NUM_BITS 3
+/**
+ * Bypass operations. (for idx 1 ...)
+ */
+enum cfa_p70_prof_profile_rmp_dr_byp_bypass_op {
+ /* cfa is bypassed */
+ CFA_P70_PROF_PROFILE_RMP_DR_BYP_BYPASS_OP_BYPASS_CFA = 0,
+	/* Bypass lookup, use act_record_ptr from this table. */
+	CFA_P70_PROF_PROFILE_RMP_DR_BYP_BYPASS_OP_BYPASS_LKUP = 1,
+	/* Bypass lookup, use the Partition Default Action Record Pointer Table. */
+	CFA_P70_PROF_PROFILE_RMP_DR_BYP_BYPASS_OP_BYPASS_DEFAULT = 2,
+	/* Bypass lookup, use the Partition Error Action Record Pointer Table. */
+ CFA_P70_PROF_PROFILE_RMP_DR_BYP_BYPASS_OP_BYPASS_ERROR = 3,
+ /* set the drop flag. */
+ CFA_P70_PROF_PROFILE_RMP_DR_BYP_BYPASS_OP_DROP = 4,
+};
+
+/**
+ * action hint used with plact_rec_ptr
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_BYP_PL_ACT_HINT_BITPOS 31
+#define CFA_P70_PROF_PROFILE_RMP_DR_BYP_PL_ACT_HINT_NUM_BITS 2
+
+/**
+ * table scope used with pl_act_rec_ptr
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_BYP_PL_SCOPE_BITPOS 26
+#define CFA_P70_PROF_PROFILE_RMP_DR_BYP_PL_SCOPE_NUM_BITS 5
+
+/**
+ * Used for BYPASS_LKUP. #CAS_SW_REF Profiler.profile.remap.entry.build
+ * #CAS_SW_REF Profiler.profile.remap.entry.bypass.cfa #CAS_SW_REF
+ * Profiler.profile.remap.entry.bypass.lkup #CAS_SW_REF
+ * Profiler.profile.remap.entry.other
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_BYP_PL_ACT_REC_PTR_BITPOS 0
+#define CFA_P70_PROF_PROFILE_RMP_DR_BYP_PL_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * Total number of bits for PROF_PROFILE_RMP_DR_BYP
+ */
+#define CFA_P70_PROF_PROFILE_RMP_DR_BYP_TOTAL_NUM_BITS 43
+
+/**
+ * VLAN TPID anti-spoofing control.
+ */
+#define CFA_P70_ACT_VSPT_DR_TX_TPID_AS_CTL_BITPOS 29
+#define CFA_P70_ACT_VSPT_DR_TX_TPID_AS_CTL_NUM_BITS 2
+/**
+ * VLAN TPID anti-spoofing control.
+ */
+enum cfa_p70_act_vspt_dr_tx_tpid_as_ctl {
+ CFA_P70_ACT_VSPT_DR_TX_TPID_IGNORE = 0,
+ CFA_P70_ACT_VSPT_DR_TX_TPID_DEFAULT = 1,
+ CFA_P70_ACT_VSPT_DR_TX_TPID_DROP = 2,
+};
+
+/**
+ * VLAN allowed TPID bit map.
+ */
+#define CFA_P70_ACT_VSPT_DR_TX_ALWD_TPID_BITPOS 21
+#define CFA_P70_ACT_VSPT_DR_TX_ALWD_TPID_NUM_BITS 8
+
+/**
+ * VLAN encoded default TPID.
+ */
+#define CFA_P70_ACT_VSPT_DR_TX_DFLT_TPID_BITPOS 18
+#define CFA_P70_ACT_VSPT_DR_TX_DFLT_TPID_NUM_BITS 3
+
+/**
+ * VLAN PRIority anti-spoofing control.
+ */
+#define CFA_P70_ACT_VSPT_DR_TX_PRI_AS_CTL_BITPOS 16
+#define CFA_P70_ACT_VSPT_DR_TX_PRI_AS_CTL_NUM_BITS 2
+/**
+ * VLAN PRIority anti-spoofing control.
+ */
+enum cfa_p70_act_vspt_dr_tx_pri_as_ctl {
+ CFA_P70_ACT_VSPT_DR_TX_PRI_IGNORE = 0,
+ CFA_P70_ACT_VSPT_DR_TX_PRI_DEFAULT = 1,
+ CFA_P70_ACT_VSPT_DR_TX_PRI_DROP = 2,
+};
+
+/**
+ * VLAN allowed PRIority bit map.
+ */
+#define CFA_P70_ACT_VSPT_DR_TX_ALWD_PRI_BITPOS 8
+#define CFA_P70_ACT_VSPT_DR_TX_ALWD_PRI_NUM_BITS 8
+
+/**
+ * VLAN default PRIority.
+ */
+#define CFA_P70_ACT_VSPT_DR_TX_DFLT_PRI_BITPOS 5
+#define CFA_P70_ACT_VSPT_DR_TX_DFLT_PRI_NUM_BITS 3
+
+/**
+ * Mirror destination (1..31) or 5'h0=NO_MIRROR #CAS_SW_REF
+ * Action.CFA.DEST.SVIF.Property.Tables.tx.svif.property.entry
+ */
+#define CFA_P70_ACT_VSPT_DR_TX_MIR_BITPOS 0
+#define CFA_P70_ACT_VSPT_DR_TX_MIR_NUM_BITS 5
+
+/**
+ * Total number of bits for ACT_VSPT_DR_TX
+ */
+#define CFA_P70_ACT_VSPT_DR_TX_TOTAL_NUM_BITS 31
+
+/**
+ * Reserved for future use.
+ */
+#define CFA_P70_ACT_VSPT_DR_RX_RSVD_BITPOS 24
+#define CFA_P70_ACT_VSPT_DR_RX_RSVD_NUM_BITS 7
+
+/**
+ * Output metadata format select.
+ */
+#define CFA_P70_ACT_VSPT_DR_RX_METAFMT_BITPOS 22
+#define CFA_P70_ACT_VSPT_DR_RX_METAFMT_NUM_BITS 2
+/**
+ * Output metadata format select.
+ */
+enum cfa_p70_act_vspt_dr_rx_metafmt {
+ CFA_P70_ACT_VSPT_DR_RX_METAFMT_ACT_REC_PTR = 0,
+ CFA_P70_ACT_VSPT_DR_RX_METAFMT_TUNNEL_ID = 1,
+ CFA_P70_ACT_VSPT_DR_RX_METAFMT_CSTM_HDR_DATA = 2,
+ CFA_P70_ACT_VSPT_DR_RX_METAFMT_HDR_OFFSETS = 3,
+};
+
+/**
+ * Function ID: 4 bit PF and 12 bit VID (VNIC ID)
+ */
+#define CFA_P70_ACT_VSPT_DR_RX_FID_BITPOS 5
+#define CFA_P70_ACT_VSPT_DR_RX_FID_NUM_BITS 17
+
+/**
+ * Mirror destination (1..31) or 5'h0=NO_MIRROR #CAS_SW_REF
+ * Action.CFA.DEST.SVIF.Property.Tables.rx.destination.property.entry
+ */
+#define CFA_P70_ACT_VSPT_DR_RX_MIR_BITPOS 0
+#define CFA_P70_ACT_VSPT_DR_RX_MIR_NUM_BITS 5
+
+/**
+ * Total number of bits for ACT_VSPT_DR_RX
+ */
+#define CFA_P70_ACT_VSPT_DR_RX_TOTAL_NUM_BITS 31
+
+/**
+ * LAG destination bit map.
+ */
+#define CFA_P70_ACT_LBT_DR_DST_BMP_BITPOS 0
+#define CFA_P70_ACT_LBT_DR_DST_BMP_NUM_BITS 5
+
+/**
+ * Total number of bits for ACT_LBT_DR
+ */
+#define CFA_P70_ACT_LBT_DR_TOTAL_NUM_BITS 5
+
+/**
+ * Preserve incoming partition, don't remap (for idx 3 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_PRSV_PARIF_BITPOS 126
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_PRSV_PARIF_NUM_BITS 1
+
+/**
+ * Partition, replaces parif from input block (for idx 3 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_PARIF_BITPOS 121
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_PARIF_NUM_BITS 5
+
+/**
+ * Preserve incoming L2_CTXT (for idx 3 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_PRSV_L2IP_CTXT_BITPOS 120
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_PRSV_L2IP_CTXT_NUM_BITS 1
+
+/**
+ * L2 logical id which may be used in EM and WC Lookups. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_CTXT_BITPOS 109
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_CTXT_NUM_BITS 11
+
+/**
+ * Preserve incoming PROF_FUNC (for idx 3 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_PRSV_PROF_FUNC_BITPOS 108
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_PRSV_PROF_FUNC_NUM_BITS 1
+
+/**
+ * Allow Profile TCAM Lookup Table to be logically partitioned. (for idx
+ * 3 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_PROF_FUNC_BITPOS 100
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_PROF_FUNC_NUM_BITS 8
+
+/**
+ * Context operation code. (for idx 3 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_CTXT_OPCODE_BITPOS 98
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_CTXT_OPCODE_NUM_BITS 2
+/**
+ * Context operation code. (for idx 3 ...)
+ */
+enum cfa_p70_prof_l2_ctxt_rmp_dr_ctxt_opcode {
+ /* def_ctxt_data provides destination */
+ CFA_P70_PROF_L2_CTXT_RMP_DR_CTXT_OPCODE_BYPASS_CFA = 0,
+ /* def_ctxt_data provides act_rec_ptr */
+ CFA_P70_PROF_L2_CTXT_RMP_DR_CTXT_OPCODE_BYPASS_LKUP = 1,
+ /* continue normal flow */
+ CFA_P70_PROF_L2_CTXT_RMP_DR_CTXT_OPCODE_NORMAL_FLOW = 2,
+ /* mark packet for drop */
+ CFA_P70_PROF_L2_CTXT_RMP_DR_CTXT_OPCODE_DROP = 3,
+};
+
+/**
+ * Enables remap of meta_data from Input block (for idx 3 ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_META_ENB_BITPOS 97
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_META_ENB_NUM_BITS 1
+
+/**
+ * l2ip_meta_prof[2:0] = l2ip_meta[34:32], l2ip_meta_data[31:0] =
+ * l2ip_meta[31:0]
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_META_BITPOS 62
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_META_NUM_BITS 35
+
+/**
+ * Enables remap of action record pointer from Input block (for idx 1
+ * ...)
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_ACT_ENB_BITPOS 61
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_ACT_ENB_NUM_BITS 1
+
+/**
+ * l2ip_act_hint[1:0] = l2ip_act_data[32:31], l2ip_act_scope[4:0] =
+ * l2ip_act_data[30:26], l2ip_act_rec_ptr[25:0] = l2ip_act_data[25:0]
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_ACT_DATA_BITPOS 28
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_ACT_DATA_NUM_BITS 33
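+
+/**
+ * Illustrative sketch of the L2IP_ACT_DATA split documented above; the struct
+ * and helper names are hypothetical, and the 33-bit value is assumed to be
+ * right-aligned in a 64-bit integer.
+ */
+struct cfa_p70_l2ip_act_fields {
+	uint8_t hint;		/* l2ip_act_data[32:31] */
+	uint8_t scope;		/* l2ip_act_data[30:26] */
+	uint32_t rec_ptr;	/* l2ip_act_data[25:0] */
+};
+
+static inline void
+cfa_p70_l2ip_act_unpack(uint64_t act_data, struct cfa_p70_l2ip_act_fields *out)
+{
+	out->rec_ptr = (uint32_t)(act_data & 0x3ffffff);	/* bits 25:0 */
+	out->scope = (uint8_t)((act_data >> 26) & 0x1f);	/* bits 30:26 */
+	out->hint = (uint8_t)((act_data >> 31) & 0x3);		/* bits 32:31 */
+}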
+
+/**
+ * Enables remap of ring_table_idx
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_RFS_ENB_BITPOS 27
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_RFS_ENB_NUM_BITS 1
+
+/**
+ * ring_table_idx[8:0] = l2ip_rfs_data[8:0] (rx only)
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_RFS_DATA_BITPOS 18
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_RFS_DATA_NUM_BITS 9
+
+/**
+ * Enables remap of destination from input block
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_DEST_ENB_BITPOS 17
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_DEST_ENB_NUM_BITS 1
+
+/**
+ * destination[16:0] = l2ip_dest_data[16:0] #CAS_SW_REF
+ * Profiler.l2ip.context.remap.table
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_DEST_DATA_BITPOS 0
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_L2IP_DEST_DATA_NUM_BITS 17
+
+/**
+ * Total number of bits for PROF_L2_CTXT_RMP_DR
+ */
+#define CFA_P70_PROF_L2_CTXT_RMP_DR_TOTAL_NUM_BITS 127
+
+/**
+ * FC TCAM Search Result.
+ */
+#define CFA_P70_ACT_FC_TCAM_RESULT_SEARCH_RESULT_BITPOS 0
+#define CFA_P70_ACT_FC_TCAM_RESULT_SEARCH_RESULT_NUM_BITS 6
+
+/**
+ * Unused Field.
+ */
+#define CFA_P70_ACT_FC_TCAM_RESULT_UNUSED_0_BITPOS 6
+#define CFA_P70_ACT_FC_TCAM_RESULT_UNUSED_0_NUM_BITS 25
+
+/**
+ * FC TCAM Search Hit.
+ */
+#define CFA_P70_ACT_FC_TCAM_RESULT_SEARCH_HIT_BITPOS 31
+#define CFA_P70_ACT_FC_TCAM_RESULT_SEARCH_HIT_NUM_BITS 1
+
+/**
+ * Total number of bits for ACT_FC_TCAM_RESULT
+ */
+#define CFA_P70_ACT_FC_TCAM_RESULT_TOTAL_NUM_BITS 32
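+
+/**
+ * Illustrative sketch (hypothetical helpers): extracting the hit flag and
+ * result index from a 32-bit ACT_FC_TCAM_RESULT word using the generic
+ * BITPOS/NUM_BITS pattern of this file.
+ */
+#define CFA_P70_FIELD_MASK32(nbits) \
+	((nbits) < 32 ? ((1U << (nbits)) - 1U) : 0xffffffffU)
+
+static inline uint32_t
+cfa_p70_fc_tcam_result_hit(uint32_t result)
+{
+	return (result >> CFA_P70_ACT_FC_TCAM_RESULT_SEARCH_HIT_BITPOS) &
+	       CFA_P70_FIELD_MASK32(CFA_P70_ACT_FC_TCAM_RESULT_SEARCH_HIT_NUM_BITS);
+}
+
+static inline uint32_t
+cfa_p70_fc_tcam_result_index(uint32_t result)
+{
+	return (result >> CFA_P70_ACT_FC_TCAM_RESULT_SEARCH_RESULT_BITPOS) &
+	       CFA_P70_FIELD_MASK32(CFA_P70_ACT_FC_TCAM_RESULT_SEARCH_RESULT_NUM_BITS);
+}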
+
+/**
+ * Unused Field.
+ */
+#define CFA_P70_ACT_MIRROR_UNUSED_0_BITPOS 0
+#define CFA_P70_ACT_MIRROR_UNUSED_0_NUM_BITS 21
+#define CFA_P70_ACT_MIRROR_RELATIVE_BITPOS 21
+#define CFA_P70_ACT_MIRROR_RELATIVE_NUM_BITS 1
+/**
+ * RELATIVE
+ */
+enum cfa_p70_act_mirror_relative {
+ /* act_rec_ptr field is absolute. */
+ CFA_P70_ACT_MIRROR_RELATIVE_ABSOLUTE = 0,
+ /*
+ * act_rec_ptr field is relative to the original action record pointer.
+ */
+ CFA_P70_ACT_MIRROR_RELATIVE_RELATIVE = 1,
+};
+
+/**
+ * micr1_act_hint[1:0] - action hint used with act_rec_ptr.
+ */
+#define CFA_P70_ACT_MIRROR_HINT_BITPOS 22
+#define CFA_P70_ACT_MIRROR_HINT_NUM_BITS 2
+
+/**
+ * Sampling mode.
+ */
+#define CFA_P70_ACT_MIRROR_SAMP_BITPOS 24
+#define CFA_P70_ACT_MIRROR_SAMP_NUM_BITS 2
+/**
+ * Sampling mode.
+ */
+enum cfa_p70_act_mirror_samp {
+ /* PRNG based. */
+ CFA_P70_ACT_MIRROR_SAMP_STAT = 0,
+ /* packet count based. */
+ CFA_P70_ACT_MIRROR_SAMP_PACKET = 1,
+ /* packet count w/jitter based. */
+ CFA_P70_ACT_MIRROR_SAMP_JITTER = 2,
+ /* timer based. */
+ CFA_P70_ACT_MIRROR_SAMP_TIMER = 3,
+};
+
+/**
+ * Truncation mode.
+ */
+#define CFA_P70_ACT_MIRROR_TRUNC_BITPOS 26
+#define CFA_P70_ACT_MIRROR_TRUNC_NUM_BITS 2
+/**
+ * Truncation mode.
+ */
+enum cfa_p70_act_mirror_trunc {
+ /* No Truncation. */
+ CFA_P70_ACT_MIRROR_TRUNC_DISABLED = 0,
+ /* RFFU. */
+ CFA_P70_ACT_MIRROR_TRUNC_RSVD = 1,
+ /* mirror copy will restrict outermost tunnel payload to 128B. */
+ CFA_P70_ACT_MIRROR_TRUNC_B128 = 2,
+ /* mirror copy will restrict outermost tunnel payload to 256B. */
+ CFA_P70_ACT_MIRROR_TRUNC_B256 = 3,
+};
+#define CFA_P70_ACT_MIRROR_IGN_DROP_BITPOS 28
+#define CFA_P70_ACT_MIRROR_IGN_DROP_NUM_BITS 1
+/**
+ * IGN_DROP
+ */
+enum cfa_p70_act_mirror_ign_drop {
+ /*
+	 * Honor Drop. When set, the mirror copy is made regardless of
+	 * whether the initial action is to drop the packet or not.
+ */
+ CFA_P70_ACT_MIRROR_IGN_DROP_HONOR = 0,
+ /* Ignore Drop */
+ CFA_P70_ACT_MIRROR_IGN_DROP_IGNORE = 1,
+};
+#define CFA_P70_ACT_MIRROR_MODE_BITPOS 29
+#define CFA_P70_ACT_MIRROR_MODE_NUM_BITS 2
+/**
+ * MODE
+ */
+enum cfa_p70_act_mirror_mode {
+ /* No Copy. */
+ CFA_P70_ACT_MIRROR_MODE_DISABLED = 0,
+ /* Override AR. */
+ CFA_P70_ACT_MIRROR_MODE_OVERRIDE = 1,
+ /* Ingress Copy. */
+ CFA_P70_ACT_MIRROR_MODE_INGRESS = 2,
+ /* Egress Copy. */
+ CFA_P70_ACT_MIRROR_MODE_EGRESS = 3,
+};
+#define CFA_P70_ACT_MIRROR_COND_BITPOS 31
+#define CFA_P70_ACT_MIRROR_COND_NUM_BITS 1
+/**
+ * COND
+ */
+enum cfa_p70_act_mirror_cond {
+ /* mirror is only processed if Lookup copy bit is set */
+ CFA_P70_ACT_MIRROR_COND_UNCONDITIONAL = 0,
+ /* mirror is processed unconditionally. */
+ CFA_P70_ACT_MIRROR_COND_CONDITIONAL = 1,
+};
+
+/**
+ * Mirror Destination 1 Action Record Pointer.
+ */
+#define CFA_P70_ACT_MIRROR_AR_PTR_BITPOS 32
+#define CFA_P70_ACT_MIRROR_AR_PTR_NUM_BITS 26
+
+/**
+ * Mirror Destination 1 Sampling Configuration.
+ */
+#define CFA_P70_ACT_MIRROR_SAMP_CFG_BITPOS 64
+#define CFA_P70_ACT_MIRROR_SAMP_CFG_NUM_BITS 32
+
+/**
+ * Total number of bits for ACT_MIRROR
+ */
+#define CFA_P70_ACT_MIRROR_TOTAL_NUM_BITS 96
+
+/**
+ * This is the new metadata that is merged with the existing packet
+ * metadata, based on the profile selected by META_PROF.
+ */
+#define CFA_P70_WC_LREC_METADATA_BITPOS 5
+#define CFA_P70_WC_LREC_METADATA_NUM_BITS 32
+
+/**
+ * Specifies one of 8 metadata profile masks to use when merging the
+ * input metadata with the LREC metadata for recycling.
+ */
+#define CFA_P70_WC_LREC_META_PROF_BITPOS 37
+#define CFA_P70_WC_LREC_META_PROF_NUM_BITS 3
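+
+/**
+ * Illustrative sketch of the mask-based metadata merge suggested by the two
+ * comments above; the helper is hypothetical and the exact merge polarity
+ * (which side the mask selects) is an assumption, not taken from this file.
+ */
+static inline uint32_t
+cfa_p70_meta_merge(uint32_t pkt_meta, uint32_t lrec_meta, uint32_t prof_mask)
+{
+	/* Take masked bits from the LREC metadata, the rest from the packet. */
+	return (lrec_meta & prof_mask) | (pkt_meta & ~prof_mask);
+}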
+
+/**
+ * When a packet is recycled to the Profile TCAM, this value is used as
+ * the PROF_FUNC field in the TCAM search.
+ */
+#define CFA_P70_WC_LREC_PROF_FUNC_BITPOS 40
+#define CFA_P70_WC_LREC_PROF_FUNC_NUM_BITS 8
+
+/**
+ * Indicates whether the packet will be recycled to the L2 Context TCAM
+ * or the Profile TCAM. When recycled to the Profile TCAM, PROF_FUNC is
+ * used for the search key.
+ */
+#define CFA_P70_WC_LREC_RECYCLE_DEST_BITPOS 48
+#define CFA_P70_WC_LREC_RECYCLE_DEST_NUM_BITS 1
+
+/**
+ * Flow counter pointer.
+ */
+#define CFA_P70_WC_LREC_FC_PTR_BITPOS 0
+#define CFA_P70_WC_LREC_FC_PTR_NUM_BITS 28
+
+/**
+ * Flow counter type.
+ */
+#define CFA_P70_WC_LREC_FC_TYPE_BITPOS 28
+#define CFA_P70_WC_LREC_FC_TYPE_NUM_BITS 2
+
+/**
+ * Flow counter op.
+ */
+#define CFA_P70_WC_LREC_FC_OP_BITPOS 30
+#define CFA_P70_WC_LREC_FC_OP_NUM_BITS 1
+/**
+ * Enumeration definition for field 'fc_op'
+ */
+enum cfa_p70_wc_lrec_fc_op {
+ /* ingress */
+ CFA_P70_WC_LREC_FC_OP_INGRESS = 0,
+ /* egress */
+ CFA_P70_WC_LREC_FC_OP_EGRESS = 1,
+};
+
+/**
+ * When not present, a value of 0 is used, which disables ECMP. The final
+ * action record location is:
+ * ACT_REC_PTR += (ECMP_HASH % (PATHS_M1 + 1)) * ACT_REC_SIZE
+ */
+#define CFA_P70_WC_LREC_PATHS_M1_BITPOS 31
+#define CFA_P70_WC_LREC_PATHS_M1_NUM_BITS 4
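+
+/**
+ * Illustrative sketch of the ECMP adjustment described above (hypothetical
+ * helper; variable names are stand-ins for the LREC/hash inputs). With
+ * PATHS_M1 == 0 the modulo term is always zero, so ACT_REC_PTR is unchanged.
+ */
+static inline uint32_t
+cfa_p70_ecmp_act_rec_ptr(uint32_t act_rec_ptr, uint32_t ecmp_hash,
+			 uint32_t paths_m1, uint32_t act_rec_size)
+{
+	return act_rec_ptr + (ecmp_hash % (paths_m1 + 1)) * act_rec_size;
+}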
+
+/**
+ * Specifies the size in 32B units of the action memory allocated for
+ * each ECMP path.
+ */
+#define CFA_P70_WC_LREC_ACT_REC_SIZE_BITPOS 35
+#define CFA_P70_WC_LREC_ACT_REC_SIZE_NUM_BITS 5
+
+/**
+ * This field is used in flow steering applications such as Linux RFS.
+ * This field is used in conjunction with the VNIC destination in the
+ * action record on RX to steer the packet to a specific ring.
+ */
+#define CFA_P70_WC_LREC_RING_TABLE_IDX_BITPOS 40
+#define CFA_P70_WC_LREC_RING_TABLE_IDX_NUM_BITS 9
+
+/**
+ * This field provides a destination for the packet, which goes directly
+ * to the output of CFA.
+ */
+#define CFA_P70_WC_LREC_DESTINATION_BITPOS 49
+#define CFA_P70_WC_LREC_DESTINATION_NUM_BITS 17
+
+/**
+ * This is the action record pointer. This value points into the current
+ * scope action table. Note that when ACT_REC_SIZE and PATHS_M1 are
+ * present and PATHS_M1 != 0, the value may be modified using this as the
+ * base pointer for ECMP.
+ */
+#define CFA_P70_WC_LREC_ACT_REC_PTR_BITPOS 49
+#define CFA_P70_WC_LREC_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This value provides a hint of the action record size to the Action
+ * block.
+ */
+#define CFA_P70_WC_LREC_ACT_HINT_BITPOS 75
+#define CFA_P70_WC_LREC_ACT_HINT_NUM_BITS 2
+
+/**
+ * When both WC and EM have a hit, the one with the higher STRENGTH is
+ * used. If the STRENGTHs are equal, the LKUP_TIE_BREAKER register bit
+ * determines the winner. (0=WC, 1=EM)
+ */
+#define CFA_P70_WC_LREC_STRENGTH_BITPOS 77
+#define CFA_P70_WC_LREC_STRENGTH_NUM_BITS 2
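+
+/**
+ * Illustrative sketch (hypothetical helper) of the WC/EM arbitration described
+ * above: the higher STRENGTH wins and the LKUP_TIE_BREAKER register bit
+ * (0 = WC, 1 = EM) breaks ties. Returns non-zero when the EM result wins.
+ */
+static inline int
+cfa_p70_lkup_select_em(uint8_t wc_strength, uint8_t em_strength,
+		       int lkup_tie_breaker)
+{
+	if (em_strength != wc_strength)
+		return em_strength > wc_strength;
+
+	return lkup_tie_breaker != 0;
+}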
+
+/**
+ * This field defines the format for the LREC and the basic thing that
+ * will be done with the packet.
+ */
+#define CFA_P70_WC_LREC_OPCODE_BITPOS 79
+#define CFA_P70_WC_LREC_OPCODE_NUM_BITS 4
+/**
+ * Enumeration definition for field 'opcode'
+ */
+enum cfa_p70_wc_lrec_opcode {
+ /*
+ * This value means the packet will go to the action block for edit
+ * processing and that no RFS will be specified for the packet.
+ */
+ CFA_P70_WC_LREC_OPCODE_NORMAL = 0,
+ /*
+ * This value means the packet will go to the action block for edit
+ * processing and that RFS will be specified for the packet.
+ */
+ CFA_P70_WC_LREC_OPCODE_NORMAL_RFS = 1,
+ /*
+ * This value means the packet will go directly to the output, bypassing
+ * the action block and that no RFS will be specified for the packet.
+ */
+ CFA_P70_WC_LREC_OPCODE_FAST = 2,
+ /*
+ * This value means the packet will go directly to the output, bypassing
+ * the action block and that RFS will be specified for the packet.
+ */
+ CFA_P70_WC_LREC_OPCODE_FAST_RFS = 3,
+ /*
+ * This value Recycles the packet to the Profiler and provides LREC
+ * fields that determine the fields returned to the Profiler for further
+ * processing.
+ */
+ CFA_P70_WC_LREC_OPCODE_RECYCLE = 8,
+};
+
+/**
+ * In addition to requiring VALID=1, the bits indexed by epoch1 must be
+ * set to '1' in the EPOCH1_MASK table, or the LREC is invalid. This is
+ * used to invalidate rules as a group.
+ */
+#define CFA_P70_WC_LREC_EPOCH1_BITPOS 83
+#define CFA_P70_WC_LREC_EPOCH1_NUM_BITS 6
+
+/**
+ * In addition to requiring VALID=1, the bits indexed by epoch0 must be
+ * set to '1' in the EPOCH0_MASK table, or the LREC is invalid. This is
+ * used to invalidate rules as a group.
+ */
+#define CFA_P70_WC_LREC_EPOCH0_BITPOS 89
+#define CFA_P70_WC_LREC_EPOCH0_NUM_BITS 12
+
+/**
+ * Record size in 32B words minus 1 (ignored by hardware).
+ */
+#define CFA_P70_WC_LREC_REC_SIZE_BITPOS 101
+#define CFA_P70_WC_LREC_REC_SIZE_NUM_BITS 2
+
+/**
+ * When set to '0', the LREC is not valid.
+ */
+#define CFA_P70_WC_LREC_VALID_BITPOS 103
+#define CFA_P70_WC_LREC_VALID_NUM_BITS 1
+
+/**
+ * Total number of bits for wc_lrec
+ */
+#define CFA_P70_WC_LREC_TOTAL_NUM_BITS 104
+
+/**
+ * This value provides a base pointer to the LKUP_FRC_RANGE memory. Each
+ * packet can have up to 16 ranges. A value of 16'hFFFF disables FRC.
+ */
+#define CFA_P70_EM_LREC_RANGE_IDX_BITPOS 0
+#define CFA_P70_EM_LREC_RANGE_IDX_NUM_BITS 16
+
+/**
+ * Selects one of 16 profiles for FRC in the LKUP_RANGE_PROFILE table,
+ * which specifies 2 packet fields to range check and gives a mask of 16
+ * ranges determined by range_index.
+ */
+#define CFA_P70_EM_LREC_RANGE_PROFILE_BITPOS 16
+#define CFA_P70_EM_LREC_RANGE_PROFILE_NUM_BITS 4
+
+/**
+ * Current timer value for the connection.
+ */
+#define CFA_P70_EM_LREC_CREC_TIMER_VALUE_BITPOS 20
+#define CFA_P70_EM_LREC_CREC_TIMER_VALUE_NUM_BITS 4
+
+/**
+ * Current state of the connection.
+ */
+#define CFA_P70_EM_LREC_CREC_STATE_BITPOS 24
+#define CFA_P70_EM_LREC_CREC_STATE_NUM_BITS 5
+
+/**
+ * Set to one by hardware whenever a notify of a valid tcp_msb_opp has
+ * been written into the connection record. Software can also initialize
+ * this to one if it initializes tcp_msb_opp to a valid value.
+ */
+#define CFA_P70_EM_LREC_CREC_TCP_MSB_OPP_INIT_BITPOS 29
+#define CFA_P70_EM_LREC_CREC_TCP_MSB_OPP_INIT_NUM_BITS 1
+
+/**
+ * Bits 31:14 of seq# or ack# as seen in packets on the opposite path.
+ */
+#define CFA_P70_EM_LREC_CREC_TCP_MSB_OPP_BITPOS 30
+#define CFA_P70_EM_LREC_CREC_TCP_MSB_OPP_NUM_BITS 18
+
+/**
+ * Bits 31:14 of seq# or ack# as seen in packets on the local path.
+ */
+#define CFA_P70_EM_LREC_CREC_TCP_MSB_LOC_BITPOS 48
+#define CFA_P70_EM_LREC_CREC_TCP_MSB_LOC_NUM_BITS 18
+
+/**
+ * Window size is 1<<TCP_WIN. A value of 0 disables window checks. Only
+ * modified by SW.
+ */
+#define CFA_P70_EM_LREC_CREC_TCP_WIN_BITPOS 66
+#define CFA_P70_EM_LREC_CREC_TCP_WIN_NUM_BITS 5
+
+/**
+ * Enables update of TCP_MSB_LOC when '1'. Only modified by SW.
+ */
+#define CFA_P70_EM_LREC_CREC_TCP_UPDT_EN_BITPOS 71
+#define CFA_P70_EM_LREC_CREC_TCP_UPDT_EN_NUM_BITS 1
+
+/**
+ * Direction of tracked connection. Only modified by SW.
+ */
+#define CFA_P70_EM_LREC_CREC_TCP_DIR_BITPOS 72
+#define CFA_P70_EM_LREC_CREC_TCP_DIR_NUM_BITS 1
+/**
+ * Enumeration definition for field 'crec_tcp_dir'
+ */
+enum cfa_p70_em_lrec_crec_tcp_dir {
+ /* RX */
+ CFA_P70_EM_LREC_CREC_TCP_DIR_RX = 0,
+ /* TX */
+ CFA_P70_EM_LREC_CREC_TCP_DIR_TX = 1,
+};
+
+/**
+ * This is the new metadata that is merged with the existing packet
+ * metadata, based on the profile selected by META_PROF.
+ */
+#define CFA_P70_EM_LREC_METADATA_BITPOS 29
+#define CFA_P70_EM_LREC_METADATA_NUM_BITS 32
+
+/**
+ * When a packet is recycled to the Profile TCAM, this value is used as
+ * the PROF_FUNC field in the TCAM search.
+ */
+#define CFA_P70_EM_LREC_PROF_FUNC_BITPOS 61
+#define CFA_P70_EM_LREC_PROF_FUNC_NUM_BITS 8
+
+/**
+ * Specifies one of 8 metadata profile masks to use when merging the
+ * input metadata with the LREC metadata for recycling.
+ */
+#define CFA_P70_EM_LREC_META_PROF_BITPOS 69
+#define CFA_P70_EM_LREC_META_PROF_NUM_BITS 3
+
+/**
+ * Indicates whether the packet will be recycled to the L2 Context TCAM
+ * or the Profile TCAM. When recycled to the Profile TCAM, PROF_FUNC is
+ * used for the search key.
+ */
+#define CFA_P70_EM_LREC_RECYCLE_DEST_BITPOS 72
+#define CFA_P70_EM_LREC_RECYCLE_DEST_NUM_BITS 1
+
+/**
+ * Flow counter pointer.
+ */
+#define CFA_P70_EM_LREC_FC_PTR_BITPOS 24
+#define CFA_P70_EM_LREC_FC_PTR_NUM_BITS 28
+
+/**
+ * Flow counter type.
+ */
+#define CFA_P70_EM_LREC_FC_TYPE_BITPOS 52
+#define CFA_P70_EM_LREC_FC_TYPE_NUM_BITS 2
+
+/**
+ * Flow counter op.
+ */
+#define CFA_P70_EM_LREC_FC_OP_BITPOS 54
+#define CFA_P70_EM_LREC_FC_OP_NUM_BITS 1
+/**
+ * Enumeration definition for field 'fc_op'
+ */
+enum cfa_p70_em_lrec_fc_op {
+ /* ingress */
+ CFA_P70_EM_LREC_FC_OP_INGRESS = 0,
+ /* egress */
+ CFA_P70_EM_LREC_FC_OP_EGRESS = 1,
+};
+
+/**
+ * When not present, a value of 0 is used, which disables ECMP. The final
+ * action record location is:
+ * ACT_REC_PTR += (ECMP_HASH % (PATHS_M1 + 1)) * ACT_REC_SIZE
+ */
+#define CFA_P70_EM_LREC_PATHS_M1_BITPOS 55
+#define CFA_P70_EM_LREC_PATHS_M1_NUM_BITS 4
+
+/**
+ * Specifies the size in 32B units of the action memory allocated for
+ * each ECMP path.
+ */
+#define CFA_P70_EM_LREC_ACT_REC_SIZE_BITPOS 59
+#define CFA_P70_EM_LREC_ACT_REC_SIZE_NUM_BITS 5
+
+/**
+ * This field is used in flow steering applications such as Linux RFS.
+ * This field is used in conjunction with the VNIC destination in the
+ * action record on RX to steer the packet to a specific ring.
+ */
+#define CFA_P70_EM_LREC_RING_TABLE_IDX_BITPOS 64
+#define CFA_P70_EM_LREC_RING_TABLE_IDX_NUM_BITS 9
+
+/**
+ * This field provides a destination for the packet, which goes directly
+ * to the output of CFA.
+ */
+#define CFA_P70_EM_LREC_DESTINATION_BITPOS 73
+#define CFA_P70_EM_LREC_DESTINATION_NUM_BITS 17
+
+/**
+ * This is the action record pointer. This value points into the current
+ * scope action table. Note that when ACT_REC_SIZE and PATHS_M1 are
+ * present and PATHS_M1 != 0, the value may be modified using this as the
+ * base pointer for ECMP.
+ */
+#define CFA_P70_EM_LREC_ACT_REC_PTR_BITPOS 73
+#define CFA_P70_EM_LREC_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This value provides a hint of the action record size to the Action
+ * block.
+ */
+#define CFA_P70_EM_LREC_ACT_HINT_BITPOS 99
+#define CFA_P70_EM_LREC_ACT_HINT_NUM_BITS 2
+
+/**
+ * When both WC and EM have a hit, the one with the higher STRENGTH is
+ * used. If the STRENGTHs are equal, the LKUP_TIE_BREAKER register bit
+ * determines the winner. (0=WC, 1=EM)
+ */
+#define CFA_P70_EM_LREC_STRENGTH_BITPOS 101
+#define CFA_P70_EM_LREC_STRENGTH_NUM_BITS 2
+
+/**
+ * This field defines the format for the LREC and the basic thing that
+ * will be done with the packet.
+ */
+#define CFA_P70_EM_LREC_OPCODE_BITPOS 103
+#define CFA_P70_EM_LREC_OPCODE_NUM_BITS 4
+/**
+ * Enumeration definition for field 'opcode'
+ */
+enum cfa_p70_em_lrec_opcode {
+ /*
+ * This value means the packet will go to the action block for edit
+ * processing and that no RFS will be specified for the packet.
+ */
+ CFA_P70_EM_LREC_OPCODE_NORMAL = 0,
+ /*
+ * This value means the packet will go to the action block for edit
+ * processing and that RFS will be specified for the packet.
+ */
+ CFA_P70_EM_LREC_OPCODE_NORMAL_RFS = 1,
+ /*
+ * This value means the packet will go directly to the output, bypassing
+ * the action block and that no RFS will be specified for the packet.
+ */
+ CFA_P70_EM_LREC_OPCODE_FAST = 2,
+ /*
+ * This value means the packet will go directly to the output, bypassing
+ * the action block and that RFS will be specified for the packet.
+ */
+ CFA_P70_EM_LREC_OPCODE_FAST_RFS = 3,
+ /*
+	 * This means the packet will go to the action block with connection
+	 * tracking affecting the action, but no RFS. Connection tracking
+	 * determines the ACTION, which is forward, miss, or copy. The
+	 * default action record pointer is used when ACTION=miss.
+ */
+ CFA_P70_EM_LREC_OPCODE_CT_MISS_DEF = 4,
+ /*
+	 * This means the packet will go to the action block with connection
+	 * tracking affecting the action, but no RFS. Connection tracking
+	 * determines the ACTION, which is forward, miss, or copy. The
+	 * default action record pointer is used when ACTION=forward or
+	 * ACTION=copy.
+ */
+ CFA_P70_EM_LREC_OPCODE_CT_HIT_DEF = 6,
+ /*
+ * This value Recycles the packet to the Profiler and provides LREC
+ * fields that determine the fields returned to the Profiler for further
+ * processing.
+ */
+ CFA_P70_EM_LREC_OPCODE_RECYCLE = 8,
+};
+
+/**
+ * In addition to requiring VALID=1, the bits indexed by epoch1 must be
+ * set to '1' in the EPOCH1_MASK table, or the LREC is invalid. This is
+ * used to invalidate rules as a group.
+ */
+#define CFA_P70_EM_LREC_EPOCH1_BITPOS 107
+#define CFA_P70_EM_LREC_EPOCH1_NUM_BITS 6
+
+/**
+ * In addition to requiring VALID=1, the bits indexed by epoch0 must be
+ * set to '1' in the EPOCH0_MASK table, or the LREC is invalid. This is
+ * used to invalidate rules as a group.
+ */
+#define CFA_P70_EM_LREC_EPOCH0_BITPOS 113
+#define CFA_P70_EM_LREC_EPOCH0_NUM_BITS 12
+
+/**
+ * Record size in 32B words minus 1 (ignored by hardware).
+ */
+#define CFA_P70_EM_LREC_REC_SIZE_BITPOS 125
+#define CFA_P70_EM_LREC_REC_SIZE_NUM_BITS 2
+
+/**
+ * When set to '0', the LREC is not valid.
+ */
+#define CFA_P70_EM_LREC_VALID_BITPOS 127
+#define CFA_P70_EM_LREC_VALID_NUM_BITS 1
+
+/**
+ * Total number of bits for em_lrec
+ */
+#define CFA_P70_EM_LREC_TOTAL_NUM_BITS 128
+
+/**
+ * This entry points to the entry associated with this bucket area. If
+ * this value is zero, then the bucket area is not valid and should be
+ * skipped.
+ */
+#define CFA_P70_EM_BUCKET_BIN0_ENTRY_BITPOS 0
+#define CFA_P70_EM_BUCKET_BIN0_ENTRY_NUM_BITS 26
+
+/**
+ * This field holds the upper 12 bits of the 36b spooky hash of the key.
+ * This part of the bucket area must match for the entry associated with
+ * the bucket area to be read.
+ */
+#define CFA_P70_EM_BUCKET_BIN0_HASH_MSBS_BITPOS 26
+#define CFA_P70_EM_BUCKET_BIN0_HASH_MSBS_NUM_BITS 12
+
+/**
+ * This entry points to the entry associated with this bucket area. If
+ * this value is zero, then the bucket area is not valid and should be
+ * skipped.
+ */
+#define CFA_P70_EM_BUCKET_BIN1_ENTRY_BITPOS 38
+#define CFA_P70_EM_BUCKET_BIN1_ENTRY_NUM_BITS 26
+
+/**
+ * This field holds the upper 12 bits of the 36b spooky hash of the key.
+ * This part of the bucket area must match for the entry associated with
+ * the bucket area to be read.
+ */
+#define CFA_P70_EM_BUCKET_BIN1_HASH_MSBS_BITPOS 64
+#define CFA_P70_EM_BUCKET_BIN1_HASH_MSBS_NUM_BITS 12
+
+/**
+ * This entry points to the entry associated with this bucket area. If
+ * this value is zero, then the bucket area is not valid and should be
+ * skipped.
+ */
+#define CFA_P70_EM_BUCKET_BIN2_ENTRY_BITPOS 76
+#define CFA_P70_EM_BUCKET_BIN2_ENTRY_NUM_BITS 26
+
+/**
+ * This field holds the upper 12 bits of the 36b spooky hash of the key.
+ * This part of the bucket area must match for the entry associated with
+ * the bucket area to be read.
+ */
+#define CFA_P70_EM_BUCKET_BIN2_HASH_MSBS_BITPOS 102
+#define CFA_P70_EM_BUCKET_BIN2_HASH_MSBS_NUM_BITS 12
+
+/**
+ * This entry points to the entry associated with this bucket area. If
+ * this value is zero, then the bucket area is not valid and should be
+ * skipped.
+ */
+#define CFA_P70_EM_BUCKET_BIN3_ENTRY_BITPOS 114
+#define CFA_P70_EM_BUCKET_BIN3_ENTRY_NUM_BITS 26
+
+/**
+ * This field holds the upper 12 bits of the 36b spooky hash of the key.
+ * This part of the bucket area must match for the entry associated with
+ * the bucket area to be read.
+ */
+#define CFA_P70_EM_BUCKET_BIN3_HASH_MSBS_BITPOS 140
+#define CFA_P70_EM_BUCKET_BIN3_HASH_MSBS_NUM_BITS 12
+
+/**
+ * This entry points to the entry associated with this bucket area. If
+ * this value is zero, then the bucket area is not valid and should be
+ * skipped.
+ */
+#define CFA_P70_EM_BUCKET_BIN4_ENTRY_BITPOS 152
+#define CFA_P70_EM_BUCKET_BIN4_ENTRY_NUM_BITS 26
+
+/**
+ * This field holds the upper 12 bits of the 36b spooky hash of the key.
+ * This part of the bucket area must match for the entry associated with
+ * the bucket area to be read.
+ */
+#define CFA_P70_EM_BUCKET_BIN4_HASH_MSBS_BITPOS 178
+#define CFA_P70_EM_BUCKET_BIN4_HASH_MSBS_NUM_BITS 12
+
+/**
+ * This entry points to the entry associated with this bucket area. If
+ * this value is zero, then the bucket area is not valid and should be
+ * skipped.
+ */
+#define CFA_P70_EM_BUCKET_BIN5_ENTRY_BITPOS 190
+#define CFA_P70_EM_BUCKET_BIN5_ENTRY_NUM_BITS 26
+
+/**
+ * This field holds the upper 12 bits of the 36b spooky hash of the key.
+ * This part of the bucket area must match for the entry associated with
+ * the bucket area to be read.
+ */
+#define CFA_P70_EM_BUCKET_BIN5_HASH_MSBS_BITPOS 216
+#define CFA_P70_EM_BUCKET_BIN5_HASH_MSBS_NUM_BITS 12
+
+/**
+ * This value points to the next bucket in the chain. When set to 0, the
+ * next bucket visit for a background thread is to the starting bucket
+ * for the thread.
+ */
+#define CFA_P70_EM_BUCKET_CHAIN_POINTER_BITPOS 228
+#define CFA_P70_EM_BUCKET_CHAIN_POINTER_NUM_BITS 26
+
+/**
+ * If this value is '1', then the pointer value must be valid and will
+ * be followed if a key match is not found in any bin in the current
+ * bucket.
+ */
+#define CFA_P70_EM_BUCKET_CHAIN_VALID_BITPOS 254
+#define CFA_P70_EM_BUCKET_CHAIN_VALID_NUM_BITS 1
+
+/**
+ * Total number of bits for em_bucket
+ */
+#define CFA_P70_EM_BUCKET_TOTAL_NUM_BITS 255
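+
+/**
+ * Illustrative sketch (hypothetical helper): deciding whether a bucket bin may
+ * hold the searched key, assuming the bin entry pointer and hash MSBs have
+ * already been unpacked and 'hash' carries the 36-bit spooky hash of the key
+ * right-aligned in a 64-bit integer. A zero entry pointer marks an empty bin.
+ */
+static inline int
+cfa_p70_em_bin_candidate(uint32_t bin_entry, uint32_t bin_hash_msbs,
+			 uint64_t hash)
+{
+	uint32_t key_msbs;
+
+	if (bin_entry == 0)
+		return 0;
+
+	key_msbs = (uint32_t)(hash >>
+			      (36 - CFA_P70_EM_BUCKET_BIN0_HASH_MSBS_NUM_BITS));
+	return bin_hash_msbs == key_msbs;
+}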
+
+/**
+ * The type field identifies the format of the action record to the
+ * hardware.
+ */
+#define CFA_P70_COMPACT_ACTION_TYPE_BITPOS 0
+#define CFA_P70_COMPACT_ACTION_TYPE_NUM_BITS 3
+/**
+ * Enumeration definition for field 'type'
+ */
+enum cfa_p70_compact_action_type {
+ /*
+ * Compact Action Record. The compact action record uses relative
+ * pointers to access needed data. This keeps the compact action record
+ * down to 64b.
+ */
+ CFA_P70_COMPACT_ACTION_TYPE_COMPACT_ACTION = 0,
+};
+
+/**
+ * When this value is '1', the packet will be dropped.
+ */
+#define CFA_P70_COMPACT_ACTION_DROP_BITPOS 3
+#define CFA_P70_COMPACT_ACTION_DROP_NUM_BITS 1
+
+/**
+ * This value controls how the VLAN Delete/Report edit works.
+ */
+#define CFA_P70_COMPACT_ACTION_VLAN_DELETE_BITPOS 4
+#define CFA_P70_COMPACT_ACTION_VLAN_DELETE_NUM_BITS 2
+/**
+ * Enumeration definition for field 'vlan_delete'
+ */
+enum cfa_p70_compact_action_vlan_delete {
+ /* The VLAN tag is left alone. */
+ CFA_P70_COMPACT_ACTION_VLAN_DELETE_DISABLED = 0,
+ /* Strip/Report the outer VLAN tag. Leave the inner VLAN tag. */
+ CFA_P70_COMPACT_ACTION_VLAN_DELETE_OUTER = 1,
+ /*
+ * Strip both the outer and inner VLAN tag. Report the inner VLAN tag.
+ */
+ CFA_P70_COMPACT_ACTION_VLAN_DELETE_BOTH = 2,
+ /*
+	 * If the outer VID != 0, strip and pass the outer VLAN tag and leave
+ * the inner VLAN tag. If outer VID == 0, then strip both VLAN tags and
+ * report the inner VLAN tag.
+ */
+ CFA_P70_COMPACT_ACTION_VLAN_DELETE_COND = 3,
+};
+
+/**
+ * This value specifies the port destination mask for TX path and is the
+ * index into the VNIC Properties Table for the RX path.
+ */
+#define CFA_P70_COMPACT_ACTION_DEST_BITPOS 6
+#define CFA_P70_COMPACT_ACTION_DEST_NUM_BITS 7
+#define CFA_P70_COMPACT_ACTION_DEST_OP_BITPOS 17
+#define CFA_P70_COMPACT_ACTION_DEST_OP_NUM_BITS 2
+/**
+ * Enumeration definition for field 'dest_op'
+ */
+enum cfa_p70_compact_action_dest_op {
+ /* Use the dest field from the Action Record. */
+ CFA_P70_COMPACT_ACTION_DEST_OP_NORMAL = 0,
+ /*
+ * This value specifies that the default destination as determined by
+ * the Profiler/Lookup/MCG stages and passed into the Action Record
+ * Fetch should be used instead of the destination from the Action
+ * Record. For example this can be useful for applications where actions
+ * are desired on a packet but the destination is to be taken solely
+ * from the Profiler Input Lookup Table.
+ */
+ CFA_P70_COMPACT_ACTION_DEST_OP_DEFAULT = 1,
+ /*
+ * This value specifies that the lower order bits of the metadata should
+ * be used instead of the destination from the Action Record.
+ */
+ CFA_P70_COMPACT_ACTION_DEST_OP_METADATA = 2,
+};
+
+/**
+ * This field controls the decapsulation function for the action.
+ */
+#define CFA_P70_COMPACT_ACTION_DECAP_BITPOS 19
+#define CFA_P70_COMPACT_ACTION_DECAP_NUM_BITS 5
+/**
+ * Enumeration definition for field 'decap'
+ */
+enum cfa_p70_compact_action_decap {
+ /* Do nothing. */
+ CFA_P70_COMPACT_ACTION_DECAP_DISABLE = 0,
+ /* Decap the outer VLAN tag */
+ CFA_P70_COMPACT_ACTION_DECAP_OVLAN = 1,
+ /* Decap all the VLAN tags */
+ CFA_P70_COMPACT_ACTION_DECAP_ALL_VLAN = 2,
+ /* Decap through Tunnel L2 header */
+ CFA_P70_COMPACT_ACTION_DECAP_TO_TL2 = 3,
+ /* Decap 1 MPLS label (does not delete outer L2) */
+ CFA_P70_COMPACT_ACTION_DECAP_1MPLS = 4,
+ /* Decap 1 MPLS label and outer L2 */
+ CFA_P70_COMPACT_ACTION_DECAP_1MPLS_OL2 = 5,
+ /* Decap 2 MPLS labels (does not delete outer L2) */
+ CFA_P70_COMPACT_ACTION_DECAP_2MPLS = 6,
+ /* Decap 2 MPLS labels and outer L2 */
+ CFA_P70_COMPACT_ACTION_DECAP_2MPLS_OL2 = 7,
+ /* Decap through Tunnel L3 header */
+ CFA_P70_COMPACT_ACTION_DECAP_TO_TL3 = 8,
+ /* Decap through Tunnel L4 header */
+ CFA_P70_COMPACT_ACTION_DECAP_TO_TL4 = 9,
+ /* Decap through Tunnel header */
+ CFA_P70_COMPACT_ACTION_DECAP_TO_T = 10,
+ /* Decap through Inner L2 */
+ CFA_P70_COMPACT_ACTION_DECAP_TO_L2 = 11,
+ /* Decap through Inner L3 */
+ CFA_P70_COMPACT_ACTION_DECAP_TO_L3 = 12,
+ /* Decap through inner L4 */
+ CFA_P70_COMPACT_ACTION_DECAP_TO_L4 = 13,
+ /* Shift tunnel->inner (single shift) */
+ CFA_P70_COMPACT_ACTION_DECAP_SHIFT_SINGLE = 14,
+ /* Un-parse (treat header as payload) */
+ CFA_P70_COMPACT_ACTION_DECAP_UNPARSE = 15,
+ /* Shift outer tunnel->inner (double shift) */
+ CFA_P70_COMPACT_ACTION_DECAP_SHIFT_DOUBLE = 18,
+ /* Decap through Outer Tunnel L2 header */
+ CFA_P70_COMPACT_ACTION_DECAP_TO_OL2 = 20,
+ /* Decap through Outer Tunnel L3 header */
+ CFA_P70_COMPACT_ACTION_DECAP_TO_OL3 = 21,
+ /* Decap through Outer Tunnel L4 header */
+ CFA_P70_COMPACT_ACTION_DECAP_TO_OL4 = 22,
+ /* Decap through Outer Tunnel header */
+ CFA_P70_COMPACT_ACTION_DECAP_TO_OT = 23,
+};
+
+/**
+ * The mirroring value selects one of 31 mirror destinations for the
+ * packet. A value of zero means that there is no Action Record
+ * mirroring for the packet.
+ */
+#define CFA_P70_COMPACT_ACTION_MIRRORING_BITPOS 24
+#define CFA_P70_COMPACT_ACTION_MIRRORING_NUM_BITS 5
+
+/**
+ * This value points to one of the 1024 meter entries. If the meter has
+ * scope verification enabled, then the scope in the meter table entry
+ * must match the scope of this action record.
+ */
+#define CFA_P70_COMPACT_ACTION_METER_PTR_BITPOS 29
+#define CFA_P70_COMPACT_ACTION_METER_PTR_NUM_BITS 10
+
+/**
+ * This is the offset to the statistic structure in 8B units from the
+ * start of the Action Record. A value of zero will disable the
+ * statistics action.
+ */
+#define CFA_P70_COMPACT_ACTION_STAT0_OFF_BITPOS 39
+#define CFA_P70_COMPACT_ACTION_STAT0_OFF_NUM_BITS 3
+
+/**
+ * This value controls the packet size that is used for counted stats.
+ */
+#define CFA_P70_COMPACT_ACTION_STAT0_OP_BITPOS 42
+#define CFA_P70_COMPACT_ACTION_STAT0_OP_NUM_BITS 1
+/**
+ * Enumeration definition for field 'stat0_op'
+ */
+enum cfa_p70_compact_action_stat0_op {
+ /* Statistics count reflects packet at 'ingress' to CFA. */
+ CFA_P70_COMPACT_ACTION_STAT0_OP_INGRESS = 0,
+ /* Statistics count reflects packet at 'egress' from CFA. */
+ CFA_P70_COMPACT_ACTION_STAT0_OP_EGRESS = 1,
+};
+
+/**
+ * Selects counter type. In all cases, fields are packed little endian
+ * in the action memory.
+ */
+#define CFA_P70_COMPACT_ACTION_STAT0_CTR_TYPE_BITPOS 43
+#define CFA_P70_COMPACT_ACTION_STAT0_CTR_TYPE_NUM_BITS 2
+/**
+ * Enumeration definition for field 'stat0_ctr_type'
+ */
+enum cfa_p70_compact_action_stat0_ctr_type {
+ /* Forward packet count(64b)/byte count(64b) */
+ CFA_P70_COMPACT_ACTION_STAT0_CTR_TYPE_B16 = 0,
+ /*
+ * Forward packet count(64b)/byte count(64b) timestamp(32b) TCP
+ * Flags(16b) reserved(23b)
+ */
+ CFA_P70_COMPACT_ACTION_STAT0_CTR_TYPE_B24 = 1,
+ /*
+ * Forward packet count(64b)/byte count(64b) Meter (drop or red) packet
+ * count(64b)/byte count(64b)
+ */
+ CFA_P70_COMPACT_ACTION_STAT0_CTR_TYPE_B32A = 2,
+ /*
+ * Forward packet count(64b)/byte count(64b) Meter timestamp(32b) TCP
+ * Flags(16b) reserved(6b) (drop or red) packet count(38b)/byte
+ * count(42b)
+ */
+ CFA_P70_COMPACT_ACTION_STAT0_CTR_TYPE_B32B = 3,
+};
+
+/**
+ * This is an offset to the modification record. This is the offset in
+ * 8B units from the start of the Action Record to get to dependent
+ * record data. A value of zero indicates no additional actions.
+ */
+#define CFA_P70_COMPACT_ACTION_MOD_OFF_BITPOS 45
+#define CFA_P70_COMPACT_ACTION_MOD_OFF_NUM_BITS 5
+
+/**
+ * This is an offset to the encapsulation record. This is the offset in
+ * 8B units from the start of the Action Record to get to dependent
+ * record data. A value of zero indicates no additional actions.
+ */
+#define CFA_P70_COMPACT_ACTION_ENC_OFF_BITPOS 50
+#define CFA_P70_COMPACT_ACTION_ENC_OFF_NUM_BITS 6
+
+/**
+ * This is an offset to the source record. This is the offset in 8B
+ * units from the start of the Action Record to get to dependent record
+ * data. A value of zero indicates no additional actions.
+ */
+#define CFA_P70_COMPACT_ACTION_SRC_OFF_BITPOS 56
+#define CFA_P70_COMPACT_ACTION_SRC_OFF_NUM_BITS 4
+#define CFA_P70_COMPACT_ACTION_UNUSED_0_BITPOS 60
+#define CFA_P70_COMPACT_ACTION_UNUSED_0_NUM_BITS 4
+
+/**
+ * Total number of bits for compact_action
+ */
+#define CFA_P70_COMPACT_ACTION_TOTAL_NUM_BITS 64
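+
+/**
+ * Illustrative sketch (hypothetical helpers): assembling a minimal 64-bit
+ * compact action record from the field definitions above. Only a few fields
+ * are populated; the remaining fields are left at zero.
+ */
+#define CFA_P70_ACT_FIELD64(val, bitpos, nbits) \
+	(((uint64_t)(val) & ((1ULL << (nbits)) - 1ULL)) << (bitpos))
+
+static inline uint64_t
+cfa_p70_compact_action_build(uint32_t dest, uint32_t mirror, uint32_t meter_ptr)
+{
+	uint64_t act = 0;
+
+	act |= CFA_P70_ACT_FIELD64(CFA_P70_COMPACT_ACTION_TYPE_COMPACT_ACTION,
+				   CFA_P70_COMPACT_ACTION_TYPE_BITPOS,
+				   CFA_P70_COMPACT_ACTION_TYPE_NUM_BITS);
+	act |= CFA_P70_ACT_FIELD64(dest, CFA_P70_COMPACT_ACTION_DEST_BITPOS,
+				   CFA_P70_COMPACT_ACTION_DEST_NUM_BITS);
+	act |= CFA_P70_ACT_FIELD64(mirror,
+				   CFA_P70_COMPACT_ACTION_MIRRORING_BITPOS,
+				   CFA_P70_COMPACT_ACTION_MIRRORING_NUM_BITS);
+	act |= CFA_P70_ACT_FIELD64(meter_ptr,
+				   CFA_P70_COMPACT_ACTION_METER_PTR_BITPOS,
+				   CFA_P70_COMPACT_ACTION_METER_PTR_NUM_BITS);
+	return act;
+}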
+
+/**
+ * The type field identifies the format of the action record to the
+ * hardware.
+ */
+#define CFA_P70_FULL_ACTION_TYPE_BITPOS 0
+#define CFA_P70_FULL_ACTION_TYPE_NUM_BITS 3
+/**
+ * Enumeration definition for field 'type'
+ */
+enum cfa_p70_full_action_type {
+ /*
+ * Full Action Record. The full action record uses full pointers to
+ * access needed data. It also allows access to all the action features.
+ * The Full Action record is 192b.
+ */
+ CFA_P70_FULL_ACTION_TYPE_FULL_ACTION = 1,
+};
+
+/**
+ * When this value is '1', the packet will be dropped.
+ */
+#define CFA_P70_FULL_ACTION_DROP_BITPOS 3
+#define CFA_P70_FULL_ACTION_DROP_NUM_BITS 1
+
+/**
+ * This value controls how the VLAN Delete/Report edit works.
+ */
+#define CFA_P70_FULL_ACTION_VLAN_DELETE_BITPOS 4
+#define CFA_P70_FULL_ACTION_VLAN_DELETE_NUM_BITS 2
+/**
+ * Enumeration definition for field 'vlan_delete'
+ */
+enum cfa_p70_full_action_vlan_delete {
+ /* The VLAN tag is left alone. */
+ CFA_P70_FULL_ACTION_VLAN_DELETE_DISABLED = 0,
+ /* Strip/Report the outer VLAN tag. Leave the inner VLAN tag. */
+ CFA_P70_FULL_ACTION_VLAN_DELETE_OUTER = 1,
+ /*
+ * Strip both the outer and inner VLAN tag. Report the inner VLAN tag.
+ */
+ CFA_P70_FULL_ACTION_VLAN_DELETE_BOTH = 2,
+ /*
+	 * If the outer VID != 0, strip and pass the outer VLAN tag and leave
+ * the inner VLAN tag. If outer VID == 0, then strip both VLAN tags and
+ * report the inner VLAN tag.
+ */
+ CFA_P70_FULL_ACTION_VLAN_DELETE_COND = 3,
+};
+
+/**
+ * This value specifies the port destination mask for TX path and is the
+ * index into the VNIC Properties Table for the RX path.
+ */
+#define CFA_P70_FULL_ACTION_DEST_BITPOS 6
+#define CFA_P70_FULL_ACTION_DEST_NUM_BITS 7
+#define CFA_P70_FULL_ACTION_DEST_OP_BITPOS 17
+#define CFA_P70_FULL_ACTION_DEST_OP_NUM_BITS 2
+/**
+ * Enumeration definition for field 'dest_op'
+ */
+enum cfa_p70_full_action_dest_op {
+ /* Use the dest field from the Action Record. */
+ CFA_P70_FULL_ACTION_DEST_OP_NORMAL = 0,
+ /*
+ * This value specifies that the default destination as determined by
+ * the Profiler/Lookup/MCG stages and passed into the Action Record
+ * Fetch should be used instead of the destination from the Action
+ * Record. For example this can be useful for applications where actions
+ * are desired on a packet but the destination is to be taken solely
+ * from the Profiler Input Lookup Table.
+ */
+ CFA_P70_FULL_ACTION_DEST_OP_DEFAULT = 1,
+ /*
+ * This value specifies that the lower order bits of the metadata should
+ * be used instead of the destination from the Action Record.
+ */
+ CFA_P70_FULL_ACTION_DEST_OP_METADATA = 2,
+};
+
+/**
+ * This field controls the decapsulation function for the action.
+ */
+#define CFA_P70_FULL_ACTION_DECAP_BITPOS 19
+#define CFA_P70_FULL_ACTION_DECAP_NUM_BITS 5
+/**
+ * Enumeration definition for field 'decap'
+ */
+enum cfa_p70_full_action_decap {
+ /* Do nothing. */
+ CFA_P70_FULL_ACTION_DECAP_DISABLE = 0,
+ /* Decap the outer VLAN tag */
+ CFA_P70_FULL_ACTION_DECAP_OVLAN = 1,
+ /* Decap all the VLAN tags */
+ CFA_P70_FULL_ACTION_DECAP_ALL_VLAN = 2,
+ /* Decap through Tunnel L2 header */
+ CFA_P70_FULL_ACTION_DECAP_TO_TL2 = 3,
+ /* Decap 1 MPLS label (does not delete outer L2) */
+ CFA_P70_FULL_ACTION_DECAP_1MPLS = 4,
+ /* Decap 1 MPLS label and outer L2 */
+ CFA_P70_FULL_ACTION_DECAP_1MPLS_OL2 = 5,
+ /* Decap 2 MPLS labels (does not delete outer L2) */
+ CFA_P70_FULL_ACTION_DECAP_2MPLS = 6,
+ /* Decap 2 MPLS labels and outer L2 */
+ CFA_P70_FULL_ACTION_DECAP_2MPLS_OL2 = 7,
+ /* Decap through Tunnel L3 header */
+ CFA_P70_FULL_ACTION_DECAP_TO_TL3 = 8,
+ /* Decap through Tunnel L4 header */
+ CFA_P70_FULL_ACTION_DECAP_TO_TL4 = 9,
+ /* Decap through Tunnel header */
+ CFA_P70_FULL_ACTION_DECAP_TO_T = 10,
+ /* Decap through Inner L2 */
+ CFA_P70_FULL_ACTION_DECAP_TO_L2 = 11,
+ /* Decap through Inner L3 */
+ CFA_P70_FULL_ACTION_DECAP_TO_L3 = 12,
+ /* Decap through inner L4 */
+ CFA_P70_FULL_ACTION_DECAP_TO_L4 = 13,
+ /* Shift tunnel->inner (single shift) */
+ CFA_P70_FULL_ACTION_DECAP_SHIFT_SINGLE = 14,
+ /* Un-parse (treat header as payload) */
+ CFA_P70_FULL_ACTION_DECAP_UNPARSE = 15,
+ /* Shift outer tunnel->inner (double shift) */
+ CFA_P70_FULL_ACTION_DECAP_SHIFT_DOUBLE = 18,
+ /* Decap through Outer Tunnel L2 header */
+ CFA_P70_FULL_ACTION_DECAP_TO_OL2 = 20,
+ /* Decap through Outer Tunnel L3 header */
+ CFA_P70_FULL_ACTION_DECAP_TO_OL3 = 21,
+ /* Decap through Outer Tunnel L4 header */
+ CFA_P70_FULL_ACTION_DECAP_TO_OL4 = 22,
+ /* Decap through Outer Tunnel header */
+ CFA_P70_FULL_ACTION_DECAP_TO_OT = 23,
+};
+
+/**
+ * The mirroring value selects one of 31 mirror destinations for the
+ * packet. A value of zero means that there is no Action Record
+ * mirroring for the packet.
+ */
+#define CFA_P70_FULL_ACTION_MIRRORING_BITPOS 24
+#define CFA_P70_FULL_ACTION_MIRRORING_NUM_BITS 5
+
+/**
+ * This value points to one of the 1024 meter entries. If the meter has
+ * scope verification enabled, then the scope in the meter table entry
+ * must match the scope of this action record.
+ */
+#define CFA_P70_FULL_ACTION_METER_PTR_BITPOS 29
+#define CFA_P70_FULL_ACTION_METER_PTR_NUM_BITS 10
+
+/**
+ * This is the pointer to the statistic structure in 8B units. A value of
+ * zero will disable the statistics action.
+ */
+#define CFA_P70_FULL_ACTION_STAT0_PTR_BITPOS 39
+#define CFA_P70_FULL_ACTION_STAT0_PTR_NUM_BITS 28
+
+/**
+ * This value controls the packet size that is used for counted stats.
+ */
+#define CFA_P70_FULL_ACTION_STAT0_OP_BITPOS 67
+#define CFA_P70_FULL_ACTION_STAT0_OP_NUM_BITS 1
+/**
+ * Enumeration definition for field 'stat0_op'
+ */
+enum cfa_p70_full_action_stat0_op {
+ /* Statistics count reflects packet at 'ingress' to CFA. */
+ CFA_P70_FULL_ACTION_STAT0_OP_INGRESS = 0,
+ /* Statistics count reflects packet at 'egress' from CFA. */
+ CFA_P70_FULL_ACTION_STAT0_OP_EGRESS = 1,
+};
+
+/**
+ * Selects counter type. In all cases, fields are packet little endian
+ * in the action memory.
+ */
+#define CFA_P70_FULL_ACTION_STAT0_CTR_TYPE_BITPOS 68
+#define CFA_P70_FULL_ACTION_STAT0_CTR_TYPE_NUM_BITS 2
+/**
+ * Enumeration definition for field 'stat0_ctr_type'
+ */
+enum cfa_p70_full_action_stat0_ctr_type {
+ /* Forward packet count(64b)/byte count(64b) */
+ CFA_P70_FULL_ACTION_STAT0_CTR_TYPE_B16 = 0,
+ /*
+ * Forward packet count(64b)/byte count(64b) timestamp(32b) TCP
+ * Flags(16b) reserved(23b)
+ */
+ CFA_P70_FULL_ACTION_STAT0_CTR_TYPE_B24 = 1,
+ /*
+ * Forward packet count(64b)/byte count(64b) Meter (drop or red) packet
+ * count(64b)/byte count(64b)
+ */
+ CFA_P70_FULL_ACTION_STAT0_CTR_TYPE_B32A = 2,
+ /*
+ * Forward packet count(64b)/byte count(64b) Meter timestamp(32b) TCP
+ * Flags(16b) reserved(6b) (drop or red) packet count(38b)/byte
+ * count(42b)
+ */
+ CFA_P70_FULL_ACTION_STAT0_CTR_TYPE_B32B = 3,
+};
+
+/**
+ * This is the pointer to the statistic structure in 8B units. A value of
+ * zero will disable the statistics action.
+ */
+#define CFA_P70_FULL_ACTION_STAT1_PTR_BITPOS 70
+#define CFA_P70_FULL_ACTION_STAT1_PTR_NUM_BITS 28
+
+/**
+ * This value controls the packet size that is used for counted stats.
+ */
+#define CFA_P70_FULL_ACTION_STAT1_OP_BITPOS 98
+#define CFA_P70_FULL_ACTION_STAT1_OP_NUM_BITS 1
+/**
+ * Enumeration definition for field 'stat1_op'
+ */
+enum cfa_p70_full_action_stat1_op {
+ /* Statistics count reflects packet at 'ingress' to CFA. */
+ CFA_P70_FULL_ACTION_STAT1_OP_INGRESS = 0,
+ /* Statistics count reflects packet at 'egress' from CFA. */
+ CFA_P70_FULL_ACTION_STAT1_OP_EGRESS = 1,
+};
+
+/**
+ * Selects counter type. In all cases, fields are packet little endian
+ * in the action memory.
+ */
+#define CFA_P70_FULL_ACTION_STAT1_CTR_TYPE_BITPOS 99
+#define CFA_P70_FULL_ACTION_STAT1_CTR_TYPE_NUM_BITS 2
+/**
+ * Enumeration definition for field 'stat1_ctr_type'
+ */
+enum cfa_p70_full_action_stat1_ctr_type {
+ /* Forward packet count(64b)/byte count(64b) */
+ CFA_P70_FULL_ACTION_STAT1_CTR_TYPE_B16 = 0,
+ /*
+ * Forward packet count(64b)/byte count(64b) timestamp(32b) TCP
+ * Flags(16b) reserved(23b)
+ */
+ CFA_P70_FULL_ACTION_STAT1_CTR_TYPE_B24 = 1,
+ /*
+ * Forward packet count(64b)/byte count(64b) Meter (drop or red) packet
+ * count(64b)/byte count(64b)
+ */
+ CFA_P70_FULL_ACTION_STAT1_CTR_TYPE_B32A = 2,
+ /*
+ * Forward packet count(64b)/byte count(64b) Meter timestamp(32b) TCP
+ * Flags(16b) reserved(6b) (drop or red) packet count(38b)/byte
+ * count(42b)
+ */
+ CFA_P70_FULL_ACTION_STAT1_CTR_TYPE_B32B = 3,
+};
+
+/**
+ * This is a pointer to the modification record. This is a pointer in 8B
+ * units directly to dependent record data. A value of zero indicates no
+ * additional actions.
+ */
+#define CFA_P70_FULL_ACTION_MOD_PTR_BITPOS 101
+#define CFA_P70_FULL_ACTION_MOD_PTR_NUM_BITS 28
+
+/**
+ * This is a pointer to the encapsulation record. This is a pointer in
+ * 8B units directly to dependent record data. A value of zero indicates
+ * no additional actions.
+ */
+#define CFA_P70_FULL_ACTION_ENC_PTR_BITPOS 129
+#define CFA_P70_FULL_ACTION_ENC_PTR_NUM_BITS 28
+
+/**
+ * This is a pointer to the source record. This is a pointer in 8B units
+ * directly to dependent record data. A value of zero indicates no
+ * additional actions.
+ */
+#define CFA_P70_FULL_ACTION_SRC_PTR_BITPOS 157
+#define CFA_P70_FULL_ACTION_SRC_PTR_NUM_BITS 28
+#define CFA_P70_FULL_ACTION_UNUSED_0_BITPOS 185
+#define CFA_P70_FULL_ACTION_UNUSED_0_NUM_BITS 7
+
+/**
+ * Total number of bits for full_action
+ */
+#define CFA_P70_FULL_ACTION_TOTAL_NUM_BITS 192
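+
+/*
+ * Note (illustrative only; the helper names are hypothetical and 'base' is
+ * assumed to be whatever base address the action table is accessed at): the
+ * full action record carries 28b pointers in 8B units directly to dependent
+ * record data, whereas the compact record carries short offsets in 8B units
+ * relative to the start of the action record itself:
+ *
+ *   static inline uint64_t
+ *   example_full_dep_addr(uint64_t base, uint64_t ptr)
+ *   {
+ *       return base + ptr * 8;         // ptr = mod_ptr/enc_ptr/src_ptr
+ *   }
+ *
+ *   static inline uint64_t
+ *   example_compact_dep_addr(uint64_t act_rec_addr, uint64_t off)
+ *   {
+ *       return act_rec_addr + off * 8; // off = mod_off/enc_off/src_off
+ *   }
+ */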
+
+/**
+ * The type field identifies the format of the action record to the
+ * hardware.
+ */
+#define CFA_P70_MCG_ACTION_TYPE_BITPOS 0
+#define CFA_P70_MCG_ACTION_TYPE_NUM_BITS 3
+/**
+ * Enumeration definition for field 'type'
+ */
+enum cfa_p70_mcg_action_type {
+ /*
+ * Multicast Group Action Record. This action is used to send the packet
+ * to multiple destinations. The MGC Action record is 256b.
+ */
+ CFA_P70_MCG_ACTION_TYPE_MCG_ACTION = 4,
+};
+
+/**
+ * When this bit is set to '1', source knockout will be supported for
+ * the MCG record. This value also applies to any chained subsequent MCG
+ * records. This is applied on the RX CFA only.
+ */
+#define CFA_P70_MCG_ACTION_SRC_KO_EN_BITPOS 3
+#define CFA_P70_MCG_ACTION_SRC_KO_EN_NUM_BITS 1
+#define CFA_P70_MCG_ACTION_UNUSED_0_BITPOS 4
+#define CFA_P70_MCG_ACTION_UNUSED_0_NUM_BITS 2
+
+/**
+ * This is a pointer to the next MGC Subsequent Entries Record. The
+ * Subsequent Entries MGC record must be on a 32B boundary. A value of
+ * zero indicates that there are no additional MGC Subsequent Entries
+ * records.
+ */
+#define CFA_P70_MCG_ACTION_NEXT_PTR_BITPOS 6
+#define CFA_P70_MCG_ACTION_NEXT_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_ACTION_PTR0_ACT_HINT_BITPOS 32
+#define CFA_P70_MCG_ACTION_PTR0_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_ACTION_PTR0_ACT_REC_PTR_BITPOS 34
+#define CFA_P70_MCG_ACTION_PTR0_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_ACTION_PTR1_ACT_HINT_BITPOS 60
+#define CFA_P70_MCG_ACTION_PTR1_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_ACTION_PTR1_ACT_REC_PTR_BITPOS 62
+#define CFA_P70_MCG_ACTION_PTR1_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_ACTION_PTR2_ACT_HINT_BITPOS 88
+#define CFA_P70_MCG_ACTION_PTR2_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_ACTION_PTR2_ACT_REC_PTR_BITPOS 90
+#define CFA_P70_MCG_ACTION_PTR2_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_ACTION_PTR3_ACT_HINT_BITPOS 116
+#define CFA_P70_MCG_ACTION_PTR3_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_ACTION_PTR3_ACT_REC_PTR_BITPOS 118
+#define CFA_P70_MCG_ACTION_PTR3_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_ACTION_PTR4_ACT_HINT_BITPOS 144
+#define CFA_P70_MCG_ACTION_PTR4_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_ACTION_PTR4_ACT_REC_PTR_BITPOS 146
+#define CFA_P70_MCG_ACTION_PTR4_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_ACTION_PTR5_ACT_HINT_BITPOS 172
+#define CFA_P70_MCG_ACTION_PTR5_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_ACTION_PTR5_ACT_REC_PTR_BITPOS 174
+#define CFA_P70_MCG_ACTION_PTR5_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_ACTION_PTR6_ACT_HINT_BITPOS 200
+#define CFA_P70_MCG_ACTION_PTR6_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_ACTION_PTR6_ACT_REC_PTR_BITPOS 202
+#define CFA_P70_MCG_ACTION_PTR6_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_ACTION_PTR7_ACT_HINT_BITPOS 228
+#define CFA_P70_MCG_ACTION_PTR7_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_ACTION_PTR7_ACT_REC_PTR_BITPOS 230
+#define CFA_P70_MCG_ACTION_PTR7_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * Total number of bits for mcg_action
+ */
+#define CFA_P70_MCG_ACTION_TOTAL_NUM_BITS 256
+
+/**
+ * The type field identifies the format of the action record to the
+ * hardware.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_TYPE_BITPOS 0
+#define CFA_P70_MCG_SUBSEQ_ACTION_TYPE_NUM_BITS 3
+/**
+ * Enumeration definition for field 'type'
+ */
+enum cfa_p70_mcg_subseq_action_type {
+ /*
+ * Multicast Group Action Record. This action is used to send the packet
+ * to multiple destinations. The MGC Action record is 256b.
+ */
+ CFA_P70_MCG_SUBSEQ_ACTION_TYPE_MCG_ACTION = 4,
+};
+#define CFA_P70_MCG_SUBSEQ_ACTION_UNUSED_0_BITPOS 3
+#define CFA_P70_MCG_SUBSEQ_ACTION_UNUSED_0_NUM_BITS 3
+
+/**
+ * This is a pointer to the next MGC Subsequent Entries Record. The
+ * Subsequent Entries MGC record must be on a 32B boundary. A value of
+ * zero indicates that there are no additional MGC Subsequent Entries
+ * records.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_NEXT_PTR_BITPOS 6
+#define CFA_P70_MCG_SUBSEQ_ACTION_NEXT_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR0_ACT_HINT_BITPOS 32
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR0_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR0_ACT_REC_PTR_BITPOS 34
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR0_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR1_ACT_HINT_BITPOS 60
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR1_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR1_ACT_REC_PTR_BITPOS 62
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR1_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR2_ACT_HINT_BITPOS 88
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR2_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR2_ACT_REC_PTR_BITPOS 90
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR2_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR3_ACT_HINT_BITPOS 116
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR3_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR3_ACT_REC_PTR_BITPOS 118
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR3_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR4_ACT_HINT_BITPOS 144
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR4_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR4_ACT_REC_PTR_BITPOS 146
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR4_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR5_ACT_HINT_BITPOS 172
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR5_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR5_ACT_REC_PTR_BITPOS 174
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR5_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR6_ACT_HINT_BITPOS 200
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR6_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR6_ACT_REC_PTR_BITPOS 202
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR6_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * This is the prefetch hint that corresponds to this action record
+ * pointer. This value will index into the hint table for the current
+ * scope to determine the actual prefetch size.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR7_ACT_HINT_BITPOS 228
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR7_ACT_HINT_NUM_BITS 2
+
+/**
+ * This is an individual action record pointer for an MGC entry. This
+ * points to an action record for this particular MGC member. If this
+ * pointer is zero, then it will not be followed.
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR7_ACT_REC_PTR_BITPOS 230
+#define CFA_P70_MCG_SUBSEQ_ACTION_PTR7_ACT_REC_PTR_NUM_BITS 26
+
+/**
+ * Total number of bits for mcg_subseq_action
+ */
+#define CFA_P70_MCG_SUBSEQ_ACTION_TOTAL_NUM_BITS 256
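+
+/*
+ * Traversal sketch (illustrative only; the structure below is a software
+ * stand-in for the packed record, not the on-chip layout): an MCG chain is
+ * walked by visiting the up-to-eight member pointers of each record,
+ * skipping zero entries, and following a non-zero next_ptr to the next
+ * 32B-aligned subsequent entries record until it is zero:
+ *
+ *   struct example_mcg_rec {
+ *       uint32_t next_ptr;        // 0 terminates the chain
+ *       uint32_t act_rec_ptr[8];  // 0 entries are not followed
+ *   };
+ *
+ *   static inline void
+ *   example_mcg_visit_members(const struct example_mcg_rec *rec,
+ *                             void (*visit)(uint32_t act_rec_ptr))
+ *   {
+ *       int i;
+ *
+ *       for (i = 0; i < 8; i++)
+ *           if (rec->act_rec_ptr[i] != 0)
+ *               visit(rec->act_rec_ptr[i]);
+ *       // A non-zero rec->next_ptr would then be resolved and the next
+ *       // record processed in the same way.
+ *   }
+ */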
+
+/**
+ * Current committed token bucket count.
+ */
+#define CFA_P70_METERS_BKT_C_BITPOS 0
+#define CFA_P70_METERS_BKT_C_NUM_BITS 27
+
+/**
+ * Current excess token bucket count.
+ */
+#define CFA_P70_METERS_BKT_E_BITPOS 27
+#define CFA_P70_METERS_BKT_E_NUM_BITS 27
+
+/**
+ * Meter Valid
+ */
+#define CFA_P70_METERS_FLAGS_MTR_VAL_BITPOS 54
+#define CFA_P70_METERS_FLAGS_MTR_VAL_NUM_BITS 1
+
+/**
+ * ECN Remap Enable
+ */
+#define CFA_P70_METERS_FLAGS_ECN_RMP_EN_BITPOS 55
+#define CFA_P70_METERS_FLAGS_ECN_RMP_EN_NUM_BITS 1
+
+/**
+ * Coupling Flag. Indicates that tokens being added to the committed
+ * bucket should be diverted to the excess bucket when the committed
+ * bucket is full. This bit is ignored when RFC2698=1
+ */
+#define CFA_P70_METERS_FLAGS_CF_BITPOS 56
+#define CFA_P70_METERS_FLAGS_CF_NUM_BITS 1
+
+/**
+ * Packet Mode. When set, packet length is ignored and a global value is
+ * used instead.
+ */
+#define CFA_P70_METERS_FLAGS_PM_BITPOS 57
+#define CFA_P70_METERS_FLAGS_PM_NUM_BITS 1
+
+/**
+ * RFC2698 Enable - Indicates if BOTH buckets must have sufficient
+ * tokens to color a packet green per RFC2698, as opposed to just the
+ * committed bucket.
+ */
+#define CFA_P70_METERS_FLAGS_RFC2698_BITPOS 58
+#define CFA_P70_METERS_FLAGS_RFC2698_NUM_BITS 1
+
+/**
+ * Committed Bucket Strict Mode. If set, a packet conforms to the
+ * committed bucket only if the number of tokens is greater than or
+ * equal to the packet length. When not set meter conformance is
+ * independent of packet size and requires only that the token count is
+ * non-negative.
+ */
+#define CFA_P70_METERS_FLAGS_CBSM_BITPOS 59
+#define CFA_P70_METERS_FLAGS_CBSM_NUM_BITS 1
+
+/**
+ * Excess Bucket Strict Mode. If set, a packet conforms to the excess
+ * bucket only if the number of tokens is greater than or equal to the
+ * packet length. When not set, meter conformance is independent of
+ * packet size and requires only that the token count is non-negative.
+ */
+#define CFA_P70_METERS_FLAGS_EBSM_BITPOS 60
+#define CFA_P70_METERS_FLAGS_EBSM_NUM_BITS 1
+
+/**
+ * Committed Bucket No Decrement. If set, tokens are never decremented
+ * from the committed bucket, even when the packet is Green.
+ */
+#define CFA_P70_METERS_FLAGS_CBND_BITPOS 61
+#define CFA_P70_METERS_FLAGS_CBND_NUM_BITS 1
+
+/**
+ * Excess Bucket No Decrement. If set, tokens are never decremented from
+ * the excess bucket, even when the packet is Green.
+ */
+#define CFA_P70_METERS_FLAGS_EBND_BITPOS 62
+#define CFA_P70_METERS_FLAGS_EBND_NUM_BITS 1
+
+/**
+ * Committed Burst Size. Expressed in bytes in a normalized floating
+ * point format.
+ */
+#define CFA_P70_METERS_CBS_BITPOS 63
+#define CFA_P70_METERS_CBS_NUM_BITS 12
+
+/**
+ * Excess Burst Size. Expressed in bytes in a normalized floating point
+ * format.
+ */
+#define CFA_P70_METERS_EBS_BITPOS 75
+#define CFA_P70_METERS_EBS_NUM_BITS 12
+
+/**
+ * Committed Information Rate. A rate expressed in bytes per clock cycle
+ * in a normalized floating point format.
+ */
+#define CFA_P70_METERS_CIR_BITPOS 87
+#define CFA_P70_METERS_CIR_NUM_BITS 17
+
+/**
+ * Excess Information Rate. A rate expressed in bytes per clock cycle in
+ * a normalized floating point format.
+ */
+#define CFA_P70_METERS_EIR_BITPOS 104
+#define CFA_P70_METERS_EIR_NUM_BITS 17
+
+/**
+ * This is the scope whose action records will be allowed to reference
+ * this meter if the enable bit is '1'.
+ */
+#define CFA_P70_METERS_PROTECTION_SCOPE_BITPOS 121
+#define CFA_P70_METERS_PROTECTION_SCOPE_NUM_BITS 5
+
+/**
+ * Reserved.
+ */
+#define CFA_P70_METERS_PROTECTION_RSVD_BITPOS 126
+#define CFA_P70_METERS_PROTECTION_RSVD_NUM_BITS 1
+
+/**
+ * When this bit is '1', the meter will be protected from any scope
+ * action other than the one in the scope field.
+ */
+#define CFA_P70_METERS_PROTECTION_ENABLE_BITPOS 127
+#define CFA_P70_METERS_PROTECTION_ENABLE_NUM_BITS 1
+
+/**
+ * Total number of bits for meters
+ */
+#define CFA_P70_METERS_TOTAL_NUM_BITS 128
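+
+/*
+ * Conformance sketch (illustrative only; a simplified software model, not
+ * the hardware algorithm): with CBSM/EBSM set a bucket conforms only when
+ * it holds at least the packet length in tokens, otherwise a non-negative
+ * token count is enough, and with RFC2698 set a green result additionally
+ * requires the excess bucket to conform:
+ *
+ *   enum example_color { EXAMPLE_GREEN, EXAMPLE_YELLOW, EXAMPLE_RED };
+ *
+ *   static inline enum example_color
+ *   example_meter_color(int64_t bkt_c, int64_t bkt_e, uint32_t pkt_len,
+ *                       bool cbsm, bool ebsm, bool rfc2698)
+ *   {
+ *       bool c_ok = cbsm ? bkt_c >= (int64_t)pkt_len : bkt_c >= 0;
+ *       bool e_ok = ebsm ? bkt_e >= (int64_t)pkt_len : bkt_e >= 0;
+ *
+ *       if (c_ok && (!rfc2698 || e_ok))
+ *           return EXAMPLE_GREEN;
+ *       return e_ok ? EXAMPLE_YELLOW : EXAMPLE_RED;
+ *   }
+ */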
+
+/**
+ * Field length definitions for fkb
+ */
+#define CFA_P70_FKB_PROF_ID_NUM_BITS 8
+#define CFA_P70_FKB_L2CTXT_NUM_BITS 11
+#define CFA_P70_FKB_L2FUNC_NUM_BITS 8
+#define CFA_P70_FKB_PARIF_NUM_BITS 2
+#define CFA_P70_FKB_SPIF_NUM_BITS 2
+#define CFA_P70_FKB_SVIF_NUM_BITS 6
+#define CFA_P70_FKB_LCOS_NUM_BITS 3
+#define CFA_P70_FKB_META_HI_NUM_BITS 16
+#define CFA_P70_FKB_META_LO_NUM_BITS 16
+#define CFA_P70_FKB_RCYC_CNT_NUM_BITS 4
+#define CFA_P70_FKB_LOOPBACK_NUM_BITS 1
+#define CFA_P70_FKB_OTL2_TYPE_NUM_BITS 2
+#define CFA_P70_FKB_OTL2_DMAC_NUM_BITS 48
+#define CFA_P70_FKB_OTL2_SMAC_NUM_BITS 48
+#define CFA_P70_FKB_OTL2_DT_NUM_BITS 2
+#define CFA_P70_FKB_OTL2_SA_NUM_BITS 1
+#define CFA_P70_FKB_OTL2_NVT_NUM_BITS 2
+#define CFA_P70_FKB_OTL2_OVP_NUM_BITS 3
+#define CFA_P70_FKB_OTL2_OVD_NUM_BITS 1
+#define CFA_P70_FKB_OTL2_OVV_NUM_BITS 12
+#define CFA_P70_FKB_OTL2_OVT_NUM_BITS 3
+#define CFA_P70_FKB_OTL2_IVP_NUM_BITS 3
+#define CFA_P70_FKB_OTL2_IVD_NUM_BITS 1
+#define CFA_P70_FKB_OTL2_IVV_NUM_BITS 12
+#define CFA_P70_FKB_OTL2_IVT_NUM_BITS 3
+#define CFA_P70_FKB_OTL2_ETYPE_NUM_BITS 16
+#define CFA_P70_FKB_OTL3_TYPE_NUM_BITS 4
+#define CFA_P70_FKB_OTL3_SIP3_NUM_BITS 32
+#define CFA_P70_FKB_OTL3_SIP2_NUM_BITS 32
+#define CFA_P70_FKB_OTL3_SIP1_NUM_BITS 32
+#define CFA_P70_FKB_OTL3_SIP0_NUM_BITS 32
+#define CFA_P70_FKB_OTL3_DIP3_NUM_BITS 32
+#define CFA_P70_FKB_OTL3_DIP2_NUM_BITS 32
+#define CFA_P70_FKB_OTL3_DIP1_NUM_BITS 32
+#define CFA_P70_FKB_OTL3_DIP0_NUM_BITS 32
+#define CFA_P70_FKB_OTL3_TTL_NUM_BITS 8
+#define CFA_P70_FKB_OTL3_PROT_NUM_BITS 8
+/**
+ * The CFA_P70_FKB_OTL3_FID bit length is not fixed, so the
+ * CFA_P70_FKB_OTL3_FID_NUM_BITS macro takes a condition argument.
+ */
+#define CFA_P70_FKB_OTL3_FID_NUM_BITS(COND) ((COND) ? 16 : 20)
+#define CFA_P70_FKB_OTL3_QOS_NUM_BITS 8
+#define CFA_P70_FKB_OTL3_IEH_NONEXT_NUM_BITS 1
+#define CFA_P70_FKB_OTL3_IEH_SEP_NUM_BITS 1
+#define CFA_P70_FKB_OTL3_IEH_AUTH_NUM_BITS 1
+#define CFA_P70_FKB_OTL3_IEH_DEST_NUM_BITS 1
+#define CFA_P70_FKB_OTL3_IEH_FRAG_NUM_BITS 1
+#define CFA_P70_FKB_OTL3_IEH_RTHDR_NUM_BITS 1
+#define CFA_P70_FKB_OTL3_IEH_HOP_NUM_BITS 1
+#define CFA_P70_FKB_OTL3_IEH_1FRAG_NUM_BITS 1
+#define CFA_P70_FKB_OTL3_DF_NUM_BITS 1
+#define CFA_P70_FKB_OTL3_L3ERR_NUM_BITS 4
+#define CFA_P70_FKB_OTL4_TYPE_NUM_BITS 4
+#define CFA_P70_FKB_OTL4_SRC_NUM_BITS 16
+#define CFA_P70_FKB_OTL4_DST_NUM_BITS 16
+#define CFA_P70_FKB_OTL4_FLAGS_NUM_BITS 9
+#define CFA_P70_FKB_OTL4_SEQ_NUM_BITS 32
+#define CFA_P70_FKB_OTL4_PA_NUM_BITS 1
+#define CFA_P70_FKB_OTL4_OPT_NUM_BITS 1
+#define CFA_P70_FKB_OTL4_TCPTS_NUM_BITS 1
+#define CFA_P70_FKB_OTL4_ERR_NUM_BITS 4
+#define CFA_P70_FKB_OT_TYPE_NUM_BITS 5
+#define CFA_P70_FKB_OT_FLAGS_NUM_BITS 8
+#define CFA_P70_FKB_OT_IDS_NUM_BITS 24
+#define CFA_P70_FKB_OT_ID_NUM_BITS 32
+#define CFA_P70_FKB_OT_CTXTS_NUM_BITS 24
+#define CFA_P70_FKB_OT_CTXT_NUM_BITS 32
+#define CFA_P70_FKB_OT_QOS_NUM_BITS 3
+#define CFA_P70_FKB_OT_ERR_NUM_BITS 4
+#define CFA_P70_FKB_TL2_TYPE_NUM_BITS 2
+#define CFA_P70_FKB_TL2_DMAC_NUM_BITS 48
+#define CFA_P70_FKB_TL2_SMAC_NUM_BITS 48
+#define CFA_P70_FKB_TL2_DT_NUM_BITS 2
+#define CFA_P70_FKB_TL2_SA_NUM_BITS 1
+#define CFA_P70_FKB_TL2_NVT_NUM_BITS 2
+#define CFA_P70_FKB_TL2_OVP_NUM_BITS 3
+#define CFA_P70_FKB_TL2_OVD_NUM_BITS 1
+#define CFA_P70_FKB_TL2_OVV_NUM_BITS 12
+#define CFA_P70_FKB_TL2_OVT_NUM_BITS 3
+#define CFA_P70_FKB_TL2_IVP_NUM_BITS 3
+#define CFA_P70_FKB_TL2_IVD_NUM_BITS 1
+#define CFA_P70_FKB_TL2_IVV_NUM_BITS 12
+#define CFA_P70_FKB_TL2_IVT_NUM_BITS 3
+#define CFA_P70_FKB_TL2_ETYPE_NUM_BITS 16
+#define CFA_P70_FKB_TL3_TYPE_NUM_BITS 4
+#define CFA_P70_FKB_TL3_SIP3_NUM_BITS 32
+#define CFA_P70_FKB_TL3_SIP2_NUM_BITS 32
+#define CFA_P70_FKB_TL3_SIP1_NUM_BITS 32
+#define CFA_P70_FKB_TL3_SIP0_NUM_BITS 32
+#define CFA_P70_FKB_TL3_DIP3_NUM_BITS 32
+#define CFA_P70_FKB_TL3_DIP2_NUM_BITS 32
+#define CFA_P70_FKB_TL3_DIP1_NUM_BITS 32
+#define CFA_P70_FKB_TL3_DIP0_NUM_BITS 32
+#define CFA_P70_FKB_TL3_TTL_NUM_BITS 8
+#define CFA_P70_FKB_TL3_PROT_NUM_BITS 8
+/**
+ * The CFA_P70_FKB_TL3_FID bit length is not fixed, so the
+ * CFA_P70_FKB_TL3_FID_NUM_BITS macro takes a condition argument.
+ */
+#define CFA_P70_FKB_TL3_FID_NUM_BITS(COND) ((COND) ? 16 : 20)
+#define CFA_P70_FKB_TL3_QOS_NUM_BITS 8
+#define CFA_P70_FKB_TL3_IEH_NONEXT_NUM_BITS 1
+#define CFA_P70_FKB_TL3_IEH_SEP_NUM_BITS 1
+#define CFA_P70_FKB_TL3_IEH_AUTH_NUM_BITS 1
+#define CFA_P70_FKB_TL3_IEH_DEST_NUM_BITS 1
+#define CFA_P70_FKB_TL3_IEH_FRAG_NUM_BITS 1
+#define CFA_P70_FKB_TL3_IEH_RTHDR_NUM_BITS 1
+#define CFA_P70_FKB_TL3_IEH_HOP_NUM_BITS 1
+#define CFA_P70_FKB_TL3_IEH_1FRAG_NUM_BITS 1
+#define CFA_P70_FKB_TL3_DF_NUM_BITS 1
+#define CFA_P70_FKB_TL3_L3ERR_NUM_BITS 4
+#define CFA_P70_FKB_TL4_TYPE_NUM_BITS 4
+#define CFA_P70_FKB_TL4_SRC_NUM_BITS 16
+#define CFA_P70_FKB_TL4_DST_NUM_BITS 16
+#define CFA_P70_FKB_TL4_FLAGS_NUM_BITS 9
+#define CFA_P70_FKB_TL4_SEQ_NUM_BITS 32
+#define CFA_P70_FKB_TL4_PA_NUM_BITS 1
+#define CFA_P70_FKB_TL4_OPT_NUM_BITS 1
+#define CFA_P70_FKB_TL4_TCPTS_NUM_BITS 1
+#define CFA_P70_FKB_TL4_ERR_NUM_BITS 4
+#define CFA_P70_FKB_T_TYPE_NUM_BITS 5
+#define CFA_P70_FKB_T_FLAGS_NUM_BITS 8
+#define CFA_P70_FKB_T_IDS_NUM_BITS 24
+#define CFA_P70_FKB_T_ID_NUM_BITS 32
+#define CFA_P70_FKB_T_CTXTS_NUM_BITS 24
+#define CFA_P70_FKB_T_CTXT_NUM_BITS 32
+#define CFA_P70_FKB_T_QOS_NUM_BITS 3
+#define CFA_P70_FKB_T_ERR_NUM_BITS 4
+#define CFA_P70_FKB_L2_TYPE_NUM_BITS 2
+#define CFA_P70_FKB_L2_DMAC_NUM_BITS 48
+#define CFA_P70_FKB_L2_SMAC_NUM_BITS 48
+#define CFA_P70_FKB_L2_DT_NUM_BITS 2
+#define CFA_P70_FKB_L2_SA_NUM_BITS 1
+#define CFA_P70_FKB_L2_NVT_NUM_BITS 2
+#define CFA_P70_FKB_L2_OVP_NUM_BITS 3
+#define CFA_P70_FKB_L2_OVD_NUM_BITS 1
+#define CFA_P70_FKB_L2_OVV_NUM_BITS 12
+#define CFA_P70_FKB_L2_OVT_NUM_BITS 3
+#define CFA_P70_FKB_L2_IVP_NUM_BITS 3
+#define CFA_P70_FKB_L2_IVD_NUM_BITS 1
+#define CFA_P70_FKB_L2_IVV_NUM_BITS 12
+#define CFA_P70_FKB_L2_IVT_NUM_BITS 3
+#define CFA_P70_FKB_L2_ETYPE_NUM_BITS 16
+#define CFA_P70_FKB_L3_TYPE_NUM_BITS 4
+#define CFA_P70_FKB_L3_SIP3_NUM_BITS 32
+#define CFA_P70_FKB_L3_SIP2_NUM_BITS 32
+#define CFA_P70_FKB_L3_SIP1_NUM_BITS 32
+#define CFA_P70_FKB_L3_SIP0_NUM_BITS 32
+#define CFA_P70_FKB_L3_DIP3_NUM_BITS 32
+#define CFA_P70_FKB_L3_DIP2_NUM_BITS 32
+#define CFA_P70_FKB_L3_DIP1_NUM_BITS 32
+#define CFA_P70_FKB_L3_DIP0_NUM_BITS 32
+#define CFA_P70_FKB_L3_TTL_NUM_BITS 8
+#define CFA_P70_FKB_L3_PROT_NUM_BITS 8
+/**
+ * The CFA_P70_FKB_L3_FID bit length is not fixed, so the
+ * CFA_P70_FKB_L3_FID_NUM_BITS macro takes a condition argument.
+ */
+#define CFA_P70_FKB_L3_FID_NUM_BITS(COND) ((COND) ? 16 : 20)
+#define CFA_P70_FKB_L3_QOS_NUM_BITS 8
+#define CFA_P70_FKB_L3_IEH_NONEXT_NUM_BITS 1
+#define CFA_P70_FKB_L3_IEH_SEP_NUM_BITS 1
+#define CFA_P70_FKB_L3_IEH_AUTH_NUM_BITS 1
+#define CFA_P70_FKB_L3_IEH_DEST_NUM_BITS 1
+#define CFA_P70_FKB_L3_IEH_FRAG_NUM_BITS 1
+#define CFA_P70_FKB_L3_IEH_RTHDR_NUM_BITS 1
+#define CFA_P70_FKB_L3_IEH_HOP_NUM_BITS 1
+#define CFA_P70_FKB_L3_IEH_1FRAG_NUM_BITS 1
+#define CFA_P70_FKB_L3_DF_NUM_BITS 1
+#define CFA_P70_FKB_L3_L3ERR_NUM_BITS 4
+#define CFA_P70_FKB_L4_TYPE_NUM_BITS 4
+#define CFA_P70_FKB_L4_SRC_NUM_BITS 16
+#define CFA_P70_FKB_L4_DST_NUM_BITS 16
+#define CFA_P70_FKB_L4_FLAGS_NUM_BITS 9
+#define CFA_P70_FKB_L4_SEQ_NUM_BITS 32
+#define CFA_P70_FKB_L4_ACK_NUM_BITS 32
+#define CFA_P70_FKB_L4_WIN_NUM_BITS 16
+#define CFA_P70_FKB_L4_PA_NUM_BITS 1
+#define CFA_P70_FKB_L4_OPT_NUM_BITS 1
+#define CFA_P70_FKB_L4_TCPTS_NUM_BITS 1
+#define CFA_P70_FKB_L4_TSVAL_NUM_BITS 32
+#define CFA_P70_FKB_L4_TXECR_NUM_BITS 32
+#define CFA_P70_FKB_L4_ERR_NUM_BITS 4
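+
+/*
+ * Sizing sketch (illustrative only; the helper is hypothetical): most field
+ * widths above are plain constants, but the FID width is conditional, so
+ * the function-like CFA_P70_FKB_*_FID_NUM_BITS(COND) macros select between
+ * the two supported widths (16 or 20 bits) at the point of use:
+ *
+ *   static inline unsigned int
+ *   example_fkb_l3_partial_bits(bool fid_cond)
+ *   {
+ *       return CFA_P70_FKB_L3_TTL_NUM_BITS +
+ *              CFA_P70_FKB_L3_PROT_NUM_BITS +
+ *              CFA_P70_FKB_L3_FID_NUM_BITS(fid_cond) +
+ *              CFA_P70_FKB_L3_QOS_NUM_BITS;
+ *   }
+ */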
+
+/**
+ * Field length definitions for wc tcam fkb
+ */
+#define CFA_P70_WC_TCAM_FKB_PROF_ID_NUM_BITS 8
+#define CFA_P70_WC_TCAM_FKB_L2CTXT_NUM_BITS 11
+#define CFA_P70_WC_TCAM_FKB_L2FUNC_NUM_BITS 8
+#define CFA_P70_WC_TCAM_FKB_PARIF_NUM_BITS 2
+#define CFA_P70_WC_TCAM_FKB_SPIF_NUM_BITS 2
+#define CFA_P70_WC_TCAM_FKB_SVIF_NUM_BITS 6
+#define CFA_P70_WC_TCAM_FKB_LCOS_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_META_HI_NUM_BITS 16
+#define CFA_P70_WC_TCAM_FKB_META_LO_NUM_BITS 16
+#define CFA_P70_WC_TCAM_FKB_RCYC_CNT_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_LOOPBACK_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL2_TYPE_NUM_BITS 2
+#define CFA_P70_WC_TCAM_FKB_OTL2_DMAC_NUM_BITS 48
+#define CFA_P70_WC_TCAM_FKB_OTL2_SMAC_NUM_BITS 48
+#define CFA_P70_WC_TCAM_FKB_OTL2_DT_NUM_BITS 2
+#define CFA_P70_WC_TCAM_FKB_OTL2_SA_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL2_NVT_NUM_BITS 2
+#define CFA_P70_WC_TCAM_FKB_OTL2_OVP_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_OTL2_OVD_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL2_OVV_NUM_BITS 12
+#define CFA_P70_WC_TCAM_FKB_OTL2_OVT_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_OTL2_IVP_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_OTL2_IVD_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL2_IVV_NUM_BITS 12
+#define CFA_P70_WC_TCAM_FKB_OTL2_IVT_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_OTL2_ETYPE_NUM_BITS 16
+#define CFA_P70_WC_TCAM_FKB_OTL3_TYPE_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_OTL3_SIP3_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_OTL3_SIP2_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_OTL3_SIP1_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_OTL3_SIP0_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_OTL3_DIP3_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_OTL3_DIP2_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_OTL3_DIP1_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_OTL3_DIP0_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_OTL3_TTL_NUM_BITS 8
+#define CFA_P70_WC_TCAM_FKB_OTL3_PROT_NUM_BITS 8
+/**
+ * The CFA_P70_WC_TCAM_FKB_OTL3_FID bit length is not fixed, so the
+ * CFA_P70_WC_TCAM_FKB_OTL3_FID_NUM_BITS macro takes a condition argument.
+ */
+#define CFA_P70_WC_TCAM_FKB_OTL3_FID_NUM_BITS(COND) ((COND) ? 16 : 20)
+#define CFA_P70_WC_TCAM_FKB_OTL3_QOS_NUM_BITS 8
+#define CFA_P70_WC_TCAM_FKB_OTL3_IEH_NONEXT_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL3_IEH_SEP_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL3_IEH_AUTH_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL3_IEH_DEST_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL3_IEH_FRAG_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL3_IEH_RTHDR_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL3_IEH_HOP_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL3_IEH_1FRAG_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL3_DF_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL3_L3ERR_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_OTL4_TYPE_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_OTL4_SRC_NUM_BITS 16
+#define CFA_P70_WC_TCAM_FKB_OTL4_DST_NUM_BITS 16
+#define CFA_P70_WC_TCAM_FKB_OTL4_FLAGS_NUM_BITS 9
+#define CFA_P70_WC_TCAM_FKB_OTL4_SEQ_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_OTL4_PA_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL4_OPT_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL4_TCPTS_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_OTL4_ERR_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_OT_TYPE_NUM_BITS 5
+#define CFA_P70_WC_TCAM_FKB_OT_FLAGS_NUM_BITS 8
+#define CFA_P70_WC_TCAM_FKB_OT_IDS_NUM_BITS 24
+#define CFA_P70_WC_TCAM_FKB_OT_ID_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_OT_CTXTS_NUM_BITS 24
+#define CFA_P70_WC_TCAM_FKB_OT_CTXT_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_OT_QOS_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_OT_ERR_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_TL2_TYPE_NUM_BITS 2
+#define CFA_P70_WC_TCAM_FKB_TL2_DMAC_NUM_BITS 48
+#define CFA_P70_WC_TCAM_FKB_TL2_SMAC_NUM_BITS 48
+#define CFA_P70_WC_TCAM_FKB_TL2_DT_NUM_BITS 2
+#define CFA_P70_WC_TCAM_FKB_TL2_SA_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL2_NVT_NUM_BITS 2
+#define CFA_P70_WC_TCAM_FKB_TL2_OVP_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_TL2_OVD_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL2_OVV_NUM_BITS 12
+#define CFA_P70_WC_TCAM_FKB_TL2_OVT_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_TL2_IVP_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_TL2_IVD_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL2_IVV_NUM_BITS 12
+#define CFA_P70_WC_TCAM_FKB_TL2_IVT_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_TL2_ETYPE_NUM_BITS 16
+#define CFA_P70_WC_TCAM_FKB_TL3_TYPE_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_TL3_SIP3_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_TL3_SIP2_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_TL3_SIP1_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_TL3_SIP0_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_TL3_DIP3_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_TL3_DIP2_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_TL3_DIP1_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_TL3_DIP0_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_TL3_TTL_NUM_BITS 8
+#define CFA_P70_WC_TCAM_FKB_TL3_PROT_NUM_BITS 8
+/**
+ * The CFA_P70_WC_TCAM_FKB_TL3_FID bit length is not fixed, so the
+ * CFA_P70_WC_TCAM_FKB_TL3_FID_NUM_BITS macro takes a condition argument.
+ */
+#define CFA_P70_WC_TCAM_FKB_TL3_FID_NUM_BITS(COND) ((COND) ? 16 : 20)
+#define CFA_P70_WC_TCAM_FKB_TL3_QOS_NUM_BITS 8
+#define CFA_P70_WC_TCAM_FKB_TL3_IEH_NONEXT_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL3_IEH_SEP_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL3_IEH_AUTH_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL3_IEH_DEST_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL3_IEH_FRAG_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL3_IEH_RTHDR_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL3_IEH_HOP_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL3_IEH_1FRAG_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL3_DF_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL3_L3ERR_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_TL4_TYPE_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_TL4_SRC_NUM_BITS 16
+#define CFA_P70_WC_TCAM_FKB_TL4_DST_NUM_BITS 16
+#define CFA_P70_WC_TCAM_FKB_TL4_FLAGS_NUM_BITS 9
+#define CFA_P70_WC_TCAM_FKB_TL4_SEQ_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_TL4_PA_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL4_OPT_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL4_TCPTS_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_TL4_ERR_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_T_TYPE_NUM_BITS 5
+#define CFA_P70_WC_TCAM_FKB_T_FLAGS_NUM_BITS 8
+#define CFA_P70_WC_TCAM_FKB_T_IDS_NUM_BITS 24
+#define CFA_P70_WC_TCAM_FKB_T_ID_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_T_CTXTS_NUM_BITS 24
+#define CFA_P70_WC_TCAM_FKB_T_CTXT_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_T_QOS_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_T_ERR_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_L2_TYPE_NUM_BITS 2
+#define CFA_P70_WC_TCAM_FKB_L2_DMAC_NUM_BITS 48
+#define CFA_P70_WC_TCAM_FKB_L2_SMAC_NUM_BITS 48
+#define CFA_P70_WC_TCAM_FKB_L2_DT_NUM_BITS 2
+#define CFA_P70_WC_TCAM_FKB_L2_SA_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L2_NVT_NUM_BITS 2
+#define CFA_P70_WC_TCAM_FKB_L2_OVP_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_L2_OVD_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L2_OVV_NUM_BITS 12
+#define CFA_P70_WC_TCAM_FKB_L2_OVT_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_L2_IVP_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_L2_IVD_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L2_IVV_NUM_BITS 12
+#define CFA_P70_WC_TCAM_FKB_L2_IVT_NUM_BITS 3
+#define CFA_P70_WC_TCAM_FKB_L2_ETYPE_NUM_BITS 16
+#define CFA_P70_WC_TCAM_FKB_L3_TYPE_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_L3_SIP3_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_L3_SIP2_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_L3_SIP1_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_L3_SIP0_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_L3_DIP3_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_L3_DIP2_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_L3_DIP1_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_L3_DIP0_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_L3_TTL_NUM_BITS 8
+#define CFA_P70_WC_TCAM_FKB_L3_PROT_NUM_BITS 8
+/**
+ * The CFA_P70_WC_TCAM_FKB_L3_FID bit length is not fixed, so the
+ * CFA_P70_WC_TCAM_FKB_L3_FID_NUM_BITS macro takes a condition argument.
+ */
+#define CFA_P70_WC_TCAM_FKB_L3_FID_NUM_BITS(COND) ((COND) ? 16 : 20)
+#define CFA_P70_WC_TCAM_FKB_L3_QOS_NUM_BITS 8
+#define CFA_P70_WC_TCAM_FKB_L3_IEH_NONEXT_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L3_IEH_SEP_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L3_IEH_AUTH_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L3_IEH_DEST_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L3_IEH_FRAG_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L3_IEH_RTHDR_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L3_IEH_HOP_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L3_IEH_1FRAG_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L3_DF_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L3_L3ERR_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_L4_TYPE_NUM_BITS 4
+#define CFA_P70_WC_TCAM_FKB_L4_SRC_NUM_BITS 16
+#define CFA_P70_WC_TCAM_FKB_L4_DST_NUM_BITS 16
+#define CFA_P70_WC_TCAM_FKB_L4_FLAGS_NUM_BITS 9
+#define CFA_P70_WC_TCAM_FKB_L4_SEQ_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_L4_ACK_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_L4_WIN_NUM_BITS 16
+#define CFA_P70_WC_TCAM_FKB_L4_PA_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L4_OPT_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L4_TCPTS_NUM_BITS 1
+#define CFA_P70_WC_TCAM_FKB_L4_TSVAL_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_L4_TXECR_NUM_BITS 32
+#define CFA_P70_WC_TCAM_FKB_L4_ERR_NUM_BITS 4
+
+/**
+ * Field length definitions for em fkb
+ */
+#define CFA_P70_EM_FKB_PROF_ID_NUM_BITS 8
+#define CFA_P70_EM_FKB_L2CTXT_NUM_BITS 11
+#define CFA_P70_EM_FKB_L2FUNC_NUM_BITS 8
+#define CFA_P70_EM_FKB_PARIF_NUM_BITS 2
+#define CFA_P70_EM_FKB_SPIF_NUM_BITS 2
+#define CFA_P70_EM_FKB_SVIF_NUM_BITS 6
+#define CFA_P70_EM_FKB_LCOS_NUM_BITS 3
+#define CFA_P70_EM_FKB_META_HI_NUM_BITS 16
+#define CFA_P70_EM_FKB_META_LO_NUM_BITS 16
+#define CFA_P70_EM_FKB_RCYC_CNT_NUM_BITS 4
+#define CFA_P70_EM_FKB_LOOPBACK_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL2_TYPE_NUM_BITS 2
+#define CFA_P70_EM_FKB_OTL2_DMAC_NUM_BITS 48
+#define CFA_P70_EM_FKB_OTL2_SMAC_NUM_BITS 48
+#define CFA_P70_EM_FKB_OTL2_DT_NUM_BITS 2
+#define CFA_P70_EM_FKB_OTL2_SA_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL2_NVT_NUM_BITS 2
+#define CFA_P70_EM_FKB_OTL2_OVP_NUM_BITS 3
+#define CFA_P70_EM_FKB_OTL2_OVD_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL2_OVV_NUM_BITS 12
+#define CFA_P70_EM_FKB_OTL2_OVT_NUM_BITS 3
+#define CFA_P70_EM_FKB_OTL2_IVP_NUM_BITS 3
+#define CFA_P70_EM_FKB_OTL2_IVD_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL2_IVV_NUM_BITS 12
+#define CFA_P70_EM_FKB_OTL2_IVT_NUM_BITS 3
+#define CFA_P70_EM_FKB_OTL2_ETYPE_NUM_BITS 16
+#define CFA_P70_EM_FKB_OTL3_TYPE_NUM_BITS 4
+#define CFA_P70_EM_FKB_OTL3_SIP3_NUM_BITS 32
+#define CFA_P70_EM_FKB_OTL3_SIP2_NUM_BITS 32
+#define CFA_P70_EM_FKB_OTL3_SIP1_NUM_BITS 32
+#define CFA_P70_EM_FKB_OTL3_SIP0_NUM_BITS 32
+#define CFA_P70_EM_FKB_OTL3_DIP3_NUM_BITS 32
+#define CFA_P70_EM_FKB_OTL3_DIP2_NUM_BITS 32
+#define CFA_P70_EM_FKB_OTL3_DIP1_NUM_BITS 32
+#define CFA_P70_EM_FKB_OTL3_DIP0_NUM_BITS 32
+#define CFA_P70_EM_FKB_OTL3_TTL_NUM_BITS 8
+#define CFA_P70_EM_FKB_OTL3_PROT_NUM_BITS 8
+/**
+ * The CFA_P70_EM_FKB_OTL3_FID bit length is not fixed, so the
+ * CFA_P70_EM_FKB_OTL3_FID_NUM_BITS macro takes a condition argument.
+ */
+#define CFA_P70_EM_FKB_OTL3_FID_NUM_BITS(COND) ((COND) ? 16 : 20)
+#define CFA_P70_EM_FKB_OTL3_QOS_NUM_BITS 8
+#define CFA_P70_EM_FKB_OTL3_IEH_NONEXT_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL3_IEH_SEP_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL3_IEH_AUTH_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL3_IEH_DEST_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL3_IEH_FRAG_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL3_IEH_RTHDR_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL3_IEH_HOP_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL3_IEH_1FRAG_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL3_DF_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL3_L3ERR_NUM_BITS 4
+#define CFA_P70_EM_FKB_OTL4_TYPE_NUM_BITS 4
+#define CFA_P70_EM_FKB_OTL4_SRC_NUM_BITS 16
+#define CFA_P70_EM_FKB_OTL4_DST_NUM_BITS 16
+#define CFA_P70_EM_FKB_OTL4_FLAGS_NUM_BITS 9
+#define CFA_P70_EM_FKB_OTL4_SEQ_NUM_BITS 32
+#define CFA_P70_EM_FKB_OTL4_PA_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL4_OPT_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL4_TCPTS_NUM_BITS 1
+#define CFA_P70_EM_FKB_OTL4_ERR_NUM_BITS 4
+#define CFA_P70_EM_FKB_OT_TYPE_NUM_BITS 5
+#define CFA_P70_EM_FKB_OT_FLAGS_NUM_BITS 8
+#define CFA_P70_EM_FKB_OT_IDS_NUM_BITS 24
+#define CFA_P70_EM_FKB_OT_ID_NUM_BITS 32
+#define CFA_P70_EM_FKB_OT_CTXTS_NUM_BITS 24
+#define CFA_P70_EM_FKB_OT_CTXT_NUM_BITS 32
+#define CFA_P70_EM_FKB_OT_QOS_NUM_BITS 3
+#define CFA_P70_EM_FKB_OT_ERR_NUM_BITS 4
+#define CFA_P70_EM_FKB_TL2_TYPE_NUM_BITS 2
+#define CFA_P70_EM_FKB_TL2_DMAC_NUM_BITS 48
+#define CFA_P70_EM_FKB_TL2_SMAC_NUM_BITS 48
+#define CFA_P70_EM_FKB_TL2_DT_NUM_BITS 2
+#define CFA_P70_EM_FKB_TL2_SA_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL2_NVT_NUM_BITS 2
+#define CFA_P70_EM_FKB_TL2_OVP_NUM_BITS 3
+#define CFA_P70_EM_FKB_TL2_OVD_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL2_OVV_NUM_BITS 12
+#define CFA_P70_EM_FKB_TL2_OVT_NUM_BITS 3
+#define CFA_P70_EM_FKB_TL2_IVP_NUM_BITS 3
+#define CFA_P70_EM_FKB_TL2_IVD_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL2_IVV_NUM_BITS 12
+#define CFA_P70_EM_FKB_TL2_IVT_NUM_BITS 3
+#define CFA_P70_EM_FKB_TL2_ETYPE_NUM_BITS 16
+#define CFA_P70_EM_FKB_TL3_TYPE_NUM_BITS 4
+#define CFA_P70_EM_FKB_TL3_SIP3_NUM_BITS 32
+#define CFA_P70_EM_FKB_TL3_SIP2_NUM_BITS 32
+#define CFA_P70_EM_FKB_TL3_SIP1_NUM_BITS 32
+#define CFA_P70_EM_FKB_TL3_SIP0_NUM_BITS 32
+#define CFA_P70_EM_FKB_TL3_DIP3_NUM_BITS 32
+#define CFA_P70_EM_FKB_TL3_DIP2_NUM_BITS 32
+#define CFA_P70_EM_FKB_TL3_DIP1_NUM_BITS 32
+#define CFA_P70_EM_FKB_TL3_DIP0_NUM_BITS 32
+#define CFA_P70_EM_FKB_TL3_TTL_NUM_BITS 8
+#define CFA_P70_EM_FKB_TL3_PROT_NUM_BITS 8
+/**
+ * The CFA_P70_EM_FKB_TL3_FID bit length is not fixed, so the
+ * CFA_P70_EM_FKB_TL3_FID_NUM_BITS macro takes a condition argument.
+ */
+#define CFA_P70_EM_FKB_TL3_FID_NUM_BITS(COND) ((COND) ? 16 : 20)
+#define CFA_P70_EM_FKB_TL3_QOS_NUM_BITS 8
+#define CFA_P70_EM_FKB_TL3_IEH_NONEXT_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL3_IEH_SEP_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL3_IEH_AUTH_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL3_IEH_DEST_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL3_IEH_FRAG_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL3_IEH_RTHDR_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL3_IEH_HOP_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL3_IEH_1FRAG_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL3_DF_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL3_L3ERR_NUM_BITS 4
+#define CFA_P70_EM_FKB_TL4_TYPE_NUM_BITS 4
+#define CFA_P70_EM_FKB_TL4_SRC_NUM_BITS 16
+#define CFA_P70_EM_FKB_TL4_DST_NUM_BITS 16
+#define CFA_P70_EM_FKB_TL4_FLAGS_NUM_BITS 9
+#define CFA_P70_EM_FKB_TL4_SEQ_NUM_BITS 32
+#define CFA_P70_EM_FKB_TL4_PA_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL4_OPT_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL4_TCPTS_NUM_BITS 1
+#define CFA_P70_EM_FKB_TL4_ERR_NUM_BITS 4
+#define CFA_P70_EM_FKB_T_TYPE_NUM_BITS 5
+#define CFA_P70_EM_FKB_T_FLAGS_NUM_BITS 8
+#define CFA_P70_EM_FKB_T_IDS_NUM_BITS 24
+#define CFA_P70_EM_FKB_T_ID_NUM_BITS 32
+#define CFA_P70_EM_FKB_T_CTXTS_NUM_BITS 24
+#define CFA_P70_EM_FKB_T_CTXT_NUM_BITS 32
+#define CFA_P70_EM_FKB_T_QOS_NUM_BITS 3
+#define CFA_P70_EM_FKB_T_ERR_NUM_BITS 4
+#define CFA_P70_EM_FKB_L2_TYPE_NUM_BITS 2
+#define CFA_P70_EM_FKB_L2_DMAC_NUM_BITS 48
+#define CFA_P70_EM_FKB_L2_SMAC_NUM_BITS 48
+#define CFA_P70_EM_FKB_L2_DT_NUM_BITS 2
+#define CFA_P70_EM_FKB_L2_SA_NUM_BITS 1
+#define CFA_P70_EM_FKB_L2_NVT_NUM_BITS 2
+#define CFA_P70_EM_FKB_L2_OVP_NUM_BITS 3
+#define CFA_P70_EM_FKB_L2_OVD_NUM_BITS 1
+#define CFA_P70_EM_FKB_L2_OVV_NUM_BITS 12
+#define CFA_P70_EM_FKB_L2_OVT_NUM_BITS 3
+#define CFA_P70_EM_FKB_L2_IVP_NUM_BITS 3
+#define CFA_P70_EM_FKB_L2_IVD_NUM_BITS 1
+#define CFA_P70_EM_FKB_L2_IVV_NUM_BITS 12
+#define CFA_P70_EM_FKB_L2_IVT_NUM_BITS 3
+#define CFA_P70_EM_FKB_L2_ETYPE_NUM_BITS 16
+#define CFA_P70_EM_FKB_L3_TYPE_NUM_BITS 4
+#define CFA_P70_EM_FKB_L3_SIP3_NUM_BITS 32
+#define CFA_P70_EM_FKB_L3_SIP2_NUM_BITS 32
+#define CFA_P70_EM_FKB_L3_SIP1_NUM_BITS 32
+#define CFA_P70_EM_FKB_L3_SIP0_NUM_BITS 32
+#define CFA_P70_EM_FKB_L3_DIP3_NUM_BITS 32
+#define CFA_P70_EM_FKB_L3_DIP2_NUM_BITS 32
+#define CFA_P70_EM_FKB_L3_DIP1_NUM_BITS 32
+#define CFA_P70_EM_FKB_L3_DIP0_NUM_BITS 32
+#define CFA_P70_EM_FKB_L3_TTL_NUM_BITS 8
+#define CFA_P70_EM_FKB_L3_PROT_NUM_BITS 8
+/**
+ * The CFA_P70_EM_FKB_L3_FID bit length is not fixed, so the
+ * CFA_P70_EM_FKB_L3_FID_NUM_BITS macro takes a condition argument.
+ */
+#define CFA_P70_EM_FKB_L3_FID_NUM_BITS(COND) ((COND) ? 16 : 20)
+#define CFA_P70_EM_FKB_L3_QOS_NUM_BITS 8
+#define CFA_P70_EM_FKB_L3_IEH_NONEXT_NUM_BITS 1
+#define CFA_P70_EM_FKB_L3_IEH_SEP_NUM_BITS 1
+#define CFA_P70_EM_FKB_L3_IEH_AUTH_NUM_BITS 1
+#define CFA_P70_EM_FKB_L3_IEH_DEST_NUM_BITS 1
+#define CFA_P70_EM_FKB_L3_IEH_FRAG_NUM_BITS 1
+#define CFA_P70_EM_FKB_L3_IEH_RTHDR_NUM_BITS 1
+#define CFA_P70_EM_FKB_L3_IEH_HOP_NUM_BITS 1
+#define CFA_P70_EM_FKB_L3_IEH_1FRAG_NUM_BITS 1
+#define CFA_P70_EM_FKB_L3_DF_NUM_BITS 1
+#define CFA_P70_EM_FKB_L3_L3ERR_NUM_BITS 4
+#define CFA_P70_EM_FKB_L4_TYPE_NUM_BITS 4
+#define CFA_P70_EM_FKB_L4_SRC_NUM_BITS 16
+#define CFA_P70_EM_FKB_L4_DST_NUM_BITS 16
+#define CFA_P70_EM_FKB_L4_FLAGS_NUM_BITS 9
+#define CFA_P70_EM_FKB_L4_SEQ_NUM_BITS 32
+#define CFA_P70_EM_FKB_L4_ACK_NUM_BITS 32
+#define CFA_P70_EM_FKB_L4_WIN_NUM_BITS 16
+#define CFA_P70_EM_FKB_L4_PA_NUM_BITS 1
+#define CFA_P70_EM_FKB_L4_OPT_NUM_BITS 1
+#define CFA_P70_EM_FKB_L4_TCPTS_NUM_BITS 1
+#define CFA_P70_EM_FKB_L4_TSVAL_NUM_BITS 32
+#define CFA_P70_EM_FKB_L4_TXECR_NUM_BITS 32
+#define CFA_P70_EM_FKB_L4_ERR_NUM_BITS 4
+
+/**
+ * Field length definitions for em key layout
+ */
+#define CFA_P70_EM_KL_RANGE_IDX_NUM_BITS 16
+#define CFA_P70_EM_KL_RANGE_PROFILE_NUM_BITS 4
+#define CFA_P70_EM_KL_CREC_TIMER_VALUE_NUM_BITS 4
+#define CFA_P70_EM_KL_CREC_STATE_NUM_BITS 5
+#define CFA_P70_EM_KL_CREC_TCP_MSB_OPP_INIT_NUM_BITS 1
+#define CFA_P70_EM_KL_CREC_TCP_MSB_OPP_NUM_BITS 18
+#define CFA_P70_EM_KL_CREC_TCP_MSB_LOC_NUM_BITS 18
+#define CFA_P70_EM_KL_CREC_TCP_WIN_NUM_BITS 5
+#define CFA_P70_EM_KL_CREC_TCP_UPDT_EN_NUM_BITS 1
+#define CFA_P70_EM_KL_CREC_TCP_DIR_NUM_BITS 1
+#define CFA_P70_EM_KL_METADATA_NUM_BITS 32
+#define CFA_P70_EM_KL_PROF_FUNC_NUM_BITS 8
+#define CFA_P70_EM_KL_META_PROF_NUM_BITS 3
+#define CFA_P70_EM_KL_RECYCLE_DEST_NUM_BITS 1
+#define CFA_P70_EM_KL_FC_PTR_NUM_BITS 28
+#define CFA_P70_EM_KL_FC_TYPE_NUM_BITS 2
+#define CFA_P70_EM_KL_FC_OP_NUM_BITS 1
+#define CFA_P70_EM_KL_PATHS_M1_NUM_BITS 4
+#define CFA_P70_EM_KL_ACT_REC_SIZE_NUM_BITS 5
+#define CFA_P70_EM_KL_RING_TABLE_IDX_NUM_BITS 9
+#define CFA_P70_EM_KL_DESTINATION_NUM_BITS 17
+#define CFA_P70_EM_KL_ACT_REC_PTR_NUM_BITS 26
+#define CFA_P70_EM_KL_ACT_HINT_NUM_BITS 2
+#define CFA_P70_EM_KL_STRENGTH_NUM_BITS 2
+#define CFA_P70_EM_KL_OPCODE_NUM_BITS 4
+#define CFA_P70_EM_KL_EPOCH1_NUM_BITS 6
+#define CFA_P70_EM_KL_EPOCH0_NUM_BITS 12
+#define CFA_P70_EM_KL_REC_SIZE_NUM_BITS 2
+#define CFA_P70_EM_KL_VALID_NUM_BITS 1
+#define CFA_P70_EM_KL_PROF_ID_NUM_BITS 8
+#define CFA_P70_EM_KL_L2CTXT_NUM_BITS 11
+#define CFA_P70_EM_KL_L2FUNC_NUM_BITS 8
+#define CFA_P70_EM_KL_PARIF_NUM_BITS 2
+#define CFA_P70_EM_KL_SPIF_NUM_BITS 2
+#define CFA_P70_EM_KL_SVIF_NUM_BITS 6
+#define CFA_P70_EM_KL_LCOS_NUM_BITS 3
+#define CFA_P70_EM_KL_META_HI_NUM_BITS 16
+#define CFA_P70_EM_KL_META_LO_NUM_BITS 16
+#define CFA_P70_EM_KL_RCYC_CNT_NUM_BITS 4
+#define CFA_P70_EM_KL_LOOPBACK_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL2_TYPE_NUM_BITS 2
+#define CFA_P70_EM_KL_OTL2_DMAC_NUM_BITS 48
+#define CFA_P70_EM_KL_OTL2_SMAC_NUM_BITS 48
+#define CFA_P70_EM_KL_OTL2_DT_NUM_BITS 2
+#define CFA_P70_EM_KL_OTL2_SA_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL2_NVT_NUM_BITS 2
+#define CFA_P70_EM_KL_OTL2_OVP_NUM_BITS 3
+#define CFA_P70_EM_KL_OTL2_OVD_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL2_OVV_NUM_BITS 12
+#define CFA_P70_EM_KL_OTL2_OVT_NUM_BITS 3
+#define CFA_P70_EM_KL_OTL2_IVP_NUM_BITS 3
+#define CFA_P70_EM_KL_OTL2_IVD_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL2_IVV_NUM_BITS 12
+#define CFA_P70_EM_KL_OTL2_IVT_NUM_BITS 3
+#define CFA_P70_EM_KL_OTL2_ETYPE_NUM_BITS 16
+#define CFA_P70_EM_KL_OTL3_TYPE_NUM_BITS 4
+#define CFA_P70_EM_KL_OTL3_SIP3_NUM_BITS 32
+#define CFA_P70_EM_KL_OTL3_SIP2_NUM_BITS 32
+#define CFA_P70_EM_KL_OTL3_SIP1_NUM_BITS 32
+#define CFA_P70_EM_KL_OTL3_SIP0_NUM_BITS 32
+#define CFA_P70_EM_KL_OTL3_DIP3_NUM_BITS 32
+#define CFA_P70_EM_KL_OTL3_DIP2_NUM_BITS 32
+#define CFA_P70_EM_KL_OTL3_DIP1_NUM_BITS 32
+#define CFA_P70_EM_KL_OTL3_DIP0_NUM_BITS 32
+#define CFA_P70_EM_KL_OTL3_TTL_NUM_BITS 8
+#define CFA_P70_EM_KL_OTL3_PROT_NUM_BITS 8
+/**
+ * The CFA_P70_EM_KL_OTL3_FID bit length is not fixed, so the
+ * CFA_P70_EM_KL_OTL3_FID_NUM_BITS macro takes a condition argument.
+ */
+#define CFA_P70_EM_KL_OTL3_FID_NUM_BITS(COND) ((COND) ? 16 : 20)
+#define CFA_P70_EM_KL_OTL3_QOS_NUM_BITS 8
+#define CFA_P70_EM_KL_OTL3_IEH_NONEXT_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL3_IEH_SEP_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL3_IEH_AUTH_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL3_IEH_DEST_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL3_IEH_FRAG_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL3_IEH_RTHDR_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL3_IEH_HOP_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL3_IEH_1FRAG_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL3_DF_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL3_L3ERR_NUM_BITS 4
+#define CFA_P70_EM_KL_OTL4_TYPE_NUM_BITS 4
+#define CFA_P70_EM_KL_OTL4_SRC_NUM_BITS 16
+#define CFA_P70_EM_KL_OTL4_DST_NUM_BITS 16
+#define CFA_P70_EM_KL_OTL4_FLAGS_NUM_BITS 9
+#define CFA_P70_EM_KL_OTL4_SEQ_NUM_BITS 32
+#define CFA_P70_EM_KL_OTL4_PA_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL4_OPT_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL4_TCPTS_NUM_BITS 1
+#define CFA_P70_EM_KL_OTL4_ERR_NUM_BITS 4
+#define CFA_P70_EM_KL_OT_TYPE_NUM_BITS 5
+#define CFA_P70_EM_KL_OT_FLAGS_NUM_BITS 8
+#define CFA_P70_EM_KL_OT_IDS_NUM_BITS 24
+#define CFA_P70_EM_KL_OT_ID_NUM_BITS 32
+#define CFA_P70_EM_KL_OT_CTXTS_NUM_BITS 24
+#define CFA_P70_EM_KL_OT_CTXT_NUM_BITS 32
+#define CFA_P70_EM_KL_OT_QOS_NUM_BITS 3
+#define CFA_P70_EM_KL_OT_ERR_NUM_BITS 4
+#define CFA_P70_EM_KL_TL2_TYPE_NUM_BITS 2
+#define CFA_P70_EM_KL_TL2_DMAC_NUM_BITS 48
+#define CFA_P70_EM_KL_TL2_SMAC_NUM_BITS 48
+#define CFA_P70_EM_KL_TL2_DT_NUM_BITS 2
+#define CFA_P70_EM_KL_TL2_SA_NUM_BITS 1
+#define CFA_P70_EM_KL_TL2_NVT_NUM_BITS 2
+#define CFA_P70_EM_KL_TL2_OVP_NUM_BITS 3
+#define CFA_P70_EM_KL_TL2_OVD_NUM_BITS 1
+#define CFA_P70_EM_KL_TL2_OVV_NUM_BITS 12
+#define CFA_P70_EM_KL_TL2_OVT_NUM_BITS 3
+#define CFA_P70_EM_KL_TL2_IVP_NUM_BITS 3
+#define CFA_P70_EM_KL_TL2_IVD_NUM_BITS 1
+#define CFA_P70_EM_KL_TL2_IVV_NUM_BITS 12
+#define CFA_P70_EM_KL_TL2_IVT_NUM_BITS 3
+#define CFA_P70_EM_KL_TL2_ETYPE_NUM_BITS 16
+#define CFA_P70_EM_KL_TL3_TYPE_NUM_BITS 4
+#define CFA_P70_EM_KL_TL3_SIP3_NUM_BITS 32
+#define CFA_P70_EM_KL_TL3_SIP2_NUM_BITS 32
+#define CFA_P70_EM_KL_TL3_SIP1_NUM_BITS 32
+#define CFA_P70_EM_KL_TL3_SIP0_NUM_BITS 32
+#define CFA_P70_EM_KL_TL3_DIP3_NUM_BITS 32
+#define CFA_P70_EM_KL_TL3_DIP2_NUM_BITS 32
+#define CFA_P70_EM_KL_TL3_DIP1_NUM_BITS 32
+#define CFA_P70_EM_KL_TL3_DIP0_NUM_BITS 32
+#define CFA_P70_EM_KL_TL3_TTL_NUM_BITS 8
+#define CFA_P70_EM_KL_TL3_PROT_NUM_BITS 8
+/**
+ * The CFA_P70_EM_KL_TL3_FID bit length is not fixed, so the
+ * CFA_P70_EM_KL_TL3_FID_NUM_BITS macro takes a condition argument.
+ */
+#define CFA_P70_EM_KL_TL3_FID_NUM_BITS(COND) ((COND) ? 16 : 20)
+#define CFA_P70_EM_KL_TL3_QOS_NUM_BITS 8
+#define CFA_P70_EM_KL_TL3_IEH_NONEXT_NUM_BITS 1
+#define CFA_P70_EM_KL_TL3_IEH_SEP_NUM_BITS 1
+#define CFA_P70_EM_KL_TL3_IEH_AUTH_NUM_BITS 1
+#define CFA_P70_EM_KL_TL3_IEH_DEST_NUM_BITS 1
+#define CFA_P70_EM_KL_TL3_IEH_FRAG_NUM_BITS 1
+#define CFA_P70_EM_KL_TL3_IEH_RTHDR_NUM_BITS 1
+#define CFA_P70_EM_KL_TL3_IEH_HOP_NUM_BITS 1
+#define CFA_P70_EM_KL_TL3_IEH_1FRAG_NUM_BITS 1
+#define CFA_P70_EM_KL_TL3_DF_NUM_BITS 1
+#define CFA_P70_EM_KL_TL3_L3ERR_NUM_BITS 4
+#define CFA_P70_EM_KL_TL4_TYPE_NUM_BITS 4
+#define CFA_P70_EM_KL_TL4_SRC_NUM_BITS 16
+#define CFA_P70_EM_KL_TL4_DST_NUM_BITS 16
+#define CFA_P70_EM_KL_TL4_FLAGS_NUM_BITS 9
+#define CFA_P70_EM_KL_TL4_SEQ_NUM_BITS 32
+#define CFA_P70_EM_KL_TL4_PA_NUM_BITS 1
+#define CFA_P70_EM_KL_TL4_OPT_NUM_BITS 1
+#define CFA_P70_EM_KL_TL4_TCPTS_NUM_BITS 1
+#define CFA_P70_EM_KL_TL4_ERR_NUM_BITS 4
+#define CFA_P70_EM_KL_T_TYPE_NUM_BITS 5
+#define CFA_P70_EM_KL_T_FLAGS_NUM_BITS 8
+#define CFA_P70_EM_KL_T_IDS_NUM_BITS 24
+#define CFA_P70_EM_KL_T_ID_NUM_BITS 32
+#define CFA_P70_EM_KL_T_CTXTS_NUM_BITS 24
+#define CFA_P70_EM_KL_T_CTXT_NUM_BITS 32
+#define CFA_P70_EM_KL_T_QOS_NUM_BITS 3
+#define CFA_P70_EM_KL_T_ERR_NUM_BITS 4
+#define CFA_P70_EM_KL_L2_TYPE_NUM_BITS 2
+#define CFA_P70_EM_KL_L2_DMAC_NUM_BITS 48
+#define CFA_P70_EM_KL_L2_SMAC_NUM_BITS 48
+#define CFA_P70_EM_KL_L2_DT_NUM_BITS 2
+#define CFA_P70_EM_KL_L2_SA_NUM_BITS 1
+#define CFA_P70_EM_KL_L2_NVT_NUM_BITS 2
+#define CFA_P70_EM_KL_L2_OVP_NUM_BITS 3
+#define CFA_P70_EM_KL_L2_OVD_NUM_BITS 1
+#define CFA_P70_EM_KL_L2_OVV_NUM_BITS 12
+#define CFA_P70_EM_KL_L2_OVT_NUM_BITS 3
+#define CFA_P70_EM_KL_L2_IVP_NUM_BITS 3
+#define CFA_P70_EM_KL_L2_IVD_NUM_BITS 1
+#define CFA_P70_EM_KL_L2_IVV_NUM_BITS 12
+#define CFA_P70_EM_KL_L2_IVT_NUM_BITS 3
+#define CFA_P70_EM_KL_L2_ETYPE_NUM_BITS 16
+#define CFA_P70_EM_KL_L3_TYPE_NUM_BITS 4
+#define CFA_P70_EM_KL_L3_SIP3_NUM_BITS 32
+#define CFA_P70_EM_KL_L3_SIP2_NUM_BITS 32
+#define CFA_P70_EM_KL_L3_SIP1_NUM_BITS 32
+#define CFA_P70_EM_KL_L3_SIP0_NUM_BITS 32
+#define CFA_P70_EM_KL_L3_DIP3_NUM_BITS 32
+#define CFA_P70_EM_KL_L3_DIP2_NUM_BITS 32
+#define CFA_P70_EM_KL_L3_DIP1_NUM_BITS 32
+#define CFA_P70_EM_KL_L3_DIP0_NUM_BITS 32
+#define CFA_P70_EM_KL_L3_TTL_NUM_BITS 8
+#define CFA_P70_EM_KL_L3_PROT_NUM_BITS 8
+/**
+ * The CFA_P70_EM_KL_L3_FID bit length is not fixed, so the
+ * CFA_P70_EM_KL_L3_FID_NUM_BITS macro takes a condition argument.
+ */
+#define CFA_P70_EM_KL_L3_FID_NUM_BITS(COND) ((COND) ? 16 : 20)
+#define CFA_P70_EM_KL_L3_QOS_NUM_BITS 8
+#define CFA_P70_EM_KL_L3_IEH_NONEXT_NUM_BITS 1
+#define CFA_P70_EM_KL_L3_IEH_SEP_NUM_BITS 1
+#define CFA_P70_EM_KL_L3_IEH_AUTH_NUM_BITS 1
+#define CFA_P70_EM_KL_L3_IEH_DEST_NUM_BITS 1
+#define CFA_P70_EM_KL_L3_IEH_FRAG_NUM_BITS 1
+#define CFA_P70_EM_KL_L3_IEH_RTHDR_NUM_BITS 1
+#define CFA_P70_EM_KL_L3_IEH_HOP_NUM_BITS 1
+#define CFA_P70_EM_KL_L3_IEH_1FRAG_NUM_BITS 1
+#define CFA_P70_EM_KL_L3_DF_NUM_BITS 1
+#define CFA_P70_EM_KL_L3_L3ERR_NUM_BITS 4
+#define CFA_P70_EM_KL_L4_TYPE_NUM_BITS 4
+#define CFA_P70_EM_KL_L4_SRC_NUM_BITS 16
+#define CFA_P70_EM_KL_L4_DST_NUM_BITS 16
+#define CFA_P70_EM_KL_L4_FLAGS_NUM_BITS 9
+#define CFA_P70_EM_KL_L4_SEQ_NUM_BITS 32
+#define CFA_P70_EM_KL_L4_ACK_NUM_BITS 32
+#define CFA_P70_EM_KL_L4_WIN_NUM_BITS 16
+#define CFA_P70_EM_KL_L4_PA_NUM_BITS 1
+#define CFA_P70_EM_KL_L4_OPT_NUM_BITS 1
+#define CFA_P70_EM_KL_L4_TCPTS_NUM_BITS 1
+#define CFA_P70_EM_KL_L4_TSVAL_NUM_BITS 32
+#define CFA_P70_EM_KL_L4_TXECR_NUM_BITS 32
+#define CFA_P70_EM_KL_L4_ERR_NUM_BITS 4
+
+/**
+ * Field length definitions for action
+ */
+#define CFA_P70_ACT_TYPE_NUM_BITS 3
+#define CFA_P70_ACT_DROP_NUM_BITS 1
+#define CFA_P70_ACT_VLAN_DELETE_NUM_BITS 2
+#define CFA_P70_ACT_DEST_NUM_BITS 7
+#define CFA_P70_ACT_DEST_OP_NUM_BITS 2
+#define CFA_P70_ACT_DECAP_NUM_BITS 5
+#define CFA_P70_ACT_MIRRORING_NUM_BITS 5
+#define CFA_P70_ACT_METER_PTR_NUM_BITS 10
+#define CFA_P70_ACT_STAT0_OFF_NUM_BITS 3
+#define CFA_P70_ACT_STAT0_OP_NUM_BITS 1
+#define CFA_P70_ACT_STAT0_CTR_TYPE_NUM_BITS 2
+#define CFA_P70_ACT_MOD_OFF_NUM_BITS 5
+#define CFA_P70_ACT_ENC_OFF_NUM_BITS 6
+#define CFA_P70_ACT_SRC_OFF_NUM_BITS 4
+#define CFA_P70_ACT_COMPACT_RSVD_0_NUM_BITS 4
+#define CFA_P70_ACT_STAT0_PTR_NUM_BITS 28
+#define CFA_P70_ACT_STAT1_PTR_NUM_BITS 28
+#define CFA_P70_ACT_STAT1_OP_NUM_BITS 1
+#define CFA_P70_ACT_STAT1_CTR_TYPE_NUM_BITS 2
+#define CFA_P70_ACT_MOD_PTR_NUM_BITS 28
+#define CFA_P70_ACT_ENC_PTR_NUM_BITS 28
+#define CFA_P70_ACT_SRC_PTR_NUM_BITS 28
+#define CFA_P70_ACT_FULL_RSVD_0_NUM_BITS 7
+#define CFA_P70_ACT_SRC_KO_EN_NUM_BITS 1
+#define CFA_P70_ACT_MCG_RSVD_0_NUM_BITS 2
+#define CFA_P70_ACT_NEXT_PTR_NUM_BITS 26
+#define CFA_P70_ACT_PTR0_ACT_HINT_NUM_BITS 2
+#define CFA_P70_ACT_PTR0_ACT_REC_PTR_NUM_BITS 26
+#define CFA_P70_ACT_PTR1_ACT_HINT_NUM_BITS 2
+#define CFA_P70_ACT_PTR1_ACT_REC_PTR_NUM_BITS 26
+#define CFA_P70_ACT_PTR2_ACT_HINT_NUM_BITS 2
+#define CFA_P70_ACT_PTR2_ACT_REC_PTR_NUM_BITS 26
+#define CFA_P70_ACT_PTR3_ACT_HINT_NUM_BITS 2
+#define CFA_P70_ACT_PTR3_ACT_REC_PTR_NUM_BITS 26
+#define CFA_P70_ACT_PTR4_ACT_HINT_NUM_BITS 2
+#define CFA_P70_ACT_PTR4_ACT_REC_PTR_NUM_BITS 26
+#define CFA_P70_ACT_PTR5_ACT_HINT_NUM_BITS 2
+#define CFA_P70_ACT_PTR5_ACT_REC_PTR_NUM_BITS 26
+#define CFA_P70_ACT_PTR6_ACT_HINT_NUM_BITS 2
+#define CFA_P70_ACT_PTR6_ACT_REC_PTR_NUM_BITS 26
+#define CFA_P70_ACT_PTR7_ACT_HINT_NUM_BITS 2
+#define CFA_P70_ACT_PTR7_ACT_REC_PTR_NUM_BITS 26
+#define CFA_P70_ACT_MCG_SUBSEQ_RSVD_0_NUM_BITS 3
+#define CFA_P70_ACT_MOD_MODIFY_ACT_HDR_NUM_BITS 16
+#define CFA_P70_ACT_MOD_MD_UPDT_DATA_NUM_BITS 32
+#define CFA_P70_ACT_MOD_MD_UPDT_PROF_NUM_BITS 4
+
+/**
+ * Enumeration definition for field 'md_op'
+ */
+enum cfa_p70_md_op {
+	/*
+	 * Normal Metadata update:
+	 * md = (md & ~md_prof.mask) | (md_prof.mask & md_data)
+	 */
+	CFA_P70_MD_OP_NORMAL = 0,
+	/*
+	 * L2 Hash Metadata update:
+	 * md = (md & ~md_prof.mask) | (md_prof.mask & hash_l2(seed, packet))
+	 */
+	CFA_P70_MD_OP_L2_HASH = 1,
+	/*
+	 * L4 Hash Metadata update:
+	 * md = (md & ~md_prof.mask) | (md_prof.mask & hash_l3l4(seed, packet))
+	 */
+	CFA_P70_MD_OP_L4_HASH = 2,
+	/*
+	 * SVIF insert Metadata update:
+	 * md = (md & ~md_prof.mask) | (md_prof.mask & zero_extend(svif))
+	 */
+	CFA_P70_MD_OP_SVIF = 3,
+};
+#define CFA_P70_ACT_MOD_MD_UPDT_OP_NUM_BITS 2
+#define CFA_P70_ACT_MOD_MD_UPDT_RSVD_0_NUM_BITS 10
+#define CFA_P70_ACT_MOD_MD_UPDT_TOP_NUM_BITS 48
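+
+/*
+ * Illustrative sketch only (not part of the generated interface): the
+ * CFA_P70_MD_OP_NORMAL rule above reduces to a masked merge of the
+ * profile data into the existing 32b metadata word. The helper name is
+ * hypothetical and assumes <stdint.h> types are available here.
+ */
+static inline uint32_t
+cfa_p70_md_op_normal_sketch(uint32_t md, uint32_t mask, uint32_t md_data)
+{
+	/* md = (md & ~md_prof.mask) | (md_prof.mask & md_data) */
+	return (md & ~mask) | (mask & md_data);
+}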
+#define CFA_P70_ACT_MOD_RM_OVLAN_NUM_BITS 32
+#define CFA_P70_ACT_MOD_RM_IVLAN_NUM_BITS 32
+#define CFA_P70_ACT_MOD_RPL_IVLAN_NUM_BITS 32
+#define CFA_P70_ACT_MOD_RPL_OVLAN_NUM_BITS 32
+#define CFA_P70_ACT_MOD_TTL_UPDT_OP_NUM_BITS 15
+#define CFA_P70_ACT_MOD_TTL_UPDT_ALT_VID_NUM_BITS 12
+#define CFA_P70_ACT_MOD_TTL_UPDT_ALT_PFID_NUM_BITS 5
+#define CFA_P70_ACT_MOD_TTL_UPDT_TOP_NUM_BITS 32
+#define CFA_P70_ACT_MOD_TNL_MODIFY_DEL_NUM_BITS 16
+#define CFA_P70_ACT_MOD_TNL_MODIFY_8B_NEW_PROT_NUM_BITS 8
+#define CFA_P70_ACT_MOD_TNL_MODIFY_8B_EXIST_PROT_NUM_BITS 8
+#define CFA_P70_ACT_MOD_TNL_MODIFY_8B_VEC_NUM_BITS 16
+#define CFA_P70_ACT_MOD_TNL_MODIFY_8B_TOP_NUM_BITS 32
+#define CFA_P70_ACT_MOD_TNL_MODIFY_16B_NEW_PROT_NUM_BITS 16
+#define CFA_P70_ACT_MOD_TNL_MODIFY_16B_EXIST_PROT_NUM_BITS 16
+#define CFA_P70_ACT_MOD_TNL_MODIFY_16B_VEC_NUM_BITS 16
+#define CFA_P70_ACT_MOD_TNL_MODIFY_16B_TOP_NUM_BITS 48
+#define CFA_P70_ACT_MOD_UPDT_FIELD_DATA0_NUM_BITS 32
+#define CFA_P70_ACT_MOD_UPDT_FIELD_VEC_RSVD_NUM_BITS 15
+#define CFA_P70_ACT_MOD_UPDT_FIELD_VEC_KID_NUM_BITS 1
+#define CFA_P70_ACT_MOD_UPDT_FIELD_TOP_NUM_BITS 48
+#define CFA_P70_ACT_MOD_SMAC_NUM_BITS 48
+#define CFA_P70_ACT_MOD_DMAC_NUM_BITS 48
+#define CFA_P70_ACT_MOD_SIPV6_NUM_BITS 128
+#define CFA_P70_ACT_MOD_DIPV6_NUM_BITS 128
+#define CFA_P70_ACT_MOD_SIPV4_NUM_BITS 32
+#define CFA_P70_ACT_MOD_DIPV4_NUM_BITS 32
+#define CFA_P70_ACT_MOD_SPORT_NUM_BITS 16
+#define CFA_P70_ACT_MOD_DPORT_NUM_BITS 16
+
+/**
+ * Enumeration definition for field 'ecv_tnl'
+ */
+enum cfa_p70_ecv_tnl {
+ /* No tunnel header will be added. */
+ CFA_P70_ECV_TNL_NOP = 0,
+ /*
+ * Generic full header will be added after inserted L2, L3, or L4
+ * header. The first byte of the tunnel body will be the length of the
+ * inserted tunnel.
+ */
+ CFA_P70_ECV_TNL_GENERIC = 1,
+ /* VXLAN tunnel header will be added. */
+ CFA_P70_ECV_TNL_VXLAN = 2,
+ /* NGE (VXLAN2) Header will be added. */
+ CFA_P70_ECV_TNL_NGE = 3,
+ /* NVGRE Header will be added. */
+ CFA_P70_ECV_TNL_NVGRE = 4,
+ /* GRE Header will be added. */
+ CFA_P70_ECV_TNL_GRE = 5,
+ /*
+ * Generic header after existing L4 header will be added. The first byte
+ * of the tunnel body will be the length of the inserted tunnel.
+ */
+ CFA_P70_ECV_TNL_GENERIC_L4 = 6,
+ /*
+ * Generic header after existing tunnel will be added. The first byte of
+ * the tunnel body will be the length of the inserted tunnel.
+ */
+ CFA_P70_ECV_TNL_GENERIC_TUN = 7,
+};
+#define CFA_P70_ACT_ENC_ECV_TNL_NUM_BITS 3
+
+/**
+ * Enumeration definition for field 'ecv_l4'
+ */
+enum cfa_p70_ecv_l4 {
+ /* No L4 Header */
+ CFA_P70_ECV_L4_NOP = 0,
+ /* No L4 Header */
+ CFA_P70_ECV_L4_NOP1 = 1,
+ /* No L4 Header */
+ CFA_P70_ECV_L4_NOP2 = 2,
+ /* No L4 Header */
+ CFA_P70_ECV_L4_NOP3 = 3,
+ /* Add L4 Header without entropy and with CS=0. */
+ CFA_P70_ECV_L4_L4 = 4,
+ /* Add L4 Header without entropy and with CS=calculated. */
+ CFA_P70_ECV_L4_L4_CS = 5,
+ /* Add L4 Header with entropy and with CS=0. */
+ CFA_P70_ECV_L4_L4_ENT = 6,
+ /* Add L4 Header with entropy and with CS=calculated. */
+ CFA_P70_ECV_L4_L4_ENT_CS = 7,
+};
+#define CFA_P70_ACT_ENC_ECV_L4_NUM_BITS 3
+
+/**
+ * Enumeration definition for field 'ecv_l3'
+ */
+enum cfa_p70_ecv_l3 {
+ /* No L3 Header */
+ CFA_P70_ECV_L3_NOP = 0,
+ /* No L3 Header */
+ CFA_P70_ECV_L3_NOP1 = 1,
+ /* No L3 Header */
+ CFA_P70_ECV_L3_NOP2 = 2,
+ /* No L3 Header */
+ CFA_P70_ECV_L3_NOP3 = 3,
+ /* Add IPV4 Header */
+ CFA_P70_ECV_L3_IPV4 = 4,
+	/* Add IPV6 Header */
+ CFA_P70_ECV_L3_IPV6 = 5,
+ /* Add MPLS (8847) Header */
+ CFA_P70_ECV_L3_MPLS8847 = 6,
+ /* Add MPLS (8848) Header */
+ CFA_P70_ECV_L3_MPLS8848 = 7,
+};
+#define CFA_P70_ACT_ENC_ECV_L3_NUM_BITS 3
+#define CFA_P70_ACT_ENC_ECV_L2_NUM_BITS 1
+
+/**
+ * Enumeration definition for field 'ecv_vtag'
+ */
+enum cfa_p70_ecv_vtag {
+ /* No VLAN tag will be added. */
+ CFA_P70_ECV_VTAG_NOP = 0,
+ /* Add one VLAN tag using the PRI field from the encap record. */
+ CFA_P70_ECV_VTAG_ADD1_USE_PRI = 1,
+	/* Add one VLAN tag remap with inner VLAN Tag PRI field. */
+ CFA_P70_ECV_VTAG_ADD1_REMAP_INNER_PRI = 2,
+ /* Add one VLAN tag remap with diff serve field. */
+ CFA_P70_ECV_VTAG_ADD1_REMAP_DIFF = 3,
+ /* Add two VLAN tags using the PRI field from the encap record. */
+ CFA_P70_ECV_VTAG_ADD2_USE_PRI = 4,
+ /* Add two VLAN tag remap with diff serve field. */
+ CFA_P70_ECV_VTAG_ADD2_REMAP_DIFF = 5,
+ /* Add zero VLAN tags remap with inner VLAN Tag PRI Field. */
+ CFA_P70_ECV_VTAG_ADD0_REMAP_INNER_PRI = 6,
+ /* Add zero VLAN tags remap with diff serve field. */
+ CFA_P70_ECV_VTAG_ADD0_REMAP_DIFF = 7,
+	/* Add zero VLAN tags remap with immediate PRI=0. */
+	CFA_P70_ECV_VTAG_ADD0_IMMED_PRI0 = 8,
+	/* Add zero VLAN tags remap with immediate PRI=1. */
+	CFA_P70_ECV_VTAG_ADD0_IMMED_PRI1 = 9,
+	/* Add zero VLAN tags remap with immediate PRI=2. */
+	CFA_P70_ECV_VTAG_ADD0_IMMED_PRI2 = 10,
+	/* Add zero VLAN tags remap with immediate PRI=3. */
+	CFA_P70_ECV_VTAG_ADD0_IMMED_PRI3 = 11,
+	/* Add zero VLAN tags remap with immediate PRI=4. */
+	CFA_P70_ECV_VTAG_ADD0_IMMED_PRI4 = 12,
+	/* Add zero VLAN tags remap with immediate PRI=5. */
+	CFA_P70_ECV_VTAG_ADD0_IMMED_PRI5 = 13,
+	/* Add zero VLAN tags remap with immediate PRI=6. */
+	CFA_P70_ECV_VTAG_ADD0_IMMED_PRI6 = 14,
+	/* Add zero VLAN tags remap with immediate PRI=7. */
+ CFA_P70_ECV_VTAG_ADD0_IMMED_PRI7 = 15,
+};
+#define CFA_P70_ACT_ENC_ECV_VTAG_NUM_BITS 4
+#define CFA_P70_ACT_ENC_ECV_EC_NUM_BITS 1
+#define CFA_P70_ACT_ENC_ECV_VALID_NUM_BITS 1
+#define CFA_P70_ACT_ENC_EC_IP_TTL_IH_NUM_BITS 1
+#define CFA_P70_ACT_ENC_EC_IP_TOS_IH_NUM_BITS 1
+#define CFA_P70_ACT_ENC_EC_TUN_QOS_NUM_BITS 3
+#define CFA_P70_ACT_ENC_EC_GRE_SET_K_NUM_BITS 1
+
+/**
+ * Enumeration definition for field 'enccfg_dmac_ovr'
+ */
+enum cfa_p70_enccfg_dmac_ovr {
+ /* use encap record DMAC */
+ CFA_P70_ENCCFG_DMAC_OVR_ENCAP = 0,
+ /* re-use existing inner L2 header DMAC */
+ CFA_P70_ENCCFG_DMAC_OVR_INNER_DMAC = 1,
+ /* re-use existing tunnel L2 header DMAC */
+ CFA_P70_ENCCFG_DMAC_OVR_TUNNEL_DMAC = 2,
+ /* re-use existing outer-most L2 header DMAC */
+ CFA_P70_ENCCFG_DMAC_OVR_OUTER_DMAC = 3,
+};
+#define CFA_P70_ACT_ENC_EC_DMAC_OVR_NUM_BITS 2
+
+/**
+ * Enumeration definition for field 'enccfg_vlan_ovr'
+ */
+enum cfa_p70_enccfg_vlan_ovr {
+ /* use only encap record VLAN tags */
+ CFA_P70_ENCCFG_VLAN_OVR_ENCAP = 0,
+ /* use only existing inner L2 header VLAN tags */
+ CFA_P70_ENCCFG_VLAN_OVR_INNER_L2 = 1,
+ /* use only existing tunnel L2 header VLAN tags */
+ CFA_P70_ENCCFG_VLAN_OVR_TUNNEL_L2 = 2,
+ /* use only existing outer-most L2 header VLAN tags */
+ CFA_P70_ENCCFG_VLAN_OVR_OUTER_L2 = 3,
+ /* include inner VLAN Tag from existing inner L2 header (keeps 1 TAG) */
+ CFA_P70_ENCCFG_VLAN_OVR_INNER_INNER = 4,
+ /* include outer VLAN Tag from existing inner L2 header (keeps 1 TAG) */
+ CFA_P70_ENCCFG_VLAN_OVR_INNER_OUTER = 5,
+ /*
+ * include inner VLAN Tag from existing outer-most L2 header (keeps 1
+ * TAG)
+ */
+ CFA_P70_ENCCFG_VLAN_OVR_OUTER_INNER = 6,
+ /*
+ * include outer VLAN Tag from existing outer-most L2 header (keeps 1
+ * TAG)
+ */
+ CFA_P70_ENCCFG_VLAN_OVR_OUTER_OUTER = 7,
+};
+#define CFA_P70_ACT_ENC_EC_VLAN_OVR_NUM_BITS 3
+
+/**
+ * Enumeration definition for field 'enccfg_smac_ovr'
+ */
+enum cfa_p70_enccfg_smac_ovr {
+ /* use only source property record SMAC */
+ CFA_P70_ENCCFG_SMAC_OVR_ENCAP = 0,
+ /* re-use existing inner L2 header SMAC */
+ CFA_P70_ENCCFG_SMAC_OVR_INNER_SMAC = 1,
+ /* re-use existing tunnel L2 header SMAC */
+ CFA_P70_ENCCFG_SMAC_OVR_TUNNEL_SMAC = 2,
+ /* re-use existing outer-most L2 header SMAC */
+ CFA_P70_ENCCFG_SMAC_OVR_OUTER_SMAC = 3,
+ /* re-use existing inner L2 header DMAC */
+ CFA_P70_ENCCFG_SMAC_OVR_INNER_DMAC = 5,
+ /* re-use existing tunnel L2 header DMAC */
+ CFA_P70_ENCCFG_SMAC_OVR_TUNNEL_DMAC = 6,
+ /* re-use existing outer-most L2 header DMAC */
+ CFA_P70_ENCCFG_SMAC_OVR_OUTER_DMAC = 7,
+};
+#define CFA_P70_ACT_ENC_EC_SMAC_OVR_NUM_BITS 3
+
+/**
+ * Enumeration definition for field 'enccfg_ipv4_id_ctrl'
+ */
+enum cfa_p70_enccfg_ipv4_id_ctrl {
+ /* use encap record IPv4 ID field */
+ CFA_P70_ENCCFG_IPV4_ID_CTRL_ENCAP = 0,
+ /* inherit from next existing IPv4 header ID field */
+ CFA_P70_ENCCFG_IPV4_ID_CTRL_INHERIT = 2,
+ /* use CFA incrementing IPv4 ID counter */
+ CFA_P70_ENCCFG_IPV4_ID_CTRL_INCREMENT = 3,
+};
+#define CFA_P70_ACT_ENC_EC_IPV4_ID_CTRL_NUM_BITS 2
+#define CFA_P70_ACT_ENC_L2_DMAC_NUM_BITS 48
+#define CFA_P70_ACT_ENC_VLAN1_TAG_VID_NUM_BITS 12
+#define CFA_P70_ACT_ENC_VLAN1_TAG_DE_NUM_BITS 1
+#define CFA_P70_ACT_ENC_VLAN1_TAG_PRI_NUM_BITS 3
+#define CFA_P70_ACT_ENC_VLAN1_TAG_TPID_NUM_BITS 16
+#define CFA_P70_ACT_ENC_VLAN2_IT_VID_NUM_BITS 12
+#define CFA_P70_ACT_ENC_VLAN2_IT_DE_NUM_BITS 1
+#define CFA_P70_ACT_ENC_VLAN2_IT_PRI_NUM_BITS 3
+#define CFA_P70_ACT_ENC_VLAN2_IT_TPID_NUM_BITS 16
+#define CFA_P70_ACT_ENC_VLAN2_OT_VID_NUM_BITS 12
+#define CFA_P70_ACT_ENC_VLAN2_OT_DE_NUM_BITS 1
+#define CFA_P70_ACT_ENC_VLAN2_OT_PRI_NUM_BITS 3
+#define CFA_P70_ACT_ENC_VLAN2_OT_TPID_NUM_BITS 16
+#define CFA_P70_ACT_ENC_IPV4_ID_NUM_BITS 16
+#define CFA_P70_ACT_ENC_IPV4_TOS_NUM_BITS 8
+#define CFA_P70_ACT_ENC_IPV4_HLEN_NUM_BITS 4
+#define CFA_P70_ACT_ENC_IPV4_VER_NUM_BITS 4
+#define CFA_P70_ACT_ENC_IPV4_PROT_NUM_BITS 8
+#define CFA_P70_ACT_ENC_IPV4_TTL_NUM_BITS 8
+#define CFA_P70_ACT_ENC_IPV4_FRAG_NUM_BITS 13
+#define CFA_P70_ACT_ENC_IPV4_FLAGS_NUM_BITS 3
+#define CFA_P70_ACT_ENC_IPV4_DEST_NUM_BITS 32
+#define CFA_P70_ACT_ENC_IPV6_FLOW_LABEL_NUM_BITS 20
+#define CFA_P70_ACT_ENC_IPV6_TRAFFIC_CLASS_NUM_BITS 8
+#define CFA_P70_ACT_ENC_IPV6_VER_NUM_BITS 4
+#define CFA_P70_ACT_ENC_IPV6_HOP_LIMIT_NUM_BITS 8
+#define CFA_P70_ACT_ENC_IPV6_NEXT_HEADER_NUM_BITS 8
+#define CFA_P70_ACT_ENC_IPV6_PAYLOAD_LENGTH_NUM_BITS 16
+#define CFA_P70_ACT_ENC_IPV6_DEST_NUM_BITS 128
+#define CFA_P70_ACT_ENC_MPLS_TAG1_NUM_BITS 32
+#define CFA_P70_ACT_ENC_MPLS_TAG2_NUM_BITS 32
+#define CFA_P70_ACT_ENC_MPLS_TAG3_NUM_BITS 32
+#define CFA_P70_ACT_ENC_MPLS_TAG4_NUM_BITS 32
+#define CFA_P70_ACT_ENC_MPLS_TAG5_NUM_BITS 32
+#define CFA_P70_ACT_ENC_MPLS_TAG6_NUM_BITS 32
+#define CFA_P70_ACT_ENC_MPLS_TAG7_NUM_BITS 32
+#define CFA_P70_ACT_ENC_MPLS_TAG8_NUM_BITS 32
+#define CFA_P70_ACT_ENC_L4_DEST_PORT_NUM_BITS 16
+#define CFA_P70_ACT_ENC_L4_SRC_PORT_NUM_BITS 16
+#define CFA_P70_ACT_ENC_TNL_VXLAN_NEXT_PROT_NUM_BITS 8
+#define CFA_P70_ACT_ENC_TNL_VXLAN_RSVD_0_NUM_BITS 16
+#define CFA_P70_ACT_ENC_TNL_VXLAN_FLAGS_NUM_BITS 8
+#define CFA_P70_ACT_ENC_TNL_VXLAN_RSVD_1_NUM_BITS 8
+#define CFA_P70_ACT_ENC_TNL_VXLAN_VNI_NUM_BITS 24
+#define CFA_P70_ACT_ENC_TNL_NGE_PROT_TYPE_NUM_BITS 16
+#define CFA_P70_ACT_ENC_TNL_NGE_RSVD_0_NUM_BITS 6
+#define CFA_P70_ACT_ENC_TNL_NGE_FLAGS_C_NUM_BITS 1
+#define CFA_P70_ACT_ENC_TNL_NGE_FLAGS_O_NUM_BITS 1
+#define CFA_P70_ACT_ENC_TNL_NGE_FLAGS_OPT_LEN_NUM_BITS 6
+#define CFA_P70_ACT_ENC_TNL_NGE_FLAGS_VER_NUM_BITS 2
+#define CFA_P70_ACT_ENC_TNL_NGE_RSVD_1_NUM_BITS 8
+#define CFA_P70_ACT_ENC_TNL_NGE_VNI_NUM_BITS 24
+#define CFA_P70_ACT_ENC_TNL_NGE_OPTIONS_NUM_BITS 64
+#define CFA_P70_ACT_ENC_TNL_NVGRE_FLOW_ID_NUM_BITS 8
+#define CFA_P70_ACT_ENC_TNL_NVGRE_VSID_NUM_BITS 24
+#define CFA_P70_ACT_ENC_TNL_GRE_KEY_NUM_BITS 32
+#define CFA_P70_ACT_ENC_TNL_GENERIC_TID_NUM_BITS 8
+#define CFA_P70_ACT_ENC_TNL_GENERIC_LENGTH_NUM_BITS 8
+#define CFA_P70_ACT_ENC_TNL_GENERIC_HEADER_NUM_BITS 32
+#define CFA_P70_ACT_SRC_MAC_NUM_BITS 48
+#define CFA_P70_ACT_SRC_IPV4_ADDR_NUM_BITS 32
+#define CFA_P70_ACT_SRC_IPV6_ADDR_NUM_BITS 128
+#define CFA_P70_ACT_STAT0_B16_FPC_NUM_BITS 64
+#define CFA_P70_ACT_STAT1_B16_FPC_NUM_BITS 64
+#define CFA_P70_ACT_STAT0_B16_FBC_NUM_BITS 64
+#define CFA_P70_ACT_STAT1_B16_FBC_NUM_BITS 64
+#define CFA_P70_ACT_STAT0_B24_FPC_NUM_BITS 64
+#define CFA_P70_ACT_STAT1_B24_FPC_NUM_BITS 64
+#define CFA_P70_ACT_STAT0_B24_FBC_NUM_BITS 64
+#define CFA_P70_ACT_STAT1_B24_FBC_NUM_BITS 64
+#define CFA_P70_ACT_STAT0_B24_TIMESTAMP_NUM_BITS 32
+#define CFA_P70_ACT_STAT1_B24_TIMESTAMP_NUM_BITS 32
+#define CFA_P70_ACT_STAT0_B24_TCP_FLAGS_NUM_BITS 9
+#define CFA_P70_ACT_STAT1_B24_TCP_FLAGS_NUM_BITS 9
+#define CFA_P70_ACT_STAT0_B24_UNUSED_0_NUM_BITS 23
+#define CFA_P70_ACT_STAT1_B24_UNUSED_0_NUM_BITS 23
+#define CFA_P70_ACT_STAT0_B32A_FPC_NUM_BITS 64
+#define CFA_P70_ACT_STAT1_B32A_FPC_NUM_BITS 64
+#define CFA_P70_ACT_STAT0_B32A_FBC_NUM_BITS 64
+#define CFA_P70_ACT_STAT1_B32A_FBC_NUM_BITS 64
+#define CFA_P70_ACT_STAT0_B32A_MPC_NUM_BITS 64
+#define CFA_P70_ACT_STAT1_B32A_MPC_NUM_BITS 64
+#define CFA_P70_ACT_STAT0_B32A_MBC_NUM_BITS 64
+#define CFA_P70_ACT_STAT1_B32A_MBC_NUM_BITS 64
+#define CFA_P70_ACT_STAT0_B32B_FPC_NUM_BITS 64
+#define CFA_P70_ACT_STAT1_B32B_FPC_NUM_BITS 64
+#define CFA_P70_ACT_STAT0_B32B_FBC_NUM_BITS 64
+#define CFA_P70_ACT_STAT1_B32B_FBC_NUM_BITS 64
+#define CFA_P70_ACT_STAT0_B32B_TIMESTAMP_NUM_BITS 32
+#define CFA_P70_ACT_STAT1_B32B_TIMESTAMP_NUM_BITS 32
+#define CFA_P70_ACT_STAT0_B32B_TCP_FLAGS_NUM_BITS 9
+#define CFA_P70_ACT_STAT1_B32B_TCP_FLAGS_NUM_BITS 9
+#define CFA_P70_ACT_STAT0_B32B_UNUSED_0_NUM_BITS 7
+#define CFA_P70_ACT_STAT1_B32B_UNUSED_0_NUM_BITS 7
+#define CFA_P70_ACT_STAT0_B32B_MPC15_0_NUM_BITS 16
+#define CFA_P70_ACT_STAT1_B32B_MPC15_0_NUM_BITS 16
+#define CFA_P70_ACT_STAT0_B32B_MPC37_16_NUM_BITS 22
+#define CFA_P70_ACT_STAT1_B32B_MPC37_16_NUM_BITS 22
+#define CFA_P70_ACT_STAT0_B32B_MBC_NUM_BITS 42
+#define CFA_P70_ACT_STAT1_B32B_MBC_NUM_BITS 42
+
+#define CFA_P70_CACHE_LINE_BYTES 32
+#define CFA_P70_CACHE_LINE_BITS \
+ (CFA_P70_CACHE_LINE_BYTES * BITS_PER_BYTE)
+
+/* clang-format on */
+
+#endif /* _CFA_P70_HW_H_ */
new file mode 100644
@@ -0,0 +1,1496 @@
+/****************************************************************************
+ * Copyright(c) 2001-2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * Name: cfa_p70_mpc_structs.h
+ *
+ * Description: MPC CFA command and completion structure definitions
+ *
+ * Date: 09/29/22 11:50:38
+ *
+ * Note: This file is script generated by ./cfa_header_gen.py.
+ * DO NOT modify this file manually !!!!
+ *
+ ****************************************************************************/
+#ifndef _CFA_P70_MPC_STRUCTS_H_
+#define _CFA_P70_MPC_STRUCTS_H_
+
+/* clang-format off */
+
+/**
+ * READ_CMD: This command reads 1-4 consecutive 32B words from the
+ * specified address within a table scope.
+ */
+struct cfa_mpc_read_cmd {
+ /*
+ * This value selects the format for the mid-path command for the CFA.
+ */
+ uint32_t opcode:8;
+ #define READ_CMD_OPCODE_READ 0
+ /* This value selects the table type to be acted upon. */
+ uint32_t table_type:4;
+ #define READ_CMD_TABLE_TYPE_ACTION 0
+ #define READ_CMD_TABLE_TYPE_EM 1
+ /* Unused field [4] */
+ uint32_t unused0:4;
+ /* Table scope to access. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused1:3;
+ /*
+ * Number of 32B units in access. If value is outside the range [1, 4],
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ uint32_t data_size:3;
+ /* Unused field [1] */
+ uint32_t unused2:1;
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ uint32_t cache_option:4;
+ /*
+ * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE):
+ */
+ uint32_t table_index:26;
+ /* Unused field [6] */
+ uint32_t unused3:6;
+ /*
+ * The 64-bit host address to which to write the DMA data returned in
+ * the completion. The data will be written to the same function as the
+ * one that owns the SQ this command is read from. DATA_SIZE determines
+ * the maximum size of the data written. If HOST_ADDRESS[1:0] is not 0,
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ uint32_t host_address_1:32;
+ uint32_t host_address_2:32;
+};
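+
+/*
+ * Usage sketch, not a definitive API: it assumes the bit-field layout
+ * above maps directly onto the 32b command words on the target build and
+ * that host_addr points to 4B-aligned, DMA-able memory owned by the
+ * caller. The helper and parameter names are hypothetical.
+ */
+static inline void
+cfa_mpc_read_cmd_init_sketch(struct cfa_mpc_read_cmd *cmd, uint8_t tbl_type,
+			     uint8_t tbl_scope, uint32_t tbl_idx,
+			     uint8_t num_32b_units, uint64_t host_addr)
+{
+	*cmd = (struct cfa_mpc_read_cmd) {
+		.opcode = READ_CMD_OPCODE_READ,
+		.table_type = tbl_type,		/* ACTION or EM */
+		.table_scope = tbl_scope,
+		.data_size = num_32b_units,	/* 1..4, else FMT_ERR */
+		.table_index = tbl_idx,
+		/* HOST_ADDRESS[1:0] must be 0 or CFA reports FMT_ERR */
+		.host_address_1 = (uint32_t)host_addr,
+		.host_address_2 = (uint32_t)(host_addr >> 32),
+	};
+}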
+
+/**
+ * WRITE_CMD: This command writes 1-4 consecutive 32B words to the
+ * specified address within a table scope.
+ */
+struct cfa_mpc_write_cmd {
+ /*
+ * This value selects the format for the mid-path command for the CFA.
+ */
+ uint32_t opcode:8;
+ #define WRITE_CMD_OPCODE_WRITE 1
+ /* This value selects the table type to be acted upon. */
+ uint32_t table_type:4;
+ #define WRITE_CMD_TABLE_TYPE_ACTION 0
+ #define WRITE_CMD_TABLE_TYPE_EM 1
+ /*
+ * Sets the OPTION field on the cache interface to use write-through for
+ * EM entry writes while processing EM_INSERT commands. For all other
+	 * cases (including EM_INSERT bucket writes), the OPTION field is set by
+ * the CACHE_OPTION and CACHE_OPTION2 fields.
+ */
+ uint32_t write_through:1;
+ /* Unused field [3] */
+ uint32_t unused0:3;
+ /* Table scope to access. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused1:3;
+ /*
+ * Number of 32B units in access. If value is outside the range [1, 4],
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ uint32_t data_size:3;
+ /* Unused field [1] */
+ uint32_t unused2:1;
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ uint32_t cache_option:4;
+ /*
+ * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE):
+ */
+ uint32_t table_index:26;
+ /* Unused field [70] */
+ uint32_t unused3_1:6;
+ uint32_t unused3_2:32;
+ uint32_t unused3_3:32;
+};
+
+/**
+ * READ_CLR_CMD: This command performs a read-modify-write to the
+ * specified 32B address using a 16b mask that specifies up to 16 16b
+ * words to clear before writing the data back. It returns the 32B data
+ * word read from cache (not the value written after the clear
+ * operation).
+ */
+struct cfa_mpc_read_clr_cmd {
+ /*
+ * This value selects the format for the mid-path command for the CFA.
+ */
+ uint32_t opcode:8;
+ #define READ_CLR_CMD_OPCODE_READ_CLR 2
+ /* This value selects the table type to be acted upon. */
+ uint32_t table_type:4;
+ #define READ_CLR_CMD_TABLE_TYPE_ACTION 0
+ #define READ_CLR_CMD_TABLE_TYPE_EM 1
+ /* Unused field [4] */
+ uint32_t unused0:4;
+ /* Table scope to access. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused1:3;
+ /*
+ * This field is no longer used. The READ_CLR command always reads (and
+ * does a mask-clear) on a single cache line. This field was added for
+ * SR2 A0 to avoid an ADDR_ERR when TABLE_INDEX=0 and TABLE_TYPE=EM (see
+ * CUMULUS-17872). That issue was fixed in SR2 B0.
+ */
+ uint32_t data_size:3;
+ /* Unused field [1] */
+ uint32_t unused2:1;
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ uint32_t cache_option:4;
+ /*
+ * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE):
+ */
+ uint32_t table_index:26;
+ /* Unused field [6] */
+ uint32_t unused3:6;
+ /*
+ * The 64-bit host address to which to write the DMA data returned in
+ * the completion. The data will be written to the same function as the
+ * one that owns the SQ this command is read from. DATA_SIZE determines
+ * the maximum size of the data written. If HOST_ADDRESS[1:0] is not 0,
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ uint32_t host_address_1:32;
+ uint32_t host_address_2:32;
+ /*
+ * Specifies bits in 32B data word to clear. For x=0..15, when
+ * clear_mask[x]=1, data[x*16+15:x*16] is set to 0.
+ */
+ uint32_t clear_mask:16;
+ /* Unused field [16] */
+ uint32_t unused4:16;
+};
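+
+/*
+ * Software model of the CLEAR_MASK semantics above, for clarity only:
+ * the 32B data word is treated as sixteen 16b lanes and each set mask
+ * bit x zeroes lane x after the pre-clear value has been returned in the
+ * completion DMA. The helper name is hypothetical.
+ */
+static inline void
+cfa_mpc_apply_clear_mask_sketch(uint16_t lanes[16], uint16_t clear_mask)
+{
+	int x;
+
+	for (x = 0; x < 16; x++)
+		if (clear_mask & (1u << x))
+			lanes[x] = 0;
+}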
+
+/**
+ * INVALIDATE_CMD: This command forces an explicit evict of 1-4
+ * consecutive cache lines such that the next time the structure is used
+ * it will be re-read from its backing store location.
+ */
+struct cfa_mpc_invalidate_cmd {
+ /*
+ * This value selects the format for the mid-path command for the CFA.
+ */
+ uint32_t opcode:8;
+ #define INVALIDATE_CMD_OPCODE_INVALIDATE 5
+ /* This value selects the table type to be acted upon. */
+ uint32_t table_type:4;
+ #define INVALIDATE_CMD_TABLE_TYPE_ACTION 0
+ #define INVALIDATE_CMD_TABLE_TYPE_EM 1
+ /* Unused field [4] */
+ uint32_t unused0:4;
+ /* Table scope to access. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused1:3;
+ /*
+ * This value identifies the number of cache lines to invalidate. A
+ * FMT_ERR is reported if the value is not in the range of [1, 4].
+ */
+ uint32_t data_size:3;
+ /* Unused field [1] */
+ uint32_t unused2:1;
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ uint32_t cache_option:4;
+ /*
+ * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE):
+ */
+ uint32_t table_index:26;
+ /* Unused field [6] */
+ uint32_t unused3:6;
+};
+
+/**
+ * EM_SEARCH_CMD: This command supplies an exact match entry of 1-4 32B
+ * words to search for in the exact match table. CFA first computes the
+ * hash value of the key in the entry, and determines the static bucket
+ * address to search from the hash and the (EM_BUCKETS, EM_SIZE) for
+ * TABLE_SCOPE. It then searches that static bucket chain for an entry
+ * with a matching key (the LREC in the command entry is ignored). If a
+ * matching entry is found, CFA reports OK status in the completion.
+ * Otherwise, assuming no errors abort the search before it completes,
+ * it reports EM_MISS status.
+ */
+struct cfa_mpc_em_search_cmd {
+ /*
+ * This value selects the format for the mid-path command for the CFA.
+ */
+ uint32_t opcode:8;
+ #define EM_SEARCH_CMD_OPCODE_EM_SEARCH 8
+ /* Unused field [8] */
+ uint32_t unused0:8;
+ /* Table scope to access. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused1:3;
+ /*
+ * Number of 32B units in access. If value is outside the range [1, 4],
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ uint32_t data_size:3;
+ /* Unused field [1] */
+ uint32_t unused2:1;
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ uint32_t cache_option:4;
+ /* Unused field [96] */
+ uint32_t unused3_1:32;
+ uint32_t unused3_2:32;
+ uint32_t unused3_3:32;
+};
+
+/**
+ * EM_INSERT_CMD: This command supplies an exact match entry of 1-4 32B
+ * words to insert in the exact match table. CFA first computes the hash
+ * value of the key in the entry, and determines the static bucket
+ * address to search from the hash and the (EM_BUCKETS, EM_SIZE) for
+ * TABLE_SCOPE. It then writes the 1-4 32B words of the exact match
+ * entry starting at the TABLE_INDEX location in the command. When the
+ * entry write completes, it searches the static bucket chain for an
+ * existing entry with a key matching the key in the insert entry (the
+ * LREC does not need to match). If a matching entry is found: * If
+ * REPLACE=0, the CFA aborts the insert and returns EM_DUPLICATE status.
+ * * If REPLACE=1, the CFA overwrites the matching entry with the new
+ * entry. REPLACED_ENTRY=1 in the completion in this case to signal that
+ * an entry was replaced. The location of the entry is provided in the
+ * completion. If no match is found, CFA adds the new entry to the
+ * lowest unused entry in the tail bucket. If the current tail bucket is
+ * full, this requires adding a new bucket to the tail. Then entry is
+ * then inserted at entry number 0. TABLE_INDEX2 provides the address of
+ * the new tail bucket, if needed. If set to 0, the insert is aborted
+ * and returns EM_ABORT status instead of adding a new bucket to the
+ * tail. CHAIN_UPD in the completion indicates whether a new bucket was
+ * added (1) or not (0). For locked scopes, if the read of the static
+ * bucket gives a locked scope miss error, indicating that the address
+ * is not in the cache, the static bucket is assumed empty. In this
+ * case, TAI creates a new bucket, setting entry 0 to the new entry
+ * fields and initializing all other fields to 0. It writes this new
+ * bucket to the static bucket address, which installs it in the cache.
+ */
+struct cfa_mpc_em_insert_cmd {
+ /*
+ * This value selects the format for the mid-path command for the CFA.
+ */
+ uint32_t opcode:8;
+ #define EM_INSERT_CMD_OPCODE_EM_INSERT 9
+ /* Unused field [4] */
+ uint32_t unused0:4;
+ /*
+ * Sets the OPTION field on the cache interface to use write-through for
+ * EM entry writes while processing EM_INSERT commands. For all other
+	 * cases (including EM_INSERT bucket writes), the OPTION field is set by
+ * the CACHE_OPTION and CACHE_OPTION2 fields.
+ */
+ uint32_t write_through:1;
+ /* Unused field [3] */
+ uint32_t unused1:3;
+ /* Table scope to access. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused2:3;
+ /*
+ * Number of 32B units in access. If value is outside the range [1, 4],
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ uint32_t data_size:3;
+ /* Unused field [1] */
+ uint32_t unused3:1;
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ uint32_t cache_option:4;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Starting
+ * address to write exact match entry being inserted.
+ */
+ uint32_t table_index:26;
+ /* Unused field [2] */
+ uint32_t unused4:2;
+ /*
+ * Determines setting of OPTION field for all cache write requests for
+ * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support
+ * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0.
+ */
+ uint32_t cache_option2:4;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Only used
+ * when no duplicate entry is found and the tail bucket in the chain
+ * searched has no unused entries. In this case, TABLE_INDEX2 provides
+ * the index to the 32B dynamic bucket to add to the tail of the chain
+ * (it is the new tail bucket). In this case, the CFA first writes
+ * TABLE_INDEX2 with a new bucket: * Entry 0 of the bucket sets the
+ * HASH_MSBS computed from the hash and ENTRY_PTR to TABLE_INDEX. *
+ * Entries 1-5 of the bucket set HASH_MSBS and ENTRY_PTR to 0. * CHAIN=0
+	 * and CHAIN_PTR is set to CHAIN_PTR from the original tail bucket to
+ * maintain the background chaining. CFA then sets CHAIN=1 and
+ * CHAIN_PTR=TABLE_INDEX2 in the original tail bucket to link the new
+ * bucket to the chain. CHAIN_UPD=1 in the completion to signal that the
+ * new bucket at TABLE_INDEX2 was added to the tail of the chain.
+ */
+ uint32_t table_index2:26;
+ /* Unused field [5] */
+ uint32_t unused5:5;
+ /*
+ * Only used if an entry is found whose key matches the exact match
+ * entry key in the command: * REPLACE=0: The insert is aborted and
+ * EM_DUPLICATE status is returned, signaling that the insert failed.
+ * The index of the matching entry that blocked the insertion is
+ * returned in the completion. * REPLACE=1: The matching entry is
+ * replaced with that from the command (ENTRY_PTR in the bucket is
+ * overwritten with TABLE_INDEX from the command). HASH_MSBS for the
+ * entry number never changes in this case since it had to match the new
+ * entry key HASH_MSBS to match. When an entry is replaced,
+ * REPLACED_ENTRY=1 in the completion and the index of the matching
+ * entry is returned in the completion so that software can de-allocate
+ * the entry.
+ */
+ uint32_t replace:1;
+ /* Unused field [32] */
+ uint32_t unused6:32;
+};
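+
+/*
+ * Insert-command sketch, assuming the 1-4 32B exact match entry words
+ * accompany the command as described above and that spare_bkt_idx is a
+ * pre-allocated dynamic bucket (or 0 to abort rather than grow the
+ * chain). Helper and parameter names are hypothetical.
+ */
+static inline void
+cfa_mpc_em_insert_cmd_init_sketch(struct cfa_mpc_em_insert_cmd *cmd,
+				  uint8_t tbl_scope, uint8_t num_32b_units,
+				  uint32_t entry_idx, uint32_t spare_bkt_idx,
+				  int replace)
+{
+	*cmd = (struct cfa_mpc_em_insert_cmd) {
+		.opcode = EM_INSERT_CMD_OPCODE_EM_INSERT,
+		.table_scope = tbl_scope,
+		.data_size = num_32b_units,	/* 1..4, else FMT_ERR */
+		.table_index = entry_idx,	/* where CFA writes the entry */
+		.table_index2 = spare_bkt_idx,	/* new tail bucket if needed */
+		.replace = replace ? 1 : 0,
+	};
+}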
+
+/**
+ * EM_DELETE_CMD: This command searches for an exact match entry index
+ * in the static bucket chain and deletes it if found. TABLE_INDEX gives
+ * the entry index to delete and TABLE_INDEX2 gives the static bucket
+ * index. If a matching entry is found: * If the matching entry is the
+ * last valid entry in the tail bucket, its entry fields (HASH_MSBS and
+ * ENTRY_PTR) are set to 0 to delete the entry. * If the matching entry
+ * is not the last valid entry in the tail bucket, the entry fields from
+ * that last entry are moved to the matching entry, and the fields of
+ * that last entry are set to 0. * If any of the previous processing
+ * results in the tail bucket not having any valid entries, the tail
+ * bucket is the static bucket, the scope is a locked scope, and
+ * CHAIN_PTR=0, hardware evicts the static bucket from the cache and the
+ * completion signals this case with CHAIN_UPD=1. * If any of the
+ * previous processing results in the tail bucket not having any valid
+ * entries, and the tail bucket is not the static bucket, the tail
+ * bucket is removed from the chain. In this case, the penultimate
+ * bucket in the chain becomes the tail bucket. It has CHAIN set to 0 to
+ * unlink the tail bucket, and CHAIN_PTR set to that from the original
+ * tail bucket to preserve background chaining. The completion signals
+ * this case with CHAIN_UPD=1 and returns the index to the bucket
+ * removed so that software can de-allocate it. CFA returns OK status if
+ * the entry was successfully deleted. Otherwise, it returns EM_MISS
+ * status assuming there were no errors that caused processing to be
+ * aborted.
+ */
+struct cfa_mpc_em_delete_cmd {
+ /*
+ * This value selects the format for the mid-path command for the CFA.
+ */
+ uint32_t opcode:8;
+ #define EM_DELETE_CMD_OPCODE_EM_DELETE 10
+ /* Unused field [4] */
+ uint32_t unused0:4;
+ /*
+ * Sets the OPTION field on the cache interface to use write-through for
+ * EM entry writes while processing EM_INSERT commands. For all other
+	 * cases (including EM_INSERT bucket writes), the OPTION field is set by
+ * the CACHE_OPTION and CACHE_OPTION2 fields.
+ */
+ uint32_t write_through:1;
+ /* Unused field [3] */
+ uint32_t unused1:3;
+ /* Table scope to access. */
+ uint32_t table_scope:5;
+ /* Unused field [7] */
+ uint32_t unused2:7;
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ uint32_t cache_option:4;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Entry index
+ * to delete.
+ */
+ uint32_t table_index:26;
+ /* Unused field [2] */
+ uint32_t unused3:2;
+ /*
+ * Determines setting of OPTION field for all cache write requests for
+ * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support
+ * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0.
+ */
+ uint32_t cache_option2:4;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Static
+ * bucket address for bucket chain.
+ */
+ uint32_t table_index2:26;
+ /* Unused field [6] */
+ uint32_t unused4:6;
+};
+
+/**
+ * EM_CHAIN_CMD: This command updates CHAIN_PTR in the tail bucket of a
+ * static bucket chain, supplying both the static bucket and the new
+ * CHAIN_PTR value. TABLE_INDEX is the new CHAIN_PTR value and
+ * TABLE_INDEX2[23:0] is the static bucket. This command provides
+ * software a means to update background chaining coherently with other
+ * bucket updates. The value of CHAIN is unaffected (stays at 0). For
+ * locked scopes, if the static bucket is the tail bucket, it is empty
+ * (all of its ENTRY_PTR values are 0), and TABLE_INDEX=0 (the CHAIN_PTR
+ * is being set to 0), instead of updating the static bucket it is
+ * evicted from the cache. In this case, CHAIN_UPD=1 in the completion.
+ */
+struct cfa_mpc_em_chain_cmd {
+ /*
+ * This value selects the format for the mid-path command for the CFA.
+ */
+ uint32_t opcode:8;
+ #define EM_CHAIN_CMD_OPCODE_EM_CHAIN 11
+ /* Unused field [4] */
+ uint32_t unused0:4;
+ /*
+ * Sets the OPTION field on the cache interface to use write-through for
+ * EM entry writes while processing EM_INSERT commands. For all other
+	 * cases (including EM_INSERT bucket writes), the OPTION field is set by
+ * the CACHE_OPTION and CACHE_OPTION2 fields.
+ */
+ uint32_t write_through:1;
+ /* Unused field [3] */
+ uint32_t unused1:3;
+ /* Table scope to access. */
+ uint32_t table_scope:5;
+ /* Unused field [7] */
+ uint32_t unused2:7;
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ uint32_t cache_option:4;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. New
+ * CHAIN_PTR to write to tail bucket.
+ */
+ uint32_t table_index:26;
+ /* Unused field [2] */
+ uint32_t unused3:2;
+ /*
+ * Determines setting of OPTION field for all cache write requests for
+ * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support
+ * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0.
+ */
+ uint32_t cache_option2:4;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Static
+ * bucket address for bucket chain.
+ */
+ uint32_t table_index2:26;
+ /* Unused field [6] */
+ uint32_t unused4:6;
+};
+
+/**
+ * READ_CMP: When no errors, returns 1-4 consecutive 32B words from the
+ * TABLE_INDEX within the TABLE_SCOPE specified in the command, writing
+ * them to HOST_ADDRESS from the command.
+ */
+struct cfa_mpc_read_cmp {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ uint32_t type:6;
+ #define READ_CMP_TYPE_MID_PATH_SHORT 30
+ /* Unused field [2] */
+ uint32_t unused0:2;
+ /* The command processing status. */
+ uint32_t status:4;
+ #define READ_CMP_STATUS_OK 0
+ #define READ_CMP_STATUS_UNSPRT_ERR 1
+ #define READ_CMP_STATUS_FMT_ERR 2
+ #define READ_CMP_STATUS_SCOPE_ERR 3
+ #define READ_CMP_STATUS_ADDR_ERR 4
+ #define READ_CMP_STATUS_CACHE_ERR 5
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ uint32_t mp_client:4;
+ #define READ_CMP_MP_CLIENT_TE_CFA 2
+ #define READ_CMP_MP_CLIENT_RE_CFA 3
+ /* OPCODE from the command. */
+ uint32_t opcode:8;
+ #define READ_CMP_OPCODE_READ 0
+ /*
+ * The length of the DMA that accompanies the completion in units of
+ * DWORDs (32b). Valid values are [0, 128]. A value of zero indicates
+ * that there is no DMA that accompanies the completion.
+ */
+ uint32_t dma_length:8;
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ uint32_t opaque:32;
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ uint32_t v:1;
+ /* Unused field [3] */
+ uint32_t unused1:3;
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ uint32_t hash_msb:12;
+ /* Unused field [4] */
+ uint32_t unused2:4;
+ /* TABLE_TYPE from the command. */
+ uint32_t table_type:4;
+ #define READ_CMP_TABLE_TYPE_ACTION 0
+ #define READ_CMP_TABLE_TYPE_EM 1
+ /* TABLE_SCOPE from the command. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused3:3;
+ /* TABLE_INDEX from the command. */
+ uint32_t table_index:26;
+ /* Unused field [6] */
+ uint32_t unused4:6;
+};
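+
+/*
+ * Completion-polling sketch, assuming the caller tracks the expected
+ * valid-bit phase for the completion ring as described for the V field.
+ * Returns -1 while the completion has not been written for this pass,
+ * otherwise the STATUS code (0 == READ_CMP_STATUS_OK). The helper name
+ * and return convention are hypothetical.
+ */
+static inline int
+cfa_mpc_read_cmp_poll_sketch(const struct cfa_mpc_read_cmp *cmp,
+			     uint32_t expected_v)
+{
+	if (cmp->v != expected_v)
+		return -1;
+	return (int)cmp->status;
+}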
+
+/**
+ * WRITE_CMP: Returns status of the write of 1-4 consecutive 32B words
+ * starting at TABLE_INDEX in the table specified by (TABLE_TYPE,
+ * TABLE_SCOPE).
+ */
+struct cfa_mpc_write_cmp {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ uint32_t type:6;
+ #define WRITE_CMP_TYPE_MID_PATH_SHORT 30
+ /* Unused field [2] */
+ uint32_t unused0:2;
+ /* The command processing status. */
+ uint32_t status:4;
+ #define WRITE_CMP_STATUS_OK 0
+ #define WRITE_CMP_STATUS_UNSPRT_ERR 1
+ #define WRITE_CMP_STATUS_FMT_ERR 2
+ #define WRITE_CMP_STATUS_SCOPE_ERR 3
+ #define WRITE_CMP_STATUS_ADDR_ERR 4
+ #define WRITE_CMP_STATUS_CACHE_ERR 5
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ uint32_t mp_client:4;
+ #define WRITE_CMP_MP_CLIENT_TE_CFA 2
+ #define WRITE_CMP_MP_CLIENT_RE_CFA 3
+ /* OPCODE from the command. */
+ uint32_t opcode:8;
+ #define WRITE_CMP_OPCODE_WRITE 1
+ /* Unused field [8] */
+ uint32_t unused1:8;
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ uint32_t opaque:32;
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ uint32_t v:1;
+ /* Unused field [3] */
+ uint32_t unused2:3;
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ uint32_t hash_msb:12;
+ /* Unused field [4] */
+ uint32_t unused3:4;
+ /* TABLE_TYPE from the command. */
+ uint32_t table_type:4;
+ #define WRITE_CMP_TABLE_TYPE_ACTION 0
+ #define WRITE_CMP_TABLE_TYPE_EM 1
+ /* TABLE_SCOPE from the command. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused4:3;
+ /* TABLE_INDEX from the command. */
+ uint32_t table_index:26;
+ /* Unused field [6] */
+ uint32_t unused5:6;
+};
+
+/**
+ * READ_CLR_CMP: When no errors, returns 1 32B word from TABLE_INDEX in
+ * the table specified by (TABLE_TYPE, TABLE_SCOPE). The data returned
+ * is the value prior to the clear.
+ */
+struct cfa_mpc_read_clr_cmp {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ uint32_t type:6;
+ #define READ_CLR_CMP_TYPE_MID_PATH_SHORT 30
+ /* Unused field [2] */
+ uint32_t unused0:2;
+ /* The command processing status. */
+ uint32_t status:4;
+ #define READ_CLR_CMP_STATUS_OK 0
+ #define READ_CLR_CMP_STATUS_UNSPRT_ERR 1
+ #define READ_CLR_CMP_STATUS_FMT_ERR 2
+ #define READ_CLR_CMP_STATUS_SCOPE_ERR 3
+ #define READ_CLR_CMP_STATUS_ADDR_ERR 4
+ #define READ_CLR_CMP_STATUS_CACHE_ERR 5
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ uint32_t mp_client:4;
+ #define READ_CLR_CMP_MP_CLIENT_TE_CFA 2
+ #define READ_CLR_CMP_MP_CLIENT_RE_CFA 3
+ /* OPCODE from the command. */
+ uint32_t opcode:8;
+ #define READ_CLR_CMP_OPCODE_READ_CLR 2
+ /*
+ * The length of the DMA that accompanies the completion in units of
+ * DWORDs (32b). Valid values are [0, 128]. A value of zero indicates
+ * that there is no DMA that accompanies the completion.
+ */
+ uint32_t dma_length:8;
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ uint32_t opaque:32;
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ uint32_t v:1;
+ /* Unused field [3] */
+ uint32_t unused1:3;
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ uint32_t hash_msb:12;
+ /* Unused field [4] */
+ uint32_t unused2:4;
+ /* TABLE_TYPE from the command. */
+ uint32_t table_type:4;
+ #define READ_CLR_CMP_TABLE_TYPE_ACTION 0
+ #define READ_CLR_CMP_TABLE_TYPE_EM 1
+ /* TABLE_SCOPE from the command. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused3:3;
+ /* TABLE_INDEX from the command. */
+ uint32_t table_index:26;
+ /* Unused field [6] */
+ uint32_t unused4:6;
+};
+
+/**
+ * INVALIDATE_CMP: Returns status for INVALIDATE commands.
+ */
+struct cfa_mpc_invalidate_cmp {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ uint32_t type:6;
+ #define INVALIDATE_CMP_TYPE_MID_PATH_SHORT 30
+ /* Unused field [2] */
+ uint32_t unused0:2;
+ /* The command processing status. */
+ uint32_t status:4;
+ #define INVALIDATE_CMP_STATUS_OK 0
+ #define INVALIDATE_CMP_STATUS_UNSPRT_ERR 1
+ #define INVALIDATE_CMP_STATUS_FMT_ERR 2
+ #define INVALIDATE_CMP_STATUS_SCOPE_ERR 3
+ #define INVALIDATE_CMP_STATUS_ADDR_ERR 4
+ #define INVALIDATE_CMP_STATUS_CACHE_ERR 5
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ uint32_t mp_client:4;
+ #define INVALIDATE_CMP_MP_CLIENT_TE_CFA 2
+ #define INVALIDATE_CMP_MP_CLIENT_RE_CFA 3
+ /* OPCODE from the command. */
+ uint32_t opcode:8;
+ #define INVALIDATE_CMP_OPCODE_INVALIDATE 5
+ /* Unused field [8] */
+ uint32_t unused1:8;
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ uint32_t opaque:32;
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ uint32_t v:1;
+ /* Unused field [3] */
+ uint32_t unused2:3;
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ uint32_t hash_msb:12;
+ /* Unused field [4] */
+ uint32_t unused3:4;
+ /* TABLE_TYPE from the command. */
+ uint32_t table_type:4;
+ #define INVALIDATE_CMP_TABLE_TYPE_ACTION 0
+ #define INVALIDATE_CMP_TABLE_TYPE_EM 1
+ /* TABLE_SCOPE from the command. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused4:3;
+ /* TABLE_INDEX from the command. */
+ uint32_t table_index:26;
+ /* Unused field [6] */
+ uint32_t unused5:6;
+};
+
+/**
+ * EM_SEARCH_CMP: For OK status, returns the index of the matching entry
+ * found for the EM key supplied in the command. Returns EM_MISS status
+ * if no match was found.
+ */
+struct cfa_mpc_em_search_cmp {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ uint32_t type:6;
+ #define EM_SEARCH_CMP_TYPE_MID_PATH_LONG 31
+ /* Unused field [2] */
+ uint32_t unused0:2;
+ /* The command processing status. */
+ uint32_t status:4;
+ #define EM_SEARCH_CMP_STATUS_OK 0
+ #define EM_SEARCH_CMP_STATUS_UNSPRT_ERR 1
+ #define EM_SEARCH_CMP_STATUS_FMT_ERR 2
+ #define EM_SEARCH_CMP_STATUS_SCOPE_ERR 3
+ #define EM_SEARCH_CMP_STATUS_ADDR_ERR 4
+ #define EM_SEARCH_CMP_STATUS_CACHE_ERR 5
+ #define EM_SEARCH_CMP_STATUS_EM_MISS 6
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ uint32_t mp_client:4;
+ #define EM_SEARCH_CMP_MP_CLIENT_TE_CFA 2
+ #define EM_SEARCH_CMP_MP_CLIENT_RE_CFA 3
+ /* OPCODE from the command. */
+ uint32_t opcode:8;
+ #define EM_SEARCH_CMP_OPCODE_EM_SEARCH 8
+ /* Unused field [8] */
+ uint32_t unused1:8;
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ uint32_t opaque:32;
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ uint32_t v1:1;
+ /* Unused field [3] */
+ uint32_t unused2:3;
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ uint32_t hash_msb:12;
+ /* Unused field [8] */
+ uint32_t unused3:8;
+ /* TABLE_SCOPE from the command. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused4:3;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. For OK
+ * status, gives ENTRY_PTR[25:0] of the matching entry found. Otherwise,
+ * set to 0.
+ */
+ uint32_t table_index:26;
+ /* Unused field [6] */
+ uint32_t unused5:6;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. If the hash
+ * is computed (no errors during initial processing of the command),
+ * TABLE_INDEX2[23:0] is the static bucket address determined from the
+ * hash of the exact match entry key in the command and the (EM_SIZE,
+ * EM_BUCKETS) configuration for TABLE_SCOPE of the command. Bits 25:24
+ * in this case are set to 0. For any other status, it is always 0.
+ */
+ uint32_t table_index2:26;
+ /* Unused field [38] */
+ uint32_t unused6_1:6;
+ uint32_t unused6_2:32;
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ uint32_t v2:1;
+ /* Unused field [31] */
+ uint32_t unused7:31;
+ /*
+ * BKT_NUM is the bucket number in chain of the tail bucket after
+ * finishing processing the command, except when the command stops
+ * processing before the tail bucket. NUM_ENTRIES is the number of valid
+ * entries in the BKT_NUM bucket. The following describes the cases
+ * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after
+ * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR,
+ * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. *
+ * For CACHE_ERR completion status, BKT_NUM will be set to the bucket
+ * number that was last read without error. If ERR=1 in the response to
+ * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The
+ * static bucket is number 0, BKT_NUM increments for each new bucket in
+ * the chain, and saturates at 255. Therefore, if the value is 255,
+ * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES
+ * will still be the correct value as described above for the bucket.
+ */
+ uint32_t bkt_num:8;
+ /* See BKT_NUM description. */
+ uint32_t num_entries:3;
+ /* Unused field [21] */
+ uint32_t unused8:21;
+};
+
+/**
+ * EM_INSERT_CMP: OK status indicates that the exact match entry from
+ * the command was successfully inserted. EM_DUPLICATE status indicates
+ * that the insert was aborted because an entry with the same exact
+ * match key was found and REPLACE=0 in the command. EM_ABORT status
+ * indicates that no duplicate was found, the tail bucket in the chain
+ * was full, and TABLE_INDEX2=0. No changes are made to the database in
+ * this case. TABLE_INDEX is the starting address at which to insert the
+ * exact match entry (from the command). TABLE_INDEX2 is the address at
+ * which to insert a new bucket at the tail of the static bucket chain
+ * if needed (from the command). CHAIN_UPD=1 if a new bucket was added
+ * at this address. TABLE_INDEX3 is the static bucket address for the
+ * chain, determined from hashing the exact match entry. Software needs
+ * this address and TABLE_INDEX in order to delete the entry using an
+ * EM_DELETE command. TABLE_INDEX4 is the index of an entry found that
+ * had a matching exact match key to the command entry key. If no
+ * matching entry was found, it is set to 0. There are two cases when
+ * there is a matching entry, depending on REPLACE from the command: *
+ * REPLACE=0: EM_DUPLICATE status is reported and the insert is aborted.
+ * Software can use the static bucket address (TABLE_INDEX3[23:0]) and
+ * the matching entry (TABLE_INDEX4) in an EM_DELETE command if it
+ * wishes to explicity delete the matching entry. * REPLACE=1:
+ * REPLACED_ENTRY=1 to signal that the entry at TABLE_INDEX4 was
+ * replaced by the insert entry. REPLACED_ENTRY will only be 1 if
+ * reporting OK status in this case. Software can de-allocate the entry
+ * at TABLE_INDEX4.
+ */
+struct cfa_mpc_em_insert_cmp {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ uint32_t type:6;
+ #define EM_INSERT_CMP_TYPE_MID_PATH_LONG 31
+ /* Unused field [2] */
+ uint32_t unused0:2;
+ /* The command processing status. */
+ uint32_t status:4;
+ #define EM_INSERT_CMP_STATUS_OK 0
+ #define EM_INSERT_CMP_STATUS_UNSPRT_ERR 1
+ #define EM_INSERT_CMP_STATUS_FMT_ERR 2
+ #define EM_INSERT_CMP_STATUS_SCOPE_ERR 3
+ #define EM_INSERT_CMP_STATUS_ADDR_ERR 4
+ #define EM_INSERT_CMP_STATUS_CACHE_ERR 5
+ #define EM_INSERT_CMP_STATUS_EM_DUPLICATE 7
+ #define EM_INSERT_CMP_STATUS_EM_ABORT 9
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ uint32_t mp_client:4;
+ #define EM_INSERT_CMP_MP_CLIENT_TE_CFA 2
+ #define EM_INSERT_CMP_MP_CLIENT_RE_CFA 3
+ /* OPCODE from the command. */
+ uint32_t opcode:8;
+ #define EM_INSERT_CMP_OPCODE_EM_INSERT 9
+ /* Unused field [8] */
+ uint32_t unused1:8;
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ uint32_t opaque:32;
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ uint32_t v1:1;
+ /* Unused field [3] */
+ uint32_t unused2:3;
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ uint32_t hash_msb:12;
+ /* Unused field [8] */
+ uint32_t unused3:8;
+ /* TABLE_SCOPE from the command. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused4:3;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX
+ * from the command, which is the starting address at which to insert
+ * the exact match entry.
+ */
+ uint32_t table_index:26;
+ /* Unused field [6] */
+ uint32_t unused5:6;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2
+ * from the command, which is the index for the new tail bucket to add
+ * if needed (CHAIN_UPD=1 if it was used).
+ */
+ uint32_t table_index2:26;
+ /* Unused field [6] */
+ uint32_t unused6:6;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. If the hash
+ * is computed (no errors during initial processing of the command),
+	 * TABLE_INDEX3[23:0] is the static bucket address determined from the
+ * hash of the exact match entry key in the command and the (EM_SIZE,
+ * EM_BUCKETS) configuration for TABLE_SCOPE of the command. Bits 25:24
+ * in this case are set to 0. For any other status, it is always 0.
+ */
+ uint32_t table_index3:26;
+ /* Unused field [6] */
+ uint32_t unused7:6;
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ uint32_t v2:1;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. ENTRY_PTR of
+ * matching entry found. Set to 0 if no matching entry found. If
+ * REPLACED_ENTRY=1, that indicates a matching entry was found and
+ * REPLACE=1 in the command. In this case, the matching entry was
+ * replaced by the new entry in the command and this index can therefore
+	 * be de-allocated.
+ */
+ uint32_t table_index4:26;
+ /* Unused field [5] */
+ uint32_t unused8:5;
+ /*
+ * BKT_NUM is the bucket number in chain of the tail bucket after
+ * finishing processing the command, except when the command stops
+ * processing before the tail bucket. NUM_ENTRIES is the number of valid
+ * entries in the BKT_NUM bucket. The following describes the cases
+ * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after
+ * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR,
+ * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. *
+ * For CACHE_ERR completion status, BKT_NUM will be set to the bucket
+ * number that was last read without error. If ERR=1 in the response to
+ * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The
+ * static bucket is number 0, BKT_NUM increments for each new bucket in
+ * the chain, and saturates at 255. Therefore, if the value is 255,
+ * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES
+ * will still be the correct value as described above for the bucket.
+ */
+ uint32_t bkt_num:8;
+ /* See BKT_NUM description. */
+ uint32_t num_entries:3;
+ /*
+ * Specifies if the chain was updated while processing the command: Set
+ * to 1 when a new bucket is added to the tail of the static bucket
+ * chain at TABLE_INDEX2. This occurs if and only if the insert requires
+ * adding a new entry and the tail bucket is full. If set to 0,
+ * TABLE_INDEX2 was not used and is therefore still free.
+ */
+ uint32_t chain_upd:1;
+ /*
+	 * Set to 1 if a matching entry was found and REPLACE=1 in the
+	 * command. In that case, the entry starting at TABLE_INDEX4 was
+	 * replaced and can therefore be de-allocated. Otherwise, this flag
+	 * is set to 0.
+ */
+ uint32_t replaced_entry:1;
+ /* Unused field [19] */
+ uint32_t unused9:19;
+};
+
+/**
+ * EM_DELETE_CMP: OK status indicates that an ENTRY_PTR matching
+ * TABLE_INDEX was found in the static bucket chain specified and was
+ * therefore deleted. EM_MISS status indicates that no match was found.
+ * TABLE_INDEX is from the command. It is the index of the entry to
+ * delete. TABLE_INDEX2 is from the command. It is the static bucket
+ * address. TABLE_INDEX3 is the index of the tail bucket of the static
+ * bucket chain prior to processing the command. TABLE_INDEX4 is the
+ * index of the tail bucket of the static bucket chain after processing
+ * the command. If CHAIN_UPD=1 and TABLE_INDEX4==TABLE_INDEX2, the
+ * static bucket was the tail bucket, it became empty after the delete,
+ * the scope is a locked scope, and CHAIN_PTR was 0. In this case, the
+ * static bucket has been evicted from the cache. Otherwise, if
+ * CHAIN_UPD=1, the original tail bucket given by TABLE_INDEX3 was
+ * removed from the chain because it went empty. It can therefore be de-
+ * allocated.
+ */
+struct cfa_mpc_em_delete_cmp {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ uint32_t type:6;
+ #define EM_DELETE_CMP_TYPE_MID_PATH_LONG 31
+ /* Unused field [2] */
+ uint32_t unused0:2;
+ /* The command processing status. */
+ uint32_t status:4;
+ #define EM_DELETE_CMP_STATUS_OK 0
+ #define EM_DELETE_CMP_STATUS_UNSPRT_ERR 1
+ #define EM_DELETE_CMP_STATUS_FMT_ERR 2
+ #define EM_DELETE_CMP_STATUS_SCOPE_ERR 3
+ #define EM_DELETE_CMP_STATUS_ADDR_ERR 4
+ #define EM_DELETE_CMP_STATUS_CACHE_ERR 5
+ #define EM_DELETE_CMP_STATUS_EM_MISS 6
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ uint32_t mp_client:4;
+ #define EM_DELETE_CMP_MP_CLIENT_TE_CFA 2
+ #define EM_DELETE_CMP_MP_CLIENT_RE_CFA 3
+ /* OPCODE from the command. */
+ uint32_t opcode:8;
+ #define EM_DELETE_CMP_OPCODE_EM_DELETE 10
+ /* Unused field [8] */
+ uint32_t unused1:8;
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ uint32_t opaque:32;
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ uint32_t v1:1;
+ /* Unused field [3] */
+ uint32_t unused2:3;
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ uint32_t hash_msb:12;
+ /* Unused field [8] */
+ uint32_t unused3:8;
+ /* TABLE_SCOPE from the command. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused4:3;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX
+ * from the command, which is the index of the entry to delete.
+ */
+ uint32_t table_index:26;
+ /* Unused field [6] */
+ uint32_t unused5:6;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2
+ * from the command.
+ */
+ uint32_t table_index2:26;
+ /* Unused field [6] */
+ uint32_t unused6:6;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. For OK or
+ * EM_MISS status, the index of the tail bucket of the chain prior to
+ * processing the command. If CHAIN_UPD=1, the bucket was removed and
+ * this index can be de-allocated. For other status values, it is set to
+ * 0.
+ */
+ uint32_t table_index3:26;
+ /* Unused field [6] */
+ uint32_t unused7:6;
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ uint32_t v2:1;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. For OK or
+	 * EM_MISS status, the index of the tail bucket of the chain after
+	 * processing the command. If CHAIN_UPD=0 (always for EM_MISS
+	 * status), it is always equal to TABLE_INDEX3 as the chain was not
+	 * updated. For other status values, it is set to 0.
+ */
+ uint32_t table_index4:26;
+ /* Unused field [5] */
+ uint32_t unused8:5;
+ /*
+ * BKT_NUM is the bucket number in chain of the tail bucket after
+ * finishing processing the command, except when the command stops
+ * processing before the tail bucket. NUM_ENTRIES is the number of valid
+ * entries in the BKT_NUM bucket. The following describes the cases
+ * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after
+ * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR,
+ * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. *
+ * For CACHE_ERR completion status, BKT_NUM will be set to the bucket
+ * number that was last read without error. If ERR=1 in the response to
+ * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The
+ * static bucket is number 0, BKT_NUM increments for each new bucket in
+ * the chain, and saturates at 255. Therefore, if the value is 255,
+ * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES
+ * will still be the correct value as described above for the bucket.
+ */
+ uint32_t bkt_num:8;
+ /* See BKT_NUM description. */
+ uint32_t num_entries:3;
+ /*
+ * Specifies if the chain was updated while processing the command: Set
+ * to 1 when a bucket is removed from the static bucket chain. This
+ * occurs if after the delete, the tail bucket is a dynamic bucket and
+ * no longer has any valid entries. In this case, software should de-
+ * allocate the dynamic bucket at TABLE_INDEX3. It is also set to 1 when
+ * the static bucket is evicted, which only occurs for locked scopes.
+ * See the EM_DELETE command description for details.
+ */
+ uint32_t chain_upd:1;
+ /* Unused field [20] */
+ uint32_t unused9:20;
+};
+
+/**
+ * EM_CHAIN_CMP: OK status indicates that the CHAIN_PTR of the tail
+ * bucket was successfully updated. TABLE_INDEX is from the command. It
+ * is the value of the new CHAIN_PTR. TABLE_INDEX2 is from the command.
+ * TABLE_INDEX3 is the index of the tail bucket of the static bucket
+ * chain.
+ */
+struct cfa_mpc_em_chain_cmp {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ uint32_t type:6;
+ #define EM_CHAIN_CMP_TYPE_MID_PATH_LONG 31
+ /* Unused field [2] */
+ uint32_t unused0:2;
+ /* The command processing status. */
+ uint32_t status:4;
+ #define EM_CHAIN_CMP_STATUS_OK 0
+ #define EM_CHAIN_CMP_STATUS_UNSPRT_ERR 1
+ #define EM_CHAIN_CMP_STATUS_FMT_ERR 2
+ #define EM_CHAIN_CMP_STATUS_SCOPE_ERR 3
+ #define EM_CHAIN_CMP_STATUS_ADDR_ERR 4
+ #define EM_CHAIN_CMP_STATUS_CACHE_ERR 5
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ uint32_t mp_client:4;
+ #define EM_CHAIN_CMP_MP_CLIENT_TE_CFA 2
+ #define EM_CHAIN_CMP_MP_CLIENT_RE_CFA 3
+ /* OPCODE from the command. */
+ uint32_t opcode:8;
+ #define EM_CHAIN_CMP_OPCODE_EM_CHAIN 11
+ /* Unused field [8] */
+ uint32_t unused1:8;
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ uint32_t opaque:32;
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ uint32_t v1:1;
+ /* Unused field [3] */
+ uint32_t unused2:3;
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ uint32_t hash_msb:12;
+ /* Unused field [8] */
+ uint32_t unused3:8;
+ /* TABLE_SCOPE from the command. */
+ uint32_t table_scope:5;
+ /* Unused field [3] */
+ uint32_t unused4:3;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX
+ * from the command, which is the new CHAIN_PTR for the tail bucket of
+ * the static bucket chain.
+ */
+ uint32_t table_index:26;
+ /* Unused field [6] */
+ uint32_t unused5:6;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2
+ * from the command.
+ */
+ uint32_t table_index2:26;
+ /* Unused field [6] */
+ uint32_t unused6:6;
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. For OK
+ * status, the index of the tail bucket of the chain. Otherwise, set to
+ * 0.
+ */
+ uint32_t table_index3:26;
+ /* Unused field [6] */
+ uint32_t unused7:6;
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ uint32_t v2:1;
+ /* Unused field [31] */
+ uint32_t unused8:31;
+ /*
+ * BKT_NUM is the bucket number in chain of the tail bucket after
+ * finishing processing the command, except when the command stops
+ * processing before the tail bucket. NUM_ENTRIES is the number of valid
+ * entries in the BKT_NUM bucket. The following describes the cases
+ * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after
+ * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR,
+ * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. *
+ * For CACHE_ERR completion status, BKT_NUM will be set to the bucket
+ * number that was last read without error. If ERR=1 in the response to
+ * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The
+ * static bucket is number 0, BKT_NUM increments for each new bucket in
+ * the chain, and saturates at 255. Therefore, if the value is 255,
+ * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES
+ * will still be the correct value as described above for the bucket.
+ */
+ uint32_t bkt_num:8;
+ /* See BKT_NUM description. */
+ uint32_t num_entries:3;
+ /*
+ * Set to 1 when the scope is a locked scope, the tail bucket is the
+ * static bucket, the bucket is empty (all of its ENTRY_PTR values are
+ * 0), and TABLE_INDEX=0 in the command. In this case, the static bucket
+ * is evicted. For all other cases, it is set to 0.
+ */
+ uint32_t chain_upd:1;
+ /* Unused field [20] */
+ uint32_t unused9:20;
+};
+
+/* clang-format on */
+
+#endif /* _CFA_P70_MPC_STRUCTS_H_ */
new file mode 100644
@@ -0,0 +1,927 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld_p70_mpc.c
+ *
+ * @brief CFA phase 7.0 API implementation to build CFA Mid-path commands
+ * and parse CFA Mid-path command completions
+ */
+
+#define COMP_ID BLD
+
+#include <errno.h>
+#include <string.h>
+#include "sys_util.h"
+#include "cfa_trace.h"
+#include "cfa_types.h"
+#include "cfa_p70.h"
+#include "cfa_bld_p70_mpc.h"
+#include "cfa_bld_p70_mpc_defs.h"
+#include "cfa_p70_mpc_structs.h"
+
+/* CFA MPC client ids */
+#define MP_CLIENT_TE_CFA READ_CMP_MP_CLIENT_TE_CFA
+#define MP_CLIENT_RE_CFA READ_CMP_MP_CLIENT_RE_CFA
+
+/* MPC Client id check in CFA completion messages */
+#define ASSERT_CFA_MPC_CLIENT_ID(MPCID) \
+ do { \
+ if ((MPCID) != MP_CLIENT_TE_CFA && \
+ (MPCID) != MP_CLIENT_RE_CFA) { \
+ CFA_LOG_WARN( \
+ "Unexpected MPC client id in response: %d\n", \
+ (MPCID)); \
+ } \
+ } while (0)
+
+#ifdef NXT_ENV_DEBUG
+#define ASSERT_RETURN(ERRNO) CFA_LOG_ERR("Returning error: %d\n", (ERRNO))
+#else
+#define ASSERT_RETURN(ERRNO)
+#endif
+
+/**
+ * MPC header definition
+ */
+struct mpc_header {
+ uint32_t type:6;
+ uint32_t flags:10;
+ uint32_t len:16;
+ uint32_t opaque;
+ uint64_t unused;
+};
+
+/*
+ * For successful completions of read and read-clear MPC CFA
+ * commands, the responses will contain this dma info structure
+ * following the cfa_mpc_read(|clr)_cmp structure and preceding
+ * the actual data read from the cache.
+ */
+struct mpc_cr_short_dma_data {
+ uint32_t dma_length:8;
+ uint32_t unused0:24;
+ uint32_t dma_addr0;
+ uint32_t dma_addr1;
+};
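+
+/*
+ * For reference, a successful READ/READ_CLR response buffer is laid out
+ * as consumed by the parse_mpc_read_result()/parse_mpc_read_clr_result()
+ * helpers below (read data is in 32B cache access units):
+ *
+ *   struct mpc_header | struct cfa_mpc_read(_clr)_cmp |
+ *   struct mpc_cr_short_dma_data | read data
+ */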
+
+/** Add MPC header information to MPC command message */
+static int fill_mpc_header(uint8_t *cmd, uint32_t size, uint32_t opaque_val)
+{
+ struct mpc_header hdr = {
+ .opaque = opaque_val,
+ };
+
+ if (size < sizeof(struct mpc_header)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ memcpy(cmd, &hdr, sizeof(hdr));
+
+ return 0;
+}
+
+/** Compose Table read-clear message */
+static int compose_mpc_read_clr_msg(uint8_t *cmd_buff, uint32_t *cmd_buff_len,
+ struct cfa_mpc_cache_axs_params *parms)
+{
+ struct cfa_mpc_read_clr_cmd *cmd;
+ struct cfa_mpc_cache_read_params *rd_parms = &parms->read;
+ uint32_t cmd_size =
+ sizeof(struct mpc_header) + sizeof(struct cfa_mpc_read_clr_cmd);
+
+ if (parms->data_size != 1) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (parms->tbl_type >= CFA_HW_TABLE_MAX) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (*cmd_buff_len < cmd_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ cmd = (struct cfa_mpc_read_clr_cmd *)(cmd_buff +
+ sizeof(struct mpc_header));
+
+ /* Populate CFA MPC command header */
+ memset(cmd, 0, sizeof(struct cfa_mpc_read_clr_cmd));
+ cmd->opcode = READ_CLR_CMD_OPCODE_READ_CLR;
+ cmd->table_type = parms->tbl_type;
+ cmd->table_scope = parms->tbl_scope;
+ cmd->data_size = parms->data_size;
+ cmd->table_index = parms->tbl_index;
+ cmd->host_address_1 = (uint32_t)rd_parms->host_address;
+ cmd->host_address_2 = (uint32_t)(rd_parms->host_address >> 32);
+ switch (rd_parms->mode) {
+ case CFA_MPC_RD_EVICT:
+ cmd->cache_option = CACHE_READ_CLR_OPTION_EVICT;
+ break;
+ default:
+ case CFA_MPC_RD_NORMAL:
+ cmd->cache_option = CACHE_READ_CLR_OPTION_NORMAL;
+ break;
+ }
+ cmd->clear_mask = rd_parms->clear_mask;
+ *cmd_buff_len = cmd_size;
+
+ return 0;
+}
+
+/** Compose Table read message */
+static int compose_mpc_read_msg(uint8_t *cmd_buff, uint32_t *cmd_buff_len,
+ struct cfa_mpc_cache_axs_params *parms)
+{
+ struct cfa_mpc_read_cmd *cmd;
+ struct cfa_mpc_cache_read_params *rd_parms = &parms->read;
+ uint32_t cmd_size =
+ sizeof(struct mpc_header) + sizeof(struct cfa_mpc_read_cmd);
+
+ if (parms->data_size < 1 || parms->data_size > 4) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (parms->tbl_type >= CFA_HW_TABLE_MAX) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (*cmd_buff_len < cmd_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ cmd = (struct cfa_mpc_read_cmd *)(cmd_buff + sizeof(struct mpc_header));
+
+ /* Populate CFA MPC command header */
+ memset(cmd, 0, sizeof(struct cfa_mpc_read_cmd));
+ cmd->opcode = READ_CMD_OPCODE_READ;
+ cmd->table_type = parms->tbl_type;
+ cmd->table_scope = parms->tbl_scope;
+ cmd->data_size = parms->data_size;
+ cmd->table_index = parms->tbl_index;
+ cmd->host_address_1 = (uint32_t)rd_parms->host_address;
+ cmd->host_address_2 = (uint32_t)(rd_parms->host_address >> 32);
+ switch (rd_parms->mode) {
+ case CFA_MPC_RD_EVICT:
+ cmd->cache_option = CACHE_READ_OPTION_EVICT;
+ break;
+ case CFA_MPC_RD_DEBUG_LINE:
+ cmd->cache_option = CACHE_READ_OPTION_DEBUG_LINE;
+ break;
+ case CFA_MPC_RD_DEBUG_TAG:
+ cmd->cache_option = CACHE_READ_OPTION_DEBUG_TAG;
+ break;
+ default:
+ case CFA_MPC_RD_NORMAL:
+ cmd->cache_option = CACHE_READ_OPTION_NORMAL;
+ break;
+ }
+ *cmd_buff_len = cmd_size;
+
+ return 0;
+}
+
+/** Compose Table write message */
+static int compose_mpc_write_msg(uint8_t *cmd_buff, uint32_t *cmd_buff_len,
+ struct cfa_mpc_cache_axs_params *parms)
+{
+ struct cfa_mpc_write_cmd *cmd;
+ struct cfa_mpc_cache_write_params *wr_parms = &parms->write;
+ uint32_t cmd_size = sizeof(struct mpc_header) +
+ sizeof(struct cfa_mpc_write_cmd) +
+ parms->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE;
+
+ if (parms->data_size < 1 || parms->data_size > 4) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (parms->tbl_type >= CFA_HW_TABLE_MAX) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (!parms->write.data_ptr) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (*cmd_buff_len < cmd_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ cmd = (struct cfa_mpc_write_cmd *)(cmd_buff +
+ sizeof(struct mpc_header));
+
+ /* Populate CFA MPC command header */
+ memset(cmd, 0, sizeof(struct cfa_mpc_write_cmd));
+ cmd->opcode = WRITE_CMD_OPCODE_WRITE;
+ cmd->table_type = parms->tbl_type;
+ cmd->table_scope = parms->tbl_scope;
+ cmd->data_size = parms->data_size;
+ cmd->table_index = parms->tbl_index;
+ switch (wr_parms->mode) {
+ case CFA_MPC_WR_WRITE_THRU:
+ cmd->cache_option = CACHE_WRITE_OPTION_WRITE_THRU;
+ break;
+ default:
+ case CFA_MPC_WR_WRITE_BACK:
+ cmd->cache_option = CACHE_WRITE_OPTION_WRITE_BACK;
+ break;
+ }
+
+ /* Populate CFA MPC command payload following the header */
+ memcpy(cmd + 1, wr_parms->data_ptr,
+ parms->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE);
+
+ *cmd_buff_len = cmd_size;
+
+ return 0;
+}
+
+/** Compose Invalidate message */
+static int compose_mpc_evict_msg(uint8_t *cmd_buff, uint32_t *cmd_buff_len,
+ struct cfa_mpc_cache_axs_params *parms)
+{
+ struct cfa_mpc_invalidate_cmd *cmd;
+ struct cfa_mpc_cache_evict_params *ev_parms = &parms->evict;
+ uint32_t cmd_size = sizeof(struct mpc_header) +
+ sizeof(struct cfa_mpc_invalidate_cmd);
+
+ if (parms->data_size < 1 || parms->data_size > 4) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (parms->tbl_type >= CFA_HW_TABLE_MAX) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (*cmd_buff_len < cmd_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ cmd = (struct cfa_mpc_invalidate_cmd *)(cmd_buff +
+ sizeof(struct mpc_header));
+
+ /* Populate CFA MPC command header */
+ memset(cmd, 0, sizeof(struct cfa_mpc_invalidate_cmd));
+ cmd->opcode = INVALIDATE_CMD_OPCODE_INVALIDATE;
+ cmd->table_type = parms->tbl_type;
+ cmd->table_scope = parms->tbl_scope;
+ cmd->data_size = parms->data_size;
+ cmd->table_index = parms->tbl_index;
+
+ switch (ev_parms->mode) {
+ case CFA_MPC_EV_EVICT_LINE:
+ cmd->cache_option = CACHE_EVICT_OPTION_LINE;
+ break;
+ case CFA_MPC_EV_EVICT_CLEAN_LINES:
+ cmd->cache_option = CACHE_EVICT_OPTION_CLEAN_LINES;
+ break;
+ case CFA_MPC_EV_EVICT_CLEAN_FAST_EVICT_LINES:
+ cmd->cache_option = CACHE_EVICT_OPTION_CLEAN_FAST_LINES;
+ break;
+ case CFA_MPC_EV_EVICT_CLEAN_AND_CLEAN_FAST_EVICT_LINES:
+ cmd->cache_option =
+ CACHE_EVICT_OPTION_CLEAN_AND_CLEAN_FAST_EVICT_LINES;
+ break;
+ case CFA_MPC_EV_EVICT_TABLE_SCOPE:
+ /* Not supported */
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ default:
+ case CFA_MPC_EV_EVICT_SCOPE_ADDRESS:
+ cmd->cache_option = CACHE_EVICT_OPTION_SCOPE_ADDRESS;
+ break;
+ }
+ *cmd_buff_len = cmd_size;
+
+ return 0;
+}
+
+/**
+ * Build MPC CFA Cache access command
+ *
+ * @param [in] opc MPC opcode
+ *
+ * @param [out] cmd_buff Command data buffer to write the command to
+ *
+ * @param [in/out] cmd_buff_len Pointer to command buffer size param
+ * Set by caller to indicate the input cmd_buff size.
+ * Set to the actual size of the command generated by the api.
+ *
+ * @param [in] parms Pointer to MPC cache access command parameters
+ *
+ * @return 0 on Success, negative errno on failure
+ */
+int cfa_mpc_build_cache_axs_cmd(enum cfa_mpc_opcode opc, uint8_t *cmd_buff,
+ uint32_t *cmd_buff_len,
+ struct cfa_mpc_cache_axs_params *parms)
+{
+ int rc;
+ if (!cmd_buff || !cmd_buff_len || *cmd_buff_len == 0 || !parms) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ rc = fill_mpc_header(cmd_buff, *cmd_buff_len, parms->opaque);
+ if (rc)
+ return rc;
+
+ switch (opc) {
+ case CFA_MPC_READ_CLR:
+ return compose_mpc_read_clr_msg(cmd_buff, cmd_buff_len, parms);
+ case CFA_MPC_READ:
+ return compose_mpc_read_msg(cmd_buff, cmd_buff_len, parms);
+ case CFA_MPC_WRITE:
+ return compose_mpc_write_msg(cmd_buff, cmd_buff_len, parms);
+ case CFA_MPC_INVALIDATE:
+ return compose_mpc_evict_msg(cmd_buff, cmd_buff_len, parms);
+ default:
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+}
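+
+/*
+ * Example usage (illustrative sketch only; the buffer size, table scope,
+ * index and 'dma_addr' below are placeholders chosen by the caller):
+ *
+ *   uint8_t buf[128];
+ *   uint32_t len = sizeof(buf);
+ *   struct cfa_mpc_cache_axs_params p = { 0 };
+ *
+ *   p.opaque = 0x1234;
+ *   p.tbl_type = CFA_HW_TABLE_ACTION;
+ *   p.tbl_scope = 1;
+ *   p.tbl_index = 0;
+ *   p.data_size = 1;
+ *   p.read.mode = CFA_MPC_RD_NORMAL;
+ *   p.read.host_address = dma_addr;
+ *   rc = cfa_mpc_build_cache_axs_cmd(CFA_MPC_READ, buf, &len, &p);
+ *
+ * On success, 'len' holds the actual command size written to 'buf'.
+ */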
+
+/** Compose EM Search message */
+static int compose_mpc_em_search_msg(uint8_t *cmd_buff, uint32_t *cmd_buff_len,
+ struct cfa_mpc_em_op_params *parms)
+{
+ struct cfa_mpc_em_search_cmd *cmd;
+ struct cfa_mpc_em_search_params *e = &parms->search;
+ uint32_t cmd_size = sizeof(struct mpc_header) +
+ sizeof(struct cfa_mpc_em_search_cmd) +
+ e->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE;
+
+ if (e->data_size < 1 || e->data_size > 4) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (*cmd_buff_len < cmd_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (!e->em_entry) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ cmd = (struct cfa_mpc_em_search_cmd *)(cmd_buff +
+ sizeof(struct mpc_header));
+
+ /* Populate CFA MPC command header */
+ memset(cmd, 0, sizeof(struct cfa_mpc_em_search_cmd));
+ cmd->opcode = EM_SEARCH_CMD_OPCODE_EM_SEARCH;
+ cmd->table_scope = parms->tbl_scope;
+ cmd->data_size = e->data_size;
+ /* Default to normal read cache option for EM search */
+ cmd->cache_option = CACHE_READ_OPTION_NORMAL;
+
+ /* Populate CFA MPC command payload following the header */
+ memcpy(cmd + 1, e->em_entry,
+ e->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE);
+
+ *cmd_buff_len = cmd_size;
+
+ return 0;
+}
+
+/** Compose EM Insert message */
+static int compose_mpc_em_insert_msg(uint8_t *cmd_buff, uint32_t *cmd_buff_len,
+ struct cfa_mpc_em_op_params *parms)
+{
+ struct cfa_mpc_em_insert_cmd *cmd;
+ struct cfa_mpc_em_insert_params *e = &parms->insert;
+ uint32_t cmd_size = sizeof(struct mpc_header) +
+ sizeof(struct cfa_mpc_em_insert_cmd) +
+ e->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE;
+
+ if (e->data_size < 1 || e->data_size > 4) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (*cmd_buff_len < cmd_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (!e->em_entry) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ cmd = (struct cfa_mpc_em_insert_cmd *)(cmd_buff +
+ sizeof(struct mpc_header));
+
+ /* Populate CFA MPC command header */
+ memset(cmd, 0, sizeof(struct cfa_mpc_em_insert_cmd));
+ cmd->opcode = EM_INSERT_CMD_OPCODE_EM_INSERT;
+ cmd->write_through = 1;
+ cmd->table_scope = parms->tbl_scope;
+ cmd->data_size = e->data_size;
+ cmd->replace = e->replace;
+ cmd->table_index = e->entry_idx;
+ cmd->table_index2 = e->bucket_idx;
+ /* Default to normal read cache option for EM insert */
+ cmd->cache_option = CACHE_READ_OPTION_NORMAL;
+ /* Default to write through cache write option for EM insert */
+ cmd->cache_option2 = CACHE_WRITE_OPTION_WRITE_THRU;
+
+ /* Populate CFA MPC command payload following the header */
+ memcpy(cmd + 1, e->em_entry,
+ e->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE);
+
+ *cmd_buff_len = cmd_size;
+
+ return 0;
+}
+
+/** Compose EM Delete message */
+static int compose_mpc_em_delete_msg(uint8_t *cmd_buff, uint32_t *cmd_buff_len,
+ struct cfa_mpc_em_op_params *parms)
+{
+ struct cfa_mpc_em_delete_cmd *cmd;
+ struct cfa_mpc_em_delete_params *e = &parms->del;
+ uint32_t cmd_size = sizeof(struct mpc_header) +
+ sizeof(struct cfa_mpc_em_delete_cmd);
+
+ if (*cmd_buff_len < cmd_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Populate CFA MPC command header */
+ cmd = (struct cfa_mpc_em_delete_cmd *)(cmd_buff +
+ sizeof(struct mpc_header));
+ memset(cmd, 0, sizeof(struct cfa_mpc_em_delete_cmd));
+ cmd->opcode = EM_DELETE_CMD_OPCODE_EM_DELETE;
+ cmd->table_scope = parms->tbl_scope;
+ cmd->table_index = e->entry_idx;
+ cmd->table_index2 = e->bucket_idx;
+ /* Default to normal read cache option for EM delete */
+ cmd->cache_option = CACHE_READ_OPTION_NORMAL;
+ /* Default to write through cache write option for EM delete */
+ cmd->cache_option2 = CACHE_WRITE_OPTION_WRITE_THRU;
+
+ *cmd_buff_len = cmd_size;
+
+ return 0;
+}
+
+/** Compose EM Chain message */
+static int compose_mpc_em_chain_msg(uint8_t *cmd_buff, uint32_t *cmd_buff_len,
+ struct cfa_mpc_em_op_params *parms)
+{
+ struct cfa_mpc_em_chain_cmd *cmd;
+ struct cfa_mpc_em_chain_params *e = &parms->chain;
+ uint32_t cmd_size =
+ sizeof(struct mpc_header) + sizeof(struct cfa_mpc_em_chain_cmd);
+
+ if (*cmd_buff_len < cmd_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Populate CFA MPC command header */
+ cmd = (struct cfa_mpc_em_chain_cmd *)(cmd_buff +
+ sizeof(struct mpc_header));
+ memset(cmd, 0, sizeof(struct cfa_mpc_em_chain_cmd));
+ cmd->opcode = EM_CHAIN_CMD_OPCODE_EM_CHAIN;
+ cmd->table_scope = parms->tbl_scope;
+ cmd->table_index = e->entry_idx;
+ cmd->table_index2 = e->bucket_idx;
+	/* Default to normal read cache option for EM chain */
+	cmd->cache_option = CACHE_READ_OPTION_NORMAL;
+	/* Default to write through cache write option for EM chain */
+ cmd->cache_option2 = CACHE_WRITE_OPTION_WRITE_THRU;
+
+ *cmd_buff_len = cmd_size;
+
+ return 0;
+}
+
+/**
+ * Build MPC CFA EM operation command
+ *
+ * @param [in] opc MPC EM opcode
+ *
+ * @param [out] cmd_buff Command data buffer to write the command to
+ *
+ * @param [in/out] cmd_buff_len Pointer to command buffer size param
+ * Set by caller to indicate the input cmd_buff size.
+ * Set to the actual size of the command generated by the api.
+ *
+ * @param [in] parms Pointer to MPC EM operation command parameters
+ *
+ * @return 0 on Success, negative errno on failure
+ */
+int cfa_mpc_build_em_op_cmd(enum cfa_mpc_opcode opc, uint8_t *cmd_buff,
+ uint32_t *cmd_buff_len,
+ struct cfa_mpc_em_op_params *parms)
+{
+ int rc;
+ if (!cmd_buff || !cmd_buff_len || *cmd_buff_len == 0 || !parms) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ rc = fill_mpc_header(cmd_buff, *cmd_buff_len, parms->opaque);
+ if (rc)
+ return rc;
+
+ switch (opc) {
+ case CFA_MPC_EM_SEARCH:
+ return compose_mpc_em_search_msg(cmd_buff, cmd_buff_len, parms);
+ case CFA_MPC_EM_INSERT:
+ return compose_mpc_em_insert_msg(cmd_buff, cmd_buff_len, parms);
+ case CFA_MPC_EM_DELETE:
+ return compose_mpc_em_delete_msg(cmd_buff, cmd_buff_len, parms);
+ case CFA_MPC_EM_CHAIN:
+ return compose_mpc_em_chain_msg(cmd_buff, cmd_buff_len, parms);
+ default:
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
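+
+/*
+ * Example usage (illustrative sketch only; 'em_entry', 'entry_ptr' and
+ * 'bucket_ptr' are placeholders for caller-managed values):
+ *
+ *   uint8_t buf[256];
+ *   uint32_t len = sizeof(buf);
+ *   struct cfa_mpc_em_op_params p = { 0 };
+ *
+ *   p.opaque = 0x5678;
+ *   p.tbl_scope = 1;
+ *   p.insert.data_size = 2;
+ *   p.insert.em_entry = em_entry;
+ *   p.insert.entry_idx = entry_ptr;
+ *   p.insert.bucket_idx = bucket_ptr;
+ *   p.insert.replace = 0;
+ *   rc = cfa_mpc_build_em_op_cmd(CFA_MPC_EM_INSERT, buf, &len, &p);
+ */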
+
+/** Parse MPC read clear completion */
+static int parse_mpc_read_clr_result(uint8_t *resp_buff, uint32_t resp_buff_len,
+ struct cfa_mpc_cache_axs_result *result)
+{
+ uint8_t *rd_data;
+ uint32_t resp_size, rd_size;
+ struct cfa_mpc_read_clr_cmp *cmp;
+
+ /* Minimum data size = 1 32B unit */
+ rd_size = MPC_CFA_CACHE_ACCESS_UNIT_SIZE;
+ resp_size = sizeof(struct mpc_header) +
+ sizeof(struct cfa_mpc_read_clr_cmp) +
+ sizeof(struct mpc_cr_short_dma_data) + rd_size;
+ cmp = (struct cfa_mpc_read_clr_cmp *)(resp_buff +
+ sizeof(struct mpc_header));
+
+ if (resp_buff_len < resp_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (result->data_len < rd_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (!result->rd_data) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ ASSERT_CFA_MPC_CLIENT_ID(cmp->mp_client);
+
+ result->status = cmp->status;
+ result->error_data = cmp->hash_msb;
+ result->opaque = cmp->opaque;
+
+ /* No data to copy if there was an error, return early */
+ if (cmp->status != READ_CLR_CMP_STATUS_OK)
+ return 0;
+
+	/*
+	 * Copy the read data - starting at the end of the completion
+	 * header including dma data
+	 */
+ rd_data = resp_buff + sizeof(struct mpc_header) +
+ sizeof(struct cfa_mpc_read_clr_cmp) +
+ sizeof(struct mpc_cr_short_dma_data);
+ memcpy(result->rd_data, rd_data, rd_size);
+
+ return 0;
+}
+
+/** Parse MPC table read completion */
+static int parse_mpc_read_result(uint8_t *resp_buff, uint32_t resp_buff_len,
+ struct cfa_mpc_cache_axs_result *result)
+{
+ uint8_t *rd_data;
+ uint32_t resp_size, rd_size;
+ struct cfa_mpc_read_cmp *cmp;
+
+ /* Minimum data size = 1 32B unit */
+ rd_size = MPC_CFA_CACHE_ACCESS_UNIT_SIZE;
+ resp_size = sizeof(struct mpc_header) +
+ sizeof(struct cfa_mpc_read_cmp) +
+ sizeof(struct mpc_cr_short_dma_data) + rd_size;
+ cmp = (struct cfa_mpc_read_cmp *)(resp_buff +
+ sizeof(struct mpc_header));
+
+ if (resp_buff_len < resp_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (result->data_len < rd_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (!result->rd_data) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ ASSERT_CFA_MPC_CLIENT_ID(cmp->mp_client);
+
+ result->status = cmp->status;
+ result->error_data = cmp->hash_msb;
+ result->opaque = cmp->opaque;
+
+ /* No data to copy if there was an error, return early */
+ if (cmp->status != READ_CMP_STATUS_OK)
+ return 0;
+
+ /* Copy max of 4 32B words that can fit into the return buffer */
+ rd_size = MIN(4 * MPC_CFA_CACHE_ACCESS_UNIT_SIZE, result->data_len);
+
+ /* Copy the read data - starting at the end of the completion header */
+ rd_data = resp_buff + sizeof(struct mpc_header) +
+ sizeof(struct cfa_mpc_read_cmp) +
+ sizeof(struct mpc_cr_short_dma_data);
+ memcpy(result->rd_data, rd_data, rd_size);
+
+ return 0;
+}
+
+/** Parse MPC table write completion */
+static int parse_mpc_write_result(uint8_t *resp_buff, uint32_t resp_buff_len,
+ struct cfa_mpc_cache_axs_result *result)
+{
+ uint32_t resp_size;
+ struct cfa_mpc_write_cmp *cmp;
+
+ resp_size =
+ sizeof(struct mpc_header) + sizeof(struct cfa_mpc_write_cmp);
+ cmp = (struct cfa_mpc_write_cmp *)(resp_buff +
+ sizeof(struct mpc_header));
+
+ if (resp_buff_len < resp_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ ASSERT_CFA_MPC_CLIENT_ID(cmp->mp_client);
+
+ result->status = cmp->status;
+ result->error_data = cmp->hash_msb;
+ result->opaque = cmp->opaque;
+ return 0;
+}
+
+/** Parse MPC table evict completion */
+static int parse_mpc_evict_result(uint8_t *resp_buff, uint32_t resp_buff_len,
+ struct cfa_mpc_cache_axs_result *result)
+{
+ uint32_t resp_size;
+ struct cfa_mpc_invalidate_cmp *cmp;
+
+ resp_size = sizeof(struct mpc_header) +
+ sizeof(struct cfa_mpc_invalidate_cmp);
+ cmp = (struct cfa_mpc_invalidate_cmp *)(resp_buff +
+ sizeof(struct mpc_header));
+
+ if (resp_buff_len < resp_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ ASSERT_CFA_MPC_CLIENT_ID(cmp->mp_client);
+
+ result->status = cmp->status;
+ result->error_data = cmp->hash_msb;
+ result->opaque = cmp->opaque;
+ return 0;
+}
+
+/**
+ * Parse MPC CFA Cache access command completion result
+ *
+ * @param [in] opc MPC cache access opcode
+ *
+ * @param [in] resp_buff Data buffer containing the response to parse
+ *
+ * @param [in] resp_buff_len Response buffer size
+ *
+ * @param [out] result Pointer to MPC cache access result object. This
+ * object will contain the fields parsed and extracted from the
+ * response buffer.
+ *
+ * @return 0 on Success, negative errno on failure
+ */
+int cfa_mpc_parse_cache_axs_resp(enum cfa_mpc_opcode opc, uint8_t *resp_buff,
+ uint32_t resp_buff_len,
+ struct cfa_mpc_cache_axs_result *result)
+{
+ if (!resp_buff || resp_buff_len == 0 || !result) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ switch (opc) {
+ case CFA_MPC_READ_CLR:
+ return parse_mpc_read_clr_result(resp_buff, resp_buff_len,
+ result);
+ case CFA_MPC_READ:
+ return parse_mpc_read_result(resp_buff, resp_buff_len, result);
+ case CFA_MPC_WRITE:
+ return parse_mpc_write_result(resp_buff, resp_buff_len, result);
+ case CFA_MPC_INVALIDATE:
+ return parse_mpc_evict_result(resp_buff, resp_buff_len, result);
+ default:
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+}
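+
+/*
+ * Example usage (illustrative sketch only; 'resp'/'resp_len' refer to a
+ * received MPC completion and 'rd_buf' is a caller-provided buffer):
+ *
+ *   struct cfa_mpc_cache_axs_result res = { 0 };
+ *
+ *   res.rd_data = rd_buf;
+ *   res.data_len = sizeof(rd_buf);
+ *   rc = cfa_mpc_parse_cache_axs_resp(CFA_MPC_READ, resp, resp_len, &res);
+ *
+ * On success (rc == 0 and res.status == READ_CMP_STATUS_OK), the data
+ * read from the cache is available in rd_buf.
+ */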
+
+/** Parse MPC EM Search completion */
+static int parse_mpc_em_search_result(uint8_t *resp_buff,
+ uint32_t resp_buff_len,
+ struct cfa_mpc_em_op_result *result)
+{
+ uint32_t resp_size;
+ struct cfa_mpc_em_search_cmp *cmp;
+
+ cmp = (struct cfa_mpc_em_search_cmp *)(resp_buff +
+ sizeof(struct mpc_header));
+ resp_size = sizeof(struct mpc_header) +
+ sizeof(struct cfa_mpc_em_search_cmp);
+
+ if (resp_buff_len < resp_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ ASSERT_CFA_MPC_CLIENT_ID(cmp->mp_client);
+
+ result->status = cmp->status;
+ result->error_data = cmp->status != CFA_MPC_OK ? cmp->hash_msb : 0;
+ result->opaque = cmp->opaque;
+ result->search.bucket_num = cmp->bkt_num;
+ result->search.num_entries = cmp->num_entries;
+ result->search.hash_msb = cmp->hash_msb;
+ result->search.match_idx = cmp->table_index;
+ result->search.bucket_idx = cmp->table_index2;
+
+ return 0;
+}
+
+/** Parse MPC EM Insert completion */
+static int parse_mpc_em_insert_result(uint8_t *resp_buff,
+ uint32_t resp_buff_len,
+ struct cfa_mpc_em_op_result *result)
+{
+ uint32_t resp_size;
+ struct cfa_mpc_em_insert_cmp *cmp;
+
+ cmp = (struct cfa_mpc_em_insert_cmp *)(resp_buff +
+ sizeof(struct mpc_header));
+ resp_size = sizeof(struct mpc_header) +
+ sizeof(struct cfa_mpc_em_insert_cmp);
+
+ if (resp_buff_len < resp_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ ASSERT_CFA_MPC_CLIENT_ID(cmp->mp_client);
+
+ result->status = cmp->status;
+ result->error_data = cmp->status != CFA_MPC_OK ? cmp->hash_msb : 0;
+ result->opaque = cmp->opaque;
+ result->insert.bucket_num = cmp->bkt_num;
+ result->insert.num_entries = cmp->num_entries;
+ result->insert.hash_msb = cmp->hash_msb;
+ result->insert.match_idx = cmp->table_index4;
+ result->insert.bucket_idx = cmp->table_index3;
+ result->insert.replaced = cmp->replaced_entry;
+ result->insert.chain_update = cmp->chain_upd;
+
+ return 0;
+}
+
+/** Parse MPC EM Delete completion */
+static int parse_mpc_em_delete_result(uint8_t *resp_buff,
+ uint32_t resp_buff_len,
+ struct cfa_mpc_em_op_result *result)
+{
+ uint32_t resp_size;
+ struct cfa_mpc_em_delete_cmp *cmp;
+
+ cmp = (struct cfa_mpc_em_delete_cmp *)(resp_buff +
+ sizeof(struct mpc_header));
+ resp_size = sizeof(struct mpc_header) +
+ sizeof(struct cfa_mpc_em_delete_cmp);
+
+ if (resp_buff_len < resp_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ ASSERT_CFA_MPC_CLIENT_ID(cmp->mp_client);
+
+ result->status = cmp->status;
+ result->error_data = cmp->hash_msb;
+ result->opaque = cmp->opaque;
+ result->del.bucket_num = cmp->bkt_num;
+ result->del.num_entries = cmp->num_entries;
+ result->del.prev_tail = cmp->table_index3;
+ result->del.new_tail = cmp->table_index4;
+ result->del.chain_update = cmp->chain_upd;
+
+ return 0;
+}
+
+/** Parse MPC EM Chain completion */
+static int parse_mpc_em_chain_result(uint8_t *resp_buff, uint32_t resp_buff_len,
+ struct cfa_mpc_em_op_result *result)
+{
+ uint32_t resp_size;
+ struct cfa_mpc_em_chain_cmp *cmp;
+
+ cmp = (struct cfa_mpc_em_chain_cmp *)(resp_buff +
+ sizeof(struct mpc_header));
+ resp_size =
+ sizeof(struct mpc_header) + sizeof(struct cfa_mpc_em_chain_cmp);
+
+ if (resp_buff_len < resp_size) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ ASSERT_CFA_MPC_CLIENT_ID(cmp->mp_client);
+
+ result->status = cmp->status;
+ result->error_data = cmp->hash_msb;
+ result->opaque = cmp->opaque;
+ result->chain.bucket_num = cmp->bkt_num;
+ result->chain.num_entries = cmp->num_entries;
+
+ return 0;
+}
+
+/**
+ * Parse MPC CFA EM operation command completion result
+ *
+ * @param [in] opc MPC EM operation opcode
+ *
+ * @param [in] resp_buff Data buffer containing the response to parse
+ *
+ * @param [in] resp_buff_len Response buffer size
+ *
+ * @param [out] result Pointer to MPC EM operation result object. This
+ * object will contain the fields parsed and extracted from the
+ * response buffer.
+ *
+ * @return 0 on Success, negative errno on failure
+ */
+int cfa_mpc_parse_em_op_resp(enum cfa_mpc_opcode opc, uint8_t *resp_buff,
+ uint32_t resp_buff_len,
+ struct cfa_mpc_em_op_result *result)
+{
+ if (!resp_buff || resp_buff_len == 0 || !result) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ switch (opc) {
+ case CFA_MPC_EM_SEARCH:
+ return parse_mpc_em_search_result(resp_buff, resp_buff_len,
+ result);
+ case CFA_MPC_EM_INSERT:
+ return parse_mpc_em_insert_result(resp_buff, resp_buff_len,
+ result);
+ case CFA_MPC_EM_DELETE:
+ return parse_mpc_em_delete_result(resp_buff, resp_buff_len,
+ result);
+ case CFA_MPC_EM_CHAIN:
+ return parse_mpc_em_chain_result(resp_buff, resp_buff_len,
+ result);
+ default:
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+}
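+
+/*
+ * Example usage (illustrative sketch only; 'resp'/'resp_len' refer to a
+ * received EM_INSERT completion):
+ *
+ *   struct cfa_mpc_em_op_result res = { 0 };
+ *
+ *   rc = cfa_mpc_parse_em_op_resp(CFA_MPC_EM_INSERT, resp, resp_len, &res);
+ *
+ * On success (rc == 0 and res.status == CFA_MPC_OK), fields such as
+ * res.insert.match_idx, res.insert.replaced and res.insert.chain_update
+ * describe the outcome of the insert.
+ */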
new file mode 100644
@@ -0,0 +1,51 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld_p70_mpc_defs.h
+ *
+ * @brief CFA phase 7.0 specific MPC CFA command/completion field definitions
+ */
+
+#ifndef _CFA_BLD_P70_MPC_DEFS_H_
+#define _CFA_BLD_P70_MPC_DEFS_H_
+
+/*
+ * CFA phase 7.0 Action/Lookup cache option values for various accesses
+ * From EAS
+ */
+#define CACHE_READ_OPTION_NORMAL 0x0
+#define CACHE_READ_OPTION_EVICT 0x1
+#define CACHE_READ_OPTION_FAST_EVICT 0x2
+#define CACHE_READ_OPTION_DEBUG_LINE 0x4
+#define CACHE_READ_OPTION_DEBUG_TAG 0x5
+
+/*
+ * Cache read and clear command expects the cache option bit 3
+ * to be set, failing which the clear is not done.
+ */
+#define CACHE_READ_CLR_MASK (0x1U << 3)
+#define CACHE_READ_CLR_OPTION_NORMAL \
+ (CACHE_READ_CLR_MASK | CACHE_READ_OPTION_NORMAL)
+#define CACHE_READ_CLR_OPTION_EVICT \
+ (CACHE_READ_CLR_MASK | CACHE_READ_OPTION_EVICT)
+#define CACHE_READ_CLR_OPTION_FAST_EVICT \
+ (CACHE_READ_CLR_MASK | CACHE_READ_OPTION_FAST_EVICT)
+
+#define CACHE_WRITE_OPTION_WRITE_BACK 0x0
+#define CACHE_WRITE_OPTION_WRITE_THRU 0x1
+
+#define CACHE_EVICT_OPTION_CLEAN_LINES 0x1
+#define CACHE_EVICT_OPTION_CLEAN_FAST_LINES 0x2
+#define CACHE_EVICT_OPTION_CLEAN_AND_CLEAN_FAST_EVICT_LINES 0x3
+#define CACHE_EVICT_OPTION_LINE 0x4
+#define CACHE_EVICT_OPTION_SCOPE_ADDRESS 0x5
+
+/* EM/action cache access unit size in bytes */
+#define MPC_CFA_CACHE_ACCESS_UNIT_SIZE CFA_P70_CACHE_LINE_BYTES
+
+#endif /* _CFA_BLD_P70_MPC_DEFS_H_ */
new file mode 100644
@@ -0,0 +1,1127 @@
+/****************************************************************************
+ * Copyright(c) 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld_p70_host_mpc_wrapper.c
+ *
+ * @brief CFA Phase 7.0 specific MPC Builder Wrapper functions
+ */
+
+#define COMP_ID BLD
+
+#include <errno.h>
+#include <string.h>
+#include "sys_util.h"
+#include "cfa_trace.h"
+
+#include "cfa_types.h"
+#include "cfa_bld_mpcops.h"
+
+#include "host/cfa_bld_mpc_field_ids.h"
+#include "host/cfa_p70_mpc_field_ids.h"
+#include "p70/cfa_p70_mpc_structs.h"
+#include "p70/cfa_bld_p70_mpc.h"
+#include "cfa_bld_p70_host_mpc_wrapper.h"
+#include "host/cfa_p70_mpc_field_mapping.h"
+
+#ifdef NXT_ENV_DEBUG
+#define ASSERT_RETURN(ERRNO) CFA_LOG_ERR("Returning error: %d\n", (ERRNO))
+#else
+#define ASSERT_RETURN(ERRNO)
+#endif
+
+/*
+ * Helper macro to set an input parm field from fields array
+ */
+#define SET_PARM_VALUE(NAME, TYPE, INDEX, FIELDS) \
+ do { \
+ if (FIELDS[INDEX].field_id != INVALID_U16) \
+ parms.NAME = (TYPE)fields[INDEX].val; \
+ } while (0)
+
+/*
+ * Helper macro to set an input parm field from fields array through a mapping
+ * function
+ */
+#define SET_PARM_MAPPED_VALUE(NAME, TYPE, INDEX, FIELDS, MAP_FUNC) \
+ ({ \
+ int retcode = 0; \
+ if (FIELDS[INDEX].field_id != INVALID_U16) { \
+ uint64_t mapped_val; \
+ retcode = MAP_FUNC(fields[INDEX].val, &mapped_val); \
+ if (retcode) \
+ ASSERT_RETURN(retcode); \
+ else \
+ parms.NAME = (TYPE)mapped_val; \
+ } \
+ retcode; \
+ })
+
+/*
+ * Helper macro to set a result field into fields array
+ */
+#define GET_RESP_VALUE(NAME, INDEX, FIELDS) \
+ do { \
+ if (FIELDS[INDEX].field_id != INVALID_U16) \
+ FIELDS[INDEX].val = (uint64_t)result.NAME; \
+ } while (0)
+
+/*
+ * Helper macro to set a result field into fields array through a mapping
+ * function
+ */
+#define GET_RESP_MAPPED_VALUE(NAME, INDEX, FIELDS, MAP_FUNC) \
+ ({ \
+ int retcode = 0; \
+ if (FIELDS[INDEX].field_id != INVALID_U16) { \
+ int retcode; \
+ retcode = MAP_FUNC(result.NAME, &mapped_val); \
+ if (retcode) \
+ ASSERT_RETURN(retcode); \
+ else \
+ fields[INDEX].val = mapped_val; \
+ } \
+ retcode; \
+ })
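+
+/*
+ * Note: SET_PARM_MAPPED_VALUE and GET_RESP_MAPPED_VALUE are statement
+ * expressions that evaluate to 'retcode', so callers can propagate a
+ * failed global-to-device mapping, e.g.:
+ *
+ *   rc = SET_PARM_MAPPED_VALUE(tbl_type, enum cfa_hw_table_type,
+ *                              CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD,
+ *                              fields, table_type_map);
+ *   if (rc)
+ *           return rc;
+ */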
+
+/*
+ * MPC fields validate routine.
+ */
+static bool fields_valid(struct cfa_mpc_data_obj *fields, uint16_t len,
+ struct field_mapping *fld_map)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ /* Field not requested to be set by caller, skip it */
+ if (fields[i].field_id == INVALID_U16)
+ continue;
+
+ /*
+		 * Field id should be the index value unless
+		 * it is set to INVALID_U16 (UINT16_MAX)
+ */
+ if (fields[i].field_id != i)
+ return false;
+
+ /* Field is valid */
+ if (!fld_map[i].valid)
+ return false;
+ }
+
+ return true;
+}
+
+/* Map global table type definition to p70 specific value */
+static int table_type_map(uint64_t val, uint64_t *mapped_val)
+{
+ switch (val) {
+ case CFA_BLD_MPC_HW_TABLE_TYPE_ACTION:
+ *mapped_val = CFA_HW_TABLE_ACTION;
+ break;
+ case CFA_BLD_MPC_HW_TABLE_TYPE_LOOKUP:
+ *mapped_val = CFA_HW_TABLE_LOOKUP;
+ break;
+ default:
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Map global read mode value to p70 specific value */
+static int read_mode_map(uint64_t val, uint64_t *mapped_val)
+{
+ switch (val) {
+ case CFA_BLD_MPC_RD_NORMAL:
+ *mapped_val = CFA_MPC_RD_NORMAL;
+ break;
+ case CFA_BLD_MPC_RD_EVICT:
+ *mapped_val = CFA_MPC_RD_EVICT;
+ break;
+ case CFA_BLD_MPC_RD_DEBUG_LINE:
+ *mapped_val = CFA_MPC_RD_DEBUG_LINE;
+ break;
+ case CFA_BLD_MPC_RD_DEBUG_TAG:
+ *mapped_val = CFA_MPC_RD_DEBUG_TAG;
+ break;
+ default:
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Map global write mode value to p70 specific value */
+static int write_mode_map(uint64_t val, uint64_t *mapped_val)
+{
+ switch (val) {
+ case CFA_BLD_MPC_WR_WRITE_THRU:
+ *mapped_val = CFA_MPC_WR_WRITE_THRU;
+ break;
+ case CFA_BLD_MPC_WR_WRITE_BACK:
+ *mapped_val = CFA_MPC_WR_WRITE_BACK;
+ break;
+ default:
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Map global evict mode value to p70 specific value */
+static int evict_mode_map(uint64_t val, uint64_t *mapped_val)
+{
+ switch (val) {
+ case CFA_BLD_MPC_EV_EVICT_LINE:
+ *mapped_val = CFA_MPC_EV_EVICT_LINE;
+ break;
+ case CFA_BLD_MPC_EV_EVICT_SCOPE_ADDRESS:
+ *mapped_val = CFA_MPC_EV_EVICT_SCOPE_ADDRESS;
+ break;
+ case CFA_BLD_MPC_EV_EVICT_CLEAN_LINES:
+ *mapped_val = CFA_MPC_EV_EVICT_CLEAN_LINES;
+ break;
+ case CFA_BLD_MPC_EV_EVICT_CLEAN_FAST_EVICT_LINES:
+ *mapped_val = CFA_MPC_EV_EVICT_CLEAN_FAST_EVICT_LINES;
+ break;
+ case CFA_BLD_MPC_EV_EVICT_CLEAN_AND_CLEAN_FAST_EVICT_LINES:
+ *mapped_val = CFA_MPC_EV_EVICT_CLEAN_AND_CLEAN_FAST_EVICT_LINES;
+ break;
+ case CFA_BLD_MPC_EV_EVICT_TABLE_SCOPE:
+ *mapped_val = CFA_MPC_EV_EVICT_TABLE_SCOPE;
+ break;
+ default:
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Map device specific response status code to global value */
+static int status_code_map(uint64_t val, uint64_t *mapped_val)
+{
+ switch (val) {
+ case CFA_MPC_OK:
+ *mapped_val = CFA_BLD_MPC_OK;
+ break;
+ case CFA_MPC_UNSPRT_ERR:
+ *mapped_val = CFA_BLD_MPC_UNSPRT_ERR;
+ break;
+ case CFA_MPC_FMT_ERR:
+ *mapped_val = CFA_BLD_MPC_FMT_ERR;
+ break;
+ case CFA_MPC_SCOPE_ERR:
+ *mapped_val = CFA_BLD_MPC_SCOPE_ERR;
+ break;
+ case CFA_MPC_ADDR_ERR:
+ *mapped_val = CFA_BLD_MPC_ADDR_ERR;
+ break;
+ case CFA_MPC_CACHE_ERR:
+ *mapped_val = CFA_BLD_MPC_CACHE_ERR;
+ break;
+ case CFA_MPC_EM_MISS:
+ *mapped_val = CFA_BLD_MPC_EM_MISS;
+ break;
+ case CFA_MPC_EM_DUPLICATE:
+ *mapped_val = CFA_BLD_MPC_EM_DUPLICATE;
+ break;
+ case CFA_MPC_EM_EVENT_COLLECTION_FAIL:
+ *mapped_val = CFA_BLD_MPC_EM_EVENT_COLLECTION_FAIL;
+ break;
+ case CFA_MPC_EM_ABORT:
+ *mapped_val = CFA_BLD_MPC_EM_ABORT;
+ break;
+ default:
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static bool has_unsupported_fields(struct cfa_mpc_data_obj *fields,
+ uint16_t len, uint16_t *unsup_flds,
+ uint16_t unsup_flds_len)
+{
+ int i, j;
+
+ for (i = 0; i < len; i++) {
+ /* Skip invalid fields */
+ if (fields[i].field_id == INVALID_U16)
+ continue;
+
+ for (j = 0; j < unsup_flds_len; j++) {
+ if (fields[i].field_id == unsup_flds[j])
+ return true;
+ }
+ }
+
+ return false;
+}
+
+int cfa_bld_p70_mpc_build_cache_read(uint8_t *cmd, uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields)
+{
+ int rc;
+ struct cfa_mpc_cache_axs_params parms = { 0 };
+
+ /* Parameters check */
+ if (!cmd || !cmd_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_READ_CMD_MAX_FLD,
+ cfa_p70_mpc_read_cmd_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Prepare parameters structure */
+ SET_PARM_VALUE(opaque, uint32_t, CFA_BLD_MPC_READ_CMD_OPAQUE_FLD,
+ fields);
+ SET_PARM_VALUE(tbl_scope, uint8_t, CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD,
+ fields);
+ SET_PARM_VALUE(tbl_index, uint32_t,
+ CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD, fields);
+ SET_PARM_VALUE(data_size, uint8_t, CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD,
+ fields);
+ SET_PARM_VALUE(read.host_address, uint64_t,
+ CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD, fields);
+ rc = SET_PARM_MAPPED_VALUE(tbl_type, enum cfa_hw_table_type,
+ CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD, fields,
+ table_type_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ rc = SET_PARM_MAPPED_VALUE(read.mode, enum cfa_mpc_read_mode,
+ CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD,
+ fields, read_mode_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ return cfa_mpc_build_cache_axs_cmd(CFA_MPC_READ, cmd, cmd_buff_len,
+ &parms);
+}
+
+int cfa_bld_p70_mpc_build_cache_write(uint8_t *cmd, uint32_t *cmd_buff_len,
+ const uint8_t *data,
+ struct cfa_mpc_data_obj *fields)
+{
+ int rc;
+ struct cfa_mpc_cache_axs_params parms = { 0 };
+
+ /* Parameters check */
+ if (!cmd || !cmd_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_WRITE_CMD_MAX_FLD,
+ cfa_p70_mpc_write_cmd_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Prepare parameters structure */
+ SET_PARM_VALUE(opaque, uint32_t, CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD,
+ fields);
+ SET_PARM_VALUE(tbl_scope, uint8_t,
+ CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD, fields);
+ SET_PARM_VALUE(tbl_index, uint32_t,
+ CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD, fields);
+ SET_PARM_VALUE(data_size, uint8_t, CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD,
+ fields);
+ rc = SET_PARM_MAPPED_VALUE(tbl_type, enum cfa_hw_table_type,
+ CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD, fields,
+ table_type_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ parms.write.data_ptr = data;
+ rc = SET_PARM_MAPPED_VALUE(write.mode, enum cfa_mpc_write_mode,
+ CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD,
+ fields, write_mode_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ return cfa_mpc_build_cache_axs_cmd(CFA_MPC_WRITE, cmd, cmd_buff_len,
+ &parms);
+}
+
+int cfa_bld_p70_mpc_build_cache_evict(uint8_t *cmd, uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields)
+{
+ int rc;
+ struct cfa_mpc_cache_axs_params parms = { 0 };
+
+ /* Parameters check */
+ if (!cmd || !cmd_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_INVALIDATE_CMD_MAX_FLD,
+ cfa_p70_mpc_invalidate_cmd_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Prepare parameters structure */
+ SET_PARM_VALUE(opaque, uint32_t, CFA_BLD_MPC_INVALIDATE_CMD_OPAQUE_FLD,
+ fields);
+ SET_PARM_VALUE(tbl_scope, uint8_t,
+ CFA_BLD_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD, fields);
+ SET_PARM_VALUE(tbl_index, uint32_t,
+ CFA_BLD_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD, fields);
+ SET_PARM_VALUE(data_size, uint8_t,
+ CFA_BLD_MPC_INVALIDATE_CMD_DATA_SIZE_FLD, fields);
+ rc = SET_PARM_MAPPED_VALUE(tbl_type, enum cfa_hw_table_type,
+ CFA_BLD_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD,
+ fields, table_type_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ rc = SET_PARM_MAPPED_VALUE(evict.mode, enum cfa_mpc_evict_mode,
+ CFA_BLD_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD,
+ fields, evict_mode_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ return cfa_mpc_build_cache_axs_cmd(CFA_MPC_INVALIDATE, cmd,
+ cmd_buff_len, &parms);
+}
+
+int cfa_bld_p70_mpc_build_cache_rdclr(uint8_t *cmd, uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields)
+{
+ int rc;
+ struct cfa_mpc_cache_axs_params parms = { 0 };
+
+ /* Parameters check */
+ if (!cmd || !cmd_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_READ_CLR_CMD_MAX_FLD,
+ cfa_p70_mpc_read_clr_cmd_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Prepare parameters structure */
+ SET_PARM_VALUE(opaque, uint32_t, CFA_BLD_MPC_READ_CLR_CMD_OPAQUE_FLD,
+ fields);
+ SET_PARM_VALUE(tbl_scope, uint8_t,
+ CFA_BLD_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD, fields);
+ SET_PARM_VALUE(tbl_index, uint32_t,
+ CFA_BLD_MPC_READ_CLR_CMD_TABLE_INDEX_FLD, fields);
+ SET_PARM_VALUE(data_size, uint8_t,
+ CFA_BLD_MPC_READ_CLR_CMD_DATA_SIZE_FLD, fields);
+ SET_PARM_VALUE(read.host_address, uint64_t,
+ CFA_BLD_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD, fields);
+ rc = SET_PARM_MAPPED_VALUE(tbl_type, enum cfa_hw_table_type,
+ CFA_BLD_MPC_READ_CLR_CMD_TABLE_TYPE_FLD,
+ fields, table_type_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ SET_PARM_VALUE(read.clear_mask, uint16_t,
+ CFA_BLD_MPC_READ_CLR_CMD_CLEAR_MASK_FLD, fields);
+ rc = SET_PARM_MAPPED_VALUE(read.mode, enum cfa_mpc_read_mode,
+ CFA_BLD_MPC_READ_CLR_CMD_CACHE_OPTION_FLD,
+ fields, read_mode_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ return cfa_mpc_build_cache_axs_cmd(CFA_MPC_READ_CLR, cmd, cmd_buff_len,
+ &parms);
+}
+
+int cfa_bld_p70_mpc_build_em_search(uint8_t *cmd, uint32_t *cmd_buff_len,
+ uint8_t *em_entry,
+ struct cfa_mpc_data_obj *fields)
+{
+ struct cfa_mpc_em_op_params parms = { 0 };
+ uint16_t unsupported_fields[] = {
+ CFA_BLD_MPC_EM_SEARCH_CMD_CACHE_OPTION_FLD,
+ };
+
+ /* Parameters check */
+ if (!cmd || !cmd_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_SEARCH_CMD_MAX_FLD,
+ unsupported_fields,
+ ARRAY_SIZE(unsupported_fields))) {
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_EM_SEARCH_CMD_MAX_FLD,
+ cfa_p70_mpc_em_search_cmd_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Prepare parameters structure */
+ SET_PARM_VALUE(opaque, uint32_t, CFA_BLD_MPC_EM_SEARCH_CMD_OPAQUE_FLD,
+ fields);
+ SET_PARM_VALUE(tbl_scope, uint8_t,
+ CFA_BLD_MPC_EM_SEARCH_CMD_TABLE_SCOPE_FLD, fields);
+
+ parms.search.em_entry = em_entry;
+ SET_PARM_VALUE(search.data_size, uint8_t,
+ CFA_BLD_MPC_EM_SEARCH_CMD_DATA_SIZE_FLD, fields);
+
+ return cfa_mpc_build_em_op_cmd(CFA_MPC_EM_SEARCH, cmd, cmd_buff_len,
+ &parms);
+}
+
+int cfa_bld_p70_mpc_build_em_insert(uint8_t *cmd, uint32_t *cmd_buff_len,
+ const uint8_t *em_entry,
+ struct cfa_mpc_data_obj *fields)
+{
+ struct cfa_mpc_em_op_params parms = { 0 };
+ uint16_t unsupported_fields[] = {
+ CFA_BLD_MPC_EM_INSERT_CMD_WRITE_THROUGH_FLD,
+ CFA_BLD_MPC_EM_INSERT_CMD_CACHE_OPTION_FLD,
+ CFA_BLD_MPC_EM_INSERT_CMD_CACHE_OPTION2_FLD,
+ };
+
+ /* Parameters check */
+ if (!cmd || !cmd_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD,
+ unsupported_fields,
+ ARRAY_SIZE(unsupported_fields))) {
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD,
+ cfa_p70_mpc_em_insert_cmd_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Prepare parameters structure */
+ SET_PARM_VALUE(opaque, uint32_t, CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD,
+ fields);
+ SET_PARM_VALUE(tbl_scope, uint8_t,
+ CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD, fields);
+
+ parms.insert.em_entry = (const uint8_t *)em_entry;
+ SET_PARM_VALUE(insert.replace, uint8_t,
+ CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD, fields);
+ SET_PARM_VALUE(insert.entry_idx, uint32_t,
+ CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD, fields);
+ SET_PARM_VALUE(insert.bucket_idx, uint32_t,
+ CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD, fields);
+ SET_PARM_VALUE(insert.data_size, uint8_t,
+ CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD, fields);
+
+ return cfa_mpc_build_em_op_cmd(CFA_MPC_EM_INSERT, cmd, cmd_buff_len,
+ &parms);
+}
+
+int cfa_bld_p70_mpc_build_em_delete(uint8_t *cmd, uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields)
+{
+ struct cfa_mpc_em_op_params parms = { 0 };
+ uint16_t unsupported_fields[] = {
+ CFA_BLD_MPC_EM_DELETE_CMD_WRITE_THROUGH_FLD,
+ CFA_BLD_MPC_EM_DELETE_CMD_CACHE_OPTION_FLD,
+ CFA_BLD_MPC_EM_DELETE_CMD_CACHE_OPTION2_FLD,
+ };
+
+ /* Parameters check */
+ if (!cmd || !cmd_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD,
+ unsupported_fields,
+ ARRAY_SIZE(unsupported_fields))) {
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD,
+ cfa_p70_mpc_em_delete_cmd_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Prepare parameters structure */
+ SET_PARM_VALUE(opaque, uint32_t, CFA_BLD_MPC_EM_DELETE_CMD_OPAQUE_FLD,
+ fields);
+ SET_PARM_VALUE(tbl_scope, uint8_t,
+ CFA_BLD_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD, fields);
+
+ SET_PARM_VALUE(del.entry_idx, uint32_t,
+ CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD, fields);
+ SET_PARM_VALUE(del.bucket_idx, uint32_t,
+ CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD, fields);
+
+ return cfa_mpc_build_em_op_cmd(CFA_MPC_EM_DELETE, cmd, cmd_buff_len,
+ &parms);
+}
+
+int cfa_bld_p70_mpc_build_em_chain(uint8_t *cmd, uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields)
+{
+ struct cfa_mpc_em_op_params parms = { 0 };
+ uint16_t unsupported_fields[] = {
+ CFA_BLD_MPC_EM_CHAIN_CMD_WRITE_THROUGH_FLD,
+ CFA_BLD_MPC_EM_CHAIN_CMD_CACHE_OPTION_FLD,
+ CFA_BLD_MPC_EM_CHAIN_CMD_CACHE_OPTION2_FLD,
+ };
+
+ /* Parameters check */
+ if (!cmd || !cmd_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_CHAIN_CMD_MAX_FLD,
+ unsupported_fields,
+ ARRAY_SIZE(unsupported_fields))) {
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_EM_CHAIN_CMD_MAX_FLD,
+ cfa_p70_mpc_em_chain_cmd_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Prepare parameters structure */
+ SET_PARM_VALUE(opaque, uint32_t, CFA_BLD_MPC_EM_CHAIN_CMD_OPAQUE_FLD,
+ fields);
+ SET_PARM_VALUE(tbl_scope, uint8_t,
+ CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_SCOPE_FLD, fields);
+
+ SET_PARM_VALUE(chain.entry_idx, uint32_t,
+ CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_INDEX_FLD, fields);
+ SET_PARM_VALUE(chain.bucket_idx, uint32_t,
+ CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_INDEX2_FLD, fields);
+
+ return cfa_mpc_build_em_op_cmd(CFA_MPC_EM_CHAIN, cmd, cmd_buff_len,
+ &parms);
+}
+
+int cfa_bld_p70_mpc_parse_cache_read(uint8_t *resp, uint32_t resp_buff_len,
+ uint8_t *rd_data, uint32_t rd_data_len,
+ struct cfa_mpc_data_obj *fields)
+{
+ int rc;
+ struct cfa_mpc_cache_axs_result result = { 0 };
+ uint16_t unsupported_fields[] = {
+ CFA_BLD_MPC_READ_CMP_TYPE_FLD,
+ CFA_BLD_MPC_READ_CMP_MP_CLIENT_FLD,
+ CFA_BLD_MPC_READ_CMP_DMA_LENGTH_FLD,
+ CFA_BLD_MPC_READ_CMP_OPCODE_FLD,
+ CFA_BLD_MPC_READ_CMP_V_FLD,
+ CFA_BLD_MPC_READ_CMP_TABLE_TYPE_FLD,
+ CFA_BLD_MPC_READ_CMP_TABLE_SCOPE_FLD,
+ CFA_BLD_MPC_READ_CMP_TABLE_INDEX_FLD,
+ };
+
+ /* Parameters check */
+ if (!resp || !resp_buff_len || !fields || !rd_data) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (has_unsupported_fields(fields, CFA_BLD_MPC_READ_CMP_MAX_FLD,
+ unsupported_fields,
+ ARRAY_SIZE(unsupported_fields))) {
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_READ_CMP_MAX_FLD,
+ cfa_p70_mpc_read_cmp_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Retrieve response parameters */
+ result.rd_data = rd_data;
+ result.data_len = rd_data_len;
+ rc = cfa_mpc_parse_cache_axs_resp(CFA_MPC_READ, resp, resp_buff_len,
+ &result);
+ if (rc)
+ return rc;
+
+ GET_RESP_VALUE(opaque, CFA_BLD_MPC_READ_CMP_OPAQUE_FLD, fields);
+ GET_RESP_VALUE(error_data, CFA_BLD_MPC_READ_CMP_HASH_MSB_FLD, fields);
+ rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_READ_CMP_STATUS_FLD,
+ fields, status_code_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int cfa_bld_p70_mpc_parse_cache_write(uint8_t *resp, uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields)
+{
+ int rc;
+ struct cfa_mpc_cache_axs_result result = { 0 };
+ uint16_t unsupported_fields[] = {
+ CFA_BLD_MPC_WRITE_CMP_TYPE_FLD,
+ CFA_BLD_MPC_WRITE_CMP_MP_CLIENT_FLD,
+ CFA_BLD_MPC_WRITE_CMP_OPCODE_FLD,
+ CFA_BLD_MPC_WRITE_CMP_V_FLD,
+ CFA_BLD_MPC_WRITE_CMP_TABLE_TYPE_FLD,
+ CFA_BLD_MPC_WRITE_CMP_TABLE_SCOPE_FLD,
+ CFA_BLD_MPC_WRITE_CMP_TABLE_INDEX_FLD,
+ };
+
+ /* Parameters check */
+ if (!resp || !resp_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (has_unsupported_fields(fields, CFA_BLD_MPC_WRITE_CMP_MAX_FLD,
+ unsupported_fields,
+ ARRAY_SIZE(unsupported_fields))) {
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_WRITE_CMP_MAX_FLD,
+ cfa_p70_mpc_write_cmp_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Retrieve response parameters */
+ rc = cfa_mpc_parse_cache_axs_resp(CFA_MPC_WRITE, resp, resp_buff_len,
+ &result);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ GET_RESP_VALUE(opaque, CFA_BLD_MPC_WRITE_CMP_OPAQUE_FLD, fields);
+ GET_RESP_VALUE(error_data, CFA_BLD_MPC_WRITE_CMP_HASH_MSB_FLD, fields);
+ rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_WRITE_CMP_STATUS_FLD,
+ fields, status_code_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int cfa_bld_p70_mpc_parse_cache_evict(uint8_t *resp, uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields)
+{
+ int rc;
+ struct cfa_mpc_cache_axs_result result = { 0 };
+ uint16_t unsupported_fields[] = {
+ CFA_BLD_MPC_INVALIDATE_CMP_TYPE_FLD,
+ CFA_BLD_MPC_INVALIDATE_CMP_MP_CLIENT_FLD,
+ CFA_BLD_MPC_INVALIDATE_CMP_OPCODE_FLD,
+ CFA_BLD_MPC_INVALIDATE_CMP_V_FLD,
+ CFA_BLD_MPC_INVALIDATE_CMP_TABLE_TYPE_FLD,
+ CFA_BLD_MPC_INVALIDATE_CMP_TABLE_SCOPE_FLD,
+ CFA_BLD_MPC_INVALIDATE_CMP_TABLE_INDEX_FLD,
+ };
+
+ /* Parameters check */
+ if (!resp || !resp_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (has_unsupported_fields(fields, CFA_BLD_MPC_INVALIDATE_CMP_MAX_FLD,
+ unsupported_fields,
+ ARRAY_SIZE(unsupported_fields))) {
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_INVALIDATE_CMP_MAX_FLD,
+ cfa_p70_mpc_invalidate_cmp_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Retrieve response parameters */
+ rc = cfa_mpc_parse_cache_axs_resp(CFA_MPC_INVALIDATE, resp,
+ resp_buff_len, &result);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ GET_RESP_VALUE(opaque, CFA_BLD_MPC_INVALIDATE_CMP_OPAQUE_FLD, fields);
+ GET_RESP_VALUE(error_data, CFA_BLD_MPC_INVALIDATE_CMP_HASH_MSB_FLD,
+ fields);
+ rc = GET_RESP_MAPPED_VALUE(status,
+ CFA_BLD_MPC_INVALIDATE_CMP_STATUS_FLD,
+ fields, status_code_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int cfa_bld_p70_mpc_parse_cache_rdclr(uint8_t *resp, uint32_t resp_buff_len,
+ uint8_t *rd_data, uint32_t rd_data_len,
+ struct cfa_mpc_data_obj *fields)
+{
+ int rc;
+ struct cfa_mpc_cache_axs_result result = { 0 };
+ uint16_t unsupported_fields[] = {
+ CFA_BLD_MPC_READ_CMP_TYPE_FLD,
+ CFA_BLD_MPC_READ_CMP_MP_CLIENT_FLD,
+ CFA_BLD_MPC_READ_CMP_DMA_LENGTH_FLD,
+ CFA_BLD_MPC_READ_CMP_OPCODE_FLD,
+ CFA_BLD_MPC_READ_CMP_V_FLD,
+ CFA_BLD_MPC_READ_CMP_TABLE_TYPE_FLD,
+ CFA_BLD_MPC_READ_CMP_TABLE_SCOPE_FLD,
+ CFA_BLD_MPC_READ_CMP_TABLE_INDEX_FLD,
+ };
+
+ /* Parameters check */
+ if (!resp || !resp_buff_len || !fields || !rd_data) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (has_unsupported_fields(fields, CFA_BLD_MPC_READ_CMP_MAX_FLD,
+ unsupported_fields,
+ ARRAY_SIZE(unsupported_fields))) {
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_READ_CMP_MAX_FLD,
+ cfa_p70_mpc_read_cmp_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Retrieve response parameters */
+ result.rd_data = rd_data;
+ result.data_len = rd_data_len;
+ rc = cfa_mpc_parse_cache_axs_resp(CFA_MPC_READ_CLR, resp, resp_buff_len,
+ &result);
+ if (rc)
+ return rc;
+
+ GET_RESP_VALUE(opaque, CFA_BLD_MPC_READ_CMP_OPAQUE_FLD, fields);
+ GET_RESP_VALUE(error_data, CFA_BLD_MPC_READ_CMP_HASH_MSB_FLD, fields);
+ rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_READ_CMP_STATUS_FLD,
+ fields, status_code_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int cfa_bld_p70_mpc_parse_em_search(uint8_t *resp, uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields)
+{
+ int rc;
+ struct cfa_mpc_em_op_result result = { 0 };
+ uint16_t unsupported_fields[] = {
+ CFA_BLD_MPC_EM_SEARCH_CMP_TYPE_FLD,
+ CFA_BLD_MPC_EM_SEARCH_CMP_MP_CLIENT_FLD,
+ CFA_BLD_MPC_EM_SEARCH_CMP_OPCODE_FLD,
+ CFA_BLD_MPC_EM_SEARCH_CMP_V1_FLD,
+ CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_SCOPE_FLD,
+ CFA_BLD_MPC_EM_SEARCH_CMP_V2_FLD,
+ };
+
+ /* Parameters check */
+ if (!resp || !resp_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_SEARCH_CMP_MAX_FLD,
+ unsupported_fields,
+ ARRAY_SIZE(unsupported_fields))) {
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_EM_SEARCH_CMP_MAX_FLD,
+ cfa_p70_mpc_em_search_cmp_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Retrieve response parameters */
+ rc = cfa_mpc_parse_em_op_resp(CFA_MPC_EM_SEARCH, resp, resp_buff_len,
+ &result);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ GET_RESP_VALUE(opaque, CFA_BLD_MPC_EM_SEARCH_CMP_OPAQUE_FLD, fields);
+ GET_RESP_VALUE(error_data, CFA_BLD_MPC_EM_SEARCH_CMP_HASH_MSB_FLD,
+ fields);
+ rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_EM_SEARCH_CMP_STATUS_FLD,
+ fields, status_code_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ GET_RESP_VALUE(search.bucket_num, CFA_BLD_MPC_EM_SEARCH_CMP_BKT_NUM_FLD,
+ fields);
+ GET_RESP_VALUE(search.num_entries,
+ CFA_BLD_MPC_EM_SEARCH_CMP_NUM_ENTRIES_FLD, fields);
+ GET_RESP_VALUE(search.hash_msb, CFA_BLD_MPC_EM_SEARCH_CMP_HASH_MSB_FLD,
+ fields);
+ GET_RESP_VALUE(search.match_idx,
+ CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_INDEX_FLD, fields);
+ GET_RESP_VALUE(search.bucket_idx,
+ CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_INDEX2_FLD, fields);
+
+ return 0;
+}
+
+int cfa_bld_p70_mpc_parse_em_insert(uint8_t *resp, uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields)
+{
+ int rc;
+ struct cfa_mpc_em_op_result result = { 0 };
+ uint16_t unsupported_fields[] = {
+ CFA_BLD_MPC_EM_INSERT_CMP_TYPE_FLD,
+ CFA_BLD_MPC_EM_INSERT_CMP_MP_CLIENT_FLD,
+ CFA_BLD_MPC_EM_INSERT_CMP_OPCODE_FLD,
+ CFA_BLD_MPC_EM_INSERT_CMP_V1_FLD,
+ CFA_BLD_MPC_EM_INSERT_CMP_TABLE_SCOPE_FLD,
+ CFA_BLD_MPC_EM_INSERT_CMP_V2_FLD,
+ CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX_FLD,
+ CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX2_FLD,
+ };
+
+ /* Parameters check */
+ if (!resp || !resp_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD,
+ unsupported_fields,
+ ARRAY_SIZE(unsupported_fields))) {
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD,
+ cfa_p70_mpc_em_insert_cmp_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Retrieve response parameters */
+ rc = cfa_mpc_parse_em_op_resp(CFA_MPC_EM_INSERT, resp, resp_buff_len,
+ &result);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ GET_RESP_VALUE(opaque, CFA_BLD_MPC_EM_INSERT_CMP_OPAQUE_FLD, fields);
+ GET_RESP_VALUE(error_data, CFA_BLD_MPC_EM_INSERT_CMP_HASH_MSB_FLD,
+ fields);
+ rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD,
+ fields, status_code_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ GET_RESP_VALUE(insert.bucket_num, CFA_BLD_MPC_EM_INSERT_CMP_BKT_NUM_FLD,
+ fields);
+ GET_RESP_VALUE(insert.num_entries,
+ CFA_BLD_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD, fields);
+ GET_RESP_VALUE(insert.hash_msb, CFA_BLD_MPC_EM_INSERT_CMP_HASH_MSB_FLD,
+ fields);
+ GET_RESP_VALUE(insert.match_idx,
+ CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX4_FLD, fields);
+ GET_RESP_VALUE(insert.bucket_idx,
+ CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD, fields);
+ GET_RESP_VALUE(insert.replaced,
+ CFA_BLD_MPC_EM_INSERT_CMP_REPLACED_ENTRY_FLD, fields);
+ GET_RESP_VALUE(insert.chain_update,
+ CFA_BLD_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD, fields);
+
+ return 0;
+}
+
+int cfa_bld_p70_mpc_parse_em_delete(uint8_t *resp, uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields)
+{
+ int rc;
+ struct cfa_mpc_em_op_result result = { 0 };
+ uint16_t unsupported_fields[] = {
+ CFA_BLD_MPC_EM_DELETE_CMP_TYPE_FLD,
+ CFA_BLD_MPC_EM_DELETE_CMP_MP_CLIENT_FLD,
+ CFA_BLD_MPC_EM_DELETE_CMP_OPCODE_FLD,
+ CFA_BLD_MPC_EM_DELETE_CMP_V1_FLD,
+ CFA_BLD_MPC_EM_DELETE_CMP_TABLE_SCOPE_FLD,
+ CFA_BLD_MPC_EM_DELETE_CMP_V2_FLD,
+ CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX_FLD,
+ CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX2_FLD,
+ };
+
+ /* Parameters check */
+ if (!resp || !resp_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD,
+ unsupported_fields,
+ ARRAY_SIZE(unsupported_fields))) {
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD,
+ cfa_p70_mpc_em_delete_cmp_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Retrieve response parameters */
+ rc = cfa_mpc_parse_em_op_resp(CFA_MPC_EM_DELETE, resp, resp_buff_len,
+ &result);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ GET_RESP_VALUE(opaque, CFA_BLD_MPC_EM_DELETE_CMP_OPAQUE_FLD, fields);
+ GET_RESP_VALUE(error_data, CFA_BLD_MPC_EM_DELETE_CMP_HASH_MSB_FLD,
+ fields);
+ rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD,
+ fields, status_code_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ GET_RESP_VALUE(del.new_tail, CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX4_FLD,
+ fields);
+ GET_RESP_VALUE(del.prev_tail,
+ CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD, fields);
+ GET_RESP_VALUE(del.chain_update,
+ CFA_BLD_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD, fields);
+ GET_RESP_VALUE(del.bucket_num, CFA_BLD_MPC_EM_DELETE_CMP_BKT_NUM_FLD,
+ fields);
+ GET_RESP_VALUE(del.num_entries,
+ CFA_BLD_MPC_EM_DELETE_CMP_NUM_ENTRIES_FLD, fields);
+ return 0;
+}
+
+int cfa_bld_p70_mpc_parse_em_chain(uint8_t *resp, uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields)
+{
+ int rc;
+ struct cfa_mpc_em_op_result result = { 0 };
+ uint16_t unsupported_fields[] = {
+ CFA_BLD_MPC_EM_CHAIN_CMP_TYPE_FLD,
+ CFA_BLD_MPC_EM_CHAIN_CMP_MP_CLIENT_FLD,
+ CFA_BLD_MPC_EM_CHAIN_CMP_OPCODE_FLD,
+ CFA_BLD_MPC_EM_CHAIN_CMP_V1_FLD,
+ CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_SCOPE_FLD,
+ CFA_BLD_MPC_EM_CHAIN_CMP_V2_FLD,
+ CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX_FLD,
+ CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX2_FLD,
+ };
+
+ /* Parameters check */
+ if (!resp || !resp_buff_len || !fields) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_CHAIN_CMP_MAX_FLD,
+ unsupported_fields,
+ ARRAY_SIZE(unsupported_fields))) {
+ ASSERT_RETURN(-ENOTSUP);
+ return -ENOTSUP;
+ }
+
+ if (!fields_valid(fields, CFA_BLD_MPC_EM_CHAIN_CMP_MAX_FLD,
+ cfa_p70_mpc_em_chain_cmp_gbl_to_dev)) {
+ ASSERT_RETURN(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Retrieve response parameters */
+ rc = cfa_mpc_parse_em_op_resp(CFA_MPC_EM_CHAIN, resp, resp_buff_len,
+ &result);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ GET_RESP_VALUE(opaque, CFA_BLD_MPC_EM_CHAIN_CMP_OPAQUE_FLD, fields);
+ GET_RESP_VALUE(error_data, CFA_BLD_MPC_EM_CHAIN_CMP_HASH_MSB_FLD,
+ fields);
+ rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_EM_CHAIN_CMP_STATUS_FLD,
+ fields, status_code_map);
+ if (rc) {
+ ASSERT_RETURN(rc);
+ return rc;
+ }
+
+ GET_RESP_VALUE(chain.bucket_num, CFA_BLD_MPC_EM_CHAIN_CMP_BKT_NUM_FLD,
+ fields);
+ GET_RESP_VALUE(chain.num_entries,
+ CFA_BLD_MPC_EM_CHAIN_CMP_NUM_ENTRIES_FLD, fields);
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,83 @@
+/****************************************************************************
+ * Copyright(c) 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld_p70_host_mpc_wrapper.h
+ *
+ * @brief CFA Phase 7.0 specific MPC Builder Wrapper functions
+ */
+
+#ifndef _CFA_BLD_P70_HOST_MPC_WRAPPER_H_
+#define _CFA_BLD_P70_HOST_MPC_WRAPPER_H_
+
+#include "cfa_bld_mpcops.h"
+/**
+ * MPC Cache operation command build APIs
+ */
+int cfa_bld_p70_mpc_build_cache_read(uint8_t *cmd, uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+int cfa_bld_p70_mpc_build_cache_write(uint8_t *cmd, uint32_t *cmd_buff_len,
+ const uint8_t *data,
+ struct cfa_mpc_data_obj *fields);
+
+int cfa_bld_p70_mpc_build_cache_evict(uint8_t *cmd, uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+int cfa_bld_p70_mpc_build_cache_rdclr(uint8_t *cmd, uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+/**
+ * MPC EM operation command build APIs
+ */
+int cfa_bld_p70_mpc_build_em_search(uint8_t *cmd, uint32_t *cmd_buff_len,
+ uint8_t *em_entry,
+ struct cfa_mpc_data_obj *fields);
+
+int cfa_bld_p70_mpc_build_em_insert(uint8_t *cmd, uint32_t *cmd_buff_len,
+ const uint8_t *em_entry,
+ struct cfa_mpc_data_obj *fields);
+
+int cfa_bld_p70_mpc_build_em_delete(uint8_t *cmd, uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+int cfa_bld_p70_mpc_build_em_chain(uint8_t *cmd, uint32_t *cmd_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+/**
+ * MPC Cache operation completion parse APIs
+ */
+int cfa_bld_p70_mpc_parse_cache_read(uint8_t *resp, uint32_t resp_buff_len,
+ uint8_t *rd_data, uint32_t rd_data_len,
+ struct cfa_mpc_data_obj *fields);
+
+int cfa_bld_p70_mpc_parse_cache_write(uint8_t *resp, uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+int cfa_bld_p70_mpc_parse_cache_evict(uint8_t *resp, uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+int cfa_bld_p70_mpc_parse_cache_rdclr(uint8_t *resp, uint32_t resp_buff_len,
+ uint8_t *rd_data, uint32_t rd_data_len,
+ struct cfa_mpc_data_obj *fields);
+
+/**
+ * MPC EM operation completion parse APIs
+ */
+int cfa_bld_p70_mpc_parse_em_search(uint8_t *resp, uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+int cfa_bld_p70_mpc_parse_em_insert(uint8_t *resp, uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+int cfa_bld_p70_mpc_parse_em_delete(uint8_t *resp, uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+int cfa_bld_p70_mpc_parse_em_chain(uint8_t *resp, uint32_t resp_buff_len,
+ struct cfa_mpc_data_obj *fields);
+
+#endif /* _CFA_BLD_P70_HOST_MPC_WRAPPER_H_ */
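
The declarations above pair each MPC command builder with a completion parser; all of them consume an array of struct cfa_mpc_data_obj entries indexed by the global CFA_BLD_MPC_*_FLD identifiers, with unused slots marked INVALID_U16. The sketch below is illustrative only and is not part of the patch; it assumes struct cfa_mpc_data_obj carries a 64-bit val member alongside field_id.

/* Minimal usage sketch (not part of this patch): build a cache READ
 * command with the wrapper API declared above. Assumes a 64-bit "val"
 * member in struct cfa_mpc_data_obj.
 */
static int example_build_cache_read(uint64_t host_dma_addr, uint8_t *cmd,
				    uint32_t *cmd_len)
{
	struct cfa_mpc_data_obj fields[CFA_BLD_MPC_READ_CMD_MAX_FLD];
	int i;

	/* Mark every slot unused; the builder skips INVALID_U16 entries. */
	for (i = 0; i < CFA_BLD_MPC_READ_CMD_MAX_FLD; i++)
		fields[i].field_id = INVALID_U16;

	/* Read two 32B words from index 0x100 of table scope 0, DMAed to
	 * host_dma_addr (which must be 4B aligned per HOST_ADDRESS rules).
	 */
	fields[CFA_BLD_MPC_READ_CMD_OPAQUE_FLD].field_id =
		CFA_BLD_MPC_READ_CMD_OPAQUE_FLD;
	fields[CFA_BLD_MPC_READ_CMD_OPAQUE_FLD].val = 0x1234;
	fields[CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD].field_id =
		CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD;
	fields[CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD].val = 0;
	fields[CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD].field_id =
		CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD;
	fields[CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD].val = 0x100;
	fields[CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD].field_id =
		CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD;
	fields[CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD].val = 2;	/* 2 x 32B */
	fields[CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD].field_id =
		CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD;
	fields[CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD].val = host_dma_addr;

	/* TABLE_TYPE and CACHE_OPTION are left INVALID_U16 here for
	 * brevity; a real caller would normally supply them as well.
	 */
	return cfa_bld_p70_mpc_build_cache_read(cmd, cmd_len, fields);
}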
new file mode 100644
@@ -0,0 +1,56 @@
+/****************************************************************************
+ * Copyright(c) 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld_p70_mpcops.c
+ *
+ * @brief CFA Phase 7.0 specific Builder library MPC ops api
+ */
+
+#define COMP_ID BLD
+
+#include <errno.h>
+#include <string.h>
+#include "cfa_trace.h"
+#include "cfa_bld.h"
+#include "host/cfa_bld_mpcops.h"
+#include "cfa_bld_p70_host_mpc_wrapper.h"
+#include "cfa_bld_p70_mpcops.h"
+
+const struct cfa_bld_mpcops cfa_bld_p70_mpcops = {
+ /* Build command apis */
+ .cfa_bld_mpc_build_cache_read = cfa_bld_p70_mpc_build_cache_read,
+ .cfa_bld_mpc_build_cache_write = cfa_bld_p70_mpc_build_cache_write,
+ .cfa_bld_mpc_build_cache_evict = cfa_bld_p70_mpc_build_cache_evict,
+ .cfa_bld_mpc_build_cache_read_clr = cfa_bld_p70_mpc_build_cache_rdclr,
+ .cfa_bld_mpc_build_em_search = cfa_bld_p70_mpc_build_em_search,
+ .cfa_bld_mpc_build_em_insert = cfa_bld_p70_mpc_build_em_insert,
+ .cfa_bld_mpc_build_em_delete = cfa_bld_p70_mpc_build_em_delete,
+ .cfa_bld_mpc_build_em_chain = cfa_bld_p70_mpc_build_em_chain,
+ /* Parse response apis */
+ .cfa_bld_mpc_parse_cache_read = cfa_bld_p70_mpc_parse_cache_read,
+ .cfa_bld_mpc_parse_cache_write = cfa_bld_p70_mpc_parse_cache_write,
+ .cfa_bld_mpc_parse_cache_evict = cfa_bld_p70_mpc_parse_cache_evict,
+ .cfa_bld_mpc_parse_cache_read_clr = cfa_bld_p70_mpc_parse_cache_rdclr,
+ .cfa_bld_mpc_parse_em_search = cfa_bld_p70_mpc_parse_em_search,
+ .cfa_bld_mpc_parse_em_insert = cfa_bld_p70_mpc_parse_em_insert,
+ .cfa_bld_mpc_parse_em_delete = cfa_bld_p70_mpc_parse_em_delete,
+ .cfa_bld_mpc_parse_em_chain = cfa_bld_p70_mpc_parse_em_chain,
+};
+
+int cfa_bld_p70_mpc_bind(enum cfa_ver hw_ver, struct cfa_bld_mpcinfo *mpcinfo)
+{
+ if (hw_ver != CFA_P70)
+ return -EINVAL;
+
+ if (!mpcinfo)
+ return -EINVAL;
+
+ mpcinfo->mpcops = &cfa_bld_p70_mpcops;
+
+ return 0;
+}
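
A device layer that has identified a P7-class adapter would typically bind this ops table once and then reach every builder and parser through it; the sketch below is illustrative only and is not part of the patch.

/* Illustrative only: bind the P70 ops table and use it indirectly. */
static int example_bind_and_build(uint8_t *cmd, uint32_t *cmd_len,
				  struct cfa_mpc_data_obj *fields)
{
	struct cfa_bld_mpcinfo mpcinfo = { 0 };
	int rc;

	rc = cfa_bld_p70_mpc_bind(CFA_P70, &mpcinfo);
	if (rc)
		return rc;

	/* Every MPC builder/parser is now reachable via mpcinfo.mpcops. */
	return mpcinfo.mpcops->cfa_bld_mpc_build_cache_read(cmd, cmd_len,
							    fields);
}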
new file mode 100644
@@ -0,0 +1,22 @@
+/****************************************************************************
+ * Copyright(c) 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_bld_p70_mpcops.h
+ *
+ * @brief CFA Phase 7.0 specific Builder library MPC ops api
+ */
+
+#ifndef _CFA_BLD_P70_MPCOPS_H_
+#define _CFA_BLD_P70_MPCOPS_H_
+
+#include "cfa_types.h"
+#include "cfa_bld_mpcops.h"
+
+int cfa_bld_p70_mpc_bind(enum cfa_ver hw_ver, struct cfa_bld_mpcinfo *mpcinfo);
+
+#endif /* _CFA_BLD_P70_MPCOPS_H_ */
new file mode 100644
@@ -0,0 +1,1177 @@
+/****************************************************************************
+ * Copyright(c) 2001-2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * Name: cfa_p70_mpc_field_ids.h
+ *
+ * Description: MPC CFA command and completion field enumeration definitions
+ *
+ * Date: 09/29/22 11:50:38
+ *
+ * Note: This file is script generated by ./cfa_header_gen.py.
+ * DO NOT modify this file manually !!!!
+ *
+ ****************************************************************************/
+#ifndef _CFA_P70_MPC_FIELD_IDS_H_
+#define _CFA_P70_MPC_FIELD_IDS_H_
+
+/* clang-format off */
+
+/**
+ * Field IDS for READ_CMD: This command reads 1-4 consecutive 32B words
+ * from the specified address within a table scope.
+ */
+enum cfa_p70_mpc_read_cmd_fields {
+ CFA_P70_MPC_READ_CMD_OPAQUE_FLD = 0,
+ /* This value selects the table type to be acted upon. */
+ CFA_P70_MPC_READ_CMD_TABLE_TYPE_FLD = 1,
+ /* Table scope to access. */
+ CFA_P70_MPC_READ_CMD_TABLE_SCOPE_FLD = 2,
+ /*
+ * Number of 32B units in access. If value is outside the range [1, 4],
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ CFA_P70_MPC_READ_CMD_DATA_SIZE_FLD = 3,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_P70_MPC_READ_CMD_CACHE_OPTION_FLD = 4,
+ /*
+ * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE):
+ */
+ CFA_P70_MPC_READ_CMD_TABLE_INDEX_FLD = 5,
+ /*
+ * The 64-bit host address to which to write the DMA data returned in
+ * the completion. The data will be written to the same function as the
+ * one that owns the SQ this command is read from. DATA_SIZE determines
+ * the maximum size of the data written. If HOST_ADDRESS[1:0] is not 0,
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ CFA_P70_MPC_READ_CMD_HOST_ADDRESS_FLD = 6,
+ CFA_P70_MPC_READ_CMD_MAX_FLD = 7,
+};
+
+/**
+ * Field IDS for WRITE_CMD: This command writes 1-4 consecutive 32B
+ * words to the specified address within a table scope.
+ */
+enum cfa_p70_mpc_write_cmd_fields {
+ CFA_P70_MPC_WRITE_CMD_OPAQUE_FLD = 0,
+ /* This value selects the table type to be acted upon. */
+ CFA_P70_MPC_WRITE_CMD_TABLE_TYPE_FLD = 1,
+ /*
+ * Sets the OPTION field on the cache interface to use write-through for
+ * EM entry writes while processing EM_INSERT commands. For all other
+ * cases (including EM_INSERT bucket writes), the OPTION field is set by
+ * the CACHE_OPTION and CACHE_OPTION2 fields.
+ */
+ CFA_P70_MPC_WRITE_CMD_WRITE_THROUGH_FLD = 2,
+ /* Table scope to access. */
+ CFA_P70_MPC_WRITE_CMD_TABLE_SCOPE_FLD = 3,
+ /*
+ * Number of 32B units in access. If value is outside the range [1, 4],
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ CFA_P70_MPC_WRITE_CMD_DATA_SIZE_FLD = 4,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_P70_MPC_WRITE_CMD_CACHE_OPTION_FLD = 5,
+ /*
+ * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE):
+ */
+ CFA_P70_MPC_WRITE_CMD_TABLE_INDEX_FLD = 6,
+ CFA_P70_MPC_WRITE_CMD_MAX_FLD = 7,
+};
+
+/**
+ * Field IDS for READ_CLR_CMD: This command performs a read-modify-write
+ * to the specified 32B address using a 16b mask that specifies up to 16
+ * 16b words to clear before writing the data back. It returns the 32B
+ * data word read from cache (not the value written after the clear
+ * operation).
+ */
+enum cfa_p70_mpc_read_clr_cmd_fields {
+ CFA_P70_MPC_READ_CLR_CMD_OPAQUE_FLD = 0,
+ /* This value selects the table type to be acted upon. */
+ CFA_P70_MPC_READ_CLR_CMD_TABLE_TYPE_FLD = 1,
+ /* Table scope to access. */
+ CFA_P70_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD = 2,
+ /*
+ * This field is no longer used. The READ_CLR command always reads (and
+ * does a mask-clear) on a single cache line. This field was added for
+ * SR2 A0 to avoid an ADDR_ERR when TABLE_INDEX=0 and TABLE_TYPE=EM (see
+ * CUMULUS-17872). That issue was fixed in SR2 B0.
+ */
+ CFA_P70_MPC_READ_CLR_CMD_DATA_SIZE_FLD = 3,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_P70_MPC_READ_CLR_CMD_CACHE_OPTION_FLD = 4,
+ /*
+ * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE):
+ */
+ CFA_P70_MPC_READ_CLR_CMD_TABLE_INDEX_FLD = 5,
+ /*
+ * The 64-bit host address to which to write the DMA data returned in
+ * the completion. The data will be written to the same function as the
+ * one that owns the SQ this command is read from. DATA_SIZE determines
+ * the maximum size of the data written. If HOST_ADDRESS[1:0] is not 0,
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ CFA_P70_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD = 6,
+ /*
+ * Specifies bits in 32B data word to clear. For x=0..15, when
+ * clear_mask[x]=1, data[x*16+15:x*16] is set to 0.
+ */
+ CFA_P70_MPC_READ_CLR_CMD_CLEAR_MASK_FLD = 7,
+ CFA_P70_MPC_READ_CLR_CMD_MAX_FLD = 8,
+};
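
As a concrete reading of the CLEAR_MASK encoding described above, the snippet below (illustrative only, not part of the patch) builds a mask that clears just the two lowest 16-bit lanes of the 32B word.

/* Bit x of CLEAR_MASK zeroes data[x*16+15 : x*16] during the read-clear.
 * Clearing lanes 0 and 1 (the lowest 4 bytes of the 32B word):
 */
uint16_t clear_mask = (1u << 0) | (1u << 1);	/* 0x0003 */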
+
+/**
+ * Field IDS for INVALIDATE_CMD: This command forces an explicit evict
+ * of 1-4 consecutive cache lines such that the next time the structure
+ * is used it will be re-read from its backing store location.
+ */
+enum cfa_p70_mpc_invalidate_cmd_fields {
+ CFA_P70_MPC_INVALIDATE_CMD_OPAQUE_FLD = 0,
+ /* This value selects the table type to be acted upon. */
+ CFA_P70_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD = 1,
+ /* Table scope to access. */
+ CFA_P70_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD = 2,
+ /*
+ * This value identifies the number of cache lines to invalidate. A
+ * FMT_ERR is reported if the value is not in the range of [1, 4].
+ */
+ CFA_P70_MPC_INVALIDATE_CMD_DATA_SIZE_FLD = 3,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_P70_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD = 4,
+ /*
+ * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE):
+ */
+ CFA_P70_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD = 5,
+ CFA_P70_MPC_INVALIDATE_CMD_MAX_FLD = 6,
+};
+
+/**
+ * Field IDS for EM_SEARCH_CMD: This command supplies an exact match
+ * entry of 1-4 32B words to search for in the exact match table. CFA
+ * first computes the hash value of the key in the entry, and determines
+ * the static bucket address to search from the hash and the
+ * (EM_BUCKETS, EM_SIZE) for TABLE_SCOPE. It then searches that static
+ * bucket chain for an entry with a matching key (the LREC in the
+ * command entry is ignored). If a matching entry is found, CFA reports
+ * OK status in the completion. Otherwise, assuming no errors abort the
+ * search before it completes, it reports EM_MISS status.
+ */
+enum cfa_p70_mpc_em_search_cmd_fields {
+ CFA_P70_MPC_EM_SEARCH_CMD_OPAQUE_FLD = 0,
+ /* Table scope to access. */
+ CFA_P70_MPC_EM_SEARCH_CMD_TABLE_SCOPE_FLD = 1,
+ /*
+ * Number of 32B units in access. If value is outside the range [1, 4],
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ CFA_P70_MPC_EM_SEARCH_CMD_DATA_SIZE_FLD = 2,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_P70_MPC_EM_SEARCH_CMD_CACHE_OPTION_FLD = 3,
+ CFA_P70_MPC_EM_SEARCH_CMD_MAX_FLD = 4,
+};
+
+/**
+ * Field IDS for EM_INSERT_CMD: This command supplies an exact match
+ * entry of 1-4 32B words to insert in the exact match table. CFA first
+ * computes the hash value of the key in the entry, and determines the
+ * static bucket address to search from the hash and the (EM_BUCKETS,
+ * EM_SIZE) for TABLE_SCOPE. It then writes the 1-4 32B words of the
+ * exact match entry starting at the TABLE_INDEX location in the
+ * command. When the entry write completes, it searches the static
+ * bucket chain for an existing entry with a key matching the key in the
+ * insert entry (the LREC does not need to match). If a matching entry
+ * is found: * If REPLACE=0, the CFA aborts the insert and returns
+ * EM_DUPLICATE status. * If REPLACE=1, the CFA overwrites the matching
+ * entry with the new entry. REPLACED_ENTRY=1 in the completion in this
+ * case to signal that an entry was replaced. The location of the entry
+ * is provided in the completion. If no match is found, CFA adds the new
+ * entry to the lowest unused entry in the tail bucket. If the current
+ * tail bucket is full, this requires adding a new bucket to the tail.
+ * The entry is then inserted at entry number 0. TABLE_INDEX2 provides
+ * the address of the new tail bucket, if needed. If set to 0, the
+ * insert is aborted and returns EM_ABORT status instead of adding a new
+ * bucket to the tail. CHAIN_UPD in the completion indicates whether a
+ * new bucket was added (1) or not (0). For locked scopes, if the read
+ * of the static bucket gives a locked scope miss error, indicating that
+ * the address is not in the cache, the static bucket is assumed empty.
+ * In this case, TAI creates a new bucket, setting entry 0 to the new
+ * entry fields and initializing all other fields to 0. It writes this
+ * new bucket to the static bucket address, which installs it in the
+ * cache.
+ */
+enum cfa_p70_mpc_em_insert_cmd_fields {
+ CFA_P70_MPC_EM_INSERT_CMD_OPAQUE_FLD = 0,
+ /*
+ * Sets the OPTION field on the cache interface to use write-through for
+ * EM entry writes while processing EM_INSERT commands. For all other
+ * cases (including EM_INSERT bucket writes), the OPTION field is set by
+ * the CACHE_OPTION and CACHE_OPTION2 fields.
+ */
+ CFA_P70_MPC_EM_INSERT_CMD_WRITE_THROUGH_FLD = 1,
+ /* Table scope to access. */
+ CFA_P70_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD = 2,
+ /*
+ * Number of 32B units in access. If value is outside the range [1, 4],
+ * CFA aborts processing and reports FMT_ERR status.
+ */
+ CFA_P70_MPC_EM_INSERT_CMD_DATA_SIZE_FLD = 3,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_P70_MPC_EM_INSERT_CMD_CACHE_OPTION_FLD = 4,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Starting
+ * address to write exact match entry being inserted.
+ */
+ CFA_P70_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD = 5,
+ /*
+ * Determines setting of OPTION field for all cache write requests for
+ * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support
+ * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0.
+ */
+ CFA_P70_MPC_EM_INSERT_CMD_CACHE_OPTION2_FLD = 6,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Only used
+ * when no duplicate entry is found and the tail bucket in the chain
+ * searched has no unused entries. In this case, TABLE_INDEX2 provides
+ * the index to the 32B dynamic bucket to add to the tail of the chain
+ * (it is the new tail bucket). In this case, the CFA first writes
+ * TABLE_INDEX2 with a new bucket: * Entry 0 of the bucket sets the
+ * HASH_MSBS computed from the hash and ENTRY_PTR to TABLE_INDEX. *
+ * Entries 1-5 of the bucket set HASH_MSBS and ENTRY_PTR to 0. * CHAIN=0
+ * and CHAIN_PTR is set to CHAIN_PTR from to original tail bucket to
+ * maintain the background chaining. CFA then sets CHAIN=1 and
+ * CHAIN_PTR=TABLE_INDEX2 in the original tail bucket to link the new
+ * bucket to the chain. CHAIN_UPD=1 in the completion to signal that the
+ * new bucket at TABLE_INDEX2 was added to the tail of the chain.
+ */
+ CFA_P70_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD = 7,
+ /*
+ * Only used if an entry is found whose key matches the exact match
+ * entry key in the command: * REPLACE=0: The insert is aborted and
+ * EM_DUPLICATE status is returned, signaling that the insert failed.
+ * The index of the matching entry that blocked the insertion is
+ * returned in the completion. * REPLACE=1: The matching entry is
+ * replaced with that from the command (ENTRY_PTR in the bucket is
+ * overwritten with TABLE_INDEX from the command). HASH_MSBS for the
+ * entry number never changes in this case since it had to match the new
+ * entry key HASH_MSBS to match. When an entry is replaced,
+ * REPLACED_ENTRY=1 in the completion and the index of the matching
+ * entry is returned in the completion so that software can de-allocate
+ * the entry.
+ */
+ CFA_P70_MPC_EM_INSERT_CMD_REPLACE_FLD = 8,
+ CFA_P70_MPC_EM_INSERT_CMD_MAX_FLD = 9,
+};
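
The insert flow documented above maps onto the wrapper builder as sketched below. The sketch is illustrative only and not part of the patch: it uses the corresponding global CFA_BLD_MPC_EM_INSERT_CMD_* identifiers handled by cfa_bld_p70_mpc_build_em_insert() rather than the device-level IDs of this enum, and it again assumes a 64-bit val member in struct cfa_mpc_data_obj. TABLE_INDEX is where the new entry was written, TABLE_INDEX2 supplies a spare bucket in case the tail is full, and REPLACE selects overwrite-on-duplicate.

/* Hypothetical EM insert build; assumes a "val" member. */
static int example_build_em_insert(const uint8_t *em_entry, uint8_t *cmd,
				   uint32_t *cmd_len)
{
	struct cfa_mpc_data_obj fields[CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD];
	int i;

	for (i = 0; i < CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD; i++)
		fields[i].field_id = INVALID_U16;

	fields[CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD].field_id =
		CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD;
	fields[CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD].val = 0x5678;
	fields[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD].field_id =
		CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD;
	fields[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD].val = 0;
	fields[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD].field_id =
		CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD;
	fields[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD].val = 0x2000;
	fields[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD].field_id =
		CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD;
	fields[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD].val = 0x2100;
	fields[CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD].field_id =
		CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD;
	fields[CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD].val = 2;	/* 2 x 32B */
	fields[CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD].field_id =
		CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD;
	fields[CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD].val = 1;	/* overwrite dup */

	/* WRITE_THROUGH and CACHE_OPTION* stay INVALID_U16: the wrapper
	 * rejects them as unsupported for this command.
	 */
	return cfa_bld_p70_mpc_build_em_insert(cmd, cmd_len, em_entry, fields);
}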
+
+/**
+ * Field IDS for EM_DELETE_CMD: This command searches for an exact match
+ * entry index in the static bucket chain and deletes it if found.
+ * TABLE_INDEX give the entry index to delete and TABLE_INDEX2 gives the
+ * static bucket index. If a matching entry is found: * If the matching
+ * entry is the last valid entry in the tail bucket, its entry fields
+ * (HASH_MSBS and ENTRY_PTR) are set to 0 to delete the entry. * If the
+ * matching entry is not the last valid entry in the tail bucket, the
+ * entry fields from that last entry are moved to the matching entry,
+ * and the fields of that last entry are set to 0. * If any of the
+ * previous processing results in the tail bucket not having any valid
+ * entries, the tail bucket is the static bucket, the scope is a locked
+ * scope, and CHAIN_PTR=0, hardware evicts the static bucket from the
+ * cache and the completion signals this case with CHAIN_UPD=1. * If any
+ * of the previous processing results in the tail bucket not having any
+ * valid entries, and the tail bucket is not the static bucket, the tail
+ * bucket is removed from the chain. In this case, the penultimate
+ * bucket in the chain becomes the tail bucket. It has CHAIN set to 0 to
+ * unlink the tail bucket, and CHAIN_PTR set to that from the original
+ * tail bucket to preserve background chaining. The completion signals
+ * this case with CHAIN_UPD=1 and returns the index to the bucket
+ * removed so that software can de-allocate it. CFA returns OK status if
+ * the entry was successfully deleted. Otherwise, it returns EM_MISS
+ * status assuming there were no errors that caused processing to be
+ * aborted.
+ */
+enum cfa_p70_mpc_em_delete_cmd_fields {
+ CFA_P70_MPC_EM_DELETE_CMD_OPAQUE_FLD = 0,
+ /*
+ * Sets the OPTION field on the cache interface to use write-through for
+ * EM entry writes while processing EM_INSERT commands. For all other
+ * cases (including EM_INSERT bucket writes), the OPTION field is set by
+ * the CACHE_OPTION and CACHE_OPTION2 fields.
+ */
+ CFA_P70_MPC_EM_DELETE_CMD_WRITE_THROUGH_FLD = 1,
+ /* Table scope to access. */
+ CFA_P70_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD = 2,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_P70_MPC_EM_DELETE_CMD_CACHE_OPTION_FLD = 3,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Entry index
+ * to delete.
+ */
+ CFA_P70_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD = 4,
+ /*
+ * Determines setting of OPTION field for all cache write requests for
+ * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support
+ * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0.
+ */
+ CFA_P70_MPC_EM_DELETE_CMD_CACHE_OPTION2_FLD = 5,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Static
+ * bucket address for bucket chain.
+ */
+ CFA_P70_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD = 6,
+ CFA_P70_MPC_EM_DELETE_CMD_MAX_FLD = 7,
+};
+
+/**
+ * Field IDS for EM_CHAIN_CMD: This command updates CHAIN_PTR in the
+ * tail bucket of a static bucket chain, supplying both the static
+ * bucket and the new CHAIN_PTR value. TABLE_INDEX is the new CHAIN_PTR
+ * value and TABLE_INDEX2[23:0] is the static bucket. This command
+ * provides software a means to update background chaining coherently
+ * with other bucket updates. The value of CHAIN is unaffected (stays at
+ * 0). For locked scopes, if the static bucket is the tail bucket, it is
+ * empty (all of its ENTRY_PTR values are 0), and TABLE_INDEX=0 (the
+ * CHAIN_PTR is being set to 0), instead of updating the static bucket
+ * it is evicted from the cache. In this case, CHAIN_UPD=1 in the
+ * completion.
+ */
+enum cfa_p70_mpc_em_chain_cmd_fields {
+ CFA_P70_MPC_EM_CHAIN_CMD_OPAQUE_FLD = 0,
+ /*
+ * Sets the OPTION field on the cache interface to use write-through for
+ * EM entry writes while processing EM_INSERT commands. For all other
+ * cases (including EM_INSERT bucket writes), the OPTION field is set by
+ * the CACHE_OPTION and CACHE_OPTION2 fields.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMD_WRITE_THROUGH_FLD = 1,
+ /* Table scope to access. */
+ CFA_P70_MPC_EM_CHAIN_CMD_TABLE_SCOPE_FLD = 2,
+ /*
+ * Determines setting of OPTION field for all cache requests while
+ * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN.
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all
+ * read requests, and CACHE_OPTION2 sets it for all write requests. CFA
+ * does not support posted write requests. Therefore, for WRITE
+ * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that
+ * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set
+ * to 0.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMD_CACHE_OPTION_FLD = 3,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. New
+ * CHAIN_PTR to write to tail bucket.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMD_TABLE_INDEX_FLD = 4,
+ /*
+ * Determines setting of OPTION field for all cache write requests for
+ * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support
+ * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMD_CACHE_OPTION2_FLD = 5,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. Static
+ * bucket address for bucket chain.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMD_TABLE_INDEX2_FLD = 6,
+ CFA_P70_MPC_EM_CHAIN_CMD_MAX_FLD = 7,
+};
+
+/**
+ * Field IDS for READ_CMP: When no errors, returns 1-4 consecutive 32B
+ * words from the TABLE_INDEX within the TABLE_SCOPE specified in the
+ * command, writing them to HOST_ADDRESS from the command.
+ */
+enum cfa_p70_mpc_read_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_P70_MPC_READ_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_P70_MPC_READ_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_P70_MPC_READ_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_P70_MPC_READ_CMP_OPCODE_FLD = 3,
+ /*
+ * The length of the DMA that accompanies the completion in units of
+ * DWORDs (32b). Valid values are [0, 128]. A value of zero indicates
+ * that there is no DMA that accompanies the completion.
+ */
+ CFA_P70_MPC_READ_CMP_DMA_LENGTH_FLD = 4,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_P70_MPC_READ_CMP_OPAQUE_FLD = 5,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_P70_MPC_READ_CMP_V_FLD = 6,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_P70_MPC_READ_CMP_HASH_MSB_FLD = 7,
+ /* TABLE_TYPE from the command. */
+ CFA_P70_MPC_READ_CMP_TABLE_TYPE_FLD = 8,
+ /* TABLE_SCOPE from the command. */
+ CFA_P70_MPC_READ_CMP_TABLE_SCOPE_FLD = 9,
+ /* TABLE_INDEX from the command. */
+ CFA_P70_MPC_READ_CMP_TABLE_INDEX_FLD = 10,
+ CFA_P70_MPC_READ_CMP_MAX_FLD = 11,
+};
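
On the completion side, the wrapper's cfa_bld_p70_mpc_parse_cache_read() only populates the OPAQUE, STATUS and HASH_MSB fields and rejects the remaining identifiers above as unsupported, so a caller requests just those, using the corresponding global CFA_BLD_MPC_READ_CMP_* IDs. The sketch below is illustrative only, is not part of the patch, and again assumes a 64-bit val member in struct cfa_mpc_data_obj.

/* Illustrative parse of a READ completion; resp/resp_len come from the
 * MPC completion ring and rd_data is the caller's DMA buffer.
 */
static int example_parse_cache_read(uint8_t *resp, uint32_t resp_len,
				    uint8_t *rd_data, uint32_t rd_data_len,
				    uint64_t *status)
{
	struct cfa_mpc_data_obj fields[CFA_BLD_MPC_READ_CMP_MAX_FLD];
	int i, rc;

	for (i = 0; i < CFA_BLD_MPC_READ_CMP_MAX_FLD; i++)
		fields[i].field_id = INVALID_U16;

	/* Request only the fields the parser populates. */
	fields[CFA_BLD_MPC_READ_CMP_OPAQUE_FLD].field_id =
		CFA_BLD_MPC_READ_CMP_OPAQUE_FLD;
	fields[CFA_BLD_MPC_READ_CMP_STATUS_FLD].field_id =
		CFA_BLD_MPC_READ_CMP_STATUS_FLD;
	fields[CFA_BLD_MPC_READ_CMP_HASH_MSB_FLD].field_id =
		CFA_BLD_MPC_READ_CMP_HASH_MSB_FLD;

	rc = cfa_bld_p70_mpc_parse_cache_read(resp, resp_len, rd_data,
					      rd_data_len, fields);
	if (rc)
		return rc;

	*status = fields[CFA_BLD_MPC_READ_CMP_STATUS_FLD].val;
	return 0;
}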
+
+/**
+ * Field IDS for WRITE_CMP: Returns status of the write of 1-4
+ * consecutive 32B words starting at TABLE_INDEX in the table specified
+ * by (TABLE_TYPE, TABLE_SCOPE).
+ */
+enum cfa_p70_mpc_write_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_P70_MPC_WRITE_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_P70_MPC_WRITE_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_P70_MPC_WRITE_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_P70_MPC_WRITE_CMP_OPCODE_FLD = 3,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_P70_MPC_WRITE_CMP_OPAQUE_FLD = 4,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_P70_MPC_WRITE_CMP_V_FLD = 5,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_P70_MPC_WRITE_CMP_HASH_MSB_FLD = 6,
+ /* TABLE_TYPE from the command. */
+ CFA_P70_MPC_WRITE_CMP_TABLE_TYPE_FLD = 7,
+ /* TABLE_SCOPE from the command. */
+ CFA_P70_MPC_WRITE_CMP_TABLE_SCOPE_FLD = 8,
+ /* TABLE_INDEX from the command. */
+ CFA_P70_MPC_WRITE_CMP_TABLE_INDEX_FLD = 9,
+ CFA_P70_MPC_WRITE_CMP_MAX_FLD = 10,
+};
+
+/**
+ * Field IDS for READ_CLR_CMP: When no errors, returns 1 32B word from
+ * TABLE_INDEX in the table specified by (TABLE_TYPE, TABLE_SCOPE). The
+ * data returned is the value prior to the clear.
+ */
+enum cfa_p70_mpc_read_clr_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_P70_MPC_READ_CLR_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_P70_MPC_READ_CLR_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_P70_MPC_READ_CLR_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_P70_MPC_READ_CLR_CMP_OPCODE_FLD = 3,
+ /*
+ * The length of the DMA that accompanies the completion in units of
+ * DWORDs (32b). Valid values are [0, 128]. A value of zero indicates
+ * that there is no DMA that accompanies the completion.
+ */
+ CFA_P70_MPC_READ_CLR_CMP_DMA_LENGTH_FLD = 4,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_P70_MPC_READ_CLR_CMP_OPAQUE_FLD = 5,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_P70_MPC_READ_CLR_CMP_V_FLD = 6,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_P70_MPC_READ_CLR_CMP_HASH_MSB_FLD = 7,
+ /* TABLE_TYPE from the command. */
+ CFA_P70_MPC_READ_CLR_CMP_TABLE_TYPE_FLD = 8,
+ /* TABLE_SCOPE from the command. */
+ CFA_P70_MPC_READ_CLR_CMP_TABLE_SCOPE_FLD = 9,
+ /* TABLE_INDEX from the command. */
+ CFA_P70_MPC_READ_CLR_CMP_TABLE_INDEX_FLD = 10,
+ CFA_P70_MPC_READ_CLR_CMP_MAX_FLD = 11,
+};
+
+/**
+ * Field IDS for INVALIDATE_CMP: Returns status for INVALIDATE commands.
+ */
+enum cfa_p70_mpc_invalidate_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_P70_MPC_INVALIDATE_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_P70_MPC_INVALIDATE_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_P70_MPC_INVALIDATE_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_P70_MPC_INVALIDATE_CMP_OPCODE_FLD = 3,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_P70_MPC_INVALIDATE_CMP_OPAQUE_FLD = 4,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_P70_MPC_INVALIDATE_CMP_V_FLD = 5,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_P70_MPC_INVALIDATE_CMP_HASH_MSB_FLD = 6,
+ /* TABLE_TYPE from the command. */
+ CFA_P70_MPC_INVALIDATE_CMP_TABLE_TYPE_FLD = 7,
+ /* TABLE_SCOPE from the command. */
+ CFA_P70_MPC_INVALIDATE_CMP_TABLE_SCOPE_FLD = 8,
+ /* TABLE_INDEX from the command. */
+ CFA_P70_MPC_INVALIDATE_CMP_TABLE_INDEX_FLD = 9,
+ CFA_P70_MPC_INVALIDATE_CMP_MAX_FLD = 10,
+};
+
+/**
+ * Field IDS for EM_SEARCH_CMP: For OK status, returns the index of the
+ * matching entry found for the EM key supplied in the command. Returns
+ * EM_MISS status if no match was found.
+ */
+enum cfa_p70_mpc_em_search_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_P70_MPC_EM_SEARCH_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_P70_MPC_EM_SEARCH_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_P70_MPC_EM_SEARCH_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_P70_MPC_EM_SEARCH_CMP_OPCODE_FLD = 3,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_P70_MPC_EM_SEARCH_CMP_OPAQUE_FLD = 4,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_P70_MPC_EM_SEARCH_CMP_V1_FLD = 5,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_P70_MPC_EM_SEARCH_CMP_HASH_MSB_FLD = 6,
+ /* TABLE_SCOPE from the command. */
+ CFA_P70_MPC_EM_SEARCH_CMP_TABLE_SCOPE_FLD = 7,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. For OK
+ * status, gives ENTRY_PTR[25:0] of the matching entry found. Otherwise,
+ * set to 0.
+ */
+ CFA_P70_MPC_EM_SEARCH_CMP_TABLE_INDEX_FLD = 8,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. If the hash
+ * is computed (no errors during initial processing of the command),
+ * TABLE_INDEX2[23:0] is the static bucket address determined from the
+ * hash of the exact match entry key in the command and the (EM_SIZE,
+ * EM_BUCKETS) configuration for TABLE_SCOPE of the command. Bits 25:24
+ * in this case are set to 0. For any other status, it is always 0.
+ */
+ CFA_P70_MPC_EM_SEARCH_CMP_TABLE_INDEX2_FLD = 9,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_P70_MPC_EM_SEARCH_CMP_V2_FLD = 10,
+ /*
+ * BKT_NUM is the bucket number in chain of the tail bucket after
+ * finishing processing the command, except when the command stops
+ * processing before the tail bucket. NUM_ENTRIES is the number of valid
+ * entries in the BKT_NUM bucket. The following describes the cases
+ * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after
+ * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR,
+ * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. *
+ * For CACHE_ERR completion status, BKT_NUM will be set to the bucket
+ * number that was last read without error. If ERR=1 in the response to
+ * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The
+ * static bucket is number 0, BKT_NUM increments for each new bucket in
+ * the chain, and saturates at 255. Therefore, if the value is 255,
+ * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES
+ * will still be the correct value as described above for the bucket.
+ */
+ CFA_P70_MPC_EM_SEARCH_CMP_BKT_NUM_FLD = 11,
+ /* See BKT_NUM description. */
+ CFA_P70_MPC_EM_SEARCH_CMP_NUM_ENTRIES_FLD = 12,
+ CFA_P70_MPC_EM_SEARCH_CMP_MAX_FLD = 13,
+};
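
For orientation, here is a minimal sketch (not part of the generated header) of how a consumer might act on an EM_SEARCH completion once its fields have been decoded into an array indexed by the enum above. The fields[] layout, the helper name, and the assumption that a status value of 0 means OK are illustrative only.

#include <stdbool.h>
#include <stdint.h>

/* Sketch: report the matching ENTRY_PTR and static bucket address for an OK
 * EM_SEARCH completion. fields[] is assumed to hold one decoded value per
 * enum cfa_p70_mpc_em_search_cmp_fields entry; status 0 is assumed to be OK.
 */
static bool em_search_cmp_hit(const uint64_t *fields,
			      uint32_t *entry_ptr, uint32_t *static_bkt)
{
	if (fields[CFA_P70_MPC_EM_SEARCH_CMP_STATUS_FLD] != 0)
		return false; /* EM_MISS or an error status */

	*entry_ptr = (uint32_t)fields[CFA_P70_MPC_EM_SEARCH_CMP_TABLE_INDEX_FLD];
	*static_bkt = (uint32_t)fields[CFA_P70_MPC_EM_SEARCH_CMP_TABLE_INDEX2_FLD];
	return true;
}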
+
+/**
+ * Field IDS for EM_INSERT_CMP: OK status indicates that the exact match
+ * entry from the command was successfully inserted. EM_DUPLICATE status
+ * indicates that the insert was aborted because an entry with the same
+ * exact match key was found and REPLACE=0 in the command. EM_ABORT
+ * status indicates that no duplicate was found, the tail bucket in the
+ * chain was full, and TABLE_INDEX2=0. No changes are made to the
+ * database in this case. TABLE_INDEX is the starting address at which
+ * to insert the exact match entry (from the command). TABLE_INDEX2 is
+ * the address at which to insert a new bucket at the tail of the static
+ * bucket chain if needed (from the command). CHAIN_UPD=1 if a new
+ * bucket was added at this address. TABLE_INDEX3 is the static bucket
+ * address for the chain, determined from hashing the exact match entry.
+ * Software needs this address and TABLE_INDEX in order to delete the
+ * entry using an EM_DELETE command. TABLE_INDEX4 is the index of an
+ * entry found that had a matching exact match key to the command entry
+ * key. If no matching entry was found, it is set to 0. There are two
+ * cases when there is a matching entry, depending on REPLACE from the
+ * command: * REPLACE=0: EM_DUPLICATE status is reported and the insert
+ * is aborted. Software can use the static bucket address
+ * (TABLE_INDEX3[23:0]) and the matching entry (TABLE_INDEX4) in an
+ * EM_DELETE command if it wishes to explicitly delete the matching
+ * entry. * REPLACE=1: REPLACED_ENTRY=1 to signal that the entry at
+ * TABLE_INDEX4 was replaced by the insert entry. REPLACED_ENTRY will
+ * only be 1 if reporting OK status in this case. Software can de-
+ * allocate the entry at TABLE_INDEX4.
+ */
+enum cfa_p70_mpc_em_insert_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_P70_MPC_EM_INSERT_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_P70_MPC_EM_INSERT_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_P70_MPC_EM_INSERT_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_P70_MPC_EM_INSERT_CMP_OPCODE_FLD = 3,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_P70_MPC_EM_INSERT_CMP_OPAQUE_FLD = 4,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_P70_MPC_EM_INSERT_CMP_V1_FLD = 5,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_P70_MPC_EM_INSERT_CMP_HASH_MSB_FLD = 6,
+ /* TABLE_SCOPE from the command. */
+ CFA_P70_MPC_EM_INSERT_CMP_TABLE_SCOPE_FLD = 7,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX
+ * from the command, which is the starting address at which to insert
+ * the exact match entry.
+ */
+ CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX_FLD = 8,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2
+ * from the command, which is the index for the new tail bucket to add
+ * if needed (CHAIN_UPD=1 if it was used).
+ */
+ CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX2_FLD = 9,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. If the hash
+ * is computed (no errors during initial processing of the command),
+ * TABLE_INDEX3[23:0] is the static bucket address determined from the
+ * hash of the exact match entry key in the command and the (EM_SIZE,
+ * EM_BUCKETS) configuration for TABLE_SCOPE of the command. Bits 25:24
+ * in this case are set to 0. For any other status, it is always 0.
+ */
+ CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD = 10,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_P70_MPC_EM_INSERT_CMP_V2_FLD = 11,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. ENTRY_PTR of
+ * matching entry found. Set to 0 if no matching entry found. If
+ * REPLACED_ENTRY=1, that indicates a matching entry was found and
+ * REPLACE=1 in the command. In this case, the matching entry was
+ * replaced by the new entry in the command and this index can therefore
+ * be de-allocated.
+ */
+ CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX4_FLD = 12,
+ /*
+ * BKT_NUM is the bucket number in chain of the tail bucket after
+ * finishing processing the command, except when the command stops
+ * processing before the tail bucket. NUM_ENTRIES is the number of valid
+ * entries in the BKT_NUM bucket. The following describes the cases
+ * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after
+ * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR,
+ * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. *
+ * For CACHE_ERR completion status, BKT_NUM will be set to the bucket
+ * number that was last read without error. If ERR=1 in the response to
+ * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The
+ * static bucket is number 0, BKT_NUM increments for each new bucket in
+ * the chain, and saturates at 255. Therefore, if the value is 255,
+ * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES
+ * will still be the correct value as described above for the bucket.
+ */
+ CFA_P70_MPC_EM_INSERT_CMP_BKT_NUM_FLD = 13,
+ /* See BKT_NUM description. */
+ CFA_P70_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD = 14,
+ /*
+ * Specifies if the chain was updated while processing the command: Set
+ * to 1 when a new bucket is added to the tail of the static bucket
+ * chain at TABLE_INDEX2. This occurs if and only if the insert requires
+ * adding a new entry and the tail bucket is full. If set to 0,
+ * TABLE_INDEX2 was not used and is therefore still free.
+ */
+ CFA_P70_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD = 15,
+ /*
+ * Set to 1 if a matching entry was found and REPLACE=1 in the command.
+ * In that case, the entry starting at TABLE_INDEX4 was replaced and can
+ * therefore be de-allocated. Otherwise, this flag is set to 0.
+ */
+ CFA_P70_MPC_EM_INSERT_CMP_REPLACED_ENTRY_FLD = 16,
+ CFA_P70_MPC_EM_INSERT_CMP_MAX_FLD = 17,
+};
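
The EM_INSERT_CMP description above implies some index bookkeeping after every insert. The hedged sketch below (not part of the generated header) shows that logic in isolation; the fields[] decoding, the free_index() callback, and the notion that software pre-allocated TABLE_INDEX2 for a possible new tail bucket are assumptions for illustration, and error statuses are ignored.

/* Sketch: release indices that the EM_INSERT completion says are no longer
 * needed. CHAIN_UPD=0 means the pre-allocated TABLE_INDEX2 bucket was never
 * consumed; REPLACED_ENTRY=1 means the old entry at TABLE_INDEX4 can be
 * de-allocated.
 */
static void em_insert_cmp_bookkeep(const uint64_t *fields,
				   void (*free_index)(uint32_t))
{
	if (!fields[CFA_P70_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD])
		free_index((uint32_t)fields[CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX2_FLD]);

	if (fields[CFA_P70_MPC_EM_INSERT_CMP_REPLACED_ENTRY_FLD])
		free_index((uint32_t)fields[CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX4_FLD]);
}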
+
+/**
+ * Field IDS for EM_DELETE_CMP: OK status indicates that an ENTRY_PTR
+ * matching TABLE_INDEX was found in the static bucket chain specified
+ * and was therefore deleted. EM_MISS status indicates that no match was
+ * found. TABLE_INDEX is from the command. It is the index of the entry
+ * to delete. TABLE_INDEX2 is from the command. It is the static bucket
+ * address. TABLE_INDEX3 is the index of the tail bucket of the static
+ * bucket chain prior to processing the command. TABLE_INDEX4 is the
+ * index of the tail bucket of the static bucket chain after processing
+ * the command. If CHAIN_UPD=1 and TABLE_INDEX4==TABLE_INDEX2, the
+ * static bucket was the tail bucket, it became empty after the delete,
+ * the scope is a locked scope, and CHAIN_PTR was 0. In this case, the
+ * static bucket has been evicted from the cache. Otherwise, if
+ * CHAIN_UPD=1, the original tail bucket given by TABLE_INDEX3 was
+ * removed from the chain because it went empty. It can therefore be de-
+ * allocated.
+ */
+enum cfa_p70_mpc_em_delete_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_P70_MPC_EM_DELETE_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_P70_MPC_EM_DELETE_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_P70_MPC_EM_DELETE_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_P70_MPC_EM_DELETE_CMP_OPCODE_FLD = 3,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_P70_MPC_EM_DELETE_CMP_OPAQUE_FLD = 4,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_P70_MPC_EM_DELETE_CMP_V1_FLD = 5,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_P70_MPC_EM_DELETE_CMP_HASH_MSB_FLD = 6,
+ /* TABLE_SCOPE from the command. */
+ CFA_P70_MPC_EM_DELETE_CMP_TABLE_SCOPE_FLD = 7,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX
+ * from the command, which is the index of the entry to delete.
+ */
+ CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX_FLD = 8,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2
+ * from the command.
+ */
+ CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX2_FLD = 9,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. For OK or
+ * EM_MISS status, the index of the tail bucket of the chain prior to
+ * processing the command. If CHAIN_UPD=1, the bucket was removed and
+ * this index can be de-allocated. For other status values, it is set to
+ * 0.
+ */
+ CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD = 10,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_P70_MPC_EM_DELETE_CMP_V2_FLD = 11,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. For OK or
+ * EM_MISS status, the index of the tail bucket of the chain after
+ * processing the command. If CHAIN_UPD=0 (always for EM_MISS status), it is
+ * always equal to TABLE_INDEX3 as the chain was not updated. For other
+ * status values, it is set to 0.
+ */
+ CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX4_FLD = 12,
+ /*
+ * BKT_NUM is the bucket number in chain of the tail bucket after
+ * finishing processing the command, except when the command stops
+ * processing before the tail bucket. NUM_ENTRIES is the number of valid
+ * entries in the BKT_NUM bucket. The following describes the cases
+ * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after
+ * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR,
+ * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. *
+ * For CACHE_ERR completion status, BKT_NUM will be set to the bucket
+ * number that was last read without error. If ERR=1 in the response to
+ * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The
+ * static bucket is number 0, BKT_NUM increments for each new bucket in
+ * the chain, and saturates at 255. Therefore, if the value is 255,
+ * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES
+ * will still be the correct value as described above for the bucket.
+ */
+ CFA_P70_MPC_EM_DELETE_CMP_BKT_NUM_FLD = 13,
+ /* See BKT_NUM description. */
+ CFA_P70_MPC_EM_DELETE_CMP_NUM_ENTRIES_FLD = 14,
+ /*
+ * Specifies if the chain was updated while processing the command: Set
+ * to 1 when a bucket is removed from the static bucket chain. This
+ * occurs if after the delete, the tail bucket is a dynamic bucket and
+ * no longer has any valid entries. In this case, software should de-
+ * allocate the dynamic bucket at TABLE_INDEX3. It is also set to 1 when
+ * the static bucket is evicted, which only occurs for locked scopes.
+ * See the EM_DELETE command description for details.
+ */
+ CFA_P70_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD = 15,
+ CFA_P70_MPC_EM_DELETE_CMP_MAX_FLD = 16,
+};
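
Following the EM_DELETE_CMP description above, a deleted entry can leave behind an unlinked tail bucket that software should return to its allocator. The sketch below (not part of the generated header) captures that rule; fields[] and free_index() are illustration-only assumptions and error statuses are ignored.

/* Sketch: free the unlinked tail bucket after an EM_DELETE. CHAIN_UPD=1 with
 * TABLE_INDEX4 == TABLE_INDEX2 means the static bucket itself was evicted
 * (locked scope), so there is no dynamic bucket to free; otherwise the old
 * tail bucket at TABLE_INDEX3 went empty, was removed, and can be freed.
 */
static void em_delete_cmp_bookkeep(const uint64_t *fields,
				   void (*free_index)(uint32_t))
{
	uint32_t idx2 = (uint32_t)fields[CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX2_FLD];
	uint32_t idx3 = (uint32_t)fields[CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD];
	uint32_t idx4 = (uint32_t)fields[CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX4_FLD];

	if (!fields[CFA_P70_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD])
		return;

	if (idx4 == idx2)
		return; /* static bucket evicted; nothing to free */

	free_index(idx3);
}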
+
+/**
+ * Field IDS for EM_CHAIN_CMP: OK status indicates that the CHAIN_PTR of
+ * the tail bucket was successfully updated. TABLE_INDEX is from the
+ * command. It is the value of the new CHAIN_PTR. TABLE_INDEX2 is from
+ * the command. TABLE_INDEX3 is the index of the tail bucket of the
+ * static bucket chain.
+ */
+enum cfa_p70_mpc_em_chain_cmp_fields {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records **(EXCEPT
+ * no_op!!!!)** .
+ */
+ CFA_P70_MPC_EM_CHAIN_CMP_TYPE_FLD = 0,
+ /* The command processing status. */
+ CFA_P70_MPC_EM_CHAIN_CMP_STATUS_FLD = 1,
+ /*
+ * This field represents the Mid-Path client that generated the
+ * completion.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMP_MP_CLIENT_FLD = 2,
+ /* OPCODE from the command. */
+ CFA_P70_MPC_EM_CHAIN_CMP_OPCODE_FLD = 3,
+ /*
+ * This is a copy of the opaque field from the mid path BD of this
+ * command.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMP_OPAQUE_FLD = 4,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMP_V1_FLD = 5,
+ /*
+ * For EM_SEARCH and EM_INSERT commands without errors that abort the
+ * command processing prior to the hash computation, set to HASH[35:24]
+ * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error
+ * conditions, which carry debug information in this field as shown by
+ * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0],
+ * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present
+ * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. *
+ * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands -
+ * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} -
+ * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if
+ * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an
+ * error - TABLE_INDEX[n]=0 if the completion does not have the
+ * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0,
+ * DATA_SIZE[2:0]}
+ */
+ CFA_P70_MPC_EM_CHAIN_CMP_HASH_MSB_FLD = 6,
+ /* TABLE_SCOPE from the command. */
+ CFA_P70_MPC_EM_CHAIN_CMP_TABLE_SCOPE_FLD = 7,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX
+ * from the command, which is the new CHAIN_PTR for the tail bucket of
+ * the static bucket chain.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMP_TABLE_INDEX_FLD = 8,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2
+ * from the command.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMP_TABLE_INDEX2_FLD = 9,
+ /*
+ * A 32B index into the EM table identified by TABLE_SCOPE. For OK
+ * status, the index of the tail bucket of the chain. Otherwise, set to
+ * 0.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMP_TABLE_INDEX3_FLD = 10,
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMP_V2_FLD = 11,
+ /*
+ * BKT_NUM is the bucket number in chain of the tail bucket after
+ * finishing processing the command, except when the command stops
+ * processing before the tail bucket. NUM_ENTRIES is the number of valid
+ * entries in the BKT_NUM bucket. The following describes the cases
+ * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after
+ * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR,
+ * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. *
+ * For CACHE_ERR completion status, BKT_NUM will be set to the bucket
+ * number that was last read without error. If ERR=1 in the response to
+ * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The
+ * static bucket is number 0, BKT_NUM increments for each new bucket in
+ * the chain, and saturates at 255. Therefore, if the value is 255,
+ * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES
+ * will still be the correct value as described above for the bucket.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMP_BKT_NUM_FLD = 12,
+ /* See BKT_NUM description. */
+ CFA_P70_MPC_EM_CHAIN_CMP_NUM_ENTRIES_FLD = 13,
+ /*
+ * Set to 1 when the scope is a locked scope, the tail bucket is the
+ * static bucket, the bucket is empty (all of its ENTRY_PTR values are
+ * 0), and TABLE_INDEX=0 in the command. In this case, the static bucket
+ * is evicted. For all other cases, it is set to 0.
+ */
+ CFA_P70_MPC_EM_CHAIN_CMP_CHAIN_UPD_FLD = 14,
+ CFA_P70_MPC_EM_CHAIN_CMP_MAX_FLD = 15,
+};
+
+/* clang-format on */
+
+#endif /* _CFA_P70_MPC_FIELD_IDS_H_ */
new file mode 100644
@@ -0,0 +1,775 @@
+/****************************************************************************
+ * Copyright(c) 2001-2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * Name: cfa_p70_mpc_field_mapping.h
+ *
+ * Description: MPC CFA command and completion field mapping (Global to Device)
+ *
+ * Date: 09/29/22 11:50:38
+ *
+ * Note: This file is script-generated by ./cfa_header_gen.py.
+ * DO NOT modify this file manually !!!!
+ *
+ ****************************************************************************/
+#ifndef _CFA_P70_MPC_FIELD_MAPPING_H_
+#define _CFA_P70_MPC_FIELD_MAPPING_H_
+
+/* clang-format off */
+/** Device specific Field ID mapping structure */
+struct field_mapping {
+ bool valid;
+ uint16_t mapping;
+};
+
+/**
+ * Global to device field id mapping for READ_CMD
+ */
+struct field_mapping cfa_p70_mpc_read_cmd_gbl_to_dev
+ [CFA_BLD_MPC_READ_CMD_MAX_FLD] = {
+ [CFA_BLD_MPC_READ_CMD_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMD_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMD_TABLE_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMD_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMD_DATA_SIZE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMD_CACHE_OPTION_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMD_TABLE_INDEX_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMD_HOST_ADDRESS_FLD,
+ },
+};
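
These tables translate the builder-level (CFA_BLD_MPC_*) field IDs into the p70 device field IDs, with .valid marking which global fields exist on this device. A minimal lookup sketch is shown below (not part of the generated header); the function name and the -1 "unsupported" return convention are assumptions.

/* Sketch: map a global field id to the device field id via a table such as
 * cfa_p70_mpc_read_cmd_gbl_to_dev above.
 */
static int cfa_p70_map_fld(const struct field_mapping *tbl, int tbl_size,
			   int gbl_id)
{
	if (gbl_id < 0 || gbl_id >= tbl_size || !tbl[gbl_id].valid)
		return -1; /* field not supported on this device */

	return tbl[gbl_id].mapping;
}

/* Example use:
 * dev_id = cfa_p70_map_fld(cfa_p70_mpc_read_cmd_gbl_to_dev,
 *                          CFA_BLD_MPC_READ_CMD_MAX_FLD,
 *                          CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD);
 */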
+
+/**
+ * Global to device field id mapping for WRITE_CMD
+ */
+struct field_mapping cfa_p70_mpc_write_cmd_gbl_to_dev
+ [CFA_BLD_MPC_WRITE_CMD_MAX_FLD] = {
+ [CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMD_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMD_TABLE_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMD_WRITE_THROUGH_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMD_WRITE_THROUGH_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMD_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMD_DATA_SIZE_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMD_CACHE_OPTION_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMD_TABLE_INDEX_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for READ_CLR_CMD
+ */
+struct field_mapping cfa_p70_mpc_read_clr_cmd_gbl_to_dev
+ [CFA_BLD_MPC_READ_CLR_CMD_MAX_FLD] = {
+ [CFA_BLD_MPC_READ_CLR_CMD_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMD_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMD_TABLE_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMD_TABLE_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMD_DATA_SIZE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMD_DATA_SIZE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMD_CACHE_OPTION_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMD_CACHE_OPTION_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMD_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMD_TABLE_INDEX_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMD_CLEAR_MASK_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMD_CLEAR_MASK_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for INVALIDATE_CMD
+ */
+struct field_mapping cfa_p70_mpc_invalidate_cmd_gbl_to_dev
+ [CFA_BLD_MPC_INVALIDATE_CMD_MAX_FLD] = {
+ [CFA_BLD_MPC_INVALIDATE_CMD_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMD_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMD_DATA_SIZE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMD_DATA_SIZE_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for EM_SEARCH_CMD
+ */
+struct field_mapping cfa_p70_mpc_em_search_cmd_gbl_to_dev
+ [CFA_BLD_MPC_EM_SEARCH_CMD_MAX_FLD] = {
+ [CFA_BLD_MPC_EM_SEARCH_CMD_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMD_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMD_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMD_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMD_DATA_SIZE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMD_DATA_SIZE_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMD_CACHE_OPTION_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMD_CACHE_OPTION_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for EM_INSERT_CMD
+ */
+struct field_mapping cfa_p70_mpc_em_insert_cmd_gbl_to_dev
+ [CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD] = {
+ [CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMD_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMD_WRITE_THROUGH_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMD_WRITE_THROUGH_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMD_DATA_SIZE_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMD_CACHE_OPTION_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMD_CACHE_OPTION_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMD_CACHE_OPTION2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMD_CACHE_OPTION2_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMD_REPLACE_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for EM_DELETE_CMD
+ */
+struct field_mapping cfa_p70_mpc_em_delete_cmd_gbl_to_dev
+ [CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD] = {
+ [CFA_BLD_MPC_EM_DELETE_CMD_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMD_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMD_WRITE_THROUGH_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMD_WRITE_THROUGH_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMD_CACHE_OPTION_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMD_CACHE_OPTION_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMD_CACHE_OPTION2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMD_CACHE_OPTION2_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for EM_CHAIN_CMD
+ */
+struct field_mapping cfa_p70_mpc_em_chain_cmd_gbl_to_dev
+ [CFA_BLD_MPC_EM_CHAIN_CMD_MAX_FLD] = {
+ [CFA_BLD_MPC_EM_CHAIN_CMD_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMD_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMD_WRITE_THROUGH_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMD_WRITE_THROUGH_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMD_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMD_CACHE_OPTION_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMD_CACHE_OPTION_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMD_TABLE_INDEX_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMD_CACHE_OPTION2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMD_CACHE_OPTION2_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_INDEX2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMD_TABLE_INDEX2_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for READ_CMP
+ */
+struct field_mapping cfa_p70_mpc_read_cmp_gbl_to_dev
+ [CFA_BLD_MPC_READ_CMP_MAX_FLD] = {
+ [CFA_BLD_MPC_READ_CMP_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMP_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMP_STATUS_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMP_STATUS_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMP_MP_CLIENT_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMP_MP_CLIENT_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMP_OPCODE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMP_OPCODE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMP_DMA_LENGTH_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMP_DMA_LENGTH_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMP_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMP_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMP_V_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMP_V_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMP_HASH_MSB_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMP_HASH_MSB_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMP_TABLE_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMP_TABLE_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMP_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMP_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CMP_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CMP_TABLE_INDEX_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for WRITE_CMP
+ */
+struct field_mapping cfa_p70_mpc_write_cmp_gbl_to_dev
+ [CFA_BLD_MPC_WRITE_CMP_MAX_FLD] = {
+ [CFA_BLD_MPC_WRITE_CMP_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMP_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMP_STATUS_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMP_STATUS_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMP_MP_CLIENT_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMP_MP_CLIENT_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMP_OPCODE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMP_OPCODE_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMP_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMP_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMP_V_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMP_V_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMP_HASH_MSB_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMP_HASH_MSB_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMP_TABLE_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMP_TABLE_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMP_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMP_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_WRITE_CMP_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_WRITE_CMP_TABLE_INDEX_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for READ_CLR_CMP
+ */
+struct field_mapping cfa_p70_mpc_read_clr_cmp_gbl_to_dev
+ [CFA_BLD_MPC_READ_CLR_CMP_MAX_FLD] = {
+ [CFA_BLD_MPC_READ_CLR_CMP_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMP_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMP_STATUS_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMP_MP_CLIENT_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMP_MP_CLIENT_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMP_OPCODE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMP_OPCODE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMP_DMA_LENGTH_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMP_DMA_LENGTH_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMP_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMP_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMP_V_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMP_V_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMP_HASH_MSB_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMP_HASH_MSB_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMP_TABLE_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMP_TABLE_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMP_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMP_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_READ_CLR_CMP_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_READ_CLR_CMP_TABLE_INDEX_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for INVALIDATE_CMP
+ */
+struct field_mapping cfa_p70_mpc_invalidate_cmp_gbl_to_dev
+ [CFA_BLD_MPC_INVALIDATE_CMP_MAX_FLD] = {
+ [CFA_BLD_MPC_INVALIDATE_CMP_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMP_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMP_STATUS_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMP_STATUS_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMP_MP_CLIENT_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMP_MP_CLIENT_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMP_OPCODE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMP_OPCODE_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMP_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMP_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMP_V_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMP_V_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMP_HASH_MSB_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMP_HASH_MSB_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMP_TABLE_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMP_TABLE_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMP_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMP_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_INVALIDATE_CMP_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_INVALIDATE_CMP_TABLE_INDEX_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for EM_SEARCH_CMP
+ */
+struct field_mapping cfa_p70_mpc_em_search_cmp_gbl_to_dev
+ [CFA_BLD_MPC_EM_SEARCH_CMP_MAX_FLD] = {
+ [CFA_BLD_MPC_EM_SEARCH_CMP_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMP_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMP_STATUS_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMP_STATUS_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMP_MP_CLIENT_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMP_MP_CLIENT_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMP_OPCODE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMP_OPCODE_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMP_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMP_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMP_V1_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMP_V1_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMP_HASH_MSB_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMP_HASH_MSB_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMP_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMP_TABLE_INDEX_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_INDEX2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMP_TABLE_INDEX2_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMP_V2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMP_V2_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMP_BKT_NUM_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMP_BKT_NUM_FLD,
+ },
+ [CFA_BLD_MPC_EM_SEARCH_CMP_NUM_ENTRIES_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_SEARCH_CMP_NUM_ENTRIES_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for EM_INSERT_CMP
+ */
+struct field_mapping cfa_p70_mpc_em_insert_cmp_gbl_to_dev
+ [CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD] = {
+ [CFA_BLD_MPC_EM_INSERT_CMP_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_STATUS_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_MP_CLIENT_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_MP_CLIENT_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_OPCODE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_OPCODE_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_V1_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_V1_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_HASH_MSB_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_HASH_MSB_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX2_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_V2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_V2_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX4_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX4_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_BKT_NUM_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_BKT_NUM_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD,
+ },
+ [CFA_BLD_MPC_EM_INSERT_CMP_REPLACED_ENTRY_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_INSERT_CMP_REPLACED_ENTRY_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for EM_DELETE_CMP
+ */
+struct field_mapping cfa_p70_mpc_em_delete_cmp_gbl_to_dev
+ [CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD] = {
+ [CFA_BLD_MPC_EM_DELETE_CMP_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_STATUS_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_MP_CLIENT_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_MP_CLIENT_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_OPCODE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_OPCODE_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_V1_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_V1_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_HASH_MSB_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_HASH_MSB_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX2_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_V2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_V2_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX4_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX4_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_BKT_NUM_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_BKT_NUM_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_NUM_ENTRIES_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_NUM_ENTRIES_FLD,
+ },
+ [CFA_BLD_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD,
+ },
+};
+
+/**
+ * Global to device field id mapping for EM_CHAIN_CMP
+ */
+struct field_mapping cfa_p70_mpc_em_chain_cmp_gbl_to_dev
+ [CFA_BLD_MPC_EM_CHAIN_CMP_MAX_FLD] = {
+ [CFA_BLD_MPC_EM_CHAIN_CMP_TYPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_TYPE_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_STATUS_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_STATUS_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_MP_CLIENT_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_MP_CLIENT_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_OPCODE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_OPCODE_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_OPAQUE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_OPAQUE_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_V1_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_V1_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_HASH_MSB_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_HASH_MSB_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_SCOPE_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_TABLE_SCOPE_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_TABLE_INDEX_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_TABLE_INDEX2_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX3_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_TABLE_INDEX3_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_V2_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_V2_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_BKT_NUM_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_BKT_NUM_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_NUM_ENTRIES_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_NUM_ENTRIES_FLD,
+ },
+ [CFA_BLD_MPC_EM_CHAIN_CMP_CHAIN_UPD_FLD] = {
+ .valid = true,
+ .mapping = CFA_P70_MPC_EM_CHAIN_CMP_CHAIN_UPD_FLD,
+ },
+};
+
+/* clang-format on */
+
+#endif /* _CFA_P70_MPC_FIELD_MAPPING_H_ */
new file mode 100644
@@ -0,0 +1,185 @@
+/****************************************************************************
+ * Copyright(c) 2021 - 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_resources.h
+ *
+ * @brief CFA Resource type definitions
+ */
+#ifndef _CFA_RESOURCES_H_
+#define _CFA_RESOURCES_H_
+
+/**
+ * @addtogroup CFA_RESC_TYPES CFA Resource Types
+ * \ingroup CFA_V3
+ * CFA HW resource types and sub types definition
+ * @{
+ */
+
+/**
+ * CFA hardware Resource Type
+ *
+ * Depending on the type of CFA hardware resource, the resources are divided
+ * into multiple groups. This group is identified by resource type. The
+ * following enum defines all the CFA resource types
+ */
+enum cfa_resource_type {
+ /** CFA resources using fixed identifiers (IDM)
+ */
+ CFA_RTYPE_IDENT = 0,
+ /** CFA resources accessed by fixed indices (TBM)
+ */
+ CFA_RTYPE_IDX_TBL,
+ /** CFA TCAM resources
+ */
+ CFA_RTYPE_TCAM,
+ /** CFA interface tables (IFM)
+ */
+ CFA_RTYPE_IF_TBL,
+ /** CFA resources accessed using CFA memory manager index
+ */
+ CFA_RTYPE_CMM,
+ /** CFA Global fields (e.g. registers which configure global settings)
+ */
+ CFA_RTYPE_GLB_FLD,
+
+ CFA_RTYPE_HW_MAX = 12,
+
+ /** Firmware-only types
+ */
+ /** CFA Firmware Session Manager
+ */
+ CFA_RTYPE_SM = CFA_RTYPE_HW_MAX,
+ /** CFA Firmware Table Scope Manager
+ */
+ CFA_RTYPE_TSM,
+ /** CFA Firmware Table Scope Instance Manager
+ */
+ CFA_RTYPE_TIM,
+ /** CFA Firmware Global Id Manager
+ */
+ CFA_RTYPE_GIM,
+
+ CFA_RTYPE_MAX
+};
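
Because the firmware-only types are deliberately offset to start at CFA_RTYPE_HW_MAX, a caller can classify a resource type with a single comparison. A hedged sketch (the helper name is illustrative, not from this header):

#include <stdbool.h>

/* Sketch: true for firmware-managed resource types (SM, TSM, TIM, GIM),
 * false for the hardware resource types below CFA_RTYPE_HW_MAX.
 */
static inline bool cfa_resource_type_is_fw_only(enum cfa_resource_type type)
{
	return type >= CFA_RTYPE_HW_MAX && type < CFA_RTYPE_MAX;
}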
+
+/**
+ * Resource sub-types for CFA_RTYPE_IDENT
+ */
+enum cfa_resource_subtype_ident {
+ CFA_RSUBTYPE_IDENT_L2CTX = 0, /**< Remapped L2 contexts */
+ CFA_RSUBTYPE_IDENT_PROF_FUNC, /**< Profile functions */
+ CFA_RSUBTYPE_IDENT_WC_PROF, /**< WC TCAM profile IDs */
+ CFA_RSUBTYPE_IDENT_EM_PROF, /**< EM profile IDs */
+ CFA_RSUBTYPE_IDENT_L2_FUNC, /**< L2 functions */
+ CFA_RSUBTYPE_IDENT_LAG_ID, /**< LAG IDs */
+ CFA_RSUBTYPE_IDENT_MAX
+};
+
+/**
+ * Resource sub-types for CFA_RTYPE_IDX_TBL
+ */
+enum cfa_resource_subtype_idx_tbl {
+ CFA_RSUBTYPE_IDX_TBL_STAT64 = 0, /**< Statistics */
+ CFA_RSUBTYPE_IDX_TBL_METER_PROF, /**< Meter profile */
+ CFA_RSUBTYPE_IDX_TBL_METER_INST, /**< Meter instances */
+ CFA_RSUBTYPE_IDX_TBL_METER_DROP_CNT, /**< Meter Drop Count */
+ CFA_RSUBTYPE_IDX_TBL_MIRROR, /**< Mirror table */
+ /* Metadata mask for profiler block */
+ CFA_RSUBTYPE_IDX_TBL_METADATA_PROF,
+ /* Metadata mask for lookup block (for recycling) */
+ CFA_RSUBTYPE_IDX_TBL_METADATA_LKUP,
+ /* Metadata mask for action block */
+ CFA_RSUBTYPE_IDX_TBL_METADATA_ACT,
+ CFA_RSUBTYPE_IDX_TBL_CT_STATE, /**< Connection tracking */
+ CFA_RSUBTYPE_IDX_TBL_RANGE_PROF, /**< Range profile */
+ CFA_RSUBTYPE_IDX_TBL_RANGE_ENTRY, /**< Range entry */
+ CFA_RSUBTYPE_IDX_TBL_EM_FKB, /**< EM FKB table */
+ CFA_RSUBTYPE_IDX_TBL_WC_FKB, /**< WC TCAM FKB table */
+ CFA_RSUBTYPE_IDX_TBL_EM_FKB_MASK, /**< EM FKB Mask table */
+ CFA_RSUBTYPE_IDX_TBL_MAX
+};
+
+/**
+ * Resource sub-types for CFA_RTYPE_TCAM
+ */
+enum cfa_resource_subtype_tcam {
+ CFA_RSUBTYPE_TCAM_L2CTX = 0, /**< L2 contexts TCAM */
+ CFA_RSUBTYPE_TCAM_PROF_TCAM, /**< Profile TCAM */
+ CFA_RSUBTYPE_TCAM_WC, /**< WC lookup TCAM */
+ CFA_RSUBTYPE_TCAM_CT_RULE, /**< Connection tracking TCAM */
+ CFA_RSUBTYPE_TCAM_VEB, /**< VEB TCAM */
+ CFA_RSUBTYPE_TCAM_FEATURE_CHAIN, /**< Feature chain TCAM */
+ CFA_RSUBTYPE_TCAM_MAX
+};
+
+/**
+ * Resource sub-types for CFA_RTYPE_IF_TBL
+ */
+enum cfa_resource_subtype_if_tbl {
+ /** ILT table indexed by SVIF
+ */
+ CFA_RSUBTYPE_IF_TBL_ILT = 0,
+ /** VSPT table
+ */
+ CFA_RSUBTYPE_IF_TBL_VSPT,
+ /** Profiler partition default action record pointer
+ */
+ CFA_RSUBTYPE_IF_TBL_PROF_PARIF_DFLT_ACT_PTR,
+ /** Profiler partition error action record pointer
+ */
+ CFA_RSUBTYPE_IF_TBL_PROF_PARIF_ERR_ACT_PTR,
+ CFA_RSUBTYPE_IF_TBL_EPOCH0, /**< Epoch0 mask table */
+ CFA_RSUBTYPE_IF_TBL_EPOCH1, /**< Epoch1 mask table */
+ CFA_RSUBTYPE_IF_TBL_LAG, /**< LAG Table */
+ CFA_RSUBTYPE_IF_TBL_MAX
+};
+
+/**
+ * Resource sub-types for CFA_RTYPE_CMM
+ */
+enum cfa_resource_subtype_cmm {
+ CFA_RSUBTYPE_CMM_INT_ACT_B0 = 0, /**< SRAM Bank 0 */
+ CFA_RSUBTYPE_CMM_INT_ACT_B1, /**< SRAM Bank 1 */
+ CFA_RSUBTYPE_CMM_INT_ACT_B2, /**< SRAM Bank 2 */
+ CFA_RSUBTYPE_CMM_INT_ACT_B3, /**< SRAM Bank 3 */
+ CFA_RSUBTYPE_CMM_ACT, /**< Action table */
+ CFA_RSUBTYPE_CMM_LKUP, /**< EM lookup table */
+ CFA_RSUBTYPE_CMM_MAX
+};
+
+#define CFA_RSUBTYPE_GLB_FLD_MAX 1
+#define CFA_RSUBTYPE_SM_MAX 1
+#define CFA_RSUBTYPE_TSM_MAX 1
+#define CFA_RSUBTYPE_TIM_MAX 1
+
+/**
+ * Resource sub-types for CFA_RTYPE_GIM
+ */
+enum cfa_resource_subtype_gim {
+ CFA_RSUBTYPE_GIM_DOMAIN_0 = 0, /**< Domain 0 */
+ CFA_RSUBTYPE_GIM_DOMAIN_1, /**< Domain 1 */
+ CFA_RSUBTYPE_GIM_DOMAIN_2, /**< Domain 2 */
+ CFA_RSUBTYPE_GIM_DOMAIN_3, /**< Domain 3 */
+ CFA_RSUBTYPE_GIM_MAX
+};
+
+/**
+ * Total number of resource subtypes
+ */
+#define CFA_NUM_RSUBTYPES \
+ (CFA_RSUBTYPE_IDENT_MAX + CFA_RSUBTYPE_IDX_TBL_MAX + \
+ CFA_RSUBTYPE_TCAM_MAX + CFA_RSUBTYPE_IF_TBL_MAX + \
+ CFA_RSUBTYPE_CMM_MAX + CFA_RSUBTYPE_GLB_FLD_MAX + \
+ CFA_RSUBTYPE_SM_MAX + CFA_RSUBTYPE_TSM_MAX + CFA_RSUBTYPE_TIM_MAX + \
+ CFA_RSUBTYPE_GIM_MAX)
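
CFA_NUM_RSUBTYPES is simply the sum of the per-type sub-type maxima defined above. One way a driver might keep that breakdown per resource type is sketched below (not part of the generated header); the array name and the zero entries for the reserved gap between CFA_RTYPE_GLB_FLD and CFA_RTYPE_HW_MAX are assumptions for illustration.

#include <stdint.h>

/* Sketch: sub-type count per resource type; the non-zero entries sum to
 * CFA_NUM_RSUBTYPES. Indices in the reserved gap stay zero.
 */
static const uint16_t cfa_rsubtype_counts[CFA_RTYPE_MAX] = {
	[CFA_RTYPE_IDENT]   = CFA_RSUBTYPE_IDENT_MAX,
	[CFA_RTYPE_IDX_TBL] = CFA_RSUBTYPE_IDX_TBL_MAX,
	[CFA_RTYPE_TCAM]    = CFA_RSUBTYPE_TCAM_MAX,
	[CFA_RTYPE_IF_TBL]  = CFA_RSUBTYPE_IF_TBL_MAX,
	[CFA_RTYPE_CMM]     = CFA_RSUBTYPE_CMM_MAX,
	[CFA_RTYPE_GLB_FLD] = CFA_RSUBTYPE_GLB_FLD_MAX,
	[CFA_RTYPE_SM]      = CFA_RSUBTYPE_SM_MAX,
	[CFA_RTYPE_TSM]     = CFA_RSUBTYPE_TSM_MAX,
	[CFA_RTYPE_TIM]     = CFA_RSUBTYPE_TIM_MAX,
	[CFA_RTYPE_GIM]     = CFA_RSUBTYPE_GIM_MAX,
};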
+
+/**
+ * @}
+ */
+
+#endif /* _CFA_RESOURCES_H_ */
new file mode 100644
@@ -0,0 +1,273 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_trace.h
+ *
+ * @brief CFA logging macros
+ */
+
+#ifndef __CFA_TRACE_H_
+#define __CFA_TRACE_H_
+
+/*!
+ * \file
+ * \brief CFA logging macros and functions
+ * @{
+ */
+
+#ifndef COMP_ID
+#error COMP_ID must be defined in the module including this file.
+#endif
+
+/* These must be defined before the platform specific header is included. */
+#ifndef CFA_DEBUG_LEVEL_DBG
+#define CFA_DEBUG_LEVEL_DBG 0x0
+#define CFA_DEBUG_LEVEL_INFO 0x1
+#define CFA_DEBUG_LEVEL_WARN 0x2
+#define CFA_DEBUG_LEVEL_CRITICAL 0x3
+#define CFA_DEBUG_LEVEL_FATAL 0x4
+#endif
+
+/* Include platform specific CFA logging api header */
+#include "cfa_debug_defs.h"
+
+/* #define CFA_DYNAMIC_TRACE_FILTERING */
+
+/** @name Default Log Levels
+ * Default log level for each component. If not defined for a component, the
+ * log level for that component defaults to 0 (CFA_DEBUG_LEVEL_DBG).
+ * @{
+ */
+#define CFA_BLD_DEBUG_LEVEL CFA_DEBUG_LEVEL_INFO
+#define CFA_CMM_DEBUG_LEVEL CFA_DEBUG_LEVEL_INFO
+#define CFA_GIM_DEBUG_LEVEL CFA_DEBUG_LEVEL_INFO
+#define CFA_HOSTIF_DEBUG_LEVEL CFA_DEBUG_LEVEL_INFO
+#define CFA_IDM_DEBUG_LEVEL CFA_DEBUG_LEVEL_INFO
+#define CFA_OIM_DEBUG_LEVEL CFA_DEBUG_LEVEL_INFO
+#define CFA_RM_DEBUG_LEVEL CFA_DEBUG_LEVEL_INFO
+#define CFA_SM_DEBUG_LEVEL CFA_DEBUG_LEVEL_INFO
+#define CFA_TBM_DEBUG_LEVEL CFA_DEBUG_LEVEL_INFO
+#define CFA_TCM_DEBUG_LEVEL CFA_DEBUG_LEVEL_INFO
+#define CFA_TIM_DEBUG_LEVEL CFA_DEBUG_LEVEL_INFO
+#define CFA_TPM_DEBUG_LEVEL CFA_DEBUG_LEVEL_INFO
+#define CFA_TSM_DEBUG_LEVEL CFA_DEBUG_LEVEL_INFO
+/** @} */
+
+/* Do not use these macros directly */
+
+/* \cond DO_NOT_DOCUMENT */
+#define JOIN(a, b) a##b
+#define JOIN3(a, b, c) a##b##c
+#define CFA_COMP_NAME(ID) JOIN(CFA_COMP_, ID)
+#define CFA_COMP_DBG_LEVEL(ID) JOIN3(CFA_, ID, _DEBUG_LEVEL)
+#define CFA_TRACE_STRINGIFY(x) #x
+#define CFA_TRACE_LINE_STRING(line) CFA_TRACE_STRINGIFY(line)
+#define CFA_TRACE_LINE() CFA_TRACE_LINE_STRING(__LINE__)
+#define CFA_LOG(level, format, ...) \
+ CFA_TRACE(level, "%s:" CFA_TRACE_LINE() ": " format, __func__, \
+ ##__VA_ARGS__)
+
+#define CFA_LOG_FL(function, line, level, format, ...) \
+ CFA_TRACE(level, "%s: %s: " format, function, line, ##__VA_ARGS__)
+
+#ifdef CFA_DYNAMIC_TRACE_FILTERING
+#define CFA_TRACE_FILTERED(component, level, format, ...) \
+ do { \
+ int local_level = level; \
+ if (cfa_trace_enabled(component, local_level)) \
+ CFA_LOG(local_level, format, ##__VA_ARGS__); \
+ } while (0)
+
+#define CFA_TRACE_FILTERED_FL(function, line, component, level, format, ...) \
+ do { \
+ int local_level = level; \
+ if (cfa_trace_enabled(component, local_level)) \
+ CFA_LOG_FL(function, line, local_level, format, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#else
+/* Static log filtering */
+#if CFA_COMP_DBG_LEVEL(COMP_ID) <= CFA_DEBUG_LEVEL_DBG
+#define CFA_TRACE_DBG(format, ...) \
+ CFA_LOG(CFA_DEBUG_LEVEL_DBG, format, ##__VA_ARGS__)
+#define CFA_TRACE_DBG_FL(function, line, format, ...) \
+ CFA_LOG_FL(function, line, CFA_DEBUG_LEVEL_DBG, format, ##__VA_ARGS__)
+#else
+#define CFA_TRACE_DBG(format, ...)
+#define CFA_TRACE_DBG_FL(format, ...)
+#endif
+#if CFA_COMP_DBG_LEVEL(COMP_ID) <= CFA_DEBUG_LEVEL_INFO
+#define CFA_TRACE_INFO(format, ...) \
+ CFA_LOG(CFA_DEBUG_LEVEL_INFO, format, ##__VA_ARGS__)
+#define CFA_TRACE_INFO_FL(function, line, format, ...) \
+ CFA_LOG_FL(function, line, CFA_DEBUG_LEVEL_INFO, format, ##__VA_ARGS__)
+#else
+#define CFA_TRACE_INFO(format, ...)
+#define CFA_TRACE_INFO_FL(function, line, format, ...)
+#endif
+#if CFA_COMP_DBG_LEVEL(COMP_ID) <= CFA_DEBUG_LEVEL_WARN
+#define CFA_TRACE_WARN(format, ...) \
+ CFA_LOG(CFA_DEBUG_LEVEL_WARN, format, ##__VA_ARGS__)
+#define CFA_TRACE_WARN_FL(function, line, format, ...) \
+ CFA_LOG_FL(function, line, CFA_DEBUG_LEVEL_WARN, format, ##__VA_ARGS__)
+#else
+#define CFA_TRACE_WARN(format, ...)
+#define CFA_TRACE_WARN_FL(function, line, format, ...)
+#endif
+#if CFA_COMP_DBG_LEVEL(COMP_ID) <= CFA_DEBUG_LEVEL_CRITICAL
+#define CFA_TRACE_ERR(format, ...) \
+ CFA_LOG(CFA_DEBUG_LEVEL_CRITICAL, format, ##__VA_ARGS__)
+#define CFA_TRACE_ERR_FL(function, line, format, ...) \
+ CFA_LOG_FL(function, line, CFA_DEBUG_LEVEL_CRITICAL, format, \
+ ##__VA_ARGS__)
+#else
+#define CFA_TRACE_ERR(format, ...)
+#define CFA_TRACE_ERR_FL(function, line, format, ...)
+#endif
+#define CFA_TRACE_FATAL(format, ...) \
+	CFA_LOG(CFA_DEBUG_LEVEL_FATAL, format, ##__VA_ARGS__)
+#define CFA_TRACE_FATAL_FL(function, line, format, ...) \
+	CFA_LOG_FL(function, line, CFA_DEBUG_LEVEL_FATAL, format, \
+		   ##__VA_ARGS__)
+
+#endif
+/* \endcond */
+
+/** @name Logging Macros
+ * These macros log with the function and line number of the location invoking
+ * the macro.
+ * @{
+ */
+#ifdef CFA_DYNAMIC_TRACE_FILTERING
+#define CFA_LOG_DBG(format, ...) \
+ CFA_TRACE_FILTERED(CFA_COMP_NAME(COMP_ID), CFA_DEBUG_LEVEL_DBG, \
+ format, ##__VA_ARGS__)
+#define CFA_LOG_INFO(format, ...) \
+ CFA_TRACE_FILTERED(CFA_COMP_NAME(COMP_ID), CFA_DEBUG_LEVEL_INFO, \
+ format, ##__VA_ARGS__)
+#define CFA_LOG_WARN(format, ...) \
+ CFA_TRACE_FILTERED(CFA_COMP_NAME(COMP_ID), CFA_DEBUG_LEVEL_WARN, \
+ format, ##__VA_ARGS__)
+#define CFA_LOG_ERR(format, ...) \
+ CFA_TRACE_FILTERED(CFA_COMP_NAME(COMP_ID), CFA_DEBUG_LEVEL_CRITICAL, \
+ format, ##__VA_ARGS__)
+#define CFA_LOG_FATAL(format, ...) \
+ CFA_TRACE_FILTERED(CFA_COMP_NAME(COMP_ID), CFA_DEBUG_LEVEL_FATAL, \
+ format, ##__VA_ARGS__)
+#else
+#define CFA_LOG_DBG(format, ...) CFA_TRACE_DBG(format, ##__VA_ARGS__)
+#define CFA_LOG_INFO(format, ...) CFA_TRACE_INFO(format, ##__VA_ARGS__)
+#define CFA_LOG_WARN(format, ...) CFA_TRACE_WARN(format, ##__VA_ARGS__)
+#define CFA_LOG_ERR(format, ...) CFA_TRACE_ERR(format, ##__VA_ARGS__)
+#define CFA_LOG_FATAL(format, ...) CFA_TRACE_FATAL(format, ##__VA_ARGS__)
+#endif
+/** @} */
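+
+/*
+ * Illustrative usage (a minimal sketch, not part of the API itself): a module
+ * defines COMP_ID before including this header and then calls the CFA_LOG_*
+ * macros. The component "CMM" and the function below are examples only.
+ *
+ *   #define COMP_ID CMM
+ *   #include "cfa_trace.h"
+ *
+ *   static int example_check(void *db)
+ *   {
+ *           if (db == NULL) {
+ *                   CFA_LOG_ERR("db = %p\n", db);
+ *                   return -EINVAL;
+ *           }
+ *           CFA_LOG_DBG("db %p looks sane\n", db);
+ *           return 0;
+ *   }
+ */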
+
+/** @name Logging Macros with Function
+ * These macros log with the function and line number passed into
+ * the macro.
+ * @{
+ */
+#ifdef CFA_DYNAMIC_TRACE_FILTERING
+#define CFA_LOG_DBG_FL(function, line, format, ...) \
+ CFA_TRACE_FILTERED_FL(function, line, CFA_COMP_NAME(COMP_ID), \
+ CFA_DEBUG_LEVEL_DBG, format, ##__VA_ARGS__)
+#define CFA_LOG_INFO_FL(function, line, format, ...) \
+ CFA_TRACE_FILTERED_FL(function, line, CFA_COMP_NAME(COMP_ID), \
+ CFA_DEBUG_LEVEL_INFO, format, ##__VA_ARGS__)
+#define CFA_LOG_WARN_FL(function, line, format, ...) \
+ CFA_TRACE_FILTERED_FL(function, line, CFA_COMP_NAME(COMP_ID), \
+ CFA_DEBUG_LEVEL_WARN, format, ##__VA_ARGS__)
+#define CFA_LOG_ERR_FL(function, line, format, ...) \
+ CFA_TRACE_FILTERED_FL(function, line, CFA_COMP_NAME(COMP_ID), \
+ CFA_DEBUG_LEVEL_CRITICAL, format, ##__VA_ARGS__)
+#define CFA_LOG_FATAL_FL(function, line, format, ...) \
+ CFA_TRACE_FILTERED_FL(function, line, CFA_COMP_NAME(COMP_ID), \
+ CFA_DEBUG_LEVEL_FATAL, format, ##__VA_ARGS__)
+#else
+#define CFA_LOG_DBG_FL(function, line, format, ...) \
+ CFA_TRACE_DBG_FL(function, line, format, ##__VA_ARGS__)
+#define CFA_LOG_INFO_FL(function, line, format, ...) \
+ CFA_TRACE_INFO_FL(function, line, format, ##__VA_ARGS__)
+#define CFA_LOG_WARN_FL(function, line, format, ...) \
+ CFA_TRACE_WARN_FL(function, line, format, ##__VA_ARGS__)
+#define CFA_LOG_ERR_FL(function, line, format, ...) \
+ CFA_TRACE_ERR_FL(function, line, format, ##__VA_ARGS__)
+#define CFA_LOG_FATAL_FL(function, line, format, ...) \
+ CFA_TRACE_FATAL_FL(function, line, format, ##__VA_ARGS__)
+#endif
+/** @} */
+
+/**
+ * CFA components
+ */
+enum cfa_components {
+ CFA_COMP_BLD = 0,
+ CFA_COMP_FIRST = CFA_COMP_BLD,
+ CFA_COMP_CMM,
+ CFA_COMP_GIM,
+ CFA_COMP_HOSTIF,
+ CFA_COMP_IDM,
+ CFA_COMP_OIM,
+ CFA_COMP_RM,
+ CFA_COMP_SM,
+ CFA_COMP_TBM,
+ CFA_COMP_TCM,
+ CFA_COMP_TIM,
+ CFA_COMP_TPM,
+ CFA_COMP_TSM,
+ CFA_COMP_MAX
+};
+
+#ifdef CFA_DYNAMIC_TRACE_FILTERING
+/**
+ * CFA logging system initialization
+ *
+ * This API initializes the CFA logging infrastructure
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE
+ */
+int cfa_trace_init(void);
+
+/**
+ * CFA logging check if message permitted
+ *
+ * This API indicates if a log message for a component at a given level should
+ * be issued
+ *
+ * @param[in] component
+ * The CFA component
+ *
+ * @param[in] level
+ * The logging level to check for the component
+ *
+ * @return
+ * 0 if message not permitted, non-zero if the message is permitted
+ */
+int cfa_trace_enabled(enum cfa_components component, int level);
+
+/**
+ * CFA logging level set
+ *
+ * This API sets the minimum level of log messages to be issued for a component.
+ *
+ * @param[in] component
+ * The CFA component
+ *
+ * @param[in] level
+ * The logging level to set for the component
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE
+ */
+int cfa_trace_level_set(enum cfa_components component, int level);
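+
+/*
+ * Illustrative usage of the dynamic-filtering API (a sketch, relevant only
+ * when CFA_DYNAMIC_TRACE_FILTERING is defined):
+ *
+ *   if (cfa_trace_init() == 0)
+ *           cfa_trace_level_set(CFA_COMP_CMM, CFA_DEBUG_LEVEL_WARN);
+ */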
+
+#endif /* CFA_DYNAMIC_TRACE_FILTERING */
+
+/** @} */
+
+#endif /* __CFA_TRACE_H_ */
new file mode 100644
@@ -0,0 +1,122 @@
+/****************************************************************************
+ * Copyright(c) 2021 - 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_types.h
+ *
+ * @brief Basic CFA type definitions
+ */
+#ifndef _CFA_TYPES_H_
+#define _CFA_TYPES_H_
+
+/*!
+ * \file
+ * \brief Exported CFA data structures shared between host and firmware
+ * @{
+ */
+
+/** \defgroup CFA_V3 Common CFA Access Framework
+ *
+ * The primary goal of the CFA common HW access framework is to unify the CFA
+ * resource management and hardware programming design for different CFA
+ * applications so the CFA hardware can be properly shared with different
+ * entities. This framework is a collection of the following CFA resource
+ * managers and libraries:
+ *
+ * 1. CFA Memory Manager
+ * 2. CFA Object Instance Manager
+ * 3. CFA Session Manager
+ * 4. CFA TCAM Manager
+ * 5. CFA Table Scope Manager
+ * 6. CFA Hardware Access Library
+ * 7. CFA Builder Library
+ * 8. CFA Index table manager
+ * 9. CFA Utilities Library
+ *
+ **/
+
+/**
+ * CFA HW version definition
+ */
+enum cfa_ver {
+ CFA_P40 = 0, /**< CFA phase 4.0 */
+ CFA_P45 = 1, /**< CFA phase 4.5 */
+ CFA_P58 = 2, /**< CFA phase 5.8 */
+ CFA_P59 = 3, /**< CFA phase 5.9 */
+ CFA_P70 = 4, /**< CFA phase 7.0 */
+ CFA_PMAX = 5
+};
+
+/**
+ * CFA direction definition
+ */
+enum cfa_dir {
+ CFA_DIR_RX = 0, /**< Receive */
+ CFA_DIR_TX = 1, /**< Transmit */
+ CFA_DIR_MAX = 2
+};
+
+/**
+ * CFA Remap Table Type
+ */
+enum cfa_remap_tbl_type {
+ CFA_REMAP_TBL_TYPE_NORMAL = 0,
+ CFA_REMAP_TBL_TYPE_BYPASS,
+ CFA_REMAP_TBL_TYPE_MAX
+};
+
+/**
+ * CFA tracker types
+ */
+enum cfa_track_type {
+	CFA_TRACK_TYPE_INVALID = 0, /**< Invalid */
+	CFA_TRACK_TYPE_SID, /**< Tracked by session id */
+	CFA_TRACK_TYPE_FIRST = CFA_TRACK_TYPE_SID,
+	CFA_TRACK_TYPE_FID, /**< Tracked by function id */
+ CFA_TRACK_TYPE_MAX
+};
+
+/**
+ * CFA Region Type
+ */
+enum cfa_region_type {
+ CFA_REGION_TYPE_LKUP = 0,
+ CFA_REGION_TYPE_ACT,
+ CFA_REGION_TYPE_MAX
+};
+
+/**
+ * CFA application type
+ */
+enum cfa_app_type {
+	CFA_APP_TYPE_AFM = 0, /**< AFM firmware */
+	CFA_APP_TYPE_TF = 1, /**< TruFlow firmware */
+ CFA_APP_TYPE_MAX = 2,
+ CFA_APP_TYPE_INVALID = CFA_APP_TYPE_MAX,
+};
+
+/**
+ * CFA FID types
+ */
+enum cfa_fid_type {
+ CFA_FID_TYPE_FID = 0, /**< General */
+ CFA_FID_TYPE_RFID = 1, /**< Representor */
+ CFA_FID_TYPE_EFID = 2 /**< Endpoint */
+};
+
+/**
+ * CFA search (srchm) modes
+ */
+enum cfa_srch_mode {
+	CFA_SRCH_MODE_FIRST = 0, /**< Start new iteration */
+	CFA_SRCH_MODE_NEXT, /**< Next item in iteration */
+ CFA_SRCH_MODE_MAX
+};
+
+/** @} */
+
+#endif /* _CFA_TYPES_H_ */
new file mode 100644
@@ -0,0 +1,44 @@
+/****************************************************************************
+ * Copyright(c) 2021 - 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_util.h
+ *
+ * @brief CFA specific utility macros used by cfa libraries and manager
+ * sources.
+ */
+
+#ifndef _CFA_UTIL_H_
+#define _CFA_UTIL_H_
+
+/*!
+ * \file
+ * \brief CFA specific utility macros
+ * \ingroup CFA_V3
+ * @{
+ */
+
+/* Bounds (closed interval) check helper macro */
+#define CFA_CHECK_BOUNDS(x, l, h) (((x) >= (l)) && ((x) <= (h)))
+#define CFA_CHECK_UPPER_BOUNDS(x, h) ((x) <= (h))
+
+/*
+ * Join macros to generate device specific object/function names for use by
+ * firmware
+ */
+#define CFA_JOIN2(A, B) A##_##B
+#define CFA_JOIN3(A, B, C) A##B##_##C
+#define CFA_OBJ_NAME(PREFIX, VER, NAME) CFA_JOIN3(PREFIX, VER, NAME)
+#define CFA_FUNC_NAME(PREFIX, VER, NAME) CFA_OBJ_NAME(PREFIX, VER, NAME)
+
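+/*
+ * CFA_ALIGN_LN2(x) evaluates to ceil(log2(x)) + 1 (reading of the expression
+ * below); e.g. CFA_ALIGN_LN2(1) == 1, CFA_ALIGN_LN2(2) == 2 and
+ * CFA_ALIGN_LN2(8) == 4. The memory manager uses it to select the block list
+ * for a given contiguous record count.
+ */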
+/* clang-format off */
+#define CFA_ALIGN_LN2(x) (((x) < 3U) ? (x) : 32U - __builtin_clz((x) - 1U) + 1U)
+/* clang-format on */
+
+/** @} */
+
+#endif /* _CFA_UTIL_H_ */
new file mode 100644
@@ -0,0 +1,52 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_debug_defs.h
+ *
+ * @brief Platform specific CFA Debug log api definitions
+ */
+
+#ifndef __CFA_DEBUG_DEFS_H_
+#define __CFA_DEBUG_DEFS_H_
+
+#include <rte_log.h>
+
+extern int bnxt_logtype_driver;
+
+/*
+ * The cfa_trace infrastructure assumes that log level debug is the lowest
+ * numerically. This is true for firmware, but the RTE log levels have debug as
+ * the highest numeric level, so a conversion is needed when calling rte_log.
+ * We don't want to simply use the RTE log levels since there are checks such
+ * as:
+ *
+ * #if CFA_COMP_DBG_LEVEL(COMP_ID) <= CFA_DEBUG_LEVEL_DBG
+ *
+ * Those checks would not have the desired effect if the RTE log levels are
+ * substituted for the CFA log levels like this:
+ *
+ * #define CFA_DEBUG_LEVEL_DBG RTE_LOG_DEBUG
+ * #define CFA_DEBUG_LEVEL_INFO RTE_LOG_INFO
+ * #define CFA_DEBUG_LEVEL_WARN RTE_LOG_WARNING
+ * #define CFA_DEBUG_LEVEL_CRITICAL RTE_LOG_CRIT
+ * #define CFA_DEBUG_LEVEL_FATAL RTE_LOG_EMERG
+ */
+
+#define CFA_TO_RTE_LOG(level) \
+ ((level) == CFA_DEBUG_LEVEL_DBG ? \
+ RTE_LOG_DEBUG : \
+ (level) == CFA_DEBUG_LEVEL_INFO ? \
+ RTE_LOG_INFO : \
+ (level) == CFA_DEBUG_LEVEL_WARN ? \
+ RTE_LOG_WARNING : \
+ (level) == CFA_DEBUG_LEVEL_CRITICAL ? RTE_LOG_CRIT : \
+ RTE_LOG_EMERG)
+
+#define CFA_TRACE(level, ...) \
+ rte_log(CFA_TO_RTE_LOG(level), bnxt_logtype_driver, ##__VA_ARGS__)
+
+#endif /* __CFA_DEBUG_DEFS_H_ */
new file mode 100644
@@ -0,0 +1,101 @@
+/****************************************************************************
+ * Copyright(c) 2021 - 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file sys_util.h
+ *
+ * @brief Utility macros for bit manipulation, alignments and other
+ * commonly used helper functions. This file should be moved out
+ *        of the CFA v3 folder to a common utilities folder.
+ */
+
+#ifndef _SYS_UTIL_H_
+#define _SYS_UTIL_H_
+
+#include <stdint.h>
+
+#define INVALID_U64 UINT64_MAX
+#define INVALID_U32 UINT32_MAX
+#define INVALID_U16 UINT16_MAX
+#define INVALID_U8 UINT8_MAX
+
+#ifndef ALIGN
+#define ALIGN(x, a) (((x) + (a) - (1)) & ~((a) - (1)))
+#endif
+
+#define ALIGN_256(x) ALIGN(x, 256)
+#define ALIGN_128(x) ALIGN(x, 128)
+#define ALIGN_64(x) ALIGN(x, 64)
+#define ALIGN_32(x) ALIGN(x, 32)
+#define ALIGN_16(x) ALIGN(x, 16)
+#define ALIGN_8(x) ALIGN(x, 8)
+#define ALIGN_4(x) ALIGN(x, 4)
+
+#define NUM_ALIGN_UNITS(x, unit) (((x) + (unit) - (1)) / (unit))
+#define IS_POWER_2(x) (((x) != 0) && (((x) & ((x) - (1))) == 0))
+
+#define NUM_WORDS_ALIGN_32BIT(x) (ALIGN_32(x) / BITS_PER_WORD)
+#define NUM_WORDS_ALIGN_64BIT(x) (ALIGN_64(x) / BITS_PER_WORD)
+#define NUM_WORDS_ALIGN_128BIT(x) (ALIGN_128(x) / BITS_PER_WORD)
+#define NUM_WORDS_ALIGN_256BIT(x) (ALIGN_256(x) / BITS_PER_WORD)
+
+#ifndef MAX
+#define MAX(A, B) ((A) > (B) ? (A) : (B))
+#endif
+
+#ifndef MIN
+#define MIN(A, B) ((A) < (B) ? (A) : (B))
+#endif
+
+#ifndef STRINGIFY
+#define STRINGIFY(X) #X
+#endif
+
+#ifndef ARRAY_SIZE
+#define ELEM_SIZE(ARRAY) sizeof((ARRAY)[0])
+#define ARRAY_SIZE(ARRAY) (sizeof(ARRAY) / ELEM_SIZE(ARRAY))
+#endif
+
+#ifndef BITS_PER_BYTE
+#define BITS_PER_BYTE (8)
+#endif
+
+#ifndef BITS_PER_WORD
+#define BITS_PER_WORD (sizeof(uint32_t) * BITS_PER_BYTE)
+#endif
+
+#ifndef BITS_PER_DWORD
+#define BITS_PER_DWORD (sizeof(uint64_t) * BITS_PER_BYTE)
+#endif
+
+/* Helper macros to get/set/clear Nth bit in a uint8_t bitmap */
+#define BMP_GETBIT(BMP, N) \
+ ((*((uint8_t *)(BMP) + ((N) / 8)) >> ((N) % 8)) & 0x1)
+#define BMP_SETBIT(BMP, N) \
+ do { \
+ uint32_t n = (N); \
+ *((uint8_t *)(BMP) + (n / 8)) |= (0x1U << (n % 8)); \
+ } while (0)
+#define BMP_CLRBIT(BMP, N) \
+ do { \
+ uint32_t n = (N); \
+ *((uint8_t *)(BMP) + (n / 8)) &= \
+ (uint8_t)(~(0x1U << (n % 8))); \
+ } while (0)
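+
+/*
+ * Example (illustrative): with uint8_t bmap[2] = { 0 }, BMP_SETBIT(bmap, 9)
+ * sets bit 1 of bmap[1], BMP_GETBIT(bmap, 9) then returns 1, and
+ * BMP_CLRBIT(bmap, 9) clears it again.
+ */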
+
+#ifndef STATIC_ASSERT
+#ifndef NETXTREME_UT_SUPPORT
+#define STATIC_ASSERT_TYPE_PASTE(cntr) STATIC_ASSERT_TYPE##cntr
+/* Extra expansion level so that __COUNTER__ is expanded before pasting. */
+#define STATIC_ASSERT_TYPE_DEFINE(cntr) STATIC_ASSERT_TYPE_PASTE(cntr)
+#define STATIC_ASSERT(x) \
+	typedef int STATIC_ASSERT_TYPE_DEFINE(__COUNTER__)[(x) ? 1 : -1]
+#else
+#define STATIC_ASSERT_TYPE_DEFINE(cntr)
+#define STATIC_ASSERT(x)
+#endif
+#endif
+
+#endif /* _SYS_UTIL_H_ */
new file mode 100644
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+# Copyright(c) 2020 Broadcom
+
+cflags_options = [
+ '-DCFA_BLD_PRINT_OBJ=1',
+]
+
+foreach option:cflags_options
+ if cc.has_argument(option)
+ cflags += option
+ endif
+endforeach
+
+# Include the folders for headers
+includes += include_directories('./')
+includes += include_directories('./include')
+includes += include_directories('./include/platform/dpdk/')
+includes += include_directories('./bld/p70')
+includes += include_directories('./bld/p70/host')
+includes += include_directories('./bld/include')
+includes += include_directories('./bld/include/host')
+includes += include_directories('./bld/include/p70')
+includes += include_directories('./mm/include')
+includes += include_directories('./tim/include')
+includes += include_directories('./tpm/include')
+
+# Add the source files
+sources += files(
+ 'bld/host/cfa_bld_mpc.c',
+ 'bld/p70/cfa_bld_p70_mpc.c',
+ 'bld/p70/host/cfa_bld_p70_host_mpc_wrapper.c',
+ 'bld/p70/host/cfa_bld_p70_mpcops.c',
+ 'mm/cfa_mm.c',
+ 'tim/cfa_tim.c',
+ 'tpm/cfa_tpm.c')
new file mode 100644
@@ -0,0 +1,42 @@
+#
+# Copyright(c) 2021 Broadcom Limited, all rights reserved
+# Contains proprietary and confidential information.
+#
+# This source file is the property of Broadcom Limited, and
+# may not be copied or distributed in any isomorphic form without
+# the prior written consent of Broadcom Limited.
+#
+
+add_library(cfa-mm-lib-common INTERFACE)
+target_include_directories(cfa-mm-lib-common INTERFACE include
+ ../include
+ ../../include)
+
+set (CFA_MM_SRCS cfa_mm.c)
+
+# Production version
+add_library(cfa-mm-lib STATIC EXCLUDE_FROM_ALL ${CFA_MM_SRCS})
+set_property(TARGET cfa-mm-lib PROPERTY POSITION_INDEPENDENT_CODE 1)
+target_link_libraries(cfa-mm-lib PUBLIC cfa-mm-lib-common nxt-platform nxt-arch)
+target_include_directories(cfa-mm-lib PUBLIC ../include/platform/fw)
+
+# UT version
+add_library(cfa-mm-lib-ut STATIC EXCLUDE_FROM_ALL ${CFA_MM_SRCS})
+set_property(TARGET cfa-mm-lib-ut PROPERTY POSITION_INDEPENDENT_CODE 1)
+target_link_libraries(cfa-mm-lib-ut PUBLIC cfa-mm-lib-common nxt-ut nxt-platform nxt-arch nxt-env-ut)
+target_include_directories(cfa-mm-lib-ut PUBLIC ../include/platform/ut)
+
+set(ignoreMe "${SKIP_MM_UT}")
+if(NOT DEFINED SKIP_MM_UT)
+add_subdirectory(ut)
+endif()
+
+# Update Doxygen Path for mm api documentation
+set(CFA_API_DOC_DIRS ${CFA_API_DOC_DIRS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/include # Public api
+ CACHE INTERNAL "")
+
+# Update Doxygen Path for mm design documentation
+set(CFA_DESIGN_DOC_DIRS ${CFA_DESIGN_DOC_DIRS}
+ ${CMAKE_CURRENT_SOURCE_DIR} # mm implementation
+ CACHE INTERNAL "")
new file mode 100644
@@ -0,0 +1,624 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_mm.c
+ *
+ * @brief CFA Memory Manager apis
+ */
+#define COMP_ID CMM
+#include <rte_branch_prediction.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include "sys_util.h"
+#include "cfa_util.h"
+#include "cfa_types.h"
+#include "cfa_mm.h"
+#include "cfa_mm_priv.h"
+#include "cfa_trace.h"
+
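+/*
+ * The CMM database is one contiguous memory block laid out as follows
+ * (derived from cfa_mm_open() below):
+ *
+ *   struct cfa_mm               database header
+ *   struct cfa_mm_blk_list[]    free list plus one list per contiguous size
+ *   struct cfa_mm_blk[]         one entry per block of records
+ *   uint8_t[]                   allocation bitmap, one bit per record
+ */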
+static void cfa_mm_db_info(uint32_t max_records, uint16_t max_contig_records,
+ uint16_t *records_per_block, uint32_t *num_blocks,
+ uint16_t *num_lists, uint32_t *db_size)
+{
+ *records_per_block =
+ MAX(CFA_MM_MIN_RECORDS_PER_BLOCK, max_contig_records);
+
+ *num_blocks = (max_records / (*records_per_block));
+
+ *num_lists = CFA_ALIGN_LN2(max_contig_records) + 1;
+
+ *db_size = sizeof(struct cfa_mm) +
+ ((*num_blocks) * NUM_ALIGN_UNITS(*records_per_block,
+ CFA_MM_RECORDS_PER_BYTE)) +
+ ((*num_blocks) * sizeof(struct cfa_mm_blk)) +
+ ((*num_lists) * sizeof(struct cfa_mm_blk_list));
+}
+
+int cfa_mm_query(struct cfa_mm_query_parms *parms)
+{
+ uint32_t max_records, num_blocks;
+ uint16_t max_contig_records, num_lists, records_per_block;
+
+ if (unlikely(parms == NULL)) {
+ CFA_LOG_ERR("parms = %p\n", parms);
+ return -EINVAL;
+ }
+
+ max_records = parms->max_records;
+ max_contig_records = (uint16_t)parms->max_contig_records;
+
+ if (unlikely(!(CFA_CHECK_BOUNDS(max_records, 1, CFA_MM_MAX_RECORDS) &&
+ IS_POWER_2(max_contig_records) &&
+ CFA_CHECK_BOUNDS(max_contig_records, 1,
+ CFA_MM_MAX_CONTIG_RECORDS)))) {
+ CFA_LOG_ERR("parms = %p, max_records = %d, "
+ "max_contig_records = %d\n",
+ parms, parms->max_records,
+ parms->max_contig_records);
+ return -EINVAL;
+ }
+
+ cfa_mm_db_info(max_records, max_contig_records, &records_per_block,
+ &num_blocks, &num_lists, &parms->db_size);
+
+ return 0;
+}
+
+int cfa_mm_open(void *cmm, struct cfa_mm_open_parms *parms)
+{
+ uint32_t max_records, num_blocks, db_size, i;
+ uint16_t max_contig_records, num_lists, records_per_block;
+ struct cfa_mm *context = (struct cfa_mm *)cmm;
+
+ if (unlikely(cmm == NULL || parms == NULL)) {
+ CFA_LOG_ERR("cmm = %p, parms = %p\n", cmm, parms);
+ return -EINVAL;
+ }
+
+ max_records = parms->max_records;
+ max_contig_records = (uint16_t)parms->max_contig_records;
+
+ if (unlikely(!(CFA_CHECK_BOUNDS(max_records, 1, CFA_MM_MAX_RECORDS) &&
+ IS_POWER_2(max_contig_records) &&
+ CFA_CHECK_BOUNDS(max_contig_records, 1,
+ CFA_MM_MAX_CONTIG_RECORDS)))) {
+ CFA_LOG_ERR("cmm = %p, parms = %p, db_mem_size = %d, "
+ "max_records = %d max_contig_records = %d\n",
+ cmm, parms, parms->db_mem_size, max_records,
+ max_contig_records);
+ return -EINVAL;
+ }
+
+ cfa_mm_db_info(max_records, max_contig_records, &records_per_block,
+ &num_blocks, &num_lists, &db_size);
+
+ if (unlikely(parms->db_mem_size < db_size)) {
+ CFA_LOG_ERR("cmm = %p, parms = %p, db_mem_size = %d, "
+ "max_records = %d max_contig_records = %d\n",
+ cmm, parms, parms->db_mem_size, max_records,
+ max_contig_records);
+ return -EINVAL;
+ }
+
+ memset(context, 0, parms->db_mem_size);
+
+ context->signature = CFA_MM_SIGNATURE;
+ context->max_records = max_records;
+ context->records_in_use = 0;
+ context->records_per_block = records_per_block;
+ context->max_contig_records = max_contig_records;
+
+ context->blk_list_tbl = (struct cfa_mm_blk_list *)(context + 1);
+ context->blk_tbl =
+ (struct cfa_mm_blk *)(context->blk_list_tbl + num_lists);
+ context->blk_bmap_tbl = (uint8_t *)(context->blk_tbl + num_blocks);
+
+ context->blk_list_tbl[0].first_blk_idx = 0;
+ context->blk_list_tbl[0].current_blk_idx = 0;
+
+ for (i = 1; i < num_lists; i++) {
+ context->blk_list_tbl[i].first_blk_idx = CFA_MM_INVALID32;
+ context->blk_list_tbl[i].current_blk_idx = CFA_MM_INVALID32;
+ }
+
+ for (i = 0; i < num_blocks; i++) {
+ context->blk_tbl[i].prev_blk_idx = i - 1;
+ context->blk_tbl[i].next_blk_idx = i + 1;
+ context->blk_tbl[i].num_free_records = records_per_block;
+ context->blk_tbl[i].first_free_record = 0;
+ context->blk_tbl[i].num_contig_records = 0;
+ }
+
+ context->blk_tbl[num_blocks - 1].next_blk_idx = CFA_MM_INVALID32;
+
+ memset(context->blk_bmap_tbl, 0,
+ num_blocks * NUM_ALIGN_UNITS(records_per_block,
+ CFA_MM_RECORDS_PER_BYTE));
+
+ return 0;
+}
+
+int cfa_mm_close(void *cmm)
+{
+ uint32_t db_size, num_blocks;
+ uint16_t num_lists, records_per_block;
+ struct cfa_mm *context = (struct cfa_mm *)cmm;
+
+ if (unlikely(cmm == NULL || context->signature != CFA_MM_SIGNATURE)) {
+ CFA_LOG_ERR("cmm = %p\n", cmm);
+ return -EINVAL;
+ }
+
+ cfa_mm_db_info(context->max_records, context->max_contig_records,
+ &records_per_block, &num_blocks, &num_lists, &db_size);
+
+ memset(cmm, 0, db_size);
+
+ return 0;
+}
+
+static uint32_t cfa_mm_blk_alloc(struct cfa_mm *context)
+{
+ uint32_t blk_idx;
+ struct cfa_mm_blk_list *free_list;
+
+ free_list = context->blk_list_tbl;
+
+ blk_idx = free_list->first_blk_idx;
+
+ if (unlikely(blk_idx == CFA_MM_INVALID32)) {
+ CFA_LOG_ERR("Out of record blocks\n");
+ return CFA_MM_INVALID32;
+ }
+
+ free_list->first_blk_idx =
+ context->blk_tbl[free_list->first_blk_idx].next_blk_idx;
+
+ free_list->current_blk_idx = free_list->first_blk_idx;
+
+ if (free_list->first_blk_idx != CFA_MM_INVALID32) {
+ context->blk_tbl[free_list->first_blk_idx].prev_blk_idx =
+ CFA_MM_INVALID32;
+ }
+
+ context->blk_tbl[blk_idx].prev_blk_idx = CFA_MM_INVALID32;
+ context->blk_tbl[blk_idx].next_blk_idx = CFA_MM_INVALID32;
+
+ return blk_idx;
+}
+
+static void cfa_mm_blk_free(struct cfa_mm *context, uint32_t blk_idx)
+{
+ struct cfa_mm_blk_list *free_list = context->blk_list_tbl;
+
+ context->blk_tbl[blk_idx].prev_blk_idx = CFA_MM_INVALID32;
+ context->blk_tbl[blk_idx].next_blk_idx = free_list->first_blk_idx;
+ context->blk_tbl[blk_idx].num_free_records = context->records_per_block;
+ context->blk_tbl[blk_idx].first_free_record = 0;
+ context->blk_tbl[blk_idx].num_contig_records = 0;
+
+ if (free_list->first_blk_idx != CFA_MM_INVALID32) {
+ context->blk_tbl[free_list->first_blk_idx].prev_blk_idx =
+ blk_idx;
+ }
+
+ free_list->first_blk_idx = blk_idx;
+ free_list->current_blk_idx = blk_idx;
+}
+
+static void cfa_mm_blk_insert(struct cfa_mm *context,
+ struct cfa_mm_blk_list *blk_list,
+ uint32_t blk_idx)
+{
+ if (blk_list->first_blk_idx == CFA_MM_INVALID32) {
+ blk_list->first_blk_idx = blk_idx;
+ blk_list->current_blk_idx = blk_idx;
+ } else {
+ struct cfa_mm_blk *blk_info = &context->blk_tbl[blk_idx];
+
+ blk_info->prev_blk_idx = CFA_MM_INVALID32;
+ blk_info->next_blk_idx = blk_list->first_blk_idx;
+ context->blk_tbl[blk_list->first_blk_idx].prev_blk_idx =
+ blk_idx;
+ blk_list->first_blk_idx = blk_idx;
+ blk_list->current_blk_idx = blk_idx;
+ }
+}
+
+static void cfa_mm_blk_delete(struct cfa_mm *context,
+ struct cfa_mm_blk_list *blk_list,
+ uint32_t blk_idx)
+{
+ struct cfa_mm_blk *blk_info = &context->blk_tbl[blk_idx];
+
+ if (blk_list->first_blk_idx == CFA_MM_INVALID32)
+ return;
+
+ if (blk_list->first_blk_idx == blk_idx) {
+ blk_list->first_blk_idx = blk_info->next_blk_idx;
+ if (blk_list->first_blk_idx != CFA_MM_INVALID32) {
+ context->blk_tbl[blk_list->first_blk_idx].prev_blk_idx =
+ CFA_MM_INVALID32;
+ }
+ if (blk_list->current_blk_idx == blk_idx)
+ blk_list->current_blk_idx = blk_list->first_blk_idx;
+
+ return;
+ }
+
+ if (blk_info->prev_blk_idx != CFA_MM_INVALID32) {
+ context->blk_tbl[blk_info->prev_blk_idx].next_blk_idx =
+ blk_info->next_blk_idx;
+ }
+
+ if (blk_info->next_blk_idx != CFA_MM_INVALID32) {
+ context->blk_tbl[blk_info->next_blk_idx].prev_blk_idx =
+ blk_info->prev_blk_idx;
+ }
+
+ if (blk_list->current_blk_idx == blk_idx) {
+ if (blk_info->next_blk_idx != CFA_MM_INVALID32) {
+ blk_list->current_blk_idx = blk_info->next_blk_idx;
+ } else {
+ if (blk_info->prev_blk_idx != CFA_MM_INVALID32) {
+ blk_list->current_blk_idx =
+ blk_info->prev_blk_idx;
+ } else {
+ blk_list->current_blk_idx =
+ blk_list->first_blk_idx;
+ }
+ }
+ }
+}
+
+/* Returns true if the bit in the bitmap is set to 'val' else returns false */
+static bool cfa_mm_test_bit(uint8_t *bmap, uint16_t index, uint8_t val)
+{
+ uint8_t shift;
+
+ bmap += index / CFA_MM_RECORDS_PER_BYTE;
+ index %= CFA_MM_RECORDS_PER_BYTE;
+
+ shift = CFA_MM_RECORDS_PER_BYTE - (index + 1);
+ if (val) {
+ if ((*bmap >> shift) & 0x1)
+ return true;
+ } else {
+ if (!((*bmap >> shift) & 0x1))
+ return true;
+ }
+
+ return false;
+}
+
+static int cfa_mm_test_and_set_bits(uint8_t *bmap, uint16_t start,
+ uint16_t count, uint8_t val)
+{
+ uint8_t mask[NUM_ALIGN_UNITS(CFA_MM_MAX_CONTIG_RECORDS,
+ CFA_MM_RECORDS_PER_BYTE) +
+ 1];
+ uint16_t i, j, nbits;
+
+ bmap += start / CFA_MM_RECORDS_PER_BYTE;
+ start %= CFA_MM_RECORDS_PER_BYTE;
+
+ if ((start + count - 1) < CFA_MM_RECORDS_PER_BYTE) {
+ nbits = CFA_MM_RECORDS_PER_BYTE - (start + count);
+ mask[0] = (uint8_t)(((uint16_t)1 << count) - 1);
+ mask[0] <<= nbits;
+ if (val) {
+ if (*bmap & mask[0])
+ return -EINVAL;
+ *bmap |= mask[0];
+ } else {
+ if ((*bmap & mask[0]) != mask[0])
+ return -EINVAL;
+ *bmap &= ~(mask[0]);
+ }
+ return 0;
+ }
+
+ i = 0;
+
+ nbits = CFA_MM_RECORDS_PER_BYTE - start;
+ mask[i++] = (uint8_t)(((uint16_t)1 << nbits) - 1);
+
+ count -= nbits;
+
+ while (count > CFA_MM_RECORDS_PER_BYTE && i < sizeof(mask)) {
+ count -= CFA_MM_RECORDS_PER_BYTE;
+ mask[i++] = 0xff;
+ }
+
+ if (i < sizeof(mask)) {
+ mask[i] = (uint8_t)(((uint16_t)1 << count) - 1);
+ mask[i++] <<= (CFA_MM_RECORDS_PER_BYTE - count);
+ } else {
+ CFA_LOG_ERR("Mask array out of bounds; index:%d.\n", i);
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < i; j++) {
+ if (val) {
+ if (bmap[j] & mask[j])
+ return -EINVAL;
+ } else {
+ if ((bmap[j] & mask[j]) != mask[j])
+ return -EINVAL;
+ }
+ }
+
+ for (j = 0; j < i; j++) {
+ if (val)
+ bmap[j] |= mask[j];
+ else
+ bmap[j] &= ~(mask[j]);
+ }
+
+ return 0;
+}
+
+int cfa_mm_alloc(void *cmm, struct cfa_mm_alloc_parms *parms)
+{
+ int ret = 0;
+ uint16_t list_idx, num_records;
+ uint32_t i, cnt, blk_idx, record_idx;
+ struct cfa_mm_blk_list *blk_list;
+ struct cfa_mm_blk *blk_info;
+ uint8_t *blk_bmap;
+ struct cfa_mm *context = (struct cfa_mm *)cmm;
+
+ if (unlikely(cmm == NULL || parms == NULL ||
+ context->signature != CFA_MM_SIGNATURE)) {
+ CFA_LOG_ERR("cmm = %p parms = %p\n", cmm, parms);
+ return -EINVAL;
+ }
+
+ if (unlikely(!(CFA_CHECK_BOUNDS(parms->num_contig_records, 1,
+ context->max_contig_records) &&
+ IS_POWER_2(parms->num_contig_records)))) {
+ CFA_LOG_ERR("cmm = %p parms = %p num_records = %d\n", cmm,
+ parms, parms->num_contig_records);
+ return -EINVAL;
+ }
+
+ list_idx = CFA_ALIGN_LN2(parms->num_contig_records);
+
+ blk_list = context->blk_list_tbl + list_idx;
+
+ num_records = 1 << (list_idx - 1);
+
+ if (unlikely(context->records_in_use + num_records > context->max_records)) {
+ CFA_LOG_ERR("Requested number (%d) of records not available\n",
+ num_records);
+ ret = -ENOMEM;
+ goto cfa_mm_alloc_exit;
+ }
+
+ if (blk_list->first_blk_idx == CFA_MM_INVALID32) {
+ blk_idx = cfa_mm_blk_alloc(context);
+ if (unlikely(blk_idx == CFA_MM_INVALID32)) {
+ ret = -ENOMEM;
+ goto cfa_mm_alloc_exit;
+ }
+
+ cfa_mm_blk_insert(context, blk_list, blk_idx);
+
+ blk_info = &context->blk_tbl[blk_idx];
+
+ blk_info->num_contig_records = num_records;
+ } else {
+ blk_idx = blk_list->current_blk_idx;
+ blk_info = &context->blk_tbl[blk_idx];
+ }
+
+ while (blk_info->num_free_records < num_records) {
+ if (blk_info->next_blk_idx == CFA_MM_INVALID32 || !blk_info->num_free_records) {
+ blk_idx = cfa_mm_blk_alloc(context);
+ if (unlikely(blk_idx == CFA_MM_INVALID32)) {
+ ret = -ENOMEM;
+ goto cfa_mm_alloc_exit;
+ }
+
+ cfa_mm_blk_insert(context, blk_list, blk_idx);
+
+ blk_info = &context->blk_tbl[blk_idx];
+
+ blk_info->num_contig_records = num_records;
+ } else {
+ blk_idx = blk_info->next_blk_idx;
+ blk_info = &context->blk_tbl[blk_idx];
+
+ blk_list->current_blk_idx = blk_idx;
+ }
+ }
+
+ blk_bmap = context->blk_bmap_tbl + blk_idx *
+ context->records_per_block /
+ CFA_MM_RECORDS_PER_BYTE;
+
+ record_idx = blk_info->first_free_record;
+
+ if (unlikely(cfa_mm_test_and_set_bits(blk_bmap, record_idx, num_records, 1))) {
+ CFA_LOG_ERR("Records are already allocated. record_idx = %d, "
+ "num_records = %d\n",
+ record_idx, num_records);
+ return -EINVAL;
+ }
+
+ parms->record_offset =
+ (blk_idx * context->records_per_block) + record_idx;
+
+ parms->num_contig_records = num_records;
+
+ blk_info->num_free_records -= num_records;
+
+ if (!blk_info->num_free_records) {
+ blk_info->first_free_record = context->records_per_block;
+ } else {
+ cnt = NUM_ALIGN_UNITS(context->records_per_block,
+ CFA_MM_RECORDS_PER_BYTE);
+
+ for (i = (record_idx + num_records) / CFA_MM_RECORDS_PER_BYTE;
+ i < cnt; i++) {
+ if (blk_bmap[i] != 0xff) {
+ uint8_t bmap = blk_bmap[i];
+ blk_info->first_free_record =
+ i * CFA_MM_RECORDS_PER_BYTE;
+ while (bmap & 0x80) {
+ bmap <<= 1;
+ blk_info->first_free_record++;
+ }
+ break;
+ }
+ }
+ }
+
+ context->records_in_use += num_records;
+
+ ret = 0;
+
+cfa_mm_alloc_exit:
+
+ parms->used_count = context->records_in_use;
+
+ parms->all_used = (context->records_in_use >= context->max_records);
+
+ return ret;
+}
+
+int cfa_mm_free(void *cmm, struct cfa_mm_free_parms *parms)
+{
+ uint16_t list_idx, num_records;
+ uint32_t blk_idx, record_idx;
+ struct cfa_mm_blk *blk_info;
+ struct cfa_mm_blk_list *blk_list;
+ uint8_t *blk_bmap;
+ struct cfa_mm *context = (struct cfa_mm *)cmm;
+
+ if (unlikely(cmm == NULL || parms == NULL ||
+ context->signature != CFA_MM_SIGNATURE)) {
+ CFA_LOG_ERR("cmm = %p parms = %p\n", cmm, parms);
+ return -EINVAL;
+ }
+
+ if (unlikely(!(parms->record_offset < context->max_records &&
+ CFA_CHECK_BOUNDS(parms->num_contig_records, 1,
+ context->max_contig_records) &&
+ IS_POWER_2(parms->num_contig_records)))) {
+ CFA_LOG_ERR("cmm = %p, parms = %p, record_offset = %d, "
+ "num_contig_records = %d\n",
+ cmm, parms, parms->record_offset,
+ parms->num_contig_records);
+ return -EINVAL;
+ }
+
+ record_idx = parms->record_offset % context->records_per_block;
+ blk_idx = parms->record_offset / context->records_per_block;
+
+ list_idx = CFA_ALIGN_LN2(parms->num_contig_records);
+
+ blk_list = &context->blk_list_tbl[list_idx];
+
+ if (unlikely(blk_list->first_blk_idx == CFA_MM_INVALID32)) {
+ CFA_LOG_ERR("Records were not allocated\n");
+ return -EINVAL;
+ }
+
+ num_records = 1 << (list_idx - 1);
+
+ blk_info = &context->blk_tbl[blk_idx];
+
+ if (unlikely(blk_info->num_contig_records != num_records)) {
+ CFA_LOG_ERR("num_contig_records (%d) doesn't match the "
+ "num_contig_records (%d) of the allocation\n",
+ num_records, blk_info->num_contig_records);
+ return -EINVAL;
+ }
+
+ blk_bmap = context->blk_bmap_tbl + blk_idx *
+ context->records_per_block /
+ CFA_MM_RECORDS_PER_BYTE;
+
+ if (unlikely(cfa_mm_test_and_set_bits(blk_bmap, record_idx, num_records, 0))) {
+ CFA_LOG_ERR("Records are not allocated. record_idx = %d, "
+ "num_records = %d\n",
+ record_idx, num_records);
+ return -EINVAL;
+ }
+
+ blk_info->num_free_records += num_records;
+
+ if (blk_info->num_free_records >= context->records_per_block) {
+ cfa_mm_blk_delete(context, blk_list, blk_idx);
+ cfa_mm_blk_free(context, blk_idx);
+ } else {
+ if (blk_info->num_free_records == num_records) {
+ cfa_mm_blk_delete(context, blk_list, blk_idx);
+ cfa_mm_blk_insert(context, blk_list, blk_idx);
+ blk_info->first_free_record = record_idx;
+ } else {
+ if (record_idx < blk_info->first_free_record)
+ blk_info->first_free_record = record_idx;
+ }
+ }
+
+ context->records_in_use -= num_records;
+
+ parms->used_count = context->records_in_use;
+
+ return 0;
+}
+
+int cfa_mm_entry_size_get(void *cmm, uint32_t entry_id, uint8_t *size)
+{
+ uint8_t *blk_bmap;
+ struct cfa_mm_blk *blk_info;
+ struct cfa_mm *context = (struct cfa_mm *)cmm;
+ uint32_t blk_idx, record_idx;
+
+ if (unlikely(cmm == NULL || size == NULL ||
+ context->signature != CFA_MM_SIGNATURE)) {
+		CFA_LOG_ERR("cmm = %p size = %p\n", cmm, size);
+ return -EINVAL;
+ }
+
+ if (unlikely(!(entry_id < context->max_records))) {
+ CFA_LOG_ERR("cmm = %p, entry_id = %d\n", cmm, entry_id);
+ return -EINVAL;
+ }
+
+ blk_idx = entry_id / context->records_per_block;
+ blk_info = &context->blk_tbl[blk_idx];
+ record_idx = entry_id % context->records_per_block;
+
+ /*
+	 * The block is unused if num_contig_records is 0, i.e. no entries
+	 * have been allocated from it.
+ */
+ if (unlikely(blk_info->num_contig_records == 0))
+ return -ENOENT;
+
+ /*
+	 * Check that the entry is indeed allocated. It suffices to check
+	 * whether the bit for the first record of the entry is set.
+ */
+ blk_bmap = context->blk_bmap_tbl + blk_idx *
+ context->records_per_block /
+ CFA_MM_RECORDS_PER_BYTE;
+
+ if (cfa_mm_test_bit(blk_bmap, record_idx, 1)) {
+ *size = blk_info->num_contig_records;
+ return 0;
+ } else {
+ return -ENOENT;
+ }
+}
new file mode 100644
@@ -0,0 +1,92 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_mm_priv.h
+ *
+ * @brief CFA Memory Manager private API definitions
+ */
+
+#ifndef _CFA_MM_PRIV_H_
+#define _CFA_MM_PRIV_H_
+
+#define CFA_MM_SIGNATURE 0xCFA66C89
+
+#define CFA_MM_INVALID8 0xFF
+#define CFA_MM_INVALID16 0xFFFF
+#define CFA_MM_INVALID32 0xFFFFFFFF
+#define CFA_MM_INVALID64 0xFFFFFFFFFFFFFFFFULL
+
+#define CFA_MM_MAX_RECORDS (64 * 1024 * 1024)
+#define CFA_MM_MAX_CONTIG_RECORDS 8
+#define CFA_MM_RECORDS_PER_BYTE 8
+#define CFA_MM_MIN_RECORDS_PER_BLOCK 8
+
+/**
+ * CFA Records block
+ *
+ * Structure used to store the CFA record block info
+ */
+struct cfa_mm_blk {
+ /* Index of the previous block in the list */
+ uint32_t prev_blk_idx;
+ /* Index of the next block in the list */
+ uint32_t next_blk_idx;
+ /* Number of free records available in the block */
+ uint16_t num_free_records;
+ /* Location of first free record in the block */
+ uint16_t first_free_record;
+ /* Number of contiguous records */
+ uint16_t num_contig_records;
+ /* Reserved for future use */
+ uint16_t reserved;
+};
+
+/**
+ * CFA Record block list
+ *
+ * Structure used to store CFA Record block list info
+ */
+struct cfa_mm_blk_list {
+ /* Index of the first block in the list */
+ uint32_t first_blk_idx;
+ /* Index of the current block having free records */
+ uint32_t current_blk_idx;
+};
+
+/**
+ * CFA memory manager Database
+ *
+ * Structure used to store CFA memory manager database info
+ */
+struct cfa_mm {
+ /* Signature of the CFA Memory Manager Database */
+ uint32_t signature;
+ /* Maximum number of CFA Records */
+ uint32_t max_records;
+ /* Number of CFA Records in use*/
+ uint32_t records_in_use;
+ /* Number of Records per block */
+ uint16_t records_per_block;
+ /* Maximum number of contiguous records */
+ uint16_t max_contig_records;
+ /**
+ * Block list table stores the info of lists of blocks
+ * for various numbers of contiguous records
+ */
+ struct cfa_mm_blk_list *blk_list_tbl;
+ /**
+ * Block table stores the info about the blocks of CFA Records
+ */
+ struct cfa_mm_blk *blk_tbl;
+ /**
+ * Block bitmap table stores bit maps for the blocks of CFA Records
+ */
+ uint8_t *blk_bmap_tbl;
+};
+
+#endif /* _CFA_MM_PRIV_H_ */
new file mode 100644
@@ -0,0 +1,173 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_mm.h
+ *
+ * @brief CFA Memory Manager Public API definitions
+ */
+#ifndef _CFA_MM_H_
+#define _CFA_MM_H_
+
+/**
+ * @addtogroup CFA_MM CFA Memory Manager
+ * \ingroup CFA_V3
+ * A CFA memory manager (Document Control: DCSG00988445) is an object instance
+ * within the CFA service module that is responsible for managing CFA-related
+ * memories such as Thor2 CFA backing stores, Thor CFA action SRAM, etc. It
+ * is designed to operate in firmware or as part of the host TruFlow stack.
+ * Each manager instance consists of a number of bank databases with each
+ * database managing a pool of CFA memory.
+ *
+ * @{
+ */
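+
+/*
+ * Typical call flow (a minimal sketch; the buffer handling and the example
+ * parameter values are illustrative, not mandated by the API):
+ *
+ *   struct cfa_mm_query_parms qparms = { .max_records = 1024,
+ *                                        .max_contig_records = 8 };
+ *   struct cfa_mm_open_parms oparms;
+ *   struct cfa_mm_alloc_parms aparms = { .num_contig_records = 4 };
+ *   struct cfa_mm_free_parms fparms;
+ *   void *cmm;
+ *
+ *   cfa_mm_query(&qparms);                   // learn required db_size
+ *   cmm = malloc(qparms.db_size);            // caller supplies the memory
+ *   oparms.db_mem_size = qparms.db_size;
+ *   oparms.max_records = qparms.max_records;
+ *   oparms.max_contig_records = qparms.max_contig_records;
+ *   cfa_mm_open(cmm, &oparms);
+ *   cfa_mm_alloc(cmm, &aparms);              // returns aparms.record_offset
+ *   fparms.record_offset = aparms.record_offset;
+ *   fparms.num_contig_records = aparms.num_contig_records;
+ *   cfa_mm_free(cmm, &fparms);
+ *   cfa_mm_close(cmm);
+ *   free(cmm);
+ */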
+
+/** CFA Memory Manager database query params structure
+ *
+ * Structure of database params
+ */
+struct cfa_mm_query_parms {
+ /** [in] Maximum number of CFA records */
+ uint32_t max_records;
+	/** [in] Max contiguous CFA records per alloc (must be a power of 2). */
+ uint32_t max_contig_records;
+ /** [out] Memory required for Database */
+ uint32_t db_size;
+};
+
+/** CFA Memory Manager open parameters
+ *
+ * Structure to store CFA MM open parameters
+ */
+struct cfa_mm_open_parms {
+ /** [in] Size of memory allocated for CFA MM database */
+ uint32_t db_mem_size;
+ /** [in] Max number of CFA records */
+ uint32_t max_records;
+ /** [in] Maximum number of contiguous CFA records */
+ uint16_t max_contig_records;
+};
+
+/** CFA Memory Manager record alloc parameters
+ *
+ * Structure to contain parameters for record alloc
+ */
+struct cfa_mm_alloc_parms {
+ /** [in] Number of contiguous CFA records */
+ uint32_t num_contig_records;
+ /** [out] Offset of the first of the records allocated */
+ uint32_t record_offset;
+ /** [out] Total number of records already allocated */
+ uint32_t used_count;
+ /** [out] Flag to indicate if all the records are allocated */
+ uint32_t all_used;
+};
+
+/** CFA Memory Manager record free parameters
+ *
+ * Structure to contain parameters for record free
+ */
+struct cfa_mm_free_parms {
+ /** [in] Offset of the first of the records allocated */
+ uint32_t record_offset;
+ /** [in] Number of contiguous CFA records */
+ uint32_t num_contig_records;
+ /** [out] Total number of records already allocated */
+ uint32_t used_count;
+};
+
+/** CFA Memory Manager query API
+ *
+ * This API returns the size of memory required for internal data structures to
+ * manage the pool of CFA Records with given parameters.
+ *
+ * @param[in,out] parms
+ * CFA Memory manager query data base parameters.
+ *
+ * @return
+ * Returns 0 if the query is successful, Error Code otherwise
+ */
+int cfa_mm_query(struct cfa_mm_query_parms *parms);
+
+/** CFA Memory Manager open API
+ *
+ * This API initializes the CFA Memory Manager database
+ *
+ * @param[in] cmm
+ *   Pointer to the memory used for the CFA Memory Manager Database
+ *
+ * @param[in] parms
+ * CFA Memory manager data base parameters.
+ *
+ * @return
+ * Returns 0 if the initialization is successful, Error Code otherwise
+ */
+int cfa_mm_open(void *cmm, struct cfa_mm_open_parms *parms);
+
+/** CFA Memory Manager close API
+ *
+ * This API frees the CFA Memory Manager database
+ *
+ * @param[in] cmm
+ * Pointer to the database memory for the record pool
+ *
+ * @return
+ *   Returns 0 if the close is successful, Error Code otherwise
+ */
+int cfa_mm_close(void *cmm);
+
+/** CFA Memory Manager Allocate CFA Records API
+ *
+ * This API allocates the requested number of contiguous CFA Records
+ *
+ * @param[in] cmm
+ * Pointer to the database from which to allocate CFA Records
+ *
+ * @param[in,out] parms
+ * CFA MM alloc records parameters
+ *
+ * @return
+ *   Returns 0 if the allocation is successful, Error Code otherwise
+ */
+int cfa_mm_alloc(void *cmm, struct cfa_mm_alloc_parms *parms);
+
+/** CFA Memory Manager Free CFA Records API
+ *
+ * This API frees the requested number of contiguous CFA Records
+ *
+ * @param[in] cmm
+ * Pointer to the database from which to free CFA Records
+ *
+ * @param[in,out] parms
+ * CFA MM free records parameters
+ *
+ * @return
+ *   Returns 0 if the free is successful, Error Code otherwise
+ */
+int cfa_mm_free(void *cmm, struct cfa_mm_free_parms *parms);
+
+/** CFA Memory Manager Get Entry Size API
+ *
+ * This API retrieves the size of an allocated CMM entry.
+ *
+ * @param[in] cmm
+ * Pointer to the database from which to allocate CFA Records
+ *
+ * @param[in] entry_id
+ * Index of the allocated entry.
+ *
+ * @param[out] size
+ * Number of contiguous records in the entry.
+ *
+ * @return
+ * Returns 0 if successful, negative errno otherwise
+ */
+int cfa_mm_entry_size_get(void *cmm, uint32_t entry_id, uint8_t *size);
+
+/**@}*/
+
+#endif /* _CFA_MM_H_ */
new file mode 100644
@@ -0,0 +1,43 @@
+#
+# Copyright(c) 2021 Broadcom Limited, all rights reserved
+# Contains proprietary and confidential information.
+#
+# This source file is the property of Broadcom Limited, and
+# may not be copied or distributed in any isomorphic form without
+# the prior written consent of Broadcom Limited.
+#
+
+add_library(cfa-tim-lib-common INTERFACE)
+target_include_directories(cfa-tim-lib-common INTERFACE include
+ ../include
+ ../../include
+ ../../../tf_core)
+
+set (CFA_TIM_SRCS cfa_tim.c)
+
+# Production version
+add_library(cfa-tim-lib STATIC EXCLUDE_FROM_ALL ${CFA_TIM_SRCS})
+set_property(TARGET cfa-tim-lib PROPERTY POSITION_INDEPENDENT_CODE 1)
+target_link_libraries(cfa-tim-lib PUBLIC cfa-tim-lib-common nxt-platform nxt-arch)
+target_include_directories(cfa-tim-lib PUBLIC ../include/platform/fw)
+
+# UT version
+add_library(cfa-tim-lib-ut STATIC EXCLUDE_FROM_ALL ${CFA_TIM_SRCS})
+set_property(TARGET cfa-tim-lib-ut PROPERTY POSITION_INDEPENDENT_CODE 1)
+target_link_libraries(cfa-tim-lib-ut PUBLIC cfa-tim-lib-common nxt-ut nxt-platform nxt-arch nxt-env-ut)
+target_include_directories(cfa-tim-lib-ut PUBLIC ../include/platform/ut)
+
+set(ignoreMe "${SKIP_TIM_UT}")
+if(NOT DEFINED SKIP_TIM_UT)
+add_subdirectory(ut)
+endif()
+
+# Update Doxygen Path for tim api documentation
+set(CFA_API_DOC_DIRS ${CFA_API_DOC_DIRS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/include # Public api
+ CACHE INTERNAL "")
+
+# Update Doxygen Path for tim design documentation
+set(CFA_DESIGN_DOC_DIRS ${CFA_DESIGN_DOC_DIRS}
+ ${CMAKE_CURRENT_SOURCE_DIR} # tim implementation
+ CACHE INTERNAL "")
new file mode 100644
@@ -0,0 +1,124 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_tim.c
+ *
+ * @brief CFA Table Scope Instance Manager apis
+ */
+#define COMP_ID TIM
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include "cfa_util.h"
+#include "cfa_types.h"
+#include "cfa_tim.h"
+#include "cfa_tim_priv.h"
+#include "cfa_trace.h"
+
+static uint32_t cfa_tim_size(uint8_t max_tbl_scopes, uint8_t max_regions)
+{
+ return (sizeof(struct cfa_tim) +
+ (max_tbl_scopes * max_regions * CFA_DIR_MAX) * sizeof(void *));
+}
+
+int cfa_tim_query(uint8_t max_tbl_scopes, uint8_t max_regions,
+ uint32_t *tim_db_size)
+{
+ if (tim_db_size == NULL) {
+ CFA_LOG_ERR("tim_db_size = %p\n", tim_db_size);
+ return -EINVAL;
+ }
+
+ *tim_db_size = cfa_tim_size(max_tbl_scopes, max_regions);
+
+ return 0;
+}
+
+int cfa_tim_open(void *tim, uint32_t tim_db_size, uint8_t max_tbl_scopes,
+ uint8_t max_regions)
+{
+ struct cfa_tim *ctx = (struct cfa_tim *)tim;
+
+ if (tim == NULL) {
+ CFA_LOG_ERR("tim = %p\n", tim);
+ return -EINVAL;
+ }
+ if (tim_db_size < cfa_tim_size(max_tbl_scopes, max_regions)) {
+ CFA_LOG_ERR("max_tbl_scopes = %d, max_regions = %d\n",
+ max_tbl_scopes, max_regions);
+ return -EINVAL;
+ }
+
+ memset(tim, 0, tim_db_size);
+
+ ctx->signature = CFA_TIM_SIGNATURE;
+ ctx->max_tsid = max_tbl_scopes;
+ ctx->max_regions = max_regions;
+ ctx->tpm_tbl = (void **)(ctx + 1);
+
+ return 0;
+}
+
+int cfa_tim_close(void *tim)
+{
+ struct cfa_tim *ctx = (struct cfa_tim *)tim;
+
+ if (tim == NULL || ctx->signature != CFA_TIM_SIGNATURE) {
+ CFA_LOG_ERR("tim = %p\n", tim);
+ return -EINVAL;
+ }
+
+ memset(tim, 0, cfa_tim_size(ctx->max_tsid, ctx->max_regions));
+
+ return 0;
+}
+
+int cfa_tim_tpm_inst_set(void *tim, uint8_t tsid, uint8_t region_id,
+ int dir, void *tpm_inst)
+{
+ struct cfa_tim *ctx = (struct cfa_tim *)tim;
+
+ if (tim == NULL || ctx->signature != CFA_TIM_SIGNATURE) {
+ CFA_LOG_ERR("tim = %p\n", tim);
+ return -EINVAL;
+ }
+
+ if (!(CFA_CHECK_UPPER_BOUNDS(tsid, ctx->max_tsid - 1) &&
+ CFA_CHECK_UPPER_BOUNDS(region_id, ctx->max_regions - 1))) {
+ CFA_LOG_ERR("tsid = %d, region_id = %d\n", tsid, region_id);
+ return -EINVAL;
+ }
+
+ ctx->tpm_tbl[CFA_TIM_MAKE_INDEX(tsid, region_id, dir, ctx->max_regions, ctx->max_tsid)] =
+ tpm_inst;
+ return 0;
+}
+
+int cfa_tim_tpm_inst_get(void *tim, uint8_t tsid, uint8_t region_id,
+ int dir, void **tpm_inst)
+{
+ struct cfa_tim *ctx = (struct cfa_tim *)tim;
+
+ if (tim == NULL || tpm_inst == NULL ||
+ ctx->signature != CFA_TIM_SIGNATURE) {
+ CFA_LOG_ERR("tim = %p\n", tim);
+ return -EINVAL;
+ }
+
+ if (!(CFA_CHECK_UPPER_BOUNDS(tsid, ctx->max_tsid - 1) &&
+ CFA_CHECK_UPPER_BOUNDS(region_id, ctx->max_regions - 1))) {
+ CFA_LOG_ERR("tsid = %d, region_id = %d\n", tsid, region_id);
+ return -EINVAL;
+ }
+
+ *tpm_inst = ctx->tpm_tbl[CFA_TIM_MAKE_INDEX(tsid, region_id, dir,
+ ctx->max_regions, ctx->max_tsid)];
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,85 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_tim_priv.h
+ *
+ * @brief CFA Table Scope Instance Manager private API definitions
+ */
+
+#ifndef _CFA_TIM_PRIV_H_
+#define _CFA_TIM_PRIV_H_
+
+#include <cfa_types.h>
+
+#define CFA_TIM_SIGNATURE 0xCFACEE11
+
+/*
+ *
+ * Total index space is (MaxDir * MaxRegion * MaxTableScope), the
+ * following macro satisfies that:
+ *
+ * (Dir# * (MaxRegionSpace * MaxTableScope)) +
+ * (TableScope# * (MaxRegionSpace)) +
+ * Region#
+ *
+ * Examples:
+ *
+ * MaxD MaxR MaxT Total
+ * 2 1 1 2
+ *
+ * Dir Region TableScope Index
+ * 0 0 0 0
+ * 1 0 0 1
+ *
+ * MaxD MaxR MaxT Total
+ * 2 2 1 4
+ *
+ * Dir Region TableScope Index
+ * 0 0 0 0
+ * 1 0 0 2
+ * 0 1 0 1
+ * 1 1 0 3
+ *
+ * MaxD MaxR MaxT Total
+ * 2 2 3 12
+ *
+ * Dir Region TableScope Index
+ * 0 0 0 0
+ * 1 0 0 6
+ * 0 1 0 1
+ * 1 1 0 7
+ * 0 0 1 2
+ * 1 0 1 8
+ * 0 1 1 3
+ * 1 1 1 9
+ * 0 0 2 4
+ * 1 0 2 10
+ * 0 1 2 5
+ * 1 1 2 11
+ *
+ */
+#define CFA_TIM_MAKE_INDEX(tsid, region, dir, max_regions, max_tsid) \
+ (((dir) * (max_regions) * (max_tsid)) + ((tsid) * (max_regions)) + (region))
+
+/**
+ * CFA Table Scope Instance Manager Database
+ *
+ * Structure used to store CFA Table Scope Instance Manager database info
+ */
+struct cfa_tim {
+ /* Signature of the CFA Table Scope Instance Manager Database */
+ uint32_t signature;
+ /* Maximum number of Table Scope Ids */
+ uint8_t max_tsid;
+ /* Maximum number of regions per Table Scope */
+ uint8_t max_regions;
+ /* TPM instance table */
+ void **tpm_tbl;
+};
+
+#endif /* _CFA_TIM_PRIV_H_ */
new file mode 100644
@@ -0,0 +1,133 @@
+/****************************************************************************
+ * Copyright(c) 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_tim.h
+ *
+ * @brief CFA Table Scope Instance Manager Public API definitions
+ */
+#ifndef _CFA_TIM_H_
+#define _CFA_TIM_H_
+
+/**
+ * @addtogroup CFA_TIM CFA Table Scope Instance Manager
+ * \ingroup CFA_V3
+ * The purpose of the CFA Table Scope Instance Manager is to provide
+ * centralized management of Table Scope Pool Manager instances. Each instance
+ * is identified by the Table Scope id and Region id. A caller can set and
+ * retrieve the instance handle using the Table Scope Id and Region Id.
+ * @{
+ */
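+
+/*
+ * Typical usage (a minimal sketch; the MAX_TSID/MAX_REGIONS constants, the
+ * database allocation and the tpm handle are illustrative only):
+ *
+ *   uint32_t db_size;
+ *   void *tim, *tpm;
+ *
+ *   cfa_tim_query(MAX_TSID, MAX_REGIONS, &db_size);
+ *   tim = malloc(db_size);
+ *   cfa_tim_open(tim, db_size, MAX_TSID, MAX_REGIONS);
+ *   cfa_tim_tpm_inst_set(tim, tsid, region, CFA_DIR_RX, tpm);
+ *   cfa_tim_tpm_inst_get(tim, tsid, region, CFA_DIR_RX, &tpm);
+ *   cfa_tim_close(tim);
+ */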
+
+/** CFA Table Scope Instance Manager query DB size API
+ *
+ * This API returns the size of memory required for internal data structures to
+ * manage the table scope instances.
+ *
+ * @param[in] max_tbl_scopes
+ * Maximum number of table scope ids available to manage.
+ *
+ * @param[in] max_regions
+ * Maximum number of regions per table scope.
+ *
+ * @param[out] tim_db_size
+ * Pointer to 32 bit integer to return the amount of memory required.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tim_query(uint8_t max_tbl_scopes, uint8_t max_regions,
+ uint32_t *tim_db_size);
+
+/** CFA Table Scope Instance Manager open API
+ *
+ * This API initializes the CFA Table Scope Instance Manager database
+ *
+ * @param[in] tim
+ * Pointer to the memory used for the CFA Table Scope Instance Manager
+ * Database.
+ *
+ * @param[in] tim_db_size
+ * The size of memory block pointed to by tim parameter.
+ *
+ * @param[in] max_tbl_scopes
+ * Maximum number of table scope ids available to manage.
+ *
+ * @param[in] max_regions
+ * Maximum number of regions per table scope.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tim_open(void *tim, uint32_t tim_db_size, uint8_t max_tbl_scopes,
+ uint8_t max_regions);
+
+/** CFA Table Scope Instance Manager close API
+ *
+ * This API resets the CFA Table Scope Instance Manager database
+ *
+ * @param[in] tim
+ * Pointer to the database memory for the Table Scope Instance Manager.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tim_close(void *tim);
+
+/** CFA Table Scope Instance Manager set instance API
+ *
+ * This API sets the TPM instance handle into TIM.
+ *
+ * @param[in] tim
+ * Pointer to the database memory for the Table Scope Instance Manager.
+ *
+ * @param[in] tsid
+ * The Table scope id of the instance.
+ *
+ * @param[in] region_id
+ * The region id of the instance.
+ *
+ * @param[in] dir
+ * The direction of the instance.
+ *
+ * @param[in] tpm_inst
+ * The handle of TPM instance.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tim_tpm_inst_set(void *tim, uint8_t tsid, uint8_t region_id,
+ int dir, void *tpm_inst);
+
+/** CFA Table Scope Instance Manager get instance API
+ *
+ * This API gets the TPM instance handle from TIM.
+ *
+ * @param[in] tim
+ * Pointer to the database memory for the Table Scope Instance Manager.
+ *
+ * @param[in] tsid
+ * The Table scope id of the instance.
+ *
+ * @param[in] region_id
+ * The region id of the instance.
+ *
+ * @param[in] dir
+ * The direction of the instance.
+ *
+ * @param[out] tpm_inst
+ * Pointer to memory location to return the handle of TPM instance.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tim_tpm_inst_get(void *tim, uint8_t tsid, uint8_t region_id,
+ int dir, void **tpm_inst);
+
+/**@}*/
+
+#endif /* _CFA_TIM_H_ */
new file mode 100644
@@ -0,0 +1,44 @@
+#
+# Copyright(c) 2021 Broadcom Limited, all rights reserved
+# Contains proprietary and confidential information.
+#
+# This source file is the property of Broadcom Limited, and
+# may not be copied or distributed in any isomorphic form without
+# the prior written consent of Broadcom Limited.
+#
+
+add_library(cfa-tpm-lib-common INTERFACE)
+target_include_directories(cfa-tpm-lib-common INTERFACE include
+ ../include
+ ../../include
+ ../../generic-common/include
+ ../../../tf_core)
+
+set (CFA_TPM_SRCS cfa_tpm.c)
+
+# Production version
+add_library(cfa-tpm-lib STATIC EXCLUDE_FROM_ALL ${CFA_TPM_SRCS})
+set_property(TARGET cfa-tpm-lib PROPERTY POSITION_INDEPENDENT_CODE 1)
+target_link_libraries(cfa-tpm-lib PUBLIC cfa-tpm-lib-common nxt-platform nxt-arch)
+target_include_directories(cfa-tpm-lib PUBLIC ../include/platform/fw)
+
+# UT version
+add_library(cfa-tpm-lib-ut STATIC EXCLUDE_FROM_ALL ${CFA_TPM_SRCS})
+set_property(TARGET cfa-tpm-lib-ut PROPERTY POSITION_INDEPENDENT_CODE 1)
+target_link_libraries(cfa-tpm-lib-ut PUBLIC cfa-tpm-lib-common nxt-ut nxt-platform nxt-arch nxt-env-ut)
+target_include_directories(cfa-tpm-lib-ut PUBLIC ../include/platform/ut)
+
+set(ignoreMe "${SKIP_TPM_UT}")
+if(NOT DEFINED SKIP_TPM_UT)
+add_subdirectory(ut)
+endif()
+
+# Update Doxygen Path for tpm api documentation
+set(CFA_API_DOC_DIRS ${CFA_API_DOC_DIRS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/include # Public api
+ CACHE INTERNAL "")
+
+# Update Doxygen Path for tpm design documentation
+set(CFA_DESIGN_DOC_DIRS ${CFA_DESIGN_DOC_DIRS}
+ ${CMAKE_CURRENT_SOURCE_DIR} # tpm implementation
+ CACHE INTERNAL "")
new file mode 100644
@@ -0,0 +1,273 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_tpm.c
+ *
+ * @brief CFA Table Scope Pool Manager apis
+ */
+#define COMP_ID TPM
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include "cfa_util.h"
+#include "cfa_tpm_priv.h"
+#include "cfa_tpm.h"
+#include "cfa_trace.h"
+
+static uint32_t cfa_tpm_size(uint16_t max_pools)
+{
+ return sizeof(struct cfa_tpm) + BITALLOC_SIZEOF(max_pools) +
+ max_pools * sizeof(uint16_t);
+}
+
+int cfa_tpm_query(uint16_t max_pools, uint32_t *tpm_db_size)
+{
+ if (tpm_db_size == NULL) {
+ CFA_LOG_ERR("tpm_db_size = %p\n", tpm_db_size);
+ return -EINVAL;
+ }
+
+ if (!CFA_CHECK_BOUNDS(max_pools, CFA_TPM_MIN_POOLS,
+ CFA_TPM_MAX_POOLS)) {
+ CFA_LOG_ERR("max_pools = %d\n", max_pools);
+ return -EINVAL;
+ }
+
+ *tpm_db_size = cfa_tpm_size(max_pools);
+
+ return 0;
+}
+
+int cfa_tpm_open(void *tpm, uint32_t tpm_db_size, uint16_t max_pools)
+{
+ int i;
+ struct cfa_tpm *ctx = (struct cfa_tpm *)tpm;
+
+ if (tpm == NULL) {
+ CFA_LOG_ERR("tpm = %p\n", tpm);
+ return -EINVAL;
+ }
+
+ if (!(CFA_CHECK_BOUNDS(max_pools, CFA_TPM_MIN_POOLS,
+ CFA_TPM_MAX_POOLS) &&
+ tpm_db_size >= cfa_tpm_size(max_pools))) {
+ CFA_LOG_ERR("max_pools = %d tpm_db_size = %d\n", max_pools,
+ tpm_db_size);
+ return -EINVAL;
+ }
+
+ memset(tpm, 0, tpm_db_size);
+
+ ctx->signature = CFA_TPM_SIGNATURE;
+ ctx->max_pools = max_pools;
+ ctx->pool_ba = (struct bitalloc *)(ctx + 1);
+ ctx->fid_tbl = (uint16_t *)((uint8_t *)ctx->pool_ba +
+ BITALLOC_SIZEOF(max_pools));
+
+ if (ba_init(ctx->pool_ba, max_pools, true))
+ return -EINVAL;
+
+ for (i = 0; i < max_pools; i++)
+ ctx->fid_tbl[i] = CFA_INVALID_FID;
+
+ return 0;
+}
+
+int cfa_tpm_close(void *tpm)
+{
+ struct cfa_tpm *ctx = (struct cfa_tpm *)tpm;
+
+ if (tpm == NULL || ctx->signature != CFA_TPM_SIGNATURE) {
+ CFA_LOG_ERR("tpm = %p\n", tpm);
+ return -EINVAL;
+ }
+
+ memset(tpm, 0, cfa_tpm_size(ctx->max_pools));
+
+ return 0;
+}
+
+int cfa_tpm_alloc(void *tpm, uint16_t *pool_id)
+{
+ int rc;
+ struct cfa_tpm *ctx = (struct cfa_tpm *)tpm;
+
+ if (tpm == NULL || pool_id == NULL ||
+ ctx->signature != CFA_TPM_SIGNATURE) {
+ CFA_LOG_ERR("tpm = %p, pool_id = %p\n", tpm, pool_id);
+ return -EINVAL;
+ }
+
+ rc = ba_alloc(ctx->pool_ba);
+
+ if (rc < 0)
+ return -ENOMEM;
+
+ *pool_id = rc;
+
+ ctx->fid_tbl[rc] = CFA_INVALID_FID;
+
+ return 0;
+}
+
+int cfa_tpm_free(void *tpm, uint16_t pool_id)
+{
+ struct cfa_tpm *ctx = (struct cfa_tpm *)tpm;
+
+	if (tpm == NULL || ctx->signature != CFA_TPM_SIGNATURE ||
+	    pool_id >= ctx->max_pools) {
+		CFA_LOG_ERR("tpm = %p, pool_id = %d\n", tpm, pool_id);
+		return -EINVAL;
+	}
+
+ if (ctx->fid_tbl[pool_id] != CFA_INVALID_FID) {
+ CFA_LOG_ERR("A function (%d) is still using the pool (%d)\n",
+ ctx->fid_tbl[pool_id], pool_id);
+ return -EINVAL;
+ }
+
+ return ba_free(ctx->pool_ba, pool_id);
+}
+
+int cfa_tpm_fid_add(void *tpm, uint16_t pool_id, uint16_t fid)
+{
+ struct cfa_tpm *ctx = (struct cfa_tpm *)tpm;
+
+ if (tpm == NULL || ctx->signature != CFA_TPM_SIGNATURE) {
+ CFA_LOG_ERR("tpm = %p, pool_id = %d\n", tpm, pool_id);
+ return -EINVAL;
+ }
+
+ if (!ba_inuse(ctx->pool_ba, pool_id)) {
+ CFA_LOG_ERR("Pool id (%d) was not allocated\n", pool_id);
+ return -EINVAL;
+ }
+
+ if (ctx->fid_tbl[pool_id] != CFA_INVALID_FID &&
+ ctx->fid_tbl[pool_id] != fid) {
+		CFA_LOG_ERR("Function id %d is already set on the pool %d\n",
+			    ctx->fid_tbl[pool_id], pool_id);
+ return -EINVAL;
+ }
+
+ ctx->fid_tbl[pool_id] = fid;
+
+ return 0;
+}
+
+int cfa_tpm_fid_rem(void *tpm, uint16_t pool_id, uint16_t fid)
+{
+ struct cfa_tpm *ctx = (struct cfa_tpm *)tpm;
+
+ if (tpm == NULL || ctx->signature != CFA_TPM_SIGNATURE) {
+ CFA_LOG_ERR("tpm = %p, pool_id = %d\n", tpm, pool_id);
+ return -EINVAL;
+ }
+
+ if (!ba_inuse(ctx->pool_ba, pool_id)) {
+ CFA_LOG_ERR("Pool id (%d) was not allocated\n", pool_id);
+ return -EINVAL;
+ }
+
+ if (ctx->fid_tbl[pool_id] == CFA_INVALID_FID ||
+ ctx->fid_tbl[pool_id] != fid) {
+ CFA_LOG_ERR("The function id %d was not set to the pool %d\n",
+ fid, pool_id);
+ return -EINVAL;
+ }
+
+ ctx->fid_tbl[pool_id] = CFA_INVALID_FID;
+
+ return 0;
+}
+
+int cfa_tpm_srch_by_pool(void *tpm, uint16_t pool_id, uint16_t *fid)
+{
+ struct cfa_tpm *ctx = (struct cfa_tpm *)tpm;
+
+ if (tpm == NULL || ctx->signature != CFA_TPM_SIGNATURE || fid == NULL ||
+ pool_id >= ctx->max_pools) {
+ CFA_LOG_ERR("tpm = %p, pool_id = %d, fid = %p\n", tpm, pool_id,
+ fid);
+ return -EINVAL;
+ }
+
+ if (!ba_inuse(ctx->pool_ba, pool_id)) {
+ CFA_LOG_ERR("Pool id (%d) was not allocated\n", pool_id);
+ return -EINVAL;
+ }
+
+ if (ctx->fid_tbl[pool_id] == CFA_INVALID_FID) {
+ CFA_LOG_ERR("A function id was not set to the pool (%d)\n",
+ pool_id);
+ return -EINVAL;
+ }
+
+ *fid = ctx->fid_tbl[pool_id];
+
+ return 0;
+}
+
+int cfa_tpm_srchm_by_fid(void *tpm, enum cfa_srch_mode srch_mode, uint16_t fid,
+ uint16_t *pool_id)
+{
+ uint16_t i;
+ struct cfa_tpm *ctx = (struct cfa_tpm *)tpm;
+
+ if (tpm == NULL || ctx->signature != CFA_TPM_SIGNATURE ||
+ pool_id == NULL) {
+ CFA_LOG_ERR("tpm = %p, pool_id = %p fid = %d\n", tpm, pool_id,
+ fid);
+ return -EINVAL;
+ }
+
+ if (srch_mode == CFA_SRCH_MODE_FIRST)
+ ctx->next_index = 0;
+
+ for (i = ctx->next_index; i < ctx->max_pools; i++) {
+ if (ctx->fid_tbl[i] == fid) {
+ ctx->next_index = i + 1;
+ *pool_id = i;
+ return 0;
+ }
+ }
+
+ ctx->next_index = ctx->max_pools;
+
+ return -ENOENT;
+}
+
+int cfa_tpm_pool_size_set(void *tpm, uint8_t pool_sz_exp)
+{
+ struct cfa_tpm *ctx = (struct cfa_tpm *)tpm;
+
+ if (tpm == NULL || ctx->signature != CFA_TPM_SIGNATURE) {
+ CFA_LOG_ERR("tpm = %p\n", tpm);
+ return -EINVAL;
+ }
+
+ ctx->pool_sz_exp = pool_sz_exp;
+
+ return 0;
+}
+
+int cfa_tpm_pool_size_get(void *tpm, uint8_t *pool_sz_exp)
+{
+ struct cfa_tpm *ctx = (struct cfa_tpm *)tpm;
+
+ if (tpm == NULL || ctx->signature != CFA_TPM_SIGNATURE ||
+ pool_sz_exp == NULL) {
+ CFA_LOG_ERR("tpm = %p, pool_sz_exp = %p\n", tpm, pool_sz_exp);
+ return -EINVAL;
+ }
+
+ *pool_sz_exp = ctx->pool_sz_exp;
+
+ return 0;
+}
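For reference, a minimal caller-side sketch of how the query/open pair above is typically used to size and initialize a database. The helper name and the use of malloc() are illustrative only; any suitably sized memory block works, since cfa_tpm_open() zeroes and carves the block itself.

#include <stdint.h>
#include <stdlib.h>
#include "cfa_tpm.h"

/* Illustrative helper: size, allocate, and open a TPM database. */
static void *tpm_create(uint16_t max_pools)
{
	uint32_t db_size = 0;
	void *tpm;

	if (cfa_tpm_query(max_pools, &db_size) != 0)
		return NULL;

	tpm = malloc(db_size);
	if (tpm != NULL && cfa_tpm_open(tpm, db_size, max_pools) != 0) {
		free(tpm);
		tpm = NULL;
	}
	return tpm;	/* release later with cfa_tpm_close() then free() */
}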
new file mode 100644
@@ -0,0 +1,47 @@
+/****************************************************************************
+ * Copyright(c) 2021 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_tpm_priv.h
+ *
+ * @brief CFA Table Scope Pool Manager private API definitions
+ */
+
+#ifndef _CFA_TPM_PRIV_H_
+#define _CFA_TPM_PRIV_H_
+
+#include "cfa_types.h"
+#include "bitalloc.h"
+
+#define CFA_TPM_SIGNATURE 0xCFACF0CD
+
+#define CFA_TPM_MAX_POOLS 1040
+#define CFA_TPM_MIN_POOLS 1
+
+#define CFA_INVALID_FID UINT16_MAX
+
+/**
+ * CFA Table Scope Manager Pool Database
+ *
+ * Structure used to store CFA Table Scope Pool Manager database info
+ */
+struct cfa_tpm {
+ /* Signature of the CFA Table Scope Pool Manager Database */
+ uint32_t signature;
+ /* Maximum number of pools */
+ uint16_t max_pools;
+ /* Size of each pool, in powers of 2 */
+ uint8_t pool_sz_exp;
+ /* Next index for search multiple by fid */
+ uint16_t next_index;
+ /* Bitmap to keep track of pool usage */
+ struct bitalloc *pool_ba;
+ /* Fid table */
+ uint16_t *fid_tbl;
+};
+
+#endif /* _CFA_TPM_PRIV_H_ */
new file mode 100644
@@ -0,0 +1,215 @@
+/****************************************************************************
+ * Copyright(c) 2022 Broadcom Corporation, all rights reserved
+ * Proprietary and Confidential Information.
+ *
+ * This source file is the property of Broadcom Corporation, and
+ * may not be copied or distributed in any isomorphic form without
+ * the prior written consent of Broadcom Corporation.
+ *
+ * @file cfa_tpm.h
+ *
+ * @brief CFA Table Scope Pool Manager Public API definitions
+ */
+#ifndef _CFA_TPM_H_
+#define _CFA_TPM_H_
+
+#include "cfa_types.h"
+
+/**
+ * @addtogroup CFA_TPM CFA Table Scope Pool Manager
+ * \ingroup CFA_V3
+ * The purpose of the CFA Table Scope Pool Manager is to provide centralized
+ * management of Table Scope region pools. Each CFA TPM instance manages the
+ * pools belonging to one region. The Table Scope Pool Manager (TPM) keeps
+ * track of the fids that are using the pools.
+ * @{
+ */
+
+/** CFA Table Scope Pool Manager query DB size API
+ *
+ * This API returns the size of memory required for internal data structures to
+ * manage the table scope pool ids, and user fids.
+ *
+ * @param[in] max_pools
+ * Maximum number of pool ids available to manage.
+ *
+ * @param[out] tpm_db_size
+ * Pointer to 32 bit integer to return the amount of memory required.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tpm_query(uint16_t max_pools, uint32_t *tpm_db_size);
+
+/** CFA Table Scope Pool Manager open API
+ *
+ * This API initializes the CFA Table Scope Pool Manager database
+ *
+ * @param[in] tpm
+ * Pointer to the memory used for the CFA Table Scope Pool Manager Database.
+ *
+ * @param[in] tpm_db_size
+ * The size of memory block pointed to by tpm parameter.
+ *
+ * @param[in] max_pools
+ * Maximum number of pool ids to manage.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tpm_open(void *tpm, uint32_t tpm_db_size, uint16_t max_pools);
+
+/** CFA Table Scope Pool Manager close API
+ *
+ * This API resets the CFA Table Scope Pool Manager database
+ *
+ * @param[in] tpm
+ * Pointer to the database memory for the Table Scope Pool Manager.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tpm_close(void *tpm);
+
+/** CFA Table Scope pool Manager alloc API
+ *
+ * This API allocates a pool Id.
+ *
+ * @param[in] tpm
+ * Pointer to the database memory for the Table Scope Pool Manager.
+ *
+ * @param[out] pool_id
+ * Pointer to memory location to return the allocated Pool Id.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tpm_alloc(void *tpm, uint16_t *pool_id);
+
+/** CFA Table Scope Pool Manager free API
+ *
+ * This API frees a previously allocated Pool Id.
+ *
+ * @param[in] tpm
+ * Pointer to the database memory for the Table Scope Pool Manager.
+ *
+ * @param[in] pool_id
+ * Pool Id to be freed.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tpm_free(void *tpm, uint16_t pool_id);
+
+/** CFA Table Scope Pool Manager add fid API
+ *
+ * This API adds an fid to a Pool Id.
+ *
+ * @param[in] tpm
+ * Pointer to the database memory for the Table Scope Pool Manager.
+ *
+ * @param[in] pool_id
+ * Pool Id to which the fid has to be added.
+ *
+ * @param[in] fid
+ * Function id to be added.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tpm_fid_add(void *tpm, uint16_t pool_id, uint16_t fid);
+
+/** CFA Table Scope Pool Manager remove fid API
+ *
+ * This API removes a previously added fid from a Pool Id.
+ *
+ * @param[in] tpm
+ * Pointer to the database memory for the Table Scope Pool Manager.
+ *
+ * @param[in] pool_id
+ * Pool Id from which the fid has to be removed.
+ *
+ * @param[in] fid
+ * Function id to be removed.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tpm_fid_rem(void *tpm, uint16_t pool_id, uint16_t fid);
+
+/** CFA Table Scope Pool Manager search by pool id API
+ *
+ * This API searches for the fid that is added to the pool id.
+ *
+ * @param[in] tpm
+ * Pointer to the database memory for the Table Scope Pool Manager.
+ *
+ * @param[in] pool_id
+ * Pool id to be searched for.
+ *
+ * @param[out] fid
+ * Pointer to memory location to return the fid that is added
+ *   to the Pool id.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tpm_srch_by_pool(void *tpm, uint16_t pool_id, uint16_t *fid);
+
+/** CFA Table Scope Pool Manager search by fid API
+ *
+ * This API searches for the Pool ids to which fid is added.
+ *
+ * @param[in] tpm
+ * Pointer to the database memory for the Table Scope Pool Manager.
+ *
+ * @param[in] srch_mode
+ *   Search mode: indicates whether this call starts a new iteration
+ *   (return the first match) or continues from the previous match.
+ *
+ * @param[in] fid
+ * Function id to be searched for.
+ *
+ * @param[out] pool_id
+ * Pointer to memory location to return the Pool Id to which fid is
+ * added.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tpm_srchm_by_fid(void *tpm, enum cfa_srch_mode srch_mode, uint16_t fid,
+ uint16_t *pool_id);
+
+/** CFA Table Scope Pool Manager set pool size API
+ *
+ * This API sets the pool size into TPM.
+ *
+ * @param[in] tpm
+ * Pointer to the database memory for the Table Scope Pool Manager.
+ *
+ * @param[in] pool_sz_exp
+ *   The size of each pool, expressed as a power-of-2 exponent.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tpm_pool_size_set(void *tpm, uint8_t pool_sz_exp);
+
+/** CFA Table Scope Pool Manager get pool size API
+ *
+ * This API returns the pool size from TPM.
+ *
+ * @param[in] tpm
+ * Pointer to the database memory for the Table Scope Pool Manager.
+ *
+ * @param[out] pool_sz_exp
+ *   Pointer to memory location to return the pool size as a power-of-2
+ *   exponent.
+ *
+ * @return
+ * Returns 0 if successful, Error Code otherwise
+ */
+int cfa_tpm_pool_size_get(void *tpm, uint8_t *pool_sz_exp);
+
+/**@}*/
+
+#endif /* _CFA_TPM_H_ */
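To tie the API above together, here is a hedged sketch of one pool's lifecycle against an already opened TPM instance. The fid value and the error-handling shape are illustrative, not part of the API contract.

#include <stdint.h>
#include "cfa_types.h"
#include "cfa_tpm.h"

/* Illustrative only: allocate a pool, bind a fid, look it up both ways,
 * then unbind and release the pool.
 */
static int tpm_pool_demo(void *tpm, uint16_t fid)
{
	uint16_t pool_id, owner;
	int rc;

	rc = cfa_tpm_alloc(tpm, &pool_id);          /* reserve a pool id */
	if (rc != 0)
		return rc;

	rc = cfa_tpm_fid_add(tpm, pool_id, fid);    /* bind the fid to it */
	if (rc == 0) {
		rc = cfa_tpm_srch_by_pool(tpm, pool_id, &owner);
		if (rc == 0)
			rc = cfa_tpm_srchm_by_fid(tpm, CFA_SRCH_MODE_FIRST,
						  fid, &pool_id);
		cfa_tpm_fid_rem(tpm, pool_id, fid); /* unbind before freeing */
	}

	cfa_tpm_free(tpm, pool_id);
	return rc;
}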
@@ -72,6 +72,10 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_QUERY_ROCE_CC_GEN1 UINT32_C(0x4)
/* RoCE slow path command to modify CC Gen1 support. */
#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 UINT32_C(0x5)
+/* RoCE slow path command to query CC Gen2 support. */
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2 UINT32_C(0x6)
+/* RoCE slow path command to modify CC Gen2 support. */
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 UINT32_C(0x7)
/* Engine CKV - The Alias key EC curve and ECC public key information. */
#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY UINT32_C(0x8001)
/* Engine CKV - Initialization vector. */
@@ -153,14 +157,14 @@ struct tlv {
/* input (size:128b/16B) */
struct input {
/*
- * This value indicates what type of request this is. The format
+ * This value indicates what type of request this is. The format
* for the rest of the command is determined by this field.
*/
uint16_t req_type;
/*
* This value indicates the what completion ring the request will
- * be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
+ * be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
* valid CR ring_id value for this function.
*/
uint16_t cmpl_ring;
@@ -176,7 +180,7 @@ struct input {
uint16_t target_id;
/*
* This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
+ * when the request is complete. This area must be 16B aligned
* and must be cleared to zero before the request is made.
*/
uint64_t resp_addr;
@@ -197,7 +201,7 @@ struct output {
/* This field provides original sequence number of the command. */
uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The
+ * This field is the length of the response in bytes. The
* last byte of the response is a valid flag that will read
* as '1' when the command has been completely written to
* memory.
@@ -366,6 +370,14 @@ struct cmd_nums {
#define HWRM_QUEUE_VLANPRI2PRI_CFG UINT32_C(0x85)
#define HWRM_QUEUE_GLOBAL_CFG UINT32_C(0x86)
#define HWRM_QUEUE_GLOBAL_QCFG UINT32_C(0x87)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG UINT32_C(0x88)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG UINT32_C(0x89)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG UINT32_C(0x8a)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG UINT32_C(0x8b)
+ #define HWRM_QUEUE_QCAPS UINT32_C(0x8c)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG UINT32_C(0x8d)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG UINT32_C(0x8e)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG UINT32_C(0x8f)
#define HWRM_CFA_L2_FILTER_ALLOC UINT32_C(0x90)
#define HWRM_CFA_L2_FILTER_FREE UINT32_C(0x91)
#define HWRM_CFA_L2_FILTER_CFG UINT32_C(0x92)
@@ -389,6 +401,7 @@ struct cmd_nums {
#define HWRM_TUNNEL_DST_PORT_QUERY UINT32_C(0xa0)
#define HWRM_TUNNEL_DST_PORT_ALLOC UINT32_C(0xa1)
#define HWRM_TUNNEL_DST_PORT_FREE UINT32_C(0xa2)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG UINT32_C(0xa3)
#define HWRM_STAT_CTX_ENG_QUERY UINT32_C(0xaf)
#define HWRM_STAT_CTX_ALLOC UINT32_C(0xb0)
#define HWRM_STAT_CTX_FREE UINT32_C(0xb1)
@@ -444,6 +457,8 @@ struct cmd_nums {
#define HWRM_PORT_EP_TX_CFG UINT32_C(0xdb)
#define HWRM_PORT_CFG UINT32_C(0xdc)
#define HWRM_PORT_QCFG UINT32_C(0xdd)
+ /* Queries MAC capabilities for the specified port */
+ #define HWRM_PORT_MAC_QCAPS UINT32_C(0xdf)
#define HWRM_TEMP_MONITOR_QUERY UINT32_C(0xe0)
#define HWRM_REG_POWER_QUERY UINT32_C(0xe1)
#define HWRM_CORE_FREQUENCY_QUERY UINT32_C(0xe2)
@@ -547,7 +562,12 @@ struct cmd_nums {
#define HWRM_CFA_TLS_FILTER_ALLOC UINT32_C(0x128)
/* Experimental */
#define HWRM_CFA_TLS_FILTER_FREE UINT32_C(0x129)
- /* Engine CKV - Get the current allocation status of keys provisioned in the key vault. */
+ /* Release an AFM function for TF control */
+ #define HWRM_CFA_RELEASE_AFM_FUNC UINT32_C(0x12a)
+ /*
+ * Engine CKV - Get the current allocation status of keys provisioned in
+ * the key vault.
+ */
#define HWRM_ENGINE_CKV_STATUS UINT32_C(0x12e)
/* Engine CKV - Add a new CKEK used to encrypt keys. */
#define HWRM_ENGINE_CKV_CKEK_ADD UINT32_C(0x12f)
@@ -607,7 +627,10 @@ struct cmd_nums {
#define HWRM_ENGINE_STATS_CLEAR UINT32_C(0x156)
/* Engine - Query the statistics accumulator for an Engine. */
#define HWRM_ENGINE_STATS_QUERY UINT32_C(0x157)
- /* Engine - Query statistics counters for continuous errors from all CDDIP Engines. */
+ /*
+ * Engine - Query statistics counters for continuous errors from all CDDIP
+ * Engines.
+ */
#define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR UINT32_C(0x158)
/* Engine - Allocate an Engine RQ. */
#define HWRM_ENGINE_RQ_ALLOC UINT32_C(0x15e)
@@ -689,6 +712,20 @@ struct cmd_nums {
#define HWRM_FUNC_SYNCE_CFG UINT32_C(0x1ab)
/* Queries SyncE configurations. */
#define HWRM_FUNC_SYNCE_QCFG UINT32_C(0x1ac)
+ /* The command is used to deallocate KTLS or QUIC key contexts. */
+ #define HWRM_FUNC_KEY_CTX_FREE UINT32_C(0x1ad)
+ /* The command is used to configure link aggr group mode. */
+ #define HWRM_FUNC_LAG_MODE_CFG UINT32_C(0x1ae)
+ /* The command is used to query link aggr group mode. */
+ #define HWRM_FUNC_LAG_MODE_QCFG UINT32_C(0x1af)
+ /* The command is used to create a link aggr group. */
+ #define HWRM_FUNC_LAG_CREATE UINT32_C(0x1b0)
+ /* The command is used to update a link aggr group. */
+ #define HWRM_FUNC_LAG_UPDATE UINT32_C(0x1b1)
+ /* The command is used to free a link aggr group. */
+ #define HWRM_FUNC_LAG_FREE UINT32_C(0x1b2)
+ /* The command is used to query a link aggr group. */
+ #define HWRM_FUNC_LAG_QCFG UINT32_C(0x1b3)
/* Experimental */
#define HWRM_SELFTEST_QLIST UINT32_C(0x200)
/* Experimental */
@@ -720,12 +757,12 @@ struct cmd_nums {
#define HWRM_MFG_SOC_IMAGE UINT32_C(0x20c)
/* Retrieves the SoC status and image provisioning information */
#define HWRM_MFG_SOC_QSTATUS UINT32_C(0x20d)
- /* Tells the fw to program the seeprom memory */
- #define HWRM_MFG_PARAM_SEEPROM_SYNC UINT32_C(0x20e)
- /* Tells the fw to read the seeprom memory */
- #define HWRM_MFG_PARAM_SEEPROM_READ UINT32_C(0x20f)
- /* Tells the fw to get the health of seeprom data */
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH UINT32_C(0x210)
+ /* Tells the fw to finalize the critical data (store and lock it) */
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE UINT32_C(0x20e)
+ /* Tells the fw to read the critical data */
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_READ UINT32_C(0x20f)
+ /* Tells the fw to get the health of critical data */
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH UINT32_C(0x210)
/*
* The command is used for certificate provisioning to export a
* Certificate Signing Request (CSR) from the device.
@@ -760,6 +797,37 @@ struct cmd_nums {
#define HWRM_MFG_SELFTEST_EXEC UINT32_C(0x217)
/* Queries the generic stats */
#define HWRM_STAT_GENERIC_QSTATS UINT32_C(0x218)
+ /*
+ * The command is used for certificate provisioning to export a
+ * certificate chain from the device.
+ */
+ #define HWRM_MFG_PRVSN_EXPORT_CERT UINT32_C(0x219)
+ /* Query the statistics for doorbell drops due to various error conditions. */
+ #define HWRM_STAT_DB_ERROR_QSTATS UINT32_C(0x21a)
+ /*
+ * This command returns the capabilities related to User Defined
+ * Congestion Control on a function.
+ */
+ #define HWRM_UDCC_QCAPS UINT32_C(0x258)
+ /* This command configures User Defined Congestion Control on a function. */
+ #define HWRM_UDCC_CFG UINT32_C(0x259)
+ /*
+ * This command queries the configuration of User Defined Congestion
+ * Control on a function.
+ */
+ #define HWRM_UDCC_QCFG UINT32_C(0x25a)
+ /* This command configures an existing UDCC session. */
+ #define HWRM_UDCC_SESSION_CFG UINT32_C(0x25b)
+ /* This command queries the configuration of a UDCC session. */
+ #define HWRM_UDCC_SESSION_QCFG UINT32_C(0x25c)
+ /* This command queries the UDCC session. */
+ #define HWRM_UDCC_SESSION_QUERY UINT32_C(0x25d)
+ /* This command configures the computation unit. */
+ #define HWRM_UDCC_COMP_CFG UINT32_C(0x25e)
+ /* This command queries the configuration of the computation unit. */
+ #define HWRM_UDCC_COMP_QCFG UINT32_C(0x25f)
+ /* This command queries the status and statistics of the computation unit. */
+ #define HWRM_UDCC_COMP_QUERY UINT32_C(0x260)
/* Experimental */
#define HWRM_TF UINT32_C(0x2bc)
/* Experimental */
@@ -767,8 +835,6 @@ struct cmd_nums {
/* Experimental */
#define HWRM_TF_SESSION_OPEN UINT32_C(0x2c6)
/* Experimental */
- #define HWRM_TF_SESSION_ATTACH UINT32_C(0x2c7)
- /* Experimental */
#define HWRM_TF_SESSION_REGISTER UINT32_C(0x2c8)
/* Experimental */
#define HWRM_TF_SESSION_UNREGISTER UINT32_C(0x2c9)
@@ -797,22 +863,6 @@ struct cmd_nums {
/* Experimental */
#define HWRM_TF_TBL_TYPE_BULK_GET UINT32_C(0x2dc)
/* Experimental */
- #define HWRM_TF_CTXT_MEM_ALLOC UINT32_C(0x2e2)
- /* Experimental */
- #define HWRM_TF_CTXT_MEM_FREE UINT32_C(0x2e3)
- /* Experimental */
- #define HWRM_TF_CTXT_MEM_RGTR UINT32_C(0x2e4)
- /* Experimental */
- #define HWRM_TF_CTXT_MEM_UNRGTR UINT32_C(0x2e5)
- /* Experimental */
- #define HWRM_TF_EXT_EM_QCAPS UINT32_C(0x2e6)
- /* Experimental */
- #define HWRM_TF_EXT_EM_OP UINT32_C(0x2e7)
- /* Experimental */
- #define HWRM_TF_EXT_EM_CFG UINT32_C(0x2e8)
- /* Experimental */
- #define HWRM_TF_EXT_EM_QCFG UINT32_C(0x2e9)
- /* Experimental */
#define HWRM_TF_EM_INSERT UINT32_C(0x2ea)
/* Experimental */
#define HWRM_TF_EM_DELETE UINT32_C(0x2eb)
@@ -840,6 +890,10 @@ struct cmd_nums {
#define HWRM_TF_RESC_USAGE_SET UINT32_C(0x300)
/* Experimental */
#define HWRM_TF_RESC_USAGE_QUERY UINT32_C(0x301)
+ /* Truflow command to allocate a table */
+ #define HWRM_TF_TBL_TYPE_ALLOC UINT32_C(0x302)
+ /* Truflow command to free a table */
+ #define HWRM_TF_TBL_TYPE_FREE UINT32_C(0x303)
/* TruFlow command to check firmware table scope capabilities. */
#define HWRM_TFC_TBL_SCOPE_QCAPS UINT32_C(0x380)
/* TruFlow command to allocate a table scope ID and create the pools. */
@@ -852,9 +906,9 @@ struct cmd_nums {
#define HWRM_TFC_TBL_SCOPE_FID_ADD UINT32_C(0x384)
/* TruFlow command to remove a FID from a table scope. */
#define HWRM_TFC_TBL_SCOPE_FID_REM UINT32_C(0x385)
- /* TruFlow command to allocate a table scope pool. */
+ /* DEPRECATED */
#define HWRM_TFC_TBL_SCOPE_POOL_ALLOC UINT32_C(0x386)
- /* TruFlow command to free a table scope pool. */
+ /* DEPRECATED */
#define HWRM_TFC_TBL_SCOPE_POOL_FREE UINT32_C(0x387)
/* Experimental */
#define HWRM_TFC_SESSION_ID_ALLOC UINT32_C(0x388)
@@ -888,8 +942,30 @@ struct cmd_nums {
#define HWRM_TFC_TCAM_ALLOC_SET UINT32_C(0x396)
/* TruFlow command to free a TCAM entry. */
#define HWRM_TFC_TCAM_FREE UINT32_C(0x397)
+ /* Truflow command to set an interface table entry */
+ #define HWRM_TFC_IF_TBL_SET UINT32_C(0x398)
+ /* Truflow command to get an interface table entry */
+ #define HWRM_TFC_IF_TBL_GET UINT32_C(0x399)
+ /* TruFlow command to get configured info about a table scope. */
+ #define HWRM_TFC_TBL_SCOPE_CONFIG_GET UINT32_C(0x39a)
+ /* TruFlow command to query the resource usage state. */
+ #define HWRM_TFC_RESC_USAGE_QUERY UINT32_C(0x39b)
+ /*
+ * This command is used to query the pfc watchdog max configurable
+ * timeout value.
+ */
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS UINT32_C(0x39c)
+ /* This command is used to set the PFC watchdog timeout value. */
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG UINT32_C(0x39d)
+ /*
+ * This command is used to query the current configured pfc watchdog
+ * timeout value.
+ */
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG UINT32_C(0x39e)
/* Experimental */
#define HWRM_SV UINT32_C(0x400)
+ /* Flush any trace buffer data that has not been sent to the host. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH UINT32_C(0xff0f)
/* Experimental */
#define HWRM_DBG_READ_DIRECT UINT32_C(0xff10)
/* Experimental */
@@ -945,6 +1021,8 @@ struct cmd_nums {
#define HWRM_DBG_USEQ_DELIVERY_REQ UINT32_C(0xff2a)
/* Experimental */
#define HWRM_DBG_USEQ_RESP_HDR UINT32_C(0xff2b)
+ #define HWRM_NVM_GET_VPD_FIELD_INFO UINT32_C(0xffea)
+ #define HWRM_NVM_SET_VPD_FIELD_INFO UINT32_C(0xffeb)
#define HWRM_NVM_DEFRAG UINT32_C(0xffec)
#define HWRM_NVM_REQ_ARBITRATION UINT32_C(0xffed)
/* Experimental */
@@ -1039,14 +1117,14 @@ struct ret_codes {
#define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC UINT32_C(0xc)
/*
* This error code is only reported by firmware when the registered
- * driver instances requested to offloaded a flow but was unable to because
- * the requested key's hash collides with the installed keys.
+ * driver instances requested to offloaded a flow but was unable to
+ * because the requested key's hash collides with the installed keys.
*/
#define HWRM_ERR_CODE_KEY_HASH_COLLISION UINT32_C(0xd)
/*
* This error code is only reported by firmware when the registered
- * driver instances requested to offloaded a flow but was unable to because
- * the same key has already been installed.
+ * driver instances requested to offloaded a flow but was unable to
+ * because the same key has already been installed.
*/
#define HWRM_ERR_CODE_KEY_ALREADY_EXISTS UINT32_C(0xe)
/*
@@ -1055,8 +1133,8 @@ struct ret_codes {
*/
#define HWRM_ERR_CODE_HWRM_ERROR UINT32_C(0xf)
/*
- * Firmware is unable to service the request at the present time. Caller
- * may try again later.
+ * Firmware is unable to service the request at the present time.
+ * Caller may try again later.
*/
#define HWRM_ERR_CODE_BUSY UINT32_C(0x10)
/*
@@ -1071,6 +1149,11 @@ struct ret_codes {
* async completion ring or associated forwarding buffers configured.
*/
#define HWRM_ERR_CODE_PF_UNAVAILABLE UINT32_C(0x12)
+ /*
+ * This error code is reported by Firmware when the specific entity
+ * requested by the host is not present or does not exist.
+ */
+ #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT UINT32_C(0x13)
/*
* This value indicates that the HWRM response is in TLV format and
* should be interpreted as one or more TLVs starting with the
@@ -1103,7 +1186,7 @@ struct hwrm_err_output {
/* This field provides original sequence number of the command. */
uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The
+ * This field is the length of the response in bytes. The
* last byte of the response is a valid flag that will read
* as '1' when the command has been completely written to
* memory.
@@ -1120,9 +1203,9 @@ struct hwrm_err_output {
uint8_t cmd_err;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -1132,7 +1215,12 @@ struct hwrm_err_output {
* applicable (All F's). Need to cast it the size of the field if needed.
*/
#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
-/* hwrm_func_buf_rgtr */
+/*
+ * This is reflecting the size of the PF mailbox and not the maximum
+ * command size for any of the HWRM command structures. To determine
+ * the maximum size of an HWRM command supported by the firmware, see
+ * the max_ext_req_len field in the response of the HWRM_VER_GET command.
+ */
#define HWRM_MAX_REQ_LEN 128
/* hwrm_cfa_flow_info */
#define HWRM_MAX_RESP_LEN 704
@@ -1156,10 +1244,10 @@ struct hwrm_err_output {
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 2
+#define HWRM_VERSION_UPDATE 3
/* non-zero means beta version */
-#define HWRM_VERSION_RSVD 158
-#define HWRM_VERSION_STR "1.10.2.158"
+#define HWRM_VERSION_RSVD 40
+#define HWRM_VERSION_STR "1.10.3.40"
/****************
* hwrm_ver_get *
@@ -1377,53 +1465,58 @@ struct hwrm_ver_get_output {
/*
* If set to 1, then the KONG host mailbox channel is supported.
* If set to 0, then the KONG host mailbox channel is not supported.
- * By default, this flag should be 0 for older version of core firmware.
+ * By default, this flag should be 0 for older version of core
+ * firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED \
UINT32_C(0x10)
/*
- * If set to 1, then the 64bit flow handle is supported in addition to the
- * legacy 16bit flow handle. If set to 0, then the 64bit flow handle is not
- * supported. By default, this flag should be 0 for older version of core firmware.
+ * If set to 1, then the 64bit flow handle is supported in addition
+ * to the legacy 16bit flow handle. If set to 0, then the 64bit flow
+ * handle is not supported. By default, this flag should be 0 for
+ * older version of core firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED \
UINT32_C(0x20)
/*
- * If set to 1, then filter type can be provided in filter_alloc or filter_cfg
- * filter types like L2 for l2 traffic and ROCE for roce & l2 traffic.
- * If set to 0, then filter types not supported.
- * By default, this flag should be 0 for older version of core firmware.
+ * If set to 1, then filter type can be provided in filter_alloc or
+ * filter_cfg filter types like L2 for l2 traffic and ROCE for roce &
+ * l2 traffic. If set to 0, then filter types not supported. By
+ * default, this flag should be 0 for older version of core firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED \
UINT32_C(0x40)
/*
- * If set to 1, firmware is capable to support virtio vSwitch offload model.
- * If set to 0, firmware can't supported virtio vSwitch offload model.
- * By default, this flag should be 0 for older version of core firmware.
+ * If set to 1, firmware is capable to support virtio vSwitch offload
+ * model. If set to 0, firmware can't supported virtio vSwitch
+ * offload model.
+ * By default, this flag should be 0 for older version of core
+ * firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED \
UINT32_C(0x80)
/*
* If set to 1, firmware is capable to support trusted VF.
* If set to 0, firmware is not capable to support trusted VF.
- * By default, this flag should be 0 for older version of core firmware.
+ * By default, this flag should be 0 for older version of core
+ * firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED \
UINT32_C(0x100)
/*
* If set to 1, firmware is capable to support flow aging.
* If set to 0, firmware is not capable to support flow aging.
- * By default, this flag should be 0 for older version of core firmware.
- * (deprecated)
+ * By default, this flag should be 0 for older version of core
+ * firmware. (deprecated)
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED \
UINT32_C(0x200)
/*
- * If set to 1, firmware is capable to support advanced flow counters like,
- * Meter drop counters and EEM counters.
- * If set to 0, firmware is not capable to support advanced flow counters.
- * By default, this flag should be 0 for older version of core firmware.
- * (deprecated)
+ * If set to 1, firmware is capable to support advanced flow counters
+ * like, Meter drop counters and EEM counters.
+ * If set to 0, firmware is not capable to support advanced flow
+ * counters. By default, this flag should be 0 for older version of
+ * core firmware. (deprecated)
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED \
UINT32_C(0x400)
@@ -1432,31 +1525,33 @@ struct hwrm_ver_get_output {
* Extended Exact Match(EEM) feature.
* If set to 0, firmware is not capable to support the use of the
* CFA EEM feature.
- * By default, this flag should be 0 for older version of core firmware.
- * (deprecated)
+ * By default, this flag should be 0 for older version of core
+ * firmware. (deprecated)
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_EEM_SUPPORTED \
UINT32_C(0x800)
/*
- * If set to 1, the firmware is able to support advance CFA flow management
- * features reported in the HWRM_CFA_FLOW_MGNT_QCAPS.
- * If set to 0, then the firmware doesn’t support the advance CFA flow management
- * features.
- * By default, this flag should be 0 for older version of core firmware.
+ * If set to 1, the firmware is able to support advance CFA flow
+ * management features reported in the HWRM_CFA_FLOW_MGNT_QCAPS.
+ * If set to 0, then the firmware doesn't support the advance CFA
+ * flow management features.
+ * By default, this flag should be 0 for older version of core
+ * firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED \
UINT32_C(0x1000)
/*
* Deprecated and replaced with cfa_truflow_supported.
* If set to 1, the firmware is able to support TFLIB features.
- * If set to 0, then the firmware doesn’t support TFLIB features.
- * By default, this flag should be 0 for older version of core firmware.
+ * If set to 0, then the firmware doesn't support TFLIB features.
+ * By default, this flag should be 0 for older version of core
+ * firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED \
UINT32_C(0x2000)
/*
* If set to 1, the firmware is able to support TruFlow features.
- * If set to 0, then the firmware doesn’t support TruFlow features.
+ * If set to 0, then the firmware doesn't support TruFlow features.
* By default, this flag should be 0 for older version of
* core firmware.
*/
@@ -1520,7 +1615,10 @@ struct hwrm_ver_get_output {
uint8_t chip_metal;
/* This field returns the bond id of the chip. */
uint8_t chip_bond_id;
- /* This value indicates the type of platform used for chip implementation. */
+ /*
+ * This value indicates the type of platform used for chip
+ * implementation.
+ */
uint8_t chip_platform_type;
/* ASIC */
#define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC UINT32_C(0x0)
@@ -1576,8 +1674,8 @@ struct hwrm_ver_get_output {
* host drivers that it has not completed resource initialization
* required for data path operations. Host drivers should not send
* any HWRM command that requires data path resources. Firmware will
- * fail those commands with HWRM_ERR_CODE_BUSY. Host drivers can retry
- * those commands once both the flags are cleared.
+ * fail those commands with HWRM_ERR_CODE_BUSY. Host drivers can
+ * retry those commands once both the flags are cleared.
* If this flag and dev_not_rdy flag are set to 0, device is ready
* to accept all HWRM commands.
*/
@@ -1738,9 +1836,9 @@ struct hwrm_ver_get_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -2334,11 +2432,11 @@ struct crypto_presync_bd_cmd {
* Typically, presync BDs are used for packet retransmissions. Source
* port sends all the packets in order over the network to destination
* port and packets get dropped in the network. The destination port
- * will request retranmission of dropped packets and source port driver
- * will send presync BD to setup the transmitter appropriately. It will
- * provide the start and end TCP sequence number of the data to be
- * transmitted. HW keeps two sets of context variable, one for in order
- * traffic and one for retransmission traffic. HW is designed to
+ * will request retransmission of dropped packets and source port
+ * driver will send presync BD to setup the transmitter appropriately.
+ * It will provide the start and end TCP sequence number of the data to
+ * be transmitted. HW keeps two sets of context variable, one for in
+ * order traffic and one for retransmission traffic. HW is designed to
* transmit everything posted in the presync BD and return to in order
* mode after that. No inorder context variables are updated in the
* process. There is a special case where packets can be dropped
@@ -2506,22 +2604,22 @@ struct ce_bds_quic_add_data_msg {
* exchanged as part of sessions setup between the two end
* points for QUIC operations.
*/
- uint64_t quic_iv_lo;
+ uint8_t quic_iv_lo[8];
/*
* Most-significant 32 bits (of 96) of additional IV that is
* exchanged as part of sessions setup between the two end
* points for QUIC operations.
*/
- uint32_t quic_iv_hi;
+ uint8_t quic_iv_hi[4];
uint32_t unused_1;
/*
* Key used for encrypting or decrypting records. The Key is exchanged
* as part of sessions setup between the two end points through this
* mid-path BD.
*/
- uint32_t session_key[8];
+ uint8_t session_key[32];
/* Header protection key. */
- uint32_t hp_key[8];
+ uint8_t hp_key[32];
/* Packet number associated with the QUIC connection. */
uint64_t pkt_number;
} __rte_packed;
@@ -2907,7 +3005,7 @@ struct tx_bd_long_hi {
* 0xffff.
*
* If set to one when LSO is '1', then the IPID will be treated
- * as a 15b number and will be wrapped if it exceeds a value 0f
+ * as a 15b number and will be wrapped if it exceeds a value of
* 0x7fff.
*/
#define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
@@ -2961,7 +3059,7 @@ struct tx_bd_long_hi {
* will be the following behavior for all cases independent of
* settings of inner LSO and checksum offload BD flags.
* If outer UDP checksum is 0, then do not update it.
- * If outer UDP checksum is non zero, then the hardware should
+ * If outer UDP checksum is non zero, then the hardware should
* compute and update it.
*/
#define TX_BD_LONG_LFLAGS_OT_IP_CHKSUM UINT32_C(0x2000)
@@ -3091,7 +3189,7 @@ struct tx_bd_long_hi {
* - Wh+/SR - this option is not supported.
* - Thor - cfa_meta[15:0] is used for metadata output if en_bd_meta
* is set in the Lookup Table.
- * - SR2 - {4’d0, cfa_meta[27:0]} is used for metadata output if
+ * - SR2 - {4'd0, cfa_meta[27:0]} is used for metadata output if
* en_bd_meta is set in the Lookup Table.
*/
#define TX_BD_LONG_CFA_META_KEY_METADATA_TRANSFER \
@@ -3387,7 +3485,7 @@ struct tx_bd_long_inline {
* - Wh+/SR - this option is not supported.
* - Thor - cfa_meta[15:0] is used for metadata output if en_bd_meta
* is set in the Lookup Table.
- * - SR2 - {4’d0, cfa_meta[27:0]} is used for metadata output if
+ * - SR2 - {4'd0, cfa_meta[27:0]} is used for metadata output if
* en_bd_meta is set in the Lookup Table.
*/
#define TX_BD_LONG_INLINE_CFA_META_KEY_METADATA_TRANSFER \
@@ -3505,6 +3603,91 @@ struct tx_bd_presync_cmd {
uint32_t unused_1;
} __rte_packed;
+/*
+ * This structure is used to send additional information for transmitting
+ * packets using timed transmit scheduling. It must only be applied as
+ * the second BD of a BD chain that represents a packet. Any subsequent
+ * BDs will follow the timed transmit BD.
+ */
+/* tx_bd_timedtx (size:128b/16B) */
+struct tx_bd_timedtx {
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define TX_BD_TIMEDTX_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_TIMEDTX_TYPE_SFT 0
+ /*
+ * Indicates a timed transmit BD. This is a 16b BD that is inserted
+ * into a packet BD chain immediately after the first BD. It is used
+ * to control the flow in a timed transmit operation.
+ */
+ #define TX_BD_TIMEDTX_TYPE_TX_BD_TIMEDTX UINT32_C(0xa)
+ #define TX_BD_TIMEDTX_TYPE_LAST \
+ TX_BD_TIMEDTX_TYPE_TX_BD_TIMEDTX
+ /* Unless otherwise stated, sub-fields of this field are always valid. */
+ #define TX_BD_TIMEDTX_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_TIMEDTX_FLAGS_SFT 6
+ /*
+ * This value identifies the kind of buffer timed transmit mode that
+ * is to be enabled for the packet.
+ */
+ #define TX_BD_TIMEDTX_FLAGS_KIND_MASK UINT32_C(0x1c0)
+ #define TX_BD_TIMEDTX_FLAGS_KIND_SFT 6
+ /*
+ * This timed transmit mode indicates that the packet will be
+	 * scheduled and sent immediately (or as soon as possible), once
+ * it is scheduled in the transmitter.
+ * Note: This mode is similar to regular (non-timed transmit)
+ * operation. Its main purpose is to cancel pace mode timed
+ * transmit.
+ */
+ #define TX_BD_TIMEDTX_FLAGS_KIND_ASAP (UINT32_C(0x0) << 6)
+ /*
+ * This timed transmit mode is used to schedule transmission of
+ * the packet no earlier than the time given in the tx_time
+ * field of the BD.
+ * Note: In case subsequent packets don't include a timed transmit
+ * BD, they will be scheduled subsequently for transmission
+ * without any timed transmit constraint.
+ */
+ #define TX_BD_TIMEDTX_FLAGS_KIND_SO_TXTIME (UINT32_C(0x1) << 6)
+ /*
+ * This timed transmit mode is used to enable rate control for the
+ * flow (QP) at a rate as defined by the rate field of this BD.
+ * Note: In case subsequent, adjacent packets on the same flow
+ * don't include a timed transmit BD, they will continue to be
+ * paced by the transmitter at the same rate as given in this BD.
+ */
+ #define TX_BD_TIMEDTX_FLAGS_KIND_PACE (UINT32_C(0x2) << 6)
+ #define TX_BD_TIMEDTX_FLAGS_KIND_LAST \
+ TX_BD_TIMEDTX_FLAGS_KIND_PACE
+ /*
+ * This field exists in all Tx BDs. It doesn't apply to this particular
+ * BD type since the BD never represents an SGL or inline data; i.e. it
+ * is only a command. This field must be zero.
+ */
+ uint16_t len;
+ /*
+ * This field represents the rate of the flow (QP) in terms of KB/s.
+ * This applies to pace mode timed transmit.
+ */
+ uint32_t rate;
+ /*
+ * Applying this rate to a QP will result in this and all subsequent
+ * packets of the flow being paced at the given rate, until such time
+ * that the timed transmit mode is either changed or the rate is
+ * updated in a future packet on the flow.
+ * This field is applicable only if flags.kind is pace.
+ */
+ #define TX_BD_TIMEDTX_RATE_VAL_MASK UINT32_C(0x1ffffff)
+ #define TX_BD_TIMEDTX_RATE_VAL_SFT 0
+ /*
+ * This field represents the nano-second time to transmit the
+ * corresponding packet using SO_TXTIME mode of timed transmit.
+ * This field is applicable only if flags.kind is so_txtime.
+ */
+ uint64_t tx_time;
+} __rte_packed;
+
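As an illustration of how a transmit path might use this descriptor, below is a hedged sketch that fills a pace-mode timed-transmit BD. The helper name and the chosen rate are hypothetical, and the little-endian conversions follow the usual convention for HSI fields.

#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Hypothetical helper: program the second BD of a packet chain so the
 * flow (QP) is paced at rate_kbps. For SO_TXTIME, the kind flag and the
 * tx_time field would be set instead.
 */
static void fill_timedtx_pace_bd(struct tx_bd_timedtx *txbd,
				 uint32_t rate_kbps)
{
	memset(txbd, 0, sizeof(*txbd));
	txbd->flags_type =
		rte_cpu_to_le_16(TX_BD_TIMEDTX_TYPE_TX_BD_TIMEDTX |
				 TX_BD_TIMEDTX_FLAGS_KIND_PACE);
	txbd->len = 0;	/* command-only BD, never an SGL entry */
	txbd->rate =
		rte_cpu_to_le_32(rate_kbps & TX_BD_TIMEDTX_RATE_VAL_MASK);
	/* tx_time stays zero; it only applies to the SO_TXTIME kind. */
}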
/* rx_prod_pkt_bd (size:128b/16B) */
struct rx_prod_pkt_bd {
/* This value identifies the type of buffer descriptor. */
@@ -6017,8 +6200,20 @@ struct rx_pkt_v3_cmpl {
* is not applicable.
*/
#define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_11 (UINT32_C(0xb) << 7)
+ /* The RSS hash was computed over tunnel context and tunnel ID field. */
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_12 (UINT32_C(0xc) << 7)
+ /*
+ * The RSS hash was computed over tunnel source IP address, tunnel
+ * destination IP address, and tunnel ID field.
+ */
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_13 (UINT32_C(0xd) << 7)
+ /*
+ * The RSS hash was computed over tunnel source IP address, tunnel
+ * destination IP address, tunnel context, and tunnel ID field.
+ */
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_14 (UINT32_C(0xe) << 7)
#define RX_PKT_V3_CMPL_RSS_HASH_TYPE_LAST \
- RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_11
+ RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_14
uint16_t metadata1_payload_offset;
/*
* If truncation placement is not used, this value indicates the offset
@@ -7454,7 +7649,7 @@ struct rx_tpa_start_v2_cmpl_hi {
UINT32_C(0x100)
/*
* This indicates that the complete 1's complement checksum was
- * calculated for the packet in the affregation.
+ * calculated for the packet in the aggregation.
*/
#define RX_TPA_START_V2_CMPL_FLAGS2_COMPLETE_CHECKSUM_CALC \
UINT32_C(0x200)
@@ -8526,7 +8721,7 @@ struct rx_tpa_v2_start_cmpl_hi {
#define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_MASK \
UINT32_C(0xf0)
#define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_SFT 4
- /* No metadata informtaion. Value is zero. */
+ /* No metadata information. Value is zero. */
#define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_NONE \
(UINT32_C(0x0) << 4)
/*
@@ -8545,7 +8740,7 @@ struct rx_tpa_v2_start_cmpl_hi {
* - VXLAN = VNI[23:0] -> VXLAN Network ID
* - Geneve (NGE) = VNI[23:0] a-> Virtual Network Identifier.
* - NVGRE = TNI[23:0] -> Tenant Network ID
- * - GRE = KEY[31:0 -> key fieled with bit mask. zero if K = 0
+ * - GRE = KEY[31:0] -> key field with bit mask. Zero if K = 0
* - IPV4 = 0 (not populated)
* - IPV6 = Flow Label[19:0]
* - PPPoE = sessionID[15:0]
@@ -9534,9 +9729,31 @@ struct hwrm_async_event_cmpl {
*/
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR \
UINT32_C(0x49)
+ /*
+ * An event from firmware indicating that the XID partition was not
+ * allocated/freed by the FW successfully for the request that is
+ * encapsulated in the HWRM_EXEC_FWD_RESP by the PF driver for VF.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR \
+ UINT32_C(0x4a)
+ /*
+ * A UDCC session has been modified in the FW. The session_id can be
+ * used by the driver to retrieve information related to the UDCC
+ * session.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE \
+ UINT32_C(0x4b)
+ /*
+ * Used to notify the host that the firmware has DMA-ed additional
+ * debug data to the host buffer. This is effectively a producer index
+ * update. The host driver can utilize this information to determine
+ * how much of its host buffer has been populated by the firmware.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER \
+ UINT32_C(0x4c)
/* Maximum Registrable event id. */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID \
- UINT32_C(0x4a)
+ UINT32_C(0x4d)
/*
* A trace log message. This contains firmware trace logs string
* embedded in the asynchronous message. This is an experimental
@@ -10210,7 +10427,7 @@ struct hwrm_async_event_cmpl_reset_notify {
* 16-lsb timestamp (100-msec resolution)
* The Maximum Firmware Reset bail out value in the order of 100
* milliseconds. The driver instances will use this value to reinitiate
- * the registration process again if the core firmware didn’t set the
+ * the registration process again if the core firmware didn't set the
* state bit.
*/
uint16_t timestamp_hi;
@@ -10920,6 +11137,14 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
*/
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE \
UINT32_C(0x10)
+ /*
+ * If this bit is set to 1, then the control of VF was relinquished
+ * back to the firmware flow manager following the function takeover
+ * by TruFlow.
+ * If set to 0, then this bit should be ignored.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE \
+ UINT32_C(0x20)
} __rte_packed;
/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */
@@ -11518,8 +11743,8 @@ struct hwrm_async_event_cmpl_quiesce_done {
8
/*
* Additional information about internal hardware state related to
- * idle/quiesce state. QUIESCE may succeed per quiesce_status
- * regardless of idle_state_flags. If QUIESCE fails, the host may
+ * idle/quiesce state. QUIESCE may succeed per quiesce_status
+ * regardless of idle_state_flags. If QUIESCE fails, the host may
* inspect idle_state_flags to determine whether a retry is warranted.
*/
#define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_MASK \
@@ -12237,6 +12462,236 @@ struct hwrm_async_event_cmpl_hw_doorbell_recovery_read_error {
UINT32_C(0x8)
} __rte_packed;
+/* hwrm_async_event_cmpl_ctx_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_ctx_error {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /*
+ * This async notification message is used to inform the PF driver
+ * that firmware fails to allocate/free the contexts requested. This
+ * message is only valid in the XID partition scheme. Given the start
+ * xid and the number of contexts in error, the PF driver will figure
+ * out the corresponding XID partition(s) in error.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_ID_CTX_ERROR \
+ UINT32_C(0x4a)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_ID_CTX_ERROR
+ /* Event specific data */
+ uint32_t event_data2;
+ /* Context operation code */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE \
+ UINT32_C(0x1)
+ /* Context alloc failure */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE_ALLOC \
+ UINT32_C(0x0)
+ /* Context free failure */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE_FREE \
+ UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE_FREE
+ /* Number of contexts in error */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_NUM_CTXS_MASK \
+ UINT32_C(0xfffe)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_NUM_CTXS_SFT 1
+ /* Function ID which the XID partitions are associated with */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_FID_MASK \
+ UINT32_C(0xffff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_FID_SFT 16
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_OPAQUE_SFT 1
+ /* 8-lsb timestamp (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Starting XID that has error */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA1_START_XID_MASK \
+ UINT32_C(0xffffffff)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA1_START_XID_SFT 0
+} __rte_packed;
+
+/* hwrm_async_event_udcc_session_change (size:128b/16B) */
+struct hwrm_async_event_udcc_session_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /*
+ * This async notification message is used to inform the PF driver
+ * that firmware has modified a UDCC session.
+ */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_ID_UDCC_SESSION_CHANGE \
+ UINT32_C(0x4b)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_ID_UDCC_SESSION_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ /* UDCC Session id operation code */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_MASK \
+ UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_SFT \
+ 0
+ /* session_id has been created */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_CREATED \
+ UINT32_C(0x0)
+ /* session_id has been freed */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_FREED \
+ UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_LAST \
+ HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_FREED
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* UDCC session id which was modified */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA1_UDCC_SESSION_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA1_UDCC_SESSION_ID_SFT \
+ 0
+} __rte_packed;
+
+/* hwrm_async_event_cmpl_dbg_buf_producer (size:128b/16B) */
+struct hwrm_async_event_cmpl_dbg_buf_producer {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /*
+ * Used to notify the host that the firmware has DMA-ed additional
+ * debug data to the host buffer. This is effectively a producer index
+ * update. The host driver can utilize this information to determine
+ * how much of its host buffer has been populated by the firmware.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER \
+ UINT32_C(0x4c)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER
+ /* Event specific data */
+ uint32_t event_data2;
+ /*
+ * Specifies the current host buffer offset. Data up to this offset
+ * has been populated by the firmware. For example, if the firmware
+ * has DMA-ed 8192 bytes to the host buffer, then this field has a
+ * value of 8192. This field rolls over to zero once the firmware
+ * writes the last page of the host buffer
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURRENT_BUFFER_OFFSET_MASK \
+ UINT32_C(0xffffffff)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURRENT_BUFFER_OFFSET_SFT \
+ 0
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Type of trace buffer that has been updated. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT \
+ 0
+ /* SRT trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE \
+ UINT32_C(0x0)
+ /* SRT2 trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT2_TRACE \
+ UINT32_C(0x1)
+ /* CRT trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE \
+ UINT32_C(0x2)
+ /* CRT2 trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT2_TRACE \
+ UINT32_C(0x3)
+ /* RIGP0 trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP0_TRACE \
+ UINT32_C(0x4)
+ /* L2 HWRM trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE \
+ UINT32_C(0x5)
+ /* RoCE HWRM trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE \
+ UINT32_C(0x6)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE
+} __rte_packed;
+
/* hwrm_async_event_cmpl_fw_trace_msg (size:128b/16B) */
struct hwrm_async_event_cmpl_fw_trace_msg {
uint16_t type;
@@ -12842,7 +13297,7 @@ struct hwrm_async_event_cmpl_error_report_thermal {
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT
/* Event specific data. */
uint32_t event_data2;
- /* Current temperature. In Celsius */
+ /* Current temperature. In Celsius */
#define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK \
UINT32_C(0xff)
#define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT \
@@ -12929,6 +13384,72 @@ struct hwrm_async_event_cmpl_error_report_thermal {
HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
} __rte_packed;
+/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /*
+ * This async notification message is used to inform
+ * the driver that an error has occurred which may need
+ * the attention of the administrator.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT \
+ UINT32_C(0x45)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT
+ /* Event specific data. */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT \
+ 1
+ /* 8-lsb timestamp (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Indicates the type of error being reported. */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK \
+ UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT \
+ 0
+ /*
+ * Speed change not supported with dual rate transceivers
+ * on this board.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED \
+ UINT32_C(0x6)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
+} __rte_packed;
+
/* metadata_base_msg (size:64b/8B) */
struct metadata_base_msg {
uint16_t md_type_link;
@@ -13524,8 +14045,8 @@ struct hwrm_func_reset_input {
* The ID of the VF that this PF is trying to reset.
* Only the parent PF shall be allowed to reset a child VF.
*
- * A parent PF driver shall use this field only when a specific child VF
- * is requested to be reset.
+ * A parent PF driver shall use this field only when a specific child
+ * VF is requested to be reset.
*/
uint16_t vf_id;
/* This value indicates the level of a function reset. */
@@ -13575,9 +14096,9 @@ struct hwrm_func_reset_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
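The 'valid' byte convention described above (firmware writes it last, and it reads as '1' once the record is complete) applies to every HWRM output record in this file. A minimal, illustrative polling loop, with a hypothetical helper name and a caller-chosen timeout, might look like this:

#include <stdint.h>

#include <rte_atomic.h>
#include <rte_cycles.h>

/*
 * Hypothetical helper: spin until firmware marks an HWRM response as
 * completely written, or give up after 'timeout_us' microseconds.
 */
static int
hwrm_example_wait_valid(const volatile uint8_t *valid, uint32_t timeout_us)
{
	uint32_t i;

	for (i = 0; i < timeout_us; i++) {
		if (*valid == 1) {
			/*
			 * Keep reads of the other response fields from being
			 * reordered before the valid-byte check.
			 */
			rte_smp_rmb();
			return 0;
		}
		rte_delay_us(1);
	}

	return -1; /* response was never marked valid */
}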
@@ -13645,16 +14166,16 @@ struct hwrm_func_getfid_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
/*
- * FID value. This value is used to identify operations on the PCI
+ * FID value. This value is used to identify operations on the PCI
* bus as belonging to a particular PCI function.
*/
uint16_t fid;
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -13725,9 +14246,9 @@ struct hwrm_func_vf_alloc_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -13799,9 +14320,9 @@ struct hwrm_func_vf_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -13812,7 +14333,7 @@ struct hwrm_func_vf_free_output {
********************/
-/* hwrm_func_vf_cfg_input (size:512b/64B) */
+/* hwrm_func_vf_cfg_input (size:576b/72B) */
struct hwrm_func_vf_cfg_input {
/* The HWRM command request type. */
uint16_t req_type;
@@ -13916,17 +14437,29 @@ struct hwrm_func_vf_cfg_input {
#define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS \
UINT32_C(0x800)
/*
- * This bit must be '1' for the num_tx_key_ctxs field to be
- * configured.
+ * This bit must be '1' for the num_ktls_tx_key_ctxs field to
+ * be configured.
*/
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_KEY_CTXS \
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_KTLS_TX_KEY_CTXS \
UINT32_C(0x1000)
/*
- * This bit must be '1' for the num_rx_key_ctxs field to be
- * configured.
+ * This bit must be '1' for the num_ktls_rx_key_ctxs field to
+ * be configured.
*/
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_KEY_CTXS \
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_KTLS_RX_KEY_CTXS \
UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the num_quic_tx_key_ctxs field to
+ * be configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_QUIC_TX_KEY_CTXS \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the num_quic_rx_key_ctxs field to
+ * be configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_QUIC_RX_KEY_CTXS \
+ UINT32_C(0x8000)
/*
* The maximum transmission unit requested on the function.
* The HWRM should make sure that the mtu of
@@ -13991,10 +14524,10 @@ struct hwrm_func_vf_cfg_input {
UINT32_C(0x2)
/*
* This bit requests that the firmware test to see if all the assets
- * requested in this command (i.e. number of CMPL rings) are available.
- * The firmware will return an error if the requested assets are
- * not available. The firmware will NOT reserve the assets if they
- * are available.
+ * requested in this command (i.e. number of CMPL rings) are
+ * available. The firmware will return an error if the requested
+ * assets are not available. The firmware will NOT reserve the assets
+ * if they are available.
*/
#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \
UINT32_C(0x4)
@@ -14009,10 +14542,10 @@ struct hwrm_func_vf_cfg_input {
UINT32_C(0x8)
/*
* This bit requests that the firmware test to see if all the assets
- * requested in this command (i.e. number of ring groups) are available.
- * The firmware will return an error if the requested assets are
- * not available. The firmware will NOT reserve the assets if they
- * are available.
+ * requested in this command (i.e. number of ring groups) are
+ * available. The firmware will return an error if the requested
+ * assets are not available. The firmware will NOT reserve the assets
+ * if they are available.
*/
#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \
UINT32_C(0x10)
@@ -14076,11 +14609,17 @@ struct hwrm_func_vf_cfg_input {
uint16_t num_stat_ctxs;
/* The number of HW ring groups requested for the VF. */
uint16_t num_hw_ring_grps;
- /* Number of Tx Key Contexts requested. */
- uint32_t num_tx_key_ctxs;
- /* Number of Rx Key Contexts requested. */
- uint32_t num_rx_key_ctxs;
- uint8_t unused[4];
+ /* Number of KTLS Tx Key Contexts requested. */
+ uint32_t num_ktls_tx_key_ctxs;
+ /* Number of KTLS Rx Key Contexts requested. */
+ uint32_t num_ktls_rx_key_ctxs;
+ /* The number of MSI-X vectors requested for the VF. */
+ uint16_t num_msix;
+ uint8_t unused[2];
+ /* Number of QUIC Tx Key Contexts requested. */
+ uint32_t num_quic_tx_key_ctxs;
+ /* Number of QUIC Rx Key Contexts requested. */
+ uint32_t num_quic_rx_key_ctxs;
} __rte_packed;
/* hwrm_func_vf_cfg_output (size:128b/16B) */
@@ -14096,9 +14635,9 @@ struct hwrm_func_vf_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -14161,7 +14700,7 @@ struct hwrm_func_qcaps_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
/*
- * FID value. This value is used to identify operations on the PCI
+ * FID value. This value is used to identify operations on the PCI
* bus as belonging to a particular PCI function.
*/
uint16_t fid;
@@ -14310,7 +14849,8 @@ struct hwrm_func_qcaps_output {
/*
* If the query is for a VF, then this flag shall be ignored,
* If this query is for a PF and this flag is set to 1,
- * then the PF has the administrative privilege to configure another PF
+ * then the PF has the administrative privilege to configure another
+ * PF.
*/
#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED \
UINT32_C(0x40000)
@@ -14770,7 +15310,7 @@ struct hwrm_func_qcaps_output {
UINT32_C(0x2)
/*
* When this bit is '1', it indicates that KDNet mode is
- * supported on the port for this function. This bit is
+ * supported on the port for this function. This bit is
* never set for a VF.
*/
#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_KDNET_SUPPORTED \
@@ -14872,11 +15412,11 @@ struct hwrm_func_qcaps_output {
UINT32_C(0x4000)
/*
* This bit is only valid on the condition that both
- * “ktls_supported” and “quic_supported” flags are set. When this
+ * 'ktls_supported' and 'quic_supported' flags are set. When this
* bit is valid, it conveys information below:
- * 1. If it is set to ‘1’, it indicates that the firmware allows the
+ * 1. If it is set to '1', it indicates that the firmware allows the
* driver to run KTLS and QUIC concurrently;
- * 2. If it is cleared to ‘0’, it indicates that the driver has to
+ * 2. If it is cleared to '0', it indicates that the driver has to
* make sure all crypto connections on all functions are of the
* same type, i.e., either KTLS or QUIC.
*/
@@ -14912,6 +15452,51 @@ struct hwrm_func_qcaps_output {
*/
#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED \
UINT32_C(0x100000)
+ /*
+ * When this bit is '1', it indicates that the device supports
+ * UDCC management.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_UDCC_SUPPORTED \
+ UINT32_C(0x200000)
+ /*
+ * When this bit is '1', it indicates that the device supports Timed
+ * Transmit TxTime scheduling; this is applicable to L2 flows only.
+ * It is expected that host software assigns each packet a transmit
+ * time and posts packets for transmit in time order. NIC hardware
+ * transmits the packet at time assigned by software.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED \
+ UINT32_C(0x400000)
+ /*
+ * This bit indicates the method used for the advertisement of the
+ * max resource limit for the PF and its VFs.
+ * When this bit is '1', it indicates that the maximum resource
+ * limits for both RoCE and L2 are software defined. These limits
+ * are queried using the HWRM backing store qcaps v1
+ * and v2(max_num_entries). For RoCE, the resource limits are
+ * derived from nvm options. For L2, the resources will continue
+ * to use FW enforced SW limits based on chip config and per PF
+ * function NVM resource parameters.
+	 * If this bit is '0', the FW will use the legacy behavior.
+ * For RoCE, the maximum resource values supported by the chip will
+ * be returned. For L2, the maximum resource values returned will
+ * be the FW enforced SW limits based on chip config and per PF
+ * function NVM resource parameters.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED \
+ UINT32_C(0x800000)
+ /*
+ * When this bit is '1', it indicates that the device supports
+ * migrating ingress NIC flows to Truflow.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED \
+ UINT32_C(0x1000000)
+ /*
+ * When this bit is '1', it indicates that the Firmware supports
+ * query and clear of the port loopback statistics.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_LPBK_STATS_SUPPORTED \
+ UINT32_C(0x2000000)
uint16_t tunnel_disable_flag;
/*
* When this bit is '1', it indicates that the VXLAN parsing
@@ -15035,7 +15620,7 @@ struct hwrm_func_qcaps_output {
uint8_t unused_3[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -15101,7 +15686,7 @@ struct hwrm_func_qcfg_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
/*
- * FID value. This value is used to identify operations on the PCI
+ * FID value. This value is used to identify operations on the PCI
* bus as belonging to a particular PCI function.
*/
uint16_t fid;
@@ -15174,15 +15759,15 @@ struct hwrm_func_qcfg_output {
* If the function that is being queried is a PF, then the HWRM shall
* set this field to 0 and the HWRM client shall ignore this field.
* If the function that is being queried is a VF, then the HWRM shall
- * set this field to 1 if the queried VF is trusted, otherwise the HWRM
- * shall set this field to 0.
+ * set this field to 1 if the queried VF is trusted, otherwise the
+ * HWRM shall set this field to 0.
*/
#define HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF \
UINT32_C(0x40)
/*
- * If set to 1, then secure mode is enabled for this function or device.
- * If set to 0, then secure mode is disabled (or normal mode) for this
- * function or device.
+ * If set to 1, then secure mode is enabled for this function or
+ * device. If set to 0, then secure mode is disabled (or normal mode)
+ * for this function or device.
*/
#define HWRM_FUNC_QCFG_OUTPUT_FLAGS_SECURE_MODE_ENABLED \
UINT32_C(0x80)
@@ -15244,6 +15829,13 @@ struct hwrm_func_qcfg_output {
*/
#define HWRM_FUNC_QCFG_OUTPUT_FLAGS_ENABLE_RDMA_SRIOV \
UINT32_C(0x4000)
+ /*
+ * When set to 1, indicates the field roce_vnic_id in the structure
+ * is valid. If this bit is 0, the driver should not use the
+ * 'roce_vnic_id' field.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_ROCE_VNIC_ID_VALID \
+ UINT32_C(0x8000)
/*
* This value is current MAC address configured for this
* function. A value of 00-00-00-00-00-00 indicates no
@@ -15327,10 +15919,10 @@ struct hwrm_func_qcfg_output {
#define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_LAST \
HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN
/*
- * This field will indicate number of physical functions on this port_partition.
- * HWRM shall return unavail (i.e. value of 0) for this field
- * when this command is used to query VF's configuration or
- * from older firmware that doesn't support this field.
+	 * This field will indicate the number of physical functions on this
+ * port_partition. HWRM shall return unavail (i.e. value of 0) for this
+ * field when this command is used to query VF's configuration or from
+ * older firmware that doesn't support this field.
*/
uint8_t port_pf_cnt;
/* number of PFs is not available */
@@ -15473,7 +16065,10 @@ struct hwrm_func_qcfg_output {
/* Admin link state is in forced up mode. */
#define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_UP \
(UINT32_C(0x1) << 2)
- /* Admin link state is in auto mode - follows the physical link state. */
+ /*
+ * Admin link state is in auto mode - follows the physical link
+ * state.
+ */
#define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_AUTO \
(UINT32_C(0x2) << 2)
#define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_LAST \
@@ -15518,7 +16113,7 @@ struct hwrm_func_qcfg_output {
*/
uint16_t alloc_msix;
/*
- * The number of registered VF’s associated with the PF. This field
+	 * The number of registered VFs associated with the PF. This field
* should be ignored when the request received on the VF interface.
* This field will be updated on the PF interface to initiate
* the unregister request on PF in the HOT Reset Process.
@@ -15526,14 +16121,22 @@ struct hwrm_func_qcfg_output {
uint16_t registered_vfs;
/*
* The size of the doorbell BAR in KBytes reserved for L2 including
- * any area that is shared between L2 and RoCE. The L2 driver
- * should only map the L2 portion of the doorbell BAR. Any rounding
+ * any area that is shared between L2 and RoCE. The L2 driver
+ * should only map the L2 portion of the doorbell BAR. Any rounding
* of the BAR size to the native CPU page size should be performed
- * by the driver. If the value is zero, no special partitioning
+ * by the driver. If the value is zero, no special partitioning
* of the doorbell BAR between L2 and RoCE is required.
*/
uint16_t l2_doorbell_bar_size_kb;
- uint8_t unused_1;
+ /*
+ * A bitmask indicating the active endpoints. Each bit represents a
+ * specific endpoint, with bit 0 indicating EP 0 and bit 3 indicating
+ * EP 3. For example:
+ * - a single root system would return 0x1
+ * - a 2x8 system (where EPs 0 and 2 are active) would return 0x5
+ * - a 4x4 system (where EPs 0-3 are active) would return 0xF
+ */
+ uint8_t active_endpoints;
/*
* For backward compatibility this field must be set to 1.
* Older drivers might look for this field to be 1 before
@@ -15541,21 +16144,22 @@ struct hwrm_func_qcfg_output {
*/
uint8_t always_1;
/*
- * This GRC address location is used by the Host driver interfaces to poll
- * the adapter ready state to re-initiate the registration process again
- * after receiving the RESET Notify event.
+ * This GRC address location is used by the Host driver interfaces to
+ * poll the adapter ready state to re-initiate the registration process
+ * again after receiving the RESET Notify event.
*/
uint32_t reset_addr_poll;
/*
- * This field specifies legacy L2 doorbell size in KBytes. Drivers should use
- * this value to find out the doorbell page offset from the BAR.
+ * This field specifies legacy L2 doorbell size in KBytes. Drivers
+ * should use this value to find out the doorbell page offset from the
+ * BAR.
*/
uint16_t legacy_l2_db_size_kb;
uint16_t svif_info;
/*
- * This field specifies the source virtual interface of the function being
- * queried. Drivers can use this to program svif field in the L2 context
- * table
+ * This field specifies the source virtual interface of the function
+ * being queried. Drivers can use this to program svif field in the
+ * L2 context table
*/
#define HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK UINT32_C(0x7fff)
#define HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_SFT 0
@@ -15623,7 +16227,11 @@ struct hwrm_func_qcfg_output {
#define HWRM_FUNC_QCFG_OUTPUT_DB_PAGE_SIZE_4MB UINT32_C(0xa)
#define HWRM_FUNC_QCFG_OUTPUT_DB_PAGE_SIZE_LAST \
HWRM_FUNC_QCFG_OUTPUT_DB_PAGE_SIZE_4MB
- uint8_t unused_2[2];
+ /*
+ * RoCE VNIC ID for the function. If the function does not have a valid
+ * RoCE vnic id, then the roce_vnic_id_valid bit in flags is set to 0.
+ */
+ uint16_t roce_vnic_id;
/*
* Minimum guaranteed bandwidth for the network partition made up
* of the caller physical function and all its child virtual
@@ -15713,7 +16321,7 @@ struct hwrm_func_qcfg_output {
uint8_t unused_3[2];
uint8_t unused_4[2];
/*
- * KDNet mode for the port for this function. If a VF, KDNet
+ * KDNet mode for the port for this function. If a VF, KDNet
* mode is always disabled.
*/
uint8_t port_kdnet_mode;
@@ -15729,7 +16337,7 @@ struct hwrm_func_qcfg_output {
*/
uint8_t kdnet_pcie_function;
/*
- * Function ID of the KDNET function on this port. If the
+ * Function ID of the KDNET function on this port. If the
* KDNET partition does not exist and the FW supports this
* feature, 0xffff will be returned.
*/
@@ -15750,8 +16358,8 @@ struct hwrm_func_qcfg_output {
uint8_t parif;
/*
* The LAG ID of a hardware link aggregation group (LAG) whose
- * member ports include the port of this function. The LAG was
- * previously created using HWRM_FUNC_LAG_CREATE. If the port of this
+ * member ports include the port of this function. The LAG was
+ * previously created using HWRM_FUNC_LAG_CREATE. If the port of this
* function is not a member of any LAG, the fw_lag_id will be 0xff.
*/
uint8_t fw_lag_id;
@@ -15812,9 +16420,9 @@ struct hwrm_func_qcfg_output {
uint8_t unused_7;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -15923,9 +16531,10 @@ struct hwrm_func_cfg_input {
UINT32_C(0x800)
/*
* This bit only applies to the VF. If this bit is set, the statistic
- * context counters will not be cleared when the statistic context is freed
- * or a function reset is called on VF. This bit will be cleared when the PF
- * is unloaded or a function reset is called on the PF.
+ * context counters will not be cleared when the statistic context is
+ * freed or a function reset is called on VF. This bit will be
+ * cleared when the PF is unloaded or a function reset is called on
+ * the PF.
*/
#define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \
UINT32_C(0x1000)
@@ -15949,10 +16558,10 @@ struct hwrm_func_cfg_input {
UINT32_C(0x4000)
/*
* This bit requests that the firmware test to see if all the assets
- * requested in this command (i.e. number of CMPL rings) are available.
- * The firmware will return an error if the requested assets are
- * not available. The firmware will NOT reserve the assets if they
- * are available.
+ * requested in this command (i.e. number of CMPL rings) are
+ * available. The firmware will return an error if the requested
+ * assets are not available. The firmware will NOT reserve the assets
+ * if they are available.
*/
#define HWRM_FUNC_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \
UINT32_C(0x8000)
@@ -15967,10 +16576,10 @@ struct hwrm_func_cfg_input {
UINT32_C(0x10000)
/*
* This bit requests that the firmware test to see if all the assets
- * requested in this command (i.e. number of ring groups) are available.
- * The firmware will return an error if the requested assets are
- * not available. The firmware will NOT reserve the assets if they
- * are available.
+ * requested in this command (i.e. number of ring groups) are
+ * available. The firmware will return an error if the requested
+ * assets are not available. The firmware will NOT reserve the assets
+ * if they are available.
*/
#define HWRM_FUNC_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \
UINT32_C(0x20000)
@@ -16505,7 +17114,7 @@ struct hwrm_func_cfg_input {
* to configure the EVB mode, it sets the evb_mode_cfg_not_supported
* flag in HWRM_FUNC_QCAPS command response for the function.
* The HWRM takes into account the switching of EVB mode from one to
- * another and reconfigure hardware resources as reqiured. The
+ * another and reconfigure hardware resources as required. The
* switching from VEB to VEPA mode requires the disabling of the
* loopback traffic. Additionally, source knockouts are handled
* differently in VEB and VEPA modes.
@@ -16546,7 +17155,10 @@ struct hwrm_func_cfg_input {
/* Admin state is forced up. */
#define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_UP \
(UINT32_C(0x1) << 2)
- /* Admin state is in auto mode - is to follow the physical link state. */
+ /*
+ * Admin state is in auto mode - is to follow the physical link
+ * state.
+ */
#define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_AUTO \
(UINT32_C(0x2) << 2)
#define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_LAST \
@@ -16566,66 +17178,66 @@ struct hwrm_func_cfg_input {
/*
* When this bit is '1', the caller requests to enable a MPC
* channel with destination to the TX crypto engine block.
- * When this bit is ‘0’, this flag has no effect.
+ * When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TCE_ENABLE UINT32_C(0x1)
/*
* When this bit is '1', the caller requests to disable a MPC
* channel with destination to the TX crypto engine block.
- * When this bit is ‘0’, this flag has no effect.
+ * When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TCE_DISABLE UINT32_C(0x2)
/*
* When this bit is '1', the caller requests to enable a MPC
* channel with destination to the RX crypto engine block.
- * When this bit is ‘0’, this flag has no effect.
+ * When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RCE_ENABLE UINT32_C(0x4)
/*
* When this bit is '1', the caller requests to disable a MPC
* channel with destination to the RX crypto engine block.
- * When this bit is ‘0’, this flag has no effect.
+ * When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RCE_DISABLE UINT32_C(0x8)
/*
* When this bit is '1', the caller requests to enable a MPC
* channel with destination to the TX configurable flow processing
- * block. When this bit is ‘0’, this flag has no effect.
+ * block. When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TE_CFA_ENABLE \
UINT32_C(0x10)
/*
* When this bit is '1', the caller requests to disable a MPC
* channel with destination to the TX configurable flow processing
- * block. When this bit is ‘0’, this flag has no effect.
+ * block. When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TE_CFA_DISABLE \
UINT32_C(0x20)
/*
* When this bit is '1', the caller requests to enable a MPC
* channel with destination to the RX configurable flow processing
- * block. When this bit is ‘0’, this flag has no effect.
+ * block. When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RE_CFA_ENABLE \
UINT32_C(0x40)
/*
* When this bit is '1', the caller requests to disable a MPC
* channel with destination to the RX configurable flow processing
- * block. When this bit is ‘0’, this flag has no effect.
+ * block. When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RE_CFA_DISABLE \
UINT32_C(0x80)
/*
* When this bit is '1', the caller requests to enable a MPC
* channel with destination to the primate processor block.
- * When this bit is ‘0’, this flag has no effect.
+ * When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_PRIMATE_ENABLE \
UINT32_C(0x100)
/*
* When this bit is '1', the caller requests to disable a MPC
* channel with destination to the primate processor block.
- * When this bit is ‘0’, this flag has no effect.
+ * When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_PRIMATE_DISABLE \
UINT32_C(0x200)
@@ -16822,8 +17434,8 @@ struct hwrm_func_cfg_input {
#define HWRM_FUNC_CFG_INPUT_ENABLES2_XID_PARTITION_CFG \
UINT32_C(0x400)
/*
- * KDNet mode for the port for this function. If NPAR is
- * also configured on this port, it takes precedence. KDNet
+ * KDNet mode for the port for this function. If NPAR is
+ * also configured on this port, it takes precedence. KDNet
* mode is ignored for a VF.
*/
uint8_t port_kdnet_mode;
@@ -16919,9 +17531,9 @@ struct hwrm_func_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17038,7 +17650,7 @@ struct hwrm_func_qstats_output {
uint64_t tx_bcast_pkts;
/*
* Number of transmitted packets that were discarded due to
- * internal NIC resource problems. For transmit, this
+ * internal NIC resource problems. For transmit, this
* can only happen if TMP is configured to allow dropping
* in HOL blocking conditions, which is not a normal
* configuration.
@@ -17065,7 +17677,7 @@ struct hwrm_func_qstats_output {
uint64_t rx_bcast_pkts;
/*
* Number of received packets that were discarded on the function
- * due to resource limitations. This can happen for 3 reasons.
+ * due to resource limitations. This can happen for 3 reasons.
* # The BD used for the packet has a bad format.
* # There were no BDs available in the ring for the packet.
* # There were no BDs available on-chip for the packet.
@@ -17105,9 +17717,9 @@ struct hwrm_func_qstats_output {
uint8_t unused_0[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17244,9 +17856,9 @@ struct hwrm_func_qstats_ext_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17309,9 +17921,9 @@ struct hwrm_func_clr_stats_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17373,9 +17985,9 @@ struct hwrm_func_vf_resc_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17449,14 +18061,15 @@ struct hwrm_func_drv_rgtr_input {
UINT32_C(0x4)
/*
* When this bit is '1', the function is indicating support of
- * 64bit flow handle. The firmware that only supports 64bit flow
+ * 64bit flow handle. The firmware that only supports 64bit flow
* handle should check this bit before allowing processing of
- * HWRM_CFA_FLOW_XXX commands from the requesting function as firmware
- * with 64bit flow handle support can only be compatible with drivers
- * that support 64bit flow handle. The legacy drivers that don't support
- * 64bit flow handle won't be able to use HWRM_CFA_FLOW_XXX commands when
- * running with new firmware that only supports 64bit flow handle. The new
- * firmware support 64bit flow handle returns HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ * HWRM_CFA_FLOW_XXX commands from the requesting function as
+ * firmware with 64bit flow handle support can only be compatible
+ * with drivers that support 64bit flow handle. The legacy drivers
+ * that don't support 64bit flow handle won't be able to use
+ * HWRM_CFA_FLOW_XXX commands when running with new firmware that
+	 * only supports 64bit flow handle. New firmware that only supports
+	 * 64bit flow handle returns HWRM_ERR_CODE_CMD_NOT_SUPPORTED status
+	 * to the legacy driver when it encounters these commands.
*/
#define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FLOW_HANDLE_64BIT_MODE \
@@ -17487,11 +18100,12 @@ struct hwrm_func_drv_rgtr_input {
UINT32_C(0x20)
/*
* When this bit is 1, the function is indicating the support of the
- * Master capability. The Firmware will use this capability to select the
- * Master function. The master function will be used to initiate
- * designated functionality like error recovery etc… If none of the
- * registered PF’s or trusted VF’s indicate this support, then
- * firmware will select the 1st registered PF as Master capable instance.
+ * Master capability. The Firmware will use this capability to select
+ * the Master function. The master function will be used to initiate
+ * designated functionality like error recovery etc. If none of the
+	 * registered PFs or trusted VFs indicate this support, then
+	 * firmware will select the first registered PF as Master capable
+ * instance.
*/
#define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT \
UINT32_C(0x40)
@@ -17532,6 +18146,15 @@ struct hwrm_func_drv_rgtr_input {
*/
#define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ASYM_QUEUE_CFG_SUPPORT \
UINT32_C(0x400)
+ /*
+ * When this bit is 1, the function's driver is indicating to the
+ * firmware that the Ingress NIC flows will be programmed by the
+ * TruFlow application and the firmware flow manager should reject
+ * flow-create commands that programs ingress lookup flows for this
+ * function.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_TF_INGRESS_NIC_FLOW_MODE \
+ UINT32_C(0x800)
uint32_t enables;
/*
* This bit must be '1' for the os_type field to be
@@ -17563,7 +18186,10 @@ struct hwrm_func_drv_rgtr_input {
*/
#define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD \
UINT32_C(0x10)
- /* This value indicates the type of OS. The values are based on CIM_OperatingSystem.mof file as published by the DMTF. */
+ /*
+ * This value indicates the type of OS. The values are based on
+ * CIM_OperatingSystem.mof file as published by the DMTF.
+ */
uint16_t os_type;
/* Unknown */
#define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
@@ -17662,9 +18288,9 @@ struct hwrm_func_drv_rgtr_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17728,9 +18354,9 @@ struct hwrm_func_drv_unrgtr_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17863,9 +18489,9 @@ struct hwrm_func_buf_rgtr_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17933,9 +18559,9 @@ struct hwrm_func_buf_unrgtr_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17984,7 +18610,18 @@ struct hwrm_func_drv_qver_input {
* function.
*/
uint16_t fid;
- uint8_t unused_0[2];
+ /*
+ * This field is used to indicate the driver type.
+ * L2 or RoCE
+ */
+ uint8_t driver_type;
+ /* L2 driver version */
+ #define HWRM_FUNC_DRV_QVER_INPUT_DRIVER_TYPE_L2 UINT32_C(0x0)
+ /* RoCE driver version */
+ #define HWRM_FUNC_DRV_QVER_INPUT_DRIVER_TYPE_ROCE UINT32_C(0x1)
+ #define HWRM_FUNC_DRV_QVER_INPUT_DRIVER_TYPE_LAST \
+ HWRM_FUNC_DRV_QVER_INPUT_DRIVER_TYPE_ROCE
+ uint8_t unused_0;
} __rte_packed;
/* hwrm_func_drv_qver_output (size:256b/32B) */
@@ -17997,7 +18634,10 @@ struct hwrm_func_drv_qver_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* This value indicates the type of OS. The values are based on CIM_OperatingSystem.mof file as published by the DMTF. */
+ /*
+ * This value indicates the type of OS. The values are based on
+ * CIM_OperatingSystem.mof file as published by the DMTF.
+ */
uint16_t os_type;
/* Unknown */
#define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
@@ -18041,9 +18681,9 @@ struct hwrm_func_drv_qver_output {
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -18093,7 +18733,7 @@ struct hwrm_func_resource_qcaps_input {
uint8_t unused_0[6];
} __rte_packed;
-/* hwrm_func_resource_qcaps_output (size:576b/72B) */
+/* hwrm_func_resource_qcaps_output (size:704b/88B) */
struct hwrm_func_resource_qcaps_output {
/* The specific error status for the command. */
uint16_t error_code;
@@ -18103,13 +18743,22 @@ struct hwrm_func_resource_qcaps_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Maximum guaranteed number of VFs supported by PF. Not applicable for VFs. */
+ /*
+ * Maximum guaranteed number of VFs supported by PF. Not applicable for
+ * VFs.
+ */
uint16_t max_vfs;
- /* Maximum guaranteed number of MSI-X vectors supported by function */
+ /* Maximum guaranteed number of MSI-X vectors supported by function. */
uint16_t max_msix;
- /* Hint of strategy to be used by PF driver to reserve resources for its VF */
+ /*
+ * Hint of strategy to be used by PF driver to reserve resources for
+ * its VF.
+ */
uint16_t vf_reservation_strategy;
- /* The PF driver should evenly divide its remaining resources among all VFs. */
+ /*
+ * The PF driver should evenly divide its remaining resources among
+ * all VFs.
+ */
#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL \
UINT32_C(0x0)
/* The PF driver should only reserve minimal resources for each VF. */
@@ -18123,7 +18772,7 @@ struct hwrm_func_resource_qcaps_output {
UINT32_C(0x2)
#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_LAST \
HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
- /* Minimum guaranteed number of RSS/COS contexts */
+ /* Minimum guaranteed number of RSS/COS contexts. */
uint16_t min_rsscos_ctx;
/* Maximum non-guaranteed number of RSS/COS contexts */
uint16_t max_rsscos_ctx;
@@ -18156,33 +18805,43 @@ struct hwrm_func_resource_qcaps_output {
/* Maximum non-guaranteed number of ring groups */
uint16_t max_hw_ring_grps;
/*
- * Maximum number of inputs into the transmit scheduler for this function.
- * The number of TX rings assigned to the function cannot exceed this value.
+ * Maximum number of inputs into the transmit scheduler for this
+ * function. The number of TX rings assigned to the function cannot
+ * exceed this value.
*/
uint16_t max_tx_scheduler_inputs;
uint16_t flags;
/*
* When this bit is '1', it indicates that VF_RESOURCE_CFG supports
- * feature to reserve all minimum resources when minimum >= 1, otherwise
- * returns an error.
+ * feature to reserve all minimum resources when minimum >= 1,
+ * otherwise returns an error.
*/
#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_FLAGS_MIN_GUARANTEED \
UINT32_C(0x1)
- uint8_t unused_0[2];
- /* Minimum guaranteed number of Tx Key Contexts */
- uint32_t min_tx_key_ctxs;
- /* Maximum non-guaranteed number of Tx Key Contexts */
- uint32_t max_tx_key_ctxs;
- /* Minimum guaranteed number of Rx Key Contexts */
- uint32_t min_rx_key_ctxs;
- /* Maximum non-guaranteed number of Rx Key Contexts */
- uint32_t max_rx_key_ctxs;
- uint8_t unused_1[3];
+ /* Minimum guaranteed number of MSI-X vectors supported by function */
+ uint16_t min_msix;
+ /* Minimum guaranteed number of KTLS Tx Key Contexts */
+ uint32_t min_ktls_tx_key_ctxs;
+ /* Maximum non-guaranteed number of KTLS Tx Key Contexts */
+ uint32_t max_ktls_tx_key_ctxs;
+ /* Minimum guaranteed number of KTLS Rx Key Contexts */
+ uint32_t min_ktls_rx_key_ctxs;
+ /* Maximum non-guaranteed number of KTLS Rx Key Contexts */
+ uint32_t max_ktls_rx_key_ctxs;
+ /* Minimum guaranteed number of QUIC Tx Key Contexts */
+ uint32_t min_quic_tx_key_ctxs;
+ /* Maximum non-guaranteed number of QUIC Tx Key Contexts */
+ uint32_t max_quic_tx_key_ctxs;
+ /* Minimum guaranteed number of QUIC Rx Key Contexts */
+ uint32_t min_quic_rx_key_ctxs;
+ /* Maximum non-guaranteed number of QUIC Rx Key Contexts */
+ uint32_t max_quic_rx_key_ctxs;
+ uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -18193,7 +18852,7 @@ struct hwrm_func_resource_qcaps_output {
*****************************/
-/* hwrm_func_vf_resource_cfg_input (size:576b/72B) */
+/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */
struct hwrm_func_vf_resource_cfg_input {
/* The HWRM command request type. */
uint16_t req_type;
@@ -18267,18 +18926,27 @@ struct hwrm_func_vf_resource_cfg_input {
*/
#define HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED \
UINT32_C(0x1)
- uint8_t unused_0[2];
- /* Minimum guaranteed number of Tx Key Contexts */
- uint32_t min_tx_key_ctxs;
- /* Maximum non-guaranteed number of Tx Key Contexts */
- uint32_t max_tx_key_ctxs;
- /* Minimum guaranteed number of Rx Key Contexts */
- uint32_t min_rx_key_ctxs;
- /* Maximum non-guaranteed number of Rx Key Contexts */
- uint32_t max_rx_key_ctxs;
-} __rte_packed;
-
-/* hwrm_func_vf_resource_cfg_output (size:320b/40B) */
+ /* Minimum guaranteed number of MSI-X vectors for the function */
+ uint16_t min_msix;
+ /* Minimum guaranteed number of KTLS Tx Key Contexts */
+ uint32_t min_ktls_tx_key_ctxs;
+ /* Maximum non-guaranteed number of KTLS Tx Key Contexts */
+ uint32_t max_ktls_tx_key_ctxs;
+ /* Minimum guaranteed number of KTLS Rx Key Contexts */
+ uint32_t min_ktls_rx_key_ctxs;
+ /* Maximum non-guaranteed number of KTLS Rx Key Contexts */
+ uint32_t max_ktls_rx_key_ctxs;
+ /* Minimum guaranteed number of QUIC Tx Key Contexts */
+ uint32_t min_quic_tx_key_ctxs;
+ /* Maximum non-guaranteed number of QUIC Tx Key Contexts */
+ uint32_t max_quic_tx_key_ctxs;
+ /* Minimum guaranteed number of QUIC Rx Key Contexts */
+ uint32_t min_quic_rx_key_ctxs;
+ /* Maximum non-guaranteed number of QUIC Rx Key Contexts */
+ uint32_t max_quic_rx_key_ctxs;
+} __rte_packed;
+
+/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
struct hwrm_func_vf_resource_cfg_output {
/* The specific error status for the command. */
uint16_t error_code;
@@ -18304,16 +18972,20 @@ struct hwrm_func_vf_resource_cfg_output {
uint16_t reserved_stat_ctx;
/* Reserved number of ring groups */
uint16_t reserved_hw_ring_grps;
- /* Actual number of Tx Key Contexts reserved */
- uint32_t reserved_tx_key_ctxs;
- /* Actual number of Rx Key Contexts reserved */
- uint32_t reserved_rx_key_ctxs;
+ /* Actual number of KTLS Tx Key Contexts reserved */
+ uint32_t reserved_ktls_tx_key_ctxs;
+ /* Actual number of KTLS Rx Key Contexts reserved */
+ uint32_t reserved_ktls_rx_key_ctxs;
+ /* Actual number of QUIC Tx Key Contexts reserved */
+ uint32_t reserved_quic_tx_key_ctxs;
+ /* Actual number of QUIC Rx Key Contexts reserved */
+ uint32_t reserved_quic_rx_key_ctxs;
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -18395,11 +19067,17 @@ struct hwrm_func_backing_store_qcaps_output {
uint16_t cq_entry_size;
/* Maximum number of VNIC context entries supported for this function. */
uint16_t vnic_max_vnic_entries;
- /* Maximum number of Ring table context entries supported for this function. */
+ /*
+ * Maximum number of Ring table context entries supported for this
+ * function.
+ */
uint16_t vnic_max_ring_table_entries;
/* Number of bytes that must be allocated for each context entry. */
uint16_t vnic_entry_size;
- /* Maximum number of statistic context entries supported for this function. */
+ /*
+ * Maximum number of statistic context entries supported for this
+ * function.
+ */
uint32_t stat_max_entries;
/* Number of bytes that must be allocated for each context entry. */
uint16_t stat_entry_size;
@@ -18421,7 +19099,8 @@ struct hwrm_func_backing_store_qcaps_output {
* num_entries = num_vnics + num_l2_tx_rings + 2 * num_roce_qps + tqm_min_size
*
* Where:
- * num_vnics is the number of VNICs allocated in the VNIC backing store
+ * num_vnics is the number of VNICs allocated in the VNIC backing
+ * store
* num_l2_tx_rings is the number of L2 rings in the QP backing store
* num_roce_qps is the number of RoCE QPs in the QP backing store
* tqm_min_size is tqm_min_entries_per_ring reported by
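The num_entries sizing rule quoted at the start of this hunk is simple enough to check with a small calculation. The sketch below is illustrative only, with made-up example counts and a hypothetical helper name:

#include <stdint.h>

/* Illustrative only: TQM ring backing store sizing per the formula above. */
static uint32_t
bnxt_example_tqm_ring_entries(uint32_t num_vnics, uint32_t num_l2_tx_rings,
			      uint32_t num_roce_qps, uint32_t tqm_min_size)
{
	return num_vnics + num_l2_tx_rings + 2 * num_roce_qps + tqm_min_size;
}

/*
 * Example: 8 VNICs, 8 L2 TX rings, 64 RoCE QPs and a reported minimum of
 * 256 entries per ring give 8 + 8 + 128 + 256 = 400 entries.
 */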
@@ -18603,9 +19282,9 @@ struct hwrm_func_backing_store_qcaps_output {
uint8_t rsvd1[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -18841,6 +19520,12 @@ struct hwrm_func_backing_store_cfg_input {
*/
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_RKC \
UINT32_C(0x100000)
+ /*
+ * This bit must be '1' for the number of QPs reserved for fast
+ * qp modify destroy feature to be configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP_FAST_QPMD \
+ UINT32_C(0x200000)
/* QPC page size and level. */
uint8_t qpc_pg_size_qpc_lvl;
/* QPC PBL indirect levels. */
@@ -18853,7 +19538,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LAST \
@@ -18894,7 +19582,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LAST \
@@ -18935,7 +19626,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LAST \
@@ -18976,7 +19670,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LAST \
@@ -19017,7 +19714,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LAST \
@@ -19058,7 +19758,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LAST \
@@ -19099,7 +19802,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LAST \
@@ -19140,7 +19846,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LAST \
@@ -19181,7 +19890,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LAST \
@@ -19222,7 +19934,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LAST \
@@ -19263,7 +19978,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LAST \
@@ -19304,7 +20022,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LAST \
@@ -19345,7 +20066,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LAST \
@@ -19386,7 +20110,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LAST \
@@ -19427,7 +20154,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LAST \
@@ -19468,7 +20198,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LAST \
@@ -19545,11 +20278,11 @@ struct hwrm_func_backing_store_cfg_input {
* num_entries = num_vnics + num_l2_tx_rings + 2 * num_roce_qps + tqm_min_size
*
* Where:
- * num_vnics is the number of VNICs allocated in the VNIC backing store
- * num_l2_tx_rings is the number of L2 rings in the QP backing store
- * num_roce_qps is the number of RoCE QPs in the QP backing store
- * tqm_min_size is tqm_min_entries_per_ring reported by
- * HWRM_FUNC_BACKING_STORE_QCAPS
+ * num_vnics is the number of VNICs allocated in the VNIC backing
+ * store.
+ * num_l2_tx_rings is the number of L2 rings in the QP backing store.
+ * num_roce_qps is the number of RoCE QPs in the QP backing store.
+ * tqm_min_size is tqm_min_entries_per_ring reported by
+ * HWRM_FUNC_BACKING_STORE_QCAPS.
*
* Note that TQM ring sizes cannot be extended while the system is
* operational. If a PF driver needs to extend a TQM ring, it needs
@@ -19883,7 +20616,10 @@ struct hwrm_func_backing_store_cfg_input {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TKC_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TKC_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TKC_LVL_LAST \
@@ -19956,8 +20692,11 @@ struct hwrm_func_backing_store_cfg_input {
(UINT32_C(0x5) << 4)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_RKC_PG_SIZE_LAST \
HWRM_FUNC_BACKING_STORE_CFG_INPUT_RKC_PG_SIZE_PG_1G
- /* Reserved for future. */
- uint8_t rsvd[2];
+ /*
+ * Number of RoCE QP context entries reserved for this
+ * function to support fast QP modify destroy feature.
+ */
+ uint16_t qp_num_fast_qpmd_entries;
} __rte_packed;
/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
@@ -19973,9 +20712,9 @@ struct hwrm_func_backing_store_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -20170,6 +20909,12 @@ struct hwrm_func_backing_store_qcfg_output {
*/
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_ENABLES_RKC \
UINT32_C(0x100000)
+ /*
+ * This bit must be '1' for the number of QPs reserved for fast
+ * qp modify destroy feature to be configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_ENABLES_QP_FAST_QPMD \
+ UINT32_C(0x200000)
/* QPC page size and level. */
uint8_t qpc_pg_size_qpc_lvl;
/* QPC PBL indirect levels. */
@@ -20182,7 +20927,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LAST \
@@ -20223,7 +20971,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LAST \
@@ -20264,7 +21015,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LAST \
@@ -20305,7 +21059,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LAST \
@@ -20346,7 +21103,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LAST \
@@ -20387,7 +21147,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LAST \
@@ -20428,7 +21191,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LAST \
@@ -20469,7 +21235,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LAST \
@@ -20510,7 +21279,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LAST \
@@ -20551,7 +21323,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LAST \
@@ -20592,7 +21367,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LAST \
@@ -20633,7 +21411,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LAST \
@@ -20674,7 +21455,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LAST \
@@ -20715,7 +21499,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LAST \
@@ -20756,7 +21543,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LAST \
@@ -20797,7 +21587,10 @@ struct hwrm_func_backing_store_qcfg_output {
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_1 \
UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2 \
UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LAST \
@@ -21162,10 +21955,15 @@ struct hwrm_func_backing_store_qcfg_output {
(UINT32_C(0x5) << 4)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_RKC_PG_SIZE_LAST \
HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_RKC_PG_SIZE_PG_1G
- uint8_t unused_1[5];
+ /*
+ * Number of RoCE QP context entries required for this
+ * function to support fast QP modify destroy feature.
+ */
+ uint16_t qp_num_fast_qpmd_entries;
+ uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as 1
+ * is completely written to RAM. This field should be read as 1
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field
@@ -21579,7 +22377,7 @@ struct hwrm_error_recovery_qcfg_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field
@@ -21640,9 +22438,9 @@ struct hwrm_func_echo_response_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -21808,9 +22606,9 @@ struct hwrm_func_ptp_pin_qcfg_output {
uint8_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -22026,9 +22824,9 @@ struct hwrm_func_ptp_pin_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -22179,8 +22977,11 @@ struct hwrm_func_ptp_cfg_input {
/* 10Mhz sync in frequency. */
#define HWRM_FUNC_PTP_CFG_INPUT_PTP_FREQ_ADJ_DLL_PHASE_10M \
UINT32_C(0x3)
+ /* 25Mhz sync in frequency. */
+ #define HWRM_FUNC_PTP_CFG_INPUT_PTP_FREQ_ADJ_DLL_PHASE_25M \
+ UINT32_C(0x4)
#define HWRM_FUNC_PTP_CFG_INPUT_PTP_FREQ_ADJ_DLL_PHASE_LAST \
- HWRM_FUNC_PTP_CFG_INPUT_PTP_FREQ_ADJ_DLL_PHASE_10M
+ HWRM_FUNC_PTP_CFG_INPUT_PTP_FREQ_ADJ_DLL_PHASE_25M
uint8_t unused_0[3];
/*
* Period in nanoseconds (ns) for external signal
@@ -22231,9 +23032,9 @@ struct hwrm_func_ptp_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -22316,9 +23117,9 @@ struct hwrm_func_ptp_ts_query_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -22463,9 +23264,9 @@ struct hwrm_func_ptp_ext_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -22558,9 +23359,9 @@ struct hwrm_func_ptp_ext_qcfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -22571,7 +23372,7 @@ struct hwrm_func_ptp_ext_qcfg_output {
***************************/
-/* hwrm_func_key_ctx_alloc_input (size:320b/40B) */
+/* hwrm_func_key_ctx_alloc_input (size:384b/48B) */
struct hwrm_func_key_ctx_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
@@ -22603,9 +23404,26 @@ struct hwrm_func_key_ctx_alloc_input {
uint64_t resp_addr;
/* Function ID. */
uint16_t fid;
- /* Number of Key Contexts to be allocated. */
+ /*
+ * Number of Key Contexts to be allocated.
+ * When running in the XID partition mode, if the call is made by
+ * a VF driver, this field specifies the number of XIDs requested
+ * by the VF driver. The XID partitions are managed by the PF
+ * driver in XID partition mode and the VF command will be
+ * redirected to the PF driver. The PF driver may reduce this
+ * number if it cannot allocate a big enough block of XID
+ * partitions to satisfy the request.
+ * This field must not exceed the maximum batch size specified in
+ * the max_key_ctxs_alloc field of the HWRM_FUNC_QCAPS response,
+ * must not be zero, and must be integer multiples of the
+ * partition size specified in the ctxs_per_partition field of
+ * the HWRM_FUNC_QCAPS response.
+ */
uint16_t num_key_ctxs;
- /* DMA buffer size in bytes. */
+ /*
+ * DMA buffer size in bytes. This field is invalid in the XID
+ * partition mode.
+ */
uint32_t dma_bufr_size_bytes;
/* Key Context type. */
uint8_t key_ctx_type;
@@ -22624,11 +23442,24 @@ struct hwrm_func_key_ctx_alloc_input {
#define HWRM_FUNC_KEY_CTX_ALLOC_INPUT_KEY_CTX_TYPE_LAST \
HWRM_FUNC_KEY_CTX_ALLOC_INPUT_KEY_CTX_TYPE_QUIC_RX
uint8_t unused_0[7];
- /* Host DMA address to send back KTLS context IDs. */
+ /*
+ * Host DMA address to send back KTLS context IDs. This field is
+ * invalid in the XID partition mode.
+ */
uint64_t host_dma_addr;
+ /*
+ * This field is only used by the PF driver that manages the XID
+ * partitions. This field specifies the starting XID of one or
+ * more contiguous XID partitions allocated by the PF driver.
+ * This field is not used by the VF driver.
+ * If the call is successful, this starting XID value will be
+ * returned in the partition_start_xid field of the response.
+ */
+ uint32_t partition_start_xid;
+ uint8_t unused_1[4];
} __rte_packed;
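+
+/*
+ * Illustrative sketch only, not part of the generated interface: one way a
+ * driver could derive a legal num_key_ctxs value in XID partition mode from
+ * the ctxs_per_partition and max_key_ctxs_alloc limits reported by
+ * HWRM_FUNC_QCAPS, following the constraints documented above. The helper
+ * name and its parameters are hypothetical.
+ */
+static inline uint16_t
+bnxt_xid_alloc_count(uint16_t wanted, uint16_t ctxs_per_partition,
+		     uint16_t max_key_ctxs_alloc)
+{
+	uint32_t cnt;
+
+	if (wanted == 0 || ctxs_per_partition == 0)
+		return 0;
+	/* Round the request up to a whole number of partitions. */
+	cnt = ((wanted + ctxs_per_partition - 1) / ctxs_per_partition) *
+	      ctxs_per_partition;
+	/* Never exceed the per-command batch limit. */
+	if (cnt > max_key_ctxs_alloc)
+		cnt = max_key_ctxs_alloc -
+		      (max_key_ctxs_alloc % ctxs_per_partition);
+	return (uint16_t)cnt;
+}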
-/* hwrm_func_key_ctx_alloc_output (size:128b/16B) */
+/* hwrm_func_key_ctx_alloc_output (size:192b/24B) */
struct hwrm_func_key_ctx_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
@@ -22638,7 +23469,7 @@ struct hwrm_func_key_ctx_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Actual number of Key Contexts allocated. */
+ /* Number of Key Contexts that have been allocated. */
uint16_t num_key_ctxs_allocated;
/* Control flags. */
uint8_t flags;
@@ -22646,22 +23477,116 @@ struct hwrm_func_key_ctx_alloc_output {
* When set, it indicates that all key contexts allocated by this
* command are contiguous. As a result, the driver has to read the
* start context ID from the first entry of the DMA data buffer
- * and figures out the end context ID by “start context ID +
- * num_key_ctxs_allocated - 1”.
+ * and figure out the end context ID by 'start context ID +
+ * num_key_ctxs_allocated - 1'. In XID partition mode,
+ * this bit should always be set.
*/
#define HWRM_FUNC_KEY_CTX_ALLOC_OUTPUT_FLAGS_KEY_CTXS_CONTIGUOUS \
UINT32_C(0x1)
- uint8_t unused_0[4];
+ uint8_t unused_0;
+ /*
+ * This field is only valid in the XID partition mode. It indicates
+ * the starting XID that has been allocated.
+ */
+ uint32_t partition_start_xid;
+ uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
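+
+/*
+ * Minimal usage sketch, assuming XID partition mode (where the contiguous
+ * flag is always set and partition_start_xid is valid): derive the allocated
+ * XID range directly from the response. rte_le_to_cpu_*() from
+ * rte_byteorder.h are assumed to be available; the helper name is
+ * hypothetical.
+ */
+static inline void
+bnxt_key_ctx_alloc_range(const struct hwrm_func_key_ctx_alloc_output *resp,
+			 uint32_t *start_xid, uint32_t *end_xid)
+{
+	*start_xid = rte_le_to_cpu_32(resp->partition_start_xid);
+	*end_xid = *start_xid +
+		   rte_le_to_cpu_16(resp->num_key_ctxs_allocated) - 1;
+}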
+/**************************
+ * hwrm_func_key_ctx_free *
+ **************************/
+
+
+/* hwrm_func_key_ctx_free_input (size:256b/32B) */
+struct hwrm_func_key_ctx_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Function ID. */
+ uint16_t fid;
+ /* Key Context type. */
+ uint8_t key_ctx_type;
+ /* KTLS Tx Key Context type. */
+ #define HWRM_FUNC_KEY_CTX_FREE_INPUT_KEY_CTX_TYPE_TX UINT32_C(0x0)
+ /* KTLS Rx Key Context type. */
+ #define HWRM_FUNC_KEY_CTX_FREE_INPUT_KEY_CTX_TYPE_RX UINT32_C(0x1)
+ /* QUIC Tx Key Context type. */
+ #define HWRM_FUNC_KEY_CTX_FREE_INPUT_KEY_CTX_TYPE_QUIC_TX UINT32_C(0x2)
+ /* QUIC Rx Key Context type. */
+ #define HWRM_FUNC_KEY_CTX_FREE_INPUT_KEY_CTX_TYPE_QUIC_RX UINT32_C(0x3)
+ #define HWRM_FUNC_KEY_CTX_FREE_INPUT_KEY_CTX_TYPE_LAST \
+ HWRM_FUNC_KEY_CTX_FREE_INPUT_KEY_CTX_TYPE_QUIC_RX
+ uint8_t unused_0;
+ /* Starting XID of the partition that needs to be freed. */
+ uint32_t partition_start_xid;
+ /*
+ * Number of entries to be freed.
+ * When running in the XID partition mode, this field is only
+ * used by the PF driver that manages the XID partitions.
+ * The PF driver specifies the number of XIDs to be freed and
+ * this number is always equal to the number of XIDs previously
+ * allocated successfully using HWRM_FUNC_KEY_CTX_ALLOC.
+ * This field is not used by the VF driver.
+ */
+ uint16_t num_entries;
+ uint8_t unused_1[6];
+} __rte_packed;
+
+/* hwrm_func_key_ctx_free_output (size:128b/16B) */
+struct hwrm_func_key_ctx_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t rsvd0[7];
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to
+ * an internal processor, the order of writes has to be such
+ * that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
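+
+/*
+ * Sketch of a matching free request (hypothetical helper): in XID partition
+ * mode the PF driver echoes back the starting XID and the exact count it
+ * previously obtained through HWRM_FUNC_KEY_CTX_ALLOC, as required by the
+ * num_entries description above.
+ */
+static inline void
+bnxt_key_ctx_free_fill(struct hwrm_func_key_ctx_free_input *req,
+		       uint16_t fid, uint8_t key_ctx_type,
+		       uint32_t start_xid, uint16_t num_xids)
+{
+	req->fid = rte_cpu_to_le_16(fid);
+	req->key_ctx_type = key_ctx_type;
+	req->partition_start_xid = rte_cpu_to_le_32(start_xid);
+	req->num_entries = rte_cpu_to_le_16(num_xids);
+}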
+
/**********************************
* hwrm_func_backing_store_cfg_v2 *
**********************************/
@@ -22747,12 +23672,33 @@ struct hwrm_func_backing_store_cfg_v2_input {
/* CQ Doorbell shadow region. */
#define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CQ_DB_SHADOW \
UINT32_C(0x19)
- /* QUIC Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_QUIC_TKC \
- UINT32_C(0x1a)
- /* QUIC Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_QUIC_RKC \
- UINT32_C(0x1b)
+ /* CFA table scope context. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TBL_SCOPE \
+ UINT32_C(0x1c)
+ /* XID partition context. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_XID_PARTITION \
+ UINT32_C(0x1d)
+ /* SRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SRT_TRACE \
+ UINT32_C(0x1e)
+ /* SRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SRT2_TRACE \
+ UINT32_C(0x1f)
+ /* CRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CRT_TRACE \
+ UINT32_C(0x20)
+ /* CRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CRT2_TRACE \
+ UINT32_C(0x21)
+ /* RIGP0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_RIGP0_TRACE \
+ UINT32_C(0x22)
+ /* L2 HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_L2_HWRM_TRACE \
+ UINT32_C(0x23)
+ /* RoCE HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_ROCE_HWRM_TRACE \
+ UINT32_C(0x24)
/* Invalid type. */
#define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_INVALID \
UINT32_C(0xffff)
@@ -22799,10 +23745,10 @@ struct hwrm_func_backing_store_cfg_v2_input {
* The size specified in the command will be the new size to be
* configured. The operation is only valid when the specific backing
* store has been configured before. Otherwise, the firmware will
- * return an error. The driver needs to zero out the “entry_size”,
- * “flags”, “page_dir”, and “page_size_pbl_level” fields, and the
+ * return an error. The driver needs to zero out the 'entry_size',
+ * 'flags', 'page_dir', and 'page_size_pbl_level' fields, and the
* firmware will ignore these inputs. Further, the firmware expects
- * the “num_entries” and any valid split entries to be no less than
+ * the 'num_entries' and any valid split entries to be no less than
* the initial value that has been configured. If not, it will
* return an error code.
*/
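+
+/*
+ * Sketch under the assumptions stated in the comment above: when resizing an
+ * already-configured backing store, the driver zeroes the fields the
+ * firmware ignores and supplies only the enlarged num_entries. The helper is
+ * hypothetical and omits how the resize operation itself is selected.
+ */
+static inline void
+bnxt_bs_cfg_v2_extend_prep(struct hwrm_func_backing_store_cfg_v2_input *req,
+			   uint32_t new_num_entries)
+{
+	req->entry_size = 0;
+	req->flags = 0;
+	req->page_dir = 0;
+	req->page_size_pbl_level = 0;
+	/* Must not be smaller than the previously configured size. */
+	req->num_entries = rte_cpu_to_le_32(new_num_entries);
+}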
@@ -22883,6 +23829,7 @@ struct hwrm_func_backing_store_cfg_v2_input {
* | VINC | vnic_split_entries |
* | MRAV | mrav_split_entries |
* | TS | ts_split_entries |
+ * | CK | ck_split_entries |
*/
uint32_t split_entry_0;
/* Split entry #1. */
@@ -22906,7 +23853,7 @@ struct hwrm_func_backing_store_cfg_v2_output {
uint8_t rsvd0[7];
/*
* This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
+ * output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been completely
* written. When writing a command completion or response to
* an internal processor, the order of writes has to be such
@@ -23000,12 +23947,33 @@ struct hwrm_func_backing_store_qcfg_v2_input {
/* CQ Doorbell shadow region. */
#define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_CQ_DB_SHADOW \
UINT32_C(0x19)
- /* QUIC Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_QUIC_TKC \
- UINT32_C(0x1a)
- /* QUIC Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_QUIC_RKC \
- UINT32_C(0x1b)
+ /* CFA table scope context. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_TBL_SCOPE \
+ UINT32_C(0x1c)
+ /* VF XID partition in-use table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_XID_PARTITION_TABLE \
+ UINT32_C(0x1d)
+ /* SRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_SRT_TRACE \
+ UINT32_C(0x1e)
+ /* SRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_SRT2_TRACE \
+ UINT32_C(0x1f)
+ /* CRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_CRT_TRACE \
+ UINT32_C(0x20)
+ /* CRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_CRT2_TRACE \
+ UINT32_C(0x21)
+ /* RIGP0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_RIGP0_TRACE \
+ UINT32_C(0x22)
+ /* L2 HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_L2_HWRM_TRACE \
+ UINT32_C(0x23)
+ /* RoCE HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_ROCE_HWRM_TRACE \
+ UINT32_C(0x24)
/* Invalid type. */
#define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_INVALID \
UINT32_C(0xffff)
@@ -23068,21 +24036,42 @@ struct hwrm_func_backing_store_qcfg_v2_output {
/* TIM. */
#define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_TIM \
UINT32_C(0xf)
- /* Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_TKC \
+ /* Tx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_TX_CK \
UINT32_C(0x13)
- /* Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_RKC \
+ /* Rx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_RX_CK \
UINT32_C(0x14)
/* Mid-path TQM ring. */
#define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_MP_TQM_RING \
UINT32_C(0x15)
- /* QUIC Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_QUIC_TKC \
- UINT32_C(0x1a)
- /* QUIC Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_QUIC_RKC \
- UINT32_C(0x1b)
+ /* CFA table scope context. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_TBL_SCOPE \
+ UINT32_C(0x1c)
+ /* XID partition context. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_XID_PARTITION \
+ UINT32_C(0x1d)
+ /* SRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_SRT_TRACE \
+ UINT32_C(0x1e)
+ /* SRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_SRT2_TRACE \
+ UINT32_C(0x1f)
+ /* CRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_CRT_TRACE \
+ UINT32_C(0x20)
+ /* CRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_CRT2_TRACE \
+ UINT32_C(0x21)
+ /* RIGP0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_RIGP0_TRACE \
+ UINT32_C(0x22)
+ /* L2 HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_L2_HWRM_TRACE \
+ UINT32_C(0x23)
+ /* RoCE HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_ROCE_HWRM_TRACE \
+ UINT32_C(0x24)
/* Invalid type. */
#define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_INVALID \
UINT32_C(0xffff)
@@ -23191,7 +24180,7 @@ struct hwrm_func_backing_store_qcfg_v2_output {
uint8_t rsvd2[7];
/*
* This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
+ * output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been completely
* written. When writing a command completion or response to
* an internal processor, the order of writes has to be such
@@ -23200,6 +24189,12 @@ struct hwrm_func_backing_store_qcfg_v2_output {
uint8_t valid;
} __rte_packed;
+/* Common structure to cast QPC split entries. This casting is required in the
+ * following HWRM command inputs/outputs if the backing store type is QPC.
+ * 1. hwrm_func_backing_store_cfg_v2_input
+ * 2. hwrm_func_backing_store_qcfg_v2_output
+ * 3. hwrm_func_backing_store_qcaps_v2_output
+ */
/* qpc_split_entries (size:128b/16B) */
struct qpc_split_entries {
/* Number of L2 QP backing store entries. */
@@ -23214,6 +24209,12 @@ struct qpc_split_entries {
uint32_t rsvd;
} __rte_packed;
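+
+/*
+ * Sketch: when the backing store type is QPC, the generic split_entry_0..3
+ * words of the *_cfg_v2/_qcfg_v2/_qcaps_v2 messages are read through the
+ * common structure above. memcpy() from <string.h> is assumed to be
+ * available; the helper name is hypothetical.
+ */
+static inline void
+bnxt_bs_qpc_split_get(const struct hwrm_func_backing_store_cfg_v2_input *req,
+		      struct qpc_split_entries *qpc)
+{
+	/* split_entry_0 is the first of four consecutive 32-bit words. */
+	memcpy(qpc, &req->split_entry_0, sizeof(*qpc));
+}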
+/* Common structure to cast SRQ split entries. This casting is required in the
+ * following HWRM command inputs/outputs if the backing store type is SRQ.
+ * 1. hwrm_func_backing_store_cfg_v2_input
+ * 2. hwrm_func_backing_store_qcfg_v2_output
+ * 3. hwrm_func_backing_store_qcaps_v2_output
+ */
/* srq_split_entries (size:128b/16B) */
struct srq_split_entries {
/* Number of L2 SRQ backing store entries. */
@@ -23222,6 +24223,12 @@ struct srq_split_entries {
uint32_t rsvd2[2];
} __rte_packed;
+/* Common structure to cast CQ split entries. This casting is required in the
+ * following HWRM command inputs/outputs if the backing store type is CQ.
+ * 1. hwrm_func_backing_store_cfg_v2_input
+ * 2. hwrm_func_backing_store_qcfg_v2_output
+ * 3. hwrm_func_backing_store_qcaps_v2_output
+ */
/* cq_split_entries (size:128b/16B) */
struct cq_split_entries {
/* Number of L2 CQ backing store entries. */
@@ -23230,6 +24237,12 @@ struct cq_split_entries {
uint32_t rsvd2[2];
} __rte_packed;
+/* Common structure to cast VNIC split entries. This casting is required in the
+ * following HWRM command inputs/outputs if the backing store type is VNIC.
+ * 1. hwrm_func_backing_store_cfg_v2_input
+ * 2. hwrm_func_backing_store_qcfg_v2_output
+ * 3. hwrm_func_backing_store_qcaps_v2_output
+ */
/* vnic_split_entries (size:128b/16B) */
struct vnic_split_entries {
/* Number of VNIC backing store entries. */
@@ -23238,6 +24251,12 @@ struct vnic_split_entries {
uint32_t rsvd2[2];
} __rte_packed;
+/* Common structure to cast MRAV split entries. This casting is required in the
+ * following HWRM command inputs/outputs if the backing store type is MRAV.
+ * 1. hwrm_func_backing_store_cfg_v2_input
+ * 2. hwrm_func_backing_store_qcfg_v2_output
+ * 3. hwrm_func_backing_store_qcaps_v2_output
+ */
/* mrav_split_entries (size:128b/16B) */
struct mrav_split_entries {
/* Number of AV backing store entries. */
@@ -23246,6 +24265,13 @@ struct mrav_split_entries {
uint32_t rsvd2[2];
} __rte_packed;
+/* Common structure to cast TBL_SCOPE split entries. This casting is required
+ * in the following HWRM command inputs/outputs if the backing store type is
+ * TBL_SCOPE.
+ * 1. hwrm_func_backing_store_cfg_v2_input
+ * 2. hwrm_func_backing_store_qcfg_v2_output
+ * 3. hwrm_func_backing_store_qcaps_v2_output
+ */
/* ts_split_entries (size:128b/16B) */
struct ts_split_entries {
/* Max number of TBL_SCOPE region entries (QCAPS). */
@@ -23344,11 +24370,11 @@ struct hwrm_func_backing_store_qcaps_v2_input {
/* TIM. */
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_TIM \
UINT32_C(0xf)
- /* Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_TKC \
+ /* Tx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_TX_CK \
UINT32_C(0x13)
- /* Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_RKC \
+ /* Rx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_RX_CK \
UINT32_C(0x14)
/* Mid-path TQM ring. */
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_MP_TQM_RING \
@@ -23365,12 +24391,33 @@ struct hwrm_func_backing_store_qcaps_v2_input {
/* CQ Doorbell shadow region. */
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_CQ_DB_SHADOW \
UINT32_C(0x19)
- /* QUIC Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_QUIC_TKC \
- UINT32_C(0x1a)
- /* QUIC Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_QUIC_RKC \
- UINT32_C(0x1b)
+ /* CFA table scope context. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_TBL_SCOPE \
+ UINT32_C(0x1c)
+ /* XID partition context. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_XID_PARTITION \
+ UINT32_C(0x1d)
+ /* SRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_SRT_TRACE \
+ UINT32_C(0x1e)
+ /* SRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_SRT2_TRACE \
+ UINT32_C(0x1f)
+ /* CRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_CRT_TRACE \
+ UINT32_C(0x20)
+ /* CRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_CRT2_TRACE \
+ UINT32_C(0x21)
+ /* RIGP0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_RIGP0_TRACE \
+ UINT32_C(0x22)
+ /* L2 HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_L2_HWRM_TRACE \
+ UINT32_C(0x23)
+ /* RoCE HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_ROCE_HWRM_TRACE \
+ UINT32_C(0x24)
/* Invalid type. */
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_INVALID \
UINT32_C(0xffff)
@@ -23418,11 +24465,11 @@ struct hwrm_func_backing_store_qcaps_v2_output {
/* TIM. */
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_TIM \
UINT32_C(0xf)
- /* KTLS Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_TKC \
+ /* Tx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_TX_CK \
UINT32_C(0x13)
- /* KTLS Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_RKC \
+ /* Rx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_RX_CK \
UINT32_C(0x14)
/* Mid-path TQM ring. */
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_MP_TQM_RING \
@@ -23439,12 +24486,33 @@ struct hwrm_func_backing_store_qcaps_v2_output {
/* CQ Doorbell shadow region. */
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_CQ_DB_SHADOW \
UINT32_C(0x19)
- /* QUIC Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_QUIC_TKC \
- UINT32_C(0x1a)
- /* QUIC Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_QUIC_RKC \
- UINT32_C(0x1b)
+ /* CFA table scope context. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_TBL_SCOPE \
+ UINT32_C(0x1c)
+ /* XID partition context. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_XID_PARTITION \
+ UINT32_C(0x1d)
+ /* SRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SRT_TRACE \
+ UINT32_C(0x1e)
+ /* SRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SRT2_TRACE \
+ UINT32_C(0x1f)
+ /* CRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_CRT_TRACE \
+ UINT32_C(0x20)
+ /* CRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_CRT2_TRACE \
+ UINT32_C(0x21)
+ /* RIGP0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_RIGP0_TRACE \
+ UINT32_C(0x22)
+ /* L2 HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_L2_HWRM_TRACE \
+ UINT32_C(0x23)
+ /* RoCE HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_ROCE_HWRM_TRACE \
+ UINT32_C(0x24)
/* Invalid type. */
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_INVALID \
UINT32_C(0xffff)
@@ -23456,7 +24524,7 @@ struct hwrm_func_backing_store_qcaps_v2_output {
uint32_t flags;
/*
* When set, it indicates the context type should be initialized
- * with the “ctx_init_value” at the specified offset.
+ * with the 'ctx_init_value' at the specified offset.
*/
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_ENABLE_CTX_KIND_INIT \
UINT32_C(0x1)
@@ -23596,7 +24664,6 @@ struct hwrm_func_backing_store_qcaps_v2_output {
* | VINC | vnic_split_entries |
* | MRAV | mrav_split_entries |
* | TS | ts_split_entries |
- * | CK | ck_split_entries |
*/
uint32_t split_entry_0;
/* Split entry #1. */
@@ -23608,7 +24675,7 @@ struct hwrm_func_backing_store_qcaps_v2_output {
uint8_t rsvd3[3];
/*
* This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
+ * output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been completely
* written. When writing a command completion or response to
* an internal processor, the order of writes has to be such
@@ -23678,7 +24745,7 @@ struct hwrm_func_dbr_pacing_cfg_input {
#define HWRM_FUNC_DBR_PACING_CFG_INPUT_ENABLES_PACING_THRESHOLD_VALID \
UINT32_C(0x2)
/*
- * Specify primary function’s NQ ID to receive the doorbell pacing
+ * Specify primary function's NQ ID to receive the doorbell pacing
* threshold crossing events.
*/
uint32_t primary_nq_id;
@@ -23703,7 +24770,7 @@ struct hwrm_func_dbr_pacing_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -23895,7 +24962,7 @@ struct hwrm_func_dbr_pacing_qcfg_output {
/* This field indicates the maximum depth of the doorbell FIFO. */
uint32_t dbr_stat_db_max_fifo_depth;
/*
- * Specifies primary function’s NQ ID.
+ * Specifies primary function's NQ ID.
* A value of 0xFFFF FFFF indicates NQ ID is invalid.
*/
uint32_t primary_nq_id;
@@ -23907,7 +24974,7 @@ struct hwrm_func_dbr_pacing_qcfg_output {
uint8_t unused_4[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -23966,7 +25033,7 @@ struct hwrm_func_dbr_pacing_broadcast_event_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -24059,9 +25126,9 @@ struct hwrm_func_dbr_pacing_nqlist_query_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -24128,7 +25195,7 @@ struct hwrm_func_dbr_recovery_completed_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -24241,9 +25308,9 @@ struct hwrm_func_synce_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -24324,21 +25391,21 @@ struct hwrm_func_synce_qcfg_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/***********************
- * hwrm_func_vlan_qcfg *
- ***********************/
+/************************
+ * hwrm_func_lag_create *
+ ************************/
-/* hwrm_func_vlan_qcfg_input (size:192b/24B) */
-struct hwrm_func_vlan_qcfg_input {
+/* hwrm_func_lag_create_input (size:192b/24B) */
+struct hwrm_func_lag_create_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -24367,18 +25434,114 @@ struct hwrm_func_vlan_qcfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ uint8_t enables;
/*
- * Function ID of the function that is being
+ * This bit must be '1' for the active_port_map field to be
* configured.
- * If set to 0xFF... (All Fs), then the configuration is
- * for the requesting function.
*/
- uint16_t fid;
- uint8_t unused_0[6];
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_ACTIVE_PORT_MAP \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the member_port_map field to be
+ * configured.
+ */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_MEMBER_PORT_MAP \
+ UINT32_C(0x2)
+ /* This bit must be '1' for the aggr_mode field to be configured. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_AGGR_MODE \
+ UINT32_C(0x4)
+ /* rsvd1 is 5 b */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_RSVD1_MASK \
+ UINT32_C(0xf8)
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_RSVD1_SFT 3
+ /*
+ * This is the bitmap of all active ports in the LAG. Each bit
+ * represents a front panel port of the device. Ports are numbered
+ * from 0 to n - 1 on a device with n ports. The number of front panel
+ * ports is specified in the port_cnt field of the HWRM_PORT_PHY_QCAPS
+ * response. The active_port_map must always be a subset of the
+ * member_port_map. An active port is eligible to send and receive
+ * traffic.
+ *
+ * If the LAG mode is active-backup, only one port can be an active
+ * port at a given time. All other ports in the member_port_map that
+ * are not the active port are backup ports. When the active port
+ * fails, another member port takes over to become the active port.
+ * The driver should use HWRM_FUNC_LAG_UPDATE to update
+ * the active_port_map by only setting the port bit of the new active
+ * port.
+ *
+ * In active-active, balance_xor or 802_3_ad mode, all member ports
+ * can be active ports. If the driver determines that an active
+ * port is down or unable to function, it should use
+ * HWRM_FUNC_LAG_UPDATE to update the active_port_map by clearing
+ * the port bit that has failed.
+ */
+ uint8_t active_port_map;
+ /* If this bit is set to '1', the port0 is a lag active port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_PORT_0 \
+ UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag active port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_PORT_1 \
+ UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag active port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_PORT_2 \
+ UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag active port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_PORT_3 \
+ UINT32_C(0x8)
+ /* rsvd3 is 4 b */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_RSVD3_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_RSVD3_SFT 4
+ /*
+ * This is the bitmap of all member ports in the LAG. Each bit
+ * represents a front panel port of the device. Ports are numbered
+ * from 0 to n - 1 on a device with n ports. The number of front panel
+ * ports is specified in the port_cnt field of the HWRM_PORT_PHY_QCAPS
+ * response. There must be at least 2 ports in the member ports and
+ * each must not be a member of another LAG. Note that on a 4-port
+ * device, there can be either 2 ports or 4 ports in the member ports.
+ * Using 3 member ports is not supported.
+ */
+ uint8_t member_port_map;
+ /* If this bit is set to '1', the port0 is a lag member port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_PORT_0 \
+ UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag member port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_PORT_1 \
+ UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag member port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_PORT_2 \
+ UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag member port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_PORT_3 \
+ UINT32_C(0x8)
+ /* rsvd4 is 4 b */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_RSVD4_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_RSVD4_SFT 4
+ /* Link aggregation mode being used. */
+ uint8_t link_aggr_mode;
+ /* active active mode. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_ACTIVE_ACTIVE \
+ UINT32_C(0x1)
+ /* active backup mode. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_ACTIVE_BACKUP \
+ UINT32_C(0x2)
+ /* Balance XOR mode. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_BALANCE_XOR \
+ UINT32_C(0x3)
+ /* 802.3AD mode. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_802_3_AD \
+ UINT32_C(0x4)
+ #define HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_LAST \
+ HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_802_3_AD
+ uint8_t unused_0[4];
} __rte_packed;
-/* hwrm_func_vlan_qcfg_output (size:320b/40B) */
-struct hwrm_func_vlan_qcfg_output {
+/* hwrm_func_lag_create_output (size:128b/16B) */
+struct hwrm_func_lag_create_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -24387,49 +25550,29 @@ struct hwrm_func_vlan_qcfg_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint64_t unused_0;
- /* S-TAG VLAN identifier configured for the function. */
- uint16_t stag_vid;
- /* S-TAG PCP value configured for the function. */
- uint8_t stag_pcp;
- uint8_t unused_1;
/*
- * S-TAG TPID value configured for the function. This field is specified in
- * network byte order.
+ * LAG ID of the created LAG. This LAG ID will also be returned
+ * in the HWRM_FUNC_QCFG response of all member ports.
*/
- uint16_t stag_tpid;
- /* C-TAG VLAN identifier configured for the function. */
- uint16_t ctag_vid;
- /* C-TAG PCP value configured for the function. */
- uint8_t ctag_pcp;
- uint8_t unused_2;
- /*
- * C-TAG TPID value configured for the function. This field is specified in
- * network byte order.
- */
- uint16_t ctag_tpid;
- /* Future use. */
- uint32_t rsvd2;
- /* Future use. */
- uint32_t rsvd3;
- uint8_t unused_3[3];
+ uint8_t fw_lag_id;
+ uint8_t unused_0[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
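+
+/*
+ * Usage sketch (hypothetical helper, send path omitted): create an
+ * active-backup LAG over ports 0 and 1 with port 0 initially active. On
+ * success the firmware returns the LAG handle in
+ * hwrm_func_lag_create_output::fw_lag_id, which is then passed to
+ * HWRM_FUNC_LAG_UPDATE on failover.
+ */
+static inline void
+bnxt_func_lag_create_fill(struct hwrm_func_lag_create_input *req)
+{
+	req->enables = HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_ACTIVE_PORT_MAP |
+		       HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_MEMBER_PORT_MAP |
+		       HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_AGGR_MODE;
+	/* Ports 0 and 1 are members; only port 0 starts out active. */
+	req->member_port_map =
+		HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_PORT_0 |
+		HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_PORT_1;
+	req->active_port_map =
+		HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_PORT_0;
+	req->link_aggr_mode =
+		HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_ACTIVE_BACKUP;
+}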
-/**********************
- * hwrm_func_vlan_cfg *
- **********************/
+/************************
+ * hwrm_func_lag_update *
+ ************************/
-/* hwrm_func_vlan_cfg_input (size:384b/48B) */
-struct hwrm_func_vlan_cfg_input {
+/* hwrm_func_lag_update_input (size:192b/24B) */
+struct hwrm_func_lag_update_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -24458,74 +25601,116 @@ struct hwrm_func_vlan_cfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* Link aggregation group ID of the LAG to be updated. */
+ uint8_t fw_lag_id;
+ uint8_t enables;
/*
- * Function ID of the function that is being
- * configured.
- * If set to 0xFF... (All Fs), then the configuration is
- * for the requesting function.
- */
- uint16_t fid;
- uint8_t unused_0[2];
- uint32_t enables;
- /*
- * This bit must be '1' for the stag_vid field to be
- * configured.
- */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1)
- /*
- * This bit must be '1' for the ctag_vid field to be
- * configured.
- */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2)
- /*
- * This bit must be '1' for the stag_pcp field to be
- * configured.
- */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4)
- /*
- * This bit must be '1' for the ctag_pcp field to be
- * configured.
- */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8)
- /*
- * This bit must be '1' for the stag_tpid field to be
- * configured.
- */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10)
- /*
- * This bit must be '1' for the ctag_tpid field to be
- * configured.
- */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20)
- /* S-TAG VLAN identifier configured for the function. */
- uint16_t stag_vid;
- /* S-TAG PCP value configured for the function. */
- uint8_t stag_pcp;
- uint8_t unused_1;
- /*
- * S-TAG TPID value configured for the function. This field is specified in
- * network byte order.
+ * This bit must be '1' for the active_port_map field to be
+ * updated.
*/
- uint16_t stag_tpid;
- /* C-TAG VLAN identifier configured for the function. */
- uint16_t ctag_vid;
- /* C-TAG PCP value configured for the function. */
- uint8_t ctag_pcp;
- uint8_t unused_2;
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ENABLES_ACTIVE_PORT_MAP \
+ UINT32_C(0x1)
/*
- * C-TAG TPID value configured for the function. This field is specified in
- * network byte order.
+ * This bit must be '1' for the member_port_map field to be
+ * updated.
*/
- uint16_t ctag_tpid;
- /* Future use. */
- uint32_t rsvd1;
- /* Future use. */
- uint32_t rsvd2;
- uint8_t unused_3[4];
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ENABLES_MEMBER_PORT_MAP \
+ UINT32_C(0x2)
+ /* This bit must be '1' for the aggr_mode field to be updated. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ENABLES_AGGR_MODE \
+ UINT32_C(0x4)
+ /* rsvd1 is 5 b */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ENABLES_RSVD1_MASK \
+ UINT32_C(0xf8)
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ENABLES_RSVD1_SFT 3
+ /*
+ * This is the bitmap of all active ports in the LAG. Each bit
+ * represents a front panel port of the device. Ports are numbered
+ * from 0 to n - 1 on a device with n ports. The number of front panel
+ * ports is specified in the port_cnt field of the HWRM_PORT_PHY_QCAPS
+ * response. The active_port_map must always be a subset of the
+ * member_port_map. An active port is eligible to send and receive
+ * traffic.
+ *
+ * If the LAG mode is active-backup, only one port can be an active
+ * port at a given time. All other ports in the member_port_map that
+ * are not the active port are backup ports. When the active port
+ * fails, another member port takes over to become the active port.
+ * The driver should use HWRM_FUNC_LAG_UPDATE to update
+ * the active_port_map by only setting the port bit of the new active
+ * port.
+ *
+ * In active-active, balance_xor or 802_3_ad mode, all member ports
+ * can be active ports. If the driver determines that an active
+ * port is down or unable to function, it should use
+ * HWRM_FUNC_LAG_UPDATE to update the active_port_map by clearing
+ * the port bit that has failed.
+ */
+ uint8_t active_port_map;
+ /* If this bit is set to '1', the port0 is a lag active port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ACTIVE_PORT_MAP_PORT_0 \
+ UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag active port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ACTIVE_PORT_MAP_PORT_1 \
+ UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag active port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ACTIVE_PORT_MAP_PORT_2 \
+ UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag active port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ACTIVE_PORT_MAP_PORT_3 \
+ UINT32_C(0x8)
+ /* rsvd3 is 4 b */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ACTIVE_PORT_MAP_RSVD3_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ACTIVE_PORT_MAP_RSVD3_SFT 4
+ /*
+ * This is the bitmap of all member ports in the LAG. Each bit
+ * represents a front panel port of the device. Ports are numbered
+ * from 0 to n - 1 on a device with n ports. The number of front panel
+ * ports is specified in the port_cnt field of the HWRM_PORT_PHY_QCAPS
+ * response. There must be at least 2 member ports, and none of them
+ * may be a member of another LAG. Note that on a 4-port device,
+ * there can be either 2 or 4 member ports; using 3 member ports is
+ * not supported.
+ */
+ uint8_t member_port_map;
+ /* If this bit is set to '1', the port0 is a lag member port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_MEMBER_PORT_MAP_PORT_0 \
+ UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag member port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_MEMBER_PORT_MAP_PORT_1 \
+ UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag member port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_MEMBER_PORT_MAP_PORT_2 \
+ UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag member port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_MEMBER_PORT_MAP_PORT_3 \
+ UINT32_C(0x8)
+ /* rsvd4 is 4 b */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_MEMBER_PORT_MAP_RSVD4_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_MEMBER_PORT_MAP_RSVD4_SFT 4
+ /* Link aggregation mode being used. */
+ uint8_t link_aggr_mode;
+ /* active active mode. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_AGGR_MODE_ACTIVE_ACTIVE \
+ UINT32_C(0x1)
+ /* active backup mode. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_AGGR_MODE_ACTIVE_BACKUP \
+ UINT32_C(0x2)
+ /* Balance XOR mode. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_AGGR_MODE_BALANCE_XOR \
+ UINT32_C(0x3)
+ /* 802.3AD mode. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_AGGR_MODE_802_3_AD \
+ UINT32_C(0x4)
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_AGGR_MODE_LAST \
+ HWRM_FUNC_LAG_UPDATE_INPUT_AGGR_MODE_802_3_AD
+ uint8_t unused_0[3];
} __rte_packed;
-/* hwrm_func_vlan_cfg_output (size:128b/16B) */
-struct hwrm_func_vlan_cfg_output {
+/* hwrm_func_lag_update_output (size:128b/16B) */
+struct hwrm_func_lag_update_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -24537,21 +25722,782 @@ struct hwrm_func_vlan_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
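As an illustrative sketch (not part of the patch itself) of the failover flow
described in the active_port_map comments above: in active-backup mode the
driver only has to advertise the new active port. The common HWRM header
fields (req_type, seq_id, resp_addr) and the mailbox send are assumed to be
handled by the driver's usual HWRM plumbing, lag_update_fill_failover() is a
hypothetical helper name, and new_active_port is a front panel port index.

#include <string.h>
#include "hsi_struct_def_dpdk.h"

/* Fill only the LAG-specific fields of an HWRM_FUNC_LAG_UPDATE request so
 * that the given front panel port becomes the sole active port.
 */
static void
lag_update_fill_failover(struct hwrm_func_lag_update_input *req,
			 uint8_t fw_lag_id, unsigned int new_active_port)
{
	memset(req, 0, sizeof(*req));
	req->fw_lag_id = fw_lag_id;
	/* Only the active_port_map field is being updated. */
	req->enables = (uint8_t)HWRM_FUNC_LAG_UPDATE_INPUT_ENABLES_ACTIVE_PORT_MAP;
	/* Active-backup: exactly one active port bit may be set. */
	req->active_port_map = (uint8_t)(1u << new_active_port);
}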
-/*******************************
- * hwrm_func_vf_vnic_ids_query *
- *******************************/
+/**********************
+ * hwrm_func_lag_free *
+ **********************/
-/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
-struct hwrm_func_vf_vnic_ids_query_input {
+/* hwrm_func_lag_free_input (size:192b/24B) */
+struct hwrm_func_lag_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Link aggregation group ID of the LAG to be freed. */
+ uint8_t fw_lag_id;
+ uint8_t unused_0[7];
+} __rte_packed;
+
+/* hwrm_func_lag_free_output (size:128b/16B) */
+struct hwrm_func_lag_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/**********************
+ * hwrm_func_lag_qcfg *
+ **********************/
+
+
+/* hwrm_func_lag_qcfg_input (size:192b/24B) */
+struct hwrm_func_lag_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Link aggregation group ID of the LAG to be queried. */
+ uint8_t fw_lag_id;
+ uint8_t unused_0[7];
+} __rte_packed;
+
+/* hwrm_func_lag_qcfg_output (size:128b/16B) */
+struct hwrm_func_lag_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This is the bitmap of all active ports in the LAG. Each bit
+ * represents a front panel port of the device. Ports are numbered
+ * from 0 to n - 1 on a device with n ports. The number of front panel
+ * ports is specified in the port_cnt field of the HWRM_PORT_PHY_QCAPS
+ * response. The active_port_map must always be a subset of the
+ * member_port_map. An active port is eligible to send and receive
+ * traffic.
+ *
+ * If the LAG mode is active-backup, only one port can be an active
+ * port at a given time. All other ports in the member_port_map that
+ * are not the active port are backup ports. When the active port
+ * fails, another member port takes over to become the active port.
+ * The driver should use HWRM_FUNC_LAG_UPDATE to update
+ * the active_port_map by only setting the port bit of the new active
+ * port.
+ *
+ * In active-active, balance_xor or 802_3_ad mode, all member ports
+ * can be active ports. If the driver determines that an active
+ * port is down or unable to function, it should use
+ * HWRM_FUNC_LAG_UPDATE to update the active_port_map by clearing
+ * the port bit that has failed.
+ */
+ uint8_t active_port_map;
+ /* If this bit is set to '1', the port0 is a lag active port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_0 \
+ UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag active port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_1 \
+ UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag active port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_2 \
+ UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag active port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_3 \
+ UINT32_C(0x8)
+ /* rsvd3 is 4 b */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_ACTIVE_PORT_MAP_RSVD3_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_ACTIVE_PORT_MAP_RSVD3_SFT 4
+ /*
+ * This is the bitmap of all member ports in the LAG. Each bit
+ * represents a front panel port of the device. Ports are numbered
+ * from 0 to n - 1 on a device with n ports. The number of front panel
+ * ports is specified in the port_cnt field of the HWRM_PORT_PHY_QCAPS
+ * response. There must be at least 2 member ports, and none of them
+ * may be a member of another LAG. Note that on a 4-port device,
+ * there can be either 2 or 4 member ports; using 3 member ports is
+ * not supported.
+ */
+ uint8_t member_port_map;
+ /* If this bit is set to '1', the port0 is a lag member port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_0 \
+ UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag member port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_1 \
+ UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag member port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_2 \
+ UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag member port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_3 \
+ UINT32_C(0x8)
+ /* rsvd4 is 4 b */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_MEMBER_PORT_MAP_RSVD4_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_MEMBER_PORT_MAP_RSVD4_SFT 4
+ /* Link aggregation mode being used. */
+ uint8_t link_aggr_mode;
+ /* active active mode. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_AGGR_MODE_ACTIVE_ACTIVE UINT32_C(0x1)
+ /* active backup mode. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_AGGR_MODE_ACTIVE_BACKUP UINT32_C(0x2)
+ /* Balance XOR mode. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_AGGR_MODE_BALANCE_XOR UINT32_C(0x3)
+ /* 802.3AD mode. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_AGGR_MODE_802_3_AD UINT32_C(0x4)
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_AGGR_MODE_LAST \
+ HWRM_FUNC_LAG_QCFG_OUTPUT_AGGR_MODE_802_3_AD
+ uint8_t unused_0[4];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
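A small sketch (not part of the patch) of consuming this response once the
driver's HWRM layer has copied the completion into resp; it only walks the
bitmaps and mode defined above, and the 4-port upper bound mirrors the
PORT_0..PORT_3 defines.

#include <stdio.h>
#include "hsi_struct_def_dpdk.h"

/* Dump member/active state per front panel port for a queried LAG. */
static void
lag_qcfg_dump(const struct hwrm_func_lag_qcfg_output *resp)
{
	unsigned int port;

	for (port = 0; port < 4; port++) {
		uint8_t bit = (uint8_t)(1u << port);

		if (!(resp->member_port_map & bit))
			continue;
		printf("port %u: member, %s\n", port,
		       (resp->active_port_map & bit) ? "active" : "backup");
	}
	printf("aggr mode %u (1=active-active 2=active-backup "
	       "3=balance-xor 4=802.3ad)\n",
	       (unsigned int)resp->link_aggr_mode);
}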
+
+/**************************
+ * hwrm_func_lag_mode_cfg *
+ **************************/
+
+
+/* hwrm_func_lag_mode_cfg_input (size:192b/24B) */
+struct hwrm_func_lag_mode_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t enables;
+ /*
+ * This bit must be '1' for the link aggregation enable or
+ * disable flags to be configured.
+ */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_FLAGS \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the active_port_map field to be
+ * configured.
+ */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_ACTIVE_PORT_MAP \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the member_port_map field to be
+ * configured.
+ */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_MEMBER_PORT_MAP \
+ UINT32_C(0x4)
+ /* This bit must be '1' for the aggr_mode field to be configured. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_AGGR_MODE \
+ UINT32_C(0x8)
+ /* This bit must be '1' for the lag id field to be configured. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_LAG_ID \
+ UINT32_C(0x10)
+ /* rsvd1 is 3 b */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_RSVD1_MASK \
+ UINT32_C(0xe0)
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_RSVD1_SFT 5
+ uint8_t flags;
+ /*
+ * If this bit is set to 1, the driver is requesting FW to disable
+ * the link aggregation feature during run time.
+ */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_FLAGS_AGGR_DISABLE \
+ UINT32_C(0x1)
+ /*
+ * If this bit is set to 1, the driver is requesting FW to enable
+ * the link aggregation feature during run time.
+ */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_FLAGS_AGGR_ENABLE \
+ UINT32_C(0x2)
+ /* rsvd2 is 6 b */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_FLAGS_RSVD2_MASK \
+ UINT32_C(0xfc)
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_FLAGS_RSVD2_SFT 2
+ /*
+ * This is the bitmap of all active ports in the LAG. Each bit
+ * represents a front panel port of the device starting from port 0.
+ * The number of front panel ports is specified in the port_cnt field
+ * of the HWRM_PORT_PHY_QCAPS response.
+ * The term "active port" is one of member ports which is eligible to
+ * send or receive the traffic.
+ * In the active-backup mode, only one member port is active port at
+ * any given time. If the active port fails, another member port
+ * automatically takes over the active role to ensure continuous
+ * network connectivity.
+ * In the active-active, balance_xor or 802_3_ad mode, all member ports
+ * can be active ports. If the link status on one port is down, the
+ * driver needs to send the NIC a new active-port bitmap marking that
+ * port as not active.
+ * The PORT_2 and PORT_3 are only valid if the NIC has four front
+ * panel ports.
+ */
+ uint8_t active_port_map;
+ /* If this bit is set to '1', the port0 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_PORT_0 \
+ UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_PORT_1 \
+ UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_PORT_2 \
+ UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_PORT_3 \
+ UINT32_C(0x8)
+ /* rsvd3 is 4 b */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_RSVD3_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_RSVD3_SFT 4
+ /*
+ * This is the bitmap of all member ports in the LAG. Each bit
+ * represents a front panel port of the device starting from port 0.
+ * The number of front panel ports is specified in the port_cnt field
+ * of the HWRM_PORT_PHY_QCAPS response.
+ * The term "member port" refers to a front panel port that is added to
+ * the bond group as a slave device. These member ports are combined to
+ * create a logical bond interface.
+ * For a 4-port NIC, the LAG member port combination can consist of
+ * either two ports or four ports. However, it is important to note
+ * that the case with three ports in the same lag group is not
+ * supported.
+ * The PORT_2 and PORT_3 are only valid if the NIC has four front
+ * panel ports. Multiple LAG groups can also be used; for example, on
+ * a NIC with four front panel ports, the LAG feature can use up to
+ * two LAG groups, with two ports assigned to each group.
+ */
+ uint8_t member_port_map;
+ /* If this bit is set to '1', the port0 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_PORT_0 \
+ UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_PORT_1 \
+ UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_PORT_2 \
+ UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_PORT_3 \
+ UINT32_C(0x8)
+ /* rsvd4 is 4 b */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_RSVD4_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_RSVD4_SFT 4
+ /* Link aggregation mode being used. */
+ uint8_t link_aggr_mode;
+ /* active active mode. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_AGGR_MODE_ACTIVE_ACTIVE \
+ UINT32_C(0x1)
+ /* active backup mode. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_AGGR_MODE_ACTIVE_BACKUP \
+ UINT32_C(0x2)
+ /* Balance XOR mode. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_AGGR_MODE_BALANCE_XOR \
+ UINT32_C(0x3)
+ /* 802.3AD mode. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_AGGR_MODE_802_3_AD \
+ UINT32_C(0x4)
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_AGGR_MODE_LAST \
+ HWRM_FUNC_LAG_MODE_CFG_INPUT_AGGR_MODE_802_3_AD
+ /* Link aggregation group idx being used. */
+ uint8_t lag_id;
+ uint8_t unused_0[2];
+} __rte_packed;
+
+/* hwrm_func_lag_mode_cfg_output (size:128b/16B) */
+struct hwrm_func_lag_mode_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Link aggregation group idx being used. */
+ uint8_t lag_id;
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
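A minimal sketch (not part of the patch) of requesting a two-port
active-active LAG at run time with this command pair. Only the
command-specific input fields are filled; the HWRM header and mailbox send
are assumed to be handled by the driver, and leaving ENABLES_LAG_ID clear so
that firmware picks the group index (returned in the output's lag_id) is an
assumption rather than something stated by this header.

#include <string.h>
#include "hsi_struct_def_dpdk.h"

/* Enable LAG on front panel ports 0 and 1 in active-active mode. */
static void
lag_mode_cfg_fill_2port(struct hwrm_func_lag_mode_cfg_input *req)
{
	memset(req, 0, sizeof(*req));
	req->enables = (uint8_t)
		(HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_FLAGS |
		 HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_ACTIVE_PORT_MAP |
		 HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_MEMBER_PORT_MAP |
		 HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_AGGR_MODE);
	/* Turn the feature on at run time. */
	req->flags = (uint8_t)HWRM_FUNC_LAG_MODE_CFG_INPUT_FLAGS_AGGR_ENABLE;
	req->member_port_map = (uint8_t)
		(HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_PORT_0 |
		 HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_PORT_1);
	/* Both members start out eligible to carry traffic. */
	req->active_port_map = (uint8_t)
		(HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_PORT_0 |
		 HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_PORT_1);
	req->link_aggr_mode =
		HWRM_FUNC_LAG_MODE_CFG_INPUT_AGGR_MODE_ACTIVE_ACTIVE;
}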
+
+/***************************
+ * hwrm_func_lag_mode_qcfg *
+ ***************************/
+
+
+/* hwrm_func_lag_mode_qcfg_input (size:192b/24B) */
+struct hwrm_func_lag_mode_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t unused_0[8];
+} __rte_packed;
+
+/* hwrm_func_lag_mode_qcfg_output (size:128b/16B) */
+struct hwrm_func_lag_mode_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t aggr_enabled;
+ /*
+ * This flag is used to query whether link aggregation is enabled
+ * or disabled during run time.
+ */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_ENABLED UINT32_C(0x1)
+ /* rsvd1 is 7 b */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_RSVD1_MASK UINT32_C(0xfe)
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_RSVD1_SFT 1
+ /*
+ * This is the bitmap of all active ports in the LAG. Each bit
+ * represents a front panel port of the device starting from port 0.
+ * The number of front panel ports is specified in the port_cnt field
+ * of the HWRM_PORT_PHY_QCAPS response.
+ * The term "active port" is one of member ports which is eligible to
+ * send or receive the traffic.
+ * In the active-backup mode, only one member port is active port at
+ * any given time. If the active port fails, another member port
+ * automatically takes over the active role to ensure continuous
+ * network connectivity.
+ * In the active-active, balance_xor or 802_3_ad mode, all member ports
+ * can be active ports. If the link status on one port is down, the
+ * driver needs to send the NIC a new active-port bitmap marking that
+ * port as not active.
+ * The PORT_2 and PORT_3 are only valid if the NIC has four front
+ * panel ports.
+ */
+ uint8_t active_port_map;
+ /* If this bit is set to '1', the port0 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_0 \
+ UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_1 \
+ UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_2 \
+ UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_3 \
+ UINT32_C(0x8)
+ /* rsvd2 is 4 b */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_ACTIVE_PORT_MAP_RSVD2_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_ACTIVE_PORT_MAP_RSVD2_SFT 4
+ /*
+ * This is the bitmap of all member ports in the LAG. Each bit
+ * represents a front panel port of the device starting from port 0.
+ * The number of front panel ports is specified in the port_cnt field
+ * of the HWRM_PORT_PHY_QCAPS response.
+ * The term "member port" refers to a front panel port that is added to
+ * the bond group as a slave device. These member ports are combined to
+ * create a logical bond interface.
+ * For a 4-port NIC, the LAG member port combination can consist of
+ * either two ports or four ports. However, it is important to note
+ * that the case with three ports in the same lag group is not
+ * supported.
+ * The PORT_2 and PORT_3 are only valid if the NIC has four front
+ * panel ports. Multiple LAG groups can also be used; for example, on
+ * a NIC with four front panel ports, the LAG feature can use up to
+ * two LAG groups, with two ports assigned to each group.
+ */
+ uint8_t member_port_map;
+ /* If this bit is set to '1', the port0 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_0 \
+ UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_1 \
+ UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_2 \
+ UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_3 \
+ UINT32_C(0x8)
+ /* rsvd3 is 4 b */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_MEMBER_PORT_MAP_RSVD3_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_MEMBER_PORT_MAP_RSVD3_SFT 4
+ /* Link aggregation mode being used. */
+ uint8_t link_aggr_mode;
+ /* active active mode. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_MODE_ACTIVE_ACTIVE \
+ UINT32_C(0x1)
+ /* active backup mode. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_MODE_ACTIVE_BACKUP \
+ UINT32_C(0x2)
+ /* Balance XOR mode. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_MODE_BALANCE_XOR \
+ UINT32_C(0x3)
+ /* 802.3AD mode. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_MODE_802_3_AD \
+ UINT32_C(0x4)
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_MODE_LAST \
+ HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_MODE_802_3_AD
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
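The matching query can be gated on aggr_enabled before the rest of the
response is trusted; a short sketch (not part of the patch):

#include <stdbool.h>
#include "hsi_struct_def_dpdk.h"

/* Return true when firmware reports link aggregation enabled at run time. */
static bool
lag_mode_is_enabled(const struct hwrm_func_lag_mode_qcfg_output *resp)
{
	return (resp->aggr_enabled &
		HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_ENABLED) != 0;
}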
+
+/***********************
+ * hwrm_func_vlan_qcfg *
+ ***********************/
+
+
+/* hwrm_func_vlan_qcfg_input (size:192b/24B) */
+struct hwrm_func_vlan_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the configuration is
+ * for the requesting function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __rte_packed;
+
+/* hwrm_func_vlan_qcfg_output (size:320b/40B) */
+struct hwrm_func_vlan_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint64_t unused_0;
+ /* S-TAG VLAN identifier configured for the function. */
+ uint16_t stag_vid;
+ /* S-TAG PCP value configured for the function. */
+ uint8_t stag_pcp;
+ uint8_t unused_1;
+ /*
+ * S-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
+ */
+ uint16_t stag_tpid;
+ /* C-TAG VLAN identifier configured for the function. */
+ uint16_t ctag_vid;
+ /* C-TAG PCP value configured for the function. */
+ uint8_t ctag_pcp;
+ uint8_t unused_2;
+ /*
+ * C-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
+ */
+ uint16_t ctag_tpid;
+ /* Future use. */
+ uint32_t rsvd2;
+ /* Future use. */
+ uint32_t rsvd3;
+ uint8_t unused_3[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/**********************
+ * hwrm_func_vlan_cfg *
+ **********************/
+
+
+/* hwrm_func_vlan_cfg_input (size:384b/48B) */
+struct hwrm_func_vlan_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the configuration is
+ * for the requesting function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[2];
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the stag_vid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the ctag_vid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the stag_pcp field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the ctag_pcp field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the stag_tpid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the ctag_tpid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20)
+ /* S-TAG VLAN identifier configured for the function. */
+ uint16_t stag_vid;
+ /* S-TAG PCP value configured for the function. */
+ uint8_t stag_pcp;
+ uint8_t unused_1;
+ /*
+ * S-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
+ */
+ uint16_t stag_tpid;
+ /* C-TAG VLAN identifier configured for the function. */
+ uint16_t ctag_vid;
+ /* C-TAG PCP value configured for the function. */
+ uint8_t ctag_pcp;
+ uint8_t unused_2;
+ /*
+ * C-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
+ */
+ uint16_t ctag_tpid;
+ /* Future use. */
+ uint32_t rsvd1;
+ /* Future use. */
+ uint32_t rsvd2;
+ uint8_t unused_3[4];
+} __rte_packed;
+
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
+struct hwrm_func_vlan_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
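Because each field is gated by its own enables bit and the TPIDs are the only
fields carried in network byte order, a request can touch just the S-TAG; a
hedged sketch (not part of the patch) assuming the usual HWRM little-endian
convention for the remaining multi-byte fields and func_vlan_cfg_fill_stag()
as a hypothetical helper name:

#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Configure only the S-TAG (VID 100, PCP 3, TPID 0x88a8) on the requesting
 * function; all other VLAN fields are left untouched by firmware because
 * their enables bits stay clear.
 */
static void
func_vlan_cfg_fill_stag(struct hwrm_func_vlan_cfg_input *req)
{
	memset(req, 0, sizeof(*req));
	req->fid = rte_cpu_to_le_16(0xffff);	/* requesting function */
	req->enables = rte_cpu_to_le_32(
		HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID |
		HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP |
		HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID);
	req->stag_vid = rte_cpu_to_le_16(100);
	req->stag_pcp = 3;
	req->stag_tpid = rte_cpu_to_be_16(0x88a8);	/* network byte order */
}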
+
+/*******************************
+ * hwrm_func_vf_vnic_ids_query *
+ *******************************/
+
+
+/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
+struct hwrm_func_vf_vnic_ids_query_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -24611,9 +26557,9 @@ struct hwrm_func_vf_vnic_ids_query_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -24736,9 +26682,9 @@ struct hwrm_func_vf_bw_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -24805,8 +26751,8 @@ struct hwrm_func_vf_bw_qcfg_output {
uint16_t resp_len;
/*
* The number of VF functions that are being queried.
- * The inline response space allows the host to query up to 50 VFs' rate
- * scale percentage
+ * The inline response space allows the host to query up to 50 VFs'
+ * rate scale percentage.
*/
uint16_t num_vfs;
uint16_t unused[3];
@@ -24874,9 +26820,9 @@ struct hwrm_func_vf_bw_qcfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -24920,11 +26866,11 @@ struct hwrm_func_drv_if_change_input {
uint32_t flags;
/*
* When this bit is '1', the function driver is indicating
- * that the IF state is changing to UP state. The call should
+ * that the IF state is changing to UP state. The call should
* be made at the beginning of the driver's open call before
- * resources are allocated. After making the call, the driver
+ * resources are allocated. After making the call, the driver
* should check the response to see if any resources may have
- * changed (see the response below). If the driver fails
+ * changed (see the response below). If the driver fails
* the open call, the driver should make this call again with
* this bit cleared to indicate that the IF state is not UP.
* During the driver's close call when the IF state is changing
@@ -24948,24 +26894,35 @@ struct hwrm_func_drv_if_change_output {
uint32_t flags;
/*
* When this bit is '1', it indicates that the resources reserved
- * for this function may have changed. The driver should check
+ * for this function may have changed. The driver should check
* resource capabilities and reserve resources again before
* allocating resources.
*/
#define HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_RESC_CHANGE \
UINT32_C(0x1)
/*
- * When this bit is '1', it indicates that the firmware got changed / reset.
- * The driver should do complete re-initialization when that bit is set.
+ * When this bit is '1', it indicates that the firmware got changed /
+ * reset. The driver should do complete re-initialization when that
+ * bit is set.
*/
#define HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE \
UINT32_C(0x2)
+ /*
+ * When this bit is '1', it indicates that capabilities
+ * for this function may have changed. The driver should
+ * query for changes to capabilities.
+ * The CAPS_CHANGE bit will only be set when it is safe for the
+ * driver to completely re-initialize all resources for the function
+ * including any child VFs.
+ */
+ #define HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_CAPS_CHANGE \
+ UINT32_C(0x4)
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
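The three response flags above drive increasingly heavy recovery paths; a
minimal sketch (not part of the patch, and the grouping of checks is an
assumption, not taken from the driver) of classifying them on the caller side:

#include <stdbool.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Split the hwrm_func_drv_if_change response flags into the three recovery
 * signals the caller has to act on.
 */
static void
if_change_classify(const struct hwrm_func_drv_if_change_output *resp,
		   bool *fw_reset, bool *caps_change, bool *resc_change)
{
	uint32_t flags = rte_le_to_cpu_32(resp->flags);

	/* Firmware was reset: complete re-initialization is required. */
	*fw_reset = !!(flags &
		       HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE);
	/* Capabilities may have changed: re-query them. */
	*caps_change = !!(flags &
			  HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_CAPS_CHANGE);
	/* Reservations may have changed: re-check and re-reserve. */
	*resc_change = !!(flags &
			  HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_RESC_CHANGE);
}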
@@ -25169,9 +27126,9 @@ struct hwrm_func_host_pf_ids_query_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -25374,9 +27331,9 @@ struct hwrm_func_spd_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -25537,9 +27494,9 @@ struct hwrm_func_spd_qcfg_output {
uint8_t unused_2[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -25597,7 +27554,7 @@ struct hwrm_port_phy_cfg_input {
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY \
UINT32_C(0x1)
- /* deprecated bit. Do not use!!! */
+ /* deprecated bit. Do not use!!! */
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED \
UINT32_C(0x2)
/*
@@ -25664,35 +27621,35 @@ struct hwrm_port_phy_cfg_input {
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE \
UINT32_C(0x80)
/*
- * When set to 1, then the HWRM shall enable FEC autonegotitation
- * on this port if supported. When enabled, at least one of the
+ * When set to 1, then the HWRM shall enable FEC autonegotiation
+ * on this port if supported. When enabled, at least one of the
* FEC modes must be advertised by enabling the fec_clause_74_enable,
* fec_clause_91_enable, fec_rs544_1xn_enable, fec_rs544_ieee_enable,
- * fec_rs272_1xn_enable, or fec_rs272_ieee_enable flag. If none
+ * fec_rs272_1xn_enable, or fec_rs272_ieee_enable flag. If none
* of the FEC mode is currently enabled, the HWRM shall choose
* a default advertisement setting.
* The default advertisement setting can be queried by calling
- * hwrm_port_phy_qcfg. Note that the link speed must be
+ * hwrm_port_phy_qcfg. Note that the link speed must be
* in autonegotiation mode for FEC autonegotiation to take effect.
* When set to 0, then this flag shall be ignored.
- * If FEC autonegotiation is not supported, then the HWRM shall ignore this
- * flag.
+ * If FEC autonegotiation is not supported, then the HWRM shall
+ * ignore this flag.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE \
UINT32_C(0x100)
/*
* When set to 1, then the HWRM shall disable FEC autonegotiation
- * on this port and use forced FEC mode. In forced FEC mode, one
+ * on this port and use forced FEC mode. In forced FEC mode, one
* or more FEC forced settings under the same clause can be set.
* When set to 0, then this flag shall be ignored.
- * If FEC autonegotiation is not supported, then the HWRM shall ignore this
- * flag.
+ * If FEC autonegotiation is not supported, then the HWRM shall
+ * ignore this flag.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \
UINT32_C(0x200)
/*
- * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire Code)
- * on this port if supported, by advertising FEC CLAUSE 74 if
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire
+ * Code) on this port if supported, by advertising FEC CLAUSE 74 if
* FEC autonegotiation is enabled or force enabled otherwise.
* When set to 0, then this flag shall be ignored.
* If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this
@@ -25701,9 +27658,9 @@ struct hwrm_port_phy_cfg_input {
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \
UINT32_C(0x400)
/*
- * When set to 1, then the HWRM shall disable FEC CLAUSE 74 (Fire Code)
- * on this port if supported, by not advertising FEC CLAUSE 74 if
- * FEC autonegotiation is enabled or force disabled otherwise.
+ * When set to 1, then the HWRM shall disable FEC CLAUSE 74 (Fire
+ * Code) on this port if supported, by not advertising FEC CLAUSE 74
+ * if FEC autonegotiation is enabled or force disabled otherwise.
* When set to 0, then this flag shall be ignored.
* If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this
* flag.
@@ -25714,8 +27671,8 @@ struct hwrm_port_phy_cfg_input {
* When set to 1, then the HWRM shall enable FEC CLAUSE 91
* (Reed Solomon RS(528,514) for NRZ) on this port if supported,
* by advertising FEC RS(528,514) if FEC autonegotiation is enabled
- * or force enabled otherwise. In forced FEC mode, this flag
- * will only take effect if the speed is NRZ. Additional
+ * or force enabled otherwise. In forced FEC mode, this flag
+ * will only take effect if the speed is NRZ. Additional
* RS544 or RS272 flags (also under clause 91) may be set for PAM4
* in forced FEC mode.
* When set to 0, then this flag shall be ignored.
@@ -25728,8 +27685,8 @@ struct hwrm_port_phy_cfg_input {
* When set to 1, then the HWRM shall disable FEC CLAUSE 91
* (Reed Solomon RS(528,514) for NRZ) on this port if supported, by
* not advertising RS(528,514) if FEC autonegotiation is enabled or
- * force disabled otherwise. When set to 0, then this flag shall be
- * ignored. If FEC RS(528,514) is not supported, then the HWRM
+ * force disabled otherwise. When set to 0, then this flag shall be
+ * ignored. If FEC RS(528,514) is not supported, then the HWRM
* shall ignore this flag.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \
@@ -25759,7 +27716,7 @@ struct hwrm_port_phy_cfg_input {
* on this port if supported, by advertising FEC RS544_1XN if
* FEC autonegotiation is enabled or force enabled otherwise.
* In forced mode, this flag will only take effect if the speed is
- * PAM4. If this flag and fec_rs544_ieee_enable are set, the
+ * PAM4. If this flag and fec_rs544_ieee_enable are set, the
* HWRM shall choose one of the RS544 modes.
* When set to 0, then this flag shall be ignored.
* If FEC RS544_1XN is not supported, then the HWRM shall ignore this
@@ -25772,8 +27729,8 @@ struct hwrm_port_phy_cfg_input {
* on this port if supported, by not advertising FEC RS544_1XN if
* FEC autonegotiation is enabled or force disabled otherwise.
* When set to 0, then this flag shall be ignored.
- * If FEC RS544_1XN is not supported, then the HWRM shall ignore this
- * flag.
+ * If FEC RS544_1XN is not supported, then the HWRM shall ignore
+ * this flag.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_RS544_1XN_DISABLE \
UINT32_C(0x10000)
@@ -25782,7 +27739,7 @@ struct hwrm_port_phy_cfg_input {
* on this port if supported, by advertising FEC RS(544,514) if
* FEC autonegotiation is enabled or force enabled otherwise.
* In forced mode, this flag will only take effect if the speed is
- * PAM4. If this flag and fec_rs544_1xn_enable are set, the
+ * PAM4. If this flag and fec_rs544_1xn_enable are set, the
* HWRM shall choose one of the RS544 modes.
* When set to 0, then this flag shall be ignored.
* If FEC RS(544,514) is not supported, then the HWRM shall ignore
@@ -25805,8 +27762,8 @@ struct hwrm_port_phy_cfg_input {
* on this port if supported, by advertising FEC RS272_1XN if
* FEC autonegotiation is enabled or force enabled otherwise.
* In forced mode, this flag will only take effect if the speed is
- * PAM4. If this flag and fec_rs272_ieee_enable are set, the
- * HWRM shall choose one of the RS272 modes. Note that RS272
+ * PAM4. If this flag and fec_rs272_ieee_enable are set, the
+ * HWRM shall choose one of the RS272 modes. Note that RS272
* and RS544 modes cannot be set at the same time in forced FEC mode.
* When set to 0, then this flag shall be ignored.
* If FEC RS272_1XN is not supported, then the HWRM shall ignore this
@@ -25829,8 +27786,8 @@ struct hwrm_port_phy_cfg_input {
* on this port if supported, by advertising FEC RS(272,257) if
* FEC autonegotiation is enabled or force enabled otherwise.
* In forced mode, this flag will only take effect if the speed is
- * PAM4. If this flag and fec_rs272_1xn_enable are set, the
- * HWRM shall choose one of the RS272 modes. Note that RS272
+ * PAM4. If this flag and fec_rs272_1xn_enable are set, the
+ * HWRM shall choose one of the RS272 modes. Note that RS272
* and RS544 modes cannot be set at the same time in forced FEC mode.
* When set to 0, then this flag shall be ignored.
* If FEC RS(272,257) is not supported, then the HWRM shall ignore
@@ -25943,7 +27900,7 @@ struct hwrm_port_phy_cfg_input {
uint16_t port_id;
/*
* This is the speed that will be used if the force
- * bit is '1'. If unsupported speed is selected, an error
+ * bit is '1'. If unsupported speed is selected, an error
* will be generated.
*/
uint16_t force_link_speed;
@@ -25981,18 +27938,19 @@ struct hwrm_port_phy_cfg_input {
/* Select all possible speeds for autoneg mode. */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
/*
- * Select only the auto_link_speed speed for autoneg mode. This mode has
- * been DEPRECATED. An HWRM client should not use this mode.
+ * Select only the auto_link_speed speed for autoneg mode. This mode
+ * has been DEPRECATED. An HWRM client should not use this mode.
*/
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
/*
- * Select the auto_link_speed or any speed below that speed for autoneg.
- * This mode has been DEPRECATED. An HWRM client should not use this mode.
+ * Select the auto_link_speed or any speed below that speed for
+ * autoneg. This mode has been DEPRECATED. An HWRM client should not
+ * use this mode.
*/
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
/*
- * Select the speeds based on the corresponding link speed mask values
- * that are provided. The included speeds are specified in the
+ * Select the speeds based on the corresponding link speed mask
+ * values that are provided. The included speeds are specified in the
* auto_link_speed and auto_pam4_link_speed fields.
*/
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
@@ -26045,10 +28003,31 @@ struct hwrm_port_phy_cfg_input {
*/
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE \
UINT32_C(0x4)
- uint8_t unused_0;
+ /*
+ * This field is only used by management firmware to communicate with
+ * core firmware regarding phy_port_cfg.
+ * It is mainly used to notify core firmware whether management
+ * firmware is using the port for NCSI over RMII communication.
+ */
+ uint8_t mgmt_flag;
+ /*
+ * Bit denoting if management firmware is using the link for
+ * NCSI over RMII communication.
+ * When set to 1, management firmware is no longer using the given
+ * port.
+ * When set to 0, management firmware is using the given port.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_MGMT_FLAG_LINK_RELEASE \
+ UINT32_C(0x1)
+ /*
+ * Validity bit, set to 1 to indicate other bits in mgmt_flags are
+ * valid.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_MGMT_FLAG_MGMT_VALID \
+ UINT32_C(0x80)
/*
* This is the speed that will be used if the autoneg_mode
- * is "one_speed" or "one_or_below". If an unsupported speed
+ * is "one_speed" or "one_or_below". If an unsupported speed
* is selected, an error will be generated.
*/
uint16_t auto_link_speed;
@@ -26078,7 +28057,7 @@ struct hwrm_port_phy_cfg_input {
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB
/*
* This is a mask of link speeds that will be used if
- * autoneg_mode is "mask". If unsupported speed is enabled
+ * autoneg_mode is "mask". If unsupported speed is enabled
* an error will be generated.
*/
uint16_t auto_link_speed_mask;
@@ -26134,7 +28113,7 @@ struct hwrm_port_phy_cfg_input {
HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON
/* This value controls the loopback setting for the PHY. */
uint8_t lpbk;
- /* No loopback is selected. Normal operation. */
+ /* No loopback is selected. Normal operation. */
#define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
/*
* The HW will be configured with local loopback such that
@@ -26150,7 +28129,8 @@ struct hwrm_port_phy_cfg_input {
/*
* The HW will be configured with external loopback such that
* host data is sent on the transmitter and based on the external
- * loopback connection the data will be received without modification.
+ * loopback connection the data will be received without
+ * modification.
*/
#define HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL UINT32_C(0x3)
#define HWRM_PORT_PHY_CFG_INPUT_LPBK_LAST \
@@ -26173,7 +28153,7 @@ struct hwrm_port_phy_cfg_input {
uint8_t unused_1;
/*
* This value controls the pre-emphasis to be used for the
- * link. Driver should not set this value (use
+ * link. Driver should not set this value (use
* enable.preemphasis = 0) unless driver is sure of setting.
* Normally HWRM FW will determine proper pre-emphasis.
*/
@@ -26211,7 +28191,7 @@ struct hwrm_port_phy_cfg_input {
UINT32_C(0x40)
/*
* This is the speed that will be used if the force and force_pam4
- * bits are '1'. If unsupported speed is selected, an error
+ * bits are '1'. If unsupported speed is selected, an error
* will be generated.
*/
uint16_t force_pam4_link_speed;
@@ -26244,51 +28224,54 @@ struct hwrm_port_phy_cfg_input {
UINT32_C(0x4)
/*
* This is the speed that will be used if the force_link_speeds2
- * bit is '1'. If unsupported speed is selected, an error
+ * bit is '1'. If unsupported speed is selected, an error
* will be generated.
*/
uint16_t force_link_speeds2;
/* 1Gb link speed */
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_1GB \
UINT32_C(0xa)
- /* 10Gb link speed */
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_10GB \
UINT32_C(0x64)
- /* 25Gb link speed */
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_25GB \
UINT32_C(0xfa)
- /* 40Gb link speed */
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_40GB \
UINT32_C(0x190)
- /* 50Gb link speed */
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB \
UINT32_C(0x1f4)
- /* 100Gb link speed */
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB \
UINT32_C(0x3e8)
- /* 50Gb (PAM4-56: 50G per lane) link speed */
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB_PAM4_56 \
UINT32_C(0x1f5)
- /* 100Gb (PAM4-56: 50G per lane) link speed */
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_56 \
UINT32_C(0x3e9)
- /* 200Gb (PAM4-56: 50G per lane) link speed */
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_56 \
UINT32_C(0x7d1)
- /* 400Gb (PAM4-56: 50G per lane) link speed */
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_56 \
UINT32_C(0xfa1)
- /* 100Gb (PAM4-112: 100G per lane) link speed */
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_112 \
UINT32_C(0x3ea)
- /* 200Gb (PAM4-112: 100G per lane) link speed */
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_112 \
UINT32_C(0x7d2)
- /* 400Gb (PAM4-112: 100G per lane) link speed */
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112 \
UINT32_C(0xfa2)
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_800GB_PAM4_112 \
+ UINT32_C(0x1f42)
#define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_LAST \
- HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_800GB_PAM4_112
/*
* This is a mask of link speeds that will be used if
* auto_link_speeds2_mask bit in the "enables" field is 1.
@@ -26298,42 +28281,45 @@ struct hwrm_port_phy_cfg_input {
/* 1Gb link speed */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_1GB \
UINT32_C(0x1)
- /* 10Gb link speed */
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_10GB \
UINT32_C(0x2)
- /* 25Gb link speed */
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_25GB \
UINT32_C(0x4)
- /* 40Gb link speed */
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_40GB \
UINT32_C(0x8)
- /* 50Gb link speed */
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_50GB \
UINT32_C(0x10)
- /* 100Gb link speed */
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_100GB \
UINT32_C(0x20)
- /* 50Gb (PAM4-56: 50G per lane) link speed */
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 \
UINT32_C(0x40)
- /* 100Gb (PAM4-56: 50G per lane) link speed */
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 \
UINT32_C(0x80)
- /* 200Gb (PAM4-56: 50G per lane) link speed */
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 \
UINT32_C(0x100)
- /* 400Gb (PAM4-56: 50G per lane) link speed */
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 \
UINT32_C(0x200)
- /* 100Gb (PAM4-112: 100G per lane) link speed */
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 \
UINT32_C(0x400)
- /* 200Gb (PAM4-112: 100G per lane) link speed */
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 \
UINT32_C(0x800)
- /* 400Gb (PAM4-112: 100G per lane) link speed */
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 \
UINT32_C(0x1000)
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 \
+ UINT32_C(0x2000)
uint8_t unused_2[6];
} __rte_packed;
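The force_link_speeds2 codes above (unlike the auto_link_speeds2_mask values, which are plain bit positions) appear to follow a regular encoding: the code is the link speed in units of 100 Mb/s, with the least-significant decimal digit selecting the signaling mode (0 = NRZ, 1 = PAM4-56, 2 = PAM4-112); e.g. 0x1f42 = 8002 decodes to 800 Gb over PAM4-112. The helper below is only a sketch derived from the constants listed here, not part of the HSI definition, and the function names are invented for illustration.

#include <stdint.h>

/*
 * Illustrative decode of a *_FORCE_LINK_SPEEDS2 code (sketch only):
 * the code is the speed in 100 Mb/s units and its last decimal digit
 * encodes the signaling mode.
 */
static inline uint32_t
hwrm_speeds2_code_to_mbps(uint16_t code)
{
	/* Drop the signaling digit, then scale 100 Mb/s units to Mb/s. */
	return (uint32_t)(code - (code % 10)) * 100;
}

static inline unsigned int
hwrm_speeds2_code_sig_mode(uint16_t code)
{
	/* 0 = NRZ, 1 = PAM4-56, 2 = PAM4-112, per the constants above. */
	return code % 10;
}

For example, HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112 (0xfa2 = 4002) decodes to 400000 Mb/s with signaling mode 2.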
@@ -26350,9 +28336,9 @@ struct hwrm_port_phy_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -26377,7 +28363,7 @@ struct hwrm_port_phy_cfg_cmd_err {
* but if a 0 is returned at any time then this should
* be treated as an un recoverable failure,
*
- * retry interval in milli seconds is returned in opaque_1.
+ * retry interval in milliseconds is returned in opaque_1.
* This specifies the time that user should wait before
* issuing the next port_phy_cfg command.
*/
@@ -26476,19 +28462,19 @@ struct hwrm_port_phy_qcfg_output {
/* FEC CLAUSE 74 (Fire Code) active, autonegotiated or forced. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE \
(UINT32_C(0x1) << 4)
- /* FEC CLAUSE 91 RS(528,514) active, autonegoatiated or forced. */
+ /* FEC CLAUSE 91 RS(528,514) active, autonegotiated or forced. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE \
(UINT32_C(0x2) << 4)
- /* FEC RS544_1XN active, autonegoatiated or forced. */
+ /* FEC RS544_1XN active, autonegotiated or forced. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE \
(UINT32_C(0x3) << 4)
- /* FEC RS(544,528) active, autonegoatiated or forced. */
+ /* FEC RS(544,528) active, autonegotiated or forced. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE \
(UINT32_C(0x4) << 4)
/* FEC RS272_1XN active, autonegotiated or forced. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE \
(UINT32_C(0x5) << 4)
- /* FEC RS(272,257) active, autonegoatiated or forced. */
+ /* FEC RS(272,257) active, autonegotiated or forced. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE \
(UINT32_C(0x6) << 4)
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_LAST \
@@ -26523,6 +28509,8 @@ struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB UINT32_C(0x7d0)
/* 400Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_400GB UINT32_C(0xfa0)
+ /* 800Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_800GB UINT32_C(0x1f40)
/* 10Mb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff)
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_LAST \
@@ -26643,13 +28631,14 @@ struct hwrm_port_phy_qcfg_output {
/* Select all possible speeds for autoneg mode. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
/*
- * Select only the auto_link_speed speed for autoneg mode. This mode has
- * been DEPRECATED. An HWRM client should not use this mode.
+ * Select only the auto_link_speed speed for autoneg mode. This mode
+ * has been DEPRECATED. An HWRM client should not use this mode.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
/*
- * Select the auto_link_speed or any speed below that speed for autoneg.
- * This mode has been DEPRECATED. An HWRM client should not use this mode.
+ * Select the auto_link_speed or any speed below that speed for
+ * autoneg. This mode has been DEPRECATED. An HWRM client should not
+ * use this mode.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
/*
@@ -26782,7 +28771,7 @@ struct hwrm_port_phy_qcfg_output {
HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON
/* Current setting for loopback. */
uint8_t lpbk;
- /* No loopback is selected. Normal operation. */
+ /* No loopback is selected. Normal operation. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
/*
* The HW will be configured with local loopback such that
@@ -26798,7 +28787,8 @@ struct hwrm_port_phy_qcfg_output {
/*
* The HW will be configured with external loopback such that
* host data is sent on the transmitter and based on the external
- * loopback connection the data will be received without modification.
+ * loopback connection the data will be received without
+ * modification.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL UINT32_C(0x3)
#define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LAST \
@@ -27025,8 +29015,26 @@ struct hwrm_port_phy_qcfg_output {
/* 400G_BASEER4 */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER4 \
UINT32_C(0x37)
+ /* 800G_BASECR8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASECR8 \
+ UINT32_C(0x38)
+ /* 800G_BASESR8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASESR8 \
+ UINT32_C(0x39)
+ /* 800G_BASELR8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASELR8 \
+ UINT32_C(0x3a)
+ /* 800G_BASEER8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASEER8 \
+ UINT32_C(0x3b)
+ /* 800G_BASEFR8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASEFR8 \
+ UINT32_C(0x3c)
+ /* 800G_BASEDR8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASEDR8 \
+ UINT32_C(0x3d)
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_LAST \
- HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER4
+ HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASEDR8
/* This value represents a media type. */
uint8_t media_type;
/* Unknown */
@@ -27064,9 +29072,9 @@ struct hwrm_port_phy_qcfg_output {
UINT32_C(0xe0)
#define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5
/*
- * When set to 1, Energy Efficient Ethernet (EEE) mode is enabled.
- * Speeds for autoneg with EEE mode enabled
- * are based on eee_link_speed_mask.
+ * When set to 1, Energy Efficient Ethernet (EEE) mode is
+ * enabled. Speeds for autoneg with EEE mode enabled are based on
+ * eee_link_speed_mask.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \
UINT32_C(0x20)
@@ -27175,14 +29183,15 @@ struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \
UINT32_C(0x1)
/*
- * Select only the auto_link_speed speed for autoneg mode. This mode has
- * been DEPRECATED. An HWRM client should not use this mode.
+ * Select only the auto_link_speed speed for autoneg mode. This mode
+ * has been DEPRECATED. An HWRM client should not use this mode.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \
UINT32_C(0x2)
/*
- * Select the auto_link_speed or any speed below that speed for autoneg.
- * This mode has been DEPRECATED. An HWRM client should not use this mode.
+ * Select the auto_link_speed or any speed below that speed for
+ * autoneg. This mode has been DEPRECATED. An HWRM client should not
+ * use this mode.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \
UINT32_C(0x3)
@@ -27312,29 +29321,32 @@ struct hwrm_port_phy_qcfg_output {
*/
uint16_t fec_cfg;
/*
- * When set to 1, then FEC is not supported on this port. If this flag
- * is set to 1, then all other FEC configuration flags shall be ignored.
- * When set to 0, then FEC is supported as indicated by other
- * configuration flags.
+ * When set to 1, then FEC is not supported on this port. If this
+ * flag is set to 1, then all other FEC configuration flags shall be
+ * ignored. When set to 0, then FEC is supported as indicated by
+ * other configuration flags.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \
UINT32_C(0x1)
/*
* When set to 1, then FEC autonegotiation is supported on this port.
- * When set to 0, then FEC autonegotiation is not supported on this port.
+ * When set to 0, then FEC autonegotiation is not supported on this
+ * port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_SUPPORTED \
UINT32_C(0x2)
/*
* When set to 1, then FEC autonegotiation is enabled on this port.
* When set to 0, then FEC autonegotiation is disabled if supported.
- * This flag should be ignored if FEC autonegotiation is not supported on this port.
+ * This flag should be ignored if FEC autonegotiation is not
+ * supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \
UINT32_C(0x4)
/*
- * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on this port.
- * When set to 0, then FEC CLAUSE 74 (Fire Code) is not supported on this port.
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on this
+ * port. When set to 0, then FEC CLAUSE 74 (Fire Code) is not
+ * supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \
UINT32_C(0x8)
@@ -27342,8 +29354,9 @@ struct hwrm_port_phy_qcfg_output {
* When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on this
* port. This means that FEC CLAUSE 74 is either advertised if
* FEC autonegotiation is enabled or FEC CLAUSE 74 is force enabled.
- * When set to 0, then FEC CLAUSE 74 (Fire Code) is disabled if supported.
- * This flag should be ignored if FEC CLAUSE 74 is not supported on this port.
+ * When set to 0, then FEC CLAUSE 74 (Fire Code) is disabled if
+ * supported. This flag should be ignored if FEC CLAUSE 74 is not
+ * supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \
UINT32_C(0x10)
@@ -27358,9 +29371,10 @@ struct hwrm_port_phy_qcfg_output {
* When set to 1, then FEC CLAUSE 91 (Reed Solomon RS(528,514) for
* NRZ) is enabled on this port. This means that FEC RS(528,514) is
* either advertised if FEC autonegotiation is enabled or FEC
- * RS(528,514) is force enabled. When set to 0, then FEC RS(528,514)
+ * RS(528,514) is force enabled. When set to 0, then FEC RS(528,514)
* is disabled if supported.
- * This flag should be ignored if FEC CLAUSE 91 is not supported on this port.
+ * This flag should be ignored if FEC CLAUSE 91 is not supported on
+ * this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \
UINT32_C(0x40)
@@ -27375,7 +29389,8 @@ struct hwrm_port_phy_qcfg_output {
* port. This means that FEC RS544_1XN is either advertised if
* FEC autonegotiation is enabled or FEC RS544_1XN is force enabled.
* When set to 0, then FEC RS544_1XN is disabled if supported.
- * This flag should be ignored if FEC RS544_1XN is not supported on this port.
+ * This flag should be ignored if FEC RS544_1XN is not supported on
+ * this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_RS544_1XN_ENABLED \
UINT32_C(0x100)
@@ -27389,8 +29404,9 @@ struct hwrm_port_phy_qcfg_output {
* When set to 1, then RS(544,514) is enabled on this
* port. This means that FEC RS(544,514) is either advertised if
* FEC autonegotiation is enabled or FEC RS(544,514) is force
- * enabled. When set to 0, then FEC RS(544,514) is disabled if supported.
- * This flag should be ignored if FEC RS(544,514) is not supported on this port.
+ * enabled. When set to 0, then FEC RS(544,514) is disabled if
+ * supported. This flag should be ignored if FEC RS(544,514) is not
+ * supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_RS544_IEEE_ENABLED \
UINT32_C(0x400)
@@ -27404,8 +29420,10 @@ struct hwrm_port_phy_qcfg_output {
* When set to 1, then RS272_1XN is enabled on this
* port. This means that FEC RS272_1XN is either advertised if
* FEC autonegotiation is enabled or FEC RS272_1XN is force
- * enabled. When set to 0, then FEC RS272_1XN is disabled if supported.
- * This flag should be ignored if FEC RS272_1XN is not supported on this port.
+ * enabled. When set to 0, then FEC RS272_1XN is disabled if
+ * supported.
+ * This flag should be ignored if FEC RS272_1XN is not supported on
+ * this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_RS272_1XN_ENABLED \
UINT32_C(0x1000)
@@ -27419,8 +29437,10 @@ struct hwrm_port_phy_qcfg_output {
* When set to 1, then RS(272,257) is enabled on this
* port. This means that FEC RS(272,257) is either advertised if
* FEC autonegotiation is enabled or FEC RS(272,257) is force
- * enabled. When set to 0, then FEC RS(272,257) is disabled if supported.
- * This flag should be ignored if FEC RS(272,257) is not supported on this port.
+ * enabled. When set to 0, then FEC RS(272,257) is disabled if
+ * supported.
+ * This flag should be ignored if FEC RS(272,257) is not supported on
+ * this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_RS272_IEEE_ENABLED \
UINT32_C(0x4000)
@@ -27540,91 +29560,92 @@ struct hwrm_port_phy_qcfg_output {
/* 1Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_1GB \
UINT32_C(0x1)
- /* 10Gb link speed */
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_10GB \
UINT32_C(0x2)
- /* 25Gb link speed */
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_25GB \
UINT32_C(0x4)
- /* 40Gb link speed */
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_40GB \
UINT32_C(0x8)
- /* 50Gb link speed */
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB \
UINT32_C(0x10)
- /* 100Gb link speed */
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB \
UINT32_C(0x20)
- /* 50Gb (PAM4-56: 50G per lane) link speed */
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB_PAM4_56 \
UINT32_C(0x40)
- /* 100Gb (PAM4-56: 50G per lane) link speed */
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_56 \
UINT32_C(0x80)
- /* 200Gb (PAM4-56: 50G per lane) link speed */
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_56 \
UINT32_C(0x100)
- /* 400Gb (PAM4-56: 50G per lane) link speed */
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_56 \
UINT32_C(0x200)
- /* 100Gb (PAM4-112: 100G per lane) link speed */
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_112 \
UINT32_C(0x400)
- /* 200Gb (PAM4-112: 100G per lane) link speed */
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_112 \
UINT32_C(0x800)
- /* 400Gb (PAM4-112: 100G per lane) link speed */
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_112 \
UINT32_C(0x1000)
- /* 800Gb (PAM4-112: 100G per lane) link speed */
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_800GB_PAM4_112 \
UINT32_C(0x2000)
/*
* Current setting of forced link speed. When the link speed is not
* being forced, this value shall be set to 0.
- * This field is valid only if speeds2_supported is set in option_flags.
+ * This field is valid only if speeds2_supported is set in
+ * option_flags.
*/
uint16_t force_link_speeds2;
/* 1Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_1GB \
UINT32_C(0xa)
- /* 10Gb link speed */
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_10GB \
UINT32_C(0x64)
- /* 25Gb link speed */
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_25GB \
UINT32_C(0xfa)
- /* 40Gb link speed */
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_40GB \
UINT32_C(0x190)
- /* 50Gb link speed */
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_50GB \
UINT32_C(0x1f4)
- /* 100Gb link speed */
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_100GB \
UINT32_C(0x3e8)
- /* 50Gb (PAM4-56: 50G per lane) link speed */
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_50GB_PAM4_56 \
UINT32_C(0x1f5)
- /* 100Gb (PAM4-56: 50G per lane) link speed */
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_56 \
UINT32_C(0x3e9)
- /* 200Gb (PAM4-56: 50G per lane) link speed */
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_56 \
UINT32_C(0x7d1)
- /* 400Gb (PAM4-56: 50G per lane) link speed */
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_56 \
UINT32_C(0xfa1)
- /* 100Gb (PAM4-112: 100G per lane) link speed */
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_112 \
UINT32_C(0x3ea)
- /* 200Gb (PAM4-112: 100G per lane) link speed */
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_112 \
UINT32_C(0x7d2)
- /* 400Gb (PAM4-112: 100G per lane) link speed */
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112 \
UINT32_C(0xfa2)
- /* 800Gb (PAM4-112: 100G per lane) link speed */
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_800GB_PAM4_112 \
UINT32_C(0x1f42)
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_LAST \
@@ -27641,43 +29662,43 @@ struct hwrm_port_phy_qcfg_output {
/* 1Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_1GB \
UINT32_C(0x1)
- /* 10Gb link speed */
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_10GB \
UINT32_C(0x2)
- /* 25Gb link speed */
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_25GB \
UINT32_C(0x4)
- /* 40Gb link speed */
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_40GB \
UINT32_C(0x8)
- /* 50Gb link speed */
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_50GB \
UINT32_C(0x10)
- /* 100Gb link speed */
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_100GB \
UINT32_C(0x20)
- /* 50Gb (PAM4-56: 50G per lane) link speed */
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_50GB_PAM4_56 \
UINT32_C(0x40)
- /* 100Gb (PAM4-56: 50G per lane) link speed */
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_100GB_PAM4_56 \
UINT32_C(0x80)
- /* 200Gb (PAM4-56: 50G per lane) link speed */
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_200GB_PAM4_56 \
UINT32_C(0x100)
- /* 400Gb (PAM4-56: 50G per lane) link speed */
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_400GB_PAM4_56 \
UINT32_C(0x200)
- /* 100Gb (PAM4-112: 100G per lane) link speed */
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_100GB_PAM4_112 \
UINT32_C(0x400)
- /* 200Gb (PAM4-112: 100G per lane) link speed */
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_200GB_PAM4_112 \
UINT32_C(0x800)
- /* 400Gb (PAM4-112: 100G per lane) link speed */
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_400GB_PAM4_112 \
UINT32_C(0x1000)
- /* 800Gb (PAM4-112: 100G per lane) link speed */
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_800GB_PAM4_112 \
UINT32_C(0x2000)
/*
@@ -27688,9 +29709,9 @@ struct hwrm_port_phy_qcfg_output {
uint8_t active_lanes;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -27893,14 +29914,14 @@ struct hwrm_port_mac_cfg_input {
#define HWRM_PORT_MAC_CFG_INPUT_ENABLES_DSCP2COS_MAP_PRI \
UINT32_C(0x20)
/*
- * This bit must be '1' for the rx_ts_capture_ptp_msg_type field to be
- * configured.
+ * This bit must be '1' for the rx_ts_capture_ptp_msg_type field to
+ * be configured.
*/
#define HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE \
UINT32_C(0x40)
/*
- * This bit must be '1' for the tx_ts_capture_ptp_msg_type field to be
- * configured.
+ * This bit must be '1' for the tx_ts_capture_ptp_msg_type field to
+ * be configured.
*/
#define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE \
UINT32_C(0x80)
@@ -27922,6 +29943,12 @@ struct hwrm_port_mac_cfg_input {
*/
#define HWRM_PORT_MAC_CFG_INPUT_ENABLES_PTP_ADJ_PHASE \
UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the ptp_load_control field to
+ * be configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_PTP_LOAD_CONTROL \
+ UINT32_C(0x800)
/* Port ID of port that is to be configured. */
uint16_t port_id;
/*
@@ -27931,7 +29958,7 @@ struct hwrm_port_mac_cfg_input {
uint8_t ipg;
/* This value controls the loopback setting for the MAC. */
uint8_t lpbk;
- /* No loopback is selected. Normal operation. */
+ /* No loopback is selected. Normal operation. */
#define HWRM_PORT_MAC_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
/*
* The HW will be configured with local loopback such that
@@ -28114,7 +30141,29 @@ struct hwrm_port_mac_cfg_input {
* of sync timer updates (measured in parts per billion).
*/
int32_t ptp_freq_adj_ppb;
- uint8_t unused_1[4];
+ uint8_t unused_1[3];
+ /*
+ * This value controls how PTP configuration, such as freq_adj and
+ * phase, is loaded in the hardware block.
+ */
+ uint8_t ptp_load_control;
+ /* PTP configuration is not loaded in hardware. */
+ #define HWRM_PORT_MAC_CFG_INPUT_PTP_LOAD_CONTROL_NONE \
+ UINT32_C(0x0)
+ /*
+ * PTP configuration will be loaded immediately in the hardware
+ * block. By default, it will always be immediate.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_PTP_LOAD_CONTROL_IMMEDIATE \
+ UINT32_C(0x1)
+ /*
+ * PTP configuration will be loaded at the next Pulse per second
+ * (PPS) event in the hardware block.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_PTP_LOAD_CONTROL_PPS_EVENT \
+ UINT32_C(0x2)
+ #define HWRM_PORT_MAC_CFG_INPUT_PTP_LOAD_CONTROL_LAST \
+ HWRM_PORT_MAC_CFG_INPUT_PTP_LOAD_CONTROL_PPS_EVENT
/*
* This unsigned field specifies the phase offset to be applied
* to the PHC (PTP Hardware Clock). This field is specified in
@@ -28151,7 +30200,7 @@ struct hwrm_port_mac_cfg_output {
uint8_t ipg;
/* Current value of the loopback value. */
uint8_t lpbk;
- /* No loopback is selected. Normal operation. */
+ /* No loopback is selected. Normal operation. */
#define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
/*
* The HW will be configured with local loopback such that
@@ -28169,9 +30218,9 @@ struct hwrm_port_mac_cfg_output {
uint8_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
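As a hedged usage sketch (not taken from the driver): a caller that wants a PTP frequency adjustment to take effect only at the next PPS edge would set the new PTP_LOAD_CONTROL enables bit together with ptp_load_control. The helper name below is invented, the enables bit for ptp_freq_adj_ppb itself is assumed to be defined elsewhere in this header and is not set here, and the request is assumed to be sent over the driver's normal HWRM mailbox path.

#include <stdint.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/*
 * Sketch only: mark the PTP adjustments in 'req' to be latched at the
 * next PPS event. Only fields and constants visible in the hunk above
 * are used; enabling ptp_freq_adj_ppb needs its own enables bit
 * (defined elsewhere), and sending the request is not shown.
 */
static void
port_mac_cfg_ptp_pps_load(struct hwrm_port_mac_cfg_input *req,
			  int32_t freq_adj_ppb)
{
	req->enables |= rte_cpu_to_le_32(
		HWRM_PORT_MAC_CFG_INPUT_ENABLES_PTP_LOAD_CONTROL);
	req->ptp_load_control =
		HWRM_PORT_MAC_CFG_INPUT_PTP_LOAD_CONTROL_PPS_EVENT;
	req->ptp_freq_adj_ppb =
		(int32_t)rte_cpu_to_le_32((uint32_t)freq_adj_ppb);
}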
@@ -28248,7 +30297,7 @@ struct hwrm_port_mac_qcfg_output {
uint8_t ipg;
/* The loopback setting for the MAC. */
uint8_t lpbk;
- /* No loopback is selected. Normal operation. */
+ /* No loopback is selected. Normal operation. */
#define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
/*
* The HW will be configured with local loopback such that
@@ -28460,9 +30509,9 @@ struct hwrm_port_mac_qcfg_output {
uint8_t unused_1;
uint16_t port_svif_info;
/*
- * This field specifies the source virtual interface of the port being
- * queried. Drivers can use this to program port svif field in the
- * L2 context table
+ * This field specifies the source virtual interface of the port
+ * being queried. Drivers can use this to program port svif field in
+ * the L2 context table.
*/
#define HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK \
UINT32_C(0x7fff)
@@ -28470,12 +30519,31 @@ struct hwrm_port_mac_qcfg_output {
/* This field specifies whether port_svif is valid or not */
#define HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID \
UINT32_C(0x8000)
- uint8_t unused_2[5];
+ /*
+ * This field indicates the configured load control for the PTP
+ * time of day (TOD) block.
+ */
+ uint8_t ptp_load_control;
+ /* Indicates the current load control is none. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_PTP_LOAD_CONTROL_NONE \
+ UINT32_C(0x0)
+ /* Indicates the current load control is immediate. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_PTP_LOAD_CONTROL_IMMEDIATE \
+ UINT32_C(0x1)
+ /*
+ * Indicates the current load control is at the next Pulse per
+ * Second (PPS) event.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_PTP_LOAD_CONTROL_PPS_EVENT \
+ UINT32_C(0x2)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_PTP_LOAD_CONTROL_LAST \
+ HWRM_PORT_MAC_QCFG_OUTPUT_PTP_LOAD_CONTROL_PPS_EVENT
+ uint8_t unused_2[4];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -28567,6 +30635,12 @@ struct hwrm_port_mac_ptp_qcfg_output {
*/
#define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_RTC_CONFIGURED \
UINT32_C(0x20)
+ /*
+ * When this bit is set to '1', it indicates that the current time
+ * exposed to the driver is 64-bit.
+ */
+ #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_64B_PHC_TIME \
+ UINT32_C(0x40)
uint8_t unused_0[3];
/*
* Offset of the PTP register for the lower 32 bits of timestamp
@@ -28617,9 +30691,9 @@ struct hwrm_port_mac_ptp_qcfg_output {
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
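A minimal sketch of testing the new 64B_PHC_TIME capability bit defined above; the exact type of the response flags member is not shown in this hunk, so the access below is an assumption, and the function name is illustrative.

#include <stdbool.h>
#include "hsi_struct_def_dpdk.h"

/* Sketch only: 'resp' is assumed to point at a completed response. */
static bool
ptp_phc_time_is_64b(const struct hwrm_port_mac_ptp_qcfg_output *resp)
{
	return (resp->flags &
		HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_64B_PHC_TIME) != 0;
}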
@@ -29055,9 +31129,9 @@ struct hwrm_port_qstats_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -29098,37 +31172,85 @@ struct tx_port_stats_ext {
uint64_t tx_packets_cos6;
/* Total number of tx packets count on cos queue 7 */
uint64_t tx_packets_cos7;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 0 */
+ /*
+ * time duration between transmitting a XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 0
+ */
uint64_t pfc_pri0_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 0 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 0
+ */
uint64_t pfc_pri0_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 1 */
+ /*
+ * time duration between transmitting a XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 1
+ */
uint64_t pfc_pri1_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 1 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 1
+ */
uint64_t pfc_pri1_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 2 */
+ /*
+ * time duration between transmitting a XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 2
+ */
uint64_t pfc_pri2_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 2 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 2
+ */
uint64_t pfc_pri2_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 3 */
+ /*
+ * time duration between transmitting a XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 3
+ */
uint64_t pfc_pri3_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 3 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 3
+ */
uint64_t pfc_pri3_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 4 */
+ /*
+ * time duration between transmitting a XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 4
+ */
uint64_t pfc_pri4_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 4 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 4
+ */
uint64_t pfc_pri4_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 5 */
+ /*
+ * time duration between transmitting a XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 5
+ */
uint64_t pfc_pri5_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 5 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 5
+ */
uint64_t pfc_pri5_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 6 */
+ /*
+ * time duration between transmitting a XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 6
+ */
uint64_t pfc_pri6_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 6 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 6
+ */
uint64_t pfc_pri6_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 7 */
+ /*
+ * time duration between transmitting a XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 7
+ */
uint64_t pfc_pri7_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 7 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 7
+ */
uint64_t pfc_pri7_tx_transitions;
} __rte_packed;
@@ -29141,7 +31263,10 @@ struct rx_port_stats_ext {
uint64_t continuous_pause_events;
/* Number of times the active rings pause bit resumed back */
uint64_t resume_pause_events;
- /* Number of times, the ROCE cos queue PFC is disabled to avoid pause flood/burst */
+ /*
+ * Number of times, the ROCE cos queue PFC is disabled to avoid pause
+ * flood/burst
+ */
uint64_t continuous_roce_pause_events;
/* Number of times, the ROCE cos queue PFC is enabled back */
uint64_t resume_roce_pause_events;
@@ -29177,37 +31302,85 @@ struct rx_port_stats_ext {
uint64_t rx_packets_cos6;
/* Total number of rx packets count on cos queue 7 */
uint64_t rx_packets_cos7;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 0 */
+ /*
+ * time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 0
+ */
uint64_t pfc_pri0_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 0 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 0
+ */
uint64_t pfc_pri0_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 1 */
+ /*
+ * time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 1
+ */
uint64_t pfc_pri1_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 1 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 1
+ */
uint64_t pfc_pri1_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 2 */
+ /*
+ * time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 2
+ */
uint64_t pfc_pri2_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 2 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 2
+ */
uint64_t pfc_pri2_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 3 */
+ /*
+ * time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 3
+ */
uint64_t pfc_pri3_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 3 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 3
+ */
uint64_t pfc_pri3_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 4 */
+ /*
+ * time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 4
+ */
uint64_t pfc_pri4_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 4 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 4
+ */
uint64_t pfc_pri4_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 5 */
+ /*
+ * time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 5
+ */
uint64_t pfc_pri5_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 5 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 5
+ */
uint64_t pfc_pri5_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 6 */
+ /*
+ * time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 6
+ */
uint64_t pfc_pri6_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 6 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 6
+ */
uint64_t pfc_pri6_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 7 */
+ /*
+ * time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 7
+ */
uint64_t pfc_pri7_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 7 */
+ /*
+ * Number of times, a XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 7
+ */
uint64_t pfc_pri7_rx_transitions;
/* Total number of received bits */
uint64_t rx_bits;
@@ -29787,9 +31960,9 @@ struct hwrm_port_qstats_ext_output {
UINT32_C(0x1)
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -29860,16 +32033,15 @@ struct hwrm_port_qstats_ext_pfc_wd_output {
* statistics block in bytes.
*/
uint16_t pfc_wd_stat_size;
- uint8_t flags;
+ uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
- uint8_t unused_0[4];
} __rte_packed;
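The 'valid' comment repeated in these output structures describes the HWRM response convention: firmware writes the valid byte last, so the host must observe it as '1' before trusting any other response field. A minimal sketch of that pattern follows; the polling bound, delay and function name are illustrative, and the driver's real mailbox code also checks sequence IDs and error codes, which is omitted here.

#include <errno.h>
#include <stdint.h>
#include <rte_cycles.h>
#include <rte_io.h>

/*
 * Sketch only: poll the last byte of the response buffer, then issue a
 * read barrier so later loads of the payload are not reordered before
 * the valid check.
 */
static int
hwrm_wait_resp_valid(const void *resp, uint16_t resp_len)
{
	const volatile uint8_t *valid =
		(const volatile uint8_t *)resp + resp_len - 1;
	unsigned int i;

	for (i = 0; i < 10000; i++) {	/* illustrative timeout bound */
		if (*valid == 1) {
			rte_io_rmb();	/* valid first, payload after */
			return 0;
		}
		rte_delay_us(1);
	}
	return -ETIMEDOUT;
}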
/*************************
@@ -29877,7 +32049,7 @@ struct hwrm_port_qstats_ext_pfc_wd_output {
*************************/
-/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
struct hwrm_port_lpbk_qstats_input {
/* The HWRM command request type. */
uint16_t req_type;
@@ -29907,9 +32079,31 @@ struct hwrm_port_lpbk_qstats_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /*
+ * The size of the loopback statistics buffer passed in
+ * lpbk_stat_host_addr, in bytes.
+ * Firmware will not exceed this size when it DMAs the
+ * statistics structure to the host. The actual DMA size
+ * will be returned in the response.
+ */
+ uint16_t lpbk_stat_size;
+ uint8_t flags;
+ /*
+ * This bit is set to 1 when request is for a counter mask,
+ * representing the width of each of the stats counters, rather
+ * than counters themselves.
+ */
+ #define HWRM_PORT_LPBK_QSTATS_INPUT_FLAGS_COUNTER_MASK \
+ UINT32_C(0x1)
+ uint8_t unused_0[5];
+ /*
+ * This is the host address where
+ * loopback statistics will be stored
+ */
+ uint64_t lpbk_stat_host_addr;
} __rte_packed;
-/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
struct hwrm_port_lpbk_qstats_output {
/* The specific error status for the command. */
uint16_t error_code;
@@ -29919,6 +32113,28 @@ struct hwrm_port_lpbk_qstats_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
+ /*
+ * The size of the loopback statistics block in bytes DMA'ed by the
+ * firmware. Note that this size will never exceed the lpbk_stat_size
+ * field passed in by the driver in the hwrm_port_lpbk_qstats_input
+ * structure.
+ */
+ uint16_t lpbk_stat_size;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/* Loopback Port Statistic Format */
+/* port_lpbk_stats (size:640b/80B) */
+struct port_lpbk_stats {
/* Number of transmitted unicast frames */
uint64_t lpbk_ucast_frames;
/* Number of transmitted multicast frames */
@@ -29931,23 +32147,14 @@ struct hwrm_port_lpbk_qstats_output {
uint64_t lpbk_mcast_bytes;
/* Number of transmitted bytes for broadcast traffic */
uint64_t lpbk_bcast_bytes;
- /* Total Tx Drops for loopback traffic reported by STATS block */
- uint64_t tx_stat_discard;
- /* Total Tx Error Drops for loopback traffic reported by STATS block */
- uint64_t tx_stat_error;
- /* Total Rx Drops for loopback traffic reported by STATS block */
- uint64_t rx_stat_discard;
- /* Total Rx Error Drops for loopback traffic reported by STATS block */
- uint64_t rx_stat_error;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
+ /* Number of dropped tx packets */
+ uint64_t lpbk_tx_discards;
+ /* Number of error dropped tx packets */
+ uint64_t lpbk_tx_errors;
+ /* Number of dropped rx packets */
+ uint64_t lpbk_rx_discards;
+ /* Number of error dropped rx packets */
+ uint64_t lpbk_rx_errors;
} __rte_packed;
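With this rework, hwrm_port_lpbk_qstats follows the same pattern as the other extended statistics queries: the host supplies a DMA-able buffer and its size, and the firmware writes a port_lpbk_stats block and reports how many bytes it actually DMA'ed in the response. The sketch below only fills in the request; buffer allocation and the mailbox send are assumed to be the driver's existing helpers, and the function name is invented.

#include <stdbool.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include "hsi_struct_def_dpdk.h"

/*
 * Sketch only: 'stats_iova' is assumed to be the IOVA of a buffer at
 * least sizeof(struct port_lpbk_stats) bytes long.
 */
static void
port_lpbk_qstats_prep(struct hwrm_port_lpbk_qstats_input *req,
		      rte_iova_t stats_iova, bool counter_mask)
{
	req->lpbk_stat_size = rte_cpu_to_le_16(sizeof(struct port_lpbk_stats));
	req->lpbk_stat_host_addr = rte_cpu_to_le_64(stats_iova);
	if (counter_mask)
		req->flags |= HWRM_PORT_LPBK_QSTATS_INPUT_FLAGS_COUNTER_MASK;
}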
/************************
@@ -30030,9 +32237,9 @@ struct hwrm_port_ecn_qstats_output {
uint8_t unused_0[4];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -30135,7 +32342,8 @@ struct hwrm_port_clr_stats_input {
* RoCE associated TX/RX cos counters
* CNP associated TX/RX cos counters
* RoCE/CNP specific TX/RX flow counters
- * Firmware will determine the RoCE/CNP cos queue based on qos profile.
+ * Firmware will determine the RoCE/CNP cos queue based on qos
+ * profile.
* This flag is honored only when RoCE is enabled on that port.
*/
#define HWRM_PORT_CLR_STATS_INPUT_FLAGS_ROCE_COUNTERS UINT32_C(0x1)
@@ -30155,9 +32363,70 @@ struct hwrm_port_clr_stats_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/****************************
+ * hwrm_port_lpbk_clr_stats *
+ ****************************/
+
+
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
+struct hwrm_port_lpbk_clr_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is to be queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __rte_packed;
+
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -30234,9 +32503,9 @@ struct hwrm_port_phy_qcaps_output {
#define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_AUTONEG_LPBK_SUPPORTED \
UINT32_C(0x4)
/*
- * Indicates if the configuration of shared PHY settings is supported.
- * In cases where a physical port is shared by multiple functions
- * (e.g. NPAR, multihost, etc), the configuration of PHY
+ * Indicates if the configuration of shared PHY settings is
+ * supported. In cases where a physical port is shared by multiple
+ * functions (e.g. NPAR, multihost, etc), the configuration of PHY
* settings may not be allowed. Callers to HWRM_PORT_PHY_CFG will
* get an HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED error in this case.
*/
@@ -30245,7 +32514,7 @@ struct hwrm_port_phy_qcaps_output {
/*
* If set to 1, it indicates that the port counters and extended
* port counters will not reset when the firmware shuts down or
- * resets the PHY. These counters will only be reset during power
+ * resets the PHY. These counters will only be reset during power
* cycle or by calling HWRM_PORT_CLR_STATS.
* If set to 0, the state of the counters is unspecified when
* firmware shuts down or resets the PHY.
@@ -30520,43 +32789,43 @@ struct hwrm_port_phy_qcaps_output {
/* 1Gb link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_1GB \
UINT32_C(0x1)
- /* 10Gb link speed */
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_10GB \
UINT32_C(0x2)
- /* 25Gb link speed */
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_25GB \
UINT32_C(0x4)
- /* 40Gb link speed */
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_40GB \
UINT32_C(0x8)
- /* 50Gb link speed */
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_50GB \
UINT32_C(0x10)
- /* 100Gb link speed */
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_100GB \
UINT32_C(0x20)
- /* 50Gb (PAM4-56: 50G per lane) link speed */
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 \
UINT32_C(0x40)
- /* 100Gb (PAM4-56: 50G per lane) link speed */
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 \
UINT32_C(0x80)
- /* 200Gb (PAM4-56: 50G per lane) link speed */
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 \
UINT32_C(0x100)
- /* 400Gb (PAM4-56: 50G per lane) link speed */
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 \
UINT32_C(0x200)
- /* 100Gb (PAM4-112: 100G per lane) link speed */
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 \
UINT32_C(0x400)
- /* 200Gb (PAM4-112: 100G per lane) link speed */
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 \
UINT32_C(0x800)
- /* 400Gb (PAM4-112: 100G per lane) link speed */
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 \
UINT32_C(0x1000)
- /* 800Gb (PAM4-112: 100G per lane) link speed */
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 \
UINT32_C(0x2000)
/*
@@ -30570,51 +32839,51 @@ struct hwrm_port_phy_qcaps_output {
/* 1Gb link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_1GB \
UINT32_C(0x1)
- /* 10Gb link speed */
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_10GB \
UINT32_C(0x2)
- /* 25Gb link speed */
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_25GB \
UINT32_C(0x4)
- /* 40Gb link speed */
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_40GB \
UINT32_C(0x8)
- /* 50Gb link speed */
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_50GB \
UINT32_C(0x10)
- /* 100Gb link speed */
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_100GB \
UINT32_C(0x20)
- /* 50Gb (PAM4-56: 50G per lane) link speed */
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 \
UINT32_C(0x40)
- /* 100Gb (PAM4-56: 50G per lane) link speed */
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 \
UINT32_C(0x80)
- /* 200Gb (PAM4-56: 50G per lane) link speed */
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 \
UINT32_C(0x100)
- /* 400Gb (PAM4-56: 50G per lane) link speed */
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 \
UINT32_C(0x200)
- /* 100Gb (PAM4-112: 100G per lane) link speed */
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 \
UINT32_C(0x400)
- /* 200Gb (PAM4-112: 100G per lane) link speed */
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 \
UINT32_C(0x800)
- /* 400Gb (PAM4-112: 100G per lane) link speed */
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 \
UINT32_C(0x1000)
- /* 800Gb (PAM4-112: 100G per lane) link speed */
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
#define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 \
UINT32_C(0x2000)
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -30689,9 +32958,9 @@ struct hwrm_port_phy_mdio_write_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -30766,9 +33035,9 @@ struct hwrm_port_phy_mdio_read_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -31190,9 +33459,9 @@ struct hwrm_port_led_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -31496,9 +33765,9 @@ struct hwrm_port_led_qcfg_output {
uint8_t unused_4[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -31848,9 +34117,9 @@ struct hwrm_port_led_qcaps_output {
uint8_t unused_4[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -31895,7 +34164,8 @@ struct hwrm_port_prbs_test_input {
uint64_t resp_data_addr;
/*
* Size of the buffer pointed to by resp_data_addr. The firmware may
- * use this entire buffer or less than the entire buffer, but never more.
+ * use this entire buffer or less than the entire buffer, but never
+ * more.
*/
uint16_t data_len;
uint16_t flags;
@@ -32010,9 +34280,9 @@ struct hwrm_port_prbs_test_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -32056,13 +34326,20 @@ struct hwrm_port_dsc_dump_input {
/* Host address where response diagnostic data is returned. */
uint64_t resp_data_addr;
/*
- * Size of the buffer pointed to by resp_data_addr. The firmware
+ * Size of the host buffer pointed to by resp_data_addr. The firmware
* may use this entire buffer or less than the entire buffer, but
* never more.
*/
uint16_t data_len;
uint16_t unused_0;
- uint32_t unused_1;
+ /*
+ * Ignored by the start command.
+ * In legacy buffer mode, this is ignored; the transfer starts
+ * at buffer offset zero and must complete in one command.
+ * In big buffer mode, this is the offset into the NIC buffer
+ * at which the current retrieve command starts.
+ */
+ uint32_t data_offset;
/* Port ID of port where dsc dump to be collected. */
uint16_t port_id;
/* Diag level specified by the user */
@@ -32109,21 +34386,51 @@ struct hwrm_port_dsc_dump_input {
#define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_LAST \
HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_TIMESTAMP
/*
- * This field is a lane number
- * on which to collect the dsc dump
+ * This field is the lane number on which to collect the dsc dump.
+ * If this is 0xFFFF, the dsc dump will be collected for all lanes,
+ * if the hardware and firmware support this feature.
*/
uint16_t lane_number;
- /*
- * Configuration bits.
- * Use enable bit to start dsc dump or retrieve dump
- */
+ /* Configuration bits. */
uint16_t dsc_dump_config;
/*
* Set 0 to retrieve the dsc dump
* Set 1 to start the dsc dump
+ * Some configuration parameters for the dsc dump report are
+ * set by the start request and cannot be modified until the
+ * retrieve operation is complete, i.e. until the next start.
*/
#define HWRM_PORT_DSC_DUMP_INPUT_DSC_DUMP_CONFIG_START_RETRIEVE \
UINT32_C(0x1)
+ /*
+ * Set 0 to limit the report size to 65535 bytes.
+ * Set 1 to allow a larger buffer size.
+ * This can only be set to 1 in the start operation.
+ * If this is set to 0 in the start operation, the firmware will
+ * assume it only needs to expose up to 65535 bytes of the report,
+ * and only allow a single retrieve operation to retrieve the
+ * entire report. This mode will truncate longer reports.
+ * If this is set to 1 in the start operation, the firmware will
+ * report the full size of the report (up to the firmware's limit),
+ * permit retrieve operations to hold the buffer using the config
+ * defer_close, and honour the data_offset value so later data
+ * in the report can be retrieved.
+ */
+ #define HWRM_PORT_DSC_DUMP_INPUT_DSC_DUMP_CONFIG_BIG_BUFFER \
+ UINT32_C(0x2)
+ /*
+ * Set 0 on the last 'retrieve' to release the firmware buffer.
+ * Set 1 on the other 'retrieve' operations to hold the firmware
+ * buffer. This only affects retrieve operations.
+ * In big_buffer mode, this allows the driver or tool to tell
+ * the firmware to keep the report around, as it intends to read
+ * more of it. The final read must set this to zero, to tell
+ * the firmware the report buffer can be released.
+ * This only works if the start request set big_buffer to one;
+ * it is ignored otherwise.
+ */
+ #define HWRM_PORT_DSC_DUMP_INPUT_DSC_DUMP_CONFIG_DEFER_CLOSE \
+ UINT32_C(0x4)
} __rte_packed;
/* hwrm_port_dsc_dump_output (size:128b/16B) */
@@ -32136,15 +34443,49 @@ struct hwrm_port_dsc_dump_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Total length of stored data. */
+ /*
+ * Total length of stored data; if big_buffer is one, this
+ * only contains the lower 16 bits of the total length.
+ * In legacy buffer mode, this is zero in the 'start' response.
+ * In big buffer mode, this has the size of the report even
+ * in the 'start' response.
+ * In both modes, this contains the number of bytes written
+ * to the host in 'retrieve' responses.
+ */
uint16_t total_data_len;
- uint16_t unused_0;
- uint8_t unused_1[3];
+ /*
+ * The upper 16 bits of the total length of stored data.
+ * In legacy buffer mode, this will always be zero.
+ * In big buffer mode, this will be populated even in the
+ * 'start' response.
+ * This is always zero for 'retrieve' responses.
+ */
+ uint16_t total_data_len_high;
+ uint8_t unused_1[2];
+ /* Result information bits. */
+ uint8_t flags;
+ /*
+ * Set according to the start request's input big_buffer.
+ * If this is zero, it indicates the function is acting per
+ * legacy behaviour -- it will report a buffer size up to almost
+ * 64KiB, and allow only one retrieval request before releasing
+ * the firmware buffer containing the report (total_data_len_high
+ * will be zero). The request's data_offset field and the
+ * defer_close and use_offset config flags are ignored.
+ * If this is one, it indicates support for (and a request for)
+ * larger reports. The full 32b report size (up to the
+ * firmware buffer limit) is provided by the start response in
+ * total_data_len (low 16b) and total_data_len_high (high 16b),
+ * and retrieve requests may keep the buffer using the defer_close
+ * flag, and retrieve the later parts of the report using the
+ * data_offset field.
+ */
+ #define HWRM_PORT_DSC_DUMP_OUTPUT_FLAGS_BIG_BUFFER UINT32_C(0x1)
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
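
Illustrative only, not part of this patch: a minimal sketch of driving the big-buffer dsc dump flow defined above. It assembles the 32-bit report size from total_data_len/total_data_len_high after a 'start', then loops over 'retrieve' requests that advance data_offset and keep DEFER_CLOSE set until the last chunk. The dsc_dump_xmit_t callback is a hypothetical stand-in for the driver's HWRM send path (assumed to fill the common request header); lane_number and diag_level are left at their zero defaults and error handling is abbreviated.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Hypothetical transport hook: performs one HWRM_PORT_DSC_DUMP exchange. */
typedef int (*dsc_dump_xmit_t)(struct hwrm_port_dsc_dump_input *req,
                               struct hwrm_port_dsc_dump_output *resp);

static int
dsc_dump_read_all(dsc_dump_xmit_t xmit, uint16_t port_id,
                  uint64_t host_buf_iova, uint16_t chunk_len)
{
    struct hwrm_port_dsc_dump_input req;
    struct hwrm_port_dsc_dump_output resp;
    uint32_t total, offset = 0;
    int rc;

    /* 'start' with BIG_BUFFER so the full 32-bit report size is exposed. */
    memset(&req, 0, sizeof(req));
    memset(&resp, 0, sizeof(resp));
    req.port_id = rte_cpu_to_le_16(port_id);
    req.dsc_dump_config = rte_cpu_to_le_16(
        HWRM_PORT_DSC_DUMP_INPUT_DSC_DUMP_CONFIG_START_RETRIEVE |
        HWRM_PORT_DSC_DUMP_INPUT_DSC_DUMP_CONFIG_BIG_BUFFER);
    rc = xmit(&req, &resp);
    if (rc != 0)
        return rc;

    /* Firmware falls back to legacy behaviour if BIG_BUFFER is not echoed. */
    if (!(resp.flags & HWRM_PORT_DSC_DUMP_OUTPUT_FLAGS_BIG_BUFFER))
        return -ENOTSUP;

    /* Low 16 bits in total_data_len, high 16 bits in total_data_len_high. */
    total = ((uint32_t)rte_le_to_cpu_16(resp.total_data_len_high) << 16) |
            rte_le_to_cpu_16(resp.total_data_len);

    while (offset < total) {
        uint16_t cfg = 0; /* START_RETRIEVE clear => 'retrieve' */

        /* Hold the firmware buffer on all but the last chunk. */
        if (offset + chunk_len < total)
            cfg |= HWRM_PORT_DSC_DUMP_INPUT_DSC_DUMP_CONFIG_DEFER_CLOSE;

        memset(&req, 0, sizeof(req));
        req.port_id = rte_cpu_to_le_16(port_id);
        req.resp_data_addr = rte_cpu_to_le_64(host_buf_iova);
        req.data_len = rte_cpu_to_le_16(chunk_len);
        req.data_offset = rte_cpu_to_le_32(offset);
        req.dsc_dump_config = rte_cpu_to_le_16(cfg);
        rc = xmit(&req, &resp);
        if (rc != 0)
            return rc;

        /* 'retrieve' responses report bytes written in total_data_len. */
        if (resp.total_data_len == 0)
            return -EIO; /* defensive: no forward progress */
        offset += rte_le_to_cpu_16(resp.total_data_len);
    }
    return 0;
}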
@@ -32283,7 +34624,7 @@ struct hwrm_port_sfp_sideband_cfg_output {
uint8_t unused[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written. When
* writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
@@ -32406,7 +34747,7 @@ struct hwrm_port_sfp_sideband_qcfg_output {
uint8_t unused[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written. When
* writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
@@ -32460,7 +34801,7 @@ struct hwrm_port_phy_mdio_bus_acquire_input {
*/
uint16_t client_id;
/*
- * Timeout in milli seconds, MDIO BUS will be released automatically
+ * Timeout in milliseconds, MDIO BUS will be released automatically
* after this time, if another mdio acquire command is not received
* within the timeout window from the same client.
* A 0xFFFF will hold the bus until this bus is released.
@@ -32488,9 +34829,9 @@ struct hwrm_port_phy_mdio_bus_acquire_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -32557,9 +34898,9 @@ struct hwrm_port_phy_mdio_bus_release_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -32603,11 +34944,21 @@ struct hwrm_port_tx_fir_cfg_input {
/* Modulation types of TX FIR: NRZ, PAM4. */
uint8_t mod_type;
/* For NRZ */
- #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_NRZ UINT32_C(0x0)
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_NRZ UINT32_C(0x0)
/* For PAM4 */
- #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_PAM4 UINT32_C(0x1)
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_PAM4 UINT32_C(0x1)
+ /* For Optical NRZ */
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_C2M_NRZ UINT32_C(0x2)
+ /* For Optical PAM4 */
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_C2M_PAM4 UINT32_C(0x3)
+ /* For DAC PAM4 112G */
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_PAM4_112 UINT32_C(0x4)
+ /* For Optical PAM4 112G */
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_C2M_PAM4_112G UINT32_C(0x5)
+ /* For LPO PAM4 112G */
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_LPO_PAM4_112G UINT32_C(0x6)
#define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_LAST \
- HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_PAM4
+ HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_LPO_PAM4_112G
/* The lane mask of the lane TX FIR will be configured. */
uint8_t lane_mask;
uint8_t unused_0[2];
@@ -32635,9 +34986,9 @@ struct hwrm_port_tx_fir_cfg_output {
uint8_t unused[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -32681,11 +35032,21 @@ struct hwrm_port_tx_fir_qcfg_input {
/* Modulation types of TX FIR: NRZ, PAM4. */
uint8_t mod_type;
/* For NRZ */
- #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_NRZ UINT32_C(0x0)
- /* For PAM4 */
- #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_PAM4 UINT32_C(0x1)
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_NRZ UINT32_C(0x0)
+ /* For PAM4 56G */
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_PAM4 UINT32_C(0x1)
+ /* For Optical NRZ */
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_C2M_NRZ UINT32_C(0x2)
+ /* For Optical PAM4 56G */
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_C2M_PAM4 UINT32_C(0x3)
+ /* For DAC PAM4 112G */
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_PAM4_112 UINT32_C(0x4)
+ /* For Optical PAM4 112G */
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_C2M_PAM4_112 UINT32_C(0x5)
+ /* For LPO PAM4 112G */
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_LPO_PAM4_112 UINT32_C(0x6)
#define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_LAST \
- HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_PAM4
+ HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_LPO_PAM4_112
/* The ID of the lane TX FIR will be queried. */
uint8_t lane_id;
uint8_t unused[6];
@@ -32712,9 +35073,9 @@ struct hwrm_port_tx_fir_qcfg_output {
uint8_t unused[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -32817,9 +35178,9 @@ struct hwrm_port_ep_tx_cfg_input {
*/
uint8_t ep2_min_bw;
/*
- * Specifies the maximum portion of the port's bandwidth that the set of
- * PFs and VFs on PCIe endpoint 2 may use. The value is a percentage of
- * the link bandwidth, from 0 to 100. A value of 0 indicates no
+ * Specifies the maximum portion of the port's bandwidth that the set
+ * of PFs and VFs on PCIe endpoint 2 may use. The value is a percentage
+ * of the link bandwidth, from 0 to 100. A value of 0 indicates no
* maximum rate.
*/
uint8_t ep2_max_bw;
@@ -33096,9 +35457,9 @@ struct hwrm_port_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -33175,9 +35536,84 @@ struct hwrm_port_qcfg_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/***********************
+ * hwrm_port_mac_qcaps *
+ ***********************/
+
+
+/* hwrm_port_mac_qcaps_input (size:192b/24B) */
+struct hwrm_port_mac_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __rte_packed;
+
+/* hwrm_port_mac_qcaps_output (size:128b/16B) */
+struct hwrm_port_mac_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* MAC capability flags */
+ uint8_t flags;
+ /*
+ * If set to 1, then this field indicates that the
+ * MAC does not support local loopback.
+ */
+ #define HWRM_PORT_MAC_QCAPS_OUTPUT_FLAGS_LOCAL_LPBK_NOT_SUPPORTED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, then this field indicates that the
+ * MAC is capable of supporting remote loopback.
+ */
+ #define HWRM_PORT_MAC_QCAPS_OUTPUT_FLAGS_REMOTE_LPBK_SUPPORTED \
+ UINT32_C(0x2)
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
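
Illustrative only, not part of this patch: a small helper showing how the two capability bits above might be interpreted; note that local loopback support is signalled by the absence of the NOT_SUPPORTED bit, while remote loopback uses a positive SUPPORTED bit.

#include <stdbool.h>
#include <stdint.h>
#include "hsi_struct_def_dpdk.h"

/* Decode hwrm_port_mac_qcaps flags; 'flags' is a single byte, so no swap. */
static void
port_mac_caps_decode(const struct hwrm_port_mac_qcaps_output *resp,
                     bool *local_lpbk_supported, bool *remote_lpbk_supported)
{
    uint8_t flags = resp->flags;

    *local_lpbk_supported =
        !(flags & HWRM_PORT_MAC_QCAPS_OUTPUT_FLAGS_LOCAL_LPBK_NOT_SUPPORTED);
    *remote_lpbk_supported =
        !!(flags & HWRM_PORT_MAC_QCAPS_OUTPUT_FLAGS_REMOTE_LPBK_SUPPORTED);
}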
@@ -33800,8 +36236,8 @@ struct hwrm_queue_qportcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -33898,8 +36334,8 @@ struct hwrm_queue_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -33942,9 +36378,9 @@ struct hwrm_queue_cfg_input {
uint64_t resp_addr;
uint32_t flags;
/*
- * Enumeration denoting the RX, TX, or both directions applicable to the resource.
- * This enumeration is used for resources that are similar for both
- * TX and RX paths of the chip.
+ * Enumeration denoting the RX, TX, or both directions applicable to
+ * the resource. This enumeration is used for resources that are
+ * similar for both TX and RX paths of the chip.
*/
#define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3)
#define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_SFT 0
@@ -34002,8 +36438,8 @@ struct hwrm_queue_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -34116,8 +36552,8 @@ struct hwrm_queue_pfcenable_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -34230,8 +36666,8 @@ struct hwrm_queue_pfcenable_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -34381,8 +36817,8 @@ struct hwrm_queue_pri2cos_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -34425,9 +36861,9 @@ struct hwrm_queue_pri2cos_cfg_input {
uint64_t resp_addr;
uint32_t flags;
/*
- * Enumeration denoting the RX, TX, or both directions applicable to the resource.
- * This enumeration is used for resources that are similar for both
- * TX and RX paths of the chip.
+ * Enumeration denoting the RX, TX, or both directions applicable to
+ * the resource. This enumeration is used for resources that are
+ * similar for both TX and RX paths of the chip.
*/
#define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3)
#define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_SFT 0
@@ -34512,7 +36948,7 @@ struct hwrm_queue_pri2cos_cfg_input {
*/
uint8_t pri1_cos_queue_id;
/*
- * CoS Queue assigned to priority 2 This value can only
+ * CoS Queue assigned to priority 2. This value can only
* be changed before traffic has started.
*/
uint8_t pri2_cos_queue_id;
@@ -34558,8 +36994,8 @@ struct hwrm_queue_pri2cos_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -35601,8 +38037,8 @@ struct hwrm_queue_cos2bw_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -36693,8 +39129,8 @@ struct hwrm_queue_cos2bw_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -36763,8 +39199,8 @@ struct hwrm_queue_dscp_qcaps_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -36846,8 +39282,8 @@ struct hwrm_queue_dscp2pri_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -36941,8 +39377,8 @@ struct hwrm_queue_dscp2pri_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -37024,8 +39460,8 @@ struct hwrm_queue_mpls_qcaps_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -37145,8 +39581,8 @@ struct hwrm_queue_mplstc2pri_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -37254,7 +39690,7 @@ struct hwrm_queue_mplstc2pri_cfg_input {
*/
uint8_t tc1_pri_queue_id;
/*
- * pri assigned to MPLS TC(EXP) 2 This value can only
+ * pri assigned to MPLS TC(EXP) 2. This value can only
* be changed before traffic has started.
*/
uint8_t tc2_pri_queue_id;
@@ -37299,8 +39735,8 @@ struct hwrm_queue_mplstc2pri_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -37369,8 +39805,8 @@ struct hwrm_queue_vlanpri_qcaps_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -37482,8 +39918,8 @@ struct hwrm_queue_vlanpri2pri_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -37636,8 +40072,8 @@ struct hwrm_queue_vlanpri2pri_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -37768,8 +40204,8 @@ struct hwrm_queue_global_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -37914,20 +40350,20 @@ struct hwrm_queue_global_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/*******************
- * hwrm_vnic_alloc *
- *******************/
+/****************************************
+ * hwrm_queue_adptv_qos_rx_feature_qcfg *
+ ****************************************/
-/* hwrm_vnic_alloc_input (size:192b/24B) */
-struct hwrm_vnic_alloc_input {
+/* hwrm_queue_adptv_qos_rx_feature_qcfg_input (size:128b/16B) */
+struct hwrm_queue_adptv_qos_rx_feature_qcfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -37956,31 +40392,464 @@ struct hwrm_vnic_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t flags;
+} __rte_packed;
+
+/* hwrm_queue_adptv_qos_rx_feature_qcfg_output (size:128b/16B) */
+struct hwrm_queue_adptv_qos_rx_feature_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * When this bit is '1', this VNIC is requested to
- * be the default VNIC for this function.
+ * Bitmask indicating which RX CoS queues are enabled or disabled.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * A value of 0 indicates that the queue is not enabled.
+ * A value of 1 indicates that the queue is enabled.
*/
- #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT \
+ uint8_t queue_enable;
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE \
+ UINT32_C(0x1)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_DISABLED \
+ UINT32_C(0x0)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED \
UINT32_C(0x1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE \
+ UINT32_C(0x2)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 1)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE \
+ UINT32_C(0x4)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 2)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 2)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE \
+ UINT32_C(0x8)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 3)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 3)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE \
+ UINT32_C(0x10)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 4)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 4)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE \
+ UINT32_C(0x20)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 5)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 5)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE \
+ UINT32_C(0x40)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 6)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 6)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE \
+ UINT32_C(0x80)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 7)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 7)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED
+ /*
+ * Bitmask indicating which CoS queues are lossy or lossless.
+ * This setting is kept the same across Rx and Tx directions,
+ * despite the name mentioning only Rx. Each bit represents a specific queue
+ * where bit 0 represents queue 0 and bit 7 represents queue 7.
+ * A value of 0 indicates that the queue is lossy.
+ * A value of 1 indicates that the queue is lossless.
+ */
+ uint8_t queue_mode;
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID0_MODE \
+ UINT32_C(0x1)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID0_MODE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID0_MODE_LOSSLESS \
+ UINT32_C(0x1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID0_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID0_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID1_MODE \
+ UINT32_C(0x2)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID1_MODE_LOSSY \
+ (UINT32_C(0x0) << 1)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID1_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID1_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID1_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID2_MODE \
+ UINT32_C(0x4)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID2_MODE_LOSSY \
+ (UINT32_C(0x0) << 2)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID2_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 2)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID2_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID2_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID3_MODE \
+ UINT32_C(0x8)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID3_MODE_LOSSY \
+ (UINT32_C(0x0) << 3)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID3_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 3)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID3_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID3_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID4_MODE \
+ UINT32_C(0x10)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID4_MODE_LOSSY \
+ (UINT32_C(0x0) << 4)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID4_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 4)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID4_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID4_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID5_MODE \
+ UINT32_C(0x20)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID5_MODE_LOSSY \
+ (UINT32_C(0x0) << 5)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID5_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 5)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID5_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID5_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID6_MODE \
+ UINT32_C(0x40)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID6_MODE_LOSSY \
+ (UINT32_C(0x0) << 6)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID6_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 6)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID6_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID6_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID7_MODE \
+ UINT32_C(0x80)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID7_MODE_LOSSY \
+ (UINT32_C(0x0) << 7)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID7_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 7)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID7_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID7_MODE_LOSSLESS
+ uint8_t unused_0[5];
/*
- * When this bit is '1', proxy VEE PF is requesting
- * allocation of a default VNIC on behalf of virtio-net
- * function given in virtio_net_fid field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- #define HWRM_VNIC_ALLOC_INPUT_FLAGS_VIRTIO_NET_FID_VALID \
+ uint8_t valid;
+} __rte_packed;
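
Illustrative only, not part of this patch: since queue_enable and queue_mode are plain per-queue bitmasks (bit N refers to CoS queue N), a caller can decode the qcfg response with a short loop instead of the per-QID defines, e.g.:

#include <stdint.h>
#include <stdio.h>
#include "hsi_struct_def_dpdk.h"

/*
 * Bit N of queue_enable: CoS queue N enabled (1) or disabled (0).
 * Bit N of queue_mode:   CoS queue N lossless (1) or lossy (0).
 */
static void
adptv_qos_rx_qcfg_dump(const struct hwrm_queue_adptv_qos_rx_feature_qcfg_output *resp)
{
    unsigned int qid;

    for (qid = 0; qid < 8; qid++) {
        uint8_t bit = (uint8_t)(1U << qid);

        printf("CoS queue %u: %s, %s\n", qid,
               (resp->queue_enable & bit) ? "enabled" : "disabled",
               (resp->queue_mode & bit) ? "lossless" : "lossy");
    }
}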
+
+/***************************************
+ * hwrm_queue_adptv_qos_rx_feature_cfg *
+ ***************************************/
+
+
+/* hwrm_queue_adptv_qos_rx_feature_cfg_input (size:192b/24B) */
+struct hwrm_queue_adptv_qos_rx_feature_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /* This bit must be '1' for the queue_enable field to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_ENABLES_QUEUE_ENABLE \
+ UINT32_C(0x1)
+ /* This bit must be '1' for the queue_mode field to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_ENABLES_QUEUE_MODE \
UINT32_C(0x2)
/*
- * Virtio-net function's FID.
- * This virtio-net function is requesting allocation of default
- * VNIC through proxy VEE PF.
+ * Bitmask indicating which RX CoS queues are enabled or disabled.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * A value of 0 indicates that the queue is not enabled.
+ * A value of 1 indicates that the queue is enabled.
*/
- uint16_t virtio_net_fid;
+ uint8_t queue_enable;
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE \
+ UINT32_C(0x1)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_DISABLED \
+ UINT32_C(0x0)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED \
+ UINT32_C(0x1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE \
+ UINT32_C(0x2)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 1)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE \
+ UINT32_C(0x4)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 2)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 2)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE \
+ UINT32_C(0x8)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 3)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 3)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE \
+ UINT32_C(0x10)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 4)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 4)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE \
+ UINT32_C(0x20)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 5)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 5)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE \
+ UINT32_C(0x40)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 6)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 6)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE \
+ UINT32_C(0x80)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 7)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 7)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED
+ /*
+ * Bitmask indicating which CoS queues are lossy or lossless.
+ * This setting is kept symmetric (i.e. the same) across Tx and Rx.
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * A value of 0 indicates that the queue is lossy.
+ * A value of 1 indicates that the queue is lossless.
+ */
+ uint8_t queue_mode;
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID0_MODE \
+ UINT32_C(0x1)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID0_MODE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID0_MODE_LOSSLESS \
+ UINT32_C(0x1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID0_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID0_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID1_MODE \
+ UINT32_C(0x2)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID1_MODE_LOSSY \
+ (UINT32_C(0x0) << 1)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID1_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID1_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID1_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID2_MODE \
+ UINT32_C(0x4)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID2_MODE_LOSSY \
+ (UINT32_C(0x0) << 2)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID2_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 2)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID2_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID2_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID3_MODE \
+ UINT32_C(0x8)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID3_MODE_LOSSY \
+ (UINT32_C(0x0) << 3)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID3_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 3)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID3_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID3_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID4_MODE \
+ UINT32_C(0x10)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID4_MODE_LOSSY \
+ (UINT32_C(0x0) << 4)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID4_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 4)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID4_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID4_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID5_MODE \
+ UINT32_C(0x20)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID5_MODE_LOSSY \
+ (UINT32_C(0x0) << 5)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID5_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 5)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID5_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID5_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID6_MODE \
+ UINT32_C(0x40)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID6_MODE_LOSSY \
+ (UINT32_C(0x0) << 6)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID6_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 6)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID6_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID6_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID7_MODE \
+ UINT32_C(0x80)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID7_MODE_LOSSY \
+ (UINT32_C(0x0) << 7)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID7_MODE_LOSSLESS \
+ (UINT32_C(0x1) << 7)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID7_MODE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID7_MODE_LOSSLESS
uint8_t unused_0[2];
} __rte_packed;
-/* hwrm_vnic_alloc_output (size:128b/16B) */
-struct hwrm_vnic_alloc_output {
+/* hwrm_queue_adptv_qos_rx_feature_cfg_output (size:128b/16B) */
+struct hwrm_queue_adptv_qos_rx_feature_cfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -37989,26 +40858,24 @@ struct hwrm_vnic_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Logical vnic ID */
- uint32_t vnic_id;
- uint8_t unused_0[3];
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
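
Illustrative only, not part of this patch: a sketch of filling the cfg request above. Each mask is honoured only when its enables bit is set; the common HWRM header fields are assumed to be populated by the driver's usual request-preparation path.

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/*
 * Prepare an adaptive-QoS RX feature cfg request that enables CoS
 * queues 0-3 and marks queues 0 and 1 as lossless.
 */
static void
adptv_qos_rx_cfg_prepare(struct hwrm_queue_adptv_qos_rx_feature_cfg_input *req)
{
    memset(req, 0, sizeof(*req));

    /* Both masks are only applied when their enables bit is set. */
    req->enables = rte_cpu_to_le_32(
        HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_ENABLES_QUEUE_ENABLE |
        HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_ENABLES_QUEUE_MODE);

    /* Bit N of each single-byte mask refers to CoS queue N. */
    req->queue_enable = 0x0f; /* queues 0-3 enabled */
    req->queue_mode = 0x03;   /* queues 0-1 lossless, rest lossy */
}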
-/********************
- * hwrm_vnic_update *
- ********************/
+/****************************************
+ * hwrm_queue_adptv_qos_tx_feature_qcfg *
+ ****************************************/
-/* hwrm_vnic_update_input (size:256b/32B) */
-struct hwrm_vnic_update_input {
+/* hwrm_queue_adptv_qos_tx_feature_qcfg_input (size:128b/16B) */
+struct hwrm_queue_adptv_qos_tx_feature_qcfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -38037,67 +40904,10 @@ struct hwrm_vnic_update_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Logical vnic ID */
- uint32_t vnic_id;
- uint32_t enables;
- /*
- * This bit must be '1' for the vnic_state field to be
- * configured.
- */
- #define HWRM_VNIC_UPDATE_INPUT_ENABLES_VNIC_STATE_VALID \
- UINT32_C(0x1)
- /*
- * This bit must be '1' for the mru field to be
- * configured.
- */
- #define HWRM_VNIC_UPDATE_INPUT_ENABLES_MRU_VALID \
- UINT32_C(0x2)
- /*
- * This bit must be '1' for the metadata_format_type field to be
- * configured.
- */
- #define HWRM_VNIC_UPDATE_INPUT_ENABLES_METADATA_FORMAT_TYPE_VALID \
- UINT32_C(0x4)
- /*
- * This will update the context variable with the same name if
- * the corresponding enable is set.
- */
- uint8_t vnic_state;
- /* Normal operation state for the VNIC. */
- #define HWRM_VNIC_UPDATE_INPUT_VNIC_STATE_NORMAL UINT32_C(0x0)
- /* All packets are dropped in this state. */
- #define HWRM_VNIC_UPDATE_INPUT_VNIC_STATE_DROP UINT32_C(0x1)
- #define HWRM_VNIC_UPDATE_INPUT_VNIC_STATE_LAST \
- HWRM_VNIC_UPDATE_INPUT_VNIC_STATE_DROP
- /*
- * The metadata format type used in all the RX packet completions
- * going through this VNIC. This value is product specific. Refer to
- * the L2 HSI completion ring structures for the detailed
- * descriptions. For Thor and Thor2, it corresponds to “meta_format”
- * in “rx_pkt_cmpl_hi” and “rx_pkt_v3_cmpl_hi”, respectively.
- */
- uint8_t metadata_format_type;
- #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_0 UINT32_C(0x0)
- #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_1 UINT32_C(0x1)
- #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_2 UINT32_C(0x2)
- #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_3 UINT32_C(0x3)
- #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_4 UINT32_C(0x4)
- #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_LAST \
- HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_4
- /*
- * The maximum receive unit of the vnic.
- * Each vnic is associated with a function.
- * The vnic mru value overwrites the mru setting of the
- * associated function.
- * The HWRM shall make sure that vnic mru does not exceed
- * the mru of the port the function is associated with.
- */
- uint16_t mru;
- uint8_t unused_1[4];
} __rte_packed;
-/* hwrm_vnic_update_output (size:128b/16B) */
-struct hwrm_vnic_update_output {
+/* hwrm_queue_adptv_qos_tx_feature_qcfg_output (size:128b/16B) */
+struct hwrm_queue_adptv_qos_tx_feature_qcfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -38106,25 +40916,121 @@ struct hwrm_vnic_update_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /*
+ * Bitmask indicating which TX CoS queues are enabled or disabled.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * A value of 0 indicates that the queue is not enabled.
+ * A value of 1 indicates that the queue is enabled.
+ */
+ uint8_t queue_enable;
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE \
+ UINT32_C(0x1)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_DISABLED \
+ UINT32_C(0x0)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED \
+ UINT32_C(0x1)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE \
+ UINT32_C(0x2)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 1)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 1)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE \
+ UINT32_C(0x4)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 2)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 2)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE \
+ UINT32_C(0x8)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 3)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 3)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE \
+ UINT32_C(0x10)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 4)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 4)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE \
+ UINT32_C(0x20)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 5)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 5)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE \
+ UINT32_C(0x40)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 6)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 6)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE \
+ UINT32_C(0x80)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 7)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 7)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED
+ uint8_t unused_0[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
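As an illustration of how the queue_enable bitmask above might be consumed, the hedged sketch below decodes it for a single queue ID. It assumes this header is included, that the response was returned through the driver's normal HWRM path with its valid byte already observed as 1, and the helper name is invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include "hsi_struct_def_dpdk.h"

/* Illustrative helper, not part of the generated header: report whether TX
 * CoS queue 'qid' (0-7) is enabled according to a completed
 * hwrm_queue_adptv_qos_tx_feature_qcfg response.
 */
static bool
tx_cos_queue_enabled(const struct hwrm_queue_adptv_qos_tx_feature_qcfg_output *resp,
		     unsigned int qid)
{
	if (qid > 7)
		return false;
	/* Bit 0 corresponds to queue 0, bit 7 to queue 7. */
	return (resp->queue_enable & (UINT32_C(1) << qid)) != 0;
}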
-/******************
- * hwrm_vnic_free *
- ******************/
+/***************************************
+ * hwrm_queue_adptv_qos_tx_feature_cfg *
+ ***************************************/
-/* hwrm_vnic_free_input (size:192b/24B) */
-struct hwrm_vnic_free_input {
+/* hwrm_queue_adptv_qos_tx_feature_cfg_input (size:192b/24B) */
+struct hwrm_queue_adptv_qos_tx_feature_cfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -38153,13 +41059,112 @@ struct hwrm_vnic_free_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Logical vnic ID */
- uint32_t vnic_id;
- uint8_t unused_0[4];
+ uint32_t enables;
+ /* This bit must be '1' for the queue_enable field to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_ENABLES_QUEUE_ENABLE \
+ UINT32_C(0x1)
+ /*
+ * Bitmask indicating which TX CoS queues are enabled or disabled.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * A value of 0 indicates that the queue is not enabled.
+ * A value of 1 indicates that the queue is enabled.
+ */
+ uint8_t queue_enable;
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE \
+ UINT32_C(0x1)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_DISABLED \
+ UINT32_C(0x0)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED \
+ UINT32_C(0x1)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE \
+ UINT32_C(0x2)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 1)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 1)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE \
+ UINT32_C(0x4)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 2)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 2)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE \
+ UINT32_C(0x8)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 3)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 3)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE \
+ UINT32_C(0x10)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 4)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 4)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE \
+ UINT32_C(0x20)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 5)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 5)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE \
+ UINT32_C(0x40)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 6)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 6)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE \
+ UINT32_C(0x80)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_DISABLED \
+ (UINT32_C(0x0) << 7)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED \
+ (UINT32_C(0x1) << 7)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_LAST \
+ HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED
+ uint8_t unused_0[3];
} __rte_packed;
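A hedged sketch of filling in this request follows. It only touches the enables and queue_enable fields defined above; the HWRM header fields (req_type, seq_id, resp_addr) and the actual mailbox send are assumed to be handled by the driver's usual request path, and the function name is made up for the example.

#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Illustrative only: request that TX CoS queues 0 and 1 be enabled. */
static void
prep_tx_feature_cfg_q0_q1(struct hwrm_queue_adptv_qos_tx_feature_cfg_input *req)
{
	memset(req, 0, sizeof(*req));
	/* Tell the firmware that the queue_enable field carries valid data. */
	req->enables = rte_cpu_to_le_32(
		HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_ENABLES_QUEUE_ENABLE);
	/* Bit 0 enables queue 0, bit 1 enables queue 1. */
	req->queue_enable =
		HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED |
		HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED;
}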
-/* hwrm_vnic_free_output (size:128b/16B) */
-struct hwrm_vnic_free_output {
+/* hwrm_queue_adptv_qos_tx_feature_cfg_output (size:128b/16B) */
+struct hwrm_queue_adptv_qos_tx_feature_cfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -38171,21 +41176,21 @@ struct hwrm_vnic_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
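The 'valid' byte convention documented in these output records can be illustrated with a small polling sketch. This is only a hedged restatement of the ordering requirement, not the driver's actual completion path; the loop bound and delay are arbitrary placeholders.

#include <stdbool.h>
#include <stdint.h>
#include <rte_atomic.h>
#include <rte_cycles.h>

/* Illustrative only: spin until the firmware writes '1' to the response's
 * last byte, then issue a read barrier so that loads of the rest of the
 * response cannot be observed ahead of the valid check.
 */
static bool
wait_hwrm_resp_valid(const volatile uint8_t *valid)
{
	unsigned int i;

	for (i = 0; i < 100000; i++) {	/* arbitrary bound for the sketch */
		if (*valid == 1) {
			rte_rmb();
			return true;
		}
		rte_delay_us(1);
	}
	return false;
}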
-/*****************
- * hwrm_vnic_cfg *
- *****************/
+/********************
+ * hwrm_queue_qcaps *
+ ********************/
-/* hwrm_vnic_cfg_input (size:384b/48B) */
-struct hwrm_vnic_cfg_input {
+/* hwrm_queue_qcaps_input (size:128b/16B) */
+struct hwrm_queue_qcaps_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -38214,286 +41219,266 @@ struct hwrm_vnic_cfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t flags;
+} __rte_packed;
+
+/* hwrm_queue_qcaps_output (size:256b/32B) */
+struct hwrm_queue_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Adaptive QoS RX feature parameter capability flags. */
+ uint32_t rx_feature_params;
/*
- * When this bit is '1', the VNIC is requested to
- * be the default VNIC for the function.
+ * When this bit is '1' the capability to configure queue_enable
+ * is supported.
+ * If set to '0', then the capability to configure queue_enable
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT \
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_FEATURE_PARAMS_QUEUE_ENABLE_CAP \
UINT32_C(0x1)
/*
- * When this bit is '1', the VNIC is being configured to
- * strip VLAN in the RX path.
- * If set to '0', then VLAN stripping is disabled on
- * this VNIC.
+ * When this bit is '1' the capability to configure queue_mode
+ * is supported.
+ * If set to '0', then the capability to configure queue_mode
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE \
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_FEATURE_PARAMS_QUEUE_MODE_CAP \
UINT32_C(0x2)
+ /* Adaptive QoS TX feature parameter capability flags. */
+ uint32_t tx_feature_params;
/*
- * When this bit is '1', the VNIC is being configured to
- * buffer receive packets in the hardware until the host
- * posts new receive buffers.
- * If set to '0', then bd_stall is being configured to be
- * disabled on this VNIC.
- */
- #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE \
- UINT32_C(0x4)
- /*
- * When this bit is '1', the VNIC is being configured to
- * receive both RoCE and non-RoCE traffic.
- * If set to '0', then this VNIC is not configured to be
- * operating in dual VNIC mode.
- */
- #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE \
- UINT32_C(0x8)
- /*
- * When this flag is set to '1', the VNIC is requested to
- * be configured to receive only RoCE traffic.
- * If this flag is set to '0', then this flag shall be
- * ignored by the HWRM.
- * If roce_dual_vnic_mode flag is set to '1'
- * or roce_mirroring_capable_vnic_mode flag to 1,
- * then the HWRM client shall not set this flag to '1'.
- */
- #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE \
- UINT32_C(0x10)
- /*
- * When a VNIC uses one destination ring group for certain
- * application (e.g. Receive Flow Steering) where
- * exact match is used to direct packets to a VNIC with one
- * destination ring group only, there is no need to configure
- * RSS indirection table for that VNIC as only one destination
- * ring group is used.
- *
- * This flag is used to enable a mode where
- * RSS is enabled in the VNIC using a RSS context
- * for computing RSS hash but the RSS indirection table is
- * not configured using hwrm_vnic_rss_cfg.
- *
- * If this mode is enabled, then the driver should not program
- * RSS indirection table for the RSS context that is used for
- * computing RSS hash only.
- */
- #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE \
- UINT32_C(0x20)
- /*
- * When this bit is '1', the VNIC is being configured to
- * receive both RoCE and non-RoCE traffic, but forward only the
- * RoCE traffic further. Also, RoCE traffic can be mirrored to
- * L2 driver.
+ * When this bit is '1' the capability to configure queue_enable
+ * is supported.
+ * If set to '0', then the capability to configure queue_enable
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \
- UINT32_C(0x40)
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_FEATURE_PARAMS_QUEUE_ENABLE_CAP \
+ UINT32_C(0x1)
/*
- * When this bit is '1' it enables ring selection using the incoming
- * spif and lcos for the packet.
+ * The maximum number of queues that can be configured on this device.
+ * Valid values range from 1 through 8.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_PORTCOS_MAPPING_MODE \
- UINT32_C(0x80)
- uint32_t enables;
+ uint8_t max_configurable_queues;
+ uint8_t unused_0[3];
+ /* Adaptive QoS RX tuning parameter capability flags. */
+ uint32_t rx_tuning_params;
/*
- * This bit must be '1' for the dflt_ring_grp field to be
- * configured.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP \
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_WFQ_COST_CAP \
UINT32_C(0x1)
/*
- * This bit must be '1' for the rss_rule field to be
- * configured.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE \
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_WFQ_UPPER_FACTOR_CAP \
UINT32_C(0x2)
/*
- * This bit must be '1' for the cos_rule field to be
- * configured.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE \
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_HYST_WINDOW_SIZE_FACTOR_CAP \
UINT32_C(0x4)
/*
- * This bit must be '1' for the lb_rule field to be
- * configured.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE \
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_PCIE_BW_EFF_CAP \
UINT32_C(0x8)
/*
- * This bit must be '1' for the mru field to be
- * configured.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU \
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_XOFF_HEADROOM_FACTOR_CAP \
UINT32_C(0x10)
/*
- * This bit must be '1' for the default_rx_ring_id field to be
- * configured.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID \
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_L2_MIN_LATENCY_CAP \
UINT32_C(0x20)
/*
- * This bit must be '1' for the default_cmpl_ring_id field to be
- * configured.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID \
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_L2_MAX_LATENCY_CAP \
UINT32_C(0x40)
- /* This bit must be '1' for the queue_id field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID \
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_ROCE_MIN_LATENCY_CAP \
UINT32_C(0x80)
- /* This bit must be '1' for the rx_csum_v2_mode field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_RX_CSUM_V2_MODE \
- UINT32_C(0x100)
- /* This bit must be '1' for the l2_cqe_mode field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_L2_CQE_MODE \
- UINT32_C(0x200)
- /* Logical vnic ID */
- uint16_t vnic_id;
/*
- * Default Completion ring for the VNIC. This ring will
- * be chosen if packet does not match any RSS rules and if
- * there is no COS rule.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- uint16_t dflt_ring_grp;
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_ROCE_MAX_LATENCY_CAP \
+ UINT32_C(0x100)
/*
- * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
- * there is no RSS rule.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- uint16_t rss_rule;
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_L2_PIPE_COS_LATENCY_CAP \
+ UINT32_C(0x200)
/*
- * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
- * there is no COS rule.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- uint16_t cos_rule;
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_ROCE_PIPE_COS_LATENCY_CAP \
+ UINT32_C(0x400)
/*
- * RSS ID for load balancing rule/table structure.
- * 0xFF... (All Fs) if there is no LB rule.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- uint16_t lb_rule;
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_COS_SHARED_MIN_RATIO_CAP \
+ UINT32_C(0x800)
/*
- * The maximum receive unit of the vnic.
- * Each vnic is associated with a function.
- * The vnic mru value overwrites the mru setting of the
- * associated function.
- * The HWRM shall make sure that vnic mru does not exceed
- * the mru of the port the function is associated with.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- uint16_t mru;
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_RSVD_CELLS_LIMIT_RATIO_CAP \
+ UINT32_C(0x1000)
/*
- * Default Rx ring for the VNIC. This ring will
- * be chosen if packet does not match any RSS rules.
- * The aggregation ring associated with the Rx ring is
- * implied based on the Rx ring specified when the
- * aggregation ring was allocated.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- uint16_t default_rx_ring_id;
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_SHAPER_REFILL_TIMER_CAP \
+ UINT32_C(0x2000)
+ /* Adaptive QoS TX tuning parameter capability flags. */
+ uint32_t tx_tuning_params;
/*
- * Default completion ring for the VNIC. This ring will
- * be chosen if packet does not match any RSS rules.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- uint16_t default_cmpl_ring_id;
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_WFQ_COST_CAP \
+ UINT32_C(0x1)
/*
- * When specified, only incoming packets classified to the specified CoS
- * queue ID will be arriving on this VNIC. Packet priority to CoS mapping
- * rules can be specified using HWRM_QUEUE_PRI2COS_CFG. In this mode,
- * ntuple filters with VNIC destination specified are invalid since they
- * conflict with the CoS to VNIC steering rules in this mode.
- *
- * If this field is not specified, packet to VNIC steering will be
- * subject to the standard L2 filter rules and any additional ntuple
- * filter rules with destination VNIC specified.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- uint16_t queue_id;
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_WFQ_UPPER_FACTOR_CAP \
+ UINT32_C(0x2)
/*
- * If the device supports the RX V2 and RX TPA start V2 completion
- * records as indicated by the HWRM_VNIC_QCAPS command, this field is
- * used to specify the two RX checksum modes supported by these
- * completion records.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- uint8_t rx_csum_v2_mode;
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_HYST_WINDOW_SIZE_FACTOR_CAP \
+ UINT32_C(0x4)
/*
- * When configured with this checksum mode, the number of header
- * groups in the delivered packet with a valid IP checksum and
- * the number of header groups in the delivered packet with a valid
- * L4 checksum are reported. Valid checksums are counted from the
- * outermost header group to the innermost header group, stopping at
- * the first error. This is the default checksum mode supported if
- * the driver doesn't explicitly configure the RX checksum mode.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_DEFAULT UINT32_C(0x0)
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_RSVD_CELLS_LIMIT_RATIO_CAP \
+ UINT32_C(0x8)
/*
- * When configured with this checksum mode, the checksum status is
- * reported using 'all ok' mode. In the RX completion record, one
- * bit indicates if the IP checksum is valid for all the parsed
- * header groups with an IP checksum. Another bit indicates if the
- * L4 checksum is valid for all the parsed header groups with an L4
- * checksum. The number of header groups that were parsed by the
- * chip and passed in the delivered packet is also reported.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_ALL_OK UINT32_C(0x1)
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_L2_MIN_LATENCY_CAP \
+ UINT32_C(0x10)
/*
- * Any rx_csum_v2_mode value larger than or equal to this is not
- * valid
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_MAX UINT32_C(0x2)
- #define HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_LAST \
- HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_MAX
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_L2_MAX_LATENCY_CAP \
+ UINT32_C(0x20)
/*
- * If the device supports different L2 RX CQE modes, as indicated by
- * the HWRM_VNIC_QCAPS command, this field is used to configure the
- * CQE mode.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- uint8_t l2_cqe_mode;
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_ROCE_MIN_LATENCY_CAP \
+ UINT32_C(0x40)
/*
- * When configured with this cqe mode, A normal (32B) CQE
- * will be generated. This is the default mode.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_L2_CQE_MODE_DEFAULT UINT32_C(0x0)
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_ROCE_MAX_LATENCY_CAP \
+ UINT32_C(0x80)
/*
- * When configured with this cqe mode, A compressed (16B) CQE
- * will be generated. In this mode TPA and HDS are not supported.
- * Host drivers should not configure the TPA and HDS along with
- * compressed mode, per VNIC. FW returns error, if host drivers
- * try to configure the VNIC with compressed mode and (TPA or HDS).
- * The compressed completion does not include PTP data. Host
- * drivers should not use this mode to receive the PTP data.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_L2_CQE_MODE_COMPRESSED UINT32_C(0x1)
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_MAX_TBM_CELLS_PRERESERVED_CAP \
+ UINT32_C(0x100)
/*
- * When configured with this cqe mode, HW generates either a 32B
- * completion or a 16B completion depending on use case within a
- * VNIC. For ex. a simple L2 packet could use the compressed form
- * while a PTP packet on the same VNIC would use the 32B form.
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
*/
- #define HWRM_VNIC_CFG_INPUT_L2_CQE_MODE_MIXED UINT32_C(0x2)
- #define HWRM_VNIC_CFG_INPUT_L2_CQE_MODE_LAST \
- HWRM_VNIC_CFG_INPUT_L2_CQE_MODE_MIXED
- uint8_t unused0[4];
-} __rte_packed;
-
-/* hwrm_vnic_cfg_output (size:128b/16B) */
-struct hwrm_vnic_cfg_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_SHAPER_REFILL_TIMER_CAP \
+ UINT32_C(0x200)
+ uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
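To show how these capability flags might gate later configuration, here is a hedged example that tests one RX tuning capability bit; the helper name is invented and the response is assumed to have been obtained through the usual HWRM query path.

#include <stdbool.h>
#include <stdint.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Illustrative only: check whether the RX shaper_refill_timer tuning
 * parameter is configurable before issuing
 * hwrm_queue_adptv_qos_rx_tuning_cfg with that enable bit set.
 */
static bool
rx_shaper_refill_timer_cap(const struct hwrm_queue_qcaps_output *resp)
{
	uint32_t params = rte_le_to_cpu_32(resp->rx_tuning_params);

	return (params &
		HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_SHAPER_REFILL_TIMER_CAP) != 0;
}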
-/******************
- * hwrm_vnic_qcfg *
- ******************/
+/***************************************
+ * hwrm_queue_adptv_qos_rx_tuning_qcfg *
+ ***************************************/
-/* hwrm_vnic_qcfg_input (size:256b/32B) */
-struct hwrm_vnic_qcfg_input {
+/* hwrm_queue_adptv_qos_rx_tuning_qcfg_input (size:128b/16B) */
+struct hwrm_queue_adptv_qos_rx_tuning_qcfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -38522,21 +41507,10 @@ struct hwrm_vnic_qcfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t enables;
- /*
- * This bit must be '1' for the vf_id_valid field to be
- * configured.
- */
- #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
- /* Logical vnic ID */
- uint32_t vnic_id;
- /* ID of Virtual Function whose VNIC resource is being queried. */
- uint16_t vf_id;
- uint8_t unused_0[6];
} __rte_packed;
-/* hwrm_vnic_qcfg_output (size:256b/32B) */
-struct hwrm_vnic_qcfg_output {
+/* hwrm_queue_adptv_qos_rx_tuning_qcfg_output (size:576b/72B) */
+struct hwrm_queue_adptv_qos_rx_tuning_qcfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -38545,199 +41519,276 @@ struct hwrm_vnic_qcfg_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Default Completion ring for the VNIC. */
- uint16_t dflt_ring_grp;
+ /* Indicates max credit as required by hardware. */
+ uint32_t wfq_cost;
/*
- * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
- * there is no RSS rule.
+ * Specifies a factor that determines the upper bound for each
+ * cos_wfq_credit_weight.
*/
- uint16_t rss_rule;
+ uint32_t wfq_upper_factor;
/*
- * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
- * there is no COS rule.
+ * The algorithm multiplies this factor by the MRU size to compute the
+ * hysteresis window size which in turn is used in deassert
+ * threshold calculations.
*/
- uint16_t cos_rule;
+ uint32_t hyst_window_size_factor;
/*
- * RSS ID for load balancing rule/table structure.
- * 0xFF... (All Fs) if there is no LB rule.
+ * Specifies PCIe BW efficiency in the range of 0-100%. System
+ * characterization determines the value of this parameter. A value of
+ * less than 100% accounts for internal PCIe over-subscription. The
+ * algorithm uses this parameter to determine the PCIe BW available
+ * for transferring received packets to the host.
*/
- uint16_t lb_rule;
- /* The maximum receive unit of the vnic. */
- uint16_t mru;
- uint8_t unused_0[2];
- uint32_t flags;
+ uint32_t pcie_bw_eff;
+ /* Scales the number of cells for xoff. */
+ uint32_t xoff_headroom_factor;
/*
- * When this bit is '1', the VNIC is the default VNIC for
- * the function.
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT \
- UINT32_C(0x1)
+ uint32_t l2_min_latency;
/*
- * When this bit is '1', the VNIC is configured to
- * strip VLAN in the RX path.
- * If set to '0', then VLAN stripping is disabled on
- * this VNIC.
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE \
- UINT32_C(0x2)
+ uint32_t l2_max_latency;
/*
- * When this bit is '1', the VNIC is configured to
- * buffer receive packets in the hardware until the host
- * posts new receive buffers.
- * If set to '0', then bd_stall is disabled on
- * this VNIC.
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE \
- UINT32_C(0x4)
+ uint32_t roce_min_latency;
/*
- * When this bit is '1', the VNIC is configured to
- * receive both RoCE and non-RoCE traffic.
- * If set to '0', then this VNIC is not configured to
- * operate in dual VNIC mode.
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE \
- UINT32_C(0x8)
+ uint32_t roce_max_latency;
/*
- * When this flag is set to '1', the VNIC is configured to
- * receive only RoCE traffic.
- * When this flag is set to '0', the VNIC is not configured
- * to receive only RoCE traffic.
- * If roce_dual_vnic_mode flag and this flag both are set
- * to '1', then it is an invalid configuration of the
- * VNIC. The HWRM should not allow that type of
- * mis-configuration by HWRM clients.
+ * The algorithm uses this parameter to calculate the number of cells
+ * to be excluded from the total buffer pool to account for the
+ * latency of pipeline post RE_DEC to PCIe block. Its value is derived
+ * from system characterization.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE \
- UINT32_C(0x10)
+ uint32_t l2_pipe_cos_latency;
/*
- * When a VNIC uses one destination ring group for certain
- * application (e.g. Receive Flow Steering) where
- * exact match is used to direct packets to a VNIC with one
- * destination ring group only, there is no need to configure
- * RSS indirection table for that VNIC as only one destination
- * ring group is used.
- *
- * When this bit is set to '1', then the VNIC is enabled in a
- * mode where RSS is enabled in the VNIC using a RSS context
- * for computing RSS hash but the RSS indirection table is
- * not configured.
+ * The algorithm uses this parameter to calculate the number of cells
+ * to be excluded from the total buffer pool to account for the
+ * latency of pipeline post RE_DEC to PCIe block. Its value is derived
+ * from system characterization.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE \
- UINT32_C(0x20)
+ uint32_t roce_pipe_cos_latency;
+ /* Sets the minimum number of shared cells each cos queue can have. */
+ uint32_t cos_shared_min_ratio;
/*
- * When this bit is '1', the VNIC is configured to
- * receive both RoCE and non-RoCE traffic, but forward only
- * RoCE traffic further. Also RoCE traffic can be mirrored to
- * L2 driver.
+ * The parameter limits the total reserved cells. If the computed
+ * total reserved cells becomes larger than rsvd_cells_limit_ratio x
+ * port_cells_avail, then the reserved cells are set to the limit
+ * value. Its range of values is 0-50%.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \
- UINT32_C(0x40)
+ uint32_t rsvd_cells_limit_ratio;
/*
- * When this bit is '0', VNIC is in normal operation state.
- * When this bit is '1', VNIC drops all the received packets.
+ * This parameter is used to compute the time interval for
+ * replenishing the shaper credit buckets for all RX cos queues.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_OPERATION_STATE \
+ uint32_t shaper_refill_timer;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
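The hyst_window_size_factor comment above describes a multiplication against the MRU; the sketch below is only a worked illustration of that sentence, with the MRU passed in as a hypothetical caller-supplied value and no claim about the exact units the firmware uses.

#include <stdint.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Illustrative only: hysteresis window size = factor * MRU, per the field
 * description; 'mru' is not a field of this record.
 */
static uint64_t
rx_hyst_window_size(const struct hwrm_queue_adptv_qos_rx_tuning_qcfg_output *resp,
		    uint32_t mru)
{
	return (uint64_t)rte_le_to_cpu_32(resp->hyst_window_size_factor) * mru;
}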
+
+/**************************************
+ * hwrm_queue_adptv_qos_rx_tuning_cfg *
+ **************************************/
+
+
+/* hwrm_queue_adptv_qos_rx_tuning_cfg_input (size:640b/80B) */
+struct hwrm_queue_adptv_qos_rx_tuning_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_WFQ_COST \
+ UINT32_C(0x1)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_WFQ_UPPER_FACTOR \
+ UINT32_C(0x2)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_HYST_WINDOW_SIZE_FACTOR \
+ UINT32_C(0x4)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_PCIE_BW_EFF \
+ UINT32_C(0x8)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_XOFF_HEADROOM_FACTOR \
+ UINT32_C(0x10)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_L2_MIN_LATENCY \
+ UINT32_C(0x20)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_L2_MAX_LATENCY \
+ UINT32_C(0x40)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_ROCE_MIN_LATENCY \
UINT32_C(0x80)
- /* When this bit is '1' it indicates port cos_mapping_mode enabled. */
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_PORTCOS_MAPPING_MODE \
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_ROCE_MAX_LATENCY \
UINT32_C(0x100)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_L2_PIPE_COS_LATENCY \
+ UINT32_C(0x200)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_ROCE_PIPE_COS_LATENCY \
+ UINT32_C(0x400)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_COS_SHARED_MIN_RATIO \
+ UINT32_C(0x800)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_RSVD_CELLS_LIMIT_RATIO \
+ UINT32_C(0x1000)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_SHAPER_REFILL_TIMER \
+ UINT32_C(0x2000)
+ /* Indicates max credit as required by hardware. */
+ uint32_t wfq_cost;
/*
- * When returned with a valid CoS Queue id, the CoS Queue/VNIC association
- * is valid. Otherwise it will return 0xFFFF to indicate no VNIC/CoS
- * queue association.
+ * Specifies a factor that determines the upper bound for each
+ * cos_wfq_credit_weight.
*/
- uint16_t queue_id;
+ uint32_t wfq_upper_factor;
/*
- * If the device supports the RX V2 and RX TPA start V2 completion
- * records as indicated by the HWRM_VNIC_QCAPS command, this field is
- * used to specify the current RX checksum mode configured for all the
- * RX rings of a VNIC.
+ * The algorithm multiplies this factor by the MRU size to compute the
+ * hysteresis window size which in turn is used in deassert
+ * threshold calculations.
*/
- uint8_t rx_csum_v2_mode;
+ uint32_t hyst_window_size_factor;
/*
- * This value indicates that the VNIC is configured to use the
- * default RX checksum mode for all the rings associated with this
- * VNIC.
+ * Specifies PCIe BW efficiency in the range of 0-100%. System
+ * characterization determines the value of this parameter. A value of
+ * less than 100% accounts for internal PCIe over-subscription. The
+ * algorithm uses this parameter to determine the PCIe BW available
+ * for transferring received packets to the host.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_RX_CSUM_V2_MODE_DEFAULT UINT32_C(0x0)
+ uint32_t pcie_bw_eff;
+ /* Scales the number of cells for xoff. */
+ uint32_t xoff_headroom_factor;
/*
- * This value indicates that the VNIC is configured to use the RX
- * checksum ‘all_ok’ mode for all the rings associated with this
- * VNIC.
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_RX_CSUM_V2_MODE_ALL_OK UINT32_C(0x1)
+ uint32_t l2_min_latency;
/*
- * Any rx_csum_v2_mode value larger than or equal to this is not
- * valid
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_RX_CSUM_V2_MODE_MAX UINT32_C(0x2)
- #define HWRM_VNIC_QCFG_OUTPUT_RX_CSUM_V2_MODE_LAST \
- HWRM_VNIC_QCFG_OUTPUT_RX_CSUM_V2_MODE_MAX
+ uint32_t l2_max_latency;
/*
- * If the device supports different L2 RX CQE modes, as indicated by
- * the HWRM_VNIC_QCAPS command, this field is used to convey the
- * configured CQE mode.
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
*/
- uint8_t l2_cqe_mode;
+ uint32_t roce_min_latency;
/*
- * This value indicates that the VNIC is configured with normal
- * (32B) CQE mode.
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_DEFAULT UINT32_C(0x0)
+ uint32_t roce_max_latency;
/*
- * This value indicates that the VNIC is configured with compressed
- * (16B) CQE mode.
+ * The algorithm uses this parameter to calculate the number of cells
+ * to be excluded from the total buffer pool to account for the
+ * latency of pipeline post RE_DEC to PCIe block. Its value is derived
+ * from system characterization.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_COMPRESSED UINT32_C(0x1)
+ uint32_t l2_pipe_cos_latency;
/*
- * This value indicates that the VNIC is configured with mixed
- * CQE mode. HW generates either a 32B completion or a 16B
- * completion depending on use case within a VNIC.
+ * The algorithm uses this parameter to calculate the number of cells
+ * to be excluded from the total buffer pool to account for the
+ * latency of pipeline post RE_DEC to PCIe block. Its value is derived
+ * from system characterization.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_MIXED UINT32_C(0x2)
- #define HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_LAST \
- HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_MIXED
+ uint32_t roce_pipe_cos_latency;
+ /* Sets the minimum number of shared cells each cos queue can have. */
+ uint32_t cos_shared_min_ratio;
/*
- * This field conveys the metadata format type that has been
- * configured. This value is product specific. Refer to the L2 HSI
- * completion ring structures for the detailed descriptions. For Thor
- * and Thor2, it corresponds to “meta_format” in “rx_pkt_cmpl_hi” and
- * “rx_pkt_v3_cmpl_hi”, respectively.
+ * The parameter limits the total reserved cells. If the computed
+ * total reserved cells becomes larger than rsvd_cells_limit_ratio x
+ * port_cells_avail, then the reserved cells are set to the limit
+ * value. Its range of values is 0-50%.
*/
- uint8_t metadata_format_type;
- #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_0 UINT32_C(0x0)
- #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_1 UINT32_C(0x1)
- #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_2 UINT32_C(0x2)
- #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_3 UINT32_C(0x3)
- #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_4 UINT32_C(0x4)
- #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_LAST \
- HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_4
- /* This field conveys the VNIC operation state. */
- uint8_t vnic_state;
- /* Normal operation state. */
- #define HWRM_VNIC_QCFG_OUTPUT_VNIC_STATE_NORMAL UINT32_C(0x0)
- /* Drop all packets. */
- #define HWRM_VNIC_QCFG_OUTPUT_VNIC_STATE_DROP UINT32_C(0x1)
- #define HWRM_VNIC_QCFG_OUTPUT_VNIC_STATE_LAST \
- HWRM_VNIC_QCFG_OUTPUT_VNIC_STATE_DROP
- uint8_t unused_1;
+ uint32_t rsvd_cells_limit_ratio;
+ /*
+ * This parameter is used to compute the time interval for
+ * replenishing the shaper credit buckets for all RX cos queues.
+ */
+ uint32_t shaper_refill_timer;
+ uint8_t unused_0[4];
+} __rte_packed;
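A hedged sketch of populating a subset of this request follows. Only the pcie_bw_eff and rsvd_cells_limit_ratio fields shown above are set, the example values are arbitrary, and the header fields plus the actual send are left to the driver's usual HWRM path.

#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Illustrative only: configure two of the RX tuning parameters. */
static void
prep_rx_tuning_cfg_example(struct hwrm_queue_adptv_qos_rx_tuning_cfg_input *req)
{
	memset(req, 0, sizeof(*req));
	req->enables = rte_cpu_to_le_32(
		HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_PCIE_BW_EFF |
		HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_RSVD_CELLS_LIMIT_RATIO);
	req->pcie_bw_eff = rte_cpu_to_le_32(90);		/* example value */
	req->rsvd_cells_limit_ratio = rte_cpu_to_le_32(25);	/* example value */
}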
+
+/* hwrm_queue_adptv_qos_rx_tuning_cfg_output (size:128b/16B) */
+struct hwrm_queue_adptv_qos_rx_tuning_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
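The rsvd_cells_limit_ratio description above amounts to a clamp; the following worked sketch restates it in code. The names total_rsvd_cells and port_cells_avail are placeholders taken from the prose, not fields of any structure in this header, and the ratio is treated as a plain percentage purely for the sake of the illustration.

#include <stdint.h>

/* Illustrative only: if the computed reserved-cell total exceeds
 * rsvd_cells_limit_ratio (%) of port_cells_avail, cap it at that limit.
 */
static uint32_t
clamp_total_rsvd_cells(uint32_t total_rsvd_cells, uint32_t port_cells_avail,
		       uint32_t rsvd_cells_limit_ratio)
{
	uint64_t limit = (uint64_t)port_cells_avail * rsvd_cells_limit_ratio / 100;

	return total_rsvd_cells > (uint32_t)limit ?
		(uint32_t)limit : total_rsvd_cells;
}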
-/*******************
- * hwrm_vnic_qcaps *
- *******************/
+/***************************************
+ * hwrm_queue_adptv_qos_tx_tuning_qcfg *
+ ***************************************/
-/* hwrm_vnic_qcaps_input (size:192b/24B) */
-struct hwrm_vnic_qcaps_input {
+/* hwrm_queue_adptv_qos_tx_tuning_qcfg_input (size:128b/16B) */
+struct hwrm_queue_adptv_qos_tx_tuning_qcfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -38766,12 +41817,10 @@ struct hwrm_vnic_qcaps_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t enables;
- uint8_t unused_0[4];
} __rte_packed;
-/* hwrm_vnic_qcaps_output (size:192b/24B) */
-struct hwrm_vnic_qcaps_output {
+/* hwrm_queue_adptv_qos_tx_tuning_qcfg_output (size:448b/56B) */
+struct hwrm_queue_adptv_qos_tx_tuning_qcfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -38780,262 +41829,276 @@ struct hwrm_vnic_qcaps_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* The maximum receive unit that is settable on a vnic. */
- uint16_t mru;
- uint8_t unused_0[2];
- uint32_t flags;
- /* Unused. */
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_UNUSED \
- UINT32_C(0x1)
- /*
- * When this bit is '1', the capability of stripping VLAN in
- * the RX path is supported on VNIC(s).
- * If set to '0', then VLAN stripping capability is
- * not supported on VNIC(s).
- */
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP \
- UINT32_C(0x2)
- /*
- * When this bit is '1', the capability to buffer receive
- * packets in the hardware until the host posts new receive buffers
- * is supported on VNIC(s).
- * If set to '0', then bd_stall capability is not supported
- * on VNIC(s).
- */
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_BD_STALL_CAP \
- UINT32_C(0x4)
+ /* Indicates max credit as required by hardware. */
+ uint32_t wfq_cost;
/*
- * When this bit is '1', the capability to
- * receive both RoCE and non-RoCE traffic on VNIC(s) is
- * supported.
- * If set to '0', then the capability to receive
- * both RoCE and non-RoCE traffic on VNIC(s) is
- * not supported.
+ * Specifies a factor that determines the upper bound for each
+ * cos_wfq_credit_weight.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_DUAL_VNIC_CAP \
- UINT32_C(0x8)
+ uint32_t wfq_upper_factor;
/*
- * When this bit is set to '1', the capability to configure
- * a VNIC to receive only RoCE traffic is supported.
- * When this flag is set to '0', the VNIC capability to
- * configure to receive only RoCE traffic is not supported.
+ * The algorithm multiplies this factor by the MRU size to compute the
+ * hysteresis window size which in turn is used in deassert
+ * threshold calculations.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_ONLY_VNIC_CAP \
- UINT32_C(0x10)
+ uint32_t hyst_window_size_factor;
/*
- * When this bit is set to '1', then the capability to enable
- * a VNIC in a mode where RSS context without configuring
- * RSS indirection table is supported (for RSS hash computation).
- * When this bit is set to '0', then a VNIC can not be configured
- * with a mode to enable RSS context without configuring RSS
- * indirection table.
+ * The parameter limits the total reserved cells. If the computed
+ * total reserved cells becomes larger than rsvd_cells_limit_ratio x
+ * port_cells_avail, then the reserved cells are set to the limit
+ * value. Its range of values is 0-50%.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_DFLT_CR_CAP \
- UINT32_C(0x20)
+ uint32_t rsvd_cells_limit_ratio;
/*
- * When this bit is '1', the capability to
- * mirror the RoCE traffic is supported.
- * If set to '0', then the capability to mirror the
- * RoCE traffic is not supported.
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP \
- UINT32_C(0x40)
+ uint32_t l2_min_latency;
/*
- * When this bit is '1', the outermost RSS hashing capability
- * is supported. If set to '0', then the outermost RSS hashing
- * capability is not supported.
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP \
- UINT32_C(0x80)
+ uint32_t l2_max_latency;
/*
- * When this bit is '1', it indicates that firmware supports the
- * ability to steer incoming packets from one CoS queue to one
- * VNIC. This optional feature can then be enabled
- * using HWRM_VNIC_CFG on any VNIC. This feature is only
- * available when NVM option “enable_cos_classification” is set
- * to 1. If set to '0', firmware does not support this feature.
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP \
- UINT32_C(0x100)
+ uint32_t roce_min_latency;
/*
- * When this bit is '1', it indicates that HW and firmware supports
- * the use of RX V2 and RX TPA start V2 completion records for all
- * the RX rings of a VNIC. Once set, this feature is mandatory to
- * be used for the RX rings of the VNIC. Additionally, two new RX
- * checksum features supported by these completion records can be
- * configured using the HWRM_VNIC_CFG on a VNIC. If set to '0', the
- * HW and the firmware does not support this feature.
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP \
- UINT32_C(0x200)
+ uint32_t roce_max_latency;
+ /* Specifies the number of reserved cells TRP requires per cos queue. */
+ uint32_t max_tbm_cells_prereserved;
/*
- * When this bit is '1', it indicates that HW and firmware support
- * vnic state change. Host drivers can change the vnic state using
- * HWRM_VNIC_UPDATE. If set to '0', the HW and firmware do not
- * support this feature.
+ * This parameter is used to compute the time interval for
+ * replenishing the shaper credit buckets for all TX cos queues.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VNIC_STATE_CAP \
- UINT32_C(0x400)
+ uint32_t shaper_refill_timer;
+ uint8_t unused_0[7];
/*
- * When this bit is '1', it indicates that firmware supports
- * virtio-net functions default VNIC allocation using
- * HWRM_VNIC_ALLOC.
- * This capability is available only on Proxy VEE PF. If set to '0',
- * firmware does not support this feature.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP \
- UINT32_C(0x800)
+ uint8_t valid;
+} __rte_packed;
+
+/**************************************
+ * hwrm_queue_adptv_qos_tx_tuning_cfg *
+ **************************************/
+
+
+/* hwrm_queue_adptv_qos_tx_tuning_cfg_input (size:512b/64B) */
+struct hwrm_queue_adptv_qos_tx_tuning_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * When this bit is set '1', then the capability to configure the
- * metadata format in the RX completion is supported for the VNIC.
- * When this bit is set to '0', then the capability to configure
- * the metadata format in the RX completion is not supported for
- * the VNIC.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_METADATA_FORMAT_CAP \
- UINT32_C(0x1000)
+ uint16_t cmpl_ring;
/*
- * When this bit is set '1', it indicates that firmware returns
- * INVALID_PARAM error, if host drivers choose invalid hash type
- * bit combinations in vnic_rss_cfg.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_STRICT_HASH_TYPE_CAP \
- UINT32_C(0x2000)
+ uint16_t seq_id;
/*
- * When this bit is set '1', it indicates that firmware supports
- * the hash_type include and exclude flags in hwrm_vnic_rss_cfg.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_HASH_TYPE_DELTA_CAP \
- UINT32_C(0x4000)
+ uint16_t target_id;
/*
- * When this bit is '1', it indicates that HW is capable of using
- * Toeplitz algorithm. This mode uses Toeplitz algorithm and
- * provided Toeplitz hash key to hash the packets according to the
- * configured hash type and hash mode. The Toeplitz hash results and
- * the provided Toeplitz RSS indirection table are used to determine
- * the RSS rings.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP \
- UINT32_C(0x8000)
+ uint64_t resp_addr;
+ uint32_t enables;
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_WFQ_COST \
+ UINT32_C(0x1)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_WFQ_UPPER_FACTOR \
+ UINT32_C(0x2)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_HYST_WINDOW_SIZE_FACTOR \
+ UINT32_C(0x4)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_RSVD_CELLS_LIMIT_RATIO \
+ UINT32_C(0x8)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_L2_MIN_LATENCY \
+ UINT32_C(0x10)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_L2_MAX_LATENCY \
+ UINT32_C(0x20)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_ROCE_MIN_LATENCY \
+ UINT32_C(0x40)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_ROCE_MAX_LATENCY \
+ UINT32_C(0x80)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_MAX_TBM_CELLS_PRERESERVED \
+ UINT32_C(0x100)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_SHAPER_REFILL_TIMER \
+ UINT32_C(0x200)
+ /* Indicates max credit as required by hardware. */
+ uint32_t wfq_cost;
/*
- * When this bit is '1', it indicates that HW is capable of using
- * XOR algorithm. This mode uses XOR algorithm to hash the packets
- * according to the configured hash type and hash mode. The XOR
- * hash results and the provided XOR RSS indirection table are
- * used to determine the RSS rings. Host drivers provided hash key
- * is not honored in this mode.
+ * Specifies a factor that determines the upper bound for each
+ * cos_wfq_credit_weight.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RING_SELECT_MODE_XOR_CAP \
- UINT32_C(0x10000)
+ uint32_t wfq_upper_factor;
/*
- * When this bit is '1', it indicates that HW is capable of using
- * checksum algorithm. In this mode, HW uses inner packets checksum
- * algorithm to distribute the packets across the rings and Toeplitz
- * algorithm to calculate the hash to convey it in the RX
- * completions. Host drivers should provide Toeplitz hash key.
- * As HW uses innermost packets checksum to distribute the packets
- * across the rings, host drivers can't convey hash mode to choose
- * outer headers to calculate Toeplitz hash. FW will fail such
- * configuration.
+ * The algorithm multiplies this factor by the MRU size to compute the
+ * hysteresis window size, which in turn is used in deassert
+ * threshold calculations.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP \
- UINT32_C(0x20000)
+ uint32_t hyst_window_size_factor;
/*
- * When this bit is '1' HW supports hash calculation
- * based on IPV6 flow labels.
+ * This parameter limits the total number of reserved cells. If the
+ * computed total exceeds rsvd_cells_limit_ratio x port_cells_avail,
+ * then the reserved cells are set to the limit value. Its valid
+ * range is 0-50%.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPV6_FLOW_LABEL_CAP \
- UINT32_C(0x40000)
+ uint32_t rsvd_cells_limit_ratio;
/*
- * When this bit is '1', it indicates that HW and firmware supports
- * the use of RX V3 and RX TPA start V3 completion records for all
- * the RX rings of a VNIC. Once set, this feature is mandatory to
- * be used for the RX rings of the VNIC. If set to '0', the
- * HW and the firmware does not support this feature.
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V3_CAP \
- UINT32_C(0x80000)
+ uint32_t l2_min_latency;
/*
- * When this bit is '1' HW supports different RX CQE record types.
- * Host drivers can choose the mode based on their application
- * requirements like performance, TPA, HDS and PTP.
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_L2_CQE_MODE_CAP \
- UINT32_C(0x100000)
+ uint32_t l2_max_latency;
/*
- * When this bit is '1' HW supports hash calculation
- * based on IPv4 IPSEC AH SPI field.
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP \
- UINT32_C(0x200000)
+ uint32_t roce_min_latency;
/*
- * When this bit is '1' HW supports hash calculation
- * based on IPv4 IPSEC ESP SPI field.
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP \
- UINT32_C(0x400000)
+ uint32_t roce_max_latency;
+ /* Specifies the number of reserved cells TRP requires per cos queue. */
+ uint32_t max_tbm_cells_prereserved;
/*
- * When this bit is '1' HW supports hash calculation
- * based on IPv6 IPSEC AH SPI field.
+ * This parameter is used to compute the time interval for
+ * replenishing the shaper credit buckets for all TX cos queues.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP \
- UINT32_C(0x800000)
+ uint32_t shaper_refill_timer;
+ uint8_t unused_0[4];
+} __rte_packed;
+
+/* hwrm_queue_adptv_qos_tx_tuning_cfg_output (size:128b/16B) */
+struct hwrm_queue_adptv_qos_tx_tuning_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * When this bit is '1' HW supports hash calculation
- * based on IPv6 IPSEC ESP SPI field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP \
- UINT32_C(0x1000000)
+ uint8_t valid;
+} __rte_packed;
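/*
 * Illustrative sketch only, not part of the generated HSI: how a caller
 * might populate the adaptive QoS TX tuning request payload. The common
 * request header (req_type/cmpl_ring/seq_id/target_id/resp_addr) and the
 * mailbox send are assumed to be handled by the driver's usual HWRM
 * request path and are not shown. Requires <string.h> and
 * <rte_byteorder.h>.
 */
static void
example_fill_tx_tuning(struct hwrm_queue_adptv_qos_tx_tuning_cfg_input *req,
		       uint32_t l2_min_lat, uint32_t l2_max_lat)
{
	memset(req, 0, sizeof(*req));
	/* Firmware only applies fields whose enables bit is set. */
	req->enables = rte_cpu_to_le_32(
		HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_L2_MIN_LATENCY |
		HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_L2_MAX_LATENCY);
	/* Latency values are derived from system characterization. */
	req->l2_min_latency = rte_cpu_to_le_32(l2_min_lat);
	req->l2_max_latency = rte_cpu_to_le_32(l2_max_lat);
}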
+
+/**********************************
+ * hwrm_queue_pfcwd_timeout_qcaps *
+ **********************************/
+
+
+/* hwrm_queue_pfcwd_timeout_qcaps_input (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * When outermost_rss_cap is '1' and this bit is '1', the outermost
- * RSS hash mode may be set on a PF or trusted VF.
- * When outermost_rss_cap is '1' and this bit is '0', the outermost
- * RSS hash mode may be set on a PF.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP \
- UINT32_C(0x2000000)
+ uint16_t cmpl_ring;
/*
- * When this bit is '1' it indicates HW is capable of enabling ring
- * selection using the incoming spif and lcos for the packet.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_PORTCOS_MAPPING_MODE \
- UINT32_C(0x4000000)
+ uint16_t seq_id;
/*
- * When this bit is '1', it indicates controller enabled
- * RSS profile TCAM mode.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
*/
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_PROF_TCAM_MODE_ENABLED \
- UINT32_C(0x8000000)
- /* When this bit is '1' FW supports VNIC hash mode. */
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VNIC_RSS_HASH_MODE_CAP \
- UINT32_C(0x10000000)
- /* When this bit is set to '1', hardware supports tunnel TPA. */
- #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_HW_TUNNEL_TPA_CAP \
- UINT32_C(0x20000000)
+ uint16_t target_id;
/*
- * This field advertises the maximum concurrent TPA aggregations
- * supported by the VNIC on new devices that support TPA v2 or v3.
- * '0' means that both the TPA v2 and v3 are not supported.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t max_aggs_supported;
- uint8_t unused_1[5];
+ uint64_t resp_addr;
+} __rte_packed;
+
+/* hwrm_queue_pfcwd_timeout_qcaps_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Max configurable PFC watchdog timeout value in msec. */
+ uint32_t max_pfcwd_timeout;
+ uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/*********************
- * hwrm_vnic_tpa_cfg *
- *********************/
+/********************************
+ * hwrm_queue_pfcwd_timeout_cfg *
+ ********************************/
-/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
-struct hwrm_vnic_tpa_cfg_input {
+/* hwrm_queue_pfcwd_timeout_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcwd_timeout_cfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -39064,275 +42127,269 @@ struct hwrm_vnic_tpa_cfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t flags;
+ /* PFC watchdog timeout value in msec. */
+ uint32_t pfcwd_timeout_value;
+ uint8_t unused_0[4];
+} __rte_packed;
+
+/* hwrm_queue_pfcwd_timeout_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * When this bit is '1', the VNIC shall be configured to
- * perform transparent packet aggregation (TPA) of
- * non-tunneled TCP packets.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA \
- UINT32_C(0x1)
+ uint8_t valid;
+} __rte_packed;
+
+/*********************************
+ * hwrm_queue_pfcwd_timeout_qcfg *
+ *********************************/
+
+
+/* hwrm_queue_pfcwd_timeout_qcfg_input (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * When this bit is '1', the VNIC shall be configured to
- * perform transparent packet aggregation (TPA) of
- * tunneled TCP packets.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA \
- UINT32_C(0x2)
+ uint16_t cmpl_ring;
/*
- * When this bit is '1', the VNIC shall be configured to
- * perform transparent packet aggregation (TPA) according
- * to Windows Receive Segment Coalescing (RSC) rules.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE \
- UINT32_C(0x4)
+ uint16_t seq_id;
/*
- * When this bit is '1', the VNIC shall be configured to
- * perform transparent packet aggregation (TPA) according
- * to Linux Generic Receive Offload (GRO) rules.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO \
- UINT32_C(0x8)
+ uint16_t target_id;
/*
- * When this bit is '1', the VNIC shall be configured to
- * perform transparent packet aggregation (TPA) for TCP
- * packets with IP ECN set to non-zero.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN \
- UINT32_C(0x10)
- /*
- * When this bit is '1', the VNIC shall be configured to
- * perform transparent packet aggregation (TPA) for
- * GRE tunneled TCP packets only if all packets have the
- * same GRE sequence.
+ uint64_t resp_addr;
+} __rte_packed;
+
+/* hwrm_queue_pfcwd_timeout_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Current configured PFC watchdog timeout value in msec. */
+ uint32_t pfcwd_timeout_value;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
- UINT32_C(0x20)
+ uint8_t valid;
+} __rte_packed;
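/*
 * Illustrative sketch only, not part of the generated HSI: a caller might
 * first issue hwrm_queue_pfcwd_timeout_qcaps to learn the maximum supported
 * PFC watchdog timeout, clamp the requested value, and then build the
 * hwrm_queue_pfcwd_timeout_cfg request. The HWRM transport and the 'valid'
 * byte handshake are assumed to be handled elsewhere. Requires <string.h>
 * and <rte_byteorder.h>.
 */
static void
example_fill_pfcwd_cfg(const struct hwrm_queue_pfcwd_timeout_qcaps_output *qcaps,
		       struct hwrm_queue_pfcwd_timeout_cfg_input *req,
		       uint32_t timeout_msec)
{
	uint32_t max_msec = rte_le_to_cpu_32(qcaps->max_pfcwd_timeout);

	/* Clamp to the capability advertised by firmware. */
	if (timeout_msec > max_msec)
		timeout_msec = max_msec;
	memset(req, 0, sizeof(*req));
	req->pfcwd_timeout_value = rte_cpu_to_le_32(timeout_msec);
}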
+
+/*******************
+ * hwrm_vnic_alloc *
+ *******************/
+
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
+struct hwrm_vnic_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * When this bit is '1' and the GRO mode is enabled,
- * the VNIC shall be configured to
- * perform transparent packet aggregation (TPA) for
- * TCP/IPv4 packets with consecutively increasing IPIDs.
- * In other words, the last packet that is being
- * aggregated to an already existing aggregation context
- * shall have IPID 1 more than the IPID of the last packet
- * that was aggregated in that aggregation context.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK \
- UINT32_C(0x40)
+ uint16_t cmpl_ring;
/*
- * When this bit is '1' and the GRO mode is enabled,
- * the VNIC shall be configured to
- * perform transparent packet aggregation (TPA) for
- * TCP packets with the same TTL (IPv4) or Hop limit (IPv6)
- * value.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK \
- UINT32_C(0x80)
+ uint16_t seq_id;
/*
- * When this bit is '1' and the GRO mode is enabled,
- * the VNIC shall DMA payload data using GRO rules.
- * When this bit is '0', the VNIC shall DMA payload data
- * using the more efficient LRO rules of filling all
- * aggregation buffers.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_PACK_AS_GRO \
- UINT32_C(0x100)
- uint32_t enables;
+ uint16_t target_id;
/*
- * This bit must be '1' for the max_agg_segs field to be
- * configured.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1)
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * This bit must be '1' for the max_aggs field to be
- * configured.
+ * When this bit is '1', this VNIC is requested to
+ * be the default VNIC for this function.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2)
+ #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT \
+ UINT32_C(0x1)
/*
- * This bit must be '1' for the max_agg_timer field to be
- * configured.
+ * When this bit is '1', the proxy VEE PF is requesting
+ * allocation of a default VNIC on behalf of the virtio-net
+ * function given in the virtio_net_fid field.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4)
- /* deprecated bit. Do not use!!! */
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8)
+ #define HWRM_VNIC_ALLOC_INPUT_FLAGS_VIRTIO_NET_FID_VALID \
+ UINT32_C(0x2)
/*
- * This bit must be '1' for the tnl_tpa_en_bitmap field to be
- * configured.
+ * Virtio-net function's FID.
+ * This virtio-net function is requesting allocation of a default
+ * VNIC through the proxy VEE PF.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_TNL_TPA_EN \
- UINT32_C(0x10)
+ uint16_t virtio_net_fid;
+ uint8_t unused_0[2];
+} __rte_packed;
+
+/* hwrm_vnic_alloc_output (size:128b/16B) */
+struct hwrm_vnic_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* Logical vnic ID */
- uint16_t vnic_id;
+ uint32_t vnic_id;
+ uint8_t unused_0[3];
/*
- * This is the maximum number of TCP segments that can
- * be aggregated (unit is Log2). Max value is 31. On new
- * devices supporting TPA v2, the unit is multiples of 4 and
- * valid values are > 0 and <= 63.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t max_agg_segs;
- /* 1 segment */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
- /* 2 segments */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
- /* 4 segments */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
- /* 8 segments */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
- /* Any segment size larger than this is not valid */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_LAST \
- HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX
+ uint8_t valid;
+} __rte_packed;
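/*
 * Illustrative sketch only, not part of the generated HSI: requesting the
 * default VNIC for the calling function and extracting the 32-bit vnic_id
 * from the response. Sending the request and waiting for the response
 * 'valid' byte to read '1' follow the driver's normal HWRM flow, which is
 * assumed and not shown. Requires <string.h> and <rte_byteorder.h>.
 */
static void
example_fill_vnic_alloc(struct hwrm_vnic_alloc_input *req)
{
	memset(req, 0, sizeof(*req));
	/* Ask firmware to make this the function's default VNIC. */
	req->flags = rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
}

static uint32_t
example_read_vnic_id(const struct hwrm_vnic_alloc_output *resp)
{
	/* Only meaningful once resp->valid has been observed as '1'. */
	return rte_le_to_cpu_32(resp->vnic_id);
}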
+
+/********************
+ * hwrm_vnic_update *
+ ********************/
+
+
+/* hwrm_vnic_update_input (size:256b/32B) */
+struct hwrm_vnic_update_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This is the maximum number of aggregations this VNIC is
- * allowed (unit is Log2). Max value is 7. On new devices
- * supporting TPA v2, this is in unit of 1 and must be > 0
- * and <= max_aggs_supported in the hwrm_vnic_qcaps response
- * to enable TPA v2.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t max_aggs;
- /* 1 aggregation */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0)
- /* 2 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1)
- /* 4 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2)
- /* 8 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3)
- /* 16 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4)
- /* Any aggregation size larger than this is not valid */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7)
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_LAST \
- HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX
- uint8_t unused_0[2];
+ uint16_t cmpl_ring;
/*
- * This is the maximum amount of time allowed for
- * an aggregation context to complete after it was initiated.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t max_agg_timer;
+ uint16_t seq_id;
/*
- * This is the minimum amount of payload length required to
- * start an aggregation context. This field is deprecated and
- * should be set to 0. The minimum length is set by firmware
- * and can be queried using hwrm_vnic_tpa_qcfg.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
*/
- uint32_t min_agg_len;
+ uint16_t target_id;
/*
- * If the device supports hardware tunnel TPA feature, as indicated by
- * the HWRM_VNIC_QCAPS command, this field is used to configure the
- * tunnel types to be enabled. Each bit corresponds to a specific
- * tunnel type. If a bit is set to '1', then the associated tunnel
- * type is enabled; otherwise, it is disabled.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t tnl_tpa_en_bitmap;
+ uint64_t resp_addr;
+ /* Logical vnic ID */
+ uint32_t vnic_id;
+ uint32_t enables;
/*
- * When this bit is '1', enable VXLAN encapsulated packets for
- * aggregation.
+ * This bit must be '1' for the vnic_state field to be
+ * configured.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN \
+ #define HWRM_VNIC_UPDATE_INPUT_ENABLES_VNIC_STATE_VALID \
UINT32_C(0x1)
/*
- * When this bit is set to ‘1’, enable GENEVE encapsulated packets
- * for aggregation.
+ * This bit must be '1' for the mru field to be
+ * configured.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GENEVE \
+ #define HWRM_VNIC_UPDATE_INPUT_ENABLES_MRU_VALID \
UINT32_C(0x2)
/*
- * When this bit is set to ‘1’, enable NVGRE encapsulated packets
- * for aggregation..
+ * This bit must be '1' for the metadata_format_type field to be
+ * configured.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_NVGRE \
+ #define HWRM_VNIC_UPDATE_INPUT_ENABLES_METADATA_FORMAT_TYPE_VALID \
UINT32_C(0x4)
/*
- * When this bit is set to ‘1’, enable GRE encapsulated packets
- * for aggregation..
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GRE \
- UINT32_C(0x8)
- /*
- * When this bit is set to ‘1’, enable IPV4 encapsulated packets
- * for aggregation..
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_IPV4 \
- UINT32_C(0x10)
- /*
- * When this bit is set to ‘1’, enable IPV6 encapsulated packets
- * for aggregation..
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_IPV6 \
- UINT32_C(0x20)
- /*
- * When this bit is '1', enable VXLAN_GPE encapsulated packets for
- * aggregation.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN_GPE \
- UINT32_C(0x40)
- /*
- * When this bit is '1', enable VXLAN_CUSTOMER1 encapsulated packets
- * for aggregation.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN_CUST1 \
- UINT32_C(0x80)
- /*
- * When this bit is '1', enable GRE_CUSTOMER1 encapsulated packets
- * for aggregation.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GRE_CUST1 \
- UINT32_C(0x100)
- /*
- * When this bit is '1', enable UPAR1 encapsulated packets for
- * aggregation.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR1 \
- UINT32_C(0x200)
- /*
- * When this bit is '1', enable UPAR2 encapsulated packets for
- * aggregation.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR2 \
- UINT32_C(0x400)
- /*
- * When this bit is '1', enable UPAR3 encapsulated packets for
- * aggregation.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR3 \
- UINT32_C(0x800)
- /*
- * When this bit is '1', enable UPAR4 encapsulated packets for
- * aggregation.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR4 \
- UINT32_C(0x1000)
- /*
- * When this bit is '1', enable UPAR5 encapsulated packets for
- * aggregation.
- */
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR5 \
- UINT32_C(0x2000)
- /*
- * When this bit is '1', enable UPAR6 encapsulated packets for
- * aggregation.
+ * This will update the context variable with the same name if
+ * the corresponding enable is set.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR6 \
- UINT32_C(0x4000)
+ uint8_t vnic_state;
+ /* Normal operation state for the VNIC. */
+ #define HWRM_VNIC_UPDATE_INPUT_VNIC_STATE_NORMAL UINT32_C(0x0)
+ /* All packets are dropped in this state. */
+ #define HWRM_VNIC_UPDATE_INPUT_VNIC_STATE_DROP UINT32_C(0x1)
+ #define HWRM_VNIC_UPDATE_INPUT_VNIC_STATE_LAST \
+ HWRM_VNIC_UPDATE_INPUT_VNIC_STATE_DROP
/*
- * When this bit is '1', enable UPAR7 encapsulated packets for
- * aggregation.
+ * The metadata format type used in all the RX packet completions
+ * going through this VNIC. This value is product specific. Refer to
+ * the L2 HSI completion ring structures for the detailed
+ * descriptions. For Thor and Thor2, it corresponds to 'meta_format'
+ * in 'rx_pkt_cmpl_hi' and 'rx_pkt_v3_cmpl_hi', respectively.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR7 \
- UINT32_C(0x8000)
+ uint8_t metadata_format_type;
+ #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_0 UINT32_C(0x0)
+ #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_1 UINT32_C(0x1)
+ #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_2 UINT32_C(0x2)
+ #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_3 UINT32_C(0x3)
+ #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_4 UINT32_C(0x4)
+ #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_LAST \
+ HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_4
/*
- * When this bit is '1', enable UPAR8 encapsulated packets for
- * aggregation.
+ * The maximum receive unit of the vnic.
+ * Each vnic is associated with a function.
+ * The vnic mru value overwrites the mru setting of the
+ * associated function.
+ * The HWRM shall make sure that vnic mru does not exceed
+ * the mru of the port the function is associated with.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR8 \
- UINT32_C(0x10000)
+ uint16_t mru;
uint8_t unused_1[4];
} __rte_packed;
-/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
-struct hwrm_vnic_tpa_cfg_output {
+/* hwrm_vnic_update_output (size:128b/16B) */
+struct hwrm_vnic_update_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -39344,21 +42401,22 @@ struct hwrm_vnic_tpa_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
*/
uint8_t valid;
} __rte_packed;
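/*
 * Illustrative sketch only, not part of the generated HSI: quiescing a VNIC
 * by moving it to the DROP state and updating its MRU in one command. Only
 * the fields flagged in 'enables' are applied by firmware; the common
 * request header and the mailbox send are assumed to be handled by the
 * driver's usual HWRM request path. Requires <string.h> and
 * <rte_byteorder.h>.
 */
static void
example_fill_vnic_update(struct hwrm_vnic_update_input *req,
			 uint32_t vnic_id, uint16_t new_mru)
{
	memset(req, 0, sizeof(*req));
	req->vnic_id = rte_cpu_to_le_32(vnic_id);
	req->enables = rte_cpu_to_le_32(
		HWRM_VNIC_UPDATE_INPUT_ENABLES_VNIC_STATE_VALID |
		HWRM_VNIC_UPDATE_INPUT_ENABLES_MRU_VALID);
	req->vnic_state = HWRM_VNIC_UPDATE_INPUT_VNIC_STATE_DROP;
	req->mru = rte_cpu_to_le_16(new_mru);
}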
-/**********************
- * hwrm_vnic_tpa_qcfg *
- **********************/
+/******************
+ * hwrm_vnic_free *
+ ******************/
-/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
-struct hwrm_vnic_tpa_qcfg_input {
+/* hwrm_vnic_free_input (size:192b/24B) */
+struct hwrm_vnic_free_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -39388,12 +42446,12 @@ struct hwrm_vnic_tpa_qcfg_input {
*/
uint64_t resp_addr;
/* Logical vnic ID */
- uint16_t vnic_id;
- uint8_t unused_0[6];
+ uint32_t vnic_id;
+ uint8_t unused_0[4];
} __rte_packed;
-/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
-struct hwrm_vnic_tpa_qcfg_output {
+/* hwrm_vnic_free_output (size:128b/16B) */
+struct hwrm_vnic_free_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -39402,245 +42460,336 @@ struct hwrm_vnic_tpa_qcfg_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/*****************
+ * hwrm_vnic_cfg *
+ *****************/
+
+
+/* hwrm_vnic_cfg_input (size:384b/48B) */
+struct hwrm_vnic_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
uint32_t flags;
/*
- * When this bit is '1', the VNIC is configured to
- * perform transparent packet aggregation (TPA) of
- * non-tunneled TCP packets.
+ * When this bit is '1', the VNIC is requested to
+ * be the default VNIC for the function.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_TPA \
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT \
UINT32_C(0x1)
/*
- * When this bit is '1', the VNIC is configured to
- * perform transparent packet aggregation (TPA) of
- * tunneled TCP packets.
+ * When this bit is '1', the VNIC is being configured to
+ * strip VLAN in the RX path.
+ * If set to '0', then VLAN stripping is disabled on
+ * this VNIC.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_ENCAP_TPA \
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE \
UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC is configured to
- * perform transparent packet aggregation (TPA) according
- * to Windows Receive Segment Coalescing (RSC) rules.
+ * When this bit is '1', the VNIC is being configured to
+ * buffer receive packets in the hardware until the host
+ * posts new receive buffers.
+ * If set to '0', then bd_stall is being configured to be
+ * disabled on this VNIC.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_RSC_WND_UPDATE \
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE \
UINT32_C(0x4)
/*
- * When this bit is '1', the VNIC is configured to
- * perform transparent packet aggregation (TPA) according
- * to Linux Generic Receive Offload (GRO) rules.
+ * When this bit is '1', the VNIC is being configured to
+ * receive both RoCE and non-RoCE traffic.
+ * If set to '0', then this VNIC is not configured to
+ * operate in dual VNIC mode.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO \
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE \
UINT32_C(0x8)
/*
- * When this bit is '1', the VNIC is configured to
- * perform transparent packet aggregation (TPA) for TCP
- * packets with IP ECN set to non-zero.
+ * When this flag is set to '1', the VNIC is requested to
+ * be configured to receive only RoCE traffic.
+ * If this flag is set to '0', then this flag shall be
+ * ignored by the HWRM.
+ * If the roce_dual_vnic_mode flag is set to '1'
+ * or the roce_mirroring_capable_vnic_mode flag is set to '1',
+ * then the HWRM client shall not set this flag to '1'.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_ECN \
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE \
UINT32_C(0x10)
/*
- * When this bit is '1', the VNIC is configured to
- * perform transparent packet aggregation (TPA) for
- * GRE tunneled TCP packets only if all packets have the
- * same GRE sequence.
+ * When a VNIC uses one destination ring group for certain
+ * application (e.g. Receive Flow Steering) where
+ * exact match is used to direct packets to a VNIC with one
+ * destination ring group only, there is no need to configure
+ * RSS indirection table for that VNIC as only one destination
+ * ring group is used.
+ *
+ * This flag is used to enable a mode where
+ * RSS is enabled in the VNIC using a RSS context
+ * for computing RSS hash but the RSS indirection table is
+ * not configured using hwrm_vnic_rss_cfg.
+ *
+ * If this mode is enabled, then the driver should not program
+ * RSS indirection table for the RSS context that is used for
+ * computing RSS hash only.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE \
UINT32_C(0x20)
/*
- * When this bit is '1' and the GRO mode is enabled,
- * the VNIC is configured to
- * perform transparent packet aggregation (TPA) for
- * TCP/IPv4 packets with consecutively increasing IPIDs.
- * In other words, the last packet that is being
- * aggregated to an already existing aggregation context
- * shall have IPID 1 more than the IPID of the last packet
- * that was aggregated in that aggregation context.
+ * When this bit is '1', the VNIC is being configured to
+ * receive both RoCE and non-RoCE traffic, but forward only the
+ * RoCE traffic further. Also, RoCE traffic can be mirrored to
+ * L2 driver.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_IPID_CHECK \
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \
UINT32_C(0x40)
/*
- * When this bit is '1' and the GRO mode is enabled,
- * the VNIC is configured to
- * perform transparent packet aggregation (TPA) for
- * TCP packets with the same TTL (IPv4) or Hop limit (IPv6)
- * value.
+ * When this bit is '1' it enables ring selection using the incoming
+ * spif and lcos for the packet.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_TTL_CHECK \
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_PORTCOS_MAPPING_MODE \
UINT32_C(0x80)
+ uint32_t enables;
/*
- * This is the maximum number of TCP segments that can
- * be aggregated (unit is Log2). Max value is 31.
+ * This bit must be '1' for the dflt_ring_grp field to be
+ * configured.
*/
- uint16_t max_agg_segs;
- /* 1 segment */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
- /* 2 segments */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
- /* 4 segments */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
- /* 8 segments */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
- /* Any segment size larger than this is not valid */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_LAST \
- HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP \
+ UINT32_C(0x1)
/*
- * This is the maximum number of aggregations this VNIC is
- * allowed (unit is Log2). Max value is 7
+ * This bit must be '1' for the rss_rule field to be
+ * configured.
*/
- uint16_t max_aggs;
- /* 1 aggregation */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_1 UINT32_C(0x0)
- /* 2 aggregations */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_2 UINT32_C(0x1)
- /* 4 aggregations */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_4 UINT32_C(0x2)
- /* 8 aggregations */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_8 UINT32_C(0x3)
- /* 16 aggregations */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_16 UINT32_C(0x4)
- /* Any aggregation size larger than this is not valid */
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX UINT32_C(0x7)
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_LAST \
- HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE \
+ UINT32_C(0x2)
/*
- * This is the maximum amount of time allowed for
- * an aggregation context to complete after it was initiated.
+ * This bit must be '1' for the cos_rule field to be
+ * configured.
*/
- uint32_t max_agg_timer;
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE \
+ UINT32_C(0x4)
/*
- * This is the minimum amount of payload length required to
- * start an aggregation context.
+ * This bit must be '1' for the lb_rule field to be
+ * configured.
*/
- uint32_t min_agg_len;
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE \
+ UINT32_C(0x8)
/*
- * If the device supports hardware tunnel TPA feature, as indicated by
- * the HWRM_VNIC_QCAPS command, this field conveys the bitmap of the
- * tunnel types that have been configured. Each bit corresponds to a
- * specific tunnel type. If a bit is set to '1', then the associated
- * tunnel type is enabled; otherwise, it is disabled.
+ * This bit must be '1' for the mru field to be
+ * configured.
*/
- uint32_t tnl_tpa_en_bitmap;
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU \
+ UINT32_C(0x10)
/*
- * When this bit is '1', enable VXLAN encapsulated packets for
- * aggregation.
+ * This bit must be '1' for the default_rx_ring_id field to be
+ * configured.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_VXLAN \
- UINT32_C(0x1)
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID \
+ UINT32_C(0x20)
/*
- * When this bit is set to ‘1’, enable GENEVE encapsulated packets
- * for aggregation.
+ * This bit must be '1' for the default_cmpl_ring_id field to be
+ * configured.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_GENEVE \
- UINT32_C(0x2)
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID \
+ UINT32_C(0x40)
+ /* This bit must be '1' for the queue_id field to be configured. */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID \
+ UINT32_C(0x80)
/*
- * When this bit is set to ‘1’, enable NVGRE encapsulated packets
- * for aggregation..
+ * This bit must be '1' for the rx_csum_v2_mode field to be
+ * configured.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_NVGRE \
- UINT32_C(0x4)
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_RX_CSUM_V2_MODE \
+ UINT32_C(0x100)
+ /* This bit must be '1' for the l2_cqe_mode field to be configured. */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_L2_CQE_MODE \
+ UINT32_C(0x200)
+ /* Logical vnic ID */
+ uint16_t vnic_id;
/*
- * When this bit is set to ‘1’, enable GRE encapsulated packets
- * for aggregation..
+ * Default Completion ring for the VNIC. This ring will
+ * be chosen if packet does not match any RSS rules and if
+ * there is no COS rule.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_GRE \
- UINT32_C(0x8)
+ uint16_t dflt_ring_grp;
/*
- * When this bit is set to ‘1’, enable IPV4 encapsulated packets
- * for aggregation..
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * there is no RSS rule.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_IPV4 \
- UINT32_C(0x10)
+ uint16_t rss_rule;
/*
- * When this bit is set to ‘1’, enable IPV6 encapsulated packets
- * for aggregation..
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * there is no COS rule.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_IPV6 \
- UINT32_C(0x20)
+ uint16_t cos_rule;
/*
- * When this bit is '1', enable VXLAN_GPE encapsulated packets for
- * aggregation.
+ * RSS ID for load balancing rule/table structure.
+ * 0xFF... (All Fs) if there is no LB rule.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_VXLAN_GPE \
- UINT32_C(0x40)
+ uint16_t lb_rule;
/*
- * When this bit is '1', enable VXLAN_CUSTOMER1 encapsulated packets
- * for aggregation.
+ * The maximum receive unit of the vnic.
+ * Each vnic is associated with a function.
+ * The vnic mru value overwrites the mru setting of the
+ * associated function.
+ * The HWRM shall make sure that vnic mru does not exceed
+ * the mru of the port the function is associated with.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_VXLAN_CUST1 \
- UINT32_C(0x80)
+ uint16_t mru;
/*
- * When this bit is '1', enable GRE_CUSTOMER1 encapsulated packets
- * for aggregation.
+ * Default Rx ring for the VNIC. This ring will
+ * be chosen if packet does not match any RSS rules.
+ * The aggregation ring associated with the Rx ring is
+ * implied based on the Rx ring specified when the
+ * aggregation ring was allocated.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_GRE_CUST1 \
- UINT32_C(0x100)
+ uint16_t default_rx_ring_id;
/*
- * When this bit is '1', enable UPAR1 encapsulated packets for
- * aggregation.
+ * Default completion ring for the VNIC. This ring will
+ * be chosen if packet does not match any RSS rules.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR1 \
- UINT32_C(0x200)
+ uint16_t default_cmpl_ring_id;
/*
- * When this bit is '1', enable UPAR2 encapsulated packets for
- * aggregation.
+ * When specified, only incoming packets classified to the specified
+ * CoS queue ID will arrive on this VNIC. Packet priority to CoS
+ * mapping rules can be specified using HWRM_QUEUE_PRI2COS_CFG. In this
+ * mode, ntuple filters with a VNIC destination specified are invalid
+ * since they conflict with the CoS to VNIC steering rules.
+ *
+ * If this field is not specified, packet to VNIC steering will be
+ * subject to the standard L2 filter rules and any additional ntuple
+ * filter rules with destination VNIC specified.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR2 \
- UINT32_C(0x400)
+ uint16_t queue_id;
/*
- * When this bit is '1', enable UPAR3 encapsulated packets for
- * aggregation.
+ * If the device supports the RX V2 and RX TPA start V2 completion
+ * records as indicated by the HWRM_VNIC_QCAPS command, this field is
+ * used to specify the two RX checksum modes supported by these
+ * completion records.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR3 \
- UINT32_C(0x800)
+ uint8_t rx_csum_v2_mode;
/*
- * When this bit is '1', enable UPAR4 encapsulated packets for
- * aggregation.
+ * When configured with this checksum mode, the number of header
+ * groups in the delivered packet with a valid IP checksum and
+ * the number of header groups in the delivered packet with a valid
+ * L4 checksum are reported. Valid checksums are counted from the
+ * outermost header group to the innermost header group, stopping at
+ * the first error. This is the default checksum mode supported if
+ * the driver doesn't explicitly configure the RX checksum mode.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR4 \
- UINT32_C(0x1000)
+ #define HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_DEFAULT UINT32_C(0x0)
/*
- * When this bit is '1', enable UPAR5 encapsulated packets for
- * aggregation.
+ * When configured with this checksum mode, the checksum status is
+ * reported using 'all ok' mode. In the RX completion record, one
+ * bit indicates if the IP checksum is valid for all the parsed
+ * header groups with an IP checksum. Another bit indicates if the
+ * L4 checksum is valid for all the parsed header groups with an L4
+ * checksum. The number of header groups that were parsed by the
+ * chip and passed in the delivered packet is also reported.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR5 \
- UINT32_C(0x2000)
+ #define HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_ALL_OK UINT32_C(0x1)
/*
- * When this bit is '1', enable UPAR6 encapsulated packets for
- * aggregation.
+ * Any rx_csum_v2_mode value larger than or equal to this is not
+ * valid
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR6 \
- UINT32_C(0x4000)
+ #define HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_MAX UINT32_C(0x2)
+ #define HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_LAST \
+ HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_MAX
/*
- * When this bit is '1', enable UPAR7 encapsulated packets for
- * aggregation.
+ * If the device supports different L2 RX CQE modes, as indicated by
+ * the HWRM_VNIC_QCAPS command, this field is used to configure the
+ * CQE mode.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR7 \
- UINT32_C(0x8000)
+ uint8_t l2_cqe_mode;
/*
- * When this bit is '1', enable UPAR8 encapsulated packets for
- * aggregation.
+ * When configured with this cqe mode, a normal (32B) CQE
+ * will be generated. This is the default mode.
*/
- #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR8 \
- UINT32_C(0x10000)
- uint8_t unused_0[3];
+ #define HWRM_VNIC_CFG_INPUT_L2_CQE_MODE_DEFAULT UINT32_C(0x0)
+ /*
+ * When configured with this cqe mode, a compressed (16B) CQE
+ * will be generated. In this mode, TPA and HDS are not supported.
+ * Host drivers should not configure TPA or HDS together with
+ * compressed mode on a VNIC; FW returns an error if host drivers
+ * try to configure the VNIC with compressed mode and (TPA or HDS).
+ * The compressed completion does not include PTP data, so host
+ * drivers should not use this mode to receive PTP data.
+ */
+ #define HWRM_VNIC_CFG_INPUT_L2_CQE_MODE_COMPRESSED UINT32_C(0x1)
+ /*
+ * When configured with this cqe mode, HW generates either a 32B
+ * completion or a 16B completion depending on the use case within a
+ * VNIC. For example, a simple L2 packet could use the compressed form
+ * while a PTP packet on the same VNIC would use the 32B form.
+ */
+ #define HWRM_VNIC_CFG_INPUT_L2_CQE_MODE_MIXED UINT32_C(0x2)
+ #define HWRM_VNIC_CFG_INPUT_L2_CQE_MODE_LAST \
+ HWRM_VNIC_CFG_INPUT_L2_CQE_MODE_MIXED
+ uint8_t unused0[4];
+} __rte_packed;
+
+/* hwrm_vnic_cfg_output (size:128b/16B) */
+struct hwrm_vnic_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
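/*
 * Illustrative sketch only, not part of the generated HSI: a minimal VNIC
 * configuration that sets the default ring group and MRU and opts into
 * compressed L2 CQEs. Per the l2_cqe_mode description above, compressed
 * mode must not be combined with TPA or HDS; enforcing that driver-side
 * precondition is assumed and not shown. Requires <string.h> and
 * <rte_byteorder.h>.
 */
static void
example_fill_vnic_cfg(struct hwrm_vnic_cfg_input *req, uint16_t vnic_id,
		      uint16_t ring_grp, uint16_t mru)
{
	memset(req, 0, sizeof(*req));
	req->vnic_id = rte_cpu_to_le_16(vnic_id);
	req->enables = rte_cpu_to_le_32(
		HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
		HWRM_VNIC_CFG_INPUT_ENABLES_MRU |
		HWRM_VNIC_CFG_INPUT_ENABLES_L2_CQE_MODE);
	req->dflt_ring_grp = rte_cpu_to_le_16(ring_grp);
	req->mru = rte_cpu_to_le_16(mru);
	req->l2_cqe_mode = HWRM_VNIC_CFG_INPUT_L2_CQE_MODE_COMPRESSED;
}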
-/*********************
- * hwrm_vnic_rss_cfg *
- *********************/
+/******************
+ * hwrm_vnic_qcfg *
+ ******************/
-/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
-struct hwrm_vnic_rss_cfg_input {
+/* hwrm_vnic_qcfg_input (size:256b/32B) */
+struct hwrm_vnic_qcfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -39669,278 +42818,222 @@ struct hwrm_vnic_rss_cfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t hash_type;
+ uint32_t enables;
/*
- * When this bit is '1', the RSS hash shall be computed
- * over source and destination IPv4 addresses of IPv4
- * packets.
+ * This bit must be '1' for the vf_id_valid field to be
+ * configured.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 \
+ #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
+ /* Logical vnic ID */
+ uint32_t vnic_id;
+ /* ID of Virtual Function whose VNIC resource is being queried. */
+ uint16_t vf_id;
+ uint8_t unused_0[6];
+} __rte_packed;
+
+/* hwrm_vnic_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Default Completion ring for the VNIC. */
+ uint16_t dflt_ring_grp;
+ /*
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * there is no RSS rule.
+ */
+ uint16_t rss_rule;
+ /*
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * there is no COS rule.
+ */
+ uint16_t cos_rule;
+ /*
+ * RSS ID for load balancing rule/table structure.
+ * 0xFF... (All Fs) if there is no LB rule.
+ */
+ uint16_t lb_rule;
+ /* The maximum receive unit of the vnic. */
+ uint16_t mru;
+ uint8_t unused_0[2];
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC is the default VNIC for
+ * the function.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT \
UINT32_C(0x1)
/*
- * When this bit is '1', the RSS hash shall be computed
- * over source/destination IPv4 addresses and
- * source/destination ports of TCP/IPv4 packets.
+ * When this bit is '1', the VNIC is configured to
+ * strip VLAN in the RX path.
+ * If set to '0', then VLAN stripping is disabled on
+ * this VNIC.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 \
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE \
UINT32_C(0x2)
/*
- * When this bit is '1', the RSS hash shall be computed
- * over source/destination IPv4 addresses and
- * source/destination ports of UDP/IPv4 packets.
+ * When this bit is '1', the VNIC is configured to
+ * buffer receive packets in the hardware until the host
+ * posts new receive buffers.
+ * If set to '0', then bd_stall is disabled on
+ * this VNIC.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 \
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE \
UINT32_C(0x4)
/*
- * When this bit is '1', the RSS hash shall be computed
- * over source and destination IPv6 addresses of IPv6
- * packets.
+ * When this bit is '1', the VNIC is configured to
+ * receive both RoCE and non-RoCE traffic.
+ * If set to '0', then this VNIC is not configured to
+ * operate in dual VNIC mode.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 \
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE \
UINT32_C(0x8)
/*
- * When this bit is '1', the RSS hash shall be computed
- * over source/destination IPv6 addresses and
- * source/destination ports of TCP/IPv6 packets.
+ * When this flag is set to '1', the VNIC is configured to
+ * receive only RoCE traffic.
+ * When this flag is set to '0', the VNIC is not configured
+ * to receive only RoCE traffic.
+ * If roce_dual_vnic_mode flag and this flag both are set
+ * to '1', then it is an invalid configuration of the
+ * VNIC. The HWRM should not allow that type of
+ * mis-configuration by HWRM clients.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 \
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE \
UINT32_C(0x10)
/*
- * When this bit is '1', the RSS hash shall be computed
- * over source/destination IPv6 addresses and
- * source/destination ports of UDP/IPv6 packets.
+ * When a VNIC uses one destination ring group for certain
+ * application (e.g. Receive Flow Steering) where
+ * exact match is used to direct packets to a VNIC with one
+ * destination ring group only, there is no need to configure
+ * RSS indirection table for that VNIC as only one destination
+ * ring group is used.
+ *
+ * When this bit is set to '1', then the VNIC is enabled in a
+ * mode where RSS is enabled in the VNIC using a RSS context
+ * for computing RSS hash but the RSS indirection table is
+ * not configured.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 \
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE \
UINT32_C(0x20)
/*
- * When this bit is '1', the RSS hash shall be computed
- * over source, destination IPv6 addresses and flow label of IPv6
- * packets. Hash type ipv6 and ipv6_flow_label are mutually
- * exclusive. HW does not include the flow_label in hash
- * calculation for the packets that are matching tcp_ipv6 and
- * udp_ipv6 hash types. Host drivers should set this bit based on
- * rss_ipv6_flow_label_cap.
+ * When this bit is '1', the VNIC is configured to
+ * receive both RoCE and non-RoCE traffic, but forward only
+ * RoCE traffic further. Also RoCE traffic can be mirrored to
+ * L2 driver.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6_FLOW_LABEL \
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \
UINT32_C(0x40)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv4 addresses and IPSEC AH SPI field of IPSEC
- * AH/IPv4 packets. Host drivers should set this bit based on
- * rss_ipsec_ah_spi_ipv4_cap.
+ * When this bit is '0', VNIC is in normal operation state.
+ * When this bit is '1', VNIC drops all the received packets.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_AH_SPI_IPV4 \
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_OPERATION_STATE \
UINT32_C(0x80)
- /*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv4 addresses and IPSEC ESP SPI field of IPSEC
- * ESP/IPv4 packets. Host drivers should set this bit based on
- * rss_ipsec_esp_spi_ipv4_cap.
- */
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_ESP_SPI_IPV4 \
+ /* When this bit is '1', it indicates port cos_mapping_mode is enabled. */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_PORTCOS_MAPPING_MODE \
UINT32_C(0x100)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv6 addresses and IPSEC AH SPI field of IPSEC
- * AH/IPv6 packets. Host drivers should set this bit based on
- * rss_ipsec_ah_spi_ipv6_cap.
+ * When returned with a valid CoS Queue id, the CoS Queue/VNIC
+ * association is valid. Otherwise it will return 0xFFFF to indicate no
+ * VNIC/CoS queue association.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_AH_SPI_IPV6 \
- UINT32_C(0x200)
+ uint16_t queue_id;
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv6 addresses and IPSEC ESP SPI field of IPSEC
- * ESP/IPv6 packets. Host drivers should set this bit based on
- * rss_ipsec_esp_spi_ipv6_cap.
+ * If the device supports the RX V2 and RX TPA start V2 completion
+ * records as indicated by the HWRM_VNIC_QCAPS command, this field is
+ * used to specify the current RX checksum mode configured for all the
+ * RX rings of a VNIC.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_ESP_SPI_IPV6 \
- UINT32_C(0x400)
- /* VNIC ID of VNIC associated with RSS table being configured. */
- uint16_t vnic_id;
+ uint8_t rx_csum_v2_mode;
/*
- * Specifies which VNIC ring table pair to configure.
- * Valid values range from 0 to 7.
+ * This value indicates that the VNIC is configured to use the
+ * default RX checksum mode for all the rings associated with this
+ * VNIC.
*/
- uint8_t ring_table_pair_index;
+ #define HWRM_VNIC_QCFG_OUTPUT_RX_CSUM_V2_MODE_DEFAULT UINT32_C(0x0)
/*
- * Flags to specify different RSS hash modes. Global RSS hash mode is
- * indicated when vnic_id and rss_ctx_idx fields are set to value of
- * 0xffff. Only PF can initiate global RSS hash mode setting changes.
- * VNIC RSS hash mode is indicated with valid vnic_id and rss_ctx_idx,
- * if FW is VNIC_RSS_HASH_MODE capable. FW configures the mode based
- * on first come first serve order. Global RSS hash mode and VNIC RSS
- * hash modes are mutually exclusive. FW returns invalid error
- * if FW receives conflicting requests. To change the current hash
- * mode, the mode associated drivers need to be unloaded and apply
- * the new configuration.
+ * This value indicates that the VNIC is configured to use the RX
+ * checksum 'all_ok' mode for all the rings associated with this
+ * VNIC.
*/
- uint8_t hash_mode_flags;
+ #define HWRM_VNIC_QCFG_OUTPUT_RX_CSUM_V2_MODE_ALL_OK UINT32_C(0x1)
/*
- * When this bit is '1' and FW is VNIC_RSS_HASH_MODE capable,
- * innermost_4 and innermost_2 hash modes are used to configure
- * the tuple mode. When this bit is '1' and FW is not
- * VNIC_RSS_HASH_MODE capable, It indicates using current RSS hash
- * mode setting configured in the device otherwise.
+ * Any rx_csum_v2_mode value larger than or equal to this is not
+ * valid.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT \
- UINT32_C(0x1)
+ #define HWRM_VNIC_QCFG_OUTPUT_RX_CSUM_V2_MODE_MAX UINT32_C(0x2)
+ #define HWRM_VNIC_QCFG_OUTPUT_RX_CSUM_V2_MODE_LAST \
+ HWRM_VNIC_QCFG_OUTPUT_RX_CSUM_V2_MODE_MAX
/*
- * When this bit is '1', it indicates requesting support of
- * RSS hashing over innermost 4 tuples {l3.src, l3.dest,
- * l4.src, l4.dest} for tunnel packets. For none-tunnel
- * packets, the RSS hash is computed over the normal
- * src/dest l3 and src/dest l4 headers.
+ * If the device supports different L2 RX CQE modes, as indicated by
+ * the HWRM_VNIC_QCAPS command, this field is used to convey the
+ * configured CQE mode.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4 \
- UINT32_C(0x2)
+ uint8_t l2_cqe_mode;
/*
- * When this bit is '1', it indicates requesting support of
- * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for
- * tunnel packets. For none-tunnel packets, the RSS hash is
- * computed over the normal src/dest l3 headers.
+ * This value indicates that the VNIC is configured with normal
+ * (32B) CQE mode.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 \
- UINT32_C(0x4)
+ #define HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_DEFAULT UINT32_C(0x0)
/*
- * When this bit is '1', it indicates requesting support of
- * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest,
- * t_l4.src, t_l4.dest} for tunnel packets. For none-tunnel
- * packets, the RSS hash is computed over the normal
- * src/dest l3 and src/dest l4 headers.
+ * This value indicates that the VNIC is configured with compressed
+ * (16B) CQE mode.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4 \
- UINT32_C(0x8)
+ #define HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_COMPRESSED UINT32_C(0x1)
/*
- * When this bit is '1', it indicates requesting support of
- * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for
- * tunnel packets. For none-tunnel packets, the RSS hash is
- * computed over the normal src/dest l3 headers.
+ * This value indicates that the VNIC is configured with mixed
+ * CQE mode. HW generates either a 32B completion or a 16B
+ * completion depending on use case within a VNIC.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 \
- UINT32_C(0x10)
- /* This is the address for rss ring group table */
- uint64_t ring_grp_tbl_addr;
- /* This is the address for rss hash key table */
- uint64_t hash_key_tbl_addr;
- /* Index to the rss indirection table. */
- uint16_t rss_ctx_idx;
- uint8_t flags;
+ #define HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_MIXED UINT32_C(0x2)
+ #define HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_LAST \
+ HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_MIXED
/*
- * When this bit is '1', it indicates that the hash_type field is
- * interpreted as a change relative the current configuration. Each
- * '1' bit in hash_type represents a header to add to the current
- * hash. Zeroes designate the hash_type state bits that should remain
- * unchanged, if possible. If this constraint on the existing state
- * cannot be satisfied, then the implementation should preference
- * adding other headers so as to honor the request to add the
- * specified headers. It is an error to set this flag concurrently
- * with hash_type_exclude.
+ * This field conveys the metadata format type that has been
+ * configured. This value is product specific. Refer to the L2 HSI
+ * completion ring structures for the detailed descriptions. For Thor
+ * and Thor2, it corresponds to 'meta_format' in 'rx_pkt_cmpl_hi' and
+ * 'rx_pkt_v3_cmpl_hi', respectively.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_FLAGS_HASH_TYPE_INCLUDE \
- UINT32_C(0x1)
- /*
- * When this bit is '1', it indicates that the hash_type field is
- * interpreted as a change relative the current configuration. Each
- * '1' bit in hash_type represents a header to remove from the
- * current hash. Zeroes designate the hash_type state bits that
- * should remain unchanged, if possible. If this constraint on the
- * existing state cannot be satisfied, then the implementation should
- * preference removing other headers so as to honor the request to
- * remove the specified headers. It is an error to set this flag
- * concurrently with hash_type_include.
- */
- #define HWRM_VNIC_RSS_CFG_INPUT_FLAGS_HASH_TYPE_EXCLUDE \
- UINT32_C(0x2)
- /*
- * When this bit is '1', it indicates that the support of setting
- * ipsec hash_types by the host drivers.
- */
- #define HWRM_VNIC_RSS_CFG_INPUT_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT \
- UINT32_C(0x4)
- uint8_t ring_select_mode;
- /*
- * In this mode, HW uses Toeplitz algorithm and provided Toeplitz
- * hash key to hash the packets according to the configured hash
- * type and hash mode. The Toeplitz hash results and the provided
- * Toeplitz RSS indirection table are used to determine the RSS
- * rings.
- */
- #define HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ \
- UINT32_C(0x0)
- /*
- * In this mode, HW uses XOR algorithm to hash the packets according
- * to the configured hash type and hash mode. The XOR hash results
- * and the provided XOR RSS indirection table are used to determine
- * the RSS rings. Host drivers provided hash key is not honored in
- * this mode.
- */
- #define HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_XOR \
- UINT32_C(0x1)
- /*
- * In this mode, HW uses inner packets checksum algorithm to
- * distribute the packets across the rings and Toeplitz algorithm
- * to calculate the hash to convey it in the RX completions. Host
- * drivers should provide Toeplitz hash key. As HW uses innermost
- * packets checksum to distribute the packets across the rings,
- * host drivers can't convey hash mode to choose outer headers to
- * calculate Toeplitz hash. FW will fail such configuration.
- */
- #define HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ_CHECKSUM \
- UINT32_C(0x2)
- #define HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_LAST \
- HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
- uint8_t unused_1[4];
-} __rte_packed;
-
-/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
-struct hwrm_vnic_rss_cfg_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
+ uint8_t metadata_format_type;
+ #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_0 UINT32_C(0x0)
+ #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_1 UINT32_C(0x1)
+ #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_2 UINT32_C(0x2)
+ #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_3 UINT32_C(0x3)
+ #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_4 UINT32_C(0x4)
+ #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_LAST \
+ HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_4
+ /* This field conveys the VNIC operation state. */
+ uint8_t vnic_state;
+ /* Normal operation state. */
+ #define HWRM_VNIC_QCFG_OUTPUT_VNIC_STATE_NORMAL UINT32_C(0x0)
+ /* Drop all packets. */
+ #define HWRM_VNIC_QCFG_OUTPUT_VNIC_STATE_DROP UINT32_C(0x1)
+ #define HWRM_VNIC_QCFG_OUTPUT_VNIC_STATE_LAST \
+ HWRM_VNIC_QCFG_OUTPUT_VNIC_STATE_DROP
+ uint8_t unused_1;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
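Not part of the patch: a minimal sketch of how a driver might consume the hwrm_vnic_qcfg_output fields added above once firmware has completed the response; the helper name is hypothetical.

#include <stdbool.h>
#include "hsi_struct_def_dpdk.h"

/* Hypothetical helper: l2_cqe_mode and vnic_state are single-byte fields,
 * so no endian swap is needed; the response is assumed to be complete
 * (valid == 1).
 */
static bool
bnxt_vnic_qcfg_compressed_cqe_active(const struct hwrm_vnic_qcfg_output *resp)
{
        return resp->l2_cqe_mode == HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_COMPRESSED &&
               resp->vnic_state == HWRM_VNIC_QCFG_OUTPUT_VNIC_STATE_NORMAL;
}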
-/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
-struct hwrm_vnic_rss_cfg_cmd_err {
- /*
- * command specific error codes that goes to
- * the cmd_err field in Common HWRM Error Response.
- */
- uint8_t code;
- /* Unknown error */
- #define HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN \
- UINT32_C(0x0)
- /*
- * Unable to change global RSS mode to outer due to all active
- * interfaces are not ready to support outer RSS hashing.
- */
- #define HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY \
- UINT32_C(0x1)
- #define HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_LAST \
- HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY
- uint8_t unused_0[7];
-} __rte_packed;
-
-/**********************
- * hwrm_vnic_rss_qcfg *
- **********************/
+/*******************
+ * hwrm_vnic_qcaps *
+ *******************/
-/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
-struct hwrm_vnic_rss_qcfg_input {
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
+struct hwrm_vnic_qcaps_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -39969,21 +43062,12 @@ struct hwrm_vnic_rss_qcfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /*
- * Index to the rss indirection table. This field is used as a lookup
- * for chips before Thor - i.e. Cumulus and Whitney.
- */
- uint16_t rss_ctx_idx;
- /*
- * VNIC ID of VNIC associated with RSS table being queried. This field
- * is used as a lookup for Thor and later chips.
- */
- uint16_t vnic_id;
+ uint32_t enables;
uint8_t unused_0[4];
} __rte_packed;
-/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
-struct hwrm_vnic_rss_qcfg_output {
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
+struct hwrm_vnic_qcaps_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -39992,197 +43076,262 @@ struct hwrm_vnic_rss_qcfg_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint32_t hash_type;
- /*
- * When this bit is '1', the RSS hash shall be computed
- * over source and destination IPv4 addresses of IPv4
- * packets.
- */
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV4 \
+ /* The maximum receive unit that is settable on a vnic. */
+ uint16_t mru;
+ uint8_t unused_0[2];
+ uint32_t flags;
+ /* Unused. */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_UNUSED \
UINT32_C(0x1)
/*
- * When this bit is '1', the RSS hash shall be computed
- * over source/destination IPv4 addresses and
- * source/destination ports of TCP/IPv4 packets.
+ * When this bit is '1', the capability of stripping VLAN in
+ * the RX path is supported on VNIC(s).
+ * If set to '0', then VLAN stripping capability is
+ * not supported on VNIC(s).
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV4 \
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP \
UINT32_C(0x2)
/*
- * When this bit is '1', the RSS hash shall be computed
- * over source/destination IPv4 addresses and
- * source/destination ports of UDP/IPv4 packets.
+ * When this bit is '1', the capability to buffer receive
+ * packets in the hardware until the host posts new receive buffers
+ * is supported on VNIC(s).
+ * If set to '0', then bd_stall capability is not supported
+ * on VNIC(s).
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV4 \
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_BD_STALL_CAP \
UINT32_C(0x4)
/*
- * When this bit is '1', the RSS hash shall be computed
- * over source and destination IPv6 addresses of IPv6
- * packets.
+ * When this bit is '1', the capability to
+ * receive both RoCE and non-RoCE traffic on VNIC(s) is
+ * supported.
+ * If set to '0', then the capability to receive
+ * both RoCE and non-RoCE traffic on VNIC(s) is
+ * not supported.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV6 \
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_DUAL_VNIC_CAP \
UINT32_C(0x8)
/*
- * When this bit is '1', the RSS hash shall be computed
- * over source/destination IPv6 addresses and
- * source/destination ports of TCP/IPv6 packets.
+ * When this bit is set to '1', the capability to configure
+ * a VNIC to receive only RoCE traffic is supported.
+ * When this flag is set to '0', the capability to configure
+ * a VNIC to receive only RoCE traffic is not supported.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV6 \
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_ONLY_VNIC_CAP \
UINT32_C(0x10)
/*
- * When this bit is '1', the RSS hash shall be computed
- * over source/destination IPv6 addresses and
- * source/destination ports of UDP/IPv6 packets.
+ * When this bit is set to '1', then the capability to enable
+ * a VNIC in a mode where RSS context without configuring
+ * RSS indirection table is supported (for RSS hash computation).
+ * When this bit is set to '0', then a VNIC cannot be configured
+ * with a mode to enable RSS context without configuring RSS
+ * indirection table.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV6 \
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_DFLT_CR_CAP \
UINT32_C(0x20)
/*
- * When this bit is '1', the RSS hash shall be computed
- * over source, destination IPv6 addresses and flow label of IPv6
- * packets. Hash type ipv6 and ipv6_flow_label are mutually
- * exclusive. HW does not include the flow_label in hash
- * calculation for the packets that are matching tcp_ipv6 and
- * udp_ipv6 hash types. This bit will be '0' if
- * rss_ipv6_flow_label_cap is '0'.
+ * When this bit is '1', the capability to
+ * mirror the RoCE traffic is supported.
+ * If set to '0', then the capability to mirror the
+ * RoCE traffic is not supported.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV6_FLOW_LABEL \
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP \
UINT32_C(0x40)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv4 addresses and IPSEC AH SPI field of IPSEC
- * AH/IPv4 packets. This bit will be '0' if rss_ipsec_ah_spi_ipv4_cap
- * is '0'.
+ * When this bit is '1', the outermost RSS hashing capability
+ * is supported. If set to '0', then the outermost RSS hashing
+ * capability is not supported.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_AH_SPI_IPV4 \
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP \
UINT32_C(0x80)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv4 addresses and IPSEC ESP SPI field of IPSEC
- * ESP/IPv4 packets. This bit will be '0' if
- * rss_ipsec_esp_spi_ipv4_cap is '0'.
+ * When this bit is '1', it indicates that firmware supports the
+ * ability to steer incoming packets from one CoS queue to one
+ * VNIC. This optional feature can then be enabled
+ * using HWRM_VNIC_CFG on any VNIC. This feature is only
+ * available when NVM option 'enable_cos_classification' is set
+ * to 1. If set to '0', firmware does not support this feature.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_ESP_SPI_IPV4 \
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP \
UINT32_C(0x100)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv6 addresses and IPSEC AH SPI field of IPSEC
- * AH/IPv6 packets. This bit will be '0' if
- * rss_ipsec_ah_spi_ipv6_cap is '0'.
+ * When this bit is '1', it indicates that HW and firmware support
+ * the use of RX V2 and RX TPA start V2 completion records for all
+ * the RX rings of a VNIC. Once set, this feature is mandatory to
+ * be used for the RX rings of the VNIC. Additionally, two new RX
+ * checksum features supported by these completion records can be
+ * configured using the HWRM_VNIC_CFG on a VNIC. If set to '0', the
+ * HW and the firmware do not support this feature.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_AH_SPI_IPV6 \
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V2_CAP \
UINT32_C(0x200)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv6 addresses and IPSEC ESP SPI field of IPSEC
- * ESP/IPv6 packets. This bit will be '0' if
- * rss_ipsec_esp_spi_ipv6_cap is '0'.
+ * When this bit is '1', it indicates that HW and firmware support
+ * vnic state change. Host drivers can change the vnic state using
+ * HWRM_VNIC_UPDATE. If set to '0', the HW and firmware do not
+ * support this feature.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_ESP_SPI_IPV6 \
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VNIC_STATE_CAP \
UINT32_C(0x400)
- uint8_t unused_0[4];
- /* This is the value of rss hash key */
- uint32_t hash_key[10];
/*
- * Flags to specify different RSS hash modes. Setting rss_ctx_idx to
- * the value of 0xffff implies a global RSS configuration query.
- * hash_mode_flags are only valid for global RSS configuration query.
- * Only the PF can initiate a global RSS configuration query.
- * The query request fails if any VNIC is configured with hash mode
- * and rss_ctx_idx is 0xffff.
+ * When this bit is '1', it indicates that firmware supports
+ * virtio-net functions default VNIC allocation using
+ * HWRM_VNIC_ALLOC.
+ * This capability is available only on Proxy VEE PF. If set to '0',
+ * firmware does not support this feature.
*/
- uint8_t hash_mode_flags;
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP \
+ UINT32_C(0x800)
/*
- * When this bit is '1' and FW is VNIC_RSS_HASH_MODE capable,
- * it indicates VNIC's configured RSS hash mode.
- * When this bit is '1' and FW is not VNIC_RSS_HASH_MODE capable,
- * It indicates using current RSS hash mode setting configured in the
- * device.
+ * When this bit is set to '1', then the capability to configure the
+ * metadata format in the RX completion is supported for the VNIC.
+ * When this bit is set to '0', then the capability to configure
+ * the metadata format in the RX completion is not supported for
+ * the VNIC.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_DEFAULT \
- UINT32_C(0x1)
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_METADATA_FORMAT_CAP \
+ UINT32_C(0x1000)
/*
- * When this bit is '1', it indicates requesting support of
- * RSS hashing over innermost 4 tuples {l3.src, l3.dest,
- * l4.src, l4.dest} for tunnel packets. For none-tunnel
- * packets, the RSS hash is computed over the normal
- * src/dest l3 and src/dest l4 headers.
+ * When this bit is set to '1', it indicates that firmware returns
+ * an INVALID_PARAM error if host drivers choose invalid hash type
+ * bit combinations in vnic_rss_cfg.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_4 \
- UINT32_C(0x2)
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_STRICT_HASH_TYPE_CAP \
+ UINT32_C(0x2000)
/*
- * When this bit is '1', it indicates requesting support of
- * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for
- * tunnel packets. For none-tunnel packets, the RSS hash is
- * computed over the normal src/dest l3 headers.
+ * When this bit is set to '1', it indicates that firmware supports
+ * the hash_type include and exclude flags in hwrm_vnic_rss_cfg.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_2 \
- UINT32_C(0x4)
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_HASH_TYPE_DELTA_CAP \
+ UINT32_C(0x4000)
/*
- * When this bit is '1', it indicates requesting support of
- * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest,
- * t_l4.src, t_l4.dest} for tunnel packets. For none-tunnel
- * packets, the RSS hash is computed over the normal
- * src/dest l3 and src/dest l4 headers.
+ * When this bit is '1', it indicates that HW is capable of using
+ * Toeplitz algorithm. This mode uses Toeplitz algorithm and
+ * provided Toeplitz hash key to hash the packets according to the
+ * configured hash type and hash mode. The Toeplitz hash results and
+ * the provided Toeplitz RSS indirection table are used to determine
+ * the RSS rings.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_4 \
- UINT32_C(0x8)
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP \
+ UINT32_C(0x8000)
/*
- * When this bit is '1', it indicates requesting support of
- * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for
- * tunnel packets. For none-tunnel packets, the RSS hash is
- * computed over the normal src/dest l3 headers.
+ * When this bit is '1', it indicates that HW is capable of using
+ * XOR algorithm. This mode uses 'XOR' algorithm to hash the packets
+ * according to the configured hash type and hash mode. The XOR
+ * hash results and the provided XOR RSS indirection table are
+ * used to determine the RSS rings. Host drivers provided hash key
+ * is not honored in this mode.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_2 \
- UINT32_C(0x10)
- uint8_t ring_select_mode;
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RING_SELECT_MODE_XOR_CAP \
+ UINT32_C(0x10000)
/*
- * In this mode, HW uses Toeplitz algorithm and provided Toeplitz
- * hash key to hash the packets according to the configured hash
- * type and hash mode. The Toeplitz hash results and the provided
- * Toeplitz RSS indirection table are used to determine the RSS
- * rings.
+ * When this bit is '1', it indicates that HW is capable of using
+ * checksum algorithm. In this mode, HW uses inner packets checksum
+ * algorithm to distribute the packets across the rings and Toeplitz
+ * algorithm to calculate the hash to convey it in the RX
+ * completions. Host drivers should provide Toeplitz hash key.
+ * As HW uses innermost packets checksum to distribute the packets
+ * across the rings, host drivers can't convey hash mode to choose
+ * outer headers to calculate Toeplitz hash. FW will fail such
+ * configuration.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_RING_SELECT_MODE_TOEPLITZ \
- UINT32_C(0x0)
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP \
+ UINT32_C(0x20000)
/*
- * In this mode, HW uses XOR algorithm to hash the packets according
- * to the configured hash type and hash mode. The XOR hash results
- * and the provided XOR RSS indirection table are used to determine
- * the RSS rings. Host drivers provided hash key is not honored in
- * this mode.
+ * When this bit is '1' HW supports hash calculation
+ * based on IPV6 flow labels.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_RING_SELECT_MODE_XOR \
- UINT32_C(0x1)
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPV6_FLOW_LABEL_CAP \
+ UINT32_C(0x40000)
/*
- * In this mode, HW uses inner packets checksum algorithm to
- * distribute the packets across the rings and Toeplitz algorithm
- * to calculate the hash to convey it in the RX completions. Host
- * drivers should provide Toeplitz hash key. As HW uses innermost
- * packets checksum to distribute the packets across the rings,
- * host drivers can't convey hash mode to choose outer headers to
- * calculate Toeplitz hash. FW will fail such configuration.
+ * When this bit is '1', it indicates that HW and firmware support
+ * the use of RX V3 and RX TPA start V3 completion records for all
+ * the RX rings of a VNIC. Once set, this feature is mandatory to
+ * be used for the RX rings of the VNIC. If set to '0', the
+ * HW and the firmware do not support this feature.
*/
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_RING_SELECT_MODE_TOEPLITZ_CHECKSUM \
- UINT32_C(0x2)
- #define HWRM_VNIC_RSS_QCFG_OUTPUT_RING_SELECT_MODE_LAST \
- HWRM_VNIC_RSS_QCFG_OUTPUT_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RX_CMPL_V3_CAP \
+ UINT32_C(0x80000)
+ /*
+ * When this bit is '1' HW supports different RX CQE record types.
+ * Host drivers can choose the mode based on their application
+ * requirements like performance, TPA, HDS and PTP.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_L2_CQE_MODE_CAP \
+ UINT32_C(0x100000)
+ /*
+ * When this bit is '1' HW supports hash calculation
+ * based on IPv4 IPSEC AH SPI field.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP \
+ UINT32_C(0x200000)
+ /*
+ * When this bit is '1' HW supports hash calculation
+ * based on IPv4 IPSEC ESP SPI field.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP \
+ UINT32_C(0x400000)
+ /*
+ * When this bit is '1' HW supports hash calculation
+ * based on IPv6 IPSEC AH SPI field.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP \
+ UINT32_C(0x800000)
+ /*
+ * When this bit is '1' HW supports hash calculation
+ * based on IPv6 IPSEC ESP SPI field.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP \
+ UINT32_C(0x1000000)
+ /*
+ * When outermost_rss_cap is '1' and this bit is '1', the outermost
+ * RSS hash mode may be set on a PF or trusted VF.
+ * When outermost_rss_cap is '1' and this bit is '0', the outermost
+ * RSS hash mode may be set on a PF.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP \
+ UINT32_C(0x2000000)
+ /*
+ * When this bit is '1' it indicates HW is capable of enabling ring
+ * selection using the incoming spif and lcos for the packet.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_PORTCOS_MAPPING_MODE \
+ UINT32_C(0x4000000)
+ /*
+ * When this bit is '1', it indicates controller enabled
+ * RSS profile TCAM mode.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_PROF_TCAM_MODE_ENABLED \
+ UINT32_C(0x8000000)
+ /* When this bit is '1' FW supports VNIC hash mode. */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VNIC_RSS_HASH_MODE_CAP \
+ UINT32_C(0x10000000)
+ /* When this bit is set to '1', hardware supports tunnel TPA. */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_HW_TUNNEL_TPA_CAP \
+ UINT32_C(0x20000000)
+ /*
+ * This field advertises the maximum concurrent TPA aggregations
+ * supported by the VNIC on new devices that support TPA v2 or v3.
+ * A value of '0' means that neither TPA v2 nor v3 is supported.
+ */
+ uint16_t max_aggs_supported;
uint8_t unused_1[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
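Not part of the patch: an illustrative sketch of checking the hwrm_vnic_qcaps_output capability bits above before attempting a tunnel TPA configuration. The helper name and the specific policy are assumptions, not a mandated flow.

#include <stdbool.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Hypothetical helper: tunnel TPA is only attempted when the device
 * advertises the capability and reports a non-zero aggregation budget.
 */
static bool
bnxt_vnic_qcaps_tunnel_tpa_usable(const struct hwrm_vnic_qcaps_output *resp)
{
        uint32_t flags = rte_le_to_cpu_32(resp->flags);

        /* max_aggs_supported == 0 means neither TPA v2 nor v3 is available. */
        if (rte_le_to_cpu_16(resp->max_aggs_supported) == 0)
                return false;

        return (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_HW_TUNNEL_TPA_CAP) != 0;
}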
-/**************************
- * hwrm_vnic_plcmodes_cfg *
- **************************/
+/*********************
+ * hwrm_vnic_tpa_cfg *
+ *********************/
-/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
-struct hwrm_vnic_plcmodes_cfg_input {
+/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
+struct hwrm_vnic_tpa_cfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -40214,140 +43363,272 @@ struct hwrm_vnic_plcmodes_cfg_input {
uint32_t flags;
/*
* When this bit is '1', the VNIC shall be configured to
- * use regular placement algorithm.
- * By default, the regular placement algorithm shall be
- * enabled on the VNIC.
+ * perform transparent packet aggregation (TPA) of
+ * non-tunneled TCP packets.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT \
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA \
UINT32_C(0x1)
/*
- * When this bit is '1', the VNIC shall be configured
- * use the jumbo placement algorithm.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) of
+ * tunneled TCP packets.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT \
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA \
UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC shall be configured
- * to enable Header-Data split for IPv4 packets according
- * to the following rules:
- * # If the packet is identified as TCP/IPv4, then the
- * packet is split at the beginning of the TCP payload.
- * # If the packet is identified as UDP/IPv4, then the
- * packet is split at the beginning of UDP payload.
- * # If the packet is identified as non-TCP and non-UDP
- * IPv4 packet, then the packet is split at the beginning
- * of the upper layer protocol header carried in the IPv4
- * packet.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Windows Receive Segment Coalescing (RSC) rules.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 \
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE \
UINT32_C(0x4)
/*
- * When this bit is '1', the VNIC shall be configured
- * to enable Header-Data split for IPv6 packets according
- * to the following rules:
- * # If the packet is identified as TCP/IPv6, then the
- * packet is split at the beginning of the TCP payload.
- * # If the packet is identified as UDP/IPv6, then the
- * packet is split at the beginning of UDP payload.
- * # If the packet is identified as non-TCP and non-UDP
- * IPv6 packet, then the packet is split at the beginning
- * of the upper layer protocol header carried in the IPv6
- * packet.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Linux Generic Receive Offload (GRO) rules.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 \
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO \
UINT32_C(0x8)
/*
- * When this bit is '1', the VNIC shall be configured
- * to enable Header-Data split for FCoE packets at the
- * beginning of FC payload.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for TCP
+ * packets with IP ECN set to non-zero.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE \
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN \
UINT32_C(0x10)
/*
- * When this bit is '1', the VNIC shall be configured
- * to enable Header-Data split for RoCE packets at the
- * beginning of RoCE payload (after BTH/GRH headers).
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for
+ * GRE tunneled TCP packets only if all packets have the
+ * same GRE sequence.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE \
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
UINT32_C(0x20)
/*
- * When this bit is '1', the VNIC shall be configured use the virtio
- * placement algorithm. This feature can only be configured when
- * proxy mode is supported on the function.
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP/IPv4 packets with consecutively increasing IPIDs.
+ * In other words, the last packet that is being
+ * aggregated to an already existing aggregation context
+ * shall have IPID 1 more than the IPID of the last packet
+ * that was aggregated in that aggregation context.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_VIRTIO_PLACEMENT \
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK \
UINT32_C(0x40)
+ /*
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP packets with the same TTL (IPv4) or Hop limit (IPv6)
+ * value.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK \
+ UINT32_C(0x80)
+ /*
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC shall DMA payload data using GRO rules.
+ * When this bit is '0', the VNIC shall DMA payload data
+ * using the more efficient LRO rules of filling all
+ * aggregation buffers.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_PACK_AS_GRO \
+ UINT32_C(0x100)
uint32_t enables;
/*
- * This bit must be '1' for the jumbo_thresh_valid field to be
+ * This bit must be '1' for the max_agg_segs field to be
* configured.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID \
- UINT32_C(0x1)
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1)
/*
- * This bit must be '1' for the hds_offset_valid field to be
+ * This bit must be '1' for the max_aggs field to be
* configured.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID \
- UINT32_C(0x2)
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2)
/*
- * This bit must be '1' for the hds_threshold_valid field to be
+ * This bit must be '1' for the max_agg_timer field to be
* configured.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID \
- UINT32_C(0x4)
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4)
+ /* deprecated bit. Do not use!!! */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8)
/*
- * This bit must be '1' for the max_bds_valid field to be
+ * This bit must be '1' for the tnl_tpa_en_bitmap field to be
* configured.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_MAX_BDS_VALID \
- UINT32_C(0x8)
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_TNL_TPA_EN \
+ UINT32_C(0x10)
/* Logical vnic ID */
- uint32_t vnic_id;
+ uint16_t vnic_id;
/*
- * When jumbo placement algorithm is enabled, this value
- * is used to determine the threshold for jumbo placement.
- * Packets with length larger than this value will be
- * placed according to the jumbo placement algorithm.
+ * This is the maximum number of TCP segments that can
+ * be aggregated (unit is Log2). Max value is 31. On new
+ * devices supporting TPA v2, the unit is multiples of 4 and
+ * valid values are > 0 and <= 63.
*/
- uint16_t jumbo_thresh;
+ uint16_t max_agg_segs;
+ /* 1 segment */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
+ /* 2 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
+ /* 4 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
+ /* 8 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
+ /* Any segment size larger than this is not valid */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_LAST \
+ HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX
/*
- * This value is used to determine the offset into
- * packet buffer where the split data (payload) will be
- * placed according to one of HDS placement algorithm.
- *
- * The lengths of packet buffers provided for split data
- * shall be larger than this value.
+ * This is the maximum number of aggregations this VNIC is
+ * allowed (unit is Log2). Max value is 7. On new devices
+ * supporting TPA v2, this is in units of 1 and must be > 0
+ * and <= max_aggs_supported in the hwrm_vnic_qcaps response
+ * to enable TPA v2.
*/
- uint16_t hds_offset;
+ uint16_t max_aggs;
+ /* 1 aggregation */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0)
+ /* 2 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1)
+ /* 4 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2)
+ /* 8 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3)
+ /* 16 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4)
+ /* Any aggregation size larger than this is not valid */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_LAST \
+ HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX
+ uint8_t unused_0[2];
/*
- * When one of the HDS placement algorithm is enabled, this
- * value is used to determine the threshold for HDS
- * placement.
- * Packets with length larger than this value will be
- * placed according to the HDS placement algorithm.
- * This value shall be in multiple of 4 bytes.
+ * This is the maximum amount of time allowed for
+ * an aggregation context to complete after it was initiated.
*/
- uint16_t hds_threshold;
+ uint32_t max_agg_timer;
/*
- * When virtio placement algorithm is enabled, this
- * value is used to determine the maximum number of BDs
- * that can be used to place an Rx Packet.
- * If an incoming packet does not fit in the buffers described
- * by the max BDs, the packet will be dropped and an error
- * will be reported in the completion. Valid values for this
- * field are between 1 and 8. If the VNIC uses header-data-
- * separation and/or TPA with buffer spanning enabled, valid
- * values for this field are between 2 and 8.
- * This feature can only be configured when proxy mode is
- * supported on the function.
+ * This is the minimum amount of payload length required to
+ * start an aggregation context. This field is deprecated and
+ * should be set to 0. The minimum length is set by firmware
+ * and can be queried using hwrm_vnic_tpa_qcfg.
*/
- uint16_t max_bds;
- uint8_t unused_0[4];
+ uint32_t min_agg_len;
+ /*
+ * If the device supports hardware tunnel TPA feature, as indicated by
+ * the HWRM_VNIC_QCAPS command, this field is used to configure the
+ * tunnel types to be enabled. Each bit corresponds to a specific
+ * tunnel type. If a bit is set to '1', then the associated tunnel
+ * type is enabled; otherwise, it is disabled.
+ */
+ uint32_t tnl_tpa_en_bitmap;
+ /*
+ * When this bit is '1', enable VXLAN encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', enable GENEVE encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GENEVE \
+ UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', enable NVGRE encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_NVGRE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is set to '1', enable GRE encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GRE \
+ UINT32_C(0x8)
+ /*
+ * When this bit is set to '1', enable IPV4 encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_IPV4 \
+ UINT32_C(0x10)
+ /*
+ * When this bit is set to '1', enable IPV6 encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_IPV6 \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', enable VXLAN_GPE encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN_GPE \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1', enable VXLAN_CUSTOMER1 encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN_CUST1 \
+ UINT32_C(0x80)
+ /*
+ * When this bit is '1', enable GRE_CUSTOMER1 encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GRE_CUST1 \
+ UINT32_C(0x100)
+ /*
+ * When this bit is '1', enable UPAR1 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR1 \
+ UINT32_C(0x200)
+ /*
+ * When this bit is '1', enable UPAR2 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR2 \
+ UINT32_C(0x400)
+ /*
+ * When this bit is '1', enable UPAR3 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR3 \
+ UINT32_C(0x800)
+ /*
+ * When this bit is '1', enable UPAR4 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR4 \
+ UINT32_C(0x1000)
+ /*
+ * When this bit is '1', enable UPAR5 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR5 \
+ UINT32_C(0x2000)
+ /*
+ * When this bit is '1', enable UPAR6 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR6 \
+ UINT32_C(0x4000)
+ /*
+ * When this bit is '1', enable UPAR7 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR7 \
+ UINT32_C(0x8000)
+ /*
+ * When this bit is '1', enable UPAR8 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR8 \
+ UINT32_C(0x10000)
+ uint8_t unused_1[4];
} __rte_packed;
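Not part of the patch: a sketch of populating the hwrm_vnic_tpa_cfg_input layout defined above for plain and tunneled aggregation with GRO semantics. The helper name, the chosen limits and the tunnel selection are examples only; sending the prepared request is left to the driver's existing HWRM mailbox path.

#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Hypothetical helper: fill a TPA configuration request for one VNIC. */
static void
bnxt_vnic_tpa_cfg_fill(struct hwrm_vnic_tpa_cfg_input *req, uint16_t vnic_id)
{
        req->vnic_id = rte_cpu_to_le_16(vnic_id);
        req->flags = rte_cpu_to_le_32(HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
                                      HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
                                      HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO);
        req->enables = rte_cpu_to_le_32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
                                        HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
                                        HWRM_VNIC_TPA_CFG_INPUT_ENABLES_TNL_TPA_EN);
        req->max_agg_segs = rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8);
        req->max_aggs = rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
        /* Example: opt in to VXLAN and GENEVE tunnel aggregation only. */
        req->tnl_tpa_en_bitmap =
                rte_cpu_to_le_32(HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN |
                                 HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GENEVE);
}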
-/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
-struct hwrm_vnic_plcmodes_cfg_output {
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
+struct hwrm_vnic_tpa_cfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -40359,22 +43640,21 @@ struct hwrm_vnic_plcmodes_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/***************************
- * hwrm_vnic_plcmodes_qcfg *
- ***************************/
+/**********************
+ * hwrm_vnic_tpa_qcfg *
+ **********************/
-/* hwrm_vnic_plcmodes_qcfg_input (size:192b/24B) */
-struct hwrm_vnic_plcmodes_qcfg_input {
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -40404,12 +43684,12 @@ struct hwrm_vnic_plcmodes_qcfg_input {
*/
uint64_t resp_addr;
/* Logical vnic ID */
- uint32_t vnic_id;
- uint8_t unused_0[4];
+ uint16_t vnic_id;
+ uint8_t unused_0[6];
} __rte_packed;
-/* hwrm_vnic_plcmodes_qcfg_output (size:192b/24B) */
-struct hwrm_vnic_plcmodes_qcfg_output {
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -40421,111 +43701,242 @@ struct hwrm_vnic_plcmodes_qcfg_output {
uint32_t flags;
/*
* When this bit is '1', the VNIC is configured to
- * use regular placement algorithm.
+ * perform transparent packet aggregation (TPA) of
+ * non-tunneled TCP packets.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_REGULAR_PLACEMENT \
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_TPA \
UINT32_C(0x1)
/*
* When this bit is '1', the VNIC is configured to
- * use the jumbo placement algorithm.
+ * perform transparent packet aggregation (TPA) of
+ * tunneled TCP packets.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_JUMBO_PLACEMENT \
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_ENCAP_TPA \
UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC is configured
- * to enable Header-Data split for IPv4 packets.
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Windows Receive Segment Coalescing (RSC) rules.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 \
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_RSC_WND_UPDATE \
UINT32_C(0x4)
/*
- * When this bit is '1', the VNIC is configured
- * to enable Header-Data split for IPv6 packets.
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Linux Generic Receive Offload (GRO) rules.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 \
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO \
UINT32_C(0x8)
/*
- * When this bit is '1', the VNIC is configured
- * to enable Header-Data split for FCoE packets.
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for TCP
+ * packets with IP ECN set to non-zero.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE \
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_ECN \
UINT32_C(0x10)
/*
- * When this bit is '1', the VNIC is configured
- * to enable Header-Data split for RoCE packets.
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for
+ * GRE tunneled TCP packets only if all packets have the
+ * same GRE sequence.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE \
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
UINT32_C(0x20)
/*
- * When this bit is '1', the VNIC is configured
- * to be the default VNIC of the requesting function.
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP/IPv4 packets with consecutively increasing IPIDs.
+ * In other words, the last packet that is being
+ * aggregated to an already existing aggregation context
+ * shall have IPID 1 more than the IPID of the last packet
+ * that was aggregated in that aggregation context.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC \
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_IPID_CHECK \
UINT32_C(0x40)
/*
- * When this bit is '1', the VNIC is configured to use the virtio
- * placement algorithm. This feature can only be configured when
- * proxy mode is supported on the function.
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP packets with the same TTL (IPv4) or Hop limit (IPv6)
+ * value.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_VIRTIO_PLACEMENT \
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_TTL_CHECK \
UINT32_C(0x80)
/*
- * When jumbo placement algorithm is enabled, this value
- * is used to determine the threshold for jumbo placement.
- * Packets with length larger than this value will be
- * placed according to the jumbo placement algorithm.
+ * This is the maximum number of TCP segments that can
+ * be aggregated (unit is Log2). Max value is 31.
*/
- uint16_t jumbo_thresh;
+ uint16_t max_agg_segs;
+ /* 1 segment */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
+ /* 2 segments */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
+ /* 4 segments */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
+ /* 8 segments */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
+ /* Any segment size larger than this is not valid */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_LAST \
+ HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX
/*
- * This value is used to determine the offset into
- * packet buffer where the split data (payload) will be
- * placed according to one of HDS placement algorithm.
- *
- * The lengths of packet buffers provided for split data
- * shall be larger than this value.
+ * This is the maximum number of aggregations this VNIC is
+ * allowed (unit is Log2). Max value is 7.
*/
- uint16_t hds_offset;
+ uint16_t max_aggs;
+ /* 1 aggregation */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_1 UINT32_C(0x0)
+ /* 2 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_2 UINT32_C(0x1)
+ /* 4 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_4 UINT32_C(0x2)
+ /* 8 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_8 UINT32_C(0x3)
+ /* 16 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_16 UINT32_C(0x4)
+ /* Any aggregation size larger than this is not valid */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX UINT32_C(0x7)
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_LAST \
+ HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX
/*
- * When one of the HDS placement algorithm is enabled, this
- * value is used to determine the threshold for HDS
- * placement.
- * Packets with length larger than this value will be
- * placed according to the HDS placement algorithm.
- * This value shall be in multiple of 4 bytes.
+ * This is the maximum amount of time allowed for
+ * an aggregation context to complete after it was initiated.
*/
- uint16_t hds_threshold;
+ uint32_t max_agg_timer;
/*
- * When virtio placement algorithm is enabled, this
- * value is used to determine the maximum number of BDs
- * that can be used to place an Rx Packet.
- * If an incoming packet does not fit in the buffers described
- * by the max BDs, the packet will be dropped and an error
- * will be reported in the completion. Valid values for this
- * field are between 1 and 8. If the VNIC uses header-data-
- * separation and/or TPA with buffer spanning enabled, valid
- * values for this field are between 2 and 8.
- * This feature can only be configured when proxy mode is supported
- * on the function
+ * This is the minimum amount of payload length required to
+ * start an aggregation context.
*/
- uint16_t max_bds;
+ uint32_t min_agg_len;
+ /*
+ * If the device supports hardware tunnel TPA feature, as indicated by
+ * the HWRM_VNIC_QCAPS command, this field conveys the bitmap of the
+ * tunnel types that have been configured. Each bit corresponds to a
+ * specific tunnel type. If a bit is set to '1', then the associated
+ * tunnel type is enabled; otherwise, it is disabled.
+ */
+ uint32_t tnl_tpa_en_bitmap;
+ /*
+ * When this bit is '1', enable VXLAN encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_VXLAN \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', enable GENEVE encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_GENEVE \
+ UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', enable NVGRE encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_NVGRE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is set to '1', enable GRE encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_GRE \
+ UINT32_C(0x8)
+ /*
+ * When this bit is set to '1', enable IPV4 encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_IPV4 \
+ UINT32_C(0x10)
+ /*
+ * When this bit is set to '1', enable IPV6 encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_IPV6 \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', enable VXLAN_GPE encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_VXLAN_GPE \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1', enable VXLAN_CUSTOMER1 encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_VXLAN_CUST1 \
+ UINT32_C(0x80)
+ /*
+ * When this bit is '1', enable GRE_CUSTOMER1 encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_GRE_CUST1 \
+ UINT32_C(0x100)
+ /*
+ * When this bit is '1', enable UPAR1 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR1 \
+ UINT32_C(0x200)
+ /*
+ * When this bit is '1', enable UPAR2 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR2 \
+ UINT32_C(0x400)
+ /*
+ * When this bit is '1', enable UPAR3 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR3 \
+ UINT32_C(0x800)
+ /*
+ * When this bit is '1', enable UPAR4 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR4 \
+ UINT32_C(0x1000)
+ /*
+ * When this bit is '1', enable UPAR5 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR5 \
+ UINT32_C(0x2000)
+ /*
+ * When this bit is '1', enable UPAR6 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR6 \
+ UINT32_C(0x4000)
+ /*
+ * When this bit is '1', enable UPAR7 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR7 \
+ UINT32_C(0x8000)
+ /*
+ * When this bit is '1', enable UPAR8 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR8 \
+ UINT32_C(0x10000)
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
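Not part of the patch: a small sketch of decoding the tunnel bitmap reported in the hwrm_vnic_tpa_qcfg_output above; the helper name is hypothetical and the response is assumed to be complete.

#include <stdbool.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Hypothetical helper: report whether VXLAN aggregation is currently
 * enabled on the queried VNIC.
 */
static bool
bnxt_vnic_tpa_qcfg_vxlan_enabled(const struct hwrm_vnic_tpa_qcfg_output *resp)
{
        uint32_t bitmap = rte_le_to_cpu_32(resp->tnl_tpa_en_bitmap);

        return (bitmap & HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_VXLAN) != 0;
}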
-/**********************************
- * hwrm_vnic_rss_cos_lb_ctx_alloc *
- **********************************/
+/*********************
+ * hwrm_vnic_rss_cfg *
+ *********************/
-/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
+struct hwrm_vnic_rss_cfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -40554,10 +43965,231 @@ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ uint32_t hash_type;
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv4 addresses of IPv4
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of TCP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of UDP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv6 addresses of IPv6
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of TCP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 \
+ UINT32_C(0x10)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of UDP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source, destination IPv6 addresses and flow label of IPv6
+ * packets. Hash type ipv6 and ipv6_flow_label are mutually
+ * exclusive. HW does not include the flow_label in hash
+ * calculation for the packets that are matching tcp_ipv6 and
+ * udp_ipv6 hash types. Host drivers should set this bit based on
+ * rss_ipv6_flow_label_cap.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6_FLOW_LABEL \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv4 addresses and IPSEC AH SPI field of IPSEC
+ * AH/IPv4 packets. Host drivers should set this bit based on
+ * rss_ipsec_ah_spi_ipv4_cap.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_AH_SPI_IPV4 \
+ UINT32_C(0x80)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv4 addresses and IPSEC ESP SPI field of IPSEC
+ * ESP/IPv4 packets. Host drivers should set this bit based on
+ * rss_ipsec_esp_spi_ipv4_cap.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_ESP_SPI_IPV4 \
+ UINT32_C(0x100)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv6 addresses and IPSEC AH SPI field of IPSEC
+ * AH/IPv6 packets. Host drivers should set this bit based on
+ * rss_ipsec_ah_spi_ipv6_cap.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_AH_SPI_IPV6 \
+ UINT32_C(0x200)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv6 addresses and IPSEC ESP SPI field of IPSEC
+ * ESP/IPv6 packets. Host drivers should set this bit based on
+ * rss_ipsec_esp_spi_ipv6_cap.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_ESP_SPI_IPV6 \
+ UINT32_C(0x400)
+ /* VNIC ID of VNIC associated with RSS table being configured. */
+ uint16_t vnic_id;
+ /*
+ * Specifies which VNIC ring table pair to configure.
+ * Valid values range from 0 to 7.
+ */
+ uint8_t ring_table_pair_index;
+ /*
+ * Flags to specify different RSS hash modes. Global RSS hash mode is
+ * indicated when vnic_id and rss_ctx_idx fields are set to value of
+ * 0xffff. Only PF can initiate global RSS hash mode setting changes.
+ * VNIC RSS hash mode is indicated with valid vnic_id and rss_ctx_idx,
+ * if FW is VNIC_RSS_HASH_MODE capable. FW configures the mode based
+ * on first come first serve order. Global RSS hash mode and VNIC RSS
+ * hash modes are mutually exclusive. FW returns an invalid error
+ * code if it receives conflicting requests. To change the current
+ * hash mode, the drivers associated with that mode must be unloaded
+ * before the new configuration is applied.
+ */
+ uint8_t hash_mode_flags;
+ /*
+ * When this bit is '1' and FW is VNIC_RSS_HASH_MODE capable,
+ * innermost_4 and innermost_2 hash modes are used to configure
+ * the tuple mode. When this bit is '1' and FW is not
+ * VNIC_RSS_HASH_MODE capable, it indicates that the current RSS hash
+ * mode setting configured in the device is used.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 4 tuples {l3.src, l3.dest,
+ * l4.src, l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4 \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest,
+ * t_l4.src, t_l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 \
+ UINT32_C(0x10)
+ /* This is the address for rss ring group table */
+ uint64_t ring_grp_tbl_addr;
+ /* This is the address for rss hash key table */
+ uint64_t hash_key_tbl_addr;
+ /* Index to the rss indirection table. */
+ uint16_t rss_ctx_idx;
+ uint8_t flags;
+ /*
+ * When this bit is '1', it indicates that the hash_type field is
+ * interpreted as a change relative to the current configuration. Each
+ * '1' bit in hash_type represents a header to add to the current
+ * hash. Zeroes designate the hash_type state bits that should remain
+ * unchanged, if possible. If this constraint on the existing state
+ * cannot be satisfied, then the implementation should prefer
+ * adding other headers so as to honor the request to add the
+ * specified headers. It is an error to set this flag concurrently
+ * with hash_type_exclude.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_FLAGS_HASH_TYPE_INCLUDE \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', it indicates that the hash_type field is
+ * interpreted as a change relative to the current configuration. Each
+ * '1' bit in hash_type represents a header to remove from the
+ * current hash. Zeroes designate the hash_type state bits that
+ * should remain unchanged, if possible. If this constraint on the
+ * existing state cannot be satisfied, then the implementation should
+ * prefer removing other headers so as to honor the request to
+ * remove the specified headers. It is an error to set this flag
+ * concurrently with hash_type_include.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_FLAGS_HASH_TYPE_EXCLUDE \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', it indicates that the host drivers support
+ * setting ipsec hash_types.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT \
+ UINT32_C(0x4)
+ uint8_t ring_select_mode;
+ /*
+ * In this mode, HW uses Toeplitz algorithm and provided Toeplitz
+ * hash key to hash the packets according to the configured hash
+ * type and hash mode. The Toeplitz hash results and the provided
+ * Toeplitz RSS indirection table are used to determine the RSS
+ * rings.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ \
+ UINT32_C(0x0)
+ /*
+ * In this mode, HW uses XOR algorithm to hash the packets according
+ * to the configured hash type and hash mode. The XOR hash results
+ * and the provided XOR RSS indirection table are used to determine
+ * the RSS rings. The host driver provided hash key is not honored in
+ * this mode.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_XOR \
+ UINT32_C(0x1)
+ /*
+ * In this mode, HW uses the inner packet checksum algorithm to
+ * distribute the packets across the rings and the Toeplitz algorithm
+ * to calculate the hash conveyed in the RX completions. Host
+ * drivers should provide a Toeplitz hash key. As HW uses the
+ * innermost packet checksum to distribute the packets across the
+ * rings, host drivers can't use the hash mode to choose outer
+ * headers for the Toeplitz hash calculation. FW will fail such a
+ * configuration.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ_CHECKSUM \
+ UINT32_C(0x2)
+ #define HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_LAST \
+ HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ uint8_t unused_1[4];
} __rte_packed;
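+
+/*
+ * Editorial sketch, not part of the auto-generated HWRM interface: one way
+ * a driver might populate hwrm_vnic_rss_cfg_input for plain Toeplitz RSS
+ * over IPv4/IPv6 TCP and UDP flows. The bnxt_example_* name is made up for
+ * illustration, the caller is assumed to own the DMA-able ring group and
+ * hash key tables, and <rte_byteorder.h> is assumed to be visible for the
+ * rte_cpu_to_le_*() helpers. Sending the request is driver specific and
+ * omitted here.
+ */
+static inline void
+bnxt_example_prep_vnic_rss_cfg(struct hwrm_vnic_rss_cfg_input *req,
+			       uint16_t vnic_id, uint16_t rss_ctx_idx,
+			       uint64_t ring_grp_tbl_pa, uint64_t key_tbl_pa)
+{
+	/* Hash over the usual IPv4/IPv6 2-tuples and TCP/UDP 4-tuples. */
+	req->hash_type = rte_cpu_to_le_32(
+		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
+		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
+		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
+		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
+		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 |
+		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6);
+	req->vnic_id = rte_cpu_to_le_16(vnic_id);
+	req->rss_ctx_idx = rte_cpu_to_le_16(rss_ctx_idx);
+	/* Physical addresses of the indirection table and Toeplitz key. */
+	req->ring_grp_tbl_addr = rte_cpu_to_le_64(ring_grp_tbl_pa);
+	req->hash_key_tbl_addr = rte_cpu_to_le_64(key_tbl_pa);
+	/* Plain Toeplitz ring selection; the key above is honored. */
+	req->ring_select_mode =
+		HWRM_VNIC_RSS_CFG_INPUT_RING_SELECT_MODE_TOEPLITZ;
+}
+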
-/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
+struct hwrm_vnic_rss_cfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -40566,26 +44198,45 @@ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* rss_cos_lb_ctx_id is 16 b */
- uint16_t rss_cos_lb_ctx_id;
- uint8_t unused_0[5];
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/*********************************
- * hwrm_vnic_rss_cos_lb_ctx_free *
- *********************************/
+/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_rss_cfg_cmd_err {
+ /*
+ * Command specific error codes that go to
+ * the cmd_err field in the Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN \
+ UINT32_C(0x0)
+ /*
+ * Unable to change global RSS mode to outer because not all active
+ * interfaces are ready to support outer RSS hashing.
+ */
+ #define HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY \
+ UINT32_C(0x1)
+ #define HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_LAST \
+ HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY
+ uint8_t unused_0[7];
+} __rte_packed;
+
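+/*
+ * Editorial sketch, not part of the auto-generated HWRM interface: mapping
+ * the command-specific error code of HWRM_VNIC_RSS_CFG to a log string.
+ * The bnxt_example_* name is made up; how the cmd_err byte is extracted
+ * from the common HWRM error response is driver specific.
+ */
+static inline const char *
+bnxt_example_vnic_rss_cfg_err_str(uint8_t code)
+{
+	switch (code) {
+	case HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY:
+		return "active interfaces not ready for outer RSS hashing";
+	case HWRM_VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN:
+	default:
+		return "unknown error";
+	}
+}
+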
+/**********************
+ * hwrm_vnic_rss_qcfg *
+ **********************/
-/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
-struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_rss_qcfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -40614,13 +44265,21 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* rss_cos_lb_ctx_id is 16 b */
- uint16_t rss_cos_lb_ctx_id;
- uint8_t unused_0[6];
+ /*
+ * Index to the rss indirection table. This field is used as a lookup
+ * for chips before Thor - i.e. Cumulus and Whitney.
+ */
+ uint16_t rss_ctx_idx;
+ /*
+ * VNIC ID of VNIC associated with RSS table being queried. This field
+ * is used as a lookup for Thor and later chips.
+ */
+ uint16_t vnic_id;
+ uint8_t unused_0[4];
} __rte_packed;
-/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
-struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
+struct hwrm_vnic_rss_qcfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -40629,24 +44288,197 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ uint32_t hash_type;
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv4 addresses of IPv4
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV4 \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of TCP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV4 \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of UDP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV4 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv6 addresses of IPv6
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV6 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of TCP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV6 \
+ UINT32_C(0x10)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of UDP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV6 \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source, destination IPv6 addresses and flow label of IPv6
+ * packets. Hash type ipv6 and ipv6_flow_label are mutually
+ * exclusive. HW does not include the flow_label in hash
+ * calculation for the packets that are matching tcp_ipv6 and
+ * udp_ipv6 hash types. This bit will be '0' if
+ * rss_ipv6_flow_label_cap is '0'.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV6_FLOW_LABEL \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv4 addresses and IPSEC AH SPI field of IPSEC
+ * AH/IPv4 packets. This bit will be '0' if rss_ipsec_ah_spi_ipv4_cap
+ * is '0'.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_AH_SPI_IPV4 \
+ UINT32_C(0x80)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv4 addresses and IPSEC ESP SPI field of IPSEC
+ * ESP/IPv4 packets. This bit will be '0' if
+ * rss_ipsec_esp_spi_ipv4_cap is '0'.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_ESP_SPI_IPV4 \
+ UINT32_C(0x100)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv6 addresses and IPSEC AH SPI field of IPSEC
+ * AH/IPv6 packets. This bit will be '0' if
+ * rss_ipsec_ah_spi_ipv6_cap is '0'.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_AH_SPI_IPV6 \
+ UINT32_C(0x200)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv6 addresses and IPSEC ESP SPI field of IPSEC
+ * ESP/IPv6 packets. This bit will be '0' if
+ * rss_ipsec_esp_spi_ipv6_cap is '0'.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_ESP_SPI_IPV6 \
+ UINT32_C(0x400)
+ uint8_t unused_0[4];
+ /* This is the value of rss hash key */
+ uint32_t hash_key[10];
+ /*
+ * Flags to specify different RSS hash modes. Setting rss_ctx_idx to
+ * the value of 0xffff implies a global RSS configuration query.
+ * hash_mode_flags are only valid for a global RSS configuration
+ * query. Only the PF can initiate a global RSS configuration query.
+ * The query request fails if any VNIC is configured with a hash mode
+ * and rss_ctx_idx is 0xffff.
+ */
+ uint8_t hash_mode_flags;
+ /*
+ * When this bit is '1' and FW is VNIC_RSS_HASH_MODE capable,
+ * it indicates the VNIC's configured RSS hash mode.
+ * When this bit is '1' and FW is not VNIC_RSS_HASH_MODE capable,
+ * it indicates that the current RSS hash mode setting configured in
+ * the device is used.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_DEFAULT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 4 tuples {l3.src, l3.dest,
+ * l4.src, l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_4 \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_2 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest,
+ * t_l4.src, t_l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_4 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_2 \
+ UINT32_C(0x10)
+ uint8_t ring_select_mode;
+ /*
+ * In this mode, HW uses Toeplitz algorithm and provided Toeplitz
+ * hash key to hash the packets according to the configured hash
+ * type and hash mode. The Toeplitz hash results and the provided
+ * Toeplitz RSS indirection table are used to determine the RSS
+ * rings.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_RING_SELECT_MODE_TOEPLITZ \
+ UINT32_C(0x0)
+ /*
+ * In this mode, HW uses XOR algorithm to hash the packets according
+ * to the configured hash type and hash mode. The XOR hash results
+ * and the provided XOR RSS indirection table are used to determine
+ * the RSS rings. The host driver provided hash key is not honored in
+ * this mode.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_RING_SELECT_MODE_XOR \
+ UINT32_C(0x1)
+ /*
+ * In this mode, HW uses the inner packet checksum algorithm to
+ * distribute the packets across the rings and the Toeplitz algorithm
+ * to calculate the hash conveyed in the RX completions. Host
+ * drivers should provide a Toeplitz hash key. As HW uses the
+ * innermost packet checksum to distribute the packets across the
+ * rings, host drivers can't use the hash mode to choose outer
+ * headers for the Toeplitz hash calculation. FW will fail such a
+ * configuration.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_RING_SELECT_MODE_TOEPLITZ_CHECKSUM \
+ UINT32_C(0x2)
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_RING_SELECT_MODE_LAST \
+ HWRM_VNIC_RSS_QCFG_OUTPUT_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ uint8_t unused_1[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
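+
+/*
+ * Editorial sketch, not part of the auto-generated HWRM interface: testing
+ * one bit of the queried RSS state. The bnxt_example_* name is made up and
+ * <rte_byteorder.h> is assumed to be visible for rte_le_to_cpu_32().
+ */
+static inline int
+bnxt_example_rss_qcfg_hashes_udp4(const struct hwrm_vnic_rss_qcfg_output *resp)
+{
+	/* hash_type is little-endian on the wire; test the UDP/IPv4 bit. */
+	return (rte_le_to_cpu_32(resp->hash_type) &
+		HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV4) != 0;
+}
+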
-/*******************
- * hwrm_ring_alloc *
- *******************/
+/**************************
+ * hwrm_vnic_plcmodes_cfg *
+ **************************/
-/* hwrm_ring_alloc_input (size:704b/88B) */
-struct hwrm_ring_alloc_input {
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
+struct hwrm_vnic_plcmodes_cfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -40675,25 +44507,489 @@ struct hwrm_ring_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t enables;
+ uint32_t flags;
/*
- * This bit must be '1' for the ring_arb_cfg field to be
- * configured.
+ * When this bit is '1', the VNIC shall be configured to
+ * use the regular placement algorithm.
+ * By default, the regular placement algorithm shall be
+ * enabled on the VNIC.
*/
- #define HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG \
- UINT32_C(0x2)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT \
+ UINT32_C(0x1)
/*
- * This bit must be '1' for the stat_ctx_id_valid field to be
- * configured.
+ * When this bit is '1', the VNIC shall be configured
+ * to use the jumbo placement algorithm.
*/
- #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID \
- UINT32_C(0x8)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT \
+ UINT32_C(0x2)
/*
- * This bit must be '1' for the max_bw_valid field to be
- * configured.
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for IPv4 packets according
+ * to the following rules:
+ * # If the packet is identified as TCP/IPv4, then the
+ * packet is split at the beginning of the TCP payload.
+ * # If the packet is identified as UDP/IPv4, then the
+ * packet is split at the beginning of UDP payload.
+ * # If the packet is identified as non-TCP and non-UDP
+ * IPv4 packet, then the packet is split at the beginning
+ * of the upper layer protocol header carried in the IPv4
+ * packet.
*/
- #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID \
- UINT32_C(0x20)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for IPv6 packets according
+ * to the following rules:
+ * # If the packet is identified as TCP/IPv6, then the
+ * packet is split at the beginning of the TCP payload.
+ * # If the packet is identified as UDP/IPv6, then the
+ * packet is split at the beginning of UDP payload.
+ * # If the packet is identified as non-TCP and non-UDP
+ * IPv6 packet, then the packet is split at the beginning
+ * of the upper layer protocol header carried in the IPv6
+ * packet.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for FCoE packets at the
+ * beginning of FC payload.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE \
+ UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for RoCE packets at the
+ * beginning of RoCE payload (after BTH/GRH headers).
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the VNIC shall be configured to use the virtio
+ * placement algorithm. This feature can only be configured when
+ * proxy mode is supported on the function.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_VIRTIO_PLACEMENT \
+ UINT32_C(0x40)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the jumbo_thresh_valid field to be
+ * configured.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the hds_offset_valid field to be
+ * configured.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the hds_threshold_valid field to be
+ * configured.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the max_bds_valid field to be
+ * configured.
+ */
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_MAX_BDS_VALID \
+ UINT32_C(0x8)
+ /* Logical vnic ID */
+ uint32_t vnic_id;
+ /*
+ * When jumbo placement algorithm is enabled, this value
+ * is used to determine the threshold for jumbo placement.
+ * Packets with length larger than this value will be
+ * placed according to the jumbo placement algorithm.
+ */
+ uint16_t jumbo_thresh;
+ /*
+ * This value is used to determine the offset into the
+ * packet buffer where the split data (payload) will be
+ * placed according to one of the HDS placement algorithms.
+ *
+ * The lengths of packet buffers provided for split data
+ * shall be larger than this value.
+ */
+ uint16_t hds_offset;
+ /*
+ * When one of the HDS placement algorithms is enabled, this
+ * value is used to determine the threshold for HDS
+ * placement.
+ * Packets with length larger than this value will be
+ * placed according to the HDS placement algorithm.
+ * This value shall be a multiple of 4 bytes.
+ */
+ uint16_t hds_threshold;
+ /*
+ * When virtio placement algorithm is enabled, this
+ * value is used to determine the maximum number of BDs
+ * that can be used to place an Rx Packet.
+ * If an incoming packet does not fit in the buffers described
+ * by the max BDs, the packet will be dropped and an error
+ * will be reported in the completion. Valid values for this
+ * field are between 1 and 8. If the VNIC uses header-data-
+ * separation and/or TPA with buffer spanning enabled, valid
+ * values for this field are between 2 and 8.
+ * This feature can only be configured when proxy mode is
+ * supported on the function.
+ */
+ uint16_t max_bds;
+ uint8_t unused_0[4];
+} __rte_packed;
+
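+/*
+ * Editorial sketch, not part of the auto-generated HWRM interface: asking
+ * for jumbo placement plus IPv4/IPv6 header-data split on a VNIC. The
+ * bnxt_example_* name and the threshold choice are illustrative only;
+ * <rte_byteorder.h> is assumed to be visible.
+ */
+static inline void
+bnxt_example_prep_plcmodes_cfg(struct hwrm_vnic_plcmodes_cfg_input *req,
+			       uint32_t vnic_id, uint16_t rx_buf_size)
+{
+	req->flags = rte_cpu_to_le_32(
+		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT |
+		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 |
+		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6);
+	req->enables = rte_cpu_to_le_32(
+		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID |
+		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID);
+	req->vnic_id = rte_cpu_to_le_32(vnic_id);
+	/* Packets longer than one Rx buffer use the jumbo algorithm. */
+	req->jumbo_thresh = rte_cpu_to_le_16(rx_buf_size);
+	/* The HDS threshold has to be a multiple of 4 bytes. */
+	req->hds_threshold = rte_cpu_to_le_16((uint16_t)(rx_buf_size & 0xfffc));
+}
+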
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/***************************
+ * hwrm_vnic_plcmodes_qcfg *
+ ***************************/
+
+
+/* hwrm_vnic_plcmodes_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_plcmodes_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical vnic ID */
+ uint32_t vnic_id;
+ uint8_t unused_0[4];
+} __rte_packed;
+
+/* hwrm_vnic_plcmodes_qcfg_output (size:192b/24B) */
+struct hwrm_vnic_plcmodes_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * use the regular placement algorithm.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_REGULAR_PLACEMENT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * use the jumbo placement algorithm.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_JUMBO_PLACEMENT \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for IPv4 packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for IPv6 packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for FCoE packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE \
+ UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for RoCE packets.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the VNIC is configured
+ * to be the default VNIC of the requesting function.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1', the VNIC is configured to use the virtio
+ * placement algorithm. This feature can only be configured when
+ * proxy mode is supported on the function.
+ */
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_VIRTIO_PLACEMENT \
+ UINT32_C(0x80)
+ /*
+ * When jumbo placement algorithm is enabled, this value
+ * is used to determine the threshold for jumbo placement.
+ * Packets with length larger than this value will be
+ * placed according to the jumbo placement algorithm.
+ */
+ uint16_t jumbo_thresh;
+ /*
+ * This value is used to determine the offset into the
+ * packet buffer where the split data (payload) will be
+ * placed according to one of the HDS placement algorithms.
+ *
+ * The lengths of packet buffers provided for split data
+ * shall be larger than this value.
+ */
+ uint16_t hds_offset;
+ /*
+ * When one of the HDS placement algorithms is enabled, this
+ * value is used to determine the threshold for HDS
+ * placement.
+ * Packets with length larger than this value will be
+ * placed according to the HDS placement algorithm.
+ * This value shall be a multiple of 4 bytes.
+ */
+ uint16_t hds_threshold;
+ /*
+ * When virtio placement algorithm is enabled, this
+ * value is used to determine the maximum number of BDs
+ * that can be used to place an Rx Packet.
+ * If an incoming packet does not fit in the buffers described
+ * by the max BDs, the packet will be dropped and an error
+ * will be reported in the completion. Valid values for this
+ * field are between 1 and 8. If the VNIC uses header-data-
+ * separation and/or TPA with buffer spanning enabled, valid
+ * values for this field are between 2 and 8.
+ * This feature can only be configured when proxy mode is supported
+ * on the function.
+ */
+ uint16_t max_bds;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
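+/*
+ * Editorial sketch, not part of the auto-generated HWRM interface: checking
+ * from the query response whether header-data split is active on a VNIC.
+ * The bnxt_example_* name is made up; <rte_byteorder.h> is assumed to be
+ * visible.
+ */
+static inline int
+bnxt_example_plcmodes_hds_enabled(const struct hwrm_vnic_plcmodes_qcfg_output *resp)
+{
+	uint32_t flags = rte_le_to_cpu_32(resp->flags);
+
+	return (flags & (HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 |
+			 HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6)) != 0;
+}
+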
+/**********************************
+ * hwrm_vnic_rss_cos_lb_ctx_alloc *
+ **********************************/
+
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __rte_packed;
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* rss_cos_lb_ctx_id is 16 b */
+ uint16_t rss_cos_lb_ctx_id;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/*********************************
+ * hwrm_vnic_rss_cos_lb_ctx_free *
+ *********************************/
+
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* rss_cos_lb_ctx_id is 16 b */
+ uint16_t rss_cos_lb_ctx_id;
+ uint8_t unused_0[6];
+} __rte_packed;
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
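+/*
+ * Editorial sketch, not part of the auto-generated HWRM interface: the
+ * rss_cos_lb_ctx alloc request carries no payload beyond the standard
+ * header, so the driver only has to remember the returned context ID and
+ * hand it back on free. The bnxt_example_* name is made up; the alloc
+ * response is assumed to have been copied out of the HWRM response buffer
+ * already.
+ */
+static inline void
+bnxt_example_prep_ctx_free(struct hwrm_vnic_rss_cos_lb_ctx_free_input *req,
+			   const struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp)
+{
+	/* Both fields are little-endian, so the ID is echoed back as-is. */
+	req->rss_cos_lb_ctx_id = resp->rss_cos_lb_ctx_id;
+}
+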
+/*******************
+ * hwrm_ring_alloc *
+ *******************/
+
+
+/* hwrm_ring_alloc_input (size:704b/88B) */
+struct hwrm_ring_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the ring_arb_cfg field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the stat_ctx_id_valid field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the max_bw_valid field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID \
+ UINT32_C(0x20)
/*
* This bit must be '1' for the rx_ring_id field to be
* configured.
@@ -40872,7 +45168,7 @@ struct hwrm_ring_alloc_input {
/* Used by a PF driver to associate a SCHQ with one of its TX rings. */
uint16_t schq_id;
/*
- * Number of 16B units in the ring. Minimum size for
+ * Number of 16B units in the ring. Minimum size for
* a ring is 16 16B entries.
*/
uint32_t length;
@@ -41109,9 +45405,9 @@ struct hwrm_ring_alloc_output {
uint8_t unused_0[2];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -41214,9 +45510,9 @@ struct hwrm_ring_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -41268,7 +45564,7 @@ struct hwrm_ring_reset_input {
/* RoCE Notification Completion Ring (ROCE_CR) */
#define HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
/*
- * Rx Ring Group. This is to reset rx and aggregation in an atomic
+ * Rx Ring Group. This is to reset rx and aggregation in an atomic
* operation. Completion ring associated with this ring group is
* not reset.
*/
@@ -41312,9 +45608,9 @@ struct hwrm_ring_reset_output {
uint8_t consumer_idx[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -41472,7 +45768,7 @@ struct hwrm_ring_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -41614,7 +45910,7 @@ struct hwrm_ring_qcfg_output {
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -41702,8 +45998,8 @@ struct hwrm_ring_aggint_qcaps_output {
#define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_NUM_CMPL_DMA_AGGR \
UINT32_C(0x10)
/*
- * When this bit is set to '1', num_cmpl_dma_aggr_during_int can be configured
- * on completion rings.
+ * When this bit is set to '1', num_cmpl_dma_aggr_during_int can be
+ * configured on completion rings.
*/
#define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT \
UINT32_C(0x20)
@@ -41714,8 +46010,8 @@ struct hwrm_ring_aggint_qcaps_output {
#define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_CMPL_AGGR_DMA_TMR \
UINT32_C(0x40)
/*
- * When this bit is set to '1', cmpl_aggr_dma_tmr_during_int can be configured
- * on completion rings.
+ * When this bit is set to '1', cmpl_aggr_dma_tmr_during_int can be
+ * configured on completion rings.
*/
#define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT \
UINT32_C(0x80)
@@ -41765,9 +46061,9 @@ struct hwrm_ring_aggint_qcaps_output {
uint8_t unused_0[1];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -41883,9 +46179,9 @@ struct hwrm_ring_cmpl_ring_qaggint_params_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -41995,8 +46291,8 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR \
UINT32_C(0x1)
/*
- * This bit must be '1' for the num_cmpl_dma_aggr_during_int field to be
- * configured.
+ * This bit must be '1' for the num_cmpl_dma_aggr_during_int field to
+ * be configured.
*/
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT \
UINT32_C(0x2)
@@ -42040,9 +46336,9 @@ struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -42095,7 +46391,7 @@ struct hwrm_ring_grp_alloc_input {
uint16_t rr;
/*
* This value identifies the aggregation RR associated with
- * the ring group. If this value is 0xFF... (All Fs), then no
+ * the ring group. If this value is 0xFF... (All Fs), then no
* Aggregation ring will be set.
*/
uint16_t ar;
@@ -42117,7 +46413,7 @@ struct hwrm_ring_grp_alloc_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
/*
- * This is the ring group ID value. Use this value to program
+ * This is the ring group ID value. Use this value to program
* the default ring group for the VNIC or as table entries
* in an RSS/COS context.
*/
@@ -42125,9 +46421,9 @@ struct hwrm_ring_grp_alloc_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -42186,9 +46482,9 @@ struct hwrm_ring_grp_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -42760,9 +47056,9 @@ struct hwrm_ring_schq_alloc_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -42819,8 +47115,8 @@ struct hwrm_ring_schq_cfg_input {
/* The tc_max_bw array and the max_bw parameters are valid */
#define HWRM_RING_SCHQ_CFG_INPUT_FLAGS_TC_MAX_BW_ENABLED \
UINT32_C(0x1)
- /* The tc_min_bw array is valid */
- #define HWRM_RING_SCHQ_CFG_INPUT_FLAGS_TC_MIN_BW_ENABLED \
+ /* The tc_bw_reservation array is valid */
+ #define HWRM_RING_SCHQ_CFG_INPUT_FLAGS_TC_RESERVATION_ENABLED \
UINT32_C(0x2)
/* Maximum bandwidth of the traffic class, specified in Mbps. */
uint32_t max_bw_tc0;
@@ -42839,61 +47135,61 @@ struct hwrm_ring_schq_cfg_input {
/* Maximum bandwidth of the traffic class, specified in Mbps. */
uint32_t max_bw_tc7;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc0;
+ uint32_t tc_bw_reservation0;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc1;
+ uint32_t tc_bw_reservation1;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc2;
+ uint32_t tc_bw_reservation2;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc3;
+ uint32_t tc_bw_reservation3;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc4;
+ uint32_t tc_bw_reservation4;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc5;
+ uint32_t tc_bw_reservation5;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc6;
+ uint32_t tc_bw_reservation6;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc7;
+ uint32_t tc_bw_reservation7;
/*
* Indicates the max bandwidth for all enabled traffic classes in
* this SCHQ, specified in Mbps.
@@ -42915,9 +47211,9 @@ struct hwrm_ring_schq_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -42976,9 +47272,9 @@ struct hwrm_ring_schq_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -43564,7 +47860,7 @@ struct hwrm_cfa_l2_filter_free_output {
**************************/
-/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
+/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */
struct hwrm_cfa_l2_filter_cfg_input {
/* The HWRM command request type. */
uint16_t req_type;
@@ -43622,7 +47918,7 @@ struct hwrm_cfa_l2_filter_cfg_input {
*/
#define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_MASK \
UINT32_C(0xc)
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_SFT 2
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_SFT 2
/* To support old drivers */
#define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_NO_ROCE_L2 \
(UINT32_C(0x0) << 2)
@@ -43634,6 +47930,24 @@ struct hwrm_cfa_l2_filter_cfg_input {
(UINT32_C(0x2) << 2)
#define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_LAST \
HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_ROCE
+ /*
+ * Enumeration denoting how the L2 Context TCAM remap operation is
+ * updated.
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_MASK \
+ UINT32_C(0x30)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_SFT 4
+ /* No change to remap opcode */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_NO_UPDATE \
+ (UINT32_C(0x0) << 4)
+ /* Bypass CFA Lookup */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_BYPASS_LKUP \
+ (UINT32_C(0x1) << 4)
+ /* Enable CFA Lookup */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_ENABLE_LKUP \
+ (UINT32_C(0x2) << 4)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_LAST \
+ HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_ENABLE_LKUP
uint32_t enables;
/*
* This bit must be '1' for the dst_id field to be
@@ -43647,6 +47961,18 @@ struct hwrm_cfa_l2_filter_cfg_input {
*/
#define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the prof_func field to be configured in
+ * the remap entry.
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_PROF_FUNC \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the l2_context_id field to be configured
+ * in the remap entry.
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_L2_CONTEXT_ID \
+ UINT32_C(0x8)
/*
* This value identifies a set of CFA data structures used for an L2
* context.
@@ -43664,6 +47990,18 @@ struct hwrm_cfa_l2_filter_cfg_input {
* mirrored.
*/
uint32_t new_mirror_vnic_id;
+ /*
+ * Profile function value to be programmed into the L2 context entry's
+ * remap. This will be used by the host application to program the CFA
+ * Profile TCAM entry for further classification.
+ */
+ uint32_t prof_func;
+ /*
+ * L2 context ID value to be programmed into the L2 context entry's
+ * remap. This will be used by the host application to program the CFA
+ * Lookup entry for further classification.
+ */
+ uint32_t l2_context_id;
} __rte_packed;
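+
+/*
+ * Editorial sketch, not part of the auto-generated HWRM interface: using
+ * the remap-op flag and the prof_func/l2_context_id enables added above to
+ * re-point an existing L2 filter at host-managed CFA structures. The
+ * bnxt_example_* name is made up, l2_filter_id is assumed to be the
+ * pre-existing handle field of this structure (from a prior filter alloc),
+ * and <rte_byteorder.h> is assumed to be visible.
+ */
+static inline void
+bnxt_example_prep_l2_filter_remap(struct hwrm_cfa_l2_filter_cfg_input *req,
+				  uint64_t l2_filter_id, uint32_t prof_func,
+				  uint32_t l2_context_id)
+{
+	req->l2_filter_id = rte_cpu_to_le_64(l2_filter_id);
+	/* Re-enable the CFA lookup for traffic hitting this filter. */
+	req->flags = rte_cpu_to_le_32(
+		HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_ENABLE_LKUP);
+	req->enables = rte_cpu_to_le_32(
+		HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_PROF_FUNC |
+		HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_L2_CONTEXT_ID);
+	req->prof_func = rte_cpu_to_le_32(prof_func);
+	req->l2_context_id = rte_cpu_to_le_32(l2_context_id);
+}
+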
/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
@@ -45162,8 +49500,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
/*
* Setting of this flag indicates that the dst_id field contains RFS
* ring table index. If this is not set it indicates dst_id is VNIC
- * or VPORT or function ID. Note dest_fid and dest_rfs_ring_idx
- * can't be set at the same time. Updated drivers should pass ring
+ * or VPORT or function ID. Note dest_fid and dest_rfs_ring_idx
+ * can't be set at the same time. Updated drivers should pass ring
* idx in the rfs_ring_tbl_idx field if the firmware indicates
* support for the new field in the HWRM_CFA_ADV_FLOW_MGMT_QCAPS
* response.
@@ -45172,7 +49510,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
UINT32_C(0x20)
/*
* Setting of this flag indicates that when the ntuple filter is
- * created, the L2 context should not be used in the filter. This
+ * created, the L2 context should not be used in the filter. This
* allows packet from different L2 contexts to match and be directed
* to the same destination.
*/
@@ -45327,7 +49665,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * The value of protocol filed in IP header.
+ * The value of protocol field in IP header.
* Applies to UDP and TCP traffic.
* 6 - TCP
* 17 - UDP
@@ -45712,14 +50050,14 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
/*
* Setting of this flag indicates that the new_dst_id field contains
* RFS ring table index. If this is not set it indicates new_dst_id
- * is VNIC or VPORT or function ID. Note dest_fid and
- * dest_rfs_ring_idx can’t be set at the same time.
+ * is VNIC or VPORT or function ID. Note dest_fid and
+ * dest_rfs_ring_idx can't be set at the same time.
*/
#define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_FLAGS_DEST_RFS_RING_IDX \
UINT32_C(0x2)
/*
* Setting of this flag indicates that when the ntuple filter is
- * created, the L2 context should not be used in the filter. This
+ * created, the L2 context should not be used in the filter. This
* allows packet from different L2 contexts to match and be directed
* to the same destination.
*/
@@ -46083,7 +50421,7 @@ struct hwrm_cfa_em_flow_alloc_input {
#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * The value of protocol filed in IP header.
+ * The value of protocol field in IP header.
* Applies to UDP and TCP traffic.
* 6 - TCP
* 17 - UDP
@@ -47548,7 +51886,7 @@ struct hwrm_cfa_decap_filter_alloc_input {
#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * The value of protocol filed in IP header.
+ * The value of protocol field in IP header.
* Applies to UDP and TCP traffic.
* 6 - TCP
* 17 - UDP
@@ -48391,13 +52729,1752 @@ struct hwrm_cfa_flow_key_data {
uint32_t l4_key_mask[2];
} __rte_packed;
-/**********************
- * hwrm_cfa_flow_info *
- **********************/
+/**********************
+ * hwrm_cfa_flow_info *
+ **********************/
+
+
+/* hwrm_cfa_flow_info_input (size:256b/32B) */
+struct hwrm_cfa_flow_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Flow record index. */
+ uint16_t flow_handle;
+ /* Max flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_MASK \
+ UINT32_C(0xfff)
+ /* CNP flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT \
+ UINT32_C(0x1000)
+ /* RoCEv1 flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT \
+ UINT32_C(0x2000)
+ /* NIC flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_NIC_TX \
+ UINT32_C(0x3000)
+ /* RoCEv2 flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT \
+ UINT32_C(0x4000)
+ /* Direction rx = 1 */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX \
+ UINT32_C(0x8000)
+ /* CNP flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT_RX \
+ UINT32_C(0x9000)
+ /* RoCEv1 flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT_RX \
+ UINT32_C(0xa000)
+ /* NIC flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_NIC_RX \
+ UINT32_C(0xb000)
+ /* RoCEv2 flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT_RX \
+ UINT32_C(0xc000)
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_LAST \
+ HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT_RX
+ uint8_t unused_0[6];
+ /* This value identifies a set of CFA data structures used for a flow. */
+ uint64_t ext_flow_handle;
+} __rte_packed;
+
+/* hwrm_cfa_flow_info_output (size:5632b/704B) */
+struct hwrm_cfa_flow_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t flags;
+ /* When set to 1, indicates the configuration is the TX flow. */
+ #define HWRM_CFA_FLOW_INFO_OUTPUT_FLAGS_PATH_TX UINT32_C(0x1)
+ /* When set to 1, indicates the configuration is the RX flow. */
+ #define HWRM_CFA_FLOW_INFO_OUTPUT_FLAGS_PATH_RX UINT32_C(0x2)
+ /* profile is 8 b */
+ uint8_t profile;
+ /* src_fid is 16 b */
+ uint16_t src_fid;
+ /* dst_fid is 16 b */
+ uint16_t dst_fid;
+ /* l2_ctxt_id is 16 b */
+ uint16_t l2_ctxt_id;
+ /* em_info is 64 b */
+ uint64_t em_info;
+ /* tcam_info is 64 b */
+ uint64_t tcam_info;
+ /* vfp_tcam_info is 64 b */
+ uint64_t vfp_tcam_info;
+ /* ar_id is 16 b */
+ uint16_t ar_id;
+ /* flow_handle is 16 b */
+ uint16_t flow_handle;
+ /* tunnel_handle is 32 b */
+ uint32_t tunnel_handle;
+ /* The flow aging timer for the flow, the unit is 100 milliseconds */
+ uint16_t flow_timer;
+ uint8_t unused_0[6];
+ /* Flow associated L2, L3 and L4 headers info. */
+ uint32_t flow_key_data[130];
+ /* Flow associated action record info. */
+ uint32_t flow_action_info[30];
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
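The 16-bit legacy flow_handle above packs a 12-bit record index together with type and direction bits; newer callers identify the flow via ext_flow_handle instead. As a hypothetical illustration (these helpers are not part of this patch), the handle can be decoded with the masks defined above:

/* Hypothetical decode helpers for the legacy 16-bit flow_handle. */
static inline uint16_t
bnxt_cfa_flow_handle_index(uint16_t flow_handle)
{
	/* The low 12 bits are the flow record index. */
	return flow_handle & HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_MASK;
}

static inline bool
bnxt_cfa_flow_handle_is_rx(uint16_t flow_handle)
{
	/* Bit 15 selects the RX direction. */
	return !!(flow_handle & HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
}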
+
+/***********************
+ * hwrm_cfa_flow_flush *
+ ***********************/
+
+
+/* hwrm_cfa_flow_flush_input (size:256b/32B) */
+struct hwrm_cfa_flow_flush_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* flags is 32 b */
+ uint32_t flags;
+ /*
+ * Set to 1 to indicate the page size, page layers, and
+ * flow_handle_table_dma_addr fields are valid. The flow flush
+ * operation should only flush the flows from the flow table
+	 * specified. This flag is set to 0 by older drivers. For older
+ * firmware, setting this flag has no effect.
+ */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_TABLE_VALID \
+ UINT32_C(0x1)
+ /*
+	 * Set to 1 to indicate that the flow flush operation should clean
+	 * up all the flows, meters, CFA context memory tables, etc. This
+	 * flag is set to 0 by older drivers. For older firmware, setting
+	 * this flag has no effect.
+ */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_RESET_ALL \
+ UINT32_C(0x2)
+ /*
+	 * Set to 1 to indicate that the flow flush operation should clean
+	 * up all the flows owned by the caller. This flag is set to 0 by
+	 * older drivers. For older firmware, setting this flag has no
+	 * effect.
+ */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_RESET_PORT \
+ UINT32_C(0x4)
+ /*
+ * Set to 1 to indicate the flow counter IDs are included in the
+ * flow table.
+ */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_INCL_FC \
+ UINT32_C(0x8000000)
+ /*
+ * This specifies the size of flow handle entries provided by the
+ * driver in the flow table specified below. Only two flow handle
+ * size enums are defined.
+ */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_MASK \
+ UINT32_C(0xc0000000)
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_SFT \
+ 30
+ /* The flow handle is 16bit */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_16BIT \
+ (UINT32_C(0x0) << 30)
+ /* The flow handle is 64bit */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_64BIT \
+ (UINT32_C(0x1) << 30)
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_LAST \
+ HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_64BIT
+ /* Specify page size of the flow table memory. */
+ uint8_t page_size;
+ /* The page size is 4K */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_4K UINT32_C(0x0)
+ /* The page size is 8K */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_8K UINT32_C(0x1)
+ /* The page size is 64K */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_64K UINT32_C(0x4)
+ /* The page size is 256K */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_256K UINT32_C(0x6)
+ /* The page size is 1M */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_1M UINT32_C(0x8)
+ /* The page size is 2M */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_2M UINT32_C(0x9)
+ /* The page size is 4M */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_4M UINT32_C(0xa)
+ /* The page size is 1G */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_1G UINT32_C(0x12)
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_LAST \
+ HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_1G
+	/* Flow table memory indirect levels. */
+ uint8_t page_level;
+ /* PBL pointer is physical start address. */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1)
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2)
+ #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LAST \
+ HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LVL_2
+ /* number of flows in the flow table */
+ uint16_t num_flows;
+ /* Pointer to the PBL, or PDL depending on number of levels */
+ uint64_t page_dir;
+} __rte_packed;
+
+/* hwrm_cfa_flow_flush_output (size:128b/16B) */
+struct hwrm_cfa_flow_flush_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
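As a minimal sketch of how a caller might use this command (assuming the driver's existing HWRM request plumbing -- HWRM_PREP(), bnxt_hwrm_send_message(), HWRM_CHECK_RESULT() and HWRM_UNLOCK() -- and a hypothetical wrapper name that is not part of this patch), a "reset all" flush only needs the flags field populated:

static int bnxt_hwrm_cfa_flow_flush_all(struct bnxt *bp)
{
	struct hwrm_cfa_flow_flush_input req = {0};
	struct hwrm_cfa_flow_flush_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_CFA_FLOW_FLUSH, BNXT_USE_KONG(bp));
	/* Ask firmware to clean up flows, meters and CFA context memory. */
	req.flags =
		rte_cpu_to_le_32(HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_RESET_ALL);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
	HWRM_CHECK_RESULT();	/* consults resp->error_code */
	HWRM_UNLOCK();
	return rc;
}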
+
+/***********************
+ * hwrm_cfa_flow_stats *
+ ***********************/
+
+
+/* hwrm_cfa_flow_stats_input (size:640b/80B) */
+struct hwrm_cfa_flow_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Number of valid flows in this command. */
+ uint16_t num_flows;
+ /*
+ * Flow handle.
+ * For a listing of applicable flow_handle_0 values, see enumeration
+ * in hwrm_cfa_flow_info_input.
+ */
+ uint16_t flow_handle_0;
+ /*
+ * Flow handle.
+ * For a listing of applicable flow_handle_1 values, see enumeration
+ * in hwrm_cfa_flow_info_input.
+ */
+ uint16_t flow_handle_1;
+ /*
+ * Flow handle.
+ * For a listing of applicable flow_handle_2 values, see enumeration
+ * in hwrm_cfa_flow_info_input.
+ */
+ uint16_t flow_handle_2;
+ /*
+ * Flow handle.
+ * For a listing of applicable flow_handle_3 values, see enumeration
+ * in hwrm_cfa_flow_info_input.
+ */
+ uint16_t flow_handle_3;
+ /*
+ * Flow handle.
+ * For a listing of applicable flow_handle_4 values, see enumeration
+ * in hwrm_cfa_flow_info_input.
+ */
+ uint16_t flow_handle_4;
+ /*
+ * Flow handle.
+ * For a listing of applicable flow_handle_5 values, see enumeration
+ * in hwrm_cfa_flow_info_input.
+ */
+ uint16_t flow_handle_5;
+ /*
+ * Flow handle.
+ * For a listing of applicable flow_handle_6 values, see enumeration
+ * in hwrm_cfa_flow_info_input.
+ */
+ uint16_t flow_handle_6;
+ /*
+ * Flow handle.
+ * For a listing of applicable flow_handle_7 values, see enumeration
+ * in hwrm_cfa_flow_info_input.
+ */
+ uint16_t flow_handle_7;
+ /*
+ * Flow handle.
+ * For a listing of applicable flow_handle_8 values, see enumeration
+ * in hwrm_cfa_flow_info_input.
+ */
+ uint16_t flow_handle_8;
+ /*
+ * Flow handle.
+ * For a listing of applicable flow_handle_9 values, see enumeration
+ * in hwrm_cfa_flow_info_input.
+ */
+ uint16_t flow_handle_9;
+ uint8_t unused_0[2];
+ /* Flow ID of a flow. */
+ uint32_t flow_id_0;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_1;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_2;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_3;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_4;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_5;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_6;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_7;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_8;
+ /* Flow ID of a flow. */
+ uint32_t flow_id_9;
+} __rte_packed;
+
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
+struct hwrm_cfa_flow_stats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* packet_0 is 64 b */
+ uint64_t packet_0;
+ /* packet_1 is 64 b */
+ uint64_t packet_1;
+ /* packet_2 is 64 b */
+ uint64_t packet_2;
+ /* packet_3 is 64 b */
+ uint64_t packet_3;
+ /* packet_4 is 64 b */
+ uint64_t packet_4;
+ /* packet_5 is 64 b */
+ uint64_t packet_5;
+ /* packet_6 is 64 b */
+ uint64_t packet_6;
+ /* packet_7 is 64 b */
+ uint64_t packet_7;
+ /* packet_8 is 64 b */
+ uint64_t packet_8;
+ /* packet_9 is 64 b */
+ uint64_t packet_9;
+ /* byte_0 is 64 b */
+ uint64_t byte_0;
+ /* byte_1 is 64 b */
+ uint64_t byte_1;
+ /* byte_2 is 64 b */
+ uint64_t byte_2;
+ /* byte_3 is 64 b */
+ uint64_t byte_3;
+ /* byte_4 is 64 b */
+ uint64_t byte_4;
+ /* byte_5 is 64 b */
+ uint64_t byte_5;
+ /* byte_6 is 64 b */
+ uint64_t byte_6;
+ /* byte_7 is 64 b */
+ uint64_t byte_7;
+ /* byte_8 is 64 b */
+ uint64_t byte_8;
+ /* byte_9 is 64 b */
+ uint64_t byte_9;
+ /*
+ * If a flow has been hit, the bit representing the flow will be 1.
+ * Likewise, if a flow has not, the bit representing the flow
+ * will be 0. Mapping will match flow numbers where bitX is for flowX
+ * (ex: bit 0 is flow0). This only applies for NIC flows. Upon
+ * reading of the flow, the bit will be cleared for the flow and only
+ * set again when traffic is received by the flow.
+ */
+ uint16_t flow_hits;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
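The flow_hits bitmap is read-to-clear, with one bit per flow slot of the request. A small hypothetical helper (not part of this patch, assuming the usual rte_byteorder accessors) shows how a caller would test slot n of a response:

/*
 * Returns true if flow slot 'n' (0..9) of the response saw traffic since
 * the previous hwrm_cfa_flow_stats read; the read clears the bit.
 */
static inline bool
bnxt_cfa_flow_stats_hit(const struct hwrm_cfa_flow_stats_output *resp,
			unsigned int n)
{
	return (rte_le_to_cpu_16(resp->flow_hits) >> n) & 1;
}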
+
+/***********************************
+ * hwrm_cfa_flow_aging_timer_reset *
+ ***********************************/
+
+
+/* hwrm_cfa_flow_aging_timer_reset_input (size:256b/32B) */
+struct hwrm_cfa_flow_aging_timer_reset_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Flow record index. */
+ uint16_t flow_handle;
+ uint8_t unused_0[2];
+ /*
+ * New flow timer value for the flow specified in the ext_flow_handle.
+ * The flow timer unit is 100ms.
+ */
+ uint32_t flow_timer;
+ /* This value identifies a set of CFA data structures used for a flow. */
+ uint64_t ext_flow_handle;
+} __rte_packed;
+
+/* hwrm_cfa_flow_aging_timer_reset_output (size:128b/16B) */
+struct hwrm_cfa_flow_aging_timer_reset_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/***************************
+ * hwrm_cfa_flow_aging_cfg *
+ ***************************/
+
+
+/* hwrm_cfa_flow_aging_cfg_input (size:384b/48B) */
+struct hwrm_cfa_flow_aging_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* The bit field to enable per flow aging configuration. */
+ uint16_t enables;
+ /*
+ * This bit must be '1' for the tcp flow timer field to be
+ * configured
+ */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_TCP_FLOW_TIMER \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the tcp finish timer field to be
+ * configured
+ */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_TCP_FIN_TIMER \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the udp flow timer field to be
+ * configured
+ */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_UDP_FLOW_TIMER \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the eem dma interval field to be
+ * configured
+ */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_DMA_INTERVAL \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the eem notice interval field to be
+ * configured
+ */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_NOTICE_INTERVAL \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the eem context memory maximum entries
+ * field to be configured
+ */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_CTX_MAX_ENTRIES \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the eem context memory ID field to be
+ * configured
+ */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_CTX_ID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the eem context memory type field to be
+ * configured
+ */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_CTX_MEM_TYPE \
+ UINT32_C(0x80)
+ uint8_t flags;
+ /* Enumeration denoting the RX, TX type of the resource. */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_RX
+ /*
+	 * Enumeration denoting whether EEM flow aging is enabled or
+	 * disabled.
+ */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM UINT32_C(0x2)
+	/* Disable EEM flow aging. */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM_DISABLE \
+ (UINT32_C(0x0) << 1)
+	/* Enable EEM flow aging. */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM_ENABLE \
+ (UINT32_C(0x1) << 1)
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM_LAST \
+ HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM_ENABLE
+ uint8_t unused_0;
+ /*
+ * The flow aging timer for all TCP flows, the unit is 100
+ * milliseconds.
+ */
+ uint32_t tcp_flow_timer;
+ /*
+ * The TCP finished timer for all TCP flows, the unit is 100
+ * milliseconds.
+ */
+ uint32_t tcp_fin_timer;
+ /*
+ * The flow aging timer for all UDP flows, the unit is 100
+ * milliseconds.
+ */
+ uint32_t udp_flow_timer;
+ /*
+ * The interval to dma eem ejection data to host memory, the unit is
+ * milliseconds.
+ */
+ uint16_t eem_dma_interval;
+ /*
+ * The interval to notify driver to read the eem ejection data, the
+ * unit is milliseconds.
+ */
+ uint16_t eem_notice_interval;
+ /* The maximum entries number in the eem context memory. */
+ uint32_t eem_ctx_max_entries;
+ /* The context memory ID for eem flow aging. */
+ uint16_t eem_ctx_id;
+ uint16_t eem_ctx_mem_type;
+ /*
+ * The content of context memory is eem ejection data, the size of
+ * each entry is 4 bytes.
+ */
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_EEM_CTX_MEM_TYPE_EJECTION_DATA \
+ UINT32_C(0x0)
+ #define HWRM_CFA_FLOW_AGING_CFG_INPUT_EEM_CTX_MEM_TYPE_LAST \
+ HWRM_CFA_FLOW_AGING_CFG_INPUT_EEM_CTX_MEM_TYPE_EJECTION_DATA
+ uint8_t unused_1[4];
+} __rte_packed;
+
+/* hwrm_cfa_flow_aging_cfg_output (size:128b/16B) */
+struct hwrm_cfa_flow_aging_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
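The timers above are expressed in 100 ms units, and only the fields flagged in enables are applied. A minimal sketch (hypothetical wrapper, same HWRM plumbing assumptions as above) that programs the RX-path TCP and UDP aging timers from values given in seconds:

static int bnxt_hwrm_cfa_flow_aging_timers(struct bnxt *bp,
					   uint32_t tcp_sec, uint32_t udp_sec)
{
	struct hwrm_cfa_flow_aging_cfg_input req = {0};
	struct hwrm_cfa_flow_aging_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_CFA_FLOW_AGING_CFG, BNXT_USE_KONG(bp));
	req.flags = HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_RX;
	req.enables = rte_cpu_to_le_16(
		HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_TCP_FLOW_TIMER |
		HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_UDP_FLOW_TIMER);
	/* Hardware unit is 100 ms, i.e. 10 ticks per second. */
	req.tcp_flow_timer = rte_cpu_to_le_32(tcp_sec * 10);
	req.udp_flow_timer = rte_cpu_to_le_32(udp_sec * 10);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}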
+
+/****************************
+ * hwrm_cfa_flow_aging_qcfg *
+ ****************************/
+
+
+/* hwrm_cfa_flow_aging_qcfg_input (size:192b/24B) */
+struct hwrm_cfa_flow_aging_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * The direction for the flow aging configuration, 1 is rx path, 2 is
+ * tx path.
+ */
+ uint8_t flags;
+ /* Enumeration denoting the RX, TX type of the resource. */
+ #define HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_RX
+ uint8_t unused_0[7];
+} __rte_packed;
+
+/* hwrm_cfa_flow_aging_qcfg_output (size:320b/40B) */
+struct hwrm_cfa_flow_aging_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * The current flow aging timer for all TCP flows, the unit is 100
+ * millisecond.
+ */
+ uint32_t tcp_flow_timer;
+ /*
+ * The current TCP finished timer for all TCP flows, the unit is 100
+ * millisecond.
+ */
+ uint32_t tcp_fin_timer;
+ /*
+ * The current flow aging timer for all UDP flows, the unit is 100
+ * millisecond.
+ */
+ uint32_t udp_flow_timer;
+ /*
+ * The interval to dma eem ejection data to host memory, the unit is
+ * milliseconds.
+ */
+ uint16_t eem_dma_interval;
+ /*
+ * The interval to notify driver to read the eem ejection data, the
+ * unit is milliseconds.
+ */
+ uint16_t eem_notice_interval;
+ /* The maximum entries number in the eem context memory. */
+ uint32_t eem_ctx_max_entries;
+ /* The context memory ID for eem flow aging. */
+ uint16_t eem_ctx_id;
+ /* The context memory type for eem flow aging. */
+ uint16_t eem_ctx_mem_type;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/*****************************
+ * hwrm_cfa_flow_aging_qcaps *
+ *****************************/
+
+
+/* hwrm_cfa_flow_aging_qcaps_input (size:192b/24B) */
+struct hwrm_cfa_flow_aging_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * The direction for the flow aging configuration, 1 is rx path, 2 is
+ * tx path.
+ */
+ uint8_t flags;
+ /* Enumeration denoting the RX, TX type of the resource. */
+ #define HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_RX
+ uint8_t unused_0[7];
+} __rte_packed;
+
+/* hwrm_cfa_flow_aging_qcaps_output (size:256b/32B) */
+struct hwrm_cfa_flow_aging_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * The maximum flow aging timer for all TCP flows, the unit is 100
+ * millisecond.
+ */
+ uint32_t max_tcp_flow_timer;
+ /*
+ * The maximum TCP finished timer for all TCP flows, the unit is 100
+ * millisecond.
+ */
+ uint32_t max_tcp_fin_timer;
+ /*
+ * The maximum flow aging timer for all UDP flows, the unit is 100
+ * millisecond.
+ */
+ uint32_t max_udp_flow_timer;
+ /* The maximum aging flows that HW can support. */
+ uint32_t max_aging_flows;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/**********************************
+ * hwrm_cfa_tcp_flag_process_qcfg *
+ **********************************/
+
+
+/* hwrm_cfa_tcp_flag_process_qcfg_input (size:128b/16B) */
+struct hwrm_cfa_tcp_flag_process_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __rte_packed;
+
+/* hwrm_cfa_tcp_flag_process_qcfg_output (size:192b/24B) */
+struct hwrm_cfa_tcp_flag_process_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The port 0 RX mirror action record ID. */
+ uint16_t rx_ar_id_port0;
+ /* The port 1 RX mirror action record ID. */
+ uint16_t rx_ar_id_port1;
+ /*
+ * The port 0 RX action record ID for TX TCP flag packets from
+ * loopback path.
+ */
+ uint16_t tx_ar_id_port0;
+ /*
+ * The port 1 RX action record ID for TX TCP flag packets from
+ * loopback path.
+ */
+ uint16_t tx_ar_id_port1;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/**************************
+ * hwrm_cfa_vf_pair_alloc *
+ **************************/
+
+
+/* hwrm_cfa_vf_pair_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vf_pair_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_a_id;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_b_id;
+ uint8_t unused_0[4];
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+} __rte_packed;
+
+/* hwrm_cfa_vf_pair_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vf_pair_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/*************************
+ * hwrm_cfa_vf_pair_free *
+ *************************/
+
+
+/* hwrm_cfa_vf_pair_free_input (size:384b/48B) */
+struct hwrm_cfa_vf_pair_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+} __rte_packed;
+
+/* hwrm_cfa_vf_pair_free_output (size:128b/16B) */
+struct hwrm_cfa_vf_pair_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/*************************
+ * hwrm_cfa_vf_pair_info *
+ *************************/
+
+
+/* hwrm_cfa_vf_pair_info_input (size:448b/56B) */
+struct hwrm_cfa_vf_pair_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If this flag is set, lookup by name else lookup by index. */
+ #define HWRM_CFA_VF_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
+ /* vf pair table index. */
+ uint16_t vf_pair_index;
+ uint8_t unused_0[2];
+ /* VF Pair name (32 byte string). */
+ char vf_pair_name[32];
+} __rte_packed;
+
+/* hwrm_cfa_vf_pair_info_output (size:512b/64B) */
+struct hwrm_cfa_vf_pair_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* vf pair table index. */
+ uint16_t next_vf_pair_index;
+ /* vf pair member a's vf_fid. */
+ uint16_t vf_a_fid;
+ /* vf pair member a's Linux logical VF number. */
+ uint16_t vf_a_index;
+ /* vf pair member b's vf_fid. */
+ uint16_t vf_b_fid;
+	/* vf pair member b's Linux logical VF number. */
+ uint16_t vf_b_index;
+ /* vf pair state. */
+ uint8_t pair_state;
+ /* Pair has been allocated */
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1)
+ /* Both pair members are active */
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2)
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \
+ HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE
+ uint8_t unused_0[5];
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/***********************
+ * hwrm_cfa_pair_alloc *
+ ***********************/
+
+
+/* hwrm_cfa_pair_alloc_input (size:576b/72B) */
+struct hwrm_cfa_pair_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair,
+ * 5-rep2fn_mod, 6-rep2fn_modall, 7-rep2fn_truflow).
+ */
+ uint16_t pair_mode;
+ /*
+ * Pair between VF on local host with PF or VF on specified host.
+ * (deprecated)
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_VF2FN \
+ UINT32_C(0x0)
+ /*
+ * Pair between REP on local host with PF or VF on specified host.
+ * (deprecated)
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN \
+ UINT32_C(0x1)
+ /*
+ * Pair between REP on local host with REP on specified host.
+ * (deprecated)
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2REP \
+ UINT32_C(0x2)
+ /* Pair for the proxy interface. (deprecated) */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PROXY \
+ UINT32_C(0x3)
+ /* Pair for the PF interface. (deprecated) */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PFPAIR \
+ UINT32_C(0x4)
+ /* Modify existing rep2fn pair and move pair to new PF. (deprecated) */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MOD \
+ UINT32_C(0x5)
+ /*
+ * Modify existing rep2fn pairs paired with same PF and move pairs
+ * to new PF. (deprecated)
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL \
+ UINT32_C(0x6)
+ /*
+ * Truflow pair between REP on local host with PF or VF on specified
+ * host.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_TRUFLOW \
+ UINT32_C(0x7)
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_LAST \
+ HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_TRUFLOW
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_a_id;
+ /* Logical Host (0xff-local host). */
+ uint8_t host_b_id;
+ /* Logical PF (0xff-PF for command channel). */
+ uint8_t pf_b_id;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_b_id;
+ /* Loopback port (0xff-internal loopback), valid for mode-3. */
+ uint8_t port_id;
+ /* Priority used for encap of loopback packets valid for mode-3. */
+ uint8_t pri;
+ /* New PF for rep2fn modify, valid for mode 5. */
+ uint16_t new_pf_fid;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the q_ab field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the q_ba field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the fc_ab field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the fc_ba field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID UINT32_C(0x8)
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+ /*
+ * The q_ab value specifies the logical index of the TX/RX CoS
+ * queue to be assigned for traffic in the A to B direction of
+ * the interface pair. The default value is 0.
+ */
+ uint8_t q_ab;
+ /*
+ * The q_ba value specifies the logical index of the TX/RX CoS
+ * queue to be assigned for traffic in the B to A direction of
+ * the interface pair. The default value is 1.
+ */
+ uint8_t q_ba;
+ /*
+ * Specifies whether RX ring flow control is disabled (0) or enabled
+ * (1) in the A to B direction. The default value is 0, meaning that
+ * packets will be dropped when the B-side RX rings are full.
+ */
+ uint8_t fc_ab;
+ /*
+ * Specifies whether RX ring flow control is disabled (0) or enabled
+ * (1) in the B to A direction. The default value is 1, meaning that
+ * the RX CoS queue will be flow controlled when the A-side RX rings
+ * are full.
+ */
+ uint8_t fc_ba;
+ uint8_t unused_1[4];
+} __rte_packed;
+
+/* hwrm_cfa_pair_alloc_output (size:192b/24B) */
+struct hwrm_cfa_pair_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Only valid for modes 1 and 2. */
+ uint16_t rx_cfa_code_a;
+ /* Only valid for modes 1 and 2. */
+ uint16_t tx_cfa_action_a;
+ /* Only valid for mode 2. */
+ uint16_t rx_cfa_code_b;
+ /* Only valid for mode 2. */
+ uint16_t tx_cfa_action_b;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
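Only the rep2fn_truflow mode is still current; the other pair modes are marked deprecated above. A hypothetical sketch (same HWRM plumbing assumptions as earlier, wrapper name invented for illustration) that pairs a local representor with VF vf_idx:

static int bnxt_hwrm_cfa_pair_alloc_sketch(struct bnxt *bp, uint16_t vf_idx,
					   const char *name)
{
	struct hwrm_cfa_pair_alloc_input req = {0};
	struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
	req.pair_mode = rte_cpu_to_le_16(
		HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_TRUFLOW);
	req.vf_b_id = rte_cpu_to_le_16(vf_idx);
	req.host_b_id = 0xff;	/* local host */
	req.pf_b_id = 0xff;	/* PF of the command channel */
	snprintf(req.pair_name, sizeof(req.pair_name), "%s", name);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}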
+
+/**********************
+ * hwrm_cfa_pair_free *
+ **********************/
+
+
+/* hwrm_cfa_pair_free_input (size:448b/56B) */
+struct hwrm_cfa_pair_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+ /* Logical PF (0xff-PF for command channel). */
+ uint8_t pf_b_id;
+ uint8_t unused_0[3];
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_id;
+ /*
+ * Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair,
+ * 5-rep2fn_mod, 6-rep2fn_modall, 7-rep2fn_truflow).
+ */
+ uint16_t pair_mode;
+ /*
+ * Pair between VF on local host with PF or VF on specified host.
+ * (deprecated)
+ */
+ #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
+ /*
+ * Pair between REP on local host with PF or VF on specified host.
+ * (deprecated)
+ */
+ #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
+ /*
+ * Pair between REP on local host with REP on specified host.
+ * (deprecated)
+ */
+ #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2REP UINT32_C(0x2)
+ /* Pair for the proxy interface. (deprecated) */
+ #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_PROXY UINT32_C(0x3)
+ /* Pair for the PF interface. (deprecated) */
+ #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_PFPAIR UINT32_C(0x4)
+ /* Modify existing rep2fn pair and move pair to new PF. (deprecated) */
+ #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_MOD UINT32_C(0x5)
+ /*
+ * Modify existing rep2fn pairs paired with same PF and move pairs
+ * to new PF. (deprecated)
+ */
+ #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_MODALL UINT32_C(0x6)
+ /*
+ * Truflow pair between REP on local host with PF or VF on
+ * specified host.
+ */
+ #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW UINT32_C(0x7)
+ #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_LAST \
+ HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW
+} __rte_packed;
+
+/* hwrm_cfa_pair_free_output (size:128b/16B) */
+struct hwrm_cfa_pair_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/**********************
+ * hwrm_cfa_pair_info *
+ **********************/
+
+
+/* hwrm_cfa_pair_info_input (size:448b/56B) */
+struct hwrm_cfa_pair_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If this flag is set, lookup by name else lookup by index. */
+ #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
+ /* If this flag is set, lookup by PF id and VF id. */
+ #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_REPRE UINT32_C(0x2)
+ /* Pair table index. */
+ uint16_t pair_index;
+ /* Pair pf index. */
+ uint8_t pair_pfid;
+ /* Pair vf index. */
+ uint8_t pair_vfid;
+ /* Pair name (32 byte string). */
+ char pair_name[32];
+} __rte_packed;
+
+/* hwrm_cfa_pair_info_output (size:576b/72B) */
+struct hwrm_cfa_pair_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Pair table index. */
+ uint16_t next_pair_index;
+ /* Pair member a's fid. */
+ uint16_t a_fid;
+ /* Logical host number. */
+ uint8_t host_a_index;
+ /* Logical PF number. */
+ uint8_t pf_a_index;
+ /* Pair member a's Linux logical VF number. */
+ uint16_t vf_a_index;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code_a;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action_a;
+ /* Pair member b's fid. */
+ uint16_t b_fid;
+ /* Logical host number. */
+ uint8_t host_b_index;
+ /* Logical PF number. */
+ uint8_t pf_b_index;
+	/* Pair member b's Linux logical VF number. */
+ uint16_t vf_b_index;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code_b;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action_b;
+ /* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair). */
+ uint8_t pair_mode;
+ /*
+ * Pair between VF on local host with PF or VF on specified host.
+ * (deprecated)
+ */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
+ /*
+ * Pair between REP on local host with PF or VF on specified host.
+ * (deprecated)
+ */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
+ /*
+ * Pair between REP on local host with REP on specified host.
+ * (deprecated)
+ */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2REP UINT32_C(0x2)
+ /* Pair for the proxy interface. (deprecated) */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PROXY UINT32_C(0x3)
+ /* Pair for the PF interface. (deprecated) */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR UINT32_C(0x4)
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_LAST \
+ HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR
+ /* Pair state. */
+ uint8_t pair_state;
+ /* Pair has been allocated */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1)
+ /* Both pair members are active */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2)
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \
+ HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE
+ /* Pair name (32 byte string). */
+ char pair_name[32];
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/**********************
+ * hwrm_cfa_vfr_alloc *
+ **********************/
+
+
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vfr_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_id;
+ /*
+	 * This field is reserved for future use.
+ * It shall be set to 0.
+ */
+ uint16_t reserved;
+ uint8_t unused_0[4];
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+} __rte_packed;
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
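A hypothetical sketch (same assumptions as the earlier examples) that allocates a representor for vf_id and returns the CFA handles the firmware assigned; the representor name chosen here is illustrative only:

static int bnxt_hwrm_cfa_vfr_alloc_sketch(struct bnxt *bp, uint16_t vf_id,
					  uint16_t *rx_cfa_code,
					  uint16_t *tx_cfa_action)
{
	struct hwrm_cfa_vfr_alloc_input req = {0};
	struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_CFA_VFR_ALLOC, BNXT_USE_CHIMP_MB);
	req.vf_id = rte_cpu_to_le_16(vf_id);
	snprintf(req.vfr_name, sizeof(req.vfr_name), "vfr%u", vf_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	/* The Rx CFA code / Tx CFA action steer representor traffic. */
	*rx_cfa_code = rte_le_to_cpu_16(resp->rx_cfa_code);
	*tx_cfa_action = rte_le_to_cpu_16(resp->tx_cfa_action);
	HWRM_UNLOCK();
	return rc;
}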
+
+/*********************
+ * hwrm_cfa_vfr_free *
+ *********************/
+
+
+/* hwrm_cfa_vfr_free_input (size:448b/56B) */
+struct hwrm_cfa_vfr_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_id;
+ /*
+	 * This field is reserved for future use.
+ * It shall be set to 0.
+ */
+ uint16_t reserved;
+ uint8_t unused_0[4];
+} __rte_packed;
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+/***************************************
+ * hwrm_cfa_redirect_query_tunnel_type *
+ ***************************************/
-/* hwrm_cfa_flow_info_input (size:256b/32B) */
-struct hwrm_cfa_flow_info_input {
+
+/* hwrm_cfa_redirect_query_tunnel_type_input (size:192b/24B) */
+struct hwrm_cfa_redirect_query_tunnel_type_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -48426,47 +54503,13 @@ struct hwrm_cfa_flow_info_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Flow record index. */
- uint16_t flow_handle;
- /* Max flow handle */
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_MASK \
- UINT32_C(0xfff)
- /* CNP flow handle */
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT \
- UINT32_C(0x1000)
- /* RoCEv1 flow handle */
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT \
- UINT32_C(0x2000)
- /* NIC flow handle */
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_NIC_TX \
- UINT32_C(0x3000)
- /* RoCEv2 flow handle */
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT \
- UINT32_C(0x4000)
- /* Direction rx = 1 */
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX \
- UINT32_C(0x8000)
- /* CNP flow handle */
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT_RX \
- UINT32_C(0x9000)
- /* RoCEv1 flow handle */
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT_RX \
- UINT32_C(0xa000)
- /* NIC flow handle */
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_NIC_RX \
- UINT32_C(0xb000)
- /* RoCEv2 flow handle */
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT_RX \
- UINT32_C(0xc000)
- #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_LAST \
- HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT_RX
+ /* The source function id. */
+ uint16_t src_fid;
uint8_t unused_0[6];
- /* This value identifies a set of CFA data structures used for a flow. */
- uint64_t ext_flow_handle;
} __rte_packed;
-/* hwrm_cfa_flow_info_output (size:5632b/704B) */
-struct hwrm_cfa_flow_info_output {
+/* hwrm_cfa_redirect_query_tunnel_type_output (size:128b/16B) */
+struct hwrm_cfa_redirect_query_tunnel_type_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -48475,39 +54518,60 @@ struct hwrm_cfa_flow_info_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t flags;
- /* When set to 1, indicates the configuration is the TX flow. */
- #define HWRM_CFA_FLOW_INFO_OUTPUT_FLAGS_PATH_TX UINT32_C(0x1)
- /* When set to 1, indicates the configuration is the RX flow. */
- #define HWRM_CFA_FLOW_INFO_OUTPUT_FLAGS_PATH_RX UINT32_C(0x2)
- /* profile is 8 b */
- uint8_t profile;
- /* src_fid is 16 b */
- uint16_t src_fid;
- /* dst_fid is 16 b */
- uint16_t dst_fid;
- /* l2_ctxt_id is 16 b */
- uint16_t l2_ctxt_id;
- /* em_info is 64 b */
- uint64_t em_info;
- /* tcam_info is 64 b */
- uint64_t tcam_info;
- /* vfp_tcam_info is 64 b */
- uint64_t vfp_tcam_info;
- /* ar_id is 16 b */
- uint16_t ar_id;
- /* flow_handle is 16 b */
- uint16_t flow_handle;
- /* tunnel_handle is 32 b */
- uint32_t tunnel_handle;
- /* The flow aging timer for the flow, the unit is 100 milliseconds */
- uint16_t flow_timer;
- uint8_t unused_0[6];
- /* Flow associated L2, L3 and L4 headers info. */
- uint32_t flow_key_data[130];
- /* Flow associated action record info. */
- uint32_t flow_action_info[30];
- uint8_t unused_1[7];
+ /* Tunnel Mask. */
+ uint32_t tunnel_mask;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_NONTUNNEL \
+ UINT32_C(0x1)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN \
+ UINT32_C(0x2)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_NVGRE \
+ UINT32_C(0x4)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_L2GRE \
+ UINT32_C(0x8)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_IPIP \
+ UINT32_C(0x10)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_GENEVE \
+ UINT32_C(0x20)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_MPLS \
+ UINT32_C(0x40)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_STT \
+ UINT32_C(0x80)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_IPGRE \
+ UINT32_C(0x100)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN_V4 \
+ UINT32_C(0x200)
+ /*
+ * Enhance Generic Routing Encapsulation (GRE version 1) inside IP
+ * datagram payload
+ */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_IPGRE_V1 \
+ UINT32_C(0x400)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_ANYTUNNEL \
+ UINT32_C(0x800)
+ /* Use fixed layer 2 ether type of 0xFFFF */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_L2_ETYPE \
+ UINT32_C(0x1000)
+ /*
+ * IPV6 over virtual eXtensible Local Area Network with GPE header
+ * (IPV6oVXLANGPE)
+ */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN_GPE_V6 \
+ UINT32_C(0x2000)
+ /* Generic Protocol Extension for VXLAN (VXLAN-GPE) */
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN_GPE \
+ UINT32_C(0x4000)
+ uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -48519,13 +54583,13 @@ struct hwrm_cfa_flow_info_output {
uint8_t valid;
} __rte_packed;
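+
+/*
+ * Illustrative sketch (editorial example, not part of the generated HWRM
+ * interface): decoding the tunnel_mask reported above. Response fields are
+ * little-endian, so the mask is converted before individual capability bits
+ * are tested; <rte_byteorder.h> is assumed to be available.
+ */
+static inline int
+bnxt_example_vxlan_redirect_supported(
+	const struct hwrm_cfa_redirect_query_tunnel_type_output *resp)
+{
+	uint32_t mask = rte_le_to_cpu_32(resp->tunnel_mask);
+
+	return (mask &
+		HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN) != 0;
+}
+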
-/***********************
- * hwrm_cfa_flow_flush *
- ***********************/
+/*************************
+ * hwrm_cfa_ctx_mem_rgtr *
+ *************************/
-/* hwrm_cfa_flow_flush_input (size:256b/32B) */
-struct hwrm_cfa_flow_flush_input {
+/* hwrm_cfa_ctx_mem_rgtr_input (size:256b/32B) */
+struct hwrm_cfa_ctx_mem_rgtr_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -48554,96 +54618,47 @@ struct hwrm_cfa_flow_flush_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* flags is 32 b */
- uint32_t flags;
- /*
- * Set to 1 to indicate the page size, page layers, and
- * flow_handle_table_dma_addr fields are valid. The flow flush
- * operation should only flush the flows from the flow table
- * specified. This flag is set to 0 by older driver. For older
- * firmware, setting this flag has no effect.
- */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_TABLE_VALID \
- UINT32_C(0x1)
- /*
- * Set to 1 to indicate flow flush operation to cleanup all the
- * flows, meters, CFA context memory tables etc. This flag is set to
- * 0 by older driver. For older firmware, setting this flag has no
- * effect.
- */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_RESET_ALL \
- UINT32_C(0x2)
- /*
- * Set to 1 to indicate flow flush operation to cleanup all the
- * flows by the caller. This flag is set to 0 by older driver. For
- * older firmware, setting this flag has no effect.
- */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_RESET_PORT \
- UINT32_C(0x4)
- /*
- * Set to 1 to indicate the flow counter IDs are included in the
- * flow table.
- */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_INCL_FC \
- UINT32_C(0x8000000)
- /*
- * This specifies the size of flow handle entries provided by the
- * driver in the flow table specified below. Only two flow handle
- * size enums are defined.
- */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_MASK \
- UINT32_C(0xc0000000)
- #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_SFT \
- 30
- /* The flow handle is 16bit */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_16BIT \
- (UINT32_C(0x0) << 30)
- /* The flow handle is 64bit */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_64BIT \
- (UINT32_C(0x1) << 30)
- #define HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_LAST \
- HWRM_CFA_FLOW_FLUSH_INPUT_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_64BIT
- /* Specify page size of the flow table memory. */
- uint8_t page_size;
- /* The page size is 4K */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_4K UINT32_C(0x0)
- /* The page size is 8K */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_8K UINT32_C(0x1)
- /* The page size is 64K */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_64K UINT32_C(0x4)
- /* The page size is 256K */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_256K UINT32_C(0x6)
- /* The page size is 1M */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_1M UINT32_C(0x8)
- /* The page size is 2M */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_2M UINT32_C(0x9)
- /* The page size is 4M */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_4M UINT32_C(0xa)
- /* The page size is 1G */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_1G UINT32_C(0x12)
- #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_LAST \
- HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_SIZE_1G
- /* FLow table memory indirect levels. */
+ uint16_t flags;
+ /* Counter PBL indirect levels. */
uint8_t page_level;
/* PBL pointer is physical start address. */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0)
+ #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
- #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1)
+ #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1)
/*
* PBL pointer points to PDE table with each entry pointing to PTE
* tables.
*/
- #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2)
- #define HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LAST \
- HWRM_CFA_FLOW_FLUSH_INPUT_PAGE_LEVEL_LVL_2
- /* number of flows in the flow table */
- uint16_t num_flows;
+ #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2)
+ #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LAST \
+ HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_2
+ /* Page size. */
+ uint8_t page_size;
+ /* 4KB page size. */
+ #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_4K UINT32_C(0x0)
+ /* 8KB page size. */
+ #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_8K UINT32_C(0x1)
+ /* 64KB page size. */
+ #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_64K UINT32_C(0x4)
+ /* 256KB page size. */
+ #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_256K UINT32_C(0x6)
+ /* 1MB page size. */
+ #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_1M UINT32_C(0x8)
+ /* 2MB page size. */
+ #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M UINT32_C(0x9)
+ /* 4MB page size. */
+ #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_4M UINT32_C(0xa)
+ /* 1GB page size. */
+ #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_1G UINT32_C(0x12)
+ #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_LAST \
+ HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_1G
+ uint32_t unused_0;
/* Pointer to the PBL, or PDL depending on number of levels */
uint64_t page_dir;
} __rte_packed;
-/* hwrm_cfa_flow_flush_output (size:128b/16B) */
-struct hwrm_cfa_flow_flush_output {
+/* hwrm_cfa_ctx_mem_rgtr_output (size:128b/16B) */
+struct hwrm_cfa_ctx_mem_rgtr_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -48652,7 +54667,12 @@ struct hwrm_cfa_flow_flush_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /*
+ * Id/Handle to the recently registered context memory. This handle is
+ * passed to the CFA feature.
+ */
+ uint16_t ctx_id;
+ uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -48664,13 +54684,13 @@ struct hwrm_cfa_flow_flush_output {
uint8_t valid;
} __rte_packed;
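+
+/*
+ * Illustrative sketch (editorial example, not part of the generated HWRM
+ * interface): registering one physically contiguous context memory region
+ * with a flat PBL (page_level LVL_0). The 2MB page size and the region IOVA
+ * are caller-supplied assumptions; on success the response carries the
+ * ctx_id used by later unregister/configure calls.
+ */
+static inline void
+bnxt_example_fill_ctx_mem_rgtr_req(struct hwrm_cfa_ctx_mem_rgtr_input *req,
+				   uint64_t region_iova)
+{
+	memset(req, 0, sizeof(*req));
+	/* Flat list: page_dir is the physical start address of the region. */
+	req->page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
+	req->page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
+	req->page_dir = rte_cpu_to_le_64(region_iova);
+}
+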
-/***********************
- * hwrm_cfa_flow_stats *
- ***********************/
+/***************************
+ * hwrm_cfa_ctx_mem_unrgtr *
+ ***************************/
-/* hwrm_cfa_flow_stats_input (size:640b/80B) */
-struct hwrm_cfa_flow_stats_input {
+/* hwrm_cfa_ctx_mem_unrgtr_input (size:192b/24B) */
+struct hwrm_cfa_ctx_mem_unrgtr_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -48699,93 +54719,81 @@ struct hwrm_cfa_flow_stats_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Number of valid flows in this command. */
- uint16_t num_flows;
- /*
- * Flow handle.
- * For a listing of applicable flow_handle_0 values, see enumeration
- * in hwrm_cfa_flow_info_input.
- */
- uint16_t flow_handle_0;
- /*
- * Flow handle.
- * For a listing of applicable flow_handle_1 values, see enumeration
- * in hwrm_cfa_flow_info_input.
- */
- uint16_t flow_handle_1;
- /*
- * Flow handle.
- * For a listing of applicable flow_handle_2 values, see enumeration
- * in hwrm_cfa_flow_info_input.
- */
- uint16_t flow_handle_2;
/*
- * Flow handle.
- * For a listing of applicable flow_handle_3 values, see enumeration
- * in hwrm_cfa_flow_info_input.
+ * Id/Handle to the recently registered context memory. This handle is
+ * passed to the CFA feature.
*/
- uint16_t flow_handle_3;
+ uint16_t ctx_id;
+ uint8_t unused_0[6];
+} __rte_packed;
+
+/* hwrm_cfa_ctx_mem_unrgtr_output (size:128b/16B) */
+struct hwrm_cfa_ctx_mem_unrgtr_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Flow handle.
- * For a listing of applicable flow_handle_4 values, see enumeration
- * in hwrm_cfa_flow_info_input.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
*/
- uint16_t flow_handle_4;
+ uint8_t valid;
+} __rte_packed;
+
+/*************************
+ * hwrm_cfa_ctx_mem_qctx *
+ *************************/
+
+
+/* hwrm_cfa_ctx_mem_qctx_input (size:192b/24B) */
+struct hwrm_cfa_ctx_mem_qctx_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Flow handle.
- * For a listing of applicable flow_handle_5 values, see enumeration
- * in hwrm_cfa_flow_info_input.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t flow_handle_5;
+ uint16_t cmpl_ring;
/*
- * Flow handle.
- * For a listing of applicable flow_handle_6 values, see enumeration
- * in hwrm_cfa_flow_info_input.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t flow_handle_6;
+ uint16_t seq_id;
/*
- * Flow handle.
- * For a listing of applicable flow_handle_7 values, see enumeration
- * in hwrm_cfa_flow_info_input.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
*/
- uint16_t flow_handle_7;
+ uint16_t target_id;
/*
- * Flow handle.
- * For a listing of applicable flow_handle_8 values, see enumeration
- * in hwrm_cfa_flow_info_input.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t flow_handle_8;
+ uint64_t resp_addr;
/*
- * Flow handle.
- * For a listing of applicable flow_handle_9 values, see enumeration
- * in hwrm_cfa_flow_info_input.
+ * Id/Handle to the recently registered context memory. This handle is
+ * passed to the CFA feature.
*/
- uint16_t flow_handle_9;
- uint8_t unused_0[2];
- /* Flow ID of a flow. */
- uint32_t flow_id_0;
- /* Flow ID of a flow. */
- uint32_t flow_id_1;
- /* Flow ID of a flow. */
- uint32_t flow_id_2;
- /* Flow ID of a flow. */
- uint32_t flow_id_3;
- /* Flow ID of a flow. */
- uint32_t flow_id_4;
- /* Flow ID of a flow. */
- uint32_t flow_id_5;
- /* Flow ID of a flow. */
- uint32_t flow_id_6;
- /* Flow ID of a flow. */
- uint32_t flow_id_7;
- /* Flow ID of a flow. */
- uint32_t flow_id_8;
- /* Flow ID of a flow. */
- uint32_t flow_id_9;
+ uint16_t ctx_id;
+ uint8_t unused_0[6];
} __rte_packed;
-/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
-struct hwrm_cfa_flow_stats_output {
+/* hwrm_cfa_ctx_mem_qctx_output (size:256b/32B) */
+struct hwrm_cfa_ctx_mem_qctx_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -48794,56 +54802,44 @@ struct hwrm_cfa_flow_stats_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* packet_0 is 64 b */
- uint64_t packet_0;
- /* packet_1 is 64 b */
- uint64_t packet_1;
- /* packet_2 is 64 b */
- uint64_t packet_2;
- /* packet_3 is 64 b */
- uint64_t packet_3;
- /* packet_4 is 64 b */
- uint64_t packet_4;
- /* packet_5 is 64 b */
- uint64_t packet_5;
- /* packet_6 is 64 b */
- uint64_t packet_6;
- /* packet_7 is 64 b */
- uint64_t packet_7;
- /* packet_8 is 64 b */
- uint64_t packet_8;
- /* packet_9 is 64 b */
- uint64_t packet_9;
- /* byte_0 is 64 b */
- uint64_t byte_0;
- /* byte_1 is 64 b */
- uint64_t byte_1;
- /* byte_2 is 64 b */
- uint64_t byte_2;
- /* byte_3 is 64 b */
- uint64_t byte_3;
- /* byte_4 is 64 b */
- uint64_t byte_4;
- /* byte_5 is 64 b */
- uint64_t byte_5;
- /* byte_6 is 64 b */
- uint64_t byte_6;
- /* byte_7 is 64 b */
- uint64_t byte_7;
- /* byte_8 is 64 b */
- uint64_t byte_8;
- /* byte_9 is 64 b */
- uint64_t byte_9;
+ uint16_t flags;
+ /* Counter PBL indirect levels. */
+ uint8_t page_level;
+ /* PBL pointer is physical start address. */
+ #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1)
/*
- * If a flow has been hit, the bit representing the flow will be 1.
- * Likewise, if a flow has not, the bit representing the flow
- * will be 0. Mapping will match flow numbers where bitX is for flowX
- * (ex: bit 0 is flow0). This only applies for NIC flows. Upon
- * reading of the flow, the bit will be cleared for the flow and only
- * set again when traffic is received by the flow.
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
*/
- uint16_t flow_hits;
- uint8_t unused_0[5];
+ #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2)
+ #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LAST \
+ HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LVL_2
+ /* Page size. */
+ uint8_t page_size;
+ /* 4KB page size. */
+ #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_4K UINT32_C(0x0)
+ /* 8KB page size. */
+ #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_8K UINT32_C(0x1)
+ /* 64KB page size. */
+ #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_64K UINT32_C(0x4)
+ /* 256KB page size. */
+ #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_256K UINT32_C(0x6)
+ /* 1MB page size. */
+ #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_1M UINT32_C(0x8)
+ /* 2MB page size. */
+ #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_2M UINT32_C(0x9)
+ /* 4MB page size. */
+ #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_4M UINT32_C(0xa)
+ /* 1GB page size. */
+ #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_1G UINT32_C(0x12)
+ #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_LAST \
+ HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_1G
+ uint8_t unused_0[4];
+ /* Pointer to the PBL, or PDL depending on number of levels */
+ uint64_t page_dir;
+ uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -48855,13 +54851,13 @@ struct hwrm_cfa_flow_stats_output {
uint8_t valid;
} __rte_packed;
-/***********************************
- * hwrm_cfa_flow_aging_timer_reset *
- ***********************************/
+/**************************
+ * hwrm_cfa_ctx_mem_qcaps *
+ **************************/
-/* hwrm_cfa_flow_aging_timer_reset_input (size:256b/32B) */
-struct hwrm_cfa_flow_aging_timer_reset_input {
+/* hwrm_cfa_ctx_mem_qcaps_input (size:128b/16B) */
+struct hwrm_cfa_ctx_mem_qcaps_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -48890,20 +54886,10 @@ struct hwrm_cfa_flow_aging_timer_reset_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Flow record index. */
- uint16_t flow_handle;
- uint8_t unused_0[2];
- /*
- * New flow timer value for the flow specified in the ext_flow_handle.
- * The flow timer unit is 100ms.
- */
- uint32_t flow_timer;
- /* This value identifies a set of CFA data structures used for a flow. */
- uint64_t ext_flow_handle;
} __rte_packed;
-/* hwrm_cfa_flow_aging_timer_reset_output (size:128b/16B) */
-struct hwrm_cfa_flow_aging_timer_reset_output {
+/* hwrm_cfa_ctx_mem_qcaps_output (size:128b/16B) */
+struct hwrm_cfa_ctx_mem_qcaps_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -48912,7 +54898,12 @@ struct hwrm_cfa_flow_aging_timer_reset_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /*
+ * Indicates the maximum number of context memory entries which can
+ * be registered.
+ */
+ uint16_t max_entries;
+ uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -48924,13 +54915,13 @@ struct hwrm_cfa_flow_aging_timer_reset_output {
uint8_t valid;
} __rte_packed;
-/***************************
- * hwrm_cfa_flow_aging_cfg *
- ***************************/
+/**************************
+ * hwrm_cfa_counter_qcaps *
+ **************************/
-/* hwrm_cfa_flow_aging_cfg_input (size:384b/48B) */
-struct hwrm_cfa_flow_aging_cfg_input {
+/* hwrm_cfa_counter_qcaps_input (size:128b/16B) */
+struct hwrm_cfa_counter_qcaps_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -48959,131 +54950,97 @@ struct hwrm_cfa_flow_aging_cfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* The bit field to enable per flow aging configuration. */
- uint16_t enables;
- /*
- * This bit must be '1' for the tcp flow timer field to be
- * configured
- */
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_TCP_FLOW_TIMER \
+} __rte_packed;
+
+/* hwrm_cfa_counter_qcaps_output (size:576b/72B) */
+struct hwrm_cfa_counter_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /* Enumeration denoting the supported CFA counter format. */
+ #define HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT \
UINT32_C(0x1)
+ /* CFA counter types are not supported. */
+ #define HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT_NONE \
+ UINT32_C(0x0)
+ /* 64-bit packet counters followed by 64-bit byte counters format. */
+ #define HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT_64_BIT \
+ UINT32_C(0x1)
+ #define HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT_LAST \
+ HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT_64_BIT
+ uint32_t unused_0;
/*
- * This bit must be '1' for the tcp finish timer field to be
- * configured
- */
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_TCP_FIN_TIMER \
- UINT32_C(0x2)
- /*
- * This bit must be '1' for the udp flow timer field to be
- * configured
+ * Minimum guaranteed number of flow counters supported for this
+ * function, in RX direction.
*/
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_UDP_FLOW_TIMER \
- UINT32_C(0x4)
+ uint32_t min_rx_fc;
/*
- * This bit must be '1' for the eem dma interval field to be
- * configured
+ * Maximum non-guaranteed number of flow counters supported for this
+ * function, in RX direction.
*/
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_DMA_INTERVAL \
- UINT32_C(0x8)
+ uint32_t max_rx_fc;
/*
- * This bit must be '1' for the eem notice interval field to be
- * configured
+ * Minimum guaranteed number of flow counters supported for this
+ * function, in TX direction.
*/
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_NOTICE_INTERVAL \
- UINT32_C(0x10)
+ uint32_t min_tx_fc;
/*
- * This bit must be '1' for the eem context memory maximum entries
- * field to be configured
+ * Maximum non-guaranteed number of flow counters supported for this
+ * function, in TX direction.
*/
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_CTX_MAX_ENTRIES \
- UINT32_C(0x20)
+ uint32_t max_tx_fc;
/*
- * This bit must be '1' for the eem context memory ID field to be
- * configured
+ * Minimum guaranteed number of extension flow counters supported for
+ * this function, in RX direction.
*/
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_CTX_ID \
- UINT32_C(0x40)
+ uint32_t min_rx_efc;
/*
- * This bit must be '1' for the eem context memory type field to be
- * configured
+ * Maximum non-guaranteed number of extension flow counters supported
+ * for this function, in RX direction.
*/
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_ENABLES_EEM_CTX_MEM_TYPE \
- UINT32_C(0x80)
- uint8_t flags;
- /* Enumeration denoting the RX, TX type of the resource. */
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
- /* tx path */
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
- /* rx path */
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_LAST \
- HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_PATH_RX
+ uint32_t max_rx_efc;
/*
- * Enumeration denoting the enable, disable eem flow aging
- * configuration.
+ * Minimum guaranteed number of extension flow counters supported for
+ * this function, in TX direction.
*/
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM UINT32_C(0x2)
- /* tx path */
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM_DISABLE \
- (UINT32_C(0x0) << 1)
- /* rx path */
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM_ENABLE \
- (UINT32_C(0x1) << 1)
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM_LAST \
- HWRM_CFA_FLOW_AGING_CFG_INPUT_FLAGS_EEM_ENABLE
- uint8_t unused_0;
+ uint32_t min_tx_efc;
/*
- * The flow aging timer for all TCP flows, the unit is 100
- * milliseconds.
+ * Maximum non-guaranteed number of extension flow counters supported
+ * for this function, in TX direction.
*/
- uint32_t tcp_flow_timer;
+ uint32_t max_tx_efc;
/*
- * The TCP finished timer for all TCP flows, the unit is 100
- * milliseconds.
+ * Minimum guaranteed number of meter drop counters supported for
+ * this function, in RX direction.
*/
- uint32_t tcp_fin_timer;
+ uint32_t min_rx_mdc;
/*
- * The flow aging timer for all UDP flows, the unit is 100
- * milliseconds.
+ * Maximum non-guaranteed number of meter drop counters supported for
+ * this function, in RX direction.
*/
- uint32_t udp_flow_timer;
+ uint32_t max_rx_mdc;
/*
- * The interval to dma eem ejection data to host memory, the unit is
- * milliseconds.
+ * Minimum guaranteed number of meter drop counters supported for this
+ * function, in TX direction.
*/
- uint16_t eem_dma_interval;
+ uint32_t min_tx_mdc;
/*
- * The interval to notify driver to read the eem ejection data, the
- * unit is milliseconds.
+ * Maximum non-guaranteed number of meter drop counters supported for
+ * this function, in TX direction.
*/
- uint16_t eem_notice_interval;
- /* The maximum entries number in the eem context memory. */
- uint32_t eem_ctx_max_entries;
- /* The context memory ID for eem flow aging. */
- uint16_t eem_ctx_id;
- uint16_t eem_ctx_mem_type;
+ uint32_t max_tx_mdc;
/*
- * The content of context memory is eem ejection data, the size of
- * each entry is 4 bytes.
+ * Maximum guaranteed number of flow counters which can be used during
+ * flow alloc.
*/
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_EEM_CTX_MEM_TYPE_EJECTION_DATA \
- UINT32_C(0x0)
- #define HWRM_CFA_FLOW_AGING_CFG_INPUT_EEM_CTX_MEM_TYPE_LAST \
- HWRM_CFA_FLOW_AGING_CFG_INPUT_EEM_CTX_MEM_TYPE_EJECTION_DATA
- uint8_t unused_1[4];
-} __rte_packed;
-
-/* hwrm_cfa_flow_aging_cfg_output (size:128b/16B) */
-struct hwrm_cfa_flow_aging_cfg_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
+ uint32_t max_flow_alloc_fc;
+ uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -49095,13 +55052,13 @@ struct hwrm_cfa_flow_aging_cfg_output {
uint8_t valid;
} __rte_packed;
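+
+/*
+ * Illustrative sketch (editorial example, not part of the generated HWRM
+ * interface): sizing an RX flow counter pool from the capabilities reported
+ * above, clamped between the guaranteed minimum and the non-guaranteed
+ * maximum. The wanted value is a caller-supplied assumption.
+ */
+static inline uint32_t
+bnxt_example_pick_rx_fc_pool_size(
+	const struct hwrm_cfa_counter_qcaps_output *resp, uint32_t wanted)
+{
+	uint32_t min_fc = rte_le_to_cpu_32(resp->min_rx_fc);
+	uint32_t max_fc = rte_le_to_cpu_32(resp->max_rx_fc);
+
+	if (wanted < min_fc)
+		return min_fc;
+	if (wanted > max_fc)
+		return max_fc;
+	return wanted;
+}
+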
-/****************************
- * hwrm_cfa_flow_aging_qcfg *
- ****************************/
+/************************
+ * hwrm_cfa_counter_cfg *
+ ************************/
-/* hwrm_cfa_flow_aging_qcfg_input (size:192b/24B) */
-struct hwrm_cfa_flow_aging_qcfg_input {
+/* hwrm_cfa_counter_cfg_input (size:256b/32B) */
+struct hwrm_cfa_counter_cfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -49130,24 +55087,64 @@ struct hwrm_cfa_flow_aging_qcfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /*
- * The direction for the flow aging configuration, 1 is rx path, 2 is
- * tx path.
- */
- uint8_t flags;
+ uint16_t flags;
+ /* Enumeration denoting the configuration mode. */
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE \
+ UINT32_C(0x1)
+ /* Disable the configuration mode. */
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE \
+ UINT32_C(0x0)
+ /* Enable the configuration mode. */
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE \
+ UINT32_C(0x1)
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_LAST \
+ HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE
/* Enumeration denoting the RX, TX type of the resource. */
- #define HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
- /* tx path */
- #define HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
- /* rx path */
- #define HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
- #define HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_LAST \
- HWRM_CFA_FLOW_AGING_QCFG_INPUT_FLAGS_PATH_RX
- uint8_t unused_0[7];
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH \
+ UINT32_C(0x2)
+ /* Tx path. */
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX \
+ (UINT32_C(0x0) << 1)
+ /* Rx path. */
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX \
+ (UINT32_C(0x1) << 1)
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX
+ /* Enumeration denoting the data transfer mode. */
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_MASK \
+ UINT32_C(0xc)
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_SFT 2
+ /* Push mode. */
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PUSH \
+ (UINT32_C(0x0) << 2)
+ /* Pull mode. */
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL \
+ (UINT32_C(0x1) << 2)
+ /* Pull on async update. */
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC \
+ (UINT32_C(0x2) << 2)
+ #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_LAST \
+ HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC
+ uint16_t counter_type;
+ /* Flow counters. */
+ #define HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_FC UINT32_C(0x0)
+ /* Extended flow counters. */
+ #define HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_EFC UINT32_C(0x1)
+ /* Meter drop counters. */
+ #define HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_MDC UINT32_C(0x2)
+ #define HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_LAST \
+ HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_MDC
+ /* Ctx memory handle to be used for the counter. */
+ uint16_t ctx_id;
+ /* Counter update cadence hint (only in Push mode). */
+ uint16_t update_tmr_ms;
+ /* Total number of entries. */
+ uint32_t num_entries;
+ uint32_t unused_0;
} __rte_packed;
-/* hwrm_cfa_flow_aging_qcfg_output (size:320b/40B) */
-struct hwrm_cfa_flow_aging_qcfg_output {
+/* hwrm_cfa_counter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_counter_cfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -49156,37 +55153,6 @@ struct hwrm_cfa_flow_aging_qcfg_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * The current flow aging timer for all TCP flows, the unit is 100
- * millisecond.
- */
- uint32_t tcp_flow_timer;
- /*
- * The current TCP finished timer for all TCP flows, the unit is 100
- * millisecond.
- */
- uint32_t tcp_fin_timer;
- /*
- * The current flow aging timer for all UDP flows, the unit is 100
- * millisecond.
- */
- uint32_t udp_flow_timer;
- /*
- * The interval to dma eem ejection data to host memory, the unit is
- * milliseconds.
- */
- uint16_t eem_dma_interval;
- /*
- * The interval to notify driver to read the eem ejection data, the
- * unit is milliseconds.
- */
- uint16_t eem_notice_interval;
- /* The maximum entries number in the eem context memory. */
- uint32_t eem_ctx_max_entries;
- /* The context memory ID for eem flow aging. */
- uint16_t eem_ctx_id;
- /* The context memory type for eem flow aging. */
- uint16_t eem_ctx_mem_type;
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
@@ -49199,13 +55165,13 @@ struct hwrm_cfa_flow_aging_qcfg_output {
uint8_t valid;
} __rte_packed;
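+
+/*
+ * Illustrative sketch (editorial example, not part of the generated HWRM
+ * interface): enabling RX flow counters in pull mode against a context
+ * memory handle previously returned by hwrm_cfa_ctx_mem_rgtr. The ctx_id and
+ * num_entries arguments are caller-supplied assumptions.
+ */
+static inline void
+bnxt_example_fill_counter_cfg_req(struct hwrm_cfa_counter_cfg_input *req,
+				  uint16_t ctx_id, uint32_t num_entries)
+{
+	uint16_t flags;
+
+	memset(req, 0, sizeof(*req));
+	flags = HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE |
+		HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX |
+		HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
+	req->flags = rte_cpu_to_le_16(flags);
+	req->counter_type =
+		rte_cpu_to_le_16(HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_FC);
+	req->ctx_id = rte_cpu_to_le_16(ctx_id);
+	req->num_entries = rte_cpu_to_le_32(num_entries);
+}
+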
-/*****************************
- * hwrm_cfa_flow_aging_qcaps *
- *****************************/
+/***************************
+ * hwrm_cfa_counter_qstats *
+ ***************************/
-/* hwrm_cfa_flow_aging_qcaps_input (size:192b/24B) */
-struct hwrm_cfa_flow_aging_qcaps_input {
+/* hwrm_cfa_counter_qstats_input (size:320b/40B) */
+struct hwrm_cfa_counter_qstats_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -49234,24 +55200,27 @@ struct hwrm_cfa_flow_aging_qcaps_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /*
- * The direction for the flow aging configuration, 1 is rx path, 2 is
- * tx path.
- */
- uint8_t flags;
+ uint16_t flags;
/* Enumeration denoting the RX, TX type of the resource. */
- #define HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH UINT32_C(0x1)
- /* tx path */
- #define HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
- /* rx path */
- #define HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
- #define HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_LAST \
- HWRM_CFA_FLOW_AGING_QCAPS_INPUT_FLAGS_PATH_RX
- uint8_t unused_0[7];
+ #define HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* Tx path. */
+ #define HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* Rx path. */
+ #define HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX
+ uint16_t counter_type;
+ uint16_t input_flow_ctx_id;
+ uint16_t num_entries;
+ uint16_t delta_time_ms;
+ uint16_t meter_instance_id;
+ uint16_t mdc_ctx_id;
+ uint8_t unused_0[2];
+ uint64_t expected_count;
} __rte_packed;
-/* hwrm_cfa_flow_aging_qcaps_output (size:256b/32B) */
-struct hwrm_cfa_flow_aging_qcaps_output {
+/* hwrm_cfa_counter_qstats_output (size:128b/16B) */
+struct hwrm_cfa_counter_qstats_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -49260,23 +55229,6 @@ struct hwrm_cfa_flow_aging_qcaps_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * The maximum flow aging timer for all TCP flows, the unit is 100
- * millisecond.
- */
- uint32_t max_tcp_flow_timer;
- /*
- * The maximum TCP finished timer for all TCP flows, the unit is 100
- * millisecond.
- */
- uint32_t max_tcp_fin_timer;
- /*
- * The maximum flow aging timer for all UDP flows, the unit is 100
- * millisecond.
- */
- uint32_t max_udp_flow_timer;
- /* The maximum aging flows that HW can support. */
- uint32_t max_aging_flows;
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
@@ -49289,13 +55241,13 @@ struct hwrm_cfa_flow_aging_qcaps_output {
uint8_t valid;
} __rte_packed;
-/**********************************
- * hwrm_cfa_tcp_flag_process_qcfg *
- **********************************/
+/**********************
+ * hwrm_cfa_eem_qcaps *
+ **********************/
-/* hwrm_cfa_tcp_flag_process_qcfg_input (size:128b/16B) */
-struct hwrm_cfa_tcp_flag_process_qcfg_input {
+/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcaps_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -49324,10 +55276,29 @@ struct hwrm_cfa_tcp_flag_process_qcfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When set to 1, indicates the configuration will apply to TX flows
+ * which are to be offloaded.
+ * Note if this bit is set then the path_rx bit can't be set.
+ */
+ #define HWRM_CFA_EEM_QCAPS_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x1)
+ /*
+ * When set to 1, indicates the configuration will apply to RX flows
+ * which are to be offloaded.
+ * Note if this bit is set then the path_tx bit can't be set.
+ */
+ #define HWRM_CFA_EEM_QCAPS_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x2)
+ /* When set to 1, all offloaded flows will be sent to EEM. */
+ #define HWRM_CFA_EEM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD \
+ UINT32_C(0x4)
+ uint32_t unused_0;
} __rte_packed;
-/* hwrm_cfa_tcp_flag_process_qcfg_output (size:192b/24B) */
-struct hwrm_cfa_tcp_flag_process_qcfg_output {
+/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
+struct hwrm_cfa_eem_qcaps_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -49336,21 +55307,91 @@ struct hwrm_cfa_tcp_flag_process_qcfg_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* The port 0 RX mirror action record ID. */
- uint16_t rx_ar_id_port0;
- /* The port 1 RX mirror action record ID. */
- uint16_t rx_ar_id_port1;
+ uint32_t flags;
/*
- * The port 0 RX action record ID for TX TCP flag packets from
- * loopback path.
+ * When set to 1, indicates the configuration will apply to TX flows
+ * which are to be offloaded.
+ * Note if this bit is set then the path_rx bit can't be set.
*/
- uint16_t tx_ar_id_port0;
+ #define HWRM_CFA_EEM_QCAPS_OUTPUT_FLAGS_PATH_TX \
+ UINT32_C(0x1)
/*
- * The port 1 RX action record ID for TX TCP flag packets from
- * loopback path.
+ * When set to 1, indicates the configuration will apply to RX flows
+ * which are to be offloaded.
+ * Note if this bit is set then the path_tx bit can't be set.
*/
- uint16_t tx_ar_id_port1;
- uint8_t unused_0[7];
+ #define HWRM_CFA_EEM_QCAPS_OUTPUT_FLAGS_PATH_RX \
+ UINT32_C(0x2)
+ /*
+ * When set to 1, indicates the FW supports the Centralized
+ * Memory Model. The concept designates one entity for the
+ * memory allocation while all others 'subscribe' to it.
+ */
+ #define HWRM_CFA_EEM_QCAPS_OUTPUT_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * When set to 1, indicates the FW supports the Detached
+ * Centralized Memory Model. The memory is allocated and managed
+ * as a separate entity. All PFs and VFs will be granted direct
+ * or semi-direct access to the allocated memory, while none of
+ * them can interfere with the management of the memory.
+ */
+ #define HWRM_CFA_EEM_QCAPS_OUTPUT_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED \
+ UINT32_C(0x8)
+ uint32_t unused_0;
+ uint32_t supported;
+ /*
+ * If set to 1, then EEM KEY0 table is supported using crc32 hash.
+ * If set to 0, EEM KEY0 table is not supported.
+ */
+ #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_KEY0_TABLE \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, then EEM KEY1 table is supported using lookup3 hash.
+ * If set to 0, EEM KEY1 table is not supported.
+ */
+ #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_KEY1_TABLE \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, then EEM External Record table is supported.
+ * If set to 0, EEM External Record table is not supported.
+ * (This table includes action record, EFC pointers, encap pointers)
+ */
+ #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_EXTERNAL_RECORD_TABLE \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, then EEM External Flow Counters table is supported.
+ * If set to 0, EEM External Flow Counters table is not supported.
+ */
+ #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE \
+ UINT32_C(0x8)
+ /*
+ * If set to 1, then FID table used for implicit flow flush is
+ * supported.
+ * If set to 0, then FID table used for implicit flow flush is
+ * not supported.
+ */
+ #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_FID_TABLE \
+ UINT32_C(0x10)
+ /*
+ * The maximum number of entries supported by EEM. When configuring
+ * the host memory, the supported numbers of entries are:
+ * 32k, 64k, 128k, 256k, 512k, 1M, 2M, 4M, 8M, 32M, 64M, 128M
+ * entries.
+ * For any other value, the FW will round down to the closest
+ * supported number of entries.
+ */
+ uint32_t max_entries_supported;
+ /* The entry size in bytes of each entry in the EEM KEY0/KEY1 tables. */
+ uint16_t key_entry_size;
+ /* The entry size in bytes of each entry in the EEM RECORD tables. */
+ uint16_t record_entry_size;
+ /* The entry size in bytes of each entry in the EEM EFC tables. */
+ uint16_t efc_entry_size;
+ /* The FID size in bytes of each entry in the EEM FID tables. */
+ uint16_t fid_entry_size;
+ uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -49362,13 +55403,13 @@ struct hwrm_cfa_tcp_flag_process_qcfg_output {
uint8_t valid;
} __rte_packed;
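+
+/*
+ * Illustrative sketch (editorial example, not part of the generated HWRM
+ * interface): using the capabilities above to verify that both EEM KEY
+ * tables are available and to compute the host memory needed by one KEY
+ * table for a requested number of entries (clamped to the reported maximum).
+ */
+static inline uint64_t
+bnxt_example_eem_key_table_bytes(const struct hwrm_cfa_eem_qcaps_output *resp,
+				 uint32_t num_entries)
+{
+	uint32_t supported = rte_le_to_cpu_32(resp->supported);
+	uint32_t max_entries = rte_le_to_cpu_32(resp->max_entries_supported);
+
+	if (!(supported & HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_KEY0_TABLE) ||
+	    !(supported & HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_KEY1_TABLE))
+		return 0;
+	if (num_entries > max_entries)
+		num_entries = max_entries;
+	return (uint64_t)num_entries * rte_le_to_cpu_16(resp->key_entry_size);
+}
+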
-/**************************
- * hwrm_cfa_vf_pair_alloc *
- **************************/
+/********************
+ * hwrm_cfa_eem_cfg *
+ ********************/
-/* hwrm_cfa_vf_pair_alloc_input (size:448b/56B) */
-struct hwrm_cfa_vf_pair_alloc_input {
+/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
+struct hwrm_cfa_eem_cfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -49397,17 +55438,57 @@ struct hwrm_cfa_vf_pair_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Logical VF number (range: 0 -> MAX_VFS -1). */
- uint16_t vf_a_id;
- /* Logical VF number (range: 0 -> MAX_VFS -1). */
- uint16_t vf_b_id;
- uint8_t unused_0[4];
- /* VF Pair name (32 byte string). */
- char pair_name[32];
+ uint32_t flags;
+ /*
+ * When set to 1, indicates the configuration will apply to TX flows
+ * which are to be offloaded.
+ * Note if this bit is set then the path_rx bit can't be set.
+ */
+ #define HWRM_CFA_EEM_CFG_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x1)
+ /*
+ * When set to 1, indicates the configuration will apply to RX flows
+ * which are to be offloaded.
+ * Note if this bit is set then the path_tx bit can't be set.
+ */
+ #define HWRM_CFA_EEM_CFG_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x2)
+ /* When set to 1, all offloaded flows will be sent to EEM. */
+ #define HWRM_CFA_EEM_CFG_INPUT_FLAGS_PREFERRED_OFFLOAD \
+ UINT32_C(0x4)
+ /* When set to 1, secondary, 0 means primary. */
+ #define HWRM_CFA_EEM_CFG_INPUT_FLAGS_SECONDARY_PF \
+ UINT32_C(0x8)
+ /*
+ * Group_id which is used by firmware to identify memory pools
+ * belonging to a certain group.
+ */
+ uint16_t group_id;
+ uint16_t unused_0;
+ /*
+ * Configure EEM with the given number of entries. The EEM tables
+ * KEY0, KEY1, RECORD and EFC all have the same number of entries,
+ * and all tables will be configured using this value. Current
+ * minimum value is 32k. Current maximum value is 128M.
+ */
+ uint32_t num_entries;
+ uint32_t unused_1;
+ /* Configure EEM with the given context id for the KEY0 table. */
+ uint16_t key0_ctx_id;
+ /* Configure EEM with the given context id for the KEY1 table. */
+ uint16_t key1_ctx_id;
+ /* Configure EEM with the given context id for the RECORD table. */
+ uint16_t record_ctx_id;
+ /* Configure EEM with the given context id for the EFC table. */
+ uint16_t efc_ctx_id;
+ /* Configure EEM with the given context id for the FID table. */
+ uint16_t fid_ctx_id;
+ uint16_t unused_2;
+ uint32_t unused_3;
} __rte_packed;
-/* hwrm_cfa_vf_pair_alloc_output (size:128b/16B) */
-struct hwrm_cfa_vf_pair_alloc_output {
+/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
+struct hwrm_cfa_eem_cfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -49428,13 +55509,13 @@ struct hwrm_cfa_vf_pair_alloc_output {
uint8_t valid;
} __rte_packed;
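+
+/*
+ * Illustrative sketch (editorial example, not part of the generated HWRM
+ * interface): configuring EEM for the RX path with context ids obtained from
+ * earlier hwrm_cfa_ctx_mem_rgtr calls. The context ids and entry count are
+ * caller-supplied assumptions; num_entries must respect the 32k..128M range
+ * documented above.
+ */
+static inline void
+bnxt_example_fill_eem_cfg_req(struct hwrm_cfa_eem_cfg_input *req,
+			      uint32_t num_entries, uint16_t key0_ctx_id,
+			      uint16_t key1_ctx_id, uint16_t record_ctx_id,
+			      uint16_t efc_ctx_id)
+{
+	memset(req, 0, sizeof(*req));
+	req->flags = rte_cpu_to_le_32(HWRM_CFA_EEM_CFG_INPUT_FLAGS_PATH_RX);
+	req->num_entries = rte_cpu_to_le_32(num_entries);
+	req->key0_ctx_id = rte_cpu_to_le_16(key0_ctx_id);
+	req->key1_ctx_id = rte_cpu_to_le_16(key1_ctx_id);
+	req->record_ctx_id = rte_cpu_to_le_16(record_ctx_id);
+	req->efc_ctx_id = rte_cpu_to_le_16(efc_ctx_id);
+}
+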
-/*************************
- * hwrm_cfa_vf_pair_free *
- *************************/
+/*********************
+ * hwrm_cfa_eem_qcfg *
+ *********************/
-/* hwrm_cfa_vf_pair_free_input (size:384b/48B) */
-struct hwrm_cfa_vf_pair_free_input {
+/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -49463,12 +55544,16 @@ struct hwrm_cfa_vf_pair_free_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* VF Pair name (32 byte string). */
- char pair_name[32];
+ uint32_t flags;
+ /* When set to 1, indicates the configuration is the TX flow. */
+ #define HWRM_CFA_EEM_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x1)
+ /* When set to 1, indicates the configuration is the RX flow. */
+ #define HWRM_CFA_EEM_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x2)
+ uint32_t unused_0;
} __rte_packed;
-/* hwrm_cfa_vf_pair_free_output (size:128b/16B) */
-struct hwrm_cfa_vf_pair_free_output {
+/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
+struct hwrm_cfa_eem_qcfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -49477,7 +55562,29 @@ struct hwrm_cfa_vf_pair_free_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ uint32_t flags;
+ /* When set to 1, indicates the configuration is the TX flow. */
+ #define HWRM_CFA_EEM_QCFG_OUTPUT_FLAGS_PATH_TX \
+ UINT32_C(0x1)
+ /* When set to 1, indicates the configuration is the RX flow. */
+ #define HWRM_CFA_EEM_QCFG_OUTPUT_FLAGS_PATH_RX \
+ UINT32_C(0x2)
+ /* When set to 1, all offloaded flows will be sent to EEM. */
+ #define HWRM_CFA_EEM_QCFG_OUTPUT_FLAGS_PREFERRED_OFFLOAD \
+ UINT32_C(0x4)
+ /* The number of entries the FW has configured for EEM. */
+ uint32_t num_entries;
+ /* The context id configured for the KEY0 table. */
+ uint16_t key0_ctx_id;
+ /* The context id configured for the KEY1 table. */
+ uint16_t key1_ctx_id;
+ /* The context id configured for the RECORD table. */
+ uint16_t record_ctx_id;
+ /* The context id configured for the EFC table. */
+ uint16_t efc_ctx_id;
+ /* The context id configured for the FID table. */
+ uint16_t fid_ctx_id;
+ uint8_t unused_2[5];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -49489,13 +55596,13 @@ struct hwrm_cfa_vf_pair_free_output {
uint8_t valid;
} __rte_packed;
-/*************************
- * hwrm_cfa_vf_pair_info *
- *************************/
+/*******************
+ * hwrm_cfa_eem_op *
+ *******************/
-/* hwrm_cfa_vf_pair_info_input (size:448b/56B) */
-struct hwrm_cfa_vf_pair_info_input {
+/* hwrm_cfa_eem_op_input (size:192b/24B) */
+struct hwrm_cfa_eem_op_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -49525,17 +55632,48 @@ struct hwrm_cfa_vf_pair_info_input {
*/
uint64_t resp_addr;
uint32_t flags;
- /* If this flag is set, lookup by name else lookup by index. */
- #define HWRM_CFA_VF_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
- /* vf pair table index. */
- uint16_t vf_pair_index;
- uint8_t unused_0[2];
- /* VF Pair name (32 byte string). */
- char vf_pair_name[32];
+ /*
+ * When set to 1, indicates the host memory which is passed will be
+ * used for the TX flow offload function specified in fid.
+ * Note if this bit is set then the path_rx bit can't be set.
+ */
+ #define HWRM_CFA_EEM_OP_INPUT_FLAGS_PATH_TX UINT32_C(0x1)
+ /*
+ * When set to 1, indicates the host memory which is passed will be
+ * used for the RX flow offload function specified in fid.
+ * Note if this bit is set then the path_tx bit can't be set.
+ */
+ #define HWRM_CFA_EEM_OP_INPUT_FLAGS_PATH_RX UINT32_C(0x2)
+ uint16_t unused_0;
+ /* The EEM operation to be performed. */
+ uint16_t op;
+ /* This value is reserved and should not be used. */
+ #define HWRM_CFA_EEM_OP_INPUT_OP_RESERVED UINT32_C(0x0)
+ /*
+ * To properly stop EEM and ensure there are no DMAs, the caller
+ * must disable EEM for the given PF, using this call. This will
+ * safely disable EEM and ensure that all DMAs to the
+ * keys/records/efc have been completed.
+ */
+ #define HWRM_CFA_EEM_OP_INPUT_OP_EEM_DISABLE UINT32_C(0x1)
+ /*
+ * Once the EEM host memory and EEM options have been configured,
+ * the caller should enable EEM for the given PF. Note that once
+ * this call has been made, the EEM mechanism will be active and
+ * DMAs will occur as packets are processed.
+ */
+ #define HWRM_CFA_EEM_OP_INPUT_OP_EEM_ENABLE UINT32_C(0x2)
+ /*
+ * Clear EEM settings for the given PF so that the register values
+ * are reset back to their initial state.
+ */
+ #define HWRM_CFA_EEM_OP_INPUT_OP_EEM_CLEANUP UINT32_C(0x3)
+ #define HWRM_CFA_EEM_OP_INPUT_OP_LAST \
+ HWRM_CFA_EEM_OP_INPUT_OP_EEM_CLEANUP
} __rte_packed;
-/* hwrm_cfa_vf_pair_info_output (size:512b/64B) */
-struct hwrm_cfa_vf_pair_info_output {
+/* hwrm_cfa_eem_op_output (size:128b/16B) */
+struct hwrm_cfa_eem_op_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -49544,28 +55682,7 @@ struct hwrm_cfa_vf_pair_info_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* vf pair table index. */
- uint16_t next_vf_pair_index;
- /* vf pair member a's vf_fid. */
- uint16_t vf_a_fid;
- /* vf pair member a's Linux logical VF number. */
- uint16_t vf_a_index;
- /* vf pair member b's vf_fid. */
- uint16_t vf_b_fid;
- /* vf pair member a's Linux logical VF number. */
- uint16_t vf_b_index;
- /* vf pair state. */
- uint8_t pair_state;
- /* Pair has been allocated */
- #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1)
- /* Both pair members are active */
- #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2)
- #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \
- HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE
- uint8_t unused_0[5];
- /* VF Pair name (32 byte string). */
- char pair_name[32];
- uint8_t unused_1[7];
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -49577,13 +55694,13 @@ struct hwrm_cfa_vf_pair_info_output {
uint8_t valid;
} __rte_packed;
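+
+/*
+ * Illustrative sketch (editorial example, not part of the generated HWRM
+ * interface): requesting a safe EEM disable on the RX path before tearing
+ * down EEM host memory, as described for the disable operation above.
+ */
+static inline void
+bnxt_example_fill_eem_disable_req(struct hwrm_cfa_eem_op_input *req)
+{
+	memset(req, 0, sizeof(*req));
+	req->flags = rte_cpu_to_le_32(HWRM_CFA_EEM_OP_INPUT_FLAGS_PATH_RX);
+	req->op = rte_cpu_to_le_16(HWRM_CFA_EEM_OP_INPUT_OP_EEM_DISABLE);
+}
+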
-/***********************
- * hwrm_cfa_pair_alloc *
- ***********************/
+/********************************
+ * hwrm_cfa_adv_flow_mgnt_qcaps *
+ ********************************/
-/* hwrm_cfa_pair_alloc_input (size:576b/72B) */
-struct hwrm_cfa_pair_alloc_input {
+/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -49612,240 +55729,178 @@ struct hwrm_cfa_pair_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ uint32_t unused_0[4];
+} __rte_packed;
+
+/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
/*
- * Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair,
- * 5-rep2fn_mod, 6-rep2fn_modall, 7-rep2fn_truflow).
- */
- uint16_t pair_mode;
- /*
- * Pair between VF on local host with PF or VF on specified host.
- * (deprecated)
- */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_VF2FN \
- UINT32_C(0x0)
- /*
- * Pair between REP on local host with PF or VF on specified host.
- * (deprecated)
+ * Value of 1 to indicate firmware supports 16-bit flow handles.
+ * Value of 0 to indicate firmware does not support 16-bit flow handles.
*/
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN \
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_HND_16BIT_SUPPORTED \
UINT32_C(0x1)
/*
- * Pair between REP on local host with REP on specified host.
- * (deprecated)
+ * Value of 1 to indicate firmware supports 64-bit flow handles.
+ * Value of 0 to indicate firmware does not support 64-bit flow handles.
*/
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2REP \
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_HND_64BIT_SUPPORTED \
UINT32_C(0x2)
- /* Pair for the proxy interface. (deprecated) */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PROXY \
- UINT32_C(0x3)
- /* Pair for the PF interface. (deprecated) */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PFPAIR \
- UINT32_C(0x4)
- /* Modify existing rep2fn pair and move pair to new PF. (deprecated) */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MOD \
- UINT32_C(0x5)
/*
- * Modify existing rep2fn pairs paired with same PF and move pairs
- * to new PF. (deprecated)
- */
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL \
- UINT32_C(0x6)
- /*
- * Truflow pair between REP on local host with PF or VF on specified
- * host.
+ * Value of 1 to indicate firmware supports the flow batch delete
+ * operation through HWRM_CFA_FLOW_FLUSH command.
+ * Value of 0 to indicate that the firmware does not support flow
+ * batch delete operation. (deprecated)
*/
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_TRUFLOW \
- UINT32_C(0x7)
- #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_LAST \
- HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_TRUFLOW
- /* Logical VF number (range: 0 -> MAX_VFS -1). */
- uint16_t vf_a_id;
- /* Logical Host (0xff-local host). */
- uint8_t host_b_id;
- /* Logical PF (0xff-PF for command channel). */
- uint8_t pf_b_id;
- /* Logical VF number (range: 0 -> MAX_VFS -1). */
- uint16_t vf_b_id;
- /* Loopback port (0xff-internal loopback), valid for mode-3. */
- uint8_t port_id;
- /* Priority used for encap of loopback packets valid for mode-3. */
- uint8_t pri;
- /* New PF for rep2fn modify, valid for mode 5. */
- uint16_t new_pf_fid;
- uint32_t enables;
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_BATCH_DELETE_SUPPORTED \
+ UINT32_C(0x4)
/*
- * This bit must be '1' for the q_ab field to be
- * configured.
+ * Value of 1 to indicate that the firmware supports flow reset all
+ * operation through HWRM_CFA_FLOW_FLUSH command.
+ * Value of 0 indicates firmware does not support flow reset all
+ * operation. (deprecated)
*/
- #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID UINT32_C(0x1)
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_RESET_ALL_SUPPORTED \
+ UINT32_C(0x8)
/*
- * This bit must be '1' for the q_ba field to be
- * configured.
+ * Value of 1 to indicate that firmware supports use of FID as
+ * dest_id in HWRM_CFA_NTUPLE_ALLOC/CFG commands.
+ * Value of 0 indicates firmware does not support use of FID as
+ * dest_id.
*/
- #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID UINT32_C(0x2)
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED \
+ UINT32_C(0x10)
/*
- * This bit must be '1' for the fc_ab field to be
- * configured.
+ * Value of 1 to indicate that firmware supports TX EEM flows.
+ * Value of 0 indicates firmware does not support TX EEM flows.
+ * (deprecated)
*/
- #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID UINT32_C(0x4)
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_TX_EEM_FLOW_SUPPORTED \
+ UINT32_C(0x20)
/*
- * This bit must be '1' for the fc_ba field to be
- * configured.
+ * Value of 1 to indicate that firmware supports RX EEM flows.
+ * Value of 0 indicates firmware does not support RX EEM flows.
+ * (deprecated)
*/
- #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID UINT32_C(0x8)
- /* VF Pair name (32 byte string). */
- char pair_name[32];
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RX_EEM_FLOW_SUPPORTED \
+ UINT32_C(0x40)
/*
- * The q_ab value specifies the logical index of the TX/RX CoS
- * queue to be assigned for traffic in the A to B direction of
- * the interface pair. The default value is 0.
+ * Value of 1 to indicate that firmware supports the dynamic
+ * allocation of an on-chip flow counter which can be used for EEM
+ * flows. Value of 0 indicates firmware does not support the dynamic
+ * allocation of an on-chip flow counter.
+ * (deprecated)
*/
- uint8_t q_ab;
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED \
+ UINT32_C(0x80)
/*
- * The q_ba value specifies the logical index of the TX/RX CoS
- * queue to be assigned for traffic in the B to A direction of
- * the interface pair. The default value is 1.
+ * Value of 1 to indicate that firmware supports setting of
+ * rfs_ring_tbl_idx in HWRM_CFA_NTUPLE_ALLOC command.
+ * Value of 0 indicates firmware does not support rfs_ring_tbl_idx.
*/
- uint8_t q_ba;
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_SUPPORTED \
+ UINT32_C(0x100)
/*
- * Specifies whether RX ring flow control is disabled (0) or enabled
- * (1) in the A to B direction. The default value is 0, meaning that
- * packets will be dropped when the B-side RX rings are full.
+ * Value of 1 to indicate that firmware supports untagged matching
+ * criteria on HWRM_CFA_L2_FILTER_ALLOC command. Value of 0
+ * indicates firmware does not support untagged matching.
*/
- uint8_t fc_ab;
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_UNTAGGED_VLAN_SUPPORTED \
+ UINT32_C(0x200)
/*
- * Specifies whether RX ring flow control is disabled (0) or enabled
- * (1) in the B to A direction. The default value is 1, meaning that
- * the RX CoS queue will be flow controlled when the A-side RX rings
- * are full.
+ * Value of 1 to indicate that firmware supports XDP filter. Value
+ * of 0 indicates firmware does not support XDP filter.
*/
- uint8_t fc_ba;
- uint8_t unused_1[4];
-} __rte_packed;
-
-/* hwrm_cfa_pair_alloc_output (size:192b/24B) */
-struct hwrm_cfa_pair_alloc_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* Only valid for modes 1 and 2. */
- uint16_t rx_cfa_code_a;
- /* Only valid for modes 1 and 2. */
- uint16_t tx_cfa_action_a;
- /* Only valid for mode 2. */
- uint16_t rx_cfa_code_b;
- /* Only valid for mode 2. */
- uint16_t tx_cfa_action_b;
- uint8_t unused_0[7];
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_XDP_SUPPORTED \
+ UINT32_C(0x400)
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * Value of 1 to indicate that the firmware supports L2 header source
+ * fields matching criteria on HWRM_CFA_L2_FILTER_ALLOC command.
+ * Value of 0 indicates firmware does not support L2 header source
+ * fields matching.
*/
- uint8_t valid;
-} __rte_packed;
-
-/**********************
- * hwrm_cfa_pair_free *
- **********************/
-
-
-/* hwrm_cfa_pair_free_input (size:448b/56B) */
-struct hwrm_cfa_pair_free_input {
- /* The HWRM command request type. */
- uint16_t req_type;
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED \
+ UINT32_C(0x800)
/*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
+ * If set to 1, firmware is capable of supporting ARP ethertype as
+ * matching criteria for HWRM_CFA_NTUPLE_FILTER_ALLOC command in the
+ * RX direction. By default, this flag should be 0 for older versions
+ * of firmware.
*/
- uint16_t cmpl_ring;
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED \
+ UINT32_C(0x1000)
/*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
+ * Value of 1 to indicate that firmware supports setting of
+ * rfs_ring_tbl_idx in dst_id field of the HWRM_CFA_NTUPLE_ALLOC
+ * command. Value of 0 indicates firmware does not support
+ * rfs_ring_tbl_idx in dst_id field.
*/
- uint16_t seq_id;
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED \
+ UINT32_C(0x2000)
/*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
+ * If set to 1, firmware is capable of supporting IPv4/IPv6 as
+ * ethertype in HWRM_CFA_NTUPLE_FILTER_ALLOC command in the RX
+ * direction. By default, this flag should be 0 for older versions
+ * of firmware.
*/
- uint16_t target_id;
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED \
+ UINT32_C(0x4000)
/*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
+ * When this bit is '1', it indicates that core firmware is
+ * capable of TruFlow. Driver can restrict sending HWRM CFA_FLOW_XXX
+ * and CFA_ENCAP_XXX, CFA_DECAP_XXX commands.
*/
- uint64_t resp_addr;
- /* VF Pair name (32 byte string). */
- char pair_name[32];
- /* Logical PF (0xff-PF for command channel). */
- uint8_t pf_b_id;
- uint8_t unused_0[3];
- /* Logical VF number (range: 0 -> MAX_VFS -1). */
- uint16_t vf_id;
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_TRUFLOW_CAPABLE \
+ UINT32_C(0x8000)
/*
- * Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair,
- * 5-rep2fn_mod, 6-rep2fn_modall, 7-rep2fn_truflow).
+ * If set to 1, firmware is capable of supporting L2/ROCE as
+ * traffic type in flags field of HWRM_CFA_L2_FILTER_ALLOC command.
+ * By default, this flag should be 0 for older versions of firmware.
*/
- uint16_t pair_mode;
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED \
+ UINT32_C(0x10000)
/*
- * Pair between VF on local host with PF or VF on specified host.
- * (deprecated)
+ * If set to 1, firmware is capable of HW LAG. This bit is only
+ * advertised if the calling function is a PAXC function.
*/
- #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_LAG_SUPPORTED \
+ UINT32_C(0x20000)
/*
- * Pair between REP on local host with PF or VF on specified host.
- * (deprecated)
+ * If set to 1, firmware is capable of installing ntuple rules without
+ * additional classification on the L2 Context.
*/
- #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED \
+ UINT32_C(0x40000)
/*
- * Pair between REP on local host with REP on specified host.
- * (deprecated)
+ * If set to 1, firmware is capable of returning stats for nic flows
+ * in the cfa_flow_stats command where the flow_handle value is 0xF000.
*/
- #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2REP UINT32_C(0x2)
- /* Pair for the proxy interface. (deprecated) */
- #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_PROXY UINT32_C(0x3)
- /* Pair for the PF interface. (deprecated) */
- #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_PFPAIR UINT32_C(0x4)
- /* Modify existing rep2fn pair and move pair to new PF. (deprecated) */
- #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_MOD UINT32_C(0x5)
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NIC_FLOW_STATS_SUPPORTED \
+ UINT32_C(0x80000)
/*
- * Modify existing rep2fn pairs paired with same PF and move pairs
- * to new PF. (deprecated)
+ * If set to 1, firmware is capable of supporting these additional
+ * ip_protocols: ICMP, ICMPV6, RSVD for ntuple rules. By default,
+ * this flag should be 0 for older versions of firmware.
*/
- #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_MODALL UINT32_C(0x6)
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED \
+ UINT32_C(0x100000)
/*
- * Truflow pair between REP on local host with PF or VF on
- * specified host.
- */
- #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW UINT32_C(0x7)
- #define HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_LAST \
- HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW
-} __rte_packed;
-
-/* hwrm_cfa_pair_free_output (size:128b/16B) */
-struct hwrm_cfa_pair_free_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
+ * Value of 1 to indicate that firmware supports setting of
+ * rfs_ring_tbl_idx (new offset) in HWRM_CFA_NTUPLE_ALLOC command.
+ * Value of 0 indicates ring tbl idx should be passed using dst_id.
+ */
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED \
+ UINT32_C(0x200000)
+ uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -49857,13 +55912,13 @@ struct hwrm_cfa_pair_free_output {
uint8_t valid;
} __rte_packed;
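The capability bits above are simple tests against the 32-bit flags word of the HWRM_CFA_ADV_FLOW_MGNT_QCAPS response. A minimal sketch of decoding a few of them follows; the response struct name and its flags field are assumed from the define prefixes, and the HWRM transport that fills the response in is not shown.

/*
 * Sketch only: decode a few advanced flow management capability bits.
 * Assumes the qcaps response carries the bits above in a 32-bit
 * little-endian `flags` word, as the define names suggest.
 */
#include <stdbool.h>
#include <stdint.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

struct adv_flow_caps {
	bool hnd_16bit;		/* 16-bit flow handles supported */
	bool rfs_ring_tbl_idx;	/* rfs_ring_tbl_idx usable in ntuple alloc */
	bool truflow;		/* core firmware is TruFlow capable */
};

static void
decode_adv_flow_caps(const struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp,
		     struct adv_flow_caps *caps)
{
	uint32_t flags = rte_le_to_cpu_32(resp->flags);

	caps->hnd_16bit = !!(flags &
	    HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_HND_16BIT_SUPPORTED);
	caps->rfs_ring_tbl_idx = !!(flags &
	    HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_SUPPORTED);
	caps->truflow = !!(flags &
	    HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_TRUFLOW_CAPABLE);
}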
-/**********************
- * hwrm_cfa_pair_info *
- **********************/
+/******************
+ * hwrm_cfa_tflib *
+ ******************/
-/* hwrm_cfa_pair_info_input (size:448b/56B) */
-struct hwrm_cfa_pair_info_input {
+/* hwrm_cfa_tflib_input (size:1024b/128B) */
+struct hwrm_cfa_tflib_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -49892,23 +55947,18 @@ struct hwrm_cfa_pair_info_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t flags;
- /* If this flag is set, lookup by name else lookup by index. */
- #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
- /* If this flag is set, lookup by PF id and VF id. */
- #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_REPRE UINT32_C(0x2)
- /* Pair table index. */
- uint16_t pair_index;
- /* Pair pf index. */
- uint8_t pair_pfid;
- /* Pair vf index. */
- uint8_t pair_vfid;
- /* Pair name (32 byte string). */
- char pair_name[32];
+ /* TFLIB message type. */
+ uint16_t tf_type;
+ /* TFLIB message subtype. */
+ uint16_t tf_subtype;
+ /* unused. */
+ uint8_t unused0[4];
+ /* TFLIB request data. */
+ uint32_t tf_req[26];
} __rte_packed;
-/* hwrm_cfa_pair_info_output (size:576b/72B) */
-struct hwrm_cfa_pair_info_output {
+/* hwrm_cfa_tflib_output (size:5632b/704B) */
+struct hwrm_cfa_tflib_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -49917,66 +55967,16 @@ struct hwrm_cfa_pair_info_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Pair table index. */
- uint16_t next_pair_index;
- /* Pair member a's fid. */
- uint16_t a_fid;
- /* Logical host number. */
- uint8_t host_a_index;
- /* Logical PF number. */
- uint8_t pf_a_index;
- /* Pair member a's Linux logical VF number. */
- uint16_t vf_a_index;
- /* Rx CFA code. */
- uint16_t rx_cfa_code_a;
- /* Tx CFA action. */
- uint16_t tx_cfa_action_a;
- /* Pair member b's fid. */
- uint16_t b_fid;
- /* Logical host number. */
- uint8_t host_b_index;
- /* Logical PF number. */
- uint8_t pf_b_index;
- /* Pair member a's Linux logical VF number. */
- uint16_t vf_b_index;
- /* Rx CFA code. */
- uint16_t rx_cfa_code_b;
- /* Tx CFA action. */
- uint16_t tx_cfa_action_b;
- /* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair). */
- uint8_t pair_mode;
- /*
- * Pair between VF on local host with PF or VF on specified host.
- * (deprecated)
- */
- #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
- /*
- * Pair between REP on local host with PF or VF on specified host.
- * (deprecated)
- */
- #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
- /*
- * Pair between REP on local host with REP on specified host.
- * (deprecated)
- */
- #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2REP UINT32_C(0x2)
- /* Pair for the proxy interface. (deprecated) */
- #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PROXY UINT32_C(0x3)
- /* Pair for the PF interface. (deprecated) */
- #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR UINT32_C(0x4)
- #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_LAST \
- HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR
- /* Pair state. */
- uint8_t pair_state;
- /* Pair has been allocated */
- #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1)
- /* Both pair members are active */
- #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2)
- #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \
- HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE
- /* Pair name (32 byte string). */
- char pair_name[32];
- uint8_t unused_0[7];
+ /* TFLIB message type. */
+ uint16_t tf_type;
+ /* TFLIB message subtype. */
+ uint16_t tf_subtype;
+ /* TFLIB response code */
+ uint32_t tf_resp_code;
+ /* TFLIB response data. */
+ uint32_t tf_resp[170];
+ /* unused. */
+ uint8_t unused1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -49988,13 +55988,13 @@ struct hwrm_cfa_pair_info_output {
uint8_t valid;
} __rte_packed;
-/**********************
- * hwrm_cfa_vfr_alloc *
- **********************/
+/**********************************
+ * hwrm_cfa_lag_group_member_rgtr *
+ **********************************/
-/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
-struct hwrm_cfa_vfr_alloc_input {
+/* hwrm_cfa_lag_group_member_rgtr_input (size:192b/24B) */
+struct hwrm_cfa_lag_group_member_rgtr_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -50023,20 +56023,42 @@ struct hwrm_cfa_vfr_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Logical VF number (range: 0 -> MAX_VFS -1). */
- uint16_t vf_id;
+ uint8_t mode;
/*
- * This field is reserved for the future use.
- * It shall be set to 0.
+ * Transmit only on the active port. Automatically failover
+ * to backup port.
*/
- uint16_t reserved;
- uint8_t unused_0[4];
- /* VF Representor name (32 byte string). */
- char vfr_name[32];
+ #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR_INPUT_MODE_ACTIVE_BACKUP \
+ UINT32_C(0x1)
+ /*
+ * Transmit based on packet header ntuple hash. Packet with only
+ * layer 2 headers will hash using the destination MAC, source MAC
+ * and Ethertype fields. Packets with layer 3 (IP) headers will
+ * hash using the destination MAC, source MAC, IP protocol/next
+ * header, source IP address and destination IP address. Packets
+ * with layer 4 (TCP/UDP) headers will hash using the destination
+ * MAC, source MAC, IP protocol/next header, source IP address,
+ * destination IP address, source port and destination port fields.
+ */
+ #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR_INPUT_MODE_BALANCE_XOR \
+ UINT32_C(0x2)
+ /* Transmit packets on all specified ports. */
+ #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR_INPUT_MODE_BROADCAST \
+ UINT32_C(0x3)
+ #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR_INPUT_MODE_LAST \
+ HWRM_CFA_LAG_GROUP_MEMBER_RGTR_INPUT_MODE_BROADCAST
+ /*
+ * Supports up to 5 ports. bit0 = port 0, bit1 = port 1,
+ * bit2 = port 2, bit3 = port 3, bit4 = loopback port
+ */
+ uint8_t port_bitmap;
+ /* Specify the active port when active-backup mode is specified */
+ uint8_t active_port;
+ uint8_t unused_0[5];
} __rte_packed;
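As a usage illustration, the sketch below fills in the body of a LAG group member register request for balance-xor hashing across two ports. It only touches the fields defined above; the HWRM message header and the send path are assumed to be handled by the driver's existing helpers.

/*
 * Sketch only: register ports 0 and 1 as LAG group members using
 * ntuple-hash (balance-xor) transmit distribution.
 */
#include <string.h>
#include "hsi_struct_def_dpdk.h"

static void
fill_lag_rgtr_req(struct hwrm_cfa_lag_group_member_rgtr_input *req)
{
	memset(req, 0, sizeof(*req));
	req->mode = HWRM_CFA_LAG_GROUP_MEMBER_RGTR_INPUT_MODE_BALANCE_XOR;
	req->port_bitmap = 0x3;	/* bit0 = port 0, bit1 = port 1 */
	/* active_port is only meaningful in active-backup mode. */
	req->active_port = 0;
}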
-/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
-struct hwrm_cfa_vfr_alloc_output {
+/* hwrm_cfa_lag_group_member_rgtr_output (size:128b/16B) */
+struct hwrm_cfa_lag_group_member_rgtr_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -50045,11 +56067,9 @@ struct hwrm_cfa_vfr_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Rx CFA code. */
- uint16_t rx_cfa_code;
- /* Tx CFA action. */
- uint16_t tx_cfa_action;
- uint8_t unused_0[3];
+ /* LAG group ID configured for the function. */
+ uint16_t lag_id;
+ uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -50061,13 +56081,13 @@ struct hwrm_cfa_vfr_alloc_output {
uint8_t valid;
} __rte_packed;
-/*********************
- * hwrm_cfa_vfr_free *
- *********************/
+/************************************
+ * hwrm_cfa_lag_group_member_unrgtr *
+ ************************************/
-/* hwrm_cfa_vfr_free_input (size:448b/56B) */
-struct hwrm_cfa_vfr_free_input {
+/* hwrm_cfa_lag_group_member_unrgtr_input (size:192b/24B) */
+struct hwrm_cfa_lag_group_member_unrgtr_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -50096,20 +56116,13 @@ struct hwrm_cfa_vfr_free_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* VF Representor name (32 byte string). */
- char vfr_name[32];
- /* Logical VF number (range: 0 -> MAX_VFS -1). */
- uint16_t vf_id;
- /*
- * This field is reserved for the future use.
- * It shall be set to 0.
- */
- uint16_t reserved;
- uint8_t unused_0[4];
+ /* LAG group ID configured for the function. */
+ uint16_t lag_id;
+ uint8_t unused_0[6];
} __rte_packed;
-/* hwrm_cfa_vfr_free_output (size:128b/16B) */
-struct hwrm_cfa_vfr_free_output {
+/* hwrm_cfa_lag_group_member_unrgtr_output (size:128b/16B) */
+struct hwrm_cfa_lag_group_member_unrgtr_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -50130,13 +56143,13 @@ struct hwrm_cfa_vfr_free_output {
uint8_t valid;
} __rte_packed;
-/***************************************
- * hwrm_cfa_redirect_query_tunnel_type *
- ***************************************/
+/*****************************
+ * hwrm_cfa_tls_filter_alloc *
+ *****************************/
-/* hwrm_cfa_redirect_query_tunnel_type_input (size:192b/24B) */
-struct hwrm_cfa_redirect_query_tunnel_type_input {
+/* hwrm_cfa_tls_filter_alloc_input (size:768b/96B) */
+struct hwrm_cfa_tls_filter_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -50165,162 +56178,168 @@ struct hwrm_cfa_redirect_query_tunnel_type_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* The source function id. */
- uint16_t src_fid;
- uint8_t unused_0[6];
-} __rte_packed;
-
-/* hwrm_cfa_redirect_query_tunnel_type_output (size:128b/16B) */
-struct hwrm_cfa_redirect_query_tunnel_type_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* Tunnel Mask. */
- uint32_t tunnel_mask;
- /* Non-tunnel */
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_NONTUNNEL \
+ uint32_t unused_0;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the l2_filter_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
UINT32_C(0x1)
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN \
+ /*
+ * This bit must be '1' for the ethertype field to be
+ * configured.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \
UINT32_C(0x2)
- /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_NVGRE \
+ /*
+ * This bit must be '1' for the ipaddr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
UINT32_C(0x4)
- /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_L2GRE \
+ /*
+ * This bit must be '1' for the src_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \
UINT32_C(0x8)
- /* IP in IP */
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_IPIP \
+ /*
+ * This bit must be '1' for the dst_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \
UINT32_C(0x10)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_GENEVE \
+ /*
+ * This bit must be '1' for the ip_protocol field to be
+ * configured.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
UINT32_C(0x20)
- /* Multi-Protocol Label Switching (MPLS) */
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_MPLS \
+ /*
+ * This bit must be '1' for the src_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \
UINT32_C(0x40)
- /* Stateless Transport Tunnel (STT) */
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_STT \
+ /*
+ * This bit must be '1' for the dst_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \
UINT32_C(0x80)
- /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_IPGRE \
+ /*
+ * This bit must be '1' for the kid field to be
+ * configured.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_KID \
UINT32_C(0x100)
- /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN_V4 \
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
UINT32_C(0x200)
/*
- * Enhance Generic Routing Encapsulation (GRE version 1) inside IP
- * datagram payload
+ * This bit must be '1' for the mirror_vnic_id field to be
+ * configured.
*/
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_IPGRE_V1 \
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
UINT32_C(0x400)
- /* Any tunneled traffic */
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_ANYTUNNEL \
+ /*
+ * This bit must be '1' for the quic_dst_connect_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_QUIC_DST_CONNECT_ID \
UINT32_C(0x800)
- /* Use fixed layer 2 ether type of 0xFFFF */
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_L2_ETYPE \
- UINT32_C(0x1000)
/*
- * IPV6 over virtual eXtensible Local Area Network with GPE header
- * (IPV6oVXLANGPE)
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
*/
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN_GPE_V6 \
- UINT32_C(0x2000)
- /* Generic Protocol Extension for VXLAN (VXLAN-GPE) */
- #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE_OUTPUT_TUNNEL_MASK_VXLAN_GPE \
- UINT32_C(0x4000)
- uint8_t unused_0[3];
+ uint64_t l2_filter_id;
+ uint8_t unused_1[6];
+ /* This value indicates the ethertype in the Ethernet header. */
+ uint16_t ethertype;
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * This value indicates the type of IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
*/
- uint8_t valid;
-} __rte_packed;
-
-/*************************
- * hwrm_cfa_ctx_mem_rgtr *
- *************************/
-
-
-/* hwrm_cfa_ctx_mem_rgtr_input (size:256b/32B) */
-struct hwrm_cfa_ctx_mem_rgtr_input {
- /* The HWRM command request type. */
- uint16_t req_type;
+ uint8_t ip_addr_type;
+ /* invalid */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \
+ UINT32_C(0x0)
+ /* IPv4 */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \
+ UINT32_C(0x4)
+ /* IPv6 */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
+ UINT32_C(0x6)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
+ HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
+ * The value of protocol field in IP header.
+ * Applies to UDP and TCP traffic.
+ * 6 - TCP
+ * 17 - UDP
*/
- uint16_t cmpl_ring;
+ uint8_t ip_protocol;
+ /* invalid */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \
+ UINT32_C(0x0)
+ /* TCP */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \
+ UINT32_C(0x6)
+ /* UDP */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \
+ UINT32_C(0x11)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \
+ HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP
/*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
*/
- uint16_t seq_id;
+ uint16_t dst_id;
/*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
+ * Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
*/
- uint16_t target_id;
+ uint16_t mirror_vnic_id;
+ uint8_t unused_2[2];
/*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
+ * The value of source IP address to be used in filtering.
+ * For IPv4, the first four bytes represent the IP address.
*/
- uint64_t resp_addr;
- uint16_t flags;
- /* Counter PBL indirect levels. */
- uint8_t page_level;
- /* PBL pointer is physical start address. */
- #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0)
- /* PBL pointer points to PTE table. */
- #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1)
+ uint32_t src_ipaddr[4];
/*
- * PBL pointer points to PDE table with each entry pointing to PTE
- * tables.
+ * The value of destination IP address to be used in filtering.
+ * For IPv4, the first four bytes represent the IP address.
*/
- #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2)
- #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LAST \
- HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_2
- /* Page size. */
- uint8_t page_size;
- /* 4KB page size. */
- #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_4K UINT32_C(0x0)
- /* 8KB page size. */
- #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_8K UINT32_C(0x1)
- /* 64KB page size. */
- #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_64K UINT32_C(0x4)
- /* 256KB page size. */
- #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_256K UINT32_C(0x6)
- /* 1MB page size. */
- #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_1M UINT32_C(0x8)
- /* 2MB page size. */
- #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M UINT32_C(0x9)
- /* 4MB page size. */
- #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_4M UINT32_C(0xa)
- /* 1GB page size. */
- #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_1G UINT32_C(0x12)
- #define HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_LAST \
- HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_1G
- uint32_t unused_0;
- /* Pointer to the PBL, or PDL depending on number of levels */
- uint64_t page_dir;
+ uint32_t dst_ipaddr[4];
+ /*
+ * The value of source port to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t src_port;
+ /*
+ * The value of destination port to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t dst_port;
+ /*
+ * The Key Context Identifier (KID) for use with KTLS or QUIC.
+ * KID is limited to 20 bits.
+ */
+ uint32_t kid;
+ /* The Destination Connection ID of QUIC. */
+ uint64_t quic_dst_connect_id;
} __rte_packed;
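A hedged sketch of populating this request for an IPv4/TCP KTLS filter follows. The l2_filter_id, dst_id, addresses, ports and KID are caller-supplied placeholders; the byte ordering of the IP address words is left to the caller since this header does not spell it out, and the HWRM header fields and send path are likewise assumed to be handled elsewhere.

/*
 * Sketch only: fill the body of a TLS filter allocation request for an
 * IPv4/TCP 4-tuple plus a 20-bit key context id (KID). The caller is
 * expected to provide the address words already in the byte order the
 * firmware expects.
 */
#include <string.h>
#include <stdint.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

static void
fill_tls_filter_req(struct hwrm_cfa_tls_filter_alloc_input *req,
		    uint64_t l2_filter_id, uint16_t dst_id,
		    const uint32_t src_ip[4], const uint32_t dst_ip[4],
		    uint16_t src_port, uint16_t dst_port, uint32_t kid)
{
	memset(req, 0, sizeof(*req));
	req->enables = rte_cpu_to_le_32(
	    HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
	    HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE |
	    HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR |
	    HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR |
	    HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
	    HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT |
	    HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_DST_PORT |
	    HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_KID |
	    HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_DST_ID);
	req->l2_filter_id = rte_cpu_to_le_64(l2_filter_id);
	req->ip_addr_type = HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
	req->ip_protocol = HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP;
	memcpy(req->src_ipaddr, src_ip, sizeof(req->src_ipaddr));
	memcpy(req->dst_ipaddr, dst_ip, sizeof(req->dst_ipaddr));
	req->src_port = rte_cpu_to_le_16(src_port);
	req->dst_port = rte_cpu_to_le_16(dst_port);
	req->dst_id = rte_cpu_to_le_16(dst_id);
	req->kid = rte_cpu_to_le_32(kid & 0xfffff);	/* KID is 20 bits */
}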
-/* hwrm_cfa_ctx_mem_rgtr_output (size:128b/16B) */
-struct hwrm_cfa_ctx_mem_rgtr_output {
+/* hwrm_cfa_tls_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_tls_filter_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -50329,12 +56348,49 @@ struct hwrm_cfa_ctx_mem_rgtr_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t tls_filter_id;
/*
- * Id/Handle to the recently register context memory. This handle is
- * passed to the CFA feature.
+ * The flow id value in bits 0-29 is the actual ID of the flow
+ * associated with this filter and it shall be used to match
+ * and associate the flow identifier returned in completion
+ * records. A value of 0xFFFFFFFF in the 32-bit flow_id field
+ * shall indicate no valid flow id.
*/
- uint16_t ctx_id;
- uint8_t unused_0[5];
+ uint32_t flow_id;
+ /* Indicate the flow id value. */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_VALUE_MASK \
+ UINT32_C(0x3fffffff)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_VALUE_SFT 0
+ /* Indicate type of the flow. */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE \
+ UINT32_C(0x40000000)
+ /*
+ * If this bit is set to 0, then it indicates that the flow is an
+ * internal flow.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_INT \
+ (UINT32_C(0x0) << 30)
+ /*
+ * If this bit is set to 1, then it indicates that the flow is
+ * an external flow.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT \
+ (UINT32_C(0x1) << 30)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_LAST \
+ HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT
+ /* Indicate the flow direction. */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR \
+ UINT32_C(0x80000000)
+ /* If this bit is set to 0, then it indicates an rx flow. */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_RX \
+ (UINT32_C(0x0) << 31)
+ /* If this bit is set to 1, then it indicates a tx flow. */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_TX \
+ (UINT32_C(0x1) << 31)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_LAST \
+ HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_TX
+ uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -50346,13 +56402,13 @@ struct hwrm_cfa_ctx_mem_rgtr_output {
uint8_t valid;
} __rte_packed;
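The 32-bit flow_id returned above packs the flow identifier together with type and direction bits; a small sketch of splitting it apart with the masks defined here is shown below (0xFFFFFFFF means no valid flow id).

/*
 * Sketch only: decode the flow_id word from a TLS filter allocation
 * response into its value, type (internal/external) and direction
 * (rx/tx) sub-fields.
 */
#include <stdbool.h>
#include <stdint.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

static bool
decode_tls_flow_id(uint32_t flow_id_le, uint32_t *value, bool *is_ext,
		   bool *is_tx)
{
	uint32_t flow_id = rte_le_to_cpu_32(flow_id_le);

	if (flow_id == UINT32_MAX)
		return false;	/* no valid flow id */
	*value = flow_id & HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_VALUE_MASK;
	*is_ext = !!(flow_id & HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE);
	*is_tx = !!(flow_id & HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR);
	return true;
}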
-/***************************
- * hwrm_cfa_ctx_mem_unrgtr *
- ***************************/
+/****************************
+ * hwrm_cfa_tls_filter_free *
+ ****************************/
-/* hwrm_cfa_ctx_mem_unrgtr_input (size:192b/24B) */
-struct hwrm_cfa_ctx_mem_unrgtr_input {
+/* hwrm_cfa_tls_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_tls_filter_free_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -50381,16 +56437,12 @@ struct hwrm_cfa_ctx_mem_unrgtr_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /*
- * Id/Handle to the recently register context memory. This handle is
- * passed to the CFA feature.
- */
- uint16_t ctx_id;
- uint8_t unused_0[6];
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t tls_filter_id;
} __rte_packed;
-/* hwrm_cfa_ctx_mem_unrgtr_output (size:128b/16B) */
-struct hwrm_cfa_ctx_mem_unrgtr_output {
+/* hwrm_cfa_tls_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_tls_filter_free_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -50411,13 +56463,13 @@ struct hwrm_cfa_ctx_mem_unrgtr_output {
uint8_t valid;
} __rte_packed;
-/*************************
- * hwrm_cfa_ctx_mem_qctx *
- *************************/
+/*****************************
+ * hwrm_cfa_release_afm_func *
+ *****************************/
-/* hwrm_cfa_ctx_mem_qctx_input (size:192b/24B) */
-struct hwrm_cfa_ctx_mem_qctx_input {
+/* hwrm_cfa_release_afm_func_input (size:256b/32B) */
+struct hwrm_cfa_release_afm_func_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -50446,16 +56498,40 @@ struct hwrm_cfa_ctx_mem_qctx_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* Function identifier, may be of type efid, rfid or dfid. */
+ uint16_t fid;
+ /* Representor function identifier. */
+ uint16_t rfid;
+ /* Fid type. */
+ uint8_t type;
+ /* Endpoint fid. */
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_TYPE_EFID UINT32_C(0x1)
+ /* Representor fid. */
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_TYPE_RFID UINT32_C(0x2)
+ /* Redirect fid. */
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_TYPE_DFID UINT32_C(0x3)
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_TYPE_LAST \
+ HWRM_CFA_RELEASE_AFM_FUNC_INPUT_TYPE_DFID
+ uint8_t unused_0[3];
/*
- * Id/Handle to the recently register context memory. This handle is
- * passed to the CFA feature.
+ * Flags used to control AFM's actions when releasing the function.
+ * Only used when type is dfid.
*/
- uint16_t ctx_id;
- uint8_t unused_0[6];
+ uint32_t flags;
+ /* Remove broadcast. */
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_FLAGS_BC_REM \
+ UINT32_C(0x1)
+ /* Remove multicast. */
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_FLAGS_MC_REM \
+ UINT32_C(0x2)
+ /* Remove promiscuous. */
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_FLAGS_PROMISC_REM \
+ UINT32_C(0x4)
+ uint32_t unused_1;
} __rte_packed;
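A short sketch of releasing a dfid back to AFM, asking the firmware to also drop its broadcast and multicast entries, is shown below; the dfid value itself and the surrounding HWRM header handling are assumed.

/*
 * Sketch only: build the body of a release-AFM-function request for a
 * redirect fid (dfid). The flags are only honoured for the dfid type.
 */
#include <string.h>
#include <stdint.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

static void
fill_release_afm_req(struct hwrm_cfa_release_afm_func_input *req,
		     uint16_t dfid)
{
	memset(req, 0, sizeof(*req));
	req->fid = rte_cpu_to_le_16(dfid);
	req->type = HWRM_CFA_RELEASE_AFM_FUNC_INPUT_TYPE_DFID;
	req->flags = rte_cpu_to_le_32(
	    HWRM_CFA_RELEASE_AFM_FUNC_INPUT_FLAGS_BC_REM |
	    HWRM_CFA_RELEASE_AFM_FUNC_INPUT_FLAGS_MC_REM);
}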
-/* hwrm_cfa_ctx_mem_qctx_output (size:256b/32B) */
-struct hwrm_cfa_ctx_mem_qctx_output {
+/* hwrm_cfa_release_afm_func_output (size:128b/16B) */
+struct hwrm_cfa_release_afm_func_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -50464,44 +56540,7 @@ struct hwrm_cfa_ctx_mem_qctx_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint16_t flags;
- /* Counter PBL indirect levels. */
- uint8_t page_level;
- /* PBL pointer is physical start address. */
- #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0)
- /* PBL pointer points to PTE table. */
- #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1)
- /*
- * PBL pointer points to PDE table with each entry pointing to PTE
- * tables.
- */
- #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2)
- #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LAST \
- HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_LEVEL_LVL_2
- /* Page size. */
- uint8_t page_size;
- /* 4KB page size. */
- #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_4K UINT32_C(0x0)
- /* 8KB page size. */
- #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_8K UINT32_C(0x1)
- /* 64KB page size. */
- #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_64K UINT32_C(0x4)
- /* 256KB page size. */
- #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_256K UINT32_C(0x6)
- /* 1MB page size. */
- #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_1M UINT32_C(0x8)
- /* 2MB page size. */
- #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_2M UINT32_C(0x9)
- /* 4MB page size. */
- #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_4M UINT32_C(0xa)
- /* 1GB page size. */
- #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_1G UINT32_C(0x12)
- #define HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_LAST \
- HWRM_CFA_CTX_MEM_QCTX_OUTPUT_PAGE_SIZE_1G
- uint8_t unused_0[4];
- /* Pointer to the PBL, or PDL depending on number of levels */
- uint64_t page_dir;
- uint8_t unused_1[7];
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -50513,13 +56552,13 @@ struct hwrm_cfa_ctx_mem_qctx_output {
uint8_t valid;
} __rte_packed;
-/**************************
- * hwrm_cfa_ctx_mem_qcaps *
- **************************/
+/***********
+ * hwrm_tf *
+ ***********/
-/* hwrm_cfa_ctx_mem_qcaps_input (size:128b/16B) */
-struct hwrm_cfa_ctx_mem_qcaps_input {
+/* hwrm_tf_input (size:1024b/128B) */
+struct hwrm_tf_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -50548,10 +56587,18 @@ struct hwrm_cfa_ctx_mem_qcaps_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* TF message type. */
+ uint16_t type;
+ /* TF message subtype. */
+ uint16_t subtype;
+ /* unused. */
+ uint8_t unused0[4];
+ /* TF request data. */
+ uint32_t req[26];
} __rte_packed;
-/* hwrm_cfa_ctx_mem_qcaps_output (size:128b/16B) */
-struct hwrm_cfa_ctx_mem_qcaps_output {
+/* hwrm_tf_output (size:5632b/704B) */
+struct hwrm_tf_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -50560,30 +56607,34 @@ struct hwrm_cfa_ctx_mem_qcaps_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
+ /* TF message type. */
+ uint16_t type;
+ /* TF message subtype. */
+ uint16_t subtype;
+ /* TF response code */
+ uint32_t resp_code;
+ /* TF response data. */
+ uint32_t resp[170];
+ /* unused. */
+ uint8_t unused1[7];
/*
- * Indicates the maximum number of context memory which can be
- * registered.
- */
- uint16_t max_entries;
- uint8_t unused_0[5];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been
+ * completely written. When writing a command completion or
+ * response to an internal processor, the order of writes has
+ * to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
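hwrm_tf is a thin envelope: a type/subtype pair followed by an opaque payload of 26 request dwords and 170 response dwords. The sketch below packs a caller-supplied payload into the request area; the type and subtype values are owned by the TruFlow layer and are simply passed through here.

/*
 * Sketch only: wrap an opaque TruFlow payload in the generic hwrm_tf
 * envelope. Only the copy and the length check against the 26-dword
 * request area are handled here.
 */
#include <string.h>
#include <stdint.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

static int
fill_tf_req(struct hwrm_tf_input *req, uint16_t type, uint16_t subtype,
	    const void *payload, size_t payload_len)
{
	if (payload_len > sizeof(req->req))
		return -1;	/* payload does not fit the envelope */
	memset(req->req, 0, sizeof(req->req));
	req->type = rte_cpu_to_le_16(type);
	req->subtype = rte_cpu_to_le_16(subtype);
	memcpy(req->req, payload, payload_len);
	return 0;
}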
-/**************************
- * hwrm_cfa_counter_qcaps *
- **************************/
+/***********************
+ * hwrm_tf_version_get *
+ ***********************/
-/* hwrm_cfa_counter_qcaps_input (size:128b/16B) */
-struct hwrm_cfa_counter_qcaps_input {
+/* hwrm_tf_version_get_input (size:128b/16B) */
+struct hwrm_tf_version_get_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -50614,8 +56665,8 @@ struct hwrm_cfa_counter_qcaps_input {
uint64_t resp_addr;
} __rte_packed;
-/* hwrm_cfa_counter_qcaps_output (size:576b/72B) */
-struct hwrm_cfa_counter_qcaps_output {
+/* hwrm_tf_version_get_output (size:256b/32B) */
+struct hwrm_tf_version_get_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -50624,85 +56675,21 @@ struct hwrm_cfa_counter_qcaps_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint32_t flags;
- /* Enumeration denoting the supported CFA counter format. */
- #define HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT \
- UINT32_C(0x1)
- /* CFA counter types are not supported. */
- #define HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT_NONE \
- UINT32_C(0x0)
- /* 64-bit packet counters followed by 64-bit byte counters format. */
- #define HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT_64_BIT \
- UINT32_C(0x1)
- #define HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT_LAST \
- HWRM_CFA_COUNTER_QCAPS_OUTPUT_FLAGS_COUNTER_FORMAT_64_BIT
- uint32_t unused_0;
- /*
- * Minimum guaranteed number of flow counters supported for this
- * function, in RX direction.
- */
- uint32_t min_rx_fc;
- /*
- * Maximum non-guaranteed number of flow counters supported for this
- * function, in RX direction.
- */
- uint32_t max_rx_fc;
- /*
- * Minimum guaranteed number of flow counters supported for this
- * function, in TX direction.
- */
- uint32_t min_tx_fc;
- /*
- * Maximum non-guaranteed number of flow counters supported for this
- * function, in TX direction.
- */
- uint32_t max_tx_fc;
- /*
- * Minimum guaranteed number of extension flow counters supported for
- * this function, in RX direction.
- */
- uint32_t min_rx_efc;
- /*
- * Maximum non-guaranteed number of extension flow counters supported
- * for this function, in RX direction.
- */
- uint32_t max_rx_efc;
- /*
- * Minimum guaranteed number of extension flow counters supported for
- * this function, in TX direction.
- */
- uint32_t min_tx_efc;
- /*
- * Maximum non-guaranteed number of extension flow counters supported
- * for this function, in TX direction.
- */
- uint32_t max_tx_efc;
- /*
- * Minimum guaranteed number of meter drop counters supported for
- * this function, in RX direction.
- */
- uint32_t min_rx_mdc;
- /*
- * Maximum non-guaranteed number of meter drop counters supported for
- * this function, in RX direction.
- */
- uint32_t max_rx_mdc;
- /*
- * Minimum guaranteed number of meter drop counters supported for this
- * function, in TX direction.
- */
- uint32_t min_tx_mdc;
- /*
- * Maximum non-guaranteed number of meter drop counters supported for
- * this function, in TX direction.
- */
- uint32_t max_tx_mdc;
+ /* Version Major number. */
+ uint8_t major;
+ /* Version Minor number. */
+ uint8_t minor;
+ /* Version Update number. */
+ uint8_t update;
+ /* unused. */
+ uint8_t unused0[5];
/*
- * Maximum guaranteed number of flow counters which can be used during
- * flow alloc.
+ * This field is used to indicate the device's capabilities and
+ * configurations.
*/
- uint32_t max_flow_alloc_fc;
- uint8_t unused_1[3];
+ uint64_t dev_caps_cfg;
+ /* unused. */
+ uint8_t unused1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -50715,12 +56702,12 @@ struct hwrm_cfa_counter_qcaps_output {
} __rte_packed;
/************************
- * hwrm_cfa_counter_cfg *
+ * hwrm_tf_session_open *
************************/
-/* hwrm_cfa_counter_cfg_input (size:256b/32B) */
-struct hwrm_cfa_counter_cfg_input {
+/* hwrm_tf_session_open_input (size:640b/80B) */
+struct hwrm_tf_session_open_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -50749,64 +56736,12 @@ struct hwrm_cfa_counter_cfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint16_t flags;
- /* Enumeration denoting the configuration mode. */
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE \
- UINT32_C(0x1)
- /* Disable the configuration mode. */
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE \
- UINT32_C(0x0)
- /* Enable the configuration mode. */
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE \
- UINT32_C(0x1)
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_LAST \
- HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE
- /* Enumeration denoting the RX, TX type of the resource. */
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH \
- UINT32_C(0x2)
- /* Tx path. */
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX \
- (UINT32_C(0x0) << 1)
- /* Rx path. */
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX \
- (UINT32_C(0x1) << 1)
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_LAST \
- HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX
- /* Enumeration denoting the data transfer mode. */
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_MASK \
- UINT32_C(0xc)
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_SFT 2
- /* Push mode. */
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PUSH \
- (UINT32_C(0x0) << 2)
- /* Pull mode. */
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL \
- (UINT32_C(0x1) << 2)
- /* Pull on async update. */
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC \
- (UINT32_C(0x2) << 2)
- #define HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_LAST \
- HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC
- uint16_t counter_type;
- /* Flow counters. */
- #define HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_FC UINT32_C(0x0)
- /* Extended flow counters. */
- #define HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_EFC UINT32_C(0x1)
- /* Meter drop counters. */
- #define HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_MDC UINT32_C(0x2)
- #define HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_LAST \
- HWRM_CFA_COUNTER_CFG_INPUT_COUNTER_TYPE_MDC
- /* Ctx memory handle to be used for the counter. */
- uint16_t ctx_id;
- /* Counter update cadence hint (only in Push mode). */
- uint16_t update_tmr_ms;
- /* Total number of entries. */
- uint32_t num_entries;
- uint32_t unused_0;
+ /* Name of the session. */
+ uint8_t session_name[64];
} __rte_packed;
-/* hwrm_cfa_counter_cfg_output (size:128b/16B) */
-struct hwrm_cfa_counter_cfg_output {
+/* hwrm_tf_session_open_output (size:192b/24B) */
+struct hwrm_tf_session_open_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -50815,7 +56750,41 @@ struct hwrm_cfa_counter_cfg_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /*
+ * Unique session identifier for the session created by the
+ * firmware.
+ */
+ uint32_t fw_session_id;
+ /*
+ * Unique session client identifier for the first client on
+ * the newly created session.
+ */
+ uint32_t fw_session_client_id;
+ /* This field is used to return the status of fw session to host. */
+ uint32_t flags;
+ /*
+ * Indicates if the shared session has been created. The shared
+ * session should be the first session ever created. Its fw_rm_client_id
+ * should be 1. The AFM session's fw_rm_client_id is 0.
+ */
+ #define HWRM_TF_SESSION_OPEN_OUTPUT_FLAGS_SHARED_SESSION \
+ UINT32_C(0x1)
+ /*
+ * If this bit is set to 0, then it indicates the shared session
+ * has been created by another session.
+ */
+ #define HWRM_TF_SESSION_OPEN_OUTPUT_FLAGS_SHARED_SESSION_NOT_CREATOR \
+ UINT32_C(0x0)
+ /*
+ * If this bit is set to 1, then it indicates the shared session
+ * is created by this session.
+ */
+ #define HWRM_TF_SESSION_OPEN_OUTPUT_FLAGS_SHARED_SESSION_CREATOR \
+ UINT32_C(0x1)
+ #define HWRM_TF_SESSION_OPEN_OUTPUT_FLAGS_SHARED_SESSION_LAST \
+ HWRM_TF_SESSION_OPEN_OUTPUT_FLAGS_SHARED_SESSION_CREATOR
+ /* unused. */
+ uint8_t unused1[3];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -50827,13 +56796,13 @@ struct hwrm_cfa_counter_cfg_output {
uint8_t valid;
} __rte_packed;
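After HWRM_TF_SESSION_OPEN completes, the driver needs the two returned identifiers and, from the flags word, whether this session created the shared session or merely attached to it. A minimal sketch, assuming the response has already been validated, is shown below.

/*
 * Sketch only: pull the session ids and the shared-session-creator
 * indication out of a completed session-open response.
 */
#include <stdbool.h>
#include <stdint.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

struct tf_session_ids {
	uint32_t fw_session_id;
	uint32_t fw_session_client_id;
	bool shared_session_creator;
};

static void
parse_session_open_resp(const struct hwrm_tf_session_open_output *resp,
			struct tf_session_ids *ids)
{
	ids->fw_session_id = rte_le_to_cpu_32(resp->fw_session_id);
	ids->fw_session_client_id = rte_le_to_cpu_32(resp->fw_session_client_id);
	ids->shared_session_creator = !!(rte_le_to_cpu_32(resp->flags) &
	    HWRM_TF_SESSION_OPEN_OUTPUT_FLAGS_SHARED_SESSION);
}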
-/***************************
- * hwrm_cfa_counter_qstats *
- ***************************/
+/****************************
+ * hwrm_tf_session_register *
+ ****************************/
-/* hwrm_cfa_counter_qstats_input (size:320b/40B) */
-struct hwrm_cfa_counter_qstats_input {
+/* hwrm_tf_session_register_input (size:704b/88B) */
+struct hwrm_tf_session_register_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -50862,27 +56831,23 @@ struct hwrm_cfa_counter_qstats_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint16_t flags;
- /* Enumeration denoting the RX, TX type of the resource. */
- #define HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH UINT32_C(0x1)
- /* Tx path. */
- #define HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
- /* Rx path. */
- #define HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
- #define HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_LAST \
- HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX
- uint16_t counter_type;
- uint16_t input_flow_ctx_id;
- uint16_t num_entries;
- uint16_t delta_time_ms;
- uint16_t meter_instance_id;
- uint16_t mdc_ctx_id;
- uint8_t unused_0[2];
- uint64_t expected_count;
+ /*
+ * Unique session identifier for the session that the
+ * register request want to create a new client on. This
+ * value originates from the first open request.
+ * The fw_session_id of the attach session includes PCIe bus
+ * info to distinguish the PF and session info to identify
+ * the associated TruFlow session.
+ */
+ uint32_t fw_session_id;
+ /* unused. */
+ uint32_t unused0;
+ /* Name of the session client. */
+ uint8_t session_client_name[64];
} __rte_packed;
-/* hwrm_cfa_counter_qstats_output (size:128b/16B) */
-struct hwrm_cfa_counter_qstats_output {
+/* hwrm_tf_session_register_output (size:128b/16B) */
+struct hwrm_tf_session_register_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -50891,7 +56856,14 @@ struct hwrm_cfa_counter_qstats_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /*
+ * Unique session client identifier for the session created
+ * by the firmware. It includes the session the client it
+ * attached to and session client info.
+ */
+ uint32_t fw_session_client_id;
+ /* unused. */
+ uint8_t unused0[3];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -50903,13 +56875,13 @@ struct hwrm_cfa_counter_qstats_output {
uint8_t valid;
} __rte_packed;
-/**********************
- * hwrm_cfa_eem_qcaps *
- **********************/
+/******************************
+ * hwrm_tf_session_unregister *
+ ******************************/
-/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
-struct hwrm_cfa_eem_qcaps_input {
+/* hwrm_tf_session_unregister_input (size:192b/24B) */
+struct hwrm_tf_session_unregister_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -50938,29 +56910,20 @@ struct hwrm_cfa_eem_qcaps_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t flags;
/*
- * When set to 1, indicates the configuration will apply to TX flows
- * which are to be offloaded.
- * Note if this bit is set then the path_rx bit can't be set.
+ * Unique session identifier for the session that the
+ * unregister request wants to close a session client on.
*/
- #define HWRM_CFA_EEM_QCAPS_INPUT_FLAGS_PATH_TX \
- UINT32_C(0x1)
+ uint32_t fw_session_id;
/*
- * When set to 1, indicates the configuration will apply to RX flows
- * which are to be offloaded.
- * Note if this bit is set then the path_tx bit can't be set.
+ * Unique session client identifier for the session that the
+ * unregister request wants to close.
*/
- #define HWRM_CFA_EEM_QCAPS_INPUT_FLAGS_PATH_RX \
- UINT32_C(0x2)
- /* When set to 1, all offloaded flows will be sent to EEM. */
- #define HWRM_CFA_EEM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD \
- UINT32_C(0x4)
- uint32_t unused_0;
+ uint32_t fw_session_client_id;
} __rte_packed;
-/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
-struct hwrm_cfa_eem_qcaps_output {
+/* hwrm_tf_session_unregister_output (size:128b/16B) */
+struct hwrm_tf_session_unregister_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -50969,91 +56932,8 @@ struct hwrm_cfa_eem_qcaps_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint32_t flags;
- /*
- * When set to 1, indicates the configuration will apply to TX flows
- * which are to be offloaded.
- * Note if this bit is set then the path_rx bit can't be set.
- */
- #define HWRM_CFA_EEM_QCAPS_OUTPUT_FLAGS_PATH_TX \
- UINT32_C(0x1)
- /*
- * When set to 1, indicates the configuration will apply to RX flows
- * which are to be offloaded.
- * Note if this bit is set then the path_tx bit can't be set.
- */
- #define HWRM_CFA_EEM_QCAPS_OUTPUT_FLAGS_PATH_RX \
- UINT32_C(0x2)
- /*
- * When set to 1, indicates the FW supports the Centralized
- * Memory Model. The concept designates one entity for the
- * memory allocation while all others ‘subscribe’ to it.
- */
- #define HWRM_CFA_EEM_QCAPS_OUTPUT_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED \
- UINT32_C(0x4)
- /*
- * When set to 1, indicates the FW supports the Detached
- * Centralized Memory Model. The memory is allocated and managed
- * as a separate entity. All PFs and VFs will be granted direct
- * or semi-direct access to the allocated memory while none of
- * which can interfere with the management of the memory.
- */
- #define HWRM_CFA_EEM_QCAPS_OUTPUT_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED \
- UINT32_C(0x8)
- uint32_t unused_0;
- uint32_t supported;
- /*
- * If set to 1, then EEM KEY0 table is supported using crc32 hash.
- * If set to 0, EEM KEY0 table is not supported.
- */
- #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_KEY0_TABLE \
- UINT32_C(0x1)
- /*
- * If set to 1, then EEM KEY1 table is supported using lookup3 hash.
- * If set to 0, EEM KEY1 table is not supported.
- */
- #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_KEY1_TABLE \
- UINT32_C(0x2)
- /*
- * If set to 1, then EEM External Record table is supported.
- * If set to 0, EEM External Record table is not supported.
- * (This table includes action record, EFC pointers, encap pointers)
- */
- #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_EXTERNAL_RECORD_TABLE \
- UINT32_C(0x4)
- /*
- * If set to 1, then EEM External Flow Counters table is supported.
- * If set to 0, EEM External Flow Counters table is not supported.
- */
- #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE \
- UINT32_C(0x8)
- /*
- * If set to 1, then FID table used for implicit flow flush is
- * supported.
- * If set to 0, then FID table used for implicit flow flush is
- * not supported.
- */
- #define HWRM_CFA_EEM_QCAPS_OUTPUT_SUPPORTED_FID_TABLE \
- UINT32_C(0x10)
- /*
- * The maximum number of entries supported by EEM. When configuring
- * the host memory, the number of numbers of entries that can
- * supported are:
- * 32k, 64k 128k, 256k, 512k, 1M, 2M, 4M, 8M, 32M, 64M, 128M
- * entries.
- * Any value that are not these values, the FW will round down to the
- * closest support number of entries.
- */
- uint32_t max_entries_supported;
- /* The entry size in bytes of each entry in the EEM KEY0/KEY1 tables. */
- uint16_t key_entry_size;
- /* The entry size in bytes of each entry in the EEM RECORD tables. */
- uint16_t record_entry_size;
- /* The entry size in bytes of each entry in the EEM EFC tables. */
- uint16_t efc_entry_size;
- /* The FID size in bytes of each entry in the EEM FID tables. */
- uint16_t fid_entry_size;
- uint8_t unused_1[7];
+ /* unused. */
+ uint8_t unused0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -51063,15 +56943,15 @@ struct hwrm_cfa_eem_qcaps_output {
* written last.
*/
uint8_t valid;
-} __rte_packed;
-
-/********************
- * hwrm_cfa_eem_cfg *
- ********************/
+} __rte_packed;
+
+/*************************
+ * hwrm_tf_session_close *
+ *************************/
-/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
-struct hwrm_cfa_eem_cfg_input {
+/* hwrm_tf_session_close_input (size:192b/24B) */
+struct hwrm_tf_session_close_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -51100,57 +56980,14 @@ struct hwrm_cfa_eem_cfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t flags;
- /*
- * When set to 1, indicates the configuration will apply to TX flows
- * which are to be offloaded.
- * Note if this bit is set then the path_rx bit can't be set.
- */
- #define HWRM_CFA_EEM_CFG_INPUT_FLAGS_PATH_TX \
- UINT32_C(0x1)
- /*
- * When set to 1, indicates the configuration will apply to RX flows
- * which are to be offloaded.
- * Note if this bit is set then the path_tx bit can't be set.
- */
- #define HWRM_CFA_EEM_CFG_INPUT_FLAGS_PATH_RX \
- UINT32_C(0x2)
- /* When set to 1, all offloaded flows will be sent to EEM. */
- #define HWRM_CFA_EEM_CFG_INPUT_FLAGS_PREFERRED_OFFLOAD \
- UINT32_C(0x4)
- /* When set to 1, secondary, 0 means primary. */
- #define HWRM_CFA_EEM_CFG_INPUT_FLAGS_SECONDARY_PF \
- UINT32_C(0x8)
- /*
- * Group_id which used by Firmware to identify memory pools belonging
- * to certain group.
- */
- uint16_t group_id;
- uint16_t unused_0;
- /*
- * Configured EEM with the given number of entries. All the EEM tables
- * KEY0, KEY1, RECORD, EFC all have the same number of entries and all
- * tables will be configured using this value. Current minimum value
- * is 32k. Current maximum value is 128M.
- */
- uint32_t num_entries;
- uint32_t unused_1;
- /* Configured EEM with the given context if for KEY0 table. */
- uint16_t key0_ctx_id;
- /* Configured EEM with the given context if for KEY1 table. */
- uint16_t key1_ctx_id;
- /* Configured EEM with the given context if for RECORD table. */
- uint16_t record_ctx_id;
- /* Configured EEM with the given context if for EFC table. */
- uint16_t efc_ctx_id;
- /* Configured EEM with the given context if for EFC table. */
- uint16_t fid_ctx_id;
- uint16_t unused_2;
- uint32_t unused_3;
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* unused. */
+ uint8_t unused0[4];
} __rte_packed;
-/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
-struct hwrm_cfa_eem_cfg_output {
+/* hwrm_tf_session_close_output (size:128b/16B) */
+struct hwrm_tf_session_close_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -51159,25 +56996,26 @@ struct hwrm_cfa_eem_cfg_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /* unused. */
+ uint8_t unused0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
-/*********************
- * hwrm_cfa_eem_qcfg *
- *********************/
+/************************
+ * hwrm_tf_session_qcfg *
+ ************************/
-/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
-struct hwrm_cfa_eem_qcfg_input {
+/* hwrm_tf_session_qcfg_input (size:192b/24B) */
+struct hwrm_tf_session_qcfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -51206,16 +57044,14 @@ struct hwrm_cfa_eem_qcfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t flags;
- /* When set to 1, indicates the configuration is the TX flow. */
- #define HWRM_CFA_EEM_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x1)
- /* When set to 1, indicates the configuration is the RX flow. */
- #define HWRM_CFA_EEM_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x2)
- uint32_t unused_0;
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* unused. */
+ uint8_t unused0[4];
} __rte_packed;
-/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
-struct hwrm_cfa_eem_qcfg_output {
+/* hwrm_tf_session_qcfg_output (size:128b/16B) */
+struct hwrm_tf_session_qcfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -51224,47 +57060,74 @@ struct hwrm_cfa_eem_qcfg_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint32_t flags;
- /* When set to 1, indicates the configuration is the TX flow. */
- #define HWRM_CFA_EEM_QCFG_OUTPUT_FLAGS_PATH_TX \
+ /* RX action control settings flags. */
+ uint8_t rx_act_flags;
+ /*
+ * A value of 1 in this field indicates that Global Flow ID
+ * reporting into cfa_code and cfa_metadata is enabled.
+ */
+ #define HWRM_TF_SESSION_QCFG_OUTPUT_RX_ACT_FLAGS_ABCR_GFID_EN \
UINT32_C(0x1)
- /* When set to 1, indicates the configuration is the RX flow. */
- #define HWRM_CFA_EEM_QCFG_OUTPUT_FLAGS_PATH_RX \
+ /*
+ * A value of 1 in this field indicates that both the inner and
+ * outer VLAN tags are stripped and the inner tag is passed.
+ */
+ #define HWRM_TF_SESSION_QCFG_OUTPUT_RX_ACT_FLAGS_ABCR_VTAG_DLT_BOTH \
UINT32_C(0x2)
- /* When set to 1, all offloaded flows will be sent to EEM. */
- #define HWRM_CFA_EEM_QCFG_OUTPUT_FLAGS_PREFERRED_OFFLOAD \
+ /*
+ * A value of 1 in this field indicates that the re-use of
+ * existing tunnel L2 header SMAC is enabled for
+ * Non-tunnel L2, L2-L3 and IP-IP tunnel.
+ */
+ #define HWRM_TF_SESSION_QCFG_OUTPUT_RX_ACT_FLAGS_TECT_SMAC_OVR_RUTNSL2 \
UINT32_C(0x4)
- /* The number of entries the FW has configured for EEM. */
- uint32_t num_entries;
- /* Configured EEM with the given context if for KEY0 table. */
- uint16_t key0_ctx_id;
- /* Configured EEM with the given context if for KEY1 table. */
- uint16_t key1_ctx_id;
- /* Configured EEM with the given context if for RECORD table. */
- uint16_t record_ctx_id;
- /* Configured EEM with the given context if for EFC table. */
- uint16_t efc_ctx_id;
- /* Configured EEM with the given context if for EFC table. */
- uint16_t fid_ctx_id;
- uint8_t unused_2[5];
+ /* TX Action control settings flags. */
+ uint8_t tx_act_flags;
+ /* Disabled. */
+ #define HWRM_TF_SESSION_QCFG_OUTPUT_TX_ACT_FLAGS_ABCR_VEB_EN \
+ UINT32_C(0x1)
+ /*
+ * When set to 1 any GRE tunnels will include the
+ * optional Key field.
+ */
+ #define HWRM_TF_SESSION_QCFG_OUTPUT_TX_ACT_FLAGS_TECT_GRE_SET_K \
+ UINT32_C(0x2)
+ /*
+ * When set to 1, for GRE tunnels, the IPV6 Traffic Class (TC)
+ * field of the outer header is inherited from the inner header
+ * (if present) or the fixed value as taken from the encap
+ * record.
+ */
+ #define HWRM_TF_SESSION_QCFG_OUTPUT_TX_ACT_FLAGS_TECT_IPV6_TC_IH \
+ UINT32_C(0x4)
+ /*
+ * When set to 1, for GRE tunnels, the IPV4 Type Of Service (TOS)
+ * field of the outer header is inherited from the inner header
+ * (if present) or the fixed value as taken from the encap record.
+ */
+ #define HWRM_TF_SESSION_QCFG_OUTPUT_TX_ACT_FLAGS_TECT_IPV4_TOS_IH \
+ UINT32_C(0x8)
+ /* unused. */
+ uint8_t unused0[5];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
-/*******************
- * hwrm_cfa_eem_op *
- *******************/
+/******************************
+ * hwrm_tf_session_resc_qcaps *
+ ******************************/
-/* hwrm_cfa_eem_op_input (size:192b/24B) */
-struct hwrm_cfa_eem_op_input {
+/* hwrm_tf_session_resc_qcaps_input (size:256b/32B) */
+struct hwrm_tf_session_resc_qcaps_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -51293,49 +57156,36 @@ struct hwrm_cfa_eem_op_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t flags;
- /*
- * When set to 1, indicates the host memory which is passed will be
- * used for the TX flow offload function specified in fid.
- * Note if this bit is set then the path_rx bit can't be set.
- */
- #define HWRM_CFA_EEM_OP_INPUT_FLAGS_PATH_TX UINT32_C(0x1)
- /*
- * When set to 1, indicates the host memory which is passed will be
- * used for the RX flow offload function specified in fid.
- * Note if this bit is set then the path_tx bit can't be set.
- */
- #define HWRM_CFA_EEM_OP_INPUT_FLAGS_PATH_RX UINT32_C(0x2)
- uint16_t unused_0;
- /* The number of EEM key table entries to be configured. */
- uint16_t op;
- /* This value is reserved and should not be used. */
- #define HWRM_CFA_EEM_OP_INPUT_OP_RESERVED UINT32_C(0x0)
- /*
- * To properly stop EEM and ensure there are no DMA's, the caller
- * must disable EEM for the given PF, using this call. This will
- * safely disable EEM and ensure that all DMA'ed to the
- * keys/records/efc have been completed.
- */
- #define HWRM_CFA_EEM_OP_INPUT_OP_EEM_DISABLE UINT32_C(0x1)
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR_TX
/*
- * Once the EEM host memory has been configured, EEM options have
- * been configured. Then the caller should enable EEM for the given
- * PF. Note once this call has been made, then the EEM mechanism
- * will be active and DMA's will occur as packets are processed.
+ * Defines the size of the provided qcaps_addr array
+ * buffer. The size should be set to the maximum number of
+ * qcaps entries provided by the Resource Manager, which is
+ * device specific. The Resource Manager gets the max size
+ * from HCAPI RM.
*/
- #define HWRM_CFA_EEM_OP_INPUT_OP_EEM_ENABLE UINT32_C(0x2)
+ uint16_t qcaps_size;
/*
- * Clear EEM settings for the given PF so that the register values
- * are reset back to there initial state.
+ * This is the DMA address for the qcaps output data array
+ * buffer. Array is of tf_rm_resc_req_entry type and is
+ * device specific.
*/
- #define HWRM_CFA_EEM_OP_INPUT_OP_EEM_CLEANUP UINT32_C(0x3)
- #define HWRM_CFA_EEM_OP_INPUT_OP_LAST \
- HWRM_CFA_EEM_OP_INPUT_OP_EEM_CLEANUP
+ uint64_t qcaps_addr;
} __rte_packed;
-/* hwrm_cfa_eem_op_output (size:128b/16B) */
-struct hwrm_cfa_eem_op_output {
+/* hwrm_tf_session_resc_qcaps_output (size:192b/24B) */
+struct hwrm_tf_session_resc_qcaps_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -51344,7 +57194,42 @@ struct hwrm_cfa_eem_op_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /* Control flags. */
+ uint32_t flags;
+ /* Session reservation strategy. */
+ #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_MASK \
+ UINT32_C(0x3)
+ #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_SFT \
+ 0
+ /* Static partitioning. */
+ #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_STATIC \
+ UINT32_C(0x0)
+ /* Strategy 1. */
+ #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_1 \
+ UINT32_C(0x1)
+ /* Strategy 2. */
+ #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_2 \
+ UINT32_C(0x2)
+ /* Strategy 3. */
+ #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_3 \
+ UINT32_C(0x3)
+ #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_LAST \
+ HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_3
+ /*
+ * Size of the returned qcaps_addr data array buffer. The
+ * value cannot exceed the size defined by the input msg,
+ * qcaps_size.
+ */
+ uint16_t size;
+ /*
+ * SRAM profile number that sets the partition of SRAM memory
+ * between TF and AFM within the 4 internal memory banks (Thor).
+ */
+ uint8_t sram_profile;
+ /* unused. */
+ uint8_t unused0;
+ /* unused. */
+ uint8_t unused1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -51356,13 +57241,13 @@ struct hwrm_cfa_eem_op_output {
uint8_t valid;
} __rte_packed;
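
The reservation strategy is reported as a two-bit field inside the response
flags, so callers extract it with the _MASK/_SFT pair defined above. The
sketch below is illustrative only; it assumes this header is available and
that 'resp' points at an already completed hwrm_tf_session_resc_qcaps_output.

#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Return the SESS_RESV_STRATEGY value (0..3) carried in the qcaps flags. */
static uint32_t
tf_resc_qcaps_strategy(const struct hwrm_tf_session_resc_qcaps_output *resp)
{
	uint32_t flags = rte_le_to_cpu_32(resp->flags);

	return (flags &
		HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_MASK) >>
	       HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_SFT;
}
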
-/********************************
- * hwrm_cfa_adv_flow_mgnt_qcaps *
- ********************************/
+/******************************
+ * hwrm_tf_session_resc_alloc *
+ ******************************/
-/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
-struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
+/* hwrm_tf_session_resc_alloc_input (size:320b/40B) */
+struct hwrm_tf_session_resc_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -51391,11 +57276,42 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t unused_0[4];
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR_TX
+ /*
+ * Defines the array size of the provided req_addr and
+ * resc_addr array buffers. Should be set to the number of
+ * request entries.
+ */
+ uint16_t req_size;
+ /*
+ * This is the DMA address for the request input data array
+ * buffer. Array is of tf_rm_resc_req_entry type. Size of the
+ * array buffer is provided by the 'req_size' field in this
+ * message.
+ */
+ uint64_t req_addr;
+ /*
+ * This is the DMA address for the resc output data array
+ * buffer. Array is of tf_rm_resc_entry type. Size of the array
+ * buffer is provided by the 'req_size' field in this
+ * message.
+ */
+ uint64_t resc_addr;
} __rte_packed;
-/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
-struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
+/* hwrm_tf_session_resc_alloc_output (size:128b/16B) */
+struct hwrm_tf_session_resc_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -51404,165 +57320,99 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint32_t flags;
- /*
- * Value of 1 to indicate firmware support 16-bit flow handle.
- * Value of 0 to indicate firmware not support 16-bit flow handle.
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_HND_16BIT_SUPPORTED \
- UINT32_C(0x1)
- /*
- * Value of 1 to indicate firmware support 64-bit flow handle.
- * Value of 0 to indicate firmware not support 64-bit flow handle.
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_HND_64BIT_SUPPORTED \
- UINT32_C(0x2)
- /*
- * Value of 1 to indicate firmware support flow batch delete
- * operation through HWRM_CFA_FLOW_FLUSH command.
- * Value of 0 to indicate that the firmware does not support flow
- * batch delete operation. (deprecated)
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_BATCH_DELETE_SUPPORTED \
- UINT32_C(0x4)
- /*
- * Value of 1 to indicate that the firmware support flow reset all
- * operation through HWRM_CFA_FLOW_FLUSH command.
- * Value of 0 indicates firmware does not support flow reset all
- * operation. (deprecated)
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_RESET_ALL_SUPPORTED \
- UINT32_C(0x8)
- /*
- * Value of 1 to indicate that firmware supports use of FID as
- * dest_id in HWRM_CFA_NTUPLE_ALLOC/CFG commands.
- * Value of 0 indicates firmware does not support use of FID as
- * dest_id.
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED \
- UINT32_C(0x10)
- /*
- * Value of 1 to indicate that firmware supports TX EEM flows.
- * Value of 0 indicates firmware does not support TX EEM flows.
- * (deprecated)
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_TX_EEM_FLOW_SUPPORTED \
- UINT32_C(0x20)
- /*
- * Value of 1 to indicate that firmware supports RX EEM flows.
- * Value of 0 indicates firmware does not support RX EEM flows.
- * (deprecated)
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RX_EEM_FLOW_SUPPORTED \
- UINT32_C(0x40)
- /*
- * Value of 1 to indicate that firmware supports the dynamic
- * allocation of an on-chip flow counter which can be used for EEM
- * flows. Value of 0 indicates firmware does not support the dynamic
- * allocation of an on-chip flow counter.
- * (deprecated)
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED \
- UINT32_C(0x80)
- /*
- * Value of 1 to indicate that firmware supports setting of
- * rfs_ring_tbl_idx in HWRM_CFA_NTUPLE_ALLOC command.
- * Value of 0 indicates firmware does not support rfs_ring_tbl_idx.
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_SUPPORTED \
- UINT32_C(0x100)
- /*
- * Value of 1 to indicate that firmware supports untagged matching
- * criteria on HWRM_CFA_L2_FILTER_ALLOC command. Value of 0
- * indicates firmware does not support untagged matching.
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_UNTAGGED_VLAN_SUPPORTED \
- UINT32_C(0x200)
- /*
- * Value of 1 to indicate that firmware supports XDP filter. Value
- * of 0 indicates firmware does not support XDP filter.
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_XDP_SUPPORTED \
- UINT32_C(0x400)
- /*
- * Value of 1 to indicate that the firmware support L2 header source
- * fields matching criteria on HWRM_CFA_L2_FILTER_ALLOC command.
- * Value of 0 indicates firmware does not support L2 header source
- * fields matching.
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED \
- UINT32_C(0x800)
- /*
- * If set to 1, firmware is capable of supporting ARP ethertype as
- * matching criteria for HWRM_CFA_NTUPLE_FILTER_ALLOC command on the
- * RX direction. By default, this flag should be 0 for older version
- * of firmware.
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED \
- UINT32_C(0x1000)
- /*
- * Value of 1 to indicate that firmware supports setting of
- * rfs_ring_tbl_idx in dst_id field of the HWRM_CFA_NTUPLE_ALLOC
- * command. Value of 0 indicates firmware does not support
- * rfs_ring_tbl_idx in dst_id field.
- */
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED \
- UINT32_C(0x2000)
/*
- * If set to 1, firmware is capable of supporting IPv4/IPv6 as
- * ethertype in HWRM_CFA_NTUPLE_FILTER_ALLOC command on the RX
- * direction. By default, this flag should be 0 for older version
- * of firmware.
+ * Size of the returned tf_rm_resc_entry data array. The value
+ * cannot exceed the req_size defined by the input msg. The data
+ * array is returned via the resc_addr DMA address provided
+ * by the input msg.
*/
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED \
- UINT32_C(0x4000)
+ uint16_t size;
+ /* unused. */
+ uint8_t unused0[5];
/*
- * When this bit is '1', it indicates that core firmware is
- * capable of TruFlow. Driver can restrict sending HWRM CFA_FLOW_XXX
- * and CFA_ENCAP_XXX, CFA_DECAP_XXX commands.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
*/
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_TRUFLOW_CAPABLE \
- UINT32_C(0x8000)
+ uint8_t valid;
+} __rte_packed;
+
+/******************************
+ * hwrm_tf_session_resc_flush *
+ ******************************/
+
+
+/* hwrm_tf_session_resc_flush_input (size:256b/32B) */
+struct hwrm_tf_session_resc_flush_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * If set to 1, firmware is capable of supporting L2/ROCE as
- * traffic type in flags field of HWRM_CFA_L2_FILTER_ALLOC command.
- * By default, this flag should be 0 for older version of firmware.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED \
- UINT32_C(0x10000)
+ uint16_t cmpl_ring;
/*
- * If set to 1, firmware is capable of HW LAG. This bit is only
- * advertised if the calling function is a PAXC function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_LAG_SUPPORTED \
- UINT32_C(0x20000)
+ uint16_t seq_id;
/*
- * If set to 1, firmware is capable installing ntuple rules without
- * additional classification on the L2 Context.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
*/
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED \
- UINT32_C(0x40000)
+ uint16_t target_id;
/*
- * If set to 1, firmware is capable returning stats for nic flows
- * in cfa_flow_stats command where flow_handle value 0xF000.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NIC_FLOW_STATS_SUPPORTED \
- UINT32_C(0x80000)
+ uint64_t resp_addr;
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR_TX
/*
- * If set to 1, firmware is capable of supporting these additional
- * ip_protoccols: ICMP, ICMPV6, RSVD for ntuple rules. By default,
- * this flag should be 0 for older version of firmware.
+ * Defines the size, in bytes, of the provided flush_addr
+ * buffer.
*/
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED \
- UINT32_C(0x100000)
+ uint16_t flush_size;
/*
- * Value of 1 to indicate that firmware supports setting of
- * rfs_ring_tbl_idx (new offset) in HWRM_CFA_NTUPLE_ALLOC command.
- * Value of 0 indicates ring tbl idx should be passed using dst_id.
+ * This is the DMA address for the flush input data array
+ * buffer. Array of tf_rm_resc_entry type. Size of the
+ * buffer is provided by the 'flush_size' field in this
+ * message.
*/
- #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED \
- UINT32_C(0x200000)
- uint8_t unused_0[3];
+ uint64_t flush_addr;
+} __rte_packed;
+
+/* hwrm_tf_session_resc_flush_output (size:128b/16B) */
+struct hwrm_tf_session_resc_flush_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* unused. */
+ uint8_t unused0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -51574,13 +57424,13 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
uint8_t valid;
} __rte_packed;
-/******************
- * hwrm_cfa_tflib *
- ******************/
+/*****************************
+ * hwrm_tf_session_resc_info *
+ *****************************/
-/* hwrm_cfa_tflib_input (size:1024b/128B) */
-struct hwrm_cfa_tflib_input {
+/* hwrm_tf_session_resc_info_input (size:320b/40B) */
+struct hwrm_tf_session_resc_info_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -51609,18 +57459,42 @@ struct hwrm_cfa_tflib_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* TFLIB message type. */
- uint16_t tf_type;
- /* TFLIB message subtype. */
- uint16_t tf_subtype;
- /* unused. */
- uint8_t unused0[4];
- /* TFLIB request data. */
- uint32_t tf_req[26];
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_SESSION_RESC_INFO_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TF_SESSION_RESC_INFO_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_SESSION_RESC_INFO_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_SESSION_RESC_INFO_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_SESSION_RESC_INFO_INPUT_FLAGS_DIR_TX
+ /*
+ * Defines the array size of the provided req_addr and
+ * resc_addr array buffers. Should be set to the number of
+ * request entries.
+ */
+ uint16_t req_size;
+ /*
+ * This is the DMA address for the request input data array
+ * buffer. Array is of tf_rm_resc_req_entry type. Size of the
+ * array buffer is provided by the 'req_size' field in this
+ * message.
+ */
+ uint64_t req_addr;
+ /*
+ * This is the DMA address for the resc output data array
+ * buffer. Array is of tf_rm_resc_entry type. Size of the array
+ * buffer is provided by the 'req_size' field in this
+ * message.
+ */
+ uint64_t resc_addr;
} __rte_packed;
-/* hwrm_cfa_tflib_output (size:5632b/704B) */
-struct hwrm_cfa_tflib_output {
+/* hwrm_tf_session_resc_info_output (size:128b/16B) */
+struct hwrm_tf_session_resc_info_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -51629,16 +57503,15 @@ struct hwrm_cfa_tflib_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* TFLIB message type. */
- uint16_t tf_type;
- /* TFLIB message subtype. */
- uint16_t tf_subtype;
- /* TFLIB response code */
- uint32_t tf_resp_code;
- /* TFLIB response data. */
- uint32_t tf_resp[170];
+ /*
+ * Size of the returned tf_rm_resc_entry data array. The value
+ * cannot exceed the req_size defined by the input msg. The data
+ * array is returned via the resc_addr DMA address provided
+ * by the input msg.
+ */
+ uint16_t size;
/* unused. */
- uint8_t unused1[7];
+ uint8_t unused0[5];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -51650,13 +57523,35 @@ struct hwrm_cfa_tflib_output {
uint8_t valid;
} __rte_packed;
-/**********************************
- * hwrm_cfa_lag_group_member_rgtr *
- **********************************/
+/* TruFlow RM capability of a resource. */
+/* tf_rm_resc_req_entry (size:64b/8B) */
+struct tf_rm_resc_req_entry {
+ /* Type of the resource, defined globally in HCAPI RM. */
+ uint32_t type;
+ /* Minimum value. */
+ uint16_t min;
+ /* Maximum value. */
+ uint16_t max;
+} __rte_packed;
+
+/* TruFlow RM reservation information. */
+/* tf_rm_resc_entry (size:64b/8B) */
+struct tf_rm_resc_entry {
+ /* Type of the resource, defined globally in HCAPI RM. */
+ uint32_t type;
+ /* Start offset. */
+ uint16_t start;
+ /* Number of resources. */
+ uint16_t stride;
+} __rte_packed;
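
The two entry layouts above tie the resc_qcaps/resc_alloc/resc_info messages
together: the driver DMAs an array of tf_rm_resc_req_entry describing the
min/max it needs per resource type, and the firmware fills an array of
tf_rm_resc_entry with the start/stride actually reserved. The sketch below
shows one way the alloc request and its entry array might be populated; the
helper name, the placeholder resource type and the pre-mapped DMA addresses
are illustrative assumptions, not definitions from this header.

#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

#define TF_RESC_TYPE_EXAMPLE 0	/* placeholder HCAPI RM resource type */

/* Illustrative sketch only: fill a resc_alloc request whose req_addr and
 * resc_addr buffers were DMA-mapped elsewhere (e.g. rte_memzone_reserve()).
 * The common HWRM header fields (req_type, seq_id, resp_addr, ...) are
 * assumed to be filled by the driver's mailbox send path.
 */
static void
tf_fill_resc_alloc(struct hwrm_tf_session_resc_alloc_input *req,
		   struct tf_rm_resc_req_entry *req_array,
		   uint32_t fw_session_id, uint16_t num_entries,
		   uint64_t req_dma, uint64_t resc_dma)
{
	memset(req, 0, sizeof(*req));
	req->fw_session_id = rte_cpu_to_le_32(fw_session_id);
	/* RX direction: bit 0 of flags left at zero. */
	req->flags = rte_cpu_to_le_16(HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR_RX);
	req->req_size = rte_cpu_to_le_16(num_entries);
	req->req_addr = rte_cpu_to_le_64(req_dma);
	req->resc_addr = rte_cpu_to_le_64(resc_dma);

	/* One request entry per resource type the session needs. */
	req_array[0].type = rte_cpu_to_le_32(TF_RESC_TYPE_EXAMPLE);
	req_array[0].min = rte_cpu_to_le_16(1);
	req_array[0].max = rte_cpu_to_le_16(64);
}
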
+
+/**************************
+ * hwrm_tf_tbl_type_alloc *
+ **************************/
-/* hwrm_cfa_lag_group_member_rgtr_input (size:192b/24B) */
-struct hwrm_cfa_lag_group_member_rgtr_input {
+/* hwrm_tf_tbl_type_alloc_input (size:192b/24B) */
+struct hwrm_tf_tbl_type_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -51685,42 +57580,44 @@ struct hwrm_cfa_lag_group_member_rgtr_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint8_t mode;
- /*
- * Transmit only on the active port. Automatically failover
- * to backup port.
- */
- #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR_INPUT_MODE_ACTIVE_BACKUP \
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_TBL_TYPE_ALLOC_INPUT_FLAGS_DIR_TX
+ /* Specifies which block this idx table alloc request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_BLKTYPE_BLKTYPE_CFA \
+ UINT32_C(0x0)
+ /* RXP gparse block type */
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_BLKTYPE_BLKTYPE_RXP \
UINT32_C(0x1)
- /*
- * Transmit based on packet header ntuple hash. Packet with only
- * layer 2 headers will hash using the destination MAC, source MAC
- * and Ethertype fields. Packets with layer 3 (IP) headers will
- * hash using the destination MAC, source MAC, IP protocol/next
- * header, source IP address and destination IP address. Packets
- * with layer 4 (TCP/UDP) headers will hash using the destination
- * MAC, source MAC, IP protocol/next header, source IP address,
- * destination IP address, source port and destination port fields.
- */
- #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR_INPUT_MODE_BALANCE_XOR \
+ /* RE gparse block type */
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE \
UINT32_C(0x2)
- /* Transmit packets on all specified ports. */
- #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR_INPUT_MODE_BROADCAST \
+ /* TE gparse block type */
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE \
UINT32_C(0x3)
- #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR_INPUT_MODE_LAST \
- HWRM_CFA_LAG_GROUP_MEMBER_RGTR_INPUT_MODE_BROADCAST
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_BLKTYPE_LAST \
+ HWRM_TF_TBL_TYPE_ALLOC_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
/*
- * Supports up to 5 ports. bit0 = port 0, bit1 = port 1,
- * bit2 = port 2, bit3 = port 4, bit4 = loopback port
+ * This field is blktype specific. For any of the UPAR types it is
+ * set to a non-zero value in the case of a re-alloc, where the
+ * value specifies the tunnel type of the dynamic UPAR tunnel.
*/
- uint8_t port_bitmap;
- /* Specify the active port when active-backup mode is specified */
- uint8_t active_port;
- uint8_t unused_0[5];
+ uint8_t type;
} __rte_packed;
-/* hwrm_cfa_lag_group_member_rgtr_output (size:128b/16B) */
-struct hwrm_cfa_lag_group_member_rgtr_output {
+/* hwrm_tf_tbl_type_alloc_output (size:128b/16B) */
+struct hwrm_tf_tbl_type_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -51729,27 +57626,33 @@ struct hwrm_cfa_lag_group_member_rgtr_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* lag group ID configured for the function */
- uint16_t lag_id;
- uint8_t unused_0[5];
+ /* Response code. */
+ uint32_t resp_code;
+ /*
+ * Table entry allocated by the firmware using the
+ * parameters above.
+ */
+ uint16_t idx_tbl_id;
+ /* unused */
+ uint8_t unused0;
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
-/************************************
- * hwrm_cfa_lag_group_member_unrgtr *
- ************************************/
+/************************
+ * hwrm_tf_tbl_type_get *
+ ************************/
-/* hwrm_cfa_lag_group_member_unrgtr_input (size:192b/24B) */
-struct hwrm_cfa_lag_group_member_unrgtr_input {
+/* hwrm_tf_tbl_type_get_input (size:256b/32B) */
+struct hwrm_tf_tbl_type_get_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -51778,13 +57681,56 @@ struct hwrm_cfa_lag_group_member_unrgtr_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* lag group ID configured for the function */
- uint16_t lag_id;
- uint8_t unused_0[6];
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR \
+ UINT32_C(0x1)
+ /* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_RX \
+ UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_TX \
+ UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_TX
+ /*
+ * When set, use the special access register access to clear
+ * the table entry on read.
+ */
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_CLEAR_ON_READ \
+ UINT32_C(0x2)
+ /* Specifies which block this idx table get request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_BLKTYPE_BLKTYPE_CFA \
+ UINT32_C(0x0)
+ /* RXP gparse block type */
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_BLKTYPE_BLKTYPE_RXP \
+ UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE \
+ UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE \
+ UINT32_C(0x3)
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_BLKTYPE_LAST \
+ HWRM_TF_TBL_TYPE_GET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
+ /* unused. */
+ uint8_t unused0;
+ /*
+ * Type of the resource, defined globally in the
+ * hwrm_tf_resc_type enum.
+ */
+ uint32_t type;
+ /* Index of the type to retrieve. */
+ uint32_t index;
} __rte_packed;
-/* hwrm_cfa_lag_group_member_unrgtr_output (size:128b/16B) */
-struct hwrm_cfa_lag_group_member_unrgtr_output {
+/* hwrm_tf_tbl_type_get_output (size:2240b/280B) */
+struct hwrm_tf_tbl_type_get_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -51793,25 +57739,34 @@ struct hwrm_cfa_lag_group_member_unrgtr_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /* Response code. */
+ uint32_t resp_code;
+ /* Response size. */
+ uint16_t size;
+ /* unused */
+ uint16_t unused0;
+ /* Response data. */
+ uint8_t data[256];
+ /* unused */
+ uint8_t unused1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
-/*****************************
- * hwrm_cfa_tls_filter_alloc *
- *****************************/
+/************************
+ * hwrm_tf_tbl_type_set *
+ ************************/
-/* hwrm_cfa_tls_filter_alloc_input (size:768b/96B) */
-struct hwrm_cfa_tls_filter_alloc_input {
+/* hwrm_tf_tbl_type_set_input (size:1024b/128B) */
+struct hwrm_tf_tbl_type_set_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -51840,168 +57795,238 @@ struct hwrm_cfa_tls_filter_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t unused_0;
- uint32_t enables;
- /*
- * This bit must be '1' for the l2_filter_id field to be
- * configured.
- */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX
+ /* Indicate table data is being sent via DMA. */
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
+ /* Specifies which block this idx table set request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_BLKTYPE_CFA \
+ UINT32_C(0x0)
+ /* RXP gparse block type */
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_BLKTYPE_RXP \
UINT32_C(0x1)
- /*
- * This bit must be '1' for the ethertype field to be
- * configured.
- */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \
+ /* RE gparse block type */
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE \
UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE \
+ UINT32_C(0x3)
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_LAST \
+ HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
+ /* unused. */
+ uint8_t unused0;
/*
- * This bit must be '1' for the ipaddr_type field to be
- * configured.
- */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
- UINT32_C(0x4)
- /*
- * This bit must be '1' for the src_ipaddr field to be
- * configured.
- */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \
- UINT32_C(0x8)
- /*
- * This bit must be '1' for the dst_ipaddr field to be
- * configured.
- */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \
- UINT32_C(0x10)
- /*
- * This bit must be '1' for the ip_protocol field to be
- * configured.
- */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
- UINT32_C(0x20)
- /*
- * This bit must be '1' for the src_port field to be
- * configured.
- */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \
- UINT32_C(0x40)
- /*
- * This bit must be '1' for the dst_port field to be
- * configured.
- */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \
- UINT32_C(0x80)
- /*
- * This bit must be '1' for the kid field to be
- * configured.
- */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_KID \
- UINT32_C(0x100)
- /*
- * This bit must be '1' for the dst_id field to be
- * configured.
+ * Type of the resource, defined globally in the
+ * hwrm_tf_resc_type enum.
*/
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
- UINT32_C(0x200)
+ uint32_t type;
+ /* Index of the type to set. */
+ uint32_t index;
+ /* Size of the data to set. */
+ uint16_t size;
+ /* unused */
+ uint8_t unused1[6];
+ /* Data to be set. */
+ uint8_t data[88];
+} __rte_packed;
+
+/* hwrm_tf_tbl_type_set_output (size:128b/16B) */
+struct hwrm_tf_tbl_type_set_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* unused. */
+ uint8_t unused0[7];
/*
- * This bit must be '1' for the mirror_vnic_id field to be
- * configured.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
- UINT32_C(0x400)
+ uint8_t valid;
+} __rte_packed;
+
+/*************************
+ * hwrm_tf_tbl_type_free *
+ *************************/
+
+
+/* hwrm_tf_tbl_type_free_input (size:256b/32B) */
+struct hwrm_tf_tbl_type_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This bit must be '1' for the quic_dst_connect_id field to be
- * configured.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_QUIC_DST_CONNECT_ID \
- UINT32_C(0x800)
+ uint16_t cmpl_ring;
/*
- * This value identifies a set of CFA data structures used for an L2
- * context.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t l2_filter_id;
- uint8_t unused_1[6];
- /* This value indicates the ethertype in the Ethernet header. */
- uint16_t ethertype;
+ uint16_t seq_id;
/*
- * This value indicates the type of IP address.
- * 4 - IPv4
- * 6 - IPv6
- * All others are invalid.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
*/
- uint8_t ip_addr_type;
- /* invalid */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \
- UINT32_C(0x0)
- /* IPv4 */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \
- UINT32_C(0x4)
- /* IPv6 */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
- UINT32_C(0x6)
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
- HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
+ uint16_t target_id;
/*
- * The value of protocol filed in IP header.
- * Applies to UDP and TCP traffic.
- * 6 - TCP
- * 17 - UDP
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint8_t ip_protocol;
- /* invalid */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \
+ uint64_t resp_addr;
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_TBL_TYPE_FREE_INPUT_FLAGS_DIR_TX
+ /* Specifies which block this idx table free request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_BLKTYPE_BLKTYPE_CFA \
UINT32_C(0x0)
- /* TCP */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \
- UINT32_C(0x6)
- /* UDP */
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \
- UINT32_C(0x11)
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \
- HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP
- /*
- * If set, this value shall represent the
- * Logical VNIC ID of the destination VNIC for the RX
- * path and network port id of the destination port for
- * the TX path.
- */
- uint16_t dst_id;
+ /* RXP gparse block type */
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_BLKTYPE_BLKTYPE_RXP \
+ UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE \
+ UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE \
+ UINT32_C(0x3)
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_BLKTYPE_LAST \
+ HWRM_TF_TBL_TYPE_FREE_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
+ /* Unused */
+ uint8_t unused0;
/*
- * Logical VNIC ID of the VNIC where traffic is
- * mirrored.
+ * Table entry to be freed by the firmware using the parameters
+ * above.
*/
- uint16_t mirror_vnic_id;
- uint8_t unused_2[2];
+ uint16_t idx_tbl_id;
+ /* Unused */
+ uint8_t unused1[6];
+} __rte_packed;
+
+/* hwrm_tf_tbl_type_free_output (size:128b/16B) */
+struct hwrm_tf_tbl_type_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Response code. */
+ uint32_t resp_code;
+ /* unused */
+ uint8_t unused0[3];
/*
- * The value of source IP address to be used in filtering.
- * For IPv4, first four bytes represent the IP address.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
- uint32_t src_ipaddr[4];
+ uint8_t valid;
+} __rte_packed;
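
The four tbl_type messages above form a simple lifecycle: alloc returns an
idx_tbl_id for a given direction and block type, set writes the entry (inline
in 'data' for small entries, or via DMA when the DMA flag is set), get reads
it back (optionally clearing on read), and free releases it. A sketch of
filling a set request through the inline path follows; the entry contents and
the type/index values are placeholders for values obtained from
hwrm_tf_resc_type and from a prior alloc, not definitions from this header.

#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Illustrative sketch only: write a small index-table entry using the
 * inline data path of hwrm_tf_tbl_type_set (DMA flag left clear, so the
 * entry travels in the message itself).
 */
static void
tf_fill_tbl_type_set(struct hwrm_tf_tbl_type_set_input *req,
		     uint32_t fw_session_id, uint32_t type,
		     uint32_t index, const uint8_t *entry, uint16_t len)
{
	memset(req, 0, sizeof(*req));
	req->fw_session_id = rte_cpu_to_le_32(fw_session_id);
	req->flags = rte_cpu_to_le_16(HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX);
	req->blktype = HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_BLKTYPE_CFA;
	req->type = rte_cpu_to_le_32(type);
	req->index = rte_cpu_to_le_32(index);

	/* Clamp to the inline payload size defined by this message. */
	if (len > sizeof(req->data))
		len = sizeof(req->data);
	req->size = rte_cpu_to_le_16(len);
	memcpy(req->data, entry, len);
}
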
+
+/*********************
+ * hwrm_tf_em_insert *
+ *********************/
+
+
+/* hwrm_tf_em_insert_input (size:832b/104B) */
+struct hwrm_tf_em_insert_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * The value of destination IP address to be used in filtering.
- * For IPv4, first four bytes represent the IP address.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t dst_ipaddr[4];
+ uint16_t cmpl_ring;
/*
- * The value of source port to be used in filtering.
- * Applies to UDP and TCP traffic.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t src_port;
+ uint16_t seq_id;
/*
- * The value of destination port to be used in filtering.
- * Applies to UDP and TCP traffic.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
*/
- uint16_t dst_port;
+ uint16_t target_id;
/*
- * The Key Context Identifier (KID) for use with KTLS or QUIC.
- * KID is limited to 20-bits.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t kid;
- /* The Destination Connection ID of QUIC. */
- uint64_t quic_dst_connect_id;
+ uint64_t resp_addr;
+ /* Firmware Session Id. */
+ uint32_t fw_session_id;
+ /* Control Flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX
+ /* Reported match strength. */
+ uint16_t strength;
+ /* Index to action. */
+ uint32_t action_ptr;
+ /* Index of EM record. */
+ uint32_t em_record_idx;
+ /* EM Key value. */
+ uint64_t em_key[8];
+ /* Number of bits in em_key. */
+ uint16_t em_key_bitlen;
+ /* unused. */
+ uint16_t unused0[3];
} __rte_packed;
-/* hwrm_cfa_tls_filter_alloc_output (size:192b/24B) */
-struct hwrm_cfa_tls_filter_alloc_output {
+/* hwrm_tf_em_insert_output (size:128b/16B) */
+struct hwrm_tf_em_insert_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -52010,67 +58035,32 @@ struct hwrm_cfa_tls_filter_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* This value is an opaque id into CFA data structures. */
- uint64_t tls_filter_id;
- /*
- * The flow id value in bit 0-29 is the actual ID of the flow
- * associated with this filter and it shall be used to match
- * and associate the flow identifier returned in completion
- * records. A value of 0xFFFFFFFF in the 32-bit flow_id field
- * shall indicate no valid flow id.
- */
- uint32_t flow_id;
- /* Indicate the flow id value. */
- #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_VALUE_MASK \
- UINT32_C(0x3fffffff)
- #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_VALUE_SFT 0
- /* Indicate type of the flow. */
- #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE \
- UINT32_C(0x40000000)
- /*
- * If this bit set to 0, then it indicates that the flow is
- * internal flow.
- */
- #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_INT \
- (UINT32_C(0x0) << 30)
- /*
- * If this bit is set to 1, then it indicates that the flow is
- * external flow.
- */
- #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT \
- (UINT32_C(0x1) << 30)
- #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_LAST \
- HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_TYPE_EXT
- /* Indicate the flow direction. */
- #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR \
- UINT32_C(0x80000000)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_RX \
- (UINT32_C(0x0) << 31)
- /* If this bit is set to 1, then it indicates that tx flow. */
- #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_TX \
- (UINT32_C(0x1) << 31)
- #define HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_LAST \
- HWRM_CFA_TLS_FILTER_ALLOC_OUTPUT_FLOW_ID_DIR_TX
- uint8_t unused_0[3];
+ /* EM record pointer index. */
+ uint16_t rptr_index;
+ /* EM record offset 0~3. */
+ uint8_t rptr_entry;
+ /* Number of word entries consumed by the key. */
+ uint8_t num_of_entries;
+ /* unused. */
+ uint8_t unused0[3];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
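
With em_insert the key travels inline in the request (up to the 512 bits of
em_key) together with the action pointer and record index; the response
returns rptr_index/rptr_entry and the number of entries consumed, from which
the driver can derive the flow handle later passed to em_delete. A minimal
sketch of filling the insert request is shown below; the strength value and
the caller-supplied key/action parameters are illustrative, and the common
HWRM header fields are assumed to be filled by the driver's mailbox send path.

#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Illustrative sketch only: build an exact-match insert request for the
 * RX direction.
 */
static void
tf_fill_em_insert(struct hwrm_tf_em_insert_input *req,
		  uint32_t fw_session_id, const uint64_t key[8],
		  uint16_t key_bitlen, uint32_t action_ptr,
		  uint32_t record_idx)
{
	memset(req, 0, sizeof(*req));
	req->fw_session_id = rte_cpu_to_le_32(fw_session_id);
	req->flags = rte_cpu_to_le_16(HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX);
	req->strength = rte_cpu_to_le_16(3);	/* placeholder match strength */
	req->action_ptr = rte_cpu_to_le_32(action_ptr);
	req->em_record_idx = rte_cpu_to_le_32(record_idx);
	memcpy(req->em_key, key, sizeof(req->em_key));
	req->em_key_bitlen = rte_cpu_to_le_16(key_bitlen);
}
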
-/****************************
- * hwrm_cfa_tls_filter_free *
- ****************************/
+/**************************
+ * hwrm_tf_em_hash_insert *
+ **************************/
-/* hwrm_cfa_tls_filter_free_input (size:192b/24B) */
-struct hwrm_cfa_tls_filter_free_input {
+/* hwrm_tf_em_hash_insert_input (size:1024b/128B) */
+struct hwrm_tf_em_hash_insert_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -52099,12 +58089,36 @@ struct hwrm_cfa_tls_filter_free_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* This value is an opaque id into CFA data structures. */
- uint64_t tls_filter_id;
+ /* Firmware Session Id. */
+ uint32_t fw_session_id;
+ /* Control Flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DIR_TX
+ /* Indicates table data is being sent via DMA. */
+ #define HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DMA UINT32_C(0x2)
+ /* Number of bits in the EM record. */
+ uint16_t em_record_size_bits;
+ /* CRC32 hash of key. */
+ uint32_t key0_hash;
+ /* Lookup3 hash of key. */
+ uint32_t key1_hash;
+ /* Index of EM record. */
+ uint32_t em_record_idx;
+ /* Unused. */
+ uint32_t unused0;
+ /* EM record. */
+ uint64_t em_record[11];
} __rte_packed;
-/* hwrm_cfa_tls_filter_free_output (size:128b/16B) */
-struct hwrm_cfa_tls_filter_free_output {
+/* hwrm_tf_em_hash_insert_output (size:128b/16B) */
+struct hwrm_tf_em_hash_insert_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -52113,25 +58127,32 @@ struct hwrm_cfa_tls_filter_free_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /* EM record pointer index. */
+ uint16_t rptr_index;
+ /* EM record offset 0~3. */
+ uint8_t rptr_entry;
+ /* Number of word entries consumed by the key. */
+ uint8_t num_of_entries;
+ /* unused. */
+ uint8_t unused0[3];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
-/***********
- * hwrm_tf *
- ***********/
+/*********************
+ * hwrm_tf_em_delete *
+ *********************/
-/* hwrm_tf_input (size:1024b/128B) */
-struct hwrm_tf_input {
+/* hwrm_tf_em_delete_input (size:832b/104B) */
+struct hwrm_tf_em_delete_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -52160,18 +58181,32 @@ struct hwrm_tf_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* TF message type. */
- uint16_t type;
- /* TF message subtype. */
- uint16_t subtype;
+ /* Session Id. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX
+ /* Unused0 */
+ uint16_t unused0;
+ /* EM internal flow handle. */
+ uint64_t flow_handle;
+ /* EM Key value */
+ uint64_t em_key[8];
+ /* Number of bits in em_key. */
+ uint16_t em_key_bitlen;
/* unused. */
- uint8_t unused0[4];
- /* TF request data. */
- uint32_t req[26];
+ uint16_t unused1[3];
} __rte_packed;
-/* hwrm_tf_output (size:5632b/704B) */
-struct hwrm_tf_output {
+/* hwrm_tf_em_delete_output (size:128b/16B) */
+struct hwrm_tf_em_delete_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -52180,34 +58215,28 @@ struct hwrm_tf_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* TF message type. */
- uint16_t type;
- /* TF message subtype. */
- uint16_t subtype;
- /* TF response code */
- uint32_t resp_code;
- /* TF response data. */
- uint32_t resp[170];
+ /* Original stack allocation index. */
+ uint16_t em_index;
/* unused. */
- uint8_t unused1[7];
+ uint8_t unused0[5];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
-/***********************
- * hwrm_tf_version_get *
- ***********************/
+/*******************
+ * hwrm_tf_em_move *
+ *******************/
-/* hwrm_tf_version_get_input (size:128b/16B) */
-struct hwrm_tf_version_get_input {
+/* hwrm_tf_em_move_input (size:320b/40B) */
+struct hwrm_tf_em_move_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -52236,10 +58265,30 @@ struct hwrm_tf_version_get_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* Session Id. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_EM_MOVE_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit set to 0, then it indicates rx flow. */
+ #define HWRM_TF_EM_MOVE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_EM_MOVE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_EM_MOVE_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_EM_MOVE_INPUT_FLAGS_DIR_TX
+ /* Number of EM entry blocks */
+ uint16_t num_blocks;
+ /* New index for entry */
+ uint32_t new_index;
+ /* Unused */
+ uint32_t unused0;
+ /* EM internal flow handle. */
+ uint64_t flow_handle;
} __rte_packed;
-/* hwrm_tf_version_get_output (size:256b/32B) */
-struct hwrm_tf_version_get_output {
+/* hwrm_tf_em_move_output (size:128b/16B) */
+struct hwrm_tf_em_move_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -52248,39 +58297,28 @@ struct hwrm_tf_version_get_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Version Major number. */
- uint8_t major;
- /* Version Minor number. */
- uint8_t minor;
- /* Version Update number. */
- uint8_t update;
+ /* Index of old entry. */
+ uint16_t em_index;
/* unused. */
uint8_t unused0[5];
- /*
- * This field is used to indicate device's capabilities and
- * configurations.
- */
- uint64_t dev_caps_cfg;
- /* unused. */
- uint8_t unused1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
-/************************
- * hwrm_tf_session_open *
- ************************/
+/********************
+ * hwrm_tf_tcam_set *
+ ********************/
-/* hwrm_tf_session_open_input (size:640b/80B) */
-struct hwrm_tf_session_open_input {
+/* hwrm_tf_tcam_set_input (size:1024b/128B) */
+struct hwrm_tf_tcam_set_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -52309,12 +58347,52 @@ struct hwrm_tf_session_open_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Name of the session. */
- uint8_t session_name[64];
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint32_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit set to 0, then it indicates rx flow. */
+ #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX
+ /*
+	 * Indicate device data is being sent via DMA; the device
+	 * data packing does not change.
+ */
+ #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
+ /*
+ * TCAM type of the resource, defined globally in the
+ * hwrm_tf_resc_type enum.
+ */
+ uint32_t type;
+ /* Index of TCAM entry. */
+ uint16_t idx;
+ /* Number of bytes in the TCAM key. */
+ uint8_t key_size;
+ /* Number of bytes in the TCAM result. */
+ uint8_t result_size;
+ /*
+	 * Offset from which the mask bytes start in the device data
+	 * array; the key offset is always 0.
+ */
+ uint8_t mask_offset;
+ /* Offset from which the result bytes start in the device data array. */
+ uint8_t result_offset;
+ /* unused. */
+ uint8_t unused0[6];
+ /*
+ * TCAM key located at offset 0, mask located at mask_offset
+ * and result at result_offset for the device.
+ */
+ uint8_t dev_data[88];
} __rte_packed;
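/*
 * A minimal illustrative sketch (not part of the generated HWRM interface)
 * of packing key, mask and result into dev_data[] per the offsets described
 * above: key at offset 0, mask at mask_offset, result at result_offset.
 * The helper name and the assumption that the mask is key_size bytes long
 * and placed directly after the key are hypothetical; <string.h> is assumed
 * to be available.
 */
static inline int
tf_tcam_set_pack_dev_data(struct hwrm_tf_tcam_set_input *req,
			  const uint8_t *key, uint8_t key_size,
			  const uint8_t *mask,
			  const uint8_t *result, uint8_t result_size)
{
	/* key, mask and result must all fit inside dev_data[88]. */
	if ((uint32_t)key_size * 2 + result_size > sizeof(req->dev_data))
		return -1;
	req->key_size = key_size;
	req->result_size = result_size;
	req->mask_offset = key_size;		/* mask follows the key */
	req->result_offset = key_size * 2;	/* result follows the mask */
	memcpy(&req->dev_data[0], key, key_size);
	memcpy(&req->dev_data[req->mask_offset], mask, key_size);
	memcpy(&req->dev_data[req->result_offset], result, result_size);
	return 0;
}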
-/* hwrm_tf_session_open_output (size:192b/24B) */
-struct hwrm_tf_session_open_output {
+/* hwrm_tf_tcam_set_output (size:128b/16B) */
+struct hwrm_tf_tcam_set_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -52323,59 +58401,26 @@ struct hwrm_tf_session_open_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * Unique session identifier for the session created by the
- * firmware.
- */
- uint32_t fw_session_id;
- /*
- * Unique session client identifier for the first client on
- * the newly created session.
- */
- uint32_t fw_session_client_id;
- /* This field is used to return the status of fw session to host. */
- uint32_t flags;
- /*
- * Indicates if the shared session has been created. Shared session
- * should be the first session created ever. Its fw_rm_client_id
- * should be 1. The AFM session's fw_rm_client_id is 0.
- */
- #define HWRM_TF_SESSION_OPEN_OUTPUT_FLAGS_SHARED_SESSION \
- UINT32_C(0x1)
- /*
- * If this bit set to 0, then it indicates the shared session
- * has been created by another session.
- */
- #define HWRM_TF_SESSION_OPEN_OUTPUT_FLAGS_SHARED_SESSION_NOT_CREATOR \
- UINT32_C(0x0)
- /*
- * If this bit is set to 1, then it indicates the shared session
- * is created by this session.
- */
- #define HWRM_TF_SESSION_OPEN_OUTPUT_FLAGS_SHARED_SESSION_CREATOR \
- UINT32_C(0x1)
- #define HWRM_TF_SESSION_OPEN_OUTPUT_FLAGS_SHARED_SESSION_LAST \
- HWRM_TF_SESSION_OPEN_OUTPUT_FLAGS_SHARED_SESSION_CREATOR
/* unused. */
- uint8_t unused1[3];
+ uint8_t unused0[7];
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been
+ * completely written. When writing a command completion or
+ * response to an internal processor, the order of writes has
+ * to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/**************************
- * hwrm_tf_session_attach *
- **************************/
+/********************
+ * hwrm_tf_tcam_get *
+ ********************/
-/* hwrm_tf_session_attach_input (size:704b/88B) */
-struct hwrm_tf_session_attach_input {
+/* hwrm_tf_tcam_get_input (size:256b/32B) */
+struct hwrm_tf_tcam_get_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -52404,25 +58449,31 @@ struct hwrm_tf_session_attach_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint32_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit set to 0, then it indicates rx flow. */
+ #define HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_TX
/*
- * Unique session identifier for the session that the attach
- * request want to attach to. This value originates from the
- * shared session memory that the attach request opened by
- * way of the 'attach name' that was passed in to the core
- * attach API.
- * The fw_session_id of the attach session includes PCIe bus
- * info to distinguish the PF and session info to identify
- * the associated TruFlow session.
+ * TCAM type of the resource, defined globally in the
+ * hwrm_tf_resc_type enum.
*/
- uint32_t attach_fw_session_id;
+ uint32_t type;
+ /* Index of a TCAM entry. */
+ uint16_t idx;
/* unused. */
- uint32_t unused0;
- /* Name of the session it self. */
- uint8_t session_name[64];
+ uint16_t unused0;
} __rte_packed;
-/* hwrm_tf_session_attach_output (size:128b/16B) */
-struct hwrm_tf_session_attach_output {
+/* hwrm_tf_tcam_get_output (size:2368b/296B) */
+struct hwrm_tf_tcam_get_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -52431,34 +58482,41 @@ struct hwrm_tf_session_attach_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
+ /* Number of bytes in the TCAM key. */
+ uint8_t key_size;
+ /* Number of bytes in the TCAM entry. */
+ uint8_t result_size;
+ /* Offset from which the mask bytes start in the device data array. */
+ uint8_t mask_offset;
+ /* Offset from which the result bytes start in the device data array. */
+ uint8_t result_offset;
+ /* unused. */
+ uint8_t unused0[4];
/*
- * Unique session identifier for the session created by the
- * firmware. It includes PCIe bus info to distinguish the PF
- * and session info to identify the associated TruFlow
- * session. This fw_session_id is unique to the attach
- * request.
+ * TCAM key located at offset 0, mask located at mask_offset
+ * and result at result_offset for the device.
*/
- uint32_t fw_session_id;
+ uint8_t dev_data[272];
/* unused. */
- uint8_t unused0[3];
+ uint8_t unused1[7];
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been
+ * completely written. When writing a command completion or
+ * response to an internal processor, the order of writes has
+ * to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/****************************
- * hwrm_tf_session_register *
- ****************************/
+/*********************
+ * hwrm_tf_tcam_move *
+ *********************/
-/* hwrm_tf_session_register_input (size:704b/88B) */
-struct hwrm_tf_session_register_input {
+/* hwrm_tf_tcam_move_input (size:1024b/128B) */
+struct hwrm_tf_tcam_move_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -52487,23 +58545,33 @@ struct hwrm_tf_session_register_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint32_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit set to 0, then it indicates rx flow. */
+ #define HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR_TX
/*
- * Unique session identifier for the session that the
- * register request want to create a new client on. This
- * value originates from the first open request.
- * The fw_session_id of the attach session includes PCIe bus
- * info to distinguish the PF and session info to identify
- * the associated TruFlow session.
+ * TCAM type of the resource, defined globally in the
+ * hwrm_tf_resc_type enum.
*/
- uint32_t fw_session_id;
+ uint32_t type;
+ /* Number of TCAM index pairs to be swapped for the device. */
+ uint16_t count;
/* unused. */
- uint32_t unused0;
- /* Name of the session client. */
- uint8_t session_client_name[64];
+ uint16_t unused0;
+ /* TCAM index pairs to be swapped for the device. */
+ uint16_t idx_pairs[48];
} __rte_packed;
-/* hwrm_tf_session_register_output (size:128b/16B) */
-struct hwrm_tf_session_register_output {
+/* hwrm_tf_tcam_move_output (size:128b/16B) */
+struct hwrm_tf_tcam_move_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -52512,32 +58580,26 @@ struct hwrm_tf_session_register_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * Unique session client identifier for the session created
- * by the firmware. It includes the session the client it
- * attached to and session client info.
- */
- uint32_t fw_session_client_id;
/* unused. */
- uint8_t unused0[3];
+ uint8_t unused0[7];
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been
+ * completely written. When writing a command completion or
+ * response to an internal processor, the order of writes has
+ * to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/******************************
- * hwrm_tf_session_unregister *
- ******************************/
+/*********************
+ * hwrm_tf_tcam_free *
+ *********************/
-/* hwrm_tf_session_unregister_input (size:192b/24B) */
-struct hwrm_tf_session_unregister_input {
+/* hwrm_tf_tcam_free_input (size:1024b/128B) */
+struct hwrm_tf_tcam_free_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -52566,20 +58628,33 @@ struct hwrm_tf_session_unregister_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /*
- * Unique session identifier for the session that the
- * unregister request want to close a session client on.
- */
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
uint32_t fw_session_id;
+ /* Control flags. */
+ uint32_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit set to 0, then it indicates rx flow. */
+ #define HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX
/*
- * Unique session client identifier for the session that the
- * unregister request want to close.
+ * TCAM type of the resource, defined globally in the
+ * hwrm_tf_resc_type enum.
*/
- uint32_t fw_session_client_id;
+ uint32_t type;
+	/* Number of TCAM indices to be deleted for the device. */
+ uint16_t count;
+ /* unused. */
+ uint16_t unused0;
+ /* TCAM index list to be deleted for the device. */
+ uint16_t idx_list[48];
} __rte_packed;
-/* hwrm_tf_session_unregister_output (size:128b/16B) */
-struct hwrm_tf_session_unregister_output {
+/* hwrm_tf_tcam_free_output (size:128b/16B) */
+struct hwrm_tf_tcam_free_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -52591,23 +58666,23 @@ struct hwrm_tf_session_unregister_output {
/* unused. */
uint8_t unused0[7];
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been
+ * completely written. When writing a command completion or
+ * response to an internal processor, the order of writes has
+ * to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/*************************
- * hwrm_tf_session_close *
- *************************/
+/**************************
+ * hwrm_tf_global_cfg_set *
+ **************************/
-/* hwrm_tf_session_close_input (size:192b/24B) */
-struct hwrm_tf_session_close_input {
+/* hwrm_tf_global_cfg_set_input (size:448b/56B) */
+struct hwrm_tf_global_cfg_set_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -52638,12 +58713,34 @@ struct hwrm_tf_session_close_input {
uint64_t resp_addr;
/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
uint32_t fw_session_id;
+ /* Control flags. */
+ uint32_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit set to 0, then it indicates rx flow. */
+ #define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_TX
+ /* Indicate device data is being sent via DMA. */
+ #define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
+ /* Global Cfg type */
+ uint32_t type;
+ /* Offset of the type */
+ uint32_t offset;
+ /* Size of the data to set in bytes */
+ uint16_t size;
/* unused. */
- uint8_t unused0[4];
+ uint8_t unused0[6];
+ /* Data to set */
+ uint8_t data[8];
+ /* Mask of data to set, 0 indicates no mask */
+ uint8_t mask[8];
} __rte_packed;
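/*
 * A minimal illustrative sketch (not part of the generated HWRM interface)
 * of setting a single 32-bit global config word through the data[]/mask[]
 * pair above (a mask of 0 means no mask). The helper name is hypothetical;
 * <string.h> and <rte_byteorder.h> are assumed to be available.
 */
static inline void
tf_global_cfg_set_u32(struct hwrm_tf_global_cfg_set_input *req,
		      uint32_t cfg_type, uint32_t cfg_offset,
		      uint32_t value, uint32_t value_mask)
{
	uint32_t le_val = rte_cpu_to_le_32(value);
	uint32_t le_mask = rte_cpu_to_le_32(value_mask);

	req->type = rte_cpu_to_le_32(cfg_type);
	req->offset = rte_cpu_to_le_32(cfg_offset);
	req->size = rte_cpu_to_le_16(sizeof(uint32_t));
	memset(req->data, 0, sizeof(req->data));
	memset(req->mask, 0, sizeof(req->mask));
	memcpy(req->data, &le_val, sizeof(le_val));
	memcpy(req->mask, &le_mask, sizeof(le_mask));
}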
-/* hwrm_tf_session_close_output (size:128b/16B) */
-struct hwrm_tf_session_close_output {
+/* hwrm_tf_global_cfg_set_output (size:128b/16B) */
+struct hwrm_tf_global_cfg_set_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -52655,23 +58752,23 @@ struct hwrm_tf_session_close_output {
/* unused. */
uint8_t unused0[7];
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been
+ * completely written. When writing a command completion or
+ * response to an internal processor, the order of writes has
+ * to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/************************
- * hwrm_tf_session_qcfg *
- ************************/
+/**************************
+ * hwrm_tf_global_cfg_get *
+ **************************/
-/* hwrm_tf_session_qcfg_input (size:192b/24B) */
-struct hwrm_tf_session_qcfg_input {
+/* hwrm_tf_global_cfg_get_input (size:320b/40B) */
+struct hwrm_tf_global_cfg_get_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -52702,12 +58799,28 @@ struct hwrm_tf_session_qcfg_input {
uint64_t resp_addr;
/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
uint32_t fw_session_id;
+ /* Control flags. */
+ uint32_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit set to 0, then it indicates rx flow. */
+ #define HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR_TX
+ /* Global Cfg type */
+ uint32_t type;
+ /* Offset of the type */
+ uint32_t offset;
+	/* Size of the data to get in bytes */
+ uint16_t size;
/* unused. */
- uint8_t unused0[4];
+ uint8_t unused0[6];
} __rte_packed;
-/* hwrm_tf_session_qcfg_output (size:128b/16B) */
-struct hwrm_tf_session_qcfg_output {
+/* hwrm_tf_global_cfg_get_output (size:2240b/280B) */
+struct hwrm_tf_global_cfg_get_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -52716,74 +58829,32 @@ struct hwrm_tf_session_qcfg_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* RX action control settings flags. */
- uint8_t rx_act_flags;
- /*
- * A value of 1 in this field indicates that Global Flow ID
- * reporting into cfa_code and cfa_metadata is enabled.
- */
- #define HWRM_TF_SESSION_QCFG_OUTPUT_RX_ACT_FLAGS_ABCR_GFID_EN \
- UINT32_C(0x1)
- /*
- * A value of 1 in this field indicates that both inner and outer
- * are stripped and inner tag is passed.
- * Enabled.
- */
- #define HWRM_TF_SESSION_QCFG_OUTPUT_RX_ACT_FLAGS_ABCR_VTAG_DLT_BOTH \
- UINT32_C(0x2)
- /*
- * A value of 1 in this field indicates that the re-use of
- * existing tunnel L2 header SMAC is enabled for
- * Non-tunnel L2, L2-L3 and IP-IP tunnel.
- */
- #define HWRM_TF_SESSION_QCFG_OUTPUT_RX_ACT_FLAGS_TECT_SMAC_OVR_RUTNSL2 \
- UINT32_C(0x4)
- /* TX Action control settings flags. */
- uint8_t tx_act_flags;
- /* Disabled. */
- #define HWRM_TF_SESSION_QCFG_OUTPUT_TX_ACT_FLAGS_ABCR_VEB_EN \
- UINT32_C(0x1)
- /*
- * When set to 1 any GRE tunnels will include the
- * optional Key field.
- */
- #define HWRM_TF_SESSION_QCFG_OUTPUT_TX_ACT_FLAGS_TECT_GRE_SET_K \
- UINT32_C(0x2)
- /*
- * When set to 1, for GRE tunnels, the IPV6 Traffic Class (TC)
- * field of the outer header is inherited from the inner header
- * (if present) or the fixed value as taken from the encap
- * record.
- */
- #define HWRM_TF_SESSION_QCFG_OUTPUT_TX_ACT_FLAGS_TECT_IPV6_TC_IH \
- UINT32_C(0x4)
- /*
- * When set to 1, for GRE tunnels, the IPV4 Type Of Service (TOS)
- * field of the outer header is inherited from the inner header
- * (if present) or the fixed value as taken from the encap record.
- */
- #define HWRM_TF_SESSION_QCFG_OUTPUT_TX_ACT_FLAGS_TECT_IPV4_TOS_IH \
- UINT32_C(0x8)
+ /* Size of the data read in bytes */
+ uint16_t size;
/* unused. */
- uint8_t unused0[5];
+ uint8_t unused0[6];
+	/* Data read */
+ uint8_t data[256];
+ /* unused. */
+ uint8_t unused1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * processor, the order of writes has to be such that this field is
+ * written last.
*/
uint8_t valid;
} __rte_packed;
-/******************************
- * hwrm_tf_session_resc_qcaps *
- ******************************/
+/**********************
+ * hwrm_tf_if_tbl_get *
+ **********************/
-/* hwrm_tf_session_resc_qcaps_input (size:256b/32B) */
-struct hwrm_tf_session_resc_qcaps_input {
+/* hwrm_tf_if_tbl_get_input (size:256b/32B) */
+struct hwrm_tf_if_tbl_get_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -52817,31 +58888,26 @@ struct hwrm_tf_session_resc_qcaps_input {
/* Control flags. */
uint16_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_SESSION_RESC_QCAPS_INPUT_FLAGS_DIR_TX
- /*
- * Defines the size of the provided qcaps_addr array
- * buffer. The size should be set to the Resource Manager
- * provided max number of qcaps entries which is device
- * specific. Resource Manager gets the max size from HCAPI
- * RM.
- */
- uint16_t qcaps_size;
+ #define HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR_TX
+	/* Size of the data to get. */
+ uint16_t size;
/*
- * This is the DMA address for the qcaps output data array
- * buffer. Array is of tf_rm_resc_req_entry type and is
- * device specific.
+ * Type of the resource, defined globally in the
+ * hwrm_tf_resc_type enum.
*/
- uint64_t qcaps_addr;
+ uint32_t type;
+ /* Index of the type to retrieve. */
+ uint32_t index;
} __rte_packed;
-/* hwrm_tf_session_resc_qcaps_output (size:192b/24B) */
-struct hwrm_tf_session_resc_qcaps_output {
+/* hwrm_tf_if_tbl_get_output (size:1216b/152B) */
+struct hwrm_tf_if_tbl_get_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -52850,60 +58916,34 @@ struct hwrm_tf_session_resc_qcaps_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Control flags. */
- uint32_t flags;
- /* Session reservation strategy. */
- #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_MASK \
- UINT32_C(0x3)
- #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_SFT \
- 0
- /* Static partitioning. */
- #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_STATIC \
- UINT32_C(0x0)
- /* Strategy 1. */
- #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_1 \
- UINT32_C(0x1)
- /* Strategy 2. */
- #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_2 \
- UINT32_C(0x2)
- /* Strategy 3. */
- #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_3 \
- UINT32_C(0x3)
- #define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_LAST \
- HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_3
- /*
- * Size of the returned qcaps_addr data array buffer. The
- * value cannot exceed the size defined by the input msg,
- * qcaps_size.
- */
+ /* Response code. */
+ uint32_t resp_code;
+ /* Response size. */
uint16_t size;
- /*
- * SRAM profile number that sets the partition of SRAM memory
- * between TF and AFM within the 4 internal memory banks (Thor).
- */
- uint8_t sram_profile;
- /* unused. */
- uint8_t unused0;
- /* unused. */
+ /* unused */
+ uint16_t unused0;
+ /* Response data. */
+ uint8_t data[128];
+ /* unused */
uint8_t unused1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
-/******************************
- * hwrm_tf_session_resc_alloc *
- ******************************/
+/***************************
+ * hwrm_tf_if_tbl_type_set *
+ ***************************/
-/* hwrm_tf_session_resc_alloc_input (size:320b/40B) */
-struct hwrm_tf_session_resc_alloc_input {
+/* hwrm_tf_if_tbl_set_input (size:1024b/128B) */
+struct hwrm_tf_if_tbl_set_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -52937,37 +58977,32 @@ struct hwrm_tf_session_resc_alloc_input {
/* Control flags. */
uint16_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_SESSION_RESC_ALLOC_INPUT_FLAGS_DIR_TX
- /*
- * Defines the array size of the provided req_addr and
- * resv_addr array buffers. Should be set to the number of
- * request entries.
- */
- uint16_t req_size;
- /*
- * This is the DMA address for the request input data array
- * buffer. Array is of tf_rm_resc_req_entry type. Size of the
- * array buffer is provided by the 'req_size' field in this
- * message.
- */
- uint64_t req_addr;
+ #define HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR_TX
+ /* unused. */
+ uint8_t unused0[2];
/*
- * This is the DMA address for the resc output data array
- * buffer. Array is of tf_rm_resc_entry type. Size of the array
- * buffer is provided by the 'req_size' field in this
- * message.
+ * Type of the resource, defined globally in the
+ * hwrm_tf_resc_type enum.
*/
- uint64_t resc_addr;
+ uint32_t type;
+ /* Index of the type to set. */
+ uint32_t index;
+ /* Size of the data to set. */
+ uint16_t size;
+ /* unused */
+ uint8_t unused1[6];
+ /* Data to be set. */
+ uint8_t data[88];
} __rte_packed;
-/* hwrm_tf_session_resc_alloc_output (size:128b/16B) */
-struct hwrm_tf_session_resc_alloc_output {
+/* hwrm_tf_if_tbl_set_output (size:128b/16B) */
+struct hwrm_tf_if_tbl_set_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -52976,33 +59011,26 @@ struct hwrm_tf_session_resc_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * Size of the returned tf_rm_resc_entry data array. The value
- * cannot exceed the req_size defined by the input msg. The data
- * array is returned using the resv_addr specified DMA
- * address also provided by the input msg.
- */
- uint16_t size;
/* unused. */
- uint8_t unused0[5];
+ uint8_t unused0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
/*****************************
- * hwrm_tf_session_resc_free *
+ * hwrm_tf_tbl_type_bulk_get *
*****************************/
-/* hwrm_tf_session_resc_free_input (size:256b/32B) */
-struct hwrm_tf_session_resc_free_input {
+/* hwrm_tf_tbl_type_bulk_get_input (size:384b/48B) */
+struct hwrm_tf_tbl_type_bulk_get_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -53036,29 +59064,41 @@ struct hwrm_tf_session_resc_free_input {
/* Control flags. */
uint16_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR \
+ UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_RX \
+ UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR_TX
+ #define HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_TX \
+ UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_TX
/*
- * Defines the size, in bytes, of the provided free_addr
- * buffer.
+ * When set use the special access register access to clear
+ * the table entries on read.
*/
- uint16_t free_size;
+ #define HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_CLEAR_ON_READ \
+ UINT32_C(0x2)
+ /* unused. */
+ uint8_t unused0[2];
/*
- * This is the DMA address for the free input data array
- * buffer. Array is of tf_rm_resc_entry type. Size of the
- * buffer is provided by the 'free_size' field of this
- * message.
+ * Type of the resource, defined globally in the
+ * hwrm_tf_resc_type enum.
*/
- uint64_t free_addr;
+ uint32_t type;
+ /* Starting index of the type to retrieve. */
+ uint32_t start_index;
+ /* Number of entries to retrieve. */
+ uint32_t num_entries;
+	/* unused. */
+ uint32_t unused1;
+ /* Host memory where data will be stored. */
+ uint64_t host_addr;
} __rte_packed;
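/*
 * A minimal illustrative sketch (not part of the generated HWRM interface)
 * of filling the bulk-get request above. Allocating the DMA buffer and
 * obtaining its IOVA are outside the scope of this sketch; the helper name
 * and the host_iova parameter are hypothetical, and <stdbool.h> plus
 * <rte_byteorder.h> are assumed to be available.
 */
static inline void
tf_tbl_type_bulk_get_fill(struct hwrm_tf_tbl_type_bulk_get_input *req,
			  bool tx_dir, uint32_t type,
			  uint32_t start_index, uint32_t num_entries,
			  uint64_t host_iova)
{
	uint16_t flags = tx_dir ?
		HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_TX :
		HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_RX;

	req->flags = rte_cpu_to_le_16(flags);
	req->type = rte_cpu_to_le_32(type);
	req->start_index = rte_cpu_to_le_32(start_index);
	req->num_entries = rte_cpu_to_le_32(num_entries);
	/* Retrieved entries are DMAed to this host address. */
	req->host_addr = rte_cpu_to_le_64(host_iova);
}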
-/* hwrm_tf_session_resc_free_output (size:128b/16B) */
-struct hwrm_tf_session_resc_free_output {
+/* hwrm_tf_tbl_type_bulk_get_output (size:128b/16B) */
+struct hwrm_tf_tbl_type_bulk_get_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -53067,26 +59107,30 @@ struct hwrm_tf_session_resc_free_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
+ /* Response code. */
+ uint32_t resp_code;
+ /* Response size. */
+ uint16_t size;
+ /* unused */
+ uint8_t unused0;
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
-/******************************
- * hwrm_tf_session_resc_flush *
- ******************************/
+/***********************************
+ * hwrm_tf_session_hotup_state_set *
+ ***********************************/
-/* hwrm_tf_session_resc_flush_input (size:256b/32B) */
-struct hwrm_tf_session_resc_flush_input {
+/* hwrm_tf_session_hotup_state_set_input (size:192b/24B) */
+struct hwrm_tf_session_hotup_state_set_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -53117,32 +59161,25 @@ struct hwrm_tf_session_resc_flush_input {
uint64_t resp_addr;
/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
uint32_t fw_session_id;
+ /* Shared session state. */
+ uint16_t state;
/* Control flags. */
uint16_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TF_SESSION_HOTUP_STATE_SET_INPUT_FLAGS_DIR \
+ UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TF_SESSION_HOTUP_STATE_SET_INPUT_FLAGS_DIR_RX \
+ UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_SESSION_RESC_FLUSH_INPUT_FLAGS_DIR_TX
- /*
- * Defines the size, in bytes, of the provided flush_addr
- * buffer.
- */
- uint16_t flush_size;
- /*
- * This is the DMA address for the flush input data array
- * buffer. Array of tf_rm_resc_entry type. Size of the
- * buffer is provided by the 'flush_size' field in this
- * message.
- */
- uint64_t flush_addr;
+ #define HWRM_TF_SESSION_HOTUP_STATE_SET_INPUT_FLAGS_DIR_TX \
+ UINT32_C(0x1)
+ #define HWRM_TF_SESSION_HOTUP_STATE_SET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_SESSION_HOTUP_STATE_SET_INPUT_FLAGS_DIR_TX
} __rte_packed;
-/* hwrm_tf_session_resc_flush_output (size:128b/16B) */
-struct hwrm_tf_session_resc_flush_output {
+/* hwrm_tf_session_hotup_state_set_output (size:128b/16B) */
+struct hwrm_tf_session_hotup_state_set_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -53158,19 +59195,19 @@ struct hwrm_tf_session_resc_flush_output {
* is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
-/*****************************
- * hwrm_tf_session_resc_info *
- *****************************/
+/***********************************
+ * hwrm_tf_session_hotup_state_get *
+ ***********************************/
-/* hwrm_tf_session_resc_info_input (size:320b/40B) */
-struct hwrm_tf_session_resc_info_input {
+/* hwrm_tf_session_hotup_state_get_input (size:192b/24B) */
+struct hwrm_tf_session_hotup_state_get_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -53204,37 +59241,22 @@ struct hwrm_tf_session_resc_info_input {
/* Control flags. */
uint16_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_SESSION_RESC_INFO_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TF_SESSION_HOTUP_STATE_GET_INPUT_FLAGS_DIR \
+ UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_SESSION_RESC_INFO_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TF_SESSION_HOTUP_STATE_GET_INPUT_FLAGS_DIR_RX \
+ UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_SESSION_RESC_INFO_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_SESSION_RESC_INFO_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_SESSION_RESC_INFO_INPUT_FLAGS_DIR_TX
- /*
- * Defines the array size of the provided req_addr and
- * resv_addr array buffers. Should be set to the number of
- * request entries.
- */
- uint16_t req_size;
- /*
- * This is the DMA address for the request input data array
- * buffer. Array is of tf_rm_resc_req_entry type. Size of the
- * array buffer is provided by the 'req_size' field in this
- * message.
- */
- uint64_t req_addr;
- /*
- * This is the DMA address for the resc output data array
- * buffer. Array is of tf_rm_resc_entry type. Size of the array
- * buffer is provided by the 'req_size' field in this
- * message.
- */
- uint64_t resc_addr;
+ #define HWRM_TF_SESSION_HOTUP_STATE_GET_INPUT_FLAGS_DIR_TX \
+ UINT32_C(0x1)
+ #define HWRM_TF_SESSION_HOTUP_STATE_GET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_SESSION_HOTUP_STATE_GET_INPUT_FLAGS_DIR_TX
+ /* unused. */
+ uint8_t unused0[2];
} __rte_packed;
-/* hwrm_tf_session_resc_info_output (size:128b/16B) */
-struct hwrm_tf_session_resc_info_output {
+/* hwrm_tf_session_hotup_state_get_output (size:128b/16B) */
+struct hwrm_tf_session_hotup_state_get_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -53243,55 +59265,30 @@ struct hwrm_tf_session_resc_info_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * Size of the returned tf_rm_resc_entry data array. The value
- * cannot exceed the req_size defined by the input msg. The data
- * array is returned using the resv_addr specified DMA
- * address also provided by the input msg.
- */
- uint16_t size;
- /* unused. */
- uint8_t unused0[5];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
- */
- uint8_t valid;
-} __rte_packed;
-
-/* TruFlow RM capability of a resource. */
-/* tf_rm_resc_req_entry (size:64b/8B) */
-struct tf_rm_resc_req_entry {
- /* Type of the resource, defined globally in HCAPI RM. */
- uint32_t type;
- /* Minimum value. */
- uint16_t min;
- /* Maximum value. */
- uint16_t max;
-} __rte_packed;
-
-/* TruFlow RM reservation information. */
-/* tf_rm_resc_entry (size:64b/8B) */
-struct tf_rm_resc_entry {
- /* Type of the resource, defined globally in HCAPI RM. */
- uint32_t type;
- /* Start offset. */
- uint16_t start;
- /* Number of resources. */
- uint16_t stride;
+ /* Shared session HA state. */
+ uint16_t state;
+ /* Shared session HA reference count. */
+ uint16_t ref_cnt;
+ /* unused. */
+ uint8_t unused0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
+ */
+ uint8_t valid;
} __rte_packed;
-/************************
- * hwrm_tf_tbl_type_get *
- ************************/
+/**************************
+ * hwrm_tf_resc_usage_set *
+ **************************/
-/* hwrm_tf_tbl_type_get_input (size:256b/32B) */
-struct hwrm_tf_tbl_type_get_input {
+/* hwrm_tf_resc_usage_set_input (size:1024b/128B) */
+struct hwrm_tf_resc_usage_set_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -53325,35 +59322,51 @@ struct hwrm_tf_tbl_type_get_input {
/* Control flags. */
uint16_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR \
- UINT32_C(0x1)
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_RX \
- UINT32_C(0x0)
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_TX \
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_TX
+ /* Indicate table data is being sent via DMA. */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
+	/* Types of the resources whose usage state is to be set. */
+ uint16_t types;
+ /* WC TCAM Pool */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_WC_TCAM \
UINT32_C(0x1)
- #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_TX
- /*
- * When set use the special access register access to clear
- * the table entry on read.
- */
- #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_CLEAR_ON_READ \
+ /* EM Internal Memory Pool */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_EM \
UINT32_C(0x2)
- /* unused. */
- uint8_t unused0[2];
- /*
- * Type of the resource, defined globally in the
- * hwrm_tf_resc_type enum.
- */
- uint32_t type;
- /* Index of the type to retrieve. */
- uint32_t index;
+ /* Meter Instance */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_METER \
+ UINT32_C(0x4)
+ /* Counter Record Table */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_COUNTER \
+ UINT32_C(0x8)
+ /* Action Record Table */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ACTION \
+ UINT32_C(0x10)
+ /* ACT MODIFY/ENCAP Record Table */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ACT_MOD_ENCAP \
+ UINT32_C(0x20)
+ /* Source Property SMAC Record Table */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_SP_SMAC \
+ UINT32_C(0x40)
+ /* All Resource Types */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ALL \
+ UINT32_C(0x80)
+ /* Size of the data to set. */
+ uint16_t size;
+ /* unused */
+ uint8_t unused1[6];
+ /* Data to be set. */
+ uint8_t data[96];
} __rte_packed;
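/*
 * A minimal illustrative sketch (not part of the generated HWRM interface):
 * the 'types' field above is a bitmask, so usage for several resource types
 * can be reported in a single command. The combination below is an arbitrary
 * example; <rte_byteorder.h> is assumed to be available.
 */
static inline void
tf_resc_usage_set_types_example(struct hwrm_tf_resc_usage_set_input *req)
{
	uint16_t types = HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_WC_TCAM |
			 HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_EM |
			 HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_COUNTER;

	req->types = rte_cpu_to_le_16(types);
}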
-/* hwrm_tf_tbl_type_get_output (size:2240b/280B) */
-struct hwrm_tf_tbl_type_get_output {
+/* hwrm_tf_resc_usage_set_output (size:128b/16B) */
+struct hwrm_tf_resc_usage_set_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -53362,16 +59375,8 @@ struct hwrm_tf_tbl_type_get_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Response code. */
- uint32_t resp_code;
- /* Response size. */
- uint16_t size;
- /* unused */
- uint16_t unused0;
- /* Response data. */
- uint8_t data[256];
- /* unused */
- uint8_t unused1[7];
+ /* unused. */
+ uint8_t unused0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -53383,13 +59388,13 @@ struct hwrm_tf_tbl_type_get_output {
uint8_t valid;
} __rte_packed;
-/************************
- * hwrm_tf_tbl_type_set *
- ************************/
+/****************************
+ * hwrm_tf_resc_usage_query *
+ ****************************/
-/* hwrm_tf_tbl_type_set_input (size:1024b/128B) */
-struct hwrm_tf_tbl_type_set_input {
+/* hwrm_tf_resc_usage_query_input (size:256b/32B) */
+struct hwrm_tf_resc_usage_query_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -53423,34 +59428,47 @@ struct hwrm_tf_tbl_type_set_input {
/* Control flags. */
uint16_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX
- /* Indicate table data is being sent via DMA. */
- #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_LAST \
+ HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX
/* unused. */
uint8_t unused0[2];
- /*
- * Type of the resource, defined globally in the
- * hwrm_tf_resc_type enum.
- */
- uint32_t type;
- /* Index of the type to retrieve. */
- uint32_t index;
- /* Size of the data to set. */
- uint16_t size;
+	/* Types of the resources whose usage state is to be retrieved. */
+ uint16_t types;
+ /* WC TCAM Pool */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_WC_TCAM \
+ UINT32_C(0x1)
+ /* EM Internal Memory Pool */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_EM \
+ UINT32_C(0x2)
+ /* Meter Instance */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_METER \
+ UINT32_C(0x4)
+ /* Counter Record Table */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_COUNTER \
+ UINT32_C(0x8)
+ /* Action Record Table */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_ACTION \
+ UINT32_C(0x10)
+ /* ACT MODIFY/ENCAP Record Table */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_ACT_MOD_ENCAP \
+ UINT32_C(0x20)
+ /* Source Property SMAC Record Table */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_SP_SMAC \
+ UINT32_C(0x40)
+ /* All Resource Types */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_ALL \
+ UINT32_C(0x80)
/* unused */
uint8_t unused1[6];
- /* Data to be set. */
- uint8_t data[88];
} __rte_packed;
-/* hwrm_tf_tbl_type_set_output (size:128b/16B) */
-struct hwrm_tf_tbl_type_set_output {
+/* hwrm_tf_resc_usage_query_output (size:960b/120B) */
+struct hwrm_tf_resc_usage_query_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -53459,8 +59477,16 @@ struct hwrm_tf_tbl_type_set_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
+ /* Response code. */
+ uint32_t resp_code;
+ /* Response size. */
+ uint16_t size;
+ /* unused */
+ uint16_t unused0;
+ /* Response data. */
+ uint8_t data[96];
+ /* unused */
+ uint8_t unused1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -53472,13 +59498,17 @@ struct hwrm_tf_tbl_type_set_output {
uint8_t valid;
} __rte_packed;
-/**************************
- * hwrm_tf_ctxt_mem_alloc *
- **************************/
+/****************************
+ * hwrm_tfc_tbl_scope_qcaps *
+ ****************************/
-/* hwrm_tf_ctxt_mem_alloc_input (size:192b/24B) */
-struct hwrm_tf_ctxt_mem_alloc_input {
+/*
+ * TruFlow command to check if firmware is capable of
+ * supporting table scopes.
+ */
+/* hwrm_tfc_tbl_scope_qcaps_input (size:128b/16B) */
+struct hwrm_tfc_tbl_scope_qcaps_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -53507,14 +59537,10 @@ struct hwrm_tf_ctxt_mem_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Size in KB of memory to be allocated. */
- uint32_t mem_size;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
} __rte_packed;
-/* hwrm_tf_ctxt_mem_alloc_output (size:192b/24B) */
-struct hwrm_tf_ctxt_mem_alloc_output {
+/* hwrm_tfc_tbl_scope_qcaps_output (size:192b/24B) */
+struct hwrm_tfc_tbl_scope_qcaps_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -53523,69 +59549,52 @@ struct hwrm_tf_ctxt_mem_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Pointer to the PBL, or PDL depending on number of levels */
- uint64_t page_dir;
- /* Size of memory allocated. */
- uint32_t mem_size;
- /* Counter PBL indirect levels. */
- uint8_t page_level;
- /* PBL pointer is physical start address. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0)
- /* PBL pointer points to PTE table. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1)
/*
- * PBL pointer points to PDE table with each entry pointing
- * to PTE tables.
+ * The maximum number of lookup records that a table scope can support.
+ * This field is only valid if tbl_scope_capable is not zero.
*/
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2)
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_LEVEL_LAST \
- HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_LEVEL_LVL_2
- /* Page size. */
- uint8_t page_size;
- /* 4KB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_4K UINT32_C(0x0)
- /* 8KB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_8K UINT32_C(0x1)
- /* 64KB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_64K UINT32_C(0x4)
- /* 128KB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_128K UINT32_C(0x5)
- /* 256KB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_256K UINT32_C(0x6)
- /* 512KB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_512K UINT32_C(0x7)
- /* 1MB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_1M UINT32_C(0x8)
- /* 2MB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_2M UINT32_C(0x9)
- /* 4MB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_4M UINT32_C(0xa)
- /* 8MB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_8M UINT32_C(0xb)
- /* 1GB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_1G UINT32_C(0x12)
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_LAST \
- HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_1G
+ uint32_t max_lkup_rec_cnt;
+ /*
+ * The maximum number of action records that a table scope can support.
+ * This field is only valid if tbl_scope_capable is not zero.
+ */
+ uint32_t max_act_rec_cnt;
+	/* Non-zero if the firmware is capable of table scopes. */
+ uint8_t tbl_scope_capable;
+ /*
+ * log2 of the number of lookup static buckets that a table scope can
+ * support. This field is only valid if tbl_scope_capable is not zero.
+ */
+ uint8_t max_lkup_static_buckets_exp;
/* unused. */
- uint8_t unused0;
+ uint8_t unused0[5];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
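/*
 * A minimal illustrative sketch (not part of the generated HWRM interface):
 * max_lkup_static_buckets_exp above is a log2 value, so the usable static
 * bucket count is 1 << exp when table scopes are supported. The helper name
 * is hypothetical.
 */
static inline uint32_t
tfc_tbl_scope_max_static_buckets(const struct hwrm_tfc_tbl_scope_qcaps_output *resp)
{
	if (!resp->tbl_scope_capable ||
	    resp->max_lkup_static_buckets_exp >= 32)
		return 0;
	return 1U << resp->max_lkup_static_buckets_exp;
}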
-/*************************
- * hwrm_tf_ctxt_mem_free *
- *************************/
+/*******************************
+ * hwrm_tfc_tbl_scope_id_alloc *
+ *******************************/
-/* hwrm_tf_ctxt_mem_free_input (size:320b/40B) */
-struct hwrm_tf_ctxt_mem_free_input {
+/*
+ * TruFlow command to allocate a table scope ID and create the pools.
+ *
+ * There is no corresponding free command since a table scope
+ * ID will automatically be freed once the last FID is removed.
+ * That is, when the hwrm_tfc_tbl_scope_fid_rem command returns
+ * a fid_cnt of 0 that also means that the table scope ID has
+ * been freed.
+ */
+/* hwrm_tfc_tbl_scope_id_alloc_input (size:256b/32B) */
+struct hwrm_tfc_tbl_scope_id_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -53614,59 +59623,41 @@ struct hwrm_tf_ctxt_mem_free_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
- /* Counter PBL indirect levels. */
- uint8_t page_level;
- /* PBL pointer is physical start address. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0)
- /* PBL pointer points to PTE table. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1)
/*
- * PBL pointer points to PDE table with each entry pointing
- * to PTE tables.
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
*/
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2)
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_LEVEL_LAST \
- HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_LEVEL_LVL_2
- /* Page size. */
- uint8_t page_size;
- /* 4KB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_4K UINT32_C(0x0)
- /* 8KB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_8K UINT32_C(0x1)
- /* 64KB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_64K UINT32_C(0x4)
- /* 128KB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_128K UINT32_C(0x5)
- /* 256KB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_256K UINT32_C(0x6)
- /* 512KB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_512K UINT32_C(0x7)
- /* 1MB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_1M UINT32_C(0x8)
- /* 2MB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_2M UINT32_C(0x9)
- /* 4MB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_4M UINT32_C(0xa)
- /* 8MB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_8M UINT32_C(0xb)
- /* 1GB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_1G UINT32_C(0x12)
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_LAST \
- HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_1G
- /* unused. */
- uint8_t unused0[2];
- /* Pointer to the PBL, or PDL depending on number of levels */
- uint64_t page_dir;
- /* Size of memory allocated. */
- uint32_t mem_size;
+ uint16_t fid;
+ /* The maximum number of pools for this table scope. */
+ uint16_t max_pools;
+ /* Non-zero if this table scope is shared. */
+ uint8_t shared;
+ /*
+ * The size of the lookup pools per direction expressed as
+ * log2(max_records/max_pools). That is, size=2^exp.
+ *
+ * Array is indexed by enum cfa_dir.
+ */
+ uint8_t lkup_pool_sz_exp[2];
+ /*
+ * The size of the action pools per direction expressed as
+ * log2(max_records/max_pools). That is, size=2^exp.
+ *
+ * Array is indexed by enum cfa_dir.
+ */
+ uint8_t act_pool_sz_exp[2];
+ /* Application type. 0 (AFM), 1 (TF) */
+ uint8_t app_type;
/* unused. */
- uint8_t unused1[4];
+ uint8_t unused0[6];
} __rte_packed;
-/* hwrm_tf_ctxt_mem_free_output (size:128b/16B) */
-struct hwrm_tf_ctxt_mem_free_output {
+/* hwrm_tfc_tbl_scope_id_alloc_output (size:128b/16B) */
+struct hwrm_tfc_tbl_scope_id_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -53675,26 +59666,34 @@ struct hwrm_tf_ctxt_mem_free_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
+ /* The table scope ID that was allocated. */
+ uint8_t tsid;
+ /*
+ * Non-zero if this is the first FID associated with this table scope
+ * ID.
+ */
+ uint8_t first;
/* unused. */
- uint8_t unused0[7];
+ uint8_t unused0[5];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
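
The pair of structures above maps naturally onto a small allocation helper.
The sketch below is illustrative only: the function name is invented, and the
HWRM_TFC_TBL_SCOPE_ID_ALLOC request type, HWRM_PREP(), bnxt_hwrm_send_message(),
HWRM_CHECK_RESULT() and HWRM_UNLOCK() are assumed to follow the driver's
existing HWRM helper conventions. The pool size exponents are derived as the
structure comments describe, i.e. exp = log2(max_records / max_pools).

/*
 * Illustrative sketch; request-type value and HWRM helper macros are
 * assumed to match the driver's existing conventions.
 */
static int
bnxt_hwrm_tfc_tsid_alloc_sketch(struct bnxt *bp, uint16_t max_pools,
				uint32_t max_lkup_recs, uint32_t max_act_recs,
				uint8_t *tsid, bool *first)
{
	struct hwrm_tfc_tbl_scope_id_alloc_input req = { 0 };
	struct hwrm_tfc_tbl_scope_id_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;
	uint8_t exp;
	int rc;

	HWRM_PREP(&req, HWRM_TFC_TBL_SCOPE_ID_ALLOC, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);	/* request is for this function */
	req.max_pools = rte_cpu_to_le_16(max_pools);
	req.shared = 0;				/* non-shared table scope */
	req.app_type = 1;			/* 1 = TF */
	/* Pool size exponent per direction: size = 2^exp = max_records / max_pools */
	exp = rte_log2_u32(max_lkup_recs / max_pools);
	req.lkup_pool_sz_exp[0] = exp;		/* assumed cfa_dir: 0 = RX */
	req.lkup_pool_sz_exp[1] = exp;		/* assumed cfa_dir: 1 = TX */
	exp = rte_log2_u32(max_act_recs / max_pools);
	req.act_pool_sz_exp[0] = exp;
	req.act_pool_sz_exp[1] = exp;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	*tsid = resp->tsid;
	*first = resp->first != 0;	/* first FID attached to this tsid? */
	HWRM_UNLOCK();
	return rc;
}
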
-/*************************
- * hwrm_tf_ctxt_mem_rgtr *
- *************************/
+/*****************************
+ * hwrm_tfc_tbl_scope_config *
+ *****************************/
-/* hwrm_tf_ctxt_mem_rgtr_input (size:256b/32B) */
-struct hwrm_tf_ctxt_mem_rgtr_input {
+/* TruFlow command to configure the table scope memory. */
+/* hwrm_tfc_tbl_scope_config_input (size:704b/88B) */
+struct hwrm_tfc_tbl_scope_config_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -53723,55 +59722,58 @@ struct hwrm_tf_ctxt_mem_rgtr_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Control flags. */
- uint16_t flags;
- /* Counter PBL indirect levels. */
- uint8_t page_level;
- /* PBL pointer is physical start address. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0)
- /* PBL pointer points to PTE table. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1)
/*
- * PBL pointer points to PDE table with each entry pointing
- * to PTE tables.
+ * The base addresses for lookup memory.
+ * Array is indexed by enum cfa_dir.
*/
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2)
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LAST \
- HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_2
- /* Page size. */
- uint8_t page_size;
- /* 4KB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4K UINT32_C(0x0)
- /* 8KB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_8K UINT32_C(0x1)
- /* 64KB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_64K UINT32_C(0x4)
- /* 128KB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_128K UINT32_C(0x5)
- /* 256KB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_256K UINT32_C(0x6)
- /* 512KB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_512K UINT32_C(0x7)
- /* 1MB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1M UINT32_C(0x8)
- /* 2MB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_2M UINT32_C(0x9)
- /* 4MB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4M UINT32_C(0xa)
- /* 8MB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_8M UINT32_C(0xb)
- /* 1GB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1G UINT32_C(0x12)
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_LAST \
- HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1G
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
- /* Pointer to the PBL, or PDL depending on number of levels */
- uint64_t page_dir;
+ uint64_t lkup_base_addr[2];
+ /*
+ * The base addresses for action memory.
+ * Array is indexed by enum cfa_dir.
+ */
+ uint64_t act_base_addr[2];
+ /*
+ * The number of minimum sized lkup records per direction.
+ * In this usage, records are the minimum lookup memory
+ * allocation unit in a table scope. This value is the total
+ * memory required for buckets and entries.
+ *
+ * Array is indexed by enum cfa_dir.
+ */
+ uint32_t lkup_rec_cnt[2];
+ /*
+ * The number of minimum sized action records per direction.
+ * Similar to the lkup_rec_cnt, records are the minimum
+ * action memory allocation unit in a table scope.
+ *
+ * Array is indexed by enum cfa_dir.
+ */
+ uint32_t act_rec_cnt[2];
+ /*
+ * The number of static lookup buckets in the table scope.
+ * Array is indexed by enum cfa_dir.
+ */
+ uint32_t lkup_static_bucket_cnt[2];
+ /* The page size of the table scope. */
+ uint32_t pbl_page_sz;
+ /*
+ * The PBL level for lookup memory.
+ * Array is indexed by enum cfa_dir.
+ */
+ uint8_t lkup_pbl_level[2];
+ /*
+ * The PBL level for action memory.
+ * Array is indexed by enum cfa_dir.
+ */
+ uint8_t act_pbl_level[2];
+ /* The table scope ID. */
+ uint8_t tsid;
+ /* unused. */
+ uint8_t unused0[7];
} __rte_packed;
-/* hwrm_tf_ctxt_mem_rgtr_output (size:128b/16B) */
-struct hwrm_tf_ctxt_mem_rgtr_output {
+/* hwrm_tfc_tbl_scope_config_output (size:128b/16B) */
+struct hwrm_tfc_tbl_scope_config_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -53780,31 +59782,27 @@ struct hwrm_tf_ctxt_mem_rgtr_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * Id/Handle to the recently register context memory. This
- * handle is passed to the TF session.
- */
- uint16_t ctx_id;
/* unused. */
- uint8_t unused0[5];
+ uint8_t unused0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
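
The configuration request above carries the per-direction backing-store
layout. The sketch below is one hypothetical way to populate it once the
lookup and action backing stores have been allocated and their IOVAs, record
counts and PBL levels are known; the HWRM_TFC_TBL_SCOPE_CONFIG request type,
the exact encoding expected in pbl_page_sz, and the HWRM helper macros are
assumptions rather than facts taken from this file.

/* Illustrative sketch; helpers and the pbl_page_sz encoding are assumed. */
static int
bnxt_hwrm_tfc_tbl_scope_cfg_sketch(struct bnxt *bp, uint8_t tsid,
				   const rte_iova_t lkup_base[2],
				   const rte_iova_t act_base[2],
				   const uint32_t lkup_recs[2],
				   const uint32_t act_recs[2],
				   const uint32_t static_buckets[2],
				   uint32_t pbl_page_sz, uint8_t pbl_level)
{
	struct hwrm_tfc_tbl_scope_config_input req = { 0 };
	struct hwrm_tfc_tbl_scope_config_output *resp = bp->hwrm_cmd_resp_addr;
	int i, rc;

	HWRM_PREP(&req, HWRM_TFC_TBL_SCOPE_CONFIG, BNXT_USE_CHIMP_MB);
	req.tsid = tsid;
	req.pbl_page_sz = rte_cpu_to_le_32(pbl_page_sz);
	for (i = 0; i < 2; i++) {	/* assumed cfa_dir: 0 = RX, 1 = TX */
		req.lkup_base_addr[i] = rte_cpu_to_le_64(lkup_base[i]);
		req.act_base_addr[i] = rte_cpu_to_le_64(act_base[i]);
		req.lkup_rec_cnt[i] = rte_cpu_to_le_32(lkup_recs[i]);
		req.act_rec_cnt[i] = rte_cpu_to_le_32(act_recs[i]);
		req.lkup_static_bucket_cnt[i] =
			rte_cpu_to_le_32(static_buckets[i]);
		req.lkup_pbl_level[i] = pbl_level;
		req.act_pbl_level[i] = pbl_level;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}
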
-/***************************
- * hwrm_tf_ctxt_mem_unrgtr *
- ***************************/
+/*******************************
+ * hwrm_tfc_tbl_scope_deconfig *
+ *******************************/
-/* hwrm_tf_ctxt_mem_unrgtr_input (size:192b/24B) */
-struct hwrm_tf_ctxt_mem_unrgtr_input {
+/* TruFlow command to deconfigure the table scope memory. */
+/* hwrm_tfc_tbl_scope_deconfig_input (size:192b/24B) */
+struct hwrm_tfc_tbl_scope_deconfig_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -53833,19 +59831,14 @@ struct hwrm_tf_ctxt_mem_unrgtr_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /*
- * Id/Handle to the recently register context memory. This
- * handle is passed to the TF session.
- */
- uint16_t ctx_id;
+ /* The table scope ID. */
+ uint8_t tsid;
/* unused. */
- uint8_t unused0[2];
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
+ uint8_t unused0[7];
} __rte_packed;
-/* hwrm_tf_ctxt_mem_unrgtr_output (size:128b/16B) */
-struct hwrm_tf_ctxt_mem_unrgtr_output {
+/* hwrm_tfc_tbl_scope_deconfig_output (size:128b/16B) */
+struct hwrm_tfc_tbl_scope_deconfig_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -53857,23 +59850,24 @@ struct hwrm_tf_ctxt_mem_unrgtr_output {
/* unused. */
uint8_t unused0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
-/************************
- * hwrm_tf_ext_em_qcaps *
- ************************/
+/******************************
+ * hwrm_tfc_tbl_scope_fid_add *
+ ******************************/
-/* hwrm_tf_ext_em_qcaps_input (size:192b/24B) */
-struct hwrm_tf_ext_em_qcaps_input {
+/* TruFlow command to add a FID to a table scope. */
+/* hwrm_tfc_tbl_scope_fid_add_input (size:192b/24B) */
+struct hwrm_tfc_tbl_scope_fid_add_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -53902,158 +59896,54 @@ struct hwrm_tf_ext_em_qcaps_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Control flags. */
- uint32_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR \
- UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_RX \
- UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX \
- UINT32_C(0x1)
- #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX
- /* When set to 1, all offloaded flows will be sent to EXT EM. */
- #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD \
- UINT32_C(0x2)
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
-} __rte_packed;
-
-/* hwrm_tf_ext_em_qcaps_output (size:384b/48B) */
-struct hwrm_tf_ext_em_qcaps_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint32_t flags;
- /*
- * When set to 1, indicates the FW supports the Centralized
- * Memory Model. The concept designates one entity for the
- * memory allocation while all others ‘subscribe’ to it.
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED \
- UINT32_C(0x1)
/*
- * When set to 1, indicates the FW supports the Detached
- * Centralized Memory Model. The memory is allocated and managed
- * as a separate entity. All PFs and VFs will be granted direct
- * or semi-direct access to the allocated memory while none of
- * which can interfere with the management of the memory.
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
*/
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED \
- UINT32_C(0x2)
- /* When set to 1, indicates FW support for host based EEM memory. */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_FLAGS_HOST_MEMORY_SUPPORTED \
- UINT32_C(0x4)
- /* When set to 1, indicates FW support for on-chip based EEM memory. */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_FLAGS_FW_MEMORY_SUPPORTED \
- UINT32_C(0x8)
+ uint16_t fid;
+ /* The table scope ID. */
+ uint8_t tsid;
/* unused. */
- uint32_t unused0;
- /* Support flags. */
- uint32_t supported;
- /*
- * If set to 1, then EXT EM KEY0 table is supported using
- * crc32 hash.
- * If set to 0, EXT EM KEY0 table is not supported.
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_KEY0_TABLE \
- UINT32_C(0x1)
- /*
- * If set to 1, then EXT EM KEY1 table is supported using
- * lookup3 hash.
- * If set to 0, EXT EM KEY1 table is not supported.
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_KEY1_TABLE \
- UINT32_C(0x2)
- /*
- * If set to 1, then EXT EM External Record table is supported.
- * If set to 0, EXT EM External Record table is not
- * supported. (This table includes action record, EFC
- * pointers, encap pointers)
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_EXTERNAL_RECORD_TABLE \
- UINT32_C(0x4)
- /*
- * If set to 1, then EXT EM External Flow Counters table is
- * supported.
- * If set to 0, EXT EM External Flow Counters table is not
- * supported.
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE \
- UINT32_C(0x8)
- /*
- * If set to 1, then FID table used for implicit flow flush
- * is supported.
- * If set to 0, then FID table used for implicit flow flush
- * is not supported.
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_FID_TABLE \
- UINT32_C(0x10)
- /*
- * If set to 1, then table scopes are supported.
- * If set to 0, then table scopes are not supported.
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_TBL_SCOPES \
- UINT32_C(0x20)
- /*
- * The maximum number of entries supported by EXT EM. When
- * configuring the host memory the number of numbers of
- * entries that can supported are -
- * 32k, 64k 128k, 256k, 512k, 1M, 2M, 4M, 8M, 32M, 64M,
- * 128M entries.
- * Any value that are not these values, the FW will round
- * down to the closest support number of entries.
- */
- uint32_t max_entries_supported;
- /*
- * The entry size in bytes of each entry in the EXT EM
- * KEY0/KEY1 tables.
- */
- uint16_t key_entry_size;
- /*
- * The entry size in bytes of each entry in the EXT EM RECORD
- * tables.
- */
- uint16_t record_entry_size;
- /* The entry size in bytes of each entry in the EXT EM EFC tables. */
- uint16_t efc_entry_size;
- /* The FID size in bytes of each entry in the EXT EM FID tables. */
- uint16_t fid_entry_size;
- /* Maximum number of ctxt mem allocations allowed. */
- uint32_t max_ctxt_mem_allocs;
- /*
- * Maximum number of static buckets that can be assigned to lookup
- * table scopes.
- */
- uint32_t max_static_buckets;
+ uint8_t unused0[5];
+} __rte_packed;
+
+/* hwrm_tfc_tbl_scope_fid_add_output (size:128b/16B) */
+struct hwrm_tfc_tbl_scope_fid_add_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The number of FIDs currently in the table scope ID. */
+ uint8_t fid_cnt;
/* unused. */
- uint8_t unused1[7];
+ uint8_t unused0[6];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
-/*********************
- * hwrm_tf_ext_em_op *
- *********************/
+/******************************
+ * hwrm_tfc_tbl_scope_fid_rem *
+ ******************************/
-/* hwrm_tf_ext_em_op_input (size:256b/32B) */
-struct hwrm_tf_ext_em_op_input {
+/* TruFlow command to remove a FID from a table scope. */
+/* hwrm_tfc_tbl_scope_fid_rem_input (size:192b/24B) */
+struct hwrm_tfc_tbl_scope_fid_rem_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -54082,55 +59972,23 @@ struct hwrm_tf_ext_em_op_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Control flags. */
- uint16_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR_TX
- /* unused. */
- uint16_t unused0;
- /* The number of EXT EM key table entries to be configured. */
- uint16_t op;
- /* This value is reserved and should not be used. */
- #define HWRM_TF_EXT_EM_OP_INPUT_OP_RESERVED UINT32_C(0x0)
- /*
- * To properly stop EXT EM and ensure there are no DMA's,
- * the caller must disable EXT EM for the given PF, using
- * this call. This will safely disable EXT EM and ensure
- * that all DMA'ed to the keys/records/efc have been
- * completed.
- */
- #define HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE UINT32_C(0x1)
/*
- * Once the EXT EM host memory has been configured, EXT EM
- * options have been configured. Then the caller should
- * enable EXT EM for the given PF. Note once this call has
- * been made, then the EXT EM mechanism will be active and
- * DMA's will occur as packets are processed.
- */
- #define HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE UINT32_C(0x2)
- /*
- * Clear EXT EM settings for the given PF so that the
- * register values are reset back to their initial state.
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
*/
- #define HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_CLEANUP UINT32_C(0x3)
- #define HWRM_TF_EXT_EM_OP_INPUT_OP_LAST \
- HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_CLEANUP
- /* unused. */
- uint16_t unused1;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
+ uint16_t fid;
+ /* The table scope ID. */
+ uint8_t tsid;
/* unused. */
- uint32_t unused2;
+ uint8_t unused0[5];
} __rte_packed;
-/* hwrm_tf_ext_em_op_output (size:128b/16B) */
-struct hwrm_tf_ext_em_op_output {
+/* hwrm_tfc_tbl_scope_fid_rem_output (size:128b/16B) */
+struct hwrm_tfc_tbl_scope_fid_rem_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -54139,26 +59997,35 @@ struct hwrm_tf_ext_em_op_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
+ /* The number of FIDs remaining in the table scope ID. */
+ uint16_t fid_cnt;
/* unused. */
- uint8_t unused0[7];
+ uint8_t unused0[5];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
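
Because a table scope ID is freed implicitly once its last FID is removed
(see the hwrm_tfc_tbl_scope_id_alloc description), a caller normally checks
fid_cnt in the response. A hedged sketch, with the request-type macro and the
HWRM helpers assumed to follow the driver's usual pattern:

/* Illustrative sketch; request-type value and HWRM helpers are assumed. */
static int
bnxt_hwrm_tfc_tsid_fid_rem_sketch(struct bnxt *bp, uint16_t fid, uint8_t tsid,
				  bool *tsid_freed)
{
	struct hwrm_tfc_tbl_scope_fid_rem_input req = { 0 };
	struct hwrm_tfc_tbl_scope_fid_rem_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_TFC_TBL_SCOPE_FID_REM, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(fid);
	req.tsid = tsid;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	/* fid_cnt == 0 means the last FID left and the tsid has been freed. */
	*tsid_freed = rte_le_to_cpu_16(resp->fid_cnt) == 0;
	HWRM_UNLOCK();
	return rc;
}
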
-/**********************
- * hwrm_tf_ext_em_cfg *
- **********************/
+/*****************************
+ * hwrm_tfc_session_id_alloc *
+ *****************************/
-/* hwrm_tf_ext_em_cfg_input (size:512b/64B) */
-struct hwrm_tf_ext_em_cfg_input {
+/*
+ * Allocate a TFC session. Requests the firmware to allocate a TFC
+ * session identifier and associate a forwarding function with the
+ * session. Though there is no explicit matching free for a session
+ * id alloc, disassociating the last fid from a session id (fid_cnt
+ * goes to 0) will result in this session id being freed automatically.
+ */
+/* hwrm_tfc_session_id_alloc_input (size:192b/24B) */
+struct hwrm_tfc_session_id_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -54187,157 +60054,21 @@ struct hwrm_tf_ext_em_cfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Control flags. */
- uint32_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR \
- UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX \
- UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX \
- UINT32_C(0x1)
- #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX
- /* When set to 1, all offloaded flows will be sent to EXT EM. */
- #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_PREFERRED_OFFLOAD \
- UINT32_C(0x2)
- /* When set to 1, secondary, 0 means primary. */
- #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_SECONDARY_PF \
- UINT32_C(0x4)
- /*
- * Group_id which used by Firmware to identify memory pools belonging
- * to certain group.
- */
- uint16_t group_id;
- /*
- * Dynamically reconfigure EEM pending cache every 1/10th of second.
- * If set to 0 it will disable the EEM HW flush of the pending cache.
- */
- uint8_t flush_interval;
- /* unused. */
- uint8_t unused0;
- /*
- * Configured EXT EM with the given number of entries. All
- * the EXT EM tables KEY0, KEY1, RECORD, EFC all have the
- * same number of entries and all tables will be configured
- * using this value. Current minimum value is 32k. Current
- * maximum value is 128M.
- */
- uint32_t num_entries;
- uint32_t enables;
- /*
- * This bit must be '1' for the group_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_GROUP_ID \
- UINT32_C(0x1)
- /*
- * This bit must be '1' for the flush_interval field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_FLUSH_INTERVAL \
- UINT32_C(0x2)
- /*
- * This bit must be '1' for the num_entries field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_NUM_ENTRIES \
- UINT32_C(0x4)
- /*
- * This bit must be '1' for the key0_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_KEY0_CTX_ID \
- UINT32_C(0x8)
/*
- * This bit must be '1' for the key1_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_KEY1_CTX_ID \
- UINT32_C(0x10)
- /*
- * This bit must be '1' for the record_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_RECORD_CTX_ID \
- UINT32_C(0x20)
- /*
- * This bit must be '1' for the efc_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_EFC_CTX_ID \
- UINT32_C(0x40)
- /*
- * This bit must be '1' for the fid_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_FID_CTX_ID \
- UINT32_C(0x80)
- /*
- * This bit must be '1' for the action_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_ACTION_CTX_ID \
- UINT32_C(0x100)
- /*
- * This bit must be '1' for the action_tbl_scope field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_ACTION_TBL_SCOPE \
- UINT32_C(0x200)
- /*
- * This bit must be '1' for the lkup_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_LKUP_CTX_ID \
- UINT32_C(0x400)
- /*
- * This bit must be '1' for the lkup_tbl_scope field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_LKUP_TBL_SCOPE \
- UINT32_C(0x800)
- /*
- * This bit must be '1' for the lkup_static_buckets field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_LKUP_STATIC_BUCKETS \
- UINT32_C(0x1000)
- /* Configured EXT EM with the given context if for KEY0 table. */
- uint16_t key0_ctx_id;
- /* Configured EXT EM with the given context if for KEY1 table. */
- uint16_t key1_ctx_id;
- /* Configured EXT EM with the given context if for RECORD table. */
- uint16_t record_ctx_id;
- /* Configured EXT EM with the given context if for EFC table. */
- uint16_t efc_ctx_id;
- /* Configured EXT EM with the given context if for EFC table. */
- uint16_t fid_ctx_id;
- /* Context id of action table scope. */
- uint16_t action_ctx_id;
- /* Table scope id used for action record entries. */
- uint16_t action_tbl_scope;
- /* Context id of lookup table scope. */
- uint16_t lkup_ctx_id;
- /* Table scope id used for EM lookup entries. */
- uint16_t lkup_tbl_scope;
- /* unused. */
- uint16_t unused1;
- /*
- * Number of 32B static buckets to be allocated at the beginning
- * of table scope.
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
*/
- uint32_t lkup_static_buckets;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
- /* unused. */
- uint32_t unused2;
+ uint16_t fid;
+ /* Unused field */
+ uint8_t unused0[6];
} __rte_packed;
-/* hwrm_tf_ext_em_cfg_output (size:128b/16B) */
-struct hwrm_tf_ext_em_cfg_output {
+/* hwrm_tfc_session_id_alloc_output (size:128b/16B) */
+struct hwrm_tfc_session_id_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -54346,26 +60077,35 @@ struct hwrm_tf_ext_em_cfg_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * Unique session identifier for the session created by the
+ * firmware.
+ */
+ uint16_t sid;
+ /* Unused field */
+ uint8_t unused0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
*/
uint8_t valid;
} __rte_packed;
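
A minimal sketch of allocating a session id for the local function follows.
The function name is hypothetical, and the HWRM_TFC_SESSION_ID_ALLOC request
type plus the HWRM helper macros are assumed to exist as in the rest of the
driver.

/* Illustrative sketch; request-type value and HWRM helpers are assumed. */
static int
bnxt_hwrm_tfc_session_id_alloc_sketch(struct bnxt *bp, uint16_t *sid)
{
	struct hwrm_tfc_session_id_alloc_input req = { 0 };
	struct hwrm_tfc_session_id_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_TFC_SESSION_ID_ALLOC, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);	/* allocate for this function */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	*sid = rte_le_to_cpu_16(resp->sid);
	HWRM_UNLOCK();
	return rc;
}
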
-/***********************
- * hwrm_tf_ext_em_qcfg *
- ***********************/
+/****************************
+ * hwrm_tfc_session_fid_add *
+ ****************************/
-/* hwrm_tf_ext_em_qcfg_input (size:192b/24B) */
-struct hwrm_tf_ext_em_qcfg_input {
+/*
+ * Associate a TFC session id with a forwarding function. The target_fid
+ * will be associated with the passed-in sid.
+ */
+/* hwrm_tfc_session_fid_add_input (size:192b/24B) */
+struct hwrm_tfc_session_fid_add_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -54394,22 +60134,26 @@ struct hwrm_tf_ext_em_qcfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Control flags. */
- uint32_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR_TX
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Unique session identifier for the session created by the
+ * firmware.
+ */
+ uint16_t sid;
+ /* Unused field */
+ uint8_t unused0[4];
} __rte_packed;
-/* hwrm_tf_ext_em_qcfg_output (size:448b/56B) */
-struct hwrm_tf_ext_em_qcfg_output {
+/* hwrm_tfc_session_fid_add_output (size:128b/16B) */
+struct hwrm_tfc_session_fid_add_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -54418,118 +60162,35 @@ struct hwrm_tf_ext_em_qcfg_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Control flags. */
- uint32_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR \
- UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR_RX \
- UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR_TX \
- UINT32_C(0x1)
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR_LAST \
- HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR_TX
- /* When set to 1, all offloaded flows will be sent to EXT EM. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_PREFERRED_OFFLOAD \
- UINT32_C(0x2)
- /* The number of entries the FW has configured for EXT EM. */
- uint32_t num_entries;
- /* Configured EXT EM with the given context if for KEY0 table. */
- uint16_t key0_ctx_id;
- /* Configured EXT EM with the given context if for KEY1 table. */
- uint16_t key1_ctx_id;
- /* Configured EXT EM with the given context if for RECORD table. */
- uint16_t record_ctx_id;
- /* Configured EXT EM with the given context if for EFC table. */
- uint16_t efc_ctx_id;
- /* Configured EXT EM with the given context if for EFC table. */
- uint16_t fid_ctx_id;
- /* unused. */
- uint16_t unused0;
- uint32_t supported;
- /* This bit must be '1' for the group_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_GROUP_ID \
- UINT32_C(0x1)
- /* This bit must be '1' for the flush_interval field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_FLUSH_INTERVAL \
- UINT32_C(0x2)
- /* This bit must be '1' for the num_entries field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_NUM_ENTRIES \
- UINT32_C(0x4)
- /* This bit must be '1' for the key0_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_KEY0_CTX_ID \
- UINT32_C(0x8)
- /* This bit must be '1' for the key1_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_KEY1_CTX_ID \
- UINT32_C(0x10)
- /* This bit must be '1' for the record_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_RECORD_CTX_ID \
- UINT32_C(0x20)
- /* This bit must be '1' for the efc_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_EFC_CTX_ID \
- UINT32_C(0x40)
- /* This bit must be '1' for the fid_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_FID_CTX_ID \
- UINT32_C(0x80)
- /* This bit must be '1' for the action_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_ACTION_CTX_ID \
- UINT32_C(0x100)
- /* This bit must be '1' for the action_tbl_scope field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_ACTION_TBL_SCOPE \
- UINT32_C(0x200)
- /* This bit must be '1' for the lkup_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_LKUP_CTX_ID \
- UINT32_C(0x400)
- /* This bit must be '1' for the lkup_tbl_scope field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_LKUP_TBL_SCOPE \
- UINT32_C(0x800)
- /* This bit must be '1' for the lkup_static_buckets field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_LKUP_STATIC_BUCKETS \
- UINT32_C(0x1000)
- /*
- * Group id is used by firmware to identify memory pools belonging
- * to certain group.
- */
- uint16_t group_id;
- /* EEM pending cache flush interval in 1/10th of second. */
- uint8_t flush_interval;
- /* unused. */
- uint8_t unused1;
- /* Context id of action table scope. */
- uint16_t action_ctx_id;
- /* Table scope id used for action record entries. */
- uint16_t action_tbl_scope;
- /* Context id of lookup table scope. */
- uint16_t lkup_ctx_id;
- /* Table scope id used for EM lookup entries. */
- uint16_t lkup_tbl_scope;
- /*
- * Number of 32B static buckets to be allocated at the beginning
- * of table scope.
- */
- uint32_t lkup_static_buckets;
- /* unused. */
- uint8_t unused2[7];
+ /* The number of FIDs that share this session. */
+ uint16_t fid_cnt;
+ /* Unused field */
+ uint8_t unused0[5];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
*/
uint8_t valid;
} __rte_packed;
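
Once a sid exists, additional functions can be attached to it and the
returned fid_cnt reports how many FIDs now share the session. Again a
hypothetical sketch under the same helper assumptions:

/* Illustrative sketch; request-type value and HWRM helpers are assumed. */
static int
bnxt_hwrm_tfc_session_fid_add_sketch(struct bnxt *bp, uint16_t fid,
				     uint16_t sid, uint16_t *fid_cnt)
{
	struct hwrm_tfc_session_fid_add_input req = { 0 };
	struct hwrm_tfc_session_fid_add_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_TFC_SESSION_FID_ADD, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(fid);	/* function joining the session */
	req.sid = rte_cpu_to_le_16(sid);	/* sid from session_id_alloc */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	*fid_cnt = rte_le_to_cpu_16(resp->fid_cnt);
	HWRM_UNLOCK();
	return rc;
}
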
-/*********************
- * hwrm_tf_em_insert *
- *********************/
+/****************************
+ * hwrm_tfc_session_fid_rem *
+ ****************************/
-/* hwrm_tf_em_insert_input (size:832b/104B) */
-struct hwrm_tf_em_insert_input {
+/*
+ * Disassociate a TFC session from the target_fid.
+ * Though there is no explicit matching free for a
+ * session id alloc, disassociating the last fid from a session id
+ * (fid_cnt goes to 0) will result in this session id being freed
+ * automatically.
+ */
+/* hwrm_tfc_session_fid_rem_input (size:192b/24B) */
+struct hwrm_tfc_session_fid_rem_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -54558,34 +60219,26 @@ struct hwrm_tf_em_insert_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware Session Id. */
- uint32_t fw_session_id;
- /* Control Flags. */
- uint16_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX
- /* Reported match strength. */
- uint16_t strength;
- /* Index to action. */
- uint32_t action_ptr;
- /* Index of EM record. */
- uint32_t em_record_idx;
- /* EM Key value. */
- uint64_t em_key[8];
- /* Number of bits in em_key. */
- uint16_t em_key_bitlen;
- /* unused. */
- uint16_t unused0[3];
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Unique session identifier for the session created by the
+ * firmware.
+ */
+ uint16_t sid;
+ /* Unused field */
+ uint8_t unused0[4];
} __rte_packed;
-/* hwrm_tf_em_insert_output (size:128b/16B) */
-struct hwrm_tf_em_insert_output {
+/* hwrm_tfc_session_fid_rem_output (size:128b/16B) */
+struct hwrm_tfc_session_fid_rem_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -54594,23 +60247,35 @@ struct hwrm_tf_em_insert_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* EM record pointer index. */
- uint16_t rptr_index;
- /* EM record offset 0~3. */
- uint8_t rptr_entry;
- /* Number of word entries consumed by the key. */
- uint8_t num_of_entries;
- /* unused. */
- uint32_t unused0;
+ /* The number of FIDs that share this session. */
+ uint16_t fid_cnt;
+ /* Unused field */
+ uint8_t unused0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
} __rte_packed;
-/**************************
- * hwrm_tf_em_hash_insert *
- **************************/
+/************************
+ * hwrm_tfc_ident_alloc *
+ ************************/
-/* hwrm_tf_em_hash_insert_input (size:1024b/128B) */
-struct hwrm_tf_em_hash_insert_input {
+/*
+ * Requests the firmware to allocate a TFC identifier.
+ * The session id and track_type are passed in. The tracking_id is
+ * either the sid or the target_fid, depending on the track_type.
+ * The resource subtype is also passed in; an id corresponding to all
+ * of these is allocated and returned in the HWRM response.
+ */
+/* hwrm_tfc_ident_alloc_input (size:192b/24B) */
+struct hwrm_tfc_ident_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -54639,34 +60304,54 @@ struct hwrm_tf_em_hash_insert_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware Session Id. */
- uint32_t fw_session_id;
- /* Control Flags. */
- uint16_t flags;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Unique session identifier for the session created by the
+ * firmware. Will be used to track this identifier.
+ */
+ uint16_t sid;
+ /* Control flags. Direction. */
+ uint8_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TFC_IDENT_ALLOC_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TFC_IDENT_ALLOC_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DIR_TX
- /* Number of bits in the EM record. */
- uint16_t em_record_size_bits;
- /* CRC32 hash of key. */
- uint32_t key0_hash;
- /* Lookup3 hash of key. */
- uint32_t key1_hash;
- /* Index of EM record. */
- uint32_t em_record_idx;
- /* Unused. */
- uint32_t unused0;
- /* EM record. */
- uint64_t em_record[11];
+ #define HWRM_TFC_IDENT_ALLOC_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_IDENT_ALLOC_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_IDENT_ALLOC_INPUT_FLAGS_DIR_TX
+ /*
+ * CFA resource subtype. For definitions, please see
+ * cfa_v3/include/cfa_resources.h.
+ */
+ uint8_t subtype;
+ /* Describes the type of tracking tag to be used */
+ uint8_t track_type;
+ /* Invalid track type */
+ #define HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_INVALID \
+ UINT32_C(0x0)
+ /* Tracked by session id */
+ #define HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_SID \
+ UINT32_C(0x1)
+ /* Tracked by function id */
+ #define HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID \
+ UINT32_C(0x2)
+ #define HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_LAST \
+ HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID
+ /* Unused field */
+ uint8_t unused0;
} __rte_packed;
-/* hwrm_tf_em_hash_insert_output (size:128b/16B) */
-struct hwrm_tf_em_hash_insert_output {
+/* hwrm_tfc_ident_alloc_output (size:128b/16B) */
+struct hwrm_tfc_ident_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -54675,23 +60360,37 @@ struct hwrm_tf_em_hash_insert_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* EM record pointer index. */
- uint16_t rptr_index;
- /* EM record offset 0~3. */
- uint8_t rptr_entry;
- /* Number of word entries consumed by the key. */
- uint8_t num_of_entries;
- /* unused. */
- uint32_t unused0;
+ /*
+ * Resource identifier allocated by the firmware using
+ * parameters above.
+ */
+ uint16_t ident_id;
+ /* Unused field */
+ uint8_t unused0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
} __rte_packed;
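
The track_type field decides whether the identifier is reclaimed together
with the session (sid) or with the function (fid). The sketch below allocates
a TX-direction identifier tracked by session id; the subtype value is simply
passed through from cfa_v3/include/cfa_resources.h. As before, the function
name is invented and the request-type macro and HWRM helpers are assumed.

/* Illustrative sketch; request-type value and HWRM helpers are assumed. */
static int
bnxt_hwrm_tfc_ident_alloc_sketch(struct bnxt *bp, uint16_t sid,
				 uint8_t subtype, uint16_t *ident_id)
{
	struct hwrm_tfc_ident_alloc_input req = { 0 };
	struct hwrm_tfc_ident_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_TFC_IDENT_ALLOC, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);	/* this function */
	req.sid = rte_cpu_to_le_16(sid);
	req.flags = HWRM_TFC_IDENT_ALLOC_INPUT_FLAGS_DIR_TX;
	req.subtype = subtype;	/* CFA resource subtype (cfa_resources.h) */
	/* Track by session so the id is reclaimed when the sid goes away. */
	req.track_type = HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_SID;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	*ident_id = rte_le_to_cpu_16(resp->ident_id);
	HWRM_UNLOCK();
	return rc;
}
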
-/*********************
- * hwrm_tf_em_delete *
- *********************/
+/***********************
+ * hwrm_tfc_ident_free *
+ ***********************/
-/* hwrm_tf_em_delete_input (size:832b/104B) */
-struct hwrm_tf_em_delete_input {
+/*
+ * Requests the firmware to free a TFC resource identifier.
+ * A resource subtype and session id are passed in.
+ * A previously allocated identifier corresponding to all of these is
+ * freed, but only after various sanity checks are completed.
+ */
+/* hwrm_tfc_ident_free_input (size:192b/24B) */
+struct hwrm_tfc_ident_free_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -54720,32 +60419,41 @@ struct hwrm_tf_em_delete_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Session Id. */
- uint32_t fw_session_id;
- /* Control flags. */
- uint16_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX
- /* Unused0 */
- uint16_t unused0;
- /* EM internal flow handle. */
- uint64_t flow_handle;
- /* EM Key value */
- uint64_t em_key[8];
- /* Number of bits in em_key. */
- uint16_t em_key_bitlen;
- /* unused. */
- uint16_t unused1[3];
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Unique session identifier for the session created by the
+ * firmware. Will be used to validate this request.
+ */
+ uint16_t sid;
+ /*
+ * CFA resource subtype. For definitions, please see
+ * cfa_v3/include/cfa_resources.h.
+ */
+ uint8_t subtype;
+ /* Control flags. Direction. */
+ uint8_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit set to 0, then it indicates rx flow. */
+ #define HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR_TX
+ /* The resource identifier to be freed */
+ uint16_t ident_id;
} __rte_packed;
-/* hwrm_tf_em_delete_output (size:128b/16B) */
-struct hwrm_tf_em_delete_output {
+/* hwrm_tfc_ident_free_output (size:128b/16B) */
+struct hwrm_tfc_ident_free_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -54754,19 +60462,26 @@ struct hwrm_tf_em_delete_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Original stack allocation index. */
- uint16_t em_index;
- /* unused. */
- uint16_t unused0[3];
+ /* Reserved */
+ uint8_t unused0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
} __rte_packed;
-/*******************
- * hwrm_tf_em_move *
- *******************/
+/**************************
+ * hwrm_tfc_idx_tbl_alloc *
+ **************************/
-/* hwrm_tf_em_move_input (size:320b/40B) */
-struct hwrm_tf_em_move_input {
+/* hwrm_tfc_idx_tbl_alloc_input (size:192b/24B) */
+struct hwrm_tfc_idx_tbl_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -54795,30 +60510,74 @@ struct hwrm_tf_em_move_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Session Id. */
- uint32_t fw_session_id;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Unique session id for the session created by the
+ * firmware. Will be used to track this index table entry
+ * only if track type is track_type_sid.
+ */
+ uint16_t sid;
/* Control flags. */
- uint16_t flags;
+ uint8_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_EM_MOVE_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_EM_MOVE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EM_MOVE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_EM_MOVE_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_EM_MOVE_INPUT_FLAGS_DIR_TX
- /* Number of EM entry blocks */
- uint16_t num_blocks;
- /* New index for entry */
- uint32_t new_index;
- /* Unused */
- uint32_t unused0;
- /* EM internal flow handle. */
- uint64_t flow_handle;
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR_TX
+ /*
+ * This field is blktype specific.
+ * For blktype CFA - CFA resource subtype. For definitions,
+ * please see cfa_v3/include/cfa_resources.h.
+ * For blktype rxp, re_gparse, te_gparse -
+ * Tunnel Type. A value of zero (or unknown) means alloc. A known
+ * value (previously allocated dynamic UPAR for tunnel_type) means
+	 * realloc. A realloc for a previously allocated FID will fail.
+ */
+ uint8_t subtype;
+ /* Describes the type of tracking id to be used */
+ uint8_t track_type;
+ /* Invalid track type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_INVALID \
+ UINT32_C(0x0)
+ /* Tracked by session id */
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_SID \
+ UINT32_C(0x1)
+ /* Tracked by function id */
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID \
+ UINT32_C(0x2)
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_LAST \
+ HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID
+ /* Specifies which block this idx table alloc request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_BLKTYPE_BLKTYPE_CFA \
+ UINT32_C(0x0)
+ /* RXP gparse block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_BLKTYPE_BLKTYPE_RXP \
+ UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE \
+ UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE \
+ UINT32_C(0x3)
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_BLKTYPE_LAST \
+ HWRM_TFC_IDX_TBL_ALLOC_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
} __rte_packed;
-/* hwrm_tf_em_move_output (size:128b/16B) */
-struct hwrm_tf_em_move_output {
+/* hwrm_tfc_idx_tbl_alloc_output (size:128b/16B) */
+struct hwrm_tfc_idx_tbl_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -54827,19 +60586,31 @@ struct hwrm_tf_em_move_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Index of old entry. */
- uint16_t em_index;
- /* unused. */
- uint16_t unused0[3];
+ /*
+ * Index table entry allocated by the firmware using the
+ * parameters above.
+ */
+ uint16_t idx_tbl_id;
+ /* Reserved */
+ uint8_t unused0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
+ */
+ uint8_t valid;
} __rte_packed;
-/********************
- * hwrm_tf_tcam_set *
- ********************/
+/******************************
+ * hwrm_tfc_idx_tbl_alloc_set *
+ ******************************/
-/* hwrm_tf_tcam_set_input (size:1024b/128B) */
-struct hwrm_tf_tcam_set_input {
+/* hwrm_tfc_idx_tbl_alloc_set_input (size:1088b/136B) */
+struct hwrm_tfc_idx_tbl_alloc_set_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -54868,52 +60639,90 @@ struct hwrm_tf_tcam_set_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Unique session id for the session created by the
+ * firmware. Will be used to track this index table entry
+ * only if track type is track_type_sid.
+ */
+ uint16_t sid;
/* Control flags. */
- uint32_t flags;
+ uint8_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR_TX
/*
* Indicate device data is being sent via DMA, the device
- * data is packing does not change.
- */
- #define HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
- /*
- * TCAM type of the resource, defined globally in the
- * hwrm_tf_resc_type enum.
+ * data packing does not change.
*/
- uint32_t type;
- /* Index of TCAM entry. */
- uint16_t idx;
- /* Number of bytes in the TCAM key. */
- uint8_t key_size;
- /* Number of bytes in the TCAM result. */
- uint8_t result_size;
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
/*
- * Offset from which the mask bytes start in the device data
- * array, key offset is always 0.
+ * This field is blktype specific.
+ * For blktype CFA - CFA resource subtype. For definitions,
+ * please see cfa_v3/include/cfa_resources.h.
+ * For blktype rxp, re_gparse, te_gparse -
+ * Tunnel Type. A value of zero (or unknown) means alloc. A known
+ * value (previously allocated dynamic UPAR for tunnel_type) means
+	 * realloc. A realloc for a previously allocated FID will fail.
*/
- uint8_t mask_offset;
- /* Offset from which the result bytes start in the device data array. */
- uint8_t result_offset;
- /* unused. */
- uint8_t unused0[6];
+ uint8_t subtype;
+ /* Describes the type of tracking id to be used */
+ uint8_t track_type;
+ /* Invalid track type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_INVALID \
+ UINT32_C(0x0)
+ /* Tracked by session id */
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_SID \
+ UINT32_C(0x1)
+ /* Tracked by function id */
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID \
+ UINT32_C(0x2)
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_LAST \
+ HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID
+ /* Specifies which block this idx table alloc request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_BLKTYPE_CFA \
+ UINT32_C(0x0)
+ /* RXP gparse block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_BLKTYPE_RXP \
+ UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE \
+ UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE \
+ UINT32_C(0x3)
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_LAST \
+ HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
+ /* The size of the index table entry in bytes. */
+ uint16_t data_size;
+ /* Reserved */
+ uint8_t unused1[6];
+ /* The location of the dma buffer */
+ uint64_t dma_addr;
/*
- * TCAM key located at offset 0, mask located at mask_offset
- * and result at result_offset for the device.
+ * Index table data located at offset 0. If dma bit is set,
+ * then this field contains the DMA buffer pointer.
*/
- uint8_t dev_data[88];
+ uint8_t dev_data[96];
} __rte_packed;
-/* hwrm_tf_tcam_set_output (size:128b/16B) */
-struct hwrm_tf_tcam_set_output {
+/* hwrm_tfc_idx_tbl_alloc_set_output (size:128b/16B) */
+struct hwrm_tfc_idx_tbl_alloc_set_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -54922,26 +60731,31 @@ struct hwrm_tf_tcam_set_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * Index table entry allocated by the firmware using the
+ * parameters above.
+ */
+ uint16_t idx_tbl_id;
+ /* Reserved */
+ uint8_t unused0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
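
The alloc-set form either carries the table entry inline in dev_data (up to
96 bytes here) or, when the DMA flag is set, passes only the host buffer
address in dma_addr. A hypothetical sketch of that choice, with the
request-type macro and HWRM helpers assumed as elsewhere; the direction is
left at RX for brevity.

/* Illustrative sketch; request-type value and HWRM helpers are assumed. */
static int
bnxt_hwrm_tfc_idx_tbl_alloc_set_sketch(struct bnxt *bp, uint16_t sid,
				       uint8_t subtype, const void *data,
				       uint16_t data_size,
				       rte_iova_t data_iova,
				       uint16_t *idx_tbl_id)
{
	struct hwrm_tfc_idx_tbl_alloc_set_input req = { 0 };
	struct hwrm_tfc_idx_tbl_alloc_set_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_TFC_IDX_TBL_ALLOC_SET, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);
	req.sid = rte_cpu_to_le_16(sid);
	req.subtype = subtype;
	req.blktype = HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_BLKTYPE_CFA;
	req.track_type =
		HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_SID;
	req.data_size = rte_cpu_to_le_16(data_size);
	if (data_size <= sizeof(req.dev_data)) {
		/* Small entries travel inline in dev_data. */
		memcpy(req.dev_data, data, data_size);
	} else {
		/* Larger entries are DMAed; only the buffer address is sent. */
		req.flags |= HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DMA;
		req.dma_addr = rte_cpu_to_le_64(data_iova);
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	*idx_tbl_id = rte_le_to_cpu_16(resp->idx_tbl_id);
	HWRM_UNLOCK();
	return rc;
}
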
-/********************
- * hwrm_tf_tcam_get *
- ********************/
+/************************
+ * hwrm_tfc_idx_tbl_set *
+ ************************/
-/* hwrm_tf_tcam_get_input (size:256b/32B) */
-struct hwrm_tf_tcam_get_input {
+/* hwrm_tfc_idx_tbl_set_input (size:1088b/136B) */
+struct hwrm_tfc_idx_tbl_set_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -54970,31 +60784,76 @@ struct hwrm_tf_tcam_get_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
/* Control flags. */
- uint32_t flags;
+ uint8_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_TX
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR_TX
/*
- * TCAM type of the resource, defined globally in the
- * hwrm_tf_resc_type enum.
+ * Indicate device data is being sent via DMA; the device
+ * data packing does not change.
*/
- uint32_t type;
- /* Index of a TCAM entry. */
- uint16_t idx;
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
+ /*
+ * CFA resource subtype. For definitions, please see
+ * cfa_v3/include/cfa_resources.h.
+ */
+ uint8_t subtype;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Session id associated with the firmware. Will be used
+ * for validation if the track type matches.
+ */
+ uint16_t sid;
+ /*
+ * Index table index returned during alloc by the
+ * firmware.
+ */
+ uint16_t idx_tbl_id;
+ /* The size of the index table entry in bytes. */
+ uint16_t data_size;
+ /* Specifies which block this idx table set request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_BLKTYPE_BLKTYPE_CFA \
+ UINT32_C(0x0)
+ /* RXP gparse block type */
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_BLKTYPE_BLKTYPE_RXP \
+ UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE \
+ UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE \
+ UINT32_C(0x3)
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_BLKTYPE_LAST \
+ HWRM_TFC_IDX_TBL_SET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
/* unused. */
- uint16_t unused0;
+ uint8_t unused0[5];
+ /* The location of the dma buffer */
+ uint64_t dma_addr;
+ /*
+ * Index table data located at offset 0. If dma bit is set,
+ * then this field contains the DMA buffer pointer.
+ */
+ uint8_t dev_data[96];
} __rte_packed;
-/* hwrm_tf_tcam_get_output (size:2368b/296B) */
-struct hwrm_tf_tcam_get_output {
+/* hwrm_tfc_idx_tbl_set_output (size:128b/16B) */
+struct hwrm_tfc_idx_tbl_set_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -55003,41 +60862,26 @@ struct hwrm_tf_tcam_get_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Number of bytes in the TCAM key. */
- uint8_t key_size;
- /* Number of bytes in the TCAM entry. */
- uint8_t result_size;
- /* Offset from which the mask bytes start in the device data array. */
- uint8_t mask_offset;
- /* Offset from which the result bytes start in the device data array. */
- uint8_t result_offset;
- /* unused. */
- uint8_t unused0[4];
- /*
- * TCAM key located at offset 0, mask located at mask_offset
- * and result at result_offset for the device.
- */
- uint8_t dev_data[272];
/* unused. */
- uint8_t unused1[7];
+ uint8_t unused0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
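
A minimal usage sketch for the two idx-table set paths defined above (inline dev_data[] vs. the DMA flag), assuming it sits inside the bnxt PMD where the usual HWRM_PREP()/bnxt_hwrm_send_message()/HWRM_CHECK_RESULT()/HWRM_UNLOCK() helpers are in scope and an HWRM_TFC_IDX_TBL_SET request-type token exists alongside these structures; the helper name and the hard-coded CFA block type are illustrative assumptions, not the driver's actual TruFlow v3 message path.

static int
bnxt_tfc_idx_tbl_set_sketch(struct bnxt *bp, uint16_t fid, uint16_t sid,
			    uint16_t idx_tbl_id, uint8_t subtype, bool tx,
			    const uint8_t *data, uint16_t data_size,
			    rte_iova_t data_iova)
{
	struct hwrm_tfc_idx_tbl_set_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_tfc_idx_tbl_set_input req = { 0 };
	int rc;

	HWRM_PREP(&req, HWRM_TFC_IDX_TBL_SET, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(fid);
	req.sid = rte_cpu_to_le_16(sid);
	req.idx_tbl_id = rte_cpu_to_le_16(idx_tbl_id);
	req.subtype = subtype;
	req.blktype = HWRM_TFC_IDX_TBL_SET_INPUT_BLKTYPE_BLKTYPE_CFA;
	req.data_size = rte_cpu_to_le_16(data_size);
	req.flags = tx ? HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR_TX :
			 HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR_RX;

	if (data_size <= sizeof(req.dev_data)) {
		/* Small entries travel inline in dev_data[]. */
		memcpy(req.dev_data, data, data_size);
	} else {
		/* Larger entries must already sit in a DMA-able buffer;
		 * data_iova is its IO address.
		 */
		req.flags |= HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DMA;
		req.dma_addr = rte_cpu_to_le_64(data_iova);
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();	/* checks rc and resp->error_code */
	HWRM_UNLOCK();
	return rc;
}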
-/*********************
- * hwrm_tf_tcam_move *
- *********************/
+/************************
+ * hwrm_tfc_idx_tbl_get *
+ ************************/
-/* hwrm_tf_tcam_move_input (size:1024b/128B) */
-struct hwrm_tf_tcam_move_input {
+/* hwrm_tfc_idx_tbl_get_input (size:320b/40B) */
+struct hwrm_tfc_idx_tbl_get_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -55066,33 +60910,75 @@ struct hwrm_tf_tcam_move_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
/* Control flags. */
- uint32_t flags;
+ uint8_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR \
+ UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR_RX \
+ UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_TCAM_MOVE_INPUT_FLAGS_DIR_TX
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR_TX \
+ UINT32_C(0x1)
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR_TX
/*
- * TCAM type of the resource, defined globally in the
- * hwrm_tf_resc_type enum.
+ * When set, use the special access register access to clear
+ * the table entry on read.
*/
- uint32_t type;
- /* Number of TCAM index pairs to be swapped for the device. */
- uint16_t count;
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_CLEAR_ON_READ \
+ UINT32_C(0x2)
+ /*
+ * CFA resource subtype. For definitions, please see
+ * cfa_v3/include/cfa_resources.h.
+ */
+ uint8_t subtype;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Session id associated with the firmware. Will be used
+ * for validation if the track type matches.
+ */
+ uint16_t sid;
+ /*
+ * Index table index returned during alloc by the
+ * firmware.
+ */
+ uint16_t idx_tbl_id;
+ /* The size of the index table entry buffer in bytes. */
+ uint16_t buffer_size;
+ /* Specifies which block this idx table get request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_BLKTYPE_CFA \
+ UINT32_C(0x0)
+ /* RXP block type */
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_BLKTYPE_RXP \
+ UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE \
+ UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE \
+ UINT32_C(0x3)
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_LAST \
+ HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
/* unused. */
- uint16_t unused0;
- /* TCAM index pairs to be swapped for the device. */
- uint16_t idx_pairs[48];
+ uint8_t unused0[5];
+ /* The location of the response dma buffer */
+ uint64_t dma_addr;
} __rte_packed;
-/* hwrm_tf_tcam_move_output (size:128b/16B) */
-struct hwrm_tf_tcam_move_output {
+/* hwrm_tfc_idx_tbl_get_output (size:128b/16B) */
+struct hwrm_tfc_idx_tbl_get_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -55101,26 +60987,28 @@ struct hwrm_tf_tcam_move_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
+ /* The size of the index table buffer returned in device size bytes. */
+ uint16_t data_size;
+ /* unused */
+ uint8_t unused1[5];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
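
In the same spirit, a sketch of the get side, where the entry data always comes back through the caller-supplied DMA buffer (dma_addr/buffer_size) and the response only reports how many bytes were written; the CLEAR_ON_READ flag could be OR-ed in for read-and-clear use cases. The helper name and request-type token are assumptions as above.

static int
bnxt_tfc_idx_tbl_get_sketch(struct bnxt *bp, uint16_t fid, uint16_t sid,
			    uint16_t idx_tbl_id, uint8_t subtype, bool tx,
			    rte_iova_t buf_iova, uint16_t buf_size,
			    uint16_t *entry_size)
{
	struct hwrm_tfc_idx_tbl_get_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_tfc_idx_tbl_get_input req = { 0 };
	int rc;

	HWRM_PREP(&req, HWRM_TFC_IDX_TBL_GET, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(fid);
	req.sid = rte_cpu_to_le_16(sid);
	req.idx_tbl_id = rte_cpu_to_le_16(idx_tbl_id);
	req.subtype = subtype;
	req.blktype = HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_BLKTYPE_CFA;
	req.flags = tx ? HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR_TX :
			 HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR_RX;
	/* Entry data is returned into the caller's DMA buffer. */
	req.buffer_size = rte_cpu_to_le_16(buf_size);
	req.dma_addr = rte_cpu_to_le_64(buf_iova);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	*entry_size = rte_le_to_cpu_16(resp->data_size);
	HWRM_UNLOCK();
	return rc;
}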
-/*********************
- * hwrm_tf_tcam_free *
- *********************/
+/*************************
+ * hwrm_tfc_idx_tbl_free *
+ *************************/
-/* hwrm_tf_tcam_free_input (size:1024b/128B) */
-struct hwrm_tf_tcam_free_input {
+/* hwrm_tfc_idx_tbl_free_input (size:256b/32B) */
+struct hwrm_tfc_idx_tbl_free_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -55149,33 +61037,59 @@ struct hwrm_tf_tcam_free_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
/* Control flags. */
- uint32_t flags;
+ uint8_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR_TX
/*
- * TCAM type of the resource, defined globally in the
- * hwrm_tf_resc_type enum.
+ * CFA resource subtype. For definitions, please see
+ * cfa_v3/include/cfa_resources.h.
*/
- uint32_t type;
- /* Number of TCAM index to be deleted for the device. */
- uint16_t count;
+ uint8_t subtype;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Session id associated with the firmware. Will be used
+ * for validation if the track type matches.
+ */
+ uint16_t sid;
+ /* Index table id to be freed by the firmware. */
+ uint16_t idx_tbl_id;
+ /* Specifies which block this idx table free request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_BLKTYPE_BLKTYPE_CFA \
+ UINT32_C(0x0)
+ /* RXP block type */
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_BLKTYPE_BLKTYPE_RXP \
+ UINT32_C(0x1)
+ /* RE parse block type */
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE \
+ UINT32_C(0x2)
+ /* TE parse block type */
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE \
+ UINT32_C(0x3)
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_BLKTYPE_LAST \
+ HWRM_TFC_IDX_TBL_FREE_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
/* unused. */
- uint16_t unused0;
- /* TCAM index list to be deleted for the device. */
- uint16_t idx_list[48];
+ uint8_t unused0[7];
} __rte_packed;
-/* hwrm_tf_tcam_free_output (size:128b/16B) */
-struct hwrm_tf_tcam_free_output {
+/* hwrm_tfc_idx_tbl_free_output (size:128b/16B) */
+struct hwrm_tfc_idx_tbl_free_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -55184,26 +61098,52 @@ struct hwrm_tf_tcam_free_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
+ /* Reserved */
uint8_t unused0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
-/**************************
- * hwrm_tf_global_cfg_set *
- **************************/
+/* TruFlow resources request for a global id. */
+/* tfc_global_id_hwrm_req (size:64b/8B) */
+struct tfc_global_id_hwrm_req {
+ /* Type of the resource, as defined in the HCAPI RM enum cfa_resource_type. */
+ uint16_t rtype;
+ /* Indicates the flow direction, expressed as an enum cfa_dir value. */
+ uint16_t dir;
+ /* Subtype of the resource type. */
+ uint16_t subtype;
+ /* Number of resources of this type. */
+ uint16_t cnt;
+} __rte_packed;
+
+/* The reserved resources for the global id. */
+/* tfc_global_id_hwrm_rsp (size:64b/8B) */
+struct tfc_global_id_hwrm_rsp {
+ /* Type of the resource, as defined in the HCAPI RM enum cfa_resource_type. */
+ uint16_t rtype;
+ /* Indicates the flow direction, expressed as an enum cfa_dir value. */
+ uint16_t dir;
+ /* Subtype of the resource type. */
+ uint16_t subtype;
+ /* The global id that the resources are reserved for. */
+ uint16_t id;
+} __rte_packed;
+
+/****************************
+ * hwrm_tfc_global_id_alloc *
+ ****************************/
-/* hwrm_tf_global_cfg_set_input (size:448b/56B) */
-struct hwrm_tf_global_cfg_set_input {
+/* hwrm_tfc_global_id_alloc_input (size:320b/40B) */
+struct hwrm_tfc_global_id_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -55232,34 +61172,43 @@ struct hwrm_tf_global_cfg_set_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
- /* Control flags. */
- uint32_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_TX
- /* Global Cfg type */
- uint32_t type;
- /* Offset of the type */
- uint32_t offset;
- /* Size of the data to set in bytes */
- uint16_t size;
- /* unused. */
- uint8_t unused0[6];
- /* Data to set */
- uint8_t data[8];
- /* Mask of data to set, 0 indicates no mask */
- uint8_t mask[8];
+ uint16_t sid;
+ /* Global domain id. */
+ uint16_t global_id;
+ /*
+ * Defines the array size of the provided req_addr and
+ * resc_addr array buffers. Should be set to the number of
+ * request entries.
+ */
+ uint16_t req_cnt;
+ /*
+ * This is the DMA address for the request input data array
+ * buffer. Array is of tfc_global_id_hwrm_req type. Size of the
+ * array buffer is provided by the 'req_cnt' field in this
+ * message.
+ */
+ uint64_t req_addr;
+ /*
+ * This is the DMA address for the resc output data array
+ * buffer. Array is of tfc_global_id_hwrm_rsp type. Size of the array
+ * buffer is provided by the 'req_cnt' field in this
+ * message.
+ */
+ uint64_t resc_addr;
} __rte_packed;
-/* hwrm_tf_global_cfg_set_output (size:128b/16B) */
-struct hwrm_tf_global_cfg_set_output {
+/* hwrm_tfc_global_id_alloc_output (size:128b/16B) */
+struct hwrm_tfc_global_id_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -55268,26 +61217,35 @@ struct hwrm_tf_global_cfg_set_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
+ /*
+ * Number of entries in the returned tfc_global_id_hwrm_rsp data
+ * array. The value cannot exceed the req_cnt defined by the input
+ * msg. The data array is returned using the DMA address specified
+ * in the resc_addr field of the input msg.
+ */
+ uint16_t rsp_cnt;
+ /* Non-zero if this is the first allocation for the global ID. */
+ uint8_t first;
/* unused. */
- uint8_t unused0[7];
+ uint8_t unused0[4];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
} __rte_packed;
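
A sketch of how the global-id allocation above could be driven: the request array (struct tfc_global_id_hwrm_req) and the reply array (struct tfc_global_id_hwrm_rsp) both live in DMA-able memory, with req_addr/resc_addr carrying their IO addresses and req_cnt the number of entries, and the firmware reports rsp_cnt plus whether this was the first allocation for the global id. The helper name, buffer management and request-type token are assumptions.

static int
bnxt_tfc_global_id_alloc_sketch(struct bnxt *bp, uint16_t fid, uint16_t sid,
				uint16_t global_id, rte_iova_t req_iova,
				rte_iova_t rsp_iova, uint16_t cnt,
				uint16_t *rsp_cnt, bool *first)
{
	struct hwrm_tfc_global_id_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_tfc_global_id_alloc_input req = { 0 };
	int rc;

	/* req_iova points at cnt tfc_global_id_hwrm_req entries prepared by
	 * the caller; rsp_iova points at room for cnt tfc_global_id_hwrm_rsp
	 * entries that the firmware will fill in.
	 */
	HWRM_PREP(&req, HWRM_TFC_GLOBAL_ID_ALLOC, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(fid);
	req.sid = rte_cpu_to_le_16(sid);
	req.global_id = rte_cpu_to_le_16(global_id);
	req.req_cnt = rte_cpu_to_le_16(cnt);
	req.req_addr = rte_cpu_to_le_64(req_iova);
	req.resc_addr = rte_cpu_to_le_64(rsp_iova);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	*rsp_cnt = rte_le_to_cpu_16(resp->rsp_cnt);
	*first = resp->first != 0;
	HWRM_UNLOCK();
	return rc;
}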
-/**************************
- * hwrm_tf_global_cfg_get *
- **************************/
+/*********************
+ * hwrm_tfc_tcam_set *
+ *********************/
-/* hwrm_tf_global_cfg_get_input (size:320b/40B) */
-struct hwrm_tf_global_cfg_get_input {
+/* hwrm_tfc_tcam_set_input (size:1088b/136B) */
+struct hwrm_tfc_tcam_set_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -55316,30 +61274,56 @@ struct hwrm_tf_global_cfg_get_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Session id associated with the firmware. Will be used
+ * for validation if the track type matches.
+ */
+ uint16_t sid;
+ /* Logical TCAM ID. */
+ uint16_t tcam_id;
+ /* Number of bytes in the TCAM key. */
+ uint16_t key_size;
+ /* Number of bytes in the TCAM result. */
+ uint16_t result_size;
/* Control flags. */
- uint32_t flags;
+ uint8_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TFC_TCAM_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TFC_TCAM_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR_TX
- /* Global Cfg type */
- uint32_t type;
- /* Offset of the type */
- uint32_t offset;
- /* Size of the data to set in bytes */
- uint16_t size;
+ #define HWRM_TFC_TCAM_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_TCAM_SET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_TCAM_SET_INPUT_FLAGS_DIR_TX
+ /* Indicate device data is being sent via DMA. */
+ #define HWRM_TFC_TCAM_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
+ /*
+ * Subtype of TCAM resource. See
+ * cfa_v3/include/cfa_resources.h.
+ */
+ uint8_t subtype;
/* unused. */
- uint8_t unused0[6];
+ uint8_t unused0[4];
+ /* The location of the dma buffer when the DMA flag is set. */
+ uint64_t dma_addr;
+ /*
+ * TCAM key located at offset 0, mask located at mask_offset
+ * and result at result_offset for the device.
+ */
+ uint8_t dev_data[96];
} __rte_packed;
-/* hwrm_tf_global_cfg_get_output (size:256b/32B) */
-struct hwrm_tf_global_cfg_get_output {
+/* hwrm_tfc_tcam_set_output (size:128b/16B) */
+struct hwrm_tfc_tcam_set_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -55348,21 +61332,26 @@ struct hwrm_tf_global_cfg_get_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Size of the data read in bytes */
- uint16_t size;
/* unused. */
- uint8_t unused0[6];
- /* Data to set */
- uint8_t data[16];
+ uint8_t unused0[7];
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been
+ * completely written. When writing a command completion or
+ * response to an internal processor, the order of writes has
+ * to be such that this field is written last.
+ */
+ uint8_t valid;
} __rte_packed;
-/**********************
- * hwrm_tf_if_tbl_get *
- **********************/
+/*********************
+ * hwrm_tfc_tcam_get *
+ *********************/
-/* hwrm_tf_if_tbl_get_input (size:256b/32B) */
-struct hwrm_tf_if_tbl_get_input {
+/* hwrm_tfc_tcam_get_input (size:192b/24B) */
+struct hwrm_tfc_tcam_get_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -55391,31 +61380,41 @@ struct hwrm_tf_if_tbl_get_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
/* Control flags. */
- uint16_t flags;
+ uint8_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TFC_TCAM_GET_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TFC_TCAM_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR_TX
- /* Size of the data to set. */
- uint16_t size;
+ #define HWRM_TFC_TCAM_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_TCAM_GET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_TCAM_GET_INPUT_FLAGS_DIR_TX
/*
- * Type of the resource, defined globally in the
- * hwrm_tf_resc_type enum.
+ * Subtype of TCAM resource. See
+ * cfa_v3/include/cfa_resources.h.
*/
- uint32_t type;
- /* Index of the type to retrieve. */
- uint32_t index;
+ uint8_t subtype;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Session id associated with the firmware. Will be used
+ * for validation if the track type matches.
+ */
+ uint16_t sid;
+ /* Logical TCAM ID. */
+ uint16_t tcam_id;
} __rte_packed;
-/* hwrm_tf_if_tbl_get_output (size:1216b/152B) */
-struct hwrm_tf_if_tbl_get_output {
+/* hwrm_tfc_tcam_get_output (size:2368b/296B) */
+struct hwrm_tfc_tcam_get_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -55424,34 +61423,37 @@ struct hwrm_tf_if_tbl_get_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Response code. */
- uint32_t resp_code;
- /* Response size. */
- uint16_t size;
- /* unused */
- uint16_t unused0;
- /* Response data. */
- uint8_t data[128];
- /* unused */
+ /* Number of bytes in the TCAM key. */
+ uint16_t key_size;
+ /* Number of bytes in the TCAM result. */
+ uint16_t result_size;
+ /* unused. */
+ uint8_t unused0[4];
+ /*
+ * TCAM key located at offset 0, mask located at key_size
+ * and result at 2 * key_size for the device.
+ */
+ uint8_t dev_data[272];
+ /* unused. */
uint8_t unused1[7];
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been
+ * completely written. When writing a command completion or
+ * response to an internal processor, the order of writes has
+ * to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/***************************
- * hwrm_tf_if_tbl_type_set *
- ***************************/
+/***********************
+ * hwrm_tfc_tcam_alloc *
+ ***********************/
-/* hwrm_tf_if_tbl_set_input (size:1024b/128B) */
-struct hwrm_tf_if_tbl_set_input {
+/* hwrm_tfc_tcam_alloc_input (size:256b/32B) */
+struct hwrm_tfc_tcam_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -55480,37 +61482,59 @@ struct hwrm_tf_if_tbl_set_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
/* Control flags. */
- uint16_t flags;
+ uint8_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TFC_TCAM_ALLOC_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TFC_TCAM_ALLOC_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR_TX
- /* unused. */
- uint8_t unused0[2];
+ #define HWRM_TFC_TCAM_ALLOC_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_TCAM_ALLOC_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_TCAM_ALLOC_INPUT_FLAGS_DIR_TX
/*
- * Type of the resource, defined globally in the
- * hwrm_tf_resc_type enum.
+ * Subtype of TCAM resource. See
+ * cfa_v3/include/cfa_resources.h.
*/
- uint32_t type;
- /* Index of the type to set. */
- uint32_t index;
- /* Size of the data to set. */
- uint16_t size;
- /* unused */
- uint8_t unused1[6];
- /* Data to be set. */
- uint8_t data[88];
+ uint8_t subtype;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Unique session id for the session created by the
+ * firmware. Will be used to track this TCAM entry
+ * only if track type is track_type_sid.
+ */
+ uint16_t sid;
+ /* Number of bytes in the TCAM key. */
+ uint16_t key_size;
+ /* Entry priority. */
+ uint16_t priority;
+ /* Describes the type of tracking id to be used */
+ uint8_t track_type;
+ /* Invalid track type */
+ #define HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_INVALID \
+ UINT32_C(0x0)
+ /* Tracked by session id */
+ #define HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_SID \
+ UINT32_C(0x1)
+ /* Tracked by function id */
+ #define HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID \
+ UINT32_C(0x2)
+ #define HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_LAST \
+ HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID
+ /* Unused. */
+ uint8_t unused0[5];
} __rte_packed;
-/* hwrm_tf_if_tbl_set_output (size:128b/16B) */
-struct hwrm_tf_if_tbl_set_output {
+/* hwrm_tfc_tcam_alloc_output (size:128b/16B) */
+struct hwrm_tfc_tcam_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -55519,8 +61543,13 @@ struct hwrm_tf_if_tbl_set_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
+ /*
+ * TCAM entry index allocated by the firmware using the
+ * parameters above.
+ */
+ uint16_t idx;
+ /* Reserved */
+ uint8_t unused0[5];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -55532,13 +61561,13 @@ struct hwrm_tf_if_tbl_set_output {
uint8_t valid;
} __rte_packed;
-/*****************************
- * hwrm_tf_tbl_type_bulk_get *
- *****************************/
+/***************************
+ * hwrm_tfc_tcam_alloc_set *
+ ***************************/
-/* hwrm_tf_tbl_type_bulk_get_input (size:384b/48B) */
-struct hwrm_tf_tbl_type_bulk_get_input {
+/* hwrm_tfc_tcam_alloc_set_input (size:1088b/136B) */
+struct hwrm_tfc_tcam_alloc_set_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -55567,46 +61596,70 @@ struct hwrm_tf_tbl_type_bulk_get_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
/* Control flags. */
- uint16_t flags;
+ uint8_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR \
- UINT32_C(0x1)
+ #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_RX \
- UINT32_C(0x0)
+ #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_TX \
- UINT32_C(0x1)
- #define HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_DIR_TX
+ #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR_TX
+ /* Indicate device data is being sent via DMA. */
+ #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
/*
- * When set use the special access register access to clear
- * the table entries on read.
+ * Subtype of TCAM resource. See
+ * cfa_v3/include/cfa_resources.h.
*/
- #define HWRM_TF_TBL_TYPE_BULK_GET_INPUT_FLAGS_CLEAR_ON_READ \
+ uint8_t subtype;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Unique session id for the session created by the
+ * firmware. Will be used to track this TCAM entry
+ * only if track type is track_type_sid.
+ */
+ uint16_t sid;
+ /* Number of bytes in the TCAM key. */
+ uint16_t key_size;
+ /* Number of bytes in the TCAM result. */
+ uint16_t result_size;
+ /* Entry priority. */
+ uint16_t priority;
+ /* Describes the type of tracking id to be used */
+ uint8_t track_type;
+ /* Invalid track type */
+ #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_INVALID \
+ UINT32_C(0x0)
+ /* Tracked by session id */
+ #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_SID \
+ UINT32_C(0x1)
+ /* Tracked by function id */
+ #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID \
UINT32_C(0x2)
- /* unused. */
- uint8_t unused0[2];
+ #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_LAST \
+ HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID
+ /* Unused */
+ uint8_t unused[3];
+ /* The location of the dma buffer when the DMA flag is set. */
+ uint64_t dma_addr;
/*
- * Type of the resource, defined globally in the
- * hwrm_tf_resc_type enum.
+ * TCAM data located at offset 0. If dma bit is set,
+ * then this field contains the DMA buffer pointer.
*/
- uint32_t type;
- /* Starting index of the type to retrieve. */
- uint32_t start_index;
- /* Number of entries to retrieve. */
- uint32_t num_entries;
- /* Number of entries to retrieve. */
- uint32_t unused1;
- /* Host memory where data will be stored. */
- uint64_t host_addr;
+ uint8_t dev_data[96];
} __rte_packed;
-/* hwrm_tf_tbl_type_bulk_get_output (size:128b/16B) */
-struct hwrm_tf_tbl_type_bulk_get_output {
+/* hwrm_tfc_tcam_alloc_set_output (size:128b/16B) */
+struct hwrm_tfc_tcam_alloc_set_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -55615,12 +61668,10 @@ struct hwrm_tf_tbl_type_bulk_get_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Response code. */
- uint32_t resp_code;
- /* Response size. */
- uint16_t size;
- /* unused */
- uint8_t unused0;
+ /* Logical TCAM ID. */
+ uint16_t tcam_id;
+ /* Reserved */
+ uint8_t unused0[5];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -55632,13 +61683,13 @@ struct hwrm_tf_tbl_type_bulk_get_output {
uint8_t valid;
} __rte_packed;
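
A sketch of a combined TCAM alloc+set for entries small enough to travel inline. The packing of key, mask and result inside dev_data[] is assumed to mirror the layout documented for hwrm_tfc_tcam_get_output (key at offset 0, mask at key_size, result at 2 * key_size); the helper name, the SID-based tracking choice and the request-type token are likewise assumptions.

static int
bnxt_tfc_tcam_alloc_set_sketch(struct bnxt *bp, uint16_t fid, uint16_t sid,
			       uint8_t subtype, bool tx, uint16_t priority,
			       const uint8_t *key, const uint8_t *mask,
			       uint16_t key_size, const uint8_t *result,
			       uint16_t result_size, uint16_t *tcam_id)
{
	struct hwrm_tfc_tcam_alloc_set_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_tfc_tcam_alloc_set_input req = { 0 };
	int rc;

	if (2 * key_size + result_size > sizeof(req.dev_data))
		return -EINVAL;	/* larger entries would need the DMA flag */

	HWRM_PREP(&req, HWRM_TFC_TCAM_ALLOC_SET, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(fid);
	req.sid = rte_cpu_to_le_16(sid);
	req.subtype = subtype;
	req.priority = rte_cpu_to_le_16(priority);
	req.key_size = rte_cpu_to_le_16(key_size);
	req.result_size = rte_cpu_to_le_16(result_size);
	req.track_type = HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_SID;
	req.flags = tx ? HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR_TX :
			 HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR_RX;

	/* Assumed dev_data[] layout: key, then mask, then result. */
	memcpy(&req.dev_data[0], key, key_size);
	memcpy(&req.dev_data[key_size], mask, key_size);
	memcpy(&req.dev_data[2 * key_size], result, result_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	*tcam_id = rte_le_to_cpu_16(resp->tcam_id);
	HWRM_UNLOCK();
	return rc;
}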
-/***********************************
- * hwrm_tf_session_hotup_state_set *
- ***********************************/
+/**********************
+ * hwrm_tfc_tcam_free *
+ **********************/
-/* hwrm_tf_session_hotup_state_set_input (size:192b/24B) */
-struct hwrm_tf_session_hotup_state_set_input {
+/* hwrm_tfc_tcam_free_input (size:192b/24B) */
+struct hwrm_tfc_tcam_free_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -55667,27 +61718,41 @@ struct hwrm_tf_session_hotup_state_set_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
- /* Shared session state. */
- uint16_t state;
/* Control flags. */
- uint16_t flags;
+ uint8_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_SESSION_HOTUP_STATE_SET_INPUT_FLAGS_DIR \
- UINT32_C(0x1)
+ #define HWRM_TFC_TCAM_FREE_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_SESSION_HOTUP_STATE_SET_INPUT_FLAGS_DIR_RX \
- UINT32_C(0x0)
+ #define HWRM_TFC_TCAM_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_SESSION_HOTUP_STATE_SET_INPUT_FLAGS_DIR_TX \
- UINT32_C(0x1)
- #define HWRM_TF_SESSION_HOTUP_STATE_SET_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_SESSION_HOTUP_STATE_SET_INPUT_FLAGS_DIR_TX
+ #define HWRM_TFC_TCAM_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_TCAM_FREE_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_TCAM_FREE_INPUT_FLAGS_DIR_TX
+ /*
+ * Subtype of TCAM resource. See
+ * cfa_v3/include/cfa_resources.h.
+ */
+ uint8_t subtype;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
+ * Session id associated with the firmware. Will be used
+ * for validation if the track type matches.
+ */
+ uint16_t sid;
+ /* Logical TCAM ID. */
+ uint16_t tcam_id;
} __rte_packed;
-/* hwrm_tf_session_hotup_state_set_output (size:128b/16B) */
-struct hwrm_tf_session_hotup_state_set_output {
+/* hwrm_tfc_tcam_free_output (size:128b/16B) */
+struct hwrm_tfc_tcam_free_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -55696,7 +61761,7 @@ struct hwrm_tf_session_hotup_state_set_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
+ /* Reserved */
uint8_t unused0[7];
/*
* This field is used in Output records to indicate that the output
@@ -55709,13 +61774,13 @@ struct hwrm_tf_session_hotup_state_set_output {
uint8_t valid;
} __rte_packed;
-/***********************************
- * hwrm_tf_session_hotup_state_get *
- ***********************************/
+/***********************
+ * hwrm_tfc_if_tbl_set *
+ ***********************/
-/* hwrm_tf_session_hotup_state_get_input (size:192b/24B) */
-struct hwrm_tf_session_hotup_state_get_input {
+/* hwrm_tfc_if_tbl_set_input (size:960b/120B) */
+struct hwrm_tfc_if_tbl_set_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -55744,27 +61809,37 @@ struct hwrm_tf_session_hotup_state_get_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
+ /* Session identifier. */
+ uint16_t sid;
+ /* Function identifier. */
+ uint16_t fid;
+ /*
+ * Subtype identifying IF table type. See
+ * cfa_v3/include/cfa_resources.h.
+ */
+ uint8_t subtype;
/* Control flags. */
- uint16_t flags;
+ uint8_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_SESSION_HOTUP_STATE_GET_INPUT_FLAGS_DIR \
- UINT32_C(0x1)
+ #define HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_SESSION_HOTUP_STATE_GET_INPUT_FLAGS_DIR_RX \
- UINT32_C(0x0)
+ #define HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_SESSION_HOTUP_STATE_GET_INPUT_FLAGS_DIR_TX \
- UINT32_C(0x1)
- #define HWRM_TF_SESSION_HOTUP_STATE_GET_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_SESSION_HOTUP_STATE_GET_INPUT_FLAGS_DIR_TX
- /* unused. */
- uint8_t unused0[2];
+ #define HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR_TX
+ /* Table entry index. */
+ uint16_t index;
+ /* Size of data in data field. */
+ uint8_t data_size;
+ /* Reserved */
+ uint8_t unused0[7];
+ /* Table data. */
+ uint8_t data[88];
} __rte_packed;
-/* hwrm_tf_session_hotup_state_get_output (size:128b/16B) */
-struct hwrm_tf_session_hotup_state_get_output {
+/* hwrm_tfc_if_tbl_set_output (size:128b/16B) */
+struct hwrm_tfc_if_tbl_set_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -55773,12 +61848,8 @@ struct hwrm_tf_session_hotup_state_get_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Shared session HA state. */
- uint16_t state;
- /* Shared session HA reference count. */
- uint16_t ref_cnt;
- /* unused. */
- uint8_t unused0[3];
+ /* Reserved */
+ uint8_t unused0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -55790,13 +61861,13 @@ struct hwrm_tf_session_hotup_state_get_output {
uint8_t valid;
} __rte_packed;
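
A sketch of writing one IF-table entry through the message above; the payload fits entirely in the inline data[] array (this command has no DMA variant), and the helper name and request-type token are assumptions as before.

static int
bnxt_tfc_if_tbl_set_sketch(struct bnxt *bp, uint16_t sid, uint16_t fid,
			   uint8_t subtype, bool tx, uint16_t index,
			   const uint8_t *data, uint8_t data_size)
{
	struct hwrm_tfc_if_tbl_set_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_tfc_if_tbl_set_input req = { 0 };
	int rc;

	if (data_size > sizeof(req.data))
		return -EINVAL;

	HWRM_PREP(&req, HWRM_TFC_IF_TBL_SET, BNXT_USE_CHIMP_MB);
	req.sid = rte_cpu_to_le_16(sid);
	req.fid = rte_cpu_to_le_16(fid);
	req.subtype = subtype;
	req.index = rte_cpu_to_le_16(index);
	req.data_size = data_size;
	req.flags = tx ? HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR_TX :
			 HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR_RX;
	memcpy(req.data, data, data_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}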
-/**************************
- * hwrm_tf_resc_usage_set *
- **************************/
+/***********************
+ * hwrm_tfc_if_tbl_get *
+ ***********************/
-/* hwrm_tf_resc_usage_set_input (size:1024b/128B) */
-struct hwrm_tf_resc_usage_set_input {
+/* hwrm_tfc_if_tbl_get_input (size:256b/32B) */
+struct hwrm_tfc_if_tbl_get_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -55825,56 +61896,35 @@ struct hwrm_tf_resc_usage_set_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
+ /* Session identifier. */
+ uint16_t sid;
+ /* Function identifier. */
+ uint16_t fid;
+ /*
+ * Subtype identifying IF table type. See
+ * cfa_v3/include/cfa_resources.h.
+ */
+ uint8_t subtype;
/* Control flags. */
- uint16_t flags;
+ uint8_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TFC_IF_TBL_GET_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TFC_IF_TBL_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_TX
- /* Indicate table data is being sent via DMA. */
- #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
- /* Types of the resource to set their usage state. */
- uint16_t types;
- /* WC TCAM Pool */
- #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_WC_TCAM \
- UINT32_C(0x1)
- /* EM Internal Memory Pool */
- #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_EM \
- UINT32_C(0x2)
- /* Meter Instance */
- #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_METER \
- UINT32_C(0x4)
- /* Counter Record Table */
- #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_COUNTER \
- UINT32_C(0x8)
- /* Action Record Table */
- #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ACTION \
- UINT32_C(0x10)
- /* ACT MODIFY/ENCAP Record Table */
- #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ACT_MOD_ENCAP \
- UINT32_C(0x20)
- /* Source Property SMAC Record Table */
- #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_SP_SMAC \
- UINT32_C(0x40)
- /* All Resource Types */
- #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ALL \
- UINT32_C(0x80)
- /* Size of the data to set. */
- uint16_t size;
- /* unused */
- uint8_t unused1[6];
- /* Data to be set. */
- uint8_t data[96];
+ #define HWRM_TFC_IF_TBL_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_IF_TBL_GET_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_IF_TBL_GET_INPUT_FLAGS_DIR_TX
+ /* Table entry index. */
+ uint16_t index;
+ /* Size of data in data field. */
+ uint8_t data_size;
+ /* Reserved */
+ uint8_t unused0[7];
} __rte_packed;
-/* hwrm_tf_resc_usage_set_output (size:128b/16B) */
-struct hwrm_tf_resc_usage_set_output {
+/* hwrm_tfc_if_tbl_get_output (size:960b/120B) */
+struct hwrm_tfc_if_tbl_get_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -55883,8 +61933,35 @@ struct hwrm_tf_resc_usage_set_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
+ /* Session identifier. */
+ uint16_t sid;
+ /* Function identifier. */
+ uint16_t fid;
+ /*
+ * Subtype identifying IF table type. See
+ * cfa_v3/include/cfa_resources.h.
+ */
+ uint8_t subtype;
+ /* Control flags. */
+ uint8_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TFC_IF_TBL_GET_OUTPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit set to 0, then it indicates rx flow. */
+ #define HWRM_TFC_IF_TBL_GET_OUTPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TFC_IF_TBL_GET_OUTPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_IF_TBL_GET_OUTPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_IF_TBL_GET_OUTPUT_FLAGS_DIR_TX
+ /* Table entry index. */
+ uint16_t index;
+ /* Size of data in data field. */
+ uint8_t data_size;
+ /* Reserved */
uint8_t unused0[7];
+ /* Table data. */
+ uint8_t data[88];
+ /* Reserved */
+ uint8_t unused1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -55896,13 +61973,14 @@ struct hwrm_tf_resc_usage_set_output {
uint8_t valid;
} __rte_packed;
-/****************************
- * hwrm_tf_resc_usage_query *
- ****************************/
+/*********************************
+ * hwrm_tfc_tbl_scope_config_get *
+ *********************************/
-/* hwrm_tf_resc_usage_query_input (size:256b/32B) */
-struct hwrm_tf_resc_usage_query_input {
+/* TruFlow command to return whether the table scope is fully configured. */
+/* hwrm_tfc_tbl_scope_config_get_input (size:192b/24B) */
+struct hwrm_tfc_tbl_scope_config_get_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -55931,52 +62009,14 @@ struct hwrm_tf_resc_usage_query_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
- /* Control flags. */
- uint16_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_LAST \
- HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX
+ /* The table scope ID. */
+ uint8_t tsid;
/* unused. */
- uint8_t unused0[2];
- /* Types of the resource to retrieve their usage state. */
- uint16_t types;
- /* WC TCAM Pool */
- #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_WC_TCAM \
- UINT32_C(0x1)
- /* EM Internal Memory Pool */
- #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_EM \
- UINT32_C(0x2)
- /* Meter Instance */
- #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_METER \
- UINT32_C(0x4)
- /* Counter Record Table */
- #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_COUNTER \
- UINT32_C(0x8)
- /* Action Record Table */
- #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_ACTION \
- UINT32_C(0x10)
- /* ACT MODIFY/ENCAP Record Table */
- #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_ACT_MOD_ENCAP \
- UINT32_C(0x20)
- /* Source Property SMAC Record Table */
- #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_SP_SMAC \
- UINT32_C(0x40)
- /* All Resource Types */
- #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_ALL \
- UINT32_C(0x80)
- /* unused */
- uint8_t unused1[6];
+ uint8_t unused0[7];
} __rte_packed;
-/* hwrm_tf_resc_usage_query_output (size:960b/120B) */
-struct hwrm_tf_resc_usage_query_output {
+/* hwrm_tfc_tbl_scope_config_get_output (size:128b/16B) */
+struct hwrm_tfc_tbl_scope_config_get_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -55985,16 +62025,10 @@ struct hwrm_tf_resc_usage_query_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Response code. */
- uint32_t resp_code;
- /* Response size. */
- uint16_t size;
- /* unused */
- uint16_t unused0;
- /* Response data. */
- uint8_t data[96];
- /* unused */
- uint8_t unused1[7];
+ /* If set to 1, the table scope is configured. */
+ uint8_t configured;
+ /* unused. */
+ uint8_t unused0[6];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -56006,17 +62040,13 @@ struct hwrm_tf_resc_usage_query_output {
uint8_t valid;
} __rte_packed;
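
Because the response is a single 'configured' flag, the natural consumer is a poll loop that waits for the table scope backing store to become usable. A sketch follows, with the helper name, retry count and 1 ms back-off chosen arbitrarily and the request-type token assumed to exist.

static int
bnxt_tfc_tbl_scope_wait_configured(struct bnxt *bp, uint8_t tsid, int retries)
{
	struct hwrm_tfc_tbl_scope_config_get_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct hwrm_tfc_tbl_scope_config_get_input req = { 0 };
	int rc;

	do {
		HWRM_PREP(&req, HWRM_TFC_TBL_SCOPE_CONFIG_GET,
			  BNXT_USE_CHIMP_MB);
		req.tsid = tsid;
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
					    BNXT_USE_CHIMP_MB);
		HWRM_CHECK_RESULT();
		if (resp->configured) {
			HWRM_UNLOCK();
			return 0;	/* table scope fully configured */
		}
		HWRM_UNLOCK();
		rte_delay_ms(1);
	} while (--retries > 0);

	return -ETIMEDOUT;
}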
-/****************************
- * hwrm_tfc_tbl_scope_qcaps *
- ****************************/
+/*****************************
+ * hwrm_tfc_resc_usage_query *
+ *****************************/
-/*
- * TruFlow command to check if firmware is capable of
- * supporting table scopes.
- */
-/* hwrm_tfc_tbl_scope_qcaps_input (size:128b/16B) */
-struct hwrm_tfc_tbl_scope_qcaps_input {
+/* hwrm_tfc_resc_usage_query_input (size:256b/32B) */
+struct hwrm_tfc_resc_usage_query_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -56045,10 +62075,41 @@ struct hwrm_tfc_tbl_scope_qcaps_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* Session identifier. */
+ uint16_t sid;
+ /* Function identifier. */
+ uint16_t fid;
+ /* Control flags. */
+ uint8_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit set to 0, then it indicates rx flow. */
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_LAST \
+ HWRM_TFC_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX
+ /* Describes the type of tracking id to be used */
+ uint8_t track_type;
+ /* Invalid track type */
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_TRACK_TYPE_TRACK_TYPE_INVALID \
+ UINT32_C(0x0)
+ /* Tracked by session id */
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_TRACK_TYPE_TRACK_TYPE_SID \
+ UINT32_C(0x1)
+ /* Tracked by function id */
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_TRACK_TYPE_TRACK_TYPE_FID \
+ UINT32_C(0x2)
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_TRACK_TYPE_LAST \
+ HWRM_TFC_RESC_USAGE_QUERY_INPUT_TRACK_TYPE_TRACK_TYPE_FID
+ /* Size of data in data field. */
+ uint16_t data_size;
+ /* unused */
+ uint8_t unused1[8];
} __rte_packed;
-/* hwrm_tfc_tbl_scope_qcaps_output (size:192b/24B) */
-struct hwrm_tfc_tbl_scope_qcaps_output {
+/* hwrm_tfc_resc_usage_query_output (size:960b/120B) */
+struct hwrm_tfc_resc_usage_query_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -56057,25 +62118,16 @@ struct hwrm_tfc_tbl_scope_qcaps_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * The maximum number of lookup records that a table scope can support.
- * This field is only valid if tbl_scope_capable is not zero.
- */
- uint32_t max_lkup_rec_cnt;
- /*
- * The maximum number of action records that a table scope can support.
- * This field is only valid if tbl_scope_capable is not zero.
- */
- uint32_t max_act_rec_cnt;
- /* Not zero if firmware capable of table scopes. */
- uint8_t tbl_scope_capable;
- /*
- * log2 of the number of lookup static buckets that a table scope can
- * support. This field is only valid if tbl_scope_capable is not zero.
- */
- uint8_t max_lkup_static_buckets_exp;
- /* unused. */
- uint8_t unused0[5];
+ /* Response code. */
+ uint32_t resp_code;
+ /* Size of data in data field. */
+ uint16_t data_size;
+ /* unused */
+ uint16_t unused0;
+ /* Response data. */
+ uint8_t data[96];
+ /* unused */
+ uint8_t unused1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -56087,22 +62139,13 @@ struct hwrm_tfc_tbl_scope_qcaps_output {
uint8_t valid;
} __rte_packed;
-/*******************************
- * hwrm_tfc_tbl_scope_id_alloc *
- *******************************/
+/******************************
+ * hwrm_tunnel_dst_port_query *
+ ******************************/
-/*
- * TruFlow command to allocate a table scope ID and create the pools.
- *
- * There is no corresponding free command since a table scope
- * ID will automatically be freed once the last FID is removed.
- * That is, when the hwrm_tfc_tbl_scope_fid_rem command returns
- * a fid_cnt of 0 that also means that the table scope ID has
- * been freed.
- */
-/* hwrm_tfc_tbl_scope_id_alloc_input (size:192b/24B) */
-struct hwrm_tfc_tbl_scope_id_alloc_input {
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_query_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -56131,30 +62174,83 @@ struct hwrm_tfc_tbl_scope_id_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* The maximum number of pools for this table scope. */
- uint16_t max_pools;
- /* Non-zero if this table scope is shared. */
- uint8_t shared;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
/*
- * The size of the lookup pools per direction expressed as
- * log2(max_records/max_pools). That is, size=2^exp.
- *
- * Array is indexed by enum cfa_dir.
+ * Enhance Generic Routing Encapsulation (GRE version 1) inside IP
+ * datagram payload
*/
- uint8_t lkup_pool_sz_exp[2];
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Use fixed layer 2 ether type of 0xFFFF */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_L2_ETYPE \
+ UINT32_C(0xb)
/*
- * The size of the action pools per direction expressed as
- * log2(max_records/max_pools). That is, size=2^exp.
- *
- * Array is indexed by enum cfa_dir.
+ * IPV6 over virtual eXtensible Local Area Network with GPE header
+ * (IPV6oVXLANGPE)
*/
- uint8_t act_pool_sz_exp[2];
- /* unused. */
- uint8_t unused0;
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \
+ UINT32_C(0xc)
+ /* Custom GRE uses UPAR to parse customized GRE packets */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_CUSTOM_GRE \
+ UINT32_C(0xd)
+ /* Enhanced Common Packet Radio Interface (eCPRI) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ECPRI \
+ UINT32_C(0xe)
+ /* IPv6 Segment Routing (SRv6) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_SRV6 \
+ UINT32_C(0xf)
+ /* Generic Protocol Extension for VXLAN (VXLAN-GPE) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_GPE \
+ UINT32_C(0x10)
+ /* Generic Routing Encapsulation */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GRE \
+ UINT32_C(0x11)
+ /* ULP Dynamic UPAR tunnel */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR \
+ UINT32_C(0x12)
+ /* ULP Dynamic UPAR tunnel reserved 1 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 \
+ UINT32_C(0x13)
+ /* ULP Dynamic UPAR tunnel reserved 2 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 \
+ UINT32_C(0x14)
+ /* ULP Dynamic UPAR tunnel reserved 3 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 \
+ UINT32_C(0x15)
+ /* ULP Dynamic UPAR tunnel reserved 4 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 \
+ UINT32_C(0x16)
+ /* ULP Dynamic UPAR tunnel reserved 5 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 \
+ UINT32_C(0x17)
+ /* ULP Dynamic UPAR tunnel reserved 6 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 \
+ UINT32_C(0x18)
+ /* ULP Dynamic UPAR tunnel reserved 7 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 \
+ UINT32_C(0x19)
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ /*
+ * This field is used to specify the next protocol value defined in the
+ * corresponding RFC spec for the applicable tunnel type.
+ */
+ uint8_t tunnel_next_proto;
+ uint8_t unused_0[6];
} __rte_packed;
-/* hwrm_tfc_tbl_scope_id_alloc_output (size:128b/16B) */
-struct hwrm_tfc_tbl_scope_id_alloc_output {
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_query_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -56163,34 +62259,87 @@ struct hwrm_tfc_tbl_scope_id_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* The table scope ID that was allocated. */
- uint8_t tsid;
/*
- * Non-zero if this is the first FID associated with this table scope
- * ID.
+ * This field represents the identifier of L4 destination port
+ * used for the given tunnel type. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
*/
- uint8_t first;
- /* unused. */
- uint8_t unused0[5];
+ uint16_t tunnel_dst_port_id;
+ /*
+ * This field represents the value of L4 destination port
+ * identified by tunnel_dst_port_id. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
+ * This field is in network byte order.
+ *
+ * A value of 0 means that the destination port is not
+ * configured.
+ */
+ uint16_t tunnel_dst_port_val;
+ /*
+ * This field represents the UPAR usage status.
+ * Available UPARs on wh+ are UPAR0 and UPAR1
+ * Available UPARs on Thor are UPAR0 to UPAR3
+ * Available UPARs on Thor2 are UPAR0 to UPAR7
+ */
+ uint8_t upar_in_use;
+ /* This bit will be '1' when UPAR0 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR0 \
+ UINT32_C(0x1)
+ /* This bit will be '1' when UPAR1 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR1 \
+ UINT32_C(0x2)
+ /* This bit will be '1' when UPAR2 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR2 \
+ UINT32_C(0x4)
+ /* This bit will be '1' when UPAR3 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR3 \
+ UINT32_C(0x8)
+ /* This bit will be '1' when UPAR4 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR4 \
+ UINT32_C(0x10)
+ /* This bit will be '1' when UPAR5 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR5 \
+ UINT32_C(0x20)
+ /* This bit will be '1' when UPAR6 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR6 \
+ UINT32_C(0x40)
+ /* This bit will be '1' when UPAR7 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR7 \
+ UINT32_C(0x80)
+ /*
+ * This field is used to convey the status of non-UDP port based
+ * tunnel parsing at the chip level and at the function level.
+ */
+ uint8_t status;
+ /* This bit will be '1' when tunnel parsing is enabled globally. */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_STATUS_CHIP_LEVEL \
+ UINT32_C(0x1)
+ /*
+ * This bit will be '1' when tunnel parsing is enabled
+ * on the corresponding function.
+ */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_STATUS_FUNC_LEVEL \
+ UINT32_C(0x2)
+ uint8_t unused_0;
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
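
The query output above packs three independent pieces of state: the L4 port value (network byte order, 0 when unconfigured), the UPAR usage bitmap, and the chip/function-level parsing status. Below is a minimal decoding sketch, assuming the response buffer has already been filled by the driver's HWRM transport; the helper name is illustrative only.

#include <stdio.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Hypothetical helper: interpret a tunnel_dst_port_query response. */
static void
show_tunnel_dst_port(const struct hwrm_tunnel_dst_port_query_output *resp)
{
	/* tunnel_dst_port_val is in network byte order; 0 means unconfigured. */
	uint16_t port = rte_be_to_cpu_16(resp->tunnel_dst_port_val);

	if (port == 0) {
		printf("tunnel dst port not configured\n");
		return;
	}
	printf("tunnel dst port %u (fw id 0x%04x)\n", (unsigned int)port,
	       (unsigned int)rte_le_to_cpu_16(resp->tunnel_dst_port_id));

	/* upar_in_use is a bitmask; bit N set means UPAR<N> is claimed. */
	if (resp->upar_in_use &
	    HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR0)
		printf("UPAR0 in use\n");

	/* status reports non-UDP tunnel parsing at chip and function level. */
	if (resp->status & HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_STATUS_FUNC_LEVEL)
		printf("tunnel parsing enabled on this function\n");
}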
-/*****************************
- * hwrm_tfc_tbl_scope_config *
- *****************************/
+/******************************
+ * hwrm_tunnel_dst_port_alloc *
+ ******************************/
-/* TruFlow command to configure the table scope memory. */
-/* hwrm_tfc_tbl_scope_config_input (size:704b/88B) */
-struct hwrm_tfc_tbl_scope_config_input {
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -56219,58 +62368,97 @@ struct hwrm_tfc_tbl_scope_config_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
/*
- * The base addresses for lookup memory.
- * Array is indexed by enum cfa_dir.
- */
- uint64_t lkup_base_addr[2];
- /*
- * The base addresses for action memory.
- * Array is indexed by enum cfa_dir.
- */
- uint64_t act_base_addr[2];
- /*
- * The number of minimum sized lkup records per direction.
- * In this usage, records are the minimum lookup memory
- * allocation unit in a table scope. This value is the total
- * memory required for buckets and entries.
- *
- * Array is indexed by enum cfa_dir.
+ * Enhanced Generic Routing Encapsulation (GRE version 1) inside IP
+ * datagram payload
*/
- uint32_t lkup_rec_cnt[2];
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Use fixed layer 2 ether type of 0xFFFF */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE \
+ UINT32_C(0xb)
/*
- * The number of minimum sized action records per direction.
- * Similar to the lkup_rec_cnt, records are the minimum
- * action memory allocation unit in a table scope.
- *
- * Array is indexed by enum cfa_dir.
+ * IPV6 over virtual eXtensible Local Area Network with GPE header
+ * (IPV6oVXLANGPE)
*/
- uint32_t act_rec_cnt[2];
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \
+ UINT32_C(0xc)
/*
- * The number of static lookup buckets in the table scope.
- * Array is indexed by enum cfa_dir.
+ * Custom GRE uses UPAR to parse customized GRE packets. This is not
+ * supported.
*/
- uint32_t lkup_static_bucket_cnt[2];
- /* The page size of the table scope. */
- uint32_t pbl_page_sz;
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_CUSTOM_GRE \
+ UINT32_C(0xd)
+ /* Enhanced Common Packet Radio Interface (eCPRI) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI \
+ UINT32_C(0xe)
+ /* IPv6 Segment Routing (SRv6) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_SRV6 \
+ UINT32_C(0xf)
+ /* Generic Protocol Extension for VXLAN (VXLAN-GPE) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE \
+ UINT32_C(0x10)
+ /* Generic Routing Encapsulation */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GRE \
+ UINT32_C(0x11)
+ /* ULP Dynamic UPAR tunnel */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR \
+ UINT32_C(0x12)
+ /* ULP Dynamic UPAR tunnel reserved 1 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 \
+ UINT32_C(0x13)
+ /* ULP Dynamic UPAR tunnel reserved 2 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 \
+ UINT32_C(0x14)
+ /* ULP Dynamic UPAR tunnel reserved 3 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 \
+ UINT32_C(0x15)
+ /* ULP Dynamic UPAR tunnel reserved 4 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 \
+ UINT32_C(0x16)
+ /* ULP Dynamic UPAR tunnel reserved 5 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 \
+ UINT32_C(0x17)
+ /* ULP Dynamic UPAR tunnel reserved 6 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 \
+ UINT32_C(0x18)
+ /* ULP Dynamic UPAR tunnel reserved 7 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 \
+ UINT32_C(0x19)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
/*
- * The PBL level for lookup memory.
- * Array is indexed by enum cfa_dir.
+ * This field is used to specify the next protocol value defined in the
+ * corresponding RFC spec for the applicable tunnel type.
*/
- uint8_t lkup_pbl_level[2];
+ uint8_t tunnel_next_proto;
/*
- * The PBL level for action memory.
- * Array is indexed by enum cfa_dir.
+ * This field represents the value of L4 destination port used
+ * for the given tunnel type. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
+ *
+ * This field is in network byte order.
+ *
+ * A value of 0 shall fail the command.
*/
- uint8_t act_pbl_level[2];
- /* The table scope ID. */
- uint8_t tsid;
- /* unused. */
- uint8_t unused0[7];
+ uint16_t tunnel_dst_port_val;
+ uint8_t unused_0[4];
} __rte_packed;
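
To build the alloc request the driver only has to pick a tunnel type and encode the UDP port; a short sketch of that field encoding follows, leaving the actual send to the driver's existing HWRM helpers (HWRM_PREP()/bnxt_hwrm_send_message()).

#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Sketch: encode a VXLAN destination-port alloc request. */
static void
fill_vxlan_dst_port_alloc(struct hwrm_tunnel_dst_port_alloc_input *req,
			  uint16_t udp_port)
{
	req->tunnel_type = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	/* Field is in network byte order; a value of 0 fails the command. */
	req->tunnel_dst_port_val = rte_cpu_to_be_16(udp_port);
	/* next_proto only matters for GPE-style tunnels; leave it cleared. */
	req->tunnel_next_proto = 0;
}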
-/* hwrm_tfc_tbl_scope_config_output (size:128b/16B) */
-struct hwrm_tfc_tbl_scope_config_output {
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -56279,27 +62467,76 @@ struct hwrm_tfc_tbl_scope_config_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
+ /*
+ * Identifier of a tunnel L4 destination port value. Only applies to
+ * tunnel types that have L4 destination port parameters.
+ */
+ uint16_t tunnel_dst_port_id;
+ /* Error information */
+ uint8_t error_info;
+ /* No error */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_SUCCESS \
+ UINT32_C(0x0)
+ /* Tunnel port is already allocated */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_ALLOCATED \
+ UINT32_C(0x1)
+ /* Out of resources error */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_NO_RESOURCE \
+ UINT32_C(0x2)
+ /* Tunnel type is already enabled */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_ENABLED \
+ UINT32_C(0x3)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_LAST \
+ HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_ENABLED
+ /*
+ * This field represents the UPAR usage status.
+ * Available UPARs on wh+ are UPAR0 and UPAR1
+ * Available UPARs on Thor are UPAR0 to UPAR3
+ * Available UPARs on Thor2 are UPAR0 to UPAR7
+ */
+ uint8_t upar_in_use;
+ /* This bit will be '1' when UPAR0 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR0 \
+ UINT32_C(0x1)
+ /* This bit will be '1' when UPAR1 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR1 \
+ UINT32_C(0x2)
+ /* This bit will be '1' when UPAR2 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR2 \
+ UINT32_C(0x4)
+ /* This bit will be '1' when UPAR3 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR3 \
+ UINT32_C(0x8)
+ /* This bit will be '1' when UPAR4 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR4 \
+ UINT32_C(0x10)
+ /* This bit will be '1' when UPAR5 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR5 \
+ UINT32_C(0x20)
+ /* This bit will be '1' when UPAR6 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR6 \
+ UINT32_C(0x40)
+ /* This bit will be '1' when UPAR7 is IN_USE */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR7 \
+ UINT32_C(0x80)
+ uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
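
The error_info byte in the alloc output gives a finer-grained reason than the generic error_code; a hedged sketch of mapping it to a log string (the helper name and the logging are illustrative):

#include "hsi_struct_def_dpdk.h"

/* Sketch: translate tunnel_dst_port_alloc error_info into a string. */
static const char *
tunnel_alloc_err_str(uint8_t error_info)
{
	switch (error_info) {
	case HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_SUCCESS:
		return "success";
	case HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_ALLOCATED:
		return "port already allocated";
	case HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_NO_RESOURCE:
		return "out of resources";
	case HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_ENABLED:
		return "tunnel type already enabled";
	default:
		return "unknown error_info value";
	}
}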
-/*******************************
- * hwrm_tfc_tbl_scope_deconfig *
- *******************************/
+/*****************************
+ * hwrm_tunnel_dst_port_free *
+ *****************************/
-/* TruFlow command to deconfigure the table scope memory. */
-/* hwrm_tfc_tbl_scope_deconfig_input (size:192b/24B) */
-struct hwrm_tfc_tbl_scope_deconfig_input {
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_free_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -56328,14 +62565,91 @@ struct hwrm_tfc_tbl_scope_deconfig_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* The table scope ID. */
- uint8_t tsid;
- /* unused. */
- uint8_t unused0[7];
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /*
+ * Enhanced Generic Routing Encapsulation (GRE version 1) inside IP
+ * datagram payload
+ */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Use fixed layer 2 ether type of 0xFFFF */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_L2_ETYPE \
+ UINT32_C(0xb)
+ /*
+ * IPV6 over virtual eXtensible Local Area Network with GPE header
+ * (IPV6oVXLANGPE)
+ */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \
+ UINT32_C(0xc)
+ /*
+ * Custom GRE uses UPAR to parse customized GRE packets. This is not
+ * supported.
+ */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_CUSTOM_GRE \
+ UINT32_C(0xd)
+ /* Enhanced Common Packet Radio Interface (eCPRI) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ECPRI \
+ UINT32_C(0xe)
+ /* IPv6 Segment Routing (SRv6) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_SRV6 \
+ UINT32_C(0xf)
+ /* Generic Protocol Extension for VXLAN (VXLAN-GPE) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_GPE \
+ UINT32_C(0x10)
+ /* Generic Routing Encapsulation */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GRE \
+ UINT32_C(0x11)
+ /* ULP Dynamic UPAR tunnel */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR \
+ UINT32_C(0x12)
+ /* ULP Dynamic UPAR tunnel reserved 1 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 \
+ UINT32_C(0x13)
+ /* ULP Dynamic UPAR tunnel reserved 2 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 \
+ UINT32_C(0x14)
+ /* ULP Dynamic UPAR tunnel reserved 3 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 \
+ UINT32_C(0x15)
+ /* ULP Dynamic UPAR tunnel reserved 4 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 \
+ UINT32_C(0x16)
+ /* ULP Dynamic UPAR tunnel reserved 5 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 \
+ UINT32_C(0x17)
+ /* ULP Dynamic UPAR tunnel reserved 6 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 \
+ UINT32_C(0x18)
+ /* ULP Dynamic UPAR tunnel reserved 7 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 \
+ UINT32_C(0x19)
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ /*
+ * This field is used to specify the next protocol value defined in the
+ * corresponding RFC spec for the applicable tunnel type.
+ */
+ uint8_t tunnel_next_proto;
+ /*
+ * Identifier of a tunnel L4 destination port value. Only applies to
+ * tunnel types that have L4 destination port parameters.
+ */
+ uint16_t tunnel_dst_port_id;
+ uint8_t unused_0[4];
} __rte_packed;
-/* hwrm_tfc_tbl_scope_deconfig_output (size:128b/16B) */
-struct hwrm_tfc_tbl_scope_deconfig_output {
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_free_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -56344,94 +62658,181 @@ struct hwrm_tfc_tbl_scope_deconfig_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
+ /* Error information */
+ uint8_t error_info;
+ /* No error */
+ #define HWRM_TUNNEL_DST_PORT_FREE_OUTPUT_ERROR_INFO_SUCCESS \
+ UINT32_C(0x0)
+ /* Not owner error */
+ #define HWRM_TUNNEL_DST_PORT_FREE_OUTPUT_ERROR_INFO_ERR_NOT_OWNER \
+ UINT32_C(0x1)
+ /* Not allocated error */
+ #define HWRM_TUNNEL_DST_PORT_FREE_OUTPUT_ERROR_INFO_ERR_NOT_ALLOCATED \
+ UINT32_C(0x2)
+ #define HWRM_TUNNEL_DST_PORT_FREE_OUTPUT_ERROR_INFO_LAST \
+ HWRM_TUNNEL_DST_PORT_FREE_OUTPUT_ERROR_INFO_ERR_NOT_ALLOCATED
+ uint8_t unused_1[6];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/******************************
- * hwrm_tfc_tbl_scope_fid_add *
- ******************************/
+/* Periodic statistics context DMA to host. */
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ /* Number of received unicast packets */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of discarded packets on receive path */
+ uint64_t rx_discard_pkts;
+ /* Number of packets on receive path with error */
+ uint64_t rx_error_pkts;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of transmitted unicast packets */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of packets on transmit path with error */
+ uint64_t tx_error_pkts;
+ /* Number of discarded packets on transmit path */
+ uint64_t tx_discard_pkts;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of TPA packets */
+ uint64_t tpa_pkts;
+ /* Number of TPA bytes */
+ uint64_t tpa_bytes;
+ /* Number of TPA events */
+ uint64_t tpa_events;
+ /* Number of TPA aborts */
+ uint64_t tpa_aborts;
+} __rte_packed;
+/*
+ * Extended periodic statistics context DMA to host. On cards that
+ * support TPA v2, additional TPA-related stats exist and can be retrieved
+ * by DMA of ctx_hw_stats_ext rather than the legacy ctx_hw_stats structure.
+ */
+/* ctx_hw_stats_ext (size:1408b/176B) */
+struct ctx_hw_stats_ext {
+ /* Number of received unicast packets */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of discarded packets on receive path */
+ uint64_t rx_discard_pkts;
+ /* Number of packets on receive path with error */
+ uint64_t rx_error_pkts;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of transmitted unicast packets */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of packets on transmit path with error */
+ uint64_t tx_error_pkts;
+ /* Number of discarded packets on transmit path */
+ uint64_t tx_discard_pkts;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of TPA eligible packets */
+ uint64_t rx_tpa_eligible_pkt;
+ /* Number of TPA eligible bytes */
+ uint64_t rx_tpa_eligible_bytes;
+ /* Number of TPA packets */
+ uint64_t rx_tpa_pkt;
+ /* Number of TPA bytes */
+ uint64_t rx_tpa_bytes;
+ /* Number of TPA errors */
+ uint64_t rx_tpa_errors;
+ /* Number of TPA events */
+ uint64_t rx_tpa_events;
+} __rte_packed;
-/* TruFlow command to add a FID to a table scope. */
-/* hwrm_tfc_tbl_scope_fid_add_input (size:192b/24B) */
-struct hwrm_tfc_tbl_scope_fid_add_input {
- /* The HWRM command request type. */
- uint16_t req_type;
+/* Periodic Engine statistics context DMA to host. */
+/* ctx_eng_stats (size:512b/64B) */
+struct ctx_eng_stats {
/*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
+ * Count of data bytes into the Engine.
+ * This includes any user supplied prefix,
+ * but does not include any predefined
+ * prefix data.
*/
- uint16_t cmpl_ring;
+ uint64_t eng_bytes_in;
+ /* Count of data bytes out of the Engine. */
+ uint64_t eng_bytes_out;
/*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
+ * Count, in 4-byte (dword) units, of bytes
+ * that are input as auxiliary data.
+ * This includes the aux_cmd data.
*/
- uint16_t seq_id;
+ uint64_t aux_bytes_in;
/*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
+ * Count, in 4-byte (dword) units, of bytes
+ * that are output as auxiliary data.
+ * This count is the buffer space for aux_data
+ * output provided in the RQE, not the actual
+ * aux_data written
*/
- uint16_t target_id;
+ uint64_t aux_bytes_out;
+ /* Count of number of commands executed. */
+ uint64_t commands;
/*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
+ * Count of number of error commands.
+ * These are the commands with a
+ * non-zero status value.
*/
- uint64_t resp_addr;
- /* The table scope ID. */
- uint8_t tsid;
- /* unused. */
- uint8_t unused0[7];
-} __rte_packed;
-
-/* hwrm_tfc_tbl_scope_fid_add_output (size:128b/16B) */
-struct hwrm_tfc_tbl_scope_fid_add_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* The number of FIDs currently in the table scope ID. */
- uint8_t fid_cnt;
- /* unused. */
- uint8_t unused0[6];
+ uint64_t error_commands;
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * Compression/Encryption Engine usage,
+ * the unit is count of clock cycles
*/
- uint8_t valid;
+ uint64_t cce_engine_usage;
+ /*
+ * De-Compression/De-cryption Engine usage,
+ * the unit is count of clock cycles
+ */
+ uint64_t cdd_engine_usage;
} __rte_packed;
-/******************************
- * hwrm_tfc_tbl_scope_fid_rem *
- ******************************/
+/***********************
+ * hwrm_stat_ctx_alloc *
+ ***********************/
-/* TruFlow command to remove a FID from a table scope. */
-/* hwrm_tfc_tbl_scope_fid_rem_input (size:192b/24B) */
-struct hwrm_tfc_tbl_scope_fid_rem_input {
+/* hwrm_stat_ctx_alloc_input (size:320b/40B) */
+struct hwrm_stat_ctx_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -56460,14 +62861,63 @@ struct hwrm_tfc_tbl_scope_fid_rem_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* The table scope ID. */
- uint8_t tsid;
- /* unused. */
- uint8_t unused0[7];
+ /*
+ * This is the address for the statistics block.
+ * For new versions of the chip, this address should be 128B
+ * aligned.
+ */
+ uint64_t stats_dma_addr;
+ /*
+ * The statistic block update period in ms.
+ * e.g. 250ms, 500ms, 750ms, 1000ms.
+ * If update_period_ms is 0, then the stats update
+ * shall never be done and the DMA address shall not be used.
+ * In this case, the stat block can only be read by the
+ * hwrm_stat_ctx_query command.
+ * On Ethernet/L2 based devices:
+ * if tpa v2 supported (hwrm_vnic_qcaps[max_aggs_supported]>0),
+ * ctx_hw_stats_ext is used for DMA,
+ * else
+ * ctx_hw_stats is used for DMA.
+ */
+ uint32_t update_period_ms;
+ /*
+ * This field is used to specify statistics context specific
+ * configuration flags.
+ */
+ uint8_t stat_ctx_flags;
+ /*
+ * When this bit is set to '1', the statistics context shall be
+ * allocated for RoCE traffic only. In this case, traffic other
+ * than offloaded RoCE traffic shall not be included in this
+ * statistic context.
+ * When this bit is set to '0', the statistics context shall be
+ * used for network traffic or engine traffic.
+ */
+ #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1)
+ uint8_t unused_0;
+ /*
+ * This is the size of the structure (ctx_hw_stats or
+ * ctx_hw_stats_ext) that the driver has allocated to be used
+ * for the periodic DMA updates.
+ */
+ uint16_t stats_dma_length;
+ uint16_t flags;
+ /* This stats context uses the steering tag specified in the command. */
+ #define HWRM_STAT_CTX_ALLOC_INPUT_FLAGS_STEERING_TAG_VALID \
+ UINT32_C(0x1)
+ /*
+ * Steering tag to use for memory transactions from the periodic DMA
+ * updates. 'steering_tag_valid' should be set and 'steering_tag'
+ * should be specified, when the 'steering_tag_supported' bit is set
+ * under the 'flags_ext2' field of the hwrm_func_qcaps_output.
+ */
+ uint16_t steering_tag;
+ uint32_t unused_1;
} __rte_packed;
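
The stats_dma_length field is how the driver tells firmware which of the two DMA layouts above it allocated. A minimal sketch of the request encoding, assuming the IOVA and the TPA v2 capability bit come from driver state:

#include <stdbool.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Sketch: choose the DMA layout and period for a stat_ctx_alloc request. */
static void
fill_stat_ctx_alloc(struct hwrm_stat_ctx_alloc_input *req,
		    uint64_t stats_iova, bool tpa_v2_supported)
{
	req->stats_dma_addr = rte_cpu_to_le_64(stats_iova);
	/* 0 disables periodic DMA; counters are then read via stat_ctx_query. */
	req->update_period_ms = rte_cpu_to_le_32(1000);
	/* TPA v2 capable devices DMA the extended layout, others the legacy one. */
	req->stats_dma_length = rte_cpu_to_le_16(tpa_v2_supported ?
			sizeof(struct ctx_hw_stats_ext) :
			sizeof(struct ctx_hw_stats));
}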
-/* hwrm_tfc_tbl_scope_fid_rem_output (size:128b/16B) */
-struct hwrm_tfc_tbl_scope_fid_rem_output {
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
+struct hwrm_stat_ctx_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -56476,28 +62926,26 @@ struct hwrm_tfc_tbl_scope_fid_rem_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* The number of FIDs remaining in the table scope ID. */
- uint16_t fid_cnt;
- /* unused. */
- uint8_t unused0[5];
+ /* This is the statistics context ID value. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/*********************************
- * hwrm_tfc_tbl_scope_pool_alloc *
- *********************************/
+/**********************
+ * hwrm_stat_ctx_free *
+ **********************/
-/* hwrm_tfc_tbl_scope_pool_alloc_input (size:192b/24B) */
-struct hwrm_tfc_tbl_scope_pool_alloc_input {
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
+struct hwrm_stat_ctx_free_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -56526,38 +62974,13 @@ struct hwrm_tfc_tbl_scope_pool_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Table Scope ID */
- uint8_t tsid;
- /* Control flags. Direction and type. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_DIR \
- UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_DIR_RX \
- UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_DIR_TX \
- UINT32_C(0x1)
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_DIR_TX
- /* Indicates the table type. */
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_TYPE \
- UINT32_C(0x2)
- /* Lookup table */
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_TYPE_LOOKUP \
- (UINT32_C(0x0) << 1)
- /* Action table */
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_TYPE_ACTION \
- (UINT32_C(0x1) << 1)
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_TYPE_LAST \
- HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_TYPE_ACTION
- /* Unused */
- uint8_t unused[6];
+ /* ID of the statistics context that is being queried. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[4];
} __rte_packed;
-/* hwrm_tfc_tbl_scope_pool_alloc_output (size:128b/16B) */
-struct hwrm_tfc_tbl_scope_pool_alloc_output {
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
+struct hwrm_stat_ctx_free_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -56566,30 +62989,26 @@ struct hwrm_tfc_tbl_scope_pool_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Pool ID */
- uint16_t pool_id;
- /* Pool size exponent. An exponent of 0 indicates a failure. */
- uint8_t pool_sz_exp;
- /* unused. */
- uint8_t unused1[4];
+ /* This is the statistics context ID value. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/********************************
- * hwrm_tfc_tbl_scope_pool_free *
- ********************************/
+/***********************
+ * hwrm_stat_ctx_query *
+ ***********************/
-/* hwrm_tfc_tbl_scope_pool_free_input (size:192b/24B) */
-struct hwrm_tfc_tbl_scope_pool_free_input {
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_query_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -56618,40 +63037,20 @@ struct hwrm_tfc_tbl_scope_pool_free_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Table Scope ID */
- uint8_t tsid;
- /* Control flags. Direction and type. */
+ /* ID of the statistics context that is being queried. */
+ uint32_t stat_ctx_id;
uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_DIR \
- UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_DIR_RX \
- UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_DIR_TX \
- UINT32_C(0x1)
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_DIR_TX
- /* Indicates the table type. */
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_TYPE \
- UINT32_C(0x2)
- /* Lookup table */
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_TYPE_LOOKUP \
- (UINT32_C(0x0) << 1)
- /* Action table */
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_TYPE_ACTION \
- (UINT32_C(0x1) << 1)
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_TYPE_LAST \
- HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_TYPE_ACTION
- /* Pool ID */
- uint16_t pool_id;
- /* Unused */
- uint8_t unused[4];
+ /*
+ * This bit is set to 1 when the request is for a counter mask,
+ * representing the width of each of the stats counters, rather
+ * than the counters themselves.
+ */
+ #define HWRM_STAT_CTX_QUERY_INPUT_FLAGS_COUNTER_MASK UINT32_C(0x1)
+ uint8_t unused_0[3];
} __rte_packed;
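
The COUNTER_MASK flag reuses the same query to report counter widths (all-ones masks) instead of running values, which is typically used to learn each counter's width for rollover handling. A small sketch of building either form of the request:

#include <stdbool.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Sketch: the same request reads counter values or counter-width masks. */
static void
fill_stat_ctx_query(struct hwrm_stat_ctx_query_input *req,
		    uint32_t stat_ctx_id, bool query_widths)
{
	req->stat_ctx_id = rte_cpu_to_le_32(stat_ctx_id);
	/* With COUNTER_MASK set, each output field is an all-ones width mask. */
	req->flags = query_widths ?
		HWRM_STAT_CTX_QUERY_INPUT_FLAGS_COUNTER_MASK : 0;
}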
-/* hwrm_tfc_tbl_scope_pool_free_output (size:128b/16B) */
-struct hwrm_tfc_tbl_scope_pool_free_output {
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
+struct hwrm_stat_ctx_query_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -56660,33 +63059,64 @@ struct hwrm_tfc_tbl_scope_pool_free_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
- uint8_t unused1[7];
+ /* Number of transmitted unicast packets */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of packets discarded in transmit path */
+ uint64_t tx_discard_pkts;
+ /* Number of packets in transmit path with error */
+ uint64_t tx_error_pkts;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of received unicast packets */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of packets discarded in receive path */
+ uint64_t rx_discard_pkts;
+ /* Number of packets in receive path with errors */
+ uint64_t rx_error_pkts;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of aggregated unicast packets */
+ uint64_t rx_agg_pkts;
+ /* Number of aggregated unicast bytes */
+ uint64_t rx_agg_bytes;
+ /* Number of aggregation events */
+ uint64_t rx_agg_events;
+ /* Number of aborted aggregations */
+ uint64_t rx_agg_aborts;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
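
Every output record in this file ends with the same rule: firmware writes the valid byte last, so a reader must observe it as '1' before trusting any other field. A hedged sketch of that check follows; the spin count and rte_pause() pacing are illustrative, and the driver's real transport has its own timeout and barrier handling.

#include <stdbool.h>
#include <rte_pause.h>
#include "hsi_struct_def_dpdk.h"

/* Sketch: poll the valid byte before reading the rest of the response. */
static bool
stat_ctx_query_resp_ready(const volatile struct hwrm_stat_ctx_query_output *resp,
			  unsigned int spins)
{
	while (spins--) {
		if (resp->valid == 1) {
			/* A real implementation adds an acquire fence here. */
			return true;
		}
		rte_pause();	/* yield briefly between polls */
	}
	return false;
}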
-/*****************************
- * hwrm_tfc_session_id_alloc *
- *****************************/
+/***************************
+ * hwrm_stat_ext_ctx_query *
+ ***************************/
-/*
- * Allocate a TFC session. Requests the firmware to allocate a TFC
- * session identifier and associate a forwarding function with the
- * session. Though there's not an explicit matching free for a session
- * id alloc, dis-associating the last fid from a session id (fid_cnt goes
- * to 0), will result in this session id being freed automatically.
- */
-/* hwrm_tfc_session_id_alloc_input (size:128b/16B) */
-struct hwrm_tfc_session_id_alloc_input {
+/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ext_ctx_query_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -56715,10 +63145,21 @@ struct hwrm_tfc_session_id_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* ID of the extended statistics context that is being queried. */
+ uint32_t stat_ctx_id;
+ uint8_t flags;
+ /*
+ * This bit is set to 1 when the request is for a counter mask,
+ * representing the width of each of the stats counters, rather
+ * than the counters themselves.
+ */
+ #define HWRM_STAT_EXT_CTX_QUERY_INPUT_FLAGS_COUNTER_MASK \
+ UINT32_C(0x1)
+ uint8_t unused_0[3];
} __rte_packed;
-/* hwrm_tfc_session_id_alloc_output (size:128b/16B) */
-struct hwrm_tfc_session_id_alloc_output {
+/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
+struct hwrm_stat_ext_ctx_query_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -56727,35 +63168,68 @@ struct hwrm_tfc_session_id_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * Unique session identifier for the session created by the
- * firmware.
- */
- uint16_t sid;
- /* Unused field */
- uint8_t unused0[5];
+ /* Number of received unicast packets */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of discarded packets on receive path */
+ uint64_t rx_discard_pkts;
+ /* Number of packets on receive path with error */
+ uint64_t rx_error_pkts;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of transmitted unicast packets */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of packets on transmit path with error */
+ uint64_t tx_error_pkts;
+ /* Number of discarded packets on transmit path */
+ uint64_t tx_discard_pkts;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of TPA eligible packets */
+ uint64_t rx_tpa_eligible_pkt;
+ /* Number of TPA eligible bytes */
+ uint64_t rx_tpa_eligible_bytes;
+ /* Number of TPA packets */
+ uint64_t rx_tpa_pkt;
+ /* Number of TPA bytes */
+ uint64_t rx_tpa_bytes;
+ /* Number of TPA errors */
+ uint64_t rx_tpa_errors;
+ /* Number of TPA events */
+ uint64_t rx_tpa_events;
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/****************************
- * hwrm_tfc_session_fid_add *
- ****************************/
+/***************************
+ * hwrm_stat_ctx_eng_query *
+ ***************************/
-/*
- * Associate a TFC session id with a forwarding function. The target_fid
- * will be associated with the passed in sid.
- */
-/* hwrm_tfc_session_fid_add_input (size:192b/24B) */
-struct hwrm_tfc_session_fid_add_input {
+/* hwrm_stat_ctx_eng_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_eng_query_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -56784,17 +63258,13 @@ struct hwrm_tfc_session_fid_add_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /*
- * Unique session identifier for the session created by the
- * firmware.
- */
- uint16_t sid;
- /* Unused field */
- uint8_t unused0[6];
+ /* ID of the statistics context that is being queried. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[4];
} __rte_packed;
-/* hwrm_tfc_session_fid_add_output (size:128b/16B) */
-struct hwrm_tfc_session_fid_add_output {
+/* hwrm_stat_ctx_eng_query_output (size:640b/80B) */
+struct hwrm_stat_ctx_eng_query_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -56803,35 +63273,65 @@ struct hwrm_tfc_session_fid_add_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* The number of FIDs that share this session. */
- uint16_t fid_cnt;
- /* Unused field */
- uint8_t unused0[5];
+ /*
+ * Count of data bytes into the Engine.
+ * This includes any user supplied prefix,
+ * but does not include any predefined
+ * prefix data.
+ */
+ uint64_t eng_bytes_in;
+ /* Count of data bytes out of the Engine. */
+ uint64_t eng_bytes_out;
+ /*
+ * Count, in 4-byte (dword) units, of bytes
+ * that are input as auxiliary data.
+ * This includes the aux_cmd data.
+ */
+ uint64_t aux_bytes_in;
+ /*
+ * Count, in 4-byte (dword) units, of bytes
+ * that are output as auxiliary data.
+ * This count is the buffer space for aux_data
+ * output provided in the RQE, not the actual
+ * aux_data written
+ */
+ uint64_t aux_bytes_out;
+ /* Count of number of commands executed. */
+ uint64_t commands;
+ /*
+ * Count of number of error commands.
+ * These are the commands with a
+ * non-zero status value.
+ */
+ uint64_t error_commands;
+ /*
+ * Compression/Encryption Engine usage,
+ * the unit is count of clock cycles
+ */
+ uint64_t cce_engine_usage;
+ /*
+ * De-Compression/De-cryption Engine usage,
+ * the unit is count of clock cycles
+ */
+ uint64_t cdd_engine_usage;
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/****************************
- * hwrm_tfc_session_fid_rem *
- ****************************/
+/***************************
+ * hwrm_stat_ctx_clr_stats *
+ ***************************/
-/*
- * Dis-associate a TFC session from the target_fid.
- * Though there's not an explicit matching free for a
- * session id alloc, dis-associating the last fid from a session id
- * (fid_cnt goes to 0), will result in this session id being freed
- * automatically.
- */
-/* hwrm_tfc_session_fid_rem_input (size:192b/24B) */
-struct hwrm_tfc_session_fid_rem_input {
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
+struct hwrm_stat_ctx_clr_stats_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -56860,17 +63360,13 @@ struct hwrm_tfc_session_fid_rem_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /*
- * Unique session identifier for the session created by the
- * firmware.
- */
- uint16_t sid;
- /* Unused field */
- uint8_t unused0[6];
+ /* ID of the statistics context that is being queried. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[4];
} __rte_packed;
-/* hwrm_tfc_session_fid_rem_output (size:128b/16B) */
-struct hwrm_tfc_session_fid_rem_output {
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
+struct hwrm_stat_ctx_clr_stats_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -56879,35 +63375,24 @@ struct hwrm_tfc_session_fid_rem_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* The number of FIDs that share this session. */
- uint16_t fid_cnt;
- /* Unused field */
- uint8_t unused0[5];
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/************************
- * hwrm_tfc_ident_alloc *
- ************************/
+/********************
+ * hwrm_pcie_qstats *
+ ********************/
-/*
- * Allocate a TFC identifier. Requests the firmware to
- * allocate a TFC identifier. The session id and track_type are passed
- * in. The tracking_id is either the sid or target_fid depends on the
- * track_type. The resource subtype is passed in, an id corresponding
- * to all these is allocated and returned in the HWRM response.
- */
-/* hwrm_tfc_ident_alloc_input (size:192b/24B) */
-struct hwrm_tfc_ident_alloc_input {
+/* hwrm_pcie_qstats_input (size:256b/32B) */
+struct hwrm_pcie_qstats_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -56937,44 +63422,21 @@ struct hwrm_tfc_ident_alloc_input {
*/
uint64_t resp_addr;
/*
- * Unique session identifier for the session created by the
- * firmware. Will be used to track this identifier.
+ * The size of PCIe statistics block in bytes.
+ * Firmware will DMA the PCIe statistics to
+ * the host with this field size in the response.
*/
- uint16_t sid;
- /* Control flags. Direction. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_IDENT_ALLOC_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_IDENT_ALLOC_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_IDENT_ALLOC_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TFC_IDENT_ALLOC_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_IDENT_ALLOC_INPUT_FLAGS_DIR_TX
+ uint16_t pcie_stat_size;
+ uint8_t unused_0[6];
/*
- * CFA resource subtype. For definitions, please see
- * cfa_v3/include/cfa_resources.h.
+ * This is the host address where
+ * PCIe statistics will be stored
*/
- uint8_t subtype;
- /* Describes the type of tracking tag to be used */
- uint8_t track_type;
- /* Invalid track type */
- #define HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_INVALID \
- UINT32_C(0x0)
- /* Tracked by session id */
- #define HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_SID \
- UINT32_C(0x1)
- /* Tracked by function id */
- #define HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID \
- UINT32_C(0x2)
- #define HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_LAST \
- HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID
- /* Unused field */
- uint8_t unused0[3];
+ uint64_t pcie_stat_host_addr;
} __rte_packed;
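
The request is just a size/address pair: the driver advertises how large its pcie_ctx_hw_stats buffer is, firmware DMAs at most that much and echoes the actual size back. A minimal sketch, where the IOVA argument stands in for wherever the driver allocated the buffer:

#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Sketch: advertise the PCIe stats buffer to firmware. */
static void
fill_pcie_qstats(struct hwrm_pcie_qstats_input *req, uint64_t stats_iova)
{
	req->pcie_stat_size =
		rte_cpu_to_le_16(sizeof(struct pcie_ctx_hw_stats));
	req->pcie_stat_host_addr = rte_cpu_to_le_64(stats_iova);
}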
-/* hwrm_tfc_ident_alloc_output (size:128b/16B) */
-struct hwrm_tfc_ident_alloc_output {
+/* hwrm_pcie_qstats_output (size:128b/16B) */
+struct hwrm_pcie_qstats_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -56983,37 +63445,62 @@ struct hwrm_tfc_ident_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * Resource identifier allocated by the firmware using
- * parameters above.
- */
- uint16_t ident_id;
- /* Unused field */
- uint8_t unused0[5];
+ /* The size of PCIe statistics block in bytes. */
+ uint16_t pcie_stat_size;
+ uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/***********************
- * hwrm_tfc_ident_free *
- ***********************/
+/* PCIe Statistics Formats */
+/* pcie_ctx_hw_stats (size:768b/96B) */
+struct pcie_ctx_hw_stats {
+ /* Number of physical layer receiver errors */
+ uint64_t pcie_pl_signal_integrity;
+ /* Number of DLLP CRC errors detected by Data Link Layer */
+ uint64_t pcie_dl_signal_integrity;
+ /*
+ * Number of TLP LCRC and sequence number errors detected
+ * by Data Link Layer
+ */
+ uint64_t pcie_tl_signal_integrity;
+ /* Number of times LTSSM entered Recovery state */
+ uint64_t pcie_link_integrity;
+ /* Report number of TLP bits that have been transmitted in Mbps */
+ uint64_t pcie_tx_traffic_rate;
+ /* Report number of TLP bits that have been received in Mbps */
+ uint64_t pcie_rx_traffic_rate;
+ /* Number of DLLP bytes that have been transmitted */
+ uint64_t pcie_tx_dllp_statistics;
+ /* Number of DLLP bytes that have been received */
+ uint64_t pcie_rx_dllp_statistics;
+ /*
+ * Number of times spent in each phase of gen3
+ * equalization
+ */
+ uint64_t pcie_equalization_time;
+ /* Records the last 16 transitions of the LTSSM */
+ uint32_t pcie_ltssm_histogram[4];
+ /*
+ * Record the last 8 reasons on why LTSSM transitioned
+ * to Recovery
+ */
+ uint64_t pcie_recovery_histogram;
+} __rte_packed;
+/****************************
+ * hwrm_stat_generic_qstats *
+ ****************************/
-/*
- * Requests the firmware to free a TFC resource identifier.
- * A resource subtype and session id are passed in.
- * An identifier (previously allocated) corresponding to all these is
- * freed, only after various sanity checks are completed.
- */
-/* hwrm_tfc_ident_free_input (size:192b/24B) */
-struct hwrm_tfc_ident_free_input {
+
+/* hwrm_stat_generic_qstats_input (size:256b/32B) */
+struct hwrm_stat_generic_qstats_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -57043,33 +63530,31 @@ struct hwrm_tfc_ident_free_input {
*/
uint64_t resp_addr;
/*
- * Unique session identifier for the session created by the
- * firmware. Will be used to validate this request.
+ * The size of the generic statistics buffer passed in the
+ * generic_stat_host_addr in bytes.
+ * Firmware will not exceed this size when it DMAs the
+ * statistics structure to the host. The actual DMA size
+ * will be returned in the response.
*/
- uint16_t sid;
+ uint16_t generic_stat_size;
+ uint8_t flags;
/*
- * CFA resource subtype. For definitions, please see
- * cfa_v3/include/cfa_resources.h.
+ * The bit should be set to 1 when the request is for the counter mask
+ * representing the width of each of the stats counters, rather
+ * than the counters themselves.
*/
- uint8_t subtype;
- /* Control flags. Direction. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR_TX
- /* The resource identifier to be freed */
- uint16_t ident_id;
- /* Reserved */
- uint8_t unused0[2];
+ #define HWRM_STAT_GENERIC_QSTATS_INPUT_FLAGS_COUNTER_MASK \
+ UINT32_C(0x1)
+ uint8_t unused_0[5];
+ /*
+ * This is the host address where
+ * generic statistics will be stored
+ */
+ uint64_t generic_stat_host_addr;
} __rte_packed;
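
hwrm_stat_generic_qstats follows the same size/address pattern as the PCIe query, with the added counter-mask option; a short sketch, again with the IOVA as a stand-in for driver state:

#include <stdbool.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Sketch: request the generic statistics block (or its counter masks). */
static void
fill_generic_qstats(struct hwrm_stat_generic_qstats_input *req,
		    uint64_t stats_iova, bool query_widths)
{
	req->generic_stat_size =
		rte_cpu_to_le_16(sizeof(struct generic_sw_hw_stats));
	req->flags = query_widths ?
		HWRM_STAT_GENERIC_QSTATS_INPUT_FLAGS_COUNTER_MASK : 0;
	req->generic_stat_host_addr = rte_cpu_to_le_64(stats_iova);
}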
-/* hwrm_tfc_ident_free_output (size:128b/16B) */
-struct hwrm_tfc_ident_free_output {
+/* hwrm_stat_generic_qstats_output (size:128b/16B) */
+struct hwrm_stat_generic_qstats_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -57078,8 +63563,9 @@ struct hwrm_tfc_ident_free_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Reserved */
- uint8_t unused0[7];
+ /* The size of Generic Statistics block in bytes. */
+ uint16_t generic_stat_size;
+ uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -57091,13 +63577,115 @@ struct hwrm_tfc_ident_free_output {
uint8_t valid;
} __rte_packed;
-/**************************
- * hwrm_tfc_idx_tbl_alloc *
- **************************/
+/* Generic Statistic Format */
+/* generic_sw_hw_stats (size:1472b/184B) */
+struct generic_sw_hw_stats {
+ /*
+ * This is the number of TLP bytes that have been transmitted for
+ * the caller PF.
+ */
+ uint64_t pcie_statistics_tx_tlp;
+ /*
+ * This is the number of TLP bytes that have been received
+ * for the caller PF.
+ */
+ uint64_t pcie_statistics_rx_tlp;
+ /* Posted Header Flow Control credits available for the caller PF. */
+ uint64_t pcie_credit_fc_hdr_posted;
+ /* Non-posted Header Flow Control credits available for the caller PF. */
+ uint64_t pcie_credit_fc_hdr_nonposted;
+ /* Completion Header Flow Control credits available for the caller PF. */
+ uint64_t pcie_credit_fc_hdr_cmpl;
+ /* Posted Data Flow Control credits available for the caller PF. */
+ uint64_t pcie_credit_fc_data_posted;
+ /* Non-Posted Data Flow Control credits available for the caller PF. */
+ uint64_t pcie_credit_fc_data_nonposted;
+ /* Completion Data Flow Control credits available for the caller PF. */
+ uint64_t pcie_credit_fc_data_cmpl;
+ /*
+ * Available Non-posted credit for target flow control reads or
+ * config for the caller PF.
+ */
+ uint64_t pcie_credit_fc_tgt_nonposted;
+ /*
+ * Available posted data credit for target flow control writes
+ * for the caller PF.
+ */
+ uint64_t pcie_credit_fc_tgt_data_posted;
+ /*
+ * Available posted header credit for target flow control writes
+ * for the caller PF.
+ */
+ uint64_t pcie_credit_fc_tgt_hdr_posted;
+ /* Available completion flow control header credits for the caller PF. */
+ uint64_t pcie_credit_fc_cmpl_hdr_posted;
+ /* Available completion flow control data credits. */
+ uint64_t pcie_credit_fc_cmpl_data_posted;
+ /*
+ * Displays time information of the longest completion time from any of
+ * the 4 tags for the caller PF. The unit of time recorded is in
+ * microseconds.
+ */
+ uint64_t pcie_cmpl_longest;
+ /*
+ * Displays time information of the shortest completion time from any
+ * of the 4 tags for the caller PF. The unit of time recorded is in
+ * microseconds.
+ */
+ uint64_t pcie_cmpl_shortest;
+ /*
+ * This field contains the total number of CFCQ 'misses' observed for
+ * all the PFs.
+ */
+ uint64_t cache_miss_count_cfcq;
+ /*
+ * This field contains the total number of CFCS 'misses' observed for
+ * all the PFs.
+ */
+ uint64_t cache_miss_count_cfcs;
+ /*
+ * This field contains the total number of CFCC 'misses' observed for
+ * all the PFs.
+ */
+ uint64_t cache_miss_count_cfcc;
+ /*
+ * This field contains the total number of CFCM 'misses' observed
+ * for all the PFs.
+ */
+ uint64_t cache_miss_count_cfcm;
+ /*
+ * Total number of Doorbell messages dropped from the DB FIFO.
+ * This counter is only applicable for devices that support
+ * the hardware based doorbell drop recovery feature.
+ */
+ uint64_t hw_db_recov_dbs_dropped;
+ /*
+ * Total number of doorbell drops serviced.
+ * This counter is only applicable for devices that support
+ * the hardware based doorbell drop recovery feature.
+ */
+ uint64_t hw_db_recov_drops_serviced;
+ /*
+ * Total number of dropped doorbells recovered.
+ * This counter is only applicable for devices that support
+ * the hardware based doorbell drop recovery feature.
+ */
+ uint64_t hw_db_recov_dbs_recovered;
+ /*
+ * Total number of out of order doorbell messages dropped.
+ * This counter is only applicable for devices that support
+ * the hardware based doorbell drop recovery feature.
+ */
+ uint64_t hw_db_recov_oo_drop_count;
+} __rte_packed;
+
+/*****************************
+ * hwrm_stat_db_error_qstats *
+ *****************************/
-/* hwrm_tfc_idx_tbl_alloc_input (size:192b/24B) */
-struct hwrm_tfc_idx_tbl_alloc_input {
+/* hwrm_stat_db_error_qstats_input (size:128b/16B) */
+struct hwrm_stat_db_error_qstats_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -57125,47 +63713,11 @@ struct hwrm_tfc_idx_tbl_alloc_input {
* physical address (HPA) or a guest physical address (GPA) and must
* point to a physically contiguous block of memory.
*/
- uint64_t resp_addr;
- /*
- * Unique session id for the session created by the
- * firmware. Will be used to track this index table entry
- * only if track type is track_type_sid.
- */
- uint16_t sid;
- /* Control flags. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR_TX
- /*
- * CFA resource subtype. For definitions, please see
- * cfa_v3/include/cfa_resources.h.
- */
- uint8_t subtype;
- /* Describes the type of tracking id to be used */
- uint8_t track_type;
- /* Invalid track type */
- #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_INVALID \
- UINT32_C(0x0)
- /* Tracked by session id */
- #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_SID \
- UINT32_C(0x1)
- /* Tracked by function id */
- #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID \
- UINT32_C(0x2)
- #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_LAST \
- HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID
- /* Reserved */
- uint8_t unused0[3];
+ uint64_t resp_addr;
} __rte_packed;
-/* hwrm_tfc_idx_tbl_alloc_output (size:128b/16B) */
-struct hwrm_tfc_idx_tbl_alloc_output {
+/* hwrm_stat_db_error_qstats_output (size:320b/40B) */
+struct hwrm_stat_db_error_qstats_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -57175,30 +63727,56 @@ struct hwrm_tfc_idx_tbl_alloc_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
/*
- * Index table entry allocated by the firmware using the
- * parameters above.
+	 * Specifies count of doorbells dropped due to RoCE SQs or L2
+	 * Tx Rings being in an invalid state.
*/
- uint16_t idx_tbl_id;
- /* Reserved */
- uint8_t unused0[5];
+ uint32_t tx_db_drop_invalid_qp_state;
+ /*
+	 * Specifies count of doorbells dropped due to RoCE RQs/SRQs or
+	 * L2 Rx Rings being in an invalid state.
+ */
+ uint32_t rx_db_drop_invalid_rq_state;
+ /*
+ * Specifies count of doorbells dropped for any doorbell type
+ * due to formatting errors such as illegal doorbell message
+ * type, index out of range etc.
+ */
+ uint32_t tx_db_drop_format_error;
+ /*
+ * Specifies count of express mode doorbells dropped for any
+ * doorbell type due to error conditions such as DPI check,
+ * context load error etc.
+ */
+ uint32_t express_db_dropped_misc_error;
+ /*
+ * Specifies count of express mode doorbells dropped due to
+ * RoCE SQ overflow.
+ */
+ uint32_t express_db_dropped_sq_overflow;
+ /*
+ * Specifies count of express mode doorbells dropped due to
+ * RoCE RQ overflow.
+ */
+ uint32_t express_db_dropped_rq_overflow;
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * processor, the order of writes has to be such that this field is
+ * written last.
*/
uint8_t valid;
} __rte_packed;
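
As a rough sketch of how this query might be driven, the helper below fills hwrm_stat_db_error_qstats_input and reads two of the drop counters back. It assumes the bnxt PMD's usual HWRM plumbing seen elsewhere in this patch (HWRM_PREP(), bnxt_hwrm_send_message(), HWRM_CHECK_RESULT(), HWRM_UNLOCK(), bp->hwrm_cmd_resp_addr); the helper name and the choice of counters are illustrative only.

static int
bnxt_hwrm_stat_db_error_qstats(struct bnxt *bp, uint32_t *tx_drops,
			       uint32_t *rx_drops)
{
	struct hwrm_stat_db_error_qstats_input req = {0};
	struct hwrm_stat_db_error_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_STAT_DB_ERROR_QSTATS, BNXT_USE_CHIMP_MB);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	*tx_drops = rte_le_to_cpu_32(resp->tx_db_drop_invalid_qp_state);
	*rx_drops = rte_le_to_cpu_32(resp->rx_db_drop_invalid_rq_state);
	HWRM_UNLOCK();
	return rc;
}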
-/******************************
- * hwrm_tfc_idx_tbl_alloc_set *
- ******************************/
+/**********************
+ * hwrm_exec_fwd_resp *
+ **********************/
-/* hwrm_tfc_idx_tbl_alloc_set_input (size:1088b/136B) */
-struct hwrm_tfc_idx_tbl_alloc_set_input {
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
+struct hwrm_exec_fwd_resp_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -57228,59 +63806,25 @@ struct hwrm_tfc_idx_tbl_alloc_set_input {
*/
uint64_t resp_addr;
/*
- * Unique session id for the session created by the
- * firmware. Will be used to track this index table entry
- * only if track type is track_type_sid.
- */
- uint16_t sid;
- /* Control flags. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR_TX
- /*
- * Indicate device data is being sent via DMA, the device
- * data packing does not change.
- */
- #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
- /*
- * CFA resource subtype. For definitions, please see
- * cfa_v3/include/cfa_resources.h.
+ * This is an encapsulated request. This request should
+ * be executed by the HWRM and the response should be
+ * provided in the response buffer inside the encapsulated
+ * request.
*/
- uint8_t subtype;
- /* Describes the type of tracking id to be used */
- uint8_t track_type;
- /* Invalid track type */
- #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_INVALID \
- UINT32_C(0x0)
- /* Tracked by session id */
- #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_SID \
- UINT32_C(0x1)
- /* Tracked by function id */
- #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID \
- UINT32_C(0x2)
- #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_LAST \
- HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID
- /* Reserved */
- uint8_t unused0;
- /* The size of the index table entry in bytes. */
- uint16_t data_size;
- /* The location of the dma buffer */
- uint64_t dma_addr;
+ uint32_t encap_request[26];
/*
- * Index table data located at offset 0. If dma bit is set,
- * then this field contains the DMA buffer pointer.
+ * This value indicates the target id of the response to
+ * the encapsulated request.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
*/
- uint8_t dev_data[104];
+ uint16_t encap_resp_target_id;
+ uint8_t unused_0[6];
} __rte_packed;
-/* hwrm_tfc_idx_tbl_alloc_set_output (size:128b/16B) */
-struct hwrm_tfc_idx_tbl_alloc_set_output {
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
+struct hwrm_exec_fwd_resp_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -57289,31 +63833,24 @@ struct hwrm_tfc_idx_tbl_alloc_set_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * Index table entry allocated by the firmware using the
- * parameters above.
- */
- uint16_t idx_tbl_id;
- /* Reserved */
- uint8_t unused0[5];
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
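
The sketch below shows how a PF might replay a VF's encapsulated command through this message; the encapsulated request buffer and the VF target id come from the driver's forwarding path, and the same HWRM plumbing assumptions as the earlier sketch apply.

static int
bnxt_hwrm_exec_fwd_resp_example(struct bnxt *bp, uint16_t vf_target_id,
				const void *fwd_cmd, size_t fwd_len)
{
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	if (fwd_len > sizeof(req.encap_request))
		return -EINVAL;

	HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
	memcpy(req.encap_request, fwd_cmd, fwd_len);
	/* The response to the encapsulated request is steered to the VF. */
	req.encap_resp_target_id = rte_cpu_to_le_16(vf_target_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}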
/************************
- * hwrm_tfc_idx_tbl_set *
+ * hwrm_reject_fwd_resp *
************************/
-/* hwrm_tfc_idx_tbl_set_input (size:1088b/136B) */
-struct hwrm_tfc_idx_tbl_set_input {
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
+struct hwrm_reject_fwd_resp_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -57342,49 +63879,26 @@ struct hwrm_tfc_idx_tbl_set_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Control flags. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR_TX
- /*
- * Indicate device data is being sent via DMA, the device
- * data packing does not change.
- */
- #define HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
- /*
- * CFA resource subtype. For definitions, please see
- * cfa_v3/include/cfa_resources.h.
- */
- uint8_t subtype;
- /*
- * Session id associated with the firmware. Will be used
- * for validation if the track type matches.
- */
- uint16_t sid;
/*
- * Index table index returned during alloc by the
- * firmware.
+ * This is an encapsulated request. This request should
+ * be rejected by the HWRM and the error response should be
+ * provided in the response buffer inside the encapsulated
+ * request.
*/
- uint16_t idx_tbl_id;
- /* The size of the index table entry in bytes. */
- uint16_t data_size;
- /* The location of the dma buffer */
- uint64_t dma_addr;
+ uint32_t encap_request[26];
/*
- * Index table data located at offset 0. If dma bit is set,
- * then this field contains the DMA buffer pointer.
+ * This value indicates the target id of the response to
+ * the encapsulated request.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
*/
- uint8_t dev_data[104];
+ uint16_t encap_resp_target_id;
+ uint8_t unused_0[6];
} __rte_packed;
-/* hwrm_tfc_idx_tbl_set_output (size:128b/16B) */
-struct hwrm_tfc_idx_tbl_set_output {
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
+struct hwrm_reject_fwd_resp_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -57393,26 +63907,24 @@ struct hwrm_tfc_idx_tbl_set_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/************************
- * hwrm_tfc_idx_tbl_get *
- ************************/
+/*****************
+ * hwrm_fwd_resp *
+ *****************/
-/* hwrm_tfc_idx_tbl_get_input (size:256b/32B) */
-struct hwrm_tfc_idx_tbl_get_input {
+/* hwrm_fwd_resp_input (size:1024b/128B) */
+struct hwrm_fwd_resp_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -57441,48 +63953,41 @@ struct hwrm_tfc_idx_tbl_get_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Control flags. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR \
- UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR_RX \
- UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR_TX \
- UINT32_C(0x1)
- #define HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR_TX
- /*
- * When set use the special access register access to clear
- * the table entry on read.
- */
- #define HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_CLEAR_ON_READ \
- UINT32_C(0x2)
/*
- * CFA resource subtype. For definitions, please see
- * cfa_v3/include/cfa_resources.h.
+ * This value indicates the target id of the encapsulated
+ * response.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
*/
- uint8_t subtype;
+ uint16_t encap_resp_target_id;
/*
- * Session id associated with the firmware. Will be used
- * for validation if the track type matches.
+ * This value indicates the completion ring the encapsulated
+ * response will be optionally completed on. If the value is
+ * -1, then no CR completion shall be generated for the
+ * encapsulated response. Any other value must be a
+ * valid CR ring_id value. If a valid encap_resp_cmpl_ring
+ * is provided, then a CR completion shall be generated for
+ * the encapsulated response.
*/
- uint16_t sid;
+ uint16_t encap_resp_cmpl_ring;
+ /* This field indicates the length of encapsulated response. */
+ uint16_t encap_resp_len;
+ uint8_t unused_0;
+ uint8_t unused_1;
/*
- * Index table index returned during alloc by the
- * firmware.
+ * This is the host address where the encapsulated response
+ * will be written.
+ * This area must be 16B aligned and must be cleared to zero
+ * before the original request is made.
*/
- uint16_t idx_tbl_id;
- /* The size of the index table entry buffer in bytes. */
- uint16_t buffer_size;
- /* The location of the response dma buffer */
- uint64_t dma_addr;
+ uint64_t encap_resp_addr;
+ /* This is an encapsulated response. */
+ uint32_t encap_resp[24];
} __rte_packed;
-/* hwrm_tfc_idx_tbl_get_output (size:128b/16B) */
-struct hwrm_tfc_idx_tbl_get_output {
+/* hwrm_fwd_resp_output (size:128b/16B) */
+struct hwrm_fwd_resp_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -57491,28 +63996,24 @@ struct hwrm_tfc_idx_tbl_get_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* The size of the index table buffer returned in device size bytes. */
- uint16_t data_size;
- /* unused */
- uint8_t unused1[5];
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
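
A minimal sketch of forwarding a prepared response, highlighting the 16B-aligned, zeroed buffer requirement for encap_resp_addr and the use of -1 (all ones) in encap_resp_cmpl_ring to suppress the CR completion. Same plumbing assumptions as the earlier sketches; the helper name is hypothetical.

static int
bnxt_hwrm_fwd_resp_example(struct bnxt *bp, uint16_t target_fid,
			   const void *encap_resp, uint16_t len,
			   uint64_t encap_resp_iova)
{
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	if (len > sizeof(req.encap_resp))
		return -EINVAL;

	HWRM_PREP(&req, HWRM_FWD_RESP, BNXT_USE_CHIMP_MB);
	req.encap_resp_target_id = rte_cpu_to_le_16(target_fid);
	/* -1: do not generate a CR completion for this response. */
	req.encap_resp_cmpl_ring = rte_cpu_to_le_16(UINT16_MAX);
	req.encap_resp_len = rte_cpu_to_le_16(len);
	/* 16B-aligned host buffer, cleared to zero by the caller. */
	req.encap_resp_addr = rte_cpu_to_le_64(encap_resp_iova);
	memcpy(req.encap_resp, encap_resp, len);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}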
-/*************************
- * hwrm_tfc_idx_tbl_free *
- *************************/
+/*****************************
+ * hwrm_fwd_async_event_cmpl *
+ *****************************/
-/* hwrm_tfc_idx_tbl_free_input (size:192b/24B) */
-struct hwrm_tfc_idx_tbl_free_input {
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
+struct hwrm_fwd_async_event_cmpl_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -57541,34 +64042,22 @@ struct hwrm_tfc_idx_tbl_free_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Control flags. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR_TX
- /*
- * CFA resource subtype. For definitions, please see
- * cfa_v3/include/cfa_resources.h.
- */
- uint8_t subtype;
/*
- * Session id associated with the firmware. Will be used
- * for validation if the track type matches.
+ * This value indicates the target id of the encapsulated
+ * asynchronous event.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - Broadcast to all children VFs (only applicable when
+ * a PF is the requester)
*/
- uint16_t sid;
- /* Index table id to be freed by the firmware. */
- uint16_t idx_tbl_id;
- /* Reserved */
- uint8_t unused0[2];
+ uint16_t encap_async_event_target_id;
+ uint8_t unused_0[6];
+ /* This is an encapsulated asynchronous event completion. */
+ uint32_t encap_async_event_cmpl[4];
} __rte_packed;
-/* hwrm_tfc_idx_tbl_free_output (size:128b/16B) */
-struct hwrm_tfc_idx_tbl_free_output {
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
+struct hwrm_fwd_async_event_cmpl_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -57577,52 +64066,24 @@ struct hwrm_tfc_idx_tbl_free_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Reserved */
- uint8_t unused0[7];
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
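
The sketch below broadcasts an encapsulated asynchronous event completion to all child VFs by using 0xffff as the target id, which the field comment above reserves for PF-initiated broadcast; the event payload is supplied by the caller. Same plumbing assumptions as the earlier sketches.

static int
bnxt_hwrm_fwd_async_event_example(struct bnxt *bp,
				  const uint32_t event_cmpl[4])
{
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_FWD_ASYNC_EVENT_CMPL, BNXT_USE_CHIMP_MB);
	/* 0xffff: broadcast to all child VFs when the requester is a PF. */
	req.encap_async_event_target_id = rte_cpu_to_le_16(0xffff);
	memcpy(req.encap_async_event_cmpl, event_cmpl,
	       sizeof(req.encap_async_event_cmpl));
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}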
-/* TruFlow resources request for a global id. */
-/* tfc_global_id_hwrm_req (size:64b/8B) */
-struct tfc_global_id_hwrm_req {
- /* Type of the resource, defined in enum cfa_resource_type HCAPI RM. */
- uint16_t rtype;
- /* Indicates the flow direction in type of cfa_dir. */
- uint16_t dir;
- /* Subtype of the resource type. */
- uint16_t subtype;
- /* Number of the type of resources. */
- uint16_t cnt;
-} __rte_packed;
-
-/* The reserved resources for the global id. */
-/* tfc_global_id_hwrm_rsp (size:64b/8B) */
-struct tfc_global_id_hwrm_rsp {
- /* Type of the resource, defined in enum cfa_resource_type HCAPI RM. */
- uint16_t rtype;
- /* Indicates the flow direction in type of cfa_dir. */
- uint16_t dir;
- /* Subtype of the resource type. */
- uint16_t subtype;
- /* The global id that the resources reserved for. */
- uint16_t id;
-} __rte_packed;
-
-/****************************
- * hwrm_tfc_global_id_alloc *
- ****************************/
+/**************************
+ * hwrm_nvm_raw_write_blk *
+ **************************/
-/* hwrm_tfc_global_id_alloc_input (size:320b/40B) */
-struct hwrm_tfc_global_id_alloc_input {
+/* hwrm_nvm_raw_write_blk_input (size:256b/32B) */
+struct hwrm_nvm_raw_write_blk_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -57651,36 +64112,23 @@ struct hwrm_tfc_global_id_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint16_t sid;
- /* Global domain id. */
- uint16_t global_id;
- /*
- * Defines the array size of the provided req_addr and
- * resv_addr array buffers. Should be set to the number of
- * request entries.
- */
- uint16_t req_cnt;
- /* unused. */
- uint8_t unused0[2];
/*
- * This is the DMA address for the request input data array
- * buffer. Array is of tfc_global_id_hwrm_req type. Size of the
- * array buffer is provided by the 'req_cnt' field in this
- * message.
+ * 64-bit Host Source Address.
+ * This is the location of the source data to be written.
*/
- uint64_t req_addr;
+ uint64_t host_src_addr;
/*
- * This is the DMA address for the resc output data array
- * buffer. Array is of tfc_global_id_hwrm_rsp type. Size of the array
- * buffer is provided by the 'req_cnt' field in this
- * message.
+ * 32-bit Destination Address.
+ * This is the NVRAM byte-offset where the source data will be written
+ * to.
*/
- uint64_t resc_addr;
+ uint32_t dest_addr;
+ /* Length of data to be written, in bytes. */
+ uint32_t len;
} __rte_packed;
-/* hwrm_tfc_global_id_alloc_output (size:128b/16B) */
-struct hwrm_tfc_global_id_alloc_output {
+/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */
+struct hwrm_nvm_raw_write_blk_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -57689,35 +64137,24 @@ struct hwrm_tfc_global_id_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * Size of the returned hwrm_tfc_global_id_req data array. The value
- * cannot exceed the req_cnt defined by the input msg. The data
- * array is returned using the resv_addr specified DMA
- * address also provided by the input msg.
- */
- uint16_t rsp_cnt;
- /* Non-zero if this is the first allocation for the global ID. */
- uint8_t first;
- /* unused. */
- uint8_t unused0[4];
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
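
A sketch of a raw block write, shown only to illustrate the field layout: host_src_iova is the IOVA of a DMA-able staging buffer and dest_addr an absolute NVRAM byte offset. Because this path bypasses the directory, real callers would normally prefer hwrm_nvm_write below. Same plumbing assumptions as the earlier sketches.

static int
bnxt_hwrm_nvm_raw_write_blk_example(struct bnxt *bp, uint64_t host_src_iova,
				    uint32_t dest_addr, uint32_t len)
{
	struct hwrm_nvm_raw_write_blk_input req = {0};
	struct hwrm_nvm_raw_write_blk_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_NVM_RAW_WRITE_BLK, BNXT_USE_CHIMP_MB);
	req.host_src_addr = rte_cpu_to_le_64(host_src_iova);
	req.dest_addr = rte_cpu_to_le_32(dest_addr);
	req.len = rte_cpu_to_le_32(len);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}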
-/*********************
- * hwrm_tfc_tcam_set *
- *********************/
+/*****************
+ * hwrm_nvm_read *
+ *****************/
-/* hwrm_tfc_tcam_set_input (size:1088b/136B) */
-struct hwrm_tfc_tcam_set_input {
+/* hwrm_nvm_read_input (size:320b/40B) */
+struct hwrm_nvm_read_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -57747,46 +64184,22 @@ struct hwrm_tfc_tcam_set_input {
*/
uint64_t resp_addr;
/*
- * Session id associated with the firmware. Will be used
- * for validation if the track type matches.
- */
- uint16_t sid;
- /* Logical TCAM ID. */
- uint16_t tcam_id;
- /* Number of bytes in the TCAM key. */
- uint16_t key_size;
- /* Number of bytes in the TCAM result. */
- uint16_t result_size;
- /* Control flags. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_TCAM_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_TCAM_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_TCAM_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TFC_TCAM_SET_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_TCAM_SET_INPUT_FLAGS_DIR_TX
- /* Indicate device data is being sent via DMA. */
- #define HWRM_TFC_TCAM_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
- /*
- * Subtype of TCAM resource. See
- * cfa_v3/include/cfa_resources.h.
- */
- uint8_t subtype;
- /* unused. */
- uint8_t unused0[6];
- /* The location of the response dma buffer */
- uint64_t dma_addr;
- /*
- * TCAM key located at offset 0, mask located at mask_offset
- * and result at result_offset for the device.
+ * 64-bit Host Destination Address.
+ * This is the host address where the data will be written to.
*/
- uint8_t dev_data[96];
+ uint64_t host_dest_addr;
+ /* The 0-based index of the directory entry. */
+ uint16_t dir_idx;
+ uint8_t unused_0[2];
+ /* The NVRAM byte-offset to read from. */
+ uint32_t offset;
+ /* The length of the data to be read, in bytes. */
+ uint32_t len;
+ uint8_t unused_1[4];
} __rte_packed;
-/* hwrm_tfc_tcam_set_output (size:128b/16B) */
-struct hwrm_tfc_tcam_set_output {
+/* hwrm_nvm_read_output (size:128b/16B) */
+struct hwrm_nvm_read_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -57795,26 +64208,24 @@ struct hwrm_tfc_tcam_set_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
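
The sketch below reads len bytes starting at offset within directory entry dir_idx into a caller-provided DMA buffer; allocating that buffer and resolving its IOVA is left to the caller. Same plumbing assumptions as the earlier sketches.

static int
bnxt_hwrm_nvm_read_example(struct bnxt *bp, uint16_t dir_idx, uint32_t offset,
			   uint32_t len, uint64_t dest_iova)
{
	struct hwrm_nvm_read_input req = {0};
	struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
	req.host_dest_addr = rte_cpu_to_le_64(dest_iova);
	req.dir_idx = rte_cpu_to_le_16(dir_idx);
	req.offset = rte_cpu_to_le_32(offset);
	req.len = rte_cpu_to_le_32(len);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}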
/*********************
- * hwrm_tfc_tcam_get *
+ * hwrm_nvm_raw_dump *
*********************/
-/* hwrm_tfc_tcam_get_input (size:192b/24B) */
-struct hwrm_tfc_tcam_get_input {
+/* hwrm_nvm_raw_dump_input (size:256b/32B) */
+struct hwrm_nvm_raw_dump_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -57834,43 +64245,28 @@ struct hwrm_tfc_tcam_get_input {
* * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
* * 0xFFFD - Reserved for user-space HWRM interface
* * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* Control flags. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_TCAM_GET_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_TCAM_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_TCAM_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TFC_TCAM_GET_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_TCAM_GET_INPUT_FLAGS_DIR_TX
+ */
+ uint16_t target_id;
/*
- * Subtype of TCAM resource See
- * cfa_v3/include/cfa_resources.h.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint8_t subtype;
+ uint64_t resp_addr;
/*
- * Session id associated with the firmware. Will be used
- * for validation if the track type matches.
+ * 64-bit Host Destination Address.
+ * This is the host address where the data will be written to.
*/
- uint16_t sid;
- /* Logical TCAM ID. */
- uint16_t tcam_id;
- /* unused. */
- uint8_t unused0[2];
+ uint64_t host_dest_addr;
+ /* 32-bit NVRAM byte-offset to read from. */
+ uint32_t offset;
+ /* Total length of NVRAM contents to be read, in bytes. */
+ uint32_t len;
} __rte_packed;
-/* hwrm_tfc_tcam_get_output (size:2368b/296B) */
-struct hwrm_tfc_tcam_get_output {
+/* hwrm_nvm_raw_dump_output (size:128b/16B) */
+struct hwrm_nvm_raw_dump_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -57879,37 +64275,24 @@ struct hwrm_tfc_tcam_get_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Number of bytes in the TCAM key. */
- uint16_t key_size;
- /* Number of bytes in the TCAM result. */
- uint16_t result_size;
- /* unused. */
- uint8_t unused0[4];
- /*
- * TCAM key located at offset 0, mask located at key_size
- * and result at 2 * key_size for the device.
- */
- uint8_t dev_data[272];
- /* unused. */
- uint8_t unused1[7];
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/***********************
- * hwrm_tfc_tcam_alloc *
- ***********************/
+/****************************
+ * hwrm_nvm_get_dir_entries *
+ ****************************/
-/* hwrm_tfc_tcam_alloc_input (size:256b/32B) */
-struct hwrm_tfc_tcam_alloc_input {
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
+struct hwrm_nvm_get_dir_entries_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -57938,50 +64321,15 @@ struct hwrm_tfc_tcam_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Control flags. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_TCAM_ALLOC_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_TCAM_ALLOC_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_TCAM_ALLOC_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TFC_TCAM_ALLOC_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_TCAM_ALLOC_INPUT_FLAGS_DIR_TX
/*
- * Subtype of TCAM resource. See
- * cfa_v3/include/cfa_resources.h.
- */
- uint8_t subtype;
- /*
- * Unique session id for the session created by the
- * firmware. Will be used to track this index table entry
- * only if track type is track_type_sid.
+ * 64-bit Host Destination Address.
+ * This is the host address where the directory will be written.
*/
- uint16_t sid;
- /* Number of bytes in the TCAM key. */
- uint16_t key_size;
- /* Entry priority. */
- uint16_t priority;
- /* Describes the type of tracking id to be used */
- uint8_t track_type;
- /* Invalid track type */
- #define HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_INVALID \
- UINT32_C(0x0)
- /* Tracked by session id */
- #define HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_SID \
- UINT32_C(0x1)
- /* Tracked by function id */
- #define HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID \
- UINT32_C(0x2)
- #define HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_LAST \
- HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID
- /* Unused. */
- uint8_t unused0[7];
+ uint64_t host_dest_addr;
} __rte_packed;
-/* hwrm_tfc_tcam_alloc_output (size:128b/16B) */
-struct hwrm_tfc_tcam_alloc_output {
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
+struct hwrm_nvm_get_dir_entries_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -57990,31 +64338,24 @@ struct hwrm_tfc_tcam_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * Index table entry allocated by the firmware using the
- * parameters above.
- */
- uint16_t idx;
- /* Reserved */
- uint8_t unused0[5];
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/***************************
- * hwrm_tfc_tcam_alloc_set *
- ***************************/
+/*************************
+ * hwrm_nvm_get_dir_info *
+ *************************/
-/* hwrm_tfc_tcam_alloc_set_input (size:1088b/136B) */
-struct hwrm_tfc_tcam_alloc_set_input {
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dir_info_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -58043,61 +64384,10 @@ struct hwrm_tfc_tcam_alloc_set_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Control flags. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR_TX
- /* Indicate device data is being sent via DMA. */
- #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
- /*
- * Subtype of TCAM resource. See
- * cfa_v3/include/cfa_resources.h.
- */
- uint8_t subtype;
- /*
- * Unique session id for the session created by the
- * firmware. Will be used to track this index table entry
- * only if track type is track_type_sid.
- */
- uint16_t sid;
- /* Number of bytes in the TCAM key. */
- uint16_t key_size;
- /* The size of the TCAM table entry in bytes. */
- uint16_t result_size;
- /* Entry priority. */
- uint16_t priority;
- /* Describes the type of tracking id to be used */
- uint8_t track_type;
- /* Invalid track type */
- #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_INVALID \
- UINT32_C(0x0)
- /* Tracked by session id */
- #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_SID \
- UINT32_C(0x1)
- /* Tracked by function id */
- #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID \
- UINT32_C(0x2)
- #define HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_LAST \
- HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID
- /* Unused */
- uint8_t unused[5];
- /* The location of the response dma buffer */
- uint64_t dma_addr;
- /*
- * Index table data located at offset 0. If dma bit is set,
- * then this field contains the DMA buffer pointer.
- */
- uint8_t dev_data[96];
} __rte_packed;
-/* hwrm_tfc_tcam_alloc_set_output (size:128b/16B) */
-struct hwrm_tfc_tcam_alloc_set_output {
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
+struct hwrm_nvm_get_dir_info_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -58106,28 +64396,28 @@ struct hwrm_tfc_tcam_alloc_set_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Logical TCAM ID. */
- uint16_t tcam_id;
- /* Reserved */
- uint8_t unused0[5];
+ /* Number of directory entries in the directory. */
+ uint32_t entries;
+ /* Size of each directory entry, in bytes. */
+ uint32_t entry_length;
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
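
The two structures above are naturally used together: hwrm_nvm_get_dir_info sizes the directory, and hwrm_nvm_get_dir_entries then DMAs it into a host buffer of at least entries * entry_length bytes. The sketch below covers the first step and notes the second; same plumbing assumptions as the earlier sketches, and the helper name is hypothetical.

static int
bnxt_hwrm_nvm_dir_sizes_example(struct bnxt *bp, uint32_t *entries,
				uint32_t *entry_length)
{
	struct hwrm_nvm_get_dir_info_input req = {0};
	struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	*entries = rte_le_to_cpu_32(resp->entries);
	*entry_length = rte_le_to_cpu_32(resp->entry_length);
	HWRM_UNLOCK();

	/*
	 * The caller would then allocate a DMA buffer of at least
	 * (*entries) * (*entry_length) bytes and pass its IOVA in
	 * hwrm_nvm_get_dir_entries_input.host_dest_addr to fetch the
	 * directory itself.
	 */
	return rc;
}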
-/**********************
- * hwrm_tfc_tcam_free *
- **********************/
+/******************
+ * hwrm_nvm_write *
+ ******************/
-/* hwrm_tfc_tcam_free_input (size:192b/24B) */
-struct hwrm_tfc_tcam_free_input {
+/* hwrm_nvm_write_input (size:448b/56B) */
+struct hwrm_nvm_write_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -58156,34 +64446,95 @@ struct hwrm_tfc_tcam_free_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Control flags. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_TCAM_FREE_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_TCAM_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_TCAM_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TFC_TCAM_FREE_INPUT_FLAGS_DIR_LAST \
- HWRM_TFC_TCAM_FREE_INPUT_FLAGS_DIR_TX
/*
- * Subtype of TCAM resource. See
- * cfa_v3/include/cfa_resources.h.
+ * 64-bit Host Source Address.
+ * This is where the source data is.
*/
- uint8_t subtype;
+ uint64_t host_src_addr;
/*
- * Session id associated with the firmware. Will be used
- * for validation if the track type matches.
+ * The Directory Entry Type (valid values are defined in the
+ * bnxnvm_directory_type enum defined in the file bnxnvm_defs.h).
*/
- uint16_t sid;
- /* Logical TCAM ID. */
- uint16_t tcam_id;
- /* Reserved */
- uint8_t unused0[2];
+ uint16_t dir_type;
+ /*
+ * Directory ordinal.
+ * The 0-based instance of the combined Directory Entry Type and
+ * Extension.
+ */
+ uint16_t dir_ordinal;
+ /*
+ * The Directory Entry Extension flags (see BNX_DIR_EXT_* in the file
+ * bnxnvm_defs.h).
+ */
+ uint16_t dir_ext;
+ /*
+ * Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the file
+ * bnxnvm_defs.h).
+ */
+ uint16_t dir_attr;
+ /*
+ * Length of data to write, in bytes. May be less than or equal to the
+ * allocated size for the directory entry.
+ * The data length stored in the directory entry will be updated to
+ * reflect this value once the write is complete.
+ */
+ uint32_t dir_data_length;
+ /* Option. */
+ uint16_t option;
+ uint16_t flags;
+ /*
+ * When this bit is '1', the original active image
+ * will not be removed. TBD: what purpose is this?
+ */
+ #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG \
+ UINT32_C(0x1)
+ /*
+	 * This flag indicates the sender wants to modify a contiguous
+	 * NVRAM area using a batch of these HWRM requests. The offset of
+	 * each request must be contiguous with the end of the previous
+	 * request's range. Firmware does not update the directory entry
+	 * until it receives the last request, which is indicated by the
+	 * batch_last flag. This flag is typically set when the sender does
+	 * not have a block of memory big enough to hold the entire NVRAM
+	 * data to send at one time.
+ */
+ #define HWRM_NVM_WRITE_INPUT_FLAGS_BATCH_MODE \
+ UINT32_C(0x2)
+ /*
+ * This flag can be used only when the batch_mode flag is set. It
+ * indicates this request is the last of batch requests.
+ */
+ #define HWRM_NVM_WRITE_INPUT_FLAGS_BATCH_LAST \
+ UINT32_C(0x4)
+ /*
+ * The requested length of the allocated NVM for the item, in bytes.
+ * This value may be greater than or equal to the specified data
+ * length (dir_data_length).
+ * If this value is less than the specified data length, it will be
+ * ignored. The response will contain the actual allocated item length,
+ * which may be greater than the requested item length.
+ * The purpose for allocating more than the required number of bytes
+ * for an item's data is to pre-allocate extra storage (padding) to
+ * accommodate the potential future growth of an item (e.g. upgraded
+ * firmware with a size increase, log growth, expanded configuration
+ * data).
+ */
+ uint32_t dir_item_length;
+	/*
+	 * 32-bit offset into the data blob from which data is being written.
+	 * Only valid for batch mode; for non-batch writes it is "don't care".
+	 */
+	uint32_t offset;
+	/*
+	 * Length of data to be written. Should be non-zero.
+	 * Only valid for batch mode; for non-batch writes it is "don't care".
+	 */
+ uint32_t len;
+ uint32_t unused_0;
} __rte_packed;
-/* hwrm_tfc_tcam_free_output (size:128b/16B) */
-struct hwrm_tfc_tcam_free_output {
+/* hwrm_nvm_write_output (size:128b/16B) */
+struct hwrm_nvm_write_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -58192,26 +64543,52 @@ struct hwrm_tfc_tcam_free_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Reserved */
- uint8_t unused0[7];
+ /*
+ * Length of the allocated NVM for the item, in bytes. The value may be
+ * greater than or equal to the specified data length or the requested
+ * item length.
+ * The actual item length used when creating a new directory entry will
+ * be a multiple of an NVM block size.
+ */
+ uint32_t dir_item_length;
+ /* The directory index of the created or modified item. */
+ uint16_t dir_idx;
+ uint8_t unused_0;
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field
- * is written last.
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/******************************
- * hwrm_tunnel_dst_port_query *
- ******************************/
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
+struct hwrm_nvm_write_cmd_err {
+ /*
+ * command specific error codes that goes to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* Unable to complete operation due to fragmentation */
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1)
+ /* nvm is completely full. */
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2)
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE
+ uint8_t unused_0[7];
+} __rte_packed;
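
The batch-mode flags above describe a chunked write protocol: every request carries BATCH_MODE, only the final one adds BATCH_LAST, and each chunk's offset must continue exactly where the previous one ended. The sketch below shows one way such a loop could look; whether dir_data_length must repeat the full image length on every chunk, the staging of each chunk into a single DMA buffer, and the helper name are assumptions, and the same HWRM plumbing assumptions as the earlier sketches apply.

static int
bnxt_hwrm_nvm_write_batched_example(struct bnxt *bp, uint16_t dir_type,
				    uint16_t dir_ordinal, uint16_t dir_ext,
				    uint16_t dir_attr, uint64_t chunk_iova,
				    uint32_t chunk_len, uint32_t total_len)
{
	uint32_t off;
	int rc = 0;

	for (off = 0; off < total_len; off += chunk_len) {
		struct hwrm_nvm_write_input req = {0};
		struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
		uint32_t len = RTE_MIN(chunk_len, total_len - off);
		uint16_t flags = HWRM_NVM_WRITE_INPUT_FLAGS_BATCH_MODE;

		if (off + len == total_len)
			flags |= HWRM_NVM_WRITE_INPUT_FLAGS_BATCH_LAST;

		/* The caller is assumed to have copied bytes [off, off + len)
		 * of the image into the buffer at chunk_iova.
		 */
		HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
		req.host_src_addr = rte_cpu_to_le_64(chunk_iova);
		req.dir_type = rte_cpu_to_le_16(dir_type);
		req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
		req.dir_ext = rte_cpu_to_le_16(dir_ext);
		req.dir_attr = rte_cpu_to_le_16(dir_attr);
		req.dir_data_length = rte_cpu_to_le_32(total_len);
		req.flags = rte_cpu_to_le_16(flags);
		req.offset = rte_cpu_to_le_32(off);
		req.len = rte_cpu_to_le_32(len);
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
					    BNXT_USE_CHIMP_MB);
		HWRM_CHECK_RESULT();
		HWRM_UNLOCK();
	}
	return rc;
}

Per hwrm_nvm_write_cmd_err above, an exhausted or fragmented NVM would surface as the NO_SPACE or FRAG_ERR code in the command-specific error field of the common HWRM error response.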
+
+/*******************
+ * hwrm_nvm_modify *
+ *******************/
-/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_query_input {
+/* hwrm_nvm_modify_input (size:320b/40B) */
+struct hwrm_nvm_modify_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -58240,53 +64617,42 @@ struct hwrm_tunnel_dst_port_query_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Tunnel Type. */
- uint8_t tunnel_type;
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \
- UINT32_C(0x1)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \
- UINT32_C(0x5)
- /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \
- UINT32_C(0x9)
- /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1 \
- UINT32_C(0xa)
- /* Use fixed layer 2 ether type of 0xFFFF */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_L2_ETYPE \
- UINT32_C(0xb)
- /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \
- UINT32_C(0xc)
- /* Custom GRE uses UPAR to parse customized GRE packets */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_CUSTOM_GRE \
- UINT32_C(0xd)
- /* Enhanced Common Packet Radio Interface (eCPRI) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ECPRI \
- UINT32_C(0xe)
- /* IPv6 Segment Routing (SRv6) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_SRV6 \
- UINT32_C(0xf)
- /* Generic Protocol Extension for VXLAN (VXLAN-GPE) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_GPE \
- UINT32_C(0x10)
- /* Generic Routing Encapsulation */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GRE \
- UINT32_C(0x11)
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_LAST \
- HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GRE
/*
- * This field is used to specify the next protocol value defined in the
- * corresponding RFC spec for the applicable tunnel type.
+ * 64-bit Host Source Address.
+ * This is where the modified data is.
*/
- uint8_t tunnel_next_proto;
- uint8_t unused_0[6];
+ uint64_t host_src_addr;
+ /* 16-bit directory entry index. */
+ uint16_t dir_idx;
+ uint16_t flags;
+ /*
+	 * This flag indicates the sender wants to modify a contiguous NVRAM
+	 * area using a batch of these HWRM requests. The offset of each
+	 * request must be contiguous with the end of the previous request's
+	 * range. Firmware does not update the directory entry until it
+	 * receives the last request, which is indicated by the batch_last
+	 * flag.
+	 * This flag is typically set when the sender does not have a block
+	 * of memory big enough to hold the entire NVRAM data to send at one
+	 * time.
+ */
+ #define HWRM_NVM_MODIFY_INPUT_FLAGS_BATCH_MODE UINT32_C(0x1)
+ /*
+ * This flag can be used only when the batch_mode flag is set.
+ * It indicates this request is the last of batch requests.
+ */
+ #define HWRM_NVM_MODIFY_INPUT_FLAGS_BATCH_LAST UINT32_C(0x2)
+ /* 32-bit NVRAM byte-offset to modify content from. */
+ uint32_t offset;
+ /*
+ * Length of data to be modified, in bytes. The length shall
+ * be non-zero.
+ */
+ uint32_t len;
+ uint8_t unused_1[4];
} __rte_packed;
-/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_query_output {
+/* hwrm_nvm_modify_output (size:128b/16B) */
+struct hwrm_nvm_modify_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -58295,87 +64661,24 @@ struct hwrm_tunnel_dst_port_query_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * This field represents the identifier of L4 destination port
- * used for the given tunnel type. This field is valid for
- * specific tunnel types that use layer 4 (e.g. UDP)
- * transports for tunneling.
- */
- uint16_t tunnel_dst_port_id;
- /*
- * This field represents the value of L4 destination port
- * identified by tunnel_dst_port_id. This field is valid for
- * specific tunnel types that use layer 4 (e.g. UDP)
- * transports for tunneling.
- * This field is in network byte order.
- *
- * A value of 0 means that the destination port is not
- * configured.
- */
- uint16_t tunnel_dst_port_val;
- /*
- * This field represents the UPAR usage status.
- * Available UPARs on wh+ are UPAR0 and UPAR1
- * Available UPARs on Thor are UPAR0 to UPAR3
- * Available UPARs on Thor2 are UPAR0 to UPAR7
- */
- uint8_t upar_in_use;
- /* This bit will be '1' when UPAR0 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR0 \
- UINT32_C(0x1)
- /* This bit will be '1' when UPAR1 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR1 \
- UINT32_C(0x2)
- /* This bit will be '1' when UPAR2 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR2 \
- UINT32_C(0x4)
- /* This bit will be '1' when UPAR3 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR3 \
- UINT32_C(0x8)
- /* This bit will be '1' when UPAR4 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR4 \
- UINT32_C(0x10)
- /* This bit will be '1' when UPAR5 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR5 \
- UINT32_C(0x20)
- /* This bit will be '1' when UPAR6 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR6 \
- UINT32_C(0x40)
- /* This bit will be '1' when UPAR7 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR7 \
- UINT32_C(0x80)
- /*
- * This field is used to convey the status of non udp port based
- * tunnel parsing at chip level and at function level.
- */
- uint8_t status;
- /* This bit will be '1' when tunnel parsing is enabled globally. */
- #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_STATUS_CHIP_LEVEL \
- UINT32_C(0x1)
- /*
- * This bit will be '1' when tunnel parsing is enabled
- * on the corresponding function.
- */
- #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_STATUS_FUNC_LEVEL \
- UINT32_C(0x2)
- uint8_t unused_0;
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
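
For contrast with the batched write above, the sketch below issues a single non-batch modify of len bytes at offset within directory entry dir_idx; the modified bytes are staged in a DMA buffer by the caller. Same plumbing assumptions as the earlier sketches.

static int
bnxt_hwrm_nvm_modify_example(struct bnxt *bp, uint16_t dir_idx,
			     uint32_t offset, uint32_t len, uint64_t src_iova)
{
	struct hwrm_nvm_modify_input req = {0};
	struct hwrm_nvm_modify_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_NVM_MODIFY, BNXT_USE_CHIMP_MB);
	req.host_src_addr = rte_cpu_to_le_64(src_iova);
	req.dir_idx = rte_cpu_to_le_16(dir_idx);
	/* flags left at zero: this is not part of a batch-mode sequence. */
	req.offset = rte_cpu_to_le_32(offset);
	req.len = rte_cpu_to_le_32(len);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}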
-/******************************
- * hwrm_tunnel_dst_port_alloc *
- ******************************/
+/***************************
+ * hwrm_nvm_find_dir_entry *
+ ***************************/
-/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_alloc_input {
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -58404,64 +64707,42 @@ struct hwrm_tunnel_dst_port_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Tunnel Type. */
- uint8_t tunnel_type;
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
- UINT32_C(0x1)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
- UINT32_C(0x5)
- /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
- UINT32_C(0x9)
- /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \
- UINT32_C(0xa)
- /* Use fixed layer 2 ether type of 0xFFFF */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE \
- UINT32_C(0xb)
- /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \
- UINT32_C(0xc)
- /* Custom GRE uses UPAR to parse customized GRE packets. This is not supported. */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_CUSTOM_GRE \
- UINT32_C(0xd)
- /* Enhanced Common Packet Radio Interface (eCPRI) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI \
- UINT32_C(0xe)
- /* IPv6 Segment Routing (SRv6) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_SRV6 \
- UINT32_C(0xf)
- /* Generic Protocol Extension for VXLAN (VXLAN-GPE) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE \
- UINT32_C(0x10)
- /* Generic Routing Encapsulation */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GRE \
- UINT32_C(0x11)
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_LAST \
- HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GRE
+ uint32_t enables;
/*
- * This field is used to specify the next protocol value defined in the
- * corresponding RFC spec for the applicable tunnel type.
+ * This bit must be '1' for the dir_idx_valid field to be
+ * configured.
*/
- uint8_t tunnel_next_proto;
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID \
+ UINT32_C(0x1)
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ /* Directory Entry (Image) Type */
+ uint16_t dir_type;
/*
- * This field represents the value of L4 destination port used
- * for the given tunnel type. This field is valid for
- * specific tunnel types that use layer 4 (e.g. UDP)
- * transports for tunneling.
- *
- * This field is in network byte order.
- *
- * A value of 0 shall fail the command.
+ * Directory ordinal.
+ * The instance of this Directory Type
*/
- uint16_t tunnel_dst_port_val;
- uint8_t unused_0[4];
+ uint16_t dir_ordinal;
+ /* The Directory Entry Extension flags. */
+ uint16_t dir_ext;
+ /* This value indicates the search option using dir_ordinal. */
+ uint8_t opt_ordinal;
+ /* This value indicates the search option using dir_ordinal. */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_MASK UINT32_C(0x3)
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_SFT 0
+ /* Equal to specified ordinal value. */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_EQ UINT32_C(0x0)
+ /* Greater than or equal to specified ordinal value */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GE UINT32_C(0x1)
+ /* Greater than specified ordinal value */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT UINT32_C(0x2)
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_LAST \
+ HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT
+ uint8_t unused_0[3];
} __rte_packed;
-/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_alloc_output {
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -58470,76 +64751,38 @@ struct hwrm_tunnel_dst_port_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
+ /* Allocated NVRAM for this directory entry, in bytes. */
+ uint32_t dir_item_length;
+ /* Size of the stored data for this directory entry, in bytes. */
+ uint32_t dir_data_length;
/*
- * Identifier of a tunnel L4 destination port value. Only applies to tunnel
- * types that has l4 destination port parameters.
- */
- uint16_t tunnel_dst_port_id;
- /* Error information */
- uint8_t error_info;
- /* No error */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_SUCCESS \
- UINT32_C(0x0)
- /* Tunnel port is already allocated */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_ALLOCATED \
- UINT32_C(0x1)
- /* Out of resources error */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_NO_RESOURCE \
- UINT32_C(0x2)
- /* Tunnel type is already enabled */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_ENABLED \
- UINT32_C(0x3)
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_LAST \
- HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_ENABLED
- /*
- * This field represents the UPAR usage status.
- * Available UPARs on wh+ are UPAR0 and UPAR1
- * Available UPARs on Thor are UPAR0 to UPAR3
- * Available UPARs on Thor2 are UPAR0 to UPAR7
+ * Firmware version.
+ * Only valid if the directory entry is for embedded firmware stored
+ * in APE_BIN Format.
*/
- uint8_t upar_in_use;
- /* This bit will be '1' when UPAR0 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR0 \
- UINT32_C(0x1)
- /* This bit will be '1' when UPAR1 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR1 \
- UINT32_C(0x2)
- /* This bit will be '1' when UPAR2 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR2 \
- UINT32_C(0x4)
- /* This bit will be '1' when UPAR3 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR3 \
- UINT32_C(0x8)
- /* This bit will be '1' when UPAR4 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR4 \
- UINT32_C(0x10)
- /* This bit will be '1' when UPAR5 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR5 \
- UINT32_C(0x20)
- /* This bit will be '1' when UPAR6 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR6 \
- UINT32_C(0x40)
- /* This bit will be '1' when UPAR7 is IN_USE */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_UPAR_IN_USE_UPAR7 \
- UINT32_C(0x80)
- uint8_t unused_0[3];
+ uint32_t fw_ver;
+ /* Directory ordinal. */
+ uint16_t dir_ordinal;
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
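
Editor's note: the two length fields above are easy to conflate; dir_item_length is the NVRAM space allocated to the entry, while dir_data_length is the payload actually stored. A minimal driver-side sketch of sizing a read buffer from this response follows; the helper name and the use of rte_zmalloc() are illustrative assumptions, not part of this patch.

/* Sketch only: size a host buffer from hwrm_nvm_find_dir_entry_output. */
static void *
bnxt_nvm_alloc_item_buf(const struct hwrm_nvm_find_dir_entry_output *resp,
			uint32_t *len)
{
	/* dir_data_length is the stored payload; dir_item_length may be
	 * larger because it reflects the allocated NVRAM space.
	 */
	*len = rte_le_to_cpu_32(resp->dir_data_length);
	return rte_zmalloc("bnxt_nvm_item", *len, 0);
}
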
-/*****************************
- * hwrm_tunnel_dst_port_free *
- *****************************/
+/****************************
+ * hwrm_nvm_erase_dir_entry *
+ ****************************/
-/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
-struct hwrm_tunnel_dst_port_free_input {
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
+struct hwrm_nvm_erase_dir_entry_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -58568,58 +64811,13 @@ struct hwrm_tunnel_dst_port_free_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Tunnel Type. */
- uint8_t tunnel_type;
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN \
- UINT32_C(0x1)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE \
- UINT32_C(0x5)
- /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
- UINT32_C(0x9)
- /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \
- UINT32_C(0xa)
- /* Use fixed layer 2 ether type of 0xFFFF */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_L2_ETYPE \
- UINT32_C(0xb)
- /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 \
- UINT32_C(0xc)
- /* Custom GRE uses UPAR to parse customized GRE packets. This is not supported. */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_CUSTOM_GRE \
- UINT32_C(0xd)
- /* Enhanced Common Packet Radio Interface (eCPRI) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ECPRI \
- UINT32_C(0xe)
- /* IPv6 Segment Routing (SRv6) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_SRV6 \
- UINT32_C(0xf)
- /* Generic Protocol Extension for VXLAN (VXLAN-GPE) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_GPE \
- UINT32_C(0x10)
- /* Generic Routing Encapsulation */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GRE \
- UINT32_C(0x11)
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_LAST \
- HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GRE
- /*
- * This field is used to specify the next protocol value defined in the
- * corresponding RFC spec for the applicable tunnel type.
- */
- uint8_t tunnel_next_proto;
- /*
- * Identifier of a tunnel L4 destination port value. Only applies to tunnel
- * types that has l4 destination port parameters.
- */
- uint16_t tunnel_dst_port_id;
- uint8_t unused_0[4];
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ uint8_t unused_0[6];
} __rte_packed;
-/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
-struct hwrm_tunnel_dst_port_free_output {
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_erase_dir_entry_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -58628,181 +64826,24 @@ struct hwrm_tunnel_dst_port_free_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Error information */
- uint8_t error_info;
- /* No error */
- #define HWRM_TUNNEL_DST_PORT_FREE_OUTPUT_ERROR_INFO_SUCCESS \
- UINT32_C(0x0)
- /* Not owner error */
- #define HWRM_TUNNEL_DST_PORT_FREE_OUTPUT_ERROR_INFO_ERR_NOT_OWNER \
- UINT32_C(0x1)
- /* Not allocated error */
- #define HWRM_TUNNEL_DST_PORT_FREE_OUTPUT_ERROR_INFO_ERR_NOT_ALLOCATED \
- UINT32_C(0x2)
- #define HWRM_TUNNEL_DST_PORT_FREE_OUTPUT_ERROR_INFO_LAST \
- HWRM_TUNNEL_DST_PORT_FREE_OUTPUT_ERROR_INFO_ERR_NOT_ALLOCATED
- uint8_t unused_1[6];
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/* Periodic statistics context DMA to host. */
-/* ctx_hw_stats (size:1280b/160B) */
-struct ctx_hw_stats {
- /* Number of received unicast packets */
- uint64_t rx_ucast_pkts;
- /* Number of received multicast packets */
- uint64_t rx_mcast_pkts;
- /* Number of received broadcast packets */
- uint64_t rx_bcast_pkts;
- /* Number of discarded packets on receive path */
- uint64_t rx_discard_pkts;
- /* Number of packets on receive path with error */
- uint64_t rx_error_pkts;
- /* Number of received bytes for unicast traffic */
- uint64_t rx_ucast_bytes;
- /* Number of received bytes for multicast traffic */
- uint64_t rx_mcast_bytes;
- /* Number of received bytes for broadcast traffic */
- uint64_t rx_bcast_bytes;
- /* Number of transmitted unicast packets */
- uint64_t tx_ucast_pkts;
- /* Number of transmitted multicast packets */
- uint64_t tx_mcast_pkts;
- /* Number of transmitted broadcast packets */
- uint64_t tx_bcast_pkts;
- /* Number of packets on transmit path with error */
- uint64_t tx_error_pkts;
- /* Number of discarded packets on transmit path */
- uint64_t tx_discard_pkts;
- /* Number of transmitted bytes for unicast traffic */
- uint64_t tx_ucast_bytes;
- /* Number of transmitted bytes for multicast traffic */
- uint64_t tx_mcast_bytes;
- /* Number of transmitted bytes for broadcast traffic */
- uint64_t tx_bcast_bytes;
- /* Number of TPA packets */
- uint64_t tpa_pkts;
- /* Number of TPA bytes */
- uint64_t tpa_bytes;
- /* Number of TPA events */
- uint64_t tpa_events;
- /* Number of TPA aborts */
- uint64_t tpa_aborts;
-} __rte_packed;
-
-/*
- * Extended periodic statistics context DMA to host. On cards that
- * support TPA v2, additional TPA related stats exist and can be retrieved
- * by DMA of ctx_hw_stats_ext, rather than legacy ctx_hw_stats structure.
- */
-/* ctx_hw_stats_ext (size:1408b/176B) */
-struct ctx_hw_stats_ext {
- /* Number of received unicast packets */
- uint64_t rx_ucast_pkts;
- /* Number of received multicast packets */
- uint64_t rx_mcast_pkts;
- /* Number of received broadcast packets */
- uint64_t rx_bcast_pkts;
- /* Number of discarded packets on receive path */
- uint64_t rx_discard_pkts;
- /* Number of packets on receive path with error */
- uint64_t rx_error_pkts;
- /* Number of received bytes for unicast traffic */
- uint64_t rx_ucast_bytes;
- /* Number of received bytes for multicast traffic */
- uint64_t rx_mcast_bytes;
- /* Number of received bytes for broadcast traffic */
- uint64_t rx_bcast_bytes;
- /* Number of transmitted unicast packets */
- uint64_t tx_ucast_pkts;
- /* Number of transmitted multicast packets */
- uint64_t tx_mcast_pkts;
- /* Number of transmitted broadcast packets */
- uint64_t tx_bcast_pkts;
- /* Number of packets on transmit path with error */
- uint64_t tx_error_pkts;
- /* Number of discarded packets on transmit path */
- uint64_t tx_discard_pkts;
- /* Number of transmitted bytes for unicast traffic */
- uint64_t tx_ucast_bytes;
- /* Number of transmitted bytes for multicast traffic */
- uint64_t tx_mcast_bytes;
- /* Number of transmitted bytes for broadcast traffic */
- uint64_t tx_bcast_bytes;
- /* Number of TPA eligible packets */
- uint64_t rx_tpa_eligible_pkt;
- /* Number of TPA eligible bytes */
- uint64_t rx_tpa_eligible_bytes;
- /* Number of TPA packets */
- uint64_t rx_tpa_pkt;
- /* Number of TPA bytes */
- uint64_t rx_tpa_bytes;
- /* Number of TPA errors */
- uint64_t rx_tpa_errors;
- /* Number of TPA events */
- uint64_t rx_tpa_events;
-} __rte_packed;
-
-/* Periodic Engine statistics context DMA to host. */
-/* ctx_eng_stats (size:512b/64B) */
-struct ctx_eng_stats {
- /*
- * Count of data bytes into the Engine.
- * This includes any user supplied prefix,
- * but does not include any predefined
- * prefix data.
- */
- uint64_t eng_bytes_in;
- /* Count of data bytes out of the Engine. */
- uint64_t eng_bytes_out;
- /*
- * Count, in 4-byte (dword) units, of bytes
- * that are input as auxiliary data.
- * This includes the aux_cmd data.
- */
- uint64_t aux_bytes_in;
- /*
- * Count, in 4-byte (dword) units, of bytes
- * that are output as auxiliary data.
- * This count is the buffer space for aux_data
- * output provided in the RQE, not the actual
- * aux_data written
- */
- uint64_t aux_bytes_out;
- /* Count of number of commands executed. */
- uint64_t commands;
- /*
- * Count of number of error commands.
- * These are the commands with a
- * non-zero status value.
- */
- uint64_t error_commands;
- /*
- * Compression/Encryption Engine usage,
- * the unit is count of clock cycles
- */
- uint64_t cce_engine_usage;
- /*
- * De-Compression/De-cryption Engine usage,
- * the unit is count of clock cycles
- */
- uint64_t cdd_engine_usage;
-} __rte_packed;
-
-/***********************
- * hwrm_stat_ctx_alloc *
- ***********************/
+/*************************
+ * hwrm_nvm_get_dev_info *
+ *************************/
-/* hwrm_stat_ctx_alloc_input (size:320b/40B) */
-struct hwrm_stat_ctx_alloc_input {
+/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dev_info_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -58831,154 +64872,152 @@ struct hwrm_stat_ctx_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+} __rte_packed;
+
+/* hwrm_nvm_get_dev_info_output (size:704b/88B) */
+struct hwrm_nvm_get_dev_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Manufacturer ID. */
+ uint16_t manufacturer_id;
+ /* Device ID. */
+ uint16_t device_id;
+ /* Sector size of the NVRAM device. */
+ uint32_t sector_size;
+ /* Total size, in bytes of the NVRAM device. */
+ uint32_t nvram_size;
+ uint32_t reserved_size;
/*
- * This is the address for statistic block.
- * > For new versions of the chip, this address should be 128B
- * > aligned.
+	 * Available size that can be used, in bytes. The available size is
+	 * the NVRAM size minus the used size and the reserved size.
*/
- uint64_t stats_dma_addr;
+ uint32_t available_size;
+ /* This field represents the major version of NVM cfg */
+ uint8_t nvm_cfg_ver_maj;
+ /* This field represents the minor version of NVM cfg */
+ uint8_t nvm_cfg_ver_min;
+ /* This field represents the update version of NVM cfg */
+ uint8_t nvm_cfg_ver_upd;
+ uint8_t flags;
/*
- * The statistic block update period in ms.
- * e.g. 250ms, 500ms, 750ms, 1000ms.
- * If update_period_ms is 0, then the stats update
- * shall be never done and the DMA address shall not be used.
- * In this case, the stat block can only be read by
- * hwrm_stat_ctx_query command.
- * On Ethernet/L2 based devices:
- * if tpa v2 supported (hwrm_vnic_qcaps[max_aggs_supported]>0),
- * ctx_hw_stats_ext is used for DMA,
- * else
- * ctx_hw_stats is used for DMA.
+ * If set to 1, firmware will provide various firmware version
+ * information stored in the flash.
*/
- uint32_t update_period_ms;
+ #define HWRM_NVM_GET_DEV_INFO_OUTPUT_FLAGS_FW_VER_VALID \
+ UINT32_C(0x1)
/*
- * This field is used to specify statistics context specific
- * configuration flags.
+ * This field represents the board package name stored in the flash.
+ * (ASCII chars with NULL at the end).
*/
- uint8_t stat_ctx_flags;
+ char pkg_name[16];
/*
- * When this bit is set to '1', the statistics context shall be
- * allocated for RoCE traffic only. In this case, traffic other
- * than offloaded RoCE traffic shall not be included in this
- * statistic context.
- * When this bit is set to '0', the statistics context shall be
- * used for network traffic or engine traffic.
+ * This field represents the major version of HWRM firmware, stored in
+ * the flash.
*/
- #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1)
- uint8_t unused_0;
+ uint16_t hwrm_fw_major;
/*
- * This is the size of the structure (ctx_hw_stats or
- * ctx_hw_stats_ext) that the driver has allocated to be used
- * for the periodic DMA updates.
+ * This field represents the minor version of HWRM firmware, stored in
+ * the flash.
*/
- uint16_t stats_dma_length;
- uint16_t flags;
- /* This stats context uses the steering tag specified in the command. */
- #define HWRM_STAT_CTX_ALLOC_INPUT_FLAGS_STEERING_TAG_VALID \
- UINT32_C(0x1)
+ uint16_t hwrm_fw_minor;
/*
- * Steering tag to use for memory transactions from the periodic DMA
- * updates. 'steering_tag_valid' should be set and 'steering_tag'
- * should be specified, when the 'steering_tag_supported' bit is set
- * under the 'flags_ext2' field of the hwrm_func_qcaps_output.
+ * This field represents the build version of HWRM firmware, stored in
+ * the flash.
*/
- uint16_t steering_tag;
- uint32_t unused_1;
-} __rte_packed;
-
-/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
-struct hwrm_stat_ctx_alloc_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* This is the statistics context ID value. */
- uint32_t stat_ctx_id;
- uint8_t unused_0[3];
+ uint16_t hwrm_fw_build;
+ /*
+ * This field can be used to represent firmware branches or customer
+ * specific releases tied to a specific (major, minor, build) version
+ * of the HWRM firmware.
+ */
+ uint16_t hwrm_fw_patch;
+ /*
+ * This field represents the major version of mgmt firmware, stored in
+ * the flash.
+ */
+ uint16_t mgmt_fw_major;
+ /*
+ * This field represents the minor version of mgmt firmware, stored in
+ * the flash.
+ */
+ uint16_t mgmt_fw_minor;
+ /*
+ * This field represents the build version of mgmt firmware, stored in
+ * the flash.
+ */
+ uint16_t mgmt_fw_build;
+ /*
+ * This field can be used to represent firmware branches or customer
+ * specific releases tied to a specific (major, minor, build) version
+ * of the mgmt firmware.
+ */
+ uint16_t mgmt_fw_patch;
+ /*
+ * This field represents the major version of roce firmware, stored in
+ * the flash.
+ */
+ uint16_t roce_fw_major;
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
+ * This field represents the minor version of roce firmware, stored in
+ * the flash.
*/
- uint8_t valid;
-} __rte_packed;
-
-/**********************
- * hwrm_stat_ctx_free *
- **********************/
-
-
-/* hwrm_stat_ctx_free_input (size:192b/24B) */
-struct hwrm_stat_ctx_free_input {
- /* The HWRM command request type. */
- uint16_t req_type;
+ uint16_t roce_fw_minor;
/*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
+ * This field represents the build version of roce firmware, stored in
+ * the flash.
*/
- uint16_t cmpl_ring;
+ uint16_t roce_fw_build;
/*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
+ * This field can be used to represent firmware branches or customer
+ * specific releases tied to a specific (major, minor, build) version
+ * of the roce firmware.
*/
- uint16_t seq_id;
+ uint16_t roce_fw_patch;
/*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
+ * This field represents the major version of network control firmware,
+ * stored in the flash.
*/
- uint16_t target_id;
+ uint16_t netctrl_fw_major;
/*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
+ * This field represents the minor version of network control firmware,
+ * stored in the flash.
*/
- uint64_t resp_addr;
- /* ID of the statistics context that is being queried. */
- uint32_t stat_ctx_id;
- uint8_t unused_0[4];
-} __rte_packed;
-
-/* hwrm_stat_ctx_free_output (size:128b/16B) */
-struct hwrm_stat_ctx_free_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* This is the statistics context ID value. */
- uint32_t stat_ctx_id;
- uint8_t unused_0[3];
+ uint16_t netctrl_fw_minor;
+ /*
+ * This field represents the build version of network control firmware,
+ * stored in the flash.
+ */
+ uint16_t netctrl_fw_build;
+ /*
+ * This field can be used to represent firmware branches or customer
+ * specific releases tied to a specific (major, minor, build) version
+ * of the network control firmware.
+ */
+ uint16_t netctrl_fw_patch;
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
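
Editor's note: a hedged sketch of how a driver might issue this request with the bnxt driver's usual HWRM helpers (HWRM_PREP, bnxt_hwrm_send_message, HWRM_CHECK_RESULT, HWRM_UNLOCK); the helper name bnxt_hwrm_nvm_get_dev_info() and the log call are assumptions for illustration only, not part of this patch.

static int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp)
{
	struct hwrm_nvm_get_dev_info_input req = {0};
	struct hwrm_nvm_get_dev_info_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_NVM_GET_DEV_INFO, BNXT_USE_CHIMP_MB);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	/* The firmware version fields are meaningful only when the
	 * FW_VER_VALID flag is set by the firmware.
	 */
	if (resp->flags & HWRM_NVM_GET_DEV_INFO_OUTPUT_FLAGS_FW_VER_VALID)
		PMD_DRV_LOG(INFO, "NVM pkg %.16s, HWRM fw %u.%u.%u.%u\n",
			    resp->pkg_name,
			    rte_le_to_cpu_16(resp->hwrm_fw_major),
			    rte_le_to_cpu_16(resp->hwrm_fw_minor),
			    rte_le_to_cpu_16(resp->hwrm_fw_build),
			    rte_le_to_cpu_16(resp->hwrm_fw_patch));

	HWRM_UNLOCK();
	return rc;
}
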
-/***********************
- * hwrm_stat_ctx_query *
- ***********************/
+/**************************
+ * hwrm_nvm_mod_dir_entry *
+ **************************/
-/* hwrm_stat_ctx_query_input (size:192b/24B) */
-struct hwrm_stat_ctx_query_input {
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_mod_dir_entry_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -59007,20 +65046,38 @@ struct hwrm_stat_ctx_query_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* ID of the statistics context that is being queried. */
- uint32_t stat_ctx_id;
- uint8_t flags;
+ uint32_t enables;
/*
- * This bit is set to 1 when request is for a counter mask,
- * representing the width of each of the stats counters, rather
- * than counters themselves.
+ * This bit must be '1' for the checksum field to be
+ * configured.
*/
- #define HWRM_STAT_CTX_QUERY_INPUT_FLAGS_COUNTER_MASK UINT32_C(0x1)
- uint8_t unused_0[3];
+ #define HWRM_NVM_MOD_DIR_ENTRY_INPUT_ENABLES_CHECKSUM UINT32_C(0x1)
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ /*
+ * Directory ordinal.
+ * The (0-based) instance of this Directory Type.
+ */
+ uint16_t dir_ordinal;
+ /*
+ * The Directory Entry Extension flags (see BNX_DIR_EXT_* for
+ * extension flag definitions).
+ */
+ uint16_t dir_ext;
+ /*
+ * Directory Entry Attribute flags (see BNX_DIR_ATTR_* for attribute
+ * flag definitions).
+ */
+ uint16_t dir_attr;
+ /*
+ * If valid, then this field updates the checksum
+ * value of the content in the directory entry.
+ */
+ uint32_t checksum;
} __rte_packed;
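
Editor's note: firmware applies only the fields whose bit is set in 'enables'. The sketch below is a hypothetical helper (not part of this patch) that updates just the checksum of an existing entry.

/* Sketch: request firmware to update only the checksum of an entry. */
static void
bnxt_fill_nvm_mod_dir_entry(struct hwrm_nvm_mod_dir_entry_input *req,
			    uint16_t dir_idx, uint16_t dir_ordinal,
			    uint16_t dir_ext, uint16_t dir_attr,
			    uint32_t new_checksum)
{
	req->dir_idx = rte_cpu_to_le_16(dir_idx);
	req->dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
	req->dir_ext = rte_cpu_to_le_16(dir_ext);
	req->dir_attr = rte_cpu_to_le_16(dir_attr);
	/* Only the checksum is flagged as valid via 'enables'. */
	req->enables =
		rte_cpu_to_le_32(HWRM_NVM_MOD_DIR_ENTRY_INPUT_ENABLES_CHECKSUM);
	req->checksum = rte_cpu_to_le_32(new_checksum);
}
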
-/* hwrm_stat_ctx_query_output (size:1408b/176B) */
-struct hwrm_stat_ctx_query_output {
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_mod_dir_entry_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -59029,64 +65086,24 @@ struct hwrm_stat_ctx_query_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Number of transmitted unicast packets */
- uint64_t tx_ucast_pkts;
- /* Number of transmitted multicast packets */
- uint64_t tx_mcast_pkts;
- /* Number of transmitted broadcast packets */
- uint64_t tx_bcast_pkts;
- /* Number of packets discarded in transmit path */
- uint64_t tx_discard_pkts;
- /* Number of packets in transmit path with error */
- uint64_t tx_error_pkts;
- /* Number of transmitted bytes for unicast traffic */
- uint64_t tx_ucast_bytes;
- /* Number of transmitted bytes for multicast traffic */
- uint64_t tx_mcast_bytes;
- /* Number of transmitted bytes for broadcast traffic */
- uint64_t tx_bcast_bytes;
- /* Number of received unicast packets */
- uint64_t rx_ucast_pkts;
- /* Number of received multicast packets */
- uint64_t rx_mcast_pkts;
- /* Number of received broadcast packets */
- uint64_t rx_bcast_pkts;
- /* Number of packets discarded in receive path */
- uint64_t rx_discard_pkts;
- /* Number of packets in receive path with errors */
- uint64_t rx_error_pkts;
- /* Number of received bytes for unicast traffic */
- uint64_t rx_ucast_bytes;
- /* Number of received bytes for multicast traffic */
- uint64_t rx_mcast_bytes;
- /* Number of received bytes for broadcast traffic */
- uint64_t rx_bcast_bytes;
- /* Number of aggregated unicast packets */
- uint64_t rx_agg_pkts;
- /* Number of aggregated unicast bytes */
- uint64_t rx_agg_bytes;
- /* Number of aggregation events */
- uint64_t rx_agg_events;
- /* Number of aborted aggregations */
- uint64_t rx_agg_aborts;
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/***************************
- * hwrm_stat_ext_ctx_query *
- ***************************/
+/**************************
+ * hwrm_nvm_verify_update *
+ **************************/
-/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
-struct hwrm_stat_ext_ctx_query_input {
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
+struct hwrm_nvm_verify_update_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -59115,21 +65132,28 @@ struct hwrm_stat_ext_ctx_query_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* ID of the extended statistics context that is being queried. */
- uint32_t stat_ctx_id;
- uint8_t flags;
+ /* Directory Entry Type, to be verified. */
+ uint16_t dir_type;
/*
- * This bit is set to 1 when request is for a counter mask,
- * representing the width of each of the stats counters, rather
- * than counters themselves.
+ * Directory ordinal.
+ * The instance of the Directory Type to be verified.
*/
- #define HWRM_STAT_EXT_CTX_QUERY_INPUT_FLAGS_COUNTER_MASK \
- UINT32_C(0x1)
- uint8_t unused_0[3];
+ uint16_t dir_ordinal;
+ /*
+ * The Directory Entry Extension flags.
+ * The "UPDATE" extension flag must be set in this value.
+	 * A corresponding directory entry with the same type and ordinal
+	 * values but *without* the "UPDATE" extension flag must also exist.
+	 * The other flags of the extension must be identical between the
+	 * active and update entries.
+ */
+ uint16_t dir_ext;
+ uint8_t unused_0[2];
} __rte_packed;
-/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
-struct hwrm_stat_ext_ctx_query_output {
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
+struct hwrm_nvm_verify_update_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -59138,68 +65162,24 @@ struct hwrm_stat_ext_ctx_query_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Number of received unicast packets */
- uint64_t rx_ucast_pkts;
- /* Number of received multicast packets */
- uint64_t rx_mcast_pkts;
- /* Number of received broadcast packets */
- uint64_t rx_bcast_pkts;
- /* Number of discarded packets on receive path */
- uint64_t rx_discard_pkts;
- /* Number of packets on receive path with error */
- uint64_t rx_error_pkts;
- /* Number of received bytes for unicast traffic */
- uint64_t rx_ucast_bytes;
- /* Number of received bytes for multicast traffic */
- uint64_t rx_mcast_bytes;
- /* Number of received bytes for broadcast traffic */
- uint64_t rx_bcast_bytes;
- /* Number of transmitted unicast packets */
- uint64_t tx_ucast_pkts;
- /* Number of transmitted multicast packets */
- uint64_t tx_mcast_pkts;
- /* Number of transmitted broadcast packets */
- uint64_t tx_bcast_pkts;
- /* Number of packets on transmit path with error */
- uint64_t tx_error_pkts;
- /* Number of discarded packets on transmit path */
- uint64_t tx_discard_pkts;
- /* Number of transmitted bytes for unicast traffic */
- uint64_t tx_ucast_bytes;
- /* Number of transmitted bytes for multicast traffic */
- uint64_t tx_mcast_bytes;
- /* Number of transmitted bytes for broadcast traffic */
- uint64_t tx_bcast_bytes;
- /* Number of TPA eligible packets */
- uint64_t rx_tpa_eligible_pkt;
- /* Number of TPA eligible bytes */
- uint64_t rx_tpa_eligible_bytes;
- /* Number of TPA packets */
- uint64_t rx_tpa_pkt;
- /* Number of TPA bytes */
- uint64_t rx_tpa_bytes;
- /* Number of TPA errors */
- uint64_t rx_tpa_errors;
- /* Number of TPA events */
- uint64_t rx_tpa_events;
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
/***************************
- * hwrm_stat_ctx_eng_query *
+ * hwrm_nvm_install_update *
***************************/
-/* hwrm_stat_ctx_eng_query_input (size:192b/24B) */
-struct hwrm_stat_ctx_eng_query_input {
+/* hwrm_nvm_install_update_input (size:192b/24B) */
+struct hwrm_nvm_install_update_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -59228,141 +65208,248 @@ struct hwrm_stat_ctx_eng_query_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* ID of the statistics context that is being queried. */
- uint32_t stat_ctx_id;
- uint8_t unused_0[4];
-} __rte_packed;
-
-/* hwrm_stat_ctx_eng_query_output (size:640b/80B) */
-struct hwrm_stat_ctx_eng_query_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
/*
- * Count of data bytes into the Engine.
- * This includes any user supplied prefix,
- * but does not include any predefined
- * prefix data.
+ * Installation type. If the value 3 through 0xffff is used,
+ * only packaged items with that type value will be installed and
+ * conditional installation directives for those packaged items
+	 * will be overridden (i.e. 'create' or 'replace' will be treated
+ * as 'install').
*/
- uint64_t eng_bytes_in;
- /* Count of data bytes out of the Engine. */
- uint64_t eng_bytes_out;
+ uint32_t install_type;
/*
- * Count, in 4-byte (dword) units, of bytes
- * that are input as auxiliary data.
- * This includes the aux_cmd data.
+ * Perform a normal package installation. Conditional installation
+ * directives (e.g. 'create' and 'replace') of packaged items
+ * will be followed.
*/
- uint64_t aux_bytes_in;
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_NORMAL UINT32_C(0x0)
/*
- * Count, in 4-byte (dword) units, of bytes
- * that are output as auxiliary data.
- * This count is the buffer space for aux_data
- * output provided in the RQE, not the actual
- * aux_data written
+ * Install all packaged items regardless of installation directive
+ * (i.e. treat all packaged items as though they have an installation
+ * directive of 'install').
*/
- uint64_t aux_bytes_out;
- /* Count of number of commands executed. */
- uint64_t commands;
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL \
+ UINT32_C(0xffffffff)
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_LAST \
+ HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL
+ uint16_t flags;
/*
- * Count of number of error commands.
- * These are the commands with a
- * non-zero status value.
+ * If set to 1, then securely erase all unused locations in
+ * persistent storage.
*/
- uint64_t error_commands;
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ERASE_UNUSED_SPACE \
+ UINT32_C(0x1)
/*
- * Compression/Encryption Engine usage,
- * the unit is count of clock cycles
+ * If set to 1, then unspecified images, images not in the package
+ * file, will be safely deleted.
+ * When combined with erase_unused_space then unspecified images will
+ * be securely erased.
*/
- uint64_t cce_engine_usage;
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_REMOVE_UNUSED_PKG \
+ UINT32_C(0x2)
/*
- * De-Compression/De-cryption Engine usage,
- * the unit is count of clock cycles
+ * If set to 1, FW will defragment the NVM if defragmentation is
+ * required for the update.
+ * Allow additional time for this command to complete if this bit is
+ * set to 1.
*/
- uint64_t cdd_engine_usage;
- uint8_t unused_0[7];
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ALLOWED_TO_DEFRAG \
+ UINT32_C(0x4)
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
+ * If set to 1, FW will verify the package in the "UPDATE" NVM item
+ * without installing it. This flag is for FW internal use only.
+ * Users should not set this flag. The request will otherwise fail.
*/
- uint8_t valid;
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_VERIFY_ONLY \
+ UINT32_C(0x8)
+ uint8_t unused_0[2];
} __rte_packed;
-/***************************
- * hwrm_stat_ctx_clr_stats *
- ***************************/
-
-
-/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
-struct hwrm_stat_ctx_clr_stats_input {
+/* hwrm_nvm_install_update_output (size:192b/24B) */
+struct hwrm_nvm_install_update_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
/* The HWRM command request type. */
uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
+ * Bit-mask of successfully installed items.
+	 * Bit-0 corresponds to the first packaged item, Bit-1 to the second
+	 * item, etc. A value of 0 indicates that no items were successfully
+ * installed.
*/
- uint16_t cmpl_ring;
+ uint64_t installed_items;
+ /* result is 8 b corresponding to BCMRETVAL error codes */
+ uint8_t result;
+ /* There was no problem with the package installation. */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS \
+ UINT32_C(0x0)
+ /* Generic failure */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_FAILURE \
+ UINT32_C(0xff)
+ /* Allocation error malloc failure */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_MALLOC_FAILURE \
+ UINT32_C(0xfd)
+ /* NVM install error due to invalid index */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_INDEX_PARAMETER \
+ UINT32_C(0xfb)
+ /* NVM install error due to invalid type */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_TYPE_PARAMETER \
+ UINT32_C(0xf3)
+ /* Invalid package due to invalid prerequisite */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_PREREQUISITE \
+ UINT32_C(0xf2)
+ /* Invalid package due to invalid file header */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_FILE_HEADER \
+ UINT32_C(0xec)
+ /* Invalid package due to invalid format */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_SIGNATURE \
+ UINT32_C(0xeb)
+ /* Invalid package due to invalid property stream */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_PROP_STREAM \
+ UINT32_C(0xea)
+ /* Invalid package due to invalid property length */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_PROP_LENGTH \
+ UINT32_C(0xe9)
+ /* Invalid package due to invalid manifest */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_MANIFEST \
+ UINT32_C(0xe8)
+ /* Invalid package due to invalid trailer */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_TRAILER \
+ UINT32_C(0xe7)
+ /* Invalid package due to invalid checksum */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_CHECKSUM \
+ UINT32_C(0xe6)
+ /* Invalid package due to invalid item checksum */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_ITEM_CHECKSUM \
+ UINT32_C(0xe5)
+ /* Invalid package due to invalid length */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_DATA_LENGTH \
+ UINT32_C(0xe4)
+ /* Invalid package due to invalid directive */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_DIRECTIVE \
+ UINT32_C(0xe1)
+ /* Invalid device due to unsupported chip revision */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_UNSUPPORTED_CHIP_REV \
+ UINT32_C(0xce)
+ /* Invalid device due to unsupported device ID */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_UNSUPPORTED_DEVICE_ID \
+ UINT32_C(0xcd)
+ /* Invalid device due to unsupported subsystem vendor */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_UNSUPPORTED_SUBSYS_VENDOR \
+ UINT32_C(0xcc)
+ /* Invalid device due to unsupported subsystem ID */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_UNSUPPORTED_SUBSYS_ID \
+ UINT32_C(0xcb)
+ /* Invalid device due to unsupported product ID or customer ID */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_UNSUPPORTED_PLATFORM \
+ UINT32_C(0xc5)
+ /* Invalid package due to duplicate item */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_DUPLICATE_ITEM \
+ UINT32_C(0xc4)
+ /* Invalid package due to zero length item */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_ZERO_LENGTH_ITEM \
+ UINT32_C(0xc3)
+ /* NVM integrity error checksum */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INSTALL_CHECKSUM_ERROR \
+ UINT32_C(0xb9)
+ /* NVM integrity error */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INSTALL_DATA_ERROR \
+ UINT32_C(0xb8)
+ /* Authentication error */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INSTALL_AUTHENTICATION_ERROR \
+ UINT32_C(0xb7)
+ /* NVM install error item not found */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_ITEM_NOT_FOUND \
+ UINT32_C(0xb0)
+ /* NVM install error item locked */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_ITEM_LOCKED \
+ UINT32_C(0xa7)
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_LAST \
+ HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_ITEM_LOCKED
+ /* problem_item is 8 b */
+ uint8_t problem_item;
+ /* There was no problem with any packaged items. */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_NONE \
+ UINT32_C(0x0)
+ /* There was a problem with the NVM package itself. */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE \
+ UINT32_C(0xff)
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_LAST \
+ HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE
+ /* reset_required is 8 b */
+ uint8_t reset_required;
/*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
+ * No reset is required for installed/updated firmware or
+ * microcode to take effect.
*/
- uint16_t seq_id;
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_NONE \
+ UINT32_C(0x0)
/*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
+ * A PCIe reset (e.g. system reboot) is
+ * required for newly installed/updated firmware or
+ * microcode to take effect.
*/
- uint16_t target_id;
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_PCI \
+ UINT32_C(0x1)
/*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
+ * A controller power reset (e.g. system power-cycle) is
+ * required for newly installed/updated firmware or
+ * microcode to take effect. Some newly installed/updated
+ * firmware or microcode may still take effect upon the
+ * next PCIe reset.
*/
- uint64_t resp_addr;
- /* ID of the statistics context that is being queried. */
- uint32_t stat_ctx_id;
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER \
+ UINT32_C(0x2)
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_LAST \
+ HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER
uint8_t unused_0[4];
-} __rte_packed;
-
-/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
-struct hwrm_stat_ctx_clr_stats_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/********************
- * hwrm_pcie_qstats *
- ********************/
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
+struct hwrm_nvm_install_update_cmd_err {
+ /*
+	 * command-specific error codes that go to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN \
+ UINT32_C(0x0)
+ /* Unable to complete operation due to fragmentation */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR \
+ UINT32_C(0x1)
+ /* nvm is completely full. */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE \
+ UINT32_C(0x2)
+ /* Firmware update failed due to Anti-rollback. */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK \
+ UINT32_C(0x3)
+ /* Firmware update does not support voltage regulators on the device. */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT \
+ UINT32_C(0x4)
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT
+ uint8_t unused_0[7];
+} __rte_packed;
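
Editor's note: putting the request, result code and reset_required fields together, a driver-side sketch of a full-package install; the helper name, the bool out-parameter and the flag choices are assumptions modeled on the driver's usual HWRM call pattern, not part of this patch.

static int bnxt_hwrm_nvm_install(struct bnxt *bp, bool *reset_needed)
{
	struct hwrm_nvm_install_update_input req = {0};
	struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_NVM_INSTALL_UPDATE, BNXT_USE_CHIMP_MB);
	/* Install every packaged item; allow defragmentation if needed. */
	req.install_type =
		rte_cpu_to_le_32(HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL);
	req.flags =
		rte_cpu_to_le_16(HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ALLOWED_TO_DEFRAG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	if (resp->result != HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS)
		rc = -EIO;
	/* New images may only take effect after a PCIe or power reset. */
	*reset_needed = resp->reset_required !=
			HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_NONE;

	HWRM_UNLOCK();
	return rc;
}
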
+/******************
+ * hwrm_nvm_flush *
+ ******************/
-/* hwrm_pcie_qstats_input (size:256b/32B) */
-struct hwrm_pcie_qstats_input {
+
+/* hwrm_nvm_flush_input (size:128b/16B) */
+struct hwrm_nvm_flush_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -59391,22 +65478,10 @@ struct hwrm_pcie_qstats_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /*
- * The size of PCIe statistics block in bytes.
- * Firmware will DMA the PCIe statistics to
- * the host with this field size in the response.
- */
- uint16_t pcie_stat_size;
- uint8_t unused_0[6];
- /*
- * This is the host address where
- * PCIe statistics will be stored
- */
- uint64_t pcie_stat_host_addr;
} __rte_packed;
-/* hwrm_pcie_qstats_output (size:128b/16B) */
-struct hwrm_pcie_qstats_output {
+/* hwrm_nvm_flush_output (size:128b/16B) */
+struct hwrm_nvm_flush_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -59415,62 +65490,40 @@ struct hwrm_pcie_qstats_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* The size of PCIe statistics block in bytes. */
- uint16_t pcie_stat_size;
- uint8_t unused_0[5];
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/* PCIe Statistics Formats */
-/* pcie_ctx_hw_stats (size:768b/96B) */
-struct pcie_ctx_hw_stats {
- /* Number of physical layer receiver errors */
- uint64_t pcie_pl_signal_integrity;
- /* Number of DLLP CRC errors detected by Data Link Layer */
- uint64_t pcie_dl_signal_integrity;
- /*
- * Number of TLP LCRC and sequence number errors detected
- * by Data Link Layer
- */
- uint64_t pcie_tl_signal_integrity;
- /* Number of times LTSSM entered Recovery state */
- uint64_t pcie_link_integrity;
- /* Report number of TLP bits that have been transmitted in Mbps */
- uint64_t pcie_tx_traffic_rate;
- /* Report number of TLP bits that have been received in Mbps */
- uint64_t pcie_rx_traffic_rate;
- /* Number of DLLP bytes that have been transmitted */
- uint64_t pcie_tx_dllp_statistics;
- /* Number of DLLP bytes that have been received */
- uint64_t pcie_rx_dllp_statistics;
- /*
- * Number of times spent in each phase of gen3
- * equalization
- */
- uint64_t pcie_equalization_time;
- /* Records the last 16 transitions of the LTSSM */
- uint32_t pcie_ltssm_histogram[4];
+/* hwrm_nvm_flush_cmd_err (size:64b/8B) */
+struct hwrm_nvm_flush_cmd_err {
/*
- * Record the last 8 reasons on why LTSSM transitioned
- * to Recovery
+	 * command-specific error codes that go to
+ * the cmd_err field in Common HWRM Error Response.
*/
- uint64_t pcie_recovery_histogram;
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_FLUSH_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* flush could not be performed */
+ #define HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL UINT32_C(0x1)
+ #define HWRM_NVM_FLUSH_CMD_ERR_CODE_LAST \
+ HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL
+ uint8_t unused_0[7];
} __rte_packed;
-/****************************
- * hwrm_stat_generic_qstats *
- ****************************/
+/*************************
+ * hwrm_nvm_get_variable *
+ *************************/
-/* hwrm_stat_generic_qstats_input (size:256b/32B) */
-struct hwrm_stat_generic_qstats_input {
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
+struct hwrm_nvm_get_variable_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -59500,31 +65553,49 @@ struct hwrm_stat_generic_qstats_input {
*/
uint64_t resp_addr;
/*
- * The size of the generic statistics buffer passed in the
- * generic_stat_host_addr in bytes.
- * Firmware will not exceed this size when it DMAs the
- * statistics structure to the host. The actual DMA size
- * will be returned in the response.
+ * This is the host address where
+	 * the nvm variable will be stored
*/
- uint16_t generic_stat_size;
- uint8_t flags;
+ uint64_t dest_data_addr;
+ /* size of data in bits */
+ uint16_t data_len;
+ /* nvm cfg option number */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_LAST \
+ HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF
/*
- * The bit should be set to 1 when request is for the counter mask
- * representing the width of each of the stats counters, rather
- * than counters themselves.
+ * Number of dimensions for this nvm configuration variable.
+ * This value indicates how many of the indexN values to use.
+ * A value of 0 means that none of the indexN values are valid.
+	 * A value of 1 requires that index0 is valid, a value of 2
+	 * requires that index0 and index1 are valid, and so forth.
*/
- #define HWRM_STAT_GENERIC_QSTATS_INPUT_FLAGS_COUNTER_MASK \
- UINT32_C(0x1)
- uint8_t unused_0[5];
+ uint16_t dimensions;
+ /* index for the 1st dimensions */
+ uint16_t index_0;
+ /* index for the 2nd dimensions */
+ uint16_t index_1;
+ /* index for the 3rd dimensions */
+ uint16_t index_2;
+ /* index for the 4th dimensions */
+ uint16_t index_3;
+ uint8_t flags;
/*
- * This is the host address where
- * generic statistics will be stored
+ * When this bit is set to 1, the factory default value will be
+ * returned, 0 returns the operational value.
*/
- uint64_t generic_stat_host_addr;
+ #define HWRM_NVM_GET_VARIABLE_INPUT_FLAGS_FACTORY_DFLT \
+ UINT32_C(0x1)
+ uint8_t unused_0;
} __rte_packed;
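
Editor's note: a sketch of a caller for this command (the helper name is hypothetical, not part of this patch). Two details worth highlighting: data_len is expressed in bits, not bytes, and firmware only reads as many index_N fields as 'dimensions' declares.

static int bnxt_hwrm_nvm_get_var(struct bnxt *bp, rte_iova_t buf_iova,
				 uint16_t option_num, uint16_t index_0,
				 uint16_t len_bits)
{
	struct hwrm_nvm_get_variable_input req = {0};
	struct hwrm_nvm_get_variable_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(&req, HWRM_NVM_GET_VARIABLE, BNXT_USE_CHIMP_MB);
	req.dest_data_addr = rte_cpu_to_le_64(buf_iova);
	req.data_len = rte_cpu_to_le_16(len_bits);	/* bits, not bytes */
	req.option_num = rte_cpu_to_le_16(option_num);
	req.dimensions = rte_cpu_to_le_16(1);		/* only index_0 is used */
	req.index_0 = rte_cpu_to_le_16(index_0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}
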
-/* hwrm_stat_generic_qstats_output (size:128b/16B) */
-struct hwrm_stat_generic_qstats_output {
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
+struct hwrm_nvm_get_variable_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -59533,123 +65604,63 @@ struct hwrm_stat_generic_qstats_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* The size of Generic Statistics block in bytes. */
- uint16_t generic_stat_size;
- uint8_t unused_0[5];
+ /* size of data of the actual variable retrieved in bits */
+ uint16_t data_len;
+ /*
+ * option_num is the option number for the data retrieved. It is
+ * possible in the future that the option number returned would be
+ * different than requested. This condition could occur if an option is
+ * deprecated and a new option id is defined with similar
+ * characteristics, but has a slightly different definition. This
+ * also makes it convenient for the caller to identify the variable
+ * result with the option id from the response.
+ */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_LAST \
+ HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF
+ uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/* Generic Statistic Format */
-/* generic_sw_hw_stats (size:1408b/176B) */
-struct generic_sw_hw_stats {
- /*
- * This is the number of TLP bytes that have been transmitted for
- * the caller PF.
- */
- uint64_t pcie_statistics_tx_tlp;
- /*
- * This is the number of TLP bytes that have been received
- * for the caller PF.
- */
- uint64_t pcie_statistics_rx_tlp;
- /* Posted Header Flow Control credits available for the caller PF. */
- uint64_t pcie_credit_fc_hdr_posted;
- /* Non-posted Header Flow Control credits available for the caller PF. */
- uint64_t pcie_credit_fc_hdr_nonposted;
- /* Completion Header Flow Control credits available for the caller PF. */
- uint64_t pcie_credit_fc_hdr_cmpl;
- /* Posted Data Flow Control credits available for the caller PF. */
- uint64_t pcie_credit_fc_data_posted;
- /* Non-Posted Data Flow Control credits available for the caller PF. */
- uint64_t pcie_credit_fc_data_nonposted;
- /* Completion Data Flow Control credits available for the caller PF. */
- uint64_t pcie_credit_fc_data_cmpl;
- /*
- * Available Non-posted credit for target flow control reads or
- * config for the caller PF.
- */
- uint64_t pcie_credit_fc_tgt_nonposted;
- /*
- * Available posted data credit for target flow control writes
- * for the caller PF.
- */
- uint64_t pcie_credit_fc_tgt_data_posted;
- /*
- * Available posted header credit for target flow control writes
- * for the caller PF.
- */
- uint64_t pcie_credit_fc_tgt_hdr_posted;
- /* Available completion flow control header credits for the caller PF. */
- uint64_t pcie_credit_fc_cmpl_hdr_posted;
- /* Available completion flow control data credits. */
- uint64_t pcie_credit_fc_cmpl_data_posted;
- /*
- * Displays Time information of the longest completon time from any of
- * the 4 tags for the caller PF. The unit of time recorded is in
- * microseconds.
- */
- uint64_t pcie_cmpl_longest;
- /*
- * Displays Time information of the shortest completon time from any of
- * the 4 tags for the caller PF. The unit of time recorded is in
- * microseconds.
- */
- uint64_t pcie_cmpl_shortest;
- /*
- * This field contains the total number of CFCQ 'misses' observed for
- * all the PF's.
- */
- uint64_t cache_miss_count_cfcq;
- /*
- * This field contains the total number of CFCS 'misses' observed for
- * all the PF's.
- */
- uint64_t cache_miss_count_cfcs;
- /*
- * This field contains the total number of CFCC 'misses' observed for
- * all the PF's.
- */
- uint64_t cache_miss_count_cfcc;
- /*
- * This field contains the total number of CFCM 'misses' observed
- * for all the PF's.
- */
- uint64_t cache_miss_count_cfcm;
- /*
- * Total number of Doorbell messages dropped from the DB FIFO.
- * This counter is only applicable for devices that support
- * the hardware based doorbell drop recovery feature.
- */
- uint64_t hw_db_recov_dbs_dropped;
- /*
- * Total number of doorbell drops serviced.
- * This counter is only applicable for devices that support
- * the hardware based doorbell drop recovery feature.
- */
- uint64_t hw_db_recov_drops_serviced;
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_variable_cmd_err {
/*
- * Total number of dropped doorbells recovered.
- * This counter is only applicable for devices that support
- * the hardware based doorbell drop recovery feature.
+	 * command-specific error codes that go to
+ * the cmd_err field in Common HWRM Error Response.
*/
- uint64_t hw_db_recov_dbs_recovered;
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* variable does not exist */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1)
+ /* configuration is corrupted and the variable cannot be saved */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2)
+ /* length specified is too small */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT UINT32_C(0x3)
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
+ uint8_t unused_0[7];
} __rte_packed;
-/**********************
- * hwrm_exec_fwd_resp *
- **********************/
+/*************************
+ * hwrm_nvm_set_variable *
+ *************************/
-/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
-struct hwrm_exec_fwd_resp_input {
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
+struct hwrm_nvm_set_variable_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -59679,25 +65690,73 @@ struct hwrm_exec_fwd_resp_input {
*/
uint64_t resp_addr;
/*
- * This is an encapsulated request. This request should
- * be executed by the HWRM and the response should be
- * provided in the response buffer inside the encapsulated
- * request.
+ * This is the host address where
+	 * the nvm variable will be copied from
*/
- uint32_t encap_request[26];
+ uint64_t src_data_addr;
+ /* size of data in bits */
+ uint16_t data_len;
+ /* nvm cfg option number */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_LAST \
+ HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF
/*
- * This value indicates the target id of the response to
- * the encapsulated request.
- * 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors
- * 0xFFFF - HWRM
+ * Number of dimensions for this nvm configuration variable.
+ * This value indicates how many of the indexN values to use.
+ * A value of 0 means that none of the indexN values are valid.
+	 * A value of 1 requires that index0 is valid, a value of 2
+	 * requires that index0 and index1 are valid, and so forth.
*/
- uint16_t encap_resp_target_id;
- uint8_t unused_0[6];
+ uint16_t dimensions;
+ /* index for the 1st dimensions */
+ uint16_t index_0;
+ /* index for the 2nd dimensions */
+ uint16_t index_1;
+ /* index for the 3rd dimensions */
+ uint16_t index_2;
+ /* index for the 4th dimensions */
+ uint16_t index_3;
+ uint8_t flags;
+ /*
+ * When this bit is 1, flush internal cache after this write
+ * operation (see hwrm_nvm_flush command.)
+ */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FORCE_FLUSH \
+ UINT32_C(0x1)
+ /* encryption method */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_MASK \
+ UINT32_C(0xe)
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_SFT 1
+ /* No encryption. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_NONE \
+ (UINT32_C(0x0) << 1)
+ /* one-way encryption. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1 \
+ (UINT32_C(0x1) << 1)
+ /* symmetric AES256 encryption. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_AES256 \
+ (UINT32_C(0x2) << 1)
+ /* SHA1 digest appended to plaintext contents, for authentication */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH \
+ (UINT32_C(0x3) << 1)
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_LAST \
+ HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FLAGS_UNUSED_0_MASK \
+ UINT32_C(0x70)
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FLAGS_UNUSED_0_SFT 4
+ /* When this bit is 1, update the factory default region */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FACTORY_DEFAULT \
+ UINT32_C(0x80)
+ uint8_t unused_0;
} __rte_packed;
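
Editor's note: the FORCE_FLUSH bit lets a single write persist immediately instead of relying on a later hwrm_nvm_flush. A small illustrative helper (assumed, not part of this patch) for building the flags byte:

static void
bnxt_fill_nvm_set_var_flags(struct hwrm_nvm_set_variable_input *req,
			    bool write_through)
{
	/* ENCRYPT_MODE_NONE is the zero encoding; written out for clarity. */
	req->flags = HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_NONE;
	if (write_through)
		/* Persist now rather than waiting for hwrm_nvm_flush. */
		req->flags |= HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FORCE_FLUSH;
}
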
-/* hwrm_exec_fwd_resp_output (size:128b/16B) */
-struct hwrm_exec_fwd_resp_output {
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
+struct hwrm_nvm_set_variable_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -59709,21 +65768,39 @@ struct hwrm_exec_fwd_resp_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/************************
- * hwrm_reject_fwd_resp *
- ************************/
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_set_variable_cmd_err {
+ /*
+ * command specific error codes that go to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* variable does not exist */
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1)
+ /* configuration is corrupted and the variable cannot be saved */
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2)
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
+ uint8_t unused_0[7];
+} __rte_packed;
+
+/****************************
+ * hwrm_nvm_validate_option *
+ ****************************/
-/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
-struct hwrm_reject_fwd_resp_input {
+/* hwrm_nvm_validate_option_input (size:320b/40B) */
+struct hwrm_nvm_validate_option_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -59753,25 +65830,43 @@ struct hwrm_reject_fwd_resp_input {
*/
uint64_t resp_addr;
/*
- * This is an encapsulated request. This request should
- * be rejected by the HWRM and the error response should be
- * provided in the response buffer inside the encapsulated
- * request.
+ * This is the host address where
+ * the nvm variable will be copied from
*/
- uint32_t encap_request[26];
+ uint64_t src_data_addr;
+ /* size of data in bits */
+ uint16_t data_len;
+ /* nvm cfg option number */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_0 \
+ UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_LAST \
+ HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF
/*
- * This value indicates the target id of the response to
- * the encapsulated request.
- * 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors
- * 0xFFFF - HWRM
+ * Number of dimensions for this nvm configuration variable.
+ * This value indicates how many of the indexN values to use.
+ * A value of 0 means that none of the indexN values are valid.
+ * A value of 1 requires that index0 is valid, a value of 2
+ * requires that index0 and index1 are valid, and so forth.
*/
- uint16_t encap_resp_target_id;
- uint8_t unused_0[6];
+ uint16_t dimensions;
+ /* index for the 1st dimension */
+ uint16_t index_0;
+ /* index for the 2nd dimension */
+ uint16_t index_1;
+ /* index for the 3rd dimension */
+ uint16_t index_2;
+ /* index for the 4th dimension */
+ uint16_t index_3;
+ uint8_t unused_0[2];
} __rte_packed;
-/* hwrm_reject_fwd_resp_output (size:128b/16B) */
-struct hwrm_reject_fwd_resp_output {
+/* hwrm_nvm_validate_option_output (size:128b/16B) */
+struct hwrm_nvm_validate_option_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -59780,24 +65875,51 @@ struct hwrm_reject_fwd_resp_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ uint8_t result;
+ /*
+ * indicates that the value provided for the option does not match
+ * the saved data.
+ */
+ #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_NOT_MATCH UINT32_C(0x0)
+ /*
+ * indicates that the value provided for the option matches the
+ * saved data.
+ */
+ #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH UINT32_C(0x1)
+ #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_LAST \
+ HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH
+ uint8_t unused_0[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
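For illustration, a caller could interpret the result byte above as follows (response struct taken from this header; error_code checking elided):

#include <stdbool.h>
#include "hsi_struct_def_dpdk.h"

/* True when the supplied option value matches what is stored in NVM. */
static bool
nvm_option_matches(const struct hwrm_nvm_validate_option_output *resp)
{
	return resp->result == HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH;
}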
-/*****************
- * hwrm_fwd_resp *
- *****************/
+/* hwrm_nvm_validate_option_cmd_err (size:64b/8B) */
+struct hwrm_nvm_validate_option_cmd_err {
+ /*
+ * command specific error codes that go to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_LAST \
+ HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN
+ uint8_t unused_0[7];
+} __rte_packed;
+
+/*******************
+ * hwrm_nvm_defrag *
+ *******************/
-/* hwrm_fwd_resp_input (size:1024b/128B) */
-struct hwrm_fwd_resp_input {
+/* hwrm_nvm_defrag_input (size:192b/24B) */
+struct hwrm_nvm_defrag_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -59826,41 +65948,14 @@ struct hwrm_fwd_resp_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /*
- * This value indicates the target id of the encapsulated
- * response.
- * 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors
- * 0xFFFF - HWRM
- */
- uint16_t encap_resp_target_id;
- /*
- * This value indicates the completion ring the encapsulated
- * response will be optionally completed on. If the value is
- * -1, then no CR completion shall be generated for the
- * encapsulated response. Any other value must be a
- * valid CR ring_id value. If a valid encap_resp_cmpl_ring
- * is provided, then a CR completion shall be generated for
- * the encapsulated response.
- */
- uint16_t encap_resp_cmpl_ring;
- /* This field indicates the length of encapsulated response. */
- uint16_t encap_resp_len;
- uint8_t unused_0;
- uint8_t unused_1;
- /*
- * This is the host address where the encapsulated response
- * will be written.
- * This area must be 16B aligned and must be cleared to zero
- * before the original request is made.
- */
- uint64_t encap_resp_addr;
- /* This is an encapsulated response. */
- uint32_t encap_resp[24];
+ uint32_t flags;
+ /* This bit must be '1' to perform NVM defragmentation. */
+ #define HWRM_NVM_DEFRAG_INPUT_FLAGS_DEFRAG UINT32_C(0x1)
+ uint8_t unused_0[4];
} __rte_packed;
-/* hwrm_fwd_resp_output (size:128b/16B) */
-struct hwrm_fwd_resp_output {
+/* hwrm_nvm_defrag_output (size:128b/16B) */
+struct hwrm_nvm_defrag_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -59872,21 +65967,37 @@ struct hwrm_fwd_resp_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/*****************************
- * hwrm_fwd_async_event_cmpl *
- *****************************/
+/* hwrm_nvm_defrag_cmd_err (size:64b/8B) */
+struct hwrm_nvm_defrag_cmd_err {
+ /*
+ * command specific error codes that go to
+ * the cmd_err field in Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_DEFRAG_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* NVM defragmentation could not be performed */
+ #define HWRM_NVM_DEFRAG_CMD_ERR_CODE_FAIL UINT32_C(0x1)
+ #define HWRM_NVM_DEFRAG_CMD_ERR_CODE_LAST \
+ HWRM_NVM_DEFRAG_CMD_ERR_CODE_FAIL
+ uint8_t unused_0[7];
+} __rte_packed;
+
+/*******************************
+ * hwrm_nvm_get_vpd_field_info *
+ *******************************/
-/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
-struct hwrm_fwd_async_event_cmpl_input {
+/* hwrm_nvm_get_vpd_field_info_input (size:192b/24B) */
+struct hwrm_nvm_get_vpd_field_info_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -59916,21 +66027,18 @@ struct hwrm_fwd_async_event_cmpl_input {
*/
uint64_t resp_addr;
/*
- * This value indicates the target id of the encapsulated
- * asynchronous event.
- * 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors
- * 0xFFFF - Broadcast to all children VFs (only applicable when
- * a PF is the requester)
+ * Tag ID of the requested field. To request the Product Name
+ * a value of [0x00, 0x82] should be used. All other fields
+ * would use the two byte hexadecimal value of the ASCII
+ * characters. The first letter of the ASCII keyword is recorded
+ * in tag_id[0] and the next letter in tag_id[1].
*/
- uint16_t encap_async_event_target_id;
+ uint8_t tag_id[2];
uint8_t unused_0[6];
- /* This is an encapsulated asynchronous event completion. */
- uint32_t encap_async_event_cmpl[4];
} __rte_packed;
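So for a two-letter VPD keyword the request simply carries the ASCII bytes in tag_id; a sketch, where bnxt_send_hwrm() is a hypothetical placeholder assumed to fill the common request header:

#include <stdint.h>
#include <string.h>
#include "hsi_struct_def_dpdk.h"

extern int bnxt_send_hwrm(void *req, size_t len);	/* hypothetical */

static int
nvm_get_vpd_field(char k0, char k1)
{
	struct hwrm_nvm_get_vpd_field_info_input req;

	memset(&req, 0, sizeof(req));
	/* ASCII keyword bytes, e.g. 'S','N' for a serial-number field;
	 * the Product Name uses the special pair {0x00, 0x82}.
	 */
	req.tag_id[0] = (uint8_t)k0;
	req.tag_id[1] = (uint8_t)k1;
	return bnxt_send_hwrm(&req, sizeof(req));
}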
-/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
-struct hwrm_fwd_async_event_cmpl_output {
+/* hwrm_nvm_get_vpd_field_info_output (size:2176b/272B) */
+struct hwrm_nvm_get_vpd_field_info_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -59939,24 +66047,28 @@ struct hwrm_fwd_async_event_cmpl_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /* Data retrieved from VPD field */
+ uint8_t data[256];
+ /* size of data retrieved in bytes */
+ uint16_t data_len;
+ uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/**************************
- * hwrm_nvm_raw_write_blk *
- **************************/
+/*******************************
+ * hwrm_nvm_set_vpd_field_info *
+ *******************************/
-/* hwrm_nvm_raw_write_blk_input (size:256b/32B) */
-struct hwrm_nvm_raw_write_blk_input {
+/* hwrm_nvm_set_vpd_field_info_input (size:256b/32B) */
+struct hwrm_nvm_set_vpd_field_info_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -59986,21 +66098,25 @@ struct hwrm_nvm_raw_write_blk_input {
*/
uint64_t resp_addr;
/*
- * 64-bit Host Source Address.
- * This is the location of the source data to be written.
+ * This is the host address where
+ * the VPD data value will be copied from
*/
uint64_t host_src_addr;
/*
- * 32-bit Destination Address.
- * This is the NVRAM byte-offset where the source data will be written to.
+ * Tag ID of the requested field. To request the Product Name
+ * a value of [0x00, 0x82] should be used. All other fields
+ * would use the two byte hexadecimal value of the ASCII
+ * characters. The first letter of the ASCII keyword is recorded
+ * in tag_id[0] and the next letter in tag_id[1].
*/
- uint32_t dest_addr;
- /* Length of data to be written, in bytes. */
- uint32_t len;
+ uint8_t tag_id[2];
+ /* size of data in bytes */
+ uint16_t data_len;
+ uint8_t unused_0[4];
} __rte_packed;
-/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */
-struct hwrm_nvm_raw_write_blk_output {
+/* hwrm_nvm_set_vpd_field_info_output (size:128b/16B) */
+struct hwrm_nvm_set_vpd_field_info_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -60012,21 +66128,21 @@ struct hwrm_nvm_raw_write_blk_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/*****************
- * hwrm_nvm_read *
- *****************/
+/****************
+ * hwrm_oem_cmd *
+ ****************/
-/* hwrm_nvm_read_input (size:320b/40B) */
-struct hwrm_nvm_read_input {
+/* hwrm_oem_cmd_input (size:1024b/128B) */
+struct hwrm_oem_cmd_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -60056,22 +66172,36 @@ struct hwrm_nvm_read_input {
*/
uint64_t resp_addr;
/*
- * 64-bit Host Destination Address.
- * This is the host address where the data will be written to.
- */
- uint64_t host_dest_addr;
- /* The 0-based index of the directory entry. */
- uint16_t dir_idx;
- uint8_t unused_0[2];
- /* The NVRAM byte-offset to read from. */
- uint32_t offset;
- /* The length of the data to be read, in bytes. */
- uint32_t len;
- uint8_t unused_1[4];
+ * The organization owning the message format. Set this field
+ * to 0x14e4 when used for Broadcom internal use when
+ * the naming authority is set to PCI_SIG.
+ */
+ uint32_t oem_id;
+ /* The naming authority used for setting the oem_id. */
+ uint8_t naming_authority;
+ /* Invalid naming authority */
+ #define HWRM_OEM_CMD_INPUT_NAMING_AUTHORITY_INVALID UINT32_C(0x0)
+ /* PCI_SIG naming authority numbering is used */
+ #define HWRM_OEM_CMD_INPUT_NAMING_AUTHORITY_PCI_SIG UINT32_C(0x1)
+ #define HWRM_OEM_CMD_INPUT_NAMING_AUTHORITY_LAST \
+ HWRM_OEM_CMD_INPUT_NAMING_AUTHORITY_PCI_SIG
+ /* The message family within the organization. */
+ uint8_t message_family;
+ /* Invalid message family */
+ #define HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_INVALID UINT32_C(0x0)
+ /* This message is targeted for Truflow */
+ #define HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_TRUFLOW UINT32_C(0x1)
+ /* This message is targeted for RoCE */
+ #define HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_ROCE UINT32_C(0x2)
+ #define HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_LAST \
+ HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_ROCE
+ uint16_t unused;
+ /* This field contains the vendor specific command data. */
+ uint32_t oem_data[26];
} __rte_packed;
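As a sketch only (not the driver's actual call path), the OEM command header above could be filled like this for a Broadcom, Truflow-targeted message; bnxt_send_hwrm() is again a hypothetical placeholder:

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include "hsi_struct_def_dpdk.h"

extern int bnxt_send_hwrm(void *req, size_t len);	/* hypothetical */

static int
oem_cmd_truflow(const uint32_t *payload, unsigned int n_words)
{
	struct hwrm_oem_cmd_input req;
	unsigned int i;

	if (n_words > RTE_DIM(req.oem_data))
		return -1;
	memset(&req, 0, sizeof(req));
	/* 0x14e4 is the Broadcom PCI vendor ID; PCI_SIG is the naming authority. */
	req.oem_id = rte_cpu_to_le_32(0x14e4);
	req.naming_authority = HWRM_OEM_CMD_INPUT_NAMING_AUTHORITY_PCI_SIG;
	req.message_family = HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_TRUFLOW;
	for (i = 0; i < n_words; i++)
		req.oem_data[i] = rte_cpu_to_le_32(payload[i]);
	return bnxt_send_hwrm(&req, sizeof(req));
}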
-/* hwrm_nvm_read_output (size:128b/16B) */
-struct hwrm_nvm_read_output {
+/* hwrm_oem_cmd_output (size:768b/96B) */
+struct hwrm_oem_cmd_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -60080,24 +66210,33 @@ struct hwrm_nvm_read_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /* The organization owning the message format. */
+ uint32_t oem_id;
+ /* The naming authority used for setting the oem_id. */
+ uint8_t naming_authority;
+ /* The message family within the organization. */
+ uint8_t message_family;
+ uint16_t unused;
+ /* This field contains the vendor specific response data. */
+ uint32_t oem_data[18];
+ uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/*********************
- * hwrm_nvm_raw_dump *
- *********************/
+/*****************************
+ * hwrm_dbg_crashdump_header *
+ *****************************/
-/* hwrm_nvm_raw_dump_input (size:256b/32B) */
-struct hwrm_nvm_raw_dump_input {
+/* hwrm_dbg_crashdump_header_input (size:192b/24B) */
+struct hwrm_dbg_crashdump_header_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -60126,19 +66265,11 @@ struct hwrm_nvm_raw_dump_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /*
- * 64-bit Host Destination Address.
- * This is the host address where the data will be written to.
- */
- uint64_t host_dest_addr;
- /* 32-bit NVRAM byte-offset to read from. */
- uint32_t offset;
- /* Total length of NVRAM contents to be read, in bytes. */
- uint32_t len;
+ uint32_t unused_0[2];
} __rte_packed;
-/* hwrm_nvm_raw_dump_output (size:128b/16B) */
-struct hwrm_nvm_raw_dump_output {
+/* hwrm_dbg_crashdump_header_output (size:512b/64B) */
+struct hwrm_dbg_crashdump_header_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -60147,24 +66278,151 @@ struct hwrm_nvm_raw_dump_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /* Major version. */
+ uint8_t version_hi;
+ /* Minor version. */
+ uint8_t version_low;
+ /*
+ * Header length in bytes. This includes all fields from version
+ * to dev_uid (whose length is specified in dev_uid_length).
+ */
+ uint16_t header_len;
+ /* This is the crash dump size in bytes. */
+ uint32_t dump_size;
+ /*
+ * This is a "wall clock" timestamp value of when the crash occurred.
+ * Format is of time_t type.
+ */
+ uint32_t crash_time;
+ /* This is the timezone information for the crash_time. */
+ int8_t utc_offset;
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_UTC \
+ INT32_C(0)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AMSTERDAM \
+ INT32_C(4)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_EGYPT \
+ INT32_C(8)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_EUROPE_MOSCOW \
+ INT32_C(12)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_IRAN \
+ INT32_C(14)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_ASIA_DUBAI \
+ INT32_C(16)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_ASIA_KABUL \
+ INT32_C(18)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_ANTARCTICA_MAWSON \
+ INT32_C(20)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_ASIA_COLOMBO \
+ INT32_C(22)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_ASIA_KATHMANDU \
+ INT32_C(23)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_INDIAN_CHAGOS \
+ INT32_C(24)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_INDIAN_COCOS \
+ INT32_C(26)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_ASIA_BANGKOK \
+ INT32_C(28)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_ASIA_HONG_KONG \
+ INT32_C(32)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_ASIA_PYONGYANG \
+ INT32_C(34)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AUSTRALIA_EUCLA \
+ INT32_C(35)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_ASIA_TOKYO \
+ INT32_C(36)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AUSTRALIA_ADELAIDE \
+ INT32_C(38)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AUSTRALIA_BROKEN_HILL \
+ INT32_C(38)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AUSTRALIA_DARWIN \
+ INT32_C(38)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AUSTRALIA_SYDNEY \
+ INT32_C(40)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AUSTRALIA_LORD_HOWE \
+ INT32_C(42)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_ANTARCTICA_MACQUARIE \
+ INT32_C(44)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_ANTARCTICA_SOUTH_POLE \
+ INT32_C(48)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_PACIFIC_CHATHAM \
+ INT32_C(51)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_PACIFIC_APIA \
+ INT32_C(52)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_PACIFIC_KIRITIMATIS \
+ INT32_C(56)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_ATLANTIC_CAPE_VERDE \
+ INT32_C(-4)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_ATLANTIC_SOUTH_GEORGIA \
+ INT32_C(-8)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AMERICA_ARGENTINA_BUENOS_AIRES \
+ INT32_C(-12)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AMERICA_SAO_PAULO \
+ INT32_C(-12)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AMERICA_NEWFOUNDLAND \
+ INT32_C(-14)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AMERICA_BARBADOS \
+ INT32_C(-16)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AMERICA_CANCUN \
+ INT32_C(-20)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AMERICA_COSTA_RICA \
+ INT32_C(-24)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_AMERICA_PHOENIX \
+ INT32_C(-28)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_US_ARIZONA \
+ INT32_C(-28)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_US_PACIFIC \
+ INT32_C(-32)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_US_ALASKA \
+ INT32_C(-36)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_PACIFIC_MARQUESAS \
+ INT32_C(-38)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_PACIFIC_HAWAII \
+ INT32_C(-40)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_PACIFIC_MIDWAY \
+ INT32_C(-44)
+ #define HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_LAST \
+ HWRM_DBG_CRASHDUMP_HEADER_OUTPUT_UTC_OFFSET_PACIFIC_MIDWAY
+ /*
+ * This field is a counter value of the crash dump available. This
+ * value is incremented monotonically at each crash.
+ */
+ uint8_t crash_cntr;
+ /*
+ * This specifies the length of the dev_uid in bytes. The maximum
+ * value is 31.
+ */
+ uint16_t dev_uid_length;
+ /*
+ * This is a unique device identifier (e.g. the first port MAC
+ * address for a network controller or a serial number for an
+ * en/decryption device) in ASCII format. It is used to identify
+ * where the crash dump content is coming from. Unused bytes must
+ * be filled with the '\0' character.
+ */
+ uint8_t dev_uid[32];
+ /*
+ * This is a count value tracking the number of successful boots
+ * before the crash occurred.
+ */
+ uint32_t power_on_count;
+ uint8_t unused_2[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
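The utc_offset values above are consistent with 15-minute units (e.g. ASIA_TOKYO = 36 is UTC+9:00, US_PACIFIC = -32 is UTC-8:00). Assuming that interpretation, a decoder could look like this:

#include <stdio.h>
#include "hsi_struct_def_dpdk.h"

/*
 * Print the crash counter and timezone, treating utc_offset as a signed
 * count of 15-minute units (an assumption derived from the values above).
 */
static void
crashdump_print_tz(const struct hwrm_dbg_crashdump_header_output *resp)
{
	int minutes = (int)resp->utc_offset * 15;
	char sign = minutes < 0 ? '-' : '+';

	if (minutes < 0)
		minutes = -minutes;
	printf("crash #%u at UTC offset %c%02d:%02d\n",
	       (unsigned int)resp->crash_cntr, sign,
	       minutes / 60, minutes % 60);
}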
/****************************
- * hwrm_nvm_get_dir_entries *
+ * hwrm_dbg_crashdump_erase *
****************************/
-/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
-struct hwrm_nvm_get_dir_entries_input {
+/* hwrm_dbg_crashdump_erase_input (size:192b/24B) */
+struct hwrm_dbg_crashdump_erase_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -60193,15 +66451,29 @@ struct hwrm_nvm_get_dir_entries_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* The scope of the erase */
+ uint8_t scope;
/*
- * 64-bit Host Destination Address.
- * This is the host address where the directory will be written.
+ * Wipe all crashdump data blocks, making them available for
+ * the next crash(es). This is the typical value to be used.
*/
- uint64_t host_dest_addr;
+ #define HWRM_DBG_CRASHDUMP_ERASE_INPUT_SCOPE_INVALIDATE UINT32_C(0x0)
+ /*
+ * Experimental: Remove all data blocks from the directory
+ * (without erasing any existing contents), re-allocate and
+ * re-initialize new ones. In cases where the crash dump feature
+ * stops functioning, this can be used to restore it to a
+ * clean slate.
+ */
+ #define HWRM_DBG_CRASHDUMP_ERASE_INPUT_SCOPE_REINIT UINT32_C(0x1)
+ #define HWRM_DBG_CRASHDUMP_ERASE_INPUT_SCOPE_LAST \
+ HWRM_DBG_CRASHDUMP_ERASE_INPUT_SCOPE_REINIT
+ uint8_t unused_0[3];
+ uint32_t unused_1;
} __rte_packed;
-/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
-struct hwrm_nvm_get_dir_entries_output {
+/* hwrm_dbg_crashdump_erase_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_erase_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -60210,24 +66482,24 @@ struct hwrm_nvm_get_dir_entries_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/*************************
- * hwrm_nvm_get_dir_info *
- *************************/
+/******************
+ * hwrm_dbg_qcaps *
+ ******************/
-/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
-struct hwrm_nvm_get_dir_info_input {
+/* hwrm_dbg_qcaps_input (size:192b/24B) */
+struct hwrm_dbg_qcaps_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -60256,10 +66528,17 @@ struct hwrm_nvm_get_dir_info_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[6];
} __rte_packed;
-/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
-struct hwrm_nvm_get_dir_info_output {
+/* hwrm_dbg_qcaps_output (size:192b/24B) */
+struct hwrm_dbg_qcaps_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -60268,28 +66547,54 @@ struct hwrm_nvm_get_dir_info_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Number of directory entries in the directory. */
- uint32_t entries;
- /* Size of each directory entry, in bytes. */
- uint32_t entry_length;
- uint8_t unused_0[7];
+ /*
+ * FID value. This value is used to identify operations on the PCI
+ * bus as belonging to a particular PCI function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[2];
+ /*
+ * Bitwise field of components FW supports skipping during collection
+ * of coredump as part of a crash collection.
+ */
+ uint32_t coredump_component_disable_caps;
+ /*
+ * If 1, FW supports disabling the collection of NVM during a
+ * coredump taken as part of crash collection.
+ */
+ #define HWRM_DBG_QCAPS_OUTPUT_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM \
+ UINT32_C(0x1)
+ uint32_t flags;
+ /* If 1, FW supports writing a crashdump to NVM. */
+ #define HWRM_DBG_QCAPS_OUTPUT_FLAGS_CRASHDUMP_NVM \
+ UINT32_C(0x1)
+ /* If 1, FW supports writing a crashdump to host ddr. */
+ #define HWRM_DBG_QCAPS_OUTPUT_FLAGS_CRASHDUMP_HOST_DDR \
+ UINT32_C(0x2)
+ /* If 1, FW supports writing a crashdump to soc ddr. */
+ #define HWRM_DBG_QCAPS_OUTPUT_FLAGS_CRASHDUMP_SOC_DDR \
+ UINT32_C(0x4)
+ /* If 1, FW supports USEQ operations */
+ #define HWRM_DBG_QCAPS_OUTPUT_FLAGS_USEQ \
+ UINT32_C(0x8)
+ uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
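A consumer of the response would then test the capability bits, for example:

#include <stdbool.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* True when firmware reports it can write a crashdump to host memory. */
static bool
dbg_supports_host_ddr_crashdump(const struct hwrm_dbg_qcaps_output *resp)
{
	uint32_t flags = rte_le_to_cpu_32(resp->flags);

	return (flags & HWRM_DBG_QCAPS_OUTPUT_FLAGS_CRASHDUMP_HOST_DDR) != 0;
}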
-/******************
- * hwrm_nvm_write *
- ******************/
+/*****************
+ * hwrm_dbg_qcfg *
+ *****************/
-/* hwrm_nvm_write_input (size:448b/56B) */
-struct hwrm_nvm_write_input {
+/* hwrm_dbg_qcfg_input (size:192b/24B) */
+struct hwrm_dbg_qcfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -60319,92 +66624,227 @@ struct hwrm_nvm_write_input {
*/
uint64_t resp_addr;
/*
- * 64-bit Host Source Address.
- * This is where the source data is.
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
*/
- uint64_t host_src_addr;
+ uint16_t fid;
+ uint16_t flags;
/*
- * The Directory Entry Type (valid values are defined in the
- * bnxnvm_directory_type enum defined in the file bnxnvm_defs.h).
+ * The crashdump size represents size of crashdump
+ * written to the specified destination.
*/
- uint16_t dir_type;
+ #define HWRM_DBG_QCFG_INPUT_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK \
+ UINT32_C(0x3)
+ #define HWRM_DBG_QCFG_INPUT_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT \
+ 0
+ /* crashdump size written to nvm */
+ #define HWRM_DBG_QCFG_INPUT_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM \
+ UINT32_C(0x0)
+ /* crashdump size written to host_ddr */
+ #define HWRM_DBG_QCFG_INPUT_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR \
+ UINT32_C(0x1)
+ /* crashdump size written to soc_ddr */
+ #define HWRM_DBG_QCFG_INPUT_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR \
+ UINT32_C(0x2)
+ #define HWRM_DBG_QCFG_INPUT_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST \
+ HWRM_DBG_QCFG_INPUT_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
/*
- * Directory ordinal.
- * The 0-based instance of the combined Directory Entry Type and Extension.
+ * Bitwise field of components requested for FW to skip when
+ * calculating the size of a coredump collection.
*/
- uint16_t dir_ordinal;
+ uint32_t coredump_component_disable_flags;
/*
- * The Directory Entry Extension flags (see BNX_DIR_EXT_* in the file
- * bnxnvm_defs.h).
+ * If 1, NVM will not be collected during a coredump taken as part
+ * of crash collection.
*/
- uint16_t dir_ext;
+ #define HWRM_DBG_QCFG_INPUT_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM \
+ UINT32_C(0x1)
+} __rte_packed;
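To query the crashdump size for a particular destination, the two-bit DEST field in the input flags is written under its shift; a host-DDR sketch, with bnxt_send_hwrm() as a hypothetical send helper:

#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

extern int bnxt_send_hwrm(void *req, size_t len);	/* hypothetical */

static int
dbg_qcfg_host_ddr(void)
{
	struct hwrm_dbg_qcfg_input req;

	memset(&req, 0, sizeof(req));
	req.fid = rte_cpu_to_le_16(0xffff);	/* query the requesting function */
	/* Ask for the crashdump size of the host DDR destination. */
	req.flags = rte_cpu_to_le_16(
		HWRM_DBG_QCFG_INPUT_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR <<
		HWRM_DBG_QCFG_INPUT_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT);
	return bnxt_send_hwrm(&req, sizeof(req));
}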
+
+/* hwrm_dbg_qcfg_output (size:256b/32B) */
+struct hwrm_dbg_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the file
- * bnxnvm_defs.h).
+ * FID value. This value is used to identify operations on the PCI
+ * bus as belonging to a particular PCI function.
*/
- uint16_t dir_attr;
+ uint16_t fid;
+ uint8_t unused_0[2];
/*
- * Length of data to write, in bytes. May be less than or equal to the
- * allocated size for the directory entry.
- * The data length stored in the directory entry will be updated to
- * reflect this value once the write is complete.
+ * Size in bytes of a coredump file created by the FW. This takes into
+ * consideration any components selected in the
+ * coredump_component_disable_flags field from hwrm_dbg_qcfg_input.
*/
- uint32_t dir_data_length;
- /* Option. */
- uint16_t option;
- uint16_t flags;
+ uint32_t coredump_size;
+ uint32_t flags;
/*
- * When this bit is '1', the original active image
- * will not be removed. TBD: what purpose is this?
+ * If set to 1, then UART logging is enabled for the primary
+ * firmware. Disabled otherwise.
*/
- #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG \
- UINT32_C(0x1)
+ #define HWRM_DBG_QCFG_OUTPUT_FLAGS_UART_LOG UINT32_C(0x1)
/*
- * This flag indicates the sender wants to modify a continuous
- * NVRAM area using a batch of this HWRM requests. The
- * offset of a request must be continuous to the end of previous
- * request's. Firmware does not update the directory entry until
- * receiving the last request, which is indicated by the batch_last
- * flag. This flag is set usually when a sender does not have a
- * block of memory that is big enough to hold the entire NVRAM
- * data for send at one time.
+ * If set to 1, then UART logging is enabled for the secondary
+ * firmware. Disabled otherwise.
*/
- #define HWRM_NVM_WRITE_INPUT_FLAGS_BATCH_MODE \
- UINT32_C(0x2)
+ #define HWRM_DBG_QCFG_OUTPUT_FLAGS_UART_LOG_SECONDARY UINT32_C(0x2)
/*
- * This flag can be used only when the batch_mode flag is set. It
- * indicates this request is the last of batch requests.
+ * If set to 1, then completion ring logging is enabled for the
+ * primary firmware. Disabled otherwise.
*/
- #define HWRM_NVM_WRITE_INPUT_FLAGS_BATCH_LAST \
- UINT32_C(0x4)
+ #define HWRM_DBG_QCFG_OUTPUT_FLAGS_FW_TRACE UINT32_C(0x4)
/*
- * The requested length of the allocated NVM for the item, in bytes.
- * This value may be greater than or equal to the specified data
- * length (dir_data_length).
- * If this value is less than the specified data length, it will be ignored.
- * The response will contain the actual allocated item length,
- * which may be greater than the requested item length.
- * The purpose for allocating more than the required number of bytes
- * for an item's data is to pre-allocate extra storage (padding) to
- * accommodate the potential future growth of an item (e.g. upgraded
- * firmware with a size increase, log growth, expanded configuration data).
+ * If set to 1, then completion ring logging is enabled for the
+ * secondary firmware. Disabled otherwise.
*/
- uint32_t dir_item_length;
+ #define HWRM_DBG_QCFG_OUTPUT_FLAGS_FW_TRACE_SECONDARY UINT32_C(0x8)
/*
- * 32-bit offset of data blob from where data is being written.
- * Only valid for batch mode. For non-batch writes 'dont care'.
+ * If set to 1, firmware will generate debug_notification async
+ * events to the driver as applicable.
*/
- uint32_t offset;
+ #define HWRM_DBG_QCFG_OUTPUT_FLAGS_DEBUG_NOTIFY \
+ UINT32_C(0x10)
/*
- * Length of data to be written.Should be non-zero.
- * Only valid for batch mode. For non-batch writes 'dont care'.
+ * If set to 1, firmware is allowed to be unresponsive to heartbeat
+ * health checks, allowing for JTAG debugging scenarios where the
+ * debugger has the firmware processes stopped indefinitely. This
+ * flag has effect only on debug builds of firmware.
*/
- uint32_t len;
+ #define HWRM_DBG_QCFG_OUTPUT_FLAGS_JTAG_DEBUG \
+ UINT32_C(0x20)
+ /*
+ * Notification queue (completion ring) used by the firmware to post
+ * async debug notifications and fw trace logs. This field is valid
+ * when fw_trace, fw_trace_secondary or debug_notify flags are set.
+ */
+ uint16_t async_cmpl_ring;
+ uint8_t unused_2[2];
+ /*
+ * Size in bytes of a crashdump file created by the FW. Uses input
+ * flags to determine medium destination and corresponding size.
+ */
+ uint32_t crashdump_size;
+ uint8_t unused_3[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/*********************************
+ * hwrm_dbg_crashdump_medium_cfg *
+ *********************************/
+
+
+/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
+struct hwrm_dbg_crashdump_medium_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint16_t output_dest_flags;
+ /* Destination is DDR ram. */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_TYPE_DDR UINT32_C(0x1)
+ uint16_t pg_size_lvl;
+ /* PBL indirect levels. */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_LVL_MASK UINT32_C(0x3)
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /*
+ * PBL pointer points to PDE table with each entry pointing to
+ * PTE tables.
+ */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_LVL_LAST \
+ HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_LVL_LVL_2
+ /* page size. */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_PG_SIZE_MASK \
+ UINT32_C(0x1c)
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_PG_SIZE_SFT 2
+ /* 4KB. */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 2)
+ /* 8KB. */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 2)
+ /* 64KB. */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 2)
+ /* 2MB. */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 2)
+ /* 8MB. */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 2)
+ /* 1GB. */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 2)
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_PG_SIZE_LAST \
+ HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_PG_SIZE_PG_1G
+ /* unused11 is 11 bits */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_UNUSED11_MASK \
+ UINT32_C(0xffe0)
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_UNUSED11_SFT 5
+ /* Crashdump buffer size. */
+ uint32_t size;
+ /*
+ * Bitwise field of components that FW is requested to skip during
+ * coredump as part of a crash collection.
+ */
+ uint32_t coredump_component_disable_flags;
+ /*
+ * If 1, then NVM will not be collected during a coredump taken as
+ * part of crash collection.
+ */
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_NVRAM UINT32_C(0x1)
uint32_t unused_0;
+ /* Crashdump buffer PBL physical address. */
+ uint64_t pbl;
} __rte_packed;
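The page-size/level bit-packing above is composed from already-shifted values; for example, a buffer described by a single-level PBL of 2 MB pages might be configured like this (sketch only, send helper hypothetical):

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

extern int bnxt_send_hwrm(void *req, size_t len);	/* hypothetical */

static int
crashdump_cfg_host_ddr(uint64_t pbl_iova, uint32_t buf_size)
{
	struct hwrm_dbg_crashdump_medium_cfg_input req;

	memset(&req, 0, sizeof(req));
	req.output_dest_flags =
		rte_cpu_to_le_16(HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_TYPE_DDR);
	/* One indirection level (PBL points to a PTE table), 2 MB pages. */
	req.pg_size_lvl = rte_cpu_to_le_16(
		HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_LVL_LVL_1 |
		HWRM_DBG_CRASHDUMP_MEDIUM_CFG_INPUT_PG_SIZE_PG_2M);
	req.size = rte_cpu_to_le_32(buf_size);
	req.pbl = rte_cpu_to_le_64(pbl_iova);
	return bnxt_send_hwrm(&req, sizeof(req));
}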
-/* hwrm_nvm_write_output (size:128b/16B) */
-struct hwrm_nvm_write_output {
+/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_medium_cfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -60413,52 +66853,56 @@ struct hwrm_nvm_write_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /*
- * Length of the allocated NVM for the item, in bytes. The value may be
- * greater than or equal to the specified data length or the requested
- * item length.
- * The actual item length used when creating a new directory entry will
- * be a multiple of an NVM block size.
- */
- uint32_t dir_item_length;
- /* The directory index of the created or modified item. */
- uint16_t dir_idx;
- uint8_t unused_0;
+ uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/* hwrm_nvm_write_cmd_err (size:64b/8B) */
-struct hwrm_nvm_write_cmd_err {
+/* coredump_segment_record (size:128b/16B) */
+struct coredump_segment_record {
+ /* Component id of the returned component. */
+ uint16_t component_id;
+ /* Segment id of the returned component. */
+ uint16_t segment_id;
+ /* Not used. */
+ uint16_t max_instances;
+ /* Major version. */
+ uint8_t version_hi;
+ /* Minor version. */
+ uint8_t version_low;
/*
- * command specific error codes that goes to
- * the cmd_err field in Common HWRM Error Response.
+ * bit 0: live data
+ * bit 1: crashed data
*/
- uint8_t code;
- /* Unknown error */
- #define HWRM_NVM_WRITE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
- /* Unable to complete operation due to fragmentation */
- #define HWRM_NVM_WRITE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1)
- /* nvm is completely full. */
- #define HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2)
- #define HWRM_NVM_WRITE_CMD_ERR_CODE_LAST \
- HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE
- uint8_t unused_0[7];
+ uint8_t seg_flags;
+ /* This field is used to indicate the segment is compressed. */
+ uint8_t compress_flags;
+ /*
+ * SFLAG_COMPRESSED_ZLIB indicates that the segment data is
+ * compressed.
+ */
+ #define SFLAG_COMPRESSED_ZLIB UINT32_C(0x1)
+ uint8_t unused_0[2];
+ /*
+ * This field is the length of the segment data. It will be zero if
+ * the firmware does not support returning the segment data length.
+ */
+ uint32_t segment_len;
} __rte_packed;
-/*******************
- * hwrm_nvm_modify *
- *******************/
+/**************************
+ * hwrm_dbg_coredump_list *
+ **************************/
-/* hwrm_nvm_modify_input (size:320b/40B) */
-struct hwrm_nvm_modify_input {
+/* hwrm_dbg_coredump_list_input (size:256b/32B) */
+struct hwrm_dbg_coredump_list_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -60488,41 +66932,26 @@ struct hwrm_nvm_modify_input {
*/
uint64_t resp_addr;
/*
- * 64-bit Host Source Address.
- * This is where the modified data is.
- */
- uint64_t host_src_addr;
- /* 16-bit directory entry index. */
- uint16_t dir_idx;
- uint16_t flags;
- /*
- * This flag indicates the sender wants to modify a continuous NVRAM
- * area using a batch of this HWRM requests. The offset of a request
- * must be continuous to the end of previous request's. Firmware does
- * not update the directory entry until receiving the last request,
- * which is indicated by the batch_last flag.
- * This flag is set usually when a sender does not have a block of
- * memory that is big enough to hold the entire NVRAM data for send
- * at one time.
- */
- #define HWRM_NVM_MODIFY_INPUT_FLAGS_BATCH_MODE UINT32_C(0x1)
- /*
- * This flag can be used only when the batch_mode flag is set.
- * It indicates this request is the last of batch requests.
+ * host address where the data content will be written
+ * when the request is complete. This area must be 16B aligned.
*/
- #define HWRM_NVM_MODIFY_INPUT_FLAGS_BATCH_LAST UINT32_C(0x2)
- /* 32-bit NVRAM byte-offset to modify content from. */
- uint32_t offset;
+ uint64_t host_dest_addr;
+ /* Length of host buffer used for transferring debug data. */
+ uint32_t host_buf_len;
+ /* Sequence number of the request. Starts at 0. */
+ uint16_t seq_no;
+ /* */
+ uint8_t flags;
/*
- * Length of data to be modified, in bytes. The length shall
- * be non-zero.
+ * If set to 1, crash dump is requested.
+ * If set to 0, both live core and crash dump are requested.
*/
- uint32_t len;
- uint8_t unused_1[4];
+ #define HWRM_DBG_COREDUMP_LIST_INPUT_FLAGS_CRASHDUMP UINT32_C(0x1)
+ uint8_t unused_0[1];
} __rte_packed;
-/* hwrm_nvm_modify_output (size:128b/16B) */
-struct hwrm_nvm_modify_output {
+/* hwrm_dbg_coredump_list_output (size:128b/16B) */
+struct hwrm_dbg_coredump_list_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -60531,24 +66960,35 @@ struct hwrm_nvm_modify_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ uint8_t flags;
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * Value of 1 means that there is more data available.
+ * Issue the request again with the next sequence number.
+ */
+ #define HWRM_DBG_COREDUMP_LIST_OUTPUT_FLAGS_MORE UINT32_C(0x1)
+ uint8_t unused_0;
+ /* Total number of segments to be returned. */
+ uint16_t total_segments;
+ /* Actual length of data returned in bytes. */
+ uint16_t data_len;
+ uint8_t unused_1;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
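Together, the MORE flag and seq_no implement a simple paging protocol: reissue the list request with the next sequence number until MORE is clear. A sketch of that loop follows; bnxt_send_hwrm() and bnxt_list_resp() are hypothetical placeholders for the driver's send path and response buffer:

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

extern int bnxt_send_hwrm(void *req, size_t len);			/* hypothetical */
extern struct hwrm_dbg_coredump_list_output *bnxt_list_resp(void);	/* hypothetical */

static int
coredump_list_all(uint64_t host_buf_iova, uint32_t host_buf_len)
{
	struct hwrm_dbg_coredump_list_input req;
	struct hwrm_dbg_coredump_list_output *resp;
	uint16_t seq_no = 0;
	int rc;

	do {
		memset(&req, 0, sizeof(req));
		req.host_dest_addr = rte_cpu_to_le_64(host_buf_iova);
		req.host_buf_len = rte_cpu_to_le_32(host_buf_len);
		req.seq_no = rte_cpu_to_le_16(seq_no);
		rc = bnxt_send_hwrm(&req, sizeof(req));
		if (rc)
			return rc;
		resp = bnxt_list_resp();
		/* data_len bytes of coredump_segment_record entries are now
		 * in the host buffer; parse them here.
		 */
		seq_no++;
	} while (resp->flags & HWRM_DBG_COREDUMP_LIST_OUTPUT_FLAGS_MORE);
	return 0;
}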
-/***************************
- * hwrm_nvm_find_dir_entry *
- ***************************/
+/******************************
+ * hwrm_dbg_coredump_initiate *
+ ******************************/
-/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
-struct hwrm_nvm_find_dir_entry_input {
+/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
+struct hwrm_dbg_coredump_initiate_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -60577,42 +67017,25 @@ struct hwrm_nvm_find_dir_entry_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t enables;
- /*
- * This bit must be '1' for the dir_idx_valid field to be
- * configured.
- */
- #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID \
- UINT32_C(0x1)
- /* Directory Entry Index */
- uint16_t dir_idx;
- /* Directory Entry (Image) Type */
- uint16_t dir_type;
+ /* Component id of the returned component. */
+ uint16_t component_id;
+ /* Segment id of the returned component. */
+ uint16_t segment_id;
+ /* Not used. */
+ uint16_t instance;
+ /* Not used. */
+ uint16_t unused_0;
/*
- * Directory ordinal.
- * The instance of this Directory Type
+ * bit 0: live data
+ * bit 1: crashed data
*/
- uint16_t dir_ordinal;
- /* The Directory Entry Extension flags. */
- uint16_t dir_ext;
- /* This value indicates the search option using dir_ordinal. */
- uint8_t opt_ordinal;
- /* This value indicates the search option using dir_ordinal. */
- #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_MASK UINT32_C(0x3)
- #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_SFT 0
- /* Equal to specified ordinal value. */
- #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_EQ UINT32_C(0x0)
- /* Greater than or equal to specified ordinal value */
- #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GE UINT32_C(0x1)
- /* Greater than specified ordinal value */
- #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT UINT32_C(0x2)
- #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_LAST \
- HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT
- uint8_t unused_0[3];
+ uint8_t seg_flags;
+ /* Not used. */
+ uint8_t unused_1[7];
} __rte_packed;
-/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
-struct hwrm_nvm_find_dir_entry_output {
+/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
+struct hwrm_dbg_coredump_initiate_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -60621,38 +67044,47 @@ struct hwrm_nvm_find_dir_entry_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Allocated NVRAM for this directory entry, in bytes. */
- uint32_t dir_item_length;
- /* Size of the stored data for this directory entry, in bytes. */
- uint32_t dir_data_length;
- /*
- * Firmware version.
- * Only valid if the directory entry is for embedded firmware stored
- * in APE_BIN Format.
- */
- uint32_t fw_ver;
- /* Directory ordinal. */
- uint16_t dir_ordinal;
- /* Directory Entry Index */
- uint16_t dir_idx;
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/****************************
- * hwrm_nvm_erase_dir_entry *
- ****************************/
+/* coredump_data_hdr (size:128b/16B) */
+struct coredump_data_hdr {
+ /* Starting address of the register range. */
+ uint32_t address;
+ /*
+ * length: bits 0 - 23 represent the actual data length without the pad.
+ * flags: bits 24 - 31 represent indirect register ranges.
+ * - bit 24: Set if registers in this segment are indirectly accessed.
+ */
+ uint32_t flags_length;
+ /* These bits represents the actual length of the data segment */
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK \
+ UINT32_C(0xffffff)
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
+ /* Set if registers in this segment are indirectly accessed. */
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS \
+ UINT32_C(0x1000000)
+ /* Value in the partner register for indirect or multi-field registers. */
+ uint32_t instance;
+ /* Starting address of the next register after the current data range */
+ uint32_t next_offset;
+} __rte_packed;
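The packed flags_length word splits into a 24-bit length and flag bits; assuming little-endian encoding like the rest of these structures, a decoder masks and shifts as follows:

#include <stdbool.h>
#include <stdint.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

/* Actual (unpadded) data length carried under this header. */
static uint32_t
coredump_hdr_len(const struct coredump_data_hdr *hdr)
{
	uint32_t fl = rte_le_to_cpu_32(hdr->flags_length);

	return (fl & COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK) >>
	       COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT;
}

/* True when the registers in this segment are indirectly accessed. */
static bool
coredump_hdr_is_indirect(const struct coredump_data_hdr *hdr)
{
	return !!(rte_le_to_cpu_32(hdr->flags_length) &
		  COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS);
}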
+
+/******************************
+ * hwrm_dbg_coredump_retrieve *
+ ******************************/
-/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
-struct hwrm_nvm_erase_dir_entry_input {
+/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
+struct hwrm_dbg_coredump_retrieve_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -60681,13 +67113,39 @@ struct hwrm_nvm_erase_dir_entry_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Directory Entry Index */
- uint16_t dir_idx;
- uint8_t unused_0[6];
+ /*
+ * host address where the data content will be written
+ * when the request is complete. This area must be 16B aligned.
+ */
+ uint64_t host_dest_addr;
+ /* Length of host buffer used for transferring debug data. */
+ uint32_t host_buf_len;
+ /* Not used. */
+ uint32_t unused_0;
+ /* Component id of the returned component. */
+ uint16_t component_id;
+ /* Segment id of the returned component. */
+ uint16_t segment_id;
+ /* Not used. */
+ uint16_t instance;
+ /* Not used. */
+ uint16_t unused_1;
+ /*
+ * bit 0: live data
+ * bit 1: crashed data
+ */
+ uint8_t seg_flags;
+ uint8_t unused_2;
+ uint16_t unused_3;
+ /* Not used. */
+ uint32_t unused_4;
+ /* Sequence number is used per segment request. Starts at 0. */
+ uint32_t seq_no;
+ uint32_t unused_5;
} __rte_packed;
-/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
-struct hwrm_nvm_erase_dir_entry_output {
+/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
+struct hwrm_dbg_coredump_retrieve_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -60696,24 +67154,33 @@ struct hwrm_nvm_erase_dir_entry_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ uint8_t flags;
+ /*
+ * Value of 1 means that there is more data available.
+ * Issue the request again with the next sequence number.
+ */
+ #define HWRM_DBG_COREDUMP_RETRIEVE_OUTPUT_FLAGS_MORE UINT32_C(0x1)
+ uint8_t unused_0;
+ /* Actual length of data returned in bytes. */
+ uint16_t data_len;
+ uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
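Retrieval follows the same MORE/seq_no pattern per segment; a condensed sketch with the same kind of hypothetical helpers as before:

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

extern int bnxt_send_hwrm(void *req, size_t len);				/* hypothetical */
extern struct hwrm_dbg_coredump_retrieve_output *bnxt_retrieve_resp(void);	/* hypothetical */

static int
coredump_retrieve_segment(uint16_t component_id, uint16_t segment_id,
			  uint64_t host_buf_iova, uint32_t host_buf_len)
{
	struct hwrm_dbg_coredump_retrieve_input req;
	struct hwrm_dbg_coredump_retrieve_output *resp;
	uint32_t seq_no = 0;
	int rc;

	do {
		memset(&req, 0, sizeof(req));
		req.host_dest_addr = rte_cpu_to_le_64(host_buf_iova);
		req.host_buf_len = rte_cpu_to_le_32(host_buf_len);
		req.component_id = rte_cpu_to_le_16(component_id);
		req.segment_id = rte_cpu_to_le_16(segment_id);
		req.seq_no = rte_cpu_to_le_32(seq_no);
		rc = bnxt_send_hwrm(&req, sizeof(req));
		if (rc)
			return rc;
		resp = bnxt_retrieve_resp();
		/* data_len bytes of segment data landed in the host buffer;
		 * append them to the caller's coredump image here.
		 */
		seq_no++;
	} while (resp->flags & HWRM_DBG_COREDUMP_RETRIEVE_OUTPUT_FLAGS_MORE);
	return 0;
}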
-/*************************
- * hwrm_nvm_get_dev_info *
- *************************/
+/*******************
+ * hwrm_dbg_fw_cli *
+ *******************/
-/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
-struct hwrm_nvm_get_dev_info_input {
+/* hwrm_dbg_fw_cli_input (size:1024b/128B) */
+struct hwrm_dbg_fw_cli_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -60742,10 +67209,22 @@ struct hwrm_nvm_get_dev_info_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /*
+ * Address of the host buffer where debug CLI data
+ * is requested to be dumped.
+ */
+ uint64_t host_dest_addr;
+ /* Length of host buffer used for transferring debug data. */
+ uint32_t host_buf_len;
+ /* Length of CLI command. */
+ uint16_t cli_cmd_len;
+ uint8_t unused_0[2];
+ /* CLI command string, a single ASCII encoded null terminated string. */
+ uint8_t cli_cmd[96];
} __rte_packed;
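Filling the CLI request is mostly a matter of copying the NUL-terminated command string and pointing at a response buffer; for example (send helper hypothetical):

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

extern int bnxt_send_hwrm(void *req, size_t len);	/* hypothetical */

static int
dbg_fw_cli(const char *cmd, uint64_t host_buf_iova, uint32_t host_buf_len)
{
	struct hwrm_dbg_fw_cli_input req;
	size_t cmd_len = strlen(cmd) + 1;	/* include the terminating NUL */

	if (cmd_len > sizeof(req.cli_cmd))
		return -1;
	memset(&req, 0, sizeof(req));
	req.host_dest_addr = rte_cpu_to_le_64(host_buf_iova);
	req.host_buf_len = rte_cpu_to_le_32(host_buf_len);
	req.cli_cmd_len = rte_cpu_to_le_16((uint16_t)cmd_len);
	memcpy(req.cli_cmd, cmd, cmd_len);
	return bnxt_send_hwrm(&req, sizeof(req));
}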
-/* hwrm_nvm_get_dev_info_output (size:640b/80B) */
-struct hwrm_nvm_get_dev_info_output {
+/* hwrm_dbg_fw_cli_output (size:128b/16B) */
+struct hwrm_dbg_fw_cli_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -60754,119 +67233,26 @@ struct hwrm_nvm_get_dev_info_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Manufacturer ID. */
- uint16_t manufacturer_id;
- /* Device ID. */
- uint16_t device_id;
- /* Sector size of the NVRAM device. */
- uint32_t sector_size;
- /* Total size, in bytes of the NVRAM device. */
- uint32_t nvram_size;
- uint32_t reserved_size;
- /*
- * Available size that can be used, in bytes. Available size is the
- * NVRAM size take away the used size and reserved size.
- */
- uint32_t available_size;
- /* This field represents the major version of NVM cfg */
- uint8_t nvm_cfg_ver_maj;
- /* This field represents the minor version of NVM cfg */
- uint8_t nvm_cfg_ver_min;
- /* This field represents the update version of NVM cfg */
- uint8_t nvm_cfg_ver_upd;
- uint8_t flags;
- /*
- * If set to 1, firmware will provide various firmware version
- * information stored in the flash.
- */
- #define HWRM_NVM_GET_DEV_INFO_OUTPUT_FLAGS_FW_VER_VALID \
- UINT32_C(0x1)
- /*
- * This field represents the board package name stored in the flash.
- * (ASCII chars with NULL at the end).
- */
- char pkg_name[16];
- /*
- * This field represents the major version of HWRM firmware, stored in
- * the flash.
- */
- uint16_t hwrm_fw_major;
- /*
- * This field represents the minor version of HWRM firmware, stored in
- * the flash.
- */
- uint16_t hwrm_fw_minor;
- /*
- * This field represents the build version of HWRM firmware, stored in
- * the flash.
- */
- uint16_t hwrm_fw_build;
- /*
- * This field can be used to represent firmware branches or customer
- * specific releases tied to a specific (major, minor, build) version
- * of the HWRM firmware.
- */
- uint16_t hwrm_fw_patch;
- /*
- * This field represents the major version of mgmt firmware, stored in
- * the flash.
- */
- uint16_t mgmt_fw_major;
- /*
- * This field represents the minor version of mgmt firmware, stored in
- * the flash.
- */
- uint16_t mgmt_fw_minor;
- /*
- * This field represents the build version of mgmt firmware, stored in
- * the flash.
- */
- uint16_t mgmt_fw_build;
- /*
- * This field can be used to represent firmware branches or customer
- * specific releases tied to a specific (major, minor, build) version
- * of the mgmt firmware.
- */
- uint16_t mgmt_fw_patch;
- /*
- * This field represents the major version of roce firmware, stored in
- * the flash.
- */
- uint16_t roce_fw_major;
- /*
- * This field represents the minor version of roce firmware, stored in
- * the flash.
- */
- uint16_t roce_fw_minor;
- /*
- * This field represents the build version of roce firmware, stored in
- * the flash.
- */
- uint16_t roce_fw_build;
- /*
- * This field can be used to represent firmware branches or customer
- * specific releases tied to a specific (major, minor, build) version
- * of the roce firmware.
- */
- uint16_t roce_fw_patch;
- uint8_t unused_0[7];
+ /* Size of debug CLI data returned in bytes. */
+ uint32_t cli_data_len;
+ uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
/**************************
- * hwrm_nvm_mod_dir_entry *
+ * hwrm_dbg_ring_info_get *
**************************/
-/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
-struct hwrm_nvm_mod_dir_entry_input {
+/* hwrm_dbg_ring_info_get_input (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -60895,38 +67281,25 @@ struct hwrm_nvm_mod_dir_entry_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t enables;
- /*
- * This bit must be '1' for the checksum field to be
- * configured.
- */
- #define HWRM_NVM_MOD_DIR_ENTRY_INPUT_ENABLES_CHECKSUM UINT32_C(0x1)
- /* Directory Entry Index */
- uint16_t dir_idx;
- /*
- * Directory ordinal.
- * The (0-based) instance of this Directory Type.
- */
- uint16_t dir_ordinal;
- /*
- * The Directory Entry Extension flags (see BNX_DIR_EXT_* for
- * extension flag definitions).
- */
- uint16_t dir_ext;
- /*
- * Directory Entry Attribute flags (see BNX_DIR_ATTR_* for attribute
- * flag definitions).
- */
- uint16_t dir_attr;
- /*
- * If valid, then this field updates the checksum
- * value of the content in the directory entry.
- */
- uint32_t checksum;
+ /* Ring Type. */
+ uint8_t ring_type;
+ /* L2 Completion Ring (CR) */
+ #define HWRM_DBG_RING_INFO_GET_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
+ #define HWRM_DBG_RING_INFO_GET_INPUT_RING_TYPE_TX UINT32_C(0x1)
+ /* RX Ring (RR) */
+ #define HWRM_DBG_RING_INFO_GET_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* Notification Queue (NQ) */
+ #define HWRM_DBG_RING_INFO_GET_INPUT_RING_TYPE_NQ UINT32_C(0x3)
+ #define HWRM_DBG_RING_INFO_GET_INPUT_RING_TYPE_LAST \
+ HWRM_DBG_RING_INFO_GET_INPUT_RING_TYPE_NQ
+ uint8_t unused_0[3];
+ /* Firmware ring ID associated with ring being queried. */
+ uint32_t fw_ring_id;
} __rte_packed;
-/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
-struct hwrm_nvm_mod_dir_entry_output {
+/* hwrm_dbg_ring_info_get_output (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -60935,24 +67308,39 @@ struct hwrm_nvm_mod_dir_entry_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /* Producer index for the queried ring. */
+ uint32_t producer_index;
+ /* Consumer index for the queried ring. */
+ uint32_t consumer_index;
+ /*
+ * CAG Vector Control for the queried NQ ring.
+ * Not valid for other ring types.
+ */
+ uint32_t cag_vector_ctrl;
+ /*
+ * Steering Tag. The current value of the steering tag for the ring.
+ * The steering tag is only valid if it is advertised by Firmware in
+ * flags_ext2.steering_tag_supported of hwrm_func_qcaps response.
+ */
+ uint16_t st_tag;
+ uint8_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
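+
+/*
+ * Illustrative request/response sketch (not part of the generated
+ * interface): a caller selects a ring type, supplies the firmware ring
+ * ID, and reads back the producer and consumer indexes. The send_hwrm()
+ * helper and the fw_ring_id variable are placeholders for the driver's
+ * own HWRM transport and state.
+ *
+ *     struct hwrm_dbg_ring_info_get_input req = { 0 };
+ *     struct hwrm_dbg_ring_info_get_output resp = { 0 };
+ *     uint32_t prod, cons;
+ *
+ *     req.ring_type = HWRM_DBG_RING_INFO_GET_INPUT_RING_TYPE_L2_CMPL;
+ *     req.fw_ring_id = rte_cpu_to_le_32(fw_ring_id);
+ *     send_hwrm(&req, &resp);                  // placeholder transport
+ *     prod = rte_le_to_cpu_32(resp.producer_index);
+ *     cons = rte_le_to_cpu_32(resp.consumer_index);
+ */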
-/**************************
- * hwrm_nvm_verify_update *
- **************************/
+/**********************
+ * hwrm_dbg_drv_trace *
+ **********************/
-/* hwrm_nvm_verify_update_input (size:192b/24B) */
-struct hwrm_nvm_verify_update_input {
+/* hwrm_dbg_drv_trace_input (size:1024b/128B) */
+struct hwrm_dbg_drv_trace_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -60981,28 +67369,37 @@ struct hwrm_nvm_verify_update_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Directory Entry Type, to be verified. */
- uint16_t dir_type;
- /*
- * Directory ordinal.
- * The instance of the Directory Type to be verified.
- */
- uint16_t dir_ordinal;
+ /* Severity of the message. */
+ uint8_t severity;
+ /* Fatal */
+ #define HWRM_DBG_DRV_TRACE_INPUT_SEVERITY_TRACE_LEVEL_FATAL \
+ UINT32_C(0x0)
+ /* Error */
+ #define HWRM_DBG_DRV_TRACE_INPUT_SEVERITY_TRACE_LEVEL_ERROR \
+ UINT32_C(0x1)
+ /* Warning */
+ #define HWRM_DBG_DRV_TRACE_INPUT_SEVERITY_TRACE_LEVEL_WARNING \
+ UINT32_C(0x2)
+ /* Info */
+ #define HWRM_DBG_DRV_TRACE_INPUT_SEVERITY_TRACE_LEVEL_INFO \
+ UINT32_C(0x3)
+ /* Debug */
+ #define HWRM_DBG_DRV_TRACE_INPUT_SEVERITY_TRACE_LEVEL_DEBUG \
+ UINT32_C(0x4)
+ #define HWRM_DBG_DRV_TRACE_INPUT_SEVERITY_LAST \
+ HWRM_DBG_DRV_TRACE_INPUT_SEVERITY_TRACE_LEVEL_DEBUG
+ /* Number of bytes to write including terminating 'NULL' if any. */
+ uint8_t write_len;
+ uint8_t unused_0[6];
/*
- * The Directory Entry Extension flags.
- * The "UPDATE" extension flag must be set in this value.
- * A corresponding directory entry with the same type and ordinal
- * values but *without*
- * the "UPDATE" extension flag must also exist. The other flags of
- * the extension must
- * be identical between the active and update entries.
+	 * This field represents the debug data sent by the driver as
+	 * ASCII chars; 'NULL' termination is not required.
*/
- uint16_t dir_ext;
- uint8_t unused_0[2];
+ char trace_data[104];
} __rte_packed;
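+
+/*
+ * Illustrative sketch of filling the request (not part of the generated
+ * interface): the msg string and the send_hwrm() helper are placeholders;
+ * only the severity, write_len and trace_data fields come from the
+ * structure above.
+ *
+ *     struct hwrm_dbg_drv_trace_input req = { 0 };
+ *     const char *msg = "link up";
+ *
+ *     req.severity = HWRM_DBG_DRV_TRACE_INPUT_SEVERITY_TRACE_LEVEL_INFO;
+ *     req.write_len = (uint8_t)strlen(msg);    // 'NULL' not required
+ *     memcpy(req.trace_data, msg, req.write_len);
+ *     send_hwrm(&req, NULL);                   // placeholder transport
+ */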
-/* hwrm_nvm_verify_update_output (size:128b/16B) */
-struct hwrm_nvm_verify_update_output {
+/* hwrm_dbg_drv_trace_output (size:128b/16B) */
+struct hwrm_dbg_drv_trace_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -61014,21 +67411,21 @@ struct hwrm_nvm_verify_update_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} __rte_packed;
-/***************************
- * hwrm_nvm_install_update *
- ***************************/
+/***********************
+ * hwrm_dbg_useq_alloc *
+ ***********************/
-/* hwrm_nvm_install_update_input (size:192b/24B) */
-struct hwrm_nvm_install_update_input {
+/* hwrm_dbg_useq_alloc_input (size:192b/24B) */
+struct hwrm_dbg_useq_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -61058,63 +67455,21 @@ struct hwrm_nvm_install_update_input {
*/
uint64_t resp_addr;
/*
- * Installation type. If the value 3 through 0xffff is used,
- * only packaged items with that type value will be installed and
- * conditional installation directives for those packaged items
- * will be over-ridden (i.e. 'create' or 'replace' will be treated
- * as 'install').
- */
- uint32_t install_type;
- /*
- * Perform a normal package installation. Conditional installation
- * directives (e.g. 'create' and 'replace') of packaged items
- * will be followed.
- */
- #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_NORMAL UINT32_C(0x0)
- /*
- * Install all packaged items regardless of installation directive
- * (i.e. treat all packaged items as though they have an installation
- * directive of 'install').
- */
- #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL \
- UINT32_C(0xffffffff)
- #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_LAST \
- HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL
- uint16_t flags;
- /*
- * If set to 1, then securely erase all unused locations in
- * persistent storage.
- */
- #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ERASE_UNUSED_SPACE \
- UINT32_C(0x1)
- /*
- * If set to 1, then unspecified images, images not in the package
- * file, will be safely deleted.
- * When combined with erase_unused_space then unspecified images will
- * be securely erased.
- */
- #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_REMOVE_UNUSED_PKG \
- UINT32_C(0x2)
- /*
- * If set to 1, FW will defragment the NVM if defragmentation is
- * required for the update.
- * Allow additional time for this command to complete if this bit is
- * set to 1.
+	 * Size of the allocation, in bytes, for the USEQ in the code
+	 * words array.
*/
- #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ALLOWED_TO_DEFRAG \
- UINT32_C(0x4)
+ uint32_t size;
/*
- * If set to 1, FW will verify the package in the "UPDATE" NVM item
- * without installing it. This flag is for FW internal use only.
- * Users should not set this flag. The request will otherwise fail.
+	 * Number of bytes that executing the USEQ will produce. Must be a
+	 * multiple of 4.
*/
- #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_VERIFY_ONLY \
- UINT32_C(0x8)
- uint8_t unused_0[2];
+ uint16_t output_bytes;
+ /* This field is reserved */
+ uint16_t unused_0;
} __rte_packed;
-/* hwrm_nvm_install_update_output (size:192b/24B) */
-struct hwrm_nvm_install_update_output {
+/* hwrm_dbg_useq_alloc_output (size:256b/32B) */
+struct hwrm_dbg_useq_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -61123,181 +67478,130 @@ struct hwrm_nvm_install_update_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
+ /* Non-zero firmware timestamp */
+ uint32_t nz_fw_timestamp;
+ /* The last selected USID */
+ uint16_t last_usid;
+ /* The number of USEQs currently allocated */
+ uint16_t num_useq_allocd;
+ /* Flags indicating current USEQ engine state */
+ uint32_t useq_resp_flags;
+ /* When set, there is at least some data available to be delivered */
+ #define HWRM_DBG_USEQ_ALLOC_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL \
+ UINT32_C(0x1)
+ /* When set, all internal buffers are full */
+ #define HWRM_DBG_USEQ_ALLOC_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW \
+ UINT32_C(0x2)
+ #define HWRM_DBG_USEQ_ALLOC_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST \
+ HWRM_DBG_USEQ_ALLOC_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW
+ /* Current count of the number of full buffers available for delivery */
+ uint8_t full_cnt;
+ /* Reserved */
+ uint8_t useq_resp_unused_0[3];
+ /* This is the allocated usid */
+ uint16_t alloc_usid;
+ /* This field is reserved */
+ uint16_t unused_0;
/*
- * Bit-mask of successfully installed items.
- * Bit-0 corresponding to the first packaged item, Bit-1 for the second item, etc.
- * A value of 0 indicates that no items were successfully installed.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint64_t installed_items;
- /* result is 8 b corresponding to BCMRETVAL error codes */
- uint8_t result;
- /* There was no problem with the package installation. */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS \
- UINT32_C(0x0)
- /* Generic failure */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_FAILURE \
- UINT32_C(0xff)
- /* Allocation error malloc failure */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_MALLOC_FAILURE \
- UINT32_C(0xfd)
- /* NVM install error due to invalid index */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_INDEX_PARAMETER \
- UINT32_C(0xfb)
- /* NVM install error due to invalid type */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_TYPE_PARAMETER \
- UINT32_C(0xf3)
- /* Invalid package due to invalid prerequisite */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_PREREQUISITE \
- UINT32_C(0xf2)
- /* Invalid package due to invalid file header */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_FILE_HEADER \
- UINT32_C(0xec)
- /* Invalid package due to invalid format */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_SIGNATURE \
- UINT32_C(0xeb)
- /* Invalid package due to invalid property stream */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_PROP_STREAM \
- UINT32_C(0xea)
- /* Invalid package due to invalid property length */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_PROP_LENGTH \
- UINT32_C(0xe9)
- /* Invalid package due to invalid manifest */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_MANIFEST \
- UINT32_C(0xe8)
- /* Invalid package due to invalid trailer */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_TRAILER \
- UINT32_C(0xe7)
- /* Invalid package due to invalid checksum */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_CHECKSUM \
- UINT32_C(0xe6)
- /* Invalid package due to invalid item checksum */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_ITEM_CHECKSUM \
- UINT32_C(0xe5)
- /* Invalid package due to invalid length */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_DATA_LENGTH \
- UINT32_C(0xe4)
- /* Invalid package due to invalid directive */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INVALID_DIRECTIVE \
- UINT32_C(0xe1)
- /* Invalid device due to unsupported chip revision */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_UNSUPPORTED_CHIP_REV \
- UINT32_C(0xce)
- /* Invalid device due to unsupported device ID */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_UNSUPPORTED_DEVICE_ID \
- UINT32_C(0xcd)
- /* Invalid device due to unsupported subsystem vendor */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_UNSUPPORTED_SUBSYS_VENDOR \
- UINT32_C(0xcc)
- /* Invalid device due to unsupported subsystem ID */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_UNSUPPORTED_SUBSYS_ID \
- UINT32_C(0xcb)
- /* Invalid device due to unsupported product ID or customer ID */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_UNSUPPORTED_PLATFORM \
- UINT32_C(0xc5)
- /* Invalid package due to duplicate item */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_DUPLICATE_ITEM \
- UINT32_C(0xc4)
- /* Invalid package due to zero length item */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_ZERO_LENGTH_ITEM \
- UINT32_C(0xc3)
- /* NVM integrity error checksum */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INSTALL_CHECKSUM_ERROR \
- UINT32_C(0xb9)
- /* NVM integrity error */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INSTALL_DATA_ERROR \
- UINT32_C(0xb8)
- /* Authentication error */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_INSTALL_AUTHENTICATION_ERROR \
- UINT32_C(0xb7)
- /* NVM install error item not found */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_ITEM_NOT_FOUND \
- UINT32_C(0xb0)
- /* NVM install error item locked */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_ITEM_LOCKED \
- UINT32_C(0xa7)
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_LAST \
- HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_ITEM_LOCKED
- /* problem_item is 8 b */
- uint8_t problem_item;
- /* There was no problem with any packaged items. */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_NONE \
- UINT32_C(0x0)
- /* There was a problem with the NVM package itself. */
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE \
- UINT32_C(0xff)
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_LAST \
- HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE
- /* reset_required is 8 b */
- uint8_t reset_required;
+ uint32_t valid;
+} __rte_packed;
+
+/**********************
+ * hwrm_dbg_useq_free *
+ **********************/
+
+
+/* hwrm_dbg_useq_free_input (size:192b/24B) */
+struct hwrm_dbg_useq_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * No reset is required for installed/updated firmware or
- * microcode to take effect.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_NONE \
- UINT32_C(0x0)
+ uint16_t cmpl_ring;
/*
- * A PCIe reset (e.g. system reboot) is
- * required for newly installed/updated firmware or
- * microcode to take effect.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_PCI \
- UINT32_C(0x1)
- /*
- * A controller power reset (e.g. system power-cycle) is
- * required for newly installed/updated firmware or
- * microcode to take effect. Some newly installed/updated
- * firmware or microcode may still take effect upon the
- * next PCIe reset.
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
*/
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER \
- UINT32_C(0x2)
- #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_LAST \
- HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER
- uint8_t unused_0[4];
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint8_t valid;
+ uint64_t resp_addr;
+ /* The USID of the sequence to free */
+ uint16_t usid;
+ /* This field is reserved */
+ uint8_t unused_0[6];
} __rte_packed;
-/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
-struct hwrm_nvm_install_update_cmd_err {
- /*
- * command specific error codes that goes to
- * the cmd_err field in Common HWRM Error Response.
- */
- uint8_t code;
- /* Unknown error */
- #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN \
- UINT32_C(0x0)
- /* Unable to complete operation due to fragmentation */
- #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR \
+/* hwrm_dbg_useq_free_output (size:256b/32B) */
+struct hwrm_dbg_useq_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Non-zero firmware timestamp */
+ uint32_t nz_fw_timestamp;
+ /* The last selected USID */
+ uint16_t last_usid;
+ /* The number of USEQs currently allocated */
+ uint16_t num_useq_allocd;
+ /* Flags indicating current USEQ engine state */
+ uint32_t useq_resp_flags;
+ /* When set, there is at least some data available to be delivered */
+ #define HWRM_DBG_USEQ_FREE_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL \
UINT32_C(0x1)
- /* nvm is completely full. */
- #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE \
+ /* When set, all internal buffers are full */
+ #define HWRM_DBG_USEQ_FREE_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW \
UINT32_C(0x2)
- /* Firmware update failed due to Anti-rollback. */
- #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK \
- UINT32_C(0x3)
- /* Firmware update does not support voltage regulators on the device. */
- #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT \
- UINT32_C(0x4)
- #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST \
- HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT
- uint8_t unused_0[7];
+ #define HWRM_DBG_USEQ_FREE_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST \
+ HWRM_DBG_USEQ_FREE_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW
+ /* Current count of the number of full buffers available for delivery */
+ uint8_t full_cnt;
+ /* Reserved */
+ uint8_t useq_resp_unused_0[3];
+ /* This field is reserved */
+ uint32_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint32_t valid;
} __rte_packed;
-/******************
- * hwrm_nvm_flush *
- ******************/
+/***********************
+ * hwrm_dbg_useq_flush *
+ ***********************/
-/* hwrm_nvm_flush_input (size:128b/16B) */
-struct hwrm_nvm_flush_input {
+/* hwrm_dbg_useq_flush_input (size:192b/24B) */
+struct hwrm_dbg_useq_flush_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -61326,10 +67630,20 @@ struct hwrm_nvm_flush_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* Bitwise flags described below */
+ uint16_t flags;
+ /* Flush all USEQ code words, resetting all USIDs to invalid */
+ #define HWRM_DBG_USEQ_FLUSH_INPUT_USEQ_CODE_WORDS UINT32_C(0x1)
+ /* Initialize all buffers, clearing out any collected data */
+ #define HWRM_DBG_USEQ_FLUSH_INPUT_BUFFERS UINT32_C(0x2)
+ #define HWRM_DBG_USEQ_FLUSH_INPUT_LAST \
+ HWRM_DBG_USEQ_FLUSH_INPUT_BUFFERS
+ /* This field is reserved */
+ uint8_t unused_0[6];
} __rte_packed;
-/* hwrm_nvm_flush_output (size:128b/16B) */
-struct hwrm_nvm_flush_output {
+/* hwrm_dbg_useq_flush_output (size:256b/32B) */
+struct hwrm_dbg_useq_flush_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -61338,40 +67652,45 @@ struct hwrm_nvm_flush_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /* Non-zero firmware timestamp */
+ uint32_t nz_fw_timestamp;
+ /* The last selected USID */
+ uint16_t last_usid;
+ /* The number of USEQs currently allocated */
+ uint16_t num_useq_allocd;
+ /* Flags indicating current USEQ engine state */
+ uint32_t useq_resp_flags;
+ /* When set, there is at least some data available to be delivered */
+ #define HWRM_DBG_USEQ_FLUSH_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL \
+ UINT32_C(0x1)
+ /* When set, all internal buffers are full */
+ #define HWRM_DBG_USEQ_FLUSH_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW \
+ UINT32_C(0x2)
+ #define HWRM_DBG_USEQ_FLUSH_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST \
+ HWRM_DBG_USEQ_FLUSH_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW
+ /* Current count of the number of full buffers available for delivery */
+ uint8_t full_cnt;
+ /* Reserved */
+ uint8_t useq_resp_unused_0[3];
+ /* This field is reserved */
+ uint32_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
- uint8_t valid;
-} __rte_packed;
-
-/* hwrm_nvm_flush_cmd_err (size:64b/8B) */
-struct hwrm_nvm_flush_cmd_err {
- /*
- * command specific error codes that goes to
- * the cmd_err field in Common HWRM Error Response.
- */
- uint8_t code;
- /* Unknown error */
- #define HWRM_NVM_FLUSH_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
- /* flush could not be performed */
- #define HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL UINT32_C(0x1)
- #define HWRM_NVM_FLUSH_CMD_ERR_CODE_LAST \
- HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL
- uint8_t unused_0[7];
+ uint32_t valid;
} __rte_packed;
-/*************************
- * hwrm_nvm_get_variable *
- *************************/
+/************************
+ * hwrm_dbg_useq_cw_cfg *
+ ************************/
-/* hwrm_nvm_get_variable_input (size:320b/40B) */
-struct hwrm_nvm_get_variable_input {
+/* hwrm_dbg_useq_cw_cfg_input (size:960b/120B) */
+struct hwrm_dbg_useq_cw_cfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -61400,50 +67719,56 @@ struct hwrm_nvm_get_variable_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* The USID of the sequence being configured */
+ uint16_t usid;
/*
- * This is the host address where
- * nvm variable will be stored
+ * The code words given in this message will be placed
+ * at this offset from the starting code word for this
+ * usid. NOTE: when offset is zero, the first 6 32-bit
+ * words may contain values for F0-F7 as well as the
+ * main code word index. This is determined by checking
+ * the usid_ctrl_present flag.
*/
- uint64_t dest_data_addr;
- /* size of data in bits */
- uint16_t data_len;
- /* nvm cfg option number */
- uint16_t option_num;
- /* reserved. */
- #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
- /* reserved. */
- #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \
- UINT32_C(0xffff)
- #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_LAST \
- HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF
+ uint16_t offset;
/*
- * Number of dimensions for this nvm configuration variable.
- * This value indicates how many of the indexN values to use.
- * A value of 0 means that none of the indexN values are valid.
- * A value of 1 requires at index0 is valued, a value of 2
- * requires that index0 and index1 are valid, and so forth
+ * When the use_dma flag is clear, this is the length in bytes
+ * to be digested from the opaque data area.
*/
- uint16_t dimensions;
- /* index for the 1st dimensions */
- uint16_t index_0;
- /* index for the 2nd dimensions */
- uint16_t index_1;
- /* index for the 3rd dimensions */
- uint16_t index_2;
- /* index for the 4th dimensions */
- uint16_t index_3;
- uint8_t flags;
+ uint16_t size;
/*
- * When this bit is set to 1, the factory default value will be returned,
- * 0 returns the operational value.
+ * Flags associated with the current message
+ * data area.
*/
- #define HWRM_NVM_GET_VARIABLE_INPUT_FLAGS_FACTORY_DFLT \
+ uint16_t flags;
+ /*
+ * When set, the opaque data begins with a block of control
+ * information to be associated with the usid. This includes
+ * F0-F7 code word indexes as well as the code word index for
+ * main.
+ */
+ #define HWRM_DBG_USEQ_CW_CFG_INPUT_FLAGS_USID_CTRL_PRESENT \
UINT32_C(0x1)
- uint8_t unused_0;
+ /*
+ * When set, opaque contains a 64b host address used to DMA
+ * the entire code word sequence. The offset within the
+ * opaque data depends on the state of other flags.
+ */
+ #define HWRM_DBG_USEQ_CW_CFG_INPUT_FLAGS_USE_DMA \
+ UINT32_C(0x2)
+ /*
+ * When set, this message is the last configuration message
+ * for the given usid.
+ */
+ #define HWRM_DBG_USEQ_CW_CFG_INPUT_FLAGS_END \
+ UINT32_C(0x8000)
+ #define HWRM_DBG_USEQ_CW_CFG_INPUT_FLAGS_LAST \
+ HWRM_DBG_USEQ_CW_CFG_INPUT_FLAGS_END
+ /* command dependent data (e.g. function id for host dma command) */
+ uint32_t opaque[24];
} __rte_packed;
-/* hwrm_nvm_get_variable_output (size:128b/16B) */
-struct hwrm_nvm_get_variable_output {
+/* hwrm_dbg_useq_cw_cfg_output (size:192b/24B) */
+struct hwrm_dbg_useq_cw_cfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -61452,63 +67777,122 @@ struct hwrm_nvm_get_variable_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* size of data of the actual variable retrieved in bits */
- uint16_t data_len;
+ /* Non-zero firmware timestamp */
+ uint32_t nz_fw_timestamp;
+ /* The last selected USID */
+ uint16_t last_usid;
+ /* The number of USEQs currently allocated */
+ uint16_t num_useq_allocd;
+ /* Flags indicating current USEQ engine state */
+ uint32_t useq_resp_flags;
+ /* When set, there is at least some data available to be delivered */
+ #define HWRM_DBG_USEQ_CW_CFG_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL \
+ UINT32_C(0x1)
+ /* When set, all internal buffers are full */
+ #define HWRM_DBG_USEQ_CW_CFG_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW \
+ UINT32_C(0x2)
+ #define HWRM_DBG_USEQ_CW_CFG_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST \
+ HWRM_DBG_USEQ_CW_CFG_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW
+ /* Current count of the number of full buffers available for delivery */
+ uint8_t full_cnt;
+ /* Reserved */
+ uint8_t useq_resp_unused_0[3];
+} __rte_packed;
+
+/***********************
+ * hwrm_dbg_useq_qcaps *
+ ***********************/
+
+
+/* hwrm_dbg_useq_qcaps_input (size:128b/16B) */
+struct hwrm_dbg_useq_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * option_num is the option number for the data retrieved. It is
- * possible in the future that the option number returned would be
- * different than requested. This condition could occur if an option is
- * deprecated and a new option id is defined with similar
- * characteristics, but has a slightly different definition. This
- * also makes it convenient for the caller to identify the variable
- * result with the option id from the response.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t option_num;
- /* reserved. */
- #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
- /* reserved. */
- #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF \
- UINT32_C(0xffff)
- #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_LAST \
- HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF
- uint8_t unused_0[3];
+ uint16_t cmpl_ring;
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint8_t valid;
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
} __rte_packed;
-/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
-struct hwrm_nvm_get_variable_cmd_err {
+/* hwrm_dbg_useq_qcaps_output (size:384b/48B) */
+struct hwrm_dbg_useq_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Non-zero firmware timestamp */
+ uint32_t nz_fw_timestamp;
+ /* The last selected USID */
+ uint16_t last_usid;
+ /* The number of USEQs currently allocated */
+ uint16_t num_useq_allocd;
+ /* Flags indicating current USEQ engine state */
+ uint32_t useq_resp_flags;
+ /* When set, there is at least some data available to be delivered */
+ #define HWRM_DBG_USEQ_QCAPS_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL \
+ UINT32_C(0x1)
+ /* When set, all internal buffers are full */
+ #define HWRM_DBG_USEQ_QCAPS_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW \
+ UINT32_C(0x2)
+ #define HWRM_DBG_USEQ_QCAPS_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST \
+ HWRM_DBG_USEQ_QCAPS_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW
+ /* Current count of the number of full buffers available for delivery */
+ uint8_t full_cnt;
+ /* Reserved */
+ uint8_t useq_resp_unused_0[3];
+ /* Maximum number of USEQ that can be tracked by firmware */
+ uint32_t max_num_useq;
+ /* Maximum number of code word bytes for a single USEQ */
+ uint32_t max_useq_size;
+ /* The maximum number of output bytes a single USEQ may generate */
+ uint32_t max_useq_32b_output_size;
+ /* The number of internal USEQ output buffers, each of 4096 bytes */
+ uint32_t num_buf;
+ /* This field is reserved */
+ uint32_t unused_0;
/*
- * command specific error codes that goes to
- * the cmd_err field in Common HWRM Error Response.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint8_t code;
- /* Unknown error */
- #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
- /* variable does not exist */
- #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1)
- /* configuration is corrupted and the variable cannot be saved */
- #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2)
- /* length specified is too small */
- #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT UINT32_C(0x3)
- #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LAST \
- HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
- uint8_t unused_0[7];
+ uint32_t valid;
} __rte_packed;
-/*************************
- * hwrm_nvm_set_variable *
- *************************/
+/***************************
+ * hwrm_dbg_useq_sched_cfg *
+ ***************************/
-/* hwrm_nvm_set_variable_input (size:320b/40B) */
-struct hwrm_nvm_set_variable_input {
+/* hwrm_dbg_useq_sched_cfg_input (size:192b/24B) */
+struct hwrm_dbg_useq_sched_cfg_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -61537,74 +67921,35 @@ struct hwrm_nvm_set_variable_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+	/* Enumeration values for enabling or disabling the scheduler */
+ uint16_t global_cfg;
+ /* This value will leave the global scheduler in its current state */
+ #define HWRM_DBG_USEQ_SCHED_CFG_INPUT_NO_CHANGE UINT32_C(0x0)
/*
- * This is the host address where
- * nvm variable will be copied from
+ * This value disables the global scheduler. This mode must be used
+ * when the RUN command is being used to run individual sequences.
*/
- uint64_t src_data_addr;
- /* size of data in bits */
- uint16_t data_len;
- /* nvm cfg option number */
- uint16_t option_num;
- /* reserved. */
- #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
- /* reserved. */
- #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \
- UINT32_C(0xffff)
- #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_LAST \
- HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF
+ #define HWRM_DBG_USEQ_SCHED_CFG_INPUT_DISABLE UINT32_C(0x1)
/*
- * Number of dimensions for this nvm configuration variable.
- * This value indicates how many of the indexN values to use.
- * A value of 0 means that none of the indexN values are valid.
- * A value of 1 requires at index0 is valued, a value of 2
- * requires that index0 and index1 are valid, and so forth
+ * This value enables the global scheduler. When enabled, USEQs will
+ * be scheduled based on their polling intervals
*/
- uint16_t dimensions;
- /* index for the 1st dimensions */
- uint16_t index_0;
- /* index for the 2nd dimensions */
- uint16_t index_1;
- /* index for the 3rd dimensions */
- uint16_t index_2;
- /* index for the 4th dimensions */
- uint16_t index_3;
- uint8_t flags;
+ #define HWRM_DBG_USEQ_SCHED_CFG_INPUT_ENABLE UINT32_C(0x2)
+ #define HWRM_DBG_USEQ_SCHED_CFG_INPUT_LAST \
+ HWRM_DBG_USEQ_SCHED_CFG_INPUT_ENABLE
/*
- * When this bit is 1, flush internal cache after this write
- * operation (see hwrm_nvm_flush command.)
+ * The given polling interval will be associated with this USID. A
+ * value of -1 indicates that the USID is invalid. The invalid USID is
+ * used when using this message only for global scheduler
+ * configuration.
*/
- #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FORCE_FLUSH \
- UINT32_C(0x1)
- /* encryption method */
- #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_MASK \
- UINT32_C(0xe)
- #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_SFT 1
- /* No encryption. */
- #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_NONE \
- (UINT32_C(0x0) << 1)
- /* one-way encryption. */
- #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1 \
- (UINT32_C(0x1) << 1)
- /* symmetric AES256 encryption. */
- #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_AES256 \
- (UINT32_C(0x2) << 1)
- /* SHA1 digest appended to plaintext contents, for authentication */
- #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH \
- (UINT32_C(0x3) << 1)
- #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_LAST \
- HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
- #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FLAGS_UNUSED_0_MASK \
- UINT32_C(0x70)
- #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FLAGS_UNUSED_0_SFT 4
- /* When this bit is 1, update the factory default region */
- #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FACTORY_DEFAULT \
- UINT32_C(0x80)
- uint8_t unused_0;
+ uint16_t usid;
+ /* This value represents microseconds between runs of the USEQ */
+ uint32_t polling_interval;
} __rte_packed;
-/* hwrm_nvm_set_variable_output (size:128b/16B) */
-struct hwrm_nvm_set_variable_output {
+/* hwrm_dbg_useq_sched_cfg_output (size:256b/32B) */
+struct hwrm_dbg_useq_sched_cfg_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -61613,42 +67958,45 @@ struct hwrm_nvm_set_variable_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /* Non-zero firmware timestamp */
+ uint32_t nz_fw_timestamp;
+ /* The last selected USID */
+ uint16_t last_usid;
+ /* The number of USEQs currently allocated */
+ uint16_t num_useq_allocd;
+ /* Flags indicating current USEQ engine state */
+ uint32_t useq_resp_flags;
+ /* When set, there is at least some data available to be delivered */
+ #define HWRM_DBG_USEQ_SCHED_CFG_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL \
+ UINT32_C(0x1)
+ /* When set, all internal buffers are full */
+ #define HWRM_DBG_USEQ_SCHED_CFG_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW \
+ UINT32_C(0x2)
+ #define HWRM_DBG_USEQ_SCHED_CFG_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST \
+ HWRM_DBG_USEQ_SCHED_CFG_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW
+ /* Current count of the number of full buffers available for delivery */
+ uint8_t full_cnt;
+ /* Reserved */
+ uint8_t useq_resp_unused_0[3];
+ /* This field is reserved */
+ uint32_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
- uint8_t valid;
-} __rte_packed;
-
-/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
-struct hwrm_nvm_set_variable_cmd_err {
- /*
- * command specific error codes that goes to
- * the cmd_err field in Common HWRM Error Response.
- */
- uint8_t code;
- /* Unknown error */
- #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
- /* variable does not exist */
- #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1)
- /* configuration is corrupted and the variable cannot be saved */
- #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2)
- #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_LAST \
- HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
- uint8_t unused_0[7];
+ uint32_t valid;
} __rte_packed;
-/****************************
- * hwrm_nvm_validate_option *
- ****************************/
+/*********************
+ * hwrm_dbg_useq_run *
+ *********************/
-/* hwrm_nvm_validate_option_input (size:320b/40B) */
-struct hwrm_nvm_validate_option_input {
+/* hwrm_dbg_useq_run_input (size:320b/40B) */
+struct hwrm_dbg_useq_run_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -61677,44 +68025,56 @@ struct hwrm_nvm_validate_option_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* The USID to be run */
+ uint16_t usid;
+ /* Type of run to execute for the given USID */
+ uint8_t run_type;
+ /* This run type will execute the requested USEQ only a single time */
+ #define HWRM_DBG_USEQ_RUN_INPUT_RUN_TYPE_SINGLE UINT32_C(0x0)
/*
- * This is the host address where
- * nvm variable will be copied from
+ * This run type will execute the requested USEQ a number of times
+ * given by run_cnt with a run interval given by the run_interval
+ * parameter.
*/
- uint64_t src_data_addr;
- /* size of data in bits */
- uint16_t data_len;
- /* nvm cfg option number */
- uint16_t option_num;
- /* reserved. */
- #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_0 \
- UINT32_C(0x0)
- /* reserved. */
- #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF \
- UINT32_C(0xffff)
- #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_LAST \
- HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF
+ #define HWRM_DBG_USEQ_RUN_INPUT_RUN_TYPE_CNT UINT32_C(0x1)
/*
- * Number of dimensions for this nvm configuration variable.
- * This value indicates how many of the indexN values to use.
- * A value of 0 means that none of the indexN values are valid.
- * A value of 1 requires at index0 is valued, a value of 2
- * requires that index0 and index1 are valid, and so forth
+ * This run type will execute the requested USEQ as many times as it
+ * needs to fill an entire buffer to return to the host. The runs
+ * will occur with a run interval given by the run_interval
+ * parameter.
*/
- uint16_t dimensions;
- /* index for the 1st dimensions */
- uint16_t index_0;
- /* index for the 2nd dimensions */
- uint16_t index_1;
- /* index for the 3rd dimensions */
- uint16_t index_2;
- /* index for the 4th dimensions */
- uint16_t index_3;
- uint8_t unused_0[2];
+ #define HWRM_DBG_USEQ_RUN_INPUT_RUN_TYPE_FILL_BUF UINT32_C(0x2)
+ #define HWRM_DBG_USEQ_RUN_INPUT_RUN_TYPE_LAST \
+ HWRM_DBG_USEQ_RUN_INPUT_RUN_TYPE_FILL_BUF
+ /*
+ * If indicated by flags, this represents the number of times to run
+	 * the USEQ. Note that runs are stopped if the buffer fills first,
+	 * regardless of the number of runs. For example, if a run_cnt of 10 is
+	 * specified and 3 runs result in the buffer being full, then only 3
+ * runs are executed.
+ */
+ uint8_t run_cnt;
+ /*
+ * This value represents microseconds between runs of the USEQ when
+ * running multiple times as indicated by flags.
+ */
+ uint32_t run_interval;
+ /*
+ * Address of the host buffer where collected USEQ output data will be
+ * placed
+ */
+ uint64_t host_dest_addr;
+ /*
+ * Size, in bytes, of the memory associated with host_dest_addr. It is
+ * expected that this is >= 4096
+ */
+ uint32_t host_dest_len;
+ /* This field is reserved */
+ uint32_t unused_0;
} __rte_packed;
-/* hwrm_nvm_validate_option_output (size:128b/16B) */
-struct hwrm_nvm_validate_option_output {
+/* hwrm_dbg_useq_run_output (size:256b/32B) */
+struct hwrm_dbg_useq_run_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -61723,51 +68083,49 @@ struct hwrm_nvm_validate_option_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t result;
- /*
- * indicates that the value provided for the option is not matching
- * with the saved data.
- */
- #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_NOT_MATCH UINT32_C(0x0)
+ /* Non-zero firmware timestamp */
+ uint32_t nz_fw_timestamp;
+ /* The last selected USID */
+ uint16_t last_usid;
+ /* The number of USEQs currently allocated */
+ uint16_t num_useq_allocd;
+ /* Flags indicating current USEQ engine state */
+ uint32_t useq_resp_flags;
+ /* When set, there is at least some data available to be delivered */
+ #define HWRM_DBG_USEQ_RUN_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL \
+ UINT32_C(0x1)
+ /* When set, all internal buffers are full */
+ #define HWRM_DBG_USEQ_RUN_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW \
+ UINT32_C(0x2)
+ #define HWRM_DBG_USEQ_RUN_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST \
+ HWRM_DBG_USEQ_RUN_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW
+ /* Current count of the number of full buffers available for delivery */
+ uint8_t full_cnt;
+ /* Reserved */
+ uint8_t useq_resp_unused_0[3];
/*
- * indicates that the value provided for the option is matching the
- * saved data.
+ * The length, in bytes, of the amount of data placed in the
+ * corresponding host_dest_addr given in the input message. This will
+ * always be a multiple of 4096
*/
- #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH UINT32_C(0x1)
- #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_LAST \
- HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH
- uint8_t unused_0[6];
+ uint32_t host_dest_filled_len;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
- uint8_t valid;
-} __rte_packed;
-
-/* hwrm_nvm_validate_option_cmd_err (size:64b/8B) */
-struct hwrm_nvm_validate_option_cmd_err {
- /*
- * command specific error codes that goes to
- * the cmd_err field in Common HWRM Error Response.
- */
- uint8_t code;
- /* Unknown error */
- #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
- #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_LAST \
- HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN
- uint8_t unused_0[7];
+ uint32_t valid;
} __rte_packed;
-/*******************
- * hwrm_nvm_defrag *
- *******************/
+/******************************
+ * hwrm_dbg_useq_delivery_req *
+ ******************************/
-/* hwrm_nvm_defrag_input (size:192b/24B) */
-struct hwrm_nvm_defrag_input {
+/* hwrm_dbg_useq_delivery_req_input (size:896b/112B) */
+struct hwrm_dbg_useq_delivery_req_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -61796,14 +68154,22 @@ struct hwrm_nvm_defrag_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t flags;
- /* This bit must be '1' to perform NVM defragmentation. */
- #define HWRM_NVM_DEFRAG_INPUT_FLAGS_DEFRAG UINT32_C(0x1)
- uint8_t unused_0[4];
+ /*
+ * Eight destination addresses provide host memory space for FW to
+ * deliver USEQ output details. A value of 0x0 for the address can be
+ * used to inform FW that the buffer is not available.
+ */
+ uint64_t host_dest_addrs[8];
+ /*
+ * The length, in bytes, of the corresponding host_dest_addrs array
+	 * entry. Each valid host_dest_addrs entry must have a len of at least
+ * 4096 bytes.
+ */
+ uint32_t host_dest_len[8];
} __rte_packed;
-/* hwrm_nvm_defrag_output (size:128b/16B) */
-struct hwrm_nvm_defrag_output {
+/* hwrm_dbg_useq_delivery_req_output (size:512b/64B) */
+struct hwrm_dbg_useq_delivery_req_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -61812,40 +68178,51 @@ struct hwrm_nvm_defrag_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t unused_0[7];
+ /* Non-zero firmware timestamp */
+ uint32_t nz_fw_timestamp;
+ /* The last selected USID */
+ uint16_t last_usid;
+ /* The number of USEQs currently allocated */
+ uint16_t num_useq_allocd;
+ /* Flags indicating current USEQ engine state */
+ uint32_t useq_resp_flags;
+ /* When set, there is at least some data available to be delivered */
+ #define HWRM_DBG_USEQ_DELIVERY_REQ_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL \
+ UINT32_C(0x1)
+ /* When set, all internal buffers are full */
+ #define HWRM_DBG_USEQ_DELIVERY_REQ_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW \
+ UINT32_C(0x2)
+ #define HWRM_DBG_USEQ_DELIVERY_REQ_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST \
+ HWRM_DBG_USEQ_DELIVERY_REQ_OUTPUT_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW
+ /* Current count of the number of full buffers available for delivery */
+ uint8_t full_cnt;
+ /* Reserved */
+ uint8_t useq_resp_unused_0[3];
/*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
+ * The length, in bytes, of the amount of data placed in the
+ * corresponding host_dest_addrs entry given in the input message. This
+ * will always be a multiple of 4096.
*/
- uint8_t valid;
-} __rte_packed;
-
-/* hwrm_nvm_defrag_cmd_err (size:64b/8B) */
-struct hwrm_nvm_defrag_cmd_err {
+ uint32_t host_dest_filled_len[8];
+ /* This field is reserved */
+ uint32_t unused_0;
/*
- * command specific error codes that goes to
- * the cmd_err field in Common HWRM Error Response.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint8_t code;
- /* Unknown error */
- #define HWRM_NVM_DEFRAG_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
- /* NVM defragmentation could not be performed */
- #define HWRM_NVM_DEFRAG_CMD_ERR_CODE_FAIL UINT32_C(0x1)
- #define HWRM_NVM_DEFRAG_CMD_ERR_CODE_LAST \
- HWRM_NVM_DEFRAG_CMD_ERR_CODE_FAIL
- uint8_t unused_0[7];
+ uint32_t valid;
} __rte_packed;
-/****************
- * hwrm_oem_cmd *
- ****************/
+/*****************************
+ * hwrm_dbg_log_buffer_flush *
+ *****************************/
-/* hwrm_oem_cmd_input (size:1024b/128B) */
-struct hwrm_oem_cmd_input {
+/* hwrm_dbg_log_buffer_flush_input (size:192b/24B) */
+struct hwrm_dbg_log_buffer_flush_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -61874,14 +68251,44 @@ struct hwrm_oem_cmd_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t IANA;
- uint32_t unused_0;
- /* This field contains the vendor specific command data. */
- uint32_t oem_data[26];
+ /* Type of trace buffer to flush. */
+ uint16_t type;
+ /* SRT trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_SRT_TRACE \
+ UINT32_C(0x0)
+ /* SRT2 trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_SRT2_TRACE \
+ UINT32_C(0x1)
+ /* CRT trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_CRT_TRACE \
+ UINT32_C(0x2)
+ /* CRT2 trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_CRT2_TRACE \
+ UINT32_C(0x3)
+ /* RIGP0 trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_RIGP0_TRACE \
+ UINT32_C(0x4)
+ /* L2 HWRM trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_L2_HWRM_TRACE \
+ UINT32_C(0x5)
+ /* RoCE HWRM trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_ROCE_HWRM_TRACE \
+ UINT32_C(0x6)
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_LAST \
+ HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_ROCE_HWRM_TRACE
+ uint8_t unused_1[2];
+ /* Control flags. */
+ uint32_t flags;
+ /*
+ * When set, it indicates that all buffers should be flushed.
+ * The type will be ignored.
+ */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_FLAGS_FLUSH_ALL_BUFFERS \
+ UINT32_C(0x1)
} __rte_packed;
-/* hwrm_oem_cmd_output (size:768b/96B) */
-struct hwrm_oem_cmd_output {
+/* hwrm_dbg_log_buffer_flush_output (size:128b/16B) */
+struct hwrm_dbg_log_buffer_flush_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -61890,16 +68297,20 @@ struct hwrm_oem_cmd_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint32_t IANA;
- uint32_t unused_0;
- /* This field contains the vendor specific response data. */
- uint32_t oem_data[18];
- uint8_t unused_1[7];
+ /*
+ * Specifies the current host buffer offset. Data up to this offset
+ * has been populated by the firmware. For example, if the firmware
+ * has DMA-ed 8192 bytes to the host buffer, then this field has a
+ * value of 8192. This field rolls over to zero once the firmware
+ * writes the last page of the host buffer
+ */
+ uint32_t current_buffer_offset;
+ uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -14,7 +14,7 @@ endif
headers = files('rte_pmd_bnxt.h')
cflags_options = [
- '-DSUPPORT_CFA_HW_ALL=1',
+ '-DSUPPORT_CFA_HW_P70=1',
]
foreach option:cflags_options
@@ -34,6 +34,7 @@ sources = files(
'bnxt_flow.c',
'bnxt_hwrm.c',
'bnxt_irq.c',
+ 'bnxt_mpc.c',
'bnxt_ring.c',
'bnxt_rxq.c',
'bnxt_rxr.c',
@@ -49,7 +50,9 @@ sources = files(
#Add the subdirectories that need to be compiled
subdir('tf_ulp')
subdir('tf_core')
+subdir('tf_core/v3')
subdir('hcapi/cfa')
+subdir('hcapi/cfa_v3')
if arch_subdir == 'x86'
sources += files('bnxt_rxtx_vec_sse.c')
new file mode 100644
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+# Copyright(c) 2022 Broadcom
+
+#Include the folder for headers
+includes += include_directories('.')
+
+foreach option:cflags_options
+ if cc.has_argument(option)
+ cflags += option
+ endif
+endforeach
+
+#Add the source files
+sources += files(
+ 'tfc_act.c',
+ 'tfc_cpm.c',
+ 'tfc_em.c',
+ 'tfc_global_id.c',
+ 'tfc_ident.c',
+ 'tfc_idx_tbl.c',
+ 'tfc_if_tbl.c',
+ 'tfc_init.c',
+ 'tfc_mpc_table.c',
+ 'tfc_msg.c',
+ 'tfc_priv.c',
+ 'tfc_resources.c',
+ 'tfc_session.c',
+ 'tfc_tbl_scope.c',
+ 'tfc_tcam.c',
+ 'tfc_util.c',
+ 'tfo.c',
+ 'tfc_vf2pf_msg.c'
+)
new file mode 100644
@@ -0,0 +1,1527 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TFC_H_
+#define _TFC_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "cfa_resources.h"
+#include "cfa_types.h"
+
+/**
+ * @file
+ *
+ * @brief TFC (Truflow Core v3) API Header File
+ *
+ * @page TFCV3 Truflow Core v3
+ *
+ * These pages describe the APIs for Truflow Core v3.
+ *
+ * - @subpage Init
+ * - @subpage Resources
+ * - @subpage Session
+ * - @subpage GID
+ * - @subpage Identifiers
+ * - @subpage GIM
+ * - @subpage TCAM
+ * - @subpage TBM
+ * - @subpage EM
+ * - @subpage ACTCFA
+ * - @subpage TFCOV3
+ */
+
+/********** BEGIN Truflow Core v3 DEFINITIONS **********/
+/* @cond temporary */
+#define TFC_V3_RESOURCE_API_ENABLE 0
+/* @endcond */
+
+/**
+ * TFC handle
+ */
+struct tfc {
+ /**
+ * Pointer to the private tfc object
+ */
+ void *tfo;
+ /**
+	 * Pointer to the parent bp struct
+ */
+ void *bp;
+};
+
+/* API Guidance:
+ *
+ * 1. If more than 5-6 parameters, please define structures
+ *
+ * 2. Design structures that can be used with multiple APIs
+ *
+ * 3. If items in structures are not to be used, these must
+ * be documented in the API header IN DETAIL.
+ *
+ * 4. Use defines in cfa_types.h where possible. These are shared
+ * firmware types to avoid duplication. These types do
+ * not represent the HWRM interface and may need to be mapped
+ * to HWRM definitions.
+ *
+ * 5. Resource types and subtypes are defined in cfa_resources.h
+ */
+
+/********** BEGIN API FUNCTION PROTOTYPES/PARAMETERS **********/
+/**
+ * @page Init Initialization and De-initialization APIs
+ *
+ * @ref tfc_open
+ *
+ * @ref tfc_close
+ */
+/**
+ * Allocate the TFC state for this DPDK port/function. The TF
+ * object memory is allocated during this API call.
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ *
+ * @note This API will initialize only the software state.
+ */
+int tfc_open(struct tfc *tfcp);
+
+/**
+ * De-allocate the TFC state for this DPDK port/function. The TF
+ * object memory is deallocated during this API call.
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ *
+ * @note This API will reset only the software state.
+ */
+int tfc_close(struct tfc *tfcp);
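+
+/*
+ * Illustrative usage sketch (not part of the API): a minimal open/close
+ * sequence. The bnxt "bp" pointer and the handle name my_tfc are placeholder
+ * names owned by the caller.
+ *
+ *	struct tfc my_tfc = { 0 };
+ *	int rc;
+ *
+ *	my_tfc.bp = bp;			// parent driver private structure
+ *	rc = tfc_open(&my_tfc);		// allocates the private tfo object
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	rc = tfc_close(&my_tfc);	// resets the software state only
+ */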
+
+/**
+ * @page Resources
+ *
+ * @ref tfc_resource_types_query
+ */
+
+/**
+ * The maximum number of foreseeable resource types.
+ * Use cfa_resource_types enum internally.
+ */
+#define TFC_MAX_RESOURCE_TYPES 32
+
+/**
+ * Supported resource information
+ */
+struct tfc_resources {
+	/** Mask of valid resource types */
+ uint32_t rtypes_mask;
+ /** Maximum resource type number */
+ uint8_t max_rtype;
+ /** Array indexed by resource type indicating valid subtypes */
+ uint32_t rsubtypes_mask[TFC_MAX_RESOURCE_TYPES];
+};
+
+/**
+ * Get all supported CFA resource types for the device
+ *
+ * This API queries the firmware for all supported resource
+ * types and subtypes.
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in,out] resources
+ * Pointer to a structure containing information about the supported CFA device
+ * resources.
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_resource_types_query(struct tfc *tfcp, struct tfc_resources *resources);
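+
+/*
+ * Illustrative usage sketch (not part of the API): query the device and walk
+ * the returned resource type mask. tfcp and rc are assumed to be provided by
+ * the caller.
+ *
+ *	struct tfc_resources res = { 0 };
+ *	uint8_t t;
+ *
+ *	rc = tfc_resource_types_query(tfcp, &res);
+ *	for (t = 0; !rc && t <= res.max_rtype; t++)
+ *		if (res.rtypes_mask & (1U << t))
+ *			;	// valid subtypes are in res.rsubtypes_mask[t]
+ */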
+
+/**
+ * @page Session
+ *
+ * @ref tfc_session_id_alloc
+ *
+ * @ref tfc_session_fid_add
+ *
+ * @ref tfc_session_fid_rem
+ */
+
+/**
+ * Allocate a TFC session
+ *
+ * This API goes to the firmware to allocate a TFC session id and associate a
+ * forwarding function with the session.
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * Function id to associate with the session
+ *
+ * @param[out] sid
+ * Pointer to where the session id will be returned
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_session_id_alloc(struct tfc *tfcp, uint16_t fid, uint16_t *sid);
+
+/**
+ * Associate a forwarding function with an existing TFC session
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * Function id to associate with the session
+ *
+ * @param[in] sid
+ * The session id to associate with
+ *
+ * @param[in,out] fid_cnt
+ * The number of forwarding functions currently associated with the session
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_session_fid_add(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ uint16_t *fid_cnt);
+/**
+ * Disassociate a forwarding function from an existing TFC session
+ *
+ * Once the last function has been removed from the session in the firmware,
+ * the session is freed and all associated resources are freed.
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * Function id to associate with the session
+ *
+ * @param[in,out] fid_cnt
+ * The number of forwarding functions currently associated with the session
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_session_fid_rem(struct tfc *tfcp, uint16_t fid, uint16_t *fid_cnt);
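+
+/*
+ * Illustrative usage sketch (not part of the API): allocate a session for one
+ * function and attach a second function to it. fid_a and fid_b are
+ * placeholder function ids supplied by the caller.
+ *
+ *	uint16_t sid, fid_cnt;
+ *
+ *	rc = tfc_session_id_alloc(tfcp, fid_a, &sid);
+ *	if (!rc)
+ *		rc = tfc_session_fid_add(tfcp, fid_b, sid, &fid_cnt);
+ *	...
+ *	rc = tfc_session_fid_rem(tfcp, fid_b, &fid_cnt);
+ */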
+
+/**
+ * @page GID Global Identifier
+ *
+ * @ref tfc_global_id_alloc
+ */
+
+/** Domain id range
+ */
+enum tfc_domain_id {
+ TFC_DOMAIN_ID_INVALID = 0,
+ TFC_DOMAIN_ID_1,
+ TFC_DOMAIN_ID_2,
+ TFC_DOMAIN_ID_3,
+ TFC_DOMAIN_ID_4,
+ TFC_DOMAIN_ID_MAX = TFC_DOMAIN_ID_4
+};
+
+/** Global id request definition
+ */
+struct tfc_global_id_req {
+ enum cfa_resource_type rtype; /**< Resource type */
+ uint8_t rsubtype; /**< Resource subtype */
+ enum cfa_dir dir; /**< Direction */
+ uint16_t cnt; /**< Number of resources to allocate of this type */
+};
+
+/** Global id resource definition
+ */
+struct tfc_global_id {
+ enum cfa_resource_type rtype; /**< Resource type */
+ uint8_t rsubtype; /**< Resource subtype */
+ enum cfa_dir dir; /**< Direction */
+ uint16_t id; /**< Resource id */
+};
+
+/**
+ * Allocate global TFC resources
+ *
+ * Some resources are not owned by a single session. They are "global" in that
+ * they will be in use as long as any associated session exists. Once all
+ * sessions/functions have been removed, all associated global ids are freed.
+ * There are currently up to 4 global id domain sets.
+ *
+ * TODO: REDUCE PARAMETERS WHEN IMPLEMENTING API
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] domain_id
+ * The domain id to associate.
+ *
+ * @param[in] req_cnt
+ * The number of total resource requests
+ *
+ * @param[in] glb_id_req
+ * The list of global id requests
+ *
+ * @param[in,out] rsp_cnt
+ * The number of items in the response buffer
+ *
+ * @param[out] glb_id_rsp
+ * The returned list of allocated global ids
+ *
+ * @param[in,out] first
+ * Set if this is the first global id request for the indicated domain id.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_global_id_alloc(struct tfc *tfcp, uint16_t fid, enum tfc_domain_id domain_id,
+ uint16_t req_cnt, const struct tfc_global_id_req *glb_id_req,
+ struct tfc_global_id *glb_id_rsp, uint16_t *rsp_cnt,
+ bool *first);
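+
+/*
+ * Illustrative usage sketch (not part of the API): request a single RX global
+ * id of one resource type. some_rtype is a placeholder; real callers use the
+ * definitions from cfa_resources.h.
+ *
+ *	struct tfc_global_id_req req = {
+ *		.rtype = some_rtype,	// placeholder resource type
+ *		.rsubtype = 0,
+ *		.dir = CFA_DIR_RX,
+ *		.cnt = 1,
+ *	};
+ *	struct tfc_global_id rsp;
+ *	uint16_t rsp_cnt = 1;
+ *	bool first;
+ *
+ *	rc = tfc_global_id_alloc(tfcp, fid, TFC_DOMAIN_ID_1, 1, &req,
+ *				 &rsp, &rsp_cnt, &first);
+ */
+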
+/**
+ * @page Identifiers
+ *
+ * @ref tfc_identifier_alloc
+ *
+ * @ref tfc_identifier_free
+ */
+
+/**
+ * Identifier resource structure
+ */
+struct tfc_identifier_info {
+ enum cfa_resource_subtype_ident rsubtype; /**< resource subtype */
+ enum cfa_dir dir; /**< direction rx/tx */
+ uint16_t id; /**< alloc/free index */
+};
+
+/**
+ * allocate a TFC Identifier
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] tt
+ * Track type - either track by session or by function
+ *
+ * @param[in, out] ident_info
+ * All the information related to the requested identifier (subtype/dir) and
+ * the returned identifier id.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_identifier_alloc(struct tfc *tfcp, uint16_t fid,
+ enum cfa_track_type tt,
+ struct tfc_identifier_info *ident_info);
+
+/**
+ * free a TFC Identifier
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] ident_info
+ * All the information related to the requested identifier (subtype/dir) and
+ * the identifier id to free.
+ *
+ * @returns success or failure code.
+ */
+int tfc_identifier_free(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_identifier_info *ident_info);
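+
+/*
+ * Illustrative usage sketch (not part of the API): allocate an identifier and
+ * free it again. some_ident_subtype and the track type value are placeholders
+ * taken from cfa_resources.h/cfa_types.h by real callers.
+ *
+ *	struct tfc_identifier_info ident = {
+ *		.rsubtype = some_ident_subtype,	// placeholder subtype
+ *		.dir = CFA_DIR_RX,
+ *	};
+ *	enum cfa_track_type tt = ...;		// track by session or function
+ *
+ *	rc = tfc_identifier_alloc(tfcp, fid, tt, &ident);
+ *	// on success ident.id holds the allocated identifier
+ *	...
+ *	rc = tfc_identifier_free(tfcp, fid, &ident);
+ */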
+
+/**
+ * @page GIM Index Table
+ *
+ * @ref tfc_idx_tbl_alloc
+ *
+ * @ref tfc_idx_tbl_alloc_set
+ *
+ * @ref tfc_idx_tbl_set
+ *
+ * @ref tfc_idx_tbl_get
+ *
+ * @ref tfc_idx_tbl_free
+ */
+
+/**
+ * Index table resource structure
+ */
+struct tfc_idx_tbl_info {
+ enum cfa_resource_subtype_idx_tbl rsubtype; /**< resource subtype */
+ enum cfa_dir dir; /**< direction rx/tx */
+ uint16_t id; /**< alloc/free index */
+};
+
+/**
+ * allocate a TFC index table entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] tt
+ * Track type - either track by session or by function
+ *
+ * @param[in,out] tbl_info
+ * All the information related to the requested index table entry (subtype/dir)
+ * and the returned id.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_idx_tbl_alloc(struct tfc *tfcp, uint16_t fid,
+ enum cfa_track_type tt,
+ struct tfc_idx_tbl_info *tbl_info);
+
+/**
+ * allocate and set a TFC index table entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] tt
+ * Track type - either track by session or by function
+ *
+ * @param[in,out] tbl_info
+ * All the information related to the requested index table entry (subtype/dir)
+ * and the returned id.
+ *
+ * @param[in] data
+ * Pointer to the data to write to the entry. The data is aligned correctly
+ * in the buffer for writing to the hardware.
+ *
+ * @param[in] data_sz_in_bytes
+ * The size of the entry in bytes for Thor2.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_idx_tbl_alloc_set(struct tfc *tfcp, uint16_t fid,
+ enum cfa_track_type tt,
+ struct tfc_idx_tbl_info *tbl_info,
+ const uint32_t *data, uint8_t data_sz_in_bytes);
+
+/**
+ * Set a TFC index table entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] tbl_info
+ * All the information related to the requested index table entry (subtype/dir)
+ * including the id.
+ *
+ * @param[in] data
+ * Pointer to the data to write to the entry. The data is aligned correctly
+ * in the buffer for writing to the hardware.
+ *
+ * @param[in] data_sz_in_bytes
+ * The size of the entry in device sized bytes for Thor2.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_idx_tbl_set(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_idx_tbl_info *tbl_info,
+ const uint32_t *data, uint8_t data_sz_in_bytes);
+
+/**
+ * Get a TFC index table entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] tbl_info
+ * All the information related to the requested index table entry (subtype/dir)
+ * including the id.
+ *
+ * @param[in, out] data
+ * Pointer to the data to read from the entry.
+ *
+ * @param[in,out] data_sz_in_bytes
+ * The size of the entry in device sized bytes for Thor2. Input is the
+ * size of the buffer, output is the actual size.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_idx_tbl_get(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_idx_tbl_info *tbl_info,
+ uint32_t *data, uint8_t *data_sz_in_bytes);
+/**
+ * Free a TFC index table entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] tbl_info
+ * All the information related to the requested index table entry (subtype/dir)
+ * and the returned id.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_idx_tbl_free(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_idx_tbl_info *tbl_info);
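+
+/*
+ * Illustrative usage sketch (not part of the API): allocate an index table
+ * entry, write it, read it back and free it. some_idx_tbl_subtype and the
+ * 32B entry size are placeholders; the required size depends on the subtype.
+ *
+ *	struct tfc_idx_tbl_info info = {
+ *		.rsubtype = some_idx_tbl_subtype,	// placeholder subtype
+ *		.dir = CFA_DIR_TX,
+ *	};
+ *	uint32_t data[8] = { 0 };			// 32B example entry
+ *	uint8_t sz = sizeof(data);
+ *
+ *	rc = tfc_idx_tbl_alloc_set(tfcp, fid, tt, &info, data, sizeof(data));
+ *	if (!rc)
+ *		rc = tfc_idx_tbl_get(tfcp, fid, &info, data, &sz);
+ *	...
+ *	rc = tfc_idx_tbl_free(tfcp, fid, &info);
+ */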
+
+/**
+ * @page TCAM
+ *
+ * @ref tfc_tcam_alloc
+ *
+ * @ref tfc_tcam_alloc_set
+ *
+ * @ref tfc_tcam_set
+ *
+ * @ref tfc_tcam_get
+ *
+ * @ref tfc_tcam_free
+ */
+/**
+ * Tcam table info structure
+ */
+struct tfc_tcam_info {
+ enum cfa_resource_subtype_tcam rsubtype; /**< resource subtype */
+ enum cfa_dir dir; /**< direction rx/tx */
+ uint16_t id; /**< alloc/free index */
+};
+
+/**
+ * Tcam table resource structure
+ */
+struct tfc_tcam_data {
+ uint8_t *key; /**< tcam key */
+ uint8_t *mask; /**< tcam mask */
+ uint8_t *remap; /**< remap */
+ uint8_t key_sz_in_bytes; /**< key size in bytes */
+ uint8_t remap_sz_in_bytes; /**< remap size in bytes */
+
+};
+
+/**
+ * allocate a TFC TCAM entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] tt
+ * Track type - either track by session or by function
+ *
+ * @param[in] priority
+ * Priority - the priority of the tcam entry
+ *
+ * @param[in,out] tcam_info
+ * All the information related to the requested TCAM entry (subtype/dir)
+ * and the returned id.
+ *
+ * @param[in] key_sz_in_bytes
+ * The size of the entry in bytes for Thor2.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tcam_alloc(struct tfc *tfcp, uint16_t fid,
+ enum cfa_track_type tt, uint16_t priority,
+ uint8_t key_sz_in_bytes,
+ struct tfc_tcam_info *tcam_info);
+
+/**
+ * allocate and set a TFC TCAM entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] tt
+ * Track type - either track by session or by function
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] priority
+ * Priority - the priority of the tcam entry
+ *
+ * @param[in,out] tcam_info
+ * All the information related to the requested TCAM table entry (subtype/dir)
+ * and the returned id.
+ *
+ * @param[in] tcam_data
+ * Pointer to the tcam data, including tcam, mask, and remap, to write to
+ * the entry. The data is aligned in the buffer for writing to the hardware.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tcam_alloc_set(struct tfc *tfcp, uint16_t fid,
+ enum cfa_track_type tt, uint16_t priority,
+ struct tfc_tcam_info *tcam_info,
+ const struct tfc_tcam_data *tcam_data);
+
+/**
+ * Set a TFC TCAM entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in, out] tcam_info
+ * All the information related to the requested TCAM entry (subtype/dir)
+ * including the id.
+ *
+ * @param[in] tcam_data
+ * Pointer to the tcam data, including tcam, mask, and remap, to write to
+ * the entry. The data is aligned in the buffer for writing to the hardware.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tcam_set(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_tcam_info *tcam_info,
+ const struct tfc_tcam_data *tcam_data);
+
+/**
+ * Get a TFC TCAM entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] tcam_info
+ * All the information related to the requested TCAM entry (subtype/dir)
+ * including the id.
+ *
+ * @param[in, out] tcam_data
+ * Pointer to the tcam data, including tcam, mask, and remap, to read from
+ * the entry. The data is aligned in the buffer for writing to the hardware.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tcam_get(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_tcam_info *tcam_info,
+ struct tfc_tcam_data *tcam_data);
+/**
+ * Free a TFC TCAM entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] tcam_info
+ * All the information related to the requested tcam entry (subtype/dir)
+ * and the id to be freed.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tcam_free(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_tcam_info *tcam_info);
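+
+/*
+ * Illustrative usage sketch (not part of the API): allocate a TCAM entry and
+ * program key/mask/remap in one call. some_tcam_subtype, the buffer sizes and
+ * priority are placeholders.
+ *
+ *	uint8_t key[16], mask[16], remap[4];		// filled by the caller
+ *	struct tfc_tcam_info info = {
+ *		.rsubtype = some_tcam_subtype,		// placeholder subtype
+ *		.dir = CFA_DIR_RX,
+ *	};
+ *	struct tfc_tcam_data tdata = {
+ *		.key = key, .mask = mask, .remap = remap,
+ *		.key_sz_in_bytes = sizeof(key),
+ *		.remap_sz_in_bytes = sizeof(remap),
+ *	};
+ *
+ *	rc = tfc_tcam_alloc_set(tfcp, fid, tt, priority, &info, &tdata);
+ *	...
+ *	rc = tfc_tcam_free(tfcp, fid, &info);
+ */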
+
+/**
+ * @page TBM Table Scope
+ *
+ * @ref tfc_tbl_scope_qcaps
+ *
+ * @ref tfc_tbl_scope_size_query
+ *
+ * @ref tfc_tbl_scope_id_alloc
+ *
+ * @ref tfc_tbl_scope_mem_alloc
+ *
+ * @ref tfc_tbl_scope_mem_free
+ *
+ * @ref tfc_tbl_scope_cpm_alloc
+ *
+ * @ref tfc_tbl_scope_cpm_free
+ *
+ * @ref tfc_tbl_scope_fid_add
+ *
+ * @ref tfc_tbl_scope_fid_rem
+ *
+ * @ref tfc_tbl_scope_config_state_get
+ *
+ * tfc_tbl_scope_pfid_query (FUTURE shared table scope)
+ *
+ * tfc_tbl_scope_config_get (FUTURE shared table scope)
+ *
+ */
+
+/**
+ * tfc_tbl_scope_bucket_factor indicates a multiplier factor for determining the
+ * static and dynamic buckets counts. The larger the factor, the more buckets
+ * will be allocated.
+ *
+ * This is necessary because flows will not hash so as to perfectly fill all the
+ * buckets. It is necessary to add some allowance for not fully populated
+ * buckets.
+ */
+enum tfc_tbl_scope_bucket_factor {
+ TFC_TBL_SCOPE_BUCKET_FACTOR_1 = 1,
+ TFC_TBL_SCOPE_BUCKET_FACTOR_2 = 2,
+ TFC_TBL_SCOPE_BUCKET_FACTOR_4 = 4,
+ TFC_TBL_SCOPE_BUCKET_FACTOR_8 = 8,
+ TFC_TBL_SCOPE_BUCKET_FACTOR_16 = 16,
+ TFC_TBL_SCOPE_BUCKET_FACTOR_MAX = TFC_TBL_SCOPE_BUCKET_FACTOR_16
+};
+
+/**
+ * tfc_tbl_scope_size_query_parms contains the parameters for the
+ * tfc_tbl_scope_size_query API.
+ */
+struct tfc_tbl_scope_size_query_parms {
+ /**
+ * [in] If a shared table scope, dynamic buckets are disabled. This
+ * affects the calculation for static buckets in this function.
+ * Initially, if not shared, the size of the static bucket table should
+ * be double the number of flows supported. Numbers are validated
+ * against static_cnt and dynamic_cnt
+ */
+ bool shared;
+ /**
+ * [in] Direction indexed array indicating the number of flows. Must be
+	 * at least as large as the number of entries that the buckets can point
+ * to.
+ */
+ uint32_t flow_cnt[CFA_DIR_MAX];
+ /**
+ * [in] tfc_tbl_scope_bucket_factor indicates a multiplier factor for
+ * determining the static and dynamic buckets counts. The larger the
+ * factor, the more buckets will be allocated.
+ */
+ enum tfc_tbl_scope_bucket_factor factor;
+ /**
+ * [in] The number of pools each region of the table scope will be
+ * divided into.
+ */
+ uint32_t max_pools;
+ /**
+ * [in] Direction indexed array indicating the key size.
+ */
+ uint16_t key_sz_in_bytes[CFA_DIR_MAX];
+ /**
+ * [in] Direction indexed array indicating the action record size. Must
+ * be a multiple of 32B lines on Thor2.
+ */
+ uint16_t act_rec_sz_in_bytes[CFA_DIR_MAX];
+ /**
+ * [out] Direction indexed array indicating the EM static bucket count
+ * expressed as: log2(static_bucket_count). For example if 1024 static
+ * buckets, 1024=2^10, so the value 10 would be returned.
+ */
+ uint8_t static_bucket_cnt_exp[CFA_DIR_MAX];
+ /**
+ * [out] Direction indexed array indicating the EM dynamic bucket count.
+ */
+ uint32_t dynamic_bucket_cnt[CFA_DIR_MAX];
+ /**
+ * [out] The number of minimum sized lkup records per direction. In
+ * this usage, records are the minimum lookup memory allocation unit in
+ * a table scope. This value is the total memory required for buckets
+ * and entries.
+ *
+ * Note: The EAS variously refers to these as words or cache-lines.
+ *
+ * For example, on Thor2 where each bucket consumes one record, if the
+ * key size is such that the LREC and key use 2 records, then the
+ * lkup_rec_cnt = the number of buckets + (2 * the number of flows).
+ */
+ uint32_t lkup_rec_cnt[CFA_DIR_MAX];
+ /**
+ * [out] The number of minimum sized action records per direction.
+ * Similar to the lkup_rec_cnt, records are the minimum action memory
+ * allocation unit in a table scope.
+ */
+ uint32_t act_rec_cnt[CFA_DIR_MAX];
+ /**
+ * [out] Direction indexed array indicating the size of each individual
+ * lookup record pool expressed as: log2(max_records/max_pools). For
+ * example if 1024 records and 2 pools 1024/2=512=2^9, so the value 9
+ * would be entered.
+ */
+ uint8_t lkup_pool_sz_exp[CFA_DIR_MAX];
+ /**
+ * [out] Direction indexed array indicating the size of each individual
+ * action record pool expressed as: log2(max_records/max_pools). For
+ * example if 1024 records and 2 pools 1024/2=512=2^9, so the value 9
+ * would be entered.
+ */
+ uint8_t act_pool_sz_exp[CFA_DIR_MAX];
+ /**
+ * [out] Direction indexed array indicating the offset in records from
+ * the start of the memory after the static buckets where the first
+ * lrec pool begins.
+ */
+ uint32_t lkup_rec_start_offset[CFA_DIR_MAX];
+};
+
+/**
+ * tfc_tbl_scope_mem_alloc_parms contains the parameters for allocating memory
+ * to be used by a table scope.
+ */
+struct tfc_tbl_scope_mem_alloc_parms {
+ /**
+	 * [in] If a shared table scope, indicates whether this is the first
+	 * allocation. If first, the table scope memory will be allocated.
+	 * Otherwise only the details of the configuration will be stored
+	 * internally for use - i.e. act_rec_cnt/lkup_rec_cnt/lkup_rec_start_offset.
+ */
+ bool first;
+ /**
+ * [in] Direction indexed array indicating the EM static bucket count
+ * expressed as: log2(static_bucket_count). For example if 1024 static
+ * buckets, 1024=2^10, so the value 10 would be entered.
+ */
+ uint8_t static_bucket_cnt_exp[CFA_DIR_MAX];
+ /**
+ * [in] Direction indexed array indicating the EM dynamic bucket count.
+ */
+ uint8_t dynamic_bucket_cnt[CFA_DIR_MAX];
+ /**
+ * [in] The number of minimum sized lkup records per direction. In this
+ * usage, records are the minimum lookup memory allocation unit in a
+ * table scope. This value is the total memory required for buckets and
+ * entries.
+ */
+ uint32_t lkup_rec_cnt[CFA_DIR_MAX];
+ /**
+ * [in] The number of minimum sized action records per direction.
+ * Similar to the lkup_rec_cnt, records are the minimum action memory
+ * allocation unit in a table scope.
+ */
+ uint32_t act_rec_cnt[CFA_DIR_MAX];
+ /**
+ * [in] The page size used for allocation. If running in the kernel
+ * driver, this may be as small as 1KB. For huge pages this may be more
+ * commonly 2MB. Supported values include 4K, 8K, 64K, 2M, 8M and 1GB.
+ */
+ uint32_t pbl_page_sz_in_bytes;
+ /**
+ * [in] Indicates local application vs remote application table scope. A
+	 * table scope can be created on a PF for its own use or for use by
+ * other children. These may or may not be shared table scopes. Set
+ * local to false if calling API on behalf of a remote client VF.
+ * (alternatively, we could pass in the remote fid or the local fid).
+ */
+ bool local;
+ /**
+ * [in] The maximum number of pools supported.
+ */
+ uint8_t max_pools;
+ /**
+ * [in] Direction indexed array indicating the action table pool size
+	 * expressed as: log2(act_pool_sz). For example, if the pool size is
+	 * 1024, 1024=2^10, so the value 10 would be entered.
+ */
+ uint8_t act_pool_sz_exp[CFA_DIR_MAX];
+ /**
+ * [in] Direction indexed array indicating the lookup table pool size
+	 * expressed as: log2(lkup_pool_sz). For example, if the pool size is
+	 * 1024, 1024=2^10, so the value 10 would be entered.
+ */
+ uint8_t lkup_pool_sz_exp[CFA_DIR_MAX];
+ /**
+ * [in] Lookup table record start offset. Offset in 32B records after
+ * the static buckets where the lookup records and dynamic bucket memory
+ * will begin.
+ */
+ uint32_t lkup_rec_start_offset[CFA_DIR_MAX];
+};
+
+/**
+ * Determine whether table scopes are supported in the hardware.
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[out] tbl_scope_capable
+ * True if table scopes are supported in the firmware.
+ *
+ * @param[out] max_lkup_rec_cnt
+ * The maximum number of lookup records in a table scope (optional)
+ *
+ * @param[out] max_act_rec_cnt
+ * The maximum number of action records in a table scope (optional)
+ *
+ * @param[out] max_lkup_static_buckets_exp
+ * The log2 of the maximum number of lookup static buckets in a table scope
+ * (optional)
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tbl_scope_qcaps(struct tfc *tfcp, bool *tbl_scope_capable,
+ uint32_t *max_lkup_rec_cnt,
+ uint32_t *max_act_rec_cnt,
+ uint8_t *max_lkup_static_buckets_exp);
+
+/**
+ * Determine table scope sizing
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in,out] parms
+ * The parameters used by this function.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tbl_scope_size_query(struct tfc *tfcp,
+ struct tfc_tbl_scope_size_query_parms *parms);
+
+/**
+ * Allocate a table scope
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] shared
+ * Create a shared table scope.
+ *
+ * @param[in] app_type
+ * The application type, TF or AFM
+ *
+ * @param[out] tsid
+ * The allocated table scope ID.
+ *
+ * @param[in,out] first
+ * True if the caller is the creator of this table scope.
+ * If not shared, first is always set. (optional)
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tbl_scope_id_alloc(struct tfc *tfcp, bool shared,
+ enum cfa_app_type app_type, uint8_t *tsid,
+ bool *first);
+
+/**
+ * Allocate memory for a table scope
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] tsid
+ * Table scope identifier
+ *
+ * @param[in] fid
+ * Function id requesting the memory allocation
+ *
+ * @param[in] parms
+ * Memory allocation parameters
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tbl_scope_mem_alloc(struct tfc *tfcp, uint16_t fid, uint8_t tsid,
+ struct tfc_tbl_scope_mem_alloc_parms *parms);
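+
+/*
+ * Illustrative bring-up sketch (not part of the API): a non-shared table
+ * scope is typically sized, allocated and then backed with memory in this
+ * order. The flow counts, key/record sizes and app type are placeholders.
+ *
+ *	struct tfc_tbl_scope_size_query_parms qparms = {
+ *		.shared = false,
+ *		.factor = TFC_TBL_SCOPE_BUCKET_FACTOR_2,
+ *		.max_pools = 1,
+ *		.flow_cnt = { 64 * 1024, 64 * 1024 },
+ *		.key_sz_in_bytes = { 64, 64 },
+ *		.act_rec_sz_in_bytes = { 64, 64 },
+ *	};
+ *	struct tfc_tbl_scope_mem_alloc_parms mparms = { 0 };
+ *	enum cfa_app_type app = ...;	// TF or AFM, from cfa_types.h
+ *	uint8_t tsid;
+ *	bool first;
+ *
+ *	rc = tfc_tbl_scope_size_query(tfcp, &qparms);
+ *	rc = tfc_tbl_scope_id_alloc(tfcp, false, app, &tsid, &first);
+ *	// copy the [out] sizing results from qparms into mparms and also set
+ *	// pbl_page_sz_in_bytes, max_pools and local as required, then:
+ *	rc = tfc_tbl_scope_mem_alloc(tfcp, fid, tsid, &mparms);
+ */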
+
+/**
+ * Free memory for a table scope
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * Function id for memory to free from the table scope. Set to INVALID_FID
+ * by default. Populated when VF2PF mem_free message received from a VF
+ * for a shared table scope.
+ *
+ * @param[in] tsid
+ * Table scope identifier
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tbl_scope_mem_free(struct tfc *tfcp, uint16_t fid, uint8_t tsid);
+
+/**
+ * tfc_tbl_scope_cpm_alloc_parms contains the parameters for allocating a
+ * CPM instance to be used by a table scope.
+ */
+struct tfc_tbl_scope_cpm_alloc_parms {
+ /**
+ * [in] Direction indexed array indicating the maximum number of lookup
+ * contiguous records.
+ */
+ uint8_t lkup_max_contig_rec[CFA_DIR_MAX];
+ /**
+ * [in] Direction indexed array indicating the maximum number of action
+ * contiguous records.
+ */
+ uint8_t act_max_contig_rec[CFA_DIR_MAX];
+ /**
+ * [in] The maximum number of pools supported by the table scope.
+ */
+ uint16_t max_pools;
+};
+/**
+ * Allocate CFA Pool Manager (CPM) Instance
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] cpm_parms
+ * Pointer to the CFA Pool Manager parameters
+ *
+ * @param[in] tsid
+ * Table scope identifier
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tbl_scope_cpm_alloc(struct tfc *tfcp, uint8_t tsid,
+ struct tfc_tbl_scope_cpm_alloc_parms *cpm_parms);
+
+/**
+ * Free CPM Instance
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] tsid
+ * Table scope identifier
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tbl_scope_cpm_free(struct tfc *tfcp, uint8_t tsid);
+
+/**
+ * Associate a forwarding function with an existing table scope
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * Function id to associate with the table scope
+ *
+ * @param[in] tsid
+ * Table scope identifier
+ *
+ * @param[in,out] fid_cnt
+ * The number of forwarding functions currently associated with the table scope
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tbl_scope_fid_add(struct tfc *tfcp, uint16_t fid, uint8_t tsid,
+ uint16_t *fid_cnt);
+
+/**
+ * Disassociate a forwarding function from an existing TFC table scope
+ *
+ * Once the last function has been removed from the table scope in the
+ * firmware, the table scope is freed and all associated resources are freed.
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * Function id to associate with the table scope
+ *
+ * @param[in] tsid
+ * Table scope identifier
+ *
+ * @param[in,out] fid_cnt
+ * The number of forwarding functions currently associated with the table scope
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tbl_scope_fid_rem(struct tfc *tfcp, uint16_t fid, uint8_t tsid,
+ uint16_t *fid_cnt);
+
+/**
+ * Pool allocation
+ *
+ * Allocate a pool ID and set its size
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * Function id allocating the pool
+ *
+ * @param[in] tsid
+ * Table scope identifier
+ *
+ * @param[in] region
+ * Pool region identifier
+ *
+ * @param[in] dir
+ * Direction
+ *
+ * @param[out] pool_sz_exp
+ * Pool size exponent
+ *
+ * @param[out] pool_id
+ * Used to return the allocated pool ID.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tbl_scope_pool_alloc(struct tfc *tfcp, uint16_t fid, uint8_t tsid,
+ enum cfa_region_type region, enum cfa_dir dir,
+ uint8_t *pool_sz_exp, uint16_t *pool_id);
+
+/**
+ * Pool free
+ *
+ * Free a pool ID
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * Function freeing the pool
+ *
+ * @param[in] tsid
+ * Table scope identifier
+ *
+ * @param[in] region
+ * Pool region identifier
+ *
+ * @param[in] dir
+ * Direction
+ *
+ * @param[in] pool_id
+ * The pool ID to free.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tbl_scope_pool_free(struct tfc *tfcp, uint16_t fid, uint8_t tsid,
+ enum cfa_region_type region, enum cfa_dir dir,
+ uint16_t pool_id);
+
+/**
+ * Get configured state
+ *
+ * This API is intended for DPDK applications where a single table scope is shared
+ * across one or more DPDK instances. When one instance succeeds in allocating and
+ * configuring a table scope, it then marks the table scope as configured, while
+ * the other instances poll and wait for the shared table scope to be configured.
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] tsid
+ * Table scope identifier
+ *
+ * @param[out] configured
+ * Used to return whether the table scope has been configured.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tbl_scope_config_state_get(struct tfc *tfcp, uint8_t tsid, bool *configured);
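+
+/*
+ * Illustrative polling sketch (not part of the API): a secondary instance of
+ * a shared table scope waits for the creator to finish configuration. The
+ * retry/delay policy is a placeholder left to the caller.
+ *
+ *	bool configured = false;
+ *
+ *	do {
+ *		rc = tfc_tbl_scope_config_state_get(tfcp, tsid, &configured);
+ *	} while (!rc && !configured);	// add a delay/timeout in real code
+ */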
+
+/**
+ * Table scope function reset
+ *
+ * Delete resources and EM entries associated with fid.
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * Function id whose resources and EM entries are to be deleted
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_tbl_scope_func_reset(struct tfc *tfcp, uint16_t fid);
+
+/**
+ * @page EM Exact Match
+ *
+ * @ref tfc_em_insert
+ *
+ * @ref tfc_em_delete
+ */
+
+/**
+ * tfc_em_insert_parms contains the parameters for an EM insert.
+ *
+ */
+struct tfc_em_insert_parms {
+ /**
+ * [in] Entry direction.
+ */
+ enum cfa_dir dir;
+ /**
+ * [in] Pointer to the combined lkup record and key data to be written.
+ */
+ uint8_t *lkup_key_data;
+ /**
+ * [in] The size of the entry to write in 32b words.
+ */
+ uint16_t lkup_key_sz_words;
+ /**
+ * [in] Thor only - The key data to be used to calculate the hash.
+ */
+ const uint8_t *key_data;
+ /**
+ * [in] Thor only - Size of key in bits.
+ */
+ uint16_t key_sz_bits;
+ /**
+	 * [out] Will contain the entry flow handle, a unique identifier.
+ */
+ uint64_t *flow_handle;
+ /**
+ * [in/out] Batch mode data
+ */
+ struct tfc_mpc_batch_info_t *batch_info;
+};
+
+/**
+ * Start MPC batching
+ *
+ * @param[in/out] batch_info
+ * Contains batch processing info
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_mpc_batch_start(struct tfc_mpc_batch_info_t *batch_info);
+
+/**
+ * Ends MPC batching and returns the accumulated results
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in/out] batch_info
+ * Contains batch processing info
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_mpc_batch_end(struct tfc *tfcp,
+ struct tfc_mpc_batch_info_t *batch_info);
+
+/**
+ * Checks to see if batching is active and other MPCs have been sent
+ *
+ * @param[in/out] batch_info
+ * Contains batch processing info
+ *
+ * @returns
+ * True if batching has started and MPCs have been sent, else False.
+ */
+bool tfc_mpc_batch_started(struct tfc_mpc_batch_info_t *batch_info);
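+
+/*
+ * Illustrative batching sketch (not part of the API): group several MPC
+ * operations between a start and an end call by passing the same batch_info
+ * pointer to tfc_em_insert()/tfc_act_set().
+ *
+ *	struct tfc_mpc_batch_info_t batch;
+ *
+ *	rc = tfc_mpc_batch_start(&batch);
+ *	// issue one or more tfc_em_insert()/tfc_act_set() calls with &batch
+ *	if (tfc_mpc_batch_started(&batch))
+ *		rc = tfc_mpc_batch_end(tfcp, &batch);
+ */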
+
+/**
+ * Insert an EM Entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] tsid
+ * Table scope id
+ *
+ * @param[in,out] parms
+ * Pointer to the EM insert parameters
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ * Error codes -1 through -9 indicate an MPC error and the
+ * positive value of the error code maps directly on to the
+ * MPC error code. For example, if the value -8 is returned
+ * it indicates a CFA_BLD_MPC_EM_DUPLICATE error occurred.
+ */
+int tfc_em_insert(struct tfc *tfcp, uint8_t tsid,
+ struct tfc_em_insert_parms *parms);
+
+
+/**
+ * tfc_em_delete_parms contains the args required to delete an EM Entry
+ *
+ * @param[in] dir
+ * Direction (CFA_DIR_RX or CFA_DIR_TX)
+ *
+ * @param[in,out] flow_handle
+ * The flow handle returned at insert time, used for flow deletion.
+ *
+ * @param[in/out] batch_info
+ * Batch mode data
+ *
+ */
+struct tfc_em_delete_parms {
+ /**
+ * [in] Entry direction.
+ */
+ enum cfa_dir dir;
+ /**
+ * [in] Flow handle of flow to delete
+ */
+ uint64_t flow_handle;
+ /**
+ * [in/out] Batch mode data
+ */
+ struct tfc_mpc_batch_info_t *batch_info;
+};
+
+/**
+ * Delete an EM Entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in,out] parms
+ * Pointer to the EM delete parameters
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ * Error codes -1 through -9 indicate an MPC error and the
+ * positive value of the error code maps directly on to the
+ * MPC error code. For example, if the value -8 is returned
+ * it indicates a CFA_BLD_MPC_EM_DUPLICATE error occurred.
+ */
+int tfc_em_delete(struct tfc *tfcp, struct tfc_em_delete_parms *parms);
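+
+/*
+ * Illustrative EM flow sketch (not part of the API): insert an exact match
+ * entry and later delete it using the returned flow handle. The key/record
+ * buffers and sizes are placeholders; batch_info is NULL for non-batched use.
+ *
+ *	uint64_t flow_handle;
+ *	struct tfc_em_insert_parms iparms = {
+ *		.dir = CFA_DIR_RX,
+ *		.lkup_key_data = lkup_key_buf,		// placeholder buffer
+ *		.lkup_key_sz_words = lkup_key_words,	// placeholder size
+ *		.key_data = key_buf,			// Thor only
+ *		.key_sz_bits = key_bits,		// Thor only
+ *		.flow_handle = &flow_handle,
+ *		.batch_info = NULL,
+ *	};
+ *	struct tfc_em_delete_parms dparms = {
+ *		.dir = CFA_DIR_RX,
+ *		.batch_info = NULL,
+ *	};
+ *
+ *	rc = tfc_em_insert(tfcp, tsid, &iparms);
+ *	...
+ *	dparms.flow_handle = flow_handle;
+ *	rc = tfc_em_delete(tfcp, &dparms);
+ */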
+
+
+/**
+ * @page ACTCFA Action CFA Memory Management
+ *
+ * @ref tfc_act_alloc
+ *
+ * @ref tfc_act_set
+ *
+ * @ref tfc_act_get
+ *
+ * @ref tfc_act_free
+ */
+/**
+ * CMM resource structure
+ */
+struct tfc_cmm_info {
+ enum cfa_resource_subtype_cmm rsubtype; /**< resource subtype */
+ enum cfa_dir dir; /**< direction rx/tx */
+ uint64_t act_handle; /**< alloc/free handle */
+};
+
+/**
+ * CMM resource clear structure
+ */
+struct tfc_cmm_clr {
+ bool clr; /**< flag for clear */
+ uint16_t offset_in_byte; /**< field offset in byte */
+ uint16_t sz_in_byte; /**< field size in byte */
+};
+
+/**
+ * Allocate an action CMM Resource
+ *
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] tsid
+ * Table scope identifier
+ *
+ * @param[in,out] cmm_info
+ * Pointer to cmm info
+ *
+ * @param[in] num_contig_rec
+ * Num contiguous records required. Record size is 8B for Thor/32B for Thor2.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_act_alloc(struct tfc *tfcp, uint8_t tsid,
+ struct tfc_cmm_info *cmm_info, uint16_t num_contig_rec);
+
+/**
+ * Set an action CMM resource
+ *
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in/out] batch_info
+ * Batch mode data
+ *
+ * @param[in] cmm_info
+ * Pointer to cmm info.
+ *
+ * @param[in] data
+ * Data to be written.
+ *
+ * @param[in] data_sz_words
+ * Data buffer size in words. In 8B increments.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ * Error codes -1 through -9 indicate an MPC error and the
+ * positive value of the error code maps directly on to the
+ * MPC error code. For example, if the value -8 is returned
+ * it indicates a CFA_BLD_MPC_EM_DUPLICATE error occurred.
+ */
+int tfc_act_set(struct tfc *tfcp,
+ struct tfc_mpc_batch_info_t *batch_info,
+ const struct tfc_cmm_info *cmm_info,
+ const uint8_t *data, uint16_t data_sz_words);
+
+/**
+ * Get an action CMM resource
+ *
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in/out] batch_info
+ * Batch mode data
+ *
+ * @param[in] cmm_info
+ * Pointer to cmm info
+ *
+ * @param[in] clr
+ * Pointer to cmm clr
+ *
+ * @param[in,out] data
+ * Data read. Must be word aligned, i.e. [1:0] must be 0.
+ *
+ * @param[in,out] data_sz_words
+ * Data buffer size in words. Size could be 8/16/24/32/64B
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ * Error codes -1 through -9 indicate an MPC error and the
+ * positive value of the error code maps directly on to the
+ * MPC error code. For example, if the value -8 is returned
+ * it indicates a CFA_BLD_MPC_EM_DUPLICATE error occurred.
+ */
+int tfc_act_get(struct tfc *tfcp,
+ struct tfc_mpc_batch_info_t *batch_info,
+ const struct tfc_cmm_info *cmm_info,
+ struct tfc_cmm_clr *clr,
+ uint8_t *data, uint16_t *data_sz_words);
+
+/**
+ * Free a CMM Resource
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] cmm_info
+ * CMM info
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_act_free(struct tfc *tfcp,
+ const struct tfc_cmm_info *cmm_info);
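+
+/*
+ * Illustrative action flow sketch (not part of the API): allocate action
+ * record space, write a record, read it back and free it. some_cmm_subtype,
+ * act_data/rd_data and act_words are placeholders; batch_info is NULL here.
+ *
+ *	struct tfc_cmm_info cmm = {
+ *		.rsubtype = some_cmm_subtype,	// placeholder subtype
+ *		.dir = CFA_DIR_TX,
+ *	};
+ *	uint16_t rd_words = act_words;
+ *
+ *	rc = tfc_act_alloc(tfcp, tsid, &cmm, act_words);
+ *	if (!rc)
+ *		rc = tfc_act_set(tfcp, NULL, &cmm, act_data, act_words);
+ *	if (!rc)
+ *		rc = tfc_act_get(tfcp, NULL, &cmm, NULL, rd_data, &rd_words);
+ *	...
+ *	rc = tfc_act_free(tfcp, &cmm);
+ */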
+
+/**
+ * @page IF Table
+ *
+ * @ref tfc_if_tbl_set
+ *
+ * @ref tfc_if_tbl_get
+ */
+
+/**
+ * IF table resource structure
+ */
+struct tfc_if_tbl_info {
+ enum cfa_resource_subtype_if_tbl rsubtype; /**< resource subtype */
+ enum cfa_dir dir; /**< direction rx/tx */
+ uint16_t id; /**< index */
+};
+
+/**
+ * Set a TFC if table entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] tbl_info
+ * All the information related to the requested index table entry (subtype/dir)
+ * including the id.
+ *
+ * @param[in] data
+ * Pointer to the data to write to the entry. The data is aligned correctly
+ * in the buffer for writing to the hardware.
+ *
+ * @param[in] data_sz_in_bytes
+ * The size of the entry in device sized bytes for Thor2.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_if_tbl_set(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_if_tbl_info *tbl_info,
+ const uint8_t *data, uint8_t data_sz_in_bytes);
+
+/**
+ * Get a TFC if table entry
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * FID - Function ID to be used
+ *
+ * @param[in] tbl_info
+ * All the information related to the requested index table entry (subtype/dir)
+ * including the id.
+ *
+ * @param[in, out] data
+ * Pointer to the data to read from the entry.
+ *
+ * @param[in,out] data_sz_in_bytes
+ * The size of the entry in device sized bytes for Thor2. Input is the
+ * size of the buffer, output is the actual size.
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_if_tbl_get(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_if_tbl_info *tbl_info,
+ uint8_t *data, uint8_t *data_sz_in_bytes);
+#endif /* _TFC_H_ */
new file mode 100644
@@ -0,0 +1,785 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+#include <stdio.h>
+
+#include "bnxt.h"
+#include "bnxt_mpc.h"
+
+#include "tfc.h"
+#include "cfa_bld_mpc_field_ids.h"
+#include "cfa_bld_mpcops.h"
+#include "tfo.h"
+#include "tfc_em.h"
+#include "tfc_cpm.h"
+#include "tfc_msg.h"
+#include "tfc_priv.h"
+#include "cfa_types.h"
+#include "cfa_mm.h"
+#include "tfc_action_handle.h"
+#include "sys_util.h"
+#include "tfc_util.h"
+
+/*
+ * The read/write granularity is 32B
+ */
+#define TFC_ACT_RW_GRANULARITY 32
+
+#define TFC_ACT_CACHE_OPT_EN 0
+
+/* Max additional data size in bytes */
+#define TFC_ACT_DISCARD_DATA_SIZE 128
+
+int tfc_act_alloc(struct tfc *tfcp,
+ uint8_t tsid,
+ struct tfc_cmm_info *cmm_info,
+ uint16_t num_contig_rec)
+{
+ int rc = 0;
+ struct tfc_cpm *cpm_lkup = NULL;
+ struct tfc_cpm *cpm_act = NULL;
+ uint16_t pool_id;
+ struct tfc_ts_mem_cfg mem_cfg;
+ bool is_bs_owner;
+ struct tfc_cmm *cmm;
+ uint32_t entry_offset;
+ struct cfa_mm_alloc_parms aparms;
+ bool is_shared;
+ struct tfc_ts_pool_info pi;
+ bool valid;
+ uint16_t max_pools;
+
+ rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, &max_pools);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get tsid: %s", strerror(-rc));
+ return -EINVAL;
+ }
+
+ if (unlikely(!valid)) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) not allocated", tsid);
+ return -EINVAL;
+ }
+
+ if (unlikely(max_pools == 0)) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) Max pools must be greater than 0 %d",
+ tsid, max_pools);
+ return -EINVAL;
+ }
+
+ tfo_ts_get_pool_info(tfcp->tfo, tsid, cmm_info->dir, &pi);
+
+ /* Get CPM instances */
+ rc = tfo_ts_get_cpm_inst(tfcp->tfo, tsid, cmm_info->dir, &cpm_lkup, &cpm_act);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get CPM instances: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid,
+ cmm_info->dir,
+ CFA_REGION_TYPE_ACT,
+ &is_bs_owner,
+ &mem_cfg);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "tfo_ts_get_mem_cfg() failed: %s", strerror(-rc));
+ return -EINVAL;
+ }
+
+ /* if no pool available locally or all pools full */
+ rc = tfc_cpm_get_avail_pool(cpm_act, &pool_id);
+
+ if (rc) {
+ /* Allocate a pool */
+ struct cfa_mm_query_parms qparms;
+ struct cfa_mm_open_parms oparms;
+ uint16_t fid;
+
+ /* There is only 1 pool for a non-shared table scope
+ * and it is full.
+ */
+ if (unlikely(!is_shared)) {
+ PMD_DRV_LOG_LINE(ERR, "no records remain");
+ return -ENOMEM;
+ }
+ rc = tfc_get_fid(tfcp, &fid);
+ if (unlikely(rc))
+ return rc;
+
+ rc = tfc_tbl_scope_pool_alloc(tfcp,
+ fid,
+ tsid,
+ CFA_REGION_TYPE_ACT,
+ cmm_info->dir,
+ NULL,
+ &pool_id);
+
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "table scope pool alloc failed: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ /* Create pool CMM instance */
+ qparms.max_records = mem_cfg.rec_cnt;
+ qparms.max_contig_records = pi.act_max_contig_rec;
+ rc = cfa_mm_query(&qparms);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "cfa_mm_query() failed: %s", strerror(-rc));
+ return rc;
+ }
+
+		cmm = rte_zmalloc("tf", qparms.db_size, 0);
+		if (unlikely(cmm == NULL)) {
+			PMD_DRV_LOG_LINE(ERR, "cmm alloc failed");
+			return -ENOMEM;
+		}
+ oparms.db_mem_size = qparms.db_size;
+ oparms.max_contig_records = qparms.max_contig_records;
+ oparms.max_records = qparms.max_records / max_pools;
+ rc = cfa_mm_open(cmm, &oparms);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "cfa_mm_open() failed: %d", rc);
+ return -EINVAL;
+ }
+
+ /* Store CMM instance in the CPM */
+ rc = tfc_cpm_set_cmm_inst(cpm_act, pool_id, cmm);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "tfc_cpm_set_cmm_inst() failed: %d", rc);
+ return -EINVAL;
+ }
+ /* store updated pool info */
+ tfo_ts_set_pool_info(tfcp->tfo, tsid, cmm_info->dir, &pi);
+
+ } else {
+ /* Get the pool instance and allocate an act rec index from the pool */
+ rc = tfc_cpm_get_cmm_inst(cpm_act, pool_id, &cmm);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "tfc_cpm_get_cmm_inst() failed: %d", rc);
+ return -EINVAL;
+ }
+ }
+
+ aparms.num_contig_records = 1 << next_pow2(num_contig_rec);
+ rc = cfa_mm_alloc(cmm, &aparms);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "cfa_mm_alloc() failed: %d", rc);
+ return -EINVAL;
+ }
+
+ /* Update CPM info so it will determine best pool to use next alloc */
+ rc = tfc_cpm_set_usage(pi.act_cpm, pool_id, aparms.used_count, aparms.all_used);
+ if (unlikely(rc))
+ PMD_DRV_LOG_LINE(ERR, "EM insert tfc_cpm_set_usage() failed: %d", rc);
+
+ CREATE_OFFSET(&entry_offset, pi.act_pool_sz_exp, pool_id, aparms.record_offset);
+
+ /* Create Action handle */
+ cmm_info->act_handle = tfc_create_action_handle(tsid,
+ num_contig_rec,
+ entry_offset);
+ return rc;
+}
+
+int tfc_act_set_response(struct cfa_bld_mpcinfo *mpc_info,
+ struct bnxt_mpc_mbuf *mpc_msg_out,
+ uint8_t *rx_msg)
+{
+ int rc;
+ int i;
+ struct cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_WRITE_CMP_MAX_FLD];
+
+ /* Process response */
+ for (i = 0; i < CFA_BLD_MPC_WRITE_CMP_MAX_FLD; i++)
+ fields_cmp[i].field_id = INVALID_U16;
+
+ fields_cmp[CFA_BLD_MPC_WRITE_CMP_STATUS_FLD].field_id =
+ CFA_BLD_MPC_WRITE_CMP_STATUS_FLD;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_parse_cache_write(rx_msg,
+ mpc_msg_out->msg_size,
+ fields_cmp);
+
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "write parse failed: %d", rc);
+ rc = -EINVAL;
+ }
+
+ if (unlikely(fields_cmp[CFA_BLD_MPC_WRITE_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK)) {
+ PMD_DRV_LOG_LINE(ERR, "failed with status code:%d",
+ (uint32_t)fields_cmp[CFA_BLD_MPC_WRITE_CMP_STATUS_FLD].val);
+ rc = ((int)fields_cmp[CFA_BLD_MPC_WRITE_CMP_STATUS_FLD].val) * -1;
+ }
+
+ return rc;
+}
+
+int tfc_act_set(struct tfc *tfcp,
+ struct tfc_mpc_batch_info_t *batch_info,
+ const struct tfc_cmm_info *cmm_info,
+ const uint8_t *data,
+ uint16_t data_sz_words)
+{
+ int rc = 0;
+ uint8_t tx_msg[TFC_MPC_MAX_TX_BYTES];
+ uint8_t rx_msg[TFC_MPC_MAX_RX_BYTES];
+ uint32_t msg_count = BNXT_MPC_COMP_MSG_COUNT;
+ uint32_t i;
+ uint32_t buff_len;
+ struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_WRITE_CMD_MAX_FLD];
+ uint32_t entry_offset;
+ struct bnxt_mpc_mbuf mpc_msg_in;
+ struct bnxt_mpc_mbuf mpc_msg_out;
+ struct cfa_bld_mpcinfo *mpc_info;
+ uint32_t record_size;
+ uint8_t tsid;
+ bool is_shared;
+ bool valid;
+
+ tfo_mpcinfo_get(tfcp->tfo, &mpc_info);
+
+ /* Check that MPC APIs are bound */
+ if (unlikely(mpc_info->mpcops == NULL)) {
+ PMD_DRV_LOG_LINE(ERR, "MPC not initialized");
+ return -EINVAL;
+ }
+
+ tfc_get_fields_from_action_handle(&cmm_info->act_handle,
+ &tsid,
+ &record_size,
+ &entry_offset);
+
+ rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get tsid: %s", strerror(-rc));
+ return -EINVAL;
+ }
+ if (unlikely(!valid)) {
+ PMD_DRV_LOG_LINE(ERR, "tsid not allocated %d", tsid);
+ return -EINVAL;
+ }
+
+ /* Create MPC EM insert command using builder */
+ for (i = 0; i < CFA_BLD_MPC_WRITE_CMD_MAX_FLD; i++)
+ fields_cmd[i].field_id = INVALID_U16;
+
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD].field_id =
+ CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD].val = 0xAA;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD].field_id =
+ CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD].val = CFA_BLD_MPC_HW_TABLE_TYPE_ACTION;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD].field_id =
+ CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD].val = tsid;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD].field_id =
+ CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD].val = data_sz_words;
+#if TFC_ACT_CACHE_OPT_EN
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD].field_id =
+ CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD].val = 0x01;
+#endif
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD].field_id =
+ CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD].val = entry_offset;
+
+ buff_len = TFC_MPC_MAX_TX_BYTES;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_build_cache_write(tx_msg,
+ &buff_len,
+ data,
+ fields_cmd);
+
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "write build failed: %d", rc);
+ goto cleanup;
+ }
+
+ /* Send MPC */
+ mpc_msg_in.chnl_id = (cmm_info->dir == CFA_DIR_TX ?
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_TE_CFA :
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_RE_CFA);
+ mpc_msg_in.msg_data = &tx_msg[TFC_MPC_HEADER_SIZE_BYTES];
+ mpc_msg_in.msg_size = buff_len - TFC_MPC_HEADER_SIZE_BYTES;
+ mpc_msg_out.cmp_type = CMPL_BASE_TYPE_MID_PATH_SHORT;
+ mpc_msg_out.msg_data = &rx_msg[TFC_MPC_HEADER_SIZE_BYTES];
+ mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES;
+
+ rc = tfc_mpc_send(tfcp->bp,
+ &mpc_msg_in,
+ &mpc_msg_out,
+ &msg_count,
+ TFC_MPC_TABLE_WRITE,
+ batch_info);
+
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "write MPC send failed: %d", rc);
+ goto cleanup;
+ }
+
+ if (batch_info && !batch_info->enabled)
+ rc = tfc_act_set_response(mpc_info, &mpc_msg_out, rx_msg);
+
+ return rc;
+
+ cleanup:
+
+ return rc;
+}
+
+int tfc_act_get_only_response(struct cfa_bld_mpcinfo *mpc_info,
+ struct bnxt_mpc_mbuf *mpc_msg_out,
+ uint8_t *rx_msg,
+ uint16_t *data_sz_words)
+{
+ int i;
+ int rc;
+ uint8_t discard_data[TFC_ACT_DISCARD_DATA_SIZE];
+ struct cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_READ_CMP_MAX_FLD] = { {0} };
+
+ /* Process response */
+ for (i = 0; i < CFA_BLD_MPC_READ_CMP_MAX_FLD; i++)
+ fields_cmp[i].field_id = INVALID_U16;
+
+ fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].field_id =
+ CFA_BLD_MPC_READ_CMP_STATUS_FLD;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_parse_cache_read(rx_msg,
+ mpc_msg_out->msg_size,
+ discard_data,
+ *data_sz_words * TFC_MPC_BYTES_PER_WORD,
+ fields_cmp);
+
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "Action read parse failed: %d", rc);
+ return -1;
+ }
+
+ if (fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK) {
+ PMD_DRV_LOG_LINE(ERR, "Action read failed with status code:%d",
+ (uint32_t)fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].val);
+ rc = ((int)fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].val) * -1;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int tfc_act_get_only(struct tfc *tfcp,
+ struct tfc_mpc_batch_info_t *batch_info,
+ const struct tfc_cmm_info *cmm_info,
+ uint8_t *data,
+ uint16_t *data_sz_words)
+{
+ int rc = 0;
+ uint8_t tx_msg[TFC_MPC_MAX_TX_BYTES] = { 0 };
+ uint8_t rx_msg[TFC_MPC_MAX_RX_BYTES] = { 0 };
+ uint32_t msg_count = BNXT_MPC_COMP_MSG_COUNT;
+ int i;
+ uint32_t buff_len;
+ struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_READ_CMD_MAX_FLD] = { {0} };
+ uint32_t entry_offset;
+ uint64_t host_address;
+ struct bnxt_mpc_mbuf mpc_msg_in;
+ struct bnxt_mpc_mbuf mpc_msg_out;
+ uint32_t record_size;
+ uint8_t tsid;
+ bool is_shared;
+ struct cfa_bld_mpcinfo *mpc_info;
+ bool valid;
+
+ tfo_mpcinfo_get(tfcp->tfo, &mpc_info);
+
+ tfc_get_fields_from_action_handle(&cmm_info->act_handle,
+ &tsid,
+ &record_size,
+ &entry_offset);
+
+ rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get tsid: %s", strerror(-rc));
+ return -EINVAL;
+ }
+ if (unlikely(!valid)) {
+ PMD_DRV_LOG_LINE(ERR, "tsid not allocated %d", tsid);
+ return -EINVAL;
+ }
+
+ /* Check that data pointer is word aligned */
+ if (unlikely(((uint64_t)data) & 0x3ULL)) {
+ PMD_DRV_LOG_LINE(ERR, "data pointer not word aligned");
+ return -EINVAL;
+ }
+
+ host_address = (uint64_t)rte_mem_virt2iova(data);
+
+ /* Check that MPC APIs are bound */
+ if (unlikely(mpc_info->mpcops == NULL)) {
+ PMD_DRV_LOG_LINE(ERR, "MPC not initialized");
+ return -EINVAL;
+ }
+
+ /* Create MPC EM insert command using builder */
+ for (i = 0; i < CFA_BLD_MPC_READ_CMD_MAX_FLD; i++)
+ fields_cmd[i].field_id = INVALID_U16;
+
+ fields_cmd[CFA_BLD_MPC_READ_CMD_OPAQUE_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_OPAQUE_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_OPAQUE_FLD].val = 0xAA;
+
+ fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].val =
+ CFA_BLD_MPC_HW_TABLE_TYPE_ACTION;
+
+ fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD].val = tsid;
+
+ fields_cmd[CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD].val = *data_sz_words;
+
+#if TFC_ACT_CACHE_OPT_EN
+ fields_cmd[CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD].val = 0x0;
+#endif
+ fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD].val = entry_offset;
+
+ fields_cmd[CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD].val = host_address;
+
+ buff_len = TFC_MPC_MAX_TX_BYTES;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_build_cache_read(tx_msg,
+ &buff_len,
+ fields_cmd);
+
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "read build failed: %d", rc);
+ goto cleanup;
+ }
+
+ /* Send MPC */
+ mpc_msg_in.chnl_id = (cmm_info->dir == CFA_DIR_TX ?
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_TE_CFA :
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_RE_CFA);
+ mpc_msg_in.msg_data = &tx_msg[TFC_MPC_HEADER_SIZE_BYTES];
+ mpc_msg_in.msg_size = buff_len - TFC_MPC_HEADER_SIZE_BYTES;
+ mpc_msg_out.cmp_type = CMPL_BASE_TYPE_MID_PATH_SHORT;
+ mpc_msg_out.msg_data = &rx_msg[TFC_MPC_HEADER_SIZE_BYTES];
+ mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES;
+
+ rc = tfc_mpc_send(tfcp->bp,
+ &mpc_msg_in,
+ &mpc_msg_out,
+ &msg_count,
+ TFC_MPC_TABLE_READ,
+ batch_info);
+
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "read MPC send failed: %d", rc);
+ goto cleanup;
+ }
+
+ if ((batch_info && !batch_info->enabled) || !batch_info) {
+ rc = tfc_act_get_only_response(mpc_info,
+ &mpc_msg_out,
+ rx_msg,
+ data_sz_words);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Action response failed: %d", rc);
+ goto cleanup;
+ }
+ } else {
+ batch_info->comp_info[batch_info->count - 1].read_words = *data_sz_words;
+ }
+
+ return 0;
+
+ cleanup:
+
+ return rc;
+}
+
+static int tfc_act_get_clear(struct tfc *tfcp,
+ const struct tfc_cmm_info *cmm_info,
+ uint8_t *data,
+ uint16_t *data_sz_words,
+ uint8_t clr_offset,
+ uint8_t clr_size)
+{
+ int rc = 0;
+ uint8_t tx_msg[TFC_MPC_MAX_TX_BYTES] = { 0 };
+ uint8_t rx_msg[TFC_MPC_MAX_RX_BYTES] = { 0 };
+ uint32_t msg_count = BNXT_MPC_COMP_MSG_COUNT;
+ int i;
+ uint32_t buff_len;
+ struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_MAX_FLD] = { {0} };
+ struct cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_READ_CLR_CMP_MAX_FLD] = { {0} };
+ uint8_t discard_data[TFC_ACT_DISCARD_DATA_SIZE];
+ uint32_t entry_offset;
+ uint64_t host_address;
+ struct bnxt_mpc_mbuf mpc_msg_in;
+ struct bnxt_mpc_mbuf mpc_msg_out;
+ uint32_t record_size;
+ uint8_t tsid;
+ bool is_shared;
+ struct cfa_bld_mpcinfo *mpc_info;
+ bool valid;
+ uint16_t mask = 0;
+
+ tfo_mpcinfo_get(tfcp->tfo, &mpc_info);
+
+ tfc_get_fields_from_action_handle(&cmm_info->act_handle,
+ &tsid,
+ &record_size,
+ &entry_offset);
+
+ rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get tsid: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+ if (unlikely(!valid)) {
+ PMD_DRV_LOG_LINE(ERR, "tsid not allocated %d", tsid);
+ return -EINVAL;
+ }
+
+ /* Check that data pointer is word aligned */
+ if (unlikely(((uint64_t)data) & 0x3ULL)) {
+ PMD_DRV_LOG_LINE(ERR, "data pointer not word aligned");
+ return -EINVAL;
+ }
+
+ host_address = (uint64_t)rte_mem_virt2iova(data);
+
+ /* Check that MPC APIs are bound */
+ if (unlikely(mpc_info->mpcops == NULL)) {
+ PMD_DRV_LOG_LINE(ERR, "MPC not initialized");
+ return -EINVAL;
+ }
+
+ /* Create MPC EM insert command using builder */
+ for (i = 0; i < CFA_BLD_MPC_READ_CLR_CMD_MAX_FLD; i++)
+ fields_cmd[i].field_id = INVALID_U16;
+
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_OPAQUE_FLD].field_id =
+ CFA_BLD_MPC_READ_CLR_CMD_OPAQUE_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_OPAQUE_FLD].val = 0xAA;
+
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_TABLE_TYPE_FLD].field_id =
+ CFA_BLD_MPC_READ_CLR_CMD_TABLE_TYPE_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_TABLE_TYPE_FLD].val =
+ CFA_BLD_MPC_HW_TABLE_TYPE_ACTION;
+
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD].field_id =
+ CFA_BLD_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD].val = tsid;
+
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_DATA_SIZE_FLD].field_id =
+ CFA_BLD_MPC_READ_CLR_CMD_DATA_SIZE_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_DATA_SIZE_FLD].val = *data_sz_words;
+
+#if TFC_ACT_CACHE_OPT_EN
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_CACHE_OPTION_FLD].field_id =
+ CFA_BLD_MPC_READ_CLR_CMD_CACHE_OPTION_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_CACHE_OPTION_FLD].val = 0x0;
+#endif
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_TABLE_INDEX_FLD].field_id =
+ CFA_BLD_MPC_READ_CLR_CMD_TABLE_INDEX_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_TABLE_INDEX_FLD].val = entry_offset;
+
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD].field_id =
+ CFA_BLD_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD].val = host_address;
+
+ for (i = clr_offset; i < clr_size; i++)
+ mask |= (1 << i);
+
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_CLEAR_MASK_FLD].field_id =
+ CFA_BLD_MPC_READ_CLR_CMD_CLEAR_MASK_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_CLEAR_MASK_FLD].val = mask;
+
+ buff_len = TFC_MPC_MAX_TX_BYTES;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_build_cache_read_clr(tx_msg,
+ &buff_len,
+ fields_cmd);
+
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "read clear build failed: %d", rc);
+ goto cleanup;
+ }
+
+ /* Send MPC */
+ mpc_msg_in.chnl_id = (cmm_info->dir == CFA_DIR_TX ?
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_TE_CFA :
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_RE_CFA);
+ mpc_msg_in.msg_data = &tx_msg[TFC_MPC_HEADER_SIZE_BYTES];
+ mpc_msg_in.msg_size = buff_len - TFC_MPC_HEADER_SIZE_BYTES;
+ mpc_msg_out.cmp_type = CMPL_BASE_TYPE_MID_PATH_SHORT;
+ mpc_msg_out.msg_data = &rx_msg[TFC_MPC_HEADER_SIZE_BYTES];
+ mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES;
+
+ rc = tfc_mpc_send(tfcp->bp,
+ &mpc_msg_in,
+ &mpc_msg_out,
+ &msg_count,
+ TFC_MPC_TABLE_READ_CLEAR,
+ NULL);
+
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "read clear MPC send failed: %d", rc);
+ goto cleanup;
+ }
+
+ /* Process response */
+ for (i = 0; i < CFA_BLD_MPC_READ_CLR_CMP_MAX_FLD; i++)
+ fields_cmp[i].field_id = INVALID_U16;
+
+ fields_cmp[CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD].field_id =
+ CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_parse_cache_read_clr(rx_msg,
+ mpc_msg_out.msg_size,
+ discard_data,
+ *data_sz_words *
+ TFC_MPC_BYTES_PER_WORD,
+ fields_cmp);
+
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "Action read clear parse failed: %d", rc);
+ goto cleanup;
+ }
+
+ if (fields_cmp[CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK) {
+ PMD_DRV_LOG_LINE(ERR, "Action read clear failed with status code:%d",
+ (uint32_t)fields_cmp[CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD].val);
+ rc = ((int)fields_cmp[CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD].val) * -1;
+ goto cleanup;
+ }
+
+ return 0;
+
+ cleanup:
+
+ return rc;
+}
+
+int tfc_act_get(struct tfc *tfcp,
+ struct tfc_mpc_batch_info_t *batch_info,
+ const struct tfc_cmm_info *cmm_info,
+ struct tfc_cmm_clr *clr,
+ uint8_t *data, uint16_t *data_sz_words)
+{
+ /* Passing clr as a NULL pointer is not an error; it simply means that
+ * read-and-clear is not being requested. The caller can also control
+ * clearing through the clr->clr flag.
+ */
+ if (clr && clr->clr) {
+ if (unlikely(batch_info && batch_info->enabled)) {
+ PMD_DRV_LOG_LINE(ERR, "Not supported in batching mode");
+ return -EINVAL;
+ }
+
+ /* Clear offset and size must be two-byte aligned */
+ if (clr->offset_in_byte % 2 || clr->sz_in_byte % 2) {
+ PMD_DRV_LOG_LINE(ERR,
+ "clr offset(%d) or size(%d) is not two bytes aligned",
+ clr->offset_in_byte, clr->sz_in_byte);
+ return -EINVAL;
+ }
+
+ return tfc_act_get_clear(tfcp, cmm_info,
+ data, data_sz_words,
+ clr->offset_in_byte / 2,
+ clr->sz_in_byte / 2);
+ } else {
+ return tfc_act_get_only(tfcp,
+ batch_info,
+ cmm_info,
+ data,
+ data_sz_words);
+ }
+}
+
+int tfc_act_free(struct tfc *tfcp,
+ const struct tfc_cmm_info *cmm_info)
+{
+ int rc = 0;
+ struct tfc_cpm *cpm_lkup = NULL;
+ struct tfc_cpm *cpm_act = NULL;
+ struct tfc_cmm *cmm;
+ uint32_t pool_id;
+ struct tfc_ts_pool_info pi;
+ uint32_t record_size;
+ uint32_t record_offset;
+ struct cfa_mm_free_parms fparms;
+ uint8_t tsid;
+ bool is_shared;
+ bool valid;
+ bool is_bs_owner;
+ struct tfc_ts_mem_cfg mem_cfg;
+
+ /* Get fields from MPC Action handle */
+ tfc_get_fields_from_action_handle(&cmm_info->act_handle,
+ &tsid,
+ &record_size,
+ &record_offset);
+
+ rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get tsid: %s", strerror(-rc));
+ return -EINVAL;
+ }
+ if (unlikely(!valid)) {
+ PMD_DRV_LOG_LINE(ERR, "tsid not allocated %d", tsid);
+ return -EINVAL;
+ }
+ tfo_ts_get_pool_info(tfcp->tfo, tsid, cmm_info->dir, &pi);
+
+ pool_id = TFC_ACTION_GET_POOL_ID(record_offset, pi.act_pool_sz_exp);
+
+ rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid,
+ cmm_info->dir,
+ CFA_REGION_TYPE_ACT,
+ &is_bs_owner,
+ &mem_cfg); /* Gets rec_cnt */
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "tfo_ts_get_mem_cfg() failed: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ /* Get CPM instance for this table scope */
+ rc = tfo_ts_get_cpm_inst(tfcp->tfo, tsid, cmm_info->dir, &cpm_lkup, &cpm_act);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get CPM instance: %d", rc);
+ return -EINVAL;
+ }
+
+ rc = tfc_cpm_get_cmm_inst(cpm_act, pool_id, &cmm);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get CMM instance: %d", rc);
+ return -EINVAL;
+ }
+
+ fparms.record_offset = record_offset;
+ fparms.num_contig_records = 1 << next_pow2(record_size);
+ rc = cfa_mm_free(cmm, &fparms);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "failed to free record: %d", rc);
+ return -EINVAL;
+ }
+
+ rc = tfc_cpm_set_usage(cpm_act, pool_id, 0, false);
+ if (unlikely(rc))
+ PMD_DRV_LOG_LINE(ERR, "failed to set usage: %d", rc);
+
+ return rc;
+}
new file mode 100644
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TFC_ACTION_HANDLE_H_
+#define _TFC_ACTION_HANDLE_H_
+
+#define TFC_POOL_TSID_ACTION_HANDLE_MASK 0x0000003F000000000ULL
+#define TFC_POOL_TSID_ACTION_HANDLE_SFT 36
+#define TFC_RECORD_SIZE_ACTION_HANDLE_MASK 0x00000000F00000000ULL
+#define TFC_RECORD_SIZE_ACTION_HANDLE_SFT 32
+#define TFC_EM_REC_OFFSET_ACTION_HANDLE_MASK 0x00000000007FFFFFFULL
+#define TFC_EM_REC_OFFSET_ACTION_HANDLE_SFT 0
+
+#define TFC_ACTION_HANDLE_MASK ( \
+ TFC_POOL_TSID_ACTION_HANDLE_MASK | \
+ TFC_RECORD_SIZE_ACTION_HANDLE_MASK | \
+ TFC_EM_REC_OFFSET_ACTION_HANDLE_MASK)
+
+static inline void tfc_get_fields_from_action_handle(const uint64_t *act_handle,
+ uint8_t *tsid,
+ uint32_t *record_size,
+ uint32_t *action_offset)
+{
+ *tsid = (uint8_t)((*act_handle & TFC_POOL_TSID_ACTION_HANDLE_MASK) >>
+ TFC_POOL_TSID_ACTION_HANDLE_SFT);
+ *record_size =
+ (uint32_t)((*act_handle & TFC_RECORD_SIZE_ACTION_HANDLE_MASK) >>
+ TFC_RECORD_SIZE_ACTION_HANDLE_SFT);
+ *action_offset =
+ (uint32_t)((*act_handle & TFC_EM_REC_OFFSET_ACTION_HANDLE_MASK) >>
+ TFC_EM_REC_OFFSET_ACTION_HANDLE_SFT);
+}
+
+static inline uint64_t tfc_create_action_handle(uint8_t tsid,
+ uint32_t record_size,
+ uint32_t action_offset)
+{
+ uint64_t act_handle = 0ULL;
+
+ act_handle |=
+ ((((uint64_t)tsid) << TFC_POOL_TSID_ACTION_HANDLE_SFT) &
+ TFC_POOL_TSID_ACTION_HANDLE_MASK);
+ act_handle |=
+ ((((uint64_t)record_size) << TFC_RECORD_SIZE_ACTION_HANDLE_SFT) &
+ TFC_RECORD_SIZE_ACTION_HANDLE_MASK);
+ act_handle |=
+ ((((uint64_t)action_offset) << TFC_EM_REC_OFFSET_ACTION_HANDLE_SFT) &
+ TFC_EM_REC_OFFSET_ACTION_HANDLE_MASK);
+
+ return act_handle;
+}
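+
+/*
+ * Illustrative packing example (values chosen only for illustration):
+ * tsid = 0x3, record_size = 0x2 and action_offset = 0x1000 give
+ *
+ * (0x3ULL << 36) | (0x2ULL << 32) | 0x1000 = 0x3200001000ULL
+ *
+ * and tfc_get_fields_from_action_handle() on that handle returns the
+ * same three fields.
+ */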
+
+#define TFC_ACTION_GET_POOL_ID(action_offset, pool_sz_exp) \
+ ((action_offset) >> (pool_sz_exp))
+
+#define TFC_GET_32B_OFFSET_ACT_HANDLE(act_32byte_offset, act_handle) \
+ { \
+ (act_32byte_offset) = (uint32_t)((*(act_handle) & \
+ TFC_EM_REC_OFFSET_ACTION_HANDLE_MASK) >> \
+ TFC_EM_REC_OFFSET_ACTION_HANDLE_SFT); \
+ }
+
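+/* Converts a 32B-unit offset to an 8B-unit offset, e.g. 0x10 -> 0x40. */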
+#define TFC_GET_8B_OFFSET(act_8byte_offset, act_32byte_offset) \
+ { (act_8byte_offset) = ((act_32byte_offset) << 2); }
+
+#endif /* _TFC_ACTION_HANDLE_H_ */
new file mode 100644
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "tfc.h"
+#include "tfc_cpm.h"
+
+/*
+ * Per pool entry
+ */
+struct cpm_pool_entry {
+ bool valid;
+ struct tfc_cmm *cmm;
+ uint32_t used_count;
+ bool all_used;
+ struct cpm_pool_use *pool_use;
+};
+
+/*
+ * Pool use list entry
+ */
+struct cpm_pool_use {
+ uint16_t pool_id;
+ struct cpm_pool_use *prev;
+ struct cpm_pool_use *next;
+};
+
+/*
+ * tfc_cpm
+ *
+ * This is the main CPM data struct
+ */
+struct tfc_cpm {
+ struct cpm_pool_entry *pools;
+ uint16_t available_pool_id; /* most used pool that still has free entries */
+ bool pool_valid; /* set when at least one pool has free entries */
+ uint32_t pool_size; /* number of entries in each pool */
+ uint32_t max_pools; /* maximum number of pools */
+ uint32_t next_index; /* search index */
+ struct cpm_pool_use *pool_use_list; /* Ordered list of pool usage */
+};
+
+#define CPM_DEBUG 0
+
+#if (CPM_DEBUG == 1)
+static void show_list(char *str, struct tfc_cpm *cpm)
+{
+ struct cpm_pool_use *pu = cpm->pool_use_list;
+
+ PMD_DRV_LOG_LINE(DEBUG, "%s - ", str);
+
+ while (pu != NULL) {
+ PMD_DRV_LOG_LINE(DEBUG, "PU(%p) id:%d(u:%d au:%d) p:0x%p n:0x%p",
+ pu,
+ pu->pool_id,
+ cpm->pools[pu->pool_id].used_count,
+ cpm->pools[pu->pool_id].all_used,
+ pu->prev,
+ pu->next);
+
+ pu = pu->next;
+ }
+}
+#endif
+
+static int cpm_insert_pool_id(struct tfc_cpm *cpm, uint16_t pool_id)
+{
+ struct cpm_pool_entry *pool = &cpm->pools[pool_id];
+ struct cpm_pool_use *pool_use = cpm->pool_use_list;
+ struct cpm_pool_use *new_pool_use;
+ struct cpm_pool_use *prev = NULL;
+
+ if (!pool->valid) {
+ PMD_DRV_LOG_LINE(ERR, "Pool ID:0x%x is invalid", pool_id);
+ return -EINVAL;
+ }
+
+ /* Find where to insert the new entry */
+ while (pool_use != NULL) {
+ if (cpm->pools[pool_use->pool_id].valid &&
+ cpm->pools[pool_use->pool_id].used_count >
+ pool->used_count) {
+ prev = pool_use;
+ pool_use = pool_use->next;
+ } else {
+ break;
+ }
+ }
+
+ /* Alloc new entry */
+ new_pool_use = rte_zmalloc("tf", sizeof(struct cpm_pool_use), 0);
+ if (new_pool_use == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "pool use alloc error %s", strerror(ENOMEM));
+ return -ENOMEM;
+ }
+ new_pool_use->pool_id = pool_id;
+ new_pool_use->prev = NULL;
+ new_pool_use->next = NULL;
+ pool->pool_use = new_pool_use;
+
+ if (prev == NULL) { /* Empty list or new head */
+ cpm->pool_use_list = new_pool_use;
+ new_pool_use->next = pool_use;
+ if (pool_use != NULL)
+ pool_use->prev = new_pool_use;
+ } else { /* Insert after prev, possibly at the tail */
+ prev->next = new_pool_use;
+ new_pool_use->prev = prev;
+ new_pool_use->next = pool_use;
+ if (pool_use != NULL)
+ pool_use->prev = new_pool_use;
+ }
+
+ cpm->available_pool_id = cpm->pool_use_list->pool_id;
+ cpm->pool_valid = true;
+#if (CPM_DEBUG == 1)
+ show_list("Insert", cpm);
+#endif
+ return 0;
+}
+
+static int cpm_sort_pool_id(struct tfc_cpm *cpm, uint16_t pool_id)
+{
+ struct cpm_pool_entry *pool = &cpm->pools[pool_id];
+ struct cpm_pool_use *pool_use = pool->pool_use;
+ struct cpm_pool_use *prev, *next;
+
+ /*
+ * Does entry need to move up, down or stay where it is?
+ *
+ * The list is ordered by:
+ * Head: - Most used, but not full
+ * - ....next most used but not full
+ * - least used
+ * Tail: - All used
+ */
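+ /*
+ * Example ordering (illustrative pool states only): with pools
+ * A(used:5), B(used:2) and C(all_used), the list is A -> B -> C and
+ * available_pool_id refers to A until A also becomes all_used.
+ */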
+ while (1) {
+ if (pool_use->prev != NULL &&
+ cpm->pools[pool_use->prev->pool_id].valid &&
+ !pool->all_used &&
+ (cpm->pools[pool_use->prev->pool_id].all_used ||
+ cpm->pools[pool_use->prev->pool_id].used_count <
+ pool->used_count)) {
+ /* Move up */
+ prev = pool_use->prev;
+ pool_use->prev->next = pool_use->next;
+ if (pool_use->next != NULL) /* May be at the end of the list */
+ pool_use->next->prev = pool_use->prev;
+ pool_use->next = pool_use->prev;
+
+ if (pool_use->prev->prev != NULL) {
+ pool_use->prev->prev->next = pool_use;
+ pool_use->prev = pool_use->prev->prev;
+ } else {
+ /* Moved to head of the list */
+ pool_use->prev->prev = pool_use;
+ pool_use->prev = NULL;
+ cpm->pool_use_list = pool_use;
+ }
+
+ prev->prev = pool_use;
+ } else if (pool_use->next != NULL &&
+ cpm->pools[pool_use->next->pool_id].valid &&
+ (pool->all_used ||
+ (!cpm->pools[pool_use->next->pool_id].all_used &&
+ (cpm->pools[pool_use->next->pool_id].used_count >
+ pool->used_count)))) {
+ /* Move down */
+ next = pool_use->next;
+ pool_use->next->prev = pool_use->prev;
+ if (pool_use->prev != NULL) /* May be at the start of the list */
+ pool_use->prev->next = pool_use->next;
+ else
+ cpm->pool_use_list = pool_use->next;
+
+ pool_use->prev = pool_use->next;
+
+ if (pool_use->next->next != NULL) {
+ pool_use->next->next->prev = pool_use;
+ pool_use->next = pool_use->next->next;
+ } else {
+ /* Moved to end of the list */
+ pool_use->next->next = pool_use;
+ pool_use->next = NULL;
+ }
+
+ next->next = pool_use;
+ } else {
+ /* Nothing to do */
+ break;
+ }
+#if (CPM_DEBUG == 1)
+ show_list("Sort", cpm);
+#endif
+ }
+
+ if (cpm->pools[cpm->pool_use_list->pool_id].all_used) {
+ cpm->available_pool_id = TFC_CPM_INVALID_POOL_ID;
+ cpm->pool_valid = false;
+ } else {
+ cpm->available_pool_id = cpm->pool_use_list->pool_id;
+ cpm->pool_valid = true;
+ }
+
+ return 0;
+}
+
+int tfc_cpm_open(struct tfc_cpm **cpm, uint32_t max_pools)
+{
+ /* Allocate CPM struct */
+ *cpm = rte_zmalloc("tf", sizeof(struct tfc_cpm), 0);
+ if (*cpm == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "cpm alloc error %s", strerror(ENOMEM));
+ *cpm = NULL;
+ return -ENOMEM;
+ }
+
+ /* Allocate CPM pools array */
+ (*cpm)->pools = rte_zmalloc("tf", sizeof(struct cpm_pool_entry) * max_pools, 0);
+ if ((*cpm)->pools == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "pools alloc error %s", strerror(ENOMEM));
+ rte_free(*cpm);
+ *cpm = NULL;
+
+ return -ENOMEM;
+ }
+
+ /* Init pool entries by setting all fields to zero */
+ memset((*cpm)->pools, 0, sizeof(struct cpm_pool_entry) * max_pools);
+
+ /* Init remaining CPM fields */
+ (*cpm)->pool_valid = false;
+ (*cpm)->available_pool_id = 0;
+ (*cpm)->max_pools = max_pools;
+ (*cpm)->pool_use_list = NULL;
+
+ return 0;
+}
+
+int tfc_cpm_close(struct tfc_cpm *cpm)
+{
+ struct cpm_pool_use *current;
+ struct cpm_pool_use *next;
+
+ if (cpm == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "CPM is NULL");
+ return -EINVAL;
+ }
+
+ /* Free pool_use_list */
+ current = cpm->pool_use_list;
+ while (current != NULL) {
+ next = current->next;
+ rte_free(current);
+ current = next;
+ }
+
+ /* Free pools */
+ rte_free(cpm->pools);
+
+ /* Free CPM */
+ rte_free(cpm);
+
+ return 0;
+}
+
+int tfc_cpm_set_pool_size(struct tfc_cpm *cpm, uint32_t pool_sz_in_records)
+{
+ if (cpm == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "CPM is NULL");
+ return -EINVAL;
+ }
+
+ cpm->pool_size = pool_sz_in_records;
+ return 0;
+}
+
+int tfc_cpm_get_pool_size(struct tfc_cpm *cpm, uint32_t *pool_sz_in_records)
+{
+ if (cpm == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "CPM is NULL");
+ return -EINVAL;
+ }
+
+ *pool_sz_in_records = cpm->pool_size;
+ return 0;
+}
+
+int tfc_cpm_set_cmm_inst(struct tfc_cpm *cpm, uint16_t pool_id, struct tfc_cmm *cmm)
+{
+ struct cpm_pool_entry *pool;
+
+ if (cpm == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "CPM is NULL");
+ return -EINVAL;
+ }
+
+ pool = &cpm->pools[pool_id];
+
+ if (pool->valid && cmm != NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Pool ID:0x%x is already in use", pool_id);
+ return -EINVAL;
+ }
+
+ pool->cmm = cmm;
+ pool->used_count = 0;
+ pool->all_used = false;
+ pool->pool_use = NULL;
+
+ if (cmm == NULL) {
+ pool->valid = false;
+ } else {
+ pool->valid = true;
+ cpm_insert_pool_id(cpm, pool_id);
+ }
+
+ return 0;
+}
+
+int tfc_cpm_get_cmm_inst(struct tfc_cpm *cpm, uint16_t pool_id, struct tfc_cmm **cmm)
+{
+ struct cpm_pool_entry *pool;
+
+ if (cpm == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "CPM is NULL");
+ return -EINVAL;
+ }
+
+ pool = &cpm->pools[pool_id];
+
+ if (!pool->valid) {
+ PMD_DRV_LOG_LINE(ERR, "Pool ID:0x%x is not valid", pool_id);
+ return -EINVAL;
+ }
+
+ *cmm = pool->cmm;
+ return 0;
+}
+
+int tfc_cpm_get_avail_pool(struct tfc_cpm *cpm, uint16_t *pool_id)
+{
+ if (cpm == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "CPM is NULL");
+ return -EINVAL;
+ }
+
+ if (!cpm->pool_valid)
+ return -EINVAL;
+
+ *pool_id = cpm->available_pool_id;
+
+ return 0;
+}
+
+int tfc_cpm_set_usage(struct tfc_cpm *cpm, uint16_t pool_id, uint32_t used_count, bool all_used)
+{
+ struct cpm_pool_entry *pool;
+
+ if (cpm == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "CPM is NULL");
+ return -EINVAL;
+ }
+
+ pool = &cpm->pools[pool_id];
+
+ if (!pool->valid) {
+ PMD_DRV_LOG_LINE(ERR, "Pool ID:0x%x is invalid", pool_id);
+ return -EINVAL;
+ }
+
+ if (used_count > cpm->pool_size) {
+ PMD_DRV_LOG_LINE(ERR,
+ "Number of entries(%d) exceeds pool_size(%d)",
+ used_count, cpm->pool_size);
+ return -EINVAL;
+ }
+
+ pool->all_used = all_used;
+ pool->used_count = used_count;
+
+ /* Update ordered list of pool_ids */
+ cpm_sort_pool_id(cpm, pool_id);
+
+ return 0;
+}
+int tfc_cpm_srchm_by_configured_pool(struct tfc_cpm *cpm, enum cfa_srch_mode srch_mode,
+ uint16_t *pool_id, struct tfc_cmm **cmm)
+{
+ uint32_t i;
+
+ if (cpm == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "CPM is NULL");
+ return -EINVAL;
+ }
+
+ if (!pool_id) {
+ PMD_DRV_LOG_LINE(ERR, "pool_id ptr is NULL");
+ return -EINVAL;
+ }
+
+ if (!cmm) {
+ PMD_DRV_LOG_LINE(ERR, "cmm ptr is NULL");
+ return -EINVAL;
+ }
+ *pool_id = TFC_CPM_INVALID_POOL_ID;
+ *cmm = NULL;
+
+ if (srch_mode == CFA_SRCH_MODE_FIRST)
+ cpm->next_index = 0;
+
+ for (i = cpm->next_index; i < cpm->max_pools; i++) {
+ if (cpm->pools[i].cmm) {
+ *pool_id = i;
+ *cmm = cpm->pools[i].cmm;
+ cpm->next_index = i + 1;
+ return 0;
+ }
+ }
+ cpm->next_index = cpm->max_pools;
+ return -ENOENT;
+}
new file mode 100644
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TFC_CPM_H_
+#define _TFC_CPM_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <errno.h>
+#include "cfa_types.h"
+
+/*
+ * Set to 1 to force using just pool 0
+ */
+#define TFC_FORCE_POOL_0 1
+
+/*
+ * Temp to enable build. Remove when tfc_cmm is added
+ */
+struct tfc_cmm {
+ int a;
+};
+
+
+struct tfc_cpm;
+
+#define TFC_CPM_INVALID_POOL_ID 0xFFFF
+
+/**
+ * int tfc_cpm_open
+ *
+ * Allocates and initializes the CPM data structure sized for up to
+ * max_pools pools.
+ *
+ * @param[in, out] cpm
+ * Pointer to pointer of the allocated CPM data structure. The open will
+ * perform the alloc and return a pointer to the allocated memory.
+ *
+ * @param[in] max_pools
+ * Maximum number of pools
+ *
+ * Returns:
+ * 0 - Success
+ * -ENOMEM - Failed to allocate memory for CPM data structures.
+ *
+ */
+int tfc_cpm_open(struct tfc_cpm **cpm, uint32_t max_pools);
+
+/**
+ * int tfc_cpm_close
+ *
+ * Frees the pool use list, the pools array and the CPM structure itself.
+ *
+ * @param[in] cpm
+ * Pointer to the CPM instance to free.
+ *
+ * Returns:
+ * 0 - Success
+ * -EINVAL - Invalid argument
+ */
+int tfc_cpm_close(struct tfc_cpm *cpm);
+
+/**
+ * int tfc_cpm_set_pool_size
+ *
+ * Sets number of entries for pools in a given region.
+ *
+ * @param[in] cpm
+ * Pointer to the CPM instance
+ *
+ * @param[in] pool_sz_in_records
+ * Max number of entries for each pool must be
+ * a power of 2.
+ *
+ * Returns:
+ * 0 - Success
+ * -EINVAL - Invalid argument
+ *
+ */
+int tfc_cpm_set_pool_size(struct tfc_cpm *cpm, uint32_t pool_sz_in_records);
+
+/**
+ * int tfc_cpm_get_pool_size
+ *
+ * Returns the number of entries for pools in a given region.
+ *
+ * @param[in] cpm
+ * Pointer to the CPM instance
+ *
+ * @param[out] pool_sz_in_records
+ * Max number of entries for each pool
+ *
+ * Returns:
+ * 0 - Success
+ * -EINVAL - Invalid argument
+ */
+int tfc_cpm_get_pool_size(struct tfc_cpm *cpm, uint32_t *pool_sz_in_records);
+
+/**
+ * int tfc_cpm_set_cmm_inst
+ *
+ * Add CMM instance.
+ *
+ * @param[in] cpm
+ * Pointer to the CPM instance
+ *
+ * @param[in] pool_id
+ * Pool ID to use
+ *
+ * @param[in] cmm
+ * Pointer to the CMM instance, or NULL to mark the pool entry invalid
+ *
+ * Returns:
+ * 0 - Success
+ * -EINVAL - Invalid argument
+ */
+int tfc_cpm_set_cmm_inst(struct tfc_cpm *cpm, uint16_t pool_id, struct tfc_cmm *cmm);
+
+/**
+ * int tfc_cpm_get_cmm_inst
+ *
+ * Get CMM instance.
+ *
+ * @param[in] cpm
+ * Pointer to the CPM instance
+ *
+ * @param[in] pool_id
+ * Pool ID to use
+ *
+ * @param[out] cmm
+ * Pointer to the CMM instance
+ *
+ * Returns:
+ * 0 - Success
+ * -EINVAL - Invalid argument
+ */
+int tfc_cpm_get_cmm_inst(struct tfc_cpm *cpm, uint16_t pool_id, struct tfc_cmm **cmm);
+
+/**
+ * int tfc_cpm_get_avail_pool
+ *
+ * Returns the pool_id to use for the next EM insert
+ *
+ * @param[in] cpm
+ * Pointer to the CPM instance
+ *
+ * @param[out] pool_id
+ * Pool ID to use for EM insert
+ *
+ * Returns:
+ * 0 - Success
+ * -EINVAL - Invalid argument
+ */
+int tfc_cpm_get_avail_pool(struct tfc_cpm *cpm, uint16_t *pool_id);
+
+/**
+ * int tfc_cpm_set_usage
+ *
+ * Set the usage_count and all_used fields for the specified pool_id
+ *
+ * @param[in] cpm
+ * Pointer to the CPM instance
+ *
+ * @param[in] pool_id
+ * Pool ID to update
+ *
+ * @param[in] used_count
+ * Number of entries used within specified pool
+ *
+ * @param[in] all_used
+ * Set if all pool entries are used
+ *
+ * Returns:
+ * 0 - Success
+ * -EINVAL - Invalid argument
+ */
+int tfc_cpm_set_usage(struct tfc_cpm *cpm, uint16_t pool_id, uint32_t used_count, bool all_used);
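+
+/*
+ * Minimal usage sketch (illustrative only; error handling and the setup
+ * of max_pools, pool_sz_in_records and the CMM instance are omitted):
+ *
+ * struct tfc_cpm *cpm;
+ * struct tfc_cmm *cmm;
+ * uint16_t pool_id;
+ *
+ * tfc_cpm_open(&cpm, max_pools);
+ * tfc_cpm_set_pool_size(cpm, pool_sz_in_records);
+ * tfc_cpm_set_cmm_inst(cpm, pool_id, cmm);
+ * ...
+ * tfc_cpm_get_avail_pool(cpm, &pool_id);
+ * tfc_cpm_get_cmm_inst(cpm, pool_id, &cmm);
+ * tfc_cpm_set_usage(cpm, pool_id, used_count, all_used);
+ * ...
+ * tfc_cpm_close(cpm);
+ */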
+
+/**
+ * int tfc_cpm_srchm_by_configured_pool
+ *
+ * Get the next configured pool
+ *
+ * @param[in] cpm
+ * Pointer to the CPM instance
+ *
+ * @param[in] srch_mode
+ * Search mode; CFA_SRCH_MODE_FIRST restarts the search from pool 0,
+ * otherwise the search continues from the previous match
+ *
+ * @param[out] pool_id
+ * Pointer to the returned pool id
+ *
+ * @param[out] cmm
+ * Pointer to the associated CMM instance
+ *
+ * Returns:
+ * 0 - Success
+ * -EINVAL - Invalid argument
+ * -ENOENT - No further configured pools
+ */
+int tfc_cpm_srchm_by_configured_pool(struct tfc_cpm *cpm, enum cfa_srch_mode srch_mode,
+ uint16_t *pool_id, struct tfc_cmm **cmm);
+
+#endif /* _TFC_CPM_H_ */
new file mode 100644
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TFC_DEBUG_H_
+#define _TFC_DEBUG_H_
+
+/* #define EM_DEBUG */
+/* #define ACT_DEBUG */
+
+int tfc_mpc_table_write_zero(struct tfc *tfcp,
+ uint8_t tsid,
+ enum cfa_dir dir,
+ uint32_t type,
+ uint32_t offset,
+ uint8_t words,
+ uint8_t *data);
+
+int tfc_act_show(struct tfc *tfcp, uint8_t tsid, enum cfa_dir dir);
+int tfc_em_show(struct tfc *tfcp, uint8_t tsid, enum cfa_dir dir);
+int tfc_mpc_table_invalidate(struct tfc *tfcp,
+ uint8_t tsid,
+ enum cfa_dir dir,
+ uint32_t type,
+ uint32_t offset,
+ uint32_t words);
+#endif
new file mode 100644
@@ -0,0 +1,999 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+#include <memory.h>
+
+#include "bnxt.h"
+#include "bnxt_mpc.h"
+
+#include "tfc.h"
+#include "tfo.h"
+#include "tfc_em.h"
+#include "tfc_cpm.h"
+#include "tfc_msg.h"
+#include "tfc_priv.h"
+#include "cfa_types.h"
+#include "cfa_mm.h"
+#include "cfa_bld_mpc_field_ids.h"
+#include "tfc_flow_handle.h"
+#include "sys_util.h"
+#include "tfc_util.h"
+
+#include "tfc_debug.h"
+
+#define TFC_EM_DYNAMIC_BUCKET_RECORD_SIZE 1
+
+/* Enable cache configuration */
+#define TFC_EM_CACHE_OPT_EN 0
+/* Enable dynamic bucket support */
+
+#ifdef EM_DEBUG
+char const *tfc_mpc_error_string[] = {
+ "OK",
+ "Unsupported Opcode",
+ "Format",
+ "Scope",
+ "Address",
+ "Cache",
+ "EM Miss",
+ "Duplicate",
+ "No Events",
+ "EM Abort"
+};
+#endif
+
+static int tfc_em_insert_response(struct cfa_bld_mpcinfo *mpc_info,
+ struct bnxt_mpc_mbuf *mpc_msg_out,
+ uint8_t *rx_msg,
+ uint32_t *hash)
+{
+ int i;
+ int rc;
+ struct cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD];
+
+ /* Process response */
+ for (i = 0; i < CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD; i++)
+ fields_cmp[i].field_id = INVALID_U16;
+
+ fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD].field_id =
+ CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD;
+ fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_BKT_NUM_FLD].field_id =
+ CFA_BLD_MPC_EM_INSERT_CMP_BKT_NUM_FLD;
+ fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD].field_id =
+ CFA_BLD_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD;
+ fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD].field_id =
+ CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD;
+ fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD].field_id =
+ CFA_BLD_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD;
+ fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_HASH_MSB_FLD].field_id =
+ CFA_BLD_MPC_EM_INSERT_CMP_HASH_MSB_FLD;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_parse_em_insert(rx_msg,
+ mpc_msg_out->msg_size,
+ fields_cmp);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "EM insert parse failed: %d", rc);
+ return -EINVAL;
+ }
+ if (fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK) {
+#ifdef EM_DEBUG
+ PMD_DRV_LOG_LINE(ERR, "MPC failed with error:%s",
+ tfc_mpc_error_string[(uint32_t)
+ fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD].val]);
+#else
+ PMD_DRV_LOG_LINE(ERR, "MPC failed with status code:%d",
+ (uint32_t)
+ fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD].val);
+#endif
+ rc = ((int)fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD].val) * -1;
+ return rc;
+ }
+
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ /* If the dynamic bucket is unused then free it */
+ if (bucket_offset && fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD].val == 0) {
+ /* Free allocated resources */
+ fparms.record_offset = bucket_offset;
+ fparms.num_contig_records = TFC_EM_DYNAMIC_BUCKET_RECORD_SIZE;
+
+ rc = cfa_mm_free(cmm, &fparms);
+ bucket_offset = 0;
+ }
+#endif
+
+ *hash = fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD].val;
+
+ return rc;
+}
+
+int tfc_em_insert(struct tfc *tfcp, uint8_t tsid,
+ struct tfc_em_insert_parms *parms)
+{
+ int rc;
+ int cleanup_rc;
+ struct tfc_cpm *cpm_lkup = NULL;
+ struct tfc_cpm *cpm_act = NULL;
+ uint16_t pool_id;
+ struct tfc_cmm *cmm = NULL;
+ bool is_bs_owner;
+ struct tfc_ts_mem_cfg mem_cfg;
+ uint32_t entry_offset;
+ uint32_t num_contig_records;
+ struct bnxt_mpc_mbuf mpc_msg_in;
+ struct bnxt_mpc_mbuf mpc_msg_out;
+ struct tfc_ts_pool_info pi;
+ struct cfa_mm_free_parms fparms;
+ struct cfa_mm_alloc_parms aparms;
+ uint32_t buff_len;
+ uint8_t tx_msg[TFC_MPC_MAX_TX_BYTES];
+ uint8_t rx_msg[TFC_MPC_MAX_RX_BYTES];
+ uint32_t msg_count = BNXT_MPC_COMP_MSG_COUNT;
+ uint32_t i;
+ uint32_t hash = 0;
+ struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD];
+ bool is_shared;
+ struct cfa_bld_mpcinfo *mpc_info;
+ bool valid;
+ uint16_t max_pools;
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ struct cfa_mm_alloc_parms bucket_aparms;
+ bool shared = false;
+ uint32_t bucket_offset;
+#endif
+
+ tfo_mpcinfo_get(tfcp->tfo, &mpc_info);
+
+ rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, &max_pools);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get tsid: %s", strerror(-rc));
+ return -EINVAL;
+ }
+ if (unlikely(!valid)) {
+ PMD_DRV_LOG_LINE(ERR, "tsid not allocated %d", tsid);
+ return -EINVAL;
+ }
+ if (unlikely(!max_pools)) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) Max pools must be greater than 0 %d",
+ tsid, max_pools);
+ return -EINVAL;
+ }
+
+ /* Check that MPC APIs are bound */
+ if (unlikely(mpc_info->mpcops == NULL)) {
+ PMD_DRV_LOG_LINE(ERR, "MPC not initialized");
+ return -EINVAL;
+ }
+
+ rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid,
+ parms->dir,
+ CFA_REGION_TYPE_LKUP,
+ &is_bs_owner,
+ &mem_cfg); /* Gets rec_cnt */
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "tfo_ts_get_mem_cfg() failed: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+
+ /* Get CPM instances */
+ rc = tfo_ts_get_cpm_inst(tfcp->tfo, tsid, parms->dir, &cpm_lkup, &cpm_act);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get CPM instances: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ num_contig_records = 1 << next_pow2(parms->lkup_key_sz_words);
+
+ tfo_ts_get_pool_info(tfcp->tfo, tsid, parms->dir, &pi);
+
+ rc = tfc_cpm_get_avail_pool(cpm_lkup, &pool_id);
+
+ /* if no pool available locally or all pools full */
+ if (rc) {
+ /* Allocate a pool */
+ struct cfa_mm_query_parms qparms;
+ struct cfa_mm_open_parms oparms;
+ uint16_t fid;
+
+ /* There is only 1 pool for a non-shared table scope and
+ * it is full.
+ */
+ if (!is_shared) {
+ PMD_DRV_LOG_LINE(ERR, "no records remain");
+ return -ENOMEM;
+ }
+
+ rc = tfc_get_fid(tfcp, &fid);
+ if (unlikely(rc))
+ return rc;
+
+ rc = tfc_tbl_scope_pool_alloc(tfcp,
+ fid,
+ tsid,
+ CFA_REGION_TYPE_LKUP,
+ parms->dir,
+ NULL,
+ &pool_id);
+
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "table scope pool alloc failed: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ /*
+ * Create pool CMM instance.
+ * rec_cnt is the total number of records, which includes the static buckets.
+ */
+ qparms.max_records = (mem_cfg.rec_cnt - mem_cfg.lkup_rec_start_offset) / max_pools;
+ qparms.max_contig_records = pi.lkup_max_contig_rec;
+ rc = cfa_mm_query(&qparms);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "cfa_mm_query() failed: %s", strerror(-rc));
+ rte_free(cmm);
+ return -EINVAL;
+ }
+
+ cmm = rte_zmalloc("tf", qparms.db_size, 0);
+ if (cmm == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "cmm alloc error %s", strerror(ENOMEM));
+ return -ENOMEM;
+ }
+ oparms.db_mem_size = qparms.db_size;
+ oparms.max_contig_records = qparms.max_contig_records;
+ oparms.max_records = qparms.max_records;
+ rc = cfa_mm_open(cmm, &oparms);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "cfa_mm_open() failed: %s", strerror(-rc));
+ rte_free(cmm);
+ return -EINVAL;
+ }
+
+ /* Store CMM instance in the CPM */
+ rc = tfc_cpm_set_cmm_inst(cpm_lkup, pool_id, cmm);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "tfc_cpm_set_cmm_inst() failed: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ /* Store the updated pool information */
+ tfo_ts_set_pool_info(tfcp->tfo, tsid, parms->dir, &pi);
+
+ } else {
+ /* Get the pool instance and allocate an lkup rec index from the pool */
+ rc = tfc_cpm_get_cmm_inst(cpm_lkup, pool_id, &cmm);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "tfc_cpm_get_cmm_inst() failed: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+ }
+
+ aparms.num_contig_records = num_contig_records;
+ rc = cfa_mm_alloc(cmm, &aparms);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "cfa_mm_alloc() failed: %s", strerror(-rc));
+ return -EINVAL;
+ }
+
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ if (!shared) {
+ /* Allocate dynamic bucket */
+ bucket_aparms.num_contig_records = TFC_EM_DYNAMIC_BUCKET_RECORD_SIZE;
+ rc = cfa_mm_alloc(cmm, &bucket_aparms);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR,
+ "cfa_mm_alloc() for dynamic bucket failed: %s\n",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ bucket_offset = bucket_aparms.record_offset;
+ }
+#endif
+
+ CREATE_OFFSET(&entry_offset, pi.lkup_pool_sz_exp, pool_id, aparms.record_offset);
+
+ /* Create MPC EM insert command using builder */
+ for (i = 0; i < CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD; i++)
+ fields_cmd[i].field_id = INVALID_U16;
+
+ fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD].field_id =
+ CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD;
+ fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD].val = 0xAA;
+
+ fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD].field_id =
+ CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD;
+ fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD].val = tsid;
+
+ fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD].field_id =
+ CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD;
+ fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD].val = parms->lkup_key_sz_words;
+
+ /* LREC address */
+ fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD].field_id =
+ CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD;
+ fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD].val = entry_offset +
+ mem_cfg.lkup_rec_start_offset;
+
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ /* Dynamic bucket address */
+ fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD].field_id =
+ CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD;
+ fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD].val = bucket_offset;
+#endif
+ fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD].field_id =
+ CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD;
+ fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD].val = 0x0;
+
+ buff_len = TFC_MPC_MAX_TX_BYTES;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_build_em_insert(tx_msg,
+ &buff_len,
+ parms->lkup_key_data,
+ fields_cmd);
+
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "EM insert build failed: %s",
+ strerror(-rc));
+ goto cleanup;
+ }
+
+ /* Send MPC */
+ mpc_msg_in.chnl_id = (parms->dir == CFA_DIR_TX ?
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_TE_CFA :
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_RE_CFA);
+ mpc_msg_in.msg_data = &tx_msg[TFC_MPC_HEADER_SIZE_BYTES];
+ mpc_msg_in.msg_size = (parms->lkup_key_sz_words * TFC_MPC_BYTES_PER_WORD) +
+ TFC_MPC_HEADER_SIZE_BYTES;
+ mpc_msg_out.cmp_type = CMPL_BASE_TYPE_MID_PATH_LONG;
+ mpc_msg_out.msg_data = &rx_msg[TFC_MPC_HEADER_SIZE_BYTES];
+ mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES;
+
+ rc = tfc_mpc_send(tfcp->bp,
+ &mpc_msg_in,
+ &mpc_msg_out,
+ &msg_count,
+ TFC_MPC_EM_INSERT,
+ parms->batch_info);
+
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR, "EM insert send failed: %s", strerror(-rc));
+ goto cleanup;
+ }
+
+ if (!parms->batch_info || !parms->batch_info->enabled) {
+ rc = tfc_em_insert_response(mpc_info,
+ &mpc_msg_out,
+ rx_msg,
+ &hash);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR,
+ "EM insert tfc_em_insert_response() failed: %d",
+ rc);
+ goto cleanup;
+ }
+ }
+
+ *parms->flow_handle = tfc_create_flow_handle(tsid,
+ num_contig_records, /* Based on key size */
+ entry_offset,
+ hash);
+
+ /* Update CPM info so it will determine best pool to use next alloc */
+ rc = tfc_cpm_set_usage(cpm_lkup, pool_id, aparms.used_count, aparms.all_used);
+ if (unlikely(rc)) {
+ PMD_DRV_LOG_LINE(ERR,
+ "EM insert tfc_cpm_set_usage() failed: %d",
+ rc);
+ goto cleanup;
+ }
+
+ if (!rc)
+ return 0;
+
+ cleanup:
+ /*
+ * Preserve the rc from the actual error rather than
+ * an error during cleanup.
+ */
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ /* If the dynamic bucket set then free it */
+ if (bucket_offset) {
+ /* Free allocated resources */
+ fparms.record_offset = bucket_offset;
+ fparms.num_contig_records = TFC_EM_DYNAMIC_BUCKET_RECORD_SIZE;
+
+ cleanup_rc = cfa_mm_free(cmm, &fparms);
+ }
+#endif
+
+ /* Free allocated resources */
+ fparms.record_offset = aparms.record_offset;
+ fparms.num_contig_records = num_contig_records;
+ cleanup_rc = cfa_mm_free(cmm, &fparms);
+ if (cleanup_rc != 0)
+ PMD_DRV_LOG_LINE(ERR, "failed to free entry: %s", strerror(-cleanup_rc));
+
+ cleanup_rc = tfc_cpm_set_usage(cpm_lkup, pool_id, fparms.used_count, false);
+ if (cleanup_rc != 0)
+ PMD_DRV_LOG_LINE(ERR, "failed to set usage: %s", strerror(-cleanup_rc));
+
+ return rc;
+}
+
+static int tfc_em_delete_response(struct cfa_bld_mpcinfo *mpc_info,
+ struct bnxt_mpc_mbuf *mpc_msg_out,
+ uint8_t *rx_msg
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ , bool *db_unused,
+ uint32_t *db_offset
+#endif
+ )
+{
+ int i;
+ int rc;
+ struct cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD];
+
+ /* Process response */
+ for (i = 0; i < CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD; i++)
+ fields_cmp[i].field_id = INVALID_U16;
+
+ fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD].field_id =
+ CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD;
+#ifdef EM_DEBUG
+ fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_BKT_NUM_FLD].field_id =
+ CFA_BLD_MPC_EM_DELETE_CMP_BKT_NUM_FLD;
+ fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_NUM_ENTRIES_FLD].field_id =
+ CFA_BLD_MPC_EM_DELETE_CMP_NUM_ENTRIES_FLD;
+ fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD].field_id =
+ CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD;
+ fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD].field_id =
+ CFA_BLD_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD;
+#endif
+ rc = mpc_info->mpcops->cfa_bld_mpc_parse_em_delete(rx_msg,
+ mpc_msg_out->msg_size,
+ fields_cmp);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "delete parse failed: %s", strerror(-rc));
+ return -EINVAL;
+ }
+
+ if (fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK) {
+#ifdef EM_DEBUG
+ PMD_DRV_LOG_LINE(ERR, "MPC failed with error:%s",
+ tfc_mpc_error_string[(uint32_t)
+ fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD].val]);
+#else
+ PMD_DRV_LOG_LINE(ERR, "MPC failed with status code:%d",
+ (uint32_t)
+ fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD].val);
+#endif
+ rc = ((int)fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD].val) * -1;
+ return rc;
+ }
+
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ *db_unused = fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD].val == 1 ?
+ true : false;
+ *db_offset = fields[CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD].val;
+#endif
+ return 0;
+}
+
+int tfc_em_delete_raw(struct tfc *tfcp,
+ uint8_t tsid,
+ enum cfa_dir dir,
+ uint32_t offset,
+ uint32_t static_bucket,
+ struct tfc_mpc_batch_info_t *batch_info
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ , bool *db_unused,
+ uint32_t *db_offset
+#endif
+ )
+{
+ int rc = 0;
+ uint32_t buff_len;
+ struct bnxt_mpc_mbuf mpc_msg_in;
+ struct bnxt_mpc_mbuf mpc_msg_out;
+ uint8_t tx_msg[TFC_MPC_MAX_TX_BYTES];
+ uint8_t rx_msg[TFC_MPC_MAX_RX_BYTES];
+ uint32_t msg_count = BNXT_MPC_COMP_MSG_COUNT;
+ int i;
+ struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD];
+ struct cfa_bld_mpcinfo *mpc_info;
+
+ tfo_mpcinfo_get(tfcp->tfo, &mpc_info);
+ if (mpc_info->mpcops == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "MPC not initialized");
+ return -EINVAL;
+ }
+
+ /* Create MPC EM delete command using builder */
+ for (i = 0; i < CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD; i++)
+ fields_cmd[i].field_id = INVALID_U16;
+
+ fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_OPAQUE_FLD].field_id =
+ CFA_BLD_MPC_EM_DELETE_CMD_OPAQUE_FLD;
+ fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_OPAQUE_FLD].val = 0xAA;
+ fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD].field_id =
+ CFA_BLD_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD;
+ fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD].val = tsid;
+
+ /* LREC address to delete */
+ fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD].field_id =
+ CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD;
+ fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD].val = offset;
+
+ fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD].field_id =
+ CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD;
+ fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD].val = static_bucket;
+
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ /* Static bucket address */
+ fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD].field_id =
+ CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD;
+ fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD].val = 0x01222222;
+#endif
+
+ /* Create MPC EM delete command using builder */
+ buff_len = TFC_MPC_MAX_TX_BYTES;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_build_em_delete(tx_msg,
+ &buff_len,
+ fields_cmd);
+
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "delete mpc build failed: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ /* Send MPC */
+ mpc_msg_in.chnl_id = (dir == CFA_DIR_TX ?
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_TE_CFA :
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_RE_CFA);
+ mpc_msg_in.msg_data = &tx_msg[TFC_MPC_HEADER_SIZE_BYTES];
+ mpc_msg_in.msg_size = 16;
+ mpc_msg_out.cmp_type = CMPL_BASE_TYPE_MID_PATH_LONG;
+ mpc_msg_out.msg_data = &rx_msg[TFC_MPC_HEADER_SIZE_BYTES];
+ mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES;
+
+ rc = tfc_mpc_send(tfcp->bp,
+ &mpc_msg_in,
+ &mpc_msg_out,
+ &msg_count,
+ TFC_MPC_EM_DELETE,
+ batch_info);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "delete MPC send failed: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ if (!batch_info || !batch_info->enabled)
+ rc = tfc_em_delete_response(mpc_info,
+ &mpc_msg_out,
+ rx_msg
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ , db_unused,
+ db_offset
+#endif
+ );
+ return rc;
+}
+
+int tfc_em_delete(struct tfc *tfcp, struct tfc_em_delete_parms *parms)
+{
+ int rc = 0;
+ uint32_t static_bucket;
+ uint32_t pool_id;
+ struct tfc_cpm *cpm_lkup = NULL;
+ struct tfc_cpm *cpm_act = NULL;
+ struct tfc_cmm *cmm;
+ uint32_t record_offset;
+ uint32_t record_size;
+ struct cfa_mm_free_parms fparms;
+ uint8_t tsid;
+ bool is_shared;
+ struct tfc_ts_pool_info pi;
+ bool is_bs_owner;
+ struct tfc_ts_mem_cfg mem_cfg;
+ bool valid;
+
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ bool db_unused;
+ uint32_t db_offset;
+#endif
+ /* Get fields from MPC Flow handle */
+ tfc_get_fields_from_flow_handle(&parms->flow_handle,
+ &tsid,
+ &record_size,
+ &record_offset,
+ &static_bucket);
+
+ rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get tsid: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+ if (!valid) {
+ PMD_DRV_LOG_LINE(ERR, "tsid not allocated %d", tsid);
+ return -EINVAL;
+ }
+
+ tfo_ts_get_pool_info(tfcp->tfo, tsid, parms->dir, &pi);
+
+ pool_id = TFC_FLOW_GET_POOL_ID(record_offset, pi.lkup_pool_sz_exp);
+
+ rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid,
+ parms->dir,
+ CFA_REGION_TYPE_LKUP,
+ &is_bs_owner,
+ &mem_cfg); /* Gets rec_cnt */
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tfo_ts_get_mem_cfg() failed: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ /* Get CPM instance for this table scope */
+ rc = tfo_ts_get_cpm_inst(tfcp->tfo, tsid, parms->dir, &cpm_lkup, &cpm_act);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get CMM instance: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ rc = tfc_em_delete_raw(tfcp,
+ tsid,
+ parms->dir,
+ record_offset +
+ mem_cfg.lkup_rec_start_offset,
+ static_bucket,
+ parms->batch_info
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ , &db_unused,
+ &db_offset
+#endif
+ );
+
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ /* If the dynamic bucket is unused then free it */
+ if (db_unused) {
+ /* Free allocated resources */
+ fparms.record_offset = db_offset;
+ fparms.num_contig_records = TFC_EM_DYNAMIC_BUCKET_RECORD_SIZE;
+
+ rc = cfa_mm_free(cmm, &fparms);
+ }
+#endif
+
+ rc = tfc_cpm_get_cmm_inst(cpm_lkup, pool_id, &cmm);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get CMM instance: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ fparms.record_offset = record_offset;
+ fparms.num_contig_records = 1 << next_pow2(record_size);
+
+ rc = cfa_mm_free(cmm, &fparms);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "failed to free CMM instance: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ rc = tfc_cpm_set_usage(cpm_lkup, pool_id, fparms.used_count, false);
+ if (rc != 0)
+ PMD_DRV_LOG_LINE(ERR, "failed to set usage: %s",
+ strerror(-rc));
+
+ return rc;
+}
+
+static void bucket_decode(uint32_t *bucket_ptr,
+ struct bucket_info_t *bucket_info)
+{
+ int i;
+ int offset = 0;
+
+ bucket_info->valid = false;
+ bucket_info->chain = tfc_getbits(bucket_ptr, 254, 1);
+ bucket_info->chain_ptr = tfc_getbits(bucket_ptr, 228, 26);
+
+ if (bucket_info->chain ||
+ bucket_info->chain_ptr)
+ bucket_info->valid = true;
+
+ for (i = 0; i < TFC_BUCKET_ENTRIES; i++) {
+ bucket_info->entries[i].entry_ptr = tfc_getbits(bucket_ptr, offset, 26);
+ offset += 26;
+ bucket_info->entries[i].hash_msb = tfc_getbits(bucket_ptr, offset, 12);
+ offset += 12;
+ if (bucket_info->entries[i].hash_msb ||
+ bucket_info->entries[i].entry_ptr) {
+ bucket_info->valid = true;
+ }
+ }
+}
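+
+/*
+ * Static bucket layout as implied by the bit offsets used in
+ * bucket_decode() above (a sketch, not an authoritative HW definition):
+ *
+ * bits 0..227 six entries, each a 26-bit entry_ptr + 12-bit hash_msb
+ * bits 228..253 chain_ptr (26 bits)
+ * bit 254 chain flag
+ *
+ * A bucket is reported valid if the chain flag, chain_ptr or any entry
+ * field is non-zero.
+ */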
+
+int tfc_em_delete_entries_by_pool_id(struct tfc *tfcp,
+ uint8_t tsid,
+ enum cfa_dir dir,
+ uint16_t pool_id,
+ uint8_t debug,
+ uint8_t *data)
+{
+ uint32_t offset;
+ int rc;
+ int i;
+ int j;
+ struct bucket_info_t bucket;
+ struct tfc_ts_pool_info pi;
+ struct tfc_ts_mem_cfg mem_cfg;
+ bool is_bs_owner;
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ bool db_unused;
+ uint32_t db_offset;
+#endif
+ struct tfc_mpc_batch_info_t batch_info;
+
+ memset(&batch_info, 0, sizeof(batch_info));
+
+ /* Get memory info */
+ rc = tfo_ts_get_pool_info(tfcp->tfo, tsid, dir, &pi);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to get pool info for tsid:%d",
+ tsid);
+ return -EINVAL;
+ }
+
+ rc = tfo_ts_get_mem_cfg(tfcp->tfo,
+ tsid,
+ dir,
+ CFA_REGION_TYPE_LKUP,
+ &is_bs_owner,
+ &mem_cfg);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tfo_ts_get_mem_cfg() failed: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+
+ /* Read static bucket entries */
+ for (offset = 0; offset < mem_cfg.lkup_rec_start_offset; ) {
+ /*
+ * Read static bucket region of lookup table.
+ * A static bucket is 32B in size and must be 32B aligned.
+ * A table read can read up to 4 * 32B so in the interest
+ * of efficiency the max read size will be used.
+ */
+ rc = tfc_mpc_table_read(tfcp,
+ tsid,
+ dir,
+ CFA_REGION_TYPE_LKUP,
+ offset,
+ TFC_MPC_MAX_TABLE_READ_WORDS,
+ data,
+ debug);
+
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR,
+ "tfc_mpc_table_read() failed for offset:%d: %s",
+ offset, strerror(-rc));
+ return -EINVAL;
+ }
+
+ for (i = 0; (i < TFC_MPC_MAX_TABLE_READ_WORDS) &&
+ (offset < mem_cfg.lkup_rec_start_offset); i++) {
+ /* Walk static bucket entry pointers */
+ bucket_decode((uint32_t *)&data[i * TFC_MPC_BYTES_PER_WORD],
+ &bucket);
+
+ for (j = 0; j < TFC_BUCKET_ENTRIES; j++) {
+ if (bucket.entries[j].entry_ptr != 0 &&
+ pool_id == (bucket.entries[j].entry_ptr >> pi.lkup_pool_sz_exp)) {
+ /* Delete EM entry */
+ rc = tfc_em_delete_raw(tfcp,
+ tsid,
+ dir,
+ bucket.entries[j].entry_ptr,
+ offset,
+ &batch_info
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ , &db_unused,
+ &db_offset
+#endif
+ );
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR,
+ "EM delete failed for offset:0x%08x %d",
+ offset, rc);
+ return -1;
+ }
+ }
+ }
+
+ offset++;
+ }
+ }
+
+ return rc;
+}
+
+int tfc_mpc_send(struct bnxt *bp,
+ struct bnxt_mpc_mbuf *in_msg,
+ struct bnxt_mpc_mbuf *out_msg,
+ uint32_t *opaque,
+ int type,
+ struct tfc_mpc_batch_info_t *batch_info)
+{
+ int rc;
+ bool enabled = false;
+
+ if (batch_info)
+ enabled = batch_info->enabled;
+
+ rc = bnxt_mpc_send(bp, in_msg, out_msg, opaque, enabled);
+
+ if (rc)
+ return rc;
+
+ if (batch_info && batch_info->enabled) {
+ memcpy(&batch_info->comp_info[batch_info->count].out_msg,
+ out_msg,
+ sizeof(*out_msg));
+ batch_info->comp_info[batch_info->count].mpc_queue =
+ bp->mpc->mpc_txq[in_msg->chnl_id];
+ batch_info->comp_info[batch_info->count].type = type;
+ batch_info->count++;
+ }
+
+ return 0;
+}
+
+static int tfc_mpc_process_completions(uint8_t *rx_msg,
+ struct tfc_mpc_comp_info_t *comp_info)
+{
+ int rc;
+ int retry = BNXT_MPC_RX_RETRY;
+
+ comp_info->out_msg.msg_data = rx_msg;
+
+ do {
+ rc = bnxt_mpc_cmd_cmpl(comp_info->mpc_queue,
+ &comp_info->out_msg);
+
+ if (likely(rc == 1)) {
+#ifdef MPC_DEBUG
+ if (unlikely(retry != BNXT_MPC_RX_RETRY))
+ PMD_DRV_LOG_LINE(ERR, "Retries:%d",
+ BNXT_MPC_RX_RETRY - retry);
+#endif
+ return 0;
+ }
+#ifdef MPC_DEBUG
+ PMD_DRV_LOG_LINE(ERR,
+ "Received zero or more than one completion:%d",
+ rc);
+#endif
+ retry--;
+ } while (retry);
+
+ PMD_DRV_LOG_LINE(ERR, "Retry timeout rc:%d", rc);
+ return -1;
+}
+
+int tfc_mpc_batch_start(struct tfc_mpc_batch_info_t *batch_info)
+{
+ if (unlikely(batch_info->enabled))
+ return -EBUSY;
+
+ batch_info->enabled = true;
+ batch_info->count = 0;
+ batch_info->error = false;
+ return 0;
+}
+
+bool tfc_mpc_batch_started(struct tfc_mpc_batch_info_t *batch_info)
+{
+ if (unlikely(!batch_info))
+ return false;
+
+ return (batch_info->enabled && batch_info->count > 0);
+}
+
+int tfc_mpc_batch_end(struct tfc *tfcp,
+ struct tfc_mpc_batch_info_t *batch_info)
+{
+ uint32_t i;
+ int rc;
+ uint8_t rx_msg[TFC_MPC_MAX_RX_BYTES];
+ struct cfa_bld_mpcinfo *mpc_info;
+ uint32_t hash = 0;
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ bool db_unused;
+ uint32_t db_offset;
+#endif
+
+ if (unlikely(!batch_info->enabled))
+ return -EBUSY;
+
+ tfo_mpcinfo_get(tfcp->tfo, &mpc_info);
+
+ if (unlikely(mpc_info->mpcops == NULL)) {
+ PMD_DRV_LOG_LINE(ERR, "MPC not initialized");
+ return -EINVAL;
+ }
+
+ rte_delay_us_block(BNXT_MPC_RX_US_DELAY * 4);
+
+ for (i = 0; i < batch_info->count; i++) {
+ rc = tfc_mpc_process_completions(&rx_msg[TFC_MPC_HEADER_SIZE_BYTES],
+ &batch_info->comp_info[i]);
+ if (unlikely(rc))
+ return -1;
+
+
+ switch (batch_info->comp_info[i].type) {
+ case TFC_MPC_EM_INSERT:
+ rc = tfc_em_insert_response(mpc_info,
+ &batch_info->comp_info[i].out_msg,
+ rx_msg,
+ &hash);
+ /*
+ * The stored em_hdl is a partially built flow handle;
+ * complete it with the hash value returned in the EM
+ * insert completion.
+ */
+ batch_info->em_hdl[i] =
+ tfc_create_flow_handle2(batch_info->em_hdl[i],
+ hash);
+ batch_info->em_error = rc;
+ break;
+ case TFC_MPC_EM_DELETE:
+ rc = tfc_em_delete_response(mpc_info,
+ &batch_info->comp_info[i].out_msg,
+ rx_msg
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ , bool *db_unused,
+ uint32_t *db_offset
+#endif
+ );
+ break;
+ case TFC_MPC_TABLE_WRITE:
+ rc = tfc_act_set_response(mpc_info,
+ &batch_info->comp_info[i].out_msg,
+ rx_msg);
+ break;
+ case TFC_MPC_TABLE_READ:
+ rc = tfc_act_get_only_response(mpc_info,
+ &batch_info->comp_info[i].out_msg,
+ rx_msg,
+ &batch_info->comp_info[i].read_words);
+ break;
+
+ case TFC_MPC_TABLE_READ_CLEAR:
+ default:
+ PMD_DRV_LOG_LINE(ERR, "MPC Batch not supported for type: %d",
+ batch_info->comp_info[i].type);
+ return -1;
+ }
+
+ batch_info->result[i] = rc;
+ if (rc)
+ batch_info->error = true;
+ }
+
+ batch_info->enabled = false;
+ batch_info->count = 0;
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TFC_EM_H_
+#define _TFC_EM_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <errno.h>
+#include <cfa_bld_mpcops.h>
+
+/* Defines the maximum number of outstanding completions supported. */
+#define BNXT_MPC_COMP_MAX_COUNT 64
+
+struct tfc_mpc_comp_info_t {
+ struct bnxt_mpc_txq *mpc_queue;
+ struct bnxt_mpc_mbuf out_msg;
+ int type;
+ uint16_t read_words;
+};
+
+struct tfc_mpc_batch_info_t {
+ bool enabled;
+ int error;
+ int em_error;
+ uint32_t count;
+ uint32_t result[BNXT_MPC_COMP_MAX_COUNT];
+ /* List of resources IDs that are to be processed during batch end */
+ uint64_t res_idx[BNXT_MPC_COMP_MAX_COUNT];
+ uint64_t em_hdl[BNXT_MPC_COMP_MAX_COUNT];
+ struct tfc_mpc_comp_info_t comp_info[BNXT_MPC_COMP_MAX_COUNT];
+};
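+
+/*
+ * Typical batching sequence (a sketch; EM insert/delete are examples of
+ * MPC operations that accept a batch_info pointer):
+ *
+ * struct tfc_mpc_batch_info_t batch_info = { 0 };
+ *
+ * tfc_mpc_batch_start(&batch_info);
+ * ...issue up to BNXT_MPC_COMP_MAX_COUNT MPC operations, passing
+ * &batch_info so that their completions are deferred...
+ * if (tfc_mpc_batch_started(&batch_info))
+ * tfc_mpc_batch_end(tfcp, &batch_info);
+ *
+ * tfc_mpc_batch_end() polls each deferred completion and stores the
+ * per-operation status in result[].
+ */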
+
+enum tfc_mpc_cmd_type {
+ TFC_MPC_EM_INSERT,
+ TFC_MPC_EM_DELETE,
+ TFC_MPC_TABLE_WRITE,
+ TFC_MPC_TABLE_READ,
+ TFC_MPC_TABLE_READ_CLEAR,
+ TFC_MPC_INVALIDATE
+};
+
+#define TFC_EM_DYNAMIC_BUCKET_EN 0
+
+/*
+ * Derived from CAS document
+ */
+#define TFC_MPC_MAX_TX_BYTES 188
+#define TFC_MPC_MAX_RX_BYTES 188
+
+#define TFC_MPC_HEADER_SIZE_BYTES 16
+
+#define TFC_MPC_BYTES_PER_WORD 32
+#define TFC_MPC_MAX_TABLE_READ_WORDS 4
+#define TFC_MPC_MAX_TABLE_READ_BYTES \
+ (TFC_MPC_BYTES_PER_WORD * TFC_MPC_MAX_TABLE_READ_WORDS)
+
+#define TFC_BUCKET_ENTRIES 6
+
+struct em_info_t {
+ bool valid;
+ uint8_t rec_size;
+ uint16_t epoch0;
+ uint16_t epoch1;
+ uint8_t opcode;
+ uint8_t strength;
+ uint8_t act_hint;
+
+ uint32_t act_rec_ptr; /* Not FAST */
+
+ uint32_t destination; /* Just FAST */
+
+ uint8_t tcp_direction; /* Just CT */
+ uint8_t tcp_update_en;
+ uint8_t tcp_win;
+ uint32_t tcp_msb_loc;
+ uint32_t tcp_msb_opp;
+ uint8_t tcp_msb_opp_init;
+ uint8_t state;
+ uint8_t timer_value;
+
+ uint16_t ring_table_idx; /* Not CT and not RECYCLE */
+ uint8_t act_rec_size;
+ uint8_t paths_m1;
+ uint8_t fc_op;
+ uint8_t fc_type;
+ uint32_t fc_ptr;
+
+ uint8_t recycle_dest; /* Just Recycle */
+ uint8_t prof_func;
+ uint8_t meta_prof;
+ uint32_t metadata;
+
+ uint8_t range_profile;
+ uint16_t range_index;
+
+ uint8_t *key;
+};
+
+struct sb_entry_t {
+ uint16_t hash_msb;
+ uint32_t entry_ptr;
+};
+
+struct bucket_info_t {
+ bool valid;
+ bool chain;
+ uint32_t chain_ptr;
+ struct sb_entry_t entries[TFC_BUCKET_ENTRIES];
+ struct em_info_t em_info[TFC_BUCKET_ENTRIES];
+};
+
+#define CALC_NUM_RECORDS_IN_POOL(a, b, c)
+
+/* Calculates number of 32Byte records from total size in 32bit words */
+#define CALC_NUM_RECORDS(result, key_sz_words) \
+ (*(result) = (((key_sz_words) + 7) / 8))
+
+/* Calculates the entry offset */
+#define CREATE_OFFSET(result, pool_sz_exp, pool_id, record_offset) \
+ (*(result) = (((pool_id) << (pool_sz_exp)) | (record_offset)))
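+
+/*
+ * Worked example (illustrative values only): a key of 10 32-bit words
+ * gives CALC_NUM_RECORDS(&n, 10) -> n = (10 + 7) / 8 = 2 records, and
+ * CREATE_OFFSET(&off, 10, 3, 0x25) -> off = (3 << 10) | 0x25 = 0xC25.
+ */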
+
+int tfc_em_delete_raw(struct tfc *tfcp,
+ uint8_t tsid,
+ enum cfa_dir dir,
+ uint32_t offset,
+ uint32_t static_bucket,
+ struct tfc_mpc_batch_info_t *batch_info
+#if TFC_EM_DYNAMIC_BUCKET_EN
+ , bool *db_unused,
+ uint32_t *db_offset
+#endif
+#ifdef BNXT_MPC_COMP_COUNT
+ , uint32_t comp_count
+#endif
+ );
+
+int tfc_mpc_table_read(struct tfc *tfcp,
+ uint8_t tsid,
+ enum cfa_dir dir,
+ uint32_t type,
+ uint32_t offset,
+ uint8_t words,
+ uint8_t *data,
+ uint8_t debug);
+
+int tfc_em_delete_entries_by_pool_id(struct tfc *tfcp,
+ uint8_t tsid,
+ enum cfa_dir dir,
+ uint16_t pool_id,
+ uint8_t debug,
+ uint8_t *data);
+
+int tfc_act_set_response(struct cfa_bld_mpcinfo *mpc_info,
+ struct bnxt_mpc_mbuf *mpc_msg_out,
+ uint8_t *rx_msg);
+
+int tfc_act_get_only_response(struct cfa_bld_mpcinfo *mpc_info,
+ struct bnxt_mpc_mbuf *mpc_msg_out,
+ uint8_t *rx_msg,
+ uint16_t *data_sz_words);
+
+int tfc_mpc_send(struct bnxt *bp,
+ struct bnxt_mpc_mbuf *in_msg,
+ struct bnxt_mpc_mbuf *out_msg,
+ uint32_t *opaque,
+ int type,
+ struct tfc_mpc_batch_info_t *batch_info);
+
+#endif /* _TFC_EM_H_ */
new file mode 100644
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TFC_FLOW_HANDLE_H_
+#define _TFC_FLOW_HANDLE_H_
+
+#define TFC_POOL_TSID_FLOW_HANDLE_MASK 0x0F80000000000000ULL
+#define TFC_POOL_TSID_FLOW_HANDLE_SFT 55
+#define TFC_RECORD_SIZE_FLOW_HANDLE_MASK 0x0070000000000000ULL
+#define TFC_RECORD_SIZE_FLOW_HANDLE_SFT 52
+#define TFC_EM_REC_OFFSET_FLOW_HANDLE_MASK 0x000FFFFFFC000000ULL
+#define TFC_EM_REC_OFFSET_FLOW_HANDLE_SFT 26
+#define TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_MASK 0x0000000003FFFFFFULL
+#define TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_SFT 0
+
+#define TFC_FLOW_HANDLE_MASK ( \
+ TFC_POOL_TSID_FLOW_HANDLE_MASK | \
+ TFC_RECORD_SIZE_FLOW_HANDLE_MASK | \
+ TFC_EM_REC_OFFSET_FLOW_HANDLE_MASK | \
+ TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_MASK)
+
+static inline void tfc_get_fields_from_flow_handle(uint64_t *flow_handle,
+ uint8_t *tsid,
+ uint32_t *record_size,
+ uint32_t *em_record_offset,
+ uint32_t *static_bucket_offset)
+{
+ *tsid = (uint8_t)((*flow_handle & TFC_POOL_TSID_FLOW_HANDLE_MASK) >>
+ TFC_POOL_TSID_FLOW_HANDLE_SFT);
+ *record_size =
+ (uint32_t)((*flow_handle & TFC_RECORD_SIZE_FLOW_HANDLE_MASK) >>
+ TFC_RECORD_SIZE_FLOW_HANDLE_SFT);
+ *em_record_offset =
+ (uint32_t)((*flow_handle & TFC_EM_REC_OFFSET_FLOW_HANDLE_MASK) >>
+ TFC_EM_REC_OFFSET_FLOW_HANDLE_SFT);
+ *static_bucket_offset =
+ (uint32_t)((*flow_handle & TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_MASK) >>
+ TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_SFT);
+}
+
+static inline uint64_t tfc_create_flow_handle(uint32_t tsid,
+ uint32_t record_size,
+ uint32_t em_record_offset,
+ uint32_t static_bucket_offset)
+{
+ uint64_t flow_handle = 0ULL;
+
+ flow_handle |=
+ ((((uint64_t)tsid) << TFC_POOL_TSID_FLOW_HANDLE_SFT) &
+ TFC_POOL_TSID_FLOW_HANDLE_MASK);
+ flow_handle |=
+ ((((uint64_t)record_size) << TFC_RECORD_SIZE_FLOW_HANDLE_SFT) &
+ TFC_RECORD_SIZE_FLOW_HANDLE_MASK);
+ flow_handle |=
+ ((((uint64_t)em_record_offset) << TFC_EM_REC_OFFSET_FLOW_HANDLE_SFT) &
+ TFC_EM_REC_OFFSET_FLOW_HANDLE_MASK);
+ flow_handle |=
+ (((static_bucket_offset) << TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_SFT) &
+ TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_MASK);
+
+ return flow_handle;
+}
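+
+/*
+ * Flow handle layout implied by the masks above (for reference only):
+ *
+ * bits 0..25 static bucket offset (26 bits)
+ * bits 26..51 EM record offset (26 bits)
+ * bits 52..54 record size (3 bits)
+ * bits 55..59 table scope id (5 bits)
+ *
+ * tfc_create_flow_handle2() below only fills in the static bucket
+ * offset of a partially built handle, e.g. once an EM insert
+ * completion returns it.
+ */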
+
+static inline uint64_t tfc_create_flow_handle2(uint64_t partial_flow_handle,
+ uint32_t static_bucket_offset)
+{
+ uint64_t flow_handle = partial_flow_handle;
+
+ flow_handle |=
+ (((static_bucket_offset) << TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_SFT) &
+ TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_MASK);
+
+ return flow_handle;
+}
+
+#define TFC_FLOW_GET_POOL_ID(em_record_offset, pool_sz_exp) \
+ ((em_record_offset) >> (pool_sz_exp))
+
+#endif /* _TFC_FLOW_HANDLE_H_ */
new file mode 100644
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+#include <stdio.h>
+#include "tfc.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "tfc.h"
+#include "tfc_msg.h"
+
+int tfc_global_id_alloc(struct tfc *tfcp, uint16_t fid,
+ enum tfc_domain_id domain_id, uint16_t req_cnt,
+ const struct tfc_global_id_req *req,
+ struct tfc_global_id *rsp, uint16_t *rsp_cnt,
+ bool *first)
+{
+ int rc = 0;
+ struct bnxt *bp;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->bp == NULL || tfcp->tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp not initialized");
+ return -EINVAL;
+ }
+
+ if (req == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "global_id req is NULL");
+ return -EINVAL;
+ }
+
+ if (rsp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "global_id rsp is NULL");
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "bp not PF or trusted VF");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_global_id_alloc(tfcp, fid, sid, domain_id, req_cnt,
+ req, rsp, rsp_cnt, first);
+ return rc;
+}
new file mode 100644
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+#include <stdio.h>
+#include "tfc.h"
+
+#include "tfc_msg.h"
+#include "cfa_types.h"
+#include "tfo.h"
+#include "tfc_util.h"
+#include "bnxt.h"
+
+int tfc_identifier_alloc(struct tfc *tfcp, uint16_t fid,
+ enum cfa_track_type tt,
+ struct tfc_identifier_info *ident_info)
+{
+ int rc = 0;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+ if (ident_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid ident_info pointer");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_identifier_alloc(tfcp, ident_info->dir,
+ ident_info->rsubtype,
+ tt, fid, sid, &ident_info->id);
+
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "hwrm failed %s:%s, rc:%s",
+ tfc_dir_2_str(ident_info->dir),
+ tfc_ident_2_str(ident_info->rsubtype),
+ strerror(-rc));
+
+ return rc;
+}
+
+int tfc_identifier_free(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_identifier_info *ident_info)
+{
+ int rc = 0;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+ if (ident_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid ident_info pointer");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_identifier_free(tfcp, ident_info->dir,
+ ident_info->rsubtype,
+ fid, sid, ident_info->id);
+
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "hwrm failed %s:%s:%d, rc:%s",
+ tfc_dir_2_str(ident_info->dir),
+ tfc_ident_2_str(ident_info->rsubtype),
+ ident_info->id, strerror(-rc));
+
+ return rc;
+}
new file mode 100644
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "rte_malloc.h"
+#include "hsi_struct_def_dpdk.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "tfc.h"
+#include "tfc_msg.h"
+#include "tfc_util.h"
+
+int tfc_idx_tbl_alloc(struct tfc *tfcp, uint16_t fid,
+ enum cfa_track_type tt,
+ struct tfc_idx_tbl_info *tbl_info)
+{
+ int rc = 0;
+ struct bnxt *bp;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->bp == NULL || tfcp->tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp not initialized");
+ return -EINVAL;
+ }
+
+ if (tbl_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tbl_info is NULL");
+ return -EINVAL;
+ }
+
+ if (tt >= CFA_TRACK_TYPE_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid track type: %d", tt);
+ return -EINVAL;
+ }
+
+ if (tbl_info->dir >= CFA_DIR_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid cfa dir: %d", tbl_info->dir);
+ return -EINVAL;
+ }
+
+ if (tbl_info->rsubtype >= CFA_RSUBTYPE_IDX_TBL_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid idx tbl subtype: %d",
+ tbl_info->rsubtype);
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "bp not PF or trusted VF");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_idx_tbl_alloc(tfcp, fid, sid, tt, tbl_info->dir,
+ tbl_info->rsubtype, &tbl_info->id);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "hwrm failed: %s:%s %s",
+ tfc_dir_2_str(tbl_info->dir),
+ tfc_idx_tbl_2_str(tbl_info->rsubtype),
+ strerror(-rc));
+
+ return rc;
+}
+
+int tfc_idx_tbl_alloc_set(struct tfc *tfcp, uint16_t fid,
+ enum cfa_track_type tt,
+ struct tfc_idx_tbl_info *tbl_info,
+ const uint32_t *data, uint8_t data_sz_in_bytes)
+{
+ int rc = 0;
+ struct bnxt *bp;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->bp == NULL || tfcp->tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp not initialized");
+ return -EINVAL;
+ }
+ bp = tfcp->bp;
+
+ if (tbl_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tbl_info is NULL");
+ return -EINVAL;
+ }
+
+ if (data == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid data pointer");
+ return -EINVAL;
+ }
+
+ if (tt >= CFA_TRACK_TYPE_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid track type: %d", tt);
+ return -EINVAL;
+ }
+
+ if (tbl_info->dir >= CFA_DIR_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid cfa dir: %d", tbl_info->dir);
+ return -EINVAL;
+ }
+
+ if (tbl_info->rsubtype >= CFA_RSUBTYPE_IDX_TBL_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid idx tbl subtype: %d",
+ tbl_info->rsubtype);
+ return -EINVAL;
+ }
+
+ if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "bp not PF or trusted VF");
+ return -EINVAL;
+ }
+
+ if (data_sz_in_bytes == 0) {
+ PMD_DRV_LOG_LINE(ERR, "Data size must be greater than zero");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_idx_tbl_alloc_set(tfcp, fid, sid, tt, tbl_info->dir,
+ tbl_info->rsubtype, data,
+ data_sz_in_bytes, &tbl_info->id);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "hwrm failed: %s:%s %s",
+ tfc_dir_2_str(tbl_info->dir),
+ tfc_idx_tbl_2_str(tbl_info->rsubtype),
+ strerror(-rc));
+
+ return rc;
+}
+
+int tfc_idx_tbl_set(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_idx_tbl_info *tbl_info,
+ const uint32_t *data, uint8_t data_sz_in_bytes)
+{
+ int rc = 0;
+ struct bnxt *bp;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->bp == NULL || tfcp->tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp not initialized");
+ return -EINVAL;
+ }
+
+ if (tbl_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tbl_info is NULL");
+ return -EINVAL;
+ }
+
+ if (tbl_info->dir >= CFA_DIR_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid cfa dir: %d", tbl_info->dir);
+ return -EINVAL;
+ }
+
+ if (tbl_info->rsubtype >= CFA_RSUBTYPE_IDX_TBL_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid idx tbl subtype: %d",
+ tbl_info->rsubtype);
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "bp not PF or trusted VF");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_idx_tbl_set(tfcp, fid, sid, tbl_info->dir,
+ tbl_info->rsubtype, tbl_info->id,
+ data, data_sz_in_bytes);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "hwrm failed: %s:%s %d %s",
+ tfc_dir_2_str(tbl_info->dir),
+ tfc_idx_tbl_2_str(tbl_info->rsubtype),
+ tbl_info->id, strerror(-rc));
+
+ return rc;
+}
+
+int tfc_idx_tbl_get(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_idx_tbl_info *tbl_info,
+ uint32_t *data, uint8_t *data_sz_in_bytes)
+{
+ int rc = 0;
+ struct bnxt *bp;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->bp == NULL || tfcp->tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp not initialized");
+ return -EINVAL;
+ }
+
+ if (tbl_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tbl_info is NULL");
+ return -EINVAL;
+ }
+
+ if (tbl_info->dir >= CFA_DIR_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid cfa dir: %d", tbl_info->dir);
+ return -EINVAL;
+ }
+
+ if (tbl_info->rsubtype >= CFA_RSUBTYPE_IDX_TBL_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid idx tbl subtype: %d",
+ tbl_info->rsubtype);
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "bp not PF or trusted VF");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_idx_tbl_get(tfcp, fid, sid, tbl_info->dir,
+ tbl_info->rsubtype, tbl_info->id,
+ data, data_sz_in_bytes);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "hwrm failed: %s:%s %d %s",
+ tfc_dir_2_str(tbl_info->dir),
+ tfc_idx_tbl_2_str(tbl_info->rsubtype),
+ tbl_info->id, strerror(-rc));
+ return rc;
+}
+
+int tfc_idx_tbl_free(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_idx_tbl_info *tbl_info)
+{
+ int rc = 0;
+ struct bnxt *bp;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->bp == NULL || tfcp->tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp not initialized");
+ return -EINVAL;
+ }
+
+ if (tbl_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tbl_info is NULL");
+ return -EINVAL;
+ }
+
+ if (tbl_info->dir >= CFA_DIR_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid cfa dir: %d", tbl_info->dir);
+ return -EINVAL;
+ }
+
+ if (tbl_info->rsubtype >= CFA_RSUBTYPE_IDX_TBL_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid idx tbl subtype: %d",
+ tbl_info->rsubtype);
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "bp not PF or trusted VF");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_idx_tbl_free(tfcp, fid, sid, tbl_info->dir,
+ tbl_info->rsubtype, tbl_info->id);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "hwrm failed: %s:%s %d %s",
+ tfc_dir_2_str(tbl_info->dir),
+ tfc_idx_tbl_2_str(tbl_info->rsubtype),
+ tbl_info->id, strerror(-rc));
+ return rc;
+}
new file mode 100644
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "rte_malloc.h"
+#include "hsi_struct_def_dpdk.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "tfc.h"
+#include "tfc_msg.h"
+#include "tfc_util.h"
+
+int tfc_if_tbl_set(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_if_tbl_info *tbl_info,
+ const uint8_t *data, uint8_t data_sz_in_bytes)
+{
+ int rc = 0;
+ struct bnxt *bp;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->bp == NULL || tfcp->tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp not initialized");
+ return -EINVAL;
+ }
+
+ if (tbl_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tbl_info is NULL");
+ return -EINVAL;
+ }
+
+ if (tbl_info->dir >= CFA_DIR_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid cfa dir: %d", tbl_info->dir);
+ return -EINVAL;
+ }
+
+ if (tbl_info->rsubtype >= CFA_RSUBTYPE_IF_TBL_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid if tbl subtype: %d",
+ tbl_info->rsubtype);
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "bp not PF or trusted VF");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_if_tbl_set(tfcp, fid, sid, tbl_info->dir,
+ tbl_info->rsubtype, tbl_info->id,
+ data_sz_in_bytes, data);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "hwrm failed: %s:%s %d %s",
+ tfc_dir_2_str(tbl_info->dir),
+ tfc_if_tbl_2_str(tbl_info->rsubtype),
+ tbl_info->id, strerror(-rc));
+
+ return rc;
+}
+
+int tfc_if_tbl_get(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_if_tbl_info *tbl_info,
+ uint8_t *data, uint8_t *data_sz_in_bytes)
+{
+ int rc = 0;
+ struct bnxt *bp;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->bp == NULL || tfcp->tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp not initialized");
+ return -EINVAL;
+ }
+
+ if (tbl_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tbl_info is NULL");
+ return -EINVAL;
+ }
+
+ if (tbl_info->dir >= CFA_DIR_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid cfa dir: %d", tbl_info->dir);
+ return -EINVAL;
+ }
+
+ if (tbl_info->rsubtype >= CFA_RSUBTYPE_IF_TBL_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid if tbl subtype: %d",
+ tbl_info->rsubtype);
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "bp not PF or trusted VF");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_if_tbl_get(tfcp, fid, sid, tbl_info->dir,
+ tbl_info->rsubtype, tbl_info->id,
+ data_sz_in_bytes, data);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "hwrm failed: %s:%s %d %s",
+ tfc_dir_2_str(tbl_info->dir),
+ tfc_if_tbl_2_str(tbl_info->rsubtype),
+ tbl_info->id, strerror(-rc));
+ return rc;
+}
new file mode 100644
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Broadcom
+ * All rights reserved.
+ */
+#include <stdio.h>
+#include "tfc.h"
+#include "tfo.h"
+#include "tfc_priv.h"
+#include "bnxt.h"
+#include "bnxt_ring.h"
+#include "bnxt_mpc.h"
+#include "cfa_bld_mpcops.h"
+#include "cfa_bld_devops.h"
+
+/*
+ * The tfc_open and tfc_close APIs may only be used for setting TFC software
+ * state. They are never used to modify the HW state. That is, they are not
+ * allowed to send HWRM messages.
+ */
+
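+/*
+ * Typical call order (illustrative): tfc_open() is called once when the TF
+ * core software state is brought up for a port, to allocate the tfo object;
+ * tfc_close() is called at teardown, after the session (SID) and any table
+ * scopes (TSIDs) tracked by the object have been released, otherwise the
+ * warnings below are logged.
+ */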
+int tfc_open(struct tfc *tfcp)
+{
+ int rc = 0;
+ bool is_pf;
+
+ /* Initialize the TF object */
+ if (tfcp->tfo) {
+ PMD_DRV_LOG_LINE(WARNING, "tfc_opened already");
+ return rc;
+ }
+ rc = tfc_bp_is_pf(tfcp, &is_pf);
+ if (rc)
+ return rc;
+ tfo_open(&tfcp->tfo, is_pf);
+
+ return rc;
+}
+
+int tfc_close(struct tfc *tfcp)
+{
+ int rc = 0;
+ uint16_t sid;
+ uint8_t tsid;
+ bool valid;
+
+ /* Nullify the TF object */
+ if (tfcp->tfo) {
+ if (tfo_sid_get(tfcp->tfo, &sid) == 0) {
+ /*
+ * If no error, then there is a valid SID which means
+ * that the FID is still associated with the SID.
+ */
+ PMD_DRV_LOG_LINE(WARNING, "There is still a session "
+ "associated with this object");
+ }
+
+ for (tsid = 0; tsid < TFC_TBL_SCOPE_MAX; tsid++) {
+ rc = tfo_ts_get(tfcp->tfo, tsid, NULL, NULL, &valid, NULL);
+ if (rc == 0 && valid) {
+ PMD_DRV_LOG_LINE(WARNING, "There is still a "
+ "tsid %d associated",
+ tsid);
+ }
+ }
+ tfo_close(&tfcp->tfo);
+ }
+
+ return rc;
+}
new file mode 100644
@@ -0,0 +1,565 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+#include <stdio.h>
+#include <inttypes.h>
+#include <math.h>
+
+#include "bnxt.h"
+#include "bnxt_mpc.h"
+
+#include "tfc.h"
+#include "cfa_bld_mpc_field_ids.h"
+#include "cfa_bld_mpcops.h"
+#include "tfo.h"
+#include "tfc_em.h"
+#include "tfc_cpm.h"
+#include "tfc_msg.h"
+#include "tfc_debug.h"
+#include "cfa_types.h"
+#include "cfa_mm.h"
+#include "sys_util.h"
+#include "cfa_bld.h"
+#include "tfc_util.h"
+
+int tfc_mpc_table_read(struct tfc *tfcp,
+ uint8_t tsid,
+ enum cfa_dir dir,
+ uint32_t type,
+ uint32_t offset,
+ uint8_t words,
+ uint8_t *data,
+ uint8_t debug)
+{
+ int rc = 0;
+ uint8_t tx_msg[TFC_MPC_MAX_TX_BYTES];
+ uint8_t rx_msg[TFC_MPC_MAX_RX_BYTES];
+ uint32_t msg_count = BNXT_MPC_COMP_MSG_COUNT;
+ int i;
+ uint32_t buff_len;
+ struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_READ_CMD_MAX_FLD];
+ struct cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_READ_CMP_MAX_FLD];
+ struct bnxt_mpc_mbuf mpc_msg_in;
+ struct bnxt_mpc_mbuf mpc_msg_out;
+ bool is_shared;
+ struct cfa_bld_mpcinfo *mpc_info;
+ uint64_t host_address;
+ uint8_t discard_data[128];
+ uint32_t set;
+ uint32_t way;
+ bool valid;
+
+ tfo_mpcinfo_get(tfcp->tfo, &mpc_info);
+
+ rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get tsid: %s", strerror(-rc));
+ return -EINVAL;
+ }
+ if (!valid) {
+ PMD_DRV_LOG_LINE(ERR, "tsid not allocated %d", tsid);
+ return -EINVAL;
+ }
+
+ /* Check that data pointer is word aligned */
+ if (((uint64_t)data) & 0x1fULL) {
+ PMD_DRV_LOG_LINE(ERR, "Table read data pointer not word aligned");
+ return -EINVAL;
+ }
+
+ host_address = (uint64_t)rte_mem_virt2iova(data);
+
+ /* Check that MPC APIs are bound */
+ if (mpc_info->mpcops == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "MPC not initialized");
+ return -EINVAL;
+ }
+
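+	/*
+	 * For debug reads the caller encodes the cache location in 'offset':
+	 * bits 10:0 carry the set index and bits 15:12 the way. These are
+	 * unpacked here and sent in place of the table scope and table index.
+	 */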
+ set = offset & 0x7ff;
+ way = (offset >> 12) & 0xf;
+
+	if (debug)
+		PMD_DRV_LOG_LINE(ERR,
+				 "Debug read table type:%s %d 32B words at way:%d set:%d debug:%d",
+				 (type == 0 ? "Lookup" : "Action"),
+				 words, way, set, debug);
+	else
+		PMD_DRV_LOG_LINE(ERR,
+				 "Reading table type:%s %d 32B words at offset %d (32B words)",
+				 (type == 0 ? "Lookup" : "Action"),
+				 words, offset);
+
+	/* Create MPC cache read command using builder */
+ for (i = 0; i < CFA_BLD_MPC_READ_CMD_MAX_FLD; i++)
+ fields_cmd[i].field_id = INVALID_U16;
+
+ fields_cmd[CFA_BLD_MPC_READ_CMD_OPAQUE_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_OPAQUE_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_OPAQUE_FLD].val = 0xAA;
+
+ fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].val = (type == 0 ?
+ CFA_BLD_MPC_HW_TABLE_TYPE_LOOKUP : CFA_BLD_MPC_HW_TABLE_TYPE_ACTION);
+
+ fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD].val =
+ (debug ? way : tsid);
+
+ fields_cmd[CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD].val = words;
+
+ fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD].val =
+ (debug ? set : offset);
+
+ fields_cmd[CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD].val = host_address;
+
+ if (debug) {
+ fields_cmd[CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD].field_id =
+ CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD;
+ fields_cmd[CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD].val = debug; /* Debug read */
+ }
+
+ buff_len = TFC_MPC_MAX_TX_BYTES;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_build_cache_read(tx_msg,
+ &buff_len,
+ fields_cmd);
+
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Action read build failed: %d", rc);
+ goto cleanup;
+ }
+
+ /* Send MPC */
+ mpc_msg_in.chnl_id = (dir == CFA_DIR_TX ?
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_TE_CFA :
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_RE_CFA);
+ mpc_msg_in.msg_data = &tx_msg[16];
+ mpc_msg_in.msg_size = 16;
+ mpc_msg_out.cmp_type = CMPL_BASE_TYPE_MID_PATH_SHORT;
+ mpc_msg_out.msg_data = &rx_msg[16];
+ mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES;
+
+ rc = tfc_mpc_send(tfcp->bp,
+ &mpc_msg_in,
+ &mpc_msg_out,
+ &msg_count,
+ TFC_MPC_TABLE_READ,
+ NULL);
+
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Table read MPC send failed: %d", rc);
+ goto cleanup;
+ }
+
+ /* Process response */
+ for (i = 0; i < CFA_BLD_MPC_READ_CMP_MAX_FLD; i++)
+ fields_cmp[i].field_id = INVALID_U16;
+
+ fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].field_id =
+ CFA_BLD_MPC_READ_CMP_STATUS_FLD;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_parse_cache_read(rx_msg,
+ mpc_msg_out.msg_size,
+ discard_data,
+ words * TFC_MPC_BYTES_PER_WORD,
+ fields_cmp);
+
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Table read parse failed: %d", rc);
+ goto cleanup;
+ }
+
+ if (fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK) {
+ PMD_DRV_LOG_LINE(ERR, "Table read failed with status code:%d",
+ (uint32_t)fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].val);
+ rc = -1;
+ goto cleanup;
+ }
+
+ return 0;
+
+ cleanup:
+
+ return rc;
+}
+
+int tfc_mpc_table_write_zero(struct tfc *tfcp,
+ uint8_t tsid,
+ enum cfa_dir dir,
+ uint32_t type,
+ uint32_t offset,
+ uint8_t words,
+ uint8_t *data)
+{
+ int rc = 0;
+ uint8_t tx_msg[TFC_MPC_MAX_TX_BYTES];
+ uint8_t rx_msg[TFC_MPC_MAX_RX_BYTES];
+ uint32_t msg_count = BNXT_MPC_COMP_MSG_COUNT;
+ int i;
+ uint32_t buff_len;
+ struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_WRITE_CMD_MAX_FLD];
+ struct cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_WRITE_CMP_MAX_FLD];
+ struct bnxt_mpc_mbuf mpc_msg_in;
+ struct bnxt_mpc_mbuf mpc_msg_out;
+ struct cfa_bld_mpcinfo *mpc_info;
+ bool is_shared;
+ bool valid;
+
+ tfo_mpcinfo_get(tfcp->tfo, &mpc_info);
+
+ rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get tsid: %s", strerror(-rc));
+ return -EINVAL;
+ }
+ if (!valid) {
+ PMD_DRV_LOG_LINE(ERR, "tsid not allocated %d", tsid);
+ return -EINVAL;
+ }
+ /* Check that MPC APIs are bound */
+ if (mpc_info->mpcops == NULL) {
+ PMD_DRV_LOG_LINE(ERR, " MPC not initialized");
+ return -EINVAL;
+ }
+
+	/* Create MPC cache write command using builder */
+ for (i = 0; i < CFA_BLD_MPC_WRITE_CMD_MAX_FLD; i++)
+ fields_cmd[i].field_id = INVALID_U16;
+
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD].field_id =
+ CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD].val = 0xAA;
+
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD].field_id =
+ CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD].val = (type == 0 ?
+ CFA_BLD_MPC_HW_TABLE_TYPE_LOOKUP : CFA_BLD_MPC_HW_TABLE_TYPE_ACTION);
+
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD].field_id =
+ CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD].val = tsid;
+
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD].field_id =
+ CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD].val = words;
+
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD].field_id =
+ CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD;
+ fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD].val = offset;
+
+ buff_len = TFC_MPC_MAX_TX_BYTES;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_build_cache_write(tx_msg,
+ &buff_len,
+ data,
+ fields_cmd);
+
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "write build failed: %d", rc);
+ goto cleanup;
+ }
+
+ /* Send MPC */
+ mpc_msg_in.chnl_id = (dir == CFA_DIR_TX ?
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_TE_CFA :
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_RE_CFA);
+ mpc_msg_in.msg_data = &tx_msg[16];
+ mpc_msg_in.msg_size = (words * TFC_MPC_BYTES_PER_WORD) + 16;
+ mpc_msg_out.cmp_type = CMPL_BASE_TYPE_MID_PATH_SHORT;
+ mpc_msg_out.msg_data = &rx_msg[16];
+ mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES;
+
+ rc = tfc_mpc_send(tfcp->bp,
+ &mpc_msg_in,
+ &mpc_msg_out,
+ &msg_count,
+ TFC_MPC_TABLE_WRITE,
+ NULL);
+
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "write MPC send failed: %d", rc);
+ goto cleanup;
+ }
+
+ /* Process response */
+ for (i = 0; i < CFA_BLD_MPC_WRITE_CMP_MAX_FLD; i++)
+ fields_cmp[i].field_id = INVALID_U16;
+
+ fields_cmp[CFA_BLD_MPC_WRITE_CMP_STATUS_FLD].field_id =
+ CFA_BLD_MPC_WRITE_CMP_STATUS_FLD;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_parse_cache_write(rx_msg,
+ mpc_msg_out.msg_size,
+ fields_cmp);
+
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "write parse failed: %d", rc);
+ goto cleanup;
+ }
+
+	if (fields_cmp[CFA_BLD_MPC_WRITE_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK) {
+		PMD_DRV_LOG_LINE(ERR, "Table write failed with status code:%d",
+				 (uint32_t)fields_cmp[CFA_BLD_MPC_WRITE_CMP_STATUS_FLD].val);
+		PMD_DRV_LOG_LINE(ERR, "Hash MSB:0x%08x",
+				 (uint32_t)fields_cmp[CFA_BLD_MPC_WRITE_CMP_HASH_MSB_FLD].val);
+		rc = -1;
+		goto cleanup;
+	}
+
+ return 0;
+
+ cleanup:
+
+ return rc;
+}
+
+int tfc_mpc_table_invalidate(struct tfc *tfcp,
+ uint8_t tsid,
+ enum cfa_dir dir,
+ uint32_t type,
+ uint32_t offset,
+ uint32_t words)
+{
+ int rc = 0;
+ uint8_t tx_msg[TFC_MPC_MAX_TX_BYTES];
+ uint8_t rx_msg[TFC_MPC_MAX_RX_BYTES];
+ uint32_t msg_count = BNXT_MPC_COMP_MSG_COUNT;
+ int i;
+ uint32_t buff_len;
+ struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_INVALIDATE_CMD_MAX_FLD];
+ struct cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_INVALIDATE_CMP_MAX_FLD];
+ struct bnxt_mpc_mbuf mpc_msg_in;
+ struct bnxt_mpc_mbuf mpc_msg_out;
+ struct cfa_bld_mpcinfo *mpc_info;
+ bool is_shared;
+ bool valid;
+
+ tfo_mpcinfo_get(tfcp->tfo, &mpc_info);
+
+ rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "failed to get tsid: %s", strerror(-rc));
+ return -EINVAL;
+ }
+ if (!valid) {
+ PMD_DRV_LOG_LINE(ERR, "tsid not allocated %d", tsid);
+ return -EINVAL;
+ }
+ /* Check that MPC APIs are bound */
+ if (mpc_info->mpcops == NULL) {
+ PMD_DRV_LOG_LINE(ERR, " MPC not initialized");
+ return -EINVAL;
+ }
+
+	/* Create MPC cache evict (invalidate) command using builder */
+ for (i = 0; i < CFA_BLD_MPC_INVALIDATE_CMD_MAX_FLD; i++)
+ fields_cmd[i].field_id = INVALID_U16;
+
+ fields_cmd[CFA_BLD_MPC_INVALIDATE_CMD_OPAQUE_FLD].field_id =
+ CFA_BLD_MPC_INVALIDATE_CMD_OPAQUE_FLD;
+ fields_cmd[CFA_BLD_MPC_INVALIDATE_CMD_OPAQUE_FLD].val = 0xAA;
+
+ fields_cmd[CFA_BLD_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD].field_id =
+ CFA_BLD_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD;
+ fields_cmd[CFA_BLD_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD].val = (type == 0 ?
+ CFA_BLD_MPC_HW_TABLE_TYPE_LOOKUP : CFA_BLD_MPC_HW_TABLE_TYPE_ACTION);
+
+ fields_cmd[CFA_BLD_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD].field_id =
+ CFA_BLD_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD;
+ fields_cmd[CFA_BLD_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD].val = tsid;
+
+ fields_cmd[CFA_BLD_MPC_INVALIDATE_CMD_DATA_SIZE_FLD].field_id =
+ CFA_BLD_MPC_INVALIDATE_CMD_DATA_SIZE_FLD;
+ fields_cmd[CFA_BLD_MPC_INVALIDATE_CMD_DATA_SIZE_FLD].val = words;
+
+ fields_cmd[CFA_BLD_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD].field_id =
+ CFA_BLD_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD;
+ fields_cmd[CFA_BLD_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD].val = offset;
+
+ fields_cmd[CFA_BLD_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD].field_id =
+ CFA_BLD_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD;
+ fields_cmd[CFA_BLD_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD].val =
+ CFA_BLD_MPC_EV_EVICT_SCOPE_ADDRESS;
+
+ buff_len = TFC_MPC_MAX_TX_BYTES;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_build_cache_evict(tx_msg,
+ &buff_len,
+ fields_cmd);
+
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "evict build failed: %d", rc);
+ goto cleanup;
+ }
+
+ /* Send MPC */
+ mpc_msg_in.chnl_id = (dir == CFA_DIR_TX ?
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_TE_CFA :
+ HWRM_RING_ALLOC_INPUT_MPC_CHNLS_TYPE_RE_CFA);
+ mpc_msg_in.msg_data = &tx_msg[16];
+ mpc_msg_in.msg_size = 16;
+ mpc_msg_out.cmp_type = CMPL_BASE_TYPE_MID_PATH_SHORT;
+ mpc_msg_out.msg_data = &rx_msg[16];
+ mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES;
+
+ rc = tfc_mpc_send(tfcp->bp,
+ &mpc_msg_in,
+ &mpc_msg_out,
+ &msg_count,
+ TFC_MPC_INVALIDATE,
+ NULL);
+
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "write MPC send failed: %d", rc);
+ goto cleanup;
+ }
+
+ /* Process response */
+ for (i = 0; i < CFA_BLD_MPC_INVALIDATE_CMP_MAX_FLD; i++)
+ fields_cmp[i].field_id = INVALID_U16;
+
+ fields_cmp[CFA_BLD_MPC_INVALIDATE_CMP_STATUS_FLD].field_id =
+ CFA_BLD_MPC_INVALIDATE_CMP_STATUS_FLD;
+
+ rc = mpc_info->mpcops->cfa_bld_mpc_parse_cache_evict(rx_msg,
+ mpc_msg_out.msg_size,
+ fields_cmp);
+
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "evict parse failed: %d", rc);
+ goto cleanup;
+ }
+
+	if (fields_cmp[CFA_BLD_MPC_INVALIDATE_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK) {
+		PMD_DRV_LOG_LINE(ERR, "evict failed with status code:%d",
+				 (uint32_t)fields_cmp[CFA_BLD_MPC_INVALIDATE_CMP_STATUS_FLD].val);
+		PMD_DRV_LOG_LINE(ERR, "Hash MSB:0x%08x",
+				 (uint32_t)fields_cmp[CFA_BLD_MPC_INVALIDATE_CMP_HASH_MSB_FLD].val);
+		rc = -1;
+		goto cleanup;
+	}
+
+ return 0;
+
+ cleanup:
+
+ return rc;
+}
+
+#define TFC_ACTION_SIZE_BYTES 32
+#define TFC_BUCKET_SIZE_BYTES 32
+
+struct act_full_info_t {
+ bool drop;
+ uint8_t vlan_del_rep;
+ uint8_t dest_op;
+ uint16_t vnic_vport;
+ uint8_t decap_func;
+ uint16_t mirror;
+ uint16_t meter_ptr;
+ uint8_t stat0_ctr_type;
+ bool stat0_ing_egr;
+ uint32_t stat0_ptr;
+ uint8_t stat1_ctr_type;
+ bool stat1_ing_egr;
+ uint32_t stat1_ptr;
+ uint32_t mod_ptr;
+ uint32_t enc_ptr;
+ uint32_t src_ptr;
+ char mod_str[512];
+};
+
+struct act_mcg_info_t {
+ uint8_t src_ko_en;
+ uint32_t nxt_ptr;
+ uint8_t act_hint0;
+ uint32_t act_rec_ptr0;
+ uint8_t act_hint1;
+ uint32_t act_rec_ptr1;
+ uint8_t act_hint2;
+ uint32_t act_rec_ptr2;
+ uint8_t act_hint3;
+ uint32_t act_rec_ptr3;
+ uint8_t act_hint4;
+ uint32_t act_rec_ptr4;
+ uint8_t act_hint5;
+ uint32_t act_rec_ptr5;
+ uint8_t act_hint6;
+ uint32_t act_rec_ptr6;
+ uint8_t act_hint7;
+ uint32_t act_rec_ptr7;
+};
+
+struct act_info_t {
+ bool valid;
+ uint8_t vector;
+ union {
+ struct act_full_info_t full;
+ struct act_mcg_info_t mcg;
+ };
+};
+
+struct mod_field_s {
+ uint8_t num_bits;
+ const char *name;
+};
+
+struct mod_data_s {
+ uint8_t num_fields;
+ const char *name;
+ struct mod_field_s field[4];
+};
+
+struct mod_data_s mod_data[] = {
+ {1, "Replace:", {{16, "DPort"} } },
+ {1, "Replace:", {{16, "SPort"} } },
+ {1, "Replace:", {{32, "IPv4 DIP"} } },
+ {1, "Replace:", {{32, "IPv4 SIP"} } },
+ {1, "Replace:", {{128, "IPv6 DIP"} } },
+ {1, "Replace:", {{128, "IPv6 SIP"} } },
+ {1, "Replace:", {{48, "SMAC"} } },
+ {1, "Replace:", {{48, "DMAC"} } },
+ {2, "Update Field:", {{16, "uf_vec"}, {32, "uf_data"} } },
+ {3, "Tunnel Modify:", {{16, "tun_mv"}, {16, "tun_ex_prot"}, {16, "tun_new_prot"} } },
+ {3, "TTL Update:", {{5, "alt_pfid"}, {12, "alt_vid"}, {5, "ttl_op"} } },
+ {4, "Replace/Add Outer VLAN:", {{16, "tpid"}, {3, "pri"}, {1, "de"}, {12, "vid"} } },
+ {4, "Replace/Add Inner:", {{16, "tpid"}, {3, "pri"}, {1, "de"}, {12, "vid"} } },
+ {0, "Remove outer VLAN:", {{0, NULL} } },
+ {0, "Remove inner VLAN:", {{0, NULL} } },
+ {4, "Metadata Update:", {{2, "md_op"}, {4, "md_prof"}, {10, "rsvd"}, {32, "md_data"} } },
+};
+
+struct stat_fields_s {
+ uint64_t pkt_cnt;
+ uint64_t byte_cnt;
+ union {
+ struct {
+ uint32_t timestamp __rte_packed;
+ uint16_t tcp_flags __rte_packed;
+ } c_24b;
+ struct {
+ uint64_t meter_pkt_cnt;
+ uint64_t meter_byte_cnt;
+ } c_32b;
+ struct {
+ uint64_t timestamp:32 __rte_packed;
+ uint64_t tcp_flags:16 __rte_packed;
+ uint64_t meter_pkt_cnt:38 __rte_packed;
+ uint64_t meter_byte_cnt:42 __rte_packed;
+ } c_32b_all;
+ } t;
+};
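+
+/*
+ * Note: the union above covers the alternative tails of a stats record on top
+ * of the common packet/byte counters: timestamp plus TCP flags (c_24b), meter
+ * packet/byte counters (c_32b), or all of them packed into bitfields
+ * (c_32b_all).
+ */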
+
+#define STATS_COMMON_FMT \
+	"\tPkt count : 0x%016" PRIx64 ", Byte count : 0x%016" PRIx64 "\n"
+#define STATS_METER_FMT \
+ "\tMeter pkt cnt: 0x%016" PRIx64 ", Meter byte cnt: 0x%016" PRIx64 "\n"
+#define STATS_TCP_FLAGS_FMT \
+ "\tTCP flags : 0x%04x, timestamp : 0x%08x\n"
new file mode 100644
@@ -0,0 +1,1197 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Broadcom
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "rte_malloc.h"
+#include "tfc_msg.h"
+#include "tf_msg_common.h"
+#include "hsi_struct_def_dpdk.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "tfo.h"
+
+/* Logging defines */
+#define TFC_RM_MSG_DEBUG 0
+
+#define CFA_INVALID_FID UINT16_MAX
+
+/**
+ * This is the MAX data we can transport across regular HWRM
+ */
+#define TFC_PCI_BUF_SIZE_MAX 80
+/**
+ * If data bigger than TFC_PCI_BUF_SIZE_MAX then use DMA method
+ */
+struct tfc_msg_dma_buf {
+ void *va_addr;
+ uint64_t pa_addr;
+};
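+
+/*
+ * Usage sketch (mirrors the message helpers below): callers compare the
+ * payload size against the inline capacity (TFC_PCI_BUF_SIZE_MAX or
+ * sizeof(req.dev_data), depending on the message) and either place the data
+ * directly in the request or allocate a DMA buffer and pass its IOVA:
+ *
+ *   if (data_size > TFC_PCI_BUF_SIZE_MAX) {
+ *       rc = tfc_msg_alloc_dma_buf(&buf, data_size);    // DMA path
+ *       memcpy(&req.dma_addr, &buf.pa_addr, sizeof(buf.pa_addr));
+ *   } else {
+ *       data = &req.dev_data[0];                        // inline path
+ *   }
+ */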
+
+static int tfc_msg_set_fid(struct bnxt *bp, uint16_t req_fid, uint16_t *msg_fid)
+{
+ /*
+ * Set request FID to 0xffff in case the request FID is the same as the
+ * target FID (bp->fw_fid), or if this is a PF. If we're on a TVF, then
+ * set the FID to the requested FID.
+ *
+ * The firmware validates the FID and accepts/rejects the request based
+ * on these rules:
+ *
+ * 1. (request_fid == 0xffff), final_fid = target_fid, accept
+ * 2. IS_PF(request_fid):
+ * reject, Only (1) above is allowed
+ * 3. IS_PF(target_fid) && IS_VF(request_fid):
+ * if(target_fid == parent_of(request_fid)) accept, else reject
+ * 4. IS_VF(target_fid) && IS_VF(request_fid):
+ * if(parent_of(target_fid) == parent_of(request_fid)) accept, else reject
+ *
+ * Note: for cases 2..4, final_fid = request_fid
+ */
+ if (bp->fw_fid == req_fid || BNXT_PF(bp))
+ *msg_fid = CFA_INVALID_FID;
+ else if (BNXT_VF_IS_TRUSTED(bp))
+ *msg_fid = rte_cpu_to_le_16(req_fid);
+ else
+ return -EINVAL;
+ return 0;
+}
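+
+/*
+ * Example of the mapping above (illustrative): a PF, or any function
+ * requesting on its own behalf (req_fid == bp->fw_fid), sends 0xffff and the
+ * firmware substitutes the target FID; a trusted VF requesting on behalf of
+ * another FID passes that FID through; any other requester asking for a
+ * foreign FID is rejected with -EINVAL.
+ */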
+
+/**
+ * Allocates a DMA buffer that can be used for message transfer.
+ *
+ * [in] buf
+ * Pointer to DMA buffer structure
+ *
+ * [in] size
+ * Requested size of the buffer in bytes
+ *
+ * Returns:
+ * 0 - Success
+ * -ENOMEM - Unable to allocate buffer, no memory
+ */
+static int
+tfc_msg_alloc_dma_buf(struct tfc_msg_dma_buf *buf, int size)
+{
+ /* Allocate session */
+ buf->va_addr = rte_zmalloc("tfc_msg_dma_buf", size, 4096);
+ if (buf->va_addr == NULL)
+ return -ENOMEM;
+
+ buf->pa_addr = rte_mem_virt2iova((void *)(uintptr_t)buf->va_addr);
+ if (buf->pa_addr == RTE_BAD_IOVA) {
+ rte_free(buf->va_addr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * Frees a previously allocated DMA buffer.
+ *
+ * [in] buf
+ * Pointer to DMA buffer structure
+ */
+static void
+tfc_msg_free_dma_buf(struct tfc_msg_dma_buf *buf)
+{
+ rte_free(buf->va_addr);
+}
+
+/* HWRM Direct messages */
+
+int
+tfc_msg_tbl_scope_qcaps(struct tfc *tfcp,
+ bool *tbl_scope_capable,
+ uint32_t *max_lkup_rec_cnt,
+ uint32_t *max_act_rec_cnt,
+ uint8_t *max_lkup_static_buckets_exp)
+{
+ struct hwrm_tfc_tbl_scope_qcaps_input req = { 0 };
+ struct hwrm_tfc_tbl_scope_qcaps_output resp = { 0 };
+ struct bnxt *bp;
+ int rc;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tbl_scope_capable == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tbl_scope_capable pointer");
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ *tbl_scope_capable = false;
+
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_TBL_SCOPE_QCAPS,
+ &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (rc)
+ return rc;
+
+ if (resp.tbl_scope_capable) {
+ *tbl_scope_capable = true;
+ if (max_lkup_rec_cnt)
+ *max_lkup_rec_cnt =
+ rte_le_to_cpu_32(resp.max_lkup_rec_cnt);
+ if (max_act_rec_cnt)
+ *max_act_rec_cnt =
+ rte_le_to_cpu_32(resp.max_act_rec_cnt);
+ if (max_lkup_static_buckets_exp)
+ *max_lkup_static_buckets_exp =
+ resp.max_lkup_static_buckets_exp;
+ }
+
+ return rc;
+}
+int
+tfc_msg_tbl_scope_id_alloc(struct tfc *tfcp, uint16_t fid,
+ bool shared, enum cfa_app_type app_type,
+ uint8_t *tsid,
+ bool *first)
+{
+ struct hwrm_tfc_tbl_scope_id_alloc_input req = { 0 };
+ struct hwrm_tfc_tbl_scope_id_alloc_output resp = { 0 };
+ struct bnxt *bp;
+ int rc;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tsid == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tsid pointer");
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ req.app_type = app_type;
+ req.shared = shared;
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_TBL_SCOPE_ID_ALLOC,
+ &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (rc == 0) {
+ *tsid = resp.tsid;
+ if (first) {
+ if (resp.first)
+ *first = true;
+ else
+ *first = false;
+ }
+ }
+
+ return rc;
+}
+
+#define RE_LKUP 0
+#define RE_ACT 1
+#define TE_LKUP 2
+#define TE_ACT 3
+/**
+ * Given the direction and the region return the backing store cfg instance
+ */
+static int tfc_tbl_scope_region_dir_to_inst(enum cfa_region_type region,
+ enum cfa_dir dir,
+ uint16_t *instance)
+{
+ if (!instance) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+ switch (region) {
+ case CFA_REGION_TYPE_LKUP:
+ if (dir == CFA_DIR_RX)
+ *instance = RE_LKUP;
+ else
+ *instance = TE_LKUP;
+ break;
+ case CFA_REGION_TYPE_ACT:
+ if (dir == CFA_DIR_RX)
+ *instance = RE_ACT;
+ else
+ *instance = TE_ACT;
+ break;
+ default:
+ PMD_DRV_LOG_LINE(ERR, "Invalid region");
+ return -EINVAL;
+ }
+ return 0;
+}
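+
+/*
+ * For reference, the resulting backing store instances are:
+ *   RX lookup -> RE_LKUP (0), RX action -> RE_ACT (1),
+ *   TX lookup -> TE_LKUP (2), TX action -> TE_ACT (3).
+ */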
+
+/**
+ * Given the page_sz_bytes and pbl_level, encode the pg_sz_pbl_level
+ */
+static int tfc_tbl_scope_pg_sz_pbl_level_encode(uint32_t page_sz_in_bytes,
+ uint8_t pbl_level,
+ uint8_t *page_sz_pbl_level)
+{
+ uint8_t page_sz;
+ int rc = 0;
+
+ switch (page_sz_in_bytes) {
+ case 0x1000: /* 4K */
+ page_sz = HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_PAGE_SIZE_PG_4K;
+ break;
+ case 0x2000: /* 8K */
+ page_sz = HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_PAGE_SIZE_PG_8K;
+ break;
+ case 0x10000: /* 64K */
+ page_sz = HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_PAGE_SIZE_PG_64K;
+ break;
+ case 0x200000: /* 2M */
+ page_sz = HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_PAGE_SIZE_PG_2M;
+ break;
+ case 0x40000000: /* 1G */
+ page_sz = HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_PAGE_SIZE_PG_1G;
+ break;
+ default:
+ PMD_DRV_LOG_LINE(ERR, "Unsupported page size (0x%x)",
+ page_sz_in_bytes);
+ return -EINVAL;
+ }
+ /* Page size value is already shifted */
+ *page_sz_pbl_level = page_sz;
+ if (pbl_level > 2) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid pbl_level(%d)", pbl_level);
+ return -EINVAL;
+ }
+ *page_sz_pbl_level |= pbl_level;
+ return rc;
+}
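+
+/*
+ * Example (illustrative): 2 MB backing-store pages with a one-level PBL are
+ * encoded by ORing the pre-shifted ..._PAGE_SIZE_PG_2M value with
+ * pbl_level = 1 into the single page_sz_pbl_level byte.
+ */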
+
+
+int
+tfc_msg_backing_store_cfg_v2(struct tfc *tfcp, uint8_t tsid, enum cfa_dir dir,
+ enum cfa_region_type region, uint64_t base_addr,
+ uint8_t pbl_level, uint32_t pbl_page_sz_in_bytes,
+ uint32_t rec_cnt, uint8_t static_bkt_cnt_exp,
+ bool cfg_done)
+{
+ struct hwrm_func_backing_store_cfg_v2_input req = { 0 };
+	struct hwrm_func_backing_store_cfg_v2_output resp = { 0 };
+ struct bnxt *bp;
+ int rc;
+ struct ts_split_entries *ts_sp;
+
+ ts_sp = (struct ts_split_entries *)&req.split_entry_0;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+ bp = tfcp->bp;
+ ts_sp->tsid = tsid;
+ ts_sp->lkup_static_bkt_cnt_exp[dir] = static_bkt_cnt_exp;
+ ts_sp->region_num_entries = rec_cnt;
+ if (cfg_done)
+ req.flags |= HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_FLAGS_BS_CFG_ALL_DONE;
+
+ rc = tfc_tbl_scope_region_dir_to_inst(region, dir, &req.instance);
+ if (rc)
+ return rc;
+
+ req.page_dir = rte_cpu_to_le_64(base_addr);
+ req.num_entries = rte_cpu_to_le_32(rec_cnt);
+
+ req.type = HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TBL_SCOPE;
+
+ rc = tfc_tbl_scope_pg_sz_pbl_level_encode(pbl_page_sz_in_bytes,
+ pbl_level,
+ &req.page_size_pbl_level);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_FUNC_BACKING_STORE_CFG_V2,
+ &req, sizeof(req), &resp,
+ sizeof(resp));
+ return rc;
+}
+
+int
+tfc_msg_tbl_scope_deconfig(struct tfc *tfcp, uint8_t tsid)
+{
+ struct hwrm_tfc_tbl_scope_deconfig_input req = { 0 };
+ struct hwrm_tfc_tbl_scope_deconfig_output resp = { 0 };
+ struct bnxt *bp;
+ int rc;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ req.tsid = tsid;
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_TBL_SCOPE_DECONFIG,
+ &req, sizeof(req), &resp,
+ sizeof(resp));
+
+ return rc;
+}
+
+int
+tfc_msg_tbl_scope_fid_add(struct tfc *tfcp, uint16_t fid,
+ uint8_t tsid, uint16_t *fid_cnt)
+{
+ struct hwrm_tfc_tbl_scope_fid_add_input req = { 0 };
+ struct hwrm_tfc_tbl_scope_fid_add_output resp = { 0 };
+ struct bnxt *bp;
+ int rc;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.tsid = tsid;
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_TBL_SCOPE_FID_ADD,
+ &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (rc == 0)
+ if (fid_cnt)
+ *fid_cnt = rte_le_to_cpu_16(resp.fid_cnt);
+
+ return rc;
+}
+
+int
+tfc_msg_tbl_scope_fid_rem(struct tfc *tfcp, uint16_t fid,
+ uint8_t tsid, uint16_t *fid_cnt)
+{
+ struct hwrm_tfc_tbl_scope_fid_rem_input req = { 0 };
+ struct hwrm_tfc_tbl_scope_fid_rem_output resp = { 0 };
+ struct bnxt *bp;
+ int rc;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.tsid = tsid;
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_TBL_SCOPE_FID_REM,
+ &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (rc == 0)
+ if (fid_cnt)
+ *fid_cnt = rte_le_to_cpu_16(resp.fid_cnt);
+
+ return rc;
+}
+
+int
+tfc_msg_idx_tbl_alloc(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_track_type tt, enum cfa_dir dir,
+ enum cfa_resource_subtype_idx_tbl subtype,
+ uint16_t *id)
+
+{
+ int rc = 0;
+ struct bnxt *bp = tfcp->bp;
+ struct hwrm_tfc_idx_tbl_alloc_input req = { 0 };
+ struct hwrm_tfc_idx_tbl_alloc_output resp = { 0 };
+
+ if (dir == CFA_DIR_RX)
+ req.flags |= HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR_RX &
+ HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR;
+ else
+ req.flags |= HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR_TX &
+ HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR;
+
+ if (tt == CFA_TRACK_TYPE_FID)
+ req.track_type = HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID;
+ else
+ req.track_type = HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_SID;
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_le_to_cpu_16(sid);
+ req.subtype = rte_le_to_cpu_16(subtype);
+
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_IDX_TBL_ALLOC,
+ &req, sizeof(req), &resp, sizeof(resp));
+
+ if (rc == 0)
+ *id = rte_cpu_to_le_16(resp.idx_tbl_id);
+
+ return rc;
+}
+
+int
+tfc_msg_idx_tbl_alloc_set(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_track_type tt, enum cfa_dir dir,
+ enum cfa_resource_subtype_idx_tbl subtype,
+ const uint32_t *dev_data, uint8_t data_size,
+ uint16_t *id)
+
+{
+ int rc = 0;
+ struct bnxt *bp = tfcp->bp;
+ struct hwrm_tfc_idx_tbl_alloc_set_input req = { 0 };
+ struct hwrm_tfc_idx_tbl_alloc_set_output resp = { 0 };
+ struct tfc_msg_dma_buf buf = { 0 };
+ uint8_t *data = NULL;
+
+ if (dir == CFA_DIR_RX)
+ req.flags |= HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR_RX &
+ HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR;
+ else
+ req.flags |= HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR_TX &
+ HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DIR;
+
+ if (tt == CFA_TRACK_TYPE_FID)
+ req.track_type = HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID;
+ else
+ req.track_type = HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_SID;
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_le_to_cpu_16(sid);
+ req.subtype = rte_le_to_cpu_16(subtype);
+ req.data_size = rte_le_to_cpu_16(data_size);
+
+ if (req.data_size >= sizeof(req.dev_data)) {
+ req.flags |= HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DMA;
+ rc = tfc_msg_alloc_dma_buf(&buf, data_size);
+ if (rc)
+ goto cleanup;
+ data = buf.va_addr;
+ memcpy(&req.dma_addr, &buf.pa_addr, sizeof(buf.pa_addr));
+ } else {
+ data = &req.dev_data[0];
+ }
+
+ memcpy(&data[0], &dev_data[0], req.data_size);
+
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_IDX_TBL_ALLOC_SET,
+ &req, sizeof(req), &resp, sizeof(resp));
+
+ if (rc == 0)
+ *id = rte_cpu_to_le_16(resp.idx_tbl_id);
+
+cleanup:
+ tfc_msg_free_dma_buf(&buf);
+
+ return rc;
+}
+
+int
+tfc_msg_idx_tbl_set(struct tfc *tfcp, uint16_t fid,
+ uint16_t sid, enum cfa_dir dir,
+ enum cfa_resource_subtype_idx_tbl subtype, uint16_t id,
+ const uint32_t *dev_data, uint8_t data_size)
+{
+ int rc = 0;
+ struct bnxt *bp = tfcp->bp;
+ struct hwrm_tfc_idx_tbl_set_input req = { 0 };
+ struct hwrm_tfc_idx_tbl_set_output resp = { 0 };
+ struct tfc_msg_dma_buf buf = { 0 };
+ uint8_t *data = NULL;
+
+ if (dir == CFA_DIR_RX)
+ req.flags |= HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR_RX &
+ HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR;
+ else
+ req.flags |= HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR_TX &
+ HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DIR;
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_le_to_cpu_16(sid);
+ req.idx_tbl_id = rte_le_to_cpu_16(id);
+ req.subtype = rte_le_to_cpu_16(subtype);
+ req.data_size = rte_le_to_cpu_16(data_size);
+
+ if (req.data_size >= sizeof(req.dev_data)) {
+ req.flags |= HWRM_TFC_IDX_TBL_SET_INPUT_FLAGS_DMA;
+ rc = tfc_msg_alloc_dma_buf(&buf, data_size);
+ if (rc)
+ goto cleanup;
+ data = buf.va_addr;
+ memcpy(&req.dma_addr, &buf.pa_addr, sizeof(buf.pa_addr));
+ } else {
+ data = &req.dev_data[0];
+ }
+
+ memcpy(&data[0], &dev_data[0], req.data_size);
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_IDX_TBL_SET,
+ &req, sizeof(req), &resp, sizeof(resp));
+cleanup:
+ tfc_msg_free_dma_buf(&buf);
+
+ return rc;
+}
+
+int
+tfc_msg_idx_tbl_get(struct tfc *tfcp, uint16_t fid,
+ uint16_t sid, enum cfa_dir dir,
+ enum cfa_resource_subtype_idx_tbl subtype, uint16_t id,
+ uint32_t *dev_data, uint8_t *data_size)
+{
+ int rc = 0;
+ struct bnxt *bp = tfcp->bp;
+ struct hwrm_tfc_idx_tbl_get_input req = { 0 };
+ struct hwrm_tfc_idx_tbl_get_output resp = { 0 };
+ struct tfc_msg_dma_buf buf = { 0 };
+
+ if (dir == CFA_DIR_RX)
+ req.flags |= HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR_RX &
+ HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR;
+ else
+ req.flags |= HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR_TX &
+ HWRM_TFC_IDX_TBL_GET_INPUT_FLAGS_DIR;
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_cpu_to_le_16(sid);
+ req.idx_tbl_id = rte_cpu_to_le_16(id);
+ req.subtype = rte_cpu_to_le_16(subtype);
+ req.buffer_size = rte_cpu_to_le_16(*data_size);
+
+ rc = tfc_msg_alloc_dma_buf(&buf, *data_size);
+ if (rc)
+ goto cleanup;
+
+ memcpy(&req.dma_addr, &buf.pa_addr, sizeof(buf.pa_addr));
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_IDX_TBL_GET,
+ &req, sizeof(req), &resp, sizeof(resp));
+
+ if (rc == 0) {
+ memcpy(dev_data, buf.va_addr, resp.data_size);
+ *data_size = rte_le_to_cpu_16(resp.data_size);
+ }
+
+cleanup:
+ tfc_msg_free_dma_buf(&buf);
+
+ return rc;
+}
+
+int
+tfc_msg_idx_tbl_free(struct tfc *tfcp, uint16_t fid,
+ uint16_t sid, enum cfa_dir dir,
+ enum cfa_resource_subtype_idx_tbl subtype, uint16_t id)
+{
+ struct bnxt *bp = tfcp->bp;
+ struct hwrm_tfc_idx_tbl_free_input req = { 0 };
+ struct hwrm_tfc_idx_tbl_free_output resp = { 0 };
+ int rc = 0;
+
+ if (dir == CFA_DIR_RX)
+ req.flags |= HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR_RX &
+ HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR;
+ else
+ req.flags |= HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR_TX &
+ HWRM_TFC_IDX_TBL_FREE_INPUT_FLAGS_DIR;
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_cpu_to_le_16(sid);
+ req.idx_tbl_id = rte_cpu_to_le_16(id);
+ req.subtype = rte_cpu_to_le_16(subtype);
+
+ return bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_IDX_TBL_FREE,
+ &req, sizeof(req), &resp, sizeof(resp));
+}
+
+int tfc_msg_global_id_alloc(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum tfc_domain_id domain_id, uint16_t req_cnt,
+ const struct tfc_global_id_req *glb_id_req,
+ struct tfc_global_id *rsp, uint16_t *rsp_cnt,
+ bool *first)
+{
+ int rc = 0;
+ int i = 0;
+ struct bnxt *bp = tfcp->bp;
+	struct hwrm_tfc_global_id_alloc_input hwrm_req = { 0 };
+	struct hwrm_tfc_global_id_alloc_output hwrm_resp = { 0 };
+ struct tfc_global_id_hwrm_req *req_data;
+ struct tfc_global_id_hwrm_rsp *rsp_data;
+ struct tfc_msg_dma_buf req_buf = { 0 };
+ struct tfc_msg_dma_buf rsp_buf = { 0 };
+ int dma_size;
+ int resp_cnt = 0;
+
+ /* Prepare DMA buffers */
+ dma_size = req_cnt * sizeof(struct tfc_global_id_req);
+ rc = tfc_msg_alloc_dma_buf(&req_buf, dma_size);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < req_cnt; i++)
+		resp_cnt += glb_id_req[i].cnt;
+ dma_size = resp_cnt * sizeof(struct tfc_global_id);
+ *rsp_cnt = resp_cnt;
+ rc = tfc_msg_alloc_dma_buf(&rsp_buf, dma_size);
+ if (rc) {
+ tfc_msg_free_dma_buf(&req_buf);
+ return rc;
+ }
+
+ /* Populate the request */
+ rc = tfc_msg_set_fid(bp, fid, &hwrm_req.fid);
+ if (rc)
+ goto cleanup;
+
+ hwrm_req.sid = rte_cpu_to_le_16(sid);
+ hwrm_req.global_id = rte_cpu_to_le_16(domain_id);
+ hwrm_req.req_cnt = req_cnt;
+ hwrm_req.req_addr = rte_cpu_to_le_64(req_buf.pa_addr);
+ hwrm_req.resc_addr = rte_cpu_to_le_64(rsp_buf.pa_addr);
+ req_data = (struct tfc_global_id_hwrm_req *)req_buf.va_addr;
+ for (i = 0; i < req_cnt; i++) {
+ req_data[i].rtype = rte_cpu_to_le_16(glb_id_req[i].rtype);
+ req_data[i].dir = rte_cpu_to_le_16(glb_id_req[i].dir);
+ req_data[i].subtype = rte_cpu_to_le_16(glb_id_req[i].rsubtype);
+ req_data[i].cnt = rte_cpu_to_le_16(glb_id_req[i].cnt);
+ }
+
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_GLOBAL_ID_ALLOC,
+ &hwrm_req, sizeof(hwrm_req), &hwrm_resp,
+ sizeof(hwrm_resp));
+ if (rc == 0) {
+ if (first) {
+ if (hwrm_resp.first)
+ *first = true;
+ else
+ *first = false;
+ }
+ }
+
+ /* Process the response
+ * Should always get expected number of entries
+ */
+ if (rte_le_to_cpu_32(hwrm_resp.rsp_cnt) != *rsp_cnt) {
+ PMD_DRV_LOG_LINE(ERR, "Alloc message size error, rc:%s",
+ strerror(-EINVAL));
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ rsp_data = (struct tfc_global_id_hwrm_rsp *)rsp_buf.va_addr;
+ for (i = 0; i < *rsp_cnt; i++) {
+ rsp[i].rtype = rte_le_to_cpu_32(rsp_data[i].rtype);
+ rsp[i].dir = rte_le_to_cpu_32(rsp_data[i].dir);
+ rsp[i].rsubtype = rte_le_to_cpu_32(rsp_data[i].subtype);
+ rsp[i].id = rte_le_to_cpu_32(rsp_data[i].id);
+ }
+
+cleanup:
+ tfc_msg_free_dma_buf(&req_buf);
+ tfc_msg_free_dma_buf(&rsp_buf);
+ return rc;
+}
+
+int
+tfc_msg_tbl_scope_config_get(struct tfc *tfcp, uint8_t tsid, bool *configured)
+{
+ struct bnxt *bp;
+ struct hwrm_tfc_tbl_scope_config_get_input req = { 0 };
+ struct hwrm_tfc_tbl_scope_config_get_output resp = { 0 };
+ int rc = 0;
+
+ bp = tfcp->bp;
+ req.tsid = tsid;
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_TBL_SCOPE_CONFIG_GET,
+ &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (rc == 0)
+ *configured = rte_le_to_cpu_16(resp.configured) ? true : false;
+
+ return rc;
+}
+
+int
+tfc_msg_session_id_alloc(struct tfc *tfcp, uint16_t fid,
+ uint16_t *sid)
+{
+ struct bnxt *bp;
+ struct hwrm_tfc_session_id_alloc_input req = { 0 };
+ struct hwrm_tfc_session_id_alloc_output resp = { 0 };
+ int rc = 0;
+
+ /* TBD: Parameters are checked by caller, is this enough? */
+ bp = tfcp->bp;
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_SESSION_ID_ALLOC,
+ &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (rc == 0)
+ *sid = rte_le_to_cpu_16(resp.sid);
+
+ return rc;
+}
+
+int
+tfc_msg_session_fid_add(struct tfc *tfcp, uint16_t fid,
+ uint16_t sid, uint16_t *fid_cnt)
+{
+ struct bnxt *bp;
+ struct hwrm_tfc_session_fid_add_input req = { 0 };
+ struct hwrm_tfc_session_fid_add_output resp = { 0 };
+ int rc;
+
+ /* TBD: Parameters are checked by caller, is this enough? */
+ bp = tfcp->bp;
+ req.sid = rte_cpu_to_le_16(sid);
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_SESSION_FID_ADD,
+ &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (rc == 0)
+ if (fid_cnt)
+ *fid_cnt = rte_le_to_cpu_16(resp.fid_cnt);
+
+ return rc;
+}
+
+int
+tfc_msg_session_fid_rem(struct tfc *tfcp, uint16_t fid,
+ uint16_t sid, uint16_t *fid_cnt)
+{
+ struct bnxt *bp;
+ struct hwrm_tfc_session_fid_rem_input req = { 0 };
+ struct hwrm_tfc_session_fid_rem_output resp = { 0 };
+ int rc;
+
+ /* TBD: Parameters are checked by caller, is this enough? */
+ bp = tfcp->bp;
+ req.sid = rte_cpu_to_le_16(sid);
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_SESSION_FID_REM,
+ &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (rc == 0)
+ if (fid_cnt)
+ *fid_cnt = rte_le_to_cpu_16(resp.fid_cnt);
+ return rc;
+}
+
+static int tfc_msg_set_tt(enum cfa_track_type tt, uint8_t *ptt)
+{
+ switch (tt) {
+ case CFA_TRACK_TYPE_SID:
+ *ptt = HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_SID;
+ break;
+ case CFA_TRACK_TYPE_FID:
+ *ptt = HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID;
+ break;
+ default:
+ PMD_DRV_LOG_LINE(ERR, "Invalid tt[%u]", tt);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int tfc_msg_identifier_alloc(struct tfc *tfcp, enum cfa_dir dir,
+ enum cfa_resource_subtype_ident subtype,
+ enum cfa_track_type tt, uint16_t fid,
+ uint16_t sid, uint16_t *ident_id)
+{
+ struct bnxt *bp;
+ struct hwrm_tfc_ident_alloc_input req = { 0 };
+ struct hwrm_tfc_ident_alloc_output resp = { 0 };
+ int rc;
+
+ bp = tfcp->bp;
+ req.flags = (dir == CFA_DIR_TX ?
+ HWRM_TFC_IDENT_ALLOC_INPUT_FLAGS_DIR_TX :
+ HWRM_TFC_IDENT_ALLOC_INPUT_FLAGS_DIR_RX);
+
+ rc = tfc_msg_set_tt(tt, &req.track_type);
+ if (rc)
+ return rc;
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_le_to_cpu_16(sid);
+ req.subtype = subtype;
+
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_IDENT_ALLOC,
+ &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (rc == 0)
+ *ident_id = rte_le_to_cpu_16(resp.ident_id);
+ return rc;
+}
+
+int tfc_msg_identifier_free(struct tfc *tfcp, enum cfa_dir dir,
+ enum cfa_resource_subtype_ident subtype,
+ uint16_t fid, uint16_t sid,
+ uint16_t ident_id)
+{
+ struct bnxt *bp;
+ struct hwrm_tfc_ident_free_input req = { 0 };
+ struct hwrm_tfc_ident_free_output resp = { 0 };
+ int rc;
+
+ bp = tfcp->bp;
+ req.flags = (dir == CFA_DIR_TX ?
+ HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR_TX :
+ HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR_RX);
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_le_to_cpu_16(sid);
+ req.subtype = subtype;
+ req.ident_id = ident_id;
+
+ return bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_IDENT_FREE,
+ &req, sizeof(req), &resp,
+ sizeof(resp));
+}
+
+int
+tfc_msg_tcam_alloc(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_tcam subtype,
+ enum cfa_track_type tt, uint16_t pri, uint16_t key_sz_bytes,
+ uint16_t *tcam_id)
+{
+ int rc = 0;
+ struct bnxt *bp = tfcp->bp;
+ struct hwrm_tfc_tcam_alloc_input req = { 0 };
+ struct hwrm_tfc_tcam_alloc_output resp = { 0 };
+
+ req.flags = (dir == CFA_DIR_TX ?
+ HWRM_TFC_TCAM_ALLOC_INPUT_FLAGS_DIR_TX :
+ HWRM_TFC_TCAM_ALLOC_INPUT_FLAGS_DIR_RX);
+
+ req.track_type = (tt == CFA_TRACK_TYPE_FID ?
+ HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID :
+ HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_SID);
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_le_to_cpu_16(sid);
+ req.subtype = rte_le_to_cpu_16(subtype);
+ req.priority = rte_le_to_cpu_16(pri);
+ req.key_size = rte_le_to_cpu_16(key_sz_bytes);
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_TCAM_ALLOC,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (rc == 0)
+ *tcam_id = resp.idx;
+
+ return rc;
+}
+
+int
+tfc_msg_tcam_alloc_set(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_tcam subtype,
+ enum cfa_track_type tt, uint16_t *tcam_id, uint16_t pri,
+ const uint8_t *key, uint8_t key_size, const uint8_t *mask,
+ const uint8_t *remap, uint8_t remap_size)
+{
+ int rc = 0;
+ struct bnxt *bp = tfcp->bp;
+ struct hwrm_tfc_tcam_alloc_set_input req = { 0 };
+ struct hwrm_tfc_tcam_alloc_set_output resp = { 0 };
+ struct tfc_msg_dma_buf buf = { 0 };
+ uint8_t *data = NULL;
+ int data_size = 0;
+
+ req.flags = (dir == CFA_DIR_TX ?
+ HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR_TX :
+ HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DIR_RX);
+
+ req.track_type = (tt == CFA_TRACK_TYPE_FID ?
+ HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID :
+ HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_SID);
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_le_to_cpu_16(sid);
+ req.subtype = rte_le_to_cpu_16(subtype);
+ req.key_size = rte_le_to_cpu_16(key_size);
+ req.priority = rte_le_to_cpu_16(pri);
+ req.result_size = rte_le_to_cpu_16(remap_size);
+ data_size = 2 * req.key_size + req.result_size;
+
+ if (data_size > TFC_PCI_BUF_SIZE_MAX) {
+ req.flags |= HWRM_TFC_TCAM_ALLOC_SET_INPUT_FLAGS_DMA;
+ rc = tfc_msg_alloc_dma_buf(&buf, data_size);
+ if (rc)
+ goto cleanup;
+ data = buf.va_addr;
+ memcpy(&req.dma_addr, &buf.pa_addr, sizeof(buf.pa_addr));
+ } else {
+ data = &req.dev_data[0];
+ }
+
+	memcpy(&data[0], key, key_size);
+	memcpy(&data[key_size], mask, key_size);
+	memcpy(&data[key_size * 2], remap, remap_size);
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_TCAM_ALLOC_SET,
+ &req, sizeof(req), &resp, sizeof(resp));
+
+ if (rc == 0)
+ *tcam_id = resp.tcam_id;
+
+cleanup:
+ tfc_msg_free_dma_buf(&buf);
+
+ return rc;
+}
+
+int
+tfc_msg_tcam_set(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_tcam subtype,
+ uint16_t tcam_id, const uint8_t *key, uint8_t key_size,
+ const uint8_t *mask, const uint8_t *remap,
+ uint8_t remap_size)
+{
+ int rc = 0;
+ struct bnxt *bp = tfcp->bp;
+ struct hwrm_tfc_tcam_set_input req = { 0 };
+ struct hwrm_tfc_tcam_set_output resp = { 0 };
+ struct tfc_msg_dma_buf buf = { 0 };
+ uint8_t *data = NULL;
+ int data_size = 0;
+
+ req.flags = (dir == CFA_DIR_TX ?
+ HWRM_TFC_TCAM_SET_INPUT_FLAGS_DIR_TX :
+ HWRM_TFC_TCAM_SET_INPUT_FLAGS_DIR_RX);
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_le_to_cpu_16(sid);
+ req.tcam_id = rte_le_to_cpu_16(tcam_id);
+ req.subtype = rte_le_to_cpu_16(subtype);
+ req.key_size = rte_le_to_cpu_16(key_size);
+ req.result_size = rte_le_to_cpu_16(remap_size);
+ data_size = 2 * req.key_size + req.result_size;
+
+ if (data_size > TFC_PCI_BUF_SIZE_MAX) {
+ req.flags |= HWRM_TFC_TCAM_SET_INPUT_FLAGS_DMA;
+ rc = tfc_msg_alloc_dma_buf(&buf, data_size);
+ if (rc)
+ goto cleanup;
+ data = buf.va_addr;
+ memcpy(&req.dma_addr, &buf.pa_addr, sizeof(buf.pa_addr));
+ } else {
+ data = &req.dev_data[0];
+ }
+
+ memcpy(&data[0], key, key_size);
+ memcpy(&data[key_size], mask, key_size);
+ memcpy(&data[key_size * 2], remap, remap_size);
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_TCAM_SET,
+ &req, sizeof(req), &resp, sizeof(resp));
+cleanup:
+ tfc_msg_free_dma_buf(&buf);
+
+ return rc;
+}
+
+int
+tfc_msg_tcam_get(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_tcam subtype,
+ uint16_t tcam_id, uint8_t *key, uint8_t *key_size,
+ uint8_t *mask, uint8_t *remap, uint8_t *remap_size)
+{
+ int rc = 0;
+ struct bnxt *bp = tfcp->bp;
+ struct hwrm_tfc_tcam_get_input req = { 0 };
+ struct hwrm_tfc_tcam_get_output resp = { 0 };
+
+ req.flags = (dir == CFA_DIR_TX ?
+ HWRM_TFC_TCAM_GET_INPUT_FLAGS_DIR_TX :
+ HWRM_TFC_TCAM_GET_INPUT_FLAGS_DIR_RX);
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_le_to_cpu_16(sid);
+ req.tcam_id = rte_le_to_cpu_16(tcam_id);
+ req.subtype = rte_le_to_cpu_16(subtype);
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_TCAM_GET,
+ &req, sizeof(req), &resp, sizeof(resp));
+
+	if (rc)
+		return rc;
+
+	if (*key_size < rte_le_to_cpu_16(resp.key_size) ||
+	    *remap_size < rte_le_to_cpu_16(resp.result_size)) {
+		PMD_DRV_LOG_LINE(ERR, "Key or remap buffer is too small, rc:%s",
+				 strerror(EINVAL));
+		return -EINVAL;
+	}
+ *key_size = resp.key_size;
+ *remap_size = resp.result_size;
+ memcpy(key, &resp.dev_data[0], resp.key_size);
+ memcpy(mask, &resp.dev_data[resp.key_size], resp.key_size);
+ memcpy(remap, &resp.dev_data[resp.key_size * 2], resp.result_size);
+
+ return rc;
+}
+
+int
+tfc_msg_tcam_free(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_tcam subtype,
+ uint16_t tcam_id)
+{
+ int rc = 0;
+ struct bnxt *bp = tfcp->bp;
+ struct hwrm_tfc_tcam_free_input req = { 0 };
+ struct hwrm_tfc_tcam_free_output resp = { 0 };
+
+ req.flags = (dir == CFA_DIR_TX ?
+ HWRM_TFC_TCAM_FREE_INPUT_FLAGS_DIR_TX :
+ HWRM_TFC_TCAM_FREE_INPUT_FLAGS_DIR_RX);
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_le_to_cpu_16(sid);
+ req.tcam_id = rte_le_to_cpu_16(tcam_id);
+ req.subtype = rte_le_to_cpu_16(subtype);
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_TCAM_FREE,
+ &req, sizeof(req), &resp, sizeof(resp));
+
+ return rc;
+}
+
+int
+tfc_msg_if_tbl_set(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_if_tbl subtype,
+ uint16_t index, uint8_t data_size, const uint8_t *data)
+{
+ int rc = 0;
+ struct bnxt *bp = tfcp->bp;
+ struct hwrm_tfc_if_tbl_set_input req = { 0 };
+ struct hwrm_tfc_if_tbl_set_output resp = { 0 };
+
+ req.flags = (dir == CFA_DIR_TX ?
+ HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR_TX :
+ HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR_RX);
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_le_to_cpu_16(sid);
+
+ req.index = rte_le_to_cpu_16(index);
+ req.subtype = rte_le_to_cpu_16(subtype);
+ req.data_size = data_size;
+ memcpy(req.data, data, data_size);
+
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_IF_TBL_SET,
+ &req, sizeof(req), &resp, sizeof(resp));
+
+ return rc;
+}
+
+int
+tfc_msg_if_tbl_get(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_if_tbl subtype,
+ uint16_t index, uint8_t *data_size, uint8_t *data)
+{
+ int rc = 0;
+ struct bnxt *bp = tfcp->bp;
+ struct hwrm_tfc_if_tbl_get_input req = { 0 };
+ struct hwrm_tfc_if_tbl_get_output resp = { 0 };
+
+ req.flags = (dir == CFA_DIR_TX ?
+ HWRM_TFC_IF_TBL_GET_INPUT_FLAGS_DIR_TX :
+ HWRM_TFC_IF_TBL_GET_INPUT_FLAGS_DIR_RX);
+
+ rc = tfc_msg_set_fid(bp, fid, &req.fid);
+ if (rc)
+ return rc;
+ req.sid = rte_le_to_cpu_16(sid);
+
+ req.index = rte_le_to_cpu_16(index);
+ req.subtype = rte_le_to_cpu_16(subtype);
+ req.data_size = rte_le_to_cpu_16(*data_size);
+
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_IF_TBL_GET,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (rc)
+ return rc;
+
+ if (*data_size < rte_le_to_cpu_16(resp.data_size)) {
+ PMD_DRV_LOG_LINE(ERR, "Table buffer is too small, rc:%s",
+ strerror(EINVAL));
+		return -EINVAL;
+ }
+
+ *data_size = resp.data_size;
+ memcpy(data, resp.data, *data_size);
+
+ return rc;
+}
+
+#ifdef TF_FLOW_SCALE_QUERY
+int tfc_msg_resc_usage_query(struct tfc *tfcp, uint16_t sid, enum cfa_dir dir,
+ uint16_t *data_size, void *data)
+{
+ int rc = 0;
+ struct bnxt *bp = tfcp->bp;
+ struct hwrm_tfc_resc_usage_query_input req = { 0 };
+ struct hwrm_tfc_resc_usage_query_output resp = { 0 };
+
+ req.flags = (dir == CFA_DIR_TX ?
+ HWRM_TFC_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX :
+ HWRM_TFC_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_RX);
+
+ req.sid = rte_le_to_cpu_16(sid);
+ req.fid = CFA_INVALID_FID; /* For WC-TCAM, ignore FID */
+ req.data_size = rte_le_to_cpu_16(*data_size);
+ req.track_type = HWRM_TFC_RESC_USAGE_QUERY_INPUT_TRACK_TYPE_TRACK_TYPE_SID;
+
+ rc = bnxt_hwrm_tf_message_direct(bp, false, HWRM_TFC_RESC_USAGE_QUERY,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (rc)
+ return rc;
+
+ if (*data_size < rte_le_to_cpu_16(resp.data_size)) {
+ PMD_DRV_LOG_LINE(ERR, "Table buffer is too small, rc:%s",
+ strerror(EINVAL));
+ rc = -EINVAL;
+ } else {
+ *data_size = resp.data_size;
+ memcpy(data, resp.data, *data_size);
+ }
+ return rc;
+}
+#endif /* TF_FLOW_SCALE_QUERY */
new file mode 100644
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Broadcom
+ * All rights reserved.
+ */
+
+#include <assert.h>
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "tfc.h"
+#include "tfo.h"
+
+/* HWRM Direct messages */
+
+int
+tfc_msg_tbl_scope_qcaps(struct tfc *tfcp,
+ bool *tbl_scope_capable,
+ uint32_t *max_lkup_rec_cnt,
+ uint32_t *max_act_rec_cnt,
+ uint8_t *max_lkup_static_buckets_exp);
+
+int tfc_msg_tbl_scope_id_alloc(struct tfc *tfcp, uint16_t fid, bool shared,
+ enum cfa_app_type app_type, uint8_t *tsid,
+ bool *first);
+
+int
+tfc_msg_backing_store_cfg_v2(struct tfc *tfcp, uint8_t tsid, enum cfa_dir dir,
+ enum cfa_region_type region, uint64_t base_addr,
+ uint8_t pbl_level, uint32_t pbl_page_sz,
+ uint32_t rec_cnt, uint8_t static_bkt_cnt_exp,
+ bool cfg_done);
+
+int
+tfc_msg_tbl_scope_deconfig(struct tfc *tfcp, uint8_t tsid);
+
+int
+tfc_msg_tbl_scope_fid_add(struct tfc *tfcp, uint16_t fid,
+ uint8_t tsid, uint16_t *fid_cnt);
+
+int
+tfc_msg_tbl_scope_fid_rem(struct tfc *tfcp, uint16_t fid,
+ uint8_t tsid, uint16_t *fid_cnt);
+
+int
+tfc_msg_idx_tbl_alloc(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_track_type tt, enum cfa_dir dir,
+ enum cfa_resource_subtype_idx_tbl rsubtype,
+ uint16_t *id);
+
+int
+tfc_msg_idx_tbl_alloc_set(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_track_type tt, enum cfa_dir dir,
+ enum cfa_resource_subtype_idx_tbl subtype,
+ const uint32_t *dev_data, uint8_t data_size,
+ uint16_t *id);
+
+int
+tfc_msg_idx_tbl_set(struct tfc *tfcp, uint16_t fid,
+ uint16_t sid, enum cfa_dir dir,
+ enum cfa_resource_subtype_idx_tbl subtype,
+ uint16_t id, const uint32_t *dev_data, uint8_t data_size);
+
+int
+tfc_msg_idx_tbl_get(struct tfc *tfcp, uint16_t fid,
+ uint16_t sid, enum cfa_dir dir,
+ enum cfa_resource_subtype_idx_tbl subtype,
+ uint16_t id, uint32_t *dev_data, uint8_t *data_size);
+
+int
+tfc_msg_idx_tbl_free(struct tfc *tfcp, uint16_t fid,
+ uint16_t sid, enum cfa_dir dir,
+ enum cfa_resource_subtype_idx_tbl subtype, uint16_t id);
+
+int tfc_msg_global_id_alloc(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum tfc_domain_id domain_id, uint16_t req_cnt,
+ const struct tfc_global_id_req *glb_id_req,
+ struct tfc_global_id *rsp, uint16_t *rsp_cnt,
+ bool *first);
+int
+tfc_msg_session_id_alloc(struct tfc *tfcp, uint16_t fid, uint16_t *tsid);
+
+int
+tfc_msg_session_fid_add(struct tfc *tfcp, uint16_t fid,
+ uint16_t sid, uint16_t *fid_cnt);
+int
+tfc_msg_session_fid_rem(struct tfc *tfcp, uint16_t fid,
+ uint16_t sid, uint16_t *fid_cnt);
+
+int tfc_msg_identifier_alloc(struct tfc *tfcp, enum cfa_dir dir,
+ enum cfa_resource_subtype_ident subtype,
+ enum cfa_track_type tt, uint16_t fid,
+ uint16_t sid, uint16_t *ident_id);
+
+int tfc_msg_identifier_free(struct tfc *tfcp, enum cfa_dir dir,
+ enum cfa_resource_subtype_ident subtype,
+ uint16_t fid, uint16_t sid,
+ uint16_t ident_id);
+
+#ifndef TFC_FORCE_POOL_0
+int
+tfc_msg_tbl_scope_pool_alloc(struct tfc *tfcp,
+ uint8_t tsid,
+ enum cfa_dir dir,
+ enum tfc_ts_table_type type,
+ uint16_t *pool_id,
+ uint8_t *lkup_pool_sz_exp);
+
+int
+tfc_msg_tbl_scope_pool_free(struct tfc *tfcp,
+ uint8_t tsid,
+ enum cfa_dir dir,
+ enum tfc_ts_table_type type,
+ uint16_t pool_id);
+#endif /* !TFC_FORCE_POOL_0 */
+
+int
+tfc_msg_tbl_scope_config_get(struct tfc *tfcp, uint8_t tsid, bool *configured);
+
+int
+tfc_msg_tcam_alloc(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_tcam subtype,
+ enum cfa_track_type tt, uint16_t pri, uint16_t key_sz_words,
+ uint16_t *tcam_id);
+
+int
+tfc_msg_tcam_alloc_set(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_tcam subtype,
+ enum cfa_track_type tt, uint16_t *tcam_id, uint16_t pri,
+ const uint8_t *key, uint8_t key_size, const uint8_t *mask,
+ const uint8_t *remap, uint8_t remap_size);
+
+int
+tfc_msg_tcam_set(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_tcam subtype,
+ uint16_t tcam_id, const uint8_t *key, uint8_t key_size,
+ const uint8_t *mask, const uint8_t *remap,
+ uint8_t remap_size);
+
+int
+tfc_msg_tcam_get(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_tcam subtype,
+ uint16_t tcam_id, uint8_t *key, uint8_t *key_size,
+ uint8_t *mask, uint8_t *remap, uint8_t *remap_size);
+
+int
+tfc_msg_tcam_free(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_tcam subtype,
+ uint16_t tcam_id);
+
+int
+tfc_msg_if_tbl_set(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_if_tbl subtype,
+ uint16_t index, uint8_t data_size, const uint8_t *data);
+
+int
+tfc_msg_if_tbl_get(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ enum cfa_dir dir, enum cfa_resource_subtype_if_tbl subtype,
+ uint16_t index, uint8_t *data_size, uint8_t *data);
+
+#ifdef TF_FLOW_SCALE_QUERY
+int tfc_msg_resc_usage_query(struct tfc *tfcp, uint16_t sid, enum cfa_dir dir,
+ uint16_t *data_size, void *data);
+#endif /* TF_FLOW_SCALE_QUERY */
new file mode 100644
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Broadcom
+ * All rights reserved.
+ */
+#include <stdio.h>
+#include <errno.h>
+#include "bnxt.h"
+#include "tfc.h"
+#include "tfc_priv.h"
+
+int
+tfc_get_fid(struct tfc *tfcp, uint16_t *fw_fid)
+{
+ struct bnxt *bp = NULL;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+ if (fw_fid == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid fw_fid pointer");
+ return -EINVAL;
+ }
+
+ bp = (struct bnxt *)tfcp->bp;
+ if (bp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid bp pointer");
+ return -EINVAL;
+ }
+
+ *fw_fid = bp->fw_fid;
+
+ return 0;
+}
+
+int
+tfc_get_pfid(struct tfc *tfcp, uint16_t *pfid)
+{
+ struct bnxt *bp = NULL;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+ if (pfid == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid pfid pointer");
+ return -EINVAL;
+ }
+
+ bp = (struct bnxt *)tfcp->bp;
+ if (bp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid bp pointer");
+ return -EINVAL;
+ }
+
+ if (BNXT_VF(bp) && bp->parent) {
+ *pfid = bp->parent->fid;
+ return 0;
+ } else if (BNXT_PF(bp)) {
+ *pfid = bp->fw_fid;
+ return 0;
+ }
+
+ PMD_DRV_LOG_LINE(ERR, "Invalid FID in bp");
+ return -EINVAL;
+}
+
+int
+tfc_bp_is_pf(struct tfc *tfcp, bool *is_pf)
+{
+ struct bnxt *bp = NULL;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (is_pf == NULL) {
+		PMD_DRV_LOG_LINE(ERR, "Invalid is_pf pointer");
+ return -EINVAL;
+ }
+
+ bp = (struct bnxt *)tfcp->bp;
+ if (bp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid bp pointer");
+ return -EINVAL;
+ }
+
+ if (BNXT_PF(bp)) {
+ *is_pf = true;
+ return 0;
+ }
+ *is_pf = false;
+ return 0;
+}
+
+int tfc_bp_vf_max(struct tfc *tfcp, uint16_t *max_vf)
+{
+ struct bnxt *bp = NULL;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (max_vf == NULL) {
+		PMD_DRV_LOG_LINE(ERR, "Invalid max_vf pointer");
+ return -EINVAL;
+ }
+
+ bp = (struct bnxt *)tfcp->bp;
+ if (bp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid bp pointer");
+ return -EINVAL;
+ }
+
+ if (!BNXT_PF(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "not a PF");
+ return -EINVAL;
+ }
+
+ *max_vf = bp->pf->first_vf_id + BNXT_MAX_VFS(bp);
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TFC_PRIV_H_
+#define _TFC_PRIV_H_
+
+#include <stdint.h>
+#include "tfc.h"
+
+/**
+ * @page Common Utility functions
+ *
+ * @ref tfc_get_fid
+ *
+ * @ref tfc_get_pfid
+ *
+ * @ref tfc_bp_is_pf
+ *
+ * @ref tfc_bp_vf_max
+ */
+/**
+ * Get the FID for this DPDK port/function.
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[out] fw_fid
+ * The function ID
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_get_fid(struct tfc *tfcp, uint16_t *fw_fid);
+
+/**
+ * Get the PFID for this DPDK port/function.
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[out] pfid
+ * The Physical Function ID for this port/function
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_get_pfid(struct tfc *tfcp, uint16_t *pfid);
+
+/**
+ * Is this DPDK port/function a PF?
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[out] is_pf
+ * If true, the DPDK port is a PF (as opposed to a VF)
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_bp_is_pf(struct tfc *tfcp, bool *is_pf);
+
+/**
+ * Get the maximum VF for the PF
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[out] max_vf
+ * The maximum VF for the PF (only valid on a PF)
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfc_bp_vf_max(struct tfc *tfcp, uint16_t *max_vf);
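+
+/*
+ * Illustrative usage sketch (error handling elided, variable names are
+ * examples only):
+ *
+ *	uint16_t pfid, max_vf;
+ *	bool is_pf = false;
+ *
+ *	if (!tfc_bp_is_pf(tfcp, &is_pf) && is_pf) {
+ *		tfc_get_pfid(tfcp, &pfid);
+ *		tfc_bp_vf_max(tfcp, &max_vf);
+ *	}
+ */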
+#endif /* _TFC_PRIV_H_ */
new file mode 100644
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+#include <stdio.h>
+#include "bnxt.h"
+#include "bnxt_ulp_tfc.h"
+#include "tfc.h"
+#include "cfa_p70.h"
+#include "tfc_resources.h"
+#include "tfc_msg.h"
+
+#if (TFC_V3_RESOURCE_API_ENABLE == 1)
+int tfc_resource_types_query(struct tfc *tfcp,
+ struct tfc_resources *resources)
+{
+ int rc = 0;
+ /* TODO: implement API */
+ return rc;
+}
+#endif /* (TFC_V3_RESOURCE_API_ENABLE == 1) */
+
+#ifdef TF_FLOW_SCALE_QUERY
+/****************************************************************************************
+ * TruFlow Debug Feature: Flow scale query
+ * It is disabled by default and can be enabled with the build option -DTF_FLOW_SCALE_QUERY.
+ ****************************************************************************************/
+
+/**
+ * CFA TCAM instance statistics information
+ */
+struct cfa_tcm_stats_info {
+ /* [out] Maximum number of TCAM entries */
+ uint16_t max_slices;
+	/* [out] number of partially used 1-slice rows */
+ uint16_t row_1_slice_p_used;
+ /* [out] number of fully used 1-slice rows */
+ uint16_t row_1_slice_f_used;
+ /* [out] number of partially used 2-slice rows */
+ uint16_t row_2_slice_p_used;
+ /* [out] number of fully used 2-slice rows */
+ uint16_t row_2_slice_f_used;
+ /* [out] number of fully used 4-slice rows */
+ uint16_t row_4_slice_used;
+ /* [out] number of unused rows */
+ uint16_t unused_row_number;
+ /* [out] number of used slices */
+ uint16_t used_slices_number;
+};
+
+/* Query TF resource usage state with firmware */
+static int
+tfc_query_resc_usage(struct tfc *tfcp, enum cfa_dir dir)
+{
+ int rc = 0;
+ uint16_t sid;
+ struct cfa_tcm_stats_info stats_info;
+ uint16_t data_size = sizeof(stats_info);
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+		PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+	rc = tfc_msg_resc_usage_query(tfcp, sid, dir, &data_size, &stats_info);
+	if (rc) {
+		PMD_DRV_LOG_LINE(ERR, "Resource usage query failed, rc:%s",
+				 strerror(-rc));
+		return rc;
+	}
+	PMD_DRV_LOG_LINE(ERR,
+			 "dir:%s, %d - %d %d(1-slice) - %d %d(2-slices) - %d(4-slices) - %d(unused row) - %d(used_slices)",
+ dir ? "TX" : "RX", stats_info.max_slices,
+ stats_info.row_1_slice_p_used,
+ stats_info.row_1_slice_f_used,
+ stats_info.row_2_slice_p_used,
+ stats_info.row_2_slice_f_used,
+ stats_info.row_4_slice_used,
+ stats_info.unused_row_number,
+ stats_info.used_slices_number);
+
+ return 0;
+}
+
+/* query all resource usage state with firmware for both direction */
+void tfc_resc_usage_query_all(struct bnxt *bp)
+{
+ struct tfc *tfcp;
+ enum cfa_dir dir;
+
+ tfcp = bnxt_ulp_cntxt_tfcp_get(bp->ulp_ctx);
+ if (!tfcp) {
+ BNXT_DRV_DBG(ERR, "Failed to get truflow pointer\n");
+ return;
+ }
+
+ /* query usage state with firmware for each direction */
+ for (dir = 0; dir < CFA_DIR_MAX; dir++)
+ tfc_query_resc_usage(tfcp, dir);
+}
+
+#endif /* TF_FLOW_SCALE_QUERY */
new file mode 100644
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2023 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TFC_RESOURCES_H_
+#define _TFC_RESOURCES_H_
+#include <rte_common.h>
+#include "bnxt.h"
+
+#ifdef TF_FLOW_SCALE_QUERY
+void tfc_resc_usage_query_all(struct bnxt *bp);
+#endif /* TF_FLOW_SCALE_QUERY */
+
+#endif /* _TFC_RESOURCES_H_ */
new file mode 100644
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Broadcom
+ * All rights reserved.
+ */
+#include <stdio.h>
+#include "tfc.h"
+
+#include "tfc_msg.h"
+#include "cfa_types.h"
+#include "tfo.h"
+#include "bnxt.h"
+
+int tfc_session_id_alloc(struct tfc *tfcp, uint16_t fid, uint16_t *sid)
+{
+ int rc = 0;
+ uint16_t current_sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (sid == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid sid pointer");
+ return -EINVAL;
+ }
+
+	rc = tfo_sid_get(tfcp->tfo, &current_sid);
+ if (rc == 0) {
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot allocate SID, current session is %u",
+ current_sid);
+ return -EBUSY;
+ } else if (rc != -ENODEV) {
+ PMD_DRV_LOG_LINE(ERR, "Getting current sid failed, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+ /* -ENODEV ==> current SID is invalid */
+
+ rc = tfc_msg_session_id_alloc(tfcp, fid, sid);
+
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR,
+ "session id alloc message failed, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfo_sid_set(tfcp->tfo, *sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to store session id, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ return rc;
+}
+
+int tfc_session_fid_add(struct tfc *tfcp, uint16_t fid, uint16_t sid,
+ uint16_t *fid_cnt)
+{
+ int rc = 0;
+ uint16_t current_sid = INVALID_SID;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (fid_cnt == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid fid_cnt pointer");
+ return -EINVAL;
+ }
+
+	rc = tfo_sid_get(tfcp->tfo, &current_sid);
+ if (rc == 0) {
+ /* SID is valid if rc == 0 */
+ if (current_sid != sid) {
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot add FID to SID %u,"
+ " current session is %u",
+ sid, current_sid);
+ return -EBUSY;
+ }
+ } else if (rc != -ENODEV) {
+ PMD_DRV_LOG_LINE(ERR,
+ "Getting current sid failed, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+ /* -ENODEV ==> current SID is invalid */
+
+ rc = tfc_msg_session_fid_add(tfcp, fid, sid, fid_cnt);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR,
+ "session fid add message failed, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ if (current_sid != sid) {
+ rc = tfo_sid_set(tfcp->tfo, sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to store session id, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+ }
+
+ return rc;
+}
+int tfc_session_fid_rem(struct tfc *tfcp, uint16_t fid, uint16_t *fid_cnt)
+{
+ int rc = 0;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (fid_cnt == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid fid_cnt pointer");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "no sid allocated, rc:%s", strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_session_fid_rem(tfcp, fid, sid, fid_cnt);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR,
+ "session fid rem message failed, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ if (((struct bnxt *)tfcp->bp)->fw_fid == fid) {
+ rc = tfo_sid_set(tfcp->tfo, INVALID_SID);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to reset session id, rc:%s", strerror(-rc));
+ }
+
+ return rc;
+}
new file mode 100644
@@ -0,0 +1,2077 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Broadcom
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <math.h> /* May need to link with -lm */
+#include <bnxt.h>
+#include <bnxt_mpc.h>
+
+#include "cfa_bld_mpc_field_ids.h"
+#include "cfa_bld_mpcops.h"
+#include "rte_malloc.h"
+#include "rte_memzone.h"
+#include "tfc.h"
+#include "tfc_em.h"
+#include "tfc_priv.h"
+#include "tfc_msg.h"
+#include "cfa_types.h"
+#include "cfa_tim.h"
+#include "cfa_tpm.h"
+#include "tfo.h"
+#include "bnxt.h"
+#include "tfc_cpm.h"
+#include "cfa_mm.h"
+#include "tfc_vf2pf_msg.h"
+#include "tfc_util.h"
+
+/*
+ * These values are for Thor2. Take care to adjust them appropriately when
+ * support for additional HW is added.
+ */
+/* How many entries a single bucket can point to */
+#define ENTRIES_PER_BUCKET 6
+/* sizes in bytes */
+#define LREC_SIZE 16
+#define RECORD_SIZE 32
+
+/*
+ * Page alignments must be some power of 2. These bits define the powers of 2
+ * that are valid for page alignments. It is taken from
+ * cfa_hw_ts_pbl_page_size.
+ */
+#define VALID_PAGE_ALIGNMENTS 0x40753000
+
+#define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *))
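+
+/*
+ * Illustrative example (values assumed, mirroring the check done in
+ * tfc_tbl_scope_mem_alloc()): a 4KB page (0x1000) is a valid alignment
+ * because bit 12 is set in VALID_PAGE_ALIGNMENTS, whereas 16KB (0x4000) is
+ * rejected because bit 14 is clear:
+ *
+ *	page_sz = 1 << next_pow2(pbl_page_sz_in_bytes);
+ *	if (pbl_page_sz_in_bytes != page_sz ||
+ *	    (page_sz & VALID_PAGE_ALIGNMENTS) == 0)
+ *		return -EINVAL;
+ *
+ * With 64-bit pointers, MAX_PAGE_PTRS(4096) evaluates to 512 page pointers
+ * per page-table page.
+ */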
+
+/* Private functions */
+
+/*
+ * This function calculates how many buckets and records are required for a
+ * given flow_cnt and factor.
+ *
+ * @param[in] flow_cnt
+ * The total number of flows for which to compute memory
+ *
+ * @param[in] key_sz_in_bytes
+ * The lookup key size in bytes
+ *
+ * @param[in] shared
+ * True if the table scope will be shared. Shared table scopes cannot have
+ * dynamic buckets.
+ *
+ * @param[in] factor
+ * This indicates a multiplier factor for determining the static and dynamic
+ * bucket counts. The larger the factor, the more buckets will be allocated.
+ *
+ * @param[out] lkup_rec_cnt
+ * The total number of lookup records to allocate (includes buckets)
+ *
+ * @param[out] static_bucket_cnt_exp
+ * The log2 of the number of static buckets to allocate. For example if 1024
+ * static buckets, 1024=2^10, so the value 10 would be returned.
+ *
+ * @param[out] dynamic_bucket_cnt
+ * The number of dynamic buckets to allocate
+ *
+ */
+static int calc_lkup_rec_cnt(uint32_t flow_cnt, uint16_t key_sz_in_bytes,
+ __rte_unused bool shared,
+ enum tfc_tbl_scope_bucket_factor factor,
+ uint32_t *lkup_rec_cnt,
+ uint8_t *static_bucket_cnt_exp,
+ uint32_t *dynamic_bucket_cnt)
+{
+ unsigned int entry_size;
+ unsigned int flow_adj; /* flow_cnt adjusted for factor */
+ unsigned int key_rec_cnt;
+
+ switch (factor) {
+ case TFC_TBL_SCOPE_BUCKET_FACTOR_1:
+ flow_adj = flow_cnt;
+ break;
+ case TFC_TBL_SCOPE_BUCKET_FACTOR_2:
+ flow_adj = flow_cnt * 2;
+ break;
+ case TFC_TBL_SCOPE_BUCKET_FACTOR_4:
+ flow_adj = flow_cnt * 4;
+ break;
+ case TFC_TBL_SCOPE_BUCKET_FACTOR_8:
+ flow_adj = flow_cnt * 8;
+ break;
+ case TFC_TBL_SCOPE_BUCKET_FACTOR_16:
+ flow_adj = flow_cnt * 16;
+ break;
+ default:
+ PMD_DRV_LOG_LINE(ERR, "Invalid factor (%u)", factor);
+ return -EINVAL;
+ }
+
+ if (key_sz_in_bytes <= RECORD_SIZE - LREC_SIZE) {
+ entry_size = 1;
+ } else if (key_sz_in_bytes <= RECORD_SIZE * 2 - LREC_SIZE) {
+ entry_size = 2;
+ } else if (key_sz_in_bytes <= RECORD_SIZE * 3 - LREC_SIZE) {
+ entry_size = 3;
+ } else if (key_sz_in_bytes <= RECORD_SIZE * 4 - LREC_SIZE) {
+ entry_size = 4;
+ } else {
+ PMD_DRV_LOG_LINE(ERR,
+ "Key size (%u) cannot be larger than (%u)",
+ key_sz_in_bytes,
+ RECORD_SIZE * 4 - LREC_SIZE);
+ return -EINVAL;
+ }
+ key_rec_cnt = flow_cnt * entry_size;
+
+#ifdef DYNAMIC_BUCKETS_SUPPORTED
+ if (shared) {
+#endif
+ *static_bucket_cnt_exp =
+ next_pow2(flow_adj / ENTRIES_PER_BUCKET);
+ *dynamic_bucket_cnt = 0;
+#ifdef DYNAMIC_BUCKETS_SUPPORTED
+ } else {
+ *static_bucket_cnt_exp =
+ prev_pow2(flow_cnt / ENTRIES_PER_BUCKET);
+ *dynamic_bucket_cnt =
+ (flow_adj - flow_cnt) / ENTRIES_PER_BUCKET;
+ }
+#endif
+
+ *lkup_rec_cnt = key_rec_cnt + (1 << *static_bucket_cnt_exp) +
+ *dynamic_bucket_cnt;
+
+ return 0;
+}
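+
+/*
+ * Worked example for calc_lkup_rec_cnt() (illustrative values only, and
+ * assuming next_pow2() returns the exponent of the next power of two, as
+ * implied by the 1 << *static_bucket_cnt_exp usage above): with
+ * flow_cnt = 1M, key_sz_in_bytes = 16 and factor = TFC_TBL_SCOPE_BUCKET_FACTOR_2:
+ *
+ *	entry_size            = 1 (16 <= RECORD_SIZE - LREC_SIZE)
+ *	key_rec_cnt           = 1M * 1 = 1,048,576
+ *	flow_adj              = 2M
+ *	static_bucket_cnt_exp = next_pow2(2M / 6) = 19 (2^19 = 524,288 buckets)
+ *	lkup_rec_cnt          = 1,048,576 + 524,288 = 1,572,864
+ */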
+
+static int calc_act_rec_cnt(uint32_t *act_rec_cnt, uint32_t flow_cnt,
+ uint16_t act_rec_sz_in_bytes)
+{
+ if (act_rec_sz_in_bytes % RECORD_SIZE) {
+ PMD_DRV_LOG_LINE(ERR,
+ "Action record size (%u) must be a multiple "
+ "of %u",
+ act_rec_sz_in_bytes, RECORD_SIZE);
+ return -EINVAL;
+ }
+
+ *act_rec_cnt = flow_cnt * (act_rec_sz_in_bytes / RECORD_SIZE);
+
+ return 0;
+}
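+
+/*
+ * Example (illustrative values): with flow_cnt = 1M and
+ * act_rec_sz_in_bytes = 64, each flow uses 64 / RECORD_SIZE = 2 action
+ * records, so act_rec_cnt = 2M.
+ */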
+
+/*
+ * Using a #define for the number of bits since the size of an int can depend
+ * upon the processor.
+ */
+#define BITS_IN_UINT (sizeof(unsigned int) * 8)
+
+static int calc_pool_sz_exp(uint8_t *pool_sz_exp, uint32_t rec_cnt,
+ uint32_t max_pools)
+{
+ unsigned int recs_per_region = rec_cnt / max_pools;
+
+ if (recs_per_region == 0) {
+ PMD_DRV_LOG_LINE(ERR,
+ "rec_cnt (%u) must be larger than max_pools "
+ "(%u)", rec_cnt, max_pools);
+ return -EINVAL;
+ }
+
+ *pool_sz_exp = prev_pow2(recs_per_region + 1);
+
+ return 0;
+}
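+
+/*
+ * Example (illustrative values, assuming prev_pow2() returns the exponent
+ * of the largest power of two not exceeding its argument): with
+ * rec_cnt = 1024 and max_pools = 2, recs_per_region = 512 and
+ * pool_sz_exp = prev_pow2(513) = 9, i.e. 2^9 = 512 records per pool,
+ * matching the lkup/act_pool_sz_exp convention described in
+ * tbl_scope_pools_create_parms below.
+ */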
+
+static int calc_rec_start_offset(uint32_t *start_offset, uint32_t bucket_cnt_exp)
+{
+ *start_offset = 1 << bucket_cnt_exp;
+
+ return 0;
+}
+
+static void free_pg_tbl(struct tfc_ts_page_tbl *tp)
+{
+ tp->pg_count = 0;
+ rte_free(tp->pg_va_tbl);
+ tp->pg_va_tbl = NULL;
+ rte_free(tp->pg_pa_tbl);
+ tp->pg_pa_tbl = NULL;
+}
+
+/* simple sequential allocator, there is no associated free */
+static void tfc_mz_alloc(struct tfc_ts_mz *ts_mz, uint64_t *pa, void **va)
+{
+	if (ts_mz->alloc_count < ts_mz->page_count) {
+ ts_mz->alloc_count++;
+ } else {
+ *pa = 0;
+ *va = NULL;
+ return;
+ }
+
+	*pa = ts_mz->mz->iova +
+	      ((uint64_t)(ts_mz->alloc_count - 1) * ts_mz->page_size);
+	*va = (void *)((uint8_t *)ts_mz->mz->addr +
+	      ((uint64_t)(ts_mz->alloc_count - 1) * ts_mz->page_size));
+}
+
+static int alloc_pg_tbl(struct tfc_ts_mem_cfg *mem_cfg, struct tfc_ts_page_tbl *tp,
+ uint32_t pg_count, uint32_t pg_size)
+{
+ uint32_t i;
+
+ /*
+	 * The VA and PA tables are used locally to store the page information
+	 * and are not shared via memory.
+ */
+ tp->pg_va_tbl =
+ rte_zmalloc("tfc tbl scope", pg_count * sizeof(void *), 0);
+ if (tp->pg_va_tbl == NULL)
+ return -ENOMEM;
+
+ tp->pg_pa_tbl =
+ rte_zmalloc("tfc tbl scope", pg_count * sizeof(void *), 0);
+ if (tp->pg_pa_tbl == NULL) {
+ rte_free(tp->pg_va_tbl);
+ return -ENOMEM;
+ }
+
+ tp->pg_count = 0;
+ tp->pg_size = pg_size;
+
+ for (i = 0; i < pg_count; i++) {
+ tfc_mz_alloc(&mem_cfg->ts_mz, &tp->pg_pa_tbl[i], &tp->pg_va_tbl[i]);
+ if (tp->pg_va_tbl[i] == NULL)
+ goto cleanup;
+
+ tp->pg_count++;
+ }
+
+ return 0;
+
+cleanup:
+ free_pg_tbl(tp);
+ return -ENOMEM;
+}
+
+static void free_page_table(struct tfc_ts_mem_cfg *mem_cfg)
+{
+ struct tfc_ts_page_tbl *tp;
+ int i;
+
+ for (i = 0; i < mem_cfg->num_lvl; i++) {
+ tp = &mem_cfg->pg_tbl[i];
+ PMD_DRV_LOG_LINE(DEBUG, "EEM: Freeing page table: lvl %d cnt %u",
+ i, tp->pg_count);
+
+ free_pg_tbl(tp);
+ }
+
+ rte_memzone_free(mem_cfg->ts_mz.mz);
+ memset(&mem_cfg->ts_mz, 0, sizeof(mem_cfg->ts_mz));
+
+ mem_cfg->l0_addr = NULL;
+ mem_cfg->l0_dma_addr = 0;
+ mem_cfg->num_lvl = 0;
+ mem_cfg->num_data_pages = 0;
+}
+
+static int alloc_page_table(struct tfc_ts_mem_cfg *mem_cfg, uint32_t page_size)
+{
+ struct tfc_ts_page_tbl *tp;
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < mem_cfg->num_lvl; i++) {
+ tp = &mem_cfg->pg_tbl[i];
+
+ rc = alloc_pg_tbl(mem_cfg, tp, mem_cfg->page_cnt[i], page_size);
+ if (rc) {
+ PMD_DRV_LOG_LINE(WARNING,
+ "Failed to allocate page table: lvl: %d, "
+ "rc:%s", i, strerror(-rc));
+ goto cleanup;
+ }
+ }
+ return rc;
+
+cleanup:
+ free_page_table(mem_cfg);
+ return rc;
+}
+
+static uint32_t page_tbl_pgcnt(uint32_t num_pages, uint32_t page_size)
+{
+	return roundup32(num_pages, MAX_PAGE_PTRS(page_size)) /
+		MAX_PAGE_PTRS(page_size);
+}
+
+static void size_page_tbls(int max_lvl, uint64_t num_data_pages,
+ uint32_t page_size, uint32_t *page_cnt,
+ uint32_t *total_pages)
+{
+ if (max_lvl == TFC_TS_PT_LVL_0) {
+ page_cnt[TFC_TS_PT_LVL_0] = num_data_pages;
+ *total_pages = page_cnt[TFC_TS_PT_LVL_0];
+ } else if (max_lvl == TFC_TS_PT_LVL_1) {
+ page_cnt[TFC_TS_PT_LVL_1] = num_data_pages;
+ page_cnt[TFC_TS_PT_LVL_0] =
+ page_tbl_pgcnt(page_cnt[TFC_TS_PT_LVL_1], page_size);
+ *total_pages = page_cnt[TFC_TS_PT_LVL_0] + page_cnt[TFC_TS_PT_LVL_1];
+ } else if (max_lvl == TFC_TS_PT_LVL_2) {
+ page_cnt[TFC_TS_PT_LVL_2] = num_data_pages;
+ page_cnt[TFC_TS_PT_LVL_1] =
+ page_tbl_pgcnt(page_cnt[TFC_TS_PT_LVL_2], page_size);
+ page_cnt[TFC_TS_PT_LVL_0] =
+ page_tbl_pgcnt(page_cnt[TFC_TS_PT_LVL_1], page_size);
+ *total_pages = page_cnt[TFC_TS_PT_LVL_0] +
+ page_cnt[TFC_TS_PT_LVL_1] +
+ page_cnt[TFC_TS_PT_LVL_2];
+ } else {
+ return;
+ }
+}
+
+static int num_pages_get(struct tfc_ts_mem_cfg *mem_cfg, uint32_t page_size)
+{
+ uint64_t lvl_data_size = page_size;
+ uint64_t data_size;
+ uint64_t max_page_ptrs = MAX_PAGE_PTRS(page_size);
+ int lvl = TFC_TS_PT_LVL_0;
+
+ mem_cfg->num_data_pages = 0;
+ data_size = (uint64_t)mem_cfg->rec_cnt * mem_cfg->entry_size;
+
+ while (lvl_data_size < data_size) {
+ lvl++;
+
+ if (lvl == TFC_TS_PT_LVL_1)
+ lvl_data_size = max_page_ptrs * page_size;
+ else if (lvl == TFC_TS_PT_LVL_2)
+ lvl_data_size =
+ max_page_ptrs * max_page_ptrs * page_size;
+ else
+ return -ENOMEM;
+ }
+
+ mem_cfg->num_data_pages = roundup64(data_size, page_size) / page_size;
+ mem_cfg->num_lvl = lvl + 1;
+
+ return 0;
+}
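+
+/*
+ * Worked example (illustrative values, 64-bit pointers assumed): with
+ * page_size = 4KB, rec_cnt = 1M and entry_size = RECORD_SIZE (32B),
+ * data_size = 32MB. One data page covers 4KB and one level of indirection
+ * covers 512 * 4KB = 2MB, so two levels of page tables are needed:
+ * num_data_pages = 8192 and num_lvl = 3 (TFC_TS_PT_LVL_2 holds the data
+ * pages).
+ */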
+
+static void link_page_table(struct tfc_ts_page_tbl *tp,
+ struct tfc_ts_page_tbl *tp_next, bool set_pte_last)
+{
+ uint64_t *pg_pa = tp_next->pg_pa_tbl;
+ uint64_t *pg_va;
+ uint64_t valid;
+ uint32_t k = 0;
+ uint32_t i;
+ uint32_t j;
+
+ for (i = 0; i < tp->pg_count; i++) {
+ pg_va = tp->pg_va_tbl[i];
+
+ for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) {
+ if (k == tp_next->pg_count - 2 && set_pte_last)
+ valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID;
+ else if (k == tp_next->pg_count - 1 && set_pte_last)
+ valid = PTU_PTE_LAST | PTU_PTE_VALID;
+ else
+ valid = PTU_PTE_VALID;
+
+ pg_va[j] = rte_cpu_to_le_64(pg_pa[k] | valid);
+ if (++k >= tp_next->pg_count)
+ return;
+ }
+ }
+}
+
+static void setup_page_table(struct tfc_ts_mem_cfg *mem_cfg)
+{
+ struct tfc_ts_page_tbl *tp_next;
+ struct tfc_ts_page_tbl *tp;
+ bool set_pte_last = 0;
+ int i;
+
+ for (i = 0; i < mem_cfg->num_lvl - 1; i++) {
+ tp = &mem_cfg->pg_tbl[i];
+ tp_next = &mem_cfg->pg_tbl[i + 1];
+ if (i == mem_cfg->num_lvl - 2)
+ set_pte_last = 1;
+ link_page_table(tp, tp_next, set_pte_last);
+ }
+
+ mem_cfg->l0_addr = mem_cfg->pg_tbl[TFC_TS_PT_LVL_0].pg_va_tbl[0];
+ mem_cfg->l0_dma_addr = mem_cfg->pg_tbl[TFC_TS_PT_LVL_0].pg_pa_tbl[0];
+}
+
+static void unlink_and_free(struct tfc_ts_mem_cfg *mem_cfg, uint32_t page_size)
+{
+ /* tf_em_free_page_table */
+ struct tfc_ts_page_tbl *tp;
+ int i;
+
+ for (i = 0; i < mem_cfg->num_lvl; i++) {
+ tp = &mem_cfg->pg_tbl[i];
+ PMD_DRV_LOG_LINE(DEBUG,
+ "EEM: Freeing page table: size %u lvl %d cnt %u",
+ page_size, i, tp->pg_count);
+
+ /* tf_em_free_pg_tbl */
+ free_pg_tbl(tp);
+ }
+
+ mem_cfg->l0_addr = NULL;
+ mem_cfg->l0_dma_addr = 0;
+ mem_cfg->num_lvl = 0;
+ mem_cfg->num_data_pages = 0;
+}
+
+static int alloc_link_pbl(struct tfc_ts_mem_cfg *mem_cfg, uint32_t page_size,
+ uint8_t tsid, int dir, const char *type)
+{
+ int rc;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ int mz_size;
+ uint64_t total_size;
+ uint32_t total_pages;
+
+ /* tf_em_size_page_tbl_lvl */
+ rc = num_pages_get(mem_cfg, page_size);
+ if (rc) {
+ PMD_DRV_LOG_LINE(WARNING, "EEM: Failed to size page table levels");
+ PMD_DRV_LOG_LINE(WARNING, "data-sz: %016" PRIu64 " page-sz: %u",
+ (uint64_t)mem_cfg->rec_cnt * mem_cfg->entry_size,
+ page_size);
+ return rc;
+ }
+
+ /* tf_em_size_page_tbls */
+ size_page_tbls(mem_cfg->num_lvl - 1, mem_cfg->num_data_pages, page_size,
+ mem_cfg->page_cnt, &total_pages);
+
+ PMD_DRV_LOG_LINE(DEBUG,
+ "EEM: lvls: %d sz: %016" PRIu64 " pgs: %016" PRIu64
+ " l0: %u l1: %u l2: %u",
+ mem_cfg->num_lvl, mem_cfg->num_data_pages * page_size,
+ mem_cfg->num_data_pages, mem_cfg->page_cnt[TFC_TS_PT_LVL_0],
+ mem_cfg->page_cnt[TFC_TS_PT_LVL_1],
+ mem_cfg->page_cnt[TFC_TS_PT_LVL_2]);
+
+	/*
+	 * Allocate a single blob large enough for the backing store pages
+	 * and page tables. The allocation occurs only once per backing
+	 * store; it is located by name and reused on subsequent runs.
+	 */
+ total_size = page_size * total_pages;
+
+ if (total_size <= (1024 * 256))
+ mz_size = RTE_MEMZONE_256KB;
+ else if (total_size <= (1024 * 1024 * 2))
+ mz_size = RTE_MEMZONE_2MB;
+ else if (total_size <= (1024 * 1024 * 16))
+ mz_size = RTE_MEMZONE_16MB;
+ else if (total_size <= (1024 * 1024 * 256))
+ mz_size = RTE_MEMZONE_256MB;
+ else if (total_size <= (1024 * 1024 * 512))
+ mz_size = RTE_MEMZONE_512MB;
+ else if (total_size <= (1024 * 1024 * 1024))
+ mz_size = RTE_MEMZONE_1GB;
+ else if (total_size <= (1024ULL * 1024 * 1024 * 4))
+ mz_size = RTE_MEMZONE_4GB;
+ else if (total_size <= (1024ULL * 1024 * 1024 * 16))
+ mz_size = RTE_MEMZONE_16GB;
+ else
+ return -ENOMEM;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_bs_%d_%d_%s",
+ tsid, dir, type);
+ PMD_DRV_LOG_LINE(DEBUG, "EEM: mz name:%s", mz_name);
+
+ mem_cfg->ts_mz.mz = rte_memzone_lookup(mz_name);
+ if (!mem_cfg->ts_mz.mz) {
+ /* Backing store does not already exist so allocate */
+ mem_cfg->ts_mz.mz = rte_memzone_reserve_aligned(mz_name,
+ total_size,
+ SOCKET_ID_ANY,
+ mz_size |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG,
+ page_size);
+	}
+	if (!mem_cfg->ts_mz.mz) {
+		PMD_DRV_LOG_LINE(ERR, "Failed to reserve memzone %s", mz_name);
+		return -ENOMEM;
+	}
+	memset(mem_cfg->ts_mz.mz->addr, 0, mem_cfg->ts_mz.mz->len);
+ mem_cfg->ts_mz.page_count = total_pages;
+ mem_cfg->ts_mz.page_size = page_size;
+
+ /* tf_em_alloc_page_table -> tf_em_alloc_pg_tbl */
+ rc = alloc_page_table(mem_cfg, page_size);
+ if (rc)
+ goto cleanup;
+
+ /* tf_em_setup_page_table */
+ setup_page_table(mem_cfg);
+
+ return 0;
+
+cleanup:
+ unlink_and_free(mem_cfg, page_size);
+ return rc;
+}
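+
+/*
+ * Note: the backing-store memzone name is built from the table scope id,
+ * direction and region type, e.g. "bnxt_bs_3_0_lkup" for tsid 3, RX lookup
+ * memory (example values only). This is what allows rte_memzone_lookup()
+ * above to find and reuse an existing allocation instead of reserving a
+ * new one.
+ */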
+
+/**
+ * tbl_scope_pools_create_parms contains the parameters for creating pools.
+ */
+struct tbl_scope_pools_create_parms {
+ /**
+ * [in] Indicates if the table scope will be shared.
+ */
+ bool shared;
+ /**
+ * [in] The number of pools the table scope will be divided into. (set
+ * to 1 if not shared).
+ */
+ uint16_t max_pools;
+ /**
+ * [in] The size of each individual lookup record pool expressed as:
+ * log2(max_records/max_pools). For example if 1024 records and 2 pools
+ * 1024/2=512=2^9, so the value 9 would be entered.
+ */
+ uint8_t lkup_pool_sz_exp[CFA_DIR_MAX];
+ /**
+ * [in] The size of each individual action record pool expressed as:
+ * log2(max_records/max_pools). For example if 1024 records and 2 pools
+ * 1024/2=512=2^9, so the value 9 would be entered.
+ */
+ uint8_t act_pool_sz_exp[CFA_DIR_MAX];
+};
+/**
+ * Allocate and store TPM and TIM for shared scope
+ *
+ * Dynamically allocate and store TPM instances for shared scope
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] tsid
+ * Table scope identifier
+ *
+ * @param[in] params
+ * Parameters for allocate and store TPM instances for shared scope
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+static int tbl_scope_pools_create(struct tfc *tfcp, uint8_t tsid,
+ struct tbl_scope_pools_create_parms *parms)
+{
+ int rc;
+ int dir;
+ enum cfa_region_type region;
+ uint32_t tpm_db_size;
+ void *tim;
+ void *tpms[CFA_DIR_MAX][CFA_REGION_TYPE_MAX];
+ void *tpm;
+
+ /*
+ * Dynamically allocate and store base addresses for TIM,
+ * TPM instances for the given tsid
+ */
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->tfo == NULL || tfcp->bp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp pointer not initialized");
+ return -EINVAL;
+ }
+
+ if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) invalid", tsid);
+ return -EINVAL;
+ }
+
+ rc = tfo_tim_get(tfcp->tfo, &tim);
+ if (rc)
+ return -EINVAL;
+
+ rc = cfa_tpm_query(parms->max_pools, &tpm_db_size);
+ if (rc)
+ return -EINVAL;
+
+ memset(tpms, 0, sizeof(void *) * CFA_DIR_MAX * CFA_REGION_TYPE_MAX);
+
+ /* Allocate pool managers */
+ for (region = 0; region < CFA_REGION_TYPE_MAX; region++) {
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ tpms[dir][region] = rte_zmalloc("TPM", tpm_db_size, 0);
+ if (tpms[dir][region] == NULL)
+ goto cleanup;
+
+ rc = cfa_tpm_open(tpms[dir][region], tpm_db_size, parms->max_pools);
+ if (rc)
+ goto cleanup;
+
+ rc = cfa_tpm_pool_size_set(tpms[dir][region],
+ (region == CFA_REGION_TYPE_LKUP ? parms->lkup_pool_sz_exp[dir] :
+ parms->act_pool_sz_exp[dir]));
+
+ if (rc)
+ goto cleanup;
+
+ rc = cfa_tim_tpm_inst_set(tim, tsid, region, dir, tpms[dir][region]);
+ if (rc)
+ goto cleanup;
+ }
+ }
+
+ return rc;
+
+ cleanup:
+ if (tim) {
+ for (region = 0; region < CFA_REGION_TYPE_MAX; region++) {
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ /*
+				 * It is possible that a tpm has been
+ * allocated but not added to tim.
+ * Ensure that those instances are cleaned
+ * up.
+ */
+ rc = cfa_tim_tpm_inst_get(tim,
+ tsid,
+ region,
+ dir,
+ &tpm);
+
+ if (tpm) {
+ rc = cfa_tim_tpm_inst_set(tim,
+ tsid,
+ region,
+ dir,
+ NULL);
+ rte_free(tpm);
+ } else if (tpms[dir][region] != NULL) {
+ rte_free(tpms[dir][region]);
+ }
+ }
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * Free TPM instances for shared scope
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] tsid
+ * Table scope identifier
+ *
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+static int tbl_scope_pools_destroy(struct tfc *tfcp, uint8_t tsid)
+{
+ int rc;
+ void *tim;
+ int dir;
+ enum cfa_region_type region;
+ void *tpm;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->tfo == NULL || tfcp->bp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp pointer not initialized");
+ return -EINVAL;
+ }
+
+ if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) invalid", tsid);
+ return -EINVAL;
+ }
+
+ rc = tfo_tim_get(tfcp->tfo, &tim);
+ if (rc)
+ return -EINVAL;
+
+ /* Free TIM, TPM instances for the given tsid. */
+ if (tim) {
+ for (region = 0; region < CFA_REGION_TYPE_MAX; region++) {
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ rc = cfa_tim_tpm_inst_get(tim,
+ tsid,
+ region,
+ dir,
+ &tpm);
+ if (rc)
+ return -EINVAL;
+
+ if (tpm) {
+ rc = cfa_tim_tpm_inst_set(tim,
+ tsid,
+ region,
+ dir,
+ NULL);
+ rte_free(tpm);
+ }
+ }
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * Remove all associated pools owned by a function from TPM
+ *
+ * @param[in] tfcp
+ * Pointer to TFC handle
+ *
+ * @param[in] fid
+ * function
+ *
+ * @param[in] tsid
+ * Table scope identifier
+ *
+ * @param[out] pool_cnt
+ * Pointer to the number of pools still associated with other fids.
+ * @returns
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+static int tbl_scope_tpm_fid_rem(struct tfc *tfcp, uint16_t fid, uint8_t tsid,
+ uint16_t *pool_cnt)
+{
+ int rc = 0;
+ bool shared;
+ bool valid;
+ enum cfa_dir dir;
+ uint16_t pool_id;
+ uint16_t found_cnt = 0;
+ void *tim;
+ void *tpm;
+ enum cfa_region_type region;
+ uint16_t lfid;
+ bool is_pf;
+ uint16_t max_fid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+ if (pool_cnt == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid pool_cnt pointer");
+ return -EINVAL;
+ }
+ rc = tfc_bp_is_pf(tfcp, &is_pf);
+ if (rc)
+ return rc;
+
+ if (!is_pf) {
+ PMD_DRV_LOG_LINE(ERR, "only valid for PF");
+ return -EINVAL;
+ }
+	rc = tfo_ts_get(tfcp->tfo, tsid, &shared, NULL, &valid, NULL);
+	if (rc || !valid || !shared) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) valid(%s) shared(%s)",
+ tsid, valid ? "TRUE" : "FALSE",
+ shared ? "TRUE" : "FALSE");
+ return -EINVAL;
+ }
+
+ rc = tfo_tim_get(tfcp->tfo, &tim);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to get TIM");
+ return -EINVAL;
+ }
+
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ for (region = 0; region < CFA_REGION_TYPE_MAX; region++) {
+ /*
+ * Get the TPM and then check to see if the fid is associated
+ * with any of the pools
+ */
+ rc = cfa_tim_tpm_inst_get(tim, tsid, region, dir, &tpm);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to get TPM for tsid:%d dir:%d",
+ tsid, dir);
+ return -EINVAL;
+ }
+ rc = cfa_tpm_srchm_by_fid(tpm, CFA_SRCH_MODE_FIRST, fid, &pool_id);
+ if (rc) /* FID not used */
+ continue;
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) fid(%d) region(%s) pool_id(%d)",
+ tsid, fid, tfc_ts_region_2_str(region, dir),
+ pool_id);
+ do {
+ /* Remove fid from pool */
+ rc = cfa_tpm_fid_rem(tpm, pool_id, fid);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR,
+ "cfa_tpm_fid_rem() failed for fid:%d pool:%d",
+ fid, pool_id);
+
+ rc = cfa_tpm_srchm_by_fid(tpm,
+ CFA_SRCH_MODE_NEXT,
+ fid, &pool_id);
+ if (!rc)
+ PMD_DRV_LOG_LINE(ERR,
+ "tsid(%d) fid(%d) region(%s) pool_id(%d)",
+ tsid, fid,
+ tfc_ts_region_2_str(region, dir), pool_id);
+ } while (!rc);
+ }
+ }
+ rc = tfc_bp_vf_max(tfcp, &max_fid);
+ if (rc)
+ return rc;
+
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ for (region = 0; region < CFA_REGION_TYPE_MAX; region++) {
+ /*
+ * Get the TPM and then check to see if the fid is associated
+ * with any of the pools
+ */
+ rc = cfa_tim_tpm_inst_get(tim, tsid, region, dir, &tpm);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to get TPM for tsid:%d dir:%d",
+ tsid, dir);
+ return -EINVAL;
+ }
+ for (lfid = BNXT_FIRST_PF_FID; lfid <= max_fid; lfid++) {
+ rc = cfa_tpm_srchm_by_fid(tpm, CFA_SRCH_MODE_FIRST,
+ lfid, &pool_id);
+ if (rc) /* FID not used */
+ continue;
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) fid(%d) region(%s) pool_id(%d)",
+ tsid, lfid, tfc_ts_region_2_str(region, dir),
+ pool_id);
+ do {
+ found_cnt++;
+ rc = cfa_tpm_srchm_by_fid(tpm,
+ CFA_SRCH_MODE_NEXT,
+ lfid, &pool_id);
+ if (rc == 0) {
+ PMD_DRV_LOG_LINE(ERR,
+ "tsid(%d) fid(%d) region(%s) pool_id(%d)",
+ tsid, lfid,
+ tfc_ts_region_2_str(region, dir), pool_id);
+ }
+ } while (!rc);
+ }
+ }
+ }
+ *pool_cnt = found_cnt;
+ return 0;
+}
+
+/* Public APIs */
+
+int tfc_tbl_scope_qcaps(struct tfc *tfcp, bool *tbl_scope_capable,
+ uint32_t *max_lkup_rec_cnt,
+ uint32_t *max_act_rec_cnt,
+ uint8_t *max_lkup_static_buckets_exp)
+{
+ int rc = 0;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+ if (tbl_scope_capable == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tbl_scope_capable pointer");
+ return -EINVAL;
+ }
+
+ rc = tfc_msg_tbl_scope_qcaps(tfcp, tbl_scope_capable, max_lkup_rec_cnt,
+ max_act_rec_cnt,
+ max_lkup_static_buckets_exp);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR,
+ "table scope qcaps message failed, rc:%s",
+ strerror(-rc));
+
+ return rc;
+}
+
+int tfc_tbl_scope_size_query(struct tfc *tfcp,
+ struct tfc_tbl_scope_size_query_parms *parms)
+{
+ int rc = 0;
+ enum cfa_dir dir;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+ if (parms == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid parms pointer");
+ return -EINVAL;
+ }
+
+ if (parms->factor > TFC_TBL_SCOPE_BUCKET_FACTOR_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid factor %u", parms->factor);
+ return -EINVAL;
+ }
+
+ for (dir = CFA_DIR_RX; dir < CFA_DIR_MAX; dir++) {
+ rc = calc_lkup_rec_cnt(parms->flow_cnt[dir],
+ parms->key_sz_in_bytes[dir],
+ parms->shared, parms->factor,
+ &parms->lkup_rec_cnt[dir],
+ &parms->static_bucket_cnt_exp[dir],
+ &parms->dynamic_bucket_cnt[dir]);
+ if (rc)
+ break;
+
+ rc = calc_act_rec_cnt(&parms->act_rec_cnt[dir],
+ parms->flow_cnt[dir],
+ parms->act_rec_sz_in_bytes[dir]);
+ if (rc)
+ break;
+
+ rc = calc_pool_sz_exp(&parms->lkup_pool_sz_exp[dir],
+ parms->lkup_rec_cnt[dir] -
+ (1 << parms->static_bucket_cnt_exp[dir]),
+ parms->max_pools);
+ if (rc)
+ break;
+
+ rc = calc_pool_sz_exp(&parms->act_pool_sz_exp[dir],
+ parms->act_rec_cnt[dir],
+ parms->max_pools);
+ if (rc)
+ break;
+
+ rc = calc_rec_start_offset(&parms->lkup_rec_start_offset[dir],
+ parms->static_bucket_cnt_exp[dir]);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
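+
+/*
+ * Illustrative usage sketch (values are examples only, not recommendations):
+ *
+ *	struct tfc_tbl_scope_size_query_parms qparms = { 0 };
+ *	enum cfa_dir dir;
+ *	int rc;
+ *
+ *	qparms.shared = true;
+ *	qparms.max_pools = 8;
+ *	qparms.factor = TFC_TBL_SCOPE_BUCKET_FACTOR_2;
+ *	for (dir = CFA_DIR_RX; dir < CFA_DIR_MAX; dir++) {
+ *		qparms.flow_cnt[dir] = 1024 * 1024;
+ *		qparms.key_sz_in_bytes[dir] = 16;
+ *		qparms.act_rec_sz_in_bytes[dir] = 64;
+ *	}
+ *	rc = tfc_tbl_scope_size_query(tfcp, &qparms);
+ *
+ * On success the lookup/action record counts, static bucket exponents and
+ * pool size exponents are filled in and can be passed on to
+ * tfc_tbl_scope_mem_alloc() via struct tfc_tbl_scope_mem_alloc_parms.
+ */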
+
+int tfc_tbl_scope_id_alloc(struct tfc *tfcp, bool shared,
+ enum cfa_app_type app_type, uint8_t *tsid,
+ bool *first)
+{
+ int rc;
+ bool valid = true;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+ if (tsid == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tsid pointer");
+ return -EINVAL;
+ }
+ if (first == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid first pointer");
+ return -EINVAL;
+ }
+ if (app_type >= CFA_APP_TYPE_INVALID) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid app type");
+ return -EINVAL;
+ }
+ rc = tfc_msg_tbl_scope_id_alloc(tfcp, ((struct bnxt *)tfcp->bp)->fw_fid,
+ shared, app_type, tsid, first);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR,
+ "table scope ID alloc message failed, rc:%s",
+ strerror(-rc));
+ } else {
+ rc = tfo_ts_set(tfcp->tfo, *tsid, shared, app_type, valid, 0);
+ }
+ return rc;
+}
+
+int tfc_tbl_scope_mem_alloc(struct tfc *tfcp, uint16_t fid, uint8_t tsid,
+ struct tfc_tbl_scope_mem_alloc_parms *parms)
+{
+ struct tfc_ts_mem_cfg lkup_mem_cfg[CFA_DIR_MAX];
+ struct tfc_ts_mem_cfg act_mem_cfg[CFA_DIR_MAX];
+ uint64_t lkup_base_addr[2];
+ uint64_t act_base_addr[2];
+ int dir;
+ int rc = 0;
+ bool shared = false;
+ uint32_t page_sz;
+ uint16_t pfid;
+ uint8_t lkup_pbl_level[2];
+ uint8_t act_pbl_level[2];
+ bool valid, cfg_done;
+ uint8_t cfg_cnt;
+ bool is_pf;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (parms == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid parms pointer");
+ return -EINVAL;
+ }
+
+ if (tfo_ts_validate(tfcp->tfo, tsid, &valid) != 0) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tsid(%d) object", tsid);
+ return -EINVAL;
+ }
+
+ if (parms->local && !valid) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) not allocated", tsid);
+ return -EINVAL;
+ }
+
+ /* Normalize page size to a power of 2 */
+ page_sz = 1 << next_pow2(parms->pbl_page_sz_in_bytes);
+ if (parms->pbl_page_sz_in_bytes != page_sz ||
+ (page_sz & VALID_PAGE_ALIGNMENTS) == 0) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid page size %d",
+ parms->pbl_page_sz_in_bytes);
+ return -EINVAL;
+ }
+
+ memset(lkup_mem_cfg, 0, sizeof(lkup_mem_cfg));
+ memset(act_mem_cfg, 0, sizeof(act_mem_cfg));
+
+ rc = tfc_get_pfid(tfcp, &pfid);
+ if (rc)
+ return rc;
+
+ rc = tfc_bp_is_pf(tfcp, &is_pf);
+ if (rc)
+ return rc;
+
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ struct tfc_ts_pool_info pi;
+
+ rc = tfo_ts_get_pool_info(tfcp->tfo, tsid, dir, &pi);
+ if (rc)
+ return rc;
+
+ pi.lkup_pool_sz_exp = parms->lkup_pool_sz_exp[dir];
+ pi.act_pool_sz_exp = parms->act_pool_sz_exp[dir];
+ rc = tfo_ts_set_pool_info(tfcp->tfo, tsid, dir, &pi);
+ if (rc)
+ return rc;
+ }
+
+ /*
+ * A shared table scope will have more than 1 pool
+ */
+ if (parms->max_pools > 1)
+ shared = true;
+
+ /* If we are running on a PF, we will allocate memory locally
+ */
+ if (is_pf) {
+ struct tbl_scope_pools_create_parms cparms;
+
+ cfg_done = false;
+ cfg_cnt = 0;
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ lkup_mem_cfg[dir].rec_cnt = parms->lkup_rec_cnt[dir];
+ lkup_mem_cfg[dir].lkup_rec_start_offset =
+ 1 << parms->static_bucket_cnt_exp[dir];
+ lkup_mem_cfg[dir].entry_size = RECORD_SIZE;
+
+ PMD_DRV_LOG_LINE(DEBUG, "Alloc lkup table: dir %d", dir);
+
+ rc = alloc_link_pbl(&lkup_mem_cfg[dir],
+ parms->pbl_page_sz_in_bytes,
+ tsid,
+ dir,
+ "lkup");
+ if (rc)
+ goto cleanup;
+
+ lkup_base_addr[dir] = lkup_mem_cfg[dir].l0_dma_addr;
+ lkup_pbl_level[dir] = lkup_mem_cfg[dir].num_lvl - 1;
+
+ rc = tfc_msg_backing_store_cfg_v2(tfcp, tsid, dir,
+ CFA_REGION_TYPE_LKUP,
+ lkup_base_addr[dir],
+ lkup_pbl_level[dir],
+ parms->pbl_page_sz_in_bytes,
+ parms->lkup_rec_cnt[dir],
+ parms->static_bucket_cnt_exp[dir],
+ cfg_done);
+
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR,
+ "backing store config message "
+ "failed dir(%s) lkup, rc:%s",
+ dir == CFA_DIR_RX ? "rx" : "tx",
+ strerror(-rc));
+ goto cleanup;
+ }
+
+ rc = tfo_ts_set_mem_cfg(tfcp->tfo, tsid, dir, CFA_REGION_TYPE_LKUP,
+ parms->local, &lkup_mem_cfg[dir]);
+ if (rc)
+ goto cleanup;
+
+ PMD_DRV_LOG_LINE(DEBUG, "Alloc action table: dir %d", dir);
+
+ act_mem_cfg[dir].rec_cnt = parms->act_rec_cnt[dir];
+ act_mem_cfg[dir].entry_size = RECORD_SIZE;
+
+ rc = alloc_link_pbl(&act_mem_cfg[dir],
+ parms->pbl_page_sz_in_bytes,
+ tsid,
+ dir,
+ "act");
+ if (rc)
+ goto cleanup;
+
+
+ act_base_addr[dir] = act_mem_cfg[dir].l0_dma_addr;
+ act_pbl_level[dir] = act_mem_cfg[dir].num_lvl - 1;
+
+ cfg_done = false;
+
+ if (cfg_cnt)
+ cfg_done = true;
+
+ rc = tfc_msg_backing_store_cfg_v2(tfcp, tsid, dir,
+ CFA_REGION_TYPE_ACT,
+ act_base_addr[dir],
+ act_pbl_level[dir],
+ parms->pbl_page_sz_in_bytes,
+ parms->act_rec_cnt[dir], 0,
+ cfg_done);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR,
+ "backing store config message "
+ "failed dir(%s) action, rc:%s",
+ dir == CFA_DIR_RX ? "rx" : "tx",
+ strerror(-rc));
+ goto cleanup;
+ }
+
+ /* Set shared and valid in local state */
+ valid = true;
+ rc = tfo_ts_set(tfcp->tfo, tsid, shared, CFA_APP_TYPE_TF,
+ valid, parms->max_pools);
+ if (rc)
+ goto cleanup;
+
+ rc = tfo_ts_set_mem_cfg(tfcp->tfo, tsid, dir, CFA_REGION_TYPE_ACT,
+ parms->local, &act_mem_cfg[dir]);
+ if (rc)
+ goto cleanup;
+
+ cfg_cnt++;
+ }
+ cparms.shared = shared;
+ cparms.max_pools = parms->max_pools;
+
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ cparms.lkup_pool_sz_exp[dir] = parms->lkup_pool_sz_exp[dir];
+ cparms.act_pool_sz_exp[dir] = parms->act_pool_sz_exp[dir];
+ }
+
+ rc = tbl_scope_pools_create(tfcp, tsid, &cparms);
+ if (rc)
+ goto cleanup;
+
+ /* If not shared, allocate the single pool_id in each region
+ * so that we can save the associated fid for the table scope
+ */
+ if (!shared) {
+ uint16_t pool_id;
+ enum cfa_region_type region;
+ uint16_t max_vf;
+
+ rc = tfc_bp_vf_max(tfcp, &max_vf);
+ if (rc)
+ return rc;
+
+ if (fid > max_vf) {
+ PMD_DRV_LOG_LINE(ERR, "fid out of range %d", fid);
+ return -EINVAL;
+ }
+
+ for (region = 0; region < CFA_REGION_TYPE_MAX; region++) {
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ rc = tfc_tbl_scope_pool_alloc(tfcp,
+ fid,
+ tsid,
+ region,
+ dir,
+ NULL,
+ &pool_id);
+ if (rc)
+ goto cleanup;
+ /* only 1 pool available */
+					if (pool_id != 0) {
+						rc = -EINVAL;
+						goto cleanup;
+					}
+ }
+ }
+ }
+
+ } else /* this is a VF */ {
+ /* If first or !shared, send message to PF to allocate the memory */
+ if (parms->first || !shared) {
+ struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_cmd req = { { 0 } };
+ struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_resp resp = { { 0 } };
+ uint16_t fid;
+
+ rc = tfc_get_fid(tfcp, &fid);
+ if (rc)
+ return rc;
+
+ req.hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_MEM_ALLOC_CFG_CMD;
+ req.hdr.fid = fid;
+ req.tsid = tsid;
+ req.max_pools = parms->max_pools;
+ for (dir = CFA_DIR_RX; dir < CFA_DIR_MAX; dir++) {
+ req.static_bucket_cnt_exp[dir] = parms->static_bucket_cnt_exp[dir];
+ req.dynamic_bucket_cnt[dir] = parms->dynamic_bucket_cnt[dir];
+ req.lkup_rec_cnt[dir] = parms->lkup_rec_cnt[dir];
+ req.lkup_pool_sz_exp[dir] = parms->lkup_pool_sz_exp[dir];
+ req.act_pool_sz_exp[dir] = parms->act_pool_sz_exp[dir];
+ req.act_rec_cnt[dir] = parms->act_rec_cnt[dir];
+ req.lkup_rec_start_offset[dir] = parms->lkup_rec_start_offset[dir];
+ }
+
+ rc = tfc_vf2pf_mem_alloc(tfcp, &req, &resp);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tfc_vf2pf_mem_alloc failed");
+ goto cleanup;
+ }
+
+ PMD_DRV_LOG_LINE(DEBUG, "tsid: %d, status %d", resp.tsid, resp.status);
+ }
+ /* Save off info for later use */
+ for (dir = CFA_DIR_RX; dir < CFA_DIR_MAX; dir++) {
+ lkup_mem_cfg[dir].rec_cnt = parms->lkup_rec_cnt[dir];
+ lkup_mem_cfg[dir].lkup_rec_start_offset =
+ 1 << parms->static_bucket_cnt_exp[dir];
+ lkup_mem_cfg[dir].entry_size = RECORD_SIZE;
+
+ act_mem_cfg[dir].rec_cnt = parms->act_rec_cnt[dir];
+ act_mem_cfg[dir].entry_size = RECORD_SIZE;
+
+ rc = tfo_ts_set_mem_cfg(tfcp->tfo,
+ tsid,
+ dir,
+ CFA_REGION_TYPE_LKUP,
+ true,
+ &lkup_mem_cfg[dir]);
+ if (rc)
+ goto cleanup;
+
+ rc = tfo_ts_set_mem_cfg(tfcp->tfo,
+ tsid,
+ dir,
+ CFA_REGION_TYPE_ACT,
+ true,
+ &act_mem_cfg[dir]);
+ if (rc)
+ goto cleanup;
+
+ /* Set shared and valid in local state */
+ valid = true;
+ rc = tfo_ts_set(tfcp->tfo, tsid, shared, CFA_APP_TYPE_TF,
+ valid, parms->max_pools);
+ }
+ }
+ return rc;
+cleanup:
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ unlink_and_free(&lkup_mem_cfg[dir],
+ parms->pbl_page_sz_in_bytes);
+ unlink_and_free(&act_mem_cfg[dir],
+ parms->pbl_page_sz_in_bytes);
+ }
+
+ memset(lkup_mem_cfg, 0, sizeof(lkup_mem_cfg));
+ memset(act_mem_cfg, 0, sizeof(act_mem_cfg));
+
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ (void)tfo_ts_set_mem_cfg(tfcp->tfo, tsid, dir,
+ CFA_REGION_TYPE_LKUP,
+ parms->local,
+ &lkup_mem_cfg[dir]);
+ (void)tfo_ts_set_mem_cfg(tfcp->tfo, tsid, dir,
+ CFA_REGION_TYPE_ACT,
+ parms->local,
+ &act_mem_cfg[dir]);
+ }
+ return rc;
+}
+
+int tfc_tbl_scope_mem_free(struct tfc *tfcp, uint16_t fid, uint8_t tsid)
+{
+ struct tfc_ts_mem_cfg mem_cfg;
+ bool local;
+ int dir, region;
+ int lrc = 0;
+ int rc = 0;
+ bool is_pf = false;
+ bool shared;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->tfo == NULL || tfcp->bp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp pointer not initialized");
+ return -EINVAL;
+ }
+
+ if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) invalid", tsid);
+ return -EINVAL;
+ }
+
+ rc = tfo_ts_get(tfcp->tfo, tsid, &shared, NULL, NULL, NULL);
+ if (rc)
+ return rc;
+
+ rc = tfc_bp_is_pf(tfcp, &is_pf);
+ if (rc)
+ return rc;
+
+ /* Lookup any memory config to get local */
+ rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, CFA_DIR_RX, CFA_REGION_TYPE_LKUP, &local,
+ &mem_cfg);
+ if (rc)
+ return rc;
+
+ if (!is_pf) {
+ PMD_DRV_LOG_LINE(DEBUG, "Send VF2PF message and await response");
+ struct tfc_vf2pf_tbl_scope_mem_free_cmd req = { { 0 } };
+ struct tfc_vf2pf_tbl_scope_mem_free_resp resp = { { 0 } };
+ uint16_t fid;
+
+ rc = tfc_get_fid(tfcp, &fid);
+ if (rc)
+ return rc;
+
+ req.hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_MEM_FREE_CMD;
+ req.hdr.fid = fid;
+ req.tsid = tsid;
+
+ rc = tfc_vf2pf_mem_free(tfcp, &req, &resp);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tfc_vf2pf_mem_free failed");
+ /* continue cleanup regardless */
+ }
+ PMD_DRV_LOG_LINE(DEBUG, "tsid: %d, status %d", resp.tsid, resp.status);
+ }
+
+ if (shared && is_pf) {
+ uint16_t pool_cnt;
+ uint16_t max_vf;
+
+ rc = tfc_bp_vf_max(tfcp, &max_vf);
+ if (rc)
+ return rc;
+
+ if (fid > max_vf) {
+ PMD_DRV_LOG_LINE(ERR, "invalid fid 0x%x", fid);
+ return -EINVAL;
+ }
+ rc = tbl_scope_tpm_fid_rem(tfcp, fid, tsid, &pool_cnt);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "error getting tsid(%d) pools status %s",
+ tsid, strerror(-rc));
+ return rc;
+ }
+ /* Then if there are still fids present, return */
+ if (pool_cnt) {
+ PMD_DRV_LOG_LINE(DEBUG, "tsid(%d) fids still present #pools(%d)",
+ tsid, pool_cnt);
+ return 0;
+ }
+ }
+
+ /* Send Deconfig HWRM before freeing memory */
+ rc = tfc_msg_tbl_scope_deconfig(tfcp, tsid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "deconfig failure: %s", strerror(-rc));
+ return rc;
+ }
+
+ for (region = 0; region < CFA_REGION_TYPE_MAX; region++) {
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ lrc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, dir, region, &local,
+ &mem_cfg);
+ if (lrc) {
+ rc = lrc;
+ continue;
+ }
+ /* memory only allocated on PF */
+ if (is_pf)
+ unlink_and_free(&mem_cfg, mem_cfg.pg_tbl[0].pg_size);
+
+ memset(&mem_cfg, 0, sizeof(mem_cfg));
+
+ /* memory freed, set local to false */
+ local = false;
+ (void)tfo_ts_set_mem_cfg(tfcp->tfo, tsid, dir, region, local,
+ &mem_cfg);
+ }
+ }
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) db err(%s), continuing",
+				 tsid, strerror(-rc));
+ }
+ if (is_pf) {
+ rc = tbl_scope_pools_destroy(tfcp, tsid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) pool err(%s) continuing",
+				 tsid, strerror(-rc));
+ }
+ }
+ /* cleanup state */
+ rc = tfo_ts_set(tfcp->tfo, tsid, false, CFA_APP_TYPE_INVALID, false, 0);
+
+ return rc;
+}
+
+int tfc_tbl_scope_fid_add(struct tfc *tfcp, uint16_t fid, uint8_t tsid,
+ uint16_t *fid_cnt)
+{
+ int rc = 0;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (((struct bnxt *)tfcp->bp)->fw_fid != fid) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid fid");
+ return -EINVAL;
+ }
+
+ if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) invalid", tsid);
+ return -EINVAL;
+ }
+
+ rc = tfc_msg_tbl_scope_fid_add(tfcp, fid, tsid, fid_cnt);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR,
+ "table scope fid add message failed, rc:%s",
+ strerror(-rc));
+
+ return rc;
+}
+
+int tfc_tbl_scope_fid_rem(struct tfc *tfcp, uint16_t fid, uint8_t tsid,
+ uint16_t *fid_cnt)
+{
+ struct tfc_ts_mem_cfg mem_cfg;
+ struct tfc_cpm *cpm_lkup;
+ struct tfc_cpm *cpm_act;
+ int rc = 0;
+ bool local;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->tfo == NULL || tfcp->bp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp pointer not initialized");
+ return -EINVAL;
+ }
+
+ if (((struct bnxt *)tfcp->bp)->fw_fid != fid) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid fid");
+ return -EINVAL;
+ }
+
+ if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) invalid", tsid);
+ return -EINVAL;
+ }
+
+ rc = tfc_msg_tbl_scope_fid_rem(tfcp, fid, tsid, fid_cnt);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR,
+ "table scope fid rem message failed, rc:%s",
+ strerror(-rc));
+
+ /*
+ * Check if any direction has a CPM instance and, if so, free
+ * it.
+ */
+ rc = tfo_ts_get_cpm_inst(tfcp->tfo, tsid, CFA_DIR_RX, &cpm_lkup,
+ &cpm_act);
+ if (rc == 0 && (cpm_lkup != NULL || cpm_act != NULL))
+ (void)tfc_tbl_scope_cpm_free(tfcp, tsid);
+
+ /*
+ * Check if any table has memory configured and, if so, free it.
+ */
+ rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, CFA_DIR_RX, CFA_REGION_TYPE_LKUP,
+ &local, &mem_cfg);
+ /* If mem already freed, then local is set to zero (false). */
+ if (rc == 0 && local)
+ (void)tfc_tbl_scope_mem_free(tfcp, fid, tsid);
+
+ rc = tfo_ts_set(tfcp->tfo, tsid, false, CFA_APP_TYPE_INVALID,
+ false, 0);
+
+ return rc;
+}
+
+int tfc_tbl_scope_cpm_alloc(struct tfc *tfcp, uint8_t tsid,
+ struct tfc_tbl_scope_cpm_alloc_parms *parms)
+{
+ int dir;
+ struct tfc_ts_pool_info pi;
+ bool is_shared;
+ int rc;
+ struct tfc_cmm *cmm_lkup = NULL;
+ struct tfc_cmm *cmm_act = NULL;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+ if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) invalid", tsid);
+ return -EINVAL;
+ }
+ if (tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, NULL, NULL)) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) info get failed", tsid);
+ return -EINVAL;
+ }
+
+ /* Create 4 CPM instances and set the pool_sz_exp and max_pools for each
+ */
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ tfo_ts_get_pool_info(tfcp->tfo, tsid, dir, &pi);
+ pi.lkup_max_contig_rec = parms->lkup_max_contig_rec[dir];
+ pi.act_max_contig_rec = parms->act_max_contig_rec[dir];
+ tfc_cpm_open(&pi.lkup_cpm, parms->max_pools);
+ tfc_cpm_set_pool_size(pi.lkup_cpm, (1 << pi.lkup_pool_sz_exp));
+ tfc_cpm_open(&pi.act_cpm, parms->max_pools);
+ tfc_cpm_set_pool_size(pi.act_cpm, (1 << pi.act_pool_sz_exp));
+ tfo_ts_set_cpm_inst(tfcp->tfo, tsid, dir, pi.lkup_cpm, pi.act_cpm);
+ tfo_ts_set_pool_info(tfcp->tfo, tsid, dir, &pi);
+
+		/* If not shared, create a CMM instance and populate the CPM with pool_id 0.
+ * If shared, a pool_id will be allocated during tfc_act_alloc() or
+ * tfc_em_insert() and the CMM instance will be created on the first
+ * call.
+ */
+ if (!is_shared) {
+ struct cfa_mm_query_parms qparms;
+ struct cfa_mm_open_parms oparms;
+ uint32_t pool_id = 0;
+ struct tfc_ts_mem_cfg mem_cfg;
+
+ /* ACTION */
+ rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, dir, CFA_REGION_TYPE_ACT,
+ NULL, &mem_cfg);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tfo_ts_get_mem_cfg() failed: %s",
+ strerror(-rc));
+ return -EINVAL;
+ }
+			/* Override the record size since there is a single pool
+			 * (pool_sz_exp is 0 in this case).
+			 */
+ tfc_cpm_set_pool_size(pi.act_cpm, mem_cfg.rec_cnt);
+
+			/* Create action CMM instance */
+ qparms.max_records = mem_cfg.rec_cnt;
+ qparms.max_contig_records = pi.act_max_contig_rec;
+ rc = cfa_mm_query(&qparms);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "cfa_mm_query() failed: %d",
+ rc);
+ return -EINVAL;
+ }
+
+			cmm_act = rte_zmalloc("tf", qparms.db_size, 0);
+			if (cmm_act == NULL) {
+				rc = -ENOMEM;
+				goto cleanup;
+			}
+			oparms.db_mem_size = qparms.db_size;
+			oparms.max_contig_records = qparms.max_contig_records;
+			oparms.max_records = qparms.max_records;
+			rc = cfa_mm_open(cmm_act, &oparms);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "cfa_mm_open() failed: %d", rc);
+ rc = -EINVAL;
+ goto cleanup;
+ }
+ /* Store CMM instance in the CPM for pool_id 0 */
+ rc = tfc_cpm_set_cmm_inst(pi.act_cpm, pool_id, cmm_act);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tfc_cpm_set_cmm_inst() failed: %d",
+ rc);
+ rc = -EINVAL;
+ goto cleanup;
+ }
+ /* LOOKUP */
+ rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, dir, CFA_REGION_TYPE_LKUP,
+ NULL, &mem_cfg);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tfo_ts_get_mem_cfg() failed: %s",
+ strerror(-rc));
+ rc = -EINVAL;
+ goto cleanup;
+ }
+ /* Create lkup pool CMM instance */
+ qparms.max_records = mem_cfg.rec_cnt;
+ qparms.max_contig_records = pi.lkup_max_contig_rec;
+ rc = cfa_mm_query(&qparms);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "cfa_mm_query() failed: %d", rc);
+ rc = -EINVAL;
+ goto cleanup;
+ }
+			cmm_lkup = rte_zmalloc("tf", qparms.db_size, 0);
+			if (cmm_lkup == NULL) {
+				rc = -ENOMEM;
+				goto cleanup;
+			}
+			oparms.db_mem_size = qparms.db_size;
+			oparms.max_contig_records = qparms.max_contig_records;
+			oparms.max_records = qparms.max_records;
+			rc = cfa_mm_open(cmm_lkup, &oparms);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "cfa_mm_open() failed: %d", rc);
+ rc = -EINVAL;
+ goto cleanup;
+ }
+			/* Override the record size since there is a single pool
+			 * (pool_sz_exp is 0 in this case).
+			 */
+ tfc_cpm_set_pool_size(pi.lkup_cpm, mem_cfg.rec_cnt);
+
+ /* Store CMM instance in the CPM for pool_id 0 */
+ rc = tfc_cpm_set_cmm_inst(pi.lkup_cpm, pool_id, cmm_lkup);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tfc_cpm_set_cmm_inst() failed: %d", rc);
+ rc = -EINVAL;
+ goto cleanup;
+ }
+ }
+ }
+
+ return 0;
+
+ cleanup:
+ if (cmm_lkup != NULL)
+ rte_free(cmm_lkup);
+ if (cmm_act != NULL)
+ rte_free(cmm_act);
+
+ return rc;
+}
+
+int tfc_tbl_scope_cpm_free(struct tfc *tfcp, uint8_t tsid)
+{
+ int dir;
+ struct tfc_ts_pool_info pi;
+ int rc;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->tfo == NULL || tfcp->bp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp pointer not initialized");
+ return -EINVAL;
+ }
+
+ if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) invalid", tsid);
+ return -EINVAL;
+ }
+
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ uint16_t pool_id;
+ struct tfc_cmm *cmm;
+ enum cfa_srch_mode srch_mode;
+
+ rc = tfo_ts_get_pool_info(tfcp->tfo, tsid, dir, &pi);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "pool info error(%s)", strerror(-rc));
+
+ /* Clean up lkup cpm/cmm instances */
+ srch_mode = CFA_SRCH_MODE_FIRST;
+ do {
+ rc = tfc_cpm_srchm_by_configured_pool(pi.lkup_cpm, srch_mode,
+ &pool_id, &cmm);
+ srch_mode = CFA_SRCH_MODE_NEXT;
+
+ if (rc == 0 && cmm) {
+ PMD_DRV_LOG_LINE(DEBUG, "free lkup_%s CMM for pool(%d)",
+ dir == CFA_DIR_RX ? "rx" : "tx",
+ pool_id);
+ cfa_mm_close(cmm);
+ rte_free(cmm);
+ }
+
+ } while (!rc);
+
+ tfc_cpm_close(pi.lkup_cpm);
+
+ /* Clean up action cpm/cmm instances */
+ srch_mode = CFA_SRCH_MODE_FIRST;
+ do {
+ rc = tfc_cpm_srchm_by_configured_pool(pi.act_cpm, srch_mode,
+ &pool_id, &cmm);
+ srch_mode = CFA_SRCH_MODE_NEXT;
+
+ if (rc == 0 && cmm) {
+ PMD_DRV_LOG_LINE(DEBUG, "free act_%s CMM for pool(%d)",
+ dir == CFA_DIR_RX ? "rx" : "tx",
+ pool_id);
+ cfa_mm_close(cmm);
+ rte_free(cmm);
+ }
+
+ } while (!rc);
+
+ tfc_cpm_close(pi.act_cpm);
+
+ rc = tfo_ts_set_cpm_inst(tfcp->tfo, tsid, dir, NULL, NULL);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "cpm inst error(%s)", strerror(-rc));
+
+ pi.lkup_cpm = NULL;
+ pi.act_cpm = NULL;
+ rc = tfo_ts_set_pool_info(tfcp->tfo, tsid, dir, &pi);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "pool info error(%s)", strerror(-rc));
+ }
+
+ return rc;
+}
+
+int tfc_tbl_scope_pool_alloc(struct tfc *tfcp, uint16_t fid, uint8_t tsid,
+ enum cfa_region_type region, enum cfa_dir dir,
+ uint8_t *pool_sz_exp, uint16_t *pool_id)
+{
+ int rc = 0;
+ void *tim;
+ void *tpm;
+ bool is_pf;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (pool_id == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid pool_id pointer");
+ return -EINVAL;
+ }
+
+ if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) invalid", tsid);
+ return -EINVAL;
+ }
+
+ rc = tfc_bp_is_pf(tfcp, &is_pf);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to get PF status");
+ return -EINVAL;
+ }
+
+ if (is_pf) {
+ rc = tfo_tim_get(tfcp->tfo, &tim);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to get TIM");
+ return -EINVAL;
+ }
+
+ rc = cfa_tim_tpm_inst_get(tim,
+ tsid,
+ region,
+ dir,
+ &tpm);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to get TPM for tsid:%d region:%d dir:%d",
+ tsid, region, dir);
+ return -EINVAL;
+ }
+
+ rc = cfa_tpm_alloc(tpm, pool_id);
+ if (rc) {
+			PMD_DRV_LOG_LINE(ERR, "Failed to allocate pool_id: %s", strerror(-rc));
+ return rc;
+ }
+
+ if (pool_sz_exp) {
+ rc = cfa_tpm_pool_size_get(tpm, pool_sz_exp);
+ if (rc) {
+				PMD_DRV_LOG_LINE(ERR, "Failed to get pool size exp: %s",
+ strerror(-rc));
+ return rc;
+ }
+ }
+
+ rc = cfa_tpm_fid_add(tpm, *pool_id, fid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to set pool_id %d fid 0x%x %s",
+ *pool_id, fid, strerror(-rc));
+ return rc;
+ }
+
+ } else { /* !PF */
+ struct tfc_vf2pf_tbl_scope_pool_alloc_cmd req = { { 0 } };
+ struct tfc_vf2pf_tbl_scope_pool_alloc_resp resp = { { 0 } };
+ uint16_t fid;
+
+ rc = tfc_get_fid(tfcp, &fid);
+ if (rc)
+ return rc;
+
+ req.hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_POOL_ALLOC_CMD;
+ req.hdr.fid = fid;
+ req.tsid = tsid;
+ req.dir = dir;
+ req.region = region;
+
+ /* Send message to PF to allocate pool */
+ rc = tfc_vf2pf_pool_alloc(tfcp, &req, &resp);
+ if (rc != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tfc_vf2pf_pool_alloc failed");
+ return rc;
+ }
+ *pool_id = resp.pool_id;
+ if (pool_sz_exp)
+ *pool_sz_exp = resp.pool_sz_exp;
+ }
+ return rc;
+}
+
+int tfc_tbl_scope_pool_free(struct tfc *tfcp, uint16_t fid, uint8_t tsid,
+ enum cfa_region_type region, enum cfa_dir dir,
+ uint16_t pool_id)
+{
+ int rc = 0;
+ void *tim;
+ void *tpm;
+ bool is_pf;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) invalid", tsid);
+ return -EINVAL;
+ }
+
+ rc = tfc_bp_is_pf(tfcp, &is_pf);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to get PF status");
+ return -EINVAL;
+ }
+
+ if (is_pf) {
+ rc = tfo_tim_get(tfcp->tfo, &tim);
+ if (rc)
+ return -EINVAL;
+
+ rc = cfa_tim_tpm_inst_get(tim,
+ tsid,
+ region,
+ dir,
+ &tpm);
+ if (rc)
+ return -EINVAL;
+
+ rc = cfa_tpm_fid_rem(tpm, pool_id, fid);
+ if (rc)
+ return -EINVAL;
+
+ rc = cfa_tpm_free(tpm, pool_id);
+
+ return rc;
+ }
+ /* Pools are currently only deleted on the VF when the
+ * VF calls tfc_tbl_scope_mem_free() if shared.
+ */
+ return rc;
+}
+
+int tfc_tbl_scope_config_state_get(struct tfc *tfcp,
+ uint8_t tsid,
+ bool *configured)
+{
+ int rc = 0;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) invalid", tsid);
+ return -EINVAL;
+ }
+
+ rc = tfc_msg_tbl_scope_config_get(tfcp, tsid, configured);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "message failed %s", strerror(-rc));
+ return rc;
+ }
+
+ return rc;
+}
+
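+/* Walk all pools owned by the given fid (starting from pool_id), flush any EM
+ * entries for lookup-region pools, and remove the fid from each pool in the
+ * TPM. The number of pools visited is returned via found_cnt.
+ */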
+static void tfc_tbl_scope_delete_by_pool(uint16_t *found_cnt,
+ struct tfc *tfcp,
+ uint16_t fid,
+ enum cfa_region_type region,
+ uint8_t tsid,
+ enum cfa_dir dir,
+ uint16_t pool_id,
+ uint8_t *data,
+ void *tpm)
+{
+ uint16_t fc = 0;
+ int rc;
+
+ do {
+ fc++;
+
+ if (region == CFA_REGION_TYPE_LKUP) {
+ /*
+ * Flush EM entries associated with this TS.
+ */
+ rc = tfc_em_delete_entries_by_pool_id(tfcp,
+ tsid,
+ dir,
+ pool_id,
+ 0,
+ data);
+
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR,
+					 "tfc_em_delete_entries_by_pool_id() failed for TS:%d Dir:%d pool:%d",
+ tsid, dir, pool_id);
+ }
+
+ /* Remove fid from pool */
+ rc = cfa_tpm_fid_rem(tpm, pool_id, fid);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR,
+ "cfa_tpm_fid_rem() failed for fid:%d pool:%d",
+ fid, pool_id);
+
+ /* Next! */
+ rc = cfa_tpm_srchm_by_fid(tpm,
+ CFA_SRCH_MODE_NEXT,
+ fid,
+ &pool_id);
+ } while (!rc);
+
+ *found_cnt = fc;
+}
+
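+/* PF-only: remove a function (fid) from every shared, valid table scope by
+ * flushing its EM entries and releasing its pool associations.
+ */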
+int tfc_tbl_scope_func_reset(struct tfc *tfcp, uint16_t fid)
+{
+ int rc = 0;
+ bool shared;
+ enum cfa_app_type app;
+ bool valid;
+ uint8_t tsid;
+ enum cfa_dir dir;
+ uint16_t pool_id;
+ uint16_t found_cnt = 0;
+ void *tim;
+ void *tpm;
+ enum cfa_region_type region;
+ bool is_pf;
+ uint8_t *data;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+ rc = tfc_bp_is_pf(tfcp, &is_pf);
+ if (rc)
+ return rc;
+
+ if (!is_pf) {
+ PMD_DRV_LOG_LINE(ERR, "only valid for PF");
+ return -EINVAL;
+ }
+
+ rc = tfo_tim_get(tfcp->tfo, &tim);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to get TIM");
+ return -EINVAL;
+ }
+
+	data = rte_zmalloc("data", 32 * TFC_MPC_BYTES_PER_WORD, 32);
+	if (data == NULL)
+		return -ENOMEM;
+
+ for (tsid = 1; tsid < TFC_TBL_SCOPE_MAX; tsid++) {
+ rc = tfo_ts_get(tfcp->tfo, tsid, &shared, &app, &valid, NULL);
+ if (rc)
+ continue; /* TS is not used, move on to the next */
+
+ if (!shared || !valid)
+ continue; /* TS invalid or not shared, move on */
+
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ for (region = 0; region < CFA_REGION_TYPE_MAX; region++) {
+ /*
+ * Get the TPM and then check to see if the fid is associated
+ * with any of the pools
+ */
+ rc = cfa_tim_tpm_inst_get(tim, tsid, region, dir, &tpm);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR,
+ "Failed to get TPM for tsid:%d dir:%d",
+ tsid, dir);
+ rte_free(data);
+ return -EINVAL;
+ }
+
+ rc = cfa_tpm_srchm_by_fid(tpm, CFA_SRCH_MODE_FIRST, fid, &pool_id);
+ if (rc) /* FID not used */
+ continue;
+
+ tfc_tbl_scope_delete_by_pool(&found_cnt,
+ tfcp,
+ fid,
+ region,
+ tsid,
+ dir,
+ pool_id,
+ data,
+ tpm);
+ }
+ }
+ }
+
+ rte_free(data);
+
+ if (found_cnt == 0)
+ PMD_DRV_LOG_LINE(ERR, "FID:%d is not associated with any pool", fid);
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+#include <stdio.h>
+#include "tfc.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "tfc_msg.h"
+#include "tfc_util.h"
+
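+/* TCAM resource APIs. Each call validates the tfc handle and tcam parameters,
+ * looks up the current session id (SID) and forwards the request to firmware
+ * via the corresponding tfc_msg_tcam_* message.
+ */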
+int tfc_tcam_alloc(struct tfc *tfcp, uint16_t fid, enum cfa_track_type tt,
+ uint16_t priority, uint8_t key_sz_in_bytes,
+ struct tfc_tcam_info *tcam_info)
+{
+ int rc = 0;
+ struct bnxt *bp;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->bp == NULL || tfcp->tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp not initialized");
+ return -EINVAL;
+ }
+ bp = tfcp->bp;
+
+ if (tcam_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tcam_info is NULL");
+ return -EINVAL;
+ }
+
+ if (tcam_info->rsubtype >= CFA_RSUBTYPE_TCAM_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tcam subtype: %d",
+ tcam_info->rsubtype);
+ return -EINVAL;
+ }
+
+ if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "bp not PF or trusted VF");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_tcam_alloc(tfcp, fid, sid, tcam_info->dir,
+ tcam_info->rsubtype, tt, priority,
+ key_sz_in_bytes, &tcam_info->id);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "alloc failed %s:%s %s",
+ tfc_dir_2_str(tcam_info->dir),
+ tfc_tcam_2_str(tcam_info->rsubtype),
+ strerror(-rc));
+
+ return rc;
+}
+
+int tfc_tcam_alloc_set(struct tfc *tfcp, uint16_t fid, enum cfa_track_type tt,
+ uint16_t priority, struct tfc_tcam_info *tcam_info,
+ const struct tfc_tcam_data *tcam_data)
+{
+ int rc = 0;
+ struct bnxt *bp;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->bp == NULL || tfcp->tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp not initialized");
+ return -EINVAL;
+ }
+ bp = tfcp->bp;
+
+ if (tcam_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tcam_info is NULL");
+ return -EINVAL;
+ }
+
+ if (tcam_data == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tcam_data is NULL");
+ return -EINVAL;
+ }
+
+ if (tcam_info->rsubtype >= CFA_RSUBTYPE_TCAM_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tcam subtype: %d",
+ tcam_info->rsubtype);
+ return -EINVAL;
+ }
+
+ if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "bp not PF or trusted VF");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_tcam_alloc_set(tfcp, fid, sid, tcam_info->dir,
+ tcam_info->rsubtype, tt, &tcam_info->id,
+ priority, tcam_data->key,
+ tcam_data->key_sz_in_bytes,
+ tcam_data->mask,
+ tcam_data->remap,
+ tcam_data->remap_sz_in_bytes);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "alloc_set failed: %s:%s %s",
+ tfc_dir_2_str(tcam_info->dir),
+ tfc_tcam_2_str(tcam_info->rsubtype), strerror(-rc));
+
+ return rc;
+}
+
+int tfc_tcam_set(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_tcam_info *tcam_info,
+ const struct tfc_tcam_data *tcam_data)
+{
+ int rc = 0;
+ struct bnxt *bp;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->bp == NULL || tfcp->tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp not initialized");
+ return -EINVAL;
+ }
+ bp = tfcp->bp;
+
+ if (tcam_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tcam_info is NULL");
+ return -EINVAL;
+ }
+
+ if (tcam_data == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tcam_data is NULL");
+ return -EINVAL;
+ }
+
+ if (tcam_info->rsubtype >= CFA_RSUBTYPE_TCAM_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tcam subtype: %d",
+ tcam_info->rsubtype);
+ return -EINVAL;
+ }
+
+ if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "bp not PF or trusted VF");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_tcam_set(tfcp, fid, sid, tcam_info->dir,
+ tcam_info->rsubtype, tcam_info->id,
+ tcam_data->key,
+ tcam_data->key_sz_in_bytes,
+ tcam_data->mask, tcam_data->remap,
+ tcam_data->remap_sz_in_bytes);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "set failed: %s:%s %d %s",
+ tfc_dir_2_str(tcam_info->dir),
+ tfc_tcam_2_str(tcam_info->rsubtype), tcam_info->id,
+ strerror(-rc));
+
+ return rc;
+}
+
+int tfc_tcam_get(struct tfc *tfcp, uint16_t fid,
+ const struct tfc_tcam_info *tcam_info,
+ struct tfc_tcam_data *tcam_data)
+{
+ int rc = 0;
+ struct bnxt *bp;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->bp == NULL || tfcp->tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp not initialized");
+ return -EINVAL;
+ }
+ bp = tfcp->bp;
+
+ if (tcam_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tcam_info is NULL");
+ return -EINVAL;
+ }
+
+ if (tcam_data == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tcam_data is NULL");
+ return -EINVAL;
+ }
+
+ if (tcam_info->rsubtype >= CFA_RSUBTYPE_TCAM_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tcam subtype: %d",
+ tcam_info->rsubtype);
+ return -EINVAL;
+ }
+
+ if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "bp not PF or trusted VF");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_tcam_get(tfcp, fid, sid, tcam_info->dir,
+ tcam_info->rsubtype, tcam_info->id,
+ tcam_data->key, &tcam_data->key_sz_in_bytes,
+ tcam_data->mask, tcam_data->remap,
+ &tcam_data->remap_sz_in_bytes);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "get failed: %s:%s %d %s",
+ tfc_dir_2_str(tcam_info->dir),
+ tfc_tcam_2_str(tcam_info->rsubtype), tcam_info->id,
+ strerror(-rc));
+
+ return rc;
+}
+
+int tfc_tcam_free(struct tfc *tfcp, uint16_t fid, const struct tfc_tcam_info *tcam_info)
+{
+ int rc = 0;
+ struct bnxt *bp;
+ uint16_t sid;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (tfcp->bp == NULL || tfcp->tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tfcp not initialized");
+ return -EINVAL;
+ }
+
+ if (tcam_info == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "tcam_info is NULL");
+ return -EINVAL;
+ }
+
+ if (tcam_info->rsubtype >= CFA_RSUBTYPE_TCAM_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tcam subtype: %d",
+ tcam_info->rsubtype);
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ PMD_DRV_LOG_LINE(ERR, "bp not PF or trusted VF");
+ return -EINVAL;
+ }
+
+ rc = tfo_sid_get(tfcp->tfo, &sid);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "Failed to retrieve SID, rc:%s",
+ strerror(-rc));
+ return rc;
+ }
+
+ rc = tfc_msg_tcam_free(tfcp, fid, sid, tcam_info->dir,
+ tcam_info->rsubtype, tcam_info->id);
+ if (rc)
+ PMD_DRV_LOG_LINE(ERR, "free failed: %s:%s:%d %s",
+ tfc_dir_2_str(tcam_info->dir),
+ tfc_tcam_2_str(tcam_info->rsubtype), tcam_info->id,
+ strerror(-rc));
+ return rc;
+}
new file mode 100644
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2021 Broadcom
+ * All rights reserved.
+ */
+
+#include <string.h>
+
+#include "tfc.h"
+#include "tfo.h"
+#include "tfc_util.h"
+
+const char *
+tfc_dir_2_str(enum cfa_dir dir)
+{
+ switch (dir) {
+ case CFA_DIR_RX:
+ return "RX";
+ case CFA_DIR_TX:
+ return "TX";
+ default:
+ return "Invalid direction";
+ }
+}
+
+const char *
+tfc_ident_2_str(enum cfa_resource_subtype_ident id_stype)
+{
+ switch (id_stype) {
+ case CFA_RSUBTYPE_IDENT_L2CTX:
+ return "ident_l2_ctx";
+ case CFA_RSUBTYPE_IDENT_PROF_FUNC:
+ return "ident_prof_func";
+ case CFA_RSUBTYPE_IDENT_WC_PROF:
+ return "ident_wc_prof";
+ case CFA_RSUBTYPE_IDENT_EM_PROF:
+ return "ident_em_prof";
+ case CFA_RSUBTYPE_IDENT_L2_FUNC:
+ return "ident_l2_func";
+ default:
+ return "Invalid identifier subtype";
+ }
+}
+
+const char *
+tfc_tcam_2_str(enum cfa_resource_subtype_tcam tcam_stype)
+{
+ switch (tcam_stype) {
+ case CFA_RSUBTYPE_TCAM_L2CTX:
+ return "tcam_l2_ctx";
+ case CFA_RSUBTYPE_TCAM_PROF_TCAM:
+ return "tcam_prof_tcam";
+ case CFA_RSUBTYPE_TCAM_WC:
+ return "tcam_wc";
+ case CFA_RSUBTYPE_TCAM_CT_RULE:
+ return "tcam_ct_rule";
+ case CFA_RSUBTYPE_TCAM_VEB:
+ return "tcam_veb";
+ case CFA_RSUBTYPE_TCAM_FEATURE_CHAIN:
+ return "tcam_fc";
+ default:
+ return "Invalid tcam subtype";
+ }
+}
+
+const char *
+tfc_idx_tbl_2_str(enum cfa_resource_subtype_idx_tbl tbl_stype)
+{
+ switch (tbl_stype) {
+ case CFA_RSUBTYPE_IDX_TBL_STAT64:
+ return "idx_tbl_64b_statistics";
+ case CFA_RSUBTYPE_IDX_TBL_METER_PROF:
+ return "idx_tbl_meter_prof";
+ case CFA_RSUBTYPE_IDX_TBL_METER_INST:
+ return "idx_tbl_meter_inst";
+ case CFA_RSUBTYPE_IDX_TBL_MIRROR:
+ return "idx_tbl_mirror";
+ case CFA_RSUBTYPE_IDX_TBL_METADATA_PROF:
+ return "idx_tbl_metadata_prof";
+ case CFA_RSUBTYPE_IDX_TBL_METADATA_LKUP:
+ return "idx_tbl_metadata_lkup";
+ case CFA_RSUBTYPE_IDX_TBL_METADATA_ACT:
+ return "idx_tbl_metadata_act";
+ case CFA_RSUBTYPE_IDX_TBL_EM_FKB:
+ return "idx_tbl_em_fkb";
+ case CFA_RSUBTYPE_IDX_TBL_WC_FKB:
+ return "idx_tbl_wc_fkb";
+ case CFA_RSUBTYPE_IDX_TBL_EM_FKB_MASK:
+ return "idx_tbl_em_fkb_mask";
+ case CFA_RSUBTYPE_IDX_TBL_CT_STATE:
+ return "idx_tbl_ct_state";
+ case CFA_RSUBTYPE_IDX_TBL_RANGE_PROF:
+ return "idx_tbl_range_prof";
+ case CFA_RSUBTYPE_IDX_TBL_RANGE_ENTRY:
+ return "idx_tbl_range_entry";
+ default:
+ return "Invalid idx tbl subtype";
+ }
+}
+
+const char *
+tfc_if_tbl_2_str(enum cfa_resource_subtype_if_tbl tbl_stype)
+{
+ switch (tbl_stype) {
+ case CFA_RSUBTYPE_IF_TBL_ILT:
+ return "if_tbl_ilt";
+ case CFA_RSUBTYPE_IF_TBL_VSPT:
+ return "if_tbl_vspt";
+ case CFA_RSUBTYPE_IF_TBL_PROF_PARIF_DFLT_ACT_PTR:
+ return "if_tbl_parif_dflt_act_ptr";
+ case CFA_RSUBTYPE_IF_TBL_PROF_PARIF_ERR_ACT_PTR:
+ return "if_tbl_parif_err_act_ptr";
+ case CFA_RSUBTYPE_IF_TBL_EPOCH0:
+ return "if_tbl_epoch0";
+ case CFA_RSUBTYPE_IF_TBL_EPOCH1:
+ return "if_tbl_epoch1";
+ case CFA_RSUBTYPE_IF_TBL_LAG:
+ return "if_tbl_lag";
+ default:
+ return "Invalid if tbl subtype";
+ }
+}
+
+const char *
+tfc_ts_region_2_str(enum cfa_region_type region, enum cfa_dir dir)
+{
+ switch (region) {
+ case CFA_REGION_TYPE_LKUP:
+ if (dir == CFA_DIR_RX)
+ return "ts_lookup_rx";
+ else if (dir == CFA_DIR_TX)
+			return "ts_lookup_tx";
+ else
+ return "ts_lookup_invalid_dir";
+ case CFA_REGION_TYPE_ACT:
+ if (dir == CFA_DIR_RX)
+ return "ts_action_rx";
+ else if (dir == CFA_DIR_TX)
+ return "ts_action_tx";
+ else
+ return "ts_action_invalid_dir";
+ default:
+ return "Invalid ts region";
+ }
+}
+
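+/* Extract a blen-bit field starting at bit position offset from an array of
+ * 32-bit words (bits numbered from the LSB of each word). The caller must
+ * ensure offset + blen stays within the buffer and that blen is at most 32.
+ */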
+uint32_t
+tfc_getbits(uint32_t *data, int offset, int blen)
+{
+ int start = offset >> 5;
+ int end = (offset + blen - 1) >> 5;
+ uint32_t val = data[start] >> (offset & 0x1f);
+
+ if (start != end)
+ val |= (data[start + 1] << (32 - (offset & 0x1f)));
+ return (blen == 32) ? val : (val & ((1 << blen) - 1));
+}
+
+#define BITS_IN_VAR(x) (sizeof(x) * 8)
+
+/*
+ * Calculate the smallest power of 2 that is >= x. The return value is the
+ * exponent of 2.
+ */
+uint32_t next_pow2(uint32_t x)
+{
+ /*
+ * This algorithm calculates the nearest power of 2 greater than or
+ * equal to x:
+ * The function __builtin_clz returns the number of leading 0-bits in
+ * an unsigned int.
+ * Subtract this from the number of bits in x to get the power of 2. In
+ * the examples below, an int is assumed to have 32 bits.
+ *
+ * Example 1:
+ * x == 2
+ * __builtin_clz(1) = 31
+ * 32 - 31 = 1
+ * 2^1 = 2
+ * Example 2:
+ * x = 63
+ * __builtin_clz(62) = 26
+ * 32 - 26 = 6
+ * 2^6 = 64
+ */
+ return x == 1 ? 1 : (BITS_IN_VAR(x) - __builtin_clz(x - 1));
+}
+
+/*
+ * Calculate the largest power of 2 that is less than x. The return value is
+ * the exponent of 2.
+ */
+uint32_t prev_pow2(uint32_t x)
+{
+ /*
+ * This algorithm calculates the nearest power of 2 less than x:
+ * The function __builtin_clz returns the number of leading 0-bits in
+ * an unsigned int.
+ * Subtract this from one less than the number of bits in x to get
+ * the power of 2. In the examples below, an int is assumed to have
+ * 32 bits.
+ *
+ * Example 1:
+ * x = 2
+ * __builtin_clz(1) = 31
+ * 31 - 31 = 0
+ * 2^0 = 1
+ * Example 2:
+ * x = 63
+ * __builtin_clz(62) = 26
+ * 31 - 26 = 5
+ * 2^5 = 32
+ * Example 3:
+ * x = 64
+ * __builtin_clz(63) = 26
+ * 31 - 26 = 5
+ * 2^5 = 32
+ */
+ return x == 1 ? 0 : (BITS_IN_VAR(x) - 1 - __builtin_clz(x - 1));
+}
+
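+/* Round x up to the nearest multiple of y; y must be non-zero. */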
+uint32_t roundup32(uint32_t x, uint32_t y)
+{
+ return ((x + y - 1) / y) * y;
+}
+
+uint64_t roundup64(uint64_t x, uint64_t y)
+{
+ return ((x + y - 1) / y) * y;
+}
new file mode 100644
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2021 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TFC_UTIL_H_
+#define _TFC_UTIL_H_
+
+#include "tfc.h"
+#include "tfo.h"
+
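+/* MPC operations report errors as return codes in the range
+ * [TFC_MPC_ERROR_MAX, TFC_MPC_ERROR_MIN], i.e. -9..-1.
+ */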
+#define TFC_MPC_ERROR_MIN -1
+#define TFC_MPC_ERROR_MAX -9
+static inline bool tfc_is_mpc_error(int rc)
+{
+	return rc <= TFC_MPC_ERROR_MIN && rc >= TFC_MPC_ERROR_MAX;
+}
+
+/**
+ * Helper function converting direction to text string
+ *
+ * [in] dir
+ * Receive or transmit direction identifier
+ *
+ * Returns:
+ * Pointer to a char string holding the string for the direction
+ */
+const char *tfc_dir_2_str(enum cfa_dir dir);
+
+/**
+ * Helper function converting identifier subtype to text string
+ *
+ * [in] id_stype
+ * Identifier subtype
+ *
+ * Returns:
+ * Pointer to a char string holding the string for the identifier
+ */
+const char *tfc_ident_2_str(enum cfa_resource_subtype_ident id_stype);
+
+/**
+ * Helper function converting tcam subtype to text string
+ *
+ * [in] tcam_stype
+ * TCAM subtype
+ *
+ * Returns:
+ * Pointer to a char string holding the string for the tcam
+ */
+const char *tfc_tcam_2_str(enum cfa_resource_subtype_tcam tcam_stype);
+
+/**
+ * Helper function converting index tbl subtype to text string
+ *
+ * [in] idx_tbl_stype
+ * Index table subtype
+ *
+ * Returns:
+ * Pointer to a char string holding the string for the table subtype
+ */
+const char *tfc_idx_tbl_2_str(enum cfa_resource_subtype_idx_tbl idx_tbl_stype);
+
+/**
+ * Helper function converting table scope lkup/act type and direction (region)
+ * to string
+ *
+ * [in] region
+ * Region type (LKUP/ACT)
+ *
+ * [in] dir
+ * Direction
+ *
+ * Returns:
+ * Pointer to a char string holding the string for the table subtype
+ */
+const char *tfc_ts_region_2_str(enum cfa_region_type region, enum cfa_dir dir);
+
+/**
+ * Helper function converting if tbl subtype to text string
+ *
+ * [in] if_tbl_stype
+ * If table subtype
+ *
+ * Returns:
+ * Pointer to a char string holding the string for the table subtype
+ */
+const char *tfc_if_tbl_2_str(enum cfa_resource_subtype_if_tbl if_tbl_stype);
+
+/**
+ * Helper function retrieving field value from the buffer
+ *
+ * [in] data
+ * buffer
+ *
+ * [in] offset
+ * field start bit position in the buffer
+ *
+ * [in] blen
+ * field length in bit
+ *
+ * Returns:
+ * field value
+ */
+uint32_t tfc_getbits(uint32_t *data, int offset, int blen);
+
+/*
+ * Calculate the smallest power of 2 that is >= x. The return value is the
+ * exponent of 2.
+ */
+uint32_t next_pow2(uint32_t x);
+
+/*
+ * Calculate the largest power of 2 that is less than x. The return value is
+ * the exponent of 2.
+ */
+uint32_t prev_pow2(uint32_t x);
+
+uint32_t roundup32(uint32_t x, uint32_t y);
+
+uint64_t roundup64(uint64_t x, uint64_t y);
+
+#endif /* _TFC_UTIL_H_ */
new file mode 100644
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Broadcom
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "tfc_vf2pf_msg.h"
+#include "tfc_util.h"
+
+/* Logging defines */
+#define TFC_VF2PF_MSG_DEBUG 0
+
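+/* The tfc_vf2pf_* senders below wrap bnxt_hwrm_tf_oem_cmd(): the request
+ * structure is carried as HWRM_OEM_CMD oem_data and the PF reply is returned
+ * in the response structure.
+ */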
+int
+tfc_vf2pf_mem_alloc(struct tfc *tfcp,
+ struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_cmd *req,
+ struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_resp *resp)
+{
+ struct bnxt *bp;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (req == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid req pointer");
+ return -EINVAL;
+ }
+
+ if (resp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid resp pointer");
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ return bnxt_hwrm_tf_oem_cmd(bp, (uint32_t *)req, sizeof(*req),
+ (uint32_t *)resp, sizeof(*resp));
+}
+
+int
+tfc_vf2pf_mem_free(struct tfc *tfcp,
+ struct tfc_vf2pf_tbl_scope_mem_free_cmd *req,
+ struct tfc_vf2pf_tbl_scope_mem_free_resp *resp)
+{
+ struct bnxt *bp;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (req == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid req pointer");
+ return -EINVAL;
+ }
+
+ if (resp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid resp pointer");
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ return bnxt_hwrm_tf_oem_cmd(bp, (uint32_t *)req, sizeof(*req),
+ (uint32_t *)resp, sizeof(*resp));
+}
+
+int
+tfc_vf2pf_pool_alloc(struct tfc *tfcp,
+ struct tfc_vf2pf_tbl_scope_pool_alloc_cmd *req,
+ struct tfc_vf2pf_tbl_scope_pool_alloc_resp *resp)
+{
+ struct bnxt *bp;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (req == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid req pointer");
+ return -EINVAL;
+ }
+
+ if (resp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid resp pointer");
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ return bnxt_hwrm_tf_oem_cmd(bp, (uint32_t *)req, sizeof(*req),
+ (uint32_t *)resp, sizeof(*resp));
+}
+
+int
+tfc_vf2pf_pool_free(struct tfc *tfcp,
+ struct tfc_vf2pf_tbl_scope_pool_free_cmd *req,
+ struct tfc_vf2pf_tbl_scope_pool_free_resp *resp)
+{
+ struct bnxt *bp;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (req == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid req pointer");
+ return -EINVAL;
+ }
+
+ if (resp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid resp pointer");
+ return -EINVAL;
+ }
+
+ bp = tfcp->bp;
+ return bnxt_hwrm_tf_oem_cmd(bp, (uint32_t *)req, sizeof(*req),
+ (uint32_t *)resp, sizeof(*resp));
+}
+
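+/* PF-side handlers: each *_process() function decodes a VF2PF command from
+ * the OEM command payload, performs the requested table scope operation on
+ * behalf of the VF and fills in the response buffer.
+ */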
+static int
+tfc_vf2pf_mem_alloc_process(struct tfc *tfcp,
+ uint32_t *oem_data,
+ uint32_t *resp_data,
+ uint16_t *resp_len)
+{
+ int dir;
+ int rc = 0;
+ struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_cmd *req =
+ (struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_cmd *)oem_data;
+ struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_resp *resp =
+ (struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_resp *)resp_data;
+ uint16_t data_len = sizeof(*resp);
+ struct tfc_tbl_scope_mem_alloc_parms ma_parms;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (*resp_len < data_len) {
+ PMD_DRV_LOG_LINE(ERR, "resp_data buffer is too small");
+ return -EINVAL;
+ }
+
+	/* This block of code is for testing purposes and will be removed later. */
+ PMD_DRV_LOG_LINE(ERR, "Table scope mem alloc cfg cmd:");
+ PMD_DRV_LOG_LINE(ERR, "\ttsid: 0x%x, max_pools: 0x%x", req->tsid, req->max_pools);
+ for (dir = CFA_DIR_RX; dir < CFA_DIR_MAX; dir++) {
+		PMD_DRV_LOG_LINE(ERR, "\tsbucket_cnt_exp: 0x%x, dbucket_cnt: 0x%x",
+ req->static_bucket_cnt_exp[dir],
+ req->dynamic_bucket_cnt[dir]);
+ PMD_DRV_LOG_LINE(ERR, "\tlkup_rec_cnt: 0x%x, lkup_pool_sz_exp: 0x%x",
+ req->lkup_rec_cnt[dir],
+ req->lkup_pool_sz_exp[dir]);
+ PMD_DRV_LOG_LINE(ERR, "\tact_pool_sz_exp: 0x%x, lkup_rec_start_offset: 0x%x",
+ req->act_pool_sz_exp[dir],
+ req->lkup_rec_start_offset[dir]);
+ }
+
+ memset(&ma_parms, 0, sizeof(struct tfc_tbl_scope_mem_alloc_parms));
+
+ for (dir = CFA_DIR_RX; dir < CFA_DIR_MAX; dir++) {
+ ma_parms.static_bucket_cnt_exp[dir] = req->static_bucket_cnt_exp[dir];
+ ma_parms.dynamic_bucket_cnt[dir] = req->dynamic_bucket_cnt[dir];
+ ma_parms.lkup_rec_cnt[dir] = req->lkup_rec_cnt[dir];
+ ma_parms.act_rec_cnt[dir] = req->act_rec_cnt[dir];
+ ma_parms.act_pool_sz_exp[dir] = req->act_pool_sz_exp[dir];
+ ma_parms.lkup_pool_sz_exp[dir] = req->lkup_pool_sz_exp[dir];
+ ma_parms.lkup_rec_start_offset[dir] = req->lkup_rec_start_offset[dir];
+ }
+ /* Obtain from driver page definition (4k for DPDK) */
+ ma_parms.pbl_page_sz_in_bytes = BNXT_PAGE_SIZE;
+ /* First is meaningless on the PF, set to 0 */
+ ma_parms.first = 0;
+
+ /* This is not for local use if we are getting a message from the VF */
+ ma_parms.local = false;
+ ma_parms.max_pools = req->max_pools;
+ rc = tfc_tbl_scope_mem_alloc(tfcp, req->hdr.fid, req->tsid, &ma_parms);
+ if (rc == 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) PF allocation succeeds",
+ req->tsid);
+ } else {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) PF allocation fails (%s)",
+ req->tsid, strerror(-rc));
+ }
+ *resp_len = rte_cpu_to_le_16(data_len);
+ resp->hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_MEM_ALLOC_CFG_CMD;
+ resp->tsid = req->tsid;
+ resp->status = rc;
+ return rc;
+}
+
+static int
+tfc_vf2pf_mem_free_process(struct tfc *tfcp,
+ uint32_t *oem_data,
+ uint32_t *resp_data,
+ uint16_t *resp_len)
+{
+ int rc = 0;
+ struct tfc_vf2pf_tbl_scope_mem_free_cmd *req =
+ (struct tfc_vf2pf_tbl_scope_mem_free_cmd *)oem_data;
+ struct tfc_vf2pf_tbl_scope_mem_free_resp *resp =
+ (struct tfc_vf2pf_tbl_scope_mem_free_resp *)resp_data;
+ uint16_t data_len = sizeof(*resp);
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (*resp_len < data_len) {
+ PMD_DRV_LOG_LINE(ERR, "resp_data buffer is too small");
+ return -EINVAL;
+ }
+
+	/* This block of code is for testing purposes and will be removed later. */
+ PMD_DRV_LOG_LINE(ERR, "Table scope mem free cfg cmd:");
+ PMD_DRV_LOG_LINE(ERR, "\ttsid: 0x%x", req->tsid);
+
+ rc = tfc_tbl_scope_mem_free(tfcp, req->hdr.fid, req->tsid);
+ if (rc == 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) PF free succeeds", req->tsid);
+ } else {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) PF free fails (%s)",
+ req->tsid, strerror(-rc));
+ }
+ *resp_len = rte_cpu_to_le_16(data_len);
+ resp->hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_MEM_FREE_CMD;
+ resp->tsid = req->tsid;
+ resp->status = rc;
+ return rc;
+}
+
+static int
+tfc_vf2pf_pool_alloc_process(struct tfc *tfcp,
+ uint32_t *oem_data,
+ uint32_t *resp_data,
+ uint16_t *resp_len)
+{
+ int rc = 0;
+ struct tfc_vf2pf_tbl_scope_pool_alloc_cmd *req =
+ (struct tfc_vf2pf_tbl_scope_pool_alloc_cmd *)oem_data;
+ struct tfc_vf2pf_tbl_scope_pool_alloc_resp *resp =
+ (struct tfc_vf2pf_tbl_scope_pool_alloc_resp *)resp_data;
+ uint16_t data_len = sizeof(*resp);
+ uint8_t pool_sz_exp = 0;
+ uint16_t pool_id = 0;
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (*resp_len < data_len) {
+ PMD_DRV_LOG_LINE(ERR, "resp_data buffer is too small");
+ return -EINVAL;
+ }
+
+	/* This block of code is for testing purposes and will be removed later. */
+ PMD_DRV_LOG_LINE(ERR, "Table scope pool alloc cmd:");
+ PMD_DRV_LOG_LINE(ERR, "\ttsid: 0x%x, region:%s fid(%d)", req->tsid,
+ tfc_ts_region_2_str(req->region, req->dir), req->hdr.fid);
+
+ rc = tfc_tbl_scope_pool_alloc(tfcp, req->hdr.fid, req->tsid, req->region,
+ req->dir, &pool_sz_exp, &pool_id);
+ if (rc == 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) PF pool_alloc(%d) succeeds",
+ req->tsid, pool_id);
+ } else {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) PF pool_alloc fails (%s)",
+ req->tsid, strerror(-rc));
+ }
+ *resp_len = rte_cpu_to_le_16(data_len);
+ resp->hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_POOL_ALLOC_CMD;
+ resp->tsid = req->tsid;
+ resp->pool_sz_exp = pool_sz_exp;
+ resp->pool_id = pool_id;
+ resp->status = rc;
+ return rc;
+}
+
+static int
+tfc_vf2pf_pool_free_process(struct tfc *tfcp,
+ uint32_t *oem_data,
+ uint32_t *resp_data,
+ uint16_t *resp_len)
+{
+ int rc = 0;
+ struct tfc_vf2pf_tbl_scope_pool_free_cmd *req =
+ (struct tfc_vf2pf_tbl_scope_pool_free_cmd *)oem_data;
+ struct tfc_vf2pf_tbl_scope_pool_free_resp *resp =
+ (struct tfc_vf2pf_tbl_scope_pool_free_resp *)resp_data;
+ uint16_t data_len = sizeof(*resp);
+
+ if (tfcp == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfcp pointer");
+ return -EINVAL;
+ }
+
+ if (*resp_len < data_len) {
+ PMD_DRV_LOG_LINE(ERR, "resp_data buffer is too small");
+ return -EINVAL;
+ }
+
+	/* This block of code is for testing purposes and will be removed later. */
+ PMD_DRV_LOG_LINE(ERR, "Table scope pool free cfg cmd:");
+ PMD_DRV_LOG_LINE(ERR, "\ttsid: 0x%x", req->tsid);
+
+ rc = tfc_tbl_scope_pool_free(tfcp, req->hdr.fid, req->tsid, req->region,
+ req->dir, req->pool_id);
+ if (rc == 0) {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) PF free succeeds", req->tsid);
+ } else {
+ PMD_DRV_LOG_LINE(ERR, "tsid(%d) PF free fails (%s)",
+ req->tsid, strerror(-rc));
+ }
+ *resp_len = rte_cpu_to_le_16(data_len);
+	resp->hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_POOL_FREE_CMD;
+ resp->tsid = req->tsid;
+ resp->status = rc;
+ return rc;
+}
+
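+/* Dispatch a received VF2PF OEM command to the matching handler based on the
+ * message type in the tfc_vf2pf_hdr. Unsupported types return -EPERM.
+ */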
+int
+tfc_oem_cmd_process(struct tfc *tfcp, uint32_t *oem_data, uint32_t *resp, uint16_t *resp_len)
+{
+ struct tfc_vf2pf_hdr *oem = (struct tfc_vf2pf_hdr *)oem_data;
+ int rc = 0;
+
+ switch (oem->type) {
+ case TFC_VF2PF_TYPE_TBL_SCOPE_MEM_ALLOC_CFG_CMD:
+ rc = tfc_vf2pf_mem_alloc_process(tfcp, oem_data, resp, resp_len);
+ break;
+ case TFC_VF2PF_TYPE_TBL_SCOPE_MEM_FREE_CMD:
+ rc = tfc_vf2pf_mem_free_process(tfcp, oem_data, resp, resp_len);
+ break;
+
+ case TFC_VF2PF_TYPE_TBL_SCOPE_POOL_ALLOC_CMD:
+ rc = tfc_vf2pf_pool_alloc_process(tfcp, oem_data, resp, resp_len);
+ break;
+
+ case TFC_VF2PF_TYPE_TBL_SCOPE_POOL_FREE_CMD:
+ rc = tfc_vf2pf_pool_free_process(tfcp, oem_data, resp, resp_len);
+ break;
+ case TFC_VF2PF_TYPE_TBL_SCOPE_PFID_QUERY_CMD:
+ default:
+ rc = -EPERM;
+ break;
+ }
+
+ return rc;
+}
new file mode 100644
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Broadcom
+ * All rights reserved.
+ */
+#ifndef _TFC_VF2PF_MSG_H_
+#define _TFC_VF2PF_MSG_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "cfa_types.h"
+#include "tfc.h"
+#include "hsi_struct_def_dpdk.h"
+
+/* HWRM_OEM_CMD is used to transport the vf2pf commands and responses.
+ * All commands will have a naming_authority set to PCI_SIG, oem_id set to
+ * 0x14e4 and message_family set to TRUFLOW. The maximum size of the oem_data
+ * is 104 bytes. The response maximum size is 88 bytes.
+ */
+
+/** Truflow VF2PF message types
+ */
+enum tfc_vf2pf_type {
+ TFC_VF2PF_TYPE_TBL_SCOPE_MEM_ALLOC_CFG_CMD = 1,
+ TFC_VF2PF_TYPE_TBL_SCOPE_MEM_FREE_CMD,
+ TFC_VF2PF_TYPE_TBL_SCOPE_PFID_QUERY_CMD,
+ TFC_VF2PF_TYPE_TBL_SCOPE_POOL_ALLOC_CMD,
+ TFC_VF2PF_TYPE_TBL_SCOPE_POOL_FREE_CMD,
+};
+
+/** Truflow VF2PF response status
+ */
+enum tfc_vf2pf_status {
+ TFC_VF2PF_STATUS_OK = 0,
+ TFC_VF2PF_STATUS_TSID_CFG_ERR = 1,
+ TFC_VF2PF_STATUS_TSID_MEM_ALLOC_ERR = 2,
+ TFC_VF2PF_STATUS_TSID_INVALID = 3,
+ TFC_VF2PF_STATUS_TSID_NOT_CONFIGURED = 4,
+ TFC_VF2PF_STATUS_NO_POOLS_AVAIL = 5,
+ TFC_VF2PF_STATUS_FID_ERR = 6,
+};
+
+/**
+ * Truflow VF2PF header used for all Truflow VF2PF cmds/responses
+ */
+struct tfc_vf2pf_hdr {
+ /** use enum tfc_vf2pf_type */
+ uint16_t type;
+ /** VF fid */
+ uint16_t fid;
+};
+
+/**
+ * Truflow VF2PF Table Scope Memory allocate/config command
+ */
+struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_cmd {
+ /** vf2pf header */
+ struct tfc_vf2pf_hdr hdr;
+ /** table scope identifier */
+ uint8_t tsid;
+ /** lkup static bucket count */
+ uint8_t static_bucket_cnt_exp[CFA_DIR_MAX];
+ /** maximum number of pools requested - 1 for non-shared */
+ uint16_t max_pools;
+ /** dynamic bucket count */
+ uint32_t dynamic_bucket_cnt[CFA_DIR_MAX];
+ /** lkup record count */
+ uint32_t lkup_rec_cnt[CFA_DIR_MAX];
+ /** action record count */
+ uint32_t act_rec_cnt[CFA_DIR_MAX];
+ /** lkup pool size expressed as log2(max_recs/max_pools) */
+ uint8_t lkup_pool_sz_exp[CFA_DIR_MAX];
+ /** action pool size expressed as log2(max_recs/max_pools) */
+ uint8_t act_pool_sz_exp[CFA_DIR_MAX];
+ /** start offset in 32B records of the lkup recs (after buckets) */
+ uint32_t lkup_rec_start_offset[CFA_DIR_MAX];
+};
+/**
+ * Truflow VF2PF Table Scope Memory allocate/config response
+ */
+struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_resp {
+ /** vf2pf header copied from cmd */
+ struct tfc_vf2pf_hdr hdr;
+ /** status of request */
+ enum tfc_vf2pf_status status;
+ /** tsid allocated */
+ uint8_t tsid;
+};
+/**
+ * Truflow VF2PF Table Scope Memory free command
+ */
+struct tfc_vf2pf_tbl_scope_mem_free_cmd {
+ /** vf2pf header */
+ struct tfc_vf2pf_hdr hdr;
+ /** table scope identifier */
+ uint8_t tsid;
+};
+
+/**
+ * Truflow VF2PF Table Scope Memory free response
+ */
+struct tfc_vf2pf_tbl_scope_mem_free_resp {
+ /** vf2pf header copied from cmd */
+ struct tfc_vf2pf_hdr hdr;
+ /** status of request */
+ enum tfc_vf2pf_status status;
+ /** tsid memory freed */
+ uint8_t tsid;
+};
+
+/**
+ * Truflow VF2PF Table Scope PFID query command
+ */
+struct tfc_vf2pf_tbl_scope_pfid_query_cmd {
+ /** vf2pf header */
+ struct tfc_vf2pf_hdr hdr;
+};
+/**
+ * Truflow VF2PF Table Scope PFID query response
+ */
+struct tfc_vf2pf_pfid_query_resp {
+ /** vf2pf header copied from cmd */
+ struct tfc_vf2pf_hdr hdr;
+ /** status of AFM/NIC flow tbl scope */
+ enum tfc_vf2pf_status status;
+ /** tsid used for AFM/NIC flow tbl scope */
+ uint8_t tsid;
+ /** lookup tbl pool size expressed as log2(max_recs/max_pools) */
+ uint8_t lkup_pool_sz_exp[CFA_DIR_MAX];
+ /** action tbl pool size expressed as log2(max_recs/max_pools) */
+ uint8_t act_pool_sz_exp[CFA_DIR_MAX];
+ /** lkup record start offset in 32B records */
+ uint32_t lkup_rec_start_offset[CFA_DIR_MAX];
+ /** maximum number of pools */
+ uint16_t max_pools;
+};
+
+/**
+ * Truflow VF2PF Table Scope pool alloc command
+ */
+struct tfc_vf2pf_tbl_scope_pool_alloc_cmd {
+ /** vf2pf header */
+ struct tfc_vf2pf_hdr hdr;
+ /** table scope identifier */
+ uint8_t tsid;
+ /** direction RX or TX */
+ enum cfa_dir dir;
+ /** region lkup or action */
+ enum cfa_region_type region;
+};
+
+/**
+ * Truflow VF2PF Table Scope pool alloc response
+ */
+struct tfc_vf2pf_tbl_scope_pool_alloc_resp {
+ /** vf2pf header copied from cmd */
+ struct tfc_vf2pf_hdr hdr;
+ /** status of pool allocation */
+ enum tfc_vf2pf_status status;
+	/** tbl scope identifier */
+	uint8_t tsid;
+	/** pool size expressed as log2(max_recs/max_pools) */
+	uint8_t pool_sz_exp;
+	/** pool_id allocated */
+	uint16_t pool_id;
+};
+
+/**
+ * Truflow VF2PF Table Scope pool free command
+ */
+struct tfc_vf2pf_tbl_scope_pool_free_cmd {
+ /** vf2pf header */
+ struct tfc_vf2pf_hdr hdr;
+	/** direction RX or TX */
+	enum cfa_dir dir;
+	/** region lkup or action */
+	enum cfa_region_type region;
+	/** table scope id */
+	uint8_t tsid;
+	/** pool_id */
+	uint16_t pool_id;
+};
+
+/**
+ * Truflow VF2PF Table Scope pool free response
+ */
+struct tfc_vf2pf_tbl_scope_pool_free_resp {
+ /** vf2pf header copied from cmd */
+ struct tfc_vf2pf_hdr hdr;
+	/** status of pool free request */
+ enum tfc_vf2pf_status status;
+ /** table scope id */
+ uint8_t tsid;
+};
+
+int
+tfc_vf2pf_mem_alloc(struct tfc *tfcp,
+ struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_cmd *req,
+ struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_resp *resp);
+
+int
+tfc_vf2pf_mem_free(struct tfc *tfcp,
+ struct tfc_vf2pf_tbl_scope_mem_free_cmd *req,
+ struct tfc_vf2pf_tbl_scope_mem_free_resp *resp);
+
+int
+tfc_vf2pf_pool_alloc(struct tfc *tfcp,
+ struct tfc_vf2pf_tbl_scope_pool_alloc_cmd *req,
+ struct tfc_vf2pf_tbl_scope_pool_alloc_resp *resp);
+
+int
+tfc_vf2pf_pool_free(struct tfc *tfcp,
+ struct tfc_vf2pf_tbl_scope_pool_free_cmd *req,
+ struct tfc_vf2pf_tbl_scope_pool_free_resp *resp);
+
+int
+tfc_oem_cmd_process(struct tfc *tfcp,
+ uint32_t *oem_data,
+ uint32_t *resp,
+ uint16_t *resp_len);
+#endif /* _TFC_VF2PF_MSG_H_ */
new file mode 100644
@@ -0,0 +1,574 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Broadcom
+ * All rights reserved.
+ */
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include "tfo.h"
+#include "cfa_types.h"
+#include "cfa_tim.h"
+#include "bnxt.h"
+
+/** Table scope stored configuration
+ */
+struct tfc_tsid_db {
+ bool ts_valid; /**< Table scope is valid */
+ bool ts_is_shared; /**< Table scope is shared */
+ bool ts_is_bs_owner; /**< Backing store alloced by this instance (PF) */
+ uint16_t ts_max_pools; /**< maximum pools per CPM instance */
+ enum cfa_app_type ts_app; /**< application type TF/AFM */
+ /** backing store memory config */
+ struct tfc_ts_mem_cfg ts_mem[CFA_REGION_TYPE_MAX][CFA_DIR_MAX];
+ /** pool info config */
+ struct tfc_ts_pool_info ts_pool[CFA_DIR_MAX];
+};
+
+/** TFC Object Signature
+ * This signature identifies the tfc object database and
+ * is used for pointer validation
+ */
+#define TFC_OBJ_SIGNATURE 0xABACABAF
+
+/** TFC Object
+ * This data structure contains all data stored per bnxt port
+ * Access is restricted through set/get APIs.
+ *
+ * If a module (e.g. tbl_scope) needs to store data, it should
+ * be added here and accessor functions created.
+ */
+struct tfc_object {
+ uint32_t signature; /**< TF object signature */
+ uint16_t sid; /**< Session ID */
+ bool is_pf; /**< port is a PF */
+ struct cfa_bld_mpcinfo mpc_info; /**< MPC ops handle */
+ struct tfc_tsid_db tsid_db[TFC_TBL_SCOPE_MAX]; /**< tsid database */
+ /** TIM instance pointer (PF) - this is where the 4 instances
+ * of the TPM (rx/tx_lkup, rx/tx_act) will be stored per shared
+ * table scope. Only valid on a PF.
+ */
+ void *ts_tim;
+};
+
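+/* Allocate and initialize the per-port TFC object pointed to by *tfo. On a PF
+ * this also allocates and opens the Table scope Instance Manager (TIM).
+ */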
+void tfo_open(void **tfo, bool is_pf)
+{
+ int rc;
+ struct tfc_object *tfco = NULL;
+ uint32_t tim_db_size;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return;
+ }
+
+ tfco = (struct tfc_object *)rte_zmalloc("tf", sizeof(*tfco), 0);
+ if (tfco == NULL)
+ return;
+
+ tfco->signature = TFC_OBJ_SIGNATURE;
+ tfco->is_pf = is_pf;
+ tfco->sid = INVALID_SID;
+ tfco->ts_tim = NULL;
+
+ /* Bind to the MPC builder */
+ rc = cfa_bld_mpc_bind(CFA_P70, &tfco->mpc_info);
+ if (rc) {
+ PMD_DRV_LOG_LINE(ERR, "MPC bind failed");
+ rte_free(tfco);
+ *tfo = NULL;
+ return;
+ }
+ if (is_pf) {
+ /* Allocate TIM */
+ rc = cfa_tim_query(TFC_TBL_SCOPE_MAX, CFA_REGION_TYPE_MAX,
+ &tim_db_size);
+ if (rc)
+ goto cleanup;
+
+ tfco->ts_tim = rte_zmalloc("TIM", tim_db_size, 0);
+ if (tfco->ts_tim == NULL)
+ goto cleanup;
+
+ rc = cfa_tim_open(tfco->ts_tim,
+ tim_db_size,
+ TFC_TBL_SCOPE_MAX,
+ CFA_REGION_TYPE_MAX);
+ if (rc) {
+ rte_free(tfco->ts_tim);
+ tfco->ts_tim = NULL;
+ goto cleanup;
+ }
+ }
+
+ *tfo = tfco;
+ return;
+
+ cleanup:
+	rte_free(tfco);
+	*tfo = NULL;
+}
+
+void tfo_close(void **tfo)
+{
+ struct tfc_object *tfco = (struct tfc_object *)(*tfo);
+ enum cfa_region_type region;
+ int dir;
+ int tsid;
+ void *tim;
+ void *tpm;
+
+ if (*tfo && tfco->signature == TFC_OBJ_SIGNATURE) {
+ /* If TIM is setup free it and any TPMs */
+ if (tfo_tim_get(*tfo, &tim))
+ goto done;
+
+ if (!tim)
+ goto done;
+
+ for (tsid = 0; tsid < TFC_TBL_SCOPE_MAX; tsid++) {
+ for (region = 0; region < CFA_REGION_TYPE_MAX; region++) {
+ for (dir = 0; dir < CFA_DIR_MAX; dir++) {
+ tpm = NULL;
+ cfa_tim_tpm_inst_get(tim,
+ tsid,
+ region,
+ dir,
+ &tpm);
+ if (tpm) {
+ cfa_tim_tpm_inst_set(tim,
+ tsid,
+ region,
+ dir,
+ NULL);
+ rte_free(tpm);
+ }
+ }
+ }
+ }
+ rte_free(tim);
+ tfco->ts_tim = NULL;
+done:
+ rte_free(*tfo);
+ *tfo = NULL;
+ }
+}
+
+int tfo_mpcinfo_get(void *tfo, struct cfa_bld_mpcinfo **mpc_info)
+{
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+
+ *mpc_info = &tfco->mpc_info;
+
+ return 0;
+}
+
+int tfo_ts_validate(void *tfo, uint8_t ts_tsid, bool *ts_valid)
+{
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+ struct tfc_tsid_db *tsid_db;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+
+ if (ts_tsid >= TFC_TBL_SCOPE_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tsid %d", ts_tsid);
+ return -EINVAL;
+ }
+ tsid_db = &tfco->tsid_db[ts_tsid];
+
+ if (ts_valid)
+ *ts_valid = tsid_db->ts_valid;
+
+ return 0;
+}
+
+int tfo_ts_set(void *tfo, uint8_t ts_tsid, bool ts_is_shared,
+ enum cfa_app_type ts_app, bool ts_valid, uint16_t ts_max_pools)
+{
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+ struct tfc_tsid_db *tsid_db;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+
+ if (ts_tsid >= TFC_TBL_SCOPE_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tsid %d", ts_tsid);
+ return -EINVAL;
+ }
+
+ tsid_db = &tfco->tsid_db[ts_tsid];
+
+ tsid_db->ts_valid = ts_valid;
+ tsid_db->ts_is_shared = ts_is_shared;
+ tsid_db->ts_app = ts_app;
+ tsid_db->ts_max_pools = ts_max_pools;
+
+ return 0;
+}
+
+int tfo_ts_get(void *tfo, uint8_t ts_tsid, bool *ts_is_shared,
+ enum cfa_app_type *ts_app, bool *ts_valid,
+ uint16_t *ts_max_pools)
+{
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+ struct tfc_tsid_db *tsid_db;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+ if (ts_tsid >= TFC_TBL_SCOPE_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tsid %d", ts_tsid);
+ return -EINVAL;
+ }
+
+ tsid_db = &tfco->tsid_db[ts_tsid];
+
+ if (ts_valid)
+ *ts_valid = tsid_db->ts_valid;
+
+ if (ts_is_shared)
+ *ts_is_shared = tsid_db->ts_is_shared;
+
+ if (ts_app)
+ *ts_app = tsid_db->ts_app;
+
+ if (ts_max_pools)
+ *ts_max_pools = tsid_db->ts_max_pools;
+
+ return 0;
+}
+
+/** Set the table scope memory configuration for this direction
+ */
+int tfo_ts_set_mem_cfg(void *tfo, uint8_t ts_tsid, enum cfa_dir dir,
+ enum cfa_region_type region, bool is_bs_owner,
+ struct tfc_ts_mem_cfg *mem_cfg)
+{
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+ int rc = 0;
+ struct tfc_tsid_db *tsid_db;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+ if (mem_cfg == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid mem_cfg pointer");
+ return -EINVAL;
+ }
+ if (ts_tsid >= TFC_TBL_SCOPE_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tsid %d", ts_tsid);
+ return -EINVAL;
+ }
+
+ tsid_db = &tfco->tsid_db[ts_tsid];
+
+ tsid_db->ts_mem[region][dir] = *mem_cfg;
+ tsid_db->ts_is_bs_owner = is_bs_owner;
+
+ return rc;
+}
+
+/** Get the table scope memory configuration for this direction
+ */
+int tfo_ts_get_mem_cfg(void *tfo, uint8_t ts_tsid, enum cfa_dir dir,
+ enum cfa_region_type region, bool *is_bs_owner,
+ struct tfc_ts_mem_cfg *mem_cfg)
+{
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+ int rc = 0;
+ struct tfc_tsid_db *tsid_db;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+ if (mem_cfg == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid mem_cfg pointer");
+ return -EINVAL;
+ }
+ if (ts_tsid >= TFC_TBL_SCOPE_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tsid %d", ts_tsid);
+ return -EINVAL;
+ }
+
+ tsid_db = &tfco->tsid_db[ts_tsid];
+
+ *mem_cfg = tsid_db->ts_mem[region][dir];
+ if (is_bs_owner)
+ *is_bs_owner = tsid_db->ts_is_bs_owner;
+
+ return rc;
+}
+
+/** Get the Pool Manager instance
+ */
+int tfo_ts_get_cpm_inst(void *tfo, uint8_t ts_tsid, enum cfa_dir dir,
+ struct tfc_cpm **cpm_lkup, struct tfc_cpm **cpm_act)
+{
+ int rc = 0;
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+ struct tfc_tsid_db *tsid_db;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+ if (cpm_lkup == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid cpm_lkup pointer");
+ return -EINVAL;
+ }
+ if (cpm_act == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid cpm_act pointer");
+ return -EINVAL;
+ }
+ if (ts_tsid >= TFC_TBL_SCOPE_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tsid %d", ts_tsid);
+ return -EINVAL;
+ }
+
+ tsid_db = &tfco->tsid_db[ts_tsid];
+
+ *cpm_lkup = tsid_db->ts_pool[dir].lkup_cpm;
+ *cpm_act = tsid_db->ts_pool[dir].act_cpm;
+
+ return rc;
+}
+/** Set the Pool Manager instance
+ */
+int tfo_ts_set_cpm_inst(void *tfo, uint8_t ts_tsid, enum cfa_dir dir,
+ struct tfc_cpm *cpm_lkup, struct tfc_cpm *cpm_act)
+{
+ int rc = 0;
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+ struct tfc_tsid_db *tsid_db;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+ if (ts_tsid >= TFC_TBL_SCOPE_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tsid %d", ts_tsid);
+ return -EINVAL;
+ }
+ tsid_db = &tfco->tsid_db[ts_tsid];
+
+ tsid_db->ts_pool[dir].lkup_cpm = cpm_lkup;
+ tsid_db->ts_pool[dir].act_cpm = cpm_act;
+
+ return rc;
+}
+/** Set the table scope pool memory configuration for this direction
+ */
+int tfo_ts_set_pool_info(void *tfo, uint8_t ts_tsid, enum cfa_dir dir,
+ struct tfc_ts_pool_info *ts_pool)
+{
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+ int rc = 0;
+ struct tfc_tsid_db *tsid_db;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+ if (ts_pool == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid ts_pool pointer");
+ return -EINVAL;
+ }
+ if (ts_tsid >= TFC_TBL_SCOPE_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tsid %d", ts_tsid);
+ return -EINVAL;
+ }
+ tsid_db = &tfco->tsid_db[ts_tsid];
+
+ tsid_db->ts_pool[dir] = *ts_pool;
+
+ return rc;
+}
+
+/** Get the table scope pool memory configuration for this direction
+ */
+int tfo_ts_get_pool_info(void *tfo, uint8_t ts_tsid, enum cfa_dir dir,
+ struct tfc_ts_pool_info *ts_pool)
+{
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+ int rc = 0;
+ struct tfc_tsid_db *tsid_db;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+ if (ts_pool == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid ts_pool pointer");
+ return -EINVAL;
+ }
+ if (ts_tsid >= TFC_TBL_SCOPE_MAX) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tsid %d", ts_tsid);
+ return -EINVAL;
+ }
+ tsid_db = &tfco->tsid_db[ts_tsid];
+
+ *ts_pool = tsid_db->ts_pool[dir];
+
+ return rc;
+}
+
+int tfo_sid_set(void *tfo, uint16_t sid)
+{
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+ if (tfco->sid != INVALID_SID && sid != INVALID_SID &&
+ tfco->sid != sid) {
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot set SID %u, current session is %u",
+ sid, tfco->sid);
+ return -EINVAL;
+ }
+
+ tfco->sid = sid;
+
+ return 0;
+}
+
+int tfo_sid_get(void *tfo, uint16_t *sid)
+{
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+ if (sid == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid sid pointer");
+ return -EINVAL;
+ }
+
+ if (tfco->sid == INVALID_SID) {
+ /* Session has not been created */
+ return -ENODEV;
+ }
+
+ *sid = tfco->sid;
+
+ return 0;
+}
+
+int tfo_tim_set(void *tfo, void *tim)
+{
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+ if (tim == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tim pointer");
+ return -EINVAL;
+ }
+
+ if (tfco->ts_tim != NULL &&
+ tfco->ts_tim != tim) {
+ PMD_DRV_LOG_LINE(ERR,
+ "Cannot set TS TIM, TIM is already set");
+ return -EINVAL;
+ }
+
+ tfco->ts_tim = tim;
+
+ return 0;
+}
+
+int tfo_tim_get(void *tfo, void **tim)
+{
+ struct tfc_object *tfco = (struct tfc_object *)tfo;
+
+ if (tfo == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo pointer");
+ return -EINVAL;
+ }
+ if (tfco->signature != TFC_OBJ_SIGNATURE) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tfo object");
+ return -EINVAL;
+ }
+ if (tim == NULL) {
+ PMD_DRV_LOG_LINE(ERR, "Invalid tim pointer to pointer");
+ return -EINVAL;
+ }
+ if (tfco->ts_tim == NULL) {
+		/* The TS TIM may legitimately be NULL; no error log needed */
+ return -ENODEV;
+ }
+
+ *tim = tfco->ts_tim;
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Broadcom
+ * All rights reserved.
+ */
+#ifndef _TFO_H_
+#define _TFO_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "rte_memzone.h"
+#include "cfa_types.h"
+#include "cfa_bld_mpcops.h"
+#include "tfc.h"
+#include "tfc_cpm.h"
+
+/**
+ * @file
+ *
+ * @brief TFC (Truflow Core v3) Object Header File
+ *
+ * @page TFCOV3 Truflow Core v3 Object
+ *
+ * The TF object stores internal TFC data; it must be read and written
+ * only through the accessor functions declared below.
+ *
+ * @ref tfo_open
+ *
+ * @ref tfo_close
+ *
+ * @ref tfo_mpcinfo_get
+ *
+ * @ref tfo_ts_set
+ *
+ * @ref tfo_ts_get
+ *
+ * @ref tfo_ts_validate
+ *
+ * @ref tfo_ts_set_mem_cfg
+ *
+ * @ref tfo_ts_get_mem_cfg
+ *
+ * @ref tfo_ts_get_cpm_inst
+ *
+ * @ref tfo_ts_set_cpm_inst
+ *
+ * @ref tfo_ts_get_pool_info
+ *
+ * @ref tfo_ts_set_pool_info
+ *
+ * @ref tfo_sid_set
+ *
+ * @ref tfo_sid_get
+ *
+ * @ref tfo_tim_set
+ *
+ * @ref tfo_tim_get
+ */
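+
+/**
+ * Example: typical lifecycle of the per-port TFC object (illustrative
+ * sketch only; error handling is abbreviated and the session ID value is
+ * a placeholder).
+ *
+ * @code
+ * void *tfo = NULL;
+ * uint16_t sid;
+ * int rc;
+ *
+ * tfo_open(&tfo, true);              // allocate the per-port TFC object (PF)
+ * if (tfo == NULL)
+ *         return;                    // allocation failed
+ *
+ * // bind a session ID (the value 1 is a placeholder) and read it back
+ * rc = tfo_sid_set(tfo, 1);
+ * if (rc == 0)
+ *         rc = tfo_sid_get(tfo, &sid);
+ *
+ * tfo_close(&tfo);                   // release the per-port TFC object
+ * @endcode
+ */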
+
+/** Invalid Table Scope ID */
+#define INVALID_TSID UINT8_MAX
+
+/** Maximum number of table scopes */
+#define TFC_TBL_SCOPE_MAX 32
+
+/** Backing store/memory page levels */
+enum tfc_ts_pg_tbl_lvl {
+ TFC_TS_PT_LVL_0 = 0,
+ TFC_TS_PT_LVL_1,
+ TFC_TS_PT_LVL_2,
+ TFC_TS_PT_LVL_MAX
+};
+
+/**
+ * Backing store and page table control struct that
+ * is used for storing the memory zone pointer and
+ * page allocation.
+ */
+struct tfc_ts_mz {
+ const struct rte_memzone *mz;
+ uint32_t page_count;
+ uint32_t page_size;
+ uint32_t alloc_count;
+};
+
+/**
+ * Backing store/memory page table level config structure
+ */
+struct tfc_ts_page_tbl {
+	uint64_t *pg_pa_tbl; /**< Array of page physical addresses */
+	void **pg_va_tbl; /**< Array of page virtual addresses */
+ uint32_t pg_count; /**< Number of pages in this level */
+ uint32_t pg_size; /**< Size of each page in bytes */
+};
+
+/**
+ * Backing store/memory config structure
+ */
+struct tfc_ts_mem_cfg {
+ /** page table configuration */
+ struct tfc_ts_page_tbl pg_tbl[TFC_TS_PT_LVL_MAX];
+ uint64_t num_data_pages; /**< Total number of pages */
+ uint64_t l0_dma_addr; /**< Physical base memory address */
+ void *l0_addr; /**< Virtual base memory address */
+ int num_lvl; /**< Number of page levels */
+ uint32_t page_cnt[TFC_TS_PT_LVL_MAX]; /**< Page count per level */
+ uint32_t rec_cnt; /**< Total number of records in memory */
+ uint32_t lkup_rec_start_offset; /**< Offset of lkup rec start (in records) */
+ uint32_t entry_size; /**< Size of record in bytes */
+ struct tfc_ts_mz ts_mz; /**< Memory zone control struct */
+};
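+
+/**
+ * Example: totaling backing-store pages across the configured page-table
+ * levels of a tfc_ts_mem_cfg (illustrative sketch; assumes "cfg" has
+ * already been populated by the backing store setup code).
+ *
+ * @code
+ * static uint64_t ts_mem_total_pages(const struct tfc_ts_mem_cfg *cfg)
+ * {
+ *         uint64_t total = 0;
+ *         int lvl;
+ *
+ *         // num_lvl levels are in use; page_cnt[] holds the per-level count
+ *         for (lvl = 0; lvl < cfg->num_lvl && lvl < TFC_TS_PT_LVL_MAX; lvl++)
+ *                 total += cfg->page_cnt[lvl];
+ *
+ *         return total;
+ * }
+ * @endcode
+ */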
+
+/**
+ * Backing store pool info
+ */
+struct tfc_ts_pool_info {
+	uint16_t lkup_max_contig_rec; /**< Max contiguous lookup records */
+	uint16_t act_max_contig_rec; /**< Max contiguous action records */
+ uint8_t lkup_pool_sz_exp; /**< lookup pool size exp */
+ uint8_t act_pool_sz_exp; /**< action pool size exp */
+ struct tfc_cpm *lkup_cpm; /**< CPM lookup pool manager pointer */
+ struct tfc_cpm *act_cpm; /**< CPM action pool manager pointer */
+};
+
+
+/* TFO APIs */
+
+/**
+ * Allocate a TFC object for this DPDK port/function.
+ *
+ * @param[out] tfo
+ * Pointer to TFC object
+ *
+ * @param[in] is_pf
+ * Indicates whether the DPDK port is a PF.
+ */
+void tfo_open(void **tfo, bool is_pf);
+
+/**
+ * Free the TFC object for this DPDK port/function.
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ */
+void tfo_close(void **tfo);
+
+/**
+ * Validate a table scope ID.
+ *
+ * @param[in] tfo
+ *   Pointer to TFC object
+ *
+ * @param[in] ts_tsid
+ *   The table scope ID to validate
+ *
+ * @param[out] ts_valid
+ *   True if the table scope is valid
+ *
+ * @return
+ *   0 if the tsid is within range, negative error value otherwise (errno.h)
+ */
+int tfo_ts_validate(void *tfo, uint8_t ts_tsid, bool *ts_valid);
+
+/**
+ * Set the table scope configuration.
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ * @param[in] ts_tsid
+ * The table scope ID
+ *
+ * @param[in] ts_is_shared
+ * True if the table scope is shared
+ *
+ * @param[in] ts_app
+ * Application type TF/AFM
+ *
+ * @param[in] ts_valid
+ * True if the table scope is valid
+ *
+ * @param[in] ts_max_pools
+ * Maximum number of pools if shared.
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfo_ts_set(void *tfo, uint8_t ts_tsid, bool ts_is_shared,
+ enum cfa_app_type ts_app, bool ts_valid,
+ uint16_t ts_max_pools);
+
+/**
+ * Get the table scope configuration.
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ * @param[in] ts_tsid
+ * The table scope ID
+ *
+ * @param[out] ts_is_shared
+ * True if the table scope is shared
+ *
+ * @param[out] ts_app
+ * Application type TF/AFM
+ *
+ * @param[out] ts_valid
+ * True if the table scope is valid
+ *
+ * @param[out] ts_max_pools
+ * Maximum number of pools returned if shared.
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfo_ts_get(void *tfo, uint8_t ts_tsid, bool *ts_is_shared,
+ enum cfa_app_type *ts_app, bool *ts_valid,
+ uint16_t *ts_max_pools);
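+
+/**
+ * Example: table scope configuration set/get round trip (illustrative
+ * sketch; assumes a valid tfo, the tsid and pool count are placeholders,
+ * and the CFA_APP_TYPE_TF enumerator is assumed from cfa_types.h).
+ *
+ * @code
+ * uint8_t tsid = 3;          // placeholder tsid (< TFC_TBL_SCOPE_MAX)
+ * bool shared, valid;
+ * enum cfa_app_type app;
+ * uint16_t max_pools;
+ * int rc;
+ *
+ * rc = tfo_ts_set(tfo, tsid, true, CFA_APP_TYPE_TF, true, 8);
+ * if (rc == 0)
+ *         rc = tfo_ts_get(tfo, tsid, &shared, &app, &valid, &max_pools);
+ * // on success: shared == true, app == CFA_APP_TYPE_TF, max_pools == 8
+ * @endcode
+ */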
+
+/**
+ * Set the table scope memory configuration for this direction.
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ * @param[in] ts_tsid
+ * The table scope ID
+ *
+ * @param[in] dir
+ * The direction (RX/TX)
+ *
+ * @param[in] region
+ * The memory region (lookup/action)
+ *
+ * @param[in] is_bs_owner
+ * True if the caller is the owner of the backing store
+ *
+ * @param[in] mem_cfg
+ * Backing store/memory config structure
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfo_ts_set_mem_cfg(void *tfo, uint8_t ts_tsid, enum cfa_dir dir,
+ enum cfa_region_type region, bool is_bs_owner,
+ struct tfc_ts_mem_cfg *mem_cfg);
+
+/**
+ * Get the table scope memory configuration for this direction.
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ * @param[in] ts_tsid
+ * The table scope ID
+ *
+ * @param[in] dir
+ * The direction (RX/TX)
+ *
+ * @param[in] region
+ * The memory table region (lookup/action)
+ *
+ * @param[out] is_bs_owner
+ * True if the caller is the owner of the backing store
+ *
+ * @param[out] mem_cfg
+ * Backing store/memory config structure
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfo_ts_get_mem_cfg(void *tfo, uint8_t ts_tsid, enum cfa_dir dir,
+ enum cfa_region_type region, bool *is_bs_owner,
+ struct tfc_ts_mem_cfg *mem_cfg);
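+
+/**
+ * Example: storing and retrieving the lookup-region memory config for the
+ * RX direction (illustrative sketch; assumes a valid tfo/tsid, that "cfg"
+ * was built by the backing store allocator, and that CFA_DIR_RX and
+ * CFA_REGION_TYPE_LKUP come from cfa_types.h).
+ *
+ * @code
+ * struct tfc_ts_mem_cfg cfg = { 0 };   // filled in by backing store setup
+ * struct tfc_ts_mem_cfg readback;
+ * bool is_bs_owner;
+ * int rc;
+ *
+ * rc = tfo_ts_set_mem_cfg(tfo, tsid, CFA_DIR_RX, CFA_REGION_TYPE_LKUP,
+ *                         true, &cfg);
+ * if (rc == 0)
+ *         rc = tfo_ts_get_mem_cfg(tfo, tsid, CFA_DIR_RX, CFA_REGION_TYPE_LKUP,
+ *                                 &is_bs_owner, &readback);
+ * @endcode
+ */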
+
+/**
+ * Get the pool memory configuration for this direction.
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ * @param[in] ts_tsid
+ * The table scope ID
+ *
+ * @param[in] dir
+ * The direction (RX/TX)
+ *
+ * @param[out] ts_pool
+ * Table scope pool info
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfo_ts_get_pool_info(void *tfo, uint8_t ts_tsid, enum cfa_dir dir,
+ struct tfc_ts_pool_info *ts_pool);
+
+/**
+ * Set the pool memory configuration for this direction.
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ * @param[in] ts_tsid
+ * The table scope ID
+ *
+ * @param[in] dir
+ * The direction (RX/TX)
+ *
+ * @param[in] ts_pool
+ * Table scope pool info
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfo_ts_set_pool_info(void *tfo, uint8_t ts_tsid, enum cfa_dir dir,
+ struct tfc_ts_pool_info *ts_pool);
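+
+/**
+ * Example: read-modify-write of the per-direction pool info (illustrative
+ * sketch; assumes a valid tfo/tsid, the pool size exponent is a
+ * placeholder, and CFA_DIR_TX comes from cfa_types.h).
+ *
+ * @code
+ * struct tfc_ts_pool_info pool;
+ * int rc;
+ *
+ * rc = tfo_ts_get_pool_info(tfo, tsid, CFA_DIR_TX, &pool);
+ * if (rc == 0) {
+ *         pool.lkup_pool_sz_exp = 10;  // placeholder: 2^10 entry lookup pool
+ *         rc = tfo_ts_set_pool_info(tfo, tsid, CFA_DIR_TX, &pool);
+ * }
+ * @endcode
+ */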
+
+
+/** Get the Pool Manager instance
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ * @param[in] ts_tsid
+ * The table scope ID
+ *
+ * @param[in] dir
+ * The direction (RX/TX)
+ *
+ * @param[out] cpm_lkup
+ *   Lookup CPM instance (returned)
+ *
+ * @param[out] cpm_act
+ *   Action CPM instance (returned)
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfo_ts_get_cpm_inst(void *tfo, uint8_t ts_tsid, enum cfa_dir dir,
+ struct tfc_cpm **cpm_lkup, struct tfc_cpm **cpm_act);
+
+/** Set the Pool Manager instance
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ * @param[in] ts_tsid
+ * The table scope ID
+ *
+ * @param[in] dir
+ * The direction (RX/TX)
+ *
+ * @param[in] cpm_lkup
+ * Lookup CPM instance
+ *
+ * @param[in] cpm_act
+ * Action CPM instance
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfo_ts_set_cpm_inst(void *tfo, uint8_t ts_tsid, enum cfa_dir dir,
+ struct tfc_cpm *cpm_lkup, struct tfc_cpm *cpm_act);
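+
+/**
+ * Example: registering and later looking up the CPM instances for a
+ * direction (illustrative sketch; lkup_cpm/act_cpm are assumed to have
+ * been created elsewhere through the tfc_cpm API, and CFA_DIR_RX comes
+ * from cfa_types.h).
+ *
+ * @code
+ * struct tfc_cpm *lkup_cpm, *act_cpm;   // created elsewhere via tfc_cpm
+ * struct tfc_cpm *lkup, *act;
+ * int rc;
+ *
+ * rc = tfo_ts_set_cpm_inst(tfo, tsid, CFA_DIR_RX, lkup_cpm, act_cpm);
+ * if (rc == 0)
+ *         rc = tfo_ts_get_cpm_inst(tfo, tsid, CFA_DIR_RX, &lkup, &act);
+ * @endcode
+ */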
+
+/** Get the MPC info reference
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ * @param[out] mpc_info
+ *   Pointer to the MPC info (returned)
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfo_mpcinfo_get(void *tfo, struct cfa_bld_mpcinfo **mpc_info);
+
+
+/** Invalid session ID */
+#define INVALID_SID UINT16_MAX
+
+/**
+ * Set the session ID.
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ * @param[in] sid
+ * The session ID
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfo_sid_set(void *tfo, uint16_t sid);
+
+/**
+ * Get the session ID.
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ * @param[out] sid
+ * The session ID
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfo_sid_get(void *tfo, uint16_t *sid);
+
+/**
+ * Set the table scope instance manager.
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ * @param[in] tim
+ * Pointer to the table scope instance manager
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfo_tim_set(void *tfo, void *tim);
+
+/**
+ * Get the table scope instance manager.
+ *
+ * @param[in] tfo
+ * Pointer to TFC object
+ *
+ * @param[out] tim
+ * Pointer to a pointer to the table scope instance manager
+ *
+ * @return
+ * 0 for SUCCESS, negative error value for FAILURE (errno.h)
+ */
+int tfo_tim_get(void *tfo, void **tim);
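+
+/**
+ * Example: installing the table scope instance manager on first use
+ * (illustrative sketch; "new_tim" is a placeholder for a TIM created
+ * elsewhere). Note that tfo_tim_get() returns -ENODEV, without logging,
+ * while no TIM is installed.
+ *
+ * @code
+ * void *tim = NULL;
+ *
+ * if (tfo_tim_get(tfo, &tim) == -ENODEV) {
+ *         // no TIM yet: install the one we created (creation not shown)
+ *         if (tfo_tim_set(tfo, new_tim) == 0)
+ *                 tim = new_tim;
+ * }
+ * @endcode
+ */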
+
+#endif /* _TFO_H_ */
new file mode 100644
@@ -0,0 +1,74 @@
+#ifndef _BNXT_ULP_TFC_H_
+#define _BNXT_ULP_TFC_H_
+
+#include "bnxt.h"
+#include <inttypes.h>
+
+bool
+bnxt_ulp_cntxt_shared_tbl_scope_enabled(struct bnxt_ulp_context *ulp_ctx);
+
+int32_t
+bnxt_ulp_cntxt_tfcp_set(struct bnxt_ulp_context *ulp, struct tfc *tfcp);
+
+struct tfc *
+bnxt_ulp_cntxt_tfcp_get(struct bnxt_ulp_context *ulp);
+
+uint32_t
+bnxt_ulp_cntxt_tbl_scope_max_pools_get(struct bnxt_ulp_context *ulp_ctx);
+
+int32_t
+bnxt_ulp_cntxt_tbl_scope_max_pools_set(struct bnxt_ulp_context *ulp_ctx,
+ uint32_t max);
+
+enum tfc_tbl_scope_bucket_factor
+bnxt_ulp_cntxt_em_mulitplier_get(struct bnxt_ulp_context *ulp_ctx);
+
+int32_t
+bnxt_ulp_cntxt_em_mulitplier_set(struct bnxt_ulp_context *ulp_ctx,
+ enum tfc_tbl_scope_bucket_factor factor);
+
+uint32_t
+bnxt_ulp_cntxt_num_rx_flows_get(struct bnxt_ulp_context *ulp_ctx);
+
+int32_t
+bnxt_ulp_cntxt_num_rx_flows_set(struct bnxt_ulp_context *ulp_ctx, uint32_t num);
+
+uint32_t
+bnxt_ulp_cntxt_num_tx_flows_get(struct bnxt_ulp_context *ulp_ctx);
+
+int32_t
+bnxt_ulp_cntxt_num_tx_flows_set(struct bnxt_ulp_context *ulp_ctx, uint32_t num);
+
+uint16_t
+bnxt_ulp_cntxt_em_rx_key_max_sz_get(struct bnxt_ulp_context *ulp_ctx);
+
+int32_t
+bnxt_ulp_cntxt_em_rx_key_max_sz_set(struct bnxt_ulp_context *ulp_ctx,
+ uint16_t max);
+
+uint16_t
+bnxt_ulp_cntxt_em_tx_key_max_sz_get(struct bnxt_ulp_context *ulp_ctx);
+
+int32_t
+bnxt_ulp_cntxt_em_tx_key_max_sz_set(struct bnxt_ulp_context *ulp_ctx,
+ uint16_t max);
+
+uint16_t
+bnxt_ulp_cntxt_act_rec_rx_max_sz_get(struct bnxt_ulp_context *ulp_ctx);
+
+int32_t
+bnxt_ulp_cntxt_act_rec_rx_max_sz_set(struct bnxt_ulp_context *ulp_ctx,
+ int16_t max);
+
+uint16_t
+bnxt_ulp_cntxt_act_rec_tx_max_sz_get(struct bnxt_ulp_context *ulp_ctx);
+
+int32_t
+bnxt_ulp_cntxt_act_rec_tx_max_sz_set(struct bnxt_ulp_context *ulp_ctx,
+ int16_t max);
+
+uint32_t
+bnxt_ulp_cntxt_page_sz_get(struct bnxt_ulp_context *ulp_ctxt);
+
+int32_t
+bnxt_ulp_cntxt_page_sz_set(struct bnxt_ulp_context *ulp_ctxt,
+ uint32_t page_sz);
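+
+/*
+ * Example: sizing the flow tables through the ULP context accessors
+ * (illustrative sketch; ulp_ctx is assumed to be an initialized
+ * struct bnxt_ulp_context and the flow counts are placeholders).
+ *
+ *	uint32_t rx_flows, tx_flows;
+ *
+ *	if (bnxt_ulp_cntxt_num_rx_flows_set(ulp_ctx, 64 * 1024) == 0 &&
+ *	    bnxt_ulp_cntxt_num_tx_flows_set(ulp_ctx, 64 * 1024) == 0) {
+ *		rx_flows = bnxt_ulp_cntxt_num_rx_flows_get(ulp_ctx);
+ *		tx_flows = bnxt_ulp_cntxt_num_tx_flows_get(ulp_ctx);
+ *	}
+ */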
+
+#endif /* _BNXT_ULP_TFC_H_ */