@@ -10,6 +10,8 @@
#include "nt4ga_pci_ta_tg.h"
#include "nt4ga_link_100g.h"
+#include "flow_filter.h"
+
/* Sensors includes */
#include "board_sensors.h"
#include "avr_sensors.h"
@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)
n_nim_ports = fpga_info->n_nims;
assert(n_nim_ports >= 1);
+ /* Nt4ga Init Filter */
+ nt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;
+
+ res = flow_filter_init(p_fpga, &p_filter->mp_flow_device,
+ p_adapter_info->adapter_no);
+ if (res != 0) {
+ NT_LOG(ERR, ETHDEV, "%s: Cannot initialize filter\n",
+ p_adapter_id_str);
+ return res;
+ }
+
/*
* HIF/PCI TA/TG
*/
@@ -7,6 +7,7 @@
#include "nthw_drv.h"
#include "nthw_fpga.h"
#include "nt4ga_adapter.h"
+#include "flow_filter.h"
#define NO_FLAGS 0
@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)
return ((ts >> 32) * 1000000000) + (ts & 0xffffffff);
}
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+ nt4ga_stat_t *p_nt4ga_stat,
uint32_t *p_stat_dma_virtual);
static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
uint32_t *p_stat_dma_virtual);
-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,
nt4ga_stat_t *p_nt4ga_stat)
{
nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,
} else {
p_nt4ga_stat->last_timestamp =
timestamp2ns(*p_nthw_stat->mp_timestamp);
- nt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,
+ nt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,
p_nt4ga_stat->p_stat_dma_virtual);
}
return 0;
@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)
return -1;
}
- p_nt4ga_stat->flm_stat_ver = 0;
+ struct flow_nic_dev *ndev =
+ p_adapter_info->nt4ga_filter.mp_flow_device;
+ p_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;
p_nt4ga_stat->mp_stat_structs_flm =
calloc(1, sizeof(struct flm_counters_v1));
@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
}
/* Called with stat mutex locked */
-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,
+ nt4ga_stat_t *p_nt4ga_stat,
uint32_t *p_stat_dma_virtual)
{
nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
+ struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
const int n_rx_ports = p_nt4ga_stat->mn_rx_ports;
const int n_tx_ports = p_nt4ga_stat->mn_tx_ports;
@@ -701,5 +707,9 @@ static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,
p_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;
}
+	/* Update and get FLM stats */
+ flow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,
+ sizeof(struct flm_counters_v1) / sizeof(uint64_t));
+
return 0;
}
@@ -61,8 +61,10 @@ sources = files(
'nthw/core/nthw_spim.c',
'nthw/core/nthw_spis.c',
'nthw/core/nthw_tsm.c',
+ 'nthw/flow_api/flow_api.c',
'nthw/flow_api/flow_api_actions.c',
'nthw/flow_api/flow_api_backend.c',
+ 'nthw/flow_api/flow_api_profile_inline.c',
'nthw/flow_api/flow_engine/flow_group.c',
'nthw/flow_api/flow_engine/flow_hasher.c',
'nthw/flow_api/flow_engine/flow_kcc.c',
@@ -81,6 +83,8 @@ sources = files(
'nthw/flow_api/hw_mod/hw_mod_slc.c',
'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
'nthw/flow_api/hw_mod/hw_mod_tpe.c',
+ 'nthw/flow_filter/flow_backend.c',
+ 'nthw/flow_filter/flow_filter.c',
'nthw/flow_filter/flow_nthw_cat.c',
'nthw/flow_filter/flow_nthw_csu.c',
'nthw/flow_filter/flow_nthw_flm.c',
new file mode 100644
@@ -0,0 +1,1307 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "flow_api.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include "flow_api_profile_inline.h"
+
+#define SCATTER_GATHER
+
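+/* Debug names for resource types; a sketch of the intent - entries must stay in the same order as enum res_type_e */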
+const char *dbg_res_descr[] = {
+ /* RES_QUEUE */ "RES_QUEUE",
+ /* RES_CAT_CFN */ "RES_CAT_CFN",
+ /* RES_CAT_COT */ "RES_CAT_COT",
+ /* RES_CAT_EXO */ "RES_CAT_EXO",
+ /* RES_CAT_LEN */ "RES_CAT_LEN",
+ /* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
+ /* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
+ /* RES_HSH_RCP */ "RES_HSH_RCP",
+ /* RES_PDB_RCP */ "RES_PDB_RCP",
+ /* RES_QSL_RCP */ "RES_QSL_RCP",
+ /* RES_QSL_LTX */ "RES_QSL_LTX",
+ /* RES_QSL_QST */ "RES_QSL_QST",
+ /* RES_SLC_RCP */ "RES_SLC_RCP",
+ /* RES_IOA_RCP */ "RES_IOA_RCP",
+ /* RES_ROA_RCP */ "RES_ROA_RCP",
+ /* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
+ /* RES_FLM_RCP */ "RES_FLM_RCP",
+ /* RES_HST_RCP */ "RES_HST_RCP",
+ /* RES_TPE_RCP */ "RES_TPE_RCP",
+ /* RES_TPE_EXT */ "RES_TPE_EXT",
+ /* RES_TPE_RPL */ "RES_TPE_RPL",
+ /* RES_COUNT */ "RES_COUNT",
+ /* RES_INVALID */ "RES_INVALID"
+};
+
+static struct flow_nic_dev *dev_base;
+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * *****************************************************************************
+ * Error handling
+ * *****************************************************************************
+ */
+
+static const struct {
+ const char *message;
+} err_msg[] = {
+ /* 00 */ { "Operation successfully completed" },
+ /* 01 */ { "Operation failed" },
+ /* 02 */ { "Memory allocation failed" },
+ /* 03 */ { "Too many output destinations" },
+ /* 04 */ { "Too many output queues for RSS" },
+ /* 05 */ { "The VLAN TPID specified is not supported" },
+ /* 06 */ { "The VxLan Push header specified is not accepted" },
+ /* 07 */
+ { "While interpreting VxLan Pop action, could not find a destination port" },
+ /* 08 */ { "Failed in creating a HW-internal VTEP port" },
+ /* 09 */ { "Too many VLAN tag matches" },
+ /* 10 */ { "IPv6 invalid header specified" },
+ /* 11 */ { "Too many tunnel ports. HW limit reached" },
+ /* 12 */ { "Unknown or unsupported flow match element received" },
+ /* 13 */ { "Match failed because of HW limitations" },
+ /* 14 */ { "Match failed because of HW resource limitations" },
+ /* 15 */ { "Match failed because of too complex element definitions" },
+	/* 16 */ { "Action failed due to too many output destinations" },
+ /* 17 */ { "Action Output failed, due to HW resource exhaustion" },
+ /* 18 */
+ { "Push Tunnel Header action cannot output to multiple destination queues" },
+ /* 19 */ { "Inline action HW resource exhaustion" },
+ /* 20 */ { "Action retransmit/recirculate HW resource exhaustion" },
+ /* 21 */ { "Flow counter HW resource exhaustion" },
+ /* 22 */ { "Internal HW resource exhaustion to handle Actions" },
+ /* 23 */ { "Internal HW QSL compare failed" },
+ /* 24 */ { "Internal CAT CFN reuse failed" },
+ /* 25 */ { "Match variations too complex" },
+ /* 26 */ { "Match failed because of CAM/TCAM full" },
+ /* 27 */ { "Internal creation of a tunnel end point port failed" },
+ /* 28 */ { "Unknown or unsupported flow action received" },
+ /* 29 */ { "Removing flow failed" },
+ /* 30 */
+	{ "No output queue specified. Ignoring this flow offload and using the default queue" },
+ /* 31 */ { "No output queue found" },
+ /* 32 */ { "Unsupported EtherType or rejected caused by offload policy" },
+ /* 33 */
+ { "Destination port specified is invalid or not reachable from this NIC" },
+ /* 34 */ { "Partial offload is not supported in this configuration" },
+ /* 35 */ { "Match failed because of CAT CAM exhausted" },
+ /* 36 */
+ { "Match failed because of CAT CAM Key clashed with an existing KCC Key" },
+ /* 37 */ { "Match failed because of CAT CAM write failed" },
+ /* 38 */ { "Partial flow mark too big for device" },
+	/* 39 */ { "Invalid priority value" },
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)
+{
+ assert(msg < ERR_MSG_NO_MSG);
+ if (error) {
+ error->message = err_msg[msg].message;
+ error->type = (msg == ERR_SUCCESS) ? FLOW_ERROR_SUCCESS :
+ FLOW_ERROR_GENERAL;
+ }
+}
+
+/*
+ * *****************************************************************************
+ * Resources
+ * *****************************************************************************
+ */
+
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+ uint32_t alignment)
+{
+ for (unsigned int i = 0; i < ndev->res[res_type].resource_count;
+ i += alignment) {
+ if (!flow_nic_is_resource_used(ndev, res_type, i)) {
+ flow_nic_mark_resource_used(ndev, res_type, i);
+ ndev->res[res_type].ref[i] = 1;
+ return i;
+ }
+ }
+ return -1;
+}
+
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+ enum res_type_e res_type)
+{
+ if (!flow_nic_is_resource_used(ndev, res_type, idx)) {
+ flow_nic_mark_resource_used(ndev, res_type, idx);
+ ndev->res[res_type].ref[idx] = 1;
+ return 0;
+ }
+ return -1;
+}
+
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+ enum res_type_e res_type, unsigned int num,
+ uint32_t alignment)
+{
+ unsigned int idx_offs;
+
+ for (unsigned int res_idx = 0;
+ res_idx < ndev->res[res_type].resource_count - (num - 1);
+ res_idx += alignment) {
+ if (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {
+ for (idx_offs = 1; idx_offs < num; idx_offs++) {
+ if (flow_nic_is_resource_used(ndev, res_type,
+ res_idx + idx_offs))
+ break;
+ }
+ if (idx_offs < num)
+ continue;
+
+ /* found a contiguous number of "num" res_type elements - allocate them */
+ for (idx_offs = 0; idx_offs < num; idx_offs++) {
+ flow_nic_mark_resource_used(ndev, res_type,
+ res_idx + idx_offs);
+ ndev->res[res_type].ref[res_idx + idx_offs] = 1;
+ }
+ return res_idx;
+ }
+ }
+ return -1;
+}
+
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+ int idx)
+{
+ flow_nic_mark_resource_unused(ndev, res_type, idx);
+}
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+ int index)
+{
+ NT_LOG(DBG, FILTER,
+ "Reference resource %s idx %i (before ref cnt %i)\n",
+ dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+ assert(flow_nic_is_resource_used(ndev, res_type, index));
+ if (ndev->res[res_type].ref[index] == (uint32_t)-1)
+ return -1;
+ ndev->res[res_type].ref[index]++;
+ return 0;
+}
+
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+ int index)
+{
+ NT_LOG(DBG, FILTER,
+ "De-reference resource %s idx %i (before ref cnt %i)\n",
+ dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
+ assert(flow_nic_is_resource_used(ndev, res_type, index));
+ assert(ndev->res[res_type].ref[index]);
+ /* deref */
+ ndev->res[res_type].ref[index]--;
+ if (!ndev->res[res_type].ref[index])
+ flow_nic_free_resource(ndev, res_type, index);
+ return !!ndev->res[res_type]
+ .ref[index]; /* if 0 resource has been freed */
+}
+
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+ enum res_type_e res_type, int idx_start)
+{
+ for (unsigned int i = idx_start; i < ndev->res[res_type].resource_count;
+ i++) {
+ if (flow_nic_is_resource_used(ndev, res_type, i))
+ return i;
+ }
+ return -1;
+}
+
+/*
+ * Allocate a number of flow resources.
+ *
+ * Arguments:
+ * ndev : device
+ * res_type : resource type
+ * fh : flow handle
+ * count : number of (contiguous) resources to be allocated
+ *   alignment : start index alignment
+ *               1: the allocation can start at any index
+ *               2: the allocation must start at an index that is a multiple of 2 (0, 2, 4, 6, ...)
+ *               3: the allocation must start at an index that is a multiple of 3 (0, 3, 6, 9, ...)
+ *               etc.
+ * Returns:
+ * 0 : success
+ * -1 : failure
+ */
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+ enum res_type_e res_type,
+ struct flow_handle *fh, uint32_t count,
+ uint32_t alignment)
+{
+ if (count > 1) {
+ /* Contiguous */
+ fh->resource[res_type].index =
+ flow_nic_alloc_resource_contig(ndev, res_type, count, alignment);
+ } else {
+ fh->resource[res_type].index =
+ flow_nic_alloc_resource(ndev, res_type, alignment);
+ }
+
+ if (fh->resource[res_type].index < 0)
+ return -1;
+ fh->resource[res_type].count = count;
+ return 0;
+}
+
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+ enum res_type_e res_type, int idx,
+ struct flow_handle *fh)
+{
+ int err = flow_nic_alloc_resource_index(ndev, idx, res_type);
+
+ if (err)
+ return err;
+
+ fh->resource[res_type].index = idx;
+ if (fh->resource[res_type].index < 0)
+ return -1;
+ fh->resource[res_type].count = 1;
+ return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Hash
+ * *****************************************************************************
+ */
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+ enum flow_nic_hash_e algorithm)
+{
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);
+ switch (algorithm) {
+ case HASH_ALGO_5TUPLE:
+ /* need to create an IPv6 hashing and enable the adaptive ip mask bit */
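+		/*
+		 * Recipe layout (as set below): QW0 is taken at offset -16 from the final IP
+		 * destination (i.e. the IPv6 source address) and QW4 at offset 0 (the destination
+		 * address); W8 covers the first L4 word (source/destination ports). Word masks
+		 * 0-8 are enabled and mask 9 is cleared so W9 does not contribute to the hash.
+		 */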
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,
+ hsh_idx, 0, 2);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+ DYN_FINAL_IP_DST);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+ -16);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+ DYN_FINAL_IP_DST);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+ 0);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,
+ DYN_L4);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,
+ 0);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,
+ 1);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+ HASH_5TUPLE);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+ hsh_idx, 0, 1);
+
+ NT_LOG(DBG, FILTER,
+ "Set IPv6 5-tuple hasher with adaptive IPv4 hashing\n");
+ break;
+ default:
+ case HASH_ALGO_ROUND_ROBIN:
+ /* zero is round-robin */
+ break;
+ }
+
+ return 0;
+}
+
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+ struct nt_eth_rss f)
+{
+ uint64_t fields = f.fields;
+
+ int res = 0;
+
+ res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,
+ 0);
+ res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,
+ 0, 2);
+ switch (fields) {
+ case NT_ETH_RSS_C_VLAN:
+ /*
+		/*
+		 * The first VLAN is used as the C-VLAN here, which only holds true
+		 * in the single-VLAN case.
+		 */
+ res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,
+ 0, DYN_FIRST_VLAN);
+ res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,
+ 0, 0);
+ res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,
+ hsh_idx, 8, 0xffffffff);
+ res |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,
+ hsh_idx, 0, HASH_LAST_VLAN_ID);
+ if (res) {
+ NT_LOG(ERR, FILTER,
+			       "VLAN hasher is not set: a hardware communication problem has "
+			       "occurred. The hardware could be in an inconsistent state. Rerun.\n");
+ return -1;
+ }
+ NT_LOG(DBG, FILTER, "Set VLAN hasher.\n");
+ return 0;
+ case NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:
+ /* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,
+ DYN_FINAL_IP_DST);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,
+ 0);
+
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,
+ 0xffffffff);
+
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+ HASH_OUTER_DST_IP);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+ hsh_idx, 0, 1);
+ if (res) {
+ NT_LOG(ERR, FILTER,
+			       "Outer dst IP hasher is not set: a hardware communication problem has "
+			       "occurred. The hardware could be in an inconsistent state. Rerun.\n");
+ return -1;
+ }
+ NT_LOG(DBG, FILTER, "Set outer dst IP hasher.\n");
+ return 0;
+ case NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:
+ /* need to create an IPv6 hashing and enable the adaptive ip mask bit */
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,
+ DYN_TUN_FINAL_IP_DST);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,
+ -16);
+
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,
+ 0xffffffff);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,
+ 0xffffffff);
+
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,
+ HASH_INNER_SRC_IP);
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,
+ hsh_idx, 0, 1);
+ if (res) {
+ NT_LOG(ERR, FILTER,
+			       "Inner (depth = 1) src IP hasher is not set: a hardware communication "
+			       "problem has occurred. The hardware could be in an inconsistent state. "
+			       "Rerun.\n");
+ return -1;
+ }
+		NT_LOG(DBG, FILTER, "Set inner src IP hasher.\n");
+ return 0;
+ default:
+ NT_LOG(ERR, FILTER,
+		       "RSS bit flags can't be set up. "
+		       "Flags combination is not supported.\n");
+ return -1;
+ }
+}
+
+/*
+ * *****************************************************************************
+ * Nic port/adapter lookup
+ * *****************************************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)
+{
+ struct flow_nic_dev *nic_dev = dev_base;
+
+ while (nic_dev) {
+ if (nic_dev->adapter_no == adapter_no)
+ break;
+ nic_dev = nic_dev->next;
+ }
+
+ if (!nic_dev)
+ return NULL;
+
+ struct flow_eth_dev *dev = nic_dev->eth_base;
+
+ while (dev) {
+ if (port == dev->port)
+ return dev;
+ dev = dev->next;
+ }
+
+ return NULL;
+}
+
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)
+{
+ struct flow_nic_dev *ndev = dev_base;
+
+ while (ndev) {
+ if (adapter_no == ndev->adapter_no)
+ break;
+ ndev = ndev->next;
+ }
+ return ndev;
+}
+
+/*
+ * *****************************************************************************
+ * LAG control implementation
+ * *****************************************************************************
+ */
+
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)
+{
+ pthread_mutex_lock(&base_mtx);
+ struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+ if (!ndev) {
+ /* Error invalid nic device */
+ pthread_mutex_unlock(&base_mtx);
+ return -1;
+ }
+	/*
+	 * For each bit N set in port_mask, ports N*2 and N*2+1 are paired as a LAG:
+	 * they are merged and reported as incoming port N*2.
+	 */
+ */
+ hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);
+ hw_mod_rmc_ctrl_flush(&ndev->be);
+ pthread_mutex_unlock(&base_mtx);
+ return 0;
+}
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)
+{
+ pthread_mutex_lock(&base_mtx);
+ struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+ if (!ndev) {
+ /* Error invalid nic device */
+ pthread_mutex_unlock(&base_mtx);
+ return -1;
+ }
+ /* Blocks for traffic from port */
+ hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);
+ hw_mod_rmc_ctrl_flush(&ndev->be);
+ pthread_mutex_unlock(&base_mtx);
+ return 0;
+}
+
+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,
+ uint32_t value)
+{
+ hw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);
+ hw_mod_roa_lagcfg_flush(be, index, 1);
+}
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+ uint32_t value)
+{
+ pthread_mutex_lock(&base_mtx);
+ struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+ if (!ndev) {
+ /* Error invalid nic device */
+ pthread_mutex_unlock(&base_mtx);
+ return -1;
+ }
+
+ switch (cmd) {
+ case FLOW_LAG_SET_ENTRY:
+ write_lag_entry(&ndev->be, index, value);
+ break;
+
+ case FLOW_LAG_SET_ALL:
+ index &= 3;
+ for (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;
+ i += 4)
+ write_lag_entry(&ndev->be, i + index, value);
+ break;
+
+ case FLOW_LAG_SET_BALANCE:
+ /*
+ * This function will balance the output port
+ * value: The balance of the distribution:
+ * port P0 / P1
+ * 0: 0 / 100 port 0 is disabled
+ * 25: 25 / 75
+ * 50: 50 / 50
+ * 75: 75 / 25
+ * 100: 100/ 0 port 1 is disabled
+ */
+ {
+ /* Clamp the balance to 100% output on port 1 */
+ if (value > 100)
+ value = 100;
+ double balance = ((double)value / 100.0);
+ double block_count =
+ (double)ndev->be.roa.nb_lag_entries / 4.0;
+
+ int output_port = 1;
+ int port0_output_block_count =
+ (int)(block_count * balance);
+
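+			/*
+			 * Example (assuming 16 LAG entries for illustration): value = 25 gives
+			 * block_count = 4 and port0_output_block_count = 1, so the first hash
+			 * block (4 entries) is written with output_port 1 and the remaining
+			 * three blocks with output_port 2.
+			 */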
+ for (int block = 0; block < block_count; block++) {
+ /* When the target port0 balance is reached. */
+ if (block >= port0_output_block_count)
+ output_port = 2;
+ /* Write an entire hash block to a given output port. */
+ for (int idx = 0; idx < 4; idx++) {
+ write_lag_entry(&ndev->be,
+ block * 4 + idx,
+ output_port);
+ } /* for each index in hash block */
+ } /* for each hash block */
+ }
+
+ break;
+ default:
+ pthread_mutex_unlock(&base_mtx);
+ return -1;
+ }
+
+ pthread_mutex_unlock(&base_mtx);
+ return 0;
+}
+
+/*
+ * *****************************************************************************
+ * Flow API
+ * *****************************************************************************
+ */
+
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+ const struct flow_action action[], struct flow_error *error)
+{
+ if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+ NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+ return -1;
+ }
+ return flow_validate_profile_inline(dev, item, action, error);
+}
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+ const struct flow_attr *attr,
+ const struct flow_elem item[],
+ const struct flow_action action[],
+ struct flow_error *error)
+{
+ if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+ NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+ return NULL;
+ }
+ return flow_create_profile_inline(dev, attr, item, action, error);
+}
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+ struct flow_error *error)
+{
+ if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+ NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+ return -1;
+ }
+ return flow_destroy_profile_inline(dev, flow, error);
+}
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)
+{
+ if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+ NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+ return -1;
+ }
+ return flow_flush_profile_inline(dev, error);
+}
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+ const struct flow_action *action, void **data, uint32_t *length,
+ struct flow_error *error)
+{
+ if (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+ NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+ return -1;
+ }
+ return flow_query_profile_inline(dev, flow, action, data, length,
+ error);
+}
+
+/*
+ * *****************************************************************************
+ * Device Management API
+ * *****************************************************************************
+ */
+
+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,
+ struct flow_eth_dev *dev)
+{
+ dev->next = ndev->eth_base;
+ ndev->eth_base = dev;
+}
+
+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,
+ struct flow_eth_dev *eth_dev)
+{
+ struct flow_eth_dev *dev = ndev->eth_base, *prev = NULL;
+
+ while (dev) {
+ if (dev == eth_dev) {
+ if (prev)
+ prev->next = dev->next;
+
+ else
+ ndev->eth_base = dev->next;
+ return 0;
+ }
+ prev = dev;
+ dev = dev->next;
+ }
+ return -1;
+}
+
+static void flow_ndev_reset(struct flow_nic_dev *ndev)
+{
+ /* Delete all eth-port devices created on this NIC device */
+ while (ndev->eth_base)
+ flow_delete_eth_dev(ndev->eth_base);
+
+ /* Error check */
+ while (ndev->flow_base) {
+ NT_LOG(ERR, FILTER,
+ "ERROR : Flows still defined but all eth-ports deleted. Flow %p\n",
+ ndev->flow_base);
+
+ if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+ NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+ return;
+ }
+ flow_destroy_profile_inline(ndev->flow_base->dev,
+ ndev->flow_base, NULL);
+ }
+
+ if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+ NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+ return;
+ }
+ done_flow_management_of_ndev_profile_inline(ndev);
+
+ km_free_ndev_resource_management(&ndev->km_res_handle);
+ kcc_free_ndev_resource_management(&ndev->kcc_res_handle);
+
+#ifdef FLOW_DEBUG
+	/*
+	 * Free all resources initially allocated by default for this NIC device.
+	 * Not really needed, since the bitmaps will be freed shortly after; therefore
+	 * this is only done in debug mode.
+	 */
+
+	/* Check that all resources have been released */
+ NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i\n", ndev->adapter_no);
+ for (unsigned int i = 0; i < RES_COUNT; i++) {
+ int err = 0;
+#if defined(FLOW_DEBUG)
+ NT_LOG(DBG, FILTER, "RES state for: %s\n", dbg_res_descr[i]);
+#endif
+ for (unsigned int ii = 0; ii < ndev->res[i].resource_count;
+ ii++) {
+ int ref = ndev->res[i].ref[ii];
+ int used = flow_nic_is_resource_used(ndev, i, ii);
+
+ if (ref || used) {
+ NT_LOG(DBG, FILTER,
+ " [%i]: ref cnt %i, used %i\n", ii, ref,
+ used);
+ err = 1;
+ }
+ }
+ if (err)
+ NT_LOG(DBG, FILTER,
+ "ERROR - some resources not freed\n");
+ }
+#endif
+}
+
+int flow_reset_nic_dev(uint8_t adapter_no)
+{
+ struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+ if (!ndev)
+ return -1;
+ flow_ndev_reset(ndev);
+ flow_api_backend_reset(&ndev->be);
+ return 0;
+}
+
+/*
+ * adapter_no physical adapter no
+ * port_no local port no
+ * alloc_rx_queues number of rx-queues to allocate for this eth_dev
+ */
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,
+ uint32_t port_id, int alloc_rx_queues,
+ struct flow_queue_id_s queue_ids[],
+ int *rss_target_id,
+ enum flow_eth_dev_profile flow_profile,
+ uint32_t exception_path)
+{
+ int i;
+ struct flow_eth_dev *eth_dev = NULL;
+
+ NT_LOG(DBG, FILTER,
+ "Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\n",
+ adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);
+
+ if (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {
+ assert(0);
+ NT_LOG(ERR, FILTER,
+ "ERROR: Internal array for multiple queues too small for API\n");
+ }
+
+ pthread_mutex_lock(&base_mtx);
+ struct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);
+
+ if (!ndev) {
+ /* Error - no flow api found on specified adapter */
+ NT_LOG(ERR, FILTER,
+ "ERROR: no flow interface registered for adapter %d\n",
+ adapter_no);
+ pthread_mutex_unlock(&base_mtx);
+ return NULL;
+ }
+
+ if (ndev->ports < ((uint16_t)port_no + 1)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: port exceeds supported port range for adapter\n");
+ pthread_mutex_unlock(&base_mtx);
+ return NULL;
+ }
+
+ if ((alloc_rx_queues - 1) >
+ FLOW_MAX_QUEUES) { /* 0th is exception so +1 */
+ NT_LOG(ERR, FILTER,
+ "ERROR: Exceeds supported number of rx queues per eth device\n");
+ pthread_mutex_unlock(&base_mtx);
+ return NULL;
+ }
+
+ /* don't accept multiple eth_dev's on same NIC and same port */
+ eth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);
+ if (eth_dev) {
+ NT_LOG(DBG, FILTER,
+ "Re-opening existing NIC port device: NIC DEV: %i Port %i\n",
+ adapter_no, port_no);
+ pthread_mutex_unlock(&base_mtx);
+ flow_delete_eth_dev(eth_dev);
+ eth_dev = NULL;
+ }
+
+ eth_dev = calloc(1, sizeof(struct flow_eth_dev));
+ if (!eth_dev) {
+ NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+ goto err_exit1;
+ }
+
+ pthread_mutex_lock(&ndev->mtx);
+
+ eth_dev->ndev = ndev;
+ eth_dev->port = port_no;
+ eth_dev->port_id = port_id;
+
+#ifdef FLOW_DEBUG
+ ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+ FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+	/* First time this NIC is initialized */
+ if (!ndev->flow_mgnt_prepared) {
+ ndev->flow_profile = flow_profile;
+ /* Initialize modules if needed - recipe 0 is used as no-match and must be setup */
+ if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {
+ NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+ goto err_exit0;
+ } else if (initialize_flow_management_of_ndev_profile_inline(ndev)
+ != 0) {
+ goto err_exit0;
+ }
+ } else {
+ /* check if same flow type is requested, otherwise fail */
+ if (ndev->flow_profile != flow_profile) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Different flow types requested on same NIC device. "
+ "Not supported.\n");
+ goto err_exit0;
+ }
+ }
+
+ /* Allocate the requested queues in HW for this dev */
+
+ for (i = 0; i < alloc_rx_queues; i++) {
+#ifdef SCATTER_GATHER
+ eth_dev->rx_queue[i] = queue_ids[i];
+#else
+ int queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);
+
+ if (queue_id < 0) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: no more free queue IDs in NIC\n");
+ goto err_exit0;
+ }
+
+ eth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;
+ eth_dev->rx_queue[eth_dev->num_queues].hw_id =
+ ndev->be.iface->alloc_rx_queue(ndev->be.be_dev,
+ eth_dev->rx_queue[eth_dev->num_queues].id);
+ if (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: could not allocate a new queue\n");
+ goto err_exit0;
+ }
+
+ if (queue_ids) {
+ queue_ids[eth_dev->num_queues] =
+ eth_dev->rx_queue[eth_dev->num_queues];
+ }
+#endif
+ if (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||
+ (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&
+ exception_path))) {
+ /*
+ * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL
+ */
+ if (hw_mod_qsl_unmq_set(&ndev->be,
+ HW_QSL_UNMQ_DEST_QUEUE,
+ eth_dev->port,
+ eth_dev->rx_queue[0].hw_id) < 0)
+ goto err_exit0;
+ if (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,
+ eth_dev->port, 1) < 0)
+ goto err_exit0;
+ if (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <
+ 0)
+ goto err_exit0;
+ }
+
+ eth_dev->num_queues++;
+ }
+
+ eth_dev->rss_target_id = -1;
+
+ if (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+ for (i = 0; i < eth_dev->num_queues; i++) {
+ uint32_t qen_value = 0;
+ uint32_t queue_id =
+ (uint32_t)eth_dev->rx_queue[i].hw_id;
+
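+			/* QEN packs four queue-enable bits per entry: entry = hw_id / 4, bit = hw_id % 4 */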
+ hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+ queue_id / 4, &qen_value);
+ hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+ queue_id / 4,
+ qen_value | (1 << (queue_id % 4)));
+ hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+ }
+ }
+
+ *rss_target_id = eth_dev->rss_target_id;
+
+#ifdef FLOW_DEBUG
+ ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+ FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+ nic_insert_eth_port_dev(ndev, eth_dev);
+
+ pthread_mutex_unlock(&ndev->mtx);
+ pthread_mutex_unlock(&base_mtx);
+ return eth_dev;
+
+err_exit0:
+ pthread_mutex_unlock(&ndev->mtx);
+ pthread_mutex_unlock(&base_mtx);
+
+err_exit1:
+ if (eth_dev)
+ free(eth_dev);
+
+#ifdef FLOW_DEBUG
+ ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+ FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+ NT_LOG(DBG, FILTER, "ERR in %s\n", __func__);
+ return NULL; /* Error exit */
+}
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+ struct flow_queue_id_s *queue_id)
+{
+ uint32_t qen_value = 0;
+
+ eth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;
+ eth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;
+ eth_dev->num_queues += 1;
+
+	hw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4, &qen_value);
+	hw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN,
+			   queue_id->hw_id / 4,
+			   qen_value | (1 << (queue_id->hw_id % 4)));
+	hw_mod_qsl_qen_flush(&eth_dev->ndev->be, queue_id->hw_id / 4, 1);
+
+ return 0;
+}
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
+{
+ struct flow_nic_dev *ndev = eth_dev->ndev;
+
+ if (!ndev) {
+ /* Error invalid nic device */
+ return -1;
+ }
+
+ NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i\n", eth_dev,
+ eth_dev->port);
+
+#ifdef FLOW_DEBUG
+ ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+ FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+ /* delete all created flows from this device */
+ pthread_mutex_lock(&ndev->mtx);
+
+ struct flow_handle *flow = ndev->flow_base;
+
+ while (flow) {
+ if (flow->dev == eth_dev) {
+ struct flow_handle *flow_next = flow->next;
+
+ if (ndev->flow_profile ==
+ FLOW_ETH_DEV_PROFILE_VSWITCH) {
+ NT_LOG(ERR, FILTER, "vSwitch profile not supported");
+ return -1;
+ }
+ flow_destroy_locked_profile_inline(eth_dev,
+ flow, NULL);
+ flow = flow_next;
+ } else {
+ flow = flow->next;
+ }
+ }
+
+ /*
+ * remove unmatched queue if setup in QSL
+ * remove exception queue setting in QSL UNM
+ */
+ hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,
+ 0);
+ hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
+ hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);
+
+ if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {
+ for (int i = 0; i < eth_dev->num_queues; ++i) {
+ uint32_t qen_value = 0;
+ uint32_t queue_id =
+ (uint32_t)eth_dev->rx_queue[i].hw_id;
+
+ hw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,
+ queue_id / 4, &qen_value);
+ hw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,
+ queue_id / 4,
+ qen_value & ~(1U << (queue_id % 4)));
+ hw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);
+ }
+ }
+
+#ifdef FLOW_DEBUG
+ ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+ FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+#ifndef SCATTER_GATHER
+ /* free rx queues */
+ for (int i = 0; i < eth_dev->num_queues; i++) {
+ ndev->be.iface->free_rx_queue(ndev->be.be_dev,
+ eth_dev->rx_queue[i].hw_id);
+ flow_nic_deref_resource(ndev, RES_QUEUE,
+ eth_dev->rx_queue[i].id);
+ }
+#endif
+
+ /* take eth_dev out of ndev list */
+ if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
+ NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found\n", eth_dev);
+
+ pthread_mutex_unlock(&ndev->mtx);
+
+ /* free eth_dev */
+ free(eth_dev);
+ return 0;
+}
+
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+ uint8_t vport)
+{
+ return tunnel_get_definition(tun, flow_stat_id, vport);
+}
+
+/*
+ * ***************************** Flow API NIC Setup ***************************************
+ * Flow backend creation function - register and initialize common backend API to FPA modules
+ * ******************************************************************************************
+ */
+
+static int init_resource_elements(struct flow_nic_dev *ndev,
+ enum res_type_e res_type, uint32_t count)
+{
+ assert(ndev->res[res_type].alloc_bm == NULL);
+ /* allocate bitmap and ref counter */
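+	/* single allocation: the bitmap bytes come first, followed by the per-element ref counter array */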
+ ndev->res[res_type].alloc_bm =
+ calloc(1, BIT_CONTAINER_8_ALIGN(count) + count * sizeof(uint32_t));
+ if (ndev->res[res_type].alloc_bm) {
+ ndev->res[res_type].ref =
+ (uint32_t *)&ndev->res[res_type]
+ .alloc_bm[BIT_CONTAINER_8_ALIGN(count)];
+ ndev->res[res_type].resource_count = count;
+ return 0;
+ }
+ return -1;
+}
+
+static void done_resource_elements(struct flow_nic_dev *ndev,
+ enum res_type_e res_type)
+{
+ assert(ndev);
+ if (ndev->res[res_type].alloc_bm)
+ free(ndev->res[res_type].alloc_bm);
+}
+
+static void list_insert_flow_nic(struct flow_nic_dev *ndev)
+{
+ pthread_mutex_lock(&base_mtx);
+ ndev->next = dev_base;
+ dev_base = ndev;
+ pthread_mutex_unlock(&base_mtx);
+}
+
+static int list_remove_flow_nic(struct flow_nic_dev *ndev)
+{
+ pthread_mutex_lock(&base_mtx);
+ struct flow_nic_dev *nic_dev = dev_base, *prev = NULL;
+
+ while (nic_dev) {
+ if (nic_dev == ndev) {
+ if (prev)
+ prev->next = nic_dev->next;
+ else
+ dev_base = nic_dev->next;
+ pthread_mutex_unlock(&base_mtx);
+ return 0;
+ }
+ prev = nic_dev;
+ nic_dev = nic_dev->next;
+ }
+
+ pthread_mutex_unlock(&base_mtx);
+ return -1;
+}
+
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+ const struct flow_api_backend_ops *be_if,
+ void *be_dev)
+{
+ if (!be_if || be_if->version != 1) {
+ NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+ return NULL;
+ }
+
+ struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));
+
+ if (!ndev) {
+ NT_LOG(ERR, FILTER, "ERROR: calloc failed\n");
+ return NULL;
+ }
+
+	/*
+	 * To dump module initialization writes, use FLOW_BACKEND_DEBUG_MODE_WRITE here;
+	 * remember to set it back to FLOW_BACKEND_DEBUG_MODE_NONE afterwards.
+	 */
+ be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
+
+ if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
+ goto err_exit;
+ ndev->adapter_no = adapter_no;
+
+ ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?
+ 256 :
+ ndev->be.num_rx_ports);
+
+ /*
+ * Free resources in NIC must be managed by this module
+ * Get resource sizes and create resource manager elements
+ */
+ if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_CAT_CFN,
+ ndev->be.cat.nb_cat_funcs))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_KM_FLOW_TYPE,
+ ndev->be.cat.nb_flow_types))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_KM_CATEGORY,
+ ndev->be.km.nb_categories))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_PDB_RCP,
+ ndev->be.pdb.nb_pdb_rcp_categories))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_QSL_RCP,
+ ndev->be.qsl.nb_rcp_categories))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_QSL_QST,
+ ndev->be.qsl.nb_qst_entries))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_IOA_RCP,
+ ndev->be.ioa.nb_rcp_categories))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_ROA_RCP,
+ ndev->be.roa.nb_tun_categories))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,
+ ndev->be.cat.nb_flow_types))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_FLM_RCP,
+ ndev->be.flm.nb_categories))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_HST_RCP,
+ ndev->be.hst.nb_hst_rcp_categories))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_TPE_RCP,
+ ndev->be.tpe.nb_rcp_categories))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_TPE_EXT,
+ ndev->be.tpe.nb_rpl_ext_categories))
+ goto err_exit;
+ if (init_resource_elements(ndev, RES_TPE_RPL,
+ ndev->be.tpe.nb_rpl_depth))
+ goto err_exit;
+
+ /* may need IPF, COR */
+
+	/* check that all defined resources have been initialized */
+ for (int i = 0; i < RES_COUNT; i++)
+ assert(ndev->res[i].alloc_bm);
+
+ pthread_mutex_init(&ndev->mtx, NULL);
+ list_insert_flow_nic(ndev);
+
+ return ndev;
+
+err_exit:
+ if (ndev)
+ flow_api_done(ndev);
+ NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+ return NULL;
+}
+
+int flow_api_done(struct flow_nic_dev *ndev)
+{
+ NT_LOG(DBG, FILTER, "FLOW API DONE\n");
+ if (ndev) {
+ flow_ndev_reset(ndev);
+
+ /* delete resource management allocations for this ndev */
+ for (int i = 0; i < RES_COUNT; i++)
+ done_resource_elements(ndev, i);
+
+ flow_api_backend_done(&ndev->be);
+ list_remove_flow_nic(ndev);
+ free(ndev);
+ }
+ return 0;
+}
+
+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
+{
+ if (!ndev) {
+ NT_LOG(DBG, FILTER, "ERR: %s\n", __func__);
+ return NULL;
+ }
+ return ndev->be.be_dev;
+}
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no)
+{
+ struct flow_eth_dev *eth_dev =
+ nic_and_port_to_eth_dev(adapter_no, port_no);
+ return eth_dev->num_queues;
+}
+
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)
+{
+ struct flow_eth_dev *eth_dev =
+ nic_and_port_to_eth_dev(adapter_no, port_no);
+ return eth_dev->rx_queue[queue_no].hw_id;
+}
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
+{
+ if (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE)
+ return flow_get_flm_stats_profile_inline(ndev, data, size);
+ return -1;
+}
new file mode 100644
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_H_
+#define _FLOW_API_H_
+
+#include <pthread.h>
+
+#include "ntlog.h"
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+/*
+ * ****************************************************
+ * Flow NIC and Eth port device management
+ * ****************************************************
+ */
+
+struct hw_mod_resource_s {
+ uint8_t *alloc_bm; /* allocation bitmap */
+ uint32_t *ref; /* reference counter for each resource element */
+ uint32_t resource_count; /* number of total available entries */
+};
+
+/*
+ * Set of definitions to be used to map desirable fields for RSS
+ * hash functions. Supposed to be used with dpdk, so the values
+ * correspond to dpdk definitions, but we avoid dependency to
+ * dpdk headers here.
+ */
+
+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)
+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)
+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)
+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)
+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)
+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)
+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)
+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)
+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)
+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)
+
+#define NT_ETH_RSS_IP \
+ (NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 | \
+ NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 | \
+ NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \
+ NT_ETH_RSS_IPV6_EX)
+
+/*
+ * level 1, requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ */
+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
+
+/*
+ * level 2, requests RSS to be performed on the specified inner packet
+ * encapsulation level, from outermost to innermost (lower to higher values).
+ */
+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
+
+/*
+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits
+ * to avoid occasional incorrect usage interfacing with higher level
+ * framework (e.g. DPDK)
+ */
+struct nt_eth_rss {
+ uint64_t fields;
+};
+
+struct flow_eth_dev {
+ struct flow_nic_dev *ndev; /* NIC that owns this port device */
+ uint8_t port; /* NIC port id */
+ uint32_t port_id; /* App assigned port_id - may be DPDK port_id */
+
+ struct flow_queue_id_s
+ rx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */
+ int num_queues; /* VSWITCH has exceptions sent on queue 0 per design */
+
+ int rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */
+ struct flow_eth_dev *next;
+};
+
+enum flow_nic_hash_e {
+ HASH_ALGO_ROUND_ROBIN = 0,
+ HASH_ALGO_5TUPLE,
+};
+
+/* registered NIC backends */
+struct flow_nic_dev {
+ uint8_t adapter_no; /* physical adapter no in the host system */
+ uint16_t ports; /* number of in-ports addressable on this NIC */
+ enum flow_eth_dev_profile
+ flow_profile; /* flow profile this NIC is initially prepared for */
+ int flow_mgnt_prepared;
+
+ struct hw_mod_resource_s
+ res[RES_COUNT]; /* raw NIC resource allocation table */
+ void *flm_res_handle;
+ void *km_res_handle;
+ void *kcc_res_handle;
+
+ void *flm_mtr_handle;
+ void *ft_res_handle;
+ void *mtr_stat_handle;
+ void *group_handle;
+
+ /* statistics */
+ uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];
+
+ struct flow_handle
+ *flow_base; /* linked list of all flows created on this NIC */
+ struct flow_handle *
+ flow_base_flm; /* linked list of all FLM flows created on this NIC */
+
+ struct flow_api_backend_s be; /* NIC backend API */
+ struct flow_eth_dev *
+ eth_base; /* linked list of created eth-port devices on this NIC */
+ pthread_mutex_t mtx;
+
+ int default_qsl_drop_index; /* pre allocated default QSL Drop */
+ int default_qsl_discard_index; /* pre allocated default QSL Discard */
+ /* RSS hash function settings bitfields correspond to data used for hashing */
+ struct nt_eth_rss
+ rss_hash_config;
+ struct flow_nic_dev *next; /* next NIC linked list */
+};
+
+/*
+ * ****************************************************
+ * Error
+ * ****************************************************
+ */
+
+enum flow_nic_err_msg_e {
+ ERR_SUCCESS = 0,
+ ERR_FAILED = 1,
+ ERR_MEMORY = 2,
+ ERR_OUTPUT_TOO_MANY = 3,
+ ERR_RSS_TOO_MANY_QUEUES = 4,
+ ERR_VLAN_TYPE_NOT_SUPPORTED = 5,
+ ERR_VXLAN_HEADER_NOT_ACCEPTED = 6,
+ ERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,
+ ERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,
+ ERR_MATCH_VLAN_TOO_MANY = 9,
+ ERR_MATCH_INVALID_IPV6_HDR = 10,
+ ERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,
+ ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,
+ ERR_MATCH_FAILED_BY_HW_LIMITS = 13,
+ ERR_MATCH_RESOURCE_EXHAUSTION = 14,
+ ERR_MATCH_FAILED_TOO_COMPLEX = 15,
+ ERR_ACTION_REPLICATION_FAILED = 16,
+ ERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,
+ ERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,
+ ERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,
+ ERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,
+ ERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,
+ ERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,
+ ERR_INTERNAL_QSL_COMPARE_FAILED = 23,
+ ERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,
+ ERR_MATCH_ENTROPY_FAILED = 25,
+ ERR_MATCH_CAM_EXHAUSTED = 26,
+ ERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,
+ ERR_ACTION_UNSUPPORTED = 28,
+ ERR_REMOVE_FLOW_FAILED = 29,
+ ERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,
+ ERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,
+ ERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,
+ ERR_OUTPUT_INVALID = 33,
+ ERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,
+ ERR_MATCH_CAT_CAM_EXHAUSTED = 35,
+ ERR_MATCH_KCC_KEY_CLASH = 36,
+ ERR_MATCH_CAT_CAM_FAILED = 37,
+ ERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,
+ ERR_FLOW_PRIORITY_VALUE_INVALID = 39,
+ ERR_MSG_NO_MSG
+};
+
+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);
+
+/*
+ * ****************************************************
+ * Resources
+ * ****************************************************
+ */
+
+extern const char *dbg_res_descr[];
+
+#define flow_nic_set_bit(arr, x) \
+ do { \
+ uint8_t *_temp_arr = (arr); \
+ size_t _temp_x = (x); \
+ _temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \
+ (uint8_t)(1 << (_temp_x % 8))); \
+ } while (0)
+
+
+
+#define flow_nic_unset_bit(arr, x) \
+ do { \
+ size_t _temp_x = (x); \
+ arr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \
+ } while (0)
+
+#define flow_nic_is_bit_set(arr, x) \
+ ({ \
+ size_t _temp_x = (x); \
+ (arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \
+ })
+
+#define flow_nic_mark_resource_used(_ndev, res_type, index) \
+ do { \
+ struct flow_nic_dev *_temp_ndev = (_ndev); \
+ __typeof__(res_type) _temp_res_type = (res_type); \
+ size_t _temp_index = (index); \
+ NT_LOG(DBG, FILTER, "mark resource used: %s idx %zu\n", \
+ dbg_res_descr[_temp_res_type], _temp_index); \
+ assert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \
+ == 0); \
+ flow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \
+ } while (0)
+
+
+
+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \
+ do { \
+ __typeof__(res_type) _temp_res_type = (res_type); \
+ size_t _temp_index = (index); \
+ NT_LOG(DBG, FILTER, "mark resource unused: %s idx %zu\n", \
+ dbg_res_descr[_temp_res_type], _temp_index); \
+ flow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \
+ } while (0)
+
+
+#define flow_nic_is_resource_used(_ndev, res_type, index) \
+ (!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))
+
+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+ uint32_t alignment);
+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,
+ enum res_type_e res_type);
+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,
+ enum res_type_e res_type, unsigned int num,
+ uint32_t alignment);
+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+ int idx);
+
+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+ int index);
+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,
+ int index);
+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,
+ enum res_type_e res_type, int idx_start);
+
+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,
+ enum res_type_e res_type,
+ struct flow_handle *fh, uint32_t count,
+ uint32_t alignment);
+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,
+ enum res_type_e res_type, int idx,
+ struct flow_handle *fh);
+
+/*
+ * ****************************************************
+ * Other
+ * ****************************************************
+ */
+
+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);
+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);
+
+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,
+ enum flow_nic_hash_e algorithm);
+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,
+ struct nt_eth_rss fields);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+ uint32_t value);
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);
+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);
+
+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,
+ uint64_t size);
+
+#endif
new file mode 100644
@@ -0,0 +1,5128 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pthread.h>
+#include <unistd.h> /* sleep() */
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include <inttypes.h>
+
+#include "ntlog.h"
+
+#include "flow_api_nic_setup.h"
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_api_profile_inline.h"
+
+#include <rte_spinlock.h>
+
+#define UNUSED __rte_unused
+
+/*
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,
+ int flow_type, int lookup, int enable)
+{
+ const int max_lookups = 4;
+ const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+ int fte_index =
+ (8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+ int fte_field = cfn_index % cat_funcs;
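+	/*
+	 * Each FTE entry holds one enable bit per category function in a bank of
+	 * 'cat_funcs' CFNs; fte_index selects the (flow type, CFN bank, lookup)
+	 * entry and fte_field selects this CFN's bit within that entry.
+	 */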
+
+ uint32_t current_bm = 0;
+ uint32_t fte_field_bm = 1 << fte_field;
+
+ hw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			      fte_index, &current_bm);
+
+ uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+ (~fte_field_bm & current_bm);
+
+ if (current_bm != final_bm) {
+ hw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+ KM_FLM_IF_FIRST, fte_index, final_bm);
+ hw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+ 1);
+ }
+
+ return 0;
+}
+
+/*
+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.
+ */
+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,
+ int flow_type, int lookup, int enable)
+{
+ const int max_lookups = 4;
+ const int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;
+
+ int fte_index =
+ (8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;
+ int fte_field = cfn_index % cat_funcs;
+
+ uint32_t current_bm = 0;
+ uint32_t fte_field_bm = 1 << fte_field;
+
+ hw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,
+			       fte_index, &current_bm);
+
+ uint32_t final_bm = enable ? (fte_field_bm | current_bm) :
+ (~fte_field_bm & current_bm);
+
+ if (current_bm != final_bm) {
+ hw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,
+ KM_FLM_IF_FIRST, fte_index, final_bm);
+ hw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,
+ 1);
+ }
+
+ return 0;
+}
+
+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)
+{
+ for (int i = 0; i < dev->num_queues; ++i) {
+ if (dev->rx_queue[i].id == id)
+ return dev->rx_queue[i].hw_id;
+ }
+ return -1;
+}
+
+/*
+ * Flow Matcher functionality
+ */
+
+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)
+{
+ int success = 0;
+
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+ 0x10);
+ hw_mod_flm_control_flush(&ndev->be);
+
+ /* Wait for ddr4 calibration/init done */
+ for (uint32_t i = 0; i < 1000000; ++i) {
+ uint32_t value = 0;
+
+ hw_mod_flm_status_update(&ndev->be);
+ hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,
+ &value);
+ if (value) {
+ success = 1;
+ break;
+ }
+ usleep(1);
+ }
+
+ if (!success) {
+ /* "Flow matcher initialization failed - SDRAM calibration failed"; */
+ return -1;
+ }
+
+ /* Set the flow scrubber and timeout settings */
+ hw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);
+ hw_mod_flm_timeout_flush(&ndev->be);
+
+ hw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);
+ hw_mod_flm_scrub_flush(&ndev->be);
+
+ return 0;
+}
+
+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)
+{
+ int success = 0;
+
+ /*
+ * Make sure no lookup is performed during init, i.e.
+ * disable every category and disable FLM
+ */
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);
+ hw_mod_flm_control_flush(&ndev->be);
+
+ for (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)
+ hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);
+ hw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);
+
+ /* Wait for FLM to enter Idle state */
+ for (uint32_t i = 0; i < 1000000; ++i) {
+ uint32_t value = 0;
+
+ hw_mod_flm_status_update(&ndev->be);
+ hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);
+ if (value) {
+ success = 1;
+ break;
+ }
+ usleep(1);
+ }
+
+ if (!success) {
+ /* "Flow matcher initialization failed - never idle"; */
+ return -1;
+ }
+
+ success = 0;
+
+ /* Start SDRAM initialization */
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);
+ hw_mod_flm_control_flush(&ndev->be);
+
+ for (uint32_t i = 0; i < 1000000; ++i) {
+ uint32_t value = 0;
+
+ hw_mod_flm_status_update(&ndev->be);
+ hw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,
+ &value);
+ if (value) {
+ success = 1;
+ break;
+ }
+ usleep(1);
+ }
+
+ if (!success) {
+ /* "Flow matcher initialization failed - SDRAM initialization incomplete"; */
+ return -1;
+ }
+
+ /* Set the INIT value back to zero to clear the bit in the SW register cache */
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);
+ hw_mod_flm_control_flush(&ndev->be);
+
+ /* Enable FLM */
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);
+ hw_mod_flm_control_flush(&ndev->be);
+
+ return 0;
+}
+
+#define FLM_FLOW_RCP_MAX 32
+#define FLM_FLOW_FT_MAX 16
+
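+/*
+ * Packed 64-bit summary of a flow's forwarding/encap/decap actions;
+ * the 'data' member gives a single-word view of the whole identity.
+ */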
+struct flm_flow_ft_ident_s {
+ union {
+ struct {
+ uint64_t in_use : 1;
+ uint64_t drop : 1;
+ uint64_t ltx_en : 1;
+ uint64_t ltx_port : 1;
+ uint64_t queue_en : 1;
+ uint64_t queue : 8;
+ uint64_t encap_len : 8;
+ uint64_t encap_vlans : 2;
+ uint64_t encap_ip : 1;
+ uint64_t decap_end : 5;
+ uint64_t jump_to_group : 8;
+ uint64_t pad : 27;
+ };
+ uint64_t data;
+ };
+};
+
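+/*
+ * Packed description of a KM/FLM key layout: dynamic selector and offset for
+ * two quad-words (QW0, QW4) and two single words (SW8, SW9), plus outer/inner
+ * protocol bits. The 'data' member gives a single-word view.
+ */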
+struct flm_flow_key_def_s {
+ union {
+ struct {
+ uint64_t qw0_dyn : 7;
+ uint64_t qw0_ofs : 8;
+ uint64_t qw4_dyn : 7;
+ uint64_t qw4_ofs : 8;
+ uint64_t sw8_dyn : 7;
+ uint64_t sw8_ofs : 8;
+ uint64_t sw9_dyn : 7;
+ uint64_t sw9_ofs : 8;
+ uint64_t outer_proto : 1;
+ uint64_t inner_proto : 1;
+ uint64_t pad : 2;
+ };
+ uint64_t data;
+ };
+};
+
+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)
+{
+ struct flm_flow_ft_ident_s ft_ident;
+
+ assert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));
+
+ memset(&ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+ ft_ident.in_use = 1;
+
+ if (fd->dst_num_avail == 0) {
+ ft_ident.drop = 1;
+ } else {
+ for (int i = 0; i < fd->dst_num_avail; ++i) {
+ if (fd->dst_id[i].type == PORT_PHY) {
+ ft_ident.ltx_en = 1;
+ ft_ident.ltx_port = fd->dst_id[i].id;
+ } else if (fd->dst_id[i].type == PORT_VIRT) {
+ ft_ident.queue_en = 1;
+ ft_ident.queue = fd->dst_id[i].id;
+ }
+ }
+ }
+
+ if (fd->tun_hdr.len > 0) {
+ ft_ident.encap_len = fd->tun_hdr.len;
+ ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+ ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+ }
+
+ ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+ if (fd->jump_to_group != UINT32_MAX)
+ ft_ident.jump_to_group = fd->jump_to_group & 0xff;
+
+ return ft_ident;
+}
+
+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,
+ unsigned int qw, unsigned int dyn,
+ unsigned int ofs)
+{
+ assert(qw < 2);
+ if (qw == 0) {
+ key_def->qw0_dyn = dyn & 0x7f;
+ key_def->qw0_ofs = ofs & 0xff;
+ } else {
+ key_def->qw4_dyn = dyn & 0x7f;
+ key_def->qw4_ofs = ofs & 0xff;
+ }
+}
+
+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,
+ unsigned int sw, unsigned int dyn,
+ unsigned int ofs)
+{
+ assert(sw < 2);
+ if (sw == 0) {
+ key_def->sw8_dyn = dyn & 0x7f;
+ key_def->sw8_ofs = ofs & 0xff;
+ } else {
+ key_def->sw9_dyn = dyn & 0x7f;
+ key_def->sw9_ofs = ofs & 0xff;
+ }
+}
+
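+/*
+ * Per-group bookkeeping for the inline FLM profile: the group-0 CFN and KM
+ * flow type used to implement the group's miss path, the key definition in
+ * use, and the FT entries (action sets) already allocated in the group. One
+ * instance per FLM RCP, held in struct flm_flow_handle_s.
+ */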
+struct flm_flow_group_s {
+ int cfn_group0;
+ int km_ft_group0;
+ struct flow_handle *fh_group0;
+
+ struct flm_flow_key_def_s key_def;
+
+ int miss_enabled;
+
+ struct flm_flow_group_ft_s {
+ struct flm_flow_ft_ident_s ident;
+ struct flow_handle *fh;
+ } ft[FLM_FLOW_FT_MAX];
+
+ uint32_t cashed_ft_index;
+};
+
+struct flm_flow_handle_s {
+ struct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];
+};
+
+static void flm_flow_handle_create(void **handle)
+{
+ struct flm_flow_handle_s *flm_handle;
+
+ if (!*handle)
+ *handle = calloc(1, sizeof(struct flm_flow_handle_s));
+
+ else
+ memset(*handle, 0x0, sizeof(struct flm_flow_handle_s));
+
+ flm_handle = (struct flm_flow_handle_s *)*handle;
+
+ for (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {
+ flm_handle->groups[i].cfn_group0 = -1;
+ flm_handle->groups[i].fh_group0 = NULL;
+ }
+}
+
+static void flm_flow_handle_remove(void **handle)
+{
+ free(*handle);
+ *handle = NULL;
+}
+
+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,
+ int cfn, int km_ft, struct flow_handle *fh)
+{
+ struct flm_flow_handle_s *flm_handle =
+ (struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+ struct flm_flow_group_s *flm_group;
+
+ if (group_index >= FLM_FLOW_RCP_MAX) {
+ NT_LOG(ERR, FILTER,
+ "FLM: Invalid index for FLM programming: Group=%d\n",
+ (int)group_index);
+ return -1;
+ }
+
+ flm_group = &flm_handle->groups[group_index];
+
+ flm_group->cfn_group0 = cfn;
+ flm_group->km_ft_group0 = km_ft;
+ flm_group->fh_group0 = fh;
+ flm_group->miss_enabled = 0;
+
+ return 0;
+}
+
+static int flm_flow_destroy_group(struct flow_eth_dev *dev,
+ uint32_t group_index)
+{
+ struct flm_flow_handle_s *flm_handle =
+ (struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+ struct flm_flow_group_s *flm_group;
+
+ if (group_index >= FLM_FLOW_RCP_MAX) {
+ NT_LOG(ERR, FILTER,
+ "FLM: Invalid index for FLM programming: Group=%d\n",
+ (int)group_index);
+ return -1;
+ }
+
+ flm_group = &flm_handle->groups[group_index];
+
+ memset(flm_group, 0x0, sizeof(struct flm_flow_group_s));
+ flm_group->cfn_group0 = -1;
+
+ return 0;
+}
+
+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,
+ uint32_t group_index,
+ struct flow_handle **fh_miss)
+{
+ struct flm_flow_handle_s *flm_handle =
+ (struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+
+ if (group_index >= FLM_FLOW_RCP_MAX) {
+ NT_LOG(ERR, FILTER,
+ "FLM: Invalid index for FLM programming: Group=%d\n",
+ (int)group_index);
+ return -1;
+ }
+
+ *fh_miss = flm_handle->groups[group_index].fh_group0;
+
+ return 0;
+}
+
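+/*
+ * Program the FLM RCP record for a group: enable lookup, set the QW/SW
+ * dynamic offsets from the key definition, write the 10-word key mask (note
+ * the word order: SW9, SW8, then QW4 and QW0 with their words reversed) and
+ * select KID = group + 2 (KID 1 is used by the meter learn records below)
+ * together with the byte-counter offset.
+ */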
+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,
+ struct flm_flow_key_def_s *key_def,
+ uint32_t *packet_mask, uint32_t group_index)
+{
+ if (group_index >= FLM_FLOW_RCP_MAX) {
+ NT_LOG(ERR, FILTER,
+ "FLM: Invalid index for FLM programming: Group=%d\n",
+ (int)group_index);
+ return -1;
+ }
+
+ uint32_t flm_mask[10] = {
+ packet_mask[0], /* SW9 */
+ packet_mask[1], /* SW8 */
+ packet_mask[5], packet_mask[4],
+ packet_mask[3], packet_mask[2], /* QW4 */
+ packet_mask[9], packet_mask[8],
+ packet_mask[7], packet_mask[6], /* QW0 */
+ };
+
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);
+
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,
+ key_def->qw0_dyn);
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,
+ key_def->qw0_ofs);
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,
+ key_def->qw4_dyn);
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,
+ key_def->qw4_ofs);
+
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,
+ key_def->sw8_dyn);
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,
+ key_def->sw8_ofs);
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,
+ key_def->sw9_dyn);
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,
+ key_def->sw9_ofs);
+
+ hw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,
+ flm_mask);
+
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,
+ group_index + 2);
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,
+ key_def->outer_proto);
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,
+ key_def->inner_proto);
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,
+ -20);
+
+ hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+ return 0;
+}
+
+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)
+{
+ struct flm_flow_handle_s *flm_handle =
+ (struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+ struct flm_flow_group_s *flm_group;
+
+ if (group_index >= FLM_FLOW_RCP_MAX) {
+ NT_LOG(ERR, FILTER,
+ "FLM: Invalid index for FLM programming: Group=%d\n",
+ (int)group_index);
+ return -1;
+ }
+
+ flm_group = &flm_handle->groups[group_index];
+
+ hw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,
+ 0);
+ hw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);
+
+ if (flm_group->miss_enabled) {
+ uint32_t bm = 0;
+
+ /* Change group 0 FLM RCP selection to point to 0 */
+ hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+ KM_FLM_IF_FIRST, flm_group->cfn_group0,
+ 0);
+ hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+ flm_group->cfn_group0, 1);
+
+ /* Change group 0 FT MISS to FT UNHANDLED */
+ set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);
+ set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);
+
+ /* Finally, disable FLM for group 0 */
+ hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+ KM_FLM_IF_FIRST,
+ flm_group->cfn_group0 / 8, &bm);
+ hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+ KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,
+ bm & ~(1 << (flm_group->cfn_group0 % 8)));
+ hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+ flm_group->cfn_group0 / 8, 1);
+
+ memset(&flm_group->key_def, 0x0,
+ sizeof(struct flm_flow_key_def_s));
+ flm_group->miss_enabled = 0;
+ }
+
+ return 0;
+}
+
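+/*
+ * Prepare FLM learning of a flow in 'group_index'. The first flow of a group
+ * claims the FLM RCP resource, repoints the group-0 KCS entry to the group,
+ * programs the RCP, swaps the group-0 FT MISS/UNHANDLED types and enables FLM
+ * in the KCE bitmap. Subsequent flows must use the same key definition.
+ * Action sets are deduplicated through the FT identity table; the returned
+ * KID/FT pair is what the learn record will reference.
+ */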
+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,
+ struct flow_handle *fh, uint32_t group_index,
+ struct flm_flow_key_def_s *key_def,
+ uint32_t *packet_mask,
+ /* Return values */
+ uint32_t *kid, uint32_t *ft, int *cfn_to_copy,
+ int *cfn_to_copy_km_ft,
+ struct flow_handle **fh_existing)
+{
+ struct flm_flow_handle_s *flm_handle =
+ (struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+ struct flm_flow_group_s *flm_group;
+ struct flm_flow_ft_ident_s temp_ft_ident;
+ struct nic_flow_def *fd = fh->fd;
+
+ if (group_index >= FLM_FLOW_RCP_MAX) {
+ NT_LOG(ERR, FILTER,
+ "FLM: Invalid index for FLM programming: Group=%d\n",
+ (int)group_index);
+ return -1;
+ }
+
+ flm_group = &flm_handle->groups[group_index];
+
+ if (flm_group->cfn_group0 < 0) {
+ NT_LOG(ERR, FILTER,
+		       "FLM: Attempt to program to an unset CFN: Group=%d\n",
+ (int)group_index);
+ return -1;
+ }
+
+ if (!flm_group->miss_enabled) {
+ uint32_t bm = 0;
+
+ if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,
+ (int)group_index, fh)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not get FLM RCP resource\n");
+ return -1;
+ }
+
+ /* Change group 0 FLM RCP selection to point to "group_index" */
+ hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+ KM_FLM_IF_FIRST, flm_group->cfn_group0,
+ group_index);
+ hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+ flm_group->cfn_group0, 1);
+
+ /* Setup FLM RCP "group_index" */
+ flm_flow_setup_rcp(dev, key_def, packet_mask, group_index);
+
+ /*
+ * Change group 0 FT UNHANDLED to FT MISS
+ * Note: Once this step is done, the filter is invalid until the KCE step is done
+ */
+ set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);
+ set_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);
+
+ /* Finally, enable FLM for group 0 */
+ hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+ KM_FLM_IF_FIRST,
+ flm_group->cfn_group0 / 8, &bm);
+ hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+ KM_FLM_IF_FIRST,
+ flm_group->cfn_group0 / 8,
+ bm | (1 << (flm_group->cfn_group0 % 8)));
+ hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+ flm_group->cfn_group0 / 8, 1);
+
+ flm_group->key_def.data = key_def->data;
+ flm_group->miss_enabled = 1;
+ }
+
+ if (flm_group->key_def.data != key_def->data) {
+ NT_LOG(ERR, FILTER,
+ "FLM: Attempt to program 2 different types of flows into group=%d\n",
+ (int)group_index);
+ return -1;
+ }
+
+ /* Create action set */
+ memset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));
+ temp_ft_ident.in_use = 1;
+
+ if (fd->dst_num_avail == 0) {
+ temp_ft_ident.drop = 1;
+ } else {
+ for (int i = 0; i < fd->dst_num_avail; ++i) {
+ if (fd->dst_id[i].type == PORT_PHY) {
+ temp_ft_ident.ltx_en = 1;
+ temp_ft_ident.ltx_port = fd->dst_id[i].id;
+ } else if (fd->dst_id[i].type == PORT_VIRT) {
+ temp_ft_ident.queue_en = 1;
+ temp_ft_ident.queue = fd->dst_id[i].id;
+ }
+ }
+ }
+
+ /* Set encap/decap data */
+ if (fd->tun_hdr.len > 0) {
+ temp_ft_ident.encap_len = fd->tun_hdr.len;
+ temp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;
+ temp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;
+ }
+
+ temp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;
+
+ /* Find ft ident or create a new one */
+ uint32_t ft_index = 0;
+
+ if (flm_group->cashed_ft_index > 0 &&
+ flm_group->ft[flm_group->cashed_ft_index].ident.data ==
+ temp_ft_ident.data) {
+ ft_index = flm_group->cashed_ft_index;
+ *fh_existing = flm_group->ft[ft_index].fh;
+ } else {
+ for (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {
+ struct flm_flow_ft_ident_s *ft_ident =
+ &flm_group->ft[ft_index].ident;
+ if (ft_ident->data == 0) {
+ ft_ident->data = temp_ft_ident.data;
+ *cfn_to_copy = flm_group->cfn_group0;
+ *cfn_to_copy_km_ft = flm_group->km_ft_group0;
+ flm_group->ft[ft_index].fh = fh;
+ fh->flm_group_index = (uint8_t)group_index;
+ fh->flm_ft_index = (uint8_t)ft_index;
+ break;
+ } else if (ft_ident->data == temp_ft_ident.data) {
+ *fh_existing = flm_group->ft[ft_index].fh;
+ break;
+ }
+ }
+
+ if (ft_index >= FLM_FLOW_FT_MAX) {
+ NT_LOG(ERR, FILTER, "FLM: FT resource not available\n");
+ return -1;
+ }
+
+ flm_group->cashed_ft_index = ft_index;
+ }
+
+ /* Set return values */
+ *kid = group_index + 2;
+ *ft = ft_index;
+
+ return 0;
+}
+
+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,
+ struct flow_handle *fh)
+{
+ int error = 0;
+
+ struct flm_flow_handle_s *flm_handle =
+ (struct flm_flow_handle_s *)dev->ndev->flm_res_handle;
+ struct flm_flow_group_s *flm_group =
+ &flm_handle->groups[fh->flm_group_index];
+
+ memset(&flm_group->ft[fh->flm_ft_index], 0x0,
+ sizeof(struct flm_flow_group_ft_s));
+
+ error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+ flm_group->km_ft_group0, 0, 0);
+ error |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+ (int)fh->flm_ft_index, 2, 0);
+
+ return error;
+}
+
+#define FLM_MTR_PROFILE_SIZE 0x100000
+
+struct flm_flow_mtr_handle_s {
+ struct dual_buckets_s {
+ uint16_t rate_a;
+ uint16_t rate_b;
+ uint16_t size_a;
+ uint16_t size_b;
+ } dual_buckets[FLM_MTR_PROFILE_SIZE];
+};
+
+int flow_mtr_supported(struct flow_eth_dev *dev)
+{
+ return hw_mod_flm_present(&dev->ndev->be) &&
+ dev->ndev->be.flm.nb_variant == 2;
+}
+
+uint64_t flow_mtr_meter_policy_n_max(void)
+{
+ return FLM_MTR_PROFILE_SIZE;
+}
+
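+/*
+ * Convert a byte count to FLM bucket-size units, i.e. multiply by 10^9 / 2^40
+ * and round up. The multiplication is split into two 20-bit halves so the
+ * intermediate products stay within 64 bits.
+ */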
+static inline uint64_t convert_to_bucket_size_units(uint64_t value)
+{
+ /* Assumes a 40-bit int as input */
+ uint64_t lo_bits = (value & 0xfffff) * 1000000000;
+ uint64_t hi_bits = ((value >> 20) & 0xfffff) * 1000000000;
+ uint64_t round_up =
+ (hi_bits & 0xfffff) || (lo_bits & 0xffffffffff) ? 1 : 0;
+ return (hi_bits >> 20) + (lo_bits >> 40) + round_up;
+}
+
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+ uint64_t bucket_rate_a, uint64_t bucket_size_a,
+ uint64_t bucket_rate_b, uint64_t bucket_size_b)
+{
+ struct flow_nic_dev *ndev = dev->ndev;
+ struct flm_flow_mtr_handle_s *handle =
+ (struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;
+ struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+ uint64_t bucket_rate_shift_a = 0;
+ uint64_t bucket_rate_shift_b = 0;
+
+ uint64_t bucket_size_shift_a = 0;
+ uint64_t bucket_size_shift_b = 0;
+
+ /* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */
+ bucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :
+ (bucket_rate_a >> 7);
+ bucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :
+ (bucket_rate_b >> 7);
+
+ /* Round rate down to max rate supported */
+ if (bucket_rate_a > 0x7ff8000)
+ bucket_rate_a = 0x7ff8000;
+ if (bucket_rate_b > 0x7ff8000)
+ bucket_rate_b = 0x7ff8000;
+
+ /* Find shift to convert into 12-bit int */
+ while ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)
+ bucket_rate_shift_a += 1;
+ while ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)
+ bucket_rate_shift_b += 1;
+
+ /* Store in format [11:0] shift-left [15:12] */
+ buckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |
+ (bucket_rate_shift_a << 12);
+ buckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |
+ (bucket_rate_shift_b << 12);
+
+ /* Round size down to 38-bit int */
+ if (bucket_size_a > 0x3fffffffff)
+ bucket_size_a = 0x3fffffffff;
+ if (bucket_size_b > 0x3fffffffff)
+ bucket_size_b = 0x3fffffffff;
+
+ /* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. */
+ bucket_size_a = convert_to_bucket_size_units(bucket_size_a);
+ bucket_size_b = convert_to_bucket_size_units(bucket_size_b);
+
+	/* Round size down to max size supported */
+ if (bucket_size_a > 0x7ff8000)
+ bucket_size_a = 0x7ff8000;
+ if (bucket_size_b > 0x7ff8000)
+ bucket_size_b = 0x7ff8000;
+
+ /* Find shift to convert into 12-bit int */
+ while ((bucket_size_a >> bucket_size_shift_a) > 0xfff)
+ bucket_size_shift_a += 1;
+ while ((bucket_size_b >> bucket_size_shift_b) > 0xfff)
+ bucket_size_shift_b += 1;
+
+ /* Store in format [11:0] shift-left [15:12] */
+ buckets->size_a = (bucket_size_a >> bucket_size_shift_a) |
+ (bucket_size_shift_a << 12);
+ buckets->size_b = (bucket_size_b >> bucket_size_shift_b) |
+ (bucket_size_shift_b << 12);
+
+ return 0;
+}
+
+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,
+ UNUSED uint32_t policy_id, UNUSED int drop)
+{
+ return 0;
+}
+
+#define FLM_MTR_STAT_SIZE 0x1000000
+#define WORDS_PER_INF_DATA \
+ (sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))
+#define MAX_INF_DATA_RECORDS_PER_READ 20
+#define UINT64_MSB ((uint64_t)1 << 63)
+
+/* 2^23 bytes ~ 8MB */
+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8
+/* 2^16 pkt ~ 64K pkt */
+#define FLM_PERIODIC_STATS_PKT_LIMIT 5
+/* 2^38 ns ~ 275 sec */
+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23
+
+uint32_t flow_mtr_meters_supported(void)
+{
+ return FLM_MTR_STAT_SIZE;
+}
+
+struct mtr_stat_s {
+ struct dual_buckets_s *buckets;
+
+ uint64_t n_pkt;
+ uint64_t n_bytes;
+ uint64_t n_pkt_base;
+ uint64_t n_bytes_base;
+ uint64_t stats_mask;
+};
+
+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)
+#define FLM_PROG_MAX_RETRY 100
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+ uint32_t *data);
+
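+/*
+ * Push a single learn record to the FLM learn FIFO. If the FIFO is full,
+ * drain pending info records to make room and retry up to FLM_PROG_MAX_RETRY
+ * times before giving up.
+ */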
+static int flow_flm_apply(struct flow_eth_dev *dev,
+ struct flm_v17_lrn_data_s *learn_record)
+{
+ uint32_t lrn_ready;
+ uint32_t retry = 0;
+ uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+ hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,
+ &lrn_ready);
+ if (lrn_ready < WORDS_PER_LEARN_DATA) {
+ hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+ hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+ HW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);
+ while (lrn_ready < WORDS_PER_LEARN_DATA) {
+ ++retry;
+ if (retry > FLM_PROG_MAX_RETRY)
+ return 1;
+
+ flm_read_inf_rec_locked(dev, data);
+
+ hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+ hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+ HW_FLM_BUF_CTRL_LRN_FREE,
+ &lrn_ready);
+ }
+ }
+
+ int res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,
+ HW_FLM_FLOW_LRN_DATA_V17,
+ (uint32_t *)learn_record);
+ return res;
+}
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+ uint32_t profile_id, UNUSED uint32_t policy_id,
+ uint64_t stats_mask)
+{
+ pthread_mutex_lock(&dev->ndev->mtx);
+
+ struct flm_flow_mtr_handle_s *handle =
+ (struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;
+ struct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];
+
+ struct flm_v17_lrn_data_s learn_record;
+
+ memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+ learn_record.sw9 = mtr_id + 1;
+ learn_record.kid = 1;
+
+ learn_record.rate = buckets->rate_a;
+ learn_record.size = buckets->size_a;
+ learn_record.fill = buckets->size_a & 0x0fff;
+
+ learn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */
+
+ learn_record.ent = 1;
+ learn_record.op = 1;
+ learn_record.eor = 1;
+
+ learn_record.id[0] = mtr_id & 0xff;
+ learn_record.id[1] = (mtr_id >> 8) & 0xff;
+ learn_record.id[2] = (mtr_id >> 16) & 0xff;
+ learn_record.id[3] = (mtr_id >> 24) & 0xff;
+ learn_record.id[8] = 1U << 7;
+
+ if (stats_mask)
+ learn_record.vol_idx = 1;
+
+ int res = flow_flm_apply(dev, &learn_record);
+
+ if (res == 0) {
+ struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+ mtr_stat[mtr_id].buckets = buckets;
+ __atomic_store_n(&mtr_stat[mtr_id].stats_mask, stats_mask, __ATOMIC_RELAXED);
+ }
+
+ pthread_mutex_unlock(&dev->ndev->mtx);
+
+ return res;
+}
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)
+{
+ pthread_mutex_lock(&dev->ndev->mtx);
+
+ struct flm_v17_lrn_data_s learn_record;
+
+ memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+ learn_record.sw9 = mtr_id + 1;
+ learn_record.kid = 1;
+
+ learn_record.ent = 1;
+ learn_record.op = 0;
+ learn_record.eor = 1;
+
+ learn_record.id[0] = mtr_id & 0xff;
+ learn_record.id[1] = (mtr_id >> 8) & 0xff;
+ learn_record.id[2] = (mtr_id >> 16) & 0xff;
+ learn_record.id[3] = (mtr_id >> 24) & 0xff;
+ learn_record.id[8] = 1U << 7;
+
+ /* Clear statistics so stats_mask prevents updates of counters on deleted meters */
+ struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+ __atomic_store_n(&mtr_stat[mtr_id].stats_mask, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&mtr_stat[mtr_id].n_bytes, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&mtr_stat[mtr_id].n_pkt, 0, __ATOMIC_RELAXED);
+ mtr_stat[mtr_id].n_bytes_base = 0;
+ mtr_stat[mtr_id].n_pkt_base = 0;
+ mtr_stat[mtr_id].buckets = NULL;
+
+ int res = flow_flm_apply(dev, &learn_record);
+
+ pthread_mutex_unlock(&dev->ndev->mtx);
+
+ return res;
+}
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+ uint32_t adjust_value)
+{
+ pthread_mutex_lock(&dev->ndev->mtx);
+
+ struct mtr_stat_s *mtr_stat =
+ &((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];
+
+ struct flm_v17_lrn_data_s learn_record;
+
+ memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
+ learn_record.sw9 = mtr_id + 1;
+ learn_record.kid = 1;
+
+ learn_record.rate = mtr_stat->buckets->rate_a;
+ learn_record.size = mtr_stat->buckets->size_a;
+ learn_record.adj = adjust_value;
+
+ learn_record.ft_mbr = 15;
+
+ learn_record.ent = 1;
+ learn_record.op = 2;
+ learn_record.eor = 1;
+
+ if (__atomic_load_n(&mtr_stat->stats_mask, __ATOMIC_RELAXED))
+ learn_record.vol_idx = 1;
+
+ int res = flow_flm_apply(dev, &learn_record);
+
+ pthread_mutex_unlock(&dev->ndev->mtx);
+
+ return res;
+}
+
+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,
+ uint32_t *data)
+{
+ uint32_t inf_cnt = 0;
+
+ hw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,
+ &inf_cnt);
+ if (inf_cnt < WORDS_PER_INF_DATA) {
+ hw_mod_flm_buf_ctrl_update(&dev->ndev->be);
+ hw_mod_flm_buf_ctrl_get(&dev->ndev->be,
+ HW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);
+ }
+
+ uint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;
+
+ if (records_to_read == 0)
+ return 0;
+ if (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)
+ records_to_read = MAX_INF_DATA_RECORDS_PER_READ;
+
+ hw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,
+ data,
+ records_to_read * WORDS_PER_INF_DATA);
+
+ return records_to_read;
+}
+
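+/*
+ * Drain FLM info records and update the per-meter soft counters. The packet
+ * counter is first written with its MSB set and then rewritten without it,
+ * so that flm_mtr_read_stats() can detect an update in progress.
+ */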
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)
+{
+ uint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];
+
+ pthread_mutex_lock(&dev->ndev->mtx);
+ uint32_t records = flm_read_inf_rec_locked(dev, data);
+
+ pthread_mutex_unlock(&dev->ndev->mtx);
+
+ struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+
+ for (uint32_t i = 0; i < records; ++i) {
+ uint32_t *p_record = &data[i * WORDS_PER_INF_DATA];
+
+		/* Check that the received record holds valid meter statistics */
+ if ((p_record[6] < flow_mtr_meters_supported() &&
+ p_record[7] == 0 && (p_record[8] >> 31) == 1)) {
+ uint32_t id = p_record[6];
+
+ /* Don't update a deleted meter */
+ uint64_t stats_mask =
+ __atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+ if (stats_mask) {
+ uint64_t nb = ((uint64_t)p_record[1] << 32) |
+ p_record[0];
+ uint64_t np = ((uint64_t)p_record[3] << 32) |
+ p_record[2];
+
+ __atomic_store_n(&mtr_stat[id].n_pkt,
+ np | UINT64_MSB, __ATOMIC_RELAXED);
+ __atomic_store_n(&mtr_stat[id].n_bytes, nb, __ATOMIC_RELAXED);
+ __atomic_store_n(&mtr_stat[id].n_pkt, np, __ATOMIC_RELAXED);
+ }
+ }
+ }
+
+ return records;
+}
+
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+ uint64_t *stats_mask, uint64_t *green_pkt,
+ uint64_t *green_bytes, int clear)
+{
+ struct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;
+ *stats_mask = __atomic_load_n(&mtr_stat[id].stats_mask, __ATOMIC_RELAXED);
+ if (*stats_mask) {
+ uint64_t pkt_1;
+ uint64_t pkt_2;
+ uint64_t nb;
+
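+		/*
+		 * Take a consistent snapshot: wait while the writer has the
+		 * MSB marker set on n_pkt and retry if n_pkt changed while
+		 * n_bytes was being read (see flm_mtr_update_stats()).
+		 */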
+ do {
+ do {
+ pkt_1 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+ } while (pkt_1 & UINT64_MSB);
+ nb = __atomic_load_n(&mtr_stat[id].n_bytes, __ATOMIC_RELAXED);
+ pkt_2 = __atomic_load_n(&mtr_stat[id].n_pkt, __ATOMIC_RELAXED);
+ } while (pkt_1 != pkt_2);
+
+ *green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;
+ *green_bytes = nb - mtr_stat[id].n_bytes_base;
+ if (clear) {
+ mtr_stat[id].n_pkt_base = pkt_1;
+ mtr_stat[id].n_bytes_base = nb;
+ }
+ }
+}
+
+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)
+{
+ return port + 1;
+}
+
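+/*
+ * Walk the eth devices attached to this NIC to translate a port_id into the
+ * physical port number; returns UINT8_MAX if no device matches.
+ */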
+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,
+ uint32_t port_id)
+{
+ struct flow_eth_dev *dev = ndev->eth_base;
+
+ while (dev) {
+ if (dev->port_id == port_id)
+ return dev->port;
+ dev = dev->next;
+ }
+
+ return UINT8_MAX;
+}
+
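+/*
+ * Doubly-linked lists of flow handles per NIC device: one list for ordinary
+ * flows and one (the *_flm variants below) for FLM-learned flows.
+ */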
+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+ if (ndev->flow_base)
+ ndev->flow_base->prev = fh;
+ fh->next = ndev->flow_base;
+ fh->prev = NULL;
+ ndev->flow_base = fh;
+}
+
+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)
+{
+ struct flow_handle *next = fh->next;
+ struct flow_handle *prev = fh->prev;
+
+ if (next && prev) {
+ prev->next = next;
+ next->prev = prev;
+ } else if (next) {
+ ndev->flow_base = next;
+ next->prev = NULL;
+ } else if (prev) {
+ prev->next = NULL;
+ } else if (ndev->flow_base == fh) {
+ ndev->flow_base = NULL;
+ }
+}
+
+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,
+ struct flow_handle *fh)
+{
+ if (ndev->flow_base_flm)
+ ndev->flow_base_flm->prev = fh;
+ fh->next = ndev->flow_base_flm;
+ fh->prev = NULL;
+ ndev->flow_base_flm = fh;
+}
+
+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,
+ struct flow_handle *fh_flm)
+{
+ struct flow_handle *next = fh_flm->next;
+ struct flow_handle *prev = fh_flm->prev;
+
+ if (next && prev) {
+ prev->next = next;
+ next->prev = prev;
+ } else if (next) {
+ ndev->flow_base_flm = next;
+ next->prev = NULL;
+ } else if (prev) {
+ prev->next = NULL;
+ } else if (ndev->flow_base_flm == fh_flm) {
+ ndev->flow_base_flm = NULL;
+ }
+}
+
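+/*
+ * The flow_elem_type_* helpers below fill the 10-word key image used for both
+ * KM and FLM programming: packet_data[1..0] hold the two SW words (allocated
+ * from index 1 downwards), packet_data[6..9] holds QW0 (used first) and
+ * packet_data[2..5] holds QW4, matching the word reordering applied in
+ * flm_flow_setup_rcp().
+ */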
+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,
+ struct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,
+ uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+ const struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;
+ const struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;
+
+ if (vlan_spec != NULL && vlan_mask != NULL) {
+ if (vlan_mask->tci) {
+ if (implicit_vlan_vid > 0) {
+ NT_LOG(ERR, FILTER, "Multiple VLANs not supported "
+ "for implicit VLAN patterns.\n");
+ flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);
+ free(fd);
+ return 1;
+ }
+
+ if (sw_counter >= 2) {
+ NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+ flow_nic_set_error(ERR_FAILED, error);
+ free(fd);
+ return 1;
+ }
+
+ uint32_t *sw_data = &packet_data[1 - sw_counter];
+ uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+ sw_mask[0] = ntohs(vlan_mask->tci);
+ sw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];
+
+ km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);
+ set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+ sw_counter += 1;
+ }
+ }
+
+ fd->vlans += 1;
+ return 0;
+}
+
+static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,
+ struct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,
+ uint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,
+ uint32_t any_count)
+{
+ const struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;
+ const struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;
+
+ if (ipv4_spec != NULL && ipv4_mask != NULL) {
+ if (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)
+ fd->fragmentation = 0xfe;
+
+ if (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {
+ uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+ uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+ qw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+ qw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);
+
+ qw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];
+ qw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];
+
+ km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);
+ set_key_def_qw(key_def, qw_counter, DYN_L3, 12);
+ qw_counter += 1;
+ } else {
+ if (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +
+ (ipv4_mask->hdr.dst_ip ? 1U : 0U))) {
+ NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+ flow_nic_set_error(ERR_FAILED, error);
+ free(fd);
+ return 1;
+ }
+
+ if (ipv4_mask->hdr.src_ip) {
+ uint32_t *sw_data = &packet_data[1 - sw_counter];
+ uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+ sw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);
+ sw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];
+
+ km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);
+ set_key_def_sw(key_def, sw_counter, DYN_L3, 12);
+ sw_counter += 1;
+ }
+
+ if (ipv4_mask->hdr.dst_ip) {
+ uint32_t *sw_data = &packet_data[1 - sw_counter];
+ uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+ sw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);
+ sw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];
+
+ km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);
+ set_key_def_sw(key_def, sw_counter, DYN_L3, 16);
+ sw_counter += 1;
+ }
+ }
+ }
+
+ if (any_count > 0 || fd->l3_prot != -1)
+ fd->tunnel_l3_prot = PROT_TUN_L3_IPV4;
+ else
+ fd->l3_prot = PROT_L3_IPV4;
+ return 0;
+}
+
+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,
+ struct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,
+ uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+ const struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;
+ const struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;
+
+ if (ipv6_spec != NULL && ipv6_mask != NULL) {
+ if (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {
+ if (qw_counter >= 2) {
+ NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+ flow_nic_set_error(ERR_FAILED, error);
+ free(fd);
+ return 1;
+ }
+
+ uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+ uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+ memcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);
+ memcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);
+
+ qw_data[0] = ntohl(qw_data[0]);
+ qw_data[1] = ntohl(qw_data[1]);
+ qw_data[2] = ntohl(qw_data[2]);
+ qw_data[3] = ntohl(qw_data[3]);
+
+ qw_mask[0] = ntohl(qw_mask[0]);
+ qw_mask[1] = ntohl(qw_mask[1]);
+ qw_mask[2] = ntohl(qw_mask[2]);
+ qw_mask[3] = ntohl(qw_mask[3]);
+
+ qw_data[0] &= qw_mask[0];
+ qw_data[1] &= qw_mask[1];
+ qw_data[2] &= qw_mask[2];
+ qw_data[3] &= qw_mask[3];
+
+ km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);
+ set_key_def_qw(key_def, qw_counter, DYN_L3, 8);
+ qw_counter += 1;
+ }
+
+ if (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {
+ if (qw_counter >= 2) {
+ NT_LOG(ERR, FILTER, "Key size too big. Out of QW resources.\n");
+ flow_nic_set_error(ERR_FAILED, error);
+ free(fd);
+ return 1;
+ }
+
+ uint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];
+ uint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];
+
+ memcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);
+ memcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);
+
+ qw_data[0] = ntohl(qw_data[0]);
+ qw_data[1] = ntohl(qw_data[1]);
+ qw_data[2] = ntohl(qw_data[2]);
+ qw_data[3] = ntohl(qw_data[3]);
+
+ qw_mask[0] = ntohl(qw_mask[0]);
+ qw_mask[1] = ntohl(qw_mask[1]);
+ qw_mask[2] = ntohl(qw_mask[2]);
+ qw_mask[3] = ntohl(qw_mask[3]);
+ qw_data[0] &= qw_mask[0];
+ qw_data[1] &= qw_mask[1];
+ qw_data[2] &= qw_mask[2];
+ qw_data[3] &= qw_mask[3];
+
+ km_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);
+ set_key_def_qw(key_def, qw_counter, DYN_L3, 24);
+ qw_counter += 1;
+ }
+ }
+
+ if (any_count > 0 || fd->l3_prot != -1)
+ fd->tunnel_l3_prot = PROT_TUN_L3_IPV6;
+ else
+ fd->l3_prot = PROT_L3_IPV6;
+ return 0;
+}
+
+static int flow_elem_type_udp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+ struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+ uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+ const struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;
+ const struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;
+
+ if (udp_spec != NULL && udp_mask != NULL) {
+ if (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {
+ if (sw_counter >= 2) {
+ NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+ flow_nic_set_error(ERR_FAILED, error);
+ free(fd);
+ return 1;
+ }
+
+ uint32_t *sw_data = &packet_data[1 - sw_counter];
+ uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+ sw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |
+ ntohs(udp_mask->hdr.dst_port);
+ sw_data[0] = ((ntohs(udp_spec->hdr.src_port) << 16) |
+ ntohs(udp_spec->hdr.dst_port)) & sw_mask[0];
+
+ km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+ set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+ sw_counter += 1;
+ }
+ }
+
+ if (any_count > 0 || fd->l4_prot != -1) {
+ fd->tunnel_l4_prot = PROT_TUN_L4_UDP;
+ key_def->inner_proto = 1;
+ } else {
+ fd->l4_prot = PROT_L4_UDP;
+ key_def->outer_proto = 1;
+ }
+ return 0;
+}
+
+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+ struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+ uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+ const struct flow_elem_sctp *sctp_spec = (const struct flow_elem_sctp *)elem[eidx].spec;
+ const struct flow_elem_sctp *sctp_mask = (const struct flow_elem_sctp *)elem[eidx].mask;
+
+ if (sctp_spec != NULL && sctp_mask != NULL) {
+ if (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {
+ if (sw_counter >= 2) {
+ NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+ flow_nic_set_error(ERR_FAILED, error);
+ free(fd);
+ return 1;
+ }
+
+ uint32_t *sw_data = &packet_data[1 - sw_counter];
+ uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+ sw_mask[0] = (ntohs(sctp_mask->hdr.src_port) << 16) |
+ ntohs(sctp_mask->hdr.dst_port);
+ sw_data[0] = ((ntohs(sctp_spec->hdr.src_port) << 16) |
+ ntohs(sctp_spec->hdr.dst_port)) & sw_mask[0];
+
+ km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+ set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+ sw_counter += 1;
+ }
+ }
+
+ if (any_count > 0 || fd->l4_prot != -1) {
+ fd->tunnel_l4_prot = PROT_TUN_L4_SCTP;
+ key_def->inner_proto = 1;
+ } else {
+ fd->l4_prot = PROT_L4_SCTP;
+ key_def->outer_proto = 1;
+ }
+ return 0;
+}
+
+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+ struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+ uint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)
+{
+ const struct flow_elem_tcp *tcp_spec = (const struct flow_elem_tcp *)elem[eidx].spec;
+ const struct flow_elem_tcp *tcp_mask = (const struct flow_elem_tcp *)elem[eidx].mask;
+
+ if (tcp_spec != NULL && tcp_mask != NULL) {
+ if (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {
+ if (sw_counter >= 2) {
+ NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+ flow_nic_set_error(ERR_FAILED, error);
+ free(fd);
+ return 1;
+ }
+
+ uint32_t *sw_data = &packet_data[1 - sw_counter];
+ uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+ sw_mask[0] = (ntohs(tcp_mask->hdr.src_port) << 16) |
+ ntohs(tcp_mask->hdr.dst_port);
+ sw_data[0] = ((ntohs(tcp_spec->hdr.src_port) << 16) |
+ ntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];
+
+ km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);
+ set_key_def_sw(key_def, sw_counter, DYN_L4, 0);
+ sw_counter += 1;
+ }
+ }
+
+ if (any_count > 0 || fd->l4_prot != -1) {
+ fd->tunnel_l4_prot = PROT_TUN_L4_TCP;
+ key_def->inner_proto = 1;
+ } else {
+ fd->l4_prot = PROT_L4_TCP;
+ key_def->outer_proto = 1;
+ }
+ return 0;
+}
+
+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,
+ struct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,
+ uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+ const struct flow_elem_gtp *gtp_spec = (const struct flow_elem_gtp *)elem[eidx].spec;
+ const struct flow_elem_gtp *gtp_mask = (const struct flow_elem_gtp *)elem[eidx].mask;
+
+ if (gtp_spec != NULL && gtp_mask != NULL) {
+ if (gtp_mask->teid) {
+ if (sw_counter >= 2) {
+ NT_LOG(ERR, FILTER, "Key size too big. Out of SW resources.\n");
+ flow_nic_set_error(ERR_FAILED, error);
+ free(fd);
+ return 1;
+ }
+
+ uint32_t *sw_data = &packet_data[1 - sw_counter];
+ uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+ sw_mask[0] = ntohl(gtp_mask->teid);
+ sw_data[0] = ntohl(gtp_spec->teid) & sw_mask[0];
+
+ km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);
+ set_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);
+ sw_counter += 1;
+ }
+ }
+
+ fd->tunnel_prot = PROT_TUN_GTPV1U;
+ return 0;
+}
+
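+/*
+ * Translate a flow_elem/flow_action list into a struct nic_flow_def. The
+ * first loop walks the actions and records destinations, encap/decap,
+ * modify-field and meter information; an implicit VLAN match is then added
+ * when required; the second loop walks the match items and builds the KM/FLM
+ * key words (at most two QW and two SW entries).
+ */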
+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,
+ const struct flow_elem elem[], const struct flow_action action[],
+ struct flow_error *error, uint16_t implicit_vlan_vid,
+ uint32_t *in_port_id, uint32_t *num_dest_port,
+ uint32_t *num_queues, uint32_t *packet_data,
+ uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+ uint32_t any_count = 0;
+ int mtr_count = 0;
+
+ unsigned int encap_decap_order = 0;
+
+ unsigned int qw_counter = 0;
+ unsigned int sw_counter = 0;
+
+ uint64_t modify_field_use_flags = 0x0;
+
+ *in_port_id = UINT32_MAX;
+ *num_dest_port = 0;
+ *num_queues = 0;
+
+ memset(packet_data, 0x0, sizeof(uint32_t) * 10);
+ memset(packet_mask, 0x0, sizeof(uint32_t) * 10);
+ key_def->data = 0;
+
+ if (action == NULL || elem == NULL) {
+ flow_nic_set_error(ERR_FAILED, error);
+ NT_LOG(ERR, FILTER, "Flow items / actions missing\n");
+ return NULL;
+ }
+
+ struct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));
+
+ if (!fd) {
+ flow_nic_set_error(ERR_MEMORY, error);
+ NT_LOG(ERR, FILTER, "ERR Memory\n");
+ return NULL;
+ }
+
+ /* Set default values for fd */
+ fd->full_offload = -1;
+ fd->in_port_override = -1;
+ fd->mark = UINT32_MAX;
+ fd->jump_to_group = UINT32_MAX;
+
+ fd->l2_prot = -1;
+ fd->l3_prot = -1;
+ fd->l4_prot = -1;
+ fd->vlans = 0;
+ fd->tunnel_prot = -1;
+ fd->tunnel_l3_prot = -1;
+ fd->tunnel_l4_prot = -1;
+ fd->fragmentation = -1;
+
+ NT_LOG(DBG, FILTER,
+ ">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\n",
+ dev, dev->ndev->adapter_no, dev->port, fd);
+
+ /*
+	 * Gather the flow match and actions and convert them into the internal
+	 * flow definition structure (struct nic_flow_def).
+	 * This is the first step in the flow creation - validate, convert and prepare
+ */
+ for (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {
+ switch (action[aidx].type) {
+ case FLOW_ACTION_TYPE_PORT_ID:
+ NT_LOG(DBG, FILTER,
+ "Dev:%p: FLOW_ACTION_TYPE_PORT_ID\n", dev);
+ if (action[aidx].conf) {
+ uint32_t port_id =
+ ((const struct flow_action_port_id *)
+ action[aidx]
+ .conf)
+ ->id;
+ uint8_t port = get_port_from_port_id(dev->ndev,
+ port_id);
+
+ if (fd->dst_num_avail == MAX_OUTPUT_DEST) {
+ /* ERROR too many output destinations */
+ NT_LOG(ERR, FILTER,
+ "Too many output destinations\n");
+ flow_nic_set_error(ERR_OUTPUT_TOO_MANY,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ if (port >= dev->ndev->be.num_phy_ports) {
+ /* ERROR phy port out of range */
+ NT_LOG(ERR, FILTER,
+ "Phy port out of range\n");
+ flow_nic_set_error(ERR_OUTPUT_INVALID,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ /* New destination port to add */
+ fd->dst_id[fd->dst_num_avail].owning_port_id =
+ port_id;
+ fd->dst_id[fd->dst_num_avail].type = PORT_PHY;
+ fd->dst_id[fd->dst_num_avail].id = (int)port;
+ fd->dst_id[fd->dst_num_avail].active = 1;
+ fd->dst_num_avail++;
+
+ if (fd->flm_mtu_fragmentation_recipe == 0) {
+ fd->flm_mtu_fragmentation_recipe =
+ convert_port_to_ifr_mtu_recipe(port);
+ }
+
+ if (fd->full_offload < 0)
+ fd->full_offload = 1;
+
+ *num_dest_port += 1;
+
+ NT_LOG(DBG, FILTER, "Phy port ID: %i\n",
+ (int)port);
+ }
+ break;
+
+ case FLOW_ACTION_TYPE_QUEUE:
+ NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_QUEUE\n",
+ dev);
+ if (action[aidx].conf) {
+ const struct flow_action_queue *queue =
+ (const struct flow_action_queue *)
+ action[aidx]
+ .conf;
+
+ int hw_id = rx_queue_idx_to_hw_id(dev,
+ queue->index);
+
+ fd->dst_id[fd->dst_num_avail].owning_port_id =
+ dev->port;
+ fd->dst_id[fd->dst_num_avail].id = hw_id;
+ fd->dst_id[fd->dst_num_avail].type = PORT_VIRT;
+ fd->dst_id[fd->dst_num_avail].active = 1;
+ fd->dst_num_avail++;
+
+ NT_LOG(DBG, FILTER,
+ "Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\n",
+ dev, dev->port, queue->index, hw_id);
+
+ fd->full_offload = 0;
+ *num_queues += 1;
+ }
+ break;
+
+ case FLOW_ACTION_TYPE_RSS:
+ NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_RSS\n",
+ dev);
+ if (action[aidx].conf) {
+ const struct flow_action_rss *rss =
+ (const struct flow_action_rss *)
+ action[aidx]
+ .conf;
+
+ for (uint32_t i = 0; i < rss->queue_num; ++i) {
+ int hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);
+
+ fd->dst_id[fd->dst_num_avail]
+ .owning_port_id = dev->port;
+ fd->dst_id[fd->dst_num_avail].id =
+ hw_id;
+ fd->dst_id[fd->dst_num_avail].type =
+ PORT_VIRT;
+ fd->dst_id[fd->dst_num_avail].active =
+ 1;
+ fd->dst_num_avail++;
+ }
+
+ fd->full_offload = 0;
+ *num_queues += rss->queue_num;
+ }
+ break;
+
+ case FLOW_ACTION_TYPE_MARK:
+ NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_MARK\n",
+ dev);
+ if (action[aidx].conf) {
+ fd->mark = ((const struct flow_action_mark *)
+ action[aidx]
+ .conf)
+ ->id;
+ NT_LOG(DBG, FILTER, "Mark: %i\n", fd->mark);
+ }
+ break;
+
+ case FLOW_ACTION_TYPE_JUMP:
+ NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_JUMP\n",
+ dev);
+ if (action[aidx].conf) {
+ const struct flow_action_jump *jump =
+ (const struct flow_action_jump *)
+ action[aidx]
+ .conf;
+ fd->jump_to_group = jump->group;
+ NT_LOG(DBG, FILTER,
+ "Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\n",
+ dev, jump->group);
+ }
+ break;
+
+ case FLOW_ACTION_TYPE_DROP:
+ NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_DROP\n",
+ dev);
+ if (action[aidx].conf) {
+ fd->dst_id[fd->dst_num_avail].owning_port_id =
+ 0;
+ fd->dst_id[fd->dst_num_avail].id = 0;
+ fd->dst_id[fd->dst_num_avail].type = PORT_NONE;
+ fd->dst_num_avail++;
+ }
+ break;
+
+ case FLOW_ACTION_TYPE_METER:
+ NT_LOG(DBG, FILTER, "Dev:%p: FLOW_ACTION_TYPE_METER\n",
+ dev);
+ if (action[aidx].conf) {
+ const struct flow_action_meter *meter =
+ (const struct flow_action_meter *)
+ action[aidx]
+ .conf;
+ if (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: - Number of METER actions exceeds %d.\n",
+ MAX_FLM_MTRS_SUPPORTED);
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+ fd->mtr_ids[mtr_count++] = meter->mtr_id + 1;
+ }
+ break;
+
+ case FLOW_ACTION_TYPE_RAW_ENCAP:
+ NT_LOG(DBG, FILTER,
+ "Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\n", dev);
+ if (action[aidx].conf) {
+ const struct flow_action_raw_encap *encap =
+ (const struct flow_action_raw_encap *)
+ action[aidx]
+ .conf;
+ const struct flow_elem *items = encap->items;
+
+ if (encap_decap_order != 1) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ if (encap->size == 0 || encap->size > 255 ||
+ encap->item_count < 2) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: - RAW_ENCAP data/size invalid.\n");
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ encap_decap_order = 2;
+
+ fd->tun_hdr.len = (uint8_t)encap->size;
+ memcpy(fd->tun_hdr.d.hdr8, encap->data,
+ fd->tun_hdr.len);
+
+ while (items->type != FLOW_ELEM_TYPE_END) {
+ switch (items->type) {
+ case FLOW_ELEM_TYPE_ETH:
+ fd->tun_hdr.l2_len = 14;
+ break;
+ case FLOW_ELEM_TYPE_VLAN:
+ fd->tun_hdr.nb_vlans += 1;
+ fd->tun_hdr.l2_len += 4;
+ break;
+ case FLOW_ELEM_TYPE_IPV4:
+ fd->tun_hdr.ip_version = 4;
+ fd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);
+ fd->tun_hdr.new_outer = 1;
+ break;
+ case FLOW_ELEM_TYPE_IPV6:
+ fd->tun_hdr.ip_version = 6;
+ fd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);
+ fd->tun_hdr.new_outer = 1;
+ break;
+ case FLOW_ELEM_TYPE_SCTP:
+ fd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);
+ break;
+ case FLOW_ELEM_TYPE_TCP:
+ fd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);
+ break;
+ case FLOW_ELEM_TYPE_UDP:
+ fd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);
+ break;
+ case FLOW_ELEM_TYPE_ICMP:
+ fd->tun_hdr.l4_len = sizeof(struct icmp_hdr_s);
+ break;
+ default:
+ break;
+ }
+ items++;
+ }
+
+ if (fd->tun_hdr.nb_vlans > 3) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: - Encapsulation with %d vlans not supported.\n",
+ (int)fd->tun_hdr.nb_vlans);
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ /* Convert encap data to 128-bit little endian */
+ for (size_t i = 0; i < (encap->size + 15) / 16;
+ ++i) {
+ uint8_t *data =
+ fd->tun_hdr.d.hdr8 + i * 16;
+ for (unsigned int j = 0; j < 8; ++j) {
+ uint8_t t = data[j];
+
+ data[j] = data[15 - j];
+ data[15 - j] = t;
+ }
+ }
+ }
+ break;
+
+ case FLOW_ACTION_TYPE_RAW_DECAP:
+ NT_LOG(DBG, FILTER,
+ "Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\n", dev);
+ if (action[aidx].conf) {
+ const struct flow_action_raw_decap *decap =
+ (const struct flow_action_raw_decap *)
+ action[aidx]
+ .conf;
+
+ if (encap_decap_order != 0) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: - RAW_ENCAP must follow RAW_DECAP.\n");
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ if (decap->item_count < 2) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: - RAW_DECAP must decap something.\n");
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ encap_decap_order = 1;
+
+ fd->header_strip_start_dyn = 2;
+ fd->header_strip_start_ofs = 2;
+
+ switch (decap->items[decap->item_count - 2]
+ .type) {
+ case FLOW_ELEM_TYPE_ETH:
+ case FLOW_ELEM_TYPE_VLAN:
+ fd->header_strip_end_dyn = 4;
+ fd->header_strip_end_ofs = 0;
+ break;
+ case FLOW_ELEM_TYPE_IPV4:
+ case FLOW_ELEM_TYPE_IPV6:
+ fd->header_strip_end_dyn = 7;
+ fd->header_strip_end_ofs = 0;
+ fd->header_strip_removed_outer_ip = 1;
+ break;
+ case FLOW_ELEM_TYPE_SCTP:
+ case FLOW_ELEM_TYPE_TCP:
+ case FLOW_ELEM_TYPE_UDP:
+ case FLOW_ELEM_TYPE_ICMP:
+ fd->header_strip_end_dyn = 8;
+ fd->header_strip_end_ofs = 0;
+ fd->header_strip_removed_outer_ip = 1;
+ break;
+ case FLOW_ELEM_TYPE_GTP:
+ fd->header_strip_end_dyn = 13;
+ fd->header_strip_end_ofs = 0;
+ fd->header_strip_removed_outer_ip = 1;
+ break;
+ default:
+ fd->header_strip_end_dyn = 1;
+ fd->header_strip_end_ofs = 0;
+ fd->header_strip_removed_outer_ip = 1;
+ break;
+ }
+ }
+ break;
+
+ case FLOW_ACTION_TYPE_MODIFY_FIELD:
+ NT_LOG(DBG, FILTER,
+ "Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\n", dev);
+ {
+ const struct flow_action_modify_field *modify_field =
+ (const struct flow_action_modify_field *)
+ action[aidx]
+ .conf;
+ uint64_t modify_field_use_flag = 0;
+
+ if (modify_field->src.field !=
+ FLOW_FIELD_VALUE) {
+ NT_LOG(ERR, FILTER,
+ "MODIFY_FIELD only src type VALUE is supported.\n");
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ if (modify_field->dst.level > 2) {
+ NT_LOG(ERR, FILTER,
+				       "MODIFY_FIELD only dst levels 0, 1, and 2 are supported.\n");
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ if (modify_field->dst.field ==
+ FLOW_FIELD_IPV4_TTL ||
+ modify_field->dst.field ==
+ FLOW_FIELD_IPV6_HOPLIMIT) {
+ if (modify_field->operation !=
+ FLOW_MODIFY_SUB) {
+ NT_LOG(ERR, FILTER,
+ "MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\n");
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ if (fd->ttl_sub_enable) {
+ NT_LOG(ERR, FILTER,
+ "MODIFY_FIELD TTL/HOPLIMIT resource already in use.\n");
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ fd->ttl_sub_enable = 1;
+ fd->ttl_sub_ipv4 =
+ (modify_field->dst.field ==
+ FLOW_FIELD_IPV4_TTL) ?
+ 1 :
+ 0;
+ fd->ttl_sub_outer =
+ (modify_field->dst.level <= 1) ?
+ 1 :
+ 0;
+ } else {
+ if (modify_field->operation !=
+ FLOW_MODIFY_SET) {
+ NT_LOG(ERR, FILTER,
+ "MODIFY_FIELD only operation SET "
+ "is supported in general.\n");
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ if (fd->modify_field_count >=
+ dev->ndev->be.tpe.nb_cpy_writers) {
+ NT_LOG(ERR, FILTER,
+ "MODIFY_FIELD exceeded maximum of %u"
+ " MODIFY_FIELD actions.\n",
+ dev->ndev->be.tpe
+ .nb_cpy_writers);
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ switch (modify_field->dst.field) {
+ case FLOW_FIELD_IPV4_DSCP:
+ fd->modify_field
+ [fd->modify_field_count]
+ .select =
+ CPY_SELECT_DSCP_IPV4;
+ fd->modify_field
+ [fd->modify_field_count]
+ .dyn = DYN_L3;
+ fd->modify_field
+ [fd->modify_field_count]
+ .ofs = 1;
+ fd->modify_field
+ [fd->modify_field_count]
+ .len = 1;
+ break;
+ case FLOW_FIELD_IPV6_DSCP:
+ fd->modify_field
+ [fd->modify_field_count]
+ .select =
+ CPY_SELECT_DSCP_IPV6;
+ fd->modify_field
+ [fd->modify_field_count]
+ .dyn = DYN_L3;
+ fd->modify_field
+ [fd->modify_field_count]
+ .ofs = 0;
+				/*
+				 * len=2 is needed because IPv6 DSCP overlaps 2
+				 * bytes.
+				 */
+				fd->modify_field
+					[fd->modify_field_count]
+					.len = 2;
+ break;
+ case FLOW_FIELD_GTP_PSC_QFI:
+ fd->modify_field
+ [fd->modify_field_count]
+ .select =
+ CPY_SELECT_RQI_QFI;
+ fd->modify_field
+ [fd->modify_field_count]
+ .dyn =
+ DYN_L4_PAYLOAD;
+ fd->modify_field
+ [fd->modify_field_count]
+ .ofs = 14;
+ fd->modify_field
+ [fd->modify_field_count]
+ .len = 1;
+ break;
+ case FLOW_FIELD_IPV4_SRC:
+ fd->modify_field
+ [fd->modify_field_count]
+ .select =
+ CPY_SELECT_IPV4;
+ fd->modify_field
+ [fd->modify_field_count]
+ .dyn = DYN_L3;
+ fd->modify_field
+ [fd->modify_field_count]
+ .ofs = 12;
+ fd->modify_field
+ [fd->modify_field_count]
+ .len = 4;
+ break;
+ case FLOW_FIELD_IPV4_DST:
+ fd->modify_field
+ [fd->modify_field_count]
+ .select =
+ CPY_SELECT_IPV4;
+ fd->modify_field
+ [fd->modify_field_count]
+ .dyn = DYN_L3;
+ fd->modify_field
+ [fd->modify_field_count]
+ .ofs = 16;
+ fd->modify_field
+ [fd->modify_field_count]
+ .len = 4;
+ break;
+ case FLOW_FIELD_TCP_PORT_SRC:
+ /* fallthrough */
+ case FLOW_FIELD_UDP_PORT_SRC:
+ fd->modify_field
+ [fd->modify_field_count]
+ .select =
+ CPY_SELECT_PORT;
+ fd->modify_field
+ [fd->modify_field_count]
+ .dyn = DYN_L4;
+ fd->modify_field
+ [fd->modify_field_count]
+ .ofs = 0;
+ fd->modify_field
+ [fd->modify_field_count]
+ .len = 2;
+ break;
+ case FLOW_FIELD_TCP_PORT_DST:
+ /* fallthrough */
+ case FLOW_FIELD_UDP_PORT_DST:
+ fd->modify_field
+ [fd->modify_field_count]
+ .select =
+ CPY_SELECT_PORT;
+ fd->modify_field
+ [fd->modify_field_count]
+ .dyn = DYN_L4;
+ fd->modify_field
+ [fd->modify_field_count]
+ .ofs = 2;
+ fd->modify_field
+ [fd->modify_field_count]
+ .len = 2;
+ break;
+ case FLOW_FIELD_GTP_TEID:
+ fd->modify_field
+ [fd->modify_field_count]
+ .select =
+ CPY_SELECT_TEID;
+ fd->modify_field
+ [fd->modify_field_count]
+ .dyn =
+ DYN_L4_PAYLOAD;
+ fd->modify_field
+ [fd->modify_field_count]
+ .ofs = 4;
+ fd->modify_field
+ [fd->modify_field_count]
+ .len = 4;
+ break;
+ default:
+ NT_LOG(ERR, FILTER,
+ "MODIFY_FIELD dst type is not supported.\n");
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ modify_field_use_flag =
+ 1
+ << fd->modify_field
+ [fd->modify_field_count]
+ .select;
+ if (modify_field_use_flag &
+ modify_field_use_flags) {
+ NT_LOG(ERR, FILTER,
+ "MODIFY_FIELD dst type hardware "
+ "resource already used.\n");
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED,
+ error);
+ free(fd);
+ return NULL;
+ }
+
+ memcpy(fd->modify_field
+ [fd->modify_field_count]
+ .value8,
+ modify_field->src.value, 16);
+
+ fd->modify_field[fd->modify_field_count]
+ .level =
+ modify_field->dst.level;
+
+ modify_field_use_flags |=
+ modify_field_use_flag;
+ fd->modify_field_count += 1;
+ }
+ }
+ break;
+
+ default:
+ NT_LOG(ERR, FILTER,
+ "Invalid or unsupported flow action received - %i\n",
+ action[aidx].type);
+ flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+ free(fd);
+ return NULL;
+ }
+ }
+
+	if (!(encap_decap_order == 0 || encap_decap_order == 2)) {
+		NT_LOG(ERR, FILTER, "Invalid encap/decap actions\n");
+		flow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);
+		free(fd);
+		return NULL;
+	}
+
+ if (implicit_vlan_vid > 0) {
+ uint32_t *sw_data = &packet_data[1 - sw_counter];
+ uint32_t *sw_mask = &packet_mask[1 - sw_counter];
+
+ sw_mask[0] = 0x0fff;
+ sw_data[0] = implicit_vlan_vid & sw_mask[0];
+
+ km_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,
+ DYN_FIRST_VLAN, 0);
+ set_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);
+ sw_counter += 1;
+
+ fd->vlans += 1;
+ }
+
+ /*
+ * All Actions interpreted
+ */
+ for (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {
+ switch (elem[eidx].type) {
+ case FLOW_ELEM_TYPE_ANY:
+ NT_LOG(DBG, FILTER,
+ "Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\n",
+ dev->ndev->adapter_no, dev->port);
+ {
+ const struct flow_elem_any *any_spec =
+ (const struct flow_elem_any *)elem[eidx]
+ .spec;
+ const struct flow_elem_any *any_mask =
+ (const struct flow_elem_any *)elem[eidx]
+ .mask;
+
+ if (any_spec && any_mask) {
+ any_count += any_spec->num &
+ any_mask->num;
+ }
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_ETH:
+ NT_LOG(DBG, FILTER,
+ "Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\n",
+ dev->ndev->adapter_no, dev->port);
+ {
+ const struct flow_elem_eth *eth_spec =
+ (const struct flow_elem_eth *)elem[eidx]
+ .spec;
+ const struct flow_elem_eth *eth_mask =
+ (const struct flow_elem_eth *)elem[eidx]
+ .mask;
+
+ if (any_count > 0) {
+ NT_LOG(ERR, FILTER,
+ "Tunneled L2 ethernet not supported\n");
+ flow_nic_set_error(ERR_FAILED, error);
+ free(fd);
+ return NULL;
+ }
+
+ if (qw_counter >= 2) {
+ NT_LOG(ERR, FILTER,
+ "Key size too big. Out of QW resources.\n");
+ flow_nic_set_error(ERR_FAILED, error);
+ free(fd);
+ return NULL;
+ }
+
+ if (eth_spec != NULL && eth_mask != NULL) {
+ if (is_non_zero(eth_mask->d_addr.addr_b,
+ 6) ||
+ is_non_zero(eth_mask->s_addr.addr_b,
+ 6)) {
+ uint32_t *qw_data =
+ &packet_data[2 + 4 -
+ qw_counter *
+ 4];
+ uint32_t *qw_mask =
+ &packet_mask[2 + 4 -
+ qw_counter *
+ 4];
+
+ qw_data[0] =
+ ((eth_spec->d_addr
+ .addr_b[0] &
+ eth_mask->d_addr
+ .addr_b[0])
+ << 24) +
+ ((eth_spec->d_addr
+ .addr_b[1] &
+ eth_mask->d_addr
+ .addr_b[1])
+ << 16) +
+ ((eth_spec->d_addr
+ .addr_b[2] &
+ eth_mask->d_addr
+ .addr_b[2])
+ << 8) +
+ (eth_spec->d_addr
+ .addr_b[3] &
+ eth_mask->d_addr
+ .addr_b[3]);
+
+ qw_data[1] =
+ ((eth_spec->d_addr
+ .addr_b[4] &
+ eth_mask->d_addr
+ .addr_b[4])
+ << 24) +
+ ((eth_spec->d_addr
+ .addr_b[5] &
+ eth_mask->d_addr
+ .addr_b[5])
+ << 16) +
+ ((eth_spec->s_addr
+ .addr_b[0] &
+ eth_mask->s_addr
+ .addr_b[0])
+ << 8) +
+ (eth_spec->s_addr
+ .addr_b[1] &
+ eth_mask->s_addr
+ .addr_b[1]);
+
+ qw_data[2] =
+ ((eth_spec->s_addr
+ .addr_b[2] &
+ eth_mask->s_addr
+ .addr_b[2])
+ << 24) +
+ ((eth_spec->s_addr
+ .addr_b[3] &
+ eth_mask->s_addr
+ .addr_b[3])
+ << 16) +
+ ((eth_spec->s_addr
+ .addr_b[4] &
+ eth_mask->s_addr
+ .addr_b[4])
+ << 8) +
+ (eth_spec->s_addr
+ .addr_b[5] &
+ eth_mask->s_addr
+ .addr_b[5]);
+
+ qw_mask[0] = (eth_mask->d_addr
+ .addr_b[0]
+ << 24) +
+ (eth_mask->d_addr
+ .addr_b[1]
+ << 16) +
+ (eth_mask->d_addr
+ .addr_b[2]
+ << 8) +
+ eth_mask->d_addr
+ .addr_b[3];
+
+ qw_mask[1] = (eth_mask->d_addr
+ .addr_b[4]
+ << 24) +
+ (eth_mask->d_addr
+ .addr_b[5]
+ << 16) +
+ (eth_mask->s_addr
+ .addr_b[0]
+ << 8) +
+ eth_mask->s_addr
+ .addr_b[1];
+
+ qw_mask[2] = (eth_mask->s_addr
+ .addr_b[2]
+ << 24) +
+ (eth_mask->s_addr
+ .addr_b[3]
+ << 16) +
+ (eth_mask->s_addr
+ .addr_b[4]
+ << 8) +
+ eth_mask->s_addr
+ .addr_b[5];
+
+ km_add_match_elem(&fd->km,
+ &qw_data[(size_t)(qw_counter *
+ 4)],
+ &qw_mask[(size_t)(qw_counter *
+ 4)],
+ 3, DYN_L2, 0);
+ set_key_def_qw(key_def,
+ qw_counter,
+ DYN_L2, 0);
+ qw_counter += 1;
+ }
+ }
+
+ fd->l2_prot = PROT_L2_ETH2;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_VLAN:
+ NT_LOG(DBG, FILTER,
+ "Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\n",
+ dev->ndev->adapter_no, dev->port);
+ {
+ if (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,
+ sw_counter, packet_data, packet_mask, key_def))
+ return NULL;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_IPV4:
+ NT_LOG(DBG, FILTER,
+ "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\n",
+ dev->ndev->adapter_no, dev->port);
+ {
+ if (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,
+ sw_counter, packet_data, packet_mask, key_def, any_count))
+ return NULL;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_IPV6:
+ NT_LOG(DBG, FILTER,
+ "Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\n",
+ dev->ndev->adapter_no, dev->port);
+ {
+ if (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,
+ packet_data, packet_mask, key_def, any_count))
+ return NULL;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_UDP:
+ NT_LOG(DBG, FILTER,
+ "Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\n",
+ dev->ndev->adapter_no, dev->port);
+ {
+ if (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,
+ packet_data, packet_mask, key_def, any_count))
+ return NULL;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_SCTP:
+ NT_LOG(DBG, FILTER,
+			       "Adap %i, Port %i: FLOW_ELEM_TYPE_SCTP\n",
+ dev->ndev->adapter_no, dev->port);
+ {
+ if (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,
+ packet_data, packet_mask, key_def, any_count))
+ return NULL;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_TCP:
+ NT_LOG(DBG, FILTER,
+ "Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\n",
+ dev->ndev->adapter_no, dev->port);
+ {
+ if (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,
+ packet_data, packet_mask, key_def, any_count))
+ return NULL;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_GTP:
+ NT_LOG(DBG, FILTER,
+ "Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\n",
+ dev->ndev->adapter_no, dev->port);
+ {
+ if (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,
+ packet_data, packet_mask, key_def))
+ return NULL;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_PORT_ID:
+ NT_LOG(DBG, FILTER,
+ "Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\n",
+ dev->ndev->adapter_no, dev->port);
+ if (elem[eidx].spec) {
+ *in_port_id =
+ ((const struct flow_elem_port_id *)
+ elem[eidx]
+ .spec)
+ ->id;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_VOID:
+ NT_LOG(DBG, FILTER,
+ "Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\n",
+ dev->ndev->adapter_no, dev->port);
+ break;
+
+ default:
+ NT_LOG(ERR, FILTER,
+ "Invalid or unsupported flow request: %d\n",
+ (int)elem[eidx].type);
+ flow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,
+ error);
+ free(fd);
+ return NULL;
+ }
+ }
+
+ return fd;
+}
+
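+/*
+ * Reset a category function (CFN) and its associated KM, FLM and CTE/CTS
+ * state to defaults.
+ */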
+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)
+{
+ /* CFN */
+ {
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn,
+ 0, 0);
+ hw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);
+ }
+
+ /* KM */
+ {
+ uint32_t bm = 0;
+
+ hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+ KM_FLM_IF_FIRST, cfn / 8, &bm);
+ hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+ KM_FLM_IF_FIRST, cfn / 8,
+ bm & ~(1 << (cfn % 8)));
+ hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+ KM_FLM_IF_FIRST, cfn, 0);
+
+ hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+ cfn / 8, 1);
+ hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+ 1);
+
+ for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+ ft++) {
+ set_flow_type_km(dev->ndev, cfn, ft, 0, 0);
+ set_flow_type_km(dev->ndev, cfn, ft, 1, 0);
+ set_flow_type_km(dev->ndev, cfn, ft, 2, 0);
+ set_flow_type_km(dev->ndev, cfn, ft, 3, 0);
+ }
+ }
+
+ /* FLM */
+ {
+ uint32_t bm = 0;
+
+ hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+ KM_FLM_IF_FIRST, cfn / 8, &bm);
+ hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+ KM_FLM_IF_FIRST, cfn / 8,
+ bm & ~(1 << (cfn % 8)));
+ hw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+ KM_FLM_IF_FIRST, cfn, 0);
+
+ hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+ cfn / 8, 1);
+ hw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,
+ 1);
+
+ for (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;
+ ft++) {
+ set_flow_type_flm(dev->ndev, cfn, ft, 0, 0);
+ set_flow_type_flm(dev->ndev, cfn, ft, 1, 0);
+ set_flow_type_flm(dev->ndev, cfn, ft, 2, 0);
+ set_flow_type_flm(dev->ndev, cfn, ft, 3, 0);
+ }
+ }
+
+ /* CTE / CTS */
+ {
+ uint32_t cte = 0;
+
+ hw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,
+ &cte);
+
+ if (cte) {
+ const int cts_offset =
+ ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
+ hw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,
+ cfn, 0);
+ hw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);
+
+ for (int cte_type = 0; cte_type < cts_offset;
+ ++cte_type) {
+ hw_mod_cat_cts_set(&dev->ndev->be,
+ HW_CAT_CTS_CAT_A,
+ cts_offset * cfn + cte_type,
+ 0);
+ hw_mod_cat_cts_set(&dev->ndev->be,
+ HW_CAT_CTS_CAT_B,
+ cts_offset * cfn + cte_type,
+ 0);
+ }
+
+ hw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,
+ cts_offset);
+ }
+ }
+
+ return 0;
+}
+
+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def *fd,
+ const uint32_t *packet_data, uint32_t flm_key_id,
+ uint16_t rpl_ext_ptr, uint32_t priority)
+{
+ if (fh->type != FLOW_HANDLE_TYPE_FLM)
+ return -1;
+
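+	/* Map the flow's L4 (or tunneled L4) protocol to its IP protocol number */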
+ switch (fd->l4_prot) {
+ case PROT_L4_TCP:
+ fh->flm_prot = 6;
+ break;
+ case PROT_L4_UDP:
+ fh->flm_prot = 17;
+ break;
+ case PROT_L4_SCTP:
+ fh->flm_prot = 132;
+ break;
+ case PROT_L4_ICMP:
+ fh->flm_prot = 1;
+ break;
+ default:
+ switch (fd->tunnel_l4_prot) {
+ case PROT_TUN_L4_TCP:
+ fh->flm_prot = 6;
+ break;
+ case PROT_TUN_L4_UDP:
+ fh->flm_prot = 17;
+ break;
+ case PROT_TUN_L4_SCTP:
+ fh->flm_prot = 132;
+ break;
+ case PROT_TUN_L4_ICMP:
+ fh->flm_prot = 1;
+ break;
+ default:
+ fh->flm_prot = 0;
+ break;
+ }
+ break;
+ }
+
+ memcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);
+
+ fh->flm_kid = flm_key_id;
+ fh->flm_rpl_ext_ptr = rpl_ext_ptr;
+ fh->flm_prio = (uint8_t)priority;
+
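+	/* Carry DSCP/QFI/NAT/TEID modify-field values over to the FLM handle */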
+ for (unsigned int i = 0; i < fd->modify_field_count; ++i) {
+ switch (fd->modify_field[i].select) {
+ case CPY_SELECT_DSCP_IPV4:
+ /* fallthrough */
+ case CPY_SELECT_DSCP_IPV6:
+ fh->flm_dscp = fd->modify_field[i].value8[0];
+ break;
+ case CPY_SELECT_RQI_QFI:
+ fh->flm_rqi = (fd->modify_field[i].value8[0] >> 6) &
+ 0x1;
+ fh->flm_qfi = fd->modify_field[i].value8[0] & 0x3f;
+ break;
+ case CPY_SELECT_IPV4:
+ fh->flm_nat_ipv4 =
+ ntohl(fd->modify_field[i].value32[0]);
+ break;
+ case CPY_SELECT_PORT:
+ fh->flm_nat_port =
+ ntohs(fd->modify_field[i].value16[0]);
+ break;
+ case CPY_SELECT_TEID:
+ fh->flm_teid = ntohl(fd->modify_field[i].value32[0]);
+ break;
+ }
+ }
+
+ fh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;
+
+ return 0;
+}
+
+static int flm_flow_programming(struct flow_eth_dev *dev,
+ struct flow_handle *fh, uint32_t *mtr_ids,
+ uint32_t flm_ft, uint32_t flm_op)
+{
+ struct flm_v17_lrn_data_s learn_record;
+
+ if (fh->type != FLOW_HANDLE_TYPE_FLM)
+ return -1;
+
+ memset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));
+
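+	/* Load the 10-word FLM key in reverse order into the learn record */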
+ learn_record.qw0[0] = fh->flm_data[9];
+ learn_record.qw0[1] = fh->flm_data[8];
+ learn_record.qw0[2] = fh->flm_data[7];
+ learn_record.qw0[3] = fh->flm_data[6];
+ learn_record.qw4[0] = fh->flm_data[5];
+ learn_record.qw4[1] = fh->flm_data[4];
+ learn_record.qw4[2] = fh->flm_data[3];
+ learn_record.qw4[3] = fh->flm_data[2];
+ learn_record.sw8 = fh->flm_data[1];
+ learn_record.sw9 = fh->flm_data[0];
+ learn_record.prot = fh->flm_prot;
+
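+	/*
+	 * Up to four meter indices are packed into mbr_idx through the
+	 * 28-bit a/b fields of the overlay structs.
+	 */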
+ struct flm_v17_mbr_idx_overlay *mbr_id1_ptr =
+ (struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx;
+ struct flm_v17_mbr_idx_overlay *mbr_id2_ptr =
+ (struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx;
+ struct flm_v17_mbr_idx_overlay *mbr_id3_ptr =
+ (struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx + 7;
+ struct flm_v17_mbr_idx_overlay *mbr_id4_ptr =
+ (struct flm_v17_mbr_idx_overlay *)learn_record.mbr_idx + 7;
+ if (mtr_ids) {
+ mbr_id1_ptr->a = mtr_ids[0];
+ mbr_id2_ptr->b = mtr_ids[1];
+ mbr_id3_ptr->a = mtr_ids[2];
+ mbr_id4_ptr->b = mtr_ids[3];
+
+ /* Last non-zero mtr is used for statistics */
+ uint8_t mbrs = 0;
+
+ while (mbrs < MAX_FLM_MTRS_SUPPORTED && mtr_ids[mbrs] != 0)
+ ++mbrs;
+ learn_record.vol_idx = mbrs;
+ }
+
+ learn_record.nat_ip = fh->flm_nat_ipv4;
+ learn_record.nat_port = fh->flm_nat_port;
+ learn_record.nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 1 : 0;
+
+ learn_record.dscp = fh->flm_dscp;
+ learn_record.teid = fh->flm_teid;
+ learn_record.qfi = fh->flm_qfi;
+ learn_record.rqi = fh->flm_rqi;
+ learn_record.color = fh->flm_rpl_ext_ptr &
+ 0x3ff; /* Lower 10 bits used for RPL EXT PTR */
+ learn_record.color |= (fh->flm_mtu_fragmentation_recipe & 0xf)
+ << 10; /* Bit [13:10] used for MTU recipe */
+
+ learn_record.ent = 0;
+ learn_record.op = flm_op & 0xf;
+ learn_record.prio = fh->flm_prio & 0x3;
+ learn_record.ft = flm_ft;
+ learn_record.kid = fh->flm_kid;
+ learn_record.eor = 1;
+
+ int res = flow_flm_apply(dev, &learn_record);
+ return res;
+}
+
+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,
+ struct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,
+ struct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,
+ struct flow_handle *flow)
+{
+ if (!identical_flow_found) {
+ /* Find existing KM FT that can be reused */
+ {
+ int found_ft = 0, found_zero = 0;
+
+ struct flm_flow_ft_ident_s *ft_idents =
+ (struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;
+ struct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);
+
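+			/*
+			 * Reuse an FT with a matching identity if one exists;
+			 * otherwise remember the first free slot.
+			 */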
+ for (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {
+ if (ft_ident.data == ft_idents[i].data) {
+ found_ft = i;
+ break;
+ } else if (found_zero == 0 && ft_idents[i].data == 0) {
+ found_zero = i;
+ }
+ }
+
+ if (found_ft) {
+ if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {
+ NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+ "KM FLOW TYPE resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ return 1;
+ }
+
+ fh->resource[RES_KM_FLOW_TYPE].count = 1;
+ fh->resource[RES_KM_FLOW_TYPE].index = found_ft;
+ fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+ } else if (found_zero) {
+ if (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,
+ found_zero, fh)) {
+ NT_LOG(ERR, FILTER, "ERROR: Could not get "
+ "KM FLOW TYPE resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ return 1;
+ }
+
+ ft_idents[found_zero].data = ft_ident.data;
+ } else {
+ NT_LOG(ERR, FILTER, "ERROR: Could not get KM FLOW TYPE resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ return 1;
+ }
+ }
+ /* Attach resources to KM entry */
+ km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+ fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+		/* Update existing KM RCP or allocate a new RCP */
+ if (found_flow != NULL) {
+ if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow
+ ->resource[RES_KM_CATEGORY].index)) {
+ NT_LOG(ERR, FILTER, "ERROR: Could not reference "
+ "KM CATEGORY resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ return 1;
+ }
+
+ fh->resource[RES_KM_CATEGORY].count = 1;
+ fh->resource[RES_KM_CATEGORY].index =
+ found_flow->resource[RES_KM_CATEGORY].index;
+ fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+ if (fd->km.target == KM_CAM) {
+ uint32_t ft_a_mask = 0;
+
+ hw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,
+ fh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);
+ hw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,
+ fh->resource[RES_KM_CATEGORY].index, 0,
+ ft_a_mask | (1 << fd->km.flow_type));
+ }
+ } else {
+ if (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {
+ NT_LOG(ERR, FILTER, "ERROR: Could not get KM CATEGORY resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ return 1;
+ }
+
+ /* Note: km_rcp_set clears existing RCPs */
+ km_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);
+ }
+
+ /* Set filter setup variables */
+ *setup_km = 1;
+ *setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;
+ *setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;
+
+		/* Flush KM RCP and entry */
+ hw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);
+
+ km_write_data_match_entry(&fd->km, 0);
+ } else {
+ if (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,
+ found_flow->resource[RES_KM_FLOW_TYPE].index)) {
+ NT_LOG(ERR, FILTER, "ERROR: Could not reference KM FLOW TYPE resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ return 1;
+ }
+
+ fh->resource[RES_KM_FLOW_TYPE].count = 1;
+ fh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;
+ fh->resource[RES_KM_FLOW_TYPE].referenced = 1;
+
+ if (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,
+ found_flow->resource[RES_KM_CATEGORY].index)) {
+ NT_LOG(ERR, FILTER, "ERROR: Could not reference KM CATEGORY resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ return 1;
+ }
+
+ fh->resource[RES_KM_CATEGORY].count = 1;
+ fh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;
+ fh->resource[RES_KM_CATEGORY].referenced = 1;
+
+ km_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);
+ fd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;
+
+ km_refer_data_match_entry(&fd->km, &found_flow->fd->km);
+
+ *setup_km = 1;
+ *setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;
+ *setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;
+ }
+ return 0;
+}
+
+/*
+ * Tunneling invalidates dynamic offsets, so change them to static
+ * offsets starting at beginning of L2.
+ */
+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,
+ uint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,
+ const uint32_t l4_length, uint32_t *dyn)
+{
+ if (fd->tun_hdr.len > eth_length) {
+ if (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {
+			*ofs += fd->tun_hdr.len - eth_length;
+ } else {
+ switch (select) {
+ case CPY_SELECT_IPV4:
+ case CPY_SELECT_DSCP_IPV4:
+ case CPY_SELECT_DSCP_IPV6:
+ *ofs += l2_length;
+ break;
+ case CPY_SELECT_PORT:
+ *ofs += l2_length + l3_length;
+ break;
+ case CPY_SELECT_TEID:
+ case CPY_SELECT_RQI_QFI:
+ *ofs += l2_length + l3_length + l4_length;
+ break;
+ }
+ *dyn = 1;
+ }
+ }
+}
+
+static struct flow_handle *
+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,
+ const struct flow_attr *attr, struct flow_error *error,
+ uint32_t port_id, uint32_t num_dest_port,
+ uint32_t num_queues, uint32_t *packet_data,
+ uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)
+{
+ uint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :
+ num_queues;
+ uint32_t flm_key_id = 0;
+ uint32_t flm_ft = 0;
+ uint16_t flm_rpl_ext_ptr = 0;
+
+ struct flow_handle *fh_flm = NULL;
+ struct flow_handle *fh = calloc(1, sizeof(struct flow_handle));
+
+ if (!fh) {
+ NT_LOG(ERR, FILTER, "ERR memory\n");
+ flow_nic_set_error(ERR_MEMORY, error);
+ return NULL;
+ }
+
+ fh->type = FLOW_HANDLE_TYPE_FLOW;
+ fh->port_id = port_id;
+ fh->dev = dev;
+ fh->fd = fd;
+
+ int setup_cat_cfn = 0;
+ int setup_cat_cot = 0;
+ int setup_cat_cts = 0;
+ int setup_qsl_rcp = 0;
+
+ int setup_flm = 0;
+ int setup_flm_ft = 0;
+
+ int setup_km = 0;
+ int setup_km_ft = 0;
+ int setup_km_rcp = 0;
+
+ int setup_default_ft = 0;
+
+ int setup_hst = 0;
+ int setup_tpe = 0;
+ int setup_tpe_encap_data = 0;
+
+ int free_fd = 0;
+
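+	/* True when the pattern matches nothing specific (no L2/L3/L4, VLAN or tunnel items) */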
+ const int empty_pattern =
+ fd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&
+ fd->vlans == 0 && fd->tunnel_prot < 0 &&
+ fd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;
+
+ if (attr->group > 0 && empty_pattern) {
+ /*
+		 * Empty pattern in a group: set up the group's default (miss) actions
+ */
+ struct flow_handle *fh_miss = NULL;
+
+ if (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {
+ /* Error was printed to log by flm_flow_get_group_miss_fh */
+ flow_nic_set_error(ERR_FAILED, error);
+ free(fh);
+ return NULL;
+ }
+
+ if (fh_miss == NULL) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not setup default action for uninitialized group\n");
+ flow_nic_set_error(ERR_FAILED, error);
+ free(fh);
+ return NULL;
+ }
+
+ if (qsl_size > 0 &&
+ flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,
+ qsl_size, 1)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not get QSL QST resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ free(fh);
+ return NULL;
+ }
+
+ if (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,
+ fh_miss->resource[RES_QSL_RCP].index)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not reference QSL RCP resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ free(fh);
+ return NULL;
+ }
+
+ fh->resource[RES_QSL_RCP].count = 1;
+ fh->resource[RES_QSL_RCP].index =
+ fh_miss->resource[RES_QSL_RCP].index;
+ fh->resource[RES_QSL_RCP].referenced = 1;
+
+ nic_insert_flow(dev->ndev, fh);
+
+ setup_qsl_rcp = 1;
+ } else if (attr->group > 0) {
+ /*
+ * FLM programming
+ */
+ struct flow_handle *fh_existing = NULL;
+ int cfn_to_copy = -1;
+
+ if (attr->priority >= dev->ndev->be.flm.nb_prios) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Priority value of FLM flow exceeds %u"
+ "\n",
+ dev->ndev->be.flm.nb_prios);
+ flow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,
+ error);
+ free(fh);
+ return NULL;
+ }
+
+ if (flm_flow_learn_prepare(dev, fh, attr->group, key_def,
+ packet_mask, &flm_key_id, &flm_ft,
+ &cfn_to_copy, &setup_km_ft,
+ &fh_existing)) {
+ /* Error was printed to log by flm_flow_learn_prepare */
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ free(fh);
+ return NULL;
+ }
+
+ setup_tpe_encap_data = (fd->tun_hdr.len > 0);
+ setup_tpe =
+ (fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);
+
+ /* Create HIT filter for new FLM FT */
+ if (cfn_to_copy >= 0) {
+ uint32_t value = 0;
+
+ nic_insert_flow(dev->ndev, fh);
+
+ setup_qsl_rcp = 1;
+ setup_cat_cot = 1;
+ setup_cat_cts = 1;
+
+ setup_default_ft = 1;
+
+ setup_flm = 1;
+ setup_flm_ft = (int)flm_ft;
+
+ setup_tpe |= setup_tpe_encap_data;
+
+ if (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||
+ fd->header_strip_start_ofs != fd->header_strip_end_ofs)
+ setup_hst = 1;
+
+ if (flow_nic_allocate_fh_resource(dev->ndev,
+ RES_CAT_CFN,
+ fh, 1, 1)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not get CAT CFN resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ if (flow_nic_allocate_fh_resource(dev->ndev,
+ RES_CAT_COT,
+ fh, 1, 1)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not get CAT COT resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ if (flow_nic_allocate_fh_resource(dev->ndev,
+ RES_QSL_RCP,
+ fh, 1, 1)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not get QSL RCP resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ if (qsl_size > 0 &&
+ flow_nic_allocate_fh_resource(dev->ndev,
+ RES_QSL_QST,
+ fh, qsl_size, 1)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not get QSL QST resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ NT_LOG(DBG, FILTER,
+ "FLM: Creating new CFN %d as a copy of CFN %d with FT %d\n",
+ fh->resource[RES_CAT_CFN].index, cfn_to_copy,
+ setup_flm_ft);
+
+ /* Copy parts from base MISS filter */
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,
+ fh->resource[RES_CAT_CFN].index, 0,
+ cfn_to_copy);
+ hw_mod_cat_cfn_flush(&dev->ndev->be,
+ fh->resource[RES_CAT_CFN].index,
+ 1);
+
+ hw_mod_cat_kcs_km_get(&dev->ndev->be,
+ HW_CAT_KCS_CATEGORY,
+ KM_FLM_IF_FIRST, cfn_to_copy,
+ &value);
+ if (value > 0) {
+ setup_km = 1;
+ setup_km_rcp = (int)value;
+ }
+
+ hw_mod_cat_kcs_flm_get(&dev->ndev->be,
+ HW_CAT_KCS_CATEGORY,
+ KM_FLM_IF_FIRST, cfn_to_copy,
+ &value);
+ hw_mod_cat_kcs_flm_set(&dev->ndev->be,
+ HW_CAT_KCS_CATEGORY,
+ KM_FLM_IF_FIRST,
+ fh->resource[RES_CAT_CFN].index,
+ value);
+ hw_mod_cat_kcs_flm_flush(&dev->ndev->be,
+ KM_FLM_IF_FIRST,
+ fh->resource[RES_CAT_CFN].index,
+ 1);
+
+ fh_flm = calloc(1, sizeof(struct flow_handle));
+ if (!fh_flm) {
+ flow_nic_set_error(ERR_MEMORY, error);
+ return NULL;
+ }
+
+ nic_insert_flow_flm(dev->ndev, fh_flm);
+
+ fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+ fh_flm->dev = dev;
+ fh_flm->flm_owner = fh;
+ } else {
+ /* Reuse allocated memory */
+ fh_flm = fh;
+ fh = fh_existing;
+
+ nic_insert_flow_flm(dev->ndev, fh_flm);
+
+ fh_flm->type = FLOW_HANDLE_TYPE_FLM;
+ fh_flm->dev = dev;
+ fh_flm->flm_owner = fh_existing;
+
+ free_fd = 1;
+ }
+
+ fh_flm->flm_owner->flm_ref_count += 1;
+ } else {
+ /*
+ * Filter creation
+ */
+ nic_insert_flow(dev->ndev, fh);
+
+ setup_cat_cfn = 1;
+ setup_cat_cts = 1;
+ setup_qsl_rcp = 1;
+
+ if (fd->km.num_ftype_elem) {
+ struct flow_handle *flow = dev->ndev->flow_base,
+ *found_flow = NULL;
+ int identical_flow_found = 0;
+
+ /* Compute new KM key */
+ if (km_key_create(&fd->km, fh->port_id)) {
+ NT_LOG(ERR, FILTER, "KM creation failed\n");
+ flow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,
+ error);
+ return NULL;
+ }
+
+ fd->km.be = &dev->ndev->be;
+
+ /* Find existing KM key that can be reused */
+ while (flow) {
+ if (flow->type == FLOW_HANDLE_TYPE_FLOW &&
+ flow->fd->km
+ .flow_type && /* This check also skips self */
+ flow->resource[RES_KM_CATEGORY].count) {
+ int res = km_key_compare(&fd->km,
+ &flow->fd->km);
+ if (res < 0) {
+ identical_flow_found = 1;
+ found_flow = flow;
+ break;
+ } else if (res > 0 &&
+ !flow->resource[RES_KM_CATEGORY]
+ .referenced &&
+ found_flow == NULL)
+ found_flow = flow;
+ }
+ flow = flow->next;
+ }
+ if (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,
+ found_flow, identical_flow_found, dev, fd, error, fh, flow))
+ return NULL;
+ }
+
+ setup_default_ft = 1;
+
+ if (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,
+ fh, 1, 1)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not get CAT CFN resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ if (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,
+ 1)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not get QSL RCP resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ if (qsl_size > 0 &&
+ flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,
+ fh, qsl_size, 1)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not get QSL QST resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ /* Check if filter is set up for FLM */
+ if (fd->jump_to_group != UINT32_MAX) {
+ flm_flow_setup_group(dev, fd->jump_to_group,
+ fh->resource[RES_CAT_CFN].index,
+ fh->resource[RES_KM_FLOW_TYPE].index,
+ fh);
+ }
+ }
+
+ /*
+ * Setup QSL
+ */
+ if (setup_qsl_rcp) {
+ if (qsl_size == 0) {
+ /* Create drop filter */
+ hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+ fh->resource[RES_QSL_RCP].index,
+ 0x0);
+ hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+ fh->resource[RES_QSL_RCP].index,
+ 0x3);
+ hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+ fh->resource[RES_QSL_RCP].index,
+ 0x0);
+
+ hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+ fh->resource[RES_QSL_RCP].index, 0);
+ hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+ fh->resource[RES_QSL_RCP].index, 0);
+
+ hw_mod_qsl_rcp_flush(&dev->ndev->be,
+ fh->resource[RES_QSL_RCP].index,
+ 1);
+ } else {
+ const int table_start = fh->resource[RES_QSL_QST].index;
+ const int table_end = table_start +
+ fh->resource[RES_QSL_QST].count -
+ 1;
+
+ /* Use 0x0 for pure retransmit */
+ hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,
+ fh->resource[RES_QSL_RCP].index,
+ 0x0);
+ hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,
+ fh->resource[RES_QSL_RCP].index,
+ 0x0);
+ hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,
+ fh->resource[RES_QSL_RCP].index,
+ num_dest_port > 0 ? 0x3 : 0x0);
+
+ hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,
+ fh->resource[RES_QSL_RCP].index,
+ table_start);
+ hw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,
+ fh->resource[RES_QSL_RCP].index,
+ table_end);
+
+ hw_mod_qsl_rcp_flush(&dev->ndev->be,
+ fh->resource[RES_QSL_RCP].index,
+ 1);
+
+ /* Setup QSL QST/QEN */
+ if (num_dest_port > 0 && num_queues > 0) {
+ int ports[num_dest_port];
+ int queues[num_queues];
+
+ int port_index = 0;
+ int queue_index = 0;
+
+ for (int i = 0; i < fd->dst_num_avail; ++i) {
+ if (fd->dst_id[i].type == PORT_PHY) {
+ ports[port_index++] =
+ fd->dst_id[i].id;
+ } else if (fd->dst_id[i].type ==
+ PORT_VIRT) {
+ queues[queue_index++] =
+ fd->dst_id[i].id;
+ }
+ }
+
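+				/*
+				 * Each QST entry pairs a TX port with a queue,
+				 * cycling through the collected lists.
+				 */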
+ for (int i = 0; i < fd->dst_num_avail; ++i) {
+ hw_mod_qsl_qst_set(&dev->ndev->be,
+ HW_QSL_QST_TX_PORT,
+ table_start + i,
+ ports[i % num_dest_port]);
+ hw_mod_qsl_qst_set(&dev->ndev->be,
+ HW_QSL_QST_LRE,
+ table_start + i, 1);
+
+ hw_mod_qsl_qst_set(&dev->ndev->be,
+ HW_QSL_QST_QUEUE,
+ table_start + i,
+ queues[i % num_queues]);
+ hw_mod_qsl_qst_set(&dev->ndev->be,
+ HW_QSL_QST_EN,
+ table_start + i, 1);
+ }
+ } else if (num_dest_port > 0) {
+ for (int i = 0; i < fd->dst_num_avail; ++i) {
+ hw_mod_qsl_qst_set(&dev->ndev->be,
+ HW_QSL_QST_TX_PORT,
+ table_start + i,
+ fd->dst_id[i].id);
+ hw_mod_qsl_qst_set(&dev->ndev->be,
+ HW_QSL_QST_LRE,
+ table_start + i, 1);
+ }
+ } else if (num_queues > 0) {
+ for (int i = 0; i < fd->dst_num_avail; ++i) {
+ hw_mod_qsl_qst_set(&dev->ndev->be,
+ HW_QSL_QST_QUEUE,
+ table_start + i,
+ fd->dst_id[i].id);
+ hw_mod_qsl_qst_set(&dev->ndev->be,
+ HW_QSL_QST_EN,
+ table_start + i, 1);
+ }
+ }
+
+ hw_mod_qsl_qst_flush(&dev->ndev->be, table_start,
+ fd->dst_num_avail);
+ }
+ }
+
+ /*
+ * Setup CAT KM functionality
+ */
+ if (setup_km) {
+ uint32_t bm = 0;
+
+ /* Enable KM match FS for key A */
+ set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+ setup_km_ft, 0, 1);
+
+ /* KM function select */
+ hw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,
+ KM_FLM_IF_FIRST,
+ fh->resource[RES_CAT_CFN].index,
+ setup_km_rcp);
+ hw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+ fh->resource[RES_CAT_CFN].index, 1);
+
+ /* KM function enable */
+ hw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+ KM_FLM_IF_FIRST,
+ fh->resource[RES_CAT_CFN].index / 8, &bm);
+ hw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+ KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+ bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+ hw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+ fh->resource[RES_CAT_CFN].index / 8, 1);
+ } else if (setup_default_ft) {
+ /* Enable "no KM match" FT for key A */
+ set_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,
+ 0, 0, 1);
+ }
+
+ /*
+ * Setup CAT FLM functionality
+ */
+ if (setup_flm) {
+ uint32_t bm = 0;
+
+ /* Enable KM match FT for key A, and FLM match FT for key C */
+ set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+ setup_km_ft, 0, 1); /* KM FT A */
+ set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+ setup_flm_ft, 2, 1); /* FLM FT C */
+
+ /* FLM function enable */
+ hw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+ KM_FLM_IF_FIRST,
+ fh->resource[RES_CAT_CFN].index / 8,
+ &bm);
+ hw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,
+ KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,
+ bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));
+ hw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,
+ fh->resource[RES_CAT_CFN].index / 8,
+ 1);
+ } else if (setup_default_ft) {
+ /* Enable KM for key A and UNHANDLED for key C */
+ set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,
+ setup_km_ft, 0, 1);
+ set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,
+ 2, 1);
+ }
+
+ /*
+ * Setup HST
+ */
+ if (setup_hst) {
+ int hst_index = -1;
+
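+		/* Look for an existing HST recipe with identical strip parameters to share */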
+ for (int i = 1;
+ i < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {
+ uint32_t values[] = { 0, 0, 0, 0, 0 };
+
+ if (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,
+ i))
+ continue;
+
+ hw_mod_hst_rcp_get(&dev->ndev->be,
+ HW_HST_RCP_STRIP_MODE, i,
+ &values[0]);
+ hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,
+ i, &values[1]);
+ hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,
+ i, &values[2]);
+ hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,
+ i, &values[3]);
+ hw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,
+ i, &values[4]);
+
+ if ((int)values[0] == 1 &&
+ (int)values[1] == fd->header_strip_start_dyn &&
+ (int)values[2] == fd->header_strip_start_ofs &&
+ (int)values[3] == fd->header_strip_end_dyn &&
+ (int)values[4] == fd->header_strip_end_ofs) {
+ hst_index = i;
+ break;
+ }
+ }
+
+ if (hst_index >= 0) {
+ if (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,
+ hst_index)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not reference HST RCP resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ fh->resource[RES_HST_RCP].count = 1;
+ fh->resource[RES_HST_RCP].index = hst_index;
+ fh->resource[RES_HST_RCP].referenced = 1;
+ } else {
+ if (flow_nic_allocate_fh_resource(dev->ndev,
+ RES_HST_RCP,
+ fh, 1, 1)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not get HST RCP resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ hw_mod_hst_rcp_set(&dev->ndev->be,
+ HW_HST_RCP_STRIP_MODE,
+ fh->resource[RES_HST_RCP].index, 1);
+ hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,
+ fh->resource[RES_HST_RCP].index,
+ fd->header_strip_start_dyn);
+ hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,
+ fh->resource[RES_HST_RCP].index,
+ fd->header_strip_start_ofs);
+ hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,
+ fh->resource[RES_HST_RCP].index,
+ fd->header_strip_end_dyn);
+ hw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,
+ fh->resource[RES_HST_RCP].index,
+ fd->header_strip_end_ofs);
+
+ hw_mod_hst_rcp_set(&dev->ndev->be,
+ HW_HST_RCP_MODIF0_CMD,
+ fh->resource[RES_HST_RCP].index,
+ fd->header_strip_removed_outer_ip ? 7 : 6);
+ hw_mod_hst_rcp_set(&dev->ndev->be,
+ HW_HST_RCP_MODIF0_DYN,
+ fh->resource[RES_HST_RCP].index, 2);
+ hw_mod_hst_rcp_set(&dev->ndev->be,
+ HW_HST_RCP_MODIF0_OFS,
+ fh->resource[RES_HST_RCP].index, 0);
+
+ hw_mod_hst_rcp_flush(&dev->ndev->be,
+ fh->resource[RES_HST_RCP].index, 1);
+ }
+ }
+
+ /*
+ * Setup TPE
+ */
+ if (setup_tpe_encap_data) {
+ int ext_rpl_index = -1;
+ int rpl_rpl_index = -1;
+ int rpl_rpl_length = -1;
+
+ /* Find existing RPL */
+ for (int i = 1;
+ i < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {
+ int found = 1;
+ uint32_t len;
+ uint32_t ptr;
+
+ if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,
+ i))
+ continue;
+
+ hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+ HW_TPE_RPL_EXT_META_RPL_LEN, i,
+ &len);
+ if (len != fd->tun_hdr.len)
+ continue;
+
+ hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+ HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);
+
+ for (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;
+ ++ptr_it) {
+ uint32_t data[4];
+
+ hw_mod_tpe_rpl_rpl_get(&dev->ndev->be,
+ HW_TPE_RPL_RPL_VALUE,
+ ptr + ptr_it, data);
+
+ if (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=
+ data[0] ||
+ fd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=
+ data[1] ||
+ fd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=
+ data[2] ||
+ fd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=
+ data[3]) {
+ found = 0;
+ break;
+ }
+ }
+
+ if (found) {
+ ext_rpl_index = i;
+ rpl_rpl_index = (int)ptr;
+ rpl_rpl_length = (int)len;
+ break;
+ }
+ }
+
+ /* Set RPL data */
+ if (ext_rpl_index >= 0) {
+ if (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,
+ ext_rpl_index)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not reference TPE EXT resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ for (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {
+ if (flow_nic_ref_resource(dev->ndev,
+ RES_TPE_RPL,
+ rpl_rpl_index + i)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not reference TPE RPL resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+ }
+ } else {
+ ext_rpl_index = flow_nic_alloc_resource(dev->ndev,
+ RES_TPE_EXT, 1);
+ if (ext_rpl_index < 0) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not get TPE EXT resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ rpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;
+ rpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,
+ RES_TPE_RPL,
+ rpl_rpl_length,
+ 1);
+ if (rpl_rpl_index < 0) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not get TPE RPL resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ /* Program new encap header data */
+ hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+ HW_TPE_RPL_EXT_RPL_PTR,
+ ext_rpl_index, rpl_rpl_index);
+ hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+ HW_TPE_RPL_EXT_META_RPL_LEN,
+ ext_rpl_index, fd->tun_hdr.len);
+ hw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,
+ 1);
+
+ for (int i = 0; i < rpl_rpl_length; ++i) {
+ hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+ HW_TPE_RPL_RPL_VALUE,
+ rpl_rpl_index + i,
+ fd->tun_hdr.d.hdr32 + i * 4);
+ }
+ hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,
+ rpl_rpl_length);
+ }
+
+ flm_rpl_ext_ptr = ext_rpl_index;
+ }
+
+ if (setup_tpe) {
+ const uint32_t eth_length = 14;
+ const uint32_t l2_length = fd->tun_hdr.l2_len;
+ const uint32_t l3_length = fd->tun_hdr.l3_len;
+ const uint32_t l4_length = fd->tun_hdr.l4_len;
+ const uint32_t fcs_length = 4;
+
+ int tpe_index = -1;
+
+ /* Find existing RCP */
+ for (int i = 1;
+ i < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {
+ uint32_t value;
+
+ if (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,
+ i))
+ continue;
+
+ hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+ HW_TPE_RPL_RCP_LEN, i, &value);
+ if (value != fd->tun_hdr.len)
+ continue;
+ hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+ HW_TPE_RPL_RCP_DYN, i, &value);
+ if (value != 1)
+ continue;
+ hw_mod_tpe_rpl_rcp_get(&dev->ndev->be,
+ HW_TPE_RPL_RCP_OFS, i, &value);
+ if (value != 0)
+ continue;
+ hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+ HW_TPE_HFU_RCP_L3_PRT, i,
+ &value);
+ if (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))
+ continue;
+ hw_mod_tpe_hfu_rcp_get(&dev->ndev->be,
+ HW_TPE_HFU_RCP_OUTER_L3_OFS, i,
+ &value);
+ if (value != l2_length)
+ continue;
+
+ tpe_index = i;
+ break;
+ }
+
+ /* Set RCP data */
+ if (tpe_index >= 0) {
+ if (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,
+ tpe_index)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not reference TPE RCP resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ fh->resource[RES_TPE_RCP].count = 1;
+ fh->resource[RES_TPE_RCP].index = tpe_index;
+ fh->resource[RES_TPE_RCP].referenced = 1;
+ } else {
+ if (flow_nic_allocate_fh_resource(dev->ndev,
+ RES_TPE_RCP,
+ fh, 1, 1)) {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Could not get TPE RCP resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,
+ error);
+ return NULL;
+ }
+
+ /* Extend packet if needed. */
+ if (fd->tun_hdr.len > eth_length) {
+ /* Extend FPGA packet buffer */
+ hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+ HW_TPE_RPP_RCP_EXP,
+ fh->resource[RES_TPE_RCP].index,
+ fd->tun_hdr.len - eth_length);
+ hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+
+ /*
+ * Insert 0's into packet
+ * After this step DYN offsets are shifted by encap length,
+ * so only DYN offset 1 and 18 should be used
+ */
+ hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+ HW_TPE_INS_RCP_DYN,
+ fh->resource[RES_TPE_RCP].index, 1);
+ hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+ HW_TPE_INS_RCP_OFS,
+ fh->resource[RES_TPE_RCP].index, 0);
+ hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+ HW_TPE_INS_RCP_LEN,
+ fh->resource[RES_TPE_RCP].index,
+ fd->tun_hdr.len - eth_length);
+ hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ }
+
+ if (fd->tun_hdr.len > 0) {
+ /* Write header data to beginning of packet */
+ hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+ HW_TPE_RPL_RCP_DYN,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+ HW_TPE_RPL_RCP_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+ HW_TPE_RPL_RCP_LEN,
+ fh->resource[RES_TPE_RCP].index,
+ fd->tun_hdr.len);
+ hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+ HW_TPE_RPL_RCP_RPL_PTR,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+ HW_TPE_RPL_RCP_EXT_PRIO,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ }
+
+ for (unsigned int i = 0; i < fd->modify_field_count;
+ ++i) {
+ uint32_t select = fd->modify_field[i].select;
+ uint32_t dyn = fd->modify_field[i].dyn;
+ uint32_t ofs = fd->modify_field[i].ofs;
+ uint32_t len = fd->modify_field[i].len;
+
+ align_tun_offset(fd, eth_length, i, &ofs, select, l2_length,
+ l3_length, l4_length, &dyn);
+
+ hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+ HW_TPE_CPY_RCP_READER_SELECT,
+ fh->resource[RES_TPE_RCP].index +
+ 16 * i,
+ select);
+ hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+ HW_TPE_CPY_RCP_DYN,
+ fh->resource[RES_TPE_RCP].index +
+ 16 * i,
+ dyn);
+ hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+ HW_TPE_CPY_RCP_OFS,
+ fh->resource[RES_TPE_RCP].index +
+ 16 * i,
+ ofs);
+ hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+ HW_TPE_CPY_RCP_LEN,
+ fh->resource[RES_TPE_RCP].index +
+ 16 * i,
+ len);
+ hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+ fh->resource[RES_TPE_RCP].index +
+ 16 * i,
+ 1);
+ }
+
+ if (fd->tun_hdr.new_outer) {
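+				/*
+				 * The HFU rewrites the outer UDP, IP and GTP length
+				 * fields after encapsulation: each length is taken as
+				 * packet end (DYN 18) minus a fixed offset into the
+				 * new outer headers.
+				 */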
+ /*
+ * UDP length
+ * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS
+ */
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_A_WR,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ l2_length + l3_length + 4);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+ fh->resource[RES_TPE_RCP].index,
+ 18);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ -(l2_length + l3_length + fcs_length));
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+
+ /* IPv4/IPv6 length */
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_B_WR,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ l2_length +
+ (fd->tun_hdr.ip_version == 4 ? 2 : 4));
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+ fh->resource[RES_TPE_RCP].index,
+ 18);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ -(l2_length +
+ (fd->tun_hdr.ip_version == 4 ?
+ 0 : l3_length) + fcs_length));
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+
+ /* GTP length */
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_C_WR,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ l2_length + l3_length + l4_length + 2);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+ fh->resource[RES_TPE_RCP].index,
+ 18);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ -(l2_length + l3_length + l4_length +
+ 8 + fcs_length));
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+
+				/* Update TTL */
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_TTL_WR,
+ fh->resource[RES_TPE_RCP].index,
+ fd->ttl_sub_enable);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_TTL_POS_DYN,
+ fh->resource[RES_TPE_RCP].index,
+ fd->ttl_sub_outer ? 1 : DYN_L3);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_TTL_POS_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ (fd->ttl_sub_outer ?
+ l2_length :
+ fd->tun_hdr.len - eth_length) +
+ (fd->ttl_sub_ipv4 ? 8 : 7));
+
+				/* Update FPGA DYN offsets */
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_CS_INF,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_L3_PRT,
+ fh->resource[RES_TPE_RCP].index,
+ (fd->tun_hdr.ip_version == 4 ? 1 : 2));
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_L3_FRAG,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_TUNNEL,
+ fh->resource[RES_TPE_RCP].index,
+ 6);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_L4_PRT,
+ fh->resource[RES_TPE_RCP].index,
+ 2);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_OUTER_L3_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ l2_length);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_OUTER_L4_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ l2_length + l3_length);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_INNER_L3_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ fd->tun_hdr.len - eth_length
+ - 4 * fd->tun_hdr.nb_vlans);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_INNER_L4_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ fd->tun_hdr.len - eth_length
+ - 4 * fd->tun_hdr.nb_vlans);
+
+ hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ } else {
+				/* Update TTL */
+ if (fd->ttl_sub_enable) {
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_TTL_WR,
+ fh->resource[RES_TPE_RCP].index,
+ fd->ttl_sub_enable);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_TTL_POS_DYN,
+ fh->resource[RES_TPE_RCP].index,
+ fd->ttl_sub_outer ? DYN_L3 :
+ DYN_TUN_L3);
+ if (fd->tun_hdr.len == 0) {
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_TTL_POS_OFS,
+ fh->resource[RES_TPE_RCP]
+ .index,
+ fd->ttl_sub_ipv4 ? 8 : 7);
+ } else {
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_TTL_POS_OFS,
+ fh->resource[RES_TPE_RCP]
+ .index,
+ (fd->tun_hdr.len -
+ eth_length) +
+ (fd->ttl_sub_ipv4 ?
+ 8 : 7));
+ }
+ } else {
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_TTL_WR,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_TTL_POS_DYN,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_TTL_POS_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ }
+
+				/* Update FPGA DYN offsets */
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_CS_INF,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_L3_PRT,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_L3_FRAG,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_TUNNEL,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_L4_PRT,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ if (fd->tun_hdr.len == 0) {
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_OUTER_L3_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_OUTER_L4_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_INNER_L3_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_INNER_L4_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ 0);
+ } else {
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_OUTER_L3_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ fd->tun_hdr.len - eth_length);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_OUTER_L4_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ fd->tun_hdr.len - eth_length);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_INNER_L3_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ fd->tun_hdr.len - eth_length);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_HFU_RCP_INNER_L4_OFS,
+ fh->resource[RES_TPE_RCP].index,
+ fd->tun_hdr.len - eth_length);
+ }
+
+ hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ }
+
+ /* Calculate valid outer and inner checksums */
+ hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+ HW_TPE_CSU_RCP_OUTER_L3_CMD,
+ fh->resource[RES_TPE_RCP].index,
+ 3);
+ hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+ HW_TPE_CSU_RCP_OUTER_L4_CMD,
+ fh->resource[RES_TPE_RCP].index,
+ 3);
+ hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+ HW_TPE_CSU_RCP_INNER_L3_CMD,
+ fh->resource[RES_TPE_RCP].index,
+ 3);
+ hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+ HW_TPE_CSU_RCP_INNER_L4_CMD,
+ fh->resource[RES_TPE_RCP].index,
+ 3);
+ hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+ fh->resource[RES_TPE_RCP].index,
+ 1);
+ }
+ }
+
+ /*
+ * Setup CAT Color Table functionality
+ */
+ if (setup_cat_cot) {
+ hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,
+ fh->resource[RES_CAT_COT].index, 0);
+ hw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,
+ fh->resource[RES_CAT_COT].index, 0x4);
+ hw_mod_cat_cot_flush(&dev->ndev->be,
+ fh->resource[RES_CAT_COT].index, 1);
+ }
+
+ /*
+ * Setup CAT action functionality
+ */
+ if (setup_cat_cts) {
+ /* Setup CAT CTS */
+ const int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;
+
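+		/*
+		 * CTS slots per CFN: slot 0 carries the COT index, slot 1 the
+		 * QSL RCP, slot 4 the HST RCP and slot 5 the TPE RCP.
+		 */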
+ hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+ offset * fh->resource[RES_CAT_CFN].index + 0,
+ fh->resource[RES_CAT_COT].index);
+ hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+ offset * fh->resource[RES_CAT_CFN].index + 0,
+ 0);
+ hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+ offset * fh->resource[RES_CAT_CFN].index + 1,
+ 0);
+ hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+ offset * fh->resource[RES_CAT_CFN].index + 1,
+ fh->resource[RES_QSL_RCP].index);
+ hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+ offset * fh->resource[RES_CAT_CFN].index + 2,
+ 0);
+ hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+ offset * fh->resource[RES_CAT_CFN].index + 2,
+ 0);
+ hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+ offset * fh->resource[RES_CAT_CFN].index + 3,
+ 0);
+ hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+ offset * fh->resource[RES_CAT_CFN].index + 3,
+ 0);
+ hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+ offset * fh->resource[RES_CAT_CFN].index + 4,
+ fh->resource[RES_HST_RCP].index);
+ hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+ offset * fh->resource[RES_CAT_CFN].index + 4,
+ 0);
+ hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,
+ offset * fh->resource[RES_CAT_CFN].index + 5,
+ fh->resource[RES_TPE_RCP].index);
+ hw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,
+ offset * fh->resource[RES_CAT_CFN].index + 5,
+ 0);
+
+		hw_mod_cat_cts_flush(&dev->ndev->be,
+				     offset * fh->resource[RES_CAT_CFN].index,
+				     6);
+
+ /* Setup CAT CTE */
+ hw_mod_cat_cte_set(&dev->ndev->be,
+ HW_CAT_CTE_ENABLE_BM,
+ fh->resource[RES_CAT_CFN].index,
+ (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |
+ (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |
+ 0x040 |
+ (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |
+ (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));
+ hw_mod_cat_cte_flush(&dev->ndev->be,
+ fh->resource[RES_CAT_CFN].index, 1);
+ }
+
+ /*
+ * Setup CAT CFN
+ *
+	 * Once CAT CFN has been programmed, traffic will start matching the filter,
+ * so CAT CFN must be the last thing to be programmed.
+ */
+ if (setup_cat_cfn) {
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+ fh->resource[RES_CAT_CFN].index, 0, 0x0);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,
+ fh->resource[RES_CAT_CFN].index, 0, 0x1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,
+ fh->resource[RES_CAT_CFN].index, 0, 0x0);
+
+ /* Protocol checks */
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,
+ fh->resource[RES_CAT_CFN].index, 0, 0x0);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,
+ fh->resource[RES_CAT_CFN].index, 0,
+ fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,
+ fh->resource[RES_CAT_CFN].index, 0,
+ (0xf << fd->vlans) & 0xf);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,
+ fh->resource[RES_CAT_CFN].index, 0,
+ fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,
+ fh->resource[RES_CAT_CFN].index, 0,
+ fd->fragmentation);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,
+ fh->resource[RES_CAT_CFN].index, 0,
+ fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be,
+ HW_CAT_CFN_PTC_TUNNEL,
+ fh->resource[RES_CAT_CFN].index, 0,
+ fd->tunnel_prot != -1 ? (1 << fd->tunnel_prot) : -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,
+ fh->resource[RES_CAT_CFN].index, 0,
+ fd->tunnel_l3_prot != -1 ?
+ (1 << fd->tunnel_l3_prot) : -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,
+ fh->resource[RES_CAT_CFN].index, 0,
+ fd->tunnel_l4_prot != -1 ?
+ (1 << fd->tunnel_l4_prot) : -1);
+
+ /* Error checks */
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,
+ fh->resource[RES_CAT_CFN].index, 0, 0x0);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,
+ fh->resource[RES_CAT_CFN].index, 0, 0x1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,
+ fh->resource[RES_CAT_CFN].index, 0, 0x1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,
+ fh->resource[RES_CAT_CFN].index, 0, 0x1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,
+ fh->resource[RES_CAT_CFN].index, 0, 0x1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,
+ fh->resource[RES_CAT_CFN].index, 0, 0x1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be,
+ HW_CAT_CFN_ERR_TTL_EXP,
+ fh->resource[RES_CAT_CFN].index, 0,
+ (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);
+ hw_mod_cat_cfn_set(&dev->ndev->be,
+ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+ fh->resource[RES_CAT_CFN].index, 0,
+ (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? -1 : 0x1);
+
+ /* MAC port check */
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,
+ fh->resource[RES_CAT_CFN].index, 0,
+ 1 << fh->port_id);
+
+ /* Pattern match checks */
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,
+ fh->resource[RES_CAT_CFN].index, 0, 0x0);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,
+ fh->resource[RES_CAT_CFN].index, 0, 0x0);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,
+ fh->resource[RES_CAT_CFN].index, 0, 0x0);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,
+ fh->resource[RES_CAT_CFN].index, 0, 0x0);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+
+ /* Length checks */
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,
+ fh->resource[RES_CAT_CFN].index, 0, 0x0);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,
+ fh->resource[RES_CAT_CFN].index, 0, -1);
+
+ /* KM and FLM */
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,
+ fh->resource[RES_CAT_CFN].index, 0, 0x1);
+ hw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,
+ fh->resource[RES_CAT_CFN].index, 0, 0x3);
+
+ hw_mod_cat_cfn_flush(&dev->ndev->be,
+ fh->resource[RES_CAT_CFN].index, 1);
+ }
+
+ /* Program FLM flow */
+ if (fh_flm) {
+ convert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,
+ flm_rpl_ext_ptr, attr->priority);
+ flm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);
+ }
+
+ if (free_fd)
+ free(fd);
+
+ return (fh_flm) ? fh_flm : fh;
+}
+
+/*
+ * Public functions
+ */
+
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+ if (!ndev->flow_mgnt_prepared) {
+ /* Check static arrays are big enough */
+ assert(ndev->be.tpe.nb_cpy_writers <=
+ MAX_CPY_WRITERS_SUPPORTED);
+
+ /* KM Flow Type 0 is reserved */
+ flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+ flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* FLM Flow Type 0 and 1 are reserved */
+ flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);
+ flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);
+ flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
+
+		/* CAT CFN 0 is reserved as a low-priority catch-all filter */
+ hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,
+ 0, 0, 0);
+ hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+ flow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);
+
+ /* Initialize QSL with unmatched recipe index 0 - discard */
+ if (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)
+ goto err_exit0;
+ if (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)
+ goto err_exit0;
+
+ flow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);
+
+ /* Initialize QST with default index 0 */
+ if (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,
+ 0x0) < 0)
+ goto err_exit0;
+ if (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)
+ goto err_exit0;
+
+ flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
+
+ /* HST & TPE index 0 is reserved */
+ flow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);
+ flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+ flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+ flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
+
+		/* PDB setup: Direct Virtio scatter-gather descriptor of 12 bytes for recipe 0 */
+ if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <
+ 0)
+ goto err_exit0;
+ if (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <
+ 0)
+ goto err_exit0;
+
+ if (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)
+ goto err_exit0;
+
+ flow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);
+
+ /* Set default hasher recipe to 5-tuple */
+ flow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);
+ hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+
+ flow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);
+
+ /*
+ * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for
+ * this entry
+ */
+ hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+ if (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)
+ goto err_exit0;
+
+ flow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);
+
+ /* Unblock MAC and MAC statistics on this NIC */
+ if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)
+ goto err_exit0;
+		/* Block keep-alive - not needed */
+ if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)
+ goto err_exit0;
+ /*
+ * Unblock all MAC ports
+ */
+ if (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)
+ goto err_exit0;
+
+ /*
+ * unblock RPP slices
+ */
+ hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);
+
+ if (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)
+ goto err_exit0;
+
+ /* FLM */
+ if (flm_sdram_calibrate(ndev) < 0)
+ goto err_exit0;
+ if (flm_sdram_reset(ndev, 1) < 0)
+ goto err_exit0;
+ flm_flow_handle_create(&ndev->flm_res_handle);
+
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,
+ 0); /* Learn done status */
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,
+ 0); /* Learn fail status */
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,
+ 0); /* Learn ignore status */
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,
+ 0); /* Unlearn done status */
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,
+ 0); /* Unlearn ignore status */
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,
+ 0); /* Relearn done status */
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,
+ 0); /* Relearn ignore status */
+ hw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);
+ hw_mod_flm_control_flush(&ndev->be);
+
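+		/*
+		 * Learn FIFO drop thresholds per flow priority: priority 0 is
+		 * only dropped at 100% fill, priorities 1-3 at progressively
+		 * lower fill levels.
+		 */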
+ hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,
+ 0); /* Drop at 100% FIFO fill level */
+ hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);
+ hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,
+				    6); /* Drop at 37.5% FIFO fill level */
+ hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);
+ hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,
+ 4); /* Drop at 25% FIFO fill level */
+ hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);
+ hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,
+				    2); /* Drop at 12.5% FIFO fill level */
+ hw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);
+ hw_mod_flm_prio_flush(&ndev->be);
+
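+		/* Apply the same periodic-statistics byte/packet/timeout limits to every PST profile */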
+ for (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {
+ hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,
+ FLM_PERIODIC_STATS_BYTE_LIMIT);
+ hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,
+ FLM_PERIODIC_STATS_PKT_LIMIT);
+ hw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,
+ FLM_PERIODIC_STATS_BYTE_TIMEOUT);
+ }
+ hw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);
+
+ hw_mod_flm_stat_update(&ndev->be);
+
+ ndev->flm_mtr_handle =
+ calloc(1, sizeof(struct flm_flow_mtr_handle_s));
+ ndev->ft_res_handle =
+ calloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));
+ ndev->mtr_stat_handle =
+ calloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));
+
+ if (ndev->flm_mtr_handle == NULL ||
+ ndev->ft_res_handle == NULL ||
+ ndev->mtr_stat_handle == NULL)
+ goto err_exit0;
+
+ struct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;
+
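+		/* Zero the meter statistics counters before first use */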
+ for (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {
+ __atomic_store_n(&mtr_stat[i].n_pkt, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&mtr_stat[i].n_bytes, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&mtr_stat[i].stats_mask, 0, __ATOMIC_RELAXED);
+ }
+
+ if (flow_group_handle_create(&ndev->group_handle,
+ FLM_FLOW_RCP_MAX))
+ goto err_exit0;
+
+ ndev->flow_mgnt_prepared = 1;
+ }
+ return 0;
+
+err_exit0:
+ done_flow_management_of_ndev_profile_inline(ndev);
+ return -1;
+}
+
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
+{
+#ifdef FLOW_DEBUG
+ ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+ FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+ if (ndev->flow_mgnt_prepared) {
+ flm_sdram_reset(ndev, 0);
+ flm_flow_handle_remove(&ndev->flm_res_handle);
+
+ flow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 0);
+ flow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);
+
+ hw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);
+ hw_mod_flm_rcp_flush(&ndev->be, 0, 1);
+ flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);
+ flow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);
+ flow_nic_free_resource(ndev, RES_FLM_RCP, 0);
+
+ free(ndev->flm_mtr_handle);
+ free(ndev->ft_res_handle);
+ free(ndev->mtr_stat_handle);
+ flow_group_handle_destroy(&ndev->group_handle);
+
+ hw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);
+ hw_mod_cat_cfn_flush(&ndev->be, 0, 1);
+ flow_nic_free_resource(ndev, RES_CAT_CFN, 0);
+
+ hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);
+ hw_mod_qsl_rcp_flush(&ndev->be, 0, 1);
+ flow_nic_free_resource(ndev, RES_QSL_RCP, 0);
+
+ hw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);
+ hw_mod_hst_rcp_flush(&ndev->be, 0, 1);
+ flow_nic_free_resource(ndev, RES_HST_RCP, 0);
+
+ hw_mod_tpe_reset(&ndev->be);
+ flow_nic_free_resource(ndev, RES_TPE_RCP, 0);
+ flow_nic_free_resource(ndev, RES_TPE_EXT, 0);
+ flow_nic_free_resource(ndev, RES_TPE_RPL, 0);
+
+ hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);
+ hw_mod_pdb_rcp_flush(&ndev->be, 0, 1);
+ flow_nic_free_resource(ndev, RES_PDB_RCP, 0);
+
+ hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);
+ hw_mod_hsh_rcp_flush(&ndev->be, 0, 1);
+ flow_nic_free_resource(ndev, RES_HSH_RCP, 0);
+
+ hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
+ hw_mod_cat_cot_flush(&ndev->be, 0, 1);
+ flow_nic_free_resource(ndev, RES_CAT_COT, 0);
+
+#ifdef FLOW_DEBUG
+ ndev->be.iface->set_debug_mode(ndev->be.be_dev,
+ FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+ ndev->flow_mgnt_prepared = 0;
+ }
+
+ return 0;
+}
+
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+ const struct flow_elem elem[],
+ const struct flow_action action[],
+ struct flow_error *error)
+{
+ uint32_t port_id = 0;
+ uint32_t num_dest_port = 0;
+ uint32_t num_queues = 0;
+
+ uint32_t packet_data[10];
+ uint32_t packet_mask[10];
+ struct flm_flow_key_def_s key_def;
+
+ flow_nic_set_error(ERR_SUCCESS, error);
+
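+	/* Validation only interprets the match/action lists; the resulting flow definition is freed without programming hardware */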
+ pthread_mutex_lock(&dev->ndev->mtx);
+ struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action,
+ error, 0, &port_id,
+ &num_dest_port, &num_queues,
+ packet_data, packet_mask,
+ &key_def);
+ pthread_mutex_unlock(&dev->ndev->mtx);
+
+ if (!fd)
+ return -1;
+
+ free(fd);
+ return 0;
+}
+
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+ const struct flow_attr *attr, const struct flow_elem elem[],
+ const struct flow_action action[], struct flow_error *error)
+{
+ struct flow_handle *fh = NULL;
+
+ uint32_t port_id = UINT32_MAX;
+ uint32_t num_dest_port;
+ uint32_t num_queues;
+
+ uint32_t packet_data[10];
+ uint32_t packet_mask[10];
+ struct flm_flow_key_def_s key_def;
+
+ struct flow_attr attr_local;
+
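+	/* Work on a local copy of the attributes; a forced VLAN VID is only applied for group 0 */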
+ memcpy(&attr_local, attr, sizeof(struct flow_attr));
+ if (attr_local.group > 0)
+ attr_local.forced_vlan_vid = 0;
+
+ flow_nic_set_error(ERR_SUCCESS, error);
+
+ pthread_mutex_lock(&dev->ndev->mtx);
+
+ struct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,
+ attr_local.forced_vlan_vid,
+ &port_id, &num_dest_port,
+ &num_queues, packet_data,
+ packet_mask, &key_def);
+ if (!fd)
+ goto err_exit;
+
+ /* Translate group IDs */
+ if (fd->jump_to_group != UINT32_MAX &&
+ flow_group_translate_get(dev->ndev->group_handle,
+ attr_local.caller_id, fd->jump_to_group,
+ &fd->jump_to_group)) {
+ NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ goto err_exit;
+ }
+ if (attr_local.group > 0 &&
+ flow_group_translate_get(dev->ndev->group_handle,
+ attr_local.caller_id, attr_local.group,
+ &attr_local.group)) {
+ NT_LOG(ERR, FILTER, "ERROR: Could not get group resource\n");
+ flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+ goto err_exit;
+ }
+
+ if (port_id == UINT32_MAX)
+ port_id = dev->port_id;
+
+ /* Create and flush filter to NIC */
+ fh = create_flow_filter(dev, fd, &attr_local, error, port_id,
+ num_dest_port, num_queues, packet_data,
+ packet_mask, &key_def);
+ if (!fh)
+ goto err_exit;
+
+ NT_LOG(DBG, FILTER,
+	       "New flow: fh (flow handle) %p, fd (flow definition) %p\n", fh,
+ fd);
+ NT_LOG(DBG, FILTER,
+ ">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\n",
+ dev, dev->ndev->adapter_no, dev->port, fh, fd);
+
+ pthread_mutex_unlock(&dev->ndev->mtx);
+
+ return fh;
+
+err_exit:
+ if (fh)
+ flow_destroy_locked_profile_inline(dev, fh, NULL);
+
+ pthread_mutex_unlock(&dev->ndev->mtx);
+
+ NT_LOG(ERR, FILTER, "ERR: %s\n", __func__);
+ return NULL;
+}
+
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+ struct flow_handle *fh,
+ struct flow_error *error)
+{
+ assert(dev);
+ assert(fh);
+
+ int err = 0;
+
+ flow_nic_set_error(ERR_SUCCESS, error);
+
+ /* take flow out of ndev list - may not have been put there yet */
+	if (fh->type == FLOW_HANDLE_TYPE_FLM)
+		nic_remove_flow_flm(dev->ndev, fh);
+	else
+		nic_remove_flow(dev->ndev, fh);
+
+#ifdef FLOW_DEBUG
+ dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+ FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
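+	/* FLM flows: remove the flow from the flow matcher and release its replace-extension resources; other flows: free each NIC resource recorded in the handle */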
+ if (fh->type == FLOW_HANDLE_TYPE_FLM) {
+ err |= flm_flow_programming(dev, fh, NULL, 0, 0);
+
+ if (fh->flm_rpl_ext_ptr > 0 &&
+ flow_nic_deref_resource(dev->ndev, RES_TPE_EXT,
+ (int)fh->flm_rpl_ext_ptr) == 0) {
+ uint32_t ptr = 0;
+ uint32_t len = 0;
+
+ hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+ HW_TPE_RPL_EXT_RPL_PTR,
+ (int)fh->flm_rpl_ext_ptr, &ptr);
+ hw_mod_tpe_rpl_ext_get(&dev->ndev->be,
+ HW_TPE_RPL_EXT_META_RPL_LEN,
+ (int)fh->flm_rpl_ext_ptr, &len);
+
+ hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+ HW_TPE_PRESET_ALL,
+ (int)fh->flm_rpl_ext_ptr, 0);
+ hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+ (int)fh->flm_rpl_ext_ptr, 1);
+
+ for (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {
+ if (flow_nic_deref_resource(dev->ndev,
+ RES_TPE_RPL,
+ (int)(ptr + ii)) == 0) {
+ uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+
+ hw_mod_tpe_rpl_rpl_set(&dev->ndev->be,
+ HW_TPE_PRESET_ALL,
+ (int)(ptr + ii),
+ rpl_zero);
+ hw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,
+ (int)(ptr + ii),
+ 1);
+ }
+ }
+ }
+
+ flow_group_translate_release(dev->ndev->group_handle,
+ fh->flm_owner->flm_group_index);
+
+ fh->flm_owner->flm_ref_count -= 1;
+ if (fh->flm_owner->flm_ref_count == 0) {
+ err |= flow_flm_destroy_owner(dev, fh->flm_owner);
+ err |= flow_destroy_locked_profile_inline(dev,
+ fh->flm_owner,
+ error);
+ }
+ } else {
+		NT_LOG(DBG, FILTER, "removing flow: %p\n", fh);
+
+ if (fh->fd) {
+ if (fh->fd->km.num_ftype_elem)
+ km_clear_data_match_entry(&fh->fd->km);
+
+ if (fh->fd->jump_to_group != UINT32_MAX) {
+ err |= flm_flow_destroy_group(dev,
+ fh->fd->jump_to_group);
+ flow_group_translate_release(dev->ndev->group_handle,
+ fh->fd->jump_to_group);
+ }
+ }
+
+ for (int res_type = 0; res_type < RES_COUNT; res_type++) {
+ if (fh->resource[res_type].count < 1)
+ continue;
+
+ for (int ii = 0; ii < fh->resource[res_type].count;
+ ii++) {
+ /* If last ref count of this resource, free it */
+ if (flow_nic_deref_resource(dev->ndev,
+ res_type,
+ fh->resource[res_type].index +
+ ii) == 0) {
+ /* Free resource up in NIC */
+ switch (res_type) {
+ case RES_CAT_CFN:
+ assert(ii == 0);
+ err |= reset_cat_function_setup(dev,
+ fh->resource[RES_CAT_CFN]
+ .index + ii);
+ break;
+
+ case RES_QSL_QST:
+ hw_mod_qsl_qst_set(&dev->ndev->be,
+ HW_QSL_QST_PRESET_ALL,
+ fh->resource[RES_QSL_QST]
+ .index + ii,
+ 0);
+ hw_mod_qsl_qst_flush(&dev->ndev->be,
+ fh->resource[RES_QSL_QST]
+ .index + ii,
+ 1);
+ break;
+
+ case RES_QSL_RCP:
+ hw_mod_qsl_rcp_set(&dev->ndev->be,
+ HW_QSL_RCP_PRESET_ALL,
+ fh->resource[RES_QSL_RCP]
+ .index + ii,
+ 0);
+ hw_mod_qsl_rcp_flush(&dev->ndev->be,
+ fh->resource[RES_QSL_RCP]
+ .index + ii,
+ 1);
+ break;
+
+ case RES_CAT_COT:
+ hw_mod_cat_cot_set(&dev->ndev->be,
+ HW_CAT_COT_PRESET_ALL,
+ fh->resource[res_type]
+ .index + ii,
+ 0);
+ hw_mod_cat_cot_flush(&dev->ndev->be,
+ fh->resource[res_type]
+ .index + ii,
+ 1);
+ break;
+
+ case RES_KM_CATEGORY:
+ assert(ii == 0);
+ hw_mod_km_rcp_set(&dev->ndev->be,
+ HW_KM_RCP_PRESET_ALL,
+ fh->resource[res_type]
+ .index + ii,
+ 0, 0);
+ hw_mod_km_rcp_flush(&dev->ndev->be,
+ fh->resource[res_type]
+ .index + ii,
+ 1);
+ break;
+
+				case RES_KM_FLOW_TYPE: {
+					struct flm_flow_ft_ident_s *ft_idents =
+						(struct flm_flow_ft_ident_s *)
+							dev->ndev->ft_res_handle;
+					ft_idents[fh->resource[res_type].index + ii]
+						.data = 0;
+				}
+				break;
+
+ case RES_FLM_RCP:
+ assert(ii == 0);
+ err |= flm_flow_destroy_rcp(dev,
+ fh->resource[res_type]
+ .index + ii);
+ break;
+
+ case RES_FLM_FLOW_TYPE:
+ /* Nothing needed */
+ break;
+
+ case RES_HSH_RCP:
+ hw_mod_hsh_rcp_set(&dev->ndev->be,
+ HW_HSH_RCP_PRESET_ALL,
+ fh->resource[res_type]
+ .index + ii,
+ 0, 0);
+ hw_mod_hsh_rcp_flush(&dev->ndev->be,
+ fh->resource[res_type]
+ .index +
+ ii,
+ 1);
+ break;
+
+ case RES_PDB_RCP:
+ hw_mod_pdb_rcp_set(&dev->ndev->be,
+ HW_PDB_RCP_PRESET_ALL,
+ fh->resource[res_type]
+ .index + ii,
+ 0);
+ hw_mod_pdb_rcp_flush(&dev->ndev->be,
+ fh->resource[res_type]
+ .index + ii,
+ 1);
+ break;
+
+ case RES_HST_RCP:
+ hw_mod_hst_rcp_set(&dev->ndev->be,
+ HW_HST_RCP_PRESET_ALL,
+ fh->resource[res_type]
+ .index + ii,
+ 0);
+ hw_mod_hst_rcp_flush(&dev->ndev->be,
+ fh->resource[res_type]
+ .index + ii,
+ 1);
+ break;
+
+ case RES_TPE_RCP:
+ hw_mod_tpe_rpp_rcp_set(&dev->ndev->be,
+ HW_TPE_PRESET_ALL,
+ fh->resource[res_type]
+ .index + ii,
+ 0);
+ hw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,
+ fh->resource[res_type]
+ .index + ii,
+ 1);
+ hw_mod_tpe_ins_rcp_set(&dev->ndev->be,
+ HW_TPE_PRESET_ALL,
+ fh->resource[res_type]
+ .index + ii,
+ 0);
+ hw_mod_tpe_ins_rcp_flush(&dev->ndev->be,
+ fh->resource[res_type]
+ .index + ii,
+ 1);
+ hw_mod_tpe_rpl_rcp_set(&dev->ndev->be,
+ HW_TPE_PRESET_ALL,
+ fh->resource[res_type]
+ .index + ii,
+ 0);
+ hw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,
+ fh->resource[res_type]
+ .index + ii,
+ 1);
+ hw_mod_tpe_rpl_ext_set(&dev->ndev->be,
+ HW_TPE_PRESET_ALL,
+ fh->resource[res_type]
+ .index + ii,
+ 0);
+ hw_mod_tpe_rpl_ext_flush(&dev->ndev->be,
+ fh->resource[res_type]
+ .index + ii,
+ 1);
+ hw_mod_tpe_cpy_rcp_set(&dev->ndev->be,
+ HW_TPE_PRESET_ALL,
+ fh->resource[res_type]
+ .index + ii,
+ 0);
+ hw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,
+ fh->resource[res_type]
+ .index + ii,
+ 1);
+ hw_mod_tpe_hfu_rcp_set(&dev->ndev->be,
+ HW_TPE_PRESET_ALL,
+ fh->resource[res_type]
+ .index + ii,
+ 0);
+ hw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,
+ fh->resource[res_type]
+ .index + ii,
+ 1);
+ hw_mod_tpe_csu_rcp_set(&dev->ndev->be,
+ HW_TPE_PRESET_ALL,
+ fh->resource[res_type]
+ .index + ii,
+ 0);
+ hw_mod_tpe_csu_rcp_flush(&dev->ndev->be,
+ fh->resource[res_type]
+ .index + ii,
+ 1);
+ break;
+
+ case RES_TPE_EXT:
+ /* Nothing needed */
+ break;
+
+ case RES_TPE_RPL:
+ /* Nothing needed */
+ break;
+
+ default:
+ err |= -1;
+ break;
+ }
+ }
+ }
+ }
+ free(fh->fd);
+ }
+
+ if (err) {
+ NT_LOG(ERR, FILTER, "FAILED removing flow: %p\n", fh);
+ flow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);
+ }
+
+ free(fh);
+
+#ifdef FLOW_DEBUG
+ dev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,
+ FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+ return err;
+}
+
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+ struct flow_handle *flow,
+ struct flow_error *error)
+{
+ int err = 0;
+
+ flow_nic_set_error(ERR_SUCCESS, error);
+
+ pthread_mutex_lock(&dev->ndev->mtx);
+ if (flow) {
+ /* Delete this flow */
+ err = flow_destroy_locked_profile_inline(dev, flow, error);
+ } else {
+ /* Delete all created flows from this eth device */
+ flow = dev->ndev->flow_base;
+
+ while (flow && !err) {
+ if (flow->dev == dev) {
+ struct flow_handle *flow_next = flow->next;
+
+ err = flow_destroy_locked_profile_inline(dev,
+ flow,
+ NULL);
+ flow = flow_next;
+ } else {
+ flow = flow->next;
+ }
+ }
+
+ /* Delete all created FLM flows from this eth device */
+ flow = dev->ndev->flow_base_flm;
+
+ while (flow && !err) {
+ if (flow->dev == dev) {
+ struct flow_handle *flow_next = flow->next;
+
+ err = flow_destroy_locked_profile_inline(dev,
+ flow,
+ NULL);
+ flow = flow_next;
+ } else {
+ flow = flow->next;
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&dev->ndev->mtx);
+
+ return err;
+}
+
+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,
+ struct flow_error *error)
+{
+ NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+ error->type = FLOW_ERROR_GENERAL;
+ error->message = "rte_flow_flush is not supported";
+ return -1;
+}
+
+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,
+ UNUSED struct flow_handle *flow,
+ UNUSED const struct flow_action *action,
+ void **data, uint32_t *length,
+ struct flow_error *error)
+{
+ NT_LOG(ERR, FILTER, "ERROR: Not implemented yet\n");
+
+ *length = 0;
+ *data = NULL;
+ error->type = FLOW_ERROR_GENERAL;
+ error->message = "rte_flow_query is not supported";
+ return -1;
+}
+
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+ uint64_t size)
+{
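+	/*
+	 * HW_FLM_STAT_FLOWS is reported as an absolute value; the remaining counters
+	 * accumulate into the caller's buffer. Counters beyond HW_FLM_STAT_PRB_IGNORE
+	 * require FLM version 18 or newer.
+	 */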
+ const enum hw_flm_e fields[] = {
+ HW_FLM_STAT_FLOWS, HW_FLM_STAT_LRN_DONE,
+ HW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,
+ HW_FLM_STAT_UNL_DONE, HW_FLM_STAT_UNL_IGNORE,
+ HW_FLM_STAT_AUL_DONE, HW_FLM_STAT_AUL_IGNORE,
+ HW_FLM_STAT_AUL_FAIL, HW_FLM_STAT_TUL_DONE,
+ HW_FLM_STAT_REL_DONE, HW_FLM_STAT_REL_IGNORE,
+ HW_FLM_STAT_PRB_DONE, HW_FLM_STAT_PRB_IGNORE,
+
+ HW_FLM_STAT_STA_DONE, HW_FLM_STAT_INF_DONE,
+ HW_FLM_STAT_INF_SKIP, HW_FLM_STAT_PCK_HIT,
+ HW_FLM_STAT_PCK_MISS, HW_FLM_STAT_PCK_UNH,
+ HW_FLM_STAT_PCK_DIS, HW_FLM_STAT_CSH_HIT,
+ HW_FLM_STAT_CSH_MISS, HW_FLM_STAT_CSH_UNH,
+ HW_FLM_STAT_CUC_START, HW_FLM_STAT_CUC_MOVE,
+ };
+
+ const uint64_t fields_cnt = sizeof(fields) / sizeof(enum hw_flm_e);
+
+ if (size < fields_cnt)
+ return -1;
+
+ hw_mod_flm_stat_update(&ndev->be);
+
+ for (uint64_t i = 0; i < fields_cnt; ++i) {
+ uint32_t value = 0;
+
+ hw_mod_flm_stat_get(&ndev->be, fields[i], &value);
+ data[i] = (fields[i] == HW_FLM_STAT_FLOWS) ? value :
+ data[i] + value;
+ if (ndev->be.flm.ver < 18 &&
+ fields[i] == HW_FLM_STAT_PRB_IGNORE)
+ break;
+ }
+
+ return 0;
+}
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)
+{
+ if (port >= 255)
+ return -1;
+
+ int err = 0;
+ uint8_t ifr_mtu_recipe = convert_port_to_ifr_mtu_recipe(port);
+ struct flow_nic_dev *ndev = dev->ndev;
+
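+	/* Enable and program the per-port IFR MTU recipe in both the RPP_IFR and IFR TPE modules */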
+ err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+ ifr_mtu_recipe, 1);
+ err |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+ ifr_mtu_recipe, mtu);
+ err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,
+ ifr_mtu_recipe, 1);
+ err |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,
+ ifr_mtu_recipe, mtu);
+
+ if (err == 0) {
+ err |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe,
+ 1);
+ err |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe, 1);
+ }
+
+ return err;
+}
new file mode 100644
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_PROFILE_INLINE_H_
+#define _FLOW_API_PROFILE_INLINE_H_
+
+#include "stream_binary_flow_api.h"
+#include "flow_api.h"
+
+/*
+ * Management
+ */
+
+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);
+
+/*
+ * Flow functionality
+ */
+
+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,
+ struct flow_handle *flow,
+ struct flow_error *error);
+
+int flow_validate_profile_inline(struct flow_eth_dev *dev,
+ const struct flow_elem elem[],
+ const struct flow_action action[],
+ struct flow_error *error);
+
+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,
+ const struct flow_attr *attr,
+ const struct flow_elem elem[], const struct flow_action action[],
+ struct flow_error *error);
+
+int flow_destroy_profile_inline(struct flow_eth_dev *dev,
+ struct flow_handle *flow,
+ struct flow_error *error);
+
+int flow_flush_profile_inline(struct flow_eth_dev *dev,
+ struct flow_error *error);
+
+int flow_query_profile_inline(struct flow_eth_dev *dev,
+ struct flow_handle *flow,
+ const struct flow_action *action, void **data,
+ uint32_t *length, struct flow_error *error);
+
+/*
+ * Stats
+ */
+
+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,
+ uint64_t size);
+
+#endif /* _FLOW_API_PROFILE_INLINE_H_ */
new file mode 100644
@@ -0,0 +1,3205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+
+#include "flow_nthw_info.h"
+#include "flow_nthw_ifr.h"
+#include "flow_nthw_cat.h"
+#include "flow_nthw_csu.h"
+#include "flow_nthw_km.h"
+#include "flow_nthw_flm.h"
+#include "flow_nthw_hfu.h"
+#include "flow_nthw_hsh.h"
+#include "flow_nthw_hst.h"
+#include "flow_nthw_qsl.h"
+#include "flow_nthw_slc.h"
+#include "flow_nthw_slc_lr.h"
+#include "flow_nthw_pdb.h"
+#include "flow_nthw_ioa.h"
+#include "flow_nthw_rpp_lr.h"
+#include "flow_nthw_roa.h"
+#include "flow_nthw_rmc.h"
+#include "flow_nthw_tx_cpy.h"
+#include "flow_nthw_tx_ins.h"
+#include "flow_nthw_tx_rpl.h"
+#include "flow_backend.h"
+#include "flow_api_backend.h"
+
+#include <stdio.h> /* printf */
+
+#if !defined(MAX_PHYS_ADAPTERS)
+#define MAX_PHYS_ADAPTERS (8)
+#endif
+
+/*
+ * Binary Flow API backend implementation for the ntservice driver
+ *
+ * General note on this backend implementation:
+ * A shadow class could be used to combine multiple writes; however, this backend is only for dev/testing.
+ */
+
+static struct backend_dev_s {
+ uint8_t adapter_no;
+ enum debug_mode_e dmode;
+ struct info_nthw *p_info_nthw;
+ struct cat_nthw *p_cat_nthw;
+ struct km_nthw *p_km_nthw;
+ struct flm_nthw *p_flm_nthw;
+ struct hsh_nthw *p_hsh_nthw;
+ struct hst_nthw *p_hst_nthw;
+ struct qsl_nthw *p_qsl_nthw;
+ struct slc_nthw *p_slc_nthw;
+ struct slc_lr_nthw *p_slc_lr_nthw;
+ struct pdb_nthw *p_pdb_nthw;
+ struct ioa_nthw *p_ioa_nthw;
+ struct roa_nthw *p_roa_nthw;
+ struct rmc_nthw *p_rmc_nthw;
+ struct hfu_nthw *p_hfu_nthw; /* TPE module */
+ struct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */
+ struct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */
+ struct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */
+ struct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */
+ struct csu_nthw *p_csu_nthw; /* TPE module */
+ struct ifr_nthw *p_ifr_nthw; /* TPE module */
+} be_devs[MAX_PHYS_ADAPTERS];
+
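+/* Enable per-module register tracing while the backend is in write-debug mode; _CHECK_DEBUG_OFF restores it after the flush */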
+#define _CHECK_DEBUG_ON(be, mod, inst) \
+ int __debug__ = 0; \
+ if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \
+ do { \
+ mod##_nthw_set_debug_mode(inst, 0xFF); \
+ __debug__ = 1; \
+ } while (0)
+
+#define _CHECK_DEBUG_OFF(mod, inst) \
+ do { \
+ if (__debug__) \
+ mod##_nthw_set_debug_mode(inst, 0); \
+ } while (0)
+
+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ be->dmode = mode;
+ return 0;
+}
+
+/*
+ * ***************** INFO *******************
+ */
+
+static int get_nb_phy_ports(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_phy_ports(be->p_info_nthw);
+}
+
+static int get_nb_rx_ports(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_rx_ports(be->p_info_nthw);
+}
+
+static int get_ltx_avail(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_ltx_avail(be->p_info_nthw);
+}
+
+static int get_nb_cat_funcs(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
+}
+
+static int get_nb_categories(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_categories(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_cnt(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m0(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
+}
+
+static int get_nb_cat_km_if_m1(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
+}
+
+static int get_nb_queues(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_queues(be->p_info_nthw);
+}
+
+static int get_nb_km_flow_types(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
+}
+
+static int get_nb_pm_ext(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_pm_ext(be->p_info_nthw);
+}
+
+static int get_nb_len(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_len(be->p_info_nthw);
+}
+
+static int get_kcc_size(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_kcc_size(be->p_info_nthw);
+}
+
+static int get_kcc_banks(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_kcc_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_categories(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_km_categories(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_banks(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_record_words(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
+}
+
+static int get_nb_km_cam_records(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_banks(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
+}
+
+static int get_nb_km_tcam_bank_width(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
+}
+
+static int get_nb_flm_categories(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_flm_categories(be->p_info_nthw);
+}
+
+static int get_nb_flm_size_mb(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
+}
+
+static int get_nb_flm_entry_size(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
+}
+
+static int get_nb_flm_variant(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_flm_variant(be->p_info_nthw);
+}
+
+static int get_nb_flm_prios(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_flm_prios(be->p_info_nthw);
+}
+
+static int get_nb_flm_pst_profiles(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
+}
+
+static int get_nb_hst_categories(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_hst_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_categories(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
+}
+
+static int get_nb_qsl_qst_entries(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
+}
+
+static int get_nb_pdb_categories(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
+}
+
+static int get_nb_ioa_categories(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_ioa_categories(be->p_info_nthw);
+}
+
+static int get_nb_roa_categories(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_roa_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_categories(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_writers(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
+}
+
+static int get_nb_tx_cpy_mask_mem(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_depth(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
+}
+
+static int get_nb_tx_rpl_ext_categories(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
+}
+
+static int get_nb_tpe_ifr_categories(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
+}
+
+/*
+ * ***************** CAT *******************
+ */
+
+static bool cat_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return be->p_cat_nthw != NULL;
+}
+
+static uint32_t cat_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return (uint32_t)((module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
+ (module_get_minor_version(be->p_cat_nthw->m_cat) &
+ 0xffff));
+}
+
+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,
+ int cat_func, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
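+	/* Write the full CFN record for each category function and flush it; the field layout depends on the CAT module version */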
+ if (cat->ver == 18) {
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+ cat_nthw_cfn_enable(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].enable);
+ cat_nthw_cfn_inv(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].inv);
+ cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_inv);
+ cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_isl);
+ cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_cfp);
+ cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_mac);
+ cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_l2);
+ cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_vntag);
+ cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_vlan);
+ cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_mpls);
+ cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_l3);
+ cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_frag);
+ cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_ip_prot);
+ cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_l4);
+ cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_tunnel);
+ cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_tnl_l2);
+ cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_tnl_vlan);
+ cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_tnl_mpls);
+ cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_tnl_l3);
+ cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_tnl_frag);
+ cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
+ cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].ptc_tnl_l4);
+
+ cat_nthw_cfn_err_inv(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].err_inv);
+ cat_nthw_cfn_err_cv(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].err_cv);
+ cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].err_fcs);
+ cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].err_trunc);
+ cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].err_l3_cs);
+ cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].err_l4_cs);
+
+ cat_nthw_cfn_mac_port(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].mac_port);
+
+ cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].pm_cmp);
+ cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].pm_dct);
+ cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].pm_ext_inv);
+ cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].pm_cmb);
+ cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].pm_and_inv);
+ cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].pm_or_inv);
+ cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].pm_inv);
+
+ cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
+ cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].lc_inv);
+ cat_nthw_cfn_km0_or(be->p_cat_nthw,
+ cat->v18.cfn[cat_func].km_or);
+ cat_nthw_cfn_flush(be->p_cat_nthw);
+ cat_func++;
+ }
+ } else if (cat->ver == 21 || cat->ver == 22) {
+		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
+ cat_nthw_cfn_enable(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].enable);
+ cat_nthw_cfn_inv(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].inv);
+ cat_nthw_cfn_ptc_inv(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_inv);
+ cat_nthw_cfn_ptc_isl(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_isl);
+ cat_nthw_cfn_ptc_cfp(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_cfp);
+ cat_nthw_cfn_ptc_mac(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_mac);
+ cat_nthw_cfn_ptc_l2(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_l2);
+ cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_vntag);
+ cat_nthw_cfn_ptc_vlan(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_vlan);
+ cat_nthw_cfn_ptc_mpls(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_mpls);
+ cat_nthw_cfn_ptc_l3(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_l3);
+ cat_nthw_cfn_ptc_frag(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_frag);
+ cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_ip_prot);
+ cat_nthw_cfn_ptc_l4(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_l4);
+ cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_tunnel);
+ cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_tnl_l2);
+ cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_tnl_vlan);
+ cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_tnl_mpls);
+ cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_tnl_l3);
+ cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_tnl_frag);
+ cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
+ cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].ptc_tnl_l4);
+
+ cat_nthw_cfn_err_inv(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].err_inv);
+ cat_nthw_cfn_err_cv(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].err_cv);
+ cat_nthw_cfn_err_fcs(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].err_fcs);
+ cat_nthw_cfn_err_trunc(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].err_trunc);
+ cat_nthw_cfn_err_l3_cs(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].err_l3_cs);
+ cat_nthw_cfn_err_l4_cs(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].err_l4_cs);
+ cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].err_tnl_l3_cs);
+ cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].err_tnl_l4_cs);
+ cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].err_ttl_exp);
+ cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].err_tnl_ttl_exp);
+
+ cat_nthw_cfn_mac_port(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].mac_port);
+
+ cat_nthw_cfn_pm_cmp(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].pm_cmp);
+ cat_nthw_cfn_pm_dct(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].pm_dct);
+ cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].pm_ext_inv);
+ cat_nthw_cfn_pm_cmb(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].pm_cmb);
+ cat_nthw_cfn_pm_and_inv(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].pm_and_inv);
+ cat_nthw_cfn_pm_or_inv(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].pm_or_inv);
+ cat_nthw_cfn_pm_inv(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].pm_inv);
+
+ cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
+ cat_nthw_cfn_lc_inv(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].lc_inv);
+ cat_nthw_cfn_km0_or(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].km0_or);
+ if (be->p_cat_nthw->m_km_if_cnt > 1) {
+ cat_nthw_cfn_km1_or(be->p_cat_nthw,
+ cat->v21.cfn[cat_func].km1_or);
+ }
+ cat_nthw_cfn_flush(be->p_cat_nthw);
+ cat_func++;
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,
+ int km_if_idx, int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+ if (cat->ver == 18) {
+ cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
+ cat_nthw_kce_enable(be->p_cat_nthw, 0,
+ cat->v18.kce[index + i].enable_bm);
+ cat_nthw_kce_flush(be->p_cat_nthw, 0);
+ }
+ } else if (cat->ver == 21 || cat->ver == 22) {
+ cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
+ cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
+ cat->v21.kce[index + i].enable_bm[km_if_idx]);
+ cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,
+ int km_if_idx, int cat_func, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+ if (cat->ver == 18) {
+ cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
+ cat_nthw_kcs_category(be->p_cat_nthw, 0,
+ cat->v18.kcs[cat_func].category);
+ cat_nthw_kcs_flush(be->p_cat_nthw, 0);
+ cat_func++;
+ }
+ } else if (cat->ver == 21 || cat->ver == 22) {
+ cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
+ cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
+ cat->v21.kcs[cat_func].category[km_if_idx]);
+ cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
+ cat_func++;
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,
+ int km_if_idx, int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+ if (cat->ver == 18) {
+ cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
+ cat_nthw_fte_enable(be->p_cat_nthw, 0,
+ cat->v18.fte[index + i].enable_bm);
+ cat_nthw_fte_flush(be->p_cat_nthw, 0);
+ }
+ } else if (cat->ver == 21 || cat->ver == 22) {
+ cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
+ cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
+ cat->v21.fte[index + i].enable_bm[km_if_idx]);
+ cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,
+ int cat_func, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+ if (cat->ver == 18 || cat->ver == 21) {
+ cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+ cat_nthw_cte_enable_col(be->p_cat_nthw,
+ cat->v18.cte[cat_func].b.col);
+ cat_nthw_cte_enable_cor(be->p_cat_nthw,
+ cat->v18.cte[cat_func].b.cor);
+ cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+ cat->v18.cte[cat_func].b.hsh);
+ cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+ cat->v18.cte[cat_func].b.qsl);
+ cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+ cat->v18.cte[cat_func].b.ipf);
+ cat_nthw_cte_enable_slc(be->p_cat_nthw,
+ cat->v18.cte[cat_func].b.slc);
+ cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+ cat->v18.cte[cat_func].b.pdb);
+ cat_nthw_cte_enable_msk(be->p_cat_nthw,
+ cat->v18.cte[cat_func].b.msk);
+ cat_nthw_cte_enable_hst(be->p_cat_nthw,
+ cat->v18.cte[cat_func].b.hst);
+ cat_nthw_cte_enable_epp(be->p_cat_nthw,
+ cat->v18.cte[cat_func].b.epp);
+ cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+ cat->v18.cte[cat_func].b.tpe);
+
+ cat_nthw_cte_flush(be->p_cat_nthw);
+ cat_func++;
+ }
+ } else if (cat->ver == 22) {
+ cat_nthw_cte_cnt(be->p_cat_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_cte_select(be->p_cat_nthw, cat_func);
+ cat_nthw_cte_enable_col(be->p_cat_nthw,
+ cat->v22.cte[cat_func].b.col);
+ cat_nthw_cte_enable_cor(be->p_cat_nthw,
+ cat->v22.cte[cat_func].b.cor);
+ cat_nthw_cte_enable_hsh(be->p_cat_nthw,
+ cat->v22.cte[cat_func].b.hsh);
+ cat_nthw_cte_enable_qsl(be->p_cat_nthw,
+ cat->v22.cte[cat_func].b.qsl);
+ cat_nthw_cte_enable_ipf(be->p_cat_nthw,
+ cat->v22.cte[cat_func].b.ipf);
+ cat_nthw_cte_enable_slc(be->p_cat_nthw,
+ cat->v22.cte[cat_func].b.slc);
+ cat_nthw_cte_enable_pdb(be->p_cat_nthw,
+ cat->v22.cte[cat_func].b.pdb);
+ cat_nthw_cte_enable_msk(be->p_cat_nthw,
+ cat->v22.cte[cat_func].b.msk);
+ cat_nthw_cte_enable_hst(be->p_cat_nthw,
+ cat->v22.cte[cat_func].b.hst);
+ cat_nthw_cte_enable_epp(be->p_cat_nthw,
+ cat->v22.cte[cat_func].b.epp);
+ cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+ cat->v22.cte[cat_func].b.tpe);
+ cat_nthw_cte_enable_tpe(be->p_cat_nthw,
+ cat->v22.cte[cat_func].b.rrb);
+
+ cat_nthw_cte_flush(be->p_cat_nthw);
+ cat_func++;
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,
+ int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+ if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+ cat_nthw_cts_cnt(be->p_cat_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_cts_select(be->p_cat_nthw, index + i);
+ cat_nthw_cts_cat_a(be->p_cat_nthw,
+ cat->v18.cts[index + i].cat_a);
+ cat_nthw_cts_cat_b(be->p_cat_nthw,
+ cat->v18.cts[index + i].cat_b);
+ cat_nthw_cts_flush(be->p_cat_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,
+ int cat_func, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+ if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+ cat_nthw_cot_cnt(be->p_cat_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
+ cat_nthw_cot_color(be->p_cat_nthw,
+ cat->v18.cot[cat_func + i].color);
+ cat_nthw_cot_km(be->p_cat_nthw,
+ cat->v18.cot[cat_func + i].km);
+ cat_nthw_cot_flush(be->p_cat_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,
+ int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+ if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+ cat_nthw_cct_cnt(be->p_cat_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_cct_select(be->p_cat_nthw, index + i);
+ cat_nthw_cct_color(be->p_cat_nthw,
+ cat->v18.cct[index + i].color);
+ cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
+ cat_nthw_cct_flush(be->p_cat_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,
+ int ext_index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+ if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+ cat_nthw_exo_cnt(be->p_cat_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
+ cat_nthw_exo_dyn(be->p_cat_nthw,
+ cat->v18.exo[ext_index + i].dyn);
+ cat_nthw_exo_ofs(be->p_cat_nthw,
+ cat->v18.exo[ext_index + i].ofs);
+ cat_nthw_exo_flush(be->p_cat_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,
+ int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+ if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+ cat_nthw_rck_cnt(be->p_cat_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_rck_select(be->p_cat_nthw, index + i);
+ cat_nthw_rck_data(be->p_cat_nthw,
+ cat->v18.rck[index + i].rck_data);
+ cat_nthw_rck_flush(be->p_cat_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,
+ int len_index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+ if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+ cat_nthw_len_cnt(be->p_cat_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_len_select(be->p_cat_nthw, len_index + i);
+ cat_nthw_len_lower(be->p_cat_nthw,
+ cat->v18.len[len_index + i].lower);
+ cat_nthw_len_upper(be->p_cat_nthw,
+ cat->v18.len[len_index + i].upper);
+ cat_nthw_len_dyn1(be->p_cat_nthw,
+ cat->v18.len[len_index + i].dyn1);
+ cat_nthw_len_dyn2(be->p_cat_nthw,
+ cat->v18.len[len_index + i].dyn2);
+ cat_nthw_len_inv(be->p_cat_nthw,
+ cat->v18.len[len_index + i].inv);
+ cat_nthw_len_flush(be->p_cat_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,
+ int len_index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+ if (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {
+ cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
+ cat_nthw_kcc_key(be->p_cat_nthw,
+ cat->v18.kcc_cam[len_index + i].key);
+ cat_nthw_kcc_category(be->p_cat_nthw,
+ cat->v18.kcc_cam[len_index + i].category);
+ cat_nthw_kcc_id(be->p_cat_nthw,
+ cat->v18.kcc_cam[len_index + i].id);
+ cat_nthw_kcc_flush(be->p_cat_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,
+ int len_index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+ if (cat->ver == 22) {
+ cat_nthw_cce_cnt(be->p_cat_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_cce_select(be->p_cat_nthw, len_index + i);
+ cat_nthw_cce_data_imm(be->p_cat_nthw,
+ cat->v22.cce[len_index + i].imm);
+ cat_nthw_cce_data_ind(be->p_cat_nthw,
+ cat->v22.cce[len_index + i].ind);
+ cat_nthw_cce_flush(be->p_cat_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,
+ int len_index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
+
+ if (cat->ver == 22) {
+ cat_nthw_ccs_cnt(be->p_cat_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ cat_nthw_ccs_select(be->p_cat_nthw, len_index + i);
+ cat_nthw_ccs_data_cor_en(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].cor_en);
+ cat_nthw_ccs_data_cor(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].cor);
+ cat_nthw_ccs_data_hsh_en(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].hsh_en);
+ cat_nthw_ccs_data_hsh(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].hsh);
+ cat_nthw_ccs_data_qsl_en(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].qsl_en);
+ cat_nthw_ccs_data_qsl(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].qsl);
+ cat_nthw_ccs_data_ipf_en(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].ipf_en);
+ cat_nthw_ccs_data_ipf(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].ipf);
+ cat_nthw_ccs_data_slc_en(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].slc_en);
+ cat_nthw_ccs_data_slc(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].slc);
+ cat_nthw_ccs_data_pdb_en(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].pdb_en);
+ cat_nthw_ccs_data_pdb(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].pdb);
+ cat_nthw_ccs_data_msk_en(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].msk_en);
+ cat_nthw_ccs_data_msk(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].msk);
+ cat_nthw_ccs_data_hst_en(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].hst_en);
+ cat_nthw_ccs_data_hst(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].hst);
+ cat_nthw_ccs_data_epp_en(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].epp_en);
+ cat_nthw_ccs_data_epp(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].epp);
+ cat_nthw_ccs_data_tpe_en(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].tpe_en);
+ cat_nthw_ccs_data_tpe(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].tpe);
+ cat_nthw_ccs_data_rrb_en(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].rrb_en);
+ cat_nthw_ccs_data_rrb(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].rrb);
+ cat_nthw_ccs_data_sb0_type(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].sb0_type);
+ cat_nthw_ccs_data_sb0_data(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].sb0_data);
+ cat_nthw_ccs_data_sb1_type(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].sb1_type);
+ cat_nthw_ccs_data_sb1_data(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].sb1_data);
+ cat_nthw_ccs_data_sb2_type(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].sb2_type);
+ cat_nthw_ccs_data_sb2_data(be->p_cat_nthw,
+ cat->v22.ccs[len_index + i].sb2_data);
+ cat_nthw_ccs_flush(be->p_cat_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
+ return 0;
+}
+
+/*
+ * ***************** KM *******************
+ */
+
+static bool km_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return be->p_km_nthw != NULL;
+}
+
+static uint32_t km_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return (uint32_t)((module_get_major_version(be->p_km_nthw->m_km) << 16) |
+ (module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
+}
+
+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,
+ int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+ if (km->ver == 7) {
+ km_nthw_rcp_cnt(be->p_km_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ km_nthw_rcp_select(be->p_km_nthw, category + i);
+ km_nthw_rcp_qw0_dyn(be->p_km_nthw,
+ km->v7.rcp[category + i].qw0_dyn);
+ km_nthw_rcp_qw0_ofs(be->p_km_nthw,
+ km->v7.rcp[category + i].qw0_ofs);
+ km_nthw_rcp_qw0_sel_a(be->p_km_nthw,
+ km->v7.rcp[category + i].qw0_sel_a);
+ km_nthw_rcp_qw0_sel_b(be->p_km_nthw,
+ km->v7.rcp[category + i].qw0_sel_b);
+ km_nthw_rcp_qw4_dyn(be->p_km_nthw,
+ km->v7.rcp[category + i].qw4_dyn);
+ km_nthw_rcp_qw4_ofs(be->p_km_nthw,
+ km->v7.rcp[category + i].qw4_ofs);
+ km_nthw_rcp_qw4_sel_a(be->p_km_nthw,
+ km->v7.rcp[category + i].qw4_sel_a);
+ km_nthw_rcp_qw4_sel_b(be->p_km_nthw,
+ km->v7.rcp[category + i].qw4_sel_b);
+ km_nthw_rcp_dw8_dyn(be->p_km_nthw,
+ km->v7.rcp[category + i].dw8_dyn);
+ km_nthw_rcp_dw8_ofs(be->p_km_nthw,
+ km->v7.rcp[category + i].dw8_ofs);
+ km_nthw_rcp_dw8_sel_a(be->p_km_nthw,
+ km->v7.rcp[category + i].dw8_sel_a);
+ km_nthw_rcp_dw8_sel_b(be->p_km_nthw,
+ km->v7.rcp[category + i].dw8_sel_b);
+ km_nthw_rcp_dw10_dyn(be->p_km_nthw,
+ km->v7.rcp[category + i].dw10_dyn);
+ km_nthw_rcp_dw10_ofs(be->p_km_nthw,
+ km->v7.rcp[category + i].dw10_ofs);
+ km_nthw_rcp_dw10_sel_a(be->p_km_nthw,
+ km->v7.rcp[category + i].dw10_sel_a);
+ km_nthw_rcp_dw10_sel_b(be->p_km_nthw,
+ km->v7.rcp[category + i].dw10_sel_b);
+ km_nthw_rcp_swx_cch(be->p_km_nthw,
+ km->v7.rcp[category + i].swx_cch);
+ km_nthw_rcp_swx_sel_a(be->p_km_nthw,
+ km->v7.rcp[category + i].swx_sel_a);
+ km_nthw_rcp_swx_sel_b(be->p_km_nthw,
+ km->v7.rcp[category + i].swx_sel_b);
+ km_nthw_rcp_mask_d_a(be->p_km_nthw,
+ km->v7.rcp[category + i].mask_d_a);
+ km_nthw_rcp_mask_b(be->p_km_nthw,
+ km->v7.rcp[category + i].mask_b);
+ km_nthw_rcp_dual(be->p_km_nthw,
+ km->v7.rcp[category + i].dual);
+ km_nthw_rcp_paired(be->p_km_nthw,
+ km->v7.rcp[category + i].paired);
+ km_nthw_rcp_el_a(be->p_km_nthw,
+ km->v7.rcp[category + i].el_a);
+ km_nthw_rcp_el_b(be->p_km_nthw,
+ km->v7.rcp[category + i].el_b);
+ km_nthw_rcp_info_a(be->p_km_nthw,
+ km->v7.rcp[category + i].info_a);
+ km_nthw_rcp_info_b(be->p_km_nthw,
+ km->v7.rcp[category + i].info_b);
+ km_nthw_rcp_ftm_a(be->p_km_nthw,
+ km->v7.rcp[category + i].ftm_a);
+ km_nthw_rcp_ftm_b(be->p_km_nthw,
+ km->v7.rcp[category + i].ftm_b);
+ km_nthw_rcp_bank_a(be->p_km_nthw,
+ km->v7.rcp[category + i].bank_a);
+ km_nthw_rcp_bank_b(be->p_km_nthw,
+ km->v7.rcp[category + i].bank_b);
+ km_nthw_rcp_kl_a(be->p_km_nthw,
+ km->v7.rcp[category + i].kl_a);
+ km_nthw_rcp_kl_b(be->p_km_nthw,
+ km->v7.rcp[category + i].kl_b);
+ km_nthw_rcp_keyway_a(be->p_km_nthw,
+ km->v7.rcp[category + i].keyway_a);
+ km_nthw_rcp_keyway_b(be->p_km_nthw,
+ km->v7.rcp[category + i].keyway_b);
+ km_nthw_rcp_synergy_mode(be->p_km_nthw,
+ km->v7.rcp[category + i].synergy_mode);
+ km_nthw_rcp_dw0_b_dyn(be->p_km_nthw,
+ km->v7.rcp[category + i].dw0_b_dyn);
+ km_nthw_rcp_dw0_b_ofs(be->p_km_nthw,
+ km->v7.rcp[category + i].dw0_b_ofs);
+ km_nthw_rcp_dw2_b_dyn(be->p_km_nthw,
+ km->v7.rcp[category + i].dw2_b_dyn);
+ km_nthw_rcp_dw2_b_ofs(be->p_km_nthw,
+ km->v7.rcp[category + i].dw2_b_ofs);
+ km_nthw_rcp_sw4_b_dyn(be->p_km_nthw,
+ km->v7.rcp[category + i].sw4_b_dyn);
+ km_nthw_rcp_sw4_b_ofs(be->p_km_nthw,
+ km->v7.rcp[category + i].sw4_b_ofs);
+ km_nthw_rcp_sw5_b_dyn(be->p_km_nthw,
+ km->v7.rcp[category + i].sw5_b_dyn);
+ km_nthw_rcp_sw5_b_ofs(be->p_km_nthw,
+ km->v7.rcp[category + i].sw5_b_ofs);
+ km_nthw_rcp_flush(be->p_km_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(km, be->p_km_nthw);
+ return 0;
+}
+
+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank,
+ int record, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+ if (km->ver == 7) {
+ km_nthw_cam_cnt(be->p_km_nthw, 1);
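+		/* CAM entries are addressed as (bank << 11) + record, i.e. 2048 records per bank */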
+ for (int i = 0; i < cnt; i++) {
+ km_nthw_cam_select(be->p_km_nthw,
+ (bank << 11) + record + i);
+ km_nthw_cam_w0(be->p_km_nthw,
+ km->v7.cam[(bank << 11) + record + i].w0);
+ km_nthw_cam_w1(be->p_km_nthw,
+ km->v7.cam[(bank << 11) + record + i].w1);
+ km_nthw_cam_w2(be->p_km_nthw,
+ km->v7.cam[(bank << 11) + record + i].w2);
+ km_nthw_cam_w3(be->p_km_nthw,
+ km->v7.cam[(bank << 11) + record + i].w3);
+ km_nthw_cam_w4(be->p_km_nthw,
+ km->v7.cam[(bank << 11) + record + i].w4);
+ km_nthw_cam_w5(be->p_km_nthw,
+ km->v7.cam[(bank << 11) + record + i].w5);
+ km_nthw_cam_ft0(be->p_km_nthw,
+ km->v7.cam[(bank << 11) + record + i].ft0);
+ km_nthw_cam_ft1(be->p_km_nthw,
+ km->v7.cam[(bank << 11) + record + i].ft1);
+ km_nthw_cam_ft2(be->p_km_nthw,
+ km->v7.cam[(bank << 11) + record + i].ft2);
+ km_nthw_cam_ft3(be->p_km_nthw,
+ km->v7.cam[(bank << 11) + record + i].ft3);
+ km_nthw_cam_ft4(be->p_km_nthw,
+ km->v7.cam[(bank << 11) + record + i].ft4);
+ km_nthw_cam_ft5(be->p_km_nthw,
+ km->v7.cam[(bank << 11) + record + i].ft5);
+ km_nthw_cam_flush(be->p_km_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(km, be->p_km_nthw);
+ return 0;
+}
+
+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,
+ int byte, int value, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+ if (km->ver == 7) {
+ int start_idx = bank * 4 * 256 + byte * 256 + value;
+
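+		/* TCAM space is 4 bytes x 256 values per bank; only entries marked dirty are flushed, and the dirty flag is then cleared */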
+ km_nthw_tcam_cnt(be->p_km_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ if (km->v7.tcam[start_idx + i].dirty) {
+ km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
+ km_nthw_tcam_t(be->p_km_nthw,
+ km->v7.tcam[start_idx + i].t);
+ km_nthw_tcam_flush(be->p_km_nthw);
+ km->v7.tcam[start_idx + i].dirty = 0;
+ }
+ }
+ }
+
+ _CHECK_DEBUG_OFF(km, be->p_km_nthw);
+ return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,
+ int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+ if (km->ver == 7) {
+		/* TCAM bank width = 72 */
+ km_nthw_tci_cnt(be->p_km_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
+ km_nthw_tci_color(be->p_km_nthw,
+ km->v7.tci[bank * 72 + index + i].color);
+ km_nthw_tci_ft(be->p_km_nthw,
+ km->v7.tci[bank * 72 + index + i].ft);
+ km_nthw_tci_flush(be->p_km_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(km, be->p_km_nthw);
+ return 0;
+}
+
+/*
+ * bank is the TCAM bank, index is the index within the bank (0..71)
+ */
+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,
+ int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, km, be->p_km_nthw);
+
+ if (km->ver == 7) {
+		/* TCAM bank width = 72 */
+ km_nthw_tcq_cnt(be->p_km_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+			/* adr = lower 4 bits = bank, upper 7 bits = index */
+ km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
+ km_nthw_tcq_bank_mask(be->p_km_nthw,
+ km->v7.tcq[bank + (index << 4) + i].bank_mask);
+ km_nthw_tcq_qual(be->p_km_nthw,
+ km->v7.tcq[bank + (index << 4) + i].qual);
+ km_nthw_tcq_flush(be->p_km_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(km, be->p_km_nthw);
+ return 0;
+}
+
+/*
+ * ***************** FLM *******************
+ */
+
+static bool flm_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return be->p_flm_nthw != NULL;
+}
+
+static uint32_t flm_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return (uint32_t)((module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
+ (module_get_minor_version(be->p_flm_nthw->m_flm) &
+ 0xffff));
+}
+
+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+ flm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);
+ flm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);
+ flm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);
+ flm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);
+ flm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);
+ flm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);
+ flm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);
+ flm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);
+ flm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);
+ flm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);
+ flm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);
+ flm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);
+ flm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);
+ flm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);
+ flm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);
+ flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
+ flm->v17.control->split_sdram_usage);
+ flm_nthw_control_flush(be->p_flm_nthw);
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+		/* CALIBDONE, INITDONE, IDLE, and EFT_BP are read-only */
+ flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+ 0);
+ flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);
+ flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);
+ flm_nthw_status_flush(be->p_flm_nthw);
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+ flm_nthw_status_update(be->p_flm_nthw);
+ flm_nthw_status_calibdone(be->p_flm_nthw,
+ &flm->v17.status->calibdone, 1);
+ flm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone,
+ 1);
+ flm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);
+ flm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,
+ 1);
+ flm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);
+ flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);
+ flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+ flm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);
+ flm_nthw_timeout_flush(be->p_flm_nthw);
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+ flm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);
+ flm_nthw_scrub_flush(be->p_flm_nthw);
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+ flm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);
+ flm_nthw_load_bin_flush(be->p_flm_nthw);
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+ flm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);
+ flm_nthw_load_pps_flush(be->p_flm_nthw);
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+ flm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);
+ flm_nthw_load_lps_flush(be->p_flm_nthw);
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+ flm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);
+ flm_nthw_load_aps_flush(be->p_flm_nthw);
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+ flm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);
+ flm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);
+ flm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);
+ flm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);
+ flm_nthw_prio_limit2(be->p_flm_nthw, flm->v17.prio->limit2);
+ flm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);
+ flm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);
+ flm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);
+ flm_nthw_prio_flush(be->p_flm_nthw);
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,
+ int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+ flm_nthw_pst_cnt(be->p_flm_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ flm_nthw_pst_select(be->p_flm_nthw, index + i);
+ flm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[index + i].bp);
+ flm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[index + i].pp);
+ flm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[index + i].tp);
+ flm_nthw_pst_flush(be->p_flm_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,
+ int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+ flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ flm_nthw_rcp_select(be->p_flm_nthw, index + i);
+ flm_nthw_rcp_lookup(be->p_flm_nthw,
+ flm->v17.rcp[index + i].lookup);
+ flm_nthw_rcp_qw0_dyn(be->p_flm_nthw,
+ flm->v17.rcp[index + i].qw0_dyn);
+ flm_nthw_rcp_qw0_ofs(be->p_flm_nthw,
+ flm->v17.rcp[index + i].qw0_ofs);
+ flm_nthw_rcp_qw0_sel(be->p_flm_nthw,
+ flm->v17.rcp[index + i].qw0_sel);
+ flm_nthw_rcp_qw4_dyn(be->p_flm_nthw,
+ flm->v17.rcp[index + i].qw4_dyn);
+ flm_nthw_rcp_qw4_ofs(be->p_flm_nthw,
+ flm->v17.rcp[index + i].qw4_ofs);
+ flm_nthw_rcp_sw8_dyn(be->p_flm_nthw,
+ flm->v17.rcp[index + i].sw8_dyn);
+ flm_nthw_rcp_sw8_ofs(be->p_flm_nthw,
+ flm->v17.rcp[index + i].sw8_ofs);
+ flm_nthw_rcp_sw8_sel(be->p_flm_nthw,
+ flm->v17.rcp[index + i].sw8_sel);
+ flm_nthw_rcp_sw9_dyn(be->p_flm_nthw,
+ flm->v17.rcp[index + i].sw9_dyn);
+ flm_nthw_rcp_sw9_ofs(be->p_flm_nthw,
+ flm->v17.rcp[index + i].sw9_ofs);
+ flm_nthw_rcp_mask(be->p_flm_nthw,
+ flm->v17.rcp[index + i].mask);
+ flm_nthw_rcp_kid(be->p_flm_nthw,
+ flm->v17.rcp[index + i].kid);
+ flm_nthw_rcp_opn(be->p_flm_nthw,
+ flm->v17.rcp[index + i].opn);
+ flm_nthw_rcp_ipn(be->p_flm_nthw,
+ flm->v17.rcp[index + i].ipn);
+ flm_nthw_rcp_byt_dyn(be->p_flm_nthw,
+ flm->v17.rcp[index + i].byt_dyn);
+ flm_nthw_rcp_byt_ofs(be->p_flm_nthw,
+ flm->v17.rcp[index + i].byt_ofs);
+ flm_nthw_rcp_txplm(be->p_flm_nthw,
+ flm->v17.rcp[index + i].txplm);
+ flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
+ flm->v17.rcp[index + i].auto_ipv4_mask);
+ flm_nthw_rcp_flush(be->p_flm_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+ flm_nthw_buf_ctrl_update(be->p_flm_nthw,
+ &flm->v17.buf_ctrl->lrn_free,
+ &flm->v17.buf_ctrl->inf_avail,
+ &flm->v17.buf_ctrl->sta_avail);
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ if (flm->ver >= 17) {
+ flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
+ flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
+ flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
+ flm_nthw_stat_unl_done_update(be->p_flm_nthw);
+ flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
+ flm_nthw_stat_rel_done_update(be->p_flm_nthw);
+ flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
+ flm_nthw_stat_aul_done_update(be->p_flm_nthw);
+ flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
+ flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
+ flm_nthw_stat_tul_done_update(be->p_flm_nthw);
+ flm_nthw_stat_flows_update(be->p_flm_nthw);
+
+ flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,
+ 1);
+ flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,
+ &flm->v17.lrn_ignore->cnt, 1);
+ flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,
+ 1);
+ flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,
+ 1);
+ flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,
+ &flm->v17.unl_ignore->cnt, 1);
+ flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,
+ 1);
+ flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,
+ &flm->v17.rel_ignore->cnt, 1);
+ flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,
+ 1);
+ flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,
+ &flm->v17.aul_ignore->cnt, 1);
+ flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,
+ 1);
+ flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,
+ 1);
+ flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);
+
+ flm_nthw_stat_prb_done_update(be->p_flm_nthw);
+ flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
+ flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,
+ 1);
+ flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,
+ &flm->v17.prb_ignore->cnt, 1);
+ }
+ if (flm->ver >= 20) {
+ flm_nthw_stat_sta_done_update(be->p_flm_nthw);
+ flm_nthw_stat_inf_done_update(be->p_flm_nthw);
+ flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
+ flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
+ flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
+ flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
+ flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
+ flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
+ flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
+ flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
+ flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
+ flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
+
+ flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,
+ 1);
+ flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,
+ 1);
+ flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,
+ 1);
+ flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v20.pck_hit->cnt, 1);
+ flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,
+ 1);
+ flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);
+ flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);
+ flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);
+ flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,
+ 1);
+ flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);
+ flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,
+ 1);
+ flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,
+ 1);
+ }
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return 0;
+}
+
+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,
+ const uint32_t *lrn_data, uint32_t size)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,
+ &flm->v17.buf_ctrl->lrn_free,
+ &flm->v17.buf_ctrl->inf_avail,
+ &flm->v17.buf_ctrl->sta_avail);
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return ret;
+}
+
+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,
+ uint32_t *inf_data, uint32_t size)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ int ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,
+ &flm->v17.buf_ctrl->lrn_free,
+ &flm->v17.buf_ctrl->inf_avail,
+ &flm->v17.buf_ctrl->sta_avail);
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return ret;
+}
+
+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,
+ uint32_t *sta_data, uint32_t size)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
+
+ int ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,
+ &flm->v17.buf_ctrl->lrn_free,
+ &flm->v17.buf_ctrl->inf_avail,
+ &flm->v17.buf_ctrl->sta_avail);
+
+ _CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
+ return ret;
+}
+
+/*
+ * ***************** HSH *******************
+ */
+
+static bool hsh_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return be->p_hsh_nthw != NULL;
+}
+
+static uint32_t hsh_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return (uint32_t)((module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |
+ (module_get_minor_version(be->p_hsh_nthw->m_hsh) &
+ 0xffff));
+}
+
+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,
+ int category, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
+
+ if (hsh->ver == 5) {
+ hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
+ hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].load_dist_type);
+ hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].mac_port_mask);
+ hsh_nthw_rcp_sort(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].sort);
+ hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].qw0_pe);
+ hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].qw0_ofs);
+ hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].qw4_pe);
+ hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].qw4_ofs);
+ hsh_nthw_rcp_w8_pe(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].w8_pe);
+ hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].w8_ofs);
+ hsh_nthw_rcp_w8_sort(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].w8_sort);
+ hsh_nthw_rcp_w9_pe(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].w9_pe);
+ hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].w9_ofs);
+ hsh_nthw_rcp_w9_sort(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].w9_sort);
+ hsh_nthw_rcp_w9_p(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].w9_p);
+ hsh_nthw_rcp_p_mask(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].p_mask);
+ hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].word_mask);
+ hsh_nthw_rcp_seed(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].seed);
+ hsh_nthw_rcp_tnl_p(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].tnl_p);
+ hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].hsh_valid);
+ hsh_nthw_rcp_hsh_type(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].hsh_type);
+ hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
+ hsh->v5.rcp[category + i].auto_ipv4_mask);
+ hsh_nthw_rcp_flush(be->p_hsh_nthw);
+ }
+ }
+ _CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
+ return 0;
+}
+
+/*
+ * ***************** HST *******************
+ */
+
+static bool hst_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return be->p_hst_nthw != NULL;
+}
+
+static uint32_t hst_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return (uint32_t)((module_get_major_version(be->p_hst_nthw->m_hst) << 16) |
+ (module_get_minor_version(be->p_hst_nthw->m_hst) &
+ 0xffff));
+}
+
+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,
+ int category, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);
+
+ if (hst->ver == 2) {
+ hst_nthw_rcp_cnt(be->p_hst_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ hst_nthw_rcp_select(be->p_hst_nthw, category + i);
+ hst_nthw_rcp_strip_mode(be->p_hst_nthw,
+ hst->v2.rcp[category + i].strip_mode);
+ hst_nthw_rcp_start_dyn(be->p_hst_nthw,
+ hst->v2.rcp[category + i].start_dyn);
+ hst_nthw_rcp_start_ofs(be->p_hst_nthw,
+ hst->v2.rcp[category + i].start_ofs);
+ hst_nthw_rcp_end_dyn(be->p_hst_nthw,
+ hst->v2.rcp[category + i].end_dyn);
+ hst_nthw_rcp_end_ofs(be->p_hst_nthw,
+ hst->v2.rcp[category + i].end_ofs);
+ hst_nthw_rcp_modif0_cmd(be->p_hst_nthw,
+ hst->v2.rcp[category + i].modif0_cmd);
+ hst_nthw_rcp_modif0_dyn(be->p_hst_nthw,
+ hst->v2.rcp[category + i].modif0_dyn);
+ hst_nthw_rcp_modif0_ofs(be->p_hst_nthw,
+ hst->v2.rcp[category + i].modif0_ofs);
+ hst_nthw_rcp_modif0_value(be->p_hst_nthw,
+ hst->v2.rcp[category + i].modif0_value);
+ hst_nthw_rcp_modif1_cmd(be->p_hst_nthw,
+ hst->v2.rcp[category + i].modif1_cmd);
+ hst_nthw_rcp_modif1_dyn(be->p_hst_nthw,
+ hst->v2.rcp[category + i].modif1_dyn);
+ hst_nthw_rcp_modif1_ofs(be->p_hst_nthw,
+ hst->v2.rcp[category + i].modif1_ofs);
+ hst_nthw_rcp_modif1_value(be->p_hst_nthw,
+ hst->v2.rcp[category + i].modif1_value);
+ hst_nthw_rcp_modif2_cmd(be->p_hst_nthw,
+ hst->v2.rcp[category + i].modif2_cmd);
+ hst_nthw_rcp_modif2_dyn(be->p_hst_nthw,
+ hst->v2.rcp[category + i].modif2_dyn);
+ hst_nthw_rcp_modif2_ofs(be->p_hst_nthw,
+ hst->v2.rcp[category + i].modif2_ofs);
+ hst_nthw_rcp_modif2_value(be->p_hst_nthw,
+ hst->v2.rcp[category + i].modif2_value);
+ hst_nthw_rcp_flush(be->p_hst_nthw);
+ }
+ }
+ _CHECK_DEBUG_OFF(hst, be->p_hst_nthw);
+ return 0;
+}
+
+/*
+ * ***************** QSL *******************
+ */
+
+static bool qsl_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return be->p_qsl_nthw != NULL;
+}
+
+static uint32_t qsl_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return (uint32_t)((module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |
+ (module_get_minor_version(be->p_qsl_nthw->m_qsl) &
+ 0xffff));
+}
+
+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,
+ int category, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+ if (qsl->ver == 7) {
+ qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
+ qsl_nthw_rcp_discard(be->p_qsl_nthw,
+ qsl->v7.rcp[category + i].discard);
+ qsl_nthw_rcp_drop(be->p_qsl_nthw,
+ qsl->v7.rcp[category + i].drop);
+ qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw,
+ qsl->v7.rcp[category + i].tbl_lo);
+ qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw,
+ qsl->v7.rcp[category + i].tbl_hi);
+ qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw,
+ qsl->v7.rcp[category + i].tbl_idx);
+ qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw,
+ qsl->v7.rcp[category + i].tbl_msk);
+ qsl_nthw_rcp_lr(be->p_qsl_nthw,
+ qsl->v7.rcp[category + i].lr);
+ qsl_nthw_rcp_tsa(be->p_qsl_nthw,
+ qsl->v7.rcp[category + i].tsa);
+ qsl_nthw_rcp_vli(be->p_qsl_nthw,
+ qsl->v7.rcp[category + i].vli);
+ qsl_nthw_rcp_flush(be->p_qsl_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+ return 0;
+}
+
+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+ int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+ if (qsl->ver == 7) {
+ qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
+ qsl_nthw_qst_queue(be->p_qsl_nthw,
+ qsl->v7.qst[entry + i].queue);
+ qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
+
+ qsl_nthw_qst_tx_port(be->p_qsl_nthw,
+ qsl->v7.qst[entry + i].tx_port);
+ qsl_nthw_qst_lre(be->p_qsl_nthw,
+ qsl->v7.qst[entry + i].lre);
+ qsl_nthw_qst_tci(be->p_qsl_nthw,
+ qsl->v7.qst[entry + i].tci);
+ qsl_nthw_qst_ven(be->p_qsl_nthw,
+ qsl->v7.qst[entry + i].ven);
+ qsl_nthw_qst_flush(be->p_qsl_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+ return 0;
+}
+
+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+ int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+ if (qsl->ver == 7) {
+ qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
+ qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
+ qsl_nthw_qen_flush(be->p_qsl_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+ return 0;
+}
+
+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,
+ int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
+
+ if (qsl->ver == 7) {
+ qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
+ qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
+ qsl->v7.unmq[entry + i].dest_queue);
+ qsl_nthw_unmq_en(be->p_qsl_nthw,
+ qsl->v7.unmq[entry + i].en);
+ qsl_nthw_unmq_flush(be->p_qsl_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
+ return 0;
+}
+
+/*
+ * ***************** SLC *******************
+ */
+
+static bool slc_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return be->p_slc_nthw != NULL;
+}
+
+static uint32_t slc_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return (uint32_t)((module_get_major_version(be->p_slc_nthw->m_slc) << 16) |
+ (module_get_minor_version(be->p_slc_nthw->m_slc) &
+ 0xffff));
+}
+
+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,
+ int category, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);
+
+ if (slc->ver == 1) {
+ slc_nthw_rcp_cnt(be->p_slc_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ slc_nthw_rcp_select(be->p_slc_nthw, category + i);
+ slc_nthw_rcp_tail_slc_en(be->p_slc_nthw,
+ slc->v1.rcp[category + i].tail_slc_en);
+ slc_nthw_rcp_tail_dyn(be->p_slc_nthw,
+ slc->v1.rcp[category + i].tail_dyn);
+ slc_nthw_rcp_tail_ofs(be->p_slc_nthw,
+ slc->v1.rcp[category + i].tail_ofs);
+ slc_nthw_rcp_pcap(be->p_slc_nthw,
+ slc->v1.rcp[category + i].pcap);
+ slc_nthw_rcp_flush(be->p_slc_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(slc, be->p_slc_nthw);
+ return 0;
+}
+
+/*
+ * ***************** SLC LR *******************
+ */
+
+static bool slc_lr_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return be->p_slc_lr_nthw != NULL;
+}
+
+static uint32_t slc_lr_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return (uint32_t)((module_get_major_version(be->p_slc_lr_nthw->m_slc_lr)
+ << 16) |
+ (module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr) &
+ 0xffff));
+}
+
+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,
+ int category, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
+
+ if (slc_lr->ver == 2) {
+ slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
+ slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
+ slc_lr->v2.rcp[category + i].tail_slc_en);
+ slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
+ slc_lr->v2.rcp[category + i].tail_dyn);
+ slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
+ slc_lr->v2.rcp[category + i].tail_ofs);
+ slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,
+ slc_lr->v2.rcp[category + i].pcap);
+ slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
+ return 0;
+}
+
+/*
+ * ***************** PDB *******************
+ */
+
+static bool pdb_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return be->p_pdb_nthw != NULL;
+}
+
+static uint32_t pdb_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return (uint32_t)((module_get_major_version(be->p_pdb_nthw->m_pdb) << 16) |
+ (module_get_minor_version(be->p_pdb_nthw->m_pdb) &
+ 0xffff));
+}
+
+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,
+ int category, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+ if (pdb->ver == 9) {
+ pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
+ pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].descriptor);
+ pdb_nthw_rcp_desc_len(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].desc_len);
+ pdb_nthw_rcp_tx_port(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].tx_port);
+ pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].tx_ignore);
+ pdb_nthw_rcp_tx_now(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].tx_now);
+ pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].crc_overwrite);
+ pdb_nthw_rcp_align(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].align);
+ pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].ofs0_dyn);
+ pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].ofs0_rel);
+ pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].ofs1_dyn);
+ pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].ofs1_rel);
+ pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].ofs2_dyn);
+ pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].ofs2_rel);
+ pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].ip_prot_tnl);
+ pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].ppc_hsh);
+ pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].duplicate_en);
+ pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].duplicate_bit);
+ pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
+ pdb->v9.rcp[category + i].pcap_keep_fcs);
+ pdb_nthw_rcp_flush(be->p_pdb_nthw);
+ }
+ }
+ _CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+ return 0;
+}
+
+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
+
+ if (pdb->ver == 9) {
+ pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
+ pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
+ pdb_nthw_config_flush(be->p_pdb_nthw);
+ }
+
+ _CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
+ return 0;
+}
+
+/*
+ * ***************** IOA *******************
+ */
+
+static bool ioa_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return be->p_ioa_nthw != NULL;
+}
+
+static uint32_t ioa_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return (uint32_t)((module_get_major_version(be->p_ioa_nthw->m_ioa) << 16) |
+ (module_get_minor_version(be->p_ioa_nthw->m_ioa) &
+ 0xffff));
+}
+
+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,
+ int category, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+ if (ioa->ver == 4) {
+ ioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ ioa_nthw_rcp_select(be->p_ioa_nthw, category + i);
+ ioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,
+ ioa->v4.rcp[category + i].tunnel_pop);
+ ioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,
+ ioa->v4.rcp[category + i].vlan_pop);
+ ioa_nthw_rcp_vlan_push(be->p_ioa_nthw,
+ ioa->v4.rcp[category + i].vlan_push);
+ ioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,
+ ioa->v4.rcp[category + i].vlan_vid);
+ ioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,
+ ioa->v4.rcp[category + i].vlan_dei);
+ ioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,
+ ioa->v4.rcp[category + i].vlan_pcp);
+ ioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,
+ ioa->v4.rcp[category + i].vlan_tpid_sel);
+ ioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,
+ ioa->v4.rcp[category + i].queue_override_en);
+ ioa_nthw_rcp_queue_id(be->p_ioa_nthw,
+ ioa->v4.rcp[category + i].queue_id);
+ ioa_nthw_rcp_flush(be->p_ioa_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+ return 0;
+}
+
+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+ if (ioa->ver == 4) {
+ ioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,
+ ioa->v4.tpid->cust_tpid_0);
+ ioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,
+ ioa->v4.tpid->cust_tpid_1);
+ ioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);
+ }
+
+ _CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+ return 0;
+}
+
+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,
+ int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);
+
+ if (ioa->ver == 4) {
+ ioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ ioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);
+ ioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,
+ ioa->v4.roa_epp[index + i].push_tunnel);
+ ioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,
+ ioa->v4.roa_epp[index + i].tx_port);
+ ioa_nthw_roa_epp_flush(be->p_ioa_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);
+ return 0;
+}
+
+/*
+ * ***************** ROA *******************
+ */
+
+static bool roa_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return be->p_roa_nthw != NULL;
+}
+
+static uint32_t roa_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return (uint32_t)((module_get_major_version(be->p_roa_nthw->m_roa) << 16) |
+ (module_get_minor_version(be->p_roa_nthw->m_roa) &
+ 0xffff));
+}
+
+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,
+ int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+ if (roa->ver == 6) {
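+		/* each tunnel header is written as four 4-word chunks (16 words per header) */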
+ roa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);
+ for (int i = 0; i < cnt; i++) {
+ for (int ii = 0; ii < 4; ii++) {
+ roa_nthw_tun_hdr_select(be->p_roa_nthw,
+ index + (i * 4) + ii);
+ roa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,
+ &roa->v6.tunhdr[index / 4 + i]
+ .tunnel_hdr[ii * 4]);
+ roa_nthw_tun_hdr_flush(be->p_roa_nthw);
+ }
+ }
+ }
+
+ _CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+ return 0;
+}
+
+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,
+ int category, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+ if (roa->ver == 6) {
+ roa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ roa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);
+ roa_nthw_tun_cfg_tun_len(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].tun_len);
+ roa_nthw_tun_cfg_tun_type(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].tun_type);
+ roa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].tun_vlan);
+ roa_nthw_tun_cfg_ip_type(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].ip_type);
+ roa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].ipcs_upd);
+ roa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].ipcs_precalc);
+ roa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].iptl_upd);
+ roa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].iptl_precalc);
+ roa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].vxlan_udp_len_upd);
+ roa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].tx_lag_ix);
+ roa_nthw_tun_cfg_recirculate(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].recirculate);
+ roa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].push_tunnel);
+ roa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].recirc_port);
+ roa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,
+ roa->v6.tuncfg[category + i].recirc_bypass);
+ roa_nthw_tun_cfg_flush(be->p_roa_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+ return 0;
+}
+
+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+ if (roa->ver == 6) {
+ roa_nthw_config_fwd_recirculate(be->p_roa_nthw,
+ roa->v6.config->fwd_recirculate);
+ roa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,
+ roa->v6.config->fwd_normal_pcks);
+ roa_nthw_config_fwd_tx_port0(be->p_roa_nthw,
+ roa->v6.config->fwd_txport0);
+ roa_nthw_config_fwd_tx_port1(be->p_roa_nthw,
+ roa->v6.config->fwd_txport1);
+ roa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,
+ roa->v6.config->fwd_cellbuilder_pcks);
+ roa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,
+ roa->v6.config->fwd_non_normal_pcks);
+ roa_nthw_config_flush(be->p_roa_nthw);
+ }
+
+ _CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+ return 0;
+}
+
+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,
+ int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);
+
+ if (roa->ver == 6) {
+ roa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ roa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);
+ roa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,
+ roa->v6.lagcfg[index + i].txphy_port);
+ roa_nthw_lag_cfg_flush(be->p_roa_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(roa, be->p_roa_nthw);
+ return 0;
+}
+
+/*
+ * ***************** RMC *******************
+ */
+
+static bool rmc_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return be->p_rmc_nthw != NULL;
+}
+
+static uint32_t rmc_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return (uint32_t)((module_get_major_version(be->p_rmc_nthw->m_rmc) << 16) |
+ (module_get_minor_version(be->p_rmc_nthw->m_rmc) &
+ 0xffff));
+}
+
+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);
+
+ if (rmc->ver == 0x10003) {
+ rmc_nthw_ctrl_block_statt(be->p_rmc_nthw,
+ rmc->v1_3.ctrl->block_statt);
+ rmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,
+ rmc->v1_3.ctrl->block_keepa);
+ rmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,
+ rmc->v1_3.ctrl->block_rpp_slice);
+ rmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,
+ rmc->v1_3.ctrl->block_mac_port);
+ rmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,
+ rmc->v1_3.ctrl->lag_phy_odd_even);
+ rmc_nthw_ctrl_flush(be->p_rmc_nthw);
+ }
+
+ _CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);
+ return 0;
+}
+
+/*
+ * ***************** TPE *******************
+ */
+
+static bool tpe_get_present(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ return be->p_csu_nthw != NULL && be->p_hfu_nthw != NULL &&
+ be->p_rpp_lr_nthw != NULL && be->p_tx_cpy_nthw != NULL &&
+ be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;
+}
+
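+/*
+ * The TPE interface version is derived from the packed sub-module versions:
+ * RPP_LR 0.0 maps to interface version 1 and RPP_LR 0.1 to version 2, while
+ * the remaining sub-modules must match the values checked below.
+ */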
+static uint32_t tpe_get_version(void *be_dev)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ const uint32_t csu_version =
+ (uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
+ (module_get_minor_version(be->p_csu_nthw->m_csu) &
+ 0xffff));
+
+ const uint32_t hfu_version =
+ (uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
+ (module_get_minor_version(be->p_hfu_nthw->m_hfu) &
+ 0xffff));
+
+ const uint32_t rpp_lr_version =
+ (uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)
+ << 16) |
+ (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &
+ 0xffff));
+
+ const uint32_t tx_cpy_version =
+ (uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)
+ << 16) |
+ (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &
+ 0xffff));
+
+ const uint32_t tx_ins_version =
+ (uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)
+ << 16) |
+ (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &
+ 0xffff));
+
+ const uint32_t tx_rpl_version =
+ (uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)
+ << 16) |
+ (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &
+ 0xffff));
+
+ if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&
+ tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+ return 1;
+
+ if (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&
+ tx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)
+ return 2;
+
+ assert(false);
+ return 0;
+}
+
+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+ int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+ if (rpp_lr->ver >= 1) {
+ rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
+ rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,
+ rpp_lr->v1.rpp_rcp[index + i].exp);
+ rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+ return 0;
+}
+
+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,
+ int index, int cnt)
+{
+ int res = 0;
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
+
+ if (rpp_lr->ver >= 2) {
+ rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
+ rpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,
+ rpp_lr->v2.rpp_ifr_rcp[index + i].en);
+ rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
+ rpp_lr->v2.rpp_ifr_rcp[index + i].mtu);
+ rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
+ }
+ } else {
+ res = -1;
+ }
+ _CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
+ return res;
+}
+
+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,
+ int index, int cnt)
+{
+ int res = 0;
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
+
+ if (ifr->ver >= 2) {
+ ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
+ ifr_nthw_rcp_en(be->p_ifr_nthw,
+ ifr->v2.ifr_rcp[index + i].en);
+ ifr_nthw_rcp_mtu(be->p_ifr_nthw,
+ ifr->v2.ifr_rcp[index + i].mtu);
+ ifr_nthw_rcp_flush(be->p_ifr_nthw);
+ }
+ } else {
+ res = -1;
+ }
+ _CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
+ return res;
+}
+
+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,
+ int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
+
+ if (tx_ins->ver >= 1) {
+ tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
+ tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,
+ tx_ins->v1.ins_rcp[index + i].dyn);
+ tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,
+ tx_ins->v1.ins_rcp[index + i].ofs);
+ tx_ins_nthw_rcp_len(be->p_tx_ins_nthw,
+ tx_ins->v1.ins_rcp[index + i].len);
+ tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
+ return 0;
+}
+
+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+ int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+ if (tx_rpl->ver >= 1) {
+ tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
+ tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,
+ tx_rpl->v1.rpl_rcp[index + i].dyn);
+ tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,
+ tx_rpl->v1.rpl_rcp[index + i].ofs);
+ tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,
+ tx_rpl->v1.rpl_rcp[index + i].len);
+ tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
+ tx_rpl->v1.rpl_rcp[index + i].rpl_ptr);
+ tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
+ tx_rpl->v1.rpl_rcp[index + i].ext_prio);
+ tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+ return 0;
+}
+
+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+ int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+ if (tx_rpl->ver >= 1) {
+ tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
+ tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
+ tx_rpl->v1.rpl_ext[index + i].rpl_ptr);
+ tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+ return 0;
+}
+
+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,
+ int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
+
+ if (tx_rpl->ver >= 1) {
+ tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
+ tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
+ tx_rpl->v1.rpl_rpl[index + i].value);
+ tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
+ return 0;
+}
+
+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,
+ int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+ unsigned int wr_index = -1;
+
+ _CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
+
+ if (tx_cpy->ver >= 1) {
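+		/* entries are split across writers, nb_rcp_categories per writer;
+		 * re-arm the write count whenever the index crosses into the next writer
+		 */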
+ for (int i = 0; i < cnt; i++) {
+ if (wr_index !=
+ (index + i) / tx_cpy->nb_rcp_categories) {
+ wr_index =
+ (index + i) / tx_cpy->nb_rcp_categories;
+ tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,
+ 1);
+ }
+
+ tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
+ (index + i) % tx_cpy->nb_rcp_categories);
+ tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
+ tx_cpy->v1.cpy_rcp[index + i].reader_select);
+ tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
+ tx_cpy->v1.cpy_rcp[index + i].dyn);
+ tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
+ tx_cpy->v1.cpy_rcp[index + i].ofs);
+ tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
+ tx_cpy->v1.cpy_rcp[index + i].len);
+ tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
+ return 0;
+}
+
+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,
+ int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
+
+ if (hfu->ver >= 1) {
+ hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
+ hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_a_wr);
+ hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);
+ hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);
+ hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);
+ hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_a_add_dyn);
+ hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_a_add_ofs);
+ hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);
+ hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_b_wr);
+ hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);
+ hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);
+ hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_b_add_dyn);
+ hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_b_add_ofs);
+ hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);
+ hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_c_wr);
+ hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);
+ hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);
+ hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_c_add_dyn);
+ hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_c_add_ofs);
+ hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);
+ hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].ttl_wr);
+ hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);
+ hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);
+ hfu_nthw_rcp_csinf(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].cs_inf);
+ hfu_nthw_rcp_l3prt(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].l3_prt);
+ hfu_nthw_rcp_l3frag(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].l3_frag);
+ hfu_nthw_rcp_tunnel(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].tunnel);
+ hfu_nthw_rcp_l4prt(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].l4_prt);
+ hfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].outer_l3_ofs);
+ hfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].outer_l4_ofs);
+ hfu_nthw_rcp_il3ofs(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].inner_l3_ofs);
+ hfu_nthw_rcp_il4ofs(be->p_hfu_nthw,
+ hfu->v1.hfu_rcp[index + i].inner_l4_ofs);
+ hfu_nthw_rcp_flush(be->p_hfu_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
+ return 0;
+}
+
+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,
+ int index, int cnt)
+{
+ struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
+
+ _CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
+
+ if (csu->ver >= 1) {
+ csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
+ for (int i = 0; i < cnt; i++) {
+ csu_nthw_rcp_select(be->p_csu_nthw, index + i);
+ csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
+ csu->v1.csu_rcp[index + i].ol3_cmd);
+ csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
+ csu->v1.csu_rcp[index + i].ol4_cmd);
+ csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
+ csu->v1.csu_rcp[index + i].il3_cmd);
+ csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
+ csu->v1.csu_rcp[index + i].il4_cmd);
+ csu_nthw_rcp_flush(be->p_csu_nthw);
+ }
+ }
+
+ _CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
+ return 0;
+}
+
+/*
+ * ***************** DBS *******************
+ */
+
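+/* Rx queue management is not handled by this backend; the stubs below only report an error. */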
+static int alloc_rx_queue(void *be_dev, int queue_id)
+{
+ (void)be_dev;
+ (void)queue_id;
+ printf("ERROR alloc Rx queue\n");
+ return -1;
+}
+
+static int free_rx_queue(void *be_dev, int hw_queue)
+{
+ (void)be_dev;
+ (void)hw_queue;
+ printf("ERROR free Rx queue\n");
+ return 0;
+}
+
+const struct flow_api_backend_ops flow_be_iface = {
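+	/* version of this flow API backend interface */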
+ 1,
+
+ set_debug_mode,
+ get_nb_phy_ports,
+ get_nb_rx_ports,
+ get_ltx_avail,
+ get_nb_cat_funcs,
+ get_nb_categories,
+ get_nb_cat_km_if_cnt,
+ get_nb_cat_km_if_m0,
+ get_nb_cat_km_if_m1,
+ get_nb_queues,
+ get_nb_km_flow_types,
+ get_nb_pm_ext,
+ get_nb_len,
+ get_kcc_size,
+ get_kcc_banks,
+ get_nb_km_categories,
+ get_nb_km_cam_banks,
+ get_nb_km_cam_record_words,
+ get_nb_km_cam_records,
+ get_nb_km_tcam_banks,
+ get_nb_km_tcam_bank_width,
+ get_nb_flm_categories,
+ get_nb_flm_size_mb,
+ get_nb_flm_entry_size,
+ get_nb_flm_variant,
+ get_nb_flm_prios,
+ get_nb_flm_pst_profiles,
+ get_nb_hst_categories,
+ get_nb_qsl_categories,
+ get_nb_qsl_qst_entries,
+ get_nb_pdb_categories,
+ get_nb_ioa_categories,
+ get_nb_roa_categories,
+ get_nb_tpe_categories,
+ get_nb_tx_cpy_writers,
+ get_nb_tx_cpy_mask_mem,
+ get_nb_tx_rpl_depth,
+ get_nb_tx_rpl_ext_categories,
+ get_nb_tpe_ifr_categories,
+
+ alloc_rx_queue,
+ free_rx_queue,
+
+ cat_get_present,
+ cat_get_version,
+ cat_cfn_flush,
+
+ cat_kce_flush,
+ cat_kcs_flush,
+ cat_fte_flush,
+
+ cat_cte_flush,
+ cat_cts_flush,
+ cat_cot_flush,
+ cat_cct_flush,
+ cat_exo_flush,
+ cat_rck_flush,
+ cat_len_flush,
+ cat_kcc_flush,
+ cat_cce_flush,
+ cat_ccs_flush,
+
+ km_get_present,
+ km_get_version,
+ km_rcp_flush,
+ km_cam_flush,
+ km_tcam_flush,
+ km_tci_flush,
+ km_tcq_flush,
+
+ flm_get_present,
+ flm_get_version,
+ flm_control_flush,
+ flm_status_flush,
+ flm_status_update,
+ flm_timeout_flush,
+ flm_scrub_flush,
+ flm_load_bin_flush,
+ flm_load_pps_flush,
+ flm_load_lps_flush,
+ flm_load_aps_flush,
+ flm_prio_flush,
+ flm_pst_flush,
+ flm_rcp_flush,
+ flm_buf_ctrl_update,
+ flm_stat_update,
+ flm_lrn_data_flush,
+ flm_inf_data_update,
+ flm_sta_data_update,
+
+ hsh_get_present,
+ hsh_get_version,
+ hsh_rcp_flush,
+
+ hst_get_present,
+ hst_get_version,
+ hst_rcp_flush,
+
+ qsl_get_present,
+ qsl_get_version,
+ qsl_rcp_flush,
+ qsl_qst_flush,
+ qsl_qen_flush,
+ qsl_unmq_flush,
+
+ slc_get_present,
+ slc_get_version,
+ slc_rcp_flush,
+
+ slc_lr_get_present,
+ slc_lr_get_version,
+ slc_lr_rcp_flush,
+
+ pdb_get_present,
+ pdb_get_version,
+ pdb_rcp_flush,
+ pdb_config_flush,
+
+ ioa_get_present,
+ ioa_get_version,
+ ioa_rcp_flush,
+ ioa_special_tpid_flush,
+ ioa_roa_epp_flush,
+
+ roa_get_present,
+ roa_get_version,
+ roa_tunhdr_flush,
+ roa_tuncfg_flush,
+ roa_config_flush,
+ roa_lagcfg_flush,
+
+ rmc_get_present,
+ rmc_get_version,
+ rmc_ctrl_flush,
+
+ tpe_get_present,
+ tpe_get_version,
+ tpe_rpp_rcp_flush,
+ tpe_rpp_ifr_rcp_flush,
+ tpe_ifr_rcp_flush,
+ tpe_ins_rcp_flush,
+ tpe_rpl_rcp_flush,
+ tpe_rpl_ext_flush,
+ tpe_rpl_rpl_flush,
+ tpe_cpy_rcp_flush,
+ tpe_hfu_rcp_flush,
+ tpe_csu_rcp_flush,
+};
+
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+ void **dev)
+{
+ uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
+
+ struct info_nthw *pinfonthw = info_nthw_new();
+
+ info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
+
+ /* Init nthw CAT */
+ if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct cat_nthw *pcatnthw = cat_nthw_new();
+
+ cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
+ } else {
+ be_devs[physical_adapter_no].p_cat_nthw = NULL;
+ }
+ /* Init nthw KM */
+ if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct km_nthw *pkmnthw = km_nthw_new();
+
+ km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
+ } else {
+ be_devs[physical_adapter_no].p_km_nthw = NULL;
+ }
+ /* Init nthw FLM */
+ if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct flm_nthw *pflmnthw = flm_nthw_new();
+
+ flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
+ } else {
+ be_devs[physical_adapter_no].p_flm_nthw = NULL;
+ }
+ /* Init nthw IFR */
+ if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct ifr_nthw *ifrnthw = ifr_nthw_new();
+
+ ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
+ } else {
+ be_devs[physical_adapter_no].p_ifr_nthw = NULL;
+ }
+ /* Init nthw HSH */
+ if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct hsh_nthw *phshnthw = hsh_nthw_new();
+
+ hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
+ } else {
+ be_devs[physical_adapter_no].p_hsh_nthw = NULL;
+ }
+ /* Init nthw HST */
+ if (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct hst_nthw *phstnthw = hst_nthw_new();
+
+ hst_nthw_init(phstnthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_hst_nthw = phstnthw;
+ } else {
+ be_devs[physical_adapter_no].p_hst_nthw = NULL;
+ }
+ /* Init nthw QSL */
+ if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct qsl_nthw *pqslnthw = qsl_nthw_new();
+
+ qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
+ } else {
+ be_devs[physical_adapter_no].p_qsl_nthw = NULL;
+ }
+ /* Init nthw SLC */
+ if (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct slc_nthw *pslcnthw = slc_nthw_new();
+
+ slc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_slc_nthw = pslcnthw;
+ } else {
+ be_devs[physical_adapter_no].p_slc_nthw = NULL;
+ }
+ /* Init nthw SLC LR */
+ if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
+
+ slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
+ } else {
+ be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
+ }
+ /* Init nthw PDB */
+ if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct pdb_nthw *ppdbnthw = pdb_nthw_new();
+
+ pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
+ } else {
+ be_devs[physical_adapter_no].p_pdb_nthw = NULL;
+ }
+ /* Init nthw IOA */
+ if (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct ioa_nthw *pioanthw = ioa_nthw_new();
+
+ ioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_ioa_nthw = pioanthw;
+ } else {
+ be_devs[physical_adapter_no].p_ioa_nthw = NULL;
+ }
+ /* Init nthw ROA */
+ if (roa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct roa_nthw *proanthw = roa_nthw_new();
+
+ roa_nthw_init(proanthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_roa_nthw = proanthw;
+ } else {
+ be_devs[physical_adapter_no].p_roa_nthw = NULL;
+ }
+ /* Init nthw RMC */
+ if (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct rmc_nthw *prmcnthw = rmc_nthw_new();
+
+ rmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;
+ } else {
+ be_devs[physical_adapter_no].p_rmc_nthw = NULL;
+ }
+ /* Init nthw HFU */
+ if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct hfu_nthw *ptr = hfu_nthw_new();
+
+ hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_hfu_nthw = ptr;
+ } else {
+ be_devs[physical_adapter_no].p_hfu_nthw = NULL;
+ }
+ /* Init nthw RPP_LR */
+ if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
+
+ rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
+ } else {
+ be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
+ }
+ /* Init nthw TX_CPY */
+ if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
+
+ tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
+ } else {
+ be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
+ }
+ /* Init nthw CSU */
+ if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct csu_nthw *ptr = csu_nthw_new();
+
+ csu_nthw_init(ptr, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_csu_nthw = ptr;
+ } else {
+ be_devs[physical_adapter_no].p_csu_nthw = NULL;
+ }
+ /* Init nthw TX_INS */
+ if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct tx_ins_nthw *ptr = tx_ins_nthw_new();
+
+ tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
+ } else {
+ be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
+ }
+ /* Init nthw TX_RPL */
+ if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
+ struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
+
+ tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
+ be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
+ } else {
+ be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
+ }
+ be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
+ *dev = (void *)&be_devs[physical_adapter_no];
+
+ return &flow_be_iface;
+}
+
+void bin_flow_backend_done(void *dev)
+{
+ struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
+
+ info_nthw_delete(be_dev->p_info_nthw);
+ cat_nthw_delete(be_dev->p_cat_nthw);
+ km_nthw_delete(be_dev->p_km_nthw);
+ flm_nthw_delete(be_dev->p_flm_nthw);
+ hsh_nthw_delete(be_dev->p_hsh_nthw);
+ hst_nthw_delete(be_dev->p_hst_nthw);
+ qsl_nthw_delete(be_dev->p_qsl_nthw);
+ slc_nthw_delete(be_dev->p_slc_nthw);
+ slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
+ pdb_nthw_delete(be_dev->p_pdb_nthw);
+ ioa_nthw_delete(be_dev->p_ioa_nthw);
+ roa_nthw_delete(be_dev->p_roa_nthw);
+ rmc_nthw_delete(be_dev->p_rmc_nthw);
+ csu_nthw_delete(be_dev->p_csu_nthw);
+ hfu_nthw_delete(be_dev->p_hfu_nthw);
+ rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
+ tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
+ tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
+ tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
+}
new file mode 100644
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_BACKEND_H__
+#define __FLOW_BACKEND_H__
+
+#include <stdint.h> /* uint8_t */
+#include "nthw_fpga_model.h"
+
+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,
+ void **be_dev);
+void bin_flow_backend_done(void *be_dev);
+
+#endif /* __FLOW_BACKEND_H__ */
new file mode 100644
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "nthw_drv.h"
+#include "flow_filter.h"
+#include "flow_api_backend.h"
+#include "flow_backend.h"
+#include "flow_api_nic_setup.h"
+
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+ int adapter_no)
+{
+ void *be_dev = NULL;
+ struct flow_nic_dev *flow_nic;
+
+	NT_LOG(DBG, FILTER, "Initializing flow filter API\n");
+ const struct flow_api_backend_ops *iface =
+ bin_flow_backend_init(p_fpga, &be_dev);
+
+ flow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);
+ if (!flow_nic) {
+ *p_flow_device = NULL;
+ return -1;
+ }
+ *p_flow_device = flow_nic;
+ return 0;
+}
+
+int flow_filter_done(struct flow_nic_dev *dev)
+{
+ void *be_dev = flow_api_get_be_dev(dev);
+
+ int res = flow_api_done(dev);
+
+ if (be_dev)
+ bin_flow_backend_done(be_dev);
+ return res;
+}
new file mode 100644
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_FILTER_HPP__
+#define __FLOW_FILTER_HPP__
+#undef USE_OPAE
+
+#include "nthw_fpga_model.h"
+#include "flow_api.h"
+
+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,
+ int adapter_no);
+int flow_filter_done(struct flow_nic_dev *dev);
+
+#endif /* __FLOW_FILTER_HPP__ */