@@ -18,6 +18,7 @@ includes = [
include_directories('nthw'),
include_directories('nthw/core'),
include_directories('nthw/supported'),
+ include_directories('nthw/flow_api'),
include_directories('nthw/flow_filter'),
include_directories('sensors'),
include_directories('sensors/avr_sensors'),
@@ -60,6 +61,26 @@ sources = files(
'nthw/core/nthw_spim.c',
'nthw/core/nthw_spis.c',
'nthw/core/nthw_tsm.c',
+ 'nthw/flow_api/flow_api_actions.c',
+ 'nthw/flow_api/flow_api_backend.c',
+ 'nthw/flow_api/flow_engine/flow_group.c',
+ 'nthw/flow_api/flow_engine/flow_hasher.c',
+ 'nthw/flow_api/flow_engine/flow_kcc.c',
+ 'nthw/flow_api/flow_engine/flow_km.c',
+ 'nthw/flow_api/flow_engine/flow_tunnel.c',
+ 'nthw/flow_api/hw_mod/hw_mod_cat.c',
+ 'nthw/flow_api/hw_mod/hw_mod_flm.c',
+ 'nthw/flow_api/hw_mod/hw_mod_hsh.c',
+ 'nthw/flow_api/hw_mod/hw_mod_hst.c',
+ 'nthw/flow_api/hw_mod/hw_mod_ioa.c',
+ 'nthw/flow_api/hw_mod/hw_mod_km.c',
+ 'nthw/flow_api/hw_mod/hw_mod_pdb.c',
+ 'nthw/flow_api/hw_mod/hw_mod_qsl.c',
+ 'nthw/flow_api/hw_mod/hw_mod_rmc.c',
+ 'nthw/flow_api/hw_mod/hw_mod_roa.c',
+ 'nthw/flow_api/hw_mod/hw_mod_slc.c',
+ 'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',
+ 'nthw/flow_api/hw_mod/hw_mod_tpe.c',
'nthw/flow_filter/flow_nthw_cat.c',
'nthw/flow_filter/flow_nthw_csu.c',
'nthw/flow_filter/flow_nthw_flm.c',
new file mode 100644
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+
+#include "ntlog.h"
+
+#include "stream_binary_flow_api.h"
+#include "flow_api_actions.h"
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+ struct tunnel_header_s *tun)
+{
+ int err = 0;
+ int num_writes = (tun->ip_version == 4) ? 4 : 8;
+
+ /*
+ * Write 4 * 4 words = 64 bytes (IPv4) or 8 * 4 words = 128 bytes (IPv6)
+ */
+ for (int i = 0; (i < num_writes) && !err; i++) {
+ for (int ii = 0; (ii < 4) && !err; ii++) {
+			/*
+			 * each group of 4 words must be written in reverse
+			 * order, e.g. hdr32[3..0] go to word offsets 0..3
+			 */
+ err |= hw_mod_roa_tunhdr_set(be, HW_ROA_TUNHDR,
+ index, i * 4 + ii,
+ ntohl(tun->d.hdr32[(i + 1) * 4 - ii - 1]));
+ }
+ }
+
+ return err;
+}
+
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+ uint64_t color_actions)
+{
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PRESET_ALL, index, 0);
+ /*
+ * If tunnel header specified
+ */
+ int tun_len = get_roa_tunhdr_len(color_actions);
+
+ if (tun_len) {
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_LEN, index,
+ tun_len);
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TUN_TYPE, index,
+ roa_get_tun_type(color_actions));
+
+ /* set the total tunnel IP header length */
+ if (get_roa_tun_ip_type(color_actions) == 1) {
+ /* IPv6 */
+ if ((size_t)tun_len > (sizeof(struct flow_elem_eth) +
+ sizeof(struct flow_elem_ipv6))) {
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD, index, 1);
+				/* the pre-calculated length covers the tunnel
+				 * header bytes after the IPv6 header (the IPv6
+				 * payload length excludes the header itself)
+				 */
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+ (uint32_t)(tun_len -
+ (sizeof(struct flow_elem_eth) +
+ sizeof(struct flow_elem_ipv6))));
+ }
+ } else {
+ /* IPv4 */
+ if ((size_t)tun_len > sizeof(struct flow_elem_eth)) {
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_UPD,
+ index, 1);
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPTL_PRECALC, index,
+ (uint32_t)(tun_len -
+ sizeof(struct flow_elem_eth)));
+ }
+ }
+
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IP_TYPE, index,
+ get_roa_tun_ip_type(color_actions));
+
+ if (get_roa_tun_ip_type(color_actions) == 1) {
+ /* IPv6 - Do not update the IP checksum in the tunnel header */
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD, index,
+ 0);
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+ index, 0);
+ } else {
+ /* IPv4 */
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_UPD,
+ index, 1);
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_IPCS_PRECALC,
+ index,
+ get_roa_tun_ip_csum(color_actions));
+ }
+
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+ index, 1);
+
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_PUSH_TUNNEL, index, 1);
+ }
+
+	/* either bypass > 0 or recirculate_port >= 0; bypass takes precedence */
+ uint8_t recirculate_bypass = roa_get_recirc_bypass_port(color_actions);
+
+ if (recirculate_bypass) {
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+ recirculate_bypass);
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE, index, 1);
+
+ } else {
+ int32_t recirculate_port = roa_get_recirc_port(color_actions);
+
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS, index,
+ 255);
+
+ if (recirculate_port >= 0) {
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_PORT,
+ index, recirculate_port);
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+ index, 1);
+ }
+ }
+
+ uint8_t tx = roa_get_tx(color_actions);
+
+ if (tx) {
+ if (tx == DESTINATION_TX_PHY0) {
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+ index, ROA_TX_PHY0);
+ } else if (tx == DESTINATION_TX_PHY1) {
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+ index, ROA_TX_PHY1);
+ } else if (tx == (DESTINATION_TX_PHY0 | DESTINATION_TX_PHY1)) {
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_TX_LAG_IX,
+ index, ROA_TX_PHY0);
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRC_BYPASS,
+ index, 0x81); /* port 1 - only port left */
+ hw_mod_roa_tuncfg_set(be, HW_ROA_TUNCFG_RECIRCULATE,
+ index, 1);
+
+ } else {
+ return -1; /* ERR */
+ }
+ }
+
+ /*
+ * Special IOA memory that contains ROA information - bad FPGA design
+ */
+ if (tx || tun_len) {
+ if (be->ioa.ver > 3 && tun_len &&
+ get_roa_tun_ip_type(color_actions) == 1) {
+ /* IPv6 VxLAN tunnel. Select EPP recipe 2 */
+ hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+ index, 2);
+ } else {
+ /* IPv4 VxLAN tunnel or no tunnel (select recipe 1 or 0) */
+ hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_PUSH_TUNNEL,
+ index, !!tun_len);
+ }
+ hw_mod_ioa_roa_epp_set(be, HW_IOA_ROA_EPP_TX_PORT, index, tx);
+ }
+
+ return 0;
+}
+
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+ uint64_t color_actions)
+{
+ if (color_actions & ioa_set_vxlan_pop(0)) {
+ hw_mod_ioa_rcp_set(be, HW_IOA_RCP_TUNNEL_POP, index, 1);
+ NT_LOG(DBG, FILTER, "Pop outer Tunnel (Vxlan)\n");
+ }
+
+ if (color_actions & ioa_set_vlan_pop(0)) {
+ hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_POP, index, 1);
+ NT_LOG(DBG, FILTER, "Pop outer Vlan\n");
+ }
+
+ int tpid_sel = ioa_get_tpid_sel(color_actions);
+
+ if (color_actions & ioa_set_vlan_push(0, 0)) {
+ uint16_t tci = ioa_get_vlan_tci(color_actions);
+
+ NT_LOG(DBG, FILTER, "Push Vlan with TPID/TCI %04x/%04x\n",
+ tpid_sel ? 0x88a8 : 0x8100, tci);
+ hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, index,
+ tci & 0x0FFF);
+ hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_DEI, index,
+ (tci >> 12) & 0x1);
+ hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, index,
+ (tci >> 13) & 0x7);
+ hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, index, 1);
+ }
+
+ int queue = ioa_get_queue(color_actions);
+
+ if (queue >= 0) {
+ hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_OVERRIDE_EN, index, 1);
+ hw_mod_ioa_rcp_set(be, HW_IOA_RCP_QUEUE_ID, index, queue);
+ }
+
+ hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, index, tpid_sel);
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_ACTIONS_H_
+#define _FLOW_ACTIONS_H_
+
+struct flow_api_backend_s;
+struct tunnel_header_s;
+
+#define MAX_COLOR_FLOW_STATS 0x400
+
+#define ROA_RECIRC_BYPASS_PHY_OFFSET 0x80
+#define MAX_REPLICATION_PORTS 2
+
+enum {
+ DESTINATION_TX_NONE = 0,
+ DESTINATION_TX_PHY0 = 1,
+ DESTINATION_TX_PHY1 = 2
+};
+
+enum { TUN_IPV4 = 0, TUN_IPV6 };
+
+enum {
+ VLAN_TPID_802_1Q = 0,
+ VLAN_TPID_802_1AD,
+ VLAN_TPID_CUSTOM_0,
+ VLAN_TPID_CUSTOM_1
+};
+
+enum { ROA_TX_NO_RETRANSMIT = 0, ROA_TX_PHY0, ROA_TX_PHY1, ROA_TX_RESERVED };
+
+/*
+ * before version 6 of QSL
+ */
+#if (MAX_COLOR_FLOW_STATS == 0x4000)
+#define MAX_HW_FLOW_STATS_OLD 0x3fff
+#elif (MAX_COLOR_FLOW_STATS == 0x400)
+#define MAX_HW_FLOW_STATS_OLD 0x03ff
+#else
+#error *** Unsupported number of color statistics counters ***
+#endif
+
+/*
+ * OLD layout, superseded from 4.1+
+ *
+ * 13:0 Mark (color) 16384 flow stats
+ * 21:14 IOA index 256 entries
+ * 29:22 ROA index 256 entries
+ * 31:30 1 to indicate this layout
+ * or
+ * 9:0 Mark (color) 1024 flow stats
+ * 19:10 IOA index 1024 entries
+ * 29:20 ROA index 1024 entries
+ * 31:30 0 to indicate this layout
+ */
+static inline uint32_t set_color_action_old(uint32_t color, uint32_t ioa_rcp,
+ uint32_t roa_rcp)
+{
+#if (MAX_COLOR_FLOW_STATS == 0x400)
+ uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+ ((ioa_rcp & 0x3ff) << 10) |
+ ((roa_rcp & 0x3ff) << 20) | (0 << 30);
+#else
+ uint32_t color_action = (color & MAX_HW_FLOW_STATS_OLD) |
+ ((ioa_rcp & 0xff) << 14) |
+ ((roa_rcp & 0xff) << 22) | (1 << 30);
+#endif
+ return color_action;
+}
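+
+/*
+ * Worked example (hypothetical values, 0x400 layout):
+ * set_color_action_old(5, 2, 3) == 5 | (2 << 10) | (3 << 20) == 0x00300805
+ */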
+
+#define BITMASK(a, b) ((1U << ((a) - (b) + 1)) - 1)
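+/* e.g. BITMASK(17, 10) == 0xff (8 bits wide), BITMASK(30, 26) == 0x1f */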
+
+/*
+ * 9:0 Mark (color) 1024 flow stats
+ * 17:10 IOA index 256 entries
+ * 25:18 ROA index 256 entries
+ * 30:26 QSL and HSH 32 recipes indexable
+ * 31:31 CAO implicitly when color_action is set
+ */
+#define FLOW_MARK_MASK BITMASK(9, 0)
+#define IOA_RCP_MASK BITMASK(17, 10)
+#define ROA_RCP_MASK BITMASK(25, 18)
+#define QSL_HSH_MASK BITMASK(30, 26)
+
+static inline uint32_t set_color_action(uint32_t mark, uint32_t ioa_rcp,
+ uint32_t roa_rcp, uint32_t qsl_hsh)
+{
+ uint32_t color_action = (mark & FLOW_MARK_MASK) |
+ ((ioa_rcp & IOA_RCP_MASK) << 10) |
+ ((roa_rcp & ROA_RCP_MASK) << 18) |
+			((qsl_hsh & QSL_HSH_MASK) << 26) | (1U << 31);
+ return color_action;
+}
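+
+/*
+ * Worked example (hypothetical values):
+ * set_color_action(5, 2, 3, 1) ==
+ *   5 | (2 << 10) | (3 << 18) | (1 << 26) | (1U << 31) == 0x840c0805
+ */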
+
+/*
+ * SW bitmask representation of the ROA config settings,
+ * kept mainly for efficient cache matching.
+ *
+ * ROA config        bit offs  bits
+ * ---------------------------------
+ * recirc port        7:0       8   -> uses hbx > 0
+ * recirc bypass     15:8       8   -> uses hbx > 0; if set, overrides recirc port
+ * tunnel type       19:16      4
+ * tx port           23:20      4   -> txport + 1
+ * tun_ip_type       24:24      1
+ * recirculate       25:25      1   -> recirculate port set
+ * tunhdr_len        33:26      8   -> tunnel header length, 0 if none
+ * ip_csum_prec      49:34     16   -> pre-calculated tunnel IP header checksum
+ * new_recirc_port   50:50      1   -> a new recirculate port has been allocated;
+ *                                     needs a default queue
+ */
+
+static inline uint64_t set_roa_new_recirc_port(uint64_t actions)
+{
+ actions |= 1ULL << 50;
+ return actions;
+}
+
+static inline uint8_t get_roa_new_recirc_port(uint64_t actions)
+{
+ return (uint8_t)((actions >> 50) & 1);
+}
+
+static inline uint64_t set_roa_tun_ip_type(uint64_t actions, uint8_t ip_type)
+{
+ actions |= (uint64_t)(ip_type & 1) << 24;
+ return actions;
+}
+
+static inline uint8_t get_roa_tun_ip_type(uint64_t actions)
+{
+ return (uint8_t)((actions >> 24) & 1);
+}
+
+static inline uint64_t set_roa_tun_ip_csum(uint64_t actions, uint16_t csum)
+{
+ actions |= (uint64_t)csum << 34;
+ return actions;
+}
+
+static inline uint16_t get_roa_tun_ip_csum(uint64_t actions)
+{
+ return (uint16_t)((actions >> 34) & 0xffff);
+}
+
+static inline uint64_t set_roa_tunhdr_len(uint64_t actions, uint8_t length)
+{
+ actions |= (uint64_t)length << 26;
+ return actions;
+}
+
+static inline uint8_t get_roa_tunhdr_len(uint64_t actions)
+{
+ return (uint8_t)((actions >> 26) & 0xff);
+}
+
+static inline uint64_t set_roa_tx(uint64_t actions, uint8_t txport)
+{
+ actions |= ((txport + ROA_TX_PHY0) & 0x0f) << 20;
+ return actions;
+}
+
+static inline uint8_t roa_get_tx(uint64_t actions)
+{
+ return (actions >> 20) & 0x0f;
+}
+
+static inline uint64_t set_roa_tun_type(uint64_t actions, uint8_t type)
+{
+ actions |= (type & 0x0f) << 16;
+ return actions;
+}
+
+static inline uint8_t roa_get_tun_type(uint64_t actions)
+{
+ return (actions >> 16) & 0x0f;
+}
+
+static inline uint64_t set_roa_recirculate(uint64_t actions, uint8_t port)
+{
+ actions |= (1ULL << 25) | port;
+ return actions;
+}
+
+static inline int32_t roa_get_recirc_port(uint64_t actions)
+{
+ if (!((1ULL << 25) & actions))
+ return -1;
+ return (actions & 0xff);
+}
+
+static inline uint64_t set_roa_recirc_bypass(uint64_t actions, uint8_t port)
+{
+ actions |= ((uint64_t)port & 0xff) << 8;
+ return actions;
+}
+
+static inline uint8_t roa_get_recirc_bypass_port(uint64_t actions)
+{
+ return ((actions >> 8) & 0xff);
+}
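+
+/*
+ * Composition sketch (hypothetical values): a push-tunnel action with a
+ * 50 byte IPv4 tunnel header and a pre-calculated IP checksum:
+ *
+ *   uint64_t a = 0;
+ *   a = set_roa_tun_type(a, tun_type);   (HW tunnel type code)
+ *   a = set_roa_tun_ip_type(a, TUN_IPV4);
+ *   a = set_roa_tunhdr_len(a, 50);
+ *   a = set_roa_tun_ip_csum(a, ip_csum);
+ *
+ * flow_actions_create_roa_tuncfg() decodes these fields when it programs
+ * the ROA TUNCFG registers.
+ */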
+
+/*
+ * SW bitmask representation of the IOA action settings,
+ * kept mainly for efficient cache matching.
+ *
+ * IOA action      bit offs  bits
+ * --------------------------------
+ * tci             15:0      16
+ * queue           23:16      8   uses hbx
+ * tpid select     27:24      4
+ * pop vxlan       28         1
+ * pop vlan        29         1
+ * push vlan       30         1
+ * queue override  31         1
+ */
+
+static inline uint64_t ioa_set_queue(uint64_t actions, uint8_t hb)
+{
+	actions |= (1ULL << 31) | ((uint64_t)hb << 16);
+ return actions;
+}
+
+static inline int ioa_get_queue(uint64_t actions)
+{
+	if (!(actions & (1ULL << 31)))
+ return -1;
+ return ((actions >> 16) & 0xff);
+}
+
+static inline uint64_t ioa_set_vxlan_pop(uint64_t actions)
+{
+ actions |= 1 << 28;
+ return actions;
+}
+
+static inline uint64_t ioa_set_vlan_pop(uint64_t actions)
+{
+ actions |= 1 << 29;
+ return actions;
+}
+
+static inline uint64_t ioa_set_vlan_push_qinq(uint64_t actions)
+{
+ actions |= (VLAN_TPID_802_1AD & 0x0f) << 24;
+ return actions;
+}
+
+static inline uint8_t ioa_get_tpid_sel(uint64_t actions)
+{
+ return (uint8_t)((actions >> 24) & 0x0f);
+}
+
+static inline uint64_t ioa_set_vlan_push(uint64_t actions, uint16_t tci)
+{
+ actions |= (1 << 30) | tci;
+ return actions;
+}
+
+static inline uint64_t ioa_set_vlan_pcp(uint64_t actions, uint8_t pcp)
+{
+ actions |= (1 << 30) | ((uint16_t)(pcp & 7) << 13);
+ return actions;
+}
+
+static inline uint16_t ioa_get_vlan_tci(uint64_t actions)
+{
+ return (uint16_t)(actions & 0xffff);
+}
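+
+/*
+ * Composition sketch (hypothetical values): pop the outer VLAN, push a
+ * new VLAN with VID 100 and PCP 3, and override the destination queue:
+ *
+ *   uint64_t a = 0;
+ *   a = ioa_set_vlan_pop(a);
+ *   a = ioa_set_vlan_push(a, 100);
+ *   a = ioa_set_vlan_pcp(a, 3);
+ *   a = ioa_set_queue(a, 7);
+ *
+ * flow_actions_create_ioa_config() decodes these fields into IOA RCP
+ * register writes.
+ */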
+
+int flow_actions_create_roa_tunhdr(struct flow_api_backend_s *be, int index,
+ struct tunnel_header_s *tun);
+int flow_actions_create_roa_tuncfg(struct flow_api_backend_s *be, int index,
+ uint64_t color_actions);
+int flow_actions_create_ioa_config(struct flow_api_backend_s *be, int index,
+ uint64_t color_actions);
+
+#endif /* _FLOW_ACTIONS_H_ */
new file mode 100644
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <assert.h>
+
+#include "flow_api_backend.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+static const struct {
+ const char *name;
+ int (*allocate)(struct flow_api_backend_s *be);
+ void (*free)(struct flow_api_backend_s *be);
+ int (*reset)(struct flow_api_backend_s *be);
+ bool (*present)(struct flow_api_backend_s *be);
+} module[] = {
+ { "CAT", hw_mod_cat_alloc, hw_mod_cat_free, hw_mod_cat_reset,
+ hw_mod_cat_present
+ },
+ { "KM", hw_mod_km_alloc, hw_mod_km_free, hw_mod_km_reset,
+ hw_mod_km_present
+ },
+ { "FLM", hw_mod_flm_alloc, hw_mod_flm_free, hw_mod_flm_reset,
+ hw_mod_flm_present
+ },
+ { "HSH", hw_mod_hsh_alloc, hw_mod_hsh_free, hw_mod_hsh_reset,
+ hw_mod_hsh_present
+ },
+ { "HST", hw_mod_hst_alloc, hw_mod_hst_free, hw_mod_hst_reset,
+ hw_mod_hst_present
+ },
+ { "QSL", hw_mod_qsl_alloc, hw_mod_qsl_free, hw_mod_qsl_reset,
+ hw_mod_qsl_present
+ },
+ { "SLC", hw_mod_slc_alloc, hw_mod_slc_free, hw_mod_slc_reset,
+ hw_mod_slc_present
+ },
+ { "SLC LR", hw_mod_slc_lr_alloc, hw_mod_slc_lr_free,
+ hw_mod_slc_lr_reset, hw_mod_slc_lr_present
+ },
+ { "PDB", hw_mod_pdb_alloc, hw_mod_pdb_free, hw_mod_pdb_reset,
+ hw_mod_pdb_present
+ },
+ { "IOA", hw_mod_ioa_alloc, hw_mod_ioa_free, hw_mod_ioa_reset,
+ hw_mod_ioa_present
+ },
+ { "ROA", hw_mod_roa_alloc, hw_mod_roa_free, hw_mod_roa_reset,
+ hw_mod_roa_present
+ },
+ { "RMC", hw_mod_rmc_alloc, hw_mod_rmc_free, hw_mod_rmc_reset,
+ hw_mod_rmc_present
+ },
+ { "TPE", hw_mod_tpe_alloc, hw_mod_tpe_free, hw_mod_tpe_reset,
+ hw_mod_tpe_present
+ },
+};
+
+#define MOD_COUNT (ARRAY_SIZE(module))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...)
+{
+#define MAX_SETS 38
+ void *base = NULL;
+ void **plist[MAX_SETS];
+ int len[MAX_SETS];
+ int offs[MAX_SETS];
+ unsigned int total_bytes = 0;
+ int cnt, elem_size;
+
+ assert(sets <= MAX_SETS);
+ assert(sets > 0);
+
+ va_list args;
+
+ va_start(args, sets);
+
+ for (int i = 0; i < sets; i++) {
+ plist[i] = va_arg(args, void *);
+ cnt = va_arg(args, int);
+ elem_size = va_arg(args, int);
+ offs[i] = EXTRA_INDEXES * elem_size;
+ len[i] = offs[i] + cnt * elem_size;
+ total_bytes += len[i];
+ }
+ base = calloc(1, total_bytes);
+ if (base) {
+ char *p_b = (char *)base;
+
+ for (int i = 0; i < sets; i++) {
+ (*plist[i]) = (void *)((char *)p_b + offs[i]);
+ p_b += len[i];
+ }
+ } else {
+ NT_LOG(ERR, FILTER, "ERROR: module memory allocation failed\n");
+ }
+
+ va_end(args);
+
+ mod->base = base;
+ mod->allocated_size = total_bytes;
+
+ return base;
+}
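+
+/*
+ * Usage sketch (hypothetical module with two cached tables); note the
+ * element sizes are passed as int to match the va_arg() reads above:
+ *
+ *   struct mod_s {
+ *           COMMON_FUNC_INFO_S;
+ *           struct rcp_s *rcp;
+ *           uint32_t *cnt;
+ *   } m;
+ *
+ *   if (!callocate_mod(CAST_COMMON(&m), 2,
+ *                      &m.rcp, nb_rcp, (int)sizeof(struct rcp_s),
+ *                      &m.cnt, nb_cnt, (int)sizeof(uint32_t)))
+ *           return -1;
+ */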
+
+void zero_module_cache(struct common_func_s *mod)
+{
+ memset(mod->base, 0, mod->allocated_size);
+}
+
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+ const struct flow_api_backend_ops *iface,
+ void *be_dev)
+{
+ assert(dev);
+ dev->iface = iface;
+ dev->be_dev = be_dev;
+ dev->num_phy_ports = iface->get_nb_phy_port(be_dev);
+ dev->num_rx_ports = iface->get_nb_rx_port(be_dev);
+ dev->max_categories = iface->get_nb_categories(be_dev);
+ dev->max_queues = iface->get_nb_queues(be_dev);
+
+ NT_LOG(DBG, FILTER,
+ "*************** FLOW REGISTER MODULES AND INITIALIZE - SET ALL TO DEFAULT *****************\n");
+	/*
+	 * Create the cached, version-independent SW representation of each
+	 * NIC module and reset it to defaults
+	 */
+ for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+ if (!module[mod].present(dev))
+ continue;
+ if (module[mod].allocate(dev) == 0 &&
+ module[mod].reset(dev) == 0) {
+ /* OK */
+ continue;
+ } else {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Initialization of NIC module failed : [ %s ]\n",
+ module[mod].name);
+ flow_api_backend_done(dev);
+ NT_LOG(ERR, FILTER,
+ "*************** Failed to create Binary Flow API *******************\n");
+ NT_LOG(ERR, FILTER,
+ "******** ERROR ERROR: Binary Flow API will not be available ********\n");
+ NT_LOG(ERR, FILTER,
+ "********************************************************************\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int flow_api_backend_reset(struct flow_api_backend_s *dev)
+{
+ assert(dev);
+
+ for (unsigned int mod = 0; mod < MOD_COUNT; mod++) {
+ if (module[mod].reset(dev) == 0) {
+ /* OK */
+ continue;
+ } else {
+ NT_LOG(ERR, FILTER,
+ "ERROR: Resetting NIC module failed : [ %s ]\n",
+ module[mod].name);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int flow_api_backend_done(struct flow_api_backend_s *dev)
+{
+ for (unsigned int mod = 0; mod < MOD_COUNT; mod++)
+ module[mod].free(dev);
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,1818 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_BACKEND_H__
+#define __FLOW_API_BACKEND_H__
+
+/*
+ * Flow API
+ * Direct access to NIC HW module memory and register fields in a
+ * module version independent representation
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "ntlog.h"
+
+/* supported module versions */
+#include "flow_api/hw_mod/hw_mod_cat_v18.h"
+#include "flow_api/hw_mod/hw_mod_cat_v21.h"
+#include "flow_api/hw_mod/hw_mod_cat_v22.h"
+#include "flow_api/hw_mod/hw_mod_flm_v17.h"
+#include "flow_api/hw_mod/hw_mod_flm_v20.h"
+#include "flow_api/hw_mod/hw_mod_hst_v2.h"
+#include "flow_api/hw_mod/hw_mod_km_v7.h"
+#include "flow_api/hw_mod/hw_mod_qsl_v7.h"
+#include "flow_api/hw_mod/hw_mod_pdb_v9.h"
+#include "flow_api/hw_mod/hw_mod_slc_v1.h"
+#include "flow_api/hw_mod/hw_mod_slc_lr_v2.h"
+#include "flow_api/hw_mod/hw_mod_roa_v6.h"
+#include "flow_api/hw_mod/hw_mod_hsh_v5.h"
+#include "flow_api/hw_mod/hw_mod_ioa_v4.h"
+#include "flow_api/hw_mod/hw_mod_rmc_v1_3.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v1.h"
+#include "flow_api/hw_mod/hw_mod_tpe_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PHYS_ADAPTERS 8
+
+#define VER_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define VER_MINOR(ver) ((ver) & 0xffff)
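+/* e.g. ver == 0x00120003 -> version 18.3 */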
+
+struct flow_api_backend_s;
+struct common_func_s;
+
+#define CAST_COMMON(mod) ((struct common_func_s *)(mod))
+
+void *callocate_mod(struct common_func_s *mod, int sets, ...);
+void zero_module_cache(struct common_func_s *mod);
+
+#define ZERO_MOD_CACHE(mod) (zero_module_cache(CAST_COMMON(mod)))
+
+#define ALL_ENTRIES -1000
+#define ALL_BANK_ENTRIES -1001
+
+static inline int error_index_too_large(const char *func)
+{
+ NT_LOG(INF, FILTER, "ERROR:%s: Index too large\n", func);
+ return -2;
+}
+
+static inline int error_word_off_too_large(const char *func)
+{
+ NT_LOG(INF, FILTER, "ERROR:%s: Word offset too large\n", func);
+ return -3;
+}
+
+static inline int error_unsup_ver(const char *func, const char *mod, int ver)
+{
+ NT_LOG(INF, FILTER, "ERROR:%s: Unsupported NIC module: %s ver %i.%i\n",
+ func, mod, VER_MAJOR(ver), VER_MINOR(ver));
+ return -4;
+}
+
+static inline int error_unsup_field(const char *func)
+{
+ NT_LOG(INF, FILTER, "ERROR:%s: Unsupported field in NIC module\n",
+ func);
+ return -5;
+}
+
+static inline int error_resource_count(const char *func, const char *resource,
+ const char *mod, int ver)
+{
+ NT_LOG(INF, FILTER,
+	       "ERROR:%s: Insufficient resource [ %s ] : NIC module: %s ver %i.%i\n",
+ func, resource, mod, VER_MAJOR(ver), VER_MINOR(ver));
+ return -4;
+}
+
+#define NOT_FOUND 0xffffffff
+
+enum { EXTRA_INDEXES };	/* extra (shadow) entries reserved in front of each cached table */
+#define COPY_INDEX (EXTRA_INDEX_COPY - EXTRA_INDEXES)
+
+static inline void get_set(uint32_t *cached_val, uint32_t *val, int get)
+{
+ if (get)
+ *val = *cached_val;
+ else
+ *cached_val = *val;
+}
+
+static inline void get_set_signed(int32_t *cached_val, uint32_t *val, int get)
+{
+ if (get)
+ *val = (uint32_t)*cached_val;
+ else
+ *cached_val = (int32_t)*val;
+}
+
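+/*
+ * Scan helper backing the *_FIND module functions: search a module's
+ * cached register array for an entry byte-identical to the one at 'idx'.
+ * Only valid on a 'get'; *value receives the matching index, or NOT_FOUND
+ * when no other entry matches.
+ */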
+static inline int find_equal_index(void *be_module_reg,
+ unsigned int type_size, unsigned int idx, unsigned int start,
+ unsigned int nb_elements, uint32_t *value, int get, const char *func)
+{
+ unsigned int i;
+ if (!get)
+ return error_unsup_field(func);
+ *value = NOT_FOUND;
+ if (start >= nb_elements)
+ return error_index_too_large(func);
+ for (i = start; i < nb_elements; i++) {
+ if (idx == i)
+ continue;
+ if (memcmp((uint8_t *)be_module_reg + idx * type_size,
+ (uint8_t *)be_module_reg + i * type_size,
+ type_size) == 0) {
+ *value = i;
+ break;
+ }
+ }
+ return 0;
+}
+
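+/*
+ * Helper backing the *_COMPARE module functions: compare entry 'idx'
+ * against the single entry 'cmp_idx'; returns 1 on a byte-identical
+ * match, 0 otherwise, negative on error.
+ */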
+static inline int do_compare_indexes(void *be_module_reg,
+ unsigned int type_size, unsigned int idx, unsigned int cmp_idx,
+ unsigned int nb_elements, int get, const char *func)
+{
+ if (!get)
+ return error_unsup_field(func);
+ if (cmp_idx >= nb_elements)
+ return error_index_too_large(func);
+ if (idx != cmp_idx &&
+ (memcmp((uint8_t *)be_module_reg + idx * type_size,
+ (uint8_t *)be_module_reg + cmp_idx * type_size,
+ type_size) == 0))
+ return 1;
+ return 0;
+}
+
+static inline int is_non_zero(const void *addr, size_t n)
+{
+ size_t i = 0;
+ const uint8_t *p = (const uint8_t *)addr;
+
+ for (i = 0; i < n; i++) {
+ if (p[i] != 0)
+ return 1;
+ }
+ return 0;
+}
+
+static inline int is_all_bits_set(const void *addr, size_t n)
+{
+ size_t i = 0;
+ const uint8_t *p = (const uint8_t *)addr;
+
+ for (i = 0; i < n; i++) {
+ if (p[i] != 0xff)
+ return 0;
+ }
+ return 1;
+}
+
+enum cte_index_e {
+ CT_COL = 0,
+ CT_COR = 1,
+ CT_HSH = 2,
+ CT_QSL = 3,
+ CT_IPF = 4,
+ CT_SLC = 5,
+ CT_PDB = 6,
+ CT_MSK = 7,
+ CT_HST = 8,
+ CT_EPP = 9,
+ CT_TPE = 10,
+ CT_RRB = 11,
+ CT_CNT
+};
+
+/* Sideband info bit indicator */
+#define SWX_INFO (1 << 6)
+
+enum frame_offs_e {
+ DYN_SOF = 0,
+ DYN_L2 = 1,
+ DYN_FIRST_VLAN = 2,
+ DYN_MPLS = 3,
+ DYN_L3 = 4,
+ DYN_ID_IPV4_6 = 5,
+ DYN_FINAL_IP_DST = 6,
+ DYN_L4 = 7,
+ DYN_L4_PAYLOAD = 8,
+ DYN_TUN_PAYLOAD = 9,
+ DYN_TUN_L2 = 10,
+ DYN_TUN_VLAN = 11,
+ DYN_TUN_MPLS = 12,
+ DYN_TUN_L3 = 13,
+ DYN_TUN_ID_IPV4_6 = 14,
+ DYN_TUN_FINAL_IP_DST = 15,
+ DYN_TUN_L4 = 16,
+ DYN_TUN_L4_PAYLOAD = 17,
+ DYN_EOF = 18,
+ DYN_L3_PAYLOAD_END = 19,
+ DYN_TUN_L3_PAYLOAD_END = 20,
+ SB_VNI = SWX_INFO | 1,
+ SB_MAC_PORT = SWX_INFO | 2,
+ SB_KCC_ID = SWX_INFO | 3
+};
+
+enum km_flm_if_select_e { KM_FLM_IF_FIRST = 0, KM_FLM_IF_SECOND = 1 };
+
+enum {
+ QW0_SEL_EXCLUDE = 0,
+ QW0_SEL_FIRST32 = 1,
+ QW0_SEL_SECOND32 = 2,
+ QW0_SEL_FIRST64 = 3,
+ QW0_SEL_ALL128 = 4,
+};
+
+enum {
+ QW4_SEL_EXCLUDE = 0,
+ QW4_SEL_FIRST32 = 1,
+ QW4_SEL_FIRST64 = 2,
+ QW4_SEL_ALL128 = 3,
+};
+
+enum {
+ SW8_SEL_EXCLUDE = 0,
+ SW8_SEL_FIRST16 = 1,
+ SW8_SEL_SECOND16 = 2,
+ SW8_SEL_ALL32 = 3,
+};
+
+enum {
+ DW8_SEL_EXCLUDE = 0,
+ DW8_SEL_FIRST16 = 1,
+ DW8_SEL_SECOND16 = 2,
+ DW8_SEL_FIRST32 = 3,
+ DW8_SEL_FIRST32_SWAP16 = 4,
+ DW8_SEL_ALL64 = 5,
+};
+
+enum {
+ SW9_SEL_EXCLUDE = 0,
+ SW9_SEL_FIRST16 = 1,
+ SW9_SEL_ALL32 = 2,
+};
+
+enum {
+ DW10_SEL_EXCLUDE = 0,
+ DW10_SEL_FIRST16 = 1,
+ DW10_SEL_FIRST32 = 2,
+ DW10_SEL_ALL64 = 3,
+};
+
+enum {
+ SWX_SEL_EXCLUDE = 0,
+ SWX_SEL_ALL32 = 1,
+};
+
+enum {
+ PROT_OTHER = 0,
+ PROT_L2_ETH2 = 1,
+ PROT_L2_SNAP = 2,
+ PROT_L2_LLC = 3,
+ PROT_L2_RAW = 4,
+ PROT_L2_PPPOE_D = 5,
+ PROT_L2_PPOE_S = 6
+};
+
+enum { PROT_L3_IPV4 = 1, PROT_L3_IPV6 = 2 };
+
+enum { PROT_L4_TCP = 1, PROT_L4_UDP = 2, PROT_L4_SCTP = 3, PROT_L4_ICMP = 4 };
+
+enum {
+ PROT_TUN_IP_IN_IP = 1,
+ PROT_TUN_ETHER_IP = 2,
+ PROT_TUN_GREV0 = 3,
+ PROT_TUN_GREV1 = 4,
+ PROT_TUN_GTPV0U = 5,
+ PROT_TUN_GTPV1U = 6,
+ PROT_TUN_GTPV1C = 7,
+ PROT_TUN_GTPV2C = 8,
+ PROT_TUN_VXLAN = 9,
+ PROT_TUN_PSEUDO_WIRE = 10
+};
+
+enum { PROT_TUN_L2_OTHER = 0, PROT_TUN_L2_ETH2 = 1 };
+
+enum { PROT_TUN_L3_OTHER = 0, PROT_TUN_L3_IPV4 = 1, PROT_TUN_L3_IPV6 = 2 };
+
+enum {
+ PROT_TUN_L4_OTHER = 0,
+ PROT_TUN_L4_TCP = 1,
+ PROT_TUN_L4_UDP = 2,
+ PROT_TUN_L4_SCTP = 3,
+ PROT_TUN_L4_ICMP = 4
+};
+
+enum {
+ IP_FRAG_NOT_A_FRAG = 0,
+ IP_FRAG_FIRST = 1,
+ IP_FRAG_MIDDLE = 2,
+ IP_FRAG_LAST = 3
+};
+
+enum {
+ HASH_HASH_NONE = 0,
+ HASH_USER_DEFINED = 1,
+ HASH_LAST_MPLS_LABEL = 2,
+ HASH_ALL_MPLS_LABELS = 3,
+ HASH_2TUPLE = 4,
+ HASH_2TUPLESORTED = 5,
+ HASH_LAST_VLAN_ID = 6,
+ HASH_ALL_VLAN_IDS = 7,
+ HASH_5TUPLE = 8,
+ HASH_5TUPLESORTED = 9,
+ HASH_3TUPLE_GRE_V0 = 10,
+ HASH_3TUPLE_GRE_V0_SORTED = 11,
+ HASH_5TUPLE_SCTP = 12,
+ HASH_5TUPLE_SCTP_SORTED = 13,
+ HASH_3TUPLE_GTP_V0 = 14,
+ HASH_3TUPLE_GTP_V0_SORTED = 15,
+ HASH_3TUPLE_GTP_V1V2 = 16,
+ HASH_3TUPLE_GTP_V1V2_SORTED = 17,
+ HASH_HASHINNER_2TUPLE = 18,
+ HASH_HASHINNER_2TUPLESORTED = 19,
+ HASH_HASHINNER_5TUPLE = 20,
+ HASH_HASHINNER_5TUPLESORTED = 21,
+ HASH_KM = 30,
+ HASH_ROUND_ROBIN = 31,
+ HASH_OUTER_DST_IP = 32,
+ HASH_INNER_SRC_IP = 33,
+};
+
+enum {
+ CPY_SELECT_DSCP_IPV4 = 0,
+ CPY_SELECT_DSCP_IPV6 = 1,
+ CPY_SELECT_RQI_QFI = 2,
+ CPY_SELECT_IPV4 = 3,
+ CPY_SELECT_PORT = 4,
+ CPY_SELECT_TEID = 5,
+};
+
+#define RCK_CML(_comp_) (1 << ((_comp_) * 4))
+#define RCK_CMU(_comp_) (1 << ((_comp_) * 4 + 1))
+#define RCK_SEL(_comp_) (1 << ((_comp_) * 4 + 2))
+#define RCK_SEU(_comp_) (1 << ((_comp_) * 4 + 3))
+
+#define RCK_EXT(x) (((uint32_t)(x) << 6))
+
+#define FIELD_START_INDEX 100
+
+#define COMMON_FUNC_INFO_S \
+ int ver; \
+ void *base; \
+ unsigned int allocated_size; \
+ int debug
+
+struct common_func_s {
+ COMMON_FUNC_INFO_S;
+};
+
+struct cat_func_s {
+ COMMON_FUNC_INFO_S;
+ uint32_t nb_cat_funcs;
+ uint32_t nb_flow_types;
+ uint32_t nb_pm_ext;
+ uint32_t nb_len;
+ uint32_t kcc_size;
+ uint32_t cts_num;
+ uint32_t kcc_banks;
+ uint32_t kcc_id_bit_size;
+ uint32_t kcc_records;
+ uint32_t km_if_count;
+ int32_t km_if_m0;
+ int32_t km_if_m1;
+
+ union {
+ struct hw_mod_cat_v18_s v18;
+ struct hw_mod_cat_v21_s v21;
+ struct hw_mod_cat_v22_s v22;
+ };
+};
+
+enum hw_cat_e {
+ /*
+ * functions initial CAT v18
+ */
+ /* 00 */ HW_CAT_CFN_SET_ALL_DEFAULTS = 0,
+ /* 01 */ HW_CAT_CFN_PRESET_ALL,
+ /* 02 */ HW_CAT_CFN_COMPARE,
+ /* 03 */ HW_CAT_CFN_FIND,
+ /* 04 */ HW_CAT_CFN_COPY_FROM,
+ /* 05 */ HW_CAT_COT_PRESET_ALL,
+ /* 06 */ HW_CAT_COT_COMPARE,
+ /* 07 */ HW_CAT_COT_FIND,
+ /* fields */
+ /* 00 */ HW_CAT_CFN_ENABLE = FIELD_START_INDEX,
+ /* 01 */ HW_CAT_CFN_INV,
+ /* 02 */ HW_CAT_CFN_PTC_INV,
+ /* 03 */ HW_CAT_CFN_PTC_ISL,
+ /* 04 */ HW_CAT_CFN_PTC_CFP,
+ /* 05 */ HW_CAT_CFN_PTC_MAC,
+ /* 06 */ HW_CAT_CFN_PTC_L2,
+ /* 07 */ HW_CAT_CFN_PTC_VNTAG,
+ /* 08 */ HW_CAT_CFN_PTC_VLAN,
+ /* 09 */ HW_CAT_CFN_PTC_MPLS,
+ /* 10 */ HW_CAT_CFN_PTC_L3,
+ /* 11 */ HW_CAT_CFN_PTC_FRAG,
+ /* 12 */ HW_CAT_CFN_PTC_IP_PROT,
+ /* 13 */ HW_CAT_CFN_PTC_L4,
+ /* 14 */ HW_CAT_CFN_PTC_TUNNEL,
+ /* 15 */ HW_CAT_CFN_PTC_TNL_L2,
+ /* 16 */ HW_CAT_CFN_PTC_TNL_VLAN,
+ /* 17 */ HW_CAT_CFN_PTC_TNL_MPLS,
+ /* 18 */ HW_CAT_CFN_PTC_TNL_L3,
+ /* 19 */ HW_CAT_CFN_PTC_TNL_FRAG,
+ /* 20 */ HW_CAT_CFN_PTC_TNL_IP_PROT,
+ /* 21 */ HW_CAT_CFN_PTC_TNL_L4,
+ /* 22 */ HW_CAT_CFN_ERR_INV,
+ /* 23 */ HW_CAT_CFN_ERR_CV,
+ /* 24 */ HW_CAT_CFN_ERR_FCS,
+ /* 25 */ HW_CAT_CFN_ERR_TRUNC,
+ /* 26 */ HW_CAT_CFN_ERR_L3_CS,
+ /* 27 */ HW_CAT_CFN_ERR_L4_CS,
+ /* 28 */ HW_CAT_CFN_MAC_PORT,
+ /* 29 */ HW_CAT_CFN_PM_CMP,
+ /* 30 */ HW_CAT_CFN_PM_DCT,
+ /* 31 */ HW_CAT_CFN_PM_EXT_INV,
+ /* 32 */ HW_CAT_CFN_PM_CMB,
+ /* 33 */ HW_CAT_CFN_PM_AND_INV,
+ /* 34 */ HW_CAT_CFN_PM_OR_INV,
+ /* 35 */ HW_CAT_CFN_PM_INV,
+ /* 36 */ HW_CAT_CFN_LC,
+ /* 37 */ HW_CAT_CFN_LC_INV,
+ /* 38 */ HW_CAT_CFN_KM0_OR,
+ /* 39 */ HW_CAT_CFN_KM1_OR,
+ /* 40 */ HW_CAT_KCE_ENABLE_BM,
+ /* 41 */ HW_CAT_KCS_CATEGORY,
+ /* 42 */ HW_CAT_FTE_ENABLE_BM,
+ /* 43 */ HW_CAT_CTE_ENABLE_BM,
+ /* 44 */ HW_CAT_CTS_CAT_A,
+ /* 45 */ HW_CAT_CTS_CAT_B,
+ /* 46 */ HW_CAT_COT_COLOR,
+ /* 47 */ HW_CAT_COT_KM,
+ /* 48 */ HW_CAT_CCT_COLOR,
+ /* 49 */ HW_CAT_CCT_KM,
+ /* 50 */ HW_CAT_KCC_KEY,
+ /* 51 */ HW_CAT_KCC_CATEGORY,
+ /* 52 */ HW_CAT_KCC_ID,
+ /* 53 */ HW_CAT_EXO_DYN,
+ /* 54 */ HW_CAT_EXO_OFS,
+ /* 55 */ HW_CAT_RCK_DATA,
+ /* 56 */ HW_CAT_LEN_LOWER,
+ /* 57 */ HW_CAT_LEN_UPPER,
+ /* 58 */ HW_CAT_LEN_DYN1,
+ /* 59 */ HW_CAT_LEN_DYN2,
+ /* 60 */ HW_CAT_LEN_INV,
+ /* 61 */ HW_CAT_CFN_ERR_TNL_L3_CS,
+ /* 62 */ HW_CAT_CFN_ERR_TNL_L4_CS,
+ /* 63 */ HW_CAT_CFN_ERR_TTL_EXP,
+ /* 64 */ HW_CAT_CFN_ERR_TNL_TTL_EXP,
+
+ /* 65 */ HW_CAT_CCE_IMM,
+ /* 66 */ HW_CAT_CCE_IND,
+ /* 67 */ HW_CAT_CCS_COR_EN,
+ /* 68 */ HW_CAT_CCS_COR,
+ /* 69 */ HW_CAT_CCS_HSH_EN,
+ /* 70 */ HW_CAT_CCS_HSH,
+ /* 71 */ HW_CAT_CCS_QSL_EN,
+ /* 72 */ HW_CAT_CCS_QSL,
+ /* 73 */ HW_CAT_CCS_IPF_EN,
+ /* 74 */ HW_CAT_CCS_IPF,
+ /* 75 */ HW_CAT_CCS_SLC_EN,
+ /* 76 */ HW_CAT_CCS_SLC,
+ /* 77 */ HW_CAT_CCS_PDB_EN,
+ /* 78 */ HW_CAT_CCS_PDB,
+ /* 79 */ HW_CAT_CCS_MSK_EN,
+ /* 80 */ HW_CAT_CCS_MSK,
+ /* 81 */ HW_CAT_CCS_HST_EN,
+ /* 82 */ HW_CAT_CCS_HST,
+ /* 83 */ HW_CAT_CCS_EPP_EN,
+ /* 84 */ HW_CAT_CCS_EPP,
+ /* 85 */ HW_CAT_CCS_TPE_EN,
+ /* 86 */ HW_CAT_CCS_TPE,
+ /* 87 */ HW_CAT_CCS_RRB_EN,
+ /* 88 */ HW_CAT_CCS_RRB,
+ /* 89 */ HW_CAT_CCS_SB0_TYPE,
+ /* 90 */ HW_CAT_CCS_SB0_DATA,
+ /* 91 */ HW_CAT_CCS_SB1_TYPE,
+ /* 92 */ HW_CAT_CCS_SB1_DATA,
+ /* 93 */ HW_CAT_CCS_SB2_TYPE,
+ /* 94 */ HW_CAT_CCS_SB2_DATA,
+
+};
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be);
+int hw_mod_cat_alloc(struct flow_api_backend_s *be);
+void hw_mod_cat_free(struct flow_api_backend_s *be);
+int hw_mod_cat_reset(struct flow_api_backend_s *be);
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, int word_off, uint32_t value);
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, int word_off, uint32_t *value);
+/* KCE/KCS/FTE KM */
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int start_idx,
+ int count);
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t value);
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t *value);
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int start_idx,
+ int count);
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t value);
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t *value);
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int start_idx,
+ int count);
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t value);
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t *value);
+/* KCE/KCS/FTE FLM */
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int start_idx,
+ int count);
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t value);
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t *value);
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int start_idx,
+ int count);
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t value);
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t *value);
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int start_idx,
+ int count);
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t value);
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t *value);
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value);
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value);
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value);
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value);
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value);
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value);
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value);
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value);
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, int word_off, uint32_t value);
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, int word_off, uint32_t *value);
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value);
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value);
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value);
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value);
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value);
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value);
+/* added in v22 */
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value);
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value);
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value);
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value);
+
+struct km_func_s {
+ COMMON_FUNC_INFO_S;
+ uint32_t nb_categories;
+ uint32_t nb_cam_banks;
+ uint32_t nb_cam_record_words;
+ uint32_t nb_cam_records;
+ uint32_t nb_tcam_banks;
+ uint32_t nb_tcam_bank_width;
+ /* not read from backend, but rather set using version */
+ uint32_t nb_km_rcp_mask_a_word_size;
+ uint32_t nb_km_rcp_mask_b_word_size;
+ union {
+ struct hw_mod_km_v7_s v7;
+ };
+};
+
+enum hw_km_e {
+ /* functions */
+ HW_KM_RCP_PRESET_ALL = 0,
+ HW_KM_CAM_PRESET_ALL,
+ /* to sync and reset hw with cache - force write all entries in a bank */
+ HW_KM_TCAM_BANK_RESET,
+ /* fields */
+ HW_KM_RCP_QW0_DYN = FIELD_START_INDEX,
+ HW_KM_RCP_QW0_OFS,
+ HW_KM_RCP_QW0_SEL_A,
+ HW_KM_RCP_QW0_SEL_B,
+ HW_KM_RCP_QW4_DYN,
+ HW_KM_RCP_QW4_OFS,
+ HW_KM_RCP_QW4_SEL_A,
+ HW_KM_RCP_QW4_SEL_B,
+ HW_KM_RCP_DW8_DYN,
+ HW_KM_RCP_DW8_OFS,
+ HW_KM_RCP_DW8_SEL_A,
+ HW_KM_RCP_DW8_SEL_B,
+ HW_KM_RCP_DW10_DYN,
+ HW_KM_RCP_DW10_OFS,
+ HW_KM_RCP_DW10_SEL_A,
+ HW_KM_RCP_DW10_SEL_B,
+ HW_KM_RCP_SWX_CCH,
+ HW_KM_RCP_SWX_SEL_A,
+ HW_KM_RCP_SWX_SEL_B,
+ HW_KM_RCP_MASK_A,
+ HW_KM_RCP_MASK_B,
+ HW_KM_RCP_DUAL,
+ HW_KM_RCP_PAIRED,
+ HW_KM_RCP_EL_A,
+ HW_KM_RCP_EL_B,
+ HW_KM_RCP_INFO_A,
+ HW_KM_RCP_INFO_B,
+ HW_KM_RCP_FTM_A,
+ HW_KM_RCP_FTM_B,
+ HW_KM_RCP_BANK_A,
+ HW_KM_RCP_BANK_B,
+ HW_KM_RCP_KL_A,
+ HW_KM_RCP_KL_B,
+ HW_KM_RCP_KEYWAY_A,
+ HW_KM_RCP_KEYWAY_B,
+ HW_KM_RCP_SYNERGY_MODE,
+ HW_KM_RCP_DW0_B_DYN,
+ HW_KM_RCP_DW0_B_OFS,
+ HW_KM_RCP_DW2_B_DYN,
+ HW_KM_RCP_DW2_B_OFS,
+ HW_KM_RCP_SW4_B_DYN,
+ HW_KM_RCP_SW4_B_OFS,
+ HW_KM_RCP_SW5_B_DYN,
+ HW_KM_RCP_SW5_B_OFS,
+ HW_KM_CAM_W0,
+ HW_KM_CAM_W1,
+ HW_KM_CAM_W2,
+ HW_KM_CAM_W3,
+ HW_KM_CAM_W4,
+ HW_KM_CAM_W5,
+ HW_KM_CAM_FT0,
+ HW_KM_CAM_FT1,
+ HW_KM_CAM_FT2,
+ HW_KM_CAM_FT3,
+ HW_KM_CAM_FT4,
+ HW_KM_CAM_FT5,
+ HW_KM_TCAM_T,
+ HW_KM_TCI_COLOR,
+ HW_KM_TCI_FT,
+ HW_KM_TCQ_BANK_MASK,
+ HW_KM_TCQ_QUAL
+};
+
+bool hw_mod_km_present(struct flow_api_backend_s *be);
+int hw_mod_km_alloc(struct flow_api_backend_s *be);
+void hw_mod_km_free(struct flow_api_backend_s *be);
+int hw_mod_km_reset(struct flow_api_backend_s *be);
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+ int index, int word_off, uint32_t value);
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+ int index, int word_off, uint32_t *value);
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+ int start_record, int count);
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t value);
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t *value);
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+ int count);
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int byte, int byte_val, uint32_t *value_set);
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+ int start_record, int count);
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t value);
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+ int start_record, int count);
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t *value);
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t *value);
+
+struct hst_func_s {
+ COMMON_FUNC_INFO_S;
+ uint32_t nb_hst_rcp_categories;
+ union {
+ struct hw_mod_hst_v2_s v2;
+ };
+};
+
+enum hw_hst_e {
+ /* functions */
+ HW_HST_RCP_PRESET_ALL = 0,
+ HW_HST_RCP_FIND,
+ HW_HST_RCP_COMPARE,
+ /* Control fields */
+ HW_HST_RCP_STRIP_MODE = FIELD_START_INDEX,
+ HW_HST_RCP_START_DYN,
+ HW_HST_RCP_START_OFS,
+ HW_HST_RCP_END_DYN,
+ HW_HST_RCP_END_OFS,
+ HW_HST_RCP_MODIF0_CMD,
+ HW_HST_RCP_MODIF0_DYN,
+ HW_HST_RCP_MODIF0_OFS,
+ HW_HST_RCP_MODIF0_VALUE,
+ HW_HST_RCP_MODIF1_CMD,
+ HW_HST_RCP_MODIF1_DYN,
+ HW_HST_RCP_MODIF1_OFS,
+ HW_HST_RCP_MODIF1_VALUE,
+ HW_HST_RCP_MODIF2_CMD,
+ HW_HST_RCP_MODIF2_DYN,
+ HW_HST_RCP_MODIF2_OFS,
+ HW_HST_RCP_MODIF2_VALUE,
+
+};
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be);
+int hw_mod_hst_alloc(struct flow_api_backend_s *be);
+void hw_mod_hst_free(struct flow_api_backend_s *be);
+int hw_mod_hst_reset(struct flow_api_backend_s *be);
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+ int index, uint32_t value);
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+ int index, uint32_t *value);
+
+struct flm_func_s {
+ COMMON_FUNC_INFO_S;
+ uint32_t nb_categories;
+ uint32_t nb_size_mb;
+ uint32_t nb_entry_size;
+ uint32_t nb_variant;
+ uint32_t nb_prios;
+ uint32_t nb_pst_profiles;
+ union {
+ struct hw_mod_flm_v17_s v17;
+ struct hw_mod_flm_v20_s v20;
+ };
+};
+
+enum hw_flm_e {
+ /* functions */
+ HW_FLM_CONTROL_PRESET_ALL = 0,
+ HW_FLM_RCP_PRESET_ALL,
+ HW_FLM_FLOW_LRN_DATA_V17,
+ HW_FLM_FLOW_INF_DATA_V17,
+ HW_FLM_FLOW_STA_DATA_V17,
+ /* Control fields */
+ HW_FLM_CONTROL_ENABLE = FIELD_START_INDEX,
+ HW_FLM_CONTROL_INIT,
+ HW_FLM_CONTROL_LDS,
+ HW_FLM_CONTROL_LFS,
+ HW_FLM_CONTROL_LIS,
+ HW_FLM_CONTROL_UDS,
+ HW_FLM_CONTROL_UIS,
+ HW_FLM_CONTROL_RDS,
+ HW_FLM_CONTROL_RIS,
+ HW_FLM_CONTROL_PDS,
+ HW_FLM_CONTROL_PIS,
+ HW_FLM_CONTROL_CRCWR,
+ HW_FLM_CONTROL_CRCRD,
+ HW_FLM_CONTROL_RBL,
+ HW_FLM_CONTROL_EAB,
+ HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,
+ HW_FLM_STATUS_CALIBDONE,
+ HW_FLM_STATUS_INITDONE,
+ HW_FLM_STATUS_IDLE,
+ HW_FLM_STATUS_CRITICAL,
+ HW_FLM_STATUS_PANIC,
+ HW_FLM_STATUS_CRCERR,
+ HW_FLM_STATUS_EFT_BP,
+ HW_FLM_TIMEOUT_T,
+ HW_FLM_SCRUB_I,
+ HW_FLM_LOAD_BIN,
+ HW_FLM_LOAD_PPS,
+ HW_FLM_LOAD_LPS,
+ HW_FLM_LOAD_APS,
+ HW_FLM_PRIO_LIMIT0,
+ HW_FLM_PRIO_FT0,
+ HW_FLM_PRIO_LIMIT1,
+ HW_FLM_PRIO_FT1,
+ HW_FLM_PRIO_LIMIT2,
+ HW_FLM_PRIO_FT2,
+ HW_FLM_PRIO_LIMIT3,
+ HW_FLM_PRIO_FT3,
+ HW_FLM_PST_PRESET_ALL,
+ HW_FLM_PST_BP,
+ HW_FLM_PST_PP,
+ HW_FLM_PST_TP,
+ HW_FLM_RCP_LOOKUP,
+ HW_FLM_RCP_QW0_DYN,
+ HW_FLM_RCP_QW0_OFS,
+ HW_FLM_RCP_QW0_SEL,
+ HW_FLM_RCP_QW4_DYN,
+ HW_FLM_RCP_QW4_OFS,
+ HW_FLM_RCP_SW8_DYN,
+ HW_FLM_RCP_SW8_OFS,
+ HW_FLM_RCP_SW8_SEL,
+ HW_FLM_RCP_SW9_DYN,
+ HW_FLM_RCP_SW9_OFS,
+ HW_FLM_RCP_MASK,
+ HW_FLM_RCP_KID,
+ HW_FLM_RCP_OPN,
+ HW_FLM_RCP_IPN,
+ HW_FLM_RCP_BYT_DYN,
+ HW_FLM_RCP_BYT_OFS,
+ HW_FLM_RCP_TXPLM,
+ HW_FLM_RCP_AUTO_IPV4_MASK,
+ HW_FLM_BUF_CTRL_LRN_FREE,
+ HW_FLM_BUF_CTRL_INF_AVAIL,
+ HW_FLM_BUF_CTRL_STA_AVAIL,
+ HW_FLM_STAT_LRN_DONE,
+ HW_FLM_STAT_LRN_IGNORE,
+ HW_FLM_STAT_LRN_FAIL,
+ HW_FLM_STAT_UNL_DONE,
+ HW_FLM_STAT_UNL_IGNORE,
+ HW_FLM_STAT_REL_DONE,
+ HW_FLM_STAT_REL_IGNORE,
+ HW_FLM_STAT_PRB_DONE,
+ HW_FLM_STAT_PRB_IGNORE,
+ HW_FLM_STAT_AUL_DONE,
+ HW_FLM_STAT_AUL_IGNORE,
+ HW_FLM_STAT_AUL_FAIL,
+ HW_FLM_STAT_TUL_DONE,
+ HW_FLM_STAT_FLOWS,
+ HW_FLM_STAT_STA_DONE, /* module ver 0.20 */
+ HW_FLM_STAT_INF_DONE, /* module ver 0.20 */
+ HW_FLM_STAT_INF_SKIP, /* module ver 0.20 */
+ HW_FLM_STAT_PCK_HIT, /* module ver 0.20 */
+ HW_FLM_STAT_PCK_MISS, /* module ver 0.20 */
+ HW_FLM_STAT_PCK_UNH, /* module ver 0.20 */
+ HW_FLM_STAT_PCK_DIS, /* module ver 0.20 */
+ HW_FLM_STAT_CSH_HIT, /* module ver 0.20 */
+ HW_FLM_STAT_CSH_MISS, /* module ver 0.20 */
+ HW_FLM_STAT_CSH_UNH, /* module ver 0.20 */
+ HW_FLM_STAT_CUC_START, /* module ver 0.20 */
+ HW_FLM_STAT_CUC_MOVE, /* module ver 0.20 */
+};
+
+bool hw_mod_flm_present(struct flow_api_backend_s *be);
+int hw_mod_flm_alloc(struct flow_api_backend_s *be);
+void hw_mod_flm_free(struct flow_api_backend_s *be);
+int hw_mod_flm_reset(struct flow_api_backend_s *be);
+
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value);
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value);
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_status_update(struct flow_api_backend_s *be);
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value);
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value);
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value);
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value);
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value);
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value);
+
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value);
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value);
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value);
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value);
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value);
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value);
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value);
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value);
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be);
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value);
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value);
+
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ int index, uint32_t value);
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ int index, uint32_t *value);
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+ int index, uint32_t *value);
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ int index, uint32_t value);
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ int index, uint32_t *value);
+
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be);
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value);
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be);
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value);
+
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+ enum hw_flm_e field, const uint32_t *value);
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value,
+ uint32_t word_cnt);
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value);
+
+struct hsh_func_s {
+ COMMON_FUNC_INFO_S;
+ uint32_t nb_rcp;
+ union {
+ struct hw_mod_hsh_v5_s v5;
+ };
+};
+
+enum hw_hsh_e {
+ /* functions */
+ HW_HSH_RCP_PRESET_ALL = 0,
+ HW_HSH_RCP_COMPARE,
+ HW_HSH_RCP_FIND,
+ /* fields */
+ HW_HSH_RCP_LOAD_DIST_TYPE = FIELD_START_INDEX,
+ HW_HSH_RCP_MAC_PORT_MASK,
+ HW_HSH_RCP_SORT,
+ HW_HSH_RCP_QW0_PE,
+ HW_HSH_RCP_QW0_OFS,
+ HW_HSH_RCP_QW4_PE,
+ HW_HSH_RCP_QW4_OFS,
+ HW_HSH_RCP_W8_PE,
+ HW_HSH_RCP_W8_OFS,
+ HW_HSH_RCP_W8_SORT,
+ HW_HSH_RCP_W9_PE,
+ HW_HSH_RCP_W9_OFS,
+ HW_HSH_RCP_W9_SORT,
+ HW_HSH_RCP_W9_P,
+ HW_HSH_RCP_P_MASK,
+ HW_HSH_RCP_WORD_MASK,
+ HW_HSH_RCP_SEED,
+ HW_HSH_RCP_TNL_P,
+ HW_HSH_RCP_HSH_VALID,
+ HW_HSH_RCP_HSH_TYPE,
+ HW_HSH_RCP_AUTO_IPV4_MASK
+
+};
+
+bool hw_mod_hsh_present(struct flow_api_backend_s *be);
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be);
+void hw_mod_hsh_free(struct flow_api_backend_s *be);
+int hw_mod_hsh_reset(struct flow_api_backend_s *be);
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+ uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+ uint32_t index, uint32_t word_off, uint32_t *value);
+
+struct qsl_func_s {
+ COMMON_FUNC_INFO_S;
+ uint32_t nb_rcp_categories;
+ uint32_t nb_qst_entries;
+ union {
+ struct hw_mod_qsl_v7_s v7;
+ };
+};
+
+enum hw_qsl_e {
+ /* functions */
+ HW_QSL_RCP_PRESET_ALL = 0,
+ HW_QSL_RCP_COMPARE,
+ HW_QSL_RCP_FIND,
+ HW_QSL_QST_PRESET_ALL,
+ /* fields */
+ HW_QSL_RCP_DISCARD = FIELD_START_INDEX,
+ HW_QSL_RCP_DROP,
+ HW_QSL_RCP_TBL_LO,
+ HW_QSL_RCP_TBL_HI,
+ HW_QSL_RCP_TBL_IDX,
+ HW_QSL_RCP_TBL_MSK,
+ HW_QSL_RCP_LR,
+ HW_QSL_RCP_TSA,
+ HW_QSL_RCP_VLI,
+ HW_QSL_QST_QUEUE,
+ HW_QSL_QST_EN, /* Alias: HW_QSL_QST_QEN */
+ HW_QSL_QST_TX_PORT,
+ HW_QSL_QST_LRE,
+ HW_QSL_QST_TCI,
+ HW_QSL_QST_VEN,
+ HW_QSL_QEN_EN,
+ HW_QSL_UNMQ_DEST_QUEUE,
+ HW_QSL_UNMQ_EN,
+
+};
+
+bool hw_mod_qsl_present(struct flow_api_backend_s *be);
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be);
+void hw_mod_qsl_free(struct flow_api_backend_s *be);
+int hw_mod_qsl_reset(struct flow_api_backend_s *be);
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t value);
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t *value);
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t value);
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t *value);
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t value);
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t *value);
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t value);
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t *value);
+
+struct slc_func_s {
+ COMMON_FUNC_INFO_S;
+ union {
+ struct hw_mod_slc_v1_s v1;
+ };
+};
+
+enum hw_slc_e {
+ /* functions */
+ HW_SLC_RCP_PRESET_ALL = 0,
+ HW_SLC_RCP_COMPARE,
+ HW_SLC_RCP_FIND,
+ /* fields */
+ HW_SLC_RCP_SLC_EN = FIELD_START_INDEX,
+ HW_SLC_RCP_DYN,
+ HW_SLC_RCP_OFS,
+ HW_SLC_RCP_PCAP
+};
+
+bool hw_mod_slc_present(struct flow_api_backend_s *be);
+int hw_mod_slc_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_free(struct flow_api_backend_s *be);
+int hw_mod_slc_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+ uint32_t index, uint32_t value);
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+ uint32_t index, uint32_t *value);
+
+struct slc_lr_func_s {
+ COMMON_FUNC_INFO_S;
+ union {
+ struct hw_mod_slc_lr_v2_s v2;
+ };
+};
+
+enum hw_slc_lr_e {
+ /* functions */
+ HW_SLC_LR_RCP_PRESET_ALL = 0,
+ HW_SLC_LR_RCP_COMPARE,
+ HW_SLC_LR_RCP_FIND,
+ /* fields */
+ HW_SLC_LR_RCP_SLC_EN = FIELD_START_INDEX,
+ HW_SLC_LR_RCP_DYN,
+ HW_SLC_LR_RCP_OFS,
+ HW_SLC_LR_RCP_PCAP
+};
+
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be);
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be);
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+ uint32_t index, uint32_t value);
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+ uint32_t index, uint32_t *value);
+
+struct pdb_func_s {
+ COMMON_FUNC_INFO_S;
+ uint32_t nb_pdb_rcp_categories;
+
+ union {
+ struct hw_mod_pdb_v9_s v9;
+ };
+};
+
+enum hw_pdb_e {
+ /* functions */
+ HW_PDB_RCP_PRESET_ALL = 0,
+ HW_PDB_RCP_COMPARE,
+ HW_PDB_RCP_FIND,
+ /* fields */
+ HW_PDB_RCP_DESCRIPTOR = FIELD_START_INDEX,
+ HW_PDB_RCP_DESC_LEN,
+ HW_PDB_RCP_TX_PORT,
+ HW_PDB_RCP_TX_IGNORE,
+ HW_PDB_RCP_TX_NOW,
+ HW_PDB_RCP_CRC_OVERWRITE,
+ HW_PDB_RCP_ALIGN,
+ HW_PDB_RCP_OFS0_DYN,
+ HW_PDB_RCP_OFS0_REL,
+ HW_PDB_RCP_OFS1_DYN,
+ HW_PDB_RCP_OFS1_REL,
+ HW_PDB_RCP_OFS2_DYN,
+ HW_PDB_RCP_OFS2_REL,
+ HW_PDB_RCP_IP_PROT_TNL,
+ HW_PDB_RCP_PPC_HSH,
+ HW_PDB_RCP_DUPLICATE_EN,
+ HW_PDB_RCP_DUPLICATE_BIT,
+ HW_PDB_RCP_PCAP_KEEP_FCS,
+ HW_PDB_CONFIG_TS_FORMAT,
+ HW_PDB_CONFIG_PORT_OFS,
+};
+
+bool hw_mod_pdb_present(struct flow_api_backend_s *be);
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be);
+void hw_mod_pdb_free(struct flow_api_backend_s *be);
+int hw_mod_pdb_reset(struct flow_api_backend_s *be);
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+ uint32_t index, uint32_t value);
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+ uint32_t index, uint32_t *value);
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be);
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+ uint32_t value);
+
+struct ioa_func_s {
+ COMMON_FUNC_INFO_S;
+ uint32_t nb_rcp_categories;
+ uint32_t nb_roa_epp_entries;
+ union {
+ struct hw_mod_ioa_v4_s v4;
+ };
+};
+
+enum hw_ioa_e {
+ /* functions */
+ HW_IOA_RCP_PRESET_ALL = 0,
+ HW_IOA_RCP_COMPARE,
+ HW_IOA_RCP_FIND,
+ HW_IOA_ROA_EPP_PRESET_ALL,
+ HW_IOA_ROA_EPP_COMPARE,
+ HW_IOA_ROA_EPP_FIND,
+ /* fields */
+ HW_IOA_RCP_TUNNEL_POP = FIELD_START_INDEX,
+ HW_IOA_RCP_VLAN_POP,
+ HW_IOA_RCP_VLAN_PUSH,
+ HW_IOA_RCP_VLAN_VID,
+ HW_IOA_RCP_VLAN_DEI,
+ HW_IOA_RCP_VLAN_PCP,
+ HW_IOA_RCP_VLAN_TPID_SEL,
+ HW_IOA_RCP_QUEUE_OVERRIDE_EN,
+ HW_IOA_RCP_QUEUE_ID,
+ HW_IOA_CONFIG_CUST_TPID_0,
+ HW_IOA_CONFIG_CUST_TPID_1,
+ HW_IOA_ROA_EPP_PUSH_TUNNEL,
+ HW_IOA_ROA_EPP_TX_PORT,
+};
+
+bool hw_mod_ioa_present(struct flow_api_backend_s *be);
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be);
+void hw_mod_ioa_free(struct flow_api_backend_s *be);
+int hw_mod_ioa_reset(struct flow_api_backend_s *be);
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+ uint32_t index, uint32_t value);
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+ uint32_t index, uint32_t *value);
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+ uint32_t value);
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+ uint32_t index, uint32_t value);
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+ uint32_t index, uint32_t *value);
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+
+struct roa_func_s {
+ COMMON_FUNC_INFO_S;
+ uint32_t nb_tun_categories;
+ uint32_t nb_lag_entries;
+ union {
+ struct hw_mod_roa_v6_s v6;
+ };
+};
+
+enum hw_roa_e {
+ /* functions */
+ HW_ROA_TUNHDR_COMPARE = 0,
+ HW_ROA_TUNCFG_PRESET_ALL,
+ HW_ROA_TUNCFG_COMPARE,
+ HW_ROA_TUNCFG_FIND,
+ /* fields */
+ HW_ROA_TUNHDR = FIELD_START_INDEX,
+ HW_ROA_TUNCFG_TUN_LEN,
+ HW_ROA_TUNCFG_TUN_TYPE,
+ HW_ROA_TUNCFG_TUN_VLAN,
+ HW_ROA_TUNCFG_IP_TYPE,
+ HW_ROA_TUNCFG_IPCS_UPD,
+ HW_ROA_TUNCFG_IPCS_PRECALC,
+ HW_ROA_TUNCFG_IPTL_UPD,
+ HW_ROA_TUNCFG_IPTL_PRECALC,
+ HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD,
+ HW_ROA_TUNCFG_TX_LAG_IX,
+ HW_ROA_TUNCFG_RECIRCULATE,
+ HW_ROA_TUNCFG_PUSH_TUNNEL,
+ HW_ROA_TUNCFG_RECIRC_PORT,
+ HW_ROA_TUNCFG_RECIRC_BYPASS,
+ HW_ROA_CONFIG_FWD_RECIRCULATE,
+ HW_ROA_CONFIG_FWD_NORMAL_PCKS,
+ HW_ROA_CONFIG_FWD_TXPORT0,
+ HW_ROA_CONFIG_FWD_TXPORT1,
+ HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS,
+ HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS,
+ HW_ROA_LAGCFG_TXPHY_PORT,
+ HW_ROA_IGS_PKT_DROP,
+ HW_ROA_IGS_BYTE_DROP,
+ HW_ROA_RCC_PKT_DROP,
+ HW_ROA_RCC_BYTE_DROP,
+};
+
+bool hw_mod_roa_present(struct flow_api_backend_s *be);
+int hw_mod_roa_alloc(struct flow_api_backend_s *be);
+void hw_mod_roa_free(struct flow_api_backend_s *be);
+int hw_mod_roa_reset(struct flow_api_backend_s *be);
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t index, uint32_t word_off, uint32_t value);
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t index, uint32_t word_off, uint32_t *value);
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t index, uint32_t value);
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t index, uint32_t *value);
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t value);
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t *value);
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t index, uint32_t value);
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t index, uint32_t *value);
+int hw_mod_roa_igs_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t value);
+int hw_mod_roa_igs_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t *value);
+int hw_mod_roa_igs_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_igs_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t value);
+int hw_mod_roa_igs_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t *value);
+int hw_mod_roa_igs_byte_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_pkt_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t value);
+int hw_mod_roa_rcc_pkt_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t *value);
+int hw_mod_roa_rcc_pkt_flush(struct flow_api_backend_s *be);
+int hw_mod_roa_rcc_byte_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t value);
+int hw_mod_roa_rcc_byte_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t *value);
+int hw_mod_roa_rcc_byte_flush(struct flow_api_backend_s *be);
+
+struct rmc_func_s {
+ COMMON_FUNC_INFO_S;
+ union {
+ struct hw_mod_rmc_v1_3_s v1_3;
+ };
+};
+
+enum hw_rmc_e {
+ HW_RMC_BLOCK_STATT = FIELD_START_INDEX,
+ HW_RMC_BLOCK_KEEPA,
+ HW_RMC_BLOCK_RPP_SLICE,
+ HW_RMC_BLOCK_MAC_PORT,
+ HW_RMC_LAG_PHY_ODD_EVEN,
+};
+
+bool hw_mod_rmc_present(struct flow_api_backend_s *be);
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be);
+void hw_mod_rmc_free(struct flow_api_backend_s *be);
+int hw_mod_rmc_reset(struct flow_api_backend_s *be);
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+ uint32_t value);
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+ uint32_t *value);
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be);
+
+struct tpe_func_s {
+ COMMON_FUNC_INFO_S;
+ uint32_t nb_rcp_categories;
+ uint32_t nb_ifr_categories;
+ uint32_t nb_cpy_writers;
+ uint32_t nb_rpl_depth;
+ uint32_t nb_rpl_ext_categories;
+ union {
+ struct hw_mod_tpe_v1_s v1;
+ struct hw_mod_tpe_v2_s v2;
+ };
+};
+
+enum hw_tpe_e {
+ /* functions */
+ HW_TPE_PRESET_ALL = 0,
+ HW_TPE_FIND,
+ HW_TPE_COMPARE,
+ /* Control fields */
+ HW_TPE_RPP_RCP_EXP = FIELD_START_INDEX,
+ HW_TPE_IFR_RCP_EN,
+ HW_TPE_IFR_RCP_MTU,
+ HW_TPE_INS_RCP_DYN,
+ HW_TPE_INS_RCP_OFS,
+ HW_TPE_INS_RCP_LEN,
+ HW_TPE_RPL_RCP_DYN,
+ HW_TPE_RPL_RCP_OFS,
+ HW_TPE_RPL_RCP_LEN,
+ HW_TPE_RPL_RCP_RPL_PTR,
+ HW_TPE_RPL_RCP_EXT_PRIO,
+ HW_TPE_RPL_EXT_RPL_PTR,
+ HW_TPE_RPL_EXT_META_RPL_LEN, /* SW only */
+ HW_TPE_RPL_RPL_VALUE,
+ HW_TPE_CPY_RCP_READER_SELECT,
+ HW_TPE_CPY_RCP_DYN,
+ HW_TPE_CPY_RCP_OFS,
+ HW_TPE_CPY_RCP_LEN,
+ HW_TPE_HFU_RCP_LEN_A_WR,
+ HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,
+ HW_TPE_HFU_RCP_LEN_A_POS_DYN,
+ HW_TPE_HFU_RCP_LEN_A_POS_OFS,
+ HW_TPE_HFU_RCP_LEN_A_ADD_DYN,
+ HW_TPE_HFU_RCP_LEN_A_ADD_OFS,
+ HW_TPE_HFU_RCP_LEN_A_SUB_DYN,
+ HW_TPE_HFU_RCP_LEN_B_WR,
+ HW_TPE_HFU_RCP_LEN_B_POS_DYN,
+ HW_TPE_HFU_RCP_LEN_B_POS_OFS,
+ HW_TPE_HFU_RCP_LEN_B_ADD_DYN,
+ HW_TPE_HFU_RCP_LEN_B_ADD_OFS,
+ HW_TPE_HFU_RCP_LEN_B_SUB_DYN,
+ HW_TPE_HFU_RCP_LEN_C_WR,
+ HW_TPE_HFU_RCP_LEN_C_POS_DYN,
+ HW_TPE_HFU_RCP_LEN_C_POS_OFS,
+ HW_TPE_HFU_RCP_LEN_C_ADD_DYN,
+ HW_TPE_HFU_RCP_LEN_C_ADD_OFS,
+ HW_TPE_HFU_RCP_LEN_C_SUB_DYN,
+ HW_TPE_HFU_RCP_TTL_WR,
+ HW_TPE_HFU_RCP_TTL_POS_DYN,
+ HW_TPE_HFU_RCP_TTL_POS_OFS,
+ HW_TPE_HFU_RCP_CS_INF,
+ HW_TPE_HFU_RCP_L3_PRT,
+ HW_TPE_HFU_RCP_L3_FRAG,
+ HW_TPE_HFU_RCP_TUNNEL,
+ HW_TPE_HFU_RCP_L4_PRT,
+ HW_TPE_HFU_RCP_OUTER_L3_OFS,
+ HW_TPE_HFU_RCP_OUTER_L4_OFS,
+ HW_TPE_HFU_RCP_INNER_L3_OFS,
+ HW_TPE_HFU_RCP_INNER_L4_OFS,
+ HW_TPE_CSU_RCP_OUTER_L3_CMD,
+ HW_TPE_CSU_RCP_OUTER_L4_CMD,
+ HW_TPE_CSU_RCP_INNER_L3_CMD,
+ HW_TPE_CSU_RCP_INNER_L4_CMD,
+};
+
+bool hw_mod_tpe_present(struct flow_api_backend_s *be);
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be);
+void hw_mod_tpe_free(struct flow_api_backend_s *be);
+int hw_mod_tpe_reset(struct flow_api_backend_s *be);
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value);
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value);
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, int index, uint32_t value);
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, int index, uint32_t *value);
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value);
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value);
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value);
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value);
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value);
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value);
+
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value);
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value);
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value);
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value);
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value);
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value);
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value);
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value);
+
+enum debug_mode_e {
+ FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
+ FLOW_BACKEND_DEBUG_MODE_WRITE = 0x0001
+};
+
+struct flow_api_backend_ops {
+ int version;
+ int (*set_debug_mode)(void *dev, enum debug_mode_e mode);
+ int (*get_nb_phy_port)(void *dev);
+ int (*get_nb_rx_port)(void *dev);
+ int (*get_ltx_avail)(void *dev);
+ int (*get_nb_cat_funcs)(void *dev);
+ int (*get_nb_categories)(void *dev);
+ int (*get_nb_cat_km_if_cnt)(void *dev);
+ int (*get_nb_cat_km_if_m0)(void *dev);
+ int (*get_nb_cat_km_if_m1)(void *dev);
+
+ int (*get_nb_queues)(void *dev);
+ int (*get_nb_km_flow_types)(void *dev);
+ int (*get_nb_pm_ext)(void *dev);
+ int (*get_nb_len)(void *dev);
+ int (*get_kcc_size)(void *dev);
+ int (*get_kcc_banks)(void *dev);
+ int (*get_nb_km_categories)(void *dev);
+ int (*get_nb_km_cam_banks)(void *dev);
+ int (*get_nb_km_cam_record_words)(void *dev);
+ int (*get_nb_km_cam_records)(void *dev);
+ int (*get_nb_km_tcam_banks)(void *dev);
+ int (*get_nb_km_tcam_bank_width)(void *dev);
+ int (*get_nb_flm_categories)(void *dev);
+ int (*get_nb_flm_size_mb)(void *dev);
+ int (*get_nb_flm_entry_size)(void *dev);
+ int (*get_nb_flm_variant)(void *dev);
+ int (*get_nb_flm_prios)(void *dev);
+ int (*get_nb_flm_pst_profiles)(void *dev);
+ int (*get_nb_hst_categories)(void *dev);
+ int (*get_nb_qsl_categories)(void *dev);
+ int (*get_nb_qsl_qst_entries)(void *dev);
+ int (*get_nb_pdb_categories)(void *dev);
+ int (*get_nb_ioa_categories)(void *dev);
+ int (*get_nb_roa_categories)(void *dev);
+ int (*get_nb_tpe_categories)(void *dev);
+ int (*get_nb_tx_cpy_writers)(void *dev);
+ int (*get_nb_tx_cpy_mask_mem)(void *dev);
+ int (*get_nb_tx_rpl_depth)(void *dev);
+ int (*get_nb_tx_rpl_ext_categories)(void *dev);
+ int (*get_nb_tpe_ifr_categories)(void *dev);
+
+ int (*alloc_rx_queue)(void *dev, int queue_id);
+ int (*free_rx_queue)(void *dev, int hw_queue);
+
+ /* CAT */
+ bool (*get_cat_present)(void *dev);
+ uint32_t (*get_cat_version)(void *dev);
+ int (*cat_cfn_flush)(void *dev, const struct cat_func_s *cat,
+ int cat_func, int cnt);
+ int (*cat_kce_flush)(void *dev, const struct cat_func_s *cat,
+ int km_if_idx, int index, int cnt);
+ int (*cat_kcs_flush)(void *dev, const struct cat_func_s *cat,
+ int km_if_idx, int cat_func, int cnt);
+ int (*cat_fte_flush)(void *dev, const struct cat_func_s *cat,
+ int km_if_idx, int index, int cnt);
+ int (*cat_cte_flush)(void *dev, const struct cat_func_s *cat,
+ int cat_func, int cnt);
+ int (*cat_cts_flush)(void *dev, const struct cat_func_s *cat, int index,
+ int cnt);
+ int (*cat_cot_flush)(void *dev, const struct cat_func_s *cat,
+ int cat_func, int cnt);
+ int (*cat_cct_flush)(void *dev, const struct cat_func_s *cat, int index,
+ int cnt);
+ int (*cat_exo_flush)(void *dev, const struct cat_func_s *cat, int index,
+ int cnt);
+ int (*cat_rck_flush)(void *dev, const struct cat_func_s *cat, int index,
+ int cnt);
+ int (*cat_len_flush)(void *dev, const struct cat_func_s *cat, int index,
+ int cnt);
+ int (*cat_kcc_flush)(void *dev, const struct cat_func_s *cat, int index,
+ int cnt);
+ int (*cat_cce_flush)(void *dev, const struct cat_func_s *cat, int index,
+ int cnt);
+ int (*cat_ccs_flush)(void *dev, const struct cat_func_s *cat, int index,
+ int cnt);
+
+ /* KM */
+ bool (*get_km_present)(void *dev);
+ uint32_t (*get_km_version)(void *dev);
+ int (*km_rcp_flush)(void *dev, const struct km_func_s *km, int category,
+ int cnt);
+ int (*km_cam_flush)(void *dev, const struct km_func_s *km, int bank,
+ int record, int cnt);
+ int (*km_tcam_flush)(void *dev, const struct km_func_s *km, int bank,
+ int byte, int value, int cnt);
+ int (*km_tci_flush)(void *dev, const struct km_func_s *km, int bank,
+ int record, int cnt);
+ int (*km_tcq_flush)(void *dev, const struct km_func_s *km, int bank,
+ int record, int cnt);
+
+ /* FLM */
+ bool (*get_flm_present)(void *dev);
+ uint32_t (*get_flm_version)(void *dev);
+ int (*flm_control_flush)(void *dev, const struct flm_func_s *flm);
+ int (*flm_status_flush)(void *dev, const struct flm_func_s *flm);
+ int (*flm_status_update)(void *dev, const struct flm_func_s *flm);
+ int (*flm_timeout_flush)(void *dev, const struct flm_func_s *flm);
+ int (*flm_scrub_flush)(void *dev, const struct flm_func_s *flm);
+ int (*flm_load_bin_flush)(void *dev, const struct flm_func_s *flm);
+ int (*flm_load_pps_flush)(void *dev, const struct flm_func_s *flm);
+ int (*flm_load_lps_flush)(void *dev, const struct flm_func_s *flm);
+ int (*flm_load_aps_flush)(void *dev, const struct flm_func_s *flm);
+ int (*flm_prio_flush)(void *dev, const struct flm_func_s *flm);
+ int (*flm_pst_flush)(void *dev, const struct flm_func_s *flm, int index,
+ int cnt);
+ int (*flm_rcp_flush)(void *dev, const struct flm_func_s *flm, int index,
+ int cnt);
+ int (*flm_buf_ctrl_update)(void *dev, const struct flm_func_s *flm);
+ int (*flm_stat_update)(void *dev, const struct flm_func_s *flm);
+ int (*flm_lrn_data_flush)(void *be_dev, const struct flm_func_s *flm,
+ const uint32_t *lrn_data, uint32_t size);
+ int (*flm_inf_data_update)(void *be_dev, const struct flm_func_s *flm,
+ uint32_t *lrn_data, uint32_t size);
+ int (*flm_sta_data_update)(void *be_dev, const struct flm_func_s *flm,
+ uint32_t *lrn_data, uint32_t size);
+
+ /* HSH */
+ bool (*get_hsh_present)(void *dev);
+ uint32_t (*get_hsh_version)(void *dev);
+ int (*hsh_rcp_flush)(void *dev, const struct hsh_func_s *hsh,
+ int category, int cnt);
+
+ /* HST */
+ bool (*get_hst_present)(void *dev);
+ uint32_t (*get_hst_version)(void *dev);
+ int (*hst_rcp_flush)(void *dev, const struct hst_func_s *hst,
+ int category, int cnt);
+
+ /* QSL */
+ bool (*get_qsl_present)(void *dev);
+ uint32_t (*get_qsl_version)(void *dev);
+ int (*qsl_rcp_flush)(void *dev, const struct qsl_func_s *qsl,
+ int category, int cnt);
+ int (*qsl_qst_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+ int cnt);
+ int (*qsl_qen_flush)(void *dev, const struct qsl_func_s *qsl, int entry,
+ int cnt);
+ int (*qsl_unmq_flush)(void *dev, const struct qsl_func_s *qsl,
+ int entry, int cnt);
+
+ /* SLC */
+ bool (*get_slc_present)(void *dev);
+ uint32_t (*get_slc_version)(void *dev);
+ int (*slc_rcp_flush)(void *dev, const struct slc_func_s *slc,
+ int category, int cnt);
+
+ /* SLC LR */
+ bool (*get_slc_lr_present)(void *dev);
+ uint32_t (*get_slc_lr_version)(void *dev);
+ int (*slc_lr_rcp_flush)(void *dev, const struct slc_lr_func_s *slc_lr,
+ int category, int cnt);
+
+ /* PDB */
+ bool (*get_pdb_present)(void *dev);
+ uint32_t (*get_pdb_version)(void *dev);
+ int (*pdb_rcp_flush)(void *dev, const struct pdb_func_s *pdb,
+ int category, int cnt);
+ int (*pdb_config_flush)(void *dev, const struct pdb_func_s *pdb);
+
+ /* IOA */
+ bool (*get_ioa_present)(void *dev);
+ uint32_t (*get_ioa_version)(void *dev);
+ int (*ioa_rcp_flush)(void *dev, const struct ioa_func_s *ioa, int index,
+ int cnt);
+ int (*ioa_special_tpid_flush)(void *dev, const struct ioa_func_s *ioa);
+ int (*ioa_roa_epp_flush)(void *dev, const struct ioa_func_s *ioa,
+ int index, int cnt);
+
+ /* ROA */
+ bool (*get_roa_present)(void *dev);
+ uint32_t (*get_roa_version)(void *dev);
+ int (*roa_tunhdr_flush)(void *dev, const struct roa_func_s *roa,
+ int index, int cnt);
+ int (*roa_tuncfg_flush)(void *dev, const struct roa_func_s *roa,
+ int index, int cnt);
+ int (*roa_config_flush)(void *dev, const struct roa_func_s *roa);
+ int (*roa_lagcfg_flush)(void *dev, const struct roa_func_s *roa,
+ int index, int cnt);
+
+ /* RMC */
+ bool (*get_rmc_present)(void *dev);
+ uint32_t (*get_rmc_version)(void *dev);
+ int (*rmc_ctrl_flush)(void *dev, const struct rmc_func_s *rmc);
+
+ /* TPE */
+ bool (*get_tpe_present)(void *dev);
+ uint32_t (*get_tpe_version)(void *dev);
+ int (*tpe_rpp_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+ int index, int cnt);
+ int (*tpe_rpp_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+ int index, int cnt);
+ int (*tpe_ifr_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+ int index, int cnt);
+ int (*tpe_ins_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+ int index, int cnt);
+ int (*tpe_rpl_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+ int index, int cnt);
+ int (*tpe_rpl_ext_flush)(void *dev, const struct tpe_func_s *tpe,
+ int index, int cnt);
+ int (*tpe_rpl_rpl_flush)(void *dev, const struct tpe_func_s *tpe,
+ int index, int cnt);
+ int (*tpe_cpy_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+ int index, int cnt);
+ int (*tpe_hfu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+ int index, int cnt);
+ int (*tpe_csu_rcp_flush)(void *dev, const struct tpe_func_s *tpe,
+ int index, int cnt);
+};
+
+struct flow_api_backend_s {
+ void *be_dev;
+ const struct flow_api_backend_ops *iface;
+
+ /* flow filter FPGA modules */
+ struct cat_func_s cat;
+ struct km_func_s km;
+ struct flm_func_s flm;
+ struct hsh_func_s hsh;
+ struct hst_func_s hst;
+ struct qsl_func_s qsl;
+ struct slc_func_s slc;
+ struct slc_lr_func_s slc_lr;
+ struct pdb_func_s pdb;
+ struct ioa_func_s ioa;
+ struct roa_func_s roa;
+ struct rmc_func_s rmc;
+ struct tpe_func_s tpe;
+
+ /* NIC attributes */
+ unsigned int num_phy_ports;
+ unsigned int num_rx_ports;
+
+ /* flow filter resource capacities */
+ unsigned int max_categories;
+ unsigned int max_queues;
+};
+
+int flow_api_backend_init(struct flow_api_backend_s *dev,
+ const struct flow_api_backend_ops *iface,
+ void *be_dev);
+int flow_api_backend_reset(struct flow_api_backend_s *dev);
+int flow_api_backend_done(struct flow_api_backend_s *dev);
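+
+/*
+ * Usage sketch (illustrative only; "my_be_dev" and the callback body are
+ * hypothetical): a physical backend fills in the flow_api_backend_ops
+ * table and passes it, together with its private device pointer, to
+ * flow_api_backend_init().
+ *
+ *	static int my_get_nb_phy_port(void *dev)
+ *	{
+ *		return ((struct my_be_dev *)dev)->nb_phy_ports;
+ *	}
+ *
+ *	static const struct flow_api_backend_ops my_ops = {
+ *		.version = 1,
+ *		.get_nb_phy_port = my_get_nb_phy_port,
+ *		// ... remaining callbacks omitted ...
+ *	};
+ *
+ *	struct flow_api_backend_s be;
+ *	int err = flow_api_backend_init(&be, &my_ops, my_be_dev);
+ */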
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_BACKEND_H__ */
new file mode 100644
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_API_ENGINE_H_
+#define _FLOW_API_ENGINE_H_
+
+#include <stdint.h>
+#include "stream_binary_flow_api.h"
+
+struct flow_elem;
+/*
+ * ****************************************************
+ * Resource management
+ * ****************************************************
+ */
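+/* Number of 8-bit containers (bytes) needed to hold x bits */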
+#define BIT_CONTAINER_8_ALIGN(x) (((x) + 7) / 8)
+
+/*
+ * Resource management
+ * These are the free resources in the FPGA.
+ * Other FPGA memory lists are linked to one of these
+ * and will implicitly follow them.
+ */
+enum res_type_e {
+ RES_QUEUE,
+ RES_CAT_CFN,
+ RES_CAT_COT,
+ RES_CAT_EXO,
+ RES_CAT_LEN,
+ RES_KM_FLOW_TYPE,
+ RES_KM_CATEGORY,
+ RES_HSH_RCP,
+ RES_PDB_RCP,
+ RES_QSL_RCP,
+ RES_QSL_QST,
+ RES_SLC_RCP,
+ RES_IOA_RCP,
+ RES_ROA_RCP,
+ RES_FLM_FLOW_TYPE,
+ RES_FLM_RCP,
+ RES_HST_RCP,
+ RES_TPE_RCP,
+ RES_TPE_EXT,
+ RES_TPE_RPL,
+ RES_COUNT,
+ RES_INVALID
+};
+
+/*
+ * ****************************************************
+ * Flow NIC offload management
+ * ****************************************************
+ */
+#define MAX_OUTPUT_DEST (128)
+#define NB_QSL_QEN_ADDR 32
+
+#define INVALID_FLOW_STAT_ID 0xffffffff
+
+#define MAX_WORD_NUM 24
+#define MAX_BANKS 6
+
+#define MAX_TCAM_START_OFFSETS 4
+
+#define MAX_TAG_INDEX 8
+
+#define MAX_FLM_MTRS_SUPPORTED 4
+#define MAX_CPY_WRITERS_SUPPORTED 8
+
+/*
+ *       128     128     32     32     32
+ * Have | QW0 || QW4 || SW8 || SW9 | SWX in FPGA
+ *
+ * Each word may start at any offset. The enabled words are combined in
+ * the order above to build the extracted match data, so the match key
+ * must be built the same way.
+ */
+enum extractor_e {
+ KM_USE_EXTRACTOR_UNDEF,
+ KM_USE_EXTRACTOR_QWORD,
+ KM_USE_EXTRACTOR_SWORD,
+};
+
+struct match_elem_s {
+ enum extractor_e extr;
+ int masked_for_tcam; /* if potentially selected for TCAM */
+ uint32_t e_word[4];
+ uint32_t e_mask[4];
+
+ int extr_start_offs_id;
+ int8_t rel_offs;
+ uint32_t word_len;
+};
+
+enum cam_tech_use_e { KM_CAM, KM_TCAM, KM_SYNERGY };
+
+#define MAX_MATCH_FIELDS 16
+
+struct km_flow_def_s {
+ struct flow_api_backend_s *be;
+
+ /* For keeping track of identical entries */
+ struct km_flow_def_s *reference;
+ struct km_flow_def_s *root;
+
+	/* For collecting flow elements and sorting */
+ struct match_elem_s match[MAX_MATCH_FIELDS];
+ struct match_elem_s *match_map[MAX_MATCH_FIELDS];
+ int num_ftype_elem;
+
+ /* Finally formatted CAM/TCAM entry */
+ enum cam_tech_use_e target;
+ uint32_t entry_word[MAX_WORD_NUM];
+ uint32_t entry_mask[MAX_WORD_NUM];
+ int key_word_size;
+
+ /* TCAM calculated possible bank start offsets */
+ int start_offsets[MAX_TCAM_START_OFFSETS];
+ int num_start_offsets;
+
+ /* Flow information */
+
+ /*
+	 * HW input port ID needed for compare. The input port must be
+	 * identical across flow types.
+ */
+ uint32_t port_id;
+ uint32_t info; /* used for color (actions) */
+ int info_set;
+ int flow_type; /* 0 is illegal and used as unset */
+ int flushed_to_target; /* if this km entry has been finally programmed into NIC hw */
+
+ /* CAM specific bank management */
+ int cam_paired;
+ int record_indexes[MAX_BANKS];
+ int bank_used;
+ uint32_t *cuckoo_moves; /* for CAM statistics only */
+ struct cam_distrib_s *cam_dist;
+ struct hasher_s *hsh;
+
+ /* TCAM specific bank management */
+ struct tcam_distrib_s *tcam_dist;
+ int tcam_start_bank;
+ int tcam_record;
+};
+
+/*
+ * KCC-CAM
+ */
+struct kcc_key_s {
+ uint64_t sb_data : 32;
+ uint64_t sb_type : 8;
+ uint64_t cat_cfn : 8;
+ uint64_t port : 16;
+};
+
+#define KCC_ID_INVALID 0xffffffff
+
+struct kcc_flow_def_s {
+ struct flow_api_backend_s *be;
+ union {
+ uint64_t key64;
+ uint32_t key32[2];
+ struct kcc_key_s key;
+ };
+ uint32_t km_category;
+ uint32_t id;
+
+ uint8_t *kcc_unique_ids;
+
+ int flushed_to_target;
+ int record_indexes[MAX_BANKS];
+ int bank_used;
+ uint32_t *cuckoo_moves; /* for CAM statistics only */
+ struct kcc_cam_distrib_s *cam_dist;
+ struct hasher_s *hsh;
+};
+
+/*
+ * Tunnel encapsulation header definition
+ */
+enum { TUN_TYPE_VXLAN = 0, TUN_TYPE_NVGRE = 1 };
+
+#define MAX_TUN_HDR_SIZE 128
+
+struct tunnel_header_s {
+ union {
+ uint8_t hdr8[MAX_TUN_HDR_SIZE];
+ uint32_t hdr32[(MAX_TUN_HDR_SIZE + 3) / 4];
+ } d;
+ uint32_t user_port_id;
+ uint8_t len;
+
+ uint8_t nb_vlans;
+
+ uint8_t ip_version; /* 4: v4, 6: v6 */
+ uint16_t ip_csum_precalc;
+
+ uint8_t new_outer;
+ uint8_t l2_len;
+ uint8_t l3_len;
+ uint8_t l4_len;
+};
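+
+/*
+ * Illustrative sketch (field semantics as defined above; values and the
+ * "raw_encap_bytes" buffer are hypothetical): preparing a VXLAN-over-IPv4
+ * push header.
+ *
+ *	struct tunnel_header_s tun = { 0 };
+ *
+ *	memcpy(tun.d.hdr8, raw_encap_bytes, encap_len);
+ *	tun.len = encap_len;	// Ethernet + IPv4 + UDP + VXLAN
+ *	tun.ip_version = 4;
+ *	tun.nb_vlans = 0;
+ *	tun.l2_len = 14;	// Ethernet, no VLAN
+ *	tun.l3_len = 20;	// IPv4 header, no options
+ *	tun.l4_len = 8;		// UDP header
+ */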
+
+enum port_type_e {
+ PORT_NONE, /* not defined or drop */
+ PORT_INTERNAL, /* no queues attached */
+ PORT_PHY, /* MAC phy output queue */
+ PORT_VIRT, /* Memory queues to Host */
+};
+
+enum special_partial_match_e {
+ SPCIAL_MATCH_NONE,
+ SPECIAL_MATCH_LACP,
+};
+
+#define PORT_ID_NONE 0xffffffff
+
+struct output_s {
+	uint32_t owning_port_id; /* the port that owns this output destination */
+	enum port_type_e type;
+	int id; /* depending on port type: queue ID, physical port ID, or unused */
+ int active; /* activated */
+};
+
+struct nic_flow_def {
+ /*
+ * Frame Decoder match info collected
+ */
+ int l2_prot;
+ int l3_prot;
+ int l4_prot;
+ int tunnel_prot;
+ int tunnel_l3_prot;
+ int tunnel_l4_prot;
+ int vlans;
+ int fragmentation;
+ /*
+ * Additional meta data for various functions
+ */
+ int in_port_override;
+ int l4_dst_port;
+ /*
+ * Output destination info collection
+ */
+ struct output_s dst_id[MAX_OUTPUT_DEST]; /* define the output to use */
+ /* total number of available queues defined for all outputs - i.e. number of dst_id's */
+ int dst_num_avail;
+
+ /*
+ * To identify high priority match with mark for special SW processing (non-OVS)
+ */
+ enum special_partial_match_e special_match;
+
+ /*
+ * Mark or Action info collection
+ */
+ uint32_t mark;
+ uint64_t roa_actions;
+ uint64_t ioa_actions;
+
+ uint32_t jump_to_group;
+
+ uint32_t mtr_ids[MAX_FLM_MTRS_SUPPORTED];
+
+ int full_offload;
+ /*
+ * Action push tunnel
+ */
+ struct tunnel_header_s tun_hdr;
+
+ /*
+	 * If the DPDK RTE tunnel helper API is used,
+	 * this holds the tunnel used in the flow
+ */
+ struct tunnel_s *tnl;
+
+ /*
+ * Header Stripper
+ */
+ int header_strip_start_dyn;
+ int header_strip_start_ofs;
+ int header_strip_end_dyn;
+ int header_strip_end_ofs;
+ int header_strip_removed_outer_ip;
+
+ /*
+ * Modify field
+ */
+ struct {
+ uint32_t select;
+ uint32_t dyn;
+ uint32_t ofs;
+ uint32_t len;
+ uint32_t level;
+ union {
+ uint8_t value8[16];
+ uint16_t value16[8];
+ uint32_t value32[4];
+ };
+ } modify_field[MAX_CPY_WRITERS_SUPPORTED];
+
+ uint32_t modify_field_count;
+ uint8_t ttl_sub_enable;
+ uint8_t ttl_sub_ipv4;
+ uint8_t ttl_sub_outer;
+
+ /*
+ * Key Matcher flow definitions
+ */
+ struct km_flow_def_s km;
+
+ /*
+ * Key Matcher Category CAM
+ */
+ struct kcc_flow_def_s *kcc;
+ int kcc_referenced;
+
+ /*
+ * TX fragmentation IFR/RPP_LR MTU recipe
+ */
+ uint8_t flm_mtu_fragmentation_recipe;
+};
+
+enum flow_handle_type {
+ FLOW_HANDLE_TYPE_FLOW,
+ FLOW_HANDLE_TYPE_FLM,
+};
+
+struct flow_handle {
+ enum flow_handle_type type;
+
+ struct flow_eth_dev *dev;
+ struct flow_handle *next;
+ struct flow_handle *prev;
+
+ union {
+ struct {
+ /*
+			 * 1st step: conversion and validation of the flow -
+			 * the verified and converted flow match + actions structure
+ */
+ struct nic_flow_def *fd;
+ /*
+			 * 2nd step: NIC HW resource allocation and configuration -
+			 * the NIC resource management structures
+ */
+ struct {
+ int index; /* allocation index into NIC raw resource table */
+ /* number of contiguous allocations needed for this resource */
+ int count;
+ /*
+				 * This resource was not initially created by this flow,
+				 * but is reused by it
+ */
+ int referenced;
+ } resource[RES_COUNT];
+ int flushed;
+
+ uint32_t flow_stat_id;
+ uint32_t color;
+ int cao_enabled;
+ uint32_t cte;
+
+ uint32_t port_id; /* MAC port ID or override of virtual in_port */
+ uint32_t flm_ref_count;
+ uint8_t flm_group_index;
+ uint8_t flm_ft_index;
+ };
+
+ struct {
+ uint32_t flm_data[10];
+ uint8_t flm_prot;
+ uint8_t flm_kid;
+ uint8_t flm_prio;
+
+ uint16_t flm_rpl_ext_ptr;
+ uint32_t flm_nat_ipv4;
+ uint16_t flm_nat_port;
+ uint8_t flm_dscp;
+ uint32_t flm_teid;
+ uint8_t flm_rqi;
+ uint8_t flm_qfi;
+
+ uint8_t flm_mtu_fragmentation_recipe;
+
+ struct flow_handle *flm_owner;
+ };
+ };
+};
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km,
+ void **handle);
+void km_free_ndev_resource_management(void **handle);
+
+int km_get_cam_population_level(void *cam_dist, uint32_t *cam_elem,
+ uint32_t *cuckoo_moves);
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+ uint32_t e_mask[4], uint32_t word_len,
+ enum frame_offs_e start, int8_t offset);
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id);
+/*
+ * Compares 2 KM key definitions after the initial collect, validate and
+ * optimization steps.
+ * km is compared against an existing km1.
+ * If identical, the flow_type of km1 is returned.
+ */
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1);
+
+void km_set_info(struct km_flow_def_s *km, int on);
+int km_rcp_set(struct km_flow_def_s *km, int index);
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+ struct km_flow_def_s *km1);
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color);
+int km_clear_data_match_entry(struct km_flow_def_s *km);
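+
+/*
+ * Usage sketch (illustrative; "be", "ndev_km_res", the word/mask arrays and
+ * indexes are hypothetical): typical lifecycle of a KM flow definition.
+ *
+ *	struct km_flow_def_s km = { 0 };
+ *
+ *	km.be = be;
+ *	km_attach_ndev_resource_management(&km, &ndev_km_res);
+ *	// IPv4 src+dst: 2 words matched at L3 header offset 12
+ *	km_add_match_elem(&km, ip4_words, ip4_masks, 2, DYN_L3, 12);
+ *	if (km_key_create(&km, port_id) == 0 &&
+ *	    km_rcp_set(&km, rcp_index) == 0)
+ *		km_write_data_match_entry(&km, color);
+ */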
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+ void **handle);
+void kcc_free_ndev_resource_management(void **handle);
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc);
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc);
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1);
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category);
+
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc);
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid);
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni);
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port);
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn);
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc);
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc);
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc);
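+
+/*
+ * Usage sketch (illustrative; "be", "ndev_kcc_res" and the values are
+ * hypothetical): building and programming a KCC-CAM key with the helpers
+ * above.
+ *
+ *	struct kcc_flow_def_s kcc = { 0 };
+ *
+ *	kcc.be = be;
+ *	kcc.id = KCC_ID_INVALID;
+ *	kcc_attach_ndev_resource_management(&kcc, &ndev_kcc_res);
+ *	kcc_key_add_vlan(&kcc, 0x8100, vid);
+ *	kcc_key_add_port(&kcc, in_port);
+ *	kcc_key_add_cat_cfn(&kcc, cat_cfn);
+ *	kcc_add_km_category(&kcc, km_category);
+ *	if (kcc_alloc_unique_id(&kcc) >= 0)
+ *		kcc_write_data_match_entry(&kcc);
+ */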
+
+/*
+ * Group management
+ */
+int flow_group_handle_create(void **handle, uint32_t group_count);
+int flow_group_handle_destroy(void **handle);
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+ uint32_t *group_out);
+int flow_group_translate_release(void *handle, uint32_t translated_group);
+
+/*
+ * Actions management
+ */
+uint8_t flow_tunnel_alloc_virt_port(void);
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port);
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+ uint32_t *vni);
+int tunnel_release(struct tunnel_s *tnl);
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl);
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id);
+int tunnel_get_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+ uint8_t vport);
+
+int is_virtual_port(uint8_t virt_port);
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+ struct nic_flow_def *fd,
+ const struct flow_elem *elem);
+
+/*
+ * statistics
+ */
+uint32_t flow_actions_create_flow_stat_id(uint32_t *stat_map, uint32_t mark);
+void flow_actions_delete_flow_stat_id(uint32_t *stat_map,
+ uint32_t flow_stat_id);
+
+#endif /* _FLOW_API_ENGINE_H_ */
new file mode 100644
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef __FLOW_API_NIC_SETUP_H__
+#define __FLOW_API_NIC_SETUP_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct flow_api_backend_ops; /* Operation list for common FPGA module API for backend */
+struct flow_nic_dev; /* adapter device */
+
+/*
+ * Flow-capable NIC backend - creates a flow API instance for the given adapter number (backend)
+ */
+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,
+ const struct flow_api_backend_ops *be_if,
+ void *be_dev);
+int flow_api_done(struct flow_nic_dev *dev);
+void *flow_api_get_be_dev(struct flow_nic_dev *dev);
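+
+/*
+ * Usage sketch (illustrative; "my_backend_ops" and "my_be_dev" are
+ * hypothetical):
+ *
+ *	struct flow_nic_dev *ndev =
+ *		flow_api_create(adapter_no, &my_backend_ops, my_be_dev);
+ *	if (ndev != NULL) {
+ *		void *bdev = flow_api_get_be_dev(ndev);
+ *		// ... use the adapter ...
+ *		flow_api_done(ndev);
+ *	}
+ */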
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __FLOW_API_NIC_SETUP_H__ */
new file mode 100644
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define OWNER_ID_COUNT 256
+
+struct group_lookup_entry_s {
+ uint64_t ref_counter;
+ uint32_t *reverse_lookup;
+};
+
+struct group_handle_s {
+ uint32_t group_count;
+
+ uint32_t *translation_table;
+
+ struct group_lookup_entry_s *lookup_entries;
+};
+
+int flow_group_handle_create(void **handle, uint32_t group_count)
+{
+ struct group_handle_s *group_handle;
+
+	*handle = calloc(1, sizeof(struct group_handle_s));
+	if (*handle == NULL)
+		return -1;
+	group_handle = *handle;
+
+	group_handle->group_count = group_count;
+	group_handle->translation_table =
+		calloc((size_t)group_count * OWNER_ID_COUNT, sizeof(uint32_t));
+	group_handle->lookup_entries =
+		calloc(group_count, sizeof(struct group_lookup_entry_s));
+
+	if (group_handle->translation_table == NULL ||
+	    group_handle->lookup_entries == NULL) {
+		flow_group_handle_destroy(handle);
+		return -1;
+	}
+
+	return 0;
+}
+
+int flow_group_handle_destroy(void **handle)
+{
+ if (*handle) {
+ struct group_handle_s *group_handle =
+ (struct group_handle_s *)*handle;
+
+ free(group_handle->translation_table);
+ free(group_handle->lookup_entries);
+
+ free(*handle);
+ *handle = NULL;
+ }
+
+ return 0;
+}
+
+int flow_group_translate_get(void *handle, uint8_t owner_id, uint32_t group_in,
+ uint32_t *group_out)
+{
+ struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+ uint32_t *table_ptr;
+ uint32_t lookup;
+
+ if (group_handle == NULL || group_in >= group_handle->group_count)
+ return -1;
+
+ /* Don't translate group 0 */
+ if (group_in == 0) {
+ *group_out = 0;
+ return 0;
+ }
+
+ table_ptr = &group_handle->translation_table[owner_id * OWNER_ID_COUNT +
+ group_in];
+ lookup = *table_ptr;
+
+ if (lookup == 0) {
+ for (lookup = 1;
+ lookup < group_handle->group_count &&
+ group_handle->lookup_entries[lookup].ref_counter > 0;
+ ++lookup)
+ ;
+
+ if (lookup < group_handle->group_count) {
+ group_handle->lookup_entries[lookup].reverse_lookup =
+ table_ptr;
+ group_handle->lookup_entries[lookup].ref_counter += 1;
+
+ *table_ptr = lookup;
+ } else {
+ return -1;
+ }
+ } else {
+ group_handle->lookup_entries[lookup].ref_counter += 1;
+ }
+ *group_out = lookup;
+ return 0;
+}
+
+int flow_group_translate_release(void *handle, uint32_t translated_group)
+{
+ struct group_handle_s *group_handle = (struct group_handle_s *)handle;
+ struct group_lookup_entry_s *lookup;
+
+ if (group_handle == NULL ||
+ translated_group >= group_handle->group_count)
+ return -1;
+
+ /* Don't translate group 0 */
+ if (translated_group == 0)
+ return 0;
+
+ lookup = &group_handle->lookup_entries[translated_group];
+
+ if (lookup->reverse_lookup && lookup->ref_counter > 0) {
+ lookup->ref_counter -= 1;
+ if (lookup->ref_counter == 0) {
+ *lookup->reverse_lookup = 0;
+ lookup->reverse_lookup = NULL;
+ }
+ }
+
+ return 0;
+}
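+
+/*
+ * Usage sketch (illustrative; "nb_groups" and "owner_id" are hypothetical):
+ * group 0 always passes through untranslated, any other group number is
+ * mapped to a free reference-counted slot.
+ *
+ *	void *grp = NULL;
+ *	uint32_t hw_group;
+ *
+ *	if (flow_group_handle_create(&grp, nb_groups) == 0 &&
+ *	    flow_group_translate_get(grp, owner_id, 5, &hw_group) == 0) {
+ *		// ... program hardware using hw_group ...
+ *		flow_group_translate_release(grp, hw_group);
+ *	}
+ *	flow_group_handle_destroy(&grp);
+ */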
new file mode 100644
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include "flow_hasher.h"
+
+#include <math.h>
+
+/* #define TESTING */
+
+#ifdef TESTING
+#include <stdio.h>
+int hash_test(struct hasher_s *hsh, int banks, int record_bw);
+#endif
+
+static uint32_t shuffle(uint32_t x)
+{
+ return (((x & 0x00000002) << 29) | ((x & 0xAAAAAAA8) >> 3) |
+ ((x & 0x15555555) << 3) | ((x & 0x40000000) >> 29));
+}
+
+static uint32_t ror_inv(uint32_t x, const int s)
+{
+ return ((x >> s) | ((~x) << (32 - s)));
+}
+
+static uint32_t combine(uint32_t x, uint32_t y)
+{
+ uint32_t x1 = ror_inv(x, 15);
+ uint32_t x2 = ror_inv(x, 13);
+ uint32_t y1 = ror_inv(y, 3);
+ uint32_t y2 = ror_inv(y, 27);
+
+ return (x ^ y ^
+ ((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
+ (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
+ (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)));
+}
+
+static uint32_t mix(uint32_t x, uint32_t y)
+{
+ return shuffle(combine(x, y));
+}
+
+static uint64_t ror_inv3(uint64_t x)
+{
+ const uint64_t m = 0xE0000000E0000000ULL;
+
+ return (((x >> 3) | m) ^ ((x << 29) & m));
+}
+
+static uint64_t ror_inv13(uint64_t x)
+{
+ const uint64_t m = 0xFFF80000FFF80000ULL;
+
+ return (((x >> 13) | m) ^ ((x << 19) & m));
+}
+
+static uint64_t ror_inv15(uint64_t x)
+{
+ const uint64_t m = 0xFFFE0000FFFE0000ULL;
+
+ return (((x >> 15) | m) ^ ((x << 17) & m));
+}
+
+static uint64_t ror_inv27(uint64_t x)
+{
+ const uint64_t m = 0xFFFFFFE0FFFFFFE0ULL;
+
+ return (((x >> 27) | m) ^ ((x << 5) & m));
+}
+
+static uint64_t shuffle64(uint64_t x)
+{
+ return (((x & 0x0000000200000002) << 29) |
+ ((x & 0xAAAAAAA8AAAAAAA8) >> 3) |
+ ((x & 0x1555555515555555) << 3) |
+ ((x & 0x4000000040000000) >> 29));
+}
+
+static uint64_t pair(uint32_t x, uint32_t y)
+{
+ return (((uint64_t)x << 32) | y);
+}
+
+static uint64_t combine64(uint64_t x, uint64_t y)
+{
+ uint64_t x1 = ror_inv15(x);
+ uint64_t x2 = ror_inv13(x);
+ uint64_t y1 = ror_inv3(y);
+ uint64_t y2 = ror_inv27(y);
+
+ return (x ^ y ^
+ ((x1 & y1 & ~x2 & ~y2) | (x1 & ~y1 & x2 & ~y2) |
+ (x1 & ~y1 & ~x2 & y2) | (~x1 & y1 & x2 & ~y2) |
+ (~x1 & y1 & ~x2 & y2) | (~x1 & ~y1 & x2 & y2)));
+}
+
+static uint64_t mix64(uint64_t x, uint64_t y)
+{
+ return shuffle64(combine64(x, y));
+}
+
+static uint32_t calc16(const uint32_t key[16])
+{
+ /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 Layer 0 */
+ /* \./ \./ \./ \./ \./ \./ \./ \./ */
+ /* 0 1 2 3 4 5 6 7 Layer 1 */
+ /* \__.__/ \__.__/ \__.__/ \__.__/ */
+ /* 0 1 2 3 Layer 2 */
+ /* \______.______/ \______.______/ */
+ /* 0 1 Layer 3 */
+ /* \______________.______________/ */
+ /* 0 Layer 4 */
+ /* / \ */
+ /* \./ */
+ /* 0 Layer 5 */
+ /* / \ */
+ /* \./ Layer 6 */
+ /* value */
+
+ uint64_t z;
+ uint32_t x;
+
+ z = mix64(mix64(mix64(pair(key[0], key[8]), pair(key[1], key[9])),
+ mix64(pair(key[2], key[10]), pair(key[3], key[11]))),
+ mix64(mix64(pair(key[4], key[12]), pair(key[5], key[13])),
+ mix64(pair(key[6], key[14]), pair(key[7], key[15]))));
+
+ x = mix((uint32_t)(z >> 32), (uint32_t)z);
+ x = mix(x, ror_inv(x, 17));
+ x = combine(x, ror_inv(x, 17));
+
+ return x;
+}
+
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result)
+{
+ uint64_t val;
+ uint32_t res;
+
+ val = calc16(key);
+ res = (uint32_t)val;
+
+ if (hsh->cam_bw > 32)
+ val = (val << (hsh->cam_bw - 32)) ^ val;
+
+ for (int i = 0; i < hsh->banks; i++) {
+ result[i] = (unsigned int)(val & hsh->cam_records_bw_mask);
+ val = val >> hsh->cam_records_bw;
+ }
+ return res;
+}
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records)
+{
+ hsh->banks = banks;
+ hsh->cam_records_bw = (int)(log2(nb_records - 1) + 1);
+ hsh->cam_records_bw_mask = (1U << hsh->cam_records_bw) - 1;
+ hsh->cam_bw = hsh->banks * hsh->cam_records_bw;
+
+#ifdef TESTING
+	int res = hash_test(hsh, banks, (int)log2(nb_records - 1) + 1);
+
+ if (res)
+ printf("ERROR: testing hasher\n");
+#endif
+
+ return 0;
+}
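+
+/*
+ * Usage sketch (illustrative; the bank/record counts are hypothetical):
+ * hash a 16-word key into one record index per CAM bank.
+ *
+ *	struct hasher_s hsh;
+ *	uint32_t key[16] = { 0 };
+ *	int record_idx[3];
+ *
+ *	init_hasher(&hsh, 3, 2048);	// 3 banks, 2048 records per bank
+ *	key[0] = first_key_word;
+ *	gethash(&hsh, key, record_idx);	// record_idx[i] = index in bank i
+ */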
+
+#ifdef TESTING
+int hash_test(struct hasher_s *hsh, int banks, int record_bw)
+{
+ int res = 0;
+ int val[10], resval[10];
+ uint32_t bits = 0;
+
+ uint32_t inval[16] = { 0xaaaabbbb, 0xccccdddd, 0xeeeeffff, 0x88881111 };
+ const uint32_t result = 0xACECAE65;
+
+ for (int i = 0; i < 16; i++)
+ printf("%08x,", inval[i]);
+ printf("\nbanks %i, records bit width: %i\n", banks, record_bw);
+
+ uint32_t ret = gethash(hsh, inval, val);
+
+ printf("Return VAL = %08X == %08X\n", ret, result);
+ res += (ret != result) ? 1 : 0;
+
+ int shft = (banks * record_bw) - 32;
+ int mask = (1 << record_bw) - 1;
+
+ if (shft > 0) {
+ bits = (ret >> (32 - shft));
+ ret ^= ret << shft;
+ }
+
+ resval[0] = ret & mask;
+ ret >>= record_bw;
+ resval[1] = ret & mask;
+ ret >>= record_bw;
+ resval[2] = ret & mask;
+ resval[2] |= (bits << (record_bw - shft));
+
+ for (int i = 0; i < 3; i++) {
+ printf("HASH %i: %i == %i\n", i, val[i], resval[i]);
+ res += (val[i] != resval[i]) ? 1 : 0;
+ }
+
+ return res;
+}
+#endif
new file mode 100644
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _FLOW_HASHER_H_
+#define _FLOW_HASHER_H_
+
+#include <stdint.h>
+
+struct hasher_s {
+ int banks;
+ int cam_records_bw;
+ uint32_t cam_records_bw_mask;
+ int cam_bw;
+};
+
+int init_hasher(struct hasher_s *hsh, int banks, int nb_records);
+uint32_t gethash(struct hasher_s *hsh, const uint32_t key[16], int *result);
+
+#endif /* _FLOW_HASHER_H_ */
new file mode 100644
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+/*
+ * KCC-CAM structures and defines
+ */
+struct kcc_cam_distrib_s {
+ struct kcc_flow_def_s *kcc_owner;
+ int ref_cnt;
+};
+
+#define BE_CAM_KCC_DIST_IDX(bnk) \
+ ({ \
+ int _temp_bnk = (bnk); \
+ (_temp_bnk * kcc->be->cat.kcc_records + kcc->record_indexes[_temp_bnk]); \
+ })
+
+
+#define BE_CAM_ENTRIES \
+ (kcc->be->cat.kcc_size * sizeof(struct kcc_cam_distrib_s))
+#define BE_UNIQUE_IDS_SIZE ((1U << kcc->be->cat.kcc_id_bit_size) / 8)
+
+#define KCC_CUCKOO_MOVE_MAX_DEPTH 8
+static int kcc_cam_addr_reserved_stack[KCC_CUCKOO_MOVE_MAX_DEPTH];
+
+void kcc_attach_ndev_resource_management(struct kcc_flow_def_s *kcc,
+ void **handle)
+{
+ /*
+ * KCC entries occupied in CAM - to manage the cuckoo shuffling
+ * and manage CAM population and usage
+ */
+ if (!*handle) {
+ *handle = calloc(1, BE_CAM_ENTRIES + sizeof(uint32_t) +
+ BE_UNIQUE_IDS_SIZE +
+ sizeof(struct hasher_s));
+ NT_LOG(DBG, FILTER,
+ "Allocate NIC DEV KCC-CAM record manager\n");
+ }
+ kcc->cam_dist = (struct kcc_cam_distrib_s *)*handle;
+ kcc->cuckoo_moves =
+ (uint32_t *)((char *)kcc->cam_dist + BE_CAM_ENTRIES);
+ kcc->kcc_unique_ids = (uint8_t *)((char *)kcc->cam_dist +
+ BE_CAM_ENTRIES + sizeof(uint32_t));
+
+ kcc->hsh = (struct hasher_s *)((char *)kcc->kcc_unique_ids +
+ BE_UNIQUE_IDS_SIZE);
+ init_hasher(kcc->hsh, kcc->be->cat.kcc_banks, kcc->be->cat.kcc_records);
+}
+
+void kcc_free_ndev_resource_management(void **handle)
+{
+ if (*handle) {
+ free(*handle);
+ NT_LOG(DBG, FILTER, "Free NIC DEV KCC-CAM record manager\n");
+ }
+ *handle = NULL;
+}
+
+/*
+ * Key for KCC CAM
+ */
+int kcc_key_add_no_sideband(struct kcc_flow_def_s *kcc)
+{
+ kcc->key.sb_data = 0xffffffff;
+ kcc->key.sb_type = 0;
+ return 0;
+}
+
+int kcc_key_add_vlan(struct kcc_flow_def_s *kcc, uint16_t tpid, uint16_t vid)
+{
+ kcc->key.sb_data = ((uint32_t)tpid << 16) | (vid & 0x0fff);
+ kcc->key.sb_type = 1;
+ return 0;
+}
+
+int kcc_key_add_vxlan(struct kcc_flow_def_s *kcc, uint32_t vni)
+{
+ kcc->key.sb_data = (vni & 0x00ffffff) | 0x02000000;
+ kcc->key.sb_type = 2;
+ return 0;
+}
+
+int kcc_key_add_port(struct kcc_flow_def_s *kcc, uint16_t port)
+{
+ kcc->key.port = port;
+ return 0;
+}
+
+int kcc_key_add_cat_cfn(struct kcc_flow_def_s *kcc, uint8_t cat_cfn)
+{
+ kcc->key.cat_cfn = cat_cfn;
+ return 0;
+}
+
+uint8_t kcc_key_get_cat_cfn(struct kcc_flow_def_s *kcc)
+{
+ return kcc->key.cat_cfn;
+}
+
+/*
+ * other settings for KCC CAM
+ */
+int kcc_add_km_category(struct kcc_flow_def_s *kcc, uint32_t category)
+{
+ kcc->km_category = category;
+ return 0;
+}
+
+int kcc_alloc_unique_id(struct kcc_flow_def_s *kcc)
+{
+ uint32_t i, ii;
+ /* search a free unique ID in allocation bitmap */
+ for (i = 0; i < BE_UNIQUE_IDS_SIZE; i++)
+ if (kcc->kcc_unique_ids[i] != 0xff)
+ break;
+
+ if (i == BE_UNIQUE_IDS_SIZE)
+ return -1;
+
+ for (ii = 0; ii < 8; ii++) {
+ if ((kcc->kcc_unique_ids[i] & (uint8_t)(1U << ii)) == 0) {
+ kcc->kcc_unique_ids[i] =
+ (uint8_t)(kcc->kcc_unique_ids[i] |
+ (uint8_t)(1U << ii));
+ kcc->id = (uint16_t)(i * 8 + ii);
+ NT_LOG(DBG, FILTER, "Allocate new KCC ID : %i\n",
+ kcc->id);
+ return (int)kcc->id;
+ }
+ }
+ return -1;
+}
+
+void kcc_free_unique_id(struct kcc_flow_def_s *kcc)
+{
+ if (kcc->id == KCC_ID_INVALID)
+ return;
+
+ uint32_t idx = kcc->id >> 3;
+ uint8_t shft = (uint8_t)(kcc->id & 7);
+
+ assert(idx < BE_UNIQUE_IDS_SIZE);
+ if (idx < BE_UNIQUE_IDS_SIZE) {
+ assert(kcc->kcc_unique_ids[idx] & (uint8_t)(1 << shft));
+ kcc->kcc_unique_ids[idx] &= (uint8_t)~(1 << shft);
+ NT_LOG(DBG, FILTER, "Free KCC ID : %i\n", kcc->id);
+ kcc->id = KCC_ID_INVALID;
+ }
+}
+
+int kcc_key_compare(struct kcc_flow_def_s *kcc, struct kcc_flow_def_s *kcc1)
+{
+ if (kcc->key64 == kcc1->key64)
+ return 1;
+ return 0;
+}
+
+static int kcc_cam_populate(struct kcc_flow_def_s *kcc, int bank)
+{
+ int res;
+ int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+ res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0,
+ kcc->key32[0]);
+ if (res)
+ return -1;
+ res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1,
+ kcc->key32[1]);
+ if (res)
+ return -1;
+ res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0,
+ kcc->km_category);
+ if (res)
+ return -1;
+ res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, kcc->id);
+ if (res)
+ return -1;
+ res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+ kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = kcc;
+ kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 1;
+ return res;
+}
+
+static int kcc_cam_reset_entry(struct kcc_flow_def_s *kcc, int bank)
+{
+ int res = 0;
+ int idx = bank * kcc->be->cat.kcc_records + kcc->record_indexes[bank];
+
+ res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 0, 0);
+ if (res)
+ return -1;
+ res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_KEY, idx, 1, 0);
+ if (res)
+ return -1;
+ res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_CATEGORY, idx, 0, 0);
+ if (res)
+ return -1;
+ res = hw_mod_cat_kcc_set(kcc->be, HW_CAT_KCC_ID, idx, 0, 0);
+ if (res)
+ return -1;
+ res = hw_mod_cat_kcc_flush(kcc->be, idx, 1);
+
+ kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner = NULL;
+ kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].ref_cnt = 0;
+
+ kcc->key64 = 0UL;
+ kcc->km_category = 0;
+ /* "kcc->id" holds an allocated unique id, so cleared/freed later */
+ return res;
+}
+
+static int kcc_move_cuckoo_index(struct kcc_flow_def_s *kcc)
+{
+ assert(kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)].kcc_owner);
+
+ for (uint32_t bank = 0; bank < kcc->be->cat.kcc_banks; bank++) {
+ /* It will not select itself */
+ if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(bank)].kcc_owner ==
+ NULL) {
+ /*
+ * Populate in new position
+ */
+ int res = kcc_cam_populate(kcc, bank);
+
+ if (res) {
+ NT_LOG(DBG, FILTER,
+ "Error: failed to write to KM CAM in cuckoo move\n");
+ return 0;
+ }
+
+ /*
+ * Reset/free entry in old bank
+			 * HW flushes are really not needed; the old addresses are always taken
+			 * over by the caller. If you change this code in future updates, this
+			 * may no longer be true!
+ */
+ kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)]
+ .kcc_owner = NULL;
+ NT_LOG(DBG, FILTER,
+ "KCC Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+ kcc->bank_used, bank,
+ BE_CAM_KCC_DIST_IDX(kcc->bank_used),
+ BE_CAM_KCC_DIST_IDX(bank));
+
+ kcc->bank_used = bank;
+ (*kcc->cuckoo_moves)++;
+ return 1;
+ }
+ }
+ return 0;
+}
+
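+/*
+ * Recursively make room at CAM position bank_idx by moving its current
+ * owner to another bank, at most 'levels' moves deep. Positions already
+ * claimed along the move path are kept in kcc_cam_addr_reserved_stack so
+ * the recursion never picks them again.
+ */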
+static int kcc_move_cuckoo_index_level(struct kcc_flow_def_s *kcc_parent,
+ int bank_idx, int levels,
+ int cam_adr_list_len)
+{
+ struct kcc_flow_def_s *kcc = kcc_parent->cam_dist[bank_idx].kcc_owner;
+
+ assert(levels <= KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+ if (kcc_move_cuckoo_index(kcc))
+ return 1;
+ if (levels <= 1)
+ return 0;
+
+ assert(cam_adr_list_len < KCC_CUCKOO_MOVE_MAX_DEPTH);
+
+ kcc_cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+ for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++) {
+ int reserved = 0;
+ int new_idx = BE_CAM_KCC_DIST_IDX(i);
+
+ for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+ i_reserved++) {
+ if (kcc_cam_addr_reserved_stack[i_reserved] ==
+ new_idx) {
+ reserved = 1;
+ break;
+ }
+ }
+ if (reserved)
+ continue;
+
+ int res = kcc_move_cuckoo_index_level(kcc, new_idx, levels - 1,
+ cam_adr_list_len);
+ if (res) {
+ if (kcc_move_cuckoo_index(kcc))
+ return 1;
+
+ else
+ assert(0);
+ }
+ }
+
+ return 0;
+}
+
+static uint32_t kcc_hsh_key[16];
+
+static int kcc_write_data_to_cam(struct kcc_flow_def_s *kcc)
+{
+ int res = 0;
+ int val[MAX_BANKS];
+
+ kcc_hsh_key[0] = kcc->key32[1];
+ kcc_hsh_key[1] = kcc->key32[0];
+ NT_LOG(DBG, FILTER, "TEMP TEMP TEMP");
+ NT_LOG(DBG, FILTER, "Hash key[0] %08x", kcc_hsh_key[0]);
+ NT_LOG(DBG, FILTER, "Hash key[1] %08x", kcc_hsh_key[1]);
+ NT_LOG(DBG, FILTER, "TEMP TEMP TEMP - swapped");
+
+ /* 2-15 never changed - remains zero */
+
+ gethash(kcc->hsh, kcc_hsh_key, val);
+
+ for (uint32_t i = 0; i < kcc->be->cat.kcc_banks; i++)
+ kcc->record_indexes[i] = val[i];
+ NT_LOG(DBG, FILTER, "KCC HASH [%03X, %03X, %03X]\n",
+ kcc->record_indexes[0], kcc->record_indexes[1],
+ kcc->record_indexes[2]);
+
+ int bank = -1;
+ /*
+ * first step, see if any of the banks are free
+ */
+ for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks; i_bank++) {
+ if (kcc->cam_dist[BE_CAM_KCC_DIST_IDX(i_bank)].kcc_owner ==
+ NULL) {
+ bank = i_bank;
+ break;
+ }
+ }
+
+ if (bank < 0) {
+ /*
+ * Second step - cuckoo move existing flows if possible
+ */
+ for (uint32_t i_bank = 0; i_bank < kcc->be->cat.kcc_banks;
+ i_bank++) {
+ if (kcc_move_cuckoo_index_level(kcc,
+ BE_CAM_KCC_DIST_IDX(i_bank),
+ 4, 0)) {
+ bank = i_bank;
+ break;
+ }
+ }
+
+ if (bank < 0)
+ return -1;
+ }
+
+ /* populate CAM */
+ NT_LOG(DBG, FILTER, "KCC Bank = %i (addr %04X)\n", bank,
+ BE_CAM_KCC_DIST_IDX(bank));
+ res = kcc_cam_populate(kcc, bank);
+ if (res == 0) {
+ kcc->flushed_to_target = 1;
+ kcc->bank_used = bank;
+ } else {
+ NT_LOG(DBG, FILTER, "KCC CAM populate failed\n");
+ }
+ return res;
+}
+
+int kcc_write_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+ int res = -1;
+
+ NT_LOG(DBG, FILTER,
+ "KCC Write Data entry. Create New Key: %016lx, KM category %i, id %i\n",
+ kcc->key64, kcc->km_category, kcc->id);
+ res = kcc_write_data_to_cam(kcc);
+ return res;
+}
+
+static int kcc_clear_data_match_entry(struct kcc_flow_def_s *kcc)
+{
+ int res = 0;
+
+ if (kcc->flushed_to_target) {
+ res = kcc_cam_reset_entry(kcc, kcc->bank_used);
+ kcc->flushed_to_target = 0;
+ kcc->bank_used = 0;
+ }
+ return res;
+}
+
+int kcc_key_ref_count_add(struct kcc_flow_def_s *kcc)
+{
+ assert(kcc->bank_used >= 0 &&
+ kcc->bank_used < (int)kcc->be->cat.kcc_banks);
+
+ struct kcc_cam_distrib_s *cam_entry =
+ &kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+ NT_LOG(DBG, FILTER,
+ "KCC ADD Ref existing Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+ kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt + 1);
+ return ++cam_entry->ref_cnt;
+}
+
+int kcc_key_ref_count_dec(struct kcc_flow_def_s *kcc)
+{
+ if (kcc->bank_used < 0 || kcc->bank_used >= (int)kcc->be->cat.kcc_banks)
+ return -1;
+
+ struct kcc_cam_distrib_s *cam_entry =
+ &kcc->cam_dist[BE_CAM_KCC_DIST_IDX(kcc->bank_used)];
+
+ if (cam_entry->ref_cnt) {
+ if (--cam_entry->ref_cnt == 0) {
+ kcc_clear_data_match_entry(kcc);
+ NT_LOG(DBG, FILTER,
+ "KCC DEC Ref on Key became zero - Delete\n");
+ }
+ }
+
+ NT_LOG(DBG, FILTER,
+ "KCC DEC Ref on Key: %016lx, KM category %i, id %i (new ref count %i)\n",
+ kcc->key64, kcc->km_category, kcc->id, cam_entry->ref_cnt);
+ return cam_entry->ref_cnt;
+}
new file mode 100644
@@ -0,0 +1,1434 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+#include "flow_hasher.h"
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+static const struct cam_match_masks_s {
+ uint32_t word_len;
+ uint32_t key_mask[4];
+} cam_masks[] = {
+	/* IP6_SRC, IP6_DST */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff } },
+	/* DMAC, SMAC, ethtype */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000 } },
+	/* DMAC, ethtype */
+	{ 4, { 0xffffffff, 0xffff0000, 0x00000000, 0xffff0000 } },
+	/* SMAC, ethtype */
+	{ 4, { 0x00000000, 0x0000ffff, 0xffffffff, 0xffff0000 } },
+	/* ETH_128 */
+	{ 4, { 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000 } },
+	/* IP4_COMBINED */
+	{ 2, { 0xffffffff, 0xffffffff, 0x00000000, 0x00000000 } },
+	/*
+	 * ETH_TYPE, IP4_TTL_PROTO, IP4_SRC, IP4_DST, IP6_FLOW_TC,
+	 * IP6_NEXT_HDR_HOP, TP_PORT_COMBINED, SIDEBAND_VNI
+	 */
+	{ 1, { 0xffffffff, 0x00000000, 0x00000000, 0x00000000 } },
+	/* IP4_IHL_TOS, TP_PORT_SRC32_OR_ICMP, TCP_CTRL */
+	{ 1, { 0xffff0000, 0x00000000, 0x00000000, 0x00000000 } },
+	/* TP_PORT_DST32 */
+	{ 1, { 0x0000ffff, 0x00000000, 0x00000000, 0x00000000 } },
+	/* IPv4 TOS mask bits used often by OVS */
+	{ 1, { 0x00030000, 0x00000000, 0x00000000, 0x00000000 } },
+	/* IPv6 TOS mask bits used often by OVS */
+	{ 1, { 0x00300000, 0x00000000, 0x00000000, 0x00000000 } },
+};
+
+#define NUM_CAM_MASKS (ARRAY_SIZE(cam_masks))
+
+/*
+ * CAM structures and defines
+ */
+struct cam_distrib_s {
+ struct km_flow_def_s *km_owner;
+};
+
+#define CAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_cam_records + (rec))
+#define CAM_KM_DIST_IDX(bnk) \
+ ({ \
+ int _temp_bnk = (bnk); \
+ CAM_DIST_IDX(_temp_bnk, km->record_indexes[_temp_bnk]); \
+ })
+
+#define CUCKOO_MOVE_MAX_DEPTH 8
+static int cam_addr_reserved_stack[CUCKOO_MOVE_MAX_DEPTH];
+
+/*
+ * TCAM structures and defines
+ */
+struct tcam_distrib_s {
+ struct km_flow_def_s *km_owner;
+};
+
+#define TCAM_DIST_IDX(bnk, rec) ((bnk) * km->be->km.nb_tcam_bank_width + (rec))
+
+static int tcam_find_mapping(struct km_flow_def_s *km);
+
+void km_attach_ndev_resource_management(struct km_flow_def_s *km, void **handle)
+{
+#define CAM_ENTRIES \
+ (km->be->km.nb_cam_banks * km->be->km.nb_cam_records * \
+ sizeof(struct cam_distrib_s))
+#define TCAM_ENTRIES \
+ (km->be->km.nb_tcam_bank_width * km->be->km.nb_tcam_banks * \
+ sizeof(struct tcam_distrib_s))
+ /*
+ * KM entries occupied in CAM - to manage the cuckoo shuffling
+ * and manage CAM population and usage
+ * KM entries occupied in TCAM - to manage population and usage
+ */
+ if (!*handle) {
+ *handle = calloc(1, (size_t)CAM_ENTRIES + sizeof(uint32_t) +
+ (size_t)TCAM_ENTRIES +
+ sizeof(struct hasher_s));
+ NT_LOG(DBG, FILTER,
+ "Allocate NIC DEV CAM and TCAM record manager\n");
+ }
+ km->cam_dist = (struct cam_distrib_s *)*handle;
+ km->cuckoo_moves = (uint32_t *)((char *)km->cam_dist + CAM_ENTRIES);
+ km->tcam_dist =
+ (struct tcam_distrib_s *)((char *)km->cam_dist + CAM_ENTRIES +
+ sizeof(uint32_t));
+
+ km->hsh = (struct hasher_s *)((char *)km->tcam_dist + TCAM_ENTRIES);
+ init_hasher(km->hsh, km->be->km.nb_cam_banks,
+ km->be->km.nb_cam_records);
+}
+
+void km_free_ndev_resource_management(void **handle)
+{
+ if (*handle) {
+ free(*handle);
+ NT_LOG(DBG, FILTER,
+ "Free NIC DEV CAM and TCAM record manager\n");
+ }
+ *handle = NULL;
+}
+
+int km_add_match_elem(struct km_flow_def_s *km, uint32_t e_word[4],
+ uint32_t e_mask[4], uint32_t word_len,
+ enum frame_offs_e start_id, int8_t offset)
+{
+#ifdef FLOW_DEBUG
+ char *s = ntlog_helper_str_alloc("MATCH: ");
+
+ for (unsigned int i = 0; i < word_len; i++)
+ ntlog_helper_str_add(s, "%08x, ", e_word[i]);
+ NT_LOG(DBG, FILTER, "%s", s);
+ ntlog_helper_str_reset(s, "MASK : ");
+ for (unsigned int i = 0; i < word_len; i++)
+ ntlog_helper_str_add(s, "%08x, ", e_mask[i]);
+ NT_LOG(DBG, FILTER, "%s", s);
+ ntlog_helper_str_free(s);
+#endif
+
+ /* valid word_len 1,2,4 */
+ if (word_len == 3) {
+ word_len = 4;
+ e_word[3] = 0;
+ e_mask[3] = 0;
+ }
+ if (word_len < 1 || word_len > 4) {
+ assert(0);
+ return -1;
+ }
+
+ for (unsigned int i = 0; i < word_len; i++) {
+ km->match[km->num_ftype_elem].e_word[i] = e_word[i];
+ km->match[km->num_ftype_elem].e_mask[i] = e_mask[i];
+ }
+
+ km->match[km->num_ftype_elem].word_len = word_len;
+ km->match[km->num_ftype_elem].rel_offs = offset;
+ km->match[km->num_ftype_elem].extr_start_offs_id = start_id;
+
+ /*
+	 * Determine here if this flow is better placed in TCAM;
+	 * otherwise it will go into CAM.
+	 * This depends on the cam_masks list defined above.
+ */
+ km->match[km->num_ftype_elem].masked_for_tcam = 1;
+ for (unsigned int msk = 0; msk < NUM_CAM_MASKS; msk++) {
+ if (word_len == cam_masks[msk].word_len) {
+ int match = 1;
+
+ for (unsigned int wd = 0; wd < word_len; wd++) {
+ if (e_mask[wd] != cam_masks[msk].key_mask[wd]) {
+ match = 0;
+ break;
+ }
+ }
+ if (match) {
+ /* Can go into CAM */
+ km->match[km->num_ftype_elem].masked_for_tcam =
+ 0;
+ }
+ }
+ }
+
+ km->num_ftype_elem++;
+ return 0;
+}
+
+void km_set_info(struct km_flow_def_s *km, int on)
+{
+ km->info_set = !!on;
+}
+
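+/*
+ * Return the index of the first unmarked match element that is not SWX
+ * sideband info and has the requested word length; -1 if none is left.
+ */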
+static int get_word(struct km_flow_def_s *km, uint32_t size, int marked[])
+{
+ for (int i = 0; i < km->num_ftype_elem; i++) {
+ if (!marked[i] &&
+ !(km->match[i].extr_start_offs_id & SWX_INFO) &&
+ km->match[i].word_len == size)
+ return i;
+ }
+ return -1;
+}
+
+#ifdef FLOW_DEBUG
+static const char *get_prot_offset_descr(int idx)
+{
+ switch (idx) {
+ case DYN_SOF:
+ return "SOF";
+ case DYN_L2:
+ return "L2 header";
+ case DYN_FIRST_VLAN:
+ return "First VLAN";
+ case DYN_MPLS:
+ return "First MPLS";
+ case DYN_L3:
+ return "L3 header";
+ case DYN_ID_IPV4_6:
+ return "ID field IPv4/6";
+ case DYN_FINAL_IP_DST:
+ return "Final IP dest";
+ case DYN_L4:
+ return "L4 header";
+ case DYN_L4_PAYLOAD:
+ return "L4 payload";
+ case DYN_TUN_PAYLOAD:
+ return "Tunnel payload";
+ case DYN_TUN_L2:
+ return "Tunnel L2 header";
+ case DYN_TUN_VLAN:
+ return "First tunneled VLAN";
+ case DYN_TUN_MPLS:
+ return "First tunneled MPLS";
+ case DYN_TUN_L3:
+ return "Tunnel L3 header";
+ case DYN_TUN_ID_IPV4_6:
+ return "Tunnel ID field IPv4/6";
+ case DYN_TUN_FINAL_IP_DST:
+ return "Tunnel final IP dest";
+ case DYN_TUN_L4:
+ return "Tunnel L4 header";
+ case DYN_TUN_L4_PAYLOAD:
+ return "Tunnel L4 payload";
+ case SB_VNI:
+ return "VNI";
+ case SB_MAC_PORT:
+ return "In Port";
+ case SB_KCC_ID:
+ return "KCC ID";
+ default:
+ break;
+ }
+ return "<unknown>";
+}
+#endif
+
+#define MAX_QWORDS 2
+#define MAX_SWORDS 2
+
+int km_key_create(struct km_flow_def_s *km, uint32_t port_id)
+{
+	/*
+	 * Create combined extractor mappings.
+	 * Key fields might be rearranged to cover otherwise un-mappable
+	 * combinations; split into CAM and TCAM and use synergy mode when
+	 * available.
+	 */
+ int match_marked[MAX_MATCH_FIELDS];
+ int idx = 0;
+ int next = 0;
+ int m_idx;
+ int size;
+
+ memset(match_marked, 0, sizeof(match_marked));
+
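+	/*
+	 * Packing sketch: e.g. a 4-word match (such as an IPv4 address pair
+	 * plus ports) is consumed here as one QWORD, while left-over 1-word
+	 * matches are picked up as SWORDs in the loop further below.
+	 */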
+ /* build QWords */
+ for (int qwords = 0; qwords < MAX_QWORDS; qwords++) {
+ size = 4;
+ m_idx = get_word(km, size, match_marked);
+ if (m_idx < 0) {
+ size = 2;
+ m_idx = get_word(km, size, match_marked);
+
+ if (m_idx < 0) {
+ size = 1;
+				m_idx = get_word(km, size, match_marked);
+ }
+ }
+ if (m_idx < 0) {
+ /* no more defined */
+ break;
+ }
+
+ match_marked[m_idx] = 1;
+
+ /* build match map list and set final extractor to use */
+ km->match_map[next] = &km->match[m_idx];
+ km->match[m_idx].extr = KM_USE_EXTRACTOR_QWORD;
+
+ /* build final entry words and mask array */
+ for (int i = 0; i < size; i++) {
+ km->entry_word[idx + i] = km->match[m_idx].e_word[i];
+ km->entry_mask[idx + i] = km->match[m_idx].e_mask[i];
+ }
+
+ idx += size;
+ next++;
+ }
+
+ m_idx = get_word(km, 4, match_marked);
+ if (m_idx >= 0) {
+ /* cannot match more QWords */
+ return -1;
+ }
+
+	/*
+	 * On KM v6+ these are DWORDs, but for now we only use them as SWORDs.
+	 * No match can exploit them as DWORDs because of the maximum CAM key
+	 * length of 12 words: the last 2 words are taken by KCC-ID/SWX and
+	 * Color. With one or no QWORD both DWORDs would fit within 10 words,
+	 * but no such use case is built in yet.
+	 */
+ /* build SWords */
+ for (int swords = 0; swords < MAX_SWORDS; swords++) {
+ m_idx = get_word(km, 1, match_marked);
+ if (m_idx < 0) {
+ /* no more defined */
+ break;
+ }
+
+ match_marked[m_idx] = 1;
+ /* build match map list and set final extractor to use */
+ km->match_map[next] = &km->match[m_idx];
+ km->match[m_idx].extr = KM_USE_EXTRACTOR_SWORD;
+
+ /* build final entry words and mask array */
+ km->entry_word[idx] = km->match[m_idx].e_word[0];
+ km->entry_mask[idx] = km->match[m_idx].e_mask[0];
+ idx++;
+ next++;
+ }
+
+ /*
+ * Make sure we took them all
+ */
+ m_idx = get_word(km, 1, match_marked);
+ if (m_idx >= 0) {
+ /* cannot match more SWords */
+ return -1;
+ }
+
+ /*
+ * Handle SWX words specially
+ */
+ int swx_found = 0;
+
+ for (int i = 0; i < km->num_ftype_elem; i++) {
+ if (km->match[i].extr_start_offs_id & SWX_INFO) {
+ km->match_map[next] = &km->match[i];
+ km->match[i].extr = KM_USE_EXTRACTOR_SWORD;
+ /* build final entry words and mask array */
+ km->entry_word[idx] = km->match[i].e_word[0];
+ km->entry_mask[idx] = km->match[i].e_mask[0];
+ idx++;
+ next++;
+ swx_found = 1;
+ }
+ }
+
+ assert(next == km->num_ftype_elem);
+
+ km->key_word_size = idx;
+ km->port_id = port_id;
+
+ km->target = KM_CAM;
+	/*
+	 * Finally decide whether to put this match->action into the TCAM.
+	 * When an SWX word is used it must always go into CAM, no matter the
+	 * mask pattern. Later, when synergy mode is applied, a split can be
+	 * done.
+	 */
+ if (!swx_found && km->key_word_size <= 6) {
+ for (int i = 0; i < km->num_ftype_elem; i++) {
+ if (km->match_map[i]->masked_for_tcam) {
+ /* At least one */
+ km->target = KM_TCAM;
+ }
+ }
+ }
+
+ NT_LOG(DBG, FILTER, "This flow goes into %s\n",
+ (km->target == KM_TCAM) ? "TCAM" : "CAM");
+
+ if (km->target == KM_TCAM) {
+ if (km->key_word_size > 10) {
+ /* do not support SWX in TCAM */
+ return -1;
+ }
+ /*
+ * adjust for unsupported key word size in TCAM
+ */
+ if ((km->key_word_size == 5 || km->key_word_size == 7 ||
+ km->key_word_size == 9)) {
+ km->entry_mask[km->key_word_size] = 0;
+ km->key_word_size++;
+ }
+
+		/*
+		 * Calculate the possible start indexes.
+		 * The key length cannot change within the same set of banks,
+		 * and restrictions in the TCAM lookup make key lengths larger
+		 * than 6 hard to handle, although other sizes should be
+		 * possible too.
+		 */
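+		/*
+		 * Resulting placement (from the cases below):
+		 *   size 1   -> may start at bank 8, 9, 10 or 11
+		 *   size 2   -> starts at bank 6
+		 *   size 3/4 -> zero-padded to 6 words, starts at bank 0
+		 *   size 6   -> starts at bank 0
+		 */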
+ switch (km->key_word_size) {
+ case 1:
+ for (int i = 0; i < 4; i++)
+				km->start_offsets[i] = 8 + i;
+ km->num_start_offsets = 4;
+ break;
+ case 2:
+ km->start_offsets[0] = 6;
+ km->num_start_offsets = 1;
+ break;
+ case 3:
+ km->start_offsets[0] = 0;
+ km->num_start_offsets = 1;
+ /* enlarge to 6 */
+ km->entry_mask[km->key_word_size++] = 0;
+ km->entry_mask[km->key_word_size++] = 0;
+ km->entry_mask[km->key_word_size++] = 0;
+ break;
+ case 4:
+ km->start_offsets[0] = 0;
+ km->num_start_offsets = 1;
+ /* enlarge to 6 */
+ km->entry_mask[km->key_word_size++] = 0;
+ km->entry_mask[km->key_word_size++] = 0;
+ break;
+ case 6:
+ km->start_offsets[0] = 0;
+ km->num_start_offsets = 1;
+ break;
+
+ default:
+ NT_LOG(DBG, FILTER,
+ "Final Key word size too large: %i\n",
+ km->key_word_size);
+ return -1;
+ }
+
+#ifdef FLOW_DEBUG
+ char *s = ntlog_helper_str_alloc("TCAM offs: ");
+
+ for (int i = 0; i < km->num_start_offsets; i++)
+ ntlog_helper_str_add(s, "%i,", km->start_offsets[i]);
+ NT_LOG(DBG, FILTER, "%s", s);
+ ntlog_helper_str_free(s);
+#endif
+ }
+
+#ifdef FLOW_DEBUG
+ for (int i = 0; i < km->num_ftype_elem; i++) {
+ NT_LOG(DBG, FILTER,
+ "size %i -> Extr: %s, offset id: %s, rel offset: %i\n",
+ km->match_map[i]->word_len,
+ (km->match_map[i]->extr_start_offs_id & SWX_INFO) ?
+ "SIDEBAND" :
+ km->match_map[i]->extr == KM_USE_EXTRACTOR_SWORD ?
+ "SWORD" :
+ "QWORD",
+ get_prot_offset_descr(km->match_map[i]->extr_start_offs_id),
+ km->match_map[i]->rel_offs);
+ }
+ char *s = ntlog_helper_str_alloc("");
+
+ for (int i = 0; i < km->key_word_size; i++)
+ ntlog_helper_str_add(s, "%08x,", km->entry_word[i]);
+
+ NT_LOG(DBG, FILTER, "%s", s);
+
+ ntlog_helper_str_reset(s, "");
+ for (int i = 0; i < km->key_word_size; i++)
+ ntlog_helper_str_add(s, "%08x,", km->entry_mask[i]);
+
+ NT_LOG(DBG, FILTER, "%s", s);
+ ntlog_helper_str_free(s);
+#endif
+
+ return 0;
+}
+
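+/*
+ * Compare km against km1 for recipe reuse. Return value sketch: 0 when the
+ * definitions cannot share a recipe, -1 when the flows clash (the same
+ * masked key is already programmed), otherwise km1's flow type to reuse.
+ */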
+int km_key_compare(struct km_flow_def_s *km, struct km_flow_def_s *km1)
+{
+ if (km->target != km1->target ||
+ km->num_ftype_elem != km1->num_ftype_elem ||
+ km->key_word_size != km1->key_word_size ||
+ km->info_set != km1->info_set)
+ return 0;
+
+	/*
+	 * Before KCC-CAM: if the port is part of the match, different ports
+	 * in CAT can reuse this flow type.
+	 */
+ int port_match_included = 0, kcc_swx_used = 0;
+
+ for (int i = 0; i < km->num_ftype_elem; i++) {
+ if (km->match[i].extr_start_offs_id == SB_MAC_PORT) {
+ port_match_included = 1;
+ break;
+ } else if (km->match_map[i]->extr_start_offs_id == SB_KCC_ID) {
+ kcc_swx_used = 1;
+ break;
+ }
+ }
+
+ /*
+ * If not using KCC and if port match is not included in CAM,
+ * we need to have same port_id to reuse
+ */
+ if (!kcc_swx_used && !port_match_included &&
+ km->port_id != km1->port_id)
+ return 0;
+
+ for (int i = 0; i < km->num_ftype_elem; i++) {
+ /* using same extractor types in same sequence */
+ if (km->match_map[i]->extr_start_offs_id != km1->match_map[i]->extr_start_offs_id ||
+ km->match_map[i]->rel_offs != km1->match_map[i]->rel_offs ||
+ km->match_map[i]->extr != km1->match_map[i]->extr ||
+ km->match_map[i]->word_len != km1->match_map[i]->word_len)
+ return 0;
+ }
+
+ if (km->target == KM_CAM) {
+ /* in CAM must exactly match on all masks */
+ for (int i = 0; i < km->key_word_size; i++) {
+ if (km->entry_mask[i] != km1->entry_mask[i])
+ return 0;
+ }
+
+ /* Would be set later if not reusing from km1 */
+ km->cam_paired = km1->cam_paired;
+ } else if (km->target == KM_TCAM) {
+ /*
+ * If TCAM, we must make sure Recipe Key Mask does not
+ * mask out enable bits in masks
+ * Note: it is important that km1 is the original creator
+ * of the KM Recipe, since it contains its true masks
+ */
+ for (int i = 0; i < km->key_word_size; i++) {
+ if ((km->entry_mask[i] & km1->entry_mask[i]) !=
+ km->entry_mask[i])
+ return 0;
+ }
+
+ km->tcam_start_bank = km1->tcam_start_bank;
+ km->tcam_record = -1; /* needs to be found later */
+ } else {
+ NT_LOG(DBG, FILTER,
+ "ERROR - KM target not defined or supported\n");
+ return 0;
+ }
+
+ /*
+ * Check for a flow clash. If already programmed return with -1
+ */
+ int double_match = 1;
+
+ for (int i = 0; i < km->key_word_size; i++) {
+ if ((km->entry_word[i] & km->entry_mask[i]) !=
+ (km1->entry_word[i] & km1->entry_mask[i])) {
+ double_match = 0;
+ break;
+ }
+ }
+
+ if (double_match)
+ return -1;
+
+	/*
+	 * Note that TCAM and CAM may reuse the same RCP and flow type;
+	 * when this happens, the CAM entry wins on overlap.
+	 */
+
+ /* Use same KM Recipe and same flow type - return flow type */
+ return km1->flow_type;
+}
+
+int km_rcp_set(struct km_flow_def_s *km, int index)
+{
+ int qw = 0;
+ int sw = 0;
+ int swx = 0;
+
+ hw_mod_km_rcp_set(km->be, HW_KM_RCP_PRESET_ALL, index, 0, 0);
+
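+	/*
+	 * Extractor slots available per recipe (as used below): two QWORD
+	 * extractors (QW0, QW4), two SWORD/DWORD extractors (DW8, DW10) and
+	 * one sideband (SWX) selector - hence the qw/sw/swx counters.
+	 */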
+ /* set extractor words, offs, contrib */
+ for (int i = 0; i < km->num_ftype_elem; i++) {
+ switch (km->match_map[i]->extr) {
+ case KM_USE_EXTRACTOR_SWORD:
+ if (km->match_map[i]->extr_start_offs_id & SWX_INFO) {
+ if (km->target == KM_CAM && swx == 0) {
+ /* SWX */
+ if (km->match_map[i]
+ ->extr_start_offs_id ==
+ SB_VNI) {
+ NT_LOG(DBG, FILTER,
+ "Set KM SWX sel A - VNI\n");
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_SWX_CCH,
+ index, 0, 1);
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_SWX_SEL_A,
+ index, 0,
+ SWX_SEL_ALL32);
+ } else if (km->match_map[i]
+ ->extr_start_offs_id ==
+ SB_MAC_PORT) {
+ NT_LOG(DBG, FILTER,
+ "Set KM SWX sel A - PTC + MAC\n");
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_SWX_SEL_A,
+ index, 0,
+ SWX_SEL_ALL32);
+ } else if (km->match_map[i]
+ ->extr_start_offs_id ==
+ SB_KCC_ID) {
+ NT_LOG(DBG, FILTER,
+ "Set KM SWX sel A - KCC ID\n");
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_SWX_CCH,
+ index, 0, 1);
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_SWX_SEL_A,
+ index, 0,
+ SWX_SEL_ALL32);
+ } else {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+ swx++;
+ } else {
+ if (sw == 0) {
+ /* DW8 */
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_DW8_DYN,
+ index, 0,
+ km->match_map[i]
+ ->extr_start_offs_id);
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_DW8_OFS,
+ index, 0,
+ km->match_map[i]->rel_offs);
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_DW8_SEL_A,
+ index, 0,
+ DW8_SEL_FIRST32);
+ NT_LOG(DBG, FILTER,
+ "Set KM DW8 sel A: dyn: %i, offs: %i\n",
+ km->match_map[i]
+ ->extr_start_offs_id,
+ km->match_map[i]->rel_offs);
+ } else if (sw == 1) {
+ /* DW10 */
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_DW10_DYN,
+ index, 0,
+ km->match_map[i]
+ ->extr_start_offs_id);
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_DW10_OFS,
+ index, 0,
+ km->match_map[i]->rel_offs);
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_DW10_SEL_A,
+ index, 0,
+ DW10_SEL_FIRST32);
+ NT_LOG(DBG, FILTER,
+ "Set KM DW10 sel A: dyn: %i, offs: %i\n",
+ km->match_map[i]
+ ->extr_start_offs_id,
+ km->match_map[i]->rel_offs);
+ } else {
+ return -1;
+ }
+ sw++;
+ }
+ break;
+
+ case KM_USE_EXTRACTOR_QWORD:
+ if (qw == 0) {
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_QW0_DYN,
+ index, 0,
+ km->match_map[i]->extr_start_offs_id);
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_QW0_OFS,
+ index, 0,
+ km->match_map[i]->rel_offs);
+ switch (km->match_map[i]->word_len) {
+ case 1:
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_QW0_SEL_A,
+ index, 0,
+ QW0_SEL_FIRST32);
+ break;
+ case 2:
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_QW0_SEL_A,
+ index, 0,
+ QW0_SEL_FIRST64);
+ break;
+ case 4:
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_QW0_SEL_A,
+ index, 0,
+ QW0_SEL_ALL128);
+ break;
+ default:
+ return -1;
+ }
+ NT_LOG(DBG, FILTER,
+ "Set KM QW0 sel A: dyn: %i, offs: %i, size: %i\n",
+ km->match_map[i]->extr_start_offs_id,
+ km->match_map[i]->rel_offs,
+ km->match_map[i]->word_len);
+ } else if (qw == 1) {
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_QW4_DYN,
+ index, 0,
+ km->match_map[i]->extr_start_offs_id);
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_QW4_OFS,
+ index, 0,
+ km->match_map[i]->rel_offs);
+ switch (km->match_map[i]->word_len) {
+ case 1:
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_QW4_SEL_A,
+ index, 0,
+ QW4_SEL_FIRST32);
+ break;
+ case 2:
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_QW4_SEL_A,
+ index, 0,
+ QW4_SEL_FIRST64);
+ break;
+ case 4:
+ hw_mod_km_rcp_set(km->be,
+ HW_KM_RCP_QW4_SEL_A,
+ index, 0,
+ QW4_SEL_ALL128);
+ break;
+ default:
+ return -1;
+ }
+ NT_LOG(DBG, FILTER,
+ "Set KM QW4 sel A: dyn: %i, offs: %i, size: %i\n",
+ km->match_map[i]->extr_start_offs_id,
+ km->match_map[i]->rel_offs,
+ km->match_map[i]->word_len);
+ } else {
+ return -1;
+ }
+ qw++;
+ break;
+ default:
+ return -1;
+ }
+ }
+
+ /* set mask A */
+ for (int i = 0; i < km->key_word_size; i++) {
+ hw_mod_km_rcp_set(km->be, HW_KM_RCP_MASK_A, index,
+ (km->be->km.nb_km_rcp_mask_a_word_size - 1) -
+ i,
+ km->entry_mask[i]);
+ NT_LOG(DBG, FILTER, "Set KM mask A: %08x\n", km->entry_mask[i]);
+ }
+
+ if (km->target == KM_CAM) {
+ /* set info - Color */
+ if (km->info_set) {
+ hw_mod_km_rcp_set(km->be, HW_KM_RCP_INFO_A, index, 0,
+ 1);
+ NT_LOG(DBG, FILTER, "Set KM info A\n");
+ }
+ /* set key length A */
+ hw_mod_km_rcp_set(km->be, HW_KM_RCP_EL_A, index, 0,
+ km->key_word_size + !!km->info_set -
+ 1); /* select id is -1 */
+		NT_LOG(DBG, FILTER, "Set KM EL A: %i\n",
+		       km->key_word_size + !!km->info_set - 1);
+
+		/* set Flow Type for Key A */
+		hw_mod_km_rcp_set(km->be, HW_KM_RCP_FTM_A, index, 0,
+				  1 << km->flow_type);
+
+ NT_LOG(DBG, FILTER, "Set KM FTM A - ft: %i\n", km->flow_type);
+
+ /* Set Paired - only on the CAM part though... */
+ if ((uint32_t)(km->key_word_size + !!km->info_set) >
+ km->be->km.nb_cam_record_words) {
+ hw_mod_km_rcp_set(km->be, HW_KM_RCP_PAIRED,
+ index, 0, 1);
+ NT_LOG(DBG, FILTER, "Set KM CAM Paired\n");
+ km->cam_paired = 1;
+ }
+ } else if (km->target == KM_TCAM) {
+ uint32_t bank_bm = 0;
+
+ if (tcam_find_mapping(km) < 0) {
+ /* failed mapping into TCAM */
+ NT_LOG(DBG, FILTER, "INFO: TCAM mapping flow failed\n");
+ return -1;
+ }
+
+ assert((uint32_t)(km->tcam_start_bank + km->key_word_size) <=
+ km->be->km.nb_tcam_banks);
+
+ for (int i = 0; i < km->key_word_size; i++) {
+ bank_bm |= (1 << (km->be->km.nb_tcam_banks - 1 -
+ (km->tcam_start_bank + i)));
+ }
+
+ /* Set BANK_A */
+ hw_mod_km_rcp_set(km->be, HW_KM_RCP_BANK_A, index, 0, bank_bm);
+ /* Set Kl_A */
+ hw_mod_km_rcp_set(km->be, HW_KM_RCP_KL_A, index, 0,
+ km->key_word_size - 1);
+
+ } else {
+ return -1;
+ }
+ return 0;
+}
+
+static int cam_populate(struct km_flow_def_s *km, int bank)
+{
+ int res = 0;
+ int cnt = km->key_word_size + !!km->info_set;
+
+ for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+ i++, cnt--) {
+ res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+ km->record_indexes[bank],
+ km->entry_word[i]);
+ res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+ km->record_indexes[bank],
+ km->flow_type);
+ }
+ km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = km;
+
+ if (cnt) {
+ assert(km->cam_paired);
+ for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+ i++, cnt--) {
+ res |= hw_mod_km_cam_set(km->be,
+ HW_KM_CAM_W0 + i,
+ bank,
+ km->record_indexes[bank] + 1,
+ km->entry_word[km->be->km.nb_cam_record_words +
+ i]);
+ res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+ bank,
+ km->record_indexes[bank] + 1,
+ km->flow_type);
+ }
+ km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = km;
+ }
+
+ res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+ km->cam_paired ? 2 : 1);
+
+ return res;
+}
+
+static int cam_reset_entry(struct km_flow_def_s *km, int bank)
+{
+ int res = 0;
+ int cnt = km->key_word_size + !!km->info_set;
+
+ for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+ i++, cnt--) {
+ res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+ km->record_indexes[bank], 0);
+ res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i, bank,
+ km->record_indexes[bank], 0);
+ }
+ km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner = NULL;
+
+ if (cnt) {
+ assert(km->cam_paired);
+ for (uint32_t i = 0; i < km->be->km.nb_cam_record_words && cnt;
+ i++, cnt--) {
+ res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_W0 + i, bank,
+ km->record_indexes[bank] + 1,
+ 0);
+ res |= hw_mod_km_cam_set(km->be, HW_KM_CAM_FT0 + i,
+ bank,
+ km->record_indexes[bank] + 1,
+ 0);
+ }
+ km->cam_dist[CAM_KM_DIST_IDX(bank) + 1].km_owner = NULL;
+ }
+ res |= hw_mod_km_cam_flush(km->be, bank, km->record_indexes[bank],
+ km->cam_paired ? 2 : 1);
+ return res;
+}
+
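+/*
+ * Cuckoo-style relocation: rewrite the entry currently held in km->bank_used
+ * into the first other bank whose hashed slot (and the following slot, when
+ * the entry is paired) is unowned, then release the old slot. For example,
+ * with three banks and per-bank hash indexes [a, b, c], an entry sitting in
+ * bank 0 can move to bank 1 whenever cam_dist for index b is free.
+ */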
+static int move_cuckoo_index(struct km_flow_def_s *km)
+{
+ assert(km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner);
+
+ for (uint32_t bank = 0; bank < km->be->km.nb_cam_banks; bank++) {
+ /* It will not select itself */
+ if (km->cam_dist[CAM_KM_DIST_IDX(bank)].km_owner == NULL) {
+ if (km->cam_paired) {
+ if (km->cam_dist[CAM_KM_DIST_IDX(bank) + 1]
+ .km_owner != NULL)
+ continue;
+ }
+
+ /*
+ * Populate in new position
+ */
+ int res = cam_populate(km, bank);
+
+ if (res) {
+ NT_LOG(DBG, FILTER,
+ "Error: failed to write to KM CAM in cuckoo move\n");
+ return 0;
+ }
+
+			/*
+			 * Reset/free the entry in the old bank.
+			 * HW flushes are not needed here: the old addresses
+			 * are always taken over by the caller. If this code
+			 * changes in future updates, that may no longer hold!
+			 */
+ km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+ NULL;
+ if (km->cam_paired)
+ km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+ .km_owner = NULL;
+
+ NT_LOG(DBG, FILTER,
+ "KM Cuckoo hash moved from bank %i to bank %i (%04X => %04X)\n",
+ km->bank_used, bank,
+ CAM_KM_DIST_IDX(km->bank_used),
+ CAM_KM_DIST_IDX(bank));
+ km->bank_used = bank;
+ (*km->cuckoo_moves)++;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int move_cuckoo_index_level(struct km_flow_def_s *km_parent,
+ int bank_idx, int levels,
+ int cam_adr_list_len)
+{
+ struct km_flow_def_s *km = km_parent->cam_dist[bank_idx].km_owner;
+
+ assert(levels <= CUCKOO_MOVE_MAX_DEPTH);
+
+	/*
+	 * Only move an entry with the same pairing; this can be extended
+	 * later to move both paired and single entries.
+	 */
+ if (!km || km_parent->cam_paired != km->cam_paired)
+ return 0;
+
+ if (move_cuckoo_index(km))
+ return 1;
+ if (levels <= 1)
+ return 0;
+
+ assert(cam_adr_list_len < CUCKOO_MOVE_MAX_DEPTH);
+
+ cam_addr_reserved_stack[cam_adr_list_len++] = bank_idx;
+
+ for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+ int reserved = 0;
+ int new_idx = CAM_KM_DIST_IDX(i);
+
+ for (int i_reserved = 0; i_reserved < cam_adr_list_len;
+ i_reserved++) {
+ if (cam_addr_reserved_stack[i_reserved] == new_idx) {
+ reserved = 1;
+ break;
+ }
+ }
+ if (reserved)
+ continue;
+
+ int res = move_cuckoo_index_level(km, new_idx, levels - 1,
+ cam_adr_list_len);
+		if (res) {
+			if (move_cuckoo_index(km))
+				return 1;
+			else
+				assert(0);
+		}
+ }
+
+ return 0;
+}
+
+static int km_write_data_to_cam(struct km_flow_def_s *km)
+{
+ int res = 0;
+ int val[MAX_BANKS];
+
+ assert(km->be->km.nb_cam_banks <= MAX_BANKS);
+ assert(km->cam_dist);
+
+ /* word list without info set */
+ gethash(km->hsh, km->entry_word, val);
+
+ for (uint32_t i = 0; i < km->be->km.nb_cam_banks; i++) {
+		/* if paired we always start on an even address - clear bit 0 */
+ km->record_indexes[i] = (km->cam_paired) ? val[i] & ~1 : val[i];
+ }
+ NT_LOG(DBG, FILTER, "KM HASH [%03X, %03X, %03X]\n",
+ km->record_indexes[0], km->record_indexes[1],
+ km->record_indexes[2]);
+
+	if (km->info_set) {
+		/* finally set info */
+		km->entry_word[km->key_word_size] = km->info;
+	}
+
+ int bank = -1;
+ /*
+ * first step, see if any of the banks are free
+ */
+ for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks; i_bank++) {
+ if (km->cam_dist[CAM_KM_DIST_IDX(i_bank)].km_owner == NULL) {
+ if (km->cam_paired == 0 ||
+ (km->cam_dist[CAM_KM_DIST_IDX(i_bank) + 1]
+ .km_owner == NULL)) {
+ bank = i_bank;
+ break;
+ }
+ }
+ }
+
+ if (bank < 0) {
+ /*
+ * Second step - cuckoo move existing flows if possible
+ */
+ for (uint32_t i_bank = 0; i_bank < km->be->km.nb_cam_banks;
+ i_bank++) {
+ if (move_cuckoo_index_level(km, CAM_KM_DIST_IDX(i_bank),
+ 4, 0)) {
+ bank = i_bank;
+ break;
+ }
+ }
+ }
+ if (bank < 0)
+ return -1;
+
+ /* populate CAM */
+ NT_LOG(DBG, FILTER, "KM Bank = %i (addr %04X)\n", bank,
+ CAM_KM_DIST_IDX(bank));
+ res = cam_populate(km, bank);
+ if (res == 0) {
+ km->flushed_to_target = 1;
+ km->bank_used = bank;
+ }
+
+ return res;
+}
+
+/*
+ * TCAM
+ */
+static int tcam_find_free_record(struct km_flow_def_s *km, int start_bank)
+{
+ for (uint32_t rec = 0; rec < km->be->km.nb_tcam_bank_width; rec++) {
+ if (km->tcam_dist[TCAM_DIST_IDX(start_bank, rec)].km_owner ==
+ NULL) {
+ int pass = 1;
+
+ for (int ii = 1; ii < km->key_word_size; ii++) {
+ if (km->tcam_dist[TCAM_DIST_IDX(start_bank + ii,
+ rec)]
+ .km_owner != NULL) {
+ pass = 0;
+ break;
+ }
+ }
+ if (pass) {
+ km->tcam_record = rec;
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tcam_find_mapping(struct km_flow_def_s *km)
+{
+ /* Search record and start index for this flow */
+ for (int bs_idx = 0; bs_idx < km->num_start_offsets; bs_idx++) {
+ if (tcam_find_free_record(km, km->start_offsets[bs_idx])) {
+ km->tcam_start_bank = km->start_offsets[bs_idx];
+ NT_LOG(DBG, FILTER,
+ "Found space in TCAM start bank %i, record %i\n",
+ km->tcam_start_bank, km->tcam_record);
+ return 0;
+ }
+ }
+ return -1;
+}
+
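+/*
+ * The TCAM is programmed per (byte lane, byte value): for each of the 4 byte
+ * lanes of a 32-bit key word, each of the 256 possible byte values holds a
+ * bitmap of the records that accept it. Worked example: matching byte value
+ * 0xa5 under mask 0xf0 sets the record bit for all 16 values 0xa0..0xaf and
+ * clears it for the rest, which is exactly the (val & a_m) == a test below.
+ */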
+static int tcam_write_word(struct km_flow_def_s *km, int bank, int record,
+ uint32_t word, uint32_t mask)
+{
+ int err = 0;
+ uint32_t all_recs[3];
+
+ int rec_val = record / 32;
+ int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1U << rec_bit_shft);
+
+ assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+ for (int byte = 0; byte < 4; byte++) {
+ uint8_t a = (uint8_t)((word >> (24 - (byte * 8))) & 0xff);
+ uint8_t a_m = (uint8_t)((mask >> (24 - (byte * 8))) & 0xff);
+ /* calculate important value bits */
+ a = a & a_m;
+
+#ifdef FLOW_DEBUG
+ if (a_m == 0) {
+ NT_LOG(DBG, FILTER,
+ "bank %i, byte %i, All values, rec_val %i rec bit %08x\n",
+ bank, byte, rec_val, rec_bit);
+ }
+#endif
+
+ for (int val = 0; val < 256; val++) {
+ err |= hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+ byte, val, all_recs);
+ if ((val & a_m) == a) {
+ all_recs[rec_val] |= rec_bit;
+#ifdef FLOW_DEBUG
+ if (a_m) {
+ NT_LOG(DBG, FILTER,
+ "bank %i, byte %i, val %i(%02x), "
+ "rec_val %i rec bit %08x\n",
+ bank, byte, val, val, rec_val,
+ rec_bit);
+ }
+#endif
+ } else {
+ all_recs[rec_val] &= ~rec_bit;
+ }
+ err |= hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+ byte, val, all_recs);
+ if (err)
+ break;
+ }
+ }
+ /* flush bank */
+ err |= hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+ if (err == 0) {
+ assert(km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner ==
+ NULL);
+ km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = km;
+ }
+ return err;
+}
+
+static int km_write_data_to_tcam(struct km_flow_def_s *km)
+{
+ int err = 0;
+
+ if (km->tcam_record < 0) {
+ tcam_find_free_record(km, km->tcam_start_bank);
+ if (km->tcam_record < 0) {
+ NT_LOG(DBG, FILTER,
+ "FAILED to find space in TCAM for flow\n");
+ return -1;
+ }
+ NT_LOG(DBG, FILTER,
+ "Reused RCP: Found space in TCAM start bank %i, record %i\n",
+ km->tcam_start_bank, km->tcam_record);
+ }
+
+ /* Write KM_TCI */
+ err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+ km->tcam_record, km->info);
+ err |= hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+ km->tcam_record, km->flow_type);
+ err |= hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record,
+ 1);
+
+#ifdef FLOW_DEBUG
+ km->be->iface->set_debug_mode(km->be->be_dev,
+ FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+ for (int i = 0; i < km->key_word_size && !err; i++) {
+ err = tcam_write_word(km, km->tcam_start_bank + i,
+ km->tcam_record, km->entry_word[i],
+ km->entry_mask[i]);
+ }
+#ifdef FLOW_DEBUG
+ km->be->iface->set_debug_mode(km->be->be_dev,
+ FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+ if (err == 0)
+ km->flushed_to_target = 1;
+
+ return err;
+}
+
+static int tcam_reset_bank(struct km_flow_def_s *km, int bank, int record)
+{
+ int err = 0;
+ uint32_t all_recs[3];
+
+ int rec_val = record / 32;
+ int rec_bit_shft = record % 32;
+	uint32_t rec_bit = (1U << rec_bit_shft);
+
+ assert((km->be->km.nb_tcam_bank_width + 31) / 32 <= 3);
+
+ for (int byte = 0; byte < 4; byte++) {
+ for (int val = 0; val < 256; val++) {
+ err = hw_mod_km_tcam_get(km->be, HW_KM_TCAM_T, bank,
+ byte, val, all_recs);
+ if (err)
+ break;
+ all_recs[rec_val] &= ~rec_bit;
+ err = hw_mod_km_tcam_set(km->be, HW_KM_TCAM_T, bank,
+ byte, val, all_recs);
+ if (err)
+ break;
+ }
+ }
+ if (err)
+ return err;
+
+ /* flush bank */
+ err = hw_mod_km_tcam_flush(km->be, bank, ALL_BANK_ENTRIES);
+ km->tcam_dist[TCAM_DIST_IDX(bank, record)].km_owner = NULL;
+
+ NT_LOG(DBG, FILTER, "Reset TCAM bank %i, rec_val %i rec bit %08x\n",
+ bank, rec_val, rec_bit);
+
+ return err;
+}
+
+static int tcam_reset_entry(struct km_flow_def_s *km)
+{
+ int err = 0;
+
+ if (km->tcam_start_bank < 0 || km->tcam_record < 0) {
+ NT_LOG(DBG, FILTER, "FAILED to find space in TCAM for flow\n");
+ return -1;
+ }
+
+ /* Write KM_TCI */
+ hw_mod_km_tci_set(km->be, HW_KM_TCI_COLOR, km->tcam_start_bank,
+ km->tcam_record, 0);
+ hw_mod_km_tci_set(km->be, HW_KM_TCI_FT, km->tcam_start_bank,
+ km->tcam_record, 0);
+ hw_mod_km_tci_flush(km->be, km->tcam_start_bank, km->tcam_record, 1);
+
+#ifdef FLOW_DEBUG
+ km->be->iface->set_debug_mode(km->be->be_dev,
+ FLOW_BACKEND_DEBUG_MODE_NONE);
+#endif
+
+ for (int i = 0; i < km->key_word_size && !err; i++) {
+ err = tcam_reset_bank(km, km->tcam_start_bank + i,
+ km->tcam_record);
+ }
+#ifdef FLOW_DEBUG
+ km->be->iface->set_debug_mode(km->be->be_dev,
+ FLOW_BACKEND_DEBUG_MODE_WRITE);
+#endif
+
+ return err;
+}
+
+int km_refer_data_match_entry(struct km_flow_def_s *km,
+ struct km_flow_def_s *km1)
+{
+ int res = 0;
+
+ km->root = km1->root ? km1->root : km1;
+ while (km1->reference)
+ km1 = km1->reference;
+ km1->reference = km;
+
+ km->info = km1->info;
+
+ switch (km->target) {
+ case KM_CAM:
+ km->cam_paired = km1->cam_paired;
+ km->bank_used = km1->bank_used;
+ km->flushed_to_target = km1->flushed_to_target;
+ break;
+ case KM_TCAM:
+ km->tcam_start_bank = km1->tcam_start_bank;
+ km->tcam_record = km1->tcam_record;
+ km->flushed_to_target = km1->flushed_to_target;
+ break;
+ case KM_SYNERGY:
+ default:
+ res = -1;
+ break;
+ }
+
+ return res;
+}
+
+int km_write_data_match_entry(struct km_flow_def_s *km, uint32_t color)
+{
+ int res = -1;
+
+ km->info = color;
+ NT_LOG(DBG, FILTER, "Write Data entry Color: %08x\n", color);
+
+ switch (km->target) {
+ case KM_CAM:
+ res = km_write_data_to_cam(km);
+ break;
+ case KM_TCAM:
+ res = km_write_data_to_tcam(km);
+ break;
+ case KM_SYNERGY:
+ default:
+ break;
+ }
+ return res;
+}
+
+int km_clear_data_match_entry(struct km_flow_def_s *km)
+{
+ int res = 0;
+
+ if (km->root) {
+ struct km_flow_def_s *km1 = km->root;
+
+ while (km1->reference != km)
+ km1 = km1->reference;
+
+ km1->reference = km->reference;
+
+ km->flushed_to_target = 0;
+ km->bank_used = 0;
+ } else if (km->reference) {
+ km->reference->root = NULL;
+
+ switch (km->target) {
+ case KM_CAM:
+ km->cam_dist[CAM_KM_DIST_IDX(km->bank_used)].km_owner =
+ km->reference;
+ if (km->key_word_size + !!km->info_set > 1) {
+ assert(km->cam_paired);
+ km->cam_dist[CAM_KM_DIST_IDX(km->bank_used) + 1]
+ .km_owner = km->reference;
+ }
+ break;
+ case KM_TCAM:
+ for (int i = 0; i < km->key_word_size; i++) {
+ km->tcam_dist[TCAM_DIST_IDX(km->tcam_start_bank + i,
+ km->tcam_record)].km_owner = km->reference;
+ }
+ break;
+ case KM_SYNERGY:
+ default:
+ res = -1;
+ break;
+ }
+
+ km->flushed_to_target = 0;
+ km->bank_used = 0;
+ } else if (km->flushed_to_target) {
+ switch (km->target) {
+ case KM_CAM:
+ res = cam_reset_entry(km, km->bank_used);
+ break;
+ case KM_TCAM:
+ res = tcam_reset_entry(km);
+ break;
+ case KM_SYNERGY:
+ default:
+ res = -1;
+ break;
+ }
+ km->flushed_to_target = 0;
+ km->bank_used = 0;
+ }
+
+ return res;
+}
new file mode 100644
@@ -0,0 +1,787 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <arpa/inet.h> /* htons, htonl, ntohs */
+#include <stdio.h>
+#include "stream_binary_flow_api.h"
+
+#include "flow_api_backend.h"
+#include "flow_api_engine.h"
+
+#define MAX_HW_VIRT_PORTS 127 /* vport 255 is reserved as "no port" */
+#define VIRTUAL_TUNNEL_PORT_OFFSET 72
+
+struct tunnel_s {
+ struct tunnel_cfg_s cfg;
+ struct tunnel_cfg_s cfg_mask;
+ uint32_t flow_stat_id;
+ uint8_t vport;
+ int refcnt;
+ struct tunnel_s *next; /* linked list of defined tunnels */
+};
+
+int is_virtual_port(uint8_t virt_port)
+{
+ return !!(virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+ virt_port < MAX_HW_VIRT_PORTS);
+}
+
+/*
+ * New function for use with OVS 2.17.2
+ */
+static struct tunnel_s *tunnels;
+
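+/* in-use map for virtual tunnel ports (index = vport - VIRTUAL_TUNNEL_PORT_OFFSET) */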
+static uint8_t vport[MAX_HW_VIRT_PORTS - VIRTUAL_TUNNEL_PORT_OFFSET + 1];
+
+uint8_t flow_tunnel_alloc_virt_port(void)
+{
+ for (uint8_t i = VIRTUAL_TUNNEL_PORT_OFFSET; i < MAX_HW_VIRT_PORTS;
+ i++) {
+ if (!vport[i - VIRTUAL_TUNNEL_PORT_OFFSET]) {
+ vport[i - VIRTUAL_TUNNEL_PORT_OFFSET] = 1;
+ return i;
+ }
+ }
+
+ /* no more virtual ports */
+ return 255;
+}
+
+uint8_t flow_tunnel_free_virt_port(uint8_t virt_port)
+{
+ if (virt_port >= VIRTUAL_TUNNEL_PORT_OFFSET &&
+ virt_port < MAX_HW_VIRT_PORTS) {
+ vport[virt_port - VIRTUAL_TUNNEL_PORT_OFFSET] = 0;
+ return 0;
+ }
+ return -1;
+}
+
+#define check(_v1, _v2, _msk1, _msk2) ({ \
+ __typeof__(_v1) (v1) = (_v1); \
+ __typeof__(_v2) (v2) = (_v2); \
+ __typeof__(_msk1) (msk1) = (_msk1); \
+ __typeof__(_msk2) (msk2) = (_msk2); \
+ (((v1) & (msk1) & (msk2)) == ((v2) & (msk1) & (msk2))); \
+})
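+/*
+ * check() compares only the bits covered by both masks. Worked example:
+ * v1 = 0x0a000001, v2 = 0x0a000002, msk1 = 0xffffff00, msk2 = 0xffffffff
+ * gives a combined mask of 0xffffff00; both sides reduce to 0x0a000000 and
+ * the values are considered equal.
+ */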
+
+#define check_tun_v4_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({ \
+ __typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+ __typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+ __typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+ __typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+ (check((tun_cfg)->v4.src_ip, (tun1_cfg)->v4.src_ip, \
+ (tun_msk)->v4.src_ip, (tun1_msk)->v4.src_ip) && \
+ check((tun_cfg)->v4.dst_ip, (tun1_cfg)->v4.dst_ip, \
+ (tun_msk)->v4.dst_ip, (tun1_msk)->v4.dst_ip) && \
+ check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+ (tun1_msk)->s_port) && \
+ check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+ (tun1_msk)->d_port)); \
+})
+
+#define check_tun_v6_equal(_tun_cfg, _tun_msk, _tun1_cfg, _tun1_msk) ({ \
+ __typeof__(_tun_cfg) (tun_cfg) = (_tun_cfg); \
+ __typeof__(_tun_msk) (tun_msk) = (_tun_msk); \
+ __typeof__(_tun1_cfg) (tun1_cfg) = (_tun1_cfg); \
+ __typeof__(_tun1_msk) (tun1_msk) = (_tun1_msk); \
+ (check((tun_cfg)->v6_long.src_ip[0], (tun1_cfg)->v6_long.src_ip[0], \
+ (tun_msk)->v6_long.src_ip[0], (tun1_msk)->v6_long.src_ip[0]) && \
+ check((tun_cfg)->v6_long.src_ip[1], (tun1_cfg)->v6_long.src_ip[1], \
+ (tun_msk)->v6_long.src_ip[1], (tun1_msk)->v6_long.src_ip[1]) && \
+ check((tun_cfg)->v6_long.dst_ip[0], (tun1_cfg)->v6_long.dst_ip[0], \
+ (tun_msk)->v6_long.dst_ip[0], (tun1_msk)->v6_long.dst_ip[0]) && \
+ check((tun_cfg)->v6_long.dst_ip[1], (tun1_cfg)->v6_long.dst_ip[1], \
+ (tun_msk)->v6_long.dst_ip[1], (tun1_msk)->v6_long.dst_ip[1]) && \
+ check((tun_cfg)->s_port, (tun1_cfg)->s_port, (tun_msk)->s_port, \
+ (tun1_msk)->s_port) && \
+ check((tun_cfg)->d_port, (tun1_cfg)->d_port, (tun_msk)->d_port, \
+ (tun1_msk)->d_port)); \
+})
+
+static int check_tun_match(struct tunnel_s *tun,
+ const struct tunnel_cfg_s *tnlcfg,
+ const struct tunnel_cfg_s *tnlcfg_mask)
+{
+ if (tun->cfg.tun_type == tnlcfg->tun_type) {
+ if (tun->cfg.ipversion == 4) {
+ return check_tun_v4_equal(&tun->cfg, &tun->cfg_mask,
+ tnlcfg, tnlcfg_mask);
+ } else {
+ return check_tun_v6_equal(&tun->cfg, &tun->cfg_mask,
+ tnlcfg, tnlcfg_mask);
+ }
+ }
+ return 0;
+}
+
+static struct tunnel_s *tunnel_get(const struct tunnel_cfg_s *tnlcfg,
+ const struct tunnel_cfg_s *tnlcfg_mask,
+ int tun_set)
+{
+ struct tunnel_s *tun = tunnels;
+
+ while (tun) {
+ if (tun->flow_stat_id != (uint32_t)-1) {
+ /* This tun is already defined and set */
+ if (tun_set) {
+ /*
+ * A tunnel full match definition - search for duplicate
+ */
+ if (memcmp(&tun->cfg, tnlcfg,
+ sizeof(struct tunnel_cfg_s)) == 0 &&
+ memcmp(&tun->cfg_mask, tnlcfg_mask,
+ sizeof(struct tunnel_cfg_s)) == 0)
+ break;
+ } else {
+ /*
+ * A tunnel match search
+ */
+ if (check_tun_match(tun, tnlcfg, tnlcfg_mask))
+ break;
+ }
+
+ } else if (tun_set) {
+			/*
+			 * Check if this is a pre-configured tunnel that the
+			 * one being set can take over; try to match them.
+			 */
+ if (check_tun_match(tun, tnlcfg, tnlcfg_mask)) {
+ /*
+ * Change the tun into the defining one - flow_stat_id is set later
+ */
+ memcpy(&tun->cfg, tnlcfg,
+ sizeof(struct tunnel_cfg_s));
+ memcpy(&tun->cfg_mask, tnlcfg_mask,
+ sizeof(struct tunnel_cfg_s));
+
+ break;
+ }
+
+ } /* else ignore - both unset */
+ tun = tun->next;
+ }
+
+ /*
+ * If not found, create and add it to db
+ */
+ if (!tun) {
+ uint8_t vport = flow_tunnel_alloc_virt_port();
+
+ NT_LOG(DBG, FILTER, "Create NEW tunnel allocate vport %i\n",
+ vport);
+
+ if (vport < 0xff) {
+			tun = calloc(1, sizeof(struct tunnel_s));
+			if (!tun)
+				return NULL;
+			memcpy(&tun->cfg, tnlcfg, sizeof(struct tunnel_cfg_s));
+			memcpy(&tun->cfg_mask, tnlcfg_mask,
+			       sizeof(struct tunnel_cfg_s));
+
+ /* flow_stat_id is set later from flow code */
+ tun->flow_stat_id = (uint32_t)-1;
+ tun->vport = vport;
+ tun->refcnt = 1;
+
+ tun->next = tunnels;
+ tunnels = tun;
+ }
+ } else {
+ tun->refcnt++;
+ NT_LOG(DBG, FILTER, "Found tunnel has vport %i - ref %i\n",
+ tun->vport, tun->refcnt);
+ }
+
+ return tun;
+}
+
+int tunnel_release(struct tunnel_s *tnl)
+{
+ struct tunnel_s *tun = tunnels, *prev = NULL;
+
+ NT_LOG(DBG, FILTER, "release tunnel vport %i, ref cnt %i..\n",
+ tnl->vport, tnl->refcnt);
+ /* find tunnel in list */
+ while (tun) {
+ if (tun == tnl)
+ break;
+ prev = tun;
+ tun = tun->next;
+ }
+
+ if (!tun) {
+ NT_LOG(DBG, FILTER,
+ "ERROR: Tunnel not found in tunnel release!\n");
+ return -1;
+ }
+
+ /* if last ref, take out of list */
+ if (--tun->refcnt == 0) {
+ if (prev)
+ prev->next = tun->next;
+ else
+ tunnels = tun->next;
+ flow_tunnel_free_virt_port(tun->vport);
+
+ NT_LOG(DBG, FILTER,
+ "tunnel ref count == 0 remove tunnel vport %i\n",
+ tun->vport);
+ free(tun);
+ }
+
+ return 0;
+}
+
+struct tunnel_s *tunnel_parse(const struct flow_elem *elem, int *idx,
+ uint32_t *vni)
+{
+ int eidx = *idx;
+ struct tunnel_cfg_s tnlcfg;
+ struct tunnel_cfg_s tnlcfg_mask;
+ struct tunnel_s *rtnl = NULL;
+
+ if (elem) {
+ eidx++;
+		memset(&tnlcfg, 0, sizeof(struct tunnel_cfg_s));
+		memset(&tnlcfg_mask, 0, sizeof(struct tunnel_cfg_s));
+ int valid = 1;
+ enum flow_elem_type last_type = FLOW_ELEM_TYPE_END;
+
+ tnlcfg.d_port = 0xffff;
+ tnlcfg.tun_type = -1;
+
+ if (vni)
+ *vni = (uint32_t)-1;
+
+ while (elem[eidx].type != FLOW_ELEM_TYPE_END &&
+ elem[eidx].type >= last_type && valid) {
+ switch (elem[eidx].type) {
+ case FLOW_ELEM_TYPE_ANY:
+ case FLOW_ELEM_TYPE_ETH:
+ /* Ignore */
+ break;
+ case FLOW_ELEM_TYPE_IPV4: {
+ const struct flow_elem_ipv4 *ipv4 =
+ (const struct flow_elem_ipv4 *)elem[eidx]
+ .spec;
+ const struct flow_elem_ipv4 *ipv4_mask =
+ (const struct flow_elem_ipv4 *)elem[eidx]
+ .mask;
+
+ tnlcfg.v4.src_ip = ipv4->hdr.src_ip;
+ tnlcfg.v4.dst_ip = ipv4->hdr.dst_ip;
+ tnlcfg_mask.v4.src_ip = ipv4_mask->hdr.src_ip;
+ tnlcfg_mask.v4.dst_ip = ipv4_mask->hdr.dst_ip;
+
+ tnlcfg.ipversion = 4;
+ }
+ break;
+ case FLOW_ELEM_TYPE_IPV6: {
+ const struct flow_elem_ipv6 *ipv6 =
+ (const struct flow_elem_ipv6 *)elem[eidx]
+ .spec;
+ const struct flow_elem_ipv6 *ipv6_mask =
+ (const struct flow_elem_ipv6 *)elem[eidx]
+ .mask;
+
+ memcpy(tnlcfg.v6.src_ip, ipv6->hdr.src_addr,
+ sizeof(tnlcfg.v6.src_ip));
+ memcpy(tnlcfg.v6.dst_ip, ipv6->hdr.dst_addr,
+ sizeof(tnlcfg.v6.dst_ip));
+ memcpy(tnlcfg_mask.v6.src_ip,
+ ipv6_mask->hdr.src_addr,
+ sizeof(tnlcfg.v6.src_ip));
+ memcpy(tnlcfg_mask.v6.dst_ip,
+ ipv6_mask->hdr.dst_addr,
+ sizeof(tnlcfg.v6.dst_ip));
+
+ tnlcfg.ipversion = 6;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_UDP: {
+ const struct flow_elem_udp *udp =
+ (const struct flow_elem_udp *)elem[eidx]
+ .spec;
+ const struct flow_elem_udp *udp_mask =
+ (const struct flow_elem_udp *)elem[eidx]
+ .mask;
+
+ tnlcfg.s_port = udp->hdr.src_port;
+ tnlcfg.d_port = udp->hdr.dst_port;
+ tnlcfg_mask.s_port = udp_mask->hdr.src_port;
+ tnlcfg_mask.d_port = udp_mask->hdr.dst_port;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_VXLAN: {
+ const struct flow_elem_vxlan *vxlan =
+ (const struct flow_elem_vxlan *)
+ elem[eidx]
+ .spec;
+				if (vni)
+					*vni = (uint32_t)(((uint32_t)vxlan->vni[0] << 16) |
+							  ((uint32_t)vxlan->vni[1] << 8) |
+							  ((uint32_t)vxlan->vni[2]));
+
+ tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+ }
+ break;
+ default:
+ valid = 0;
+ break;
+ }
+
+ last_type = elem[eidx].type;
+ eidx++;
+ }
+
+		/*
+		 * VXLAN ports: 4789 or 8472. d_port is kept in network byte
+		 * order, so compare against 0xb512 (4789) and 0x1821 (8472)
+		 * as read on a little-endian host.
+		 */
+		if (tnlcfg.tun_type < 0 &&
+		    (tnlcfg.d_port == 0xb512 || tnlcfg.d_port == 0x1821))
+			tnlcfg.tun_type = FLOW_ELEM_TYPE_VXLAN;
+
+ if (!valid || tnlcfg.ipversion == 0 || tnlcfg.tun_type < 0 ||
+ tnlcfg.d_port == 0xffff) {
+ NT_LOG(DBG, FILTER, "Invalid tunnel received\n");
+ return NULL;
+ }
+
+		/* search/add to DB; vni == NULL means a tunnel-set command */
+		rtnl = tunnel_get(&tnlcfg, &tnlcfg_mask, vni ? 0 : 1);
+
+#ifdef FLOW_DEBUG
+ if (rtnl) {
+ if (vni)
+ NT_LOG(DBG, FILTER,
+ "MATCH A TUNNEL DEFINITION - PRESET "
+ "(PREALLOC VPORT) IF NOT FOUND:\n");
+ else
+ NT_LOG(DBG, FILTER,
+ "SET A TUNNEL DEFINITION:\n");
+ struct in_addr addr, mask;
+ char buf[64];
+
+ addr.s_addr = rtnl->cfg.v4.src_ip;
+		snprintf(buf, sizeof(buf), "%s", inet_ntoa(addr));
+ mask.s_addr = rtnl->cfg_mask.v4.src_ip;
+ NT_LOG(DBG, FILTER, " tun src IP: %s / %s\n", buf,
+ inet_ntoa(mask));
+ addr.s_addr = rtnl->cfg.v4.dst_ip;
+		snprintf(buf, sizeof(buf), "%s", inet_ntoa(addr));
+ mask.s_addr = rtnl->cfg_mask.v4.dst_ip;
+ NT_LOG(DBG, FILTER, " tun dst IP: %s / %s\n", buf,
+ inet_ntoa(mask));
+ NT_LOG(DBG, FILTER, " tun tp_src: %i / %04x\n",
+ htons(rtnl->cfg.s_port),
+ htons(rtnl->cfg_mask.s_port));
+ NT_LOG(DBG, FILTER, " tun tp_dst: %i / %04x\n",
+ htons(rtnl->cfg.d_port),
+ htons(rtnl->cfg_mask.d_port));
+ NT_LOG(DBG, FILTER, " tun ipver: %i\n",
+ rtnl->cfg.ipversion);
+ NT_LOG(DBG, FILTER, " tun flow_stat_id: %i\n",
+ rtnl->flow_stat_id);
+ NT_LOG(DBG, FILTER, " tun vport: %i\n",
+ rtnl->vport);
+ NT_LOG(DBG, FILTER, " tun refcnt: %i\n",
+ rtnl->refcnt);
+ }
+#endif
+
+ *idx = eidx; /* pointing to next or END */
+ }
+
+ return rtnl;
+}
+
+uint8_t get_tunnel_vport(struct tunnel_s *rtnl)
+{
+ return rtnl->vport;
+}
+
+void tunnel_set_flow_stat_id(struct tunnel_s *rtnl, uint32_t flow_stat_id)
+{
+ rtnl->flow_stat_id = flow_stat_id;
+}
+
+int tunnel_get_definition(struct tunnel_cfg_s *tuncfg, uint32_t flow_stat_id,
+ uint8_t vport)
+{
+ struct tunnel_s *tun = tunnels;
+
+ while (tun) {
+ if (tun->vport == vport && (flow_stat_id == tun->flow_stat_id ||
+ flow_stat_id == (uint32_t)-1)) {
+ memcpy(tuncfg, &tun->cfg, sizeof(struct tunnel_cfg_s));
+ return 0;
+ }
+ tun = tun->next;
+ }
+
+ return -1;
+}
+
+static be16_t ip_checksum_sum(const be16_t *data, unsigned int size,
+ be16_t seed)
+{
+ unsigned int sum = seed;
+ unsigned int idx;
+
+ for (idx = 0; idx < size / 2; idx++)
+ sum += (unsigned int)(data[idx]);
+ if (size & 1)
+ sum += (unsigned char)data[idx];
+ /* unfold */
+ while (sum >> 16)
+ sum = (sum & 0xffff) + (sum >> 16);
+ return (be16_t)sum;
+}
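+/*
+ * The fold above is the usual one's-complement reduction. Worked example:
+ * 0xffff + 0x0002 = 0x10001; one fold gives (0x0001 + 0x1) = 0x0002.
+ */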
+
+static void copy_unmasked(uint8_t *result, const struct flow_elem *elem,
+ uint8_t size)
+{
+ for (uint8_t i = 0; i < size; i++)
+ result[i] = ((const uint8_t *)elem->spec)[i];
+}
+
+int flow_tunnel_create_vxlan_hdr(struct flow_api_backend_s *be,
+ struct nic_flow_def *fd,
+ const struct flow_elem *elem)
+{
+ uint32_t eidx = 0;
+ uint8_t size;
+ struct ipv4_hdr_s *tun_ipv4 = NULL;
+ uint16_t *tun_hdr_eth_type_p = NULL;
+
+ if (elem) {
+ while (elem[eidx].type != FLOW_ELEM_TYPE_END) {
+ switch (elem[eidx].type) {
+ case FLOW_ELEM_TYPE_ETH: {
+ NT_LOG(DBG, FILTER,
+ "Tunnel: RTE_FLOW_ITEM_TYPE_ETH\n");
+ struct flow_elem_eth eth;
+
+ size = sizeof(struct flow_elem_eth);
+
+ copy_unmasked((uint8_t *)ð, &elem[eidx],
+ size);
+
+ memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+ ð, size);
+
+ /*
+ * Save a pointer to the tun header ethtype field
+ * (needed later in the IPv4 and IPv6 flow elem cases)
+ */
+ tun_hdr_eth_type_p =
+ (uint16_t *)&fd->tun_hdr.d
+ .hdr8[fd->tun_hdr.len + 12];
+
+#ifdef FLOW_DEBUG
+ NT_LOG(DBG, FILTER,
+ "dmac : %02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth.d_addr.addr_b[0],
+ eth.d_addr.addr_b[1],
+ eth.d_addr.addr_b[2],
+ eth.d_addr.addr_b[3],
+				       eth.d_addr.addr_b[4],
+				       eth.d_addr.addr_b[5]);
+ NT_LOG(DBG, FILTER,
+ "smac : %02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth.s_addr.addr_b[0],
+ eth.s_addr.addr_b[1],
+ eth.s_addr.addr_b[2],
+ eth.s_addr.addr_b[3],
+				       eth.s_addr.addr_b[4],
+				       eth.s_addr.addr_b[5]);
+ NT_LOG(DBG, FILTER, "type : %04x\n",
+ ntohs(eth.ether_type));
+#endif
+ fd->tun_hdr.len =
+ (uint8_t)(fd->tun_hdr.len + size);
+ }
+ break;
+ /* VLAN is not supported */
+
+ case FLOW_ELEM_TYPE_IPV4: {
+ NT_LOG(DBG, FILTER,
+ "Tunnel: RTE_FLOW_ITEM_TYPE_IPV4\n");
+ struct flow_elem_ipv4 ipv4;
+
+ size = sizeof(struct flow_elem_ipv4);
+
+ copy_unmasked((uint8_t *)&ipv4, &elem[eidx],
+ size);
+
+ if (ipv4.hdr.version_ihl != 0x45)
+ ipv4.hdr.version_ihl = 0x45;
+
+ if (ipv4.hdr.ttl == 0)
+ ipv4.hdr.ttl = 64;
+
+				if (ipv4.hdr.next_proto_id != 17) /* must be UDP */
+					ipv4.hdr.next_proto_id = 17;
+
+				ipv4.hdr.frag_offset = htons(1 << 14); /* set the DF flag */
+
+ size = sizeof(struct ipv4_hdr_s);
+ memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+ &ipv4.hdr, size);
+
+ /* Set the tun header ethtype field to IPv4 (if empty) */
+ if (tun_hdr_eth_type_p &&
+ (*tun_hdr_eth_type_p == 0)) {
+ *tun_hdr_eth_type_p =
+ htons(0x0800); /* IPv4 */
+ }
+
+ tun_ipv4 = (struct ipv4_hdr_s *)&fd->tun_hdr.d
+ .hdr8[fd->tun_hdr.len];
+
+ NT_LOG(DBG, FILTER, "v_ihl : %02x\n",
+ tun_ipv4->version_ihl);
+ NT_LOG(DBG, FILTER, "tos : %02x\n",
+ tun_ipv4->tos);
+ NT_LOG(DBG, FILTER, "len : %d\n",
+ ntohs(tun_ipv4->length));
+ NT_LOG(DBG, FILTER, "id : %02x\n",
+ tun_ipv4->id);
+ NT_LOG(DBG, FILTER, "fl/frg : %04x\n",
+ ntohs(tun_ipv4->frag_offset));
+ NT_LOG(DBG, FILTER, "ttl : %02x\n",
+ tun_ipv4->ttl);
+ NT_LOG(DBG, FILTER, "prot : %02x\n",
+ tun_ipv4->next_proto_id);
+ NT_LOG(DBG, FILTER, "chksum : %04x\n",
+ ntohs(tun_ipv4->hdr_csum));
+ NT_LOG(DBG, FILTER, "src : %d.%d.%d.%d\n",
+ (tun_ipv4->src_ip & 0xff),
+ ((tun_ipv4->src_ip >> 8) & 0xff),
+ ((tun_ipv4->src_ip >> 16) & 0xff),
+ ((tun_ipv4->src_ip >> 24) & 0xff));
+ NT_LOG(DBG, FILTER, "dst : %d.%d.%d.%d\n",
+ (tun_ipv4->dst_ip & 0xff),
+ ((tun_ipv4->dst_ip >> 8) & 0xff),
+ ((tun_ipv4->dst_ip >> 16) & 0xff),
+ ((tun_ipv4->dst_ip >> 24) & 0xff));
+
+ fd->tun_hdr.len =
+ (uint8_t)(fd->tun_hdr.len + size);
+ fd->tun_hdr.ip_version = 4;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_IPV6: {
+ if (be->roa.ver < 6) {
+ NT_LOG(ERR, FILTER,
+ "Tunnel flow element type IPv6 requires ROA version 6 or higher (current version=%d)\n",
+ be->roa.ver);
+ return -1;
+ }
+
+ NT_LOG(DBG, FILTER,
+ "Tunnel: RTE_FLOW_ITEM_TYPE_IPV6\n");
+ struct flow_elem_ipv6 ipv6;
+
+ size = sizeof(struct flow_elem_ipv6);
+
+ copy_unmasked((uint8_t *)&ipv6, &elem[eidx],
+ size);
+
+ /*
+ * Make sure the version field (the 4 most significant bits of
+ * "vtc_flow") is set to 6
+ */
+				if ((ipv6.hdr.vtc_flow & htonl(0x60000000)) == 0)
+					ipv6.hdr.vtc_flow |= htonl(0x60000000); /* version = 6 */
+
+ if (ipv6.hdr.proto != 17) /* must be UDP */
+ ipv6.hdr.proto = 17;
+
+ if (ipv6.hdr.hop_limits == 0)
+ ipv6.hdr.hop_limits = 64;
+
+ size = sizeof(struct ipv6_hdr_s);
+ memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+ &ipv6.hdr, size);
+
+ /* Set the tun header ethtype field to IPv6 (if empty) */
+ if (tun_hdr_eth_type_p &&
+ (*tun_hdr_eth_type_p == 0)) {
+ *tun_hdr_eth_type_p =
+ htons(0x86DD); /* IPv6 */
+ }
+
+ NT_LOG(DBG, FILTER, "vtc_flow : %08x\n",
+ ntohl(ipv6.hdr.vtc_flow));
+ NT_LOG(DBG, FILTER, "payload_len : %04x\n",
+ ntohs(ipv6.hdr.payload_len));
+ NT_LOG(DBG, FILTER, "proto : %02x\n",
+ ipv6.hdr.proto);
+ NT_LOG(DBG, FILTER, "hop_limits : %02x\n",
+ ipv6.hdr.hop_limits);
+ NT_LOG(DBG, FILTER,
+ "src : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+ ipv6.hdr.src_addr[0],
+ ipv6.hdr.src_addr[1],
+ ipv6.hdr.src_addr[2],
+ ipv6.hdr.src_addr[3],
+ ipv6.hdr.src_addr[4],
+ ipv6.hdr.src_addr[5],
+ ipv6.hdr.src_addr[6],
+ ipv6.hdr.src_addr[7],
+ ipv6.hdr.src_addr[8],
+ ipv6.hdr.src_addr[9],
+ ipv6.hdr.src_addr[10],
+ ipv6.hdr.src_addr[11],
+ ipv6.hdr.src_addr[12],
+ ipv6.hdr.src_addr[13],
+ ipv6.hdr.src_addr[14],
+ ipv6.hdr.src_addr[15]);
+ NT_LOG(DBG, FILTER,
+ "dst : %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n",
+ ipv6.hdr.dst_addr[0],
+ ipv6.hdr.dst_addr[1],
+ ipv6.hdr.dst_addr[2],
+ ipv6.hdr.dst_addr[3],
+ ipv6.hdr.dst_addr[4],
+ ipv6.hdr.dst_addr[5],
+ ipv6.hdr.dst_addr[6],
+ ipv6.hdr.dst_addr[7],
+ ipv6.hdr.dst_addr[8],
+ ipv6.hdr.dst_addr[9],
+ ipv6.hdr.dst_addr[10],
+ ipv6.hdr.dst_addr[11],
+ ipv6.hdr.dst_addr[12],
+ ipv6.hdr.dst_addr[13],
+ ipv6.hdr.dst_addr[14],
+ ipv6.hdr.dst_addr[15]);
+
+ fd->tun_hdr.len =
+ (uint8_t)(fd->tun_hdr.len + size);
+ fd->tun_hdr.ip_version = 6;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_UDP: {
+ NT_LOG(DBG, FILTER,
+ "Tunnel: RTE_FLOW_ITEM_TYPE_UDP\n");
+ struct flow_elem_udp udp;
+
+ size = sizeof(struct flow_elem_udp);
+
+ copy_unmasked((uint8_t *)&udp, &elem[eidx],
+ size);
+
+				udp.hdr.cksum = 0; /* always set the UDP checksum to 0 */
+
+ size = sizeof(struct udp_hdr_s);
+ memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+ &udp.hdr, size);
+
+ NT_LOG(DBG, FILTER, "src p : %d\n",
+ ntohs(udp.hdr.src_port));
+ NT_LOG(DBG, FILTER, "dst p : %d\n",
+ ntohs(udp.hdr.dst_port));
+ NT_LOG(DBG, FILTER, "len : %d\n",
+ ntohs(udp.hdr.len));
+ NT_LOG(DBG, FILTER, "chksum : %04x\n",
+ ntohs(udp.hdr.cksum));
+
+ fd->tun_hdr.len =
+ (uint8_t)(fd->tun_hdr.len + size);
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_VXLAN: {
+ struct flow_elem_vxlan vxlan_m;
+
+ size = sizeof(struct flow_elem_vxlan);
+
+ copy_unmasked((uint8_t *)&vxlan_m, &elem[eidx],
+ size);
+
+				vxlan_m.flags = 0x08; /* always set the I flag - valid VNI */
+
+ NT_LOG(DBG, FILTER,
+ "Tunnel: RTE_FLOW_ITEM_TYPE_VXLAN - vni %u\n",
+ (vxlan_m.vni[0] << 16) +
+ (vxlan_m.vni[1] << 8) +
+ vxlan_m.vni[2]);
+
+ memcpy(&fd->tun_hdr.d.hdr8[fd->tun_hdr.len],
+ &vxlan_m, size);
+
+ NT_LOG(DBG, FILTER, "flags : %02x\n",
+ vxlan_m.flags);
+ NT_LOG(DBG, FILTER, "vni : %d\n",
+ (vxlan_m.vni[0] << 16) +
+ (vxlan_m.vni[1] << 8) +
+ vxlan_m.vni[2]);
+
+ fd->tun_hdr.len =
+ (uint8_t)(fd->tun_hdr.len + size);
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_PORT_ID: {
+ const struct flow_elem_port_id *port =
+ (const struct flow_elem_port_id *)
+ elem[eidx]
+ .spec;
+ fd->tun_hdr.user_port_id = port->id;
+ }
+ break;
+
+ case FLOW_ELEM_TYPE_VOID: {
+ NT_LOG(DBG, FILTER,
+ "Tunnel: RTE_FLOW_ITEM_TYPE_VOID (ignoring)\n");
+ }
+ break;
+
+ default:
+ NT_LOG(INF, FILTER,
+ "unsupported Tunnel flow element type %u\n",
+ elem[eidx].type);
+ return -1;
+ }
+
+ eidx++;
+ }
+ }
+
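+	/*
+	 * Precompute the IPv4 header checksum with the checksum and
+	 * total-length fields zeroed, seeded with the fixed part of the IP
+	 * length (tunnel header minus the Ethernet header). Presumably the
+	 * hardware can then complete the checksum by adding only the
+	 * per-packet payload length.
+	 */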
+ if (tun_ipv4) {
+ tun_ipv4->hdr_csum = 0;
+ tun_ipv4->length = 0;
+		fd->tun_hdr.ip_csum_precalc = ntohs(ip_checksum_sum(
+			(const be16_t *)&fd->tun_hdr.d.hdr8[14], /* skip Ethernet header */
+			(unsigned int)sizeof(struct ipv4_hdr_s),
+			(be16_t)htons((uint16_t)(fd->tun_hdr.len -
+						 sizeof(struct flow_elem_eth)))));
+
+		NT_LOG(DBG, FILTER,
+		       "chksum precalc: %04x, precalc hdr len %u\n",
+		       fd->tun_hdr.ip_csum_precalc,
+		       (unsigned int)(fd->tun_hdr.len -
+				      sizeof(struct flow_elem_eth)));
+ }
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,1789 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "CAT"
+#define _VER_ be->cat.ver
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int km_if_id,
+ int start_idx, int count);
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int km_if_id,
+ int start_idx, int count);
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int km_if_id,
+ int start_idx, int count);
+
+bool hw_mod_cat_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_cat_present(be->be_dev);
+}
+
+int hw_mod_cat_alloc(struct flow_api_backend_s *be)
+{
+ _VER_ = be->iface->get_cat_version(be->be_dev);
+ NT_LOG(DBG, FILTER, "CAT MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+ VER_MINOR(_VER_));
+
+ int nb = be->iface->get_nb_cat_funcs(be->be_dev);
+
+ if (nb <= 0)
+ return error_resource_count(__func__, "cat_funcs", _MOD_, _VER_);
+ be->cat.nb_cat_funcs = (uint32_t)nb;
+
+ nb = be->iface->get_nb_km_flow_types(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "km_flow_types", _MOD_, _VER_);
+ be->cat.nb_flow_types = (uint32_t)nb;
+
+ nb = be->iface->get_nb_pm_ext(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "pm_ext", _MOD_, _VER_);
+ be->cat.nb_pm_ext = (uint32_t)nb;
+
+ nb = be->iface->get_nb_len(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "len", _MOD_, _VER_);
+ be->cat.nb_len = (uint32_t)nb;
+
+ nb = be->iface->get_kcc_size(be->be_dev);
+ if (nb < 0)
+ return error_resource_count(__func__, "kcc_size", _MOD_, _VER_);
+ be->cat.kcc_size = (uint32_t)nb;
+
+ nb = be->iface->get_kcc_banks(be->be_dev);
+ if (nb < 0)
+ return error_resource_count(__func__, "kcc_banks", _MOD_, _VER_);
+ be->cat.kcc_banks = (uint32_t)nb;
+
+ nb = be->iface->get_nb_cat_km_if_cnt(be->be_dev);
+ if (nb < 0)
+ return error_resource_count(__func__, "km_if_count", _MOD_, _VER_);
+ be->cat.km_if_count = (uint32_t)nb;
+
+ int idx = be->iface->get_nb_cat_km_if_m0(be->be_dev);
+
+ be->cat.km_if_m0 = idx;
+
+ idx = be->iface->get_nb_cat_km_if_m1(be->be_dev);
+ be->cat.km_if_m1 = idx;
+
+ if (be->cat.kcc_banks)
+ be->cat.kcc_records = be->cat.kcc_size / be->cat.kcc_banks;
+ else
+ be->cat.kcc_records = 0;
+
+ be->cat.kcc_id_bit_size = 10;
+
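+	/*
+	 * callocate_mod() appears to pack all the listed tables into a single
+	 * allocation anchored at be->cat.base (released again in
+	 * hw_mod_cat_free() below); each table is given as a pointer slot, an
+	 * element count and an element size.
+	 */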
+ switch (_VER_) {
+ case 18:
+ be->cat.cts_num = 11;
+ if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+ &be->cat.v18.cfn,
+ be->cat.nb_cat_funcs,
+ sizeof(struct cat_v18_cfn_s),
+ &be->cat.v18.kce,
+ (be->cat.nb_cat_funcs / 8),
+ sizeof(struct cat_v18_kce_s),
+ &be->cat.v18.kcs,
+ be->cat.nb_cat_funcs,
+ sizeof(struct cat_v18_kcs_s),
+ &be->cat.v18.fte,
+ (be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 2,
+ sizeof(struct cat_v18_fte_s),
+ &be->cat.v18.cte,
+ be->cat.nb_cat_funcs,
+ sizeof(struct cat_v18_cte_s),
+ &be->cat.v18.cts,
+ be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+ sizeof(struct cat_v18_cts_s),
+ &be->cat.v18.cot,
+ be->max_categories,
+ sizeof(struct cat_v18_cot_s),
+ &be->cat.v18.cct,
+ be->max_categories * 4,
+ sizeof(struct cat_v18_cct_s),
+ &be->cat.v18.exo,
+ be->cat.nb_pm_ext,
+ sizeof(struct cat_v18_exo_s),
+ &be->cat.v18.rck,
+ be->cat.nb_pm_ext * 64,
+ sizeof(struct cat_v18_rck_s),
+ &be->cat.v18.len,
+ be->cat.nb_len,
+ sizeof(struct cat_v18_len_s),
+ &be->cat.v18.kcc_cam,
+ be->cat.kcc_size,
+ sizeof(struct cat_v18_kcc_s)))
+ return -1;
+
+ break;
+ /* end case 18 */
+ case 21:
+ be->cat.cts_num = 11;
+ if (!callocate_mod(CAST_COMMON(&be->cat), 12,
+ &be->cat.v21.cfn,
+ be->cat.nb_cat_funcs,
+ sizeof(struct cat_v21_cfn_s),
+ &be->cat.v21.kce,
+ (be->cat.nb_cat_funcs / 8),
+ sizeof(struct cat_v21_kce_s),
+ &be->cat.v21.kcs,
+ be->cat.nb_cat_funcs,
+ sizeof(struct cat_v21_kcs_s),
+ &be->cat.v21.fte,
+ (be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+ sizeof(struct cat_v21_fte_s),
+ &be->cat.v21.cte,
+ be->cat.nb_cat_funcs,
+ sizeof(struct cat_v18_cte_s),
+ &be->cat.v21.cts,
+ be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+ sizeof(struct cat_v18_cts_s),
+ &be->cat.v21.cot,
+ be->max_categories,
+ sizeof(struct cat_v18_cot_s),
+ &be->cat.v21.cct,
+ be->max_categories * 4,
+ sizeof(struct cat_v18_cct_s),
+ &be->cat.v21.exo,
+ be->cat.nb_pm_ext,
+ sizeof(struct cat_v18_exo_s),
+ &be->cat.v21.rck,
+ be->cat.nb_pm_ext * 64,
+ sizeof(struct cat_v18_rck_s),
+ &be->cat.v21.len,
+ be->cat.nb_len,
+ sizeof(struct cat_v18_len_s),
+ &be->cat.v21.kcc_cam,
+ be->cat.kcc_size,
+ sizeof(struct cat_v18_kcc_s)))
+ return -1;
+
+ break;
+ /* end case 21 */
+ case 22:
+ be->cat.cts_num = 12;
+ if (!callocate_mod(CAST_COMMON(&be->cat), 14,
+ &be->cat.v22.cfn,
+ be->cat.nb_cat_funcs,
+ sizeof(struct cat_v21_cfn_s),
+ &be->cat.v22.kce,
+ (be->cat.nb_cat_funcs / 8),
+ sizeof(struct cat_v21_kce_s),
+ &be->cat.v22.kcs,
+ be->cat.nb_cat_funcs,
+ sizeof(struct cat_v21_kcs_s),
+ &be->cat.v22.fte,
+ (be->cat.nb_cat_funcs / 8) * be->cat.nb_flow_types * 4,
+ sizeof(struct cat_v21_fte_s),
+ &be->cat.v22.cte,
+ be->cat.nb_cat_funcs,
+ sizeof(struct cat_v22_cte_s),
+ &be->cat.v22.cts,
+ be->cat.nb_cat_funcs * ((be->cat.cts_num + 1) / 2),
+ sizeof(struct cat_v18_cts_s),
+ &be->cat.v22.cot,
+ be->max_categories,
+ sizeof(struct cat_v18_cot_s),
+ &be->cat.v22.cct,
+ be->max_categories * 4,
+ sizeof(struct cat_v18_cct_s),
+ &be->cat.v22.exo,
+ be->cat.nb_pm_ext,
+ sizeof(struct cat_v18_exo_s),
+ &be->cat.v22.rck,
+ be->cat.nb_pm_ext * 64,
+ sizeof(struct cat_v18_rck_s),
+ &be->cat.v22.len,
+ be->cat.nb_len,
+ sizeof(struct cat_v18_len_s),
+ &be->cat.v22.kcc_cam,
+ be->cat.kcc_size,
+ sizeof(struct cat_v18_kcc_s),
+ &be->cat.v22.cce,
+ 4,
+ sizeof(struct cat_v22_cce_s),
+ &be->cat.v22.ccs,
+ 1024,
+ sizeof(struct cat_v22_ccs_s)))
+ return -1;
+
+ break;
+ /* end case 22 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+void hw_mod_cat_free(struct flow_api_backend_s *be)
+{
+ if (be->cat.base) {
+ free(be->cat.base);
+ be->cat.base = NULL;
+ }
+}
+
+static int cfn_reset(struct flow_api_backend_s *be, int i)
+{
+ int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_PRESET_ALL, i, 0, 0);
+
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_ISL, i, 0,
+ 0xffffffff); /* accept both ISL or not ISL */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_CFP, i, 0,
+ 0xffffffff); /* accept both CFP or not CFP */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MAC, i, 0,
+ 0xffffffff); /* accept all MACs */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L2, i, 0,
+ 0xffffffff); /* accept all L2 prot */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VNTAG, i, 0,
+ 0xffffffff); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, i, 0,
+ 0xffffffff); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_MPLS, i, 0,
+ 0xffffffff); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, i, 0,
+ 0xffffffff); /* accept all L3 prot */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_FRAG, i, 0,
+ 0xffffffff); /* accept all fragments */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_IP_PROT, i, 0,
+ 0xffffffff); /* IP prot check disabled */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, i, 0,
+ 0xffffffff); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TUNNEL, i, 0,
+ 0xffffffff); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L2, i, 0,
+ 0xffffffff); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_VLAN, i, 0,
+ 0xffffffff); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_MPLS, i, 0,
+ 0xffffffff); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L3, i, 0,
+ 0xffffffff); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_FRAG, i, 0,
+ 0xffffffff); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_IP_PROT, i, 0,
+ 0xffffffff); /* inner IP prot check disabled */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_TNL_L4, i, 0,
+ 0xffffffff); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_CV, i, 0, 3); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_FCS, i, 0, 3); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TRUNC, i, 0,
+ 0xffffffff); /* accept all truncations */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L3_CS, i, 0, 3); /* accept all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_L4_CS, i, 0, 3); /* accept all */
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_PM_OR_INV, i, 0, 1);
+	hw_mod_cat_cfn_set(be, HW_CAT_CFN_LC_INV, i, 0, 1);
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM0_OR, i, 0,
+ 0xffffffff); /* or all */
+ if (_VER_ >= 21) {
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_KM1_OR, i, 0,
+ 0xffffffff); /* or all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L3_CS, i, 0,
+ 0xffffffff); /* or all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_L4_CS, i, 0,
+ 0xffffffff); /* or all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TTL_EXP, i, 0,
+ 0xffffffff); /* or all */
+ hw_mod_cat_cfn_set(be, HW_CAT_CFN_ERR_TNL_TTL_EXP, i, 0,
+ 0xffffffff); /* or all */
+ }
+ return err;
+}
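
For orientation, cfn_reset() parks a category function in an accept-all state; a caller then narrows the match by overwriting individual PTC fields and enables the entry. A minimal sketch under assumed conditions - `be` is an initialized backend handle and the PTC bit values are hypothetical:

static int cfn_setup_example(struct flow_api_backend_s *be, int idx)
{
	int err = hw_mod_cat_cfn_set(be, HW_CAT_CFN_SET_ALL_DEFAULTS, idx, 0, 0);

	/* hypothetical protocol bits - narrow from accept-all */
	err |= hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L3, idx, 0, 0x2);
	err |= hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_L4, idx, 0, 0x4);
	err |= hw_mod_cat_cfn_set(be, HW_CAT_CFN_ENABLE, idx, 0, 1);
	if (err)
		return err;
	/* nothing reaches hardware until the entry is flushed */
	return hw_mod_cat_cfn_flush(be, idx, 1);
}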
+
+int hw_mod_cat_reset(struct flow_api_backend_s *be)
+{
+ /* Zero entire cache area */
+ ZERO_MOD_CACHE(&be->cat);
+
+ NT_LOG(DBG, FILTER, "INIT CAT CFN\n");
+ if (hw_mod_cat_cfn_flush(be, 0, ALL_ENTRIES))
+ return -1;
+
+ if (_VER_ <= 18) {
+ NT_LOG(DBG, FILTER, "INIT CAT KCE\n");
+ if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, 0, 0,
+ ALL_ENTRIES))
+ return -1;
+
+ NT_LOG(DBG, FILTER, "INIT CAT KCS\n");
+ if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, 0, 0,
+ ALL_ENTRIES))
+ return -1;
+
+ NT_LOG(DBG, FILTER, "INIT CAT FTE\n");
+ if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, 0, 0,
+ ALL_ENTRIES))
+ return -1;
+ } else {
+ NT_LOG(DBG, FILTER, "INIT CAT KCE 0\n");
+ if (hw_mod_cat_kce_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+ 0, ALL_ENTRIES))
+ return -1;
+
+ NT_LOG(DBG, FILTER, "INIT CAT KCS 0\n");
+ if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+ 0, ALL_ENTRIES))
+ return -1;
+
+ NT_LOG(DBG, FILTER, "INIT CAT FTE 0\n");
+ if (hw_mod_cat_fte_flush(be, KM_FLM_IF_FIRST, be->cat.km_if_m0,
+ 0, ALL_ENTRIES))
+ return -1;
+
+ if (be->cat.km_if_count > 1) {
+ NT_LOG(DBG, FILTER, "INIT CAT KCE 1\n");
+ if (hw_mod_cat_kce_flush(be, KM_FLM_IF_SECOND,
+ be->cat.km_if_m1, 0,
+ ALL_ENTRIES))
+ return -1;
+
+ NT_LOG(DBG, FILTER, "INIT CAT KCS 1\n");
+ if (hw_mod_cat_kcs_flush(be, KM_FLM_IF_SECOND,
+ be->cat.km_if_m1, 0,
+ ALL_ENTRIES))
+ return -1;
+
+ NT_LOG(DBG, FILTER, "INIT CAT FTE 1\n");
+ if (hw_mod_cat_fte_flush(be, KM_FLM_IF_SECOND,
+ be->cat.km_if_m1, 0,
+ ALL_ENTRIES))
+ return -1;
+ }
+ }
+
+ NT_LOG(DBG, FILTER, "INIT CAT CTE\n");
+ if (hw_mod_cat_cte_flush(be, 0, ALL_ENTRIES))
+ return -1;
+
+ NT_LOG(DBG, FILTER, "INIT CAT CTS\n");
+ if (hw_mod_cat_cts_flush(be, 0, ALL_ENTRIES))
+ return -1;
+
+ NT_LOG(DBG, FILTER, "INIT CAT COT\n");
+ if (hw_mod_cat_cot_flush(be, 0, ALL_ENTRIES))
+ return -1;
+
+ NT_LOG(DBG, FILTER, "INIT CAT CCT\n");
+ if (hw_mod_cat_cct_flush(be, 0, ALL_ENTRIES))
+ return -1;
+
+ NT_LOG(DBG, FILTER, "INIT CAT EXO\n");
+ if (hw_mod_cat_exo_flush(be, 0, ALL_ENTRIES))
+ return -1;
+
+ NT_LOG(DBG, FILTER, "INIT CAT RCK\n");
+ if (hw_mod_cat_rck_flush(be, 0, ALL_ENTRIES))
+ return -1;
+
+ NT_LOG(DBG, FILTER, "INIT CAT LEN\n");
+ if (hw_mod_cat_len_flush(be, 0, ALL_ENTRIES))
+ return -1;
+
+ if (be->cat.kcc_size) {
+ NT_LOG(DBG, FILTER, "INIT CAT KCC\n");
+ if (hw_mod_cat_kcc_flush(be, 0, ALL_ENTRIES))
+ return -1;
+ }
+
+ if (_VER_ > 21) {
+ NT_LOG(DBG, FILTER, "INIT CAT CCE\n");
+ if (hw_mod_cat_cce_flush(be, 0, ALL_ENTRIES))
+ return -1;
+ NT_LOG(DBG, FILTER, "INIT CAT CCS\n");
+ if (hw_mod_cat_ccs_flush(be, 0, ALL_ENTRIES))
+ return -1;
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_cfn_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ switch (count) {
+ case ALL_ENTRIES:
+ if (start_idx != 0)
+ return error_index_too_large(__func__);
+ return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+ be->cat.nb_cat_funcs);
+
+ default:
+ if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+ return error_index_too_large(__func__);
+ return be->iface->cat_cfn_flush(be->be_dev, &be->cat, start_idx,
+ count);
+ }
+}
+
+static int hw_mod_cat_cfn_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field, int index, int word_off,
+ uint32_t *value, int get)
+{
+	int rv = 0;
+
+	if ((unsigned int)index >= be->cat.nb_cat_funcs)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 18:
+ switch (field) {
+ case HW_CAT_CFN_SET_ALL_DEFAULTS:
+ if (get)
+ return error_unsup_field(__func__);
+ return cfn_reset(be, index);
+ case HW_CAT_CFN_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->cat.v18.cfn[index], (uint8_t)*value,
+ sizeof(struct cat_v18_cfn_s));
+ break;
+ case HW_CAT_CFN_COMPARE:
+ rv = do_compare_indexes(be->cat.v18.cfn,
+ sizeof(struct cat_v18_cfn_s), index, word_off,
+ be->cat.nb_cat_funcs, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_CAT_CFN_FIND:
+ rv = find_equal_index(be->cat.v18.cfn,
+ sizeof(struct cat_v18_cfn_s), index, word_off,
+ be->cat.nb_cat_funcs, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_CAT_CFN_ENABLE:
+ get_set(&be->cat.v18.cfn[index].enable, value, get);
+ break;
+ case HW_CAT_CFN_INV:
+ get_set(&be->cat.v18.cfn[index].inv, value, get);
+ break;
+ case HW_CAT_CFN_PTC_INV:
+ get_set(&be->cat.v18.cfn[index].ptc_inv, value, get);
+ break;
+ case HW_CAT_CFN_PTC_ISL:
+ get_set(&be->cat.v18.cfn[index].ptc_isl, value, get);
+ break;
+ case HW_CAT_CFN_PTC_CFP:
+ get_set(&be->cat.v18.cfn[index].ptc_cfp, value, get);
+ break;
+ case HW_CAT_CFN_PTC_MAC:
+ get_set(&be->cat.v18.cfn[index].ptc_mac, value, get);
+ break;
+ case HW_CAT_CFN_PTC_L2:
+ get_set(&be->cat.v18.cfn[index].ptc_l2, value, get);
+ break;
+ case HW_CAT_CFN_PTC_VNTAG:
+ get_set(&be->cat.v18.cfn[index].ptc_vntag, value, get);
+ break;
+ case HW_CAT_CFN_PTC_VLAN:
+ get_set(&be->cat.v18.cfn[index].ptc_vlan, value, get);
+ break;
+ case HW_CAT_CFN_PTC_MPLS:
+ get_set(&be->cat.v18.cfn[index].ptc_mpls, value, get);
+ break;
+ case HW_CAT_CFN_PTC_L3:
+ get_set(&be->cat.v18.cfn[index].ptc_l3, value, get);
+ break;
+ case HW_CAT_CFN_PTC_FRAG:
+ get_set(&be->cat.v18.cfn[index].ptc_frag, value, get);
+ break;
+ case HW_CAT_CFN_PTC_IP_PROT:
+ get_set(&be->cat.v18.cfn[index].ptc_ip_prot, value, get);
+ break;
+ case HW_CAT_CFN_PTC_L4:
+ get_set(&be->cat.v18.cfn[index].ptc_l4, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TUNNEL:
+ get_set(&be->cat.v18.cfn[index].ptc_tunnel, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_L2:
+ get_set(&be->cat.v18.cfn[index].ptc_tnl_l2, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_VLAN:
+ get_set(&be->cat.v18.cfn[index].ptc_tnl_vlan, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_MPLS:
+ get_set(&be->cat.v18.cfn[index].ptc_tnl_mpls, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_L3:
+ get_set(&be->cat.v18.cfn[index].ptc_tnl_l3, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_FRAG:
+ get_set(&be->cat.v18.cfn[index].ptc_tnl_frag, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_IP_PROT:
+ get_set(&be->cat.v18.cfn[index].ptc_tnl_ip_prot, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_L4:
+ get_set(&be->cat.v18.cfn[index].ptc_tnl_l4, value, get);
+ break;
+ case HW_CAT_CFN_ERR_INV:
+ get_set(&be->cat.v18.cfn[index].err_inv, value, get);
+ break;
+ case HW_CAT_CFN_ERR_CV:
+ get_set(&be->cat.v18.cfn[index].err_cv, value, get);
+ break;
+ case HW_CAT_CFN_ERR_FCS:
+ get_set(&be->cat.v18.cfn[index].err_fcs, value, get);
+ break;
+ case HW_CAT_CFN_ERR_TRUNC:
+ get_set(&be->cat.v18.cfn[index].err_trunc, value, get);
+ break;
+ case HW_CAT_CFN_ERR_L3_CS:
+ get_set(&be->cat.v18.cfn[index].err_l3_cs, value, get);
+ break;
+ case HW_CAT_CFN_ERR_L4_CS:
+ get_set(&be->cat.v18.cfn[index].err_l4_cs, value, get);
+ break;
+ case HW_CAT_CFN_MAC_PORT:
+ get_set(&be->cat.v18.cfn[index].mac_port, value, get);
+ break;
+ case HW_CAT_CFN_PM_CMP:
+ if (word_off > 1)
+ return error_word_off_too_large(__func__);
+ get_set(&be->cat.v18.cfn[index].pm_cmp[word_off], value, get);
+ break;
+ case HW_CAT_CFN_PM_DCT:
+ get_set(&be->cat.v18.cfn[index].pm_dct, value, get);
+ break;
+ case HW_CAT_CFN_PM_EXT_INV:
+ get_set(&be->cat.v18.cfn[index].pm_ext_inv, value, get);
+ break;
+ case HW_CAT_CFN_PM_CMB:
+ get_set(&be->cat.v18.cfn[index].pm_cmb, value, get);
+ break;
+ case HW_CAT_CFN_PM_AND_INV:
+ get_set(&be->cat.v18.cfn[index].pm_and_inv, value, get);
+ break;
+ case HW_CAT_CFN_PM_OR_INV:
+ get_set(&be->cat.v18.cfn[index].pm_or_inv, value, get);
+ break;
+ case HW_CAT_CFN_PM_INV:
+ get_set(&be->cat.v18.cfn[index].pm_inv, value, get);
+ break;
+ case HW_CAT_CFN_LC:
+ get_set(&be->cat.v18.cfn[index].lc, value, get);
+ break;
+ case HW_CAT_CFN_LC_INV:
+ get_set(&be->cat.v18.cfn[index].lc_inv, value, get);
+ break;
+ case HW_CAT_CFN_KM0_OR:
+ get_set(&be->cat.v18.cfn[index].km_or, value, get);
+ break;
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 18 */
+ case 21:
+ case 22:
+ switch (field) {
+ case HW_CAT_CFN_SET_ALL_DEFAULTS:
+ if (get)
+ return error_unsup_field(__func__);
+ return cfn_reset(be, index);
+ case HW_CAT_CFN_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->cat.v21.cfn[index], (uint8_t)*value,
+ sizeof(struct cat_v21_cfn_s));
+ break;
+ case HW_CAT_CFN_COMPARE:
+ rv = do_compare_indexes(be->cat.v21.cfn,
+ sizeof(struct cat_v21_cfn_s), index, word_off,
+ be->cat.nb_cat_funcs, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_CAT_CFN_FIND:
+ rv = find_equal_index(be->cat.v21.cfn,
+ sizeof(struct cat_v21_cfn_s), index, word_off,
+ be->cat.nb_cat_funcs, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_CAT_CFN_COPY_FROM:
+ if (get)
+ return error_unsup_field(__func__);
+ memcpy(&be->cat.v21.cfn[index],
+ &be->cat.v21.cfn[*value],
+ sizeof(struct cat_v21_cfn_s));
+ break;
+ case HW_CAT_CFN_ENABLE:
+ get_set(&be->cat.v21.cfn[index].enable, value, get);
+ break;
+ case HW_CAT_CFN_INV:
+ get_set(&be->cat.v21.cfn[index].inv, value, get);
+ break;
+ case HW_CAT_CFN_PTC_INV:
+ get_set(&be->cat.v21.cfn[index].ptc_inv, value, get);
+ break;
+ case HW_CAT_CFN_PTC_ISL:
+ get_set(&be->cat.v21.cfn[index].ptc_isl, value, get);
+ break;
+ case HW_CAT_CFN_PTC_CFP:
+ get_set(&be->cat.v21.cfn[index].ptc_cfp, value, get);
+ break;
+ case HW_CAT_CFN_PTC_MAC:
+ get_set(&be->cat.v21.cfn[index].ptc_mac, value, get);
+ break;
+ case HW_CAT_CFN_PTC_L2:
+ get_set(&be->cat.v21.cfn[index].ptc_l2, value, get);
+ break;
+ case HW_CAT_CFN_PTC_VNTAG:
+ get_set(&be->cat.v21.cfn[index].ptc_vntag, value, get);
+ break;
+ case HW_CAT_CFN_PTC_VLAN:
+ get_set(&be->cat.v21.cfn[index].ptc_vlan, value, get);
+ break;
+ case HW_CAT_CFN_PTC_MPLS:
+ get_set(&be->cat.v21.cfn[index].ptc_mpls, value, get);
+ break;
+ case HW_CAT_CFN_PTC_L3:
+ get_set(&be->cat.v21.cfn[index].ptc_l3, value, get);
+ break;
+ case HW_CAT_CFN_PTC_FRAG:
+ get_set(&be->cat.v21.cfn[index].ptc_frag, value, get);
+ break;
+ case HW_CAT_CFN_PTC_IP_PROT:
+ get_set(&be->cat.v21.cfn[index].ptc_ip_prot, value, get);
+ break;
+ case HW_CAT_CFN_PTC_L4:
+ get_set(&be->cat.v21.cfn[index].ptc_l4, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TUNNEL:
+ get_set(&be->cat.v21.cfn[index].ptc_tunnel, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_L2:
+ get_set(&be->cat.v21.cfn[index].ptc_tnl_l2, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_VLAN:
+ get_set(&be->cat.v21.cfn[index].ptc_tnl_vlan, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_MPLS:
+ get_set(&be->cat.v21.cfn[index].ptc_tnl_mpls, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_L3:
+ get_set(&be->cat.v21.cfn[index].ptc_tnl_l3, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_FRAG:
+ get_set(&be->cat.v21.cfn[index].ptc_tnl_frag, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_IP_PROT:
+ get_set(&be->cat.v21.cfn[index].ptc_tnl_ip_prot, value, get);
+ break;
+ case HW_CAT_CFN_PTC_TNL_L4:
+ get_set(&be->cat.v21.cfn[index].ptc_tnl_l4, value, get);
+ break;
+ case HW_CAT_CFN_ERR_INV:
+ get_set(&be->cat.v21.cfn[index].err_inv, value, get);
+ break;
+ case HW_CAT_CFN_ERR_CV:
+ get_set(&be->cat.v21.cfn[index].err_cv, value, get);
+ break;
+ case HW_CAT_CFN_ERR_FCS:
+ get_set(&be->cat.v21.cfn[index].err_fcs, value, get);
+ break;
+ case HW_CAT_CFN_ERR_TRUNC:
+ get_set(&be->cat.v21.cfn[index].err_trunc, value, get);
+ break;
+ case HW_CAT_CFN_ERR_L3_CS:
+ get_set(&be->cat.v21.cfn[index].err_l3_cs, value, get);
+ break;
+ case HW_CAT_CFN_ERR_L4_CS:
+ get_set(&be->cat.v21.cfn[index].err_l4_cs, value, get);
+ break;
+ case HW_CAT_CFN_ERR_TNL_L3_CS:
+ get_set(&be->cat.v21.cfn[index].err_tnl_l3_cs, value, get);
+ break;
+ case HW_CAT_CFN_ERR_TNL_L4_CS:
+ get_set(&be->cat.v21.cfn[index].err_tnl_l4_cs, value, get);
+ break;
+ case HW_CAT_CFN_ERR_TTL_EXP:
+ get_set(&be->cat.v21.cfn[index].err_ttl_exp, value, get);
+ break;
+ case HW_CAT_CFN_ERR_TNL_TTL_EXP:
+ get_set(&be->cat.v21.cfn[index].err_tnl_ttl_exp, value, get);
+ break;
+ case HW_CAT_CFN_MAC_PORT:
+ get_set(&be->cat.v21.cfn[index].mac_port, value, get);
+ break;
+ case HW_CAT_CFN_PM_CMP:
+ if (word_off > 1)
+ return error_word_off_too_large(__func__);
+ get_set(&be->cat.v21.cfn[index].pm_cmp[word_off], value, get);
+ break;
+ case HW_CAT_CFN_PM_DCT:
+ get_set(&be->cat.v21.cfn[index].pm_dct, value, get);
+ break;
+ case HW_CAT_CFN_PM_EXT_INV:
+ get_set(&be->cat.v21.cfn[index].pm_ext_inv, value, get);
+ break;
+ case HW_CAT_CFN_PM_CMB:
+ get_set(&be->cat.v21.cfn[index].pm_cmb, value, get);
+ break;
+ case HW_CAT_CFN_PM_AND_INV:
+ get_set(&be->cat.v21.cfn[index].pm_and_inv, value, get);
+ break;
+ case HW_CAT_CFN_PM_OR_INV:
+ get_set(&be->cat.v21.cfn[index].pm_or_inv, value, get);
+ break;
+ case HW_CAT_CFN_PM_INV:
+ get_set(&be->cat.v21.cfn[index].pm_inv, value, get);
+ break;
+ case HW_CAT_CFN_LC:
+ get_set(&be->cat.v21.cfn[index].lc, value, get);
+ break;
+ case HW_CAT_CFN_LC_INV:
+ get_set(&be->cat.v21.cfn[index].lc_inv, value, get);
+ break;
+ case HW_CAT_CFN_KM0_OR:
+ get_set(&be->cat.v21.cfn[index].km0_or, value, get);
+ break;
+ case HW_CAT_CFN_KM1_OR:
+ get_set(&be->cat.v21.cfn[index].km1_or, value, get);
+ break;
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 21/22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+ return 0;
+}
+
+int hw_mod_cat_cfn_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, int word_off, uint32_t value)
+{
+ return hw_mod_cat_cfn_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_cat_cfn_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, int word_off, uint32_t *value)
+{
+ return hw_mod_cat_cfn_mod(be, field, index, word_off, value, 1);
+}
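
Every table below repeats this pattern: a static *_mod() worker with a `get` flag, wrapped by thin *_set()/*_get() pairs. Both directions operate on the shadow cache only; a separate flush call pushes it to the device. A round-trip sketch with a hypothetical index:

uint32_t v = 0;

hw_mod_cat_cfn_set(be, HW_CAT_CFN_PTC_VLAN, 7, 0, 0xffffffff);
hw_mod_cat_cfn_get(be, HW_CAT_CFN_PTC_VLAN, 7, 0, &v);
/* v == 0xffffffff - read back from the cached struct, not the device */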
+
+static inline int
+find_km_flm_module_interface_index(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int km_if_id)
+{
+ int km_if_idx;
+
+ if (_VER_ == 18) {
+ km_if_idx = 0;
+ } else {
+ if (if_num == KM_FLM_IF_SECOND) {
+ if (be->cat.km_if_m1 == km_if_id)
+ km_if_idx = 1;
+ else
+ return error_unsup_field(__func__);
+ } else {
+ if (be->cat.km_if_m0 == km_if_id)
+ km_if_idx = 0;
+ else if (be->cat.km_if_m1 == km_if_id)
+ km_if_idx = 1;
+ else
+ return error_unsup_field(__func__);
+ }
+ }
+ return km_if_idx;
+}
+
+/*
+ * KCE
+ */
+
+static int hw_mod_cat_kce_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int km_if_id,
+ int start_idx, int count)
+{
+ /* writes 8 bits - one for each cfn - at a time */
+ if (count == ALL_ENTRIES)
+ count = be->cat.nb_cat_funcs / 8;
+ if ((unsigned int)(start_idx + count) > (be->cat.nb_cat_funcs / 8))
+ return error_index_too_large(__func__);
+ /* find KM module */
+ int km_if_idx;
+
+ km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+ if (km_if_idx < 0)
+ return km_if_idx;
+
+ return be->iface->cat_kce_flush(be->be_dev, &be->cat, km_if_idx,
+ start_idx, count);
+}
+
+int hw_mod_cat_kce_km_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int start_idx,
+ int count)
+{
+ return hw_mod_cat_kce_flush(be, if_num, 0, start_idx, count);
+}
+
+int hw_mod_cat_kce_flm_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int start_idx,
+ int count)
+{
+ return hw_mod_cat_kce_flush(be, if_num, 1, start_idx, count);
+}
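
The _km_/_flm_ wrapper pairs differ only in the hard-coded km_if_id (0 for the KM module, 1 for FLM), which find_km_flm_module_interface_index() maps to bank 0 or 1. A sketch, assuming a second interface is present (km_if_count > 1):

hw_mod_cat_kce_km_flush(be, KM_FLM_IF_FIRST, 0, ALL_ENTRIES);
hw_mod_cat_kce_flm_flush(be, KM_FLM_IF_SECOND, 0, ALL_ENTRIES);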
+
+static int hw_mod_cat_kce_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int km_if_id,
+ int index, uint32_t *value, int get)
+{
+ if ((unsigned int)index >= (be->cat.nb_cat_funcs / 8))
+ return error_index_too_large(__func__);
+
+ /* find KM module */
+ int km_if_idx;
+
+ km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+ if (km_if_idx < 0)
+ return km_if_idx;
+
+ switch (_VER_) {
+ case 18:
+ switch (field) {
+ case HW_CAT_KCE_ENABLE_BM:
+ get_set(&be->cat.v18.kce[index].enable_bm, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 18 */
+ case 21:
+ case 22:
+ switch (field) {
+ case HW_CAT_KCE_ENABLE_BM:
+ get_set(&be->cat.v21.kce[index].enable_bm[km_if_idx],
+ value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 21/22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_kce_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t value)
+{
+ return hw_mod_cat_kce_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_kce_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t *value)
+{
+ return hw_mod_cat_kce_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_kce_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t value)
+{
+ return hw_mod_cat_kce_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_kce_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t *value)
+{
+ return hw_mod_cat_kce_mod(be, field, if_num, 1, index, value, 1);
+}
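
Because each KCE entry packs one enable bit per CFN in groups of eight, enabling category function cfn_idx is a read-modify-write of entry cfn_idx / 8, bit cfn_idx % 8. A hedged helper sketch (the function name and flow are illustrative, not part of the patch):

static int kce_enable_cfn(struct flow_api_backend_s *be, int cfn_idx)
{
	uint32_t bm = 0;
	int err = hw_mod_cat_kce_km_get(be, HW_CAT_KCE_ENABLE_BM,
					KM_FLM_IF_FIRST, cfn_idx / 8, &bm);

	if (err)
		return err;
	bm |= 1U << (cfn_idx % 8);
	err = hw_mod_cat_kce_km_set(be, HW_CAT_KCE_ENABLE_BM,
				    KM_FLM_IF_FIRST, cfn_idx / 8, bm);
	if (err)
		return err;
	return hw_mod_cat_kce_km_flush(be, KM_FLM_IF_FIRST, cfn_idx / 8, 1);
}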
+
+/*
+ * KCS
+ */
+static int hw_mod_cat_kcs_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int km_if_id,
+ int start_idx, int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->cat.nb_cat_funcs;
+ if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+ return error_index_too_large(__func__);
+
+ /* find KM module */
+ int km_if_idx;
+
+ km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+ if (km_if_idx < 0)
+ return km_if_idx;
+
+ return be->iface->cat_kcs_flush(be->be_dev, &be->cat, km_if_idx,
+ start_idx, count);
+}
+
+int hw_mod_cat_kcs_km_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int start_idx,
+ int count)
+{
+ return hw_mod_cat_kcs_flush(be, if_num, 0, start_idx, count);
+}
+
+int hw_mod_cat_kcs_flm_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int start_idx,
+ int count)
+{
+ return hw_mod_cat_kcs_flush(be, if_num, 1, start_idx, count);
+}
+
+static int hw_mod_cat_kcs_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int km_if_id,
+ int index, uint32_t *value, int get)
+{
+ if ((unsigned int)index >= be->cat.nb_cat_funcs)
+ return error_index_too_large(__func__);
+ /* find KM module */
+ int km_if_idx;
+
+ km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+ if (km_if_idx < 0)
+ return km_if_idx;
+
+ switch (_VER_) {
+ case 18:
+ switch (field) {
+ case HW_CAT_KCS_CATEGORY:
+ get_set(&be->cat.v18.kcs[index].category, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 18 */
+ case 21:
+ case 22:
+ switch (field) {
+ case HW_CAT_KCS_CATEGORY:
+ get_set(&be->cat.v21.kcs[index].category[km_if_idx],
+ value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 21/22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_kcs_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t value)
+{
+ return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_kcs_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t *value)
+{
+ return hw_mod_cat_kcs_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_kcs_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t value)
+{
+ return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_kcs_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t *value)
+{
+ return hw_mod_cat_kcs_mod(be, field, if_num, 1, index, value, 1);
+}
+
+/*
+ * FTE
+ */
+static int hw_mod_cat_fte_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int km_if_id,
+ int start_idx, int count)
+{
+ const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+ if (count == ALL_ENTRIES)
+ count = be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types *
+ key_cnt;
+ if ((unsigned int)(start_idx + count) >
+ (be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+ return error_index_too_large(__func__);
+
+ /* find KM module */
+ int km_if_idx;
+
+ km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+ if (km_if_idx < 0)
+ return km_if_idx;
+
+ return be->iface->cat_fte_flush(be->be_dev, &be->cat, km_if_idx,
+ start_idx, count);
+}
+
+int hw_mod_cat_fte_km_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int start_idx,
+ int count)
+{
+ return hw_mod_cat_fte_flush(be, if_num, 0, start_idx, count);
+}
+
+int hw_mod_cat_fte_flm_flush(struct flow_api_backend_s *be,
+ enum km_flm_if_select_e if_num, int start_idx,
+ int count)
+{
+ return hw_mod_cat_fte_flush(be, if_num, 1, start_idx, count);
+}
+
+static int hw_mod_cat_fte_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int km_if_id,
+ int index, uint32_t *value, int get)
+{
+ const uint32_t key_cnt = (_VER_ >= 20) ? 4 : 2;
+
+ if ((unsigned int)index >=
+ (be->cat.nb_cat_funcs / 8 * be->cat.nb_flow_types * key_cnt))
+ return error_index_too_large(__func__);
+ /* find KM module */
+ int km_if_idx;
+
+ km_if_idx = find_km_flm_module_interface_index(be, if_num, km_if_id);
+ if (km_if_idx < 0)
+ return km_if_idx;
+
+ switch (_VER_) {
+ case 18:
+ switch (field) {
+ case HW_CAT_FTE_ENABLE_BM:
+ get_set(&be->cat.v18.fte[index].enable_bm, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 18 */
+ case 21:
+ case 22:
+ switch (field) {
+ case HW_CAT_FTE_ENABLE_BM:
+ get_set(&be->cat.v21.fte[index].enable_bm[km_if_idx],
+ value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 21/22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_fte_km_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t value)
+{
+ return hw_mod_cat_fte_mod(be, field, if_num, 0, index, &value, 0);
+}
+
+int hw_mod_cat_fte_km_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t *value)
+{
+ return hw_mod_cat_fte_mod(be, field, if_num, 0, index, value, 1);
+}
+
+int hw_mod_cat_fte_flm_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t value)
+{
+ return hw_mod_cat_fte_mod(be, field, if_num, 1, index, &value, 0);
+}
+
+int hw_mod_cat_fte_flm_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ enum km_flm_if_select_e if_num, int index,
+ uint32_t *value)
+{
+ return hw_mod_cat_fte_mod(be, field, if_num, 1, index, value, 1);
+}
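
The FTE table is a flattened three-dimensional array of nb_cat_funcs / 8 CFN groups, nb_flow_types flow types, and key_cnt keys (4 from version 20, 2 before). Only the total size is fixed by the bounds checks above; the linearization below is an assumption for illustration:

static inline int fte_index(struct flow_api_backend_s *be, int cfn_grp,
			    int flow_type, int key, int key_cnt)
{
	/* assumed ordering: CFN group major, then flow type, then key */
	return (cfn_grp * (int)be->cat.nb_flow_types + flow_type) * key_cnt +
	       key;
}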
+
+int hw_mod_cat_cte_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->cat.nb_cat_funcs;
+ if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs)
+ return error_index_too_large(__func__);
+ return be->iface->cat_cte_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_cte_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field, int index, uint32_t *value,
+ int get)
+{
+ if ((unsigned int)index >= be->cat.nb_cat_funcs)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 18:
+ case 21:
+ case 22:
+ switch (field) {
+ case HW_CAT_CTE_ENABLE_BM:
+ get_set(&be->cat.v18.cte[index].enable_bm, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 18/21/22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_cte_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_cat_cte_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cte_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_cat_cte_mod(be, field, index, value, 1);
+}
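
HW_CAT_CTE_ENABLE_BM takes the whole 32-bit enable word; the cat_v18_cte_s union (defined in the v18 header later in this patch) lets a caller compose it per block. A sketch with a hypothetical idx, block meanings inferred from the module names:

struct cat_v18_cte_s cte = { 0 };

cte.b.col = 1;	/* colour block */
cte.b.qsl = 1;	/* queue selection block */
hw_mod_cat_cte_set(be, HW_CAT_CTE_ENABLE_BM, idx, cte.enable_bm);
hw_mod_cat_cte_flush(be, idx, 1);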
+
+int hw_mod_cat_cts_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ int addr_size = (_VER_ < 15) ? 8 : ((be->cat.cts_num + 1) / 2);
+
+ if (count == ALL_ENTRIES)
+ count = be->cat.nb_cat_funcs * addr_size;
+ if ((unsigned int)(start_idx + count) >
+ (be->cat.nb_cat_funcs * addr_size))
+ return error_index_too_large(__func__);
+ return be->iface->cat_cts_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_cts_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field, int index, uint32_t *value,
+ int get)
+{
+ int addr_size = (be->cat.cts_num + 1) / 2;
+
+ if ((unsigned int)index >= (be->cat.nb_cat_funcs * addr_size))
+ return error_index_too_large(__func__);
+
+ switch (_VER_) {
+ case 18:
+ case 21:
+ case 22:
+ switch (field) {
+ case HW_CAT_CTS_CAT_A:
+ get_set(&be->cat.v18.cts[index].cat_a, value, get);
+ break;
+ case HW_CAT_CTS_CAT_B:
+ get_set(&be->cat.v18.cts[index].cat_b, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 18/21/22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_cts_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_cat_cts_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cts_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_cat_cts_mod(be, field, index, value, 1);
+}
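
Each CFN owns addr_size = (cts_num + 1) / 2 consecutive CTS entries, each holding a cat_a/cat_b pair. A plausible addressing helper - the even/odd slot split is an assumption, only the total table size is given by the code above:

static int cts_set_slot(struct flow_api_backend_s *be, int cfn, int slot,
			uint32_t cat)
{
	int addr_size = (be->cat.cts_num + 1) / 2;
	int idx = cfn * addr_size + slot / 2; /* assumed layout */

	return hw_mod_cat_cts_set(be, (slot & 1) ? HW_CAT_CTS_CAT_B :
				  HW_CAT_CTS_CAT_A, idx, cat);
}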
+
+int hw_mod_cat_cot_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->max_categories;
+ if ((unsigned int)(start_idx + count) > be->max_categories)
+ return error_index_too_large(__func__);
+ return be->iface->cat_cot_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_cot_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field, int index, uint32_t *value,
+ int get)
+{
+	int rv = 0;
+
+	if ((unsigned int)index >= be->max_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 18:
+ case 21:
+ case 22:
+ switch (field) {
+ case HW_CAT_COT_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->cat.v18.cot[index], (uint8_t)*value,
+ sizeof(struct cat_v18_cot_s));
+ break;
+ case HW_CAT_COT_COMPARE:
+ rv = do_compare_indexes(be->cat.v18.cot,
+ sizeof(struct cat_v18_cot_s), index, *value,
+ be->max_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_CAT_COT_FIND:
+ rv = find_equal_index(be->cat.v18.cot,
+ sizeof(struct cat_v18_cot_s), index, *value,
+ be->max_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_CAT_COT_COLOR:
+ get_set(&be->cat.v18.cot[index].color, value, get);
+ break;
+ case HW_CAT_COT_KM:
+ get_set(&be->cat.v18.cot[index].km, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 18/21/22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_cot_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_cat_cot_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cot_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_cat_cot_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_cct_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->cat.nb_cat_funcs * 4;
+ if ((unsigned int)(start_idx + count) > be->cat.nb_cat_funcs * 4)
+ return error_index_too_large(__func__);
+ return be->iface->cat_cct_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_cct_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field, int index, uint32_t *value,
+ int get)
+{
+ if ((unsigned int)index >= be->cat.nb_cat_funcs * 4)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 18:
+ case 21:
+ case 22:
+ switch (field) {
+ case HW_CAT_CCT_COLOR:
+ get_set(&be->cat.v18.cct[index].color, value, get);
+ break;
+ case HW_CAT_CCT_KM:
+ get_set(&be->cat.v18.cct[index].km, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 18/21/22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_cct_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_cat_cct_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cct_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_cat_cct_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_kcc_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->cat.kcc_size;
+ if ((unsigned int)(start_idx + count) > be->cat.kcc_size)
+ return error_index_too_large(__func__);
+ return be->iface->cat_kcc_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_kcc_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field, int index, int word_off,
+ uint32_t *value, int get)
+{
+ if ((unsigned int)index >= be->cat.kcc_size)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 18:
+ case 21:
+ case 22:
+ switch (field) {
+ case HW_CAT_KCC_KEY:
+ if (word_off > 1)
+ return error_word_off_too_large(__func__);
+ get_set(&be->cat.v18.kcc_cam[index].key[word_off], value,
+ get);
+ break;
+
+ case HW_CAT_KCC_CATEGORY:
+ get_set(&be->cat.v18.kcc_cam[index].category, value, get);
+ break;
+
+ case HW_CAT_KCC_ID:
+ get_set(&be->cat.v18.kcc_cam[index].id, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 18/21/22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_kcc_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, int word_off, uint32_t value)
+{
+ return hw_mod_cat_kcc_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_cat_kcc_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, int word_off, uint32_t *value)
+{
+ return hw_mod_cat_kcc_mod(be, field, index, word_off, value, 1);
+}
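
A KCC CAM entry pairs a 64-bit key - two 32-bit words selected via word_off - with a category and an id. Writing one entry (idx, key_lo, key_hi, cat and id are hypothetical values):

hw_mod_cat_kcc_set(be, HW_CAT_KCC_KEY, idx, 0, key_lo);
hw_mod_cat_kcc_set(be, HW_CAT_KCC_KEY, idx, 1, key_hi);
hw_mod_cat_kcc_set(be, HW_CAT_KCC_CATEGORY, idx, 0, cat);
hw_mod_cat_kcc_set(be, HW_CAT_KCC_ID, idx, 0, id);
hw_mod_cat_kcc_flush(be, idx, 1);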
+
+int hw_mod_cat_exo_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->cat.nb_pm_ext;
+ if ((unsigned int)(start_idx + count) > be->cat.nb_pm_ext)
+ return error_index_too_large(__func__);
+ return be->iface->cat_exo_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_exo_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field, int index, uint32_t *value,
+ int get)
+{
+ if ((unsigned int)index >= be->cat.nb_pm_ext)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 18:
+ case 21:
+ case 22:
+ switch (field) {
+ case HW_CAT_EXO_DYN:
+ get_set(&be->cat.v18.exo[index].dyn, value, get);
+ break;
+ case HW_CAT_EXO_OFS:
+ get_set_signed(&be->cat.v18.exo[index].ofs, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 18/21/22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_exo_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_cat_exo_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_exo_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_cat_exo_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_rck_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->cat.nb_pm_ext * 64;
+ if ((unsigned int)(start_idx + count) > (be->cat.nb_pm_ext * 64))
+ return error_index_too_large(__func__);
+ return be->iface->cat_rck_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_rck_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field, int index, uint32_t *value,
+ int get)
+{
+ if ((unsigned int)index >= (be->cat.nb_pm_ext * 64))
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 18:
+ case 21:
+ case 22:
+ switch (field) {
+ case HW_CAT_RCK_DATA:
+ get_set(&be->cat.v18.rck[index].rck_data, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 18/21/22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_rck_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_cat_rck_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_rck_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_cat_rck_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_len_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->cat.nb_len;
+ if ((unsigned int)(start_idx + count) > be->cat.nb_len)
+ return error_index_too_large(__func__);
+ return be->iface->cat_len_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_len_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field, int index, uint32_t *value,
+ int get)
+{
+ if ((unsigned int)index >= be->cat.nb_len)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 18:
+ case 21:
+ case 22:
+ switch (field) {
+ case HW_CAT_LEN_LOWER:
+ get_set(&be->cat.v18.len[index].lower, value, get);
+ break;
+ case HW_CAT_LEN_UPPER:
+ get_set(&be->cat.v18.len[index].upper, value, get);
+ break;
+ case HW_CAT_LEN_DYN1:
+ get_set(&be->cat.v18.len[index].dyn1, value, get);
+ break;
+ case HW_CAT_LEN_DYN2:
+ get_set(&be->cat.v18.len[index].dyn2, value, get);
+ break;
+ case HW_CAT_LEN_INV:
+ get_set(&be->cat.v18.len[index].inv, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 18/21/22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_len_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_cat_len_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_len_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_cat_len_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_cce_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = 4;
+ if ((unsigned int)(start_idx + count) > 4)
+ return error_index_too_large(__func__);
+ return be->iface->cat_cce_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_cce_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field, int index, uint32_t *value,
+ int get)
+{
+	if ((unsigned int)index >= 4) /* CCE holds 4 entries, cf. flush above */
+		return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 22:
+ switch (field) {
+ case HW_CAT_CCE_IMM:
+ get_set(&be->cat.v22.cce[index].imm, value, get);
+ break;
+ case HW_CAT_CCE_IND:
+ get_set(&be->cat.v22.cce[index].ind, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_cce_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_cat_cce_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_cce_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_cat_cce_mod(be, field, index, value, 1);
+}
+
+int hw_mod_cat_ccs_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = 1024;
+ if ((unsigned int)(start_idx + count) > 1024)
+ return error_index_too_large(__func__);
+ return be->iface->cat_ccs_flush(be->be_dev, &be->cat, start_idx, count);
+}
+
+static int hw_mod_cat_ccs_mod(struct flow_api_backend_s *be,
+ enum hw_cat_e field, int index, uint32_t *value,
+ int get)
+{
+	if ((unsigned int)index >= 1024) /* CCS holds 1024 entries, cf. flush above */
+		return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 22:
+ switch (field) {
+ case HW_CAT_CCS_COR_EN:
+ get_set(&be->cat.v22.ccs[index].cor_en, value, get);
+ break;
+ case HW_CAT_CCS_COR:
+ get_set(&be->cat.v22.ccs[index].cor, value, get);
+ break;
+ case HW_CAT_CCS_HSH_EN:
+ get_set(&be->cat.v22.ccs[index].hsh_en, value, get);
+ break;
+ case HW_CAT_CCS_HSH:
+ get_set(&be->cat.v22.ccs[index].hsh, value, get);
+ break;
+ case HW_CAT_CCS_QSL_EN:
+ get_set(&be->cat.v22.ccs[index].qsl_en, value, get);
+ break;
+ case HW_CAT_CCS_QSL:
+ get_set(&be->cat.v22.ccs[index].qsl, value, get);
+ break;
+ case HW_CAT_CCS_IPF_EN:
+ get_set(&be->cat.v22.ccs[index].ipf_en, value, get);
+ break;
+ case HW_CAT_CCS_IPF:
+ get_set(&be->cat.v22.ccs[index].ipf, value, get);
+ break;
+ case HW_CAT_CCS_SLC_EN:
+ get_set(&be->cat.v22.ccs[index].slc_en, value, get);
+ break;
+ case HW_CAT_CCS_SLC:
+ get_set(&be->cat.v22.ccs[index].slc, value, get);
+ break;
+ case HW_CAT_CCS_PDB_EN:
+ get_set(&be->cat.v22.ccs[index].pdb_en, value, get);
+ break;
+ case HW_CAT_CCS_PDB:
+ get_set(&be->cat.v22.ccs[index].pdb, value, get);
+ break;
+ case HW_CAT_CCS_MSK_EN:
+ get_set(&be->cat.v22.ccs[index].msk_en, value, get);
+ break;
+ case HW_CAT_CCS_MSK:
+ get_set(&be->cat.v22.ccs[index].msk, value, get);
+ break;
+ case HW_CAT_CCS_HST_EN:
+ get_set(&be->cat.v22.ccs[index].hst_en, value, get);
+ break;
+ case HW_CAT_CCS_HST:
+ get_set(&be->cat.v22.ccs[index].hst, value, get);
+ break;
+ case HW_CAT_CCS_EPP_EN:
+ get_set(&be->cat.v22.ccs[index].epp_en, value, get);
+ break;
+ case HW_CAT_CCS_EPP:
+ get_set(&be->cat.v22.ccs[index].epp, value, get);
+ break;
+ case HW_CAT_CCS_TPE_EN:
+ get_set(&be->cat.v22.ccs[index].tpe_en, value, get);
+ break;
+ case HW_CAT_CCS_TPE:
+ get_set(&be->cat.v22.ccs[index].tpe, value, get);
+ break;
+ case HW_CAT_CCS_RRB_EN:
+ get_set(&be->cat.v22.ccs[index].rrb_en, value, get);
+ break;
+ case HW_CAT_CCS_RRB:
+ get_set(&be->cat.v22.ccs[index].rrb, value, get);
+ break;
+ case HW_CAT_CCS_SB0_TYPE:
+ get_set(&be->cat.v22.ccs[index].sb0_type, value, get);
+ break;
+ case HW_CAT_CCS_SB0_DATA:
+ get_set(&be->cat.v22.ccs[index].sb0_data, value, get);
+ break;
+ case HW_CAT_CCS_SB1_TYPE:
+ get_set(&be->cat.v22.ccs[index].sb1_type, value, get);
+ break;
+ case HW_CAT_CCS_SB1_DATA:
+ get_set(&be->cat.v22.ccs[index].sb1_data, value, get);
+ break;
+ case HW_CAT_CCS_SB2_TYPE:
+ get_set(&be->cat.v22.ccs[index].sb2_type, value, get);
+ break;
+ case HW_CAT_CCS_SB2_DATA:
+ get_set(&be->cat.v22.ccs[index].sb2_data, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 22 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_cat_ccs_set(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_cat_ccs_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_cat_ccs_get(struct flow_api_backend_s *be, enum hw_cat_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_cat_ccs_mod(be, field, index, value, 1);
+}
new file mode 100644
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V18_H_
+#define _HW_MOD_CAT_V18_H_
+
+struct cat_v18_cfn_s {
+ uint32_t enable;
+ uint32_t inv;
+ /* protocol checks */
+ uint32_t ptc_inv;
+ uint32_t ptc_isl;
+ uint32_t ptc_cfp;
+ uint32_t ptc_mac;
+ uint32_t ptc_l2;
+ uint32_t ptc_vntag;
+ uint32_t ptc_vlan;
+ uint32_t ptc_mpls;
+ uint32_t ptc_l3;
+ uint32_t ptc_frag;
+ uint32_t ptc_ip_prot;
+ uint32_t ptc_l4;
+ uint32_t ptc_tunnel;
+ uint32_t ptc_tnl_l2;
+ uint32_t ptc_tnl_vlan;
+ uint32_t ptc_tnl_mpls;
+ uint32_t ptc_tnl_l3;
+ uint32_t ptc_tnl_frag;
+ uint32_t ptc_tnl_ip_prot;
+ uint32_t ptc_tnl_l4;
+ /* error checks */
+ uint32_t err_inv;
+ uint32_t err_cv;
+ uint32_t err_fcs;
+ uint32_t err_trunc;
+ uint32_t err_l3_cs;
+ uint32_t err_l4_cs;
+ /* in port */
+ uint32_t mac_port;
+ /* pattern matcher */
+ uint32_t pm_cmp[2];
+ uint32_t pm_dct;
+ uint32_t pm_ext_inv;
+ uint32_t pm_cmb;
+ uint32_t pm_and_inv;
+ uint32_t pm_or_inv;
+ uint32_t pm_inv;
+ uint32_t lc;
+ uint32_t lc_inv;
+ uint32_t km_or;
+};
+
+struct cat_v18_kce_s {
+ uint32_t enable_bm;
+};
+
+struct cat_v18_kcs_s {
+ uint32_t category;
+};
+
+struct cat_v18_fte_s {
+ uint32_t enable_bm;
+};
+
+struct cat_v18_cte_s {
+ union {
+ uint32_t enable_bm;
+ struct {
+ uint32_t col : 1;
+ uint32_t cor : 1;
+ uint32_t hsh : 1;
+ uint32_t qsl : 1;
+ uint32_t ipf : 1;
+ uint32_t slc : 1;
+ uint32_t pdb : 1;
+ uint32_t msk : 1;
+ uint32_t hst : 1;
+ uint32_t epp : 1;
+ uint32_t tpe : 1;
+ } b;
+ };
+};
+
+struct cat_v18_cts_s {
+ uint32_t cat_a;
+ uint32_t cat_b;
+};
+
+struct cat_v18_cot_s {
+ uint32_t color;
+ uint32_t km;
+};
+
+struct cat_v18_cct_s {
+ uint32_t color;
+ uint32_t km;
+};
+
+struct cat_v18_exo_s {
+ uint32_t dyn;
+ int32_t ofs;
+};
+
+struct cat_v18_rck_s {
+ uint32_t rck_data;
+};
+
+struct cat_v18_len_s {
+ uint32_t lower;
+ uint32_t upper;
+ uint32_t dyn1;
+ uint32_t dyn2;
+ uint32_t inv;
+};
+
+struct cat_v18_kcc_s {
+ uint32_t key[2];
+ uint32_t category;
+ uint32_t id;
+};
+
+struct hw_mod_cat_v18_s {
+ struct cat_v18_cfn_s *cfn;
+ struct cat_v18_kce_s *kce;
+ struct cat_v18_kcs_s *kcs;
+ struct cat_v18_fte_s *fte;
+ struct cat_v18_cte_s *cte;
+ struct cat_v18_cts_s *cts;
+ struct cat_v18_cot_s *cot;
+ struct cat_v18_cct_s *cct;
+ struct cat_v18_exo_s *exo;
+ struct cat_v18_rck_s *rck;
+ struct cat_v18_len_s *len;
+ struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V18_H_ */
new file mode 100644
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V21_H_
+#define _HW_MOD_CAT_V21_H_
+
+#include "hw_mod_cat_v18.h"
+
+struct cat_v21_cfn_s {
+ uint32_t enable;
+ uint32_t inv;
+ /* protocol checks */
+ uint32_t ptc_inv;
+ uint32_t ptc_isl;
+ uint32_t ptc_cfp;
+ uint32_t ptc_mac;
+ uint32_t ptc_l2;
+ uint32_t ptc_vntag;
+ uint32_t ptc_vlan;
+ uint32_t ptc_mpls;
+ uint32_t ptc_l3;
+ uint32_t ptc_frag;
+ uint32_t ptc_ip_prot;
+ uint32_t ptc_l4;
+ uint32_t ptc_tunnel;
+ uint32_t ptc_tnl_l2;
+ uint32_t ptc_tnl_vlan;
+ uint32_t ptc_tnl_mpls;
+ uint32_t ptc_tnl_l3;
+ uint32_t ptc_tnl_frag;
+ uint32_t ptc_tnl_ip_prot;
+ uint32_t ptc_tnl_l4;
+ /* error checks */
+ uint32_t err_inv;
+ uint32_t err_cv;
+ uint32_t err_fcs;
+ uint32_t err_trunc;
+ uint32_t err_l3_cs;
+ uint32_t err_l4_cs;
+ uint32_t err_tnl_l3_cs;
+ uint32_t err_tnl_l4_cs;
+ uint32_t err_ttl_exp;
+ uint32_t err_tnl_ttl_exp;
+ /* in port */
+ uint32_t mac_port;
+ /* pattern matcher */
+ uint32_t pm_cmp[2];
+ uint32_t pm_dct;
+ uint32_t pm_ext_inv;
+ uint32_t pm_cmb;
+ uint32_t pm_and_inv;
+ uint32_t pm_or_inv;
+ uint32_t pm_inv;
+ uint32_t lc;
+ uint32_t lc_inv;
+ uint32_t km0_or;
+ uint32_t km1_or;
+};
+
+struct cat_v21_kce_s {
+ uint32_t enable_bm[2];
+};
+
+struct cat_v21_kcs_s {
+ uint32_t category[2];
+};
+
+struct cat_v21_fte_s {
+ uint32_t enable_bm[2];
+};
+
+struct hw_mod_cat_v21_s {
+ struct cat_v21_cfn_s *cfn;
+ struct cat_v21_kce_s *kce;
+ struct cat_v21_kcs_s *kcs;
+ struct cat_v21_fte_s *fte;
+ struct cat_v18_cte_s *cte;
+ struct cat_v18_cts_s *cts;
+ struct cat_v18_cot_s *cot;
+ struct cat_v18_cct_s *cct;
+ struct cat_v18_exo_s *exo;
+ struct cat_v18_rck_s *rck;
+ struct cat_v18_len_s *len;
+ struct cat_v18_kcc_s *kcc_cam;
+};
+
+#endif /* _HW_MOD_CAT_V21_H_ */
new file mode 100644
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_CAT_V22_H_
+#define _HW_MOD_CAT_V22_H_
+
+#include "hw_mod_cat_v21.h"
+
+struct cat_v22_cte_s {
+ union {
+ uint32_t enable_bm;
+ struct {
+ uint32_t col : 1;
+ uint32_t cor : 1;
+ uint32_t hsh : 1;
+ uint32_t qsl : 1;
+ uint32_t ipf : 1;
+ uint32_t slc : 1;
+ uint32_t pdb : 1;
+ uint32_t msk : 1;
+ uint32_t hst : 1;
+ uint32_t epp : 1;
+ uint32_t tpe : 1;
+ uint32_t rrb : 1;
+ } b;
+ };
+};
+
+struct cat_v22_cce_s {
+ uint32_t imm;
+ uint32_t ind;
+};
+
+struct cat_v22_ccs_s {
+ uint32_t cor_en;
+ uint32_t cor;
+ uint32_t hsh_en;
+ uint32_t hsh;
+ uint32_t qsl_en;
+ uint32_t qsl;
+ uint32_t ipf_en;
+ uint32_t ipf;
+ uint32_t slc_en;
+ uint32_t slc;
+ uint32_t pdb_en;
+ uint32_t pdb;
+ uint32_t msk_en;
+ uint32_t msk;
+ uint32_t hst_en;
+ uint32_t hst;
+ uint32_t epp_en;
+ uint32_t epp;
+ uint32_t tpe_en;
+ uint32_t tpe;
+ uint32_t rrb_en;
+ uint32_t rrb;
+ uint32_t sb0_type;
+ uint32_t sb0_data;
+ uint32_t sb1_type;
+ uint32_t sb1_data;
+ uint32_t sb2_type;
+ uint32_t sb2_data;
+};
+
+struct hw_mod_cat_v22_s {
+ struct cat_v21_cfn_s *cfn;
+ struct cat_v21_kce_s *kce; /* KCE 0/1 */
+ struct cat_v21_kcs_s *kcs; /* KCS 0/1 */
+ struct cat_v21_fte_s *fte; /* FTE 0/1 */
+ struct cat_v22_cte_s *cte;
+ struct cat_v18_cts_s *cts;
+ struct cat_v18_cot_s *cot;
+ struct cat_v18_cct_s *cct;
+ struct cat_v18_exo_s *exo;
+ struct cat_v18_rck_s *rck;
+ struct cat_v18_len_s *len;
+ struct cat_v18_kcc_s *kcc_cam;
+ struct cat_v22_cce_s *cce;
+ struct cat_v22_ccs_s *ccs;
+};
+
+#endif /* _HW_MOD_CAT_V22_H_ */
new file mode 100644
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "FLM"
+#define _VER_ be->flm.ver
+
+bool hw_mod_flm_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_flm_present(be->be_dev);
+}
+
+int hw_mod_flm_alloc(struct flow_api_backend_s *be)
+{
+ int nb;
+
+ _VER_ = be->iface->get_flm_version(be->be_dev);
+ NT_LOG(DBG, FILTER, "FLM MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+ VER_MINOR(_VER_));
+
+ nb = be->iface->get_nb_flm_categories(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "flm_categories", _MOD_, _VER_);
+ be->flm.nb_categories = (uint32_t)nb;
+
+ nb = be->iface->get_nb_flm_size_mb(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "flm_size_mb", _MOD_, _VER_);
+ be->flm.nb_size_mb = (uint32_t)nb;
+
+ nb = be->iface->get_nb_flm_entry_size(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "flm_entry_size", _MOD_, _VER_);
+ be->flm.nb_entry_size = (uint32_t)nb;
+
+ nb = be->iface->get_nb_flm_variant(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+ be->flm.nb_variant = (uint32_t)nb;
+
+ nb = be->iface->get_nb_flm_prios(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "flm_prios", _MOD_, _VER_);
+ be->flm.nb_prios = (uint32_t)nb;
+
+ nb = be->iface->get_nb_flm_pst_profiles(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "flm_variant", _MOD_, _VER_);
+ be->flm.nb_pst_profiles = (uint32_t)nb;
+
+ switch (_VER_) {
+ case 17:
+ if (!callocate_mod(CAST_COMMON(&be->flm), 26,
+ &be->flm.v17.control, 1,
+ sizeof(struct flm_v17_control_s),
+ &be->flm.v17.status, 1,
+ sizeof(struct flm_v17_status_s),
+ &be->flm.v17.timeout, 1,
+ sizeof(struct flm_v17_timeout_s),
+ &be->flm.v17.scrub, 1,
+ sizeof(struct flm_v17_scrub_s),
+ &be->flm.v17.load_bin, 1,
+ sizeof(struct flm_v17_load_bin_s),
+ &be->flm.v17.load_pps, 1,
+ sizeof(struct flm_v17_load_pps_s),
+ &be->flm.v17.load_lps, 1,
+ sizeof(struct flm_v17_load_lps_s),
+ &be->flm.v17.load_aps, 1,
+ sizeof(struct flm_v17_load_aps_s),
+ &be->flm.v17.prio, 1,
+ sizeof(struct flm_v17_prio_s),
+ &be->flm.v17.pst, be->flm.nb_pst_profiles,
+ sizeof(struct flm_v17_pst_s),
+ &be->flm.v17.rcp, be->flm.nb_categories,
+ sizeof(struct flm_v17_rcp_s),
+ &be->flm.v17.buf_ctrl, 1,
+ sizeof(struct flm_v17_buf_ctrl_s),
+ &be->flm.v17.lrn_done, 1,
+ sizeof(struct flm_v17_stat_lrn_done_s),
+ &be->flm.v17.lrn_ignore, 1,
+ sizeof(struct flm_v17_stat_lrn_ignore_s),
+ &be->flm.v17.lrn_fail, 1,
+ sizeof(struct flm_v17_stat_lrn_fail_s),
+ &be->flm.v17.unl_done, 1,
+ sizeof(struct flm_v17_stat_unl_done_s),
+ &be->flm.v17.unl_ignore, 1,
+ sizeof(struct flm_v17_stat_unl_ignore_s),
+ &be->flm.v17.rel_done, 1,
+ sizeof(struct flm_v17_stat_rel_done_s),
+ &be->flm.v17.rel_ignore, 1,
+ sizeof(struct flm_v17_stat_rel_ignore_s),
+ &be->flm.v17.aul_done, 1,
+ sizeof(struct flm_v17_stat_aul_done_s),
+ &be->flm.v17.aul_ignore, 1,
+ sizeof(struct flm_v17_stat_aul_ignore_s),
+ &be->flm.v17.aul_fail, 1,
+ sizeof(struct flm_v17_stat_aul_fail_s),
+ &be->flm.v17.tul_done, 1,
+ sizeof(struct flm_v17_stat_tul_done_s),
+ &be->flm.v17.flows, 1,
+ sizeof(struct flm_v17_stat_flows_s),
+ &be->flm.v17.prb_done, 1,
+ sizeof(struct flm_v17_stat_prb_done_s),
+ &be->flm.v17.prb_ignore, 1,
+ sizeof(struct flm_v17_stat_prb_ignore_s)))
+ return -1;
+ break;
+
+ case 20:
+ if (!callocate_mod(CAST_COMMON(&be->flm), 38,
+ &be->flm.v17.control, 1,
+ sizeof(struct flm_v17_control_s),
+ &be->flm.v17.status, 1,
+ sizeof(struct flm_v17_status_s),
+ &be->flm.v17.timeout, 1,
+ sizeof(struct flm_v17_timeout_s),
+ &be->flm.v17.scrub, 1,
+ sizeof(struct flm_v17_scrub_s),
+ &be->flm.v17.load_bin, 1,
+ sizeof(struct flm_v17_load_bin_s),
+ &be->flm.v17.load_pps, 1,
+ sizeof(struct flm_v17_load_pps_s),
+ &be->flm.v17.load_lps, 1,
+ sizeof(struct flm_v17_load_lps_s),
+ &be->flm.v17.load_aps, 1,
+ sizeof(struct flm_v17_load_aps_s),
+ &be->flm.v17.prio, 1,
+ sizeof(struct flm_v17_prio_s),
+ &be->flm.v17.pst, be->flm.nb_pst_profiles,
+ sizeof(struct flm_v17_pst_s),
+ &be->flm.v17.rcp, be->flm.nb_categories,
+ sizeof(struct flm_v17_rcp_s),
+ &be->flm.v17.buf_ctrl, 1,
+ sizeof(struct flm_v17_buf_ctrl_s),
+ &be->flm.v17.lrn_done, 1,
+ sizeof(struct flm_v17_stat_lrn_done_s),
+ &be->flm.v17.lrn_ignore, 1,
+ sizeof(struct flm_v17_stat_lrn_ignore_s),
+ &be->flm.v17.lrn_fail, 1,
+ sizeof(struct flm_v17_stat_lrn_fail_s),
+ &be->flm.v17.unl_done, 1,
+ sizeof(struct flm_v17_stat_unl_done_s),
+ &be->flm.v17.unl_ignore, 1,
+ sizeof(struct flm_v17_stat_unl_ignore_s),
+ &be->flm.v17.rel_done, 1,
+ sizeof(struct flm_v17_stat_rel_done_s),
+ &be->flm.v17.rel_ignore, 1,
+ sizeof(struct flm_v17_stat_rel_ignore_s),
+ &be->flm.v17.aul_done, 1,
+ sizeof(struct flm_v17_stat_aul_done_s),
+ &be->flm.v17.aul_ignore, 1,
+ sizeof(struct flm_v17_stat_aul_ignore_s),
+ &be->flm.v17.aul_fail, 1,
+ sizeof(struct flm_v17_stat_aul_fail_s),
+ &be->flm.v17.tul_done, 1,
+ sizeof(struct flm_v17_stat_tul_done_s),
+ &be->flm.v17.flows, 1,
+ sizeof(struct flm_v17_stat_flows_s),
+ &be->flm.v17.prb_done, 1,
+ sizeof(struct flm_v17_stat_prb_done_s),
+ &be->flm.v17.prb_ignore, 1,
+ sizeof(struct flm_v17_stat_prb_ignore_s),
+ &be->flm.v20.sta_done, 1,
+ sizeof(struct flm_v20_stat_sta_done_s),
+ &be->flm.v20.inf_done, 1,
+ sizeof(struct flm_v20_stat_inf_done_s),
+ &be->flm.v20.inf_skip, 1,
+ sizeof(struct flm_v20_stat_inf_skip_s),
+ &be->flm.v20.pck_hit, 1,
+ sizeof(struct flm_v20_stat_pck_hit_s),
+ &be->flm.v20.pck_miss, 1,
+ sizeof(struct flm_v20_stat_pck_miss_s),
+ &be->flm.v20.pck_unh, 1,
+ sizeof(struct flm_v20_stat_pck_unh_s),
+ &be->flm.v20.pck_dis, 1,
+ sizeof(struct flm_v20_stat_pck_dis_s),
+ &be->flm.v20.csh_hit, 1,
+ sizeof(struct flm_v20_stat_csh_hit_s),
+ &be->flm.v20.csh_miss, 1,
+ sizeof(struct flm_v20_stat_csh_miss_s),
+ &be->flm.v20.csh_unh, 1,
+ sizeof(struct flm_v20_stat_csh_unh_s),
+ &be->flm.v20.cuc_start, 1,
+ sizeof(struct flm_v20_stat_cuc_start_s),
+ &be->flm.v20.cuc_move, 1,
+ sizeof(struct flm_v20_stat_cuc_move_s)))
+ return -1;
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
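
callocate_mod() (defined elsewhere in this patch set) takes the number of tables followed by (pointer, element-count, element-size) triplets and carves every table out of one allocation - which is why hw_mod_flm_free() below can release everything with a single free() of be->flm.base. A reduced sketch of the calling convention, two triplets instead of the 26/38 above:

if (!callocate_mod(CAST_COMMON(&be->flm), 2,
		   &be->flm.v17.control, 1, sizeof(struct flm_v17_control_s),
		   &be->flm.v17.rcp, be->flm.nb_categories,
		   sizeof(struct flm_v17_rcp_s)))
	return -1;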
+
+void hw_mod_flm_free(struct flow_api_backend_s *be)
+{
+ if (be->flm.base) {
+ free(be->flm.base);
+ be->flm.base = NULL;
+ }
+}
+
+int hw_mod_flm_reset(struct flow_api_backend_s *be)
+{
+ /* Zero entire cache area */
+ ZERO_MOD_CACHE(&be->flm);
+
+ NT_LOG(DBG, FILTER, "INIT FLM\n");
+ hw_mod_flm_control_set(be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE, 0x10);
+
+ hw_mod_flm_control_flush(be);
+ hw_mod_flm_timeout_flush(be);
+ hw_mod_flm_scrub_flush(be);
+ hw_mod_flm_rcp_flush(be, 0, ALL_ENTRIES);
+
+ return 0;
+}
+
+int hw_mod_flm_control_flush(struct flow_api_backend_s *be)
+{
+ return be->iface->flm_control_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_control_mod(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value, int get)
+{
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_CONTROL_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(be->flm.v17.control, (uint8_t)*value,
+ sizeof(struct flm_v17_control_s));
+ break;
+ case HW_FLM_CONTROL_ENABLE:
+ get_set(&be->flm.v17.control->enable, value, get);
+ break;
+ case HW_FLM_CONTROL_INIT:
+ get_set(&be->flm.v17.control->init, value, get);
+ break;
+ case HW_FLM_CONTROL_LDS:
+ get_set(&be->flm.v17.control->lds, value, get);
+ break;
+ case HW_FLM_CONTROL_LFS:
+ get_set(&be->flm.v17.control->lfs, value, get);
+ break;
+ case HW_FLM_CONTROL_LIS:
+ get_set(&be->flm.v17.control->lis, value, get);
+ break;
+ case HW_FLM_CONTROL_UDS:
+ get_set(&be->flm.v17.control->uds, value, get);
+ break;
+ case HW_FLM_CONTROL_UIS:
+ get_set(&be->flm.v17.control->uis, value, get);
+ break;
+ case HW_FLM_CONTROL_RDS:
+ get_set(&be->flm.v17.control->rds, value, get);
+ break;
+ case HW_FLM_CONTROL_RIS:
+ get_set(&be->flm.v17.control->ris, value, get);
+ break;
+ case HW_FLM_CONTROL_PDS:
+ get_set(&be->flm.v17.control->pds, value, get);
+ break;
+ case HW_FLM_CONTROL_PIS:
+ get_set(&be->flm.v17.control->pis, value, get);
+ break;
+ case HW_FLM_CONTROL_CRCWR:
+ get_set(&be->flm.v17.control->crcwr, value, get);
+ break;
+ case HW_FLM_CONTROL_CRCRD:
+ get_set(&be->flm.v17.control->crcrd, value, get);
+ break;
+ case HW_FLM_CONTROL_RBL:
+ get_set(&be->flm.v17.control->rbl, value, get);
+ break;
+ case HW_FLM_CONTROL_EAB:
+ get_set(&be->flm.v17.control->eab, value, get);
+ break;
+ case HW_FLM_CONTROL_SPLIT_SDRAM_USAGE:
+ get_set(&be->flm.v17.control->split_sdram_usage, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_flm_control_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value)
+{
+ return hw_mod_flm_control_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_control_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value)
+{
+ return hw_mod_flm_control_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_status_flush(struct flow_api_backend_s *be)
+{
+ return be->iface->flm_status_flush(be->be_dev, &be->flm);
+}
+
+int hw_mod_flm_status_update(struct flow_api_backend_s *be)
+{
+ return be->iface->flm_status_update(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_status_mod(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value, int get)
+{
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_STATUS_CALIBDONE:
+ get_set(&be->flm.v17.status->calibdone, value, get);
+ break;
+ case HW_FLM_STATUS_INITDONE:
+ get_set(&be->flm.v17.status->initdone, value, get);
+ break;
+ case HW_FLM_STATUS_IDLE:
+ get_set(&be->flm.v17.status->idle, value, get);
+ break;
+ case HW_FLM_STATUS_CRITICAL:
+ get_set(&be->flm.v17.status->critical, value, get);
+ break;
+ case HW_FLM_STATUS_PANIC:
+ get_set(&be->flm.v17.status->panic, value, get);
+ break;
+ case HW_FLM_STATUS_CRCERR:
+ get_set(&be->flm.v17.status->crcerr, value, get);
+ break;
+ case HW_FLM_STATUS_EFT_BP:
+ get_set(&be->flm.v17.status->eft_bp, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_flm_status_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value)
+{
+ return hw_mod_flm_status_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_status_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value)
+{
+ return hw_mod_flm_status_mod(be, field, value, 1);
+}
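
Together the control and status helpers support the usual write-flush-poll shape. A hedged bring-up sketch - the real init handshake is not shown in this patch:

hw_mod_flm_control_set(be, HW_FLM_CONTROL_INIT, 1);
hw_mod_flm_control_flush(be);

uint32_t done = 0;

do {
	hw_mod_flm_status_update(be); /* refresh the cache from hardware */
	hw_mod_flm_status_get(be, HW_FLM_STATUS_INITDONE, &done);
} while (!done);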
+
+int hw_mod_flm_timeout_flush(struct flow_api_backend_s *be)
+{
+ return be->iface->flm_timeout_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_timeout_mod(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value, int get)
+{
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_TIMEOUT_T:
+ get_set(&be->flm.v17.timeout->t, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_flm_timeout_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value)
+{
+ return hw_mod_flm_timeout_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_timeout_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value)
+{
+ return hw_mod_flm_timeout_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_scrub_flush(struct flow_api_backend_s *be)
+{
+ return be->iface->flm_scrub_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_scrub_mod(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value, int get)
+{
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_SCRUB_I:
+ get_set(&be->flm.v17.scrub->i, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_flm_scrub_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value)
+{
+ return hw_mod_flm_scrub_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_scrub_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value)
+{
+ return hw_mod_flm_scrub_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_load_bin_flush(struct flow_api_backend_s *be)
+{
+ return be->iface->flm_load_bin_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_load_bin_mod(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value,
+ int get)
+{
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_LOAD_BIN:
+ get_set(&be->flm.v17.load_bin->bin, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_flm_load_bin_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value)
+{
+ return hw_mod_flm_load_bin_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_bin_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value)
+{
+ return hw_mod_flm_load_bin_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_load_pps_flush(struct flow_api_backend_s *be)
+{
+ return be->iface->flm_load_pps_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_load_pps_mod(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value,
+ int get)
+{
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_LOAD_PPS:
+ get_set(&be->flm.v17.load_pps->pps, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_flm_load_pps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value)
+{
+ return hw_mod_flm_load_pps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_pps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value)
+{
+ return hw_mod_flm_load_pps_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_load_lps_flush(struct flow_api_backend_s *be)
+{
+ return be->iface->flm_load_lps_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_load_lps_mod(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value,
+ int get)
+{
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_LOAD_LPS:
+ get_set(&be->flm.v17.load_lps->lps, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_flm_load_lps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value)
+{
+ return hw_mod_flm_load_lps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_lps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value)
+{
+ return hw_mod_flm_load_lps_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_load_aps_flush(struct flow_api_backend_s *be)
+{
+ return be->iface->flm_load_aps_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_load_aps_mod(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value,
+ int get)
+{
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_LOAD_APS:
+ get_set(&be->flm.v17.load_aps->aps, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_flm_load_aps_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value)
+{
+ return hw_mod_flm_load_aps_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_load_aps_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value)
+{
+ return hw_mod_flm_load_aps_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_prio_flush(struct flow_api_backend_s *be)
+{
+ return be->iface->flm_prio_flush(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_prio_mod(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value, int get)
+{
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_PRIO_LIMIT0:
+ get_set(&be->flm.v17.prio->limit0, value, get);
+ break;
+ case HW_FLM_PRIO_FT0:
+ get_set(&be->flm.v17.prio->ft0, value, get);
+ break;
+ case HW_FLM_PRIO_LIMIT1:
+ get_set(&be->flm.v17.prio->limit1, value, get);
+ break;
+ case HW_FLM_PRIO_FT1:
+ get_set(&be->flm.v17.prio->ft1, value, get);
+ break;
+ case HW_FLM_PRIO_LIMIT2:
+ get_set(&be->flm.v17.prio->limit2, value, get);
+ break;
+ case HW_FLM_PRIO_FT2:
+ get_set(&be->flm.v17.prio->ft2, value, get);
+ break;
+ case HW_FLM_PRIO_LIMIT3:
+ get_set(&be->flm.v17.prio->limit3, value, get);
+ break;
+ case HW_FLM_PRIO_FT3:
+ get_set(&be->flm.v17.prio->ft3, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_flm_prio_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t value)
+{
+ return hw_mod_flm_prio_mod(be, field, &value, 0);
+}
+
+int hw_mod_flm_prio_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value)
+{
+ return hw_mod_flm_prio_mod(be, field, value, 1);
+}
+
+int hw_mod_flm_pst_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->flm.nb_pst_profiles;
+ if ((unsigned int)(start_idx + count) > be->flm.nb_pst_profiles)
+ return error_index_too_large(__func__);
+ return be->iface->flm_pst_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+static int hw_mod_flm_pst_mod(struct flow_api_backend_s *be,
+ enum hw_flm_e field, int index, uint32_t *value,
+ int get)
+{
+	if (index < 0 || (unsigned int)index >= be->flm.nb_pst_profiles)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_PST_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->flm.v17.pst[index], (uint8_t)*value,
+ sizeof(struct flm_v17_pst_s));
+ break;
+ case HW_FLM_PST_BP:
+ get_set(&be->flm.v17.pst[index].bp, value, get);
+ break;
+ case HW_FLM_PST_PP:
+ get_set(&be->flm.v17.pst[index].pp, value, get);
+ break;
+ case HW_FLM_PST_TP:
+ get_set(&be->flm.v17.pst[index].tp, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_flm_pst_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_flm_pst_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_flm_pst_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_flm_pst_mod(be, field, index, value, 1);
+}
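+
+/*
+ * Illustrative use of the PRESET_ALL convention: writing
+ * HW_FLM_PST_PRESET_ALL memsets the whole cached entry to the given byte
+ * value, so an entry is typically cleared before its fields are programmed
+ * and flushed (a sketch, values hypothetical):
+ *
+ *	hw_mod_flm_pst_set(be, HW_FLM_PST_PRESET_ALL, idx, 0);
+ *	hw_mod_flm_pst_set(be, HW_FLM_PST_BP, idx, bp);
+ *	hw_mod_flm_pst_flush(be, idx, 1);
+ */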
+
+int hw_mod_flm_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->flm.nb_categories;
+ if ((unsigned int)(start_idx + count) > be->flm.nb_categories)
+ return error_index_too_large(__func__);
+ return be->iface->flm_rcp_flush(be->be_dev, &be->flm, start_idx, count);
+}
+
+static int hw_mod_flm_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_flm_e field, int index, uint32_t *value,
+ int get)
+{
+	if (index < 0 || (unsigned int)index >= be->flm.nb_categories)
+		return error_index_too_large(__func__);
+
+	switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_RCP_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->flm.v17.rcp[index], (uint8_t)*value,
+ sizeof(struct flm_v17_rcp_s));
+ break;
+ case HW_FLM_RCP_LOOKUP:
+ get_set(&be->flm.v17.rcp[index].lookup, value, get);
+ break;
+ case HW_FLM_RCP_QW0_DYN:
+ get_set(&be->flm.v17.rcp[index].qw0_dyn, value, get);
+ break;
+ case HW_FLM_RCP_QW0_OFS:
+ get_set(&be->flm.v17.rcp[index].qw0_ofs, value, get);
+ break;
+ case HW_FLM_RCP_QW0_SEL:
+ get_set(&be->flm.v17.rcp[index].qw0_sel, value, get);
+ break;
+ case HW_FLM_RCP_QW4_DYN:
+ get_set(&be->flm.v17.rcp[index].qw4_dyn, value, get);
+ break;
+ case HW_FLM_RCP_QW4_OFS:
+ get_set(&be->flm.v17.rcp[index].qw4_ofs, value, get);
+ break;
+ case HW_FLM_RCP_SW8_DYN:
+ get_set(&be->flm.v17.rcp[index].sw8_dyn, value, get);
+ break;
+ case HW_FLM_RCP_SW8_OFS:
+ get_set(&be->flm.v17.rcp[index].sw8_ofs, value, get);
+ break;
+ case HW_FLM_RCP_SW8_SEL:
+ get_set(&be->flm.v17.rcp[index].sw8_sel, value, get);
+ break;
+ case HW_FLM_RCP_SW9_DYN:
+ get_set(&be->flm.v17.rcp[index].sw9_dyn, value, get);
+ break;
+ case HW_FLM_RCP_SW9_OFS:
+ get_set(&be->flm.v17.rcp[index].sw9_ofs, value, get);
+ break;
+ case HW_FLM_RCP_MASK:
+ if (get) {
+ memcpy(value, be->flm.v17.rcp[index].mask,
+ sizeof(((struct flm_v17_rcp_s *)0)
+ ->mask));
+ } else {
+ memcpy(be->flm.v17.rcp[index].mask, value,
+ sizeof(((struct flm_v17_rcp_s *)0)
+ ->mask));
+ }
+ break;
+ case HW_FLM_RCP_KID:
+ get_set(&be->flm.v17.rcp[index].kid, value, get);
+ break;
+ case HW_FLM_RCP_OPN:
+ get_set(&be->flm.v17.rcp[index].opn, value, get);
+ break;
+ case HW_FLM_RCP_IPN:
+ get_set(&be->flm.v17.rcp[index].ipn, value, get);
+ break;
+ case HW_FLM_RCP_BYT_DYN:
+ get_set(&be->flm.v17.rcp[index].byt_dyn, value, get);
+ break;
+ case HW_FLM_RCP_BYT_OFS:
+ get_set(&be->flm.v17.rcp[index].byt_ofs, value, get);
+ break;
+ case HW_FLM_RCP_TXPLM:
+ get_set(&be->flm.v17.rcp[index].txplm, value, get);
+ break;
+ case HW_FLM_RCP_AUTO_IPV4_MASK:
+ get_set(&be->flm.v17.rcp[index].auto_ipv4_mask, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_flm_rcp_set_mask(struct flow_api_backend_s *be, enum hw_flm_e field,
+ int index, uint32_t *value)
+{
+	if (field != HW_FLM_RCP_MASK)
+		return error_unsup_field(__func__);
+ return hw_mod_flm_rcp_mod(be, field, index, value, 0);
+}
+
+int hw_mod_flm_rcp_set(struct flow_api_backend_s *be, enum hw_flm_e field,
+ int index, uint32_t value)
+{
+	if (field == HW_FLM_RCP_MASK)
+		return error_unsup_field(__func__);
+ return hw_mod_flm_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_flm_rcp_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_flm_rcp_mod(be, field, index, value, 1);
+}
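+
+/*
+ * Sketch of why the mask has a dedicated setter: HW_FLM_RCP_MASK is a
+ * 10-word array (see struct flm_v17_rcp_s), so it cannot pass through the
+ * scalar hw_mod_flm_rcp_set() path and is written via the pointer variant
+ * instead (values illustrative):
+ *
+ *	uint32_t mask[10] = { 0xffffffff, 0xffffffff };
+ *
+ *	hw_mod_flm_rcp_set_mask(be, HW_FLM_RCP_MASK, idx, mask);
+ *	hw_mod_flm_rcp_set(be, HW_FLM_RCP_LOOKUP, idx, 1);
+ *	hw_mod_flm_rcp_flush(be, idx, 1);
+ */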
+
+int hw_mod_flm_buf_ctrl_update(struct flow_api_backend_s *be)
+{
+ return be->iface->flm_buf_ctrl_update(be->be_dev, &be->flm);
+}
+
+static int hw_mod_flm_buf_ctrl_mod_get(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value)
+{
+ int get = 1; /* Only get supported */
+
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_BUF_CTRL_LRN_FREE:
+ get_set(&be->flm.v17.buf_ctrl->lrn_free, value, get);
+ break;
+ case HW_FLM_BUF_CTRL_INF_AVAIL:
+ get_set(&be->flm.v17.buf_ctrl->inf_avail, value, get);
+ break;
+ case HW_FLM_BUF_CTRL_STA_AVAIL:
+ get_set(&be->flm.v17.buf_ctrl->sta_avail, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_flm_buf_ctrl_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value)
+{
+ return hw_mod_flm_buf_ctrl_mod_get(be, field, value);
+}
+
+int hw_mod_flm_stat_update(struct flow_api_backend_s *be)
+{
+ return be->iface->flm_stat_update(be->be_dev, &be->flm);
+}
+
+int hw_mod_flm_stat_get(struct flow_api_backend_s *be, enum hw_flm_e field,
+ uint32_t *value)
+{
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_STAT_LRN_DONE:
+ *value = be->flm.v17.lrn_done->cnt;
+ break;
+ case HW_FLM_STAT_LRN_IGNORE:
+ *value = be->flm.v17.lrn_ignore->cnt;
+ break;
+ case HW_FLM_STAT_LRN_FAIL:
+ *value = be->flm.v17.lrn_fail->cnt;
+ break;
+ case HW_FLM_STAT_UNL_DONE:
+ *value = be->flm.v17.unl_done->cnt;
+ break;
+ case HW_FLM_STAT_UNL_IGNORE:
+ *value = be->flm.v17.unl_ignore->cnt;
+ break;
+ case HW_FLM_STAT_REL_DONE:
+ *value = be->flm.v17.rel_done->cnt;
+ break;
+ case HW_FLM_STAT_REL_IGNORE:
+ *value = be->flm.v17.rel_ignore->cnt;
+ break;
+ case HW_FLM_STAT_PRB_DONE:
+ *value = be->flm.v17.prb_done->cnt;
+ break;
+ case HW_FLM_STAT_PRB_IGNORE:
+ *value = be->flm.v17.prb_ignore->cnt;
+ break;
+ case HW_FLM_STAT_AUL_DONE:
+ *value = be->flm.v17.aul_done->cnt;
+ break;
+ case HW_FLM_STAT_AUL_IGNORE:
+ *value = be->flm.v17.aul_ignore->cnt;
+ break;
+ case HW_FLM_STAT_AUL_FAIL:
+ *value = be->flm.v17.aul_fail->cnt;
+ break;
+ case HW_FLM_STAT_TUL_DONE:
+ *value = be->flm.v17.tul_done->cnt;
+ break;
+ case HW_FLM_STAT_FLOWS:
+ *value = be->flm.v17.flows->cnt;
+ break;
+
+ default: {
+ if (_VER_ < 18)
+ return error_unsup_field(__func__);
+
+ switch (field) {
+ case HW_FLM_STAT_STA_DONE:
+ *value = be->flm.v20.sta_done->cnt;
+ break;
+ case HW_FLM_STAT_INF_DONE:
+ *value = be->flm.v20.inf_done->cnt;
+ break;
+ case HW_FLM_STAT_INF_SKIP:
+ *value = be->flm.v20.inf_skip->cnt;
+ break;
+ case HW_FLM_STAT_PCK_HIT:
+ *value = be->flm.v20.pck_hit->cnt;
+ break;
+ case HW_FLM_STAT_PCK_MISS:
+ *value = be->flm.v20.pck_miss->cnt;
+ break;
+ case HW_FLM_STAT_PCK_UNH:
+ *value = be->flm.v20.pck_unh->cnt;
+ break;
+ case HW_FLM_STAT_PCK_DIS:
+ *value = be->flm.v20.pck_dis->cnt;
+ break;
+ case HW_FLM_STAT_CSH_HIT:
+ *value = be->flm.v20.csh_hit->cnt;
+ break;
+ case HW_FLM_STAT_CSH_MISS:
+ *value = be->flm.v20.csh_miss->cnt;
+ break;
+ case HW_FLM_STAT_CSH_UNH:
+ *value = be->flm.v20.csh_unh->cnt;
+ break;
+ case HW_FLM_STAT_CUC_START:
+ *value = be->flm.v20.cuc_start->cnt;
+ break;
+ case HW_FLM_STAT_CUC_MOVE:
+ *value = be->flm.v20.cuc_move->cnt;
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ }
+ break;
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
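+
+/*
+ * Counter read sketch: hw_mod_flm_stat_update() refreshes the cached
+ * counters from hardware, after which individual values can be read. The
+ * v20-only counters (HW_FLM_STAT_STA_DONE and later) return an
+ * unsupported-field error on v17:
+ *
+ *	uint32_t flows = 0;
+ *
+ *	hw_mod_flm_stat_update(be);
+ *	hw_mod_flm_stat_get(be, HW_FLM_STAT_FLOWS, &flows);
+ */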
+
+int hw_mod_flm_lrn_data_set_flush(struct flow_api_backend_s *be,
+ enum hw_flm_e field, const uint32_t *value)
+{
+ int ret = 0;
+
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_FLOW_LRN_DATA_V17:
+ ret = be->iface->flm_lrn_data_flush(be->be_dev,
+ &be->flm, value,
+ sizeof(struct flm_v17_lrn_data_s) /
+ sizeof(uint32_t));
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return ret;
+}
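+
+/*
+ * Learn-record sketch (illustrative): the caller fills a packed
+ * struct flm_v17_lrn_data_s (defined in hw_mod_flm_v17.h below), marks the
+ * end of the record with eor = 1 and hands it to the backend as an array
+ * of 32-bit words. The opcode value is FPGA defined and hypothetical here:
+ *
+ *	struct flm_v17_lrn_data_s lrn;
+ *
+ *	memset(&lrn, 0x0, sizeof(lrn));
+ *	lrn.kid = kid;
+ *	lrn.op = op;
+ *	lrn.eor = 1;
+ *	hw_mod_flm_lrn_data_set_flush(be, HW_FLM_FLOW_LRN_DATA_V17,
+ *				      (const uint32_t *)&lrn);
+ */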
+
+int hw_mod_flm_inf_data_update_get(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value,
+ uint32_t word_cnt)
+{
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_FLOW_INF_DATA_V17:
+ be->iface->flm_inf_data_update(be->be_dev, &be->flm,
+ value, word_cnt);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_flm_sta_data_update_get(struct flow_api_backend_s *be,
+ enum hw_flm_e field, uint32_t *value)
+{
+ switch (_VER_) {
+ case 17:
+ case 20:
+ switch (field) {
+ case HW_FLM_FLOW_STA_DATA_V17:
+ be->iface->flm_sta_data_update(be->be_dev,
+ &be->flm, value,
+ sizeof(struct flm_v17_sta_data_s) /
+ sizeof(uint32_t));
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V17_H_
+#define _HW_MOD_FLM_V17_H_
+
+struct flm_v17_mbr_idx_overlay {
+ uint64_t a : 28;
+ uint64_t b : 28;
+ uint64_t pad : 4;
+};
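+
+/*
+ * A hedged sketch of how this overlay might be used: it packs two 28-bit
+ * member indexes into the low 56 bits of a 64-bit word, so the mbr_idx[14]
+ * byte array in the learn record below (4 x 28 bits) can plausibly be
+ * accessed pairwise, pair 0/1 at byte offset 0 and pair 2/3 at byte
+ * offset 7. The FLM_V17_MBR_IDx macros referenced at that field are
+ * assumed to implement this addressing:
+ *
+ *	struct flm_v17_mbr_idx_overlay *ovl =
+ *		(struct flm_v17_mbr_idx_overlay *)lrn->mbr_idx;
+ *
+ *	ovl->a = idx0;
+ *	ovl->b = idx1;
+ */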
+
+struct flm_v17_control_s {
+ uint32_t enable;
+ uint32_t init;
+ uint32_t lds;
+ uint32_t lfs;
+ uint32_t lis;
+ uint32_t uds;
+ uint32_t uis;
+ uint32_t rds;
+ uint32_t ris;
+ uint32_t pds;
+ uint32_t pis;
+ uint32_t crcwr;
+ uint32_t crcrd;
+ uint32_t rbl;
+ uint32_t eab;
+ uint32_t split_sdram_usage;
+};
+
+struct flm_v17_status_s {
+ uint32_t calibdone;
+ uint32_t initdone;
+ uint32_t idle;
+ uint32_t critical;
+ uint32_t panic;
+ uint32_t crcerr;
+ uint32_t eft_bp;
+};
+
+struct flm_v17_timeout_s {
+ uint32_t t;
+};
+
+struct flm_v17_scrub_s {
+ uint32_t i;
+};
+
+struct flm_v17_load_bin_s {
+ uint32_t bin;
+};
+
+struct flm_v17_load_pps_s {
+ uint32_t pps;
+};
+
+struct flm_v17_load_lps_s {
+ uint32_t lps;
+};
+
+struct flm_v17_load_aps_s {
+ uint32_t aps;
+};
+
+struct flm_v17_prio_s {
+ uint32_t limit0;
+ uint32_t ft0;
+ uint32_t limit1;
+ uint32_t ft1;
+ uint32_t limit2;
+ uint32_t ft2;
+ uint32_t limit3;
+ uint32_t ft3;
+};
+
+struct flm_v17_pst_s {
+ uint32_t bp;
+ uint32_t pp;
+ uint32_t tp;
+};
+
+struct flm_v17_rcp_s {
+ uint32_t lookup;
+ uint32_t qw0_dyn;
+ uint32_t qw0_ofs;
+ uint32_t qw0_sel;
+ uint32_t qw4_dyn;
+ uint32_t qw4_ofs;
+ uint32_t sw8_dyn;
+ uint32_t sw8_ofs;
+ uint32_t sw8_sel;
+ uint32_t sw9_dyn;
+ uint32_t sw9_ofs;
+ uint32_t mask[10];
+ uint32_t kid;
+ uint32_t opn;
+ uint32_t ipn;
+ uint32_t byt_dyn;
+ uint32_t byt_ofs;
+ uint32_t txplm;
+ uint32_t auto_ipv4_mask;
+};
+
+struct flm_v17_buf_ctrl_s {
+ uint32_t lrn_free;
+ uint32_t inf_avail;
+ uint32_t sta_avail;
+};
+
+#pragma pack(1)
+struct flm_v17_lrn_data_s {
+ uint32_t sw9; /* 31:0 (32) */
+ uint32_t sw8; /* 63:32 (32) */
+ uint32_t qw4[4]; /* 191:64 (128) */
+ uint32_t qw0[4]; /* 319:192 (128) */
+ uint8_t prot; /* 327:320 (8) */
+ uint8_t kid; /* 335:328 (8) */
+ uint32_t nat_ip; /* 367:336 (32) */
+ uint32_t teid; /* 399:368 (32) */
+ uint16_t nat_port; /* 415:400 (16) */
+ uint16_t rate; /* 431:416 (16) */
+ uint16_t size; /* 447:432 (16) */
+ uint32_t color; /* 479:448 (32) */
+ uint32_t adj; /* 511:480 (32) */
+ uint8_t id[9]; /* 583:512 (72) */
+ uint16_t fill : 12; /* 595:584 (12) */
+ uint16_t ft : 4; /* 599:596 (4) */
+ uint8_t ft_mbr : 4; /* 603:600 (4) */
+	uint8_t ft_miss : 4; /* 607:604 (4) */
+
+ /* 635:608, 663:636, 691:664, 719:692 (4 x 28) Get/set with macros FLM_V17_MBR_IDx */
+ uint8_t mbr_idx[14];
+ uint32_t vol_idx : 3; /* 722:720 (3) */
+ uint32_t stat_prof : 4; /* 726:723 (4) */
+ uint32_t prio : 2; /* 728:727 (2) */
+ uint32_t ent : 1; /* 729:729 (1) */
+ uint32_t op : 4; /* 733:730 (4) */
+ uint32_t dscp : 6; /* 739:734 (6) */
+ uint32_t qfi : 6; /* 745:740 (6) */
+ uint32_t rqi : 1; /* 746:746 (1) */
+ uint32_t nat_en : 1; /* 747:747 (1) */
+ uint32_t pad0 : 4; /* 751:748 (4) */
+	uint16_t pad1 : 15; /* 766:752 (15) */
+ uint16_t eor : 1; /* 767:767 (1) */
+};
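+
+/*
+ * The bit-range comments above add up to 768 bits, so with #pragma pack(1)
+ * the record is exactly 96 bytes - the 24 32-bit words that
+ * hw_mod_flm_lrn_data_set_flush() passes to the backend. A compile-time
+ * check along these lines (C11, illustrative) would catch packing drift:
+ *
+ *	_Static_assert(sizeof(struct flm_v17_lrn_data_s) == 96,
+ *		       "FLM v17 learn record must be 768 bits");
+ */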
+
+struct flm_v17_inf_data_s {
+ uint64_t bytes;
+ uint64_t packets;
+ uint64_t ts;
+	uint64_t id0; /* id0 and id1 combine into a 72-bit int */
+ uint32_t id1 : 8;
+ uint32_t cause : 3;
+ uint32_t pad : 20;
+ uint32_t eor : 1;
+};
+
+struct flm_v17_sta_data_s {
+	uint64_t id0; /* id0 and id1 combine into a 72-bit int */
+ uint32_t id1 : 8;
+ uint32_t lds : 1;
+ uint32_t lfs : 1;
+ uint32_t lis : 1;
+ uint32_t uds : 1;
+ uint32_t uis : 1;
+ uint32_t rds : 1;
+ uint32_t ris : 1;
+ uint32_t pds : 1;
+ uint32_t pis : 1;
+ uint32_t pad : 14;
+ uint32_t eor : 1;
+};
+
+#pragma pack()
+
+struct flm_v17_stat_lrn_done_s {
+ uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_ignore_s {
+ uint32_t cnt;
+};
+
+struct flm_v17_stat_lrn_fail_s {
+ uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_done_s {
+ uint32_t cnt;
+};
+
+struct flm_v17_stat_unl_ignore_s {
+ uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_done_s {
+ uint32_t cnt;
+};
+
+struct flm_v17_stat_rel_ignore_s {
+ uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_done_s {
+ uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_ignore_s {
+ uint32_t cnt;
+};
+
+struct flm_v17_stat_aul_fail_s {
+ uint32_t cnt;
+};
+
+struct flm_v17_stat_tul_done_s {
+ uint32_t cnt;
+};
+
+struct flm_v17_stat_flows_s {
+ uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_done_s {
+ uint32_t cnt;
+};
+
+struct flm_v17_stat_prb_ignore_s {
+ uint32_t cnt;
+};
+
+struct hw_mod_flm_v17_s {
+ struct flm_v17_control_s *control;
+ struct flm_v17_status_s *status;
+ struct flm_v17_timeout_s *timeout;
+ struct flm_v17_scrub_s *scrub;
+ struct flm_v17_load_bin_s *load_bin;
+ struct flm_v17_load_pps_s *load_pps;
+ struct flm_v17_load_lps_s *load_lps;
+ struct flm_v17_load_aps_s *load_aps;
+ struct flm_v17_prio_s *prio;
+ struct flm_v17_pst_s *pst;
+ struct flm_v17_rcp_s *rcp;
+ struct flm_v17_buf_ctrl_s *buf_ctrl;
+ /* lrn_data is not handled by struct */
+ /* inf_data is not handled by struct */
+ /* sta_data is not handled by struct */
+ struct flm_v17_stat_lrn_done_s *lrn_done;
+ struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+ struct flm_v17_stat_lrn_fail_s *lrn_fail;
+ struct flm_v17_stat_unl_done_s *unl_done;
+ struct flm_v17_stat_unl_ignore_s *unl_ignore;
+ struct flm_v17_stat_rel_done_s *rel_done;
+ struct flm_v17_stat_rel_ignore_s *rel_ignore;
+ struct flm_v17_stat_aul_done_s *aul_done;
+ struct flm_v17_stat_aul_ignore_s *aul_ignore;
+ struct flm_v17_stat_aul_fail_s *aul_fail;
+ struct flm_v17_stat_tul_done_s *tul_done;
+ struct flm_v17_stat_flows_s *flows;
+ struct flm_v17_stat_prb_done_s *prb_done;
+ struct flm_v17_stat_prb_ignore_s *prb_ignore;
+};
+
+#endif /* _HW_MOD_FLM_V17_H_ */
new file mode 100644
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_FLM_V20_H_
+#define _HW_MOD_FLM_V20_H_
+
+struct flm_v20_stat_sta_done_s {
+ uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_done_s {
+ uint32_t cnt;
+};
+
+struct flm_v20_stat_inf_skip_s {
+ uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_hit_s {
+ uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_miss_s {
+ uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_unh_s {
+ uint32_t cnt;
+};
+
+struct flm_v20_stat_pck_dis_s {
+ uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_hit_s {
+ uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_miss_s {
+ uint32_t cnt;
+};
+
+struct flm_v20_stat_csh_unh_s {
+ uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_start_s {
+ uint32_t cnt;
+};
+
+struct flm_v20_stat_cuc_move_s {
+ uint32_t cnt;
+};
+
+struct hw_mod_flm_v20_s {
+ struct flm_v17_control_s *control;
+ struct flm_v17_status_s *status;
+ struct flm_v17_timeout_s *timeout;
+ struct flm_v17_scrub_s *scrub;
+ struct flm_v17_load_bin_s *load_bin;
+ struct flm_v17_load_pps_s *load_pps;
+ struct flm_v17_load_lps_s *load_lps;
+ struct flm_v17_load_aps_s *load_aps;
+ struct flm_v17_prio_s *prio;
+ struct flm_v17_pst_s *pst;
+ struct flm_v17_rcp_s *rcp;
+ struct flm_v17_buf_ctrl_s *buf_ctrl;
+ /*
+ * lrn_data is not handled by struct
+ * inf_data is not handled by struct
+ * sta_data is not handled by struct
+ */
+ struct flm_v17_stat_lrn_done_s *lrn_done;
+ struct flm_v17_stat_lrn_ignore_s *lrn_ignore;
+ struct flm_v17_stat_lrn_fail_s *lrn_fail;
+ struct flm_v17_stat_unl_done_s *unl_done;
+ struct flm_v17_stat_unl_ignore_s *unl_ignore;
+ struct flm_v17_stat_rel_done_s *rel_done;
+ struct flm_v17_stat_rel_ignore_s *rel_ignore;
+ struct flm_v17_stat_aul_done_s *aul_done;
+ struct flm_v17_stat_aul_ignore_s *aul_ignore;
+ struct flm_v17_stat_aul_fail_s *aul_fail;
+ struct flm_v17_stat_tul_done_s *tul_done;
+ struct flm_v17_stat_flows_s *flows;
+ struct flm_v17_stat_prb_done_s *prb_done;
+ struct flm_v17_stat_prb_ignore_s *prb_ignore;
+ struct flm_v20_stat_sta_done_s *sta_done;
+ struct flm_v20_stat_inf_done_s *inf_done;
+ struct flm_v20_stat_inf_skip_s *inf_skip;
+ struct flm_v20_stat_pck_hit_s *pck_hit;
+ struct flm_v20_stat_pck_miss_s *pck_miss;
+ struct flm_v20_stat_pck_unh_s *pck_unh;
+ struct flm_v20_stat_pck_dis_s *pck_dis;
+ struct flm_v20_stat_csh_hit_s *csh_hit;
+ struct flm_v20_stat_csh_miss_s *csh_miss;
+ struct flm_v20_stat_csh_unh_s *csh_unh;
+ struct flm_v20_stat_cuc_start_s *cuc_start;
+ struct flm_v20_stat_cuc_move_s *cuc_move;
+};
+
+#endif /* _HW_MOD_FLM_V20_H_ */
new file mode 100644
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HSH"
+#define _VER_ be->hsh.ver
+
+#define HSH_RCP_ENTRIES_V4 16
+#define HSH_RCP_ENTRIES_V5 32
+#define HSH_RCP_MAC_PORT_MASK_SIZE 4
+#define HSH_RCP_WORD_MASK_SIZE 10
+
+bool hw_mod_hsh_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_hsh_present(be->be_dev);
+}
+
+int hw_mod_hsh_alloc(struct flow_api_backend_s *be)
+{
+ _VER_ = be->iface->get_hsh_version(be->be_dev);
+ NT_LOG(DBG, FILTER, "HSH MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+ VER_MINOR(_VER_));
+
+ switch (_VER_) {
+ case 5:
+ be->hsh.nb_rcp = HSH_RCP_ENTRIES_V5;
+ if (!callocate_mod(CAST_COMMON(&be->hsh), 1,
+ &be->hsh.v5.rcp,
+ be->hsh.nb_rcp,
+ sizeof(struct hsh_v5_rcp_s)))
+ return -1;
+ break;
+ /* end case 5 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+ return 0;
+}
+
+void hw_mod_hsh_free(struct flow_api_backend_s *be)
+{
+ if (be->hsh.base) {
+ free(be->hsh.base);
+ be->hsh.base = NULL;
+ }
+}
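+
+/*
+ * Note (assumption about the helper, consistent with the free above):
+ * callocate_mod() appears to make a single allocation sliced into the
+ * requested arrays, storing the common base pointer in be->hsh.base, which
+ * is why one free() releases the entire module cache.
+ */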
+
+int hw_mod_hsh_reset(struct flow_api_backend_s *be)
+{
+ /* Zero entire cache area */
+ ZERO_MOD_CACHE(&be->hsh);
+
+ NT_LOG(DBG, FILTER, "INIT HSH RCP\n");
+ return hw_mod_hsh_rcp_flush(be, 0, be->hsh.nb_rcp);
+}
+
+int hw_mod_hsh_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->hsh.nb_rcp;
+ if ((start_idx + count) > (int)be->hsh.nb_rcp)
+ return error_index_too_large(__func__);
+ return be->iface->hsh_rcp_flush(be->be_dev, &be->hsh, start_idx, count);
+}
+
+static int hw_mod_hsh_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_hsh_e field, uint32_t index,
+ uint32_t word_off, uint32_t *value, int get)
+{
+	int rv = 0;
+
+	if (index >= be->hsh.nb_rcp)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 5:
+ switch (field) {
+ case HW_HSH_RCP_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->hsh.v5.rcp[index], (uint8_t)*value,
+ sizeof(struct hsh_v5_rcp_s));
+ break;
+ case HW_HSH_RCP_COMPARE:
+ rv = do_compare_indexes(be->hsh.v5.rcp,
+ sizeof(struct hsh_v5_rcp_s), index, word_off,
+ be->hsh.nb_rcp, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_HSH_RCP_FIND:
+ rv = find_equal_index(be->hsh.v5.rcp,
+ sizeof(struct hsh_v5_rcp_s), index, word_off,
+ be->hsh.nb_rcp, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_HSH_RCP_LOAD_DIST_TYPE:
+ get_set(&be->hsh.v5.rcp[index].load_dist_type, value, get);
+ break;
+ case HW_HSH_RCP_MAC_PORT_MASK:
+			if (word_off >= HSH_RCP_MAC_PORT_MASK_SIZE)
+ return error_word_off_too_large(__func__);
+ get_set(&be->hsh.v5.rcp[index].mac_port_mask[word_off],
+ value, get);
+ break;
+ case HW_HSH_RCP_SORT:
+ get_set(&be->hsh.v5.rcp[index].sort, value, get);
+ break;
+ case HW_HSH_RCP_QW0_PE:
+ get_set(&be->hsh.v5.rcp[index].qw0_pe, value, get);
+ break;
+ case HW_HSH_RCP_QW0_OFS:
+ get_set_signed(&be->hsh.v5.rcp[index].qw0_ofs, value, get);
+ break;
+ case HW_HSH_RCP_QW4_PE:
+ get_set(&be->hsh.v5.rcp[index].qw4_pe, value, get);
+ break;
+ case HW_HSH_RCP_QW4_OFS:
+ get_set_signed(&be->hsh.v5.rcp[index].qw4_ofs, value, get);
+ break;
+ case HW_HSH_RCP_W8_PE:
+ get_set(&be->hsh.v5.rcp[index].w8_pe, value, get);
+ break;
+ case HW_HSH_RCP_W8_OFS:
+ get_set_signed(&be->hsh.v5.rcp[index].w8_ofs, value, get);
+ break;
+ case HW_HSH_RCP_W8_SORT:
+ get_set(&be->hsh.v5.rcp[index].w8_sort, value, get);
+ break;
+ case HW_HSH_RCP_W9_PE:
+ get_set(&be->hsh.v5.rcp[index].w9_pe, value, get);
+ break;
+ case HW_HSH_RCP_W9_OFS:
+ get_set_signed(&be->hsh.v5.rcp[index].w9_ofs, value, get);
+ break;
+ case HW_HSH_RCP_W9_SORT:
+ get_set(&be->hsh.v5.rcp[index].w9_sort, value, get);
+ break;
+ case HW_HSH_RCP_W9_P:
+ get_set(&be->hsh.v5.rcp[index].w9_p, value, get);
+ break;
+ case HW_HSH_RCP_P_MASK:
+ get_set(&be->hsh.v5.rcp[index].p_mask, value, get);
+ break;
+ case HW_HSH_RCP_WORD_MASK:
+			if (word_off >= HSH_RCP_WORD_MASK_SIZE)
+ return error_word_off_too_large(__func__);
+ get_set(&be->hsh.v5.rcp[index].word_mask[word_off],
+ value, get);
+ break;
+ case HW_HSH_RCP_SEED:
+ get_set(&be->hsh.v5.rcp[index].seed, value, get);
+ break;
+ case HW_HSH_RCP_TNL_P:
+ get_set(&be->hsh.v5.rcp[index].tnl_p, value, get);
+ break;
+ case HW_HSH_RCP_HSH_VALID:
+ get_set(&be->hsh.v5.rcp[index].hsh_valid, value, get);
+ break;
+ case HW_HSH_RCP_HSH_TYPE:
+ get_set(&be->hsh.v5.rcp[index].hsh_type, value, get);
+ break;
+ case HW_HSH_RCP_AUTO_IPV4_MASK:
+ get_set(&be->hsh.v5.rcp[index].auto_ipv4_mask, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 5 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_hsh_rcp_set(struct flow_api_backend_s *be, enum hw_hsh_e field,
+ uint32_t index, uint32_t word_off, uint32_t value)
+{
+ return hw_mod_hsh_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_hsh_rcp_get(struct flow_api_backend_s *be, enum hw_hsh_e field,
+ uint32_t index, uint32_t word_off, uint32_t *value)
+{
+ return hw_mod_hsh_rcp_mod(be, field, index, word_off, value, 1);
+}
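+
+/*
+ * Round-trip sketch (values illustrative): scalar recipe fields ignore
+ * word_off, while the array fields (mac_port_mask, word_mask) use it as
+ * the word index:
+ *
+ *	hw_mod_hsh_rcp_set(be, HW_HSH_RCP_LOAD_DIST_TYPE, idx, 0, 2);
+ *	hw_mod_hsh_rcp_set(be, HW_HSH_RCP_WORD_MASK, idx, 0, 0xffffffff);
+ *	hw_mod_hsh_rcp_flush(be, idx, 1);
+ */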
new file mode 100644
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HSH_V5_H_
+#define _HW_MOD_HSH_V5_H_
+
+struct hsh_v5_rcp_s {
+ uint32_t load_dist_type;
+ uint32_t mac_port_mask[4];
+ uint32_t sort;
+ uint32_t qw0_pe;
+ int32_t qw0_ofs;
+ uint32_t qw4_pe;
+ int32_t qw4_ofs;
+ uint32_t w8_pe;
+ int32_t w8_ofs;
+ uint32_t w8_sort;
+ uint32_t w9_pe;
+ int32_t w9_ofs;
+ uint32_t w9_sort;
+ uint32_t w9_p;
+ uint32_t p_mask;
+ uint32_t word_mask[10];
+ uint32_t seed;
+ uint32_t tnl_p;
+ uint32_t hsh_valid;
+ uint32_t hsh_type;
+ uint32_t auto_ipv4_mask;
+};
+
+struct hw_mod_hsh_v5_s {
+ struct hsh_v5_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HSH_V5_H_ */
new file mode 100644
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "HST"
+#define _VER_ be->hst.ver
+
+bool hw_mod_hst_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_hst_present(be->be_dev);
+}
+
+int hw_mod_hst_alloc(struct flow_api_backend_s *be)
+{
+ int nb;
+
+ _VER_ = be->iface->get_hst_version(be->be_dev);
+ NT_LOG(DBG, FILTER, "HST MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+ VER_MINOR(_VER_));
+
+ nb = be->iface->get_nb_hst_categories(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "hst_categories", _MOD_, _VER_);
+ be->hst.nb_hst_rcp_categories = (uint32_t)nb;
+
+ switch (_VER_) {
+ case 2:
+ if (!callocate_mod(CAST_COMMON(&be->hst), 1,
+ &be->hst.v2.rcp,
+ be->hst.nb_hst_rcp_categories,
+ sizeof(struct hst_v2_rcp_s)))
+ return -1;
+ break;
+ /* end case 2 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+void hw_mod_hst_free(struct flow_api_backend_s *be)
+{
+ if (be->hst.base) {
+ free(be->hst.base);
+ be->hst.base = NULL;
+ }
+}
+
+int hw_mod_hst_reset(struct flow_api_backend_s *be)
+{
+ /* Zero entire cache area */
+ ZERO_MOD_CACHE(&be->hst);
+
+ NT_LOG(DBG, FILTER, "INIT HST RCP\n");
+ return hw_mod_hst_rcp_flush(be, 0, ALL_ENTRIES);
+}
+
+int hw_mod_hst_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->hst.nb_hst_rcp_categories;
+ if ((unsigned int)(start_idx + count) > be->hst.nb_hst_rcp_categories)
+ return error_index_too_large(__func__);
+ return be->iface->hst_rcp_flush(be->be_dev, &be->hst, start_idx, count);
+}
+
+static int hw_mod_hst_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_hst_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+	int rv = 0;
+
+	if (index >= be->hst.nb_hst_rcp_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 2:
+ switch (field) {
+ case HW_HST_RCP_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->hst.v2.rcp[index], (uint8_t)*value,
+ sizeof(struct hst_v2_rcp_s));
+ break;
+		case HW_HST_RCP_FIND:
+			rv = find_equal_index(be->hst.v2.rcp,
+				sizeof(struct hst_v2_rcp_s), index, *value,
+				be->hst.nb_hst_rcp_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_HST_RCP_COMPARE:
+ rv = do_compare_indexes(be->hst.v2.rcp,
+ sizeof(struct hst_v2_rcp_s), index, *value,
+ be->hst.nb_hst_rcp_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_HST_RCP_STRIP_MODE:
+ get_set(&be->hst.v2.rcp[index].strip_mode, value, get);
+ break;
+ case HW_HST_RCP_START_DYN:
+ get_set(&be->hst.v2.rcp[index].start_dyn, value, get);
+ break;
+ case HW_HST_RCP_START_OFS:
+ get_set(&be->hst.v2.rcp[index].start_ofs, value, get);
+ break;
+ case HW_HST_RCP_END_DYN:
+ get_set(&be->hst.v2.rcp[index].end_dyn, value, get);
+ break;
+ case HW_HST_RCP_END_OFS:
+ get_set(&be->hst.v2.rcp[index].end_ofs, value, get);
+ break;
+ case HW_HST_RCP_MODIF0_CMD:
+ get_set(&be->hst.v2.rcp[index].modif0_cmd, value, get);
+ break;
+ case HW_HST_RCP_MODIF0_DYN:
+ get_set(&be->hst.v2.rcp[index].modif0_dyn, value, get);
+ break;
+ case HW_HST_RCP_MODIF0_OFS:
+ get_set(&be->hst.v2.rcp[index].modif0_ofs, value, get);
+ break;
+ case HW_HST_RCP_MODIF0_VALUE:
+ get_set(&be->hst.v2.rcp[index].modif0_value, value, get);
+ break;
+ case HW_HST_RCP_MODIF1_CMD:
+ get_set(&be->hst.v2.rcp[index].modif1_cmd, value, get);
+ break;
+ case HW_HST_RCP_MODIF1_DYN:
+ get_set(&be->hst.v2.rcp[index].modif1_dyn, value, get);
+ break;
+ case HW_HST_RCP_MODIF1_OFS:
+ get_set(&be->hst.v2.rcp[index].modif1_ofs, value, get);
+ break;
+ case HW_HST_RCP_MODIF1_VALUE:
+ get_set(&be->hst.v2.rcp[index].modif1_value, value, get);
+ break;
+ case HW_HST_RCP_MODIF2_CMD:
+ get_set(&be->hst.v2.rcp[index].modif2_cmd, value, get);
+ break;
+ case HW_HST_RCP_MODIF2_DYN:
+ get_set(&be->hst.v2.rcp[index].modif2_dyn, value, get);
+ break;
+ case HW_HST_RCP_MODIF2_OFS:
+ get_set(&be->hst.v2.rcp[index].modif2_ofs, value, get);
+ break;
+ case HW_HST_RCP_MODIF2_VALUE:
+ get_set(&be->hst.v2.rcp[index].modif2_value, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 2 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_hst_rcp_set(struct flow_api_backend_s *be, enum hw_hst_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_hst_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_hst_rcp_get(struct flow_api_backend_s *be, enum hw_hst_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_hst_rcp_mod(be, field, index, value, 1);
+}
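+
+/*
+ * Header-strip sketch (field semantics assumed, values hypothetical): a
+ * recipe stripping from one dynamic offset to another, with the three
+ * modifier slots left untouched:
+ *
+ *	hw_mod_hst_rcp_set(be, HW_HST_RCP_STRIP_MODE, idx, strip_mode);
+ *	hw_mod_hst_rcp_set(be, HW_HST_RCP_START_DYN, idx, start_dyn);
+ *	hw_mod_hst_rcp_set(be, HW_HST_RCP_END_DYN, idx, end_dyn);
+ *	hw_mod_hst_rcp_flush(be, idx, 1);
+ */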
new file mode 100644
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_HST_V2_H_
+#define _HW_MOD_HST_V2_H_
+
+struct hst_v2_rcp_s {
+ uint32_t strip_mode;
+ uint32_t start_dyn;
+ uint32_t start_ofs;
+ uint32_t end_dyn;
+ uint32_t end_ofs;
+ uint32_t modif0_cmd;
+ uint32_t modif0_dyn;
+ uint32_t modif0_ofs;
+ uint32_t modif0_value;
+ uint32_t modif1_cmd;
+ uint32_t modif1_dyn;
+ uint32_t modif1_ofs;
+ uint32_t modif1_value;
+ uint32_t modif2_cmd;
+ uint32_t modif2_dyn;
+ uint32_t modif2_ofs;
+ uint32_t modif2_value;
+};
+
+struct hw_mod_hst_v2_s {
+ struct hst_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_HST_V2_H_ */
new file mode 100644
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "IOA"
+#define _VER_ be->ioa.ver
+
+bool hw_mod_ioa_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_ioa_present(be->be_dev);
+}
+
+int hw_mod_ioa_alloc(struct flow_api_backend_s *be)
+{
+ _VER_ = be->iface->get_ioa_version(be->be_dev);
+ NT_LOG(DBG, FILTER, "IOA MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+ VER_MINOR(_VER_));
+
+ int nb = be->iface->get_nb_ioa_categories(be->be_dev);
+
+ if (nb <= 0)
+ return error_resource_count(__func__, "ioa_categories", _MOD_, _VER_);
+ be->ioa.nb_rcp_categories = (uint32_t)nb;
+
+	/* NOTE: the number of ROA categories is queried here; the FPGA cross-indexes IOA and ROA - not ideal! */
+ nb = be->iface->get_nb_roa_categories(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "roa_epp_entries", _MOD_, _VER_);
+ be->ioa.nb_roa_epp_entries = (uint32_t)nb;
+
+ switch (_VER_) {
+ case 4:
+ if (!callocate_mod(CAST_COMMON(&be->ioa), 3,
+ &be->ioa.v4.rcp, be->ioa.nb_rcp_categories,
+ sizeof(struct ioa_v4_rcp_s),
+ &be->ioa.v4.tpid, 1,
+ sizeof(struct ioa_v4_special_tpid_s),
+ &be->ioa.v4.roa_epp, be->ioa.nb_roa_epp_entries,
+ sizeof(struct ioa_v4_roa_epp_s)))
+ return -1;
+ break;
+ /* end case 4 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+ return 0;
+}
+
+void hw_mod_ioa_free(struct flow_api_backend_s *be)
+{
+ if (be->ioa.base) {
+ free(be->ioa.base);
+ be->ioa.base = NULL;
+ }
+}
+
+int hw_mod_ioa_reset(struct flow_api_backend_s *be)
+{
+ /* Zero entire cache area */
+ ZERO_MOD_CACHE(&be->ioa);
+
+ NT_LOG(DBG, FILTER, "INIT IOA RCP\n");
+ hw_mod_ioa_rcp_flush(be, 0, ALL_ENTRIES);
+ NT_LOG(DBG, FILTER, "INIT IOA SPECIAL TPID\n");
+ hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x8200);
+ hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_1, 0x8300);
+ hw_mod_ioa_config_flush(be);
+ NT_LOG(DBG, FILTER, "INIT IOA ROA EPP\n");
+ hw_mod_ioa_roa_epp_flush(be, 0, ALL_ENTRIES);
+ return 0;
+}
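+
+/*
+ * The reset above seeds the two custom TPID slots with defaults
+ * (0x8200/0x8300); a caller wanting e.g. 802.1ad service tags would
+ * override a slot and flush (a sketch):
+ *
+ *	hw_mod_ioa_config_set(be, HW_IOA_CONFIG_CUST_TPID_0, 0x88a8);
+ *	hw_mod_ioa_config_flush(be);
+ */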
+
+int hw_mod_ioa_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->ioa.nb_rcp_categories;
+ if ((unsigned int)(start_idx + count) > be->ioa.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ return be->iface->ioa_rcp_flush(be->be_dev, &be->ioa, start_idx, count);
+}
+
+static int hw_mod_ioa_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_ioa_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+	int rv = 0;
+
+	if (index >= be->ioa.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 4:
+ switch (field) {
+ case HW_IOA_RCP_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->ioa.v4.rcp[index], (uint8_t)*value,
+ sizeof(struct ioa_v4_rcp_s));
+ break;
+ case HW_IOA_RCP_FIND:
+ rv = find_equal_index(be->ioa.v4.rcp,
+ sizeof(struct ioa_v4_rcp_s), index, *value,
+ be->ioa.nb_rcp_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_IOA_RCP_COMPARE:
+ rv = do_compare_indexes(be->ioa.v4.rcp,
+ sizeof(struct ioa_v4_rcp_s), index, *value,
+ be->ioa.nb_rcp_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_IOA_RCP_TUNNEL_POP:
+ get_set(&be->ioa.v4.rcp[index].tunnel_pop, value, get);
+ break;
+ case HW_IOA_RCP_VLAN_POP:
+ get_set(&be->ioa.v4.rcp[index].vlan_pop, value, get);
+ break;
+ case HW_IOA_RCP_VLAN_PUSH:
+ get_set(&be->ioa.v4.rcp[index].vlan_push, value, get);
+ break;
+ case HW_IOA_RCP_VLAN_VID:
+ get_set(&be->ioa.v4.rcp[index].vlan_vid, value, get);
+ break;
+ case HW_IOA_RCP_VLAN_DEI:
+ get_set(&be->ioa.v4.rcp[index].vlan_dei, value, get);
+ break;
+ case HW_IOA_RCP_VLAN_PCP:
+ get_set(&be->ioa.v4.rcp[index].vlan_pcp, value, get);
+ break;
+ case HW_IOA_RCP_VLAN_TPID_SEL:
+ get_set(&be->ioa.v4.rcp[index].vlan_tpid_sel, value, get);
+ break;
+ case HW_IOA_RCP_QUEUE_OVERRIDE_EN:
+ get_set(&be->ioa.v4.rcp[index].queue_override_en, value, get);
+ break;
+ case HW_IOA_RCP_QUEUE_ID:
+ get_set(&be->ioa.v4.rcp[index].queue_id, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 4 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_ioa_rcp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+ uint32_t index, uint32_t value)
+{
+ return hw_mod_ioa_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_ioa_rcp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+ uint32_t index, uint32_t *value)
+{
+ return hw_mod_ioa_rcp_mod(be, field, index, value, 1);
+}
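+
+/*
+ * VLAN-push sketch (values hypothetical, TPID selector semantics assumed):
+ * a recipe pushing a tag with VID 100 and PCP 3 using the first custom
+ * TPID could be cached and flushed like this:
+ *
+ *	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PUSH, idx, 1);
+ *	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_VID, idx, 100);
+ *	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_PCP, idx, 3);
+ *	hw_mod_ioa_rcp_set(be, HW_IOA_RCP_VLAN_TPID_SEL, idx, 0);
+ *	hw_mod_ioa_rcp_flush(be, idx, 1);
+ */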
+
+int hw_mod_ioa_config_flush(struct flow_api_backend_s *be)
+{
+ return be->iface->ioa_special_tpid_flush(be->be_dev, &be->ioa);
+}
+
+int hw_mod_ioa_config_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+ uint32_t value)
+{
+ switch (_VER_) {
+ case 4:
+ switch (field) {
+ case HW_IOA_CONFIG_CUST_TPID_0:
+ be->ioa.v4.tpid->cust_tpid_0 = value;
+ break;
+ case HW_IOA_CONFIG_CUST_TPID_1:
+ be->ioa.v4.tpid->cust_tpid_1 = value;
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 4 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_ioa_roa_epp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->ioa.nb_roa_epp_entries;
+ if ((unsigned int)(start_idx + count) > be->ioa.nb_roa_epp_entries)
+ return error_index_too_large(__func__);
+ return be->iface->ioa_roa_epp_flush(be->be_dev, &be->ioa, start_idx,
+ count);
+}
+
+static int hw_mod_ioa_roa_epp_mod(struct flow_api_backend_s *be,
+ enum hw_ioa_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+	int rv = 0;
+
+	if (index >= be->ioa.nb_roa_epp_entries)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 4:
+ switch (field) {
+ case HW_IOA_ROA_EPP_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->ioa.v4.roa_epp[index], (uint8_t)*value,
+ sizeof(struct ioa_v4_roa_epp_s));
+ break;
+ case HW_IOA_ROA_EPP_FIND:
+ rv = find_equal_index(be->ioa.v4.roa_epp,
+ sizeof(struct ioa_v4_roa_epp_s), index, *value,
+ be->ioa.nb_roa_epp_entries, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_IOA_ROA_EPP_COMPARE:
+ rv = do_compare_indexes(be->ioa.v4.roa_epp,
+ sizeof(struct ioa_v4_roa_epp_s), index, *value,
+ be->ioa.nb_roa_epp_entries, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_IOA_ROA_EPP_PUSH_TUNNEL:
+ get_set(&be->ioa.v4.roa_epp[index].push_tunnel, value, get);
+ break;
+ case HW_IOA_ROA_EPP_TX_PORT:
+ get_set(&be->ioa.v4.roa_epp[index].tx_port, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 4 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_ioa_roa_epp_set(struct flow_api_backend_s *be, enum hw_ioa_e field,
+ uint32_t index, uint32_t value)
+{
+ return hw_mod_ioa_roa_epp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_ioa_roa_epp_get(struct flow_api_backend_s *be, enum hw_ioa_e field,
+ uint32_t index, uint32_t *value)
+{
+ return hw_mod_ioa_roa_epp_mod(be, field, index, value, 1);
+}
new file mode 100644
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_IOA_V4_H_
+#define _HW_MOD_IOA_V4_H_
+
+struct ioa_v4_rcp_s {
+ uint32_t tunnel_pop;
+ uint32_t vlan_pop;
+ uint32_t vlan_push;
+ uint32_t vlan_vid;
+ uint32_t vlan_dei;
+ uint32_t vlan_pcp;
+ uint32_t vlan_tpid_sel;
+ uint32_t queue_override_en;
+ uint32_t queue_id;
+};
+
+struct ioa_v4_special_tpid_s {
+ uint32_t cust_tpid_0;
+ uint32_t cust_tpid_1;
+};
+
+struct ioa_v4_roa_epp_s {
+ uint32_t push_tunnel;
+ uint32_t tx_port;
+};
+
+struct hw_mod_ioa_v4_s {
+ struct ioa_v4_rcp_s *rcp;
+ struct ioa_v4_special_tpid_s *tpid;
+ struct ioa_v4_roa_epp_s *roa_epp;
+};
+
+#endif /* _HW_MOD_IOA_V4_H_ */
new file mode 100644
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "KM"
+#define _VER_ be->km.ver
+
+#define KM_TCQ_ENTRIES 2048
+#define KM_RCP_MASK_A_SIZE 11
+/* Mask for double size word extractors for DW8/DW10 */
+#define KM_RCP_MASK_D_A_SIZE 12
+#define KM_RCP_MASK_B_SIZE 6
+
+bool hw_mod_km_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_km_present(be->be_dev);
+}
+
+int hw_mod_km_alloc(struct flow_api_backend_s *be)
+{
+ int nb;
+
+ _VER_ = be->iface->get_km_version(be->be_dev);
+ NT_LOG(DBG, FILTER, "KM MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+ VER_MINOR(_VER_));
+
+ nb = be->iface->get_nb_km_categories(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "km_categories", _MOD_, _VER_);
+ be->km.nb_categories = (uint32_t)nb;
+
+ nb = be->iface->get_nb_km_cam_banks(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "cam_banks", _MOD_, _VER_);
+ be->km.nb_cam_banks = (uint32_t)nb;
+
+ nb = be->iface->get_nb_km_cam_records(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "cam_records", _MOD_, _VER_);
+ be->km.nb_cam_records = (uint32_t)nb;
+
+ nb = be->iface->get_nb_km_cam_record_words(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "cam_record_words", _MOD_, _VER_);
+ be->km.nb_cam_record_words = (uint32_t)nb;
+
+ nb = be->iface->get_nb_km_tcam_banks(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "tcam_banks", _MOD_, _VER_);
+ be->km.nb_tcam_banks = (uint32_t)nb;
+
+ nb = be->iface->get_nb_km_tcam_bank_width(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "tcam_bank_width", _MOD_, _VER_);
+ be->km.nb_tcam_bank_width = (uint32_t)nb;
+
+ switch (_VER_) {
+ case 7:
+		be->km.nb_km_rcp_mask_a_word_size = KM_RCP_MASK_D_A_SIZE;
+		be->km.nb_km_rcp_mask_b_word_size = KM_RCP_MASK_B_SIZE;
+ if (!callocate_mod(CAST_COMMON(&be->km), 5,
+ &be->km.v7.rcp,
+ be->km.nb_categories,
+ sizeof(struct km_v7_rcp_s),
+ &be->km.v7.cam,
+ be->km.nb_cam_banks * be->km.nb_cam_records,
+ sizeof(struct km_v7_cam_s),
+ &be->km.v7.tcam,
+ be->km.nb_tcam_banks * 4 * 256,
+ sizeof(struct km_v7_tcam_s),
+ &be->km.v7.tci,
+ be->km.nb_tcam_banks * be->km.nb_tcam_bank_width,
+ sizeof(struct km_v7_tci_s),
+ &be->km.v7.tcq,
+ KM_TCQ_ENTRIES,
+ sizeof(struct km_v7_tcq_s)))
+ return -1;
+ break;
+ /* end case 7 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+void hw_mod_km_free(struct flow_api_backend_s *be)
+{
+ if (be->km.base) {
+ free(be->km.base);
+ be->km.base = NULL;
+ }
+}
+
+int hw_mod_km_reset(struct flow_api_backend_s *be)
+{
+ uint32_t tcam_v_set[3] = { 0x00000000, 0x00000000, 0x00000000 };
+ /* int err = 0; */
+
+ /* Zero entire cache area */
+ ZERO_MOD_CACHE(&be->km);
+
+ NT_LOG(DBG, FILTER, "INIT KM RCP\n");
+ hw_mod_km_rcp_flush(be, 0, ALL_ENTRIES);
+
+ /* init CAM - all zero */
+ NT_LOG(DBG, FILTER, "INIT KM CAM\n");
+ hw_mod_km_cam_flush(be, 0, 0, ALL_ENTRIES);
+
+ /* init TCAM - all zero */
+ NT_LOG(DBG, FILTER, "INIT KM TCAM\n");
+ for (unsigned int i = 0; i < be->km.nb_tcam_banks; i++) {
+		/*
+		 * TCAM entries are cache controlled and must therefore be hard
+		 * reset initially to sync the cache with HW
+		 */
+ hw_mod_km_tcam_set(be, HW_KM_TCAM_BANK_RESET, i, 0, 0,
+ tcam_v_set);
+ }
+ hw_mod_km_tcam_flush(be, 0, ALL_ENTRIES);
+
+ /* init TCI - all zero */
+ NT_LOG(DBG, FILTER, "INIT KM TCI\n");
+ hw_mod_km_tci_flush(be, 0, 0, ALL_ENTRIES);
+
+ NT_LOG(DBG, FILTER, "INIT KM TCQ\n");
+ for (unsigned int i = 0; i < be->km.nb_tcam_bank_width; i++)
+ hw_mod_km_tcq_flush(be, 0, i, be->km.nb_tcam_banks);
+
+ return 0;
+}
+
+int hw_mod_km_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->km.nb_categories;
+ if ((unsigned int)(start_idx + count) > be->km.nb_categories)
+ return error_index_too_large(__func__);
+ return be->iface->km_rcp_flush(be->be_dev, &be->km, start_idx, count);
+}
+
+static int hw_mod_km_rcp_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+ int index, int word_off, uint32_t *value, int get)
+{
+ if ((unsigned int)index >= be->km.nb_categories)
+ return error_index_too_large(__func__);
+
+ switch (_VER_) {
+ case 7:
+ switch (field) {
+ case HW_KM_RCP_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->km.v7.rcp[index], (uint8_t)*value,
+ sizeof(struct km_v7_rcp_s));
+ break;
+ case HW_KM_RCP_QW0_DYN:
+ get_set(&be->km.v7.rcp[index].qw0_dyn, value, get);
+ break;
+ case HW_KM_RCP_QW0_OFS:
+ get_set_signed(&be->km.v7.rcp[index].qw0_ofs, value, get);
+ break;
+ case HW_KM_RCP_QW0_SEL_A:
+ get_set(&be->km.v7.rcp[index].qw0_sel_a, value, get);
+ break;
+ case HW_KM_RCP_QW0_SEL_B:
+ get_set(&be->km.v7.rcp[index].qw0_sel_b, value, get);
+ break;
+ case HW_KM_RCP_QW4_DYN:
+ get_set(&be->km.v7.rcp[index].qw4_dyn, value, get);
+ break;
+ case HW_KM_RCP_QW4_OFS:
+ get_set_signed(&be->km.v7.rcp[index].qw4_ofs, value, get);
+ break;
+ case HW_KM_RCP_QW4_SEL_A:
+ get_set(&be->km.v7.rcp[index].qw4_sel_a, value, get);
+ break;
+ case HW_KM_RCP_QW4_SEL_B:
+ get_set(&be->km.v7.rcp[index].qw4_sel_b, value, get);
+ break;
+ case HW_KM_RCP_DW8_DYN:
+ get_set(&be->km.v7.rcp[index].dw8_dyn, value, get);
+ break;
+ case HW_KM_RCP_DW8_OFS:
+ get_set_signed(&be->km.v7.rcp[index].dw8_ofs, value, get);
+ break;
+ case HW_KM_RCP_DW8_SEL_A:
+ get_set(&be->km.v7.rcp[index].dw8_sel_a, value, get);
+ break;
+ case HW_KM_RCP_DW8_SEL_B:
+ get_set(&be->km.v7.rcp[index].dw8_sel_b, value, get);
+ break;
+ case HW_KM_RCP_DW10_DYN:
+ get_set(&be->km.v7.rcp[index].dw10_dyn, value, get);
+ break;
+ case HW_KM_RCP_DW10_OFS:
+ get_set_signed(&be->km.v7.rcp[index].dw10_ofs, value, get);
+ break;
+ case HW_KM_RCP_DW10_SEL_A:
+ get_set(&be->km.v7.rcp[index].dw10_sel_a, value, get);
+ break;
+ case HW_KM_RCP_DW10_SEL_B:
+ get_set(&be->km.v7.rcp[index].dw10_sel_b, value, get);
+ break;
+ case HW_KM_RCP_SWX_CCH:
+ get_set(&be->km.v7.rcp[index].swx_cch, value, get);
+ break;
+ case HW_KM_RCP_SWX_SEL_A:
+ get_set(&be->km.v7.rcp[index].swx_sel_a, value, get);
+ break;
+ case HW_KM_RCP_SWX_SEL_B:
+ get_set(&be->km.v7.rcp[index].swx_sel_b, value, get);
+ break;
+ case HW_KM_RCP_MASK_A:
+			if (word_off >= KM_RCP_MASK_D_A_SIZE)
+ return error_word_off_too_large(__func__);
+ get_set(&be->km.v7.rcp[index].mask_d_a[word_off], value, get);
+ break;
+ case HW_KM_RCP_MASK_B:
+			if (word_off >= KM_RCP_MASK_B_SIZE)
+ return error_word_off_too_large(__func__);
+ get_set(&be->km.v7.rcp[index].mask_b[word_off], value, get);
+ break;
+ case HW_KM_RCP_DUAL:
+ get_set(&be->km.v7.rcp[index].dual, value, get);
+ break;
+ case HW_KM_RCP_PAIRED:
+ get_set(&be->km.v7.rcp[index].paired, value, get);
+ break;
+ case HW_KM_RCP_EL_A:
+ get_set(&be->km.v7.rcp[index].el_a, value, get);
+ break;
+ case HW_KM_RCP_EL_B:
+ get_set(&be->km.v7.rcp[index].el_b, value, get);
+ break;
+ case HW_KM_RCP_INFO_A:
+ get_set(&be->km.v7.rcp[index].info_a, value, get);
+ break;
+ case HW_KM_RCP_INFO_B:
+ get_set(&be->km.v7.rcp[index].info_b, value, get);
+ break;
+ case HW_KM_RCP_FTM_A:
+ get_set(&be->km.v7.rcp[index].ftm_a, value, get);
+ break;
+ case HW_KM_RCP_FTM_B:
+ get_set(&be->km.v7.rcp[index].ftm_b, value, get);
+ break;
+ case HW_KM_RCP_BANK_A:
+ get_set(&be->km.v7.rcp[index].bank_a, value, get);
+ break;
+ case HW_KM_RCP_BANK_B:
+ get_set(&be->km.v7.rcp[index].bank_b, value, get);
+ break;
+ case HW_KM_RCP_KL_A:
+ get_set(&be->km.v7.rcp[index].kl_a, value, get);
+ break;
+ case HW_KM_RCP_KL_B:
+ get_set(&be->km.v7.rcp[index].kl_b, value, get);
+ break;
+ case HW_KM_RCP_KEYWAY_A:
+ get_set(&be->km.v7.rcp[index].keyway_a, value, get);
+ break;
+ case HW_KM_RCP_KEYWAY_B:
+ get_set(&be->km.v7.rcp[index].keyway_b, value, get);
+ break;
+ case HW_KM_RCP_SYNERGY_MODE:
+ get_set(&be->km.v7.rcp[index].synergy_mode, value, get);
+ break;
+ case HW_KM_RCP_DW0_B_DYN:
+ get_set(&be->km.v7.rcp[index].dw0_b_dyn, value, get);
+ break;
+ case HW_KM_RCP_DW0_B_OFS:
+ get_set_signed(&be->km.v7.rcp[index].dw0_b_ofs, value, get);
+ break;
+ case HW_KM_RCP_DW2_B_DYN:
+ get_set(&be->km.v7.rcp[index].dw2_b_dyn, value, get);
+ break;
+ case HW_KM_RCP_DW2_B_OFS:
+ get_set_signed(&be->km.v7.rcp[index].dw2_b_ofs, value, get);
+ break;
+ case HW_KM_RCP_SW4_B_DYN:
+ get_set(&be->km.v7.rcp[index].sw4_b_dyn, value, get);
+ break;
+ case HW_KM_RCP_SW4_B_OFS:
+ get_set_signed(&be->km.v7.rcp[index].sw4_b_ofs, value, get);
+ break;
+ case HW_KM_RCP_SW5_B_DYN:
+ get_set(&be->km.v7.rcp[index].sw5_b_dyn, value, get);
+ break;
+ case HW_KM_RCP_SW5_B_OFS:
+ get_set_signed(&be->km.v7.rcp[index].sw5_b_ofs, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 7 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_km_rcp_set(struct flow_api_backend_s *be, enum hw_km_e field,
+ int index, int word_off, uint32_t value)
+{
+ return hw_mod_km_rcp_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_km_rcp_get(struct flow_api_backend_s *be, enum hw_km_e field,
+ int index, int word_off, uint32_t *value)
+{
+ return hw_mod_km_rcp_mod(be, field, index, word_off, value, 1);
+}
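+
+/*
+ * Extractor sketch (semantics assumed, values hypothetical): the
+ * HW_KM_RCP_QW0_* fields select where the first quad-word of the key is
+ * taken from - a dynamic offset base plus a signed byte offset - and which
+ * key lane it feeds:
+ *
+ *	hw_mod_km_rcp_set(be, HW_KM_RCP_QW0_DYN, idx, 0, dyn);
+ *	hw_mod_km_rcp_set(be, HW_KM_RCP_QW0_OFS, idx, 0, (uint32_t)ofs);
+ *	hw_mod_km_rcp_set(be, HW_KM_RCP_QW0_SEL_A, idx, 0, sel_a);
+ *	hw_mod_km_rcp_flush(be, idx, 1);
+ */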
+
+int hw_mod_km_cam_flush(struct flow_api_backend_s *be, int start_bank,
+ int start_record, int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->km.nb_cam_records * be->km.nb_cam_banks;
+
+ unsigned int end =
+ start_bank * be->km.nb_cam_records + start_record + count;
+ if (end > (be->km.nb_cam_banks * be->km.nb_cam_records))
+ return error_index_too_large(__func__);
+
+ return be->iface->km_cam_flush(be->be_dev, &be->km, start_bank,
+ start_record, count);
+}
+
+static int hw_mod_km_cam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t *value, int get)
+{
+ if ((unsigned int)bank >= be->km.nb_cam_banks)
+ return error_index_too_large(__func__);
+ if ((unsigned int)record >= be->km.nb_cam_records)
+ return error_index_too_large(__func__);
+
+ unsigned int index = bank * be->km.nb_cam_records + record;
+
+ switch (_VER_) {
+ case 7:
+ switch (field) {
+ case HW_KM_CAM_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->km.v7.cam[index], (uint8_t)*value,
+ sizeof(struct km_v7_cam_s));
+ break;
+ case HW_KM_CAM_W0:
+ get_set(&be->km.v7.cam[index].w0, value, get);
+ break;
+ case HW_KM_CAM_W1:
+ get_set(&be->km.v7.cam[index].w1, value, get);
+ break;
+ case HW_KM_CAM_W2:
+ get_set(&be->km.v7.cam[index].w2, value, get);
+ break;
+ case HW_KM_CAM_W3:
+ get_set(&be->km.v7.cam[index].w3, value, get);
+ break;
+ case HW_KM_CAM_W4:
+ get_set(&be->km.v7.cam[index].w4, value, get);
+ break;
+ case HW_KM_CAM_W5:
+ get_set(&be->km.v7.cam[index].w5, value, get);
+ break;
+ case HW_KM_CAM_FT0:
+ get_set(&be->km.v7.cam[index].ft0, value, get);
+ break;
+ case HW_KM_CAM_FT1:
+ get_set(&be->km.v7.cam[index].ft1, value, get);
+ break;
+ case HW_KM_CAM_FT2:
+ get_set(&be->km.v7.cam[index].ft2, value, get);
+ break;
+ case HW_KM_CAM_FT3:
+ get_set(&be->km.v7.cam[index].ft3, value, get);
+ break;
+ case HW_KM_CAM_FT4:
+ get_set(&be->km.v7.cam[index].ft4, value, get);
+ break;
+ case HW_KM_CAM_FT5:
+ get_set(&be->km.v7.cam[index].ft5, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 7 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+ return 0;
+}
+
+int hw_mod_km_cam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t value)
+{
+ return hw_mod_km_cam_mod(be, field, bank, record, &value, 0);
+}
+
+int hw_mod_km_cam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t *value)
+{
+ return hw_mod_km_cam_mod(be, field, bank, record, value, 1);
+}
+
+int hw_mod_km_tcam_flush(struct flow_api_backend_s *be, int start_bank,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->km.nb_tcam_banks * 4 * 256;
+ else if (count == ALL_BANK_ENTRIES)
+ count = 4 * 256;
+
+ unsigned int end = start_bank * 4 * 256 + count;
+
+ if (end > (be->km.nb_tcam_banks * 4 * 256))
+ return error_index_too_large(__func__);
+
+ return be->iface->km_tcam_flush(be->be_dev, &be->km, start_bank, 0, 0,
+ count);
+}
+
+static int hw_mod_km_tcam_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int byte, int byte_val,
+ uint32_t *value_set, int get)
+{
+ unsigned int start_index = bank * 4 * 256 + (int)byte * 256 + byte_val;
+
+ if (start_index >= (be->km.nb_tcam_banks * 4 * 256))
+ return error_index_too_large(__func__);
+
+ switch (_VER_) {
+ case 7:
+ switch (field) {
+ case HW_KM_TCAM_BANK_RESET:
+ if (get)
+ return error_unsup_field(__func__);
+ {
+ int start_idx = bank * 4 * 256;
+
+ for (int i = 0; i < 4 * 256; i++) {
+ be->km.v7.tcam[start_idx + i].t[0] =
+ value_set[0];
+ be->km.v7.tcam[start_idx + i].t[1] =
+ value_set[1];
+ be->km.v7.tcam[start_idx + i].t[2] =
+ value_set[2];
+ be->km.v7.tcam[start_idx + i].dirty = 1;
+ }
+ }
+ break;
+ case HW_KM_TCAM_T: {
+ unsigned int index = start_index;
+
+ if (get) {
+ value_set[0] = be->km.v7.tcam[index].t[0];
+ value_set[1] = be->km.v7.tcam[index].t[1];
+ value_set[2] = be->km.v7.tcam[index].t[2];
+ } else {
+ /* only write if any bits actually have to be changed */
+ if (be->km.v7.tcam[index].t[0] !=
+ value_set[0] ||
+ be->km.v7.tcam[index].t[1] !=
+ value_set[1] ||
+ be->km.v7.tcam[index].t[2] !=
+ value_set[2]) {
+ be->km.v7.tcam[index].t[0] =
+ value_set[0];
+ be->km.v7.tcam[index].t[1] =
+ value_set[1];
+ be->km.v7.tcam[index].t[2] =
+ value_set[2];
+ be->km.v7.tcam[index].dirty = 1;
+ }
+ }
+ }
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 7 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+ return 0;
+}
+
+int hw_mod_km_tcam_set(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int byte, int byte_val, uint32_t *value_set)
+{
+ return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+ 0);
+}
+
+int hw_mod_km_tcam_get(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int byte, int byte_val, uint32_t *value_set)
+{
+ return hw_mod_km_tcam_mod(be, field, bank, byte, byte_val, value_set,
+ 1);
+}
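+
+/*
+ * Illustrative caller sequence (a sketch, not code from this driver):
+ * program one TCAM cell in the shadow cache, then push that bank to
+ * hardware:
+ *
+ *   uint32_t t[3] = { w0, w1, w2 };
+ *   hw_mod_km_tcam_set(be, HW_KM_TCAM_T, bank, byte, byte_val, t);
+ *   hw_mod_km_tcam_flush(be, bank, ALL_BANK_ENTRIES);
+ */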
+
+int hw_mod_km_tci_flush(struct flow_api_backend_s *be, int start_bank,
+ int start_record, int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+ unsigned int end = start_bank * be->km.nb_tcam_bank_width +
+ start_record + count;
+
+ if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+ return error_index_too_large(__func__);
+
+ return be->iface->km_tci_flush(be->be_dev, &be->km, start_bank,
+ start_record, count);
+}
+
+static int hw_mod_km_tci_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t *value, int get)
+{
+ unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+ if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+ return error_index_too_large(__func__);
+
+ switch (_VER_) {
+ case 7:
+ switch (field) {
+ case HW_KM_TCI_COLOR:
+ get_set(&be->km.v7.tci[index].color, value, get);
+ break;
+ case HW_KM_TCI_FT:
+ get_set(&be->km.v7.tci[index].ft, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 7 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+ return 0;
+}
+
+int hw_mod_km_tci_set(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t value)
+{
+ return hw_mod_km_tci_mod(be, field, bank, record, &value, 0);
+}
+
+int hw_mod_km_tci_get(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t *value)
+{
+ return hw_mod_km_tci_mod(be, field, bank, record, value, 1);
+}
+
+int hw_mod_km_tcq_flush(struct flow_api_backend_s *be, int start_bank,
+ int start_record, int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->km.nb_tcam_banks * be->km.nb_tcam_bank_width;
+
+ unsigned int end = start_bank * be->km.nb_tcam_bank_width +
+ start_record + count;
+
+ if (end > (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+ return error_index_too_large(__func__);
+
+ return be->iface->km_tcq_flush(be->be_dev, &be->km, start_bank,
+ start_record, count);
+}
+
+static int hw_mod_km_tcq_mod(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t *value, int get)
+{
+ unsigned int index = bank * be->km.nb_tcam_bank_width + record;
+
+ if (index >= (be->km.nb_tcam_banks * be->km.nb_tcam_bank_width))
+ return error_index_too_large(__func__);
+
+ switch (_VER_) {
+ case 7:
+ switch (field) {
+ case HW_KM_TCQ_BANK_MASK:
+ get_set(&be->km.v7.tcq[index].bank_mask, value, get);
+ break;
+ case HW_KM_TCQ_QUAL:
+ get_set(&be->km.v7.tcq[index].qual, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 7 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+ return 0;
+}
+
+int hw_mod_km_tcq_set(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t *value)
+{
+ return hw_mod_km_tcq_mod(be, field, bank, record, value, 0);
+}
+
+int hw_mod_km_tcq_get(struct flow_api_backend_s *be, enum hw_km_e field,
+ int bank, int record, uint32_t *value)
+{
+ return hw_mod_km_tcq_mod(be, field, bank, record, value, 1);
+}
new file mode 100644
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_KM_V7_H_
+#define _HW_MOD_KM_V7_H_
+
+struct km_v7_rcp_s {
+ uint32_t qw0_dyn;
+ int32_t qw0_ofs;
+ uint32_t qw0_sel_a;
+ uint32_t qw0_sel_b;
+ uint32_t qw4_dyn;
+ int32_t qw4_ofs;
+ uint32_t qw4_sel_a;
+ uint32_t qw4_sel_b;
+ uint32_t dw8_dyn;
+ int32_t dw8_ofs;
+ uint32_t dw8_sel_a;
+ uint32_t dw8_sel_b;
+ uint32_t dw10_dyn;
+ int32_t dw10_ofs;
+ uint32_t dw10_sel_a;
+ uint32_t dw10_sel_b;
+ uint32_t swx_cch;
+ uint32_t swx_sel_a;
+ uint32_t swx_sel_b;
+ uint32_t mask_d_a[12];
+ uint32_t mask_b[6];
+ uint32_t dual;
+ uint32_t paired;
+ uint32_t el_a;
+ uint32_t el_b;
+ uint32_t info_a;
+ uint32_t info_b;
+ uint32_t ftm_a;
+ uint32_t ftm_b;
+ uint32_t bank_a;
+ uint32_t bank_b;
+ uint32_t kl_a;
+ uint32_t kl_b;
+ uint32_t keyway_a;
+ uint32_t keyway_b;
+ uint32_t synergy_mode;
+ uint32_t dw0_b_dyn;
+ int32_t dw0_b_ofs;
+ uint32_t dw2_b_dyn;
+ int32_t dw2_b_ofs;
+ uint32_t sw4_b_dyn;
+ int32_t sw4_b_ofs;
+ uint32_t sw5_b_dyn;
+ int32_t sw5_b_ofs;
+};
+
+struct km_v7_cam_s {
+ uint32_t w0;
+ uint32_t w1;
+ uint32_t w2;
+ uint32_t w3;
+ uint32_t w4;
+ uint32_t w5;
+ uint32_t ft0;
+ uint32_t ft1;
+ uint32_t ft2;
+ uint32_t ft3;
+ uint32_t ft4;
+ uint32_t ft5;
+};
+
+struct km_v7_tcam_s {
+ uint32_t t[3];
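+ /* set when the cached entry differs from what was last written to hardware */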
+ uint32_t dirty;
+};
+
+struct km_v7_tci_s {
+ uint32_t color;
+ uint32_t ft;
+};
+
+struct km_v7_tcq_s {
+ uint32_t bank_mask;
+ uint32_t qual;
+};
+
+struct hw_mod_km_v7_s {
+ struct km_v7_rcp_s *rcp;
+ struct km_v7_cam_s *cam;
+ struct km_v7_tcam_s *tcam;
+ struct km_v7_tci_s *tci;
+ struct km_v7_tcq_s *tcq;
+};
+
+#endif /* _HW_MOD_KM_V7_H_ */
new file mode 100644
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "PDB"
+#define _VER_ be->pdb.ver
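+
+/*
+ * Per-module shorthands: _MOD_ tags error/log messages and _VER_ aliases
+ * the module version reported by the backend.
+ */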
+
+bool hw_mod_pdb_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_pdb_present(be->be_dev);
+}
+
+int hw_mod_pdb_alloc(struct flow_api_backend_s *be)
+{
+ int nb;
+
+ _VER_ = be->iface->get_pdb_version(be->be_dev);
+ NT_LOG(DBG, FILTER, "PDB MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+ VER_MINOR(_VER_));
+
+ nb = be->iface->get_nb_pdb_categories(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "pdb_categories", _MOD_, _VER_);
+ be->pdb.nb_pdb_rcp_categories = (uint32_t)nb;
+
+ switch (_VER_) {
+ case 9:
+ if (!callocate_mod(CAST_COMMON(&be->pdb), 2,
+ &be->pdb.v9.rcp,
+ be->pdb.nb_pdb_rcp_categories,
+ sizeof(struct pdb_v9_rcp_s),
+ &be->pdb.v9.config,
+ 1,
+ sizeof(struct pdb_v9_config_s)))
+ return -1;
+ break;
+ /* end case 9 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+ return 0;
+}
+
+void hw_mod_pdb_free(struct flow_api_backend_s *be)
+{
+ if (be->pdb.base) {
+ free(be->pdb.base);
+ be->pdb.base = NULL;
+ }
+}
+
+int hw_mod_pdb_reset(struct flow_api_backend_s *be)
+{
+ int err = 0;
+ /* Zero entire cache area */
+ ZERO_MOD_CACHE(&be->pdb);
+
+ NT_LOG(DBG, FILTER, "INIT PDB RCP\n");
+ err |= hw_mod_pdb_rcp_flush(be, 0, ALL_ENTRIES);
+
+ NT_LOG(DBG, FILTER, "INIT PDB CONFIG\n");
+ err |= hw_mod_pdb_config_flush(be);
+ return err;
+}
+
+int hw_mod_pdb_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->pdb.nb_pdb_rcp_categories;
+ if ((unsigned int)(start_idx + count) > be->pdb.nb_pdb_rcp_categories)
+ return error_index_too_large(__func__);
+ return be->iface->pdb_rcp_flush(be->be_dev, &be->pdb, start_idx, count);
+}
+
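+/*
+ * Field modifier for the PDB RCP shadow cache; the FIND and COMPARE
+ * pseudo-fields delegate to the shared find_equal_index() and
+ * do_compare_indexes() helpers (used for recipe matching/reuse).
+ */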
+static int hw_mod_pdb_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_pdb_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ int rv = 0;
+ if (index >= be->pdb.nb_pdb_rcp_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 9:
+ switch (field) {
+ case HW_PDB_RCP_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->pdb.v9.rcp[index], (uint8_t)*value,
+ sizeof(struct pdb_v9_rcp_s));
+ break;
+ case HW_PDB_RCP_FIND:
+ rv = find_equal_index(be->pdb.v9.rcp,
+ sizeof(struct pdb_v9_rcp_s), index, *value,
+ be->pdb.nb_pdb_rcp_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_PDB_RCP_COMPARE:
+ rv = do_compare_indexes(be->pdb.v9.rcp,
+ sizeof(struct pdb_v9_rcp_s), index, *value,
+ be->pdb.nb_pdb_rcp_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_PDB_RCP_DESCRIPTOR:
+ get_set(&be->pdb.v9.rcp[index].descriptor, value, get);
+ break;
+ case HW_PDB_RCP_DESC_LEN:
+ get_set(&be->pdb.v9.rcp[index].desc_len, value, get);
+ break;
+ case HW_PDB_RCP_TX_PORT:
+ get_set(&be->pdb.v9.rcp[index].tx_port, value, get);
+ break;
+ case HW_PDB_RCP_TX_IGNORE:
+ get_set(&be->pdb.v9.rcp[index].tx_ignore, value, get);
+ break;
+ case HW_PDB_RCP_TX_NOW:
+ get_set(&be->pdb.v9.rcp[index].tx_now, value, get);
+ break;
+ case HW_PDB_RCP_CRC_OVERWRITE:
+ get_set(&be->pdb.v9.rcp[index].crc_overwrite, value, get);
+ break;
+ case HW_PDB_RCP_ALIGN:
+ get_set(&be->pdb.v9.rcp[index].align, value, get);
+ break;
+ case HW_PDB_RCP_OFS0_DYN:
+ get_set(&be->pdb.v9.rcp[index].ofs0_dyn, value, get);
+ break;
+ case HW_PDB_RCP_OFS0_REL:
+ get_set_signed(&be->pdb.v9.rcp[index].ofs0_rel, value, get);
+ break;
+ case HW_PDB_RCP_OFS1_DYN:
+ get_set(&be->pdb.v9.rcp[index].ofs1_dyn, value, get);
+ break;
+ case HW_PDB_RCP_OFS1_REL:
+ get_set_signed(&be->pdb.v9.rcp[index].ofs1_rel, value, get);
+ break;
+ case HW_PDB_RCP_OFS2_DYN:
+ get_set(&be->pdb.v9.rcp[index].ofs2_dyn, value, get);
+ break;
+ case HW_PDB_RCP_OFS2_REL:
+ get_set_signed(&be->pdb.v9.rcp[index].ofs2_rel, value, get);
+ break;
+ case HW_PDB_RCP_IP_PROT_TNL:
+ get_set(&be->pdb.v9.rcp[index].ip_prot_tnl, value, get);
+ break;
+ case HW_PDB_RCP_PPC_HSH:
+ get_set(&be->pdb.v9.rcp[index].ppc_hsh, value, get);
+ break;
+ case HW_PDB_RCP_DUPLICATE_EN:
+ get_set(&be->pdb.v9.rcp[index].duplicate_en, value, get);
+ break;
+ case HW_PDB_RCP_DUPLICATE_BIT:
+ get_set(&be->pdb.v9.rcp[index].duplicate_bit, value, get);
+ break;
+ case HW_PDB_RCP_PCAP_KEEP_FCS:
+ get_set(&be->pdb.v9.rcp[index].pcap_keep_fcs, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 9 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_pdb_rcp_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+ uint32_t index, uint32_t value)
+{
+ return hw_mod_pdb_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_pdb_rcp_get(struct flow_api_backend_s *be, enum hw_pdb_e field,
+ uint32_t index, uint32_t *value)
+{
+ return hw_mod_pdb_rcp_mod(be, field, index, value, 1);
+}
+
+int hw_mod_pdb_config_flush(struct flow_api_backend_s *be)
+{
+ return be->iface->pdb_config_flush(be->be_dev, &be->pdb);
+}
+
+int hw_mod_pdb_config_set(struct flow_api_backend_s *be, enum hw_pdb_e field,
+ uint32_t value)
+{
+ switch (_VER_) {
+ case 9:
+ switch (field) {
+ case HW_PDB_CONFIG_TS_FORMAT:
+ be->pdb.v9.config->ts_format = value;
+ break;
+ case HW_PDB_CONFIG_PORT_OFS:
+ be->pdb.v9.config->port_ofs = value;
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 9 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_PDB_V9_H_
+#define _HW_MOD_PDB_V9_H_
+
+struct pdb_v9_rcp_s {
+ uint32_t descriptor;
+ uint32_t desc_len;
+ uint32_t tx_port;
+ uint32_t tx_ignore;
+ uint32_t tx_now;
+ uint32_t crc_overwrite;
+ uint32_t align;
+ uint32_t ofs0_dyn;
+ int32_t ofs0_rel;
+ uint32_t ofs1_dyn;
+ int32_t ofs1_rel;
+ uint32_t ofs2_dyn;
+ int32_t ofs2_rel;
+ uint32_t ip_prot_tnl;
+ uint32_t ppc_hsh;
+ uint32_t duplicate_en;
+ uint32_t duplicate_bit;
+ uint32_t pcap_keep_fcs; /* the only field added in v9 compared to v7/v8 */
+};
+
+struct pdb_v9_config_s {
+ uint32_t ts_format;
+ uint32_t port_ofs;
+};
+
+struct hw_mod_pdb_v9_s {
+ struct pdb_v9_rcp_s *rcp;
+ struct pdb_v9_config_s *config;
+};
+
+#endif /* _HW_MOD_PDB_V9_H_ */
new file mode 100644
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "QSL"
+#define _VER_ be->qsl.ver
+
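+/* fixed table sizes in QSL v7; not queried from the backend */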
+#define QSL_QEN_ENTRIES 32
+#define QSL_QNMQ_ENTRIES 256
+
+bool hw_mod_qsl_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_qsl_present(be->be_dev);
+}
+
+int hw_mod_qsl_alloc(struct flow_api_backend_s *be)
+{
+ int nb;
+
+ _VER_ = be->iface->get_qsl_version(be->be_dev);
+ NT_LOG(DBG, FILTER, "QSL MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+ VER_MINOR(_VER_));
+
+ nb = be->iface->get_nb_qsl_categories(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "qsl_categories", _MOD_, _VER_);
+ be->qsl.nb_rcp_categories = (uint32_t)nb;
+
+ nb = be->iface->get_nb_qsl_qst_entries(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "qsl_qst_entries", _MOD_, _VER_);
+ be->qsl.nb_qst_entries = (uint32_t)nb;
+
+ switch (_VER_) {
+ case 7:
+ if (!callocate_mod(CAST_COMMON(&be->qsl), 4,
+ &be->qsl.v7.rcp,
+ be->qsl.nb_rcp_categories,
+ sizeof(struct qsl_v7_rcp_s),
+ &be->qsl.v7.qst,
+ be->qsl.nb_qst_entries,
+ sizeof(struct qsl_v7_qst_s),
+ &be->qsl.v7.qen,
+ QSL_QEN_ENTRIES,
+ sizeof(struct qsl_v7_qen_s),
+ &be->qsl.v7.unmq,
+ QSL_QNMQ_ENTRIES,
+ sizeof(struct qsl_v7_unmq_s)))
+ return -1;
+ break;
+ /* end case 7 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+ return 0;
+}
+
+void hw_mod_qsl_free(struct flow_api_backend_s *be)
+{
+ if (be->qsl.base) {
+ free(be->qsl.base);
+ be->qsl.base = NULL;
+ }
+}
+
+int hw_mod_qsl_reset(struct flow_api_backend_s *be)
+{
+ int err = 0;
+
+ /* Zero entire cache area */
+ ZERO_MOD_CACHE(&be->qsl);
+
+ NT_LOG(DBG, FILTER, "INIT QSL RCP\n");
+ err |= hw_mod_qsl_rcp_flush(be, 0, ALL_ENTRIES);
+
+ NT_LOG(DBG, FILTER, "INIT QSL QST\n");
+ err |= hw_mod_qsl_qst_flush(be, 0, ALL_ENTRIES);
+
+ NT_LOG(DBG, FILTER, "INIT QSL QEN\n");
+ err |= hw_mod_qsl_qen_flush(be, 0, ALL_ENTRIES);
+
+ NT_LOG(DBG, FILTER, "INIT QSL UNMQ\n");
+ err |= hw_mod_qsl_unmq_flush(be, 0, ALL_ENTRIES);
+
+ return err;
+}
+
+int hw_mod_qsl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->qsl.nb_rcp_categories;
+ if ((unsigned int)(start_idx + count) > be->qsl.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ return be->iface->qsl_rcp_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+static int hw_mod_qsl_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_qsl_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ int rv = 0;
+ if (index >= be->qsl.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 7:
+ switch (field) {
+ case HW_QSL_RCP_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->qsl.v7.rcp[index], (uint8_t)*value,
+ sizeof(struct qsl_v7_rcp_s));
+ break;
+ case HW_QSL_RCP_FIND:
+ rv = find_equal_index(be->qsl.v7.rcp,
+ sizeof(struct qsl_v7_rcp_s), index, *value,
+ be->qsl.nb_rcp_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_QSL_RCP_COMPARE:
+ rv = do_compare_indexes(be->qsl.v7.rcp,
+ sizeof(struct qsl_v7_rcp_s), index, *value,
+ be->qsl.nb_rcp_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_QSL_RCP_DISCARD:
+ get_set(&be->qsl.v7.rcp[index].discard, value, get);
+ break;
+ case HW_QSL_RCP_DROP:
+ get_set(&be->qsl.v7.rcp[index].drop, value, get);
+ break;
+ case HW_QSL_RCP_TBL_LO:
+ get_set(&be->qsl.v7.rcp[index].tbl_lo, value, get);
+ break;
+ case HW_QSL_RCP_TBL_HI:
+ get_set(&be->qsl.v7.rcp[index].tbl_hi, value, get);
+ break;
+ case HW_QSL_RCP_TBL_IDX:
+ get_set(&be->qsl.v7.rcp[index].tbl_idx, value, get);
+ break;
+ case HW_QSL_RCP_TBL_MSK:
+ get_set(&be->qsl.v7.rcp[index].tbl_msk, value, get);
+ break;
+ case HW_QSL_RCP_LR:
+ get_set(&be->qsl.v7.rcp[index].lr, value, get);
+ break;
+ case HW_QSL_RCP_TSA:
+ get_set(&be->qsl.v7.rcp[index].tsa, value, get);
+ break;
+ case HW_QSL_RCP_VLI:
+ get_set(&be->qsl.v7.rcp[index].vli, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 7 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_qsl_rcp_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t value)
+{
+ return hw_mod_qsl_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_rcp_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t *value)
+{
+ return hw_mod_qsl_rcp_mod(be, field, index, value, 1);
+}
+
+int hw_mod_qsl_qst_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->qsl.nb_qst_entries;
+ if ((unsigned int)(start_idx + count) > be->qsl.nb_qst_entries)
+ return error_index_too_large(__func__);
+ return be->iface->qsl_qst_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+static int hw_mod_qsl_qst_mod(struct flow_api_backend_s *be,
+ enum hw_qsl_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ if (index >= be->qsl.nb_qst_entries)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 7:
+ switch (field) {
+ case HW_QSL_QST_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->qsl.v7.qst[index], (uint8_t)*value,
+ sizeof(struct qsl_v7_qst_s));
+ break;
+ case HW_QSL_QST_QUEUE:
+ get_set(&be->qsl.v7.qst[index].queue, value, get);
+ break;
+ case HW_QSL_QST_EN:
+ get_set(&be->qsl.v7.qst[index].en, value, get);
+ break;
+ case HW_QSL_QST_TX_PORT:
+ get_set(&be->qsl.v7.qst[index].tx_port, value, get);
+ break;
+ case HW_QSL_QST_LRE:
+ get_set(&be->qsl.v7.qst[index].lre, value, get);
+ break;
+ case HW_QSL_QST_TCI:
+ get_set(&be->qsl.v7.qst[index].tci, value, get);
+ break;
+ case HW_QSL_QST_VEN:
+ get_set(&be->qsl.v7.qst[index].ven, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 7 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_qsl_qst_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t value)
+{
+ return hw_mod_qsl_qst_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_qst_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t *value)
+{
+ return hw_mod_qsl_qst_mod(be, field, index, value, 1);
+}
+
+int hw_mod_qsl_qen_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = QSL_QEN_ENTRIES;
+ if ((start_idx + count) > QSL_QEN_ENTRIES)
+ return error_index_too_large(__func__);
+ return be->iface->qsl_qen_flush(be->be_dev, &be->qsl, start_idx, count);
+}
+
+static int hw_mod_qsl_qen_mod(struct flow_api_backend_s *be,
+ enum hw_qsl_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ if (index >= QSL_QEN_ENTRIES)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 7:
+ switch (field) {
+ case HW_QSL_QEN_EN:
+ get_set(&be->qsl.v7.qen[index].en, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 7 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_qsl_qen_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t value)
+{
+ return hw_mod_qsl_qen_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_qen_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t *value)
+{
+ return hw_mod_qsl_qen_mod(be, field, index, value, 1);
+}
+
+int hw_mod_qsl_unmq_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = QSL_QNMQ_ENTRIES;
+ if ((start_idx + count) > QSL_QNMQ_ENTRIES)
+ return error_index_too_large(__func__);
+ return be->iface->qsl_unmq_flush(be->be_dev, &be->qsl, start_idx,
+ count);
+}
+
+static int hw_mod_qsl_unmq_mod(struct flow_api_backend_s *be,
+ enum hw_qsl_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ if (index >= QSL_QNMQ_ENTRIES)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 7:
+ switch (field) {
+ case HW_QSL_UNMQ_DEST_QUEUE:
+ get_set(&be->qsl.v7.unmq[index].dest_queue, value, get);
+ break;
+ case HW_QSL_UNMQ_EN:
+ get_set(&be->qsl.v7.unmq[index].en, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 7 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_qsl_unmq_set(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t value)
+{
+ return hw_mod_qsl_unmq_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_qsl_unmq_get(struct flow_api_backend_s *be, enum hw_qsl_e field,
+ uint32_t index, uint32_t *value)
+{
+ return hw_mod_qsl_unmq_mod(be, field, index, value, 1);
+}
new file mode 100644
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_QSL_V7_H_
+#define _HW_MOD_QSL_V7_H_
+
+struct qsl_v7_rcp_s {
+ uint32_t discard;
+ uint32_t drop;
+ uint32_t tbl_lo;
+ uint32_t tbl_hi;
+ uint32_t tbl_idx;
+ uint32_t tbl_msk;
+ uint32_t lr;
+ uint32_t tsa;
+ uint32_t vli;
+};
+
+struct qsl_v7_qst_s {
+ uint32_t queue;
+ uint32_t en;
+ uint32_t tx_port;
+ uint32_t lre;
+ uint32_t tci;
+ uint32_t ven;
+};
+
+struct qsl_v7_qen_s {
+ uint32_t en;
+};
+
+struct qsl_v7_unmq_s {
+ uint32_t dest_queue;
+ uint32_t en;
+};
+
+struct hw_mod_qsl_v7_s {
+ struct qsl_v7_rcp_s *rcp;
+ struct qsl_v7_qst_s *qst;
+ struct qsl_v7_qen_s *qen;
+ struct qsl_v7_unmq_s *unmq;
+};
+
+#endif /* _HW_MOD_QSL_V7_H_ */
new file mode 100644
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "RMC"
+#define _VER_ be->rmc.ver
+
+bool hw_mod_rmc_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_rmc_present(be->be_dev);
+}
+
+int hw_mod_rmc_alloc(struct flow_api_backend_s *be)
+{
+ _VER_ = be->iface->get_rmc_version(be->be_dev);
+ NT_LOG(DBG, FILTER, "RMC MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+ VER_MINOR(_VER_));
+
+ switch (_VER_) {
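+ /* 0x10003 = version 1.3 (major 1, minor 3) */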
+ case 0x10003:
+ if (!callocate_mod(CAST_COMMON(&be->rmc), 1,
+ &be->rmc.v1_3.ctrl, 1, sizeof(struct rmc_v1_3_ctrl_s)))
+ return -1;
+ break;
+ /* end case 1.3 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+void hw_mod_rmc_free(struct flow_api_backend_s *be)
+{
+ if (be->rmc.base) {
+ free(be->rmc.base);
+ be->rmc.base = NULL;
+ }
+}
+
+int hw_mod_rmc_reset(struct flow_api_backend_s *be)
+{
+ /* Zero entire cache area */
+ ZERO_MOD_CACHE(&be->rmc);
+
+ NT_LOG(DBG, FILTER, "INIT RMC CTRL\n");
+ /* disable block stat, block keep alive */
+ hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_STATT, 1);
+ hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_KEEPA, 1);
+ hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_MAC_PORT,
+ 0xff); /* initially block all ports */
+ hw_mod_rmc_ctrl_set(be, HW_RMC_BLOCK_RPP_SLICE, 0xf);
+ return hw_mod_rmc_ctrl_flush(be);
+}
+
+int hw_mod_rmc_ctrl_flush(struct flow_api_backend_s *be)
+{
+ return be->iface->rmc_ctrl_flush(be->be_dev, &be->rmc);
+}
+
+static int hw_mod_rmc_ctrl_mod(struct flow_api_backend_s *be,
+ enum hw_rmc_e field, uint32_t *value, int get)
+{
+ switch (_VER_) {
+ case 0x10003:
+ switch (field) {
+ case HW_RMC_BLOCK_STATT:
+ get_set(&be->rmc.v1_3.ctrl->block_statt, value, get);
+ break;
+ case HW_RMC_BLOCK_KEEPA:
+ get_set(&be->rmc.v1_3.ctrl->block_keepa, value, get);
+ break;
+ case HW_RMC_BLOCK_RPP_SLICE:
+ get_set(&be->rmc.v1_3.ctrl->block_rpp_slice, value, get);
+ break;
+ case HW_RMC_BLOCK_MAC_PORT:
+ get_set(&be->rmc.v1_3.ctrl->block_mac_port, value, get);
+ break;
+ case HW_RMC_LAG_PHY_ODD_EVEN:
+ get_set(&be->rmc.v1_3.ctrl->lag_phy_odd_even, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 1.3 */
+
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_rmc_ctrl_set(struct flow_api_backend_s *be, enum hw_rmc_e field,
+ uint32_t value)
+{
+ return hw_mod_rmc_ctrl_mod(be, field, &value, 0);
+}
+
+int hw_mod_rmc_ctrl_get(struct flow_api_backend_s *be, enum hw_rmc_e field,
+ uint32_t *value)
+{
+ return hw_mod_rmc_ctrl_mod(be, field, value, 1);
+}
new file mode 100644
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_RMC_V1_3_H_
+#define _HW_MOD_RMC_V1_3_H_
+
+struct rmc_v1_3_ctrl_s {
+ uint32_t block_statt;
+ uint32_t block_keepa;
+ uint32_t block_rpp_slice;
+ uint32_t block_mac_port;
+ uint32_t lag_phy_odd_even;
+};
+
+struct hw_mod_rmc_v1_3_s {
+ struct rmc_v1_3_ctrl_s *ctrl;
+};
+
+#endif /* _HW_MOD_RMC_V1_3_H_ */
new file mode 100644
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "ROA"
+#define _VER_ be->roa.ver
+
+#define ROA_LAGCFG_ENTRIES 512
+
+bool hw_mod_roa_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_roa_present(be->be_dev);
+}
+
+int hw_mod_roa_alloc(struct flow_api_backend_s *be)
+{
+ int nb;
+
+ _VER_ = be->iface->get_roa_version(be->be_dev);
+ NT_LOG(DBG, FILTER, "ROA MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+ VER_MINOR(_VER_));
+
+ nb = be->iface->get_nb_roa_categories(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "roa_categories", _MOD_, _VER_);
+ be->roa.nb_tun_categories = (uint32_t)nb;
+
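+ /*
+ * Assumption: each tunnel header occupies four underlying ROA categories,
+ * so the count is exposed in tunnel-header units.
+ */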
+ be->roa.nb_tun_categories /= 4;
+
+ switch (_VER_) {
+ case 6:
+ be->roa.nb_lag_entries = ROA_LAGCFG_ENTRIES;
+ if (!callocate_mod(CAST_COMMON(&be->roa), 4,
+ &be->roa.v6.tunhdr,
+ be->roa.nb_tun_categories,
+ sizeof(struct roa_v6_tunhdr_s),
+ &be->roa.v6.tuncfg,
+ be->roa.nb_tun_categories,
+ sizeof(struct roa_v6_tuncfg_s),
+ &be->roa.v6.config,
+ 1,
+ sizeof(struct roa_v6_config_s),
+ &be->roa.v6.lagcfg,
+ be->roa.nb_lag_entries,
+ sizeof(struct roa_v6_lagcfg_s)))
+ return -1;
+ break;
+ /* end case 6 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+ return 0;
+}
+
+void hw_mod_roa_free(struct flow_api_backend_s *be)
+{
+ if (be->roa.base) {
+ free(be->roa.base);
+ be->roa.base = NULL;
+ }
+}
+
+int hw_mod_roa_reset(struct flow_api_backend_s *be)
+{
+ int err = 0;
+
+ /* Zero entire cache area */
+ ZERO_MOD_CACHE(&be->roa);
+
+ NT_LOG(DBG, FILTER, "INIT ROA TUNHDR\n");
+ err |= hw_mod_roa_tunhdr_flush(be, 0, ALL_ENTRIES);
+
+ NT_LOG(DBG, FILTER, "INIT ROA TUNCFG\n");
+ err |= hw_mod_roa_tuncfg_flush(be, 0, ALL_ENTRIES);
+
+ NT_LOG(DBG, FILTER, "INIT ROA CONFIG\n");
+ err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_RECIRCULATE, 1);
+ err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_NORMAL_PCKS, 1);
+ err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT0, 1);
+ err |= hw_mod_roa_config_set(be, HW_ROA_CONFIG_FWD_TXPORT1, 1);
+ err |= hw_mod_roa_config_flush(be);
+
+ NT_LOG(DBG, FILTER, "INIT ROA LAGCFG\n");
+ err |= hw_mod_roa_lagcfg_flush(be, 0, ALL_ENTRIES);
+
+ return err;
+}
+
+int hw_mod_roa_tunhdr_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->roa.nb_tun_categories;
+ if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+ return error_index_too_large(__func__);
+ return be->iface->roa_tunhdr_flush(be->be_dev, &be->roa, start_idx,
+ count);
+}
+
+static int hw_mod_roa_tunhdr_mod(struct flow_api_backend_s *be,
+ enum hw_roa_e field, uint32_t index,
+ uint32_t word_off, uint32_t *value, int get)
+{
+ int rv = 0;
+ if (index >= be->roa.nb_tun_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 6:
+ switch (field) {
+ case HW_ROA_TUNHDR_COMPARE:
+ rv = do_compare_indexes(be->roa.v6.tunhdr,
+ sizeof(struct roa_v6_tunhdr_s), index, word_off,
+ be->roa.nb_tun_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_ROA_TUNHDR:
+ get_set(&be->roa.v6.tunhdr[index].tunnel_hdr[word_off],
+ value, get);
+ break;
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 6 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_roa_tunhdr_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t index, uint32_t word_off, uint32_t value)
+{
+ return hw_mod_roa_tunhdr_mod(be, field, index, word_off, &value, 0);
+}
+
+int hw_mod_roa_tunhdr_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t index, uint32_t word_off, uint32_t *value)
+{
+ return hw_mod_roa_tunhdr_mod(be, field, index, word_off, value, 1);
+}
+
+int hw_mod_roa_tuncfg_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->roa.nb_tun_categories;
+ if ((unsigned int)(start_idx + count) > be->roa.nb_tun_categories)
+ return error_index_too_large(__func__);
+ return be->iface->roa_tuncfg_flush(be->be_dev, &be->roa, start_idx,
+ count);
+}
+
+static int hw_mod_roa_tuncfg_mod(struct flow_api_backend_s *be,
+ enum hw_roa_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ int rv = 0;
+ if (index >= be->roa.nb_tun_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 6:
+ switch (field) {
+ case HW_ROA_TUNCFG_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->roa.v6.tuncfg[index], (uint8_t)*value,
+ sizeof(struct roa_v6_tuncfg_s));
+ break;
+ case HW_ROA_TUNCFG_FIND:
+ rv = find_equal_index(be->roa.v6.tuncfg,
+ sizeof(struct roa_v6_tuncfg_s), index, *value,
+ be->roa.nb_tun_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_ROA_TUNCFG_COMPARE:
+ rv = do_compare_indexes(be->roa.v6.tuncfg,
+ sizeof(struct roa_v6_tuncfg_s), index, *value,
+ be->roa.nb_tun_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_ROA_TUNCFG_TUN_LEN:
+ get_set(&be->roa.v6.tuncfg[index].tun_len, value, get);
+ break;
+ case HW_ROA_TUNCFG_TUN_TYPE:
+ get_set(&be->roa.v6.tuncfg[index].tun_type, value, get);
+ break;
+ case HW_ROA_TUNCFG_TUN_VLAN:
+ get_set(&be->roa.v6.tuncfg[index].tun_vlan, value, get);
+ break;
+ case HW_ROA_TUNCFG_IP_TYPE:
+ get_set(&be->roa.v6.tuncfg[index].ip_type, value, get);
+ break;
+ case HW_ROA_TUNCFG_IPCS_UPD:
+ get_set(&be->roa.v6.tuncfg[index].ipcs_upd, value, get);
+ break;
+ case HW_ROA_TUNCFG_IPCS_PRECALC:
+ get_set(&be->roa.v6.tuncfg[index].ipcs_precalc, value, get);
+ break;
+ case HW_ROA_TUNCFG_IPTL_UPD:
+ get_set(&be->roa.v6.tuncfg[index].iptl_upd, value, get);
+ break;
+ case HW_ROA_TUNCFG_IPTL_PRECALC:
+ get_set(&be->roa.v6.tuncfg[index].iptl_precalc, value, get);
+ break;
+ case HW_ROA_TUNCFG_VXLAN_UDP_LEN_UPD:
+ get_set(&be->roa.v6.tuncfg[index].vxlan_udp_len_upd,
+ value, get);
+ break;
+ case HW_ROA_TUNCFG_TX_LAG_IX:
+ get_set(&be->roa.v6.tuncfg[index].tx_lag_ix, value, get);
+ break;
+ case HW_ROA_TUNCFG_RECIRCULATE:
+ get_set(&be->roa.v6.tuncfg[index].recirculate, value, get);
+ break;
+ case HW_ROA_TUNCFG_PUSH_TUNNEL:
+ get_set(&be->roa.v6.tuncfg[index].push_tunnel, value, get);
+ break;
+ case HW_ROA_TUNCFG_RECIRC_PORT:
+ get_set(&be->roa.v6.tuncfg[index].recirc_port, value, get);
+ break;
+ case HW_ROA_TUNCFG_RECIRC_BYPASS:
+ get_set(&be->roa.v6.tuncfg[index].recirc_bypass, value, get);
+ break;
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 6 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_roa_tuncfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t index, uint32_t value)
+{
+ return hw_mod_roa_tuncfg_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_roa_tuncfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t index, uint32_t *value)
+{
+ return hw_mod_roa_tuncfg_mod(be, field, index, value, 1);
+}
+
+int hw_mod_roa_config_flush(struct flow_api_backend_s *be)
+{
+ return be->iface->roa_config_flush(be->be_dev, &be->roa);
+}
+
+static int hw_mod_roa_config_mod(struct flow_api_backend_s *be,
+ enum hw_roa_e field, uint32_t *value, int get)
+{
+ switch (_VER_) {
+ case 6:
+ switch (field) {
+ case HW_ROA_CONFIG_FWD_RECIRCULATE:
+ get_set(&be->roa.v6.config->fwd_recirculate, value, get);
+ break;
+ case HW_ROA_CONFIG_FWD_NORMAL_PCKS:
+ get_set(&be->roa.v6.config->fwd_normal_pcks, value, get);
+ break;
+ case HW_ROA_CONFIG_FWD_TXPORT0:
+ get_set(&be->roa.v6.config->fwd_txport0, value, get);
+ break;
+ case HW_ROA_CONFIG_FWD_TXPORT1:
+ get_set(&be->roa.v6.config->fwd_txport1, value, get);
+ break;
+ case HW_ROA_CONFIG_FWD_CELLBUILDER_PCKS:
+ get_set(&be->roa.v6.config->fwd_cellbuilder_pcks, value, get);
+ break;
+ case HW_ROA_CONFIG_FWD_NON_NORMAL_PCKS:
+ get_set(&be->roa.v6.config->fwd_non_normal_pcks, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 6 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_roa_config_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t value)
+{
+ return hw_mod_roa_config_mod(be, field, &value, 0);
+}
+
+int hw_mod_roa_config_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t *value)
+{
+ return hw_mod_roa_config_mod(be, field, value, 1);
+}
+
+int hw_mod_roa_lagcfg_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->roa.nb_lag_entries;
+ if ((unsigned int)(start_idx + count) > be->roa.nb_lag_entries)
+ return error_index_too_large(__func__);
+ return be->iface->roa_lagcfg_flush(be->be_dev, &be->roa, start_idx,
+ count);
+}
+
+static int hw_mod_roa_lagcfg_mod(struct flow_api_backend_s *be,
+ enum hw_roa_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ if (index >= be->roa.nb_lag_entries)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 6:
+ switch (field) {
+ case HW_ROA_LAGCFG_TXPHY_PORT:
+ get_set(&be->roa.v6.lagcfg[index].txphy_port, value, get);
+ break;
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 6 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_roa_lagcfg_set(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t index, uint32_t value)
+{
+ return hw_mod_roa_lagcfg_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_roa_lagcfg_get(struct flow_api_backend_s *be, enum hw_roa_e field,
+ uint32_t index, uint32_t *value)
+{
+ return hw_mod_roa_lagcfg_mod(be, field, index, value, 1);
+}
new file mode 100644
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_ROA_V6_H_
+#define _HW_MOD_ROA_V6_H_
+
+struct roa_v6_tunhdr_s {
+ uint32_t tunnel_hdr[4 * 4];
+};
+
+struct roa_v6_tuncfg_s {
+ uint32_t tun_len;
+ uint32_t tun_type;
+ uint32_t tun_vlan;
+ uint32_t ip_type;
+ uint32_t ipcs_upd;
+ uint32_t ipcs_precalc;
+ uint32_t iptl_upd;
+ uint32_t iptl_precalc;
+ uint32_t vxlan_udp_len_upd;
+ uint32_t tx_lag_ix;
+ uint32_t recirculate;
+ uint32_t push_tunnel;
+ uint32_t recirc_port;
+ uint32_t recirc_bypass;
+};
+
+struct roa_v6_config_s {
+ uint32_t fwd_recirculate;
+ uint32_t fwd_normal_pcks;
+ uint32_t fwd_txport0;
+ uint32_t fwd_txport1;
+ uint32_t fwd_cellbuilder_pcks;
+ uint32_t fwd_non_normal_pcks;
+};
+
+struct roa_v6_lagcfg_s {
+ uint32_t txphy_port;
+};
+
+struct hw_mod_roa_v6_s {
+ struct roa_v6_tunhdr_s *tunhdr;
+ struct roa_v6_tuncfg_s *tuncfg;
+ struct roa_v6_config_s *config;
+ struct roa_v6_lagcfg_s *lagcfg;
+};
+
+#endif /* _HW_MOD_ROA_V6_H_ */
new file mode 100644
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC"
+#define _VER_ be->slc.ver
+
+bool hw_mod_slc_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_slc_present(be->be_dev);
+}
+
+int hw_mod_slc_alloc(struct flow_api_backend_s *be)
+{
+ _VER_ = be->iface->get_slc_version(be->be_dev);
+ NT_LOG(DBG, FILTER, "SLC MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+ VER_MINOR(_VER_));
+
+ switch (_VER_) {
+ case 1:
+ if (!callocate_mod(CAST_COMMON(&be->slc), 1,
+ &be->slc.v1.rcp,
+ be->max_categories,
+ sizeof(struct slc_v1_rcp_s)))
+ return -1;
+ break;
+ /* end case 1 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+void hw_mod_slc_free(struct flow_api_backend_s *be)
+{
+ if (be->slc.base) {
+ free(be->slc.base);
+ be->slc.base = NULL;
+ }
+}
+
+int hw_mod_slc_reset(struct flow_api_backend_s *be)
+{
+ /* Zero entire cache area */
+ ZERO_MOD_CACHE(&be->slc);
+
+ NT_LOG(DBG, FILTER, "INIT SLC RCP\n");
+ return hw_mod_slc_rcp_flush(be, 0, be->max_categories);
+}
+
+int hw_mod_slc_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->max_categories;
+ if ((unsigned int)(start_idx + count) > be->max_categories)
+ return error_index_too_large(__func__);
+ return be->iface->slc_rcp_flush(be->be_dev, &be->slc, start_idx, count);
+}
+
+static int hw_mod_slc_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_slc_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ int rv = 0;
+ if (index >= be->max_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 1:
+ switch (field) {
+ case HW_SLC_RCP_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->slc.v1.rcp[index], (uint8_t)*value,
+ sizeof(struct slc_v1_rcp_s));
+ break;
+ case HW_SLC_RCP_FIND:
+ rv = find_equal_index(be->slc.v1.rcp,
+ sizeof(struct slc_v1_rcp_s), index, *value,
+ be->max_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_SLC_RCP_COMPARE:
+ rv = do_compare_indexes(be->slc.v1.rcp,
+ sizeof(struct slc_v1_rcp_s), index, *value,
+ be->max_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_SLC_RCP_SLC_EN:
+ get_set(&be->slc.v1.rcp[index].tail_slc_en, value, get);
+ break;
+ case HW_SLC_RCP_DYN:
+ get_set(&be->slc.v1.rcp[index].tail_dyn, value, get);
+ break;
+ case HW_SLC_RCP_OFS:
+ get_set_signed(&be->slc.v1.rcp[index].tail_ofs, value, get);
+ break;
+ case HW_SLC_RCP_PCAP:
+ get_set(&be->slc.v1.rcp[index].pcap, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end case 1 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_slc_rcp_set(struct flow_api_backend_s *be, enum hw_slc_e field,
+ uint32_t index, uint32_t value)
+{
+ return hw_mod_slc_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_slc_rcp_get(struct flow_api_backend_s *be, enum hw_slc_e field,
+ uint32_t index, uint32_t *value)
+{
+ return hw_mod_slc_rcp_mod(be, field, index, value, 1);
+}
new file mode 100644
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "SLC_LR"
+#define _VER_ be->slc_lr.ver
+
+bool hw_mod_slc_lr_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_slc_lr_present(be->be_dev);
+}
+
+int hw_mod_slc_lr_alloc(struct flow_api_backend_s *be)
+{
+ _VER_ = be->iface->get_slc_lr_version(be->be_dev);
+ NT_LOG(DBG, FILTER, "SLC LR MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+ VER_MINOR(_VER_));
+
+ switch (_VER_) {
+ case 2:
+ if (!callocate_mod(CAST_COMMON(&be->slc_lr), 1,
+ &be->slc_lr.v2.rcp,
+ be->max_categories,
+ sizeof(struct slc_lr_v2_rcp_s)))
+ return -1;
+ break;
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+void hw_mod_slc_lr_free(struct flow_api_backend_s *be)
+{
+ if (be->slc_lr.base) {
+ free(be->slc_lr.base);
+ be->slc_lr.base = NULL;
+ }
+}
+
+int hw_mod_slc_lr_reset(struct flow_api_backend_s *be)
+{
+ /* Zero entire cache area */
+ ZERO_MOD_CACHE(&be->slc_lr);
+
+ NT_LOG(DBG, FILTER, "INIT SLC LR RCP\n");
+ return hw_mod_slc_lr_rcp_flush(be, 0, be->max_categories);
+}
+
+int hw_mod_slc_lr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->max_categories;
+ if ((unsigned int)(start_idx + count) > be->max_categories)
+ return error_index_too_large(__func__);
+ return be->iface->slc_lr_rcp_flush(be->be_dev, &be->slc_lr, start_idx,
+ count);
+}
+
+static int hw_mod_slc_lr_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_slc_lr_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ int rv = 0;
+ if (index >= be->max_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 2:
+ switch (field) {
+ case HW_SLC_LR_RCP_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->slc_lr.v2.rcp[index], (uint8_t)*value,
+ sizeof(struct slc_lr_v2_rcp_s));
+ break;
+ case HW_SLC_LR_RCP_FIND:
+ rv = find_equal_index(be->slc_lr.v2.rcp,
+ sizeof(struct slc_lr_v2_rcp_s), index, *value,
+ be->max_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_SLC_LR_RCP_COMPARE:
+ rv = do_compare_indexes(be->slc_lr.v2.rcp,
+ sizeof(struct slc_lr_v2_rcp_s), index, *value,
+ be->max_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_SLC_LR_RCP_SLC_EN:
+ get_set(&be->slc_lr.v2.rcp[index].tail_slc_en, value, get);
+ break;
+ case HW_SLC_LR_RCP_DYN:
+ get_set(&be->slc_lr.v2.rcp[index].tail_dyn, value, get);
+ break;
+ case HW_SLC_LR_RCP_OFS:
+ get_set_signed(&be->slc_lr.v2.rcp[index].tail_ofs,
+ value, get);
+ break;
+ case HW_SLC_LR_RCP_PCAP:
+ get_set(&be->slc_lr.v2.rcp[index].pcap, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_slc_lr_rcp_set(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+ uint32_t index, uint32_t value)
+{
+ return hw_mod_slc_lr_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_slc_lr_rcp_get(struct flow_api_backend_s *be, enum hw_slc_lr_e field,
+ uint32_t index, uint32_t *value)
+{
+ return hw_mod_slc_lr_rcp_mod(be, field, index, value, 1);
+}
new file mode 100644
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_LR_V2_H_
+#define _HW_MOD_SLC_LR_V2_H_
+
+struct slc_lr_v2_rcp_s {
+ uint32_t tail_slc_en;
+ uint32_t tail_dyn;
+ int32_t tail_ofs;
+ uint32_t pcap;
+};
+
+struct hw_mod_slc_lr_v2_s {
+ struct slc_lr_v2_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_LR_V2_H_ */
new file mode 100644
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_SLC_V1_H_
+#define _HW_MOD_SLC_V1_H_
+
+struct slc_v1_rcp_s {
+ uint32_t tail_slc_en;
+ uint32_t tail_dyn;
+ int32_t tail_ofs;
+ uint32_t pcap;
+};
+
+struct hw_mod_slc_v1_s {
+ struct slc_v1_rcp_s *rcp;
+};
+
+#endif /* _HW_MOD_SLC_V1_H_ */
new file mode 100644
@@ -0,0 +1,983 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "flow_api_backend.h"
+
+#define _MOD_ "TPE"
+#define _VER_ be->tpe.ver
+
+bool hw_mod_tpe_present(struct flow_api_backend_s *be)
+{
+ return be->iface->get_tpe_present(be->be_dev);
+}
+
+int hw_mod_tpe_alloc(struct flow_api_backend_s *be)
+{
+ int nb;
+
+ _VER_ = be->iface->get_tpe_version(be->be_dev);
+ NT_LOG(DBG, FILTER, _MOD_ " MODULE VERSION %i.%i\n", VER_MAJOR(_VER_),
+ VER_MINOR(_VER_));
+
+ nb = be->iface->get_nb_tpe_categories(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "tpe_categories", _MOD_, _VER_);
+ be->tpe.nb_rcp_categories = (uint32_t)nb;
+
+ be->tpe.nb_ifr_categories = 0;
+ if (_VER_ > 1) {
+ nb = be->iface->get_nb_tpe_ifr_categories(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "tpe_ifr_categories", _MOD_, _VER_);
+ be->tpe.nb_ifr_categories = (uint32_t)nb;
+ }
+
+ nb = be->iface->get_nb_tx_cpy_writers(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "tx_cpy_writers", _MOD_, _VER_);
+ be->tpe.nb_cpy_writers = (uint32_t)nb;
+
+ nb = be->iface->get_nb_tx_rpl_depth(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "tx_rpl_depth", _MOD_, _VER_);
+ be->tpe.nb_rpl_depth = (uint32_t)nb;
+
+ nb = be->iface->get_nb_tx_rpl_ext_categories(be->be_dev);
+ if (nb <= 0)
+ return error_resource_count(__func__, "tx_rpl_ext_categories", _MOD_, _VER_);
+ be->tpe.nb_rpl_ext_categories = (uint32_t)nb;
+
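+ /* v2 extends the v1 layout with the RPP_IFR and IFR recipe tables */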
+ switch (_VER_) {
+ case 1:
+ if (!callocate_mod(CAST_COMMON(&be->tpe), 8,
+ &be->tpe.v1.rpp_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_rpp_v0_rcp_s),
+ &be->tpe.v1.ins_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_ins_v1_rcp_s),
+ &be->tpe.v1.rpl_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_rpl_v2_rcp_s),
+ &be->tpe.v1.rpl_ext, be->tpe.nb_rpl_ext_categories,
+ sizeof(struct tpe_v1_rpl_v2_ext_s),
+ &be->tpe.v1.rpl_rpl, be->tpe.nb_rpl_depth,
+ sizeof(struct tpe_v1_rpl_v2_rpl_s),
+ &be->tpe.v1.cpy_rcp,
+ be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_cpy_v1_rcp_s),
+ &be->tpe.v1.hfu_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_hfu_v1_rcp_s),
+ &be->tpe.v1.csu_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_csu_v0_rcp_s)))
+ return -1;
+ break;
+ case 2:
+ if (!callocate_mod(CAST_COMMON(&be->tpe), 10,
+ &be->tpe.v2.rpp_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_rpp_v0_rcp_s),
+ &be->tpe.v2.rpp_ifr_rcp, be->tpe.nb_ifr_categories,
+ sizeof(struct tpe_v2_rpp_v1_ifr_rcp_s),
+ &be->tpe.v2.ifr_rcp, be->tpe.nb_ifr_categories,
+ sizeof(struct tpe_v2_ifr_v1_rcp_s),
+ &be->tpe.v2.ins_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_ins_v1_rcp_s),
+ &be->tpe.v2.rpl_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_rpl_v2_rcp_s),
+ &be->tpe.v2.rpl_ext, be->tpe.nb_rpl_ext_categories,
+ sizeof(struct tpe_v1_rpl_v2_ext_s),
+ &be->tpe.v2.rpl_rpl, be->tpe.nb_rpl_depth,
+ sizeof(struct tpe_v1_rpl_v2_rpl_s),
+ &be->tpe.v2.cpy_rcp,
+ be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_cpy_v1_rcp_s),
+ &be->tpe.v2.hfu_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_hfu_v1_rcp_s),
+ &be->tpe.v2.csu_rcp, be->tpe.nb_rcp_categories,
+ sizeof(struct tpe_v1_csu_v0_rcp_s)))
+ return -1;
+ break;
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+void hw_mod_tpe_free(struct flow_api_backend_s *be)
+{
+ if (be->tpe.base) {
+ free(be->tpe.base);
+ be->tpe.base = NULL;
+ }
+}
+
+int hw_mod_tpe_reset(struct flow_api_backend_s *be)
+{
+ int err = 0;
+
+ /* Zero entire cache area */
+ ZERO_MOD_CACHE(&be->tpe);
+
+ NT_LOG(DBG, FILTER, "INIT TPE\n");
+ err |= hw_mod_tpe_rpp_rcp_flush(be, 0, ALL_ENTRIES);
+ err |= hw_mod_tpe_ins_rcp_flush(be, 0, ALL_ENTRIES);
+ err |= hw_mod_tpe_rpl_rcp_flush(be, 0, ALL_ENTRIES);
+ err |= hw_mod_tpe_rpl_ext_flush(be, 0, ALL_ENTRIES);
+ err |= hw_mod_tpe_rpl_rpl_flush(be, 0, ALL_ENTRIES);
+ err |= hw_mod_tpe_cpy_rcp_flush(be, 0, ALL_ENTRIES);
+ err |= hw_mod_tpe_hfu_rcp_flush(be, 0, ALL_ENTRIES);
+ err |= hw_mod_tpe_csu_rcp_flush(be, 0, ALL_ENTRIES);
+
+ if (_VER_ == 2) {
+ err |= hw_mod_tpe_rpp_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+ err |= hw_mod_tpe_ifr_rcp_flush(be, 0, ALL_ENTRIES);
+ }
+
+ return err;
+}
+
+/*
+ * RPP_IFR_RCP
+ */
+
+int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->tpe.nb_ifr_categories;
+ if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+ return error_index_too_large(__func__);
+ return be->iface->tpe_rpp_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+ count);
+}
+
+static int hw_mod_tpe_rpp_ifr_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ if (index >= be->tpe.nb_ifr_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 2:
+ switch (field) {
+ case HW_TPE_IFR_RCP_EN:
+ get_set(&be->tpe.v2.rpp_ifr_rcp[index].en, value, get);
+ break;
+
+ case HW_TPE_IFR_RCP_MTU:
+ get_set(&be->tpe.v2.rpp_ifr_rcp[index].mtu, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_rpp_ifr_rcp_set(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, int index, uint32_t value)
+{
+ return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpp_ifr_rcp_get(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, int index, uint32_t *value)
+{
+ return hw_mod_tpe_rpp_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPP_RCP
+ */
+
+int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->tpe.nb_rcp_categories;
+ if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx,
+ count);
+}
+
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ int rv = 0;
+ if (index >= be->tpe.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 1:
+ case 2:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->tpe.v1.rpp_rcp[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_rpp_v0_rcp_s));
+ break;
+ case HW_TPE_FIND:
+ rv = find_equal_index(be->tpe.v1.rpp_rcp,
+ sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+ be->tpe.nb_rcp_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_COMPARE:
+ rv = do_compare_indexes(be->tpe.v1.rpp_rcp,
+ sizeof(struct tpe_v1_rpp_v0_rcp_s), index, *value,
+ be->tpe.nb_rcp_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_RPP_RCP_EXP:
+ get_set(&be->tpe.v1.rpp_rcp[index].exp, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpp_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_tpe_rpp_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * IFR_RCP
+ */
+
+int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->tpe.nb_ifr_categories;
+ if ((unsigned int)(start_idx + count) > be->tpe.nb_ifr_categories)
+ return error_index_too_large(__func__);
+ return be->iface->tpe_ifr_rcp_flush(be->be_dev, &be->tpe, start_idx,
+ count);
+}
+
+static int hw_mod_tpe_ifr_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ if (index >= be->tpe.nb_ifr_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 2:
+ switch (field) {
+ case HW_TPE_IFR_RCP_EN:
+ get_set(&be->tpe.v2.ifr_rcp[index].en, value, get);
+ break;
+
+ case HW_TPE_IFR_RCP_MTU:
+ get_set(&be->tpe.v2.ifr_rcp[index].mtu, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_ifr_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_tpe_ifr_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_ifr_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_tpe_ifr_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * INS_RCP
+ */
+
+int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->tpe.nb_rcp_categories;
+ if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx,
+ count);
+}
+
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ int rv = 0;
+ if (index >= be->tpe.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 1:
+ case 2:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->tpe.v1.ins_rcp[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_ins_v1_rcp_s));
+ break;
+ case HW_TPE_FIND:
+ rv = find_equal_index(be->tpe.v1.ins_rcp,
+ sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+ be->tpe.nb_rcp_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_COMPARE:
+ rv = do_compare_indexes(be->tpe.v1.ins_rcp,
+ sizeof(struct tpe_v1_ins_v1_rcp_s), index, *value,
+ be->tpe.nb_rcp_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_INS_RCP_DYN:
+ get_set(&be->tpe.v1.ins_rcp[index].dyn, value, get);
+ break;
+ case HW_TPE_INS_RCP_OFS:
+ get_set(&be->tpe.v1.ins_rcp[index].ofs, value, get);
+ break;
+ case HW_TPE_INS_RCP_LEN:
+ get_set(&be->tpe.v1.ins_rcp[index].len, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end cases 1 and 2 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_ins_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_tpe_ins_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RCP
+ */
+
+int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->tpe.nb_rcp_categories;
+ if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx,
+ count);
+}
+
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ int rv = 0;
+ if (index >= be->tpe.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 1:
+ case 2:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->tpe.v1.rpl_rcp[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_rpl_v2_rcp_s));
+ break;
+ case HW_TPE_FIND:
+ rv = find_equal_index(be->tpe.v1.rpl_rcp,
+ sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+ be->tpe.nb_rcp_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_COMPARE:
+ rv = do_compare_indexes(be->tpe.v1.rpl_rcp,
+ sizeof(struct tpe_v1_rpl_v2_rcp_s), index, *value,
+ be->tpe.nb_rcp_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_RPL_RCP_DYN:
+ get_set(&be->tpe.v1.rpl_rcp[index].dyn, value, get);
+ break;
+ case HW_TPE_RPL_RCP_OFS:
+ get_set(&be->tpe.v1.rpl_rcp[index].ofs, value, get);
+ break;
+ case HW_TPE_RPL_RCP_LEN:
+ get_set(&be->tpe.v1.rpl_rcp[index].len, value, get);
+ break;
+ case HW_TPE_RPL_RCP_RPL_PTR:
+ get_set(&be->tpe.v1.rpl_rcp[index].rpl_ptr, value, get);
+ break;
+ case HW_TPE_RPL_RCP_EXT_PRIO:
+ get_set(&be->tpe.v1.rpl_rcp[index].ext_prio, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+ /* end cases 1 and 2 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpl_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_tpe_rpl_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_EXT
+ */
+
+int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->tpe.nb_rpl_ext_categories;
+ if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_ext_categories)
+ return error_index_too_large(__func__);
+ return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx,
+ count);
+}
+
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ int rv = 0;
+ if (index >= be->tpe.nb_rpl_ext_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 1:
+ case 2:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->tpe.v1.rpl_ext[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_rpl_v2_ext_s));
+ break;
+ case HW_TPE_FIND:
+ rv = find_equal_index(be->tpe.v1.rpl_ext,
+ sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+ be->tpe.nb_rpl_ext_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_COMPARE:
+ rv = do_compare_indexes(be->tpe.v1.rpl_ext,
+ sizeof(struct tpe_v1_rpl_v2_ext_s), index, *value,
+ be->tpe.nb_rpl_ext_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_RPL_EXT_RPL_PTR:
+ get_set(&be->tpe.v1.rpl_ext[index].rpl_ptr, value, get);
+ break;
+ case HW_TPE_RPL_EXT_META_RPL_LEN:
+ get_set(&be->tpe.v1.rpl_ext[index].meta_rpl_len, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+	/* end case 1/2 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_rpl_ext_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_tpe_rpl_ext_mod(be, field, index, value, 1);
+}
+
+/*
+ * RPL_RPL
+ */
+
+int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->tpe.nb_rpl_depth;
+ if ((unsigned int)(start_idx + count) > be->tpe.nb_rpl_depth)
+ return error_index_too_large(__func__);
+ return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx,
+ count);
+}
+
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ int rv = 0;
+ if (index >= be->tpe.nb_rpl_depth)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 1:
+ case 2:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->tpe.v1.rpl_rpl[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_rpl_v2_rpl_s));
+ break;
+ case HW_TPE_FIND:
+ rv = find_equal_index(be->tpe.v1.rpl_rpl,
+ sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+ be->tpe.nb_rpl_depth, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_COMPARE:
+ rv = do_compare_indexes(be->tpe.v1.rpl_rpl,
+ sizeof(struct tpe_v1_rpl_v2_rpl_s), index, *value,
+ be->tpe.nb_rpl_depth, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_RPL_RPL_VALUE:
+ if (get)
+ memcpy(value, be->tpe.v1.rpl_rpl[index].value,
+ sizeof(uint32_t) * 4);
+ else
+ memcpy(be->tpe.v1.rpl_rpl[index].value, value,
+ sizeof(uint32_t) * 4);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+	/* end case 1/2 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
+int hw_mod_tpe_rpl_rpl_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 1);
+}
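+
+/*
+ * Usage sketch (illustrative only): unlike the scalar TPE fields,
+ * HW_TPE_RPL_RPL_VALUE transfers a full 16-byte replacement chunk, so
+ * hw_mod_tpe_rpl_rpl_set()/get() take a pointer to four 32-bit words
+ * rather than a single value:
+ *
+ *	uint32_t chunk[4] = { 0, 0, 0, 0 };
+ *	hw_mod_tpe_rpl_rpl_set(be, HW_TPE_RPL_RPL_VALUE, idx, chunk);
+ *	hw_mod_tpe_rpl_rpl_flush(be, idx, 1);
+ *
+ * 'idx' and the chunk contents are caller-chosen.
+ */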
+
+/*
+ * CPY_RCP
+ */
+
+int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ const uint32_t cpy_size =
+ be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+ if (count == ALL_ENTRIES)
+ count = cpy_size;
+ if ((unsigned int)(start_idx + count) > cpy_size)
+ return error_index_too_large(__func__);
+ return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx,
+ count);
+}
+
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ const uint32_t cpy_size =
+ be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+ int rv = 0;
+ if (index >= cpy_size)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 1:
+ case 2:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->tpe.v1.cpy_rcp[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_cpy_v1_rcp_s));
+ break;
+ case HW_TPE_FIND:
+ rv = find_equal_index(be->tpe.v1.cpy_rcp,
+ sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+ cpy_size, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_COMPARE:
+ rv = do_compare_indexes(be->tpe.v1.cpy_rcp,
+ sizeof(struct tpe_v1_cpy_v1_rcp_s), index, *value,
+ cpy_size, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_CPY_RCP_READER_SELECT:
+ get_set(&be->tpe.v1.cpy_rcp[index].reader_select, value, get);
+ break;
+ case HW_TPE_CPY_RCP_DYN:
+ get_set(&be->tpe.v1.cpy_rcp[index].dyn, value, get);
+ break;
+ case HW_TPE_CPY_RCP_OFS:
+ get_set(&be->tpe.v1.cpy_rcp[index].ofs, value, get);
+ break;
+ case HW_TPE_CPY_RCP_LEN:
+ get_set(&be->tpe.v1.cpy_rcp[index].len, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+	/* end case 1/2 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_cpy_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_tpe_cpy_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * HFU_RCP
+ */
+
+int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->tpe.nb_rcp_categories;
+ if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+ count);
+}
+
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ int rv = 0;
+ if (index >= be->tpe.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 1:
+ case 2:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->tpe.v1.hfu_rcp[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_hfu_v1_rcp_s));
+ break;
+ case HW_TPE_FIND:
+ rv = find_equal_index(be->tpe.v1.hfu_rcp,
+ sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+ be->tpe.nb_rcp_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_COMPARE:
+ rv = do_compare_indexes(be->tpe.v1.hfu_rcp,
+ sizeof(struct tpe_v1_hfu_v1_rcp_s), index, *value,
+ be->tpe.nb_rcp_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_HFU_RCP_LEN_A_WR:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_a_wr, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_a_outer_l4_len,
+ value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_dyn, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_a_pos_ofs, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_dyn, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_a_add_ofs, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_a_sub_dyn, value, get);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_B_WR:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_b_wr, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_dyn, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_b_pos_ofs, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_dyn, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_b_add_ofs, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_b_sub_dyn, value, get);
+ break;
+
+ case HW_TPE_HFU_RCP_LEN_C_WR:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_c_wr, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_dyn, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_c_pos_ofs, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_dyn, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_c_add_ofs, value, get);
+ break;
+ case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+ get_set(&be->tpe.v1.hfu_rcp[index].len_c_sub_dyn, value, get);
+ break;
+
+ case HW_TPE_HFU_RCP_TTL_WR:
+ get_set(&be->tpe.v1.hfu_rcp[index].ttl_wr, value, get);
+ break;
+ case HW_TPE_HFU_RCP_TTL_POS_DYN:
+ get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_dyn, value, get);
+ break;
+ case HW_TPE_HFU_RCP_TTL_POS_OFS:
+ get_set(&be->tpe.v1.hfu_rcp[index].ttl_pos_ofs, value, get);
+ break;
+
+ case HW_TPE_HFU_RCP_CS_INF:
+ get_set(&be->tpe.v1.hfu_rcp[index].cs_inf, value, get);
+ break;
+ case HW_TPE_HFU_RCP_L3_PRT:
+ get_set(&be->tpe.v1.hfu_rcp[index].l3_prt, value, get);
+ break;
+ case HW_TPE_HFU_RCP_L3_FRAG:
+ get_set(&be->tpe.v1.hfu_rcp[index].l3_frag, value, get);
+ break;
+ case HW_TPE_HFU_RCP_TUNNEL:
+ get_set(&be->tpe.v1.hfu_rcp[index].tunnel, value, get);
+ break;
+ case HW_TPE_HFU_RCP_L4_PRT:
+ get_set(&be->tpe.v1.hfu_rcp[index].l4_prt, value, get);
+ break;
+ case HW_TPE_HFU_RCP_OUTER_L3_OFS:
+ get_set(&be->tpe.v1.hfu_rcp[index].outer_l3_ofs, value, get);
+ break;
+ case HW_TPE_HFU_RCP_OUTER_L4_OFS:
+ get_set(&be->tpe.v1.hfu_rcp[index].outer_l4_ofs, value, get);
+ break;
+ case HW_TPE_HFU_RCP_INNER_L3_OFS:
+ get_set(&be->tpe.v1.hfu_rcp[index].inner_l3_ofs, value, get);
+ break;
+ case HW_TPE_HFU_RCP_INNER_L4_OFS:
+ get_set(&be->tpe.v1.hfu_rcp[index].inner_l4_ofs, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+	/* end case 1/2 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_hfu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_tpe_hfu_rcp_mod(be, field, index, value, 1);
+}
+
+/*
+ * CSU_RCP
+ */
+
+int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx,
+ int count)
+{
+ if (count == ALL_ENTRIES)
+ count = be->tpe.nb_rcp_categories;
+ if ((unsigned int)(start_idx + count) > be->tpe.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx,
+ count);
+}
+
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be,
+ enum hw_tpe_e field, uint32_t index,
+ uint32_t *value, int get)
+{
+ int rv = 0;
+ if (index >= be->tpe.nb_rcp_categories)
+ return error_index_too_large(__func__);
+ switch (_VER_) {
+ case 1:
+ case 2:
+ switch (field) {
+ case HW_TPE_PRESET_ALL:
+ if (get)
+ return error_unsup_field(__func__);
+ memset(&be->tpe.v1.csu_rcp[index], (uint8_t)*value,
+ sizeof(struct tpe_v1_csu_v0_rcp_s));
+ break;
+ case HW_TPE_FIND:
+ rv = find_equal_index(be->tpe.v1.csu_rcp,
+ sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+ be->tpe.nb_rcp_categories, value, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_COMPARE:
+ rv = do_compare_indexes(be->tpe.v1.csu_rcp,
+ sizeof(struct tpe_v1_csu_v0_rcp_s), index, *value,
+ be->tpe.nb_rcp_categories, get, __func__);
+ if (rv != 0)
+ return rv;
+ break;
+ case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+ get_set(&be->tpe.v1.csu_rcp[index].ol3_cmd, value, get);
+ break;
+ case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+ get_set(&be->tpe.v1.csu_rcp[index].ol4_cmd, value, get);
+ break;
+ case HW_TPE_CSU_RCP_INNER_L3_CMD:
+ get_set(&be->tpe.v1.csu_rcp[index].il3_cmd, value, get);
+ break;
+ case HW_TPE_CSU_RCP_INNER_L4_CMD:
+ get_set(&be->tpe.v1.csu_rcp[index].il4_cmd, value, get);
+ break;
+
+ default:
+ return error_unsup_field(__func__);
+ }
+ break;
+	/* end case 1/2 */
+ default:
+ return error_unsup_ver(__func__, _MOD_, _VER_);
+ }
+
+ return 0;
+}
+
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t value)
+{
+ return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
+
+int hw_mod_tpe_csu_rcp_get(struct flow_api_backend_s *be, enum hw_tpe_e field,
+ int index, uint32_t *value)
+{
+ return hw_mod_tpe_csu_rcp_mod(be, field, index, value, 1);
+}
new file mode 100644
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V1_H_
+#define _HW_MOD_TPE_V1_H_
+
+struct tpe_v1_rpp_v0_rcp_s {
+ uint32_t exp;
+};
+
+struct tpe_v1_ins_v1_rcp_s {
+ uint32_t dyn;
+ uint32_t ofs;
+ uint32_t len;
+};
+
+struct tpe_v1_rpl_v2_rcp_s {
+ uint32_t dyn;
+ uint32_t ofs;
+ uint32_t len;
+ uint32_t rpl_ptr;
+ uint32_t ext_prio;
+};
+
+struct tpe_v1_rpl_v2_ext_s {
+ uint32_t rpl_ptr;
+ uint32_t meta_rpl_len; /* SW only */
+};
+
+struct tpe_v1_rpl_v2_rpl_s {
+ uint32_t value[4];
+};
+
+struct tpe_v1_cpy_v1_rcp_s {
+ uint32_t reader_select;
+ uint32_t dyn;
+ uint32_t ofs;
+ uint32_t len;
+};
+
+struct tpe_v1_hfu_v1_rcp_s {
+ uint32_t len_a_wr;
+ uint32_t len_a_outer_l4_len;
+ uint32_t len_a_pos_dyn;
+ uint32_t len_a_pos_ofs;
+ uint32_t len_a_add_dyn;
+ uint32_t len_a_add_ofs;
+ uint32_t len_a_sub_dyn;
+
+ uint32_t len_b_wr;
+ uint32_t len_b_pos_dyn;
+ uint32_t len_b_pos_ofs;
+ uint32_t len_b_add_dyn;
+ uint32_t len_b_add_ofs;
+ uint32_t len_b_sub_dyn;
+
+ uint32_t len_c_wr;
+ uint32_t len_c_pos_dyn;
+ uint32_t len_c_pos_ofs;
+ uint32_t len_c_add_dyn;
+ uint32_t len_c_add_ofs;
+ uint32_t len_c_sub_dyn;
+
+ uint32_t ttl_wr;
+ uint32_t ttl_pos_dyn;
+ uint32_t ttl_pos_ofs;
+
+ uint32_t cs_inf;
+ uint32_t l3_prt;
+ uint32_t l3_frag;
+ uint32_t tunnel;
+ uint32_t l4_prt;
+ uint32_t outer_l3_ofs;
+ uint32_t outer_l4_ofs;
+ uint32_t inner_l3_ofs;
+ uint32_t inner_l4_ofs;
+};
+
+struct tpe_v1_csu_v0_rcp_s {
+ uint32_t ol3_cmd;
+ uint32_t ol4_cmd;
+ uint32_t il3_cmd;
+ uint32_t il4_cmd;
+};
+
+struct hw_mod_tpe_v1_s {
+ struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+ struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+ struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+ struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+ struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+ struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+ struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+ struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V1_H_ */
new file mode 100644
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _HW_MOD_TPE_V2_H_
+#define _HW_MOD_TPE_V2_H_
+
+struct tpe_v2_rpp_v1_ifr_rcp_s {
+ uint32_t en;
+ uint32_t mtu;
+};
+
+struct tpe_v2_ifr_v1_rcp_s {
+ uint32_t en;
+ uint32_t mtu;
+};
+
+struct hw_mod_tpe_v2_s {
+ struct tpe_v1_rpp_v0_rcp_s *rpp_rcp;
+
+ struct tpe_v1_ins_v1_rcp_s *ins_rcp;
+
+ struct tpe_v1_rpl_v2_rcp_s *rpl_rcp;
+ struct tpe_v1_rpl_v2_ext_s *rpl_ext;
+ struct tpe_v1_rpl_v2_rpl_s *rpl_rpl;
+
+ struct tpe_v1_cpy_v1_rcp_s *cpy_rcp;
+
+ struct tpe_v1_hfu_v1_rcp_s *hfu_rcp;
+
+ struct tpe_v1_csu_v0_rcp_s *csu_rcp;
+
+ struct tpe_v2_rpp_v1_ifr_rcp_s *rpp_ifr_rcp;
+ struct tpe_v2_ifr_v1_rcp_s *ifr_rcp;
+};
+
+#endif /* _HW_MOD_TPE_V2_H_ */
new file mode 100644
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Napatech A/S
+ */
+
+#ifndef _STREAM_BINARY_FLOW_API_H_
+#define _STREAM_BINARY_FLOW_API_H_
+
+#include <stdint.h> /* uint16_t, uint32_t, uint64_t */
+#include <stdio.h> /* snprintf */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t be16_t; /* 16-bit big-endian */
+typedef uint32_t be32_t; /* 32-bit big-endian */
+typedef uint64_t be64_t; /* 64-bit big-endian */
+
+/*
+ * Flow frontend for binary programming interface
+ */
+
+#define FLOW_MAX_QUEUES 128
+
+#define RAW_ENCAP_DECAP_ELEMS_MAX 16
+
+/*
+ * Partial flow mark and special flow marks
+ */
+#define FLOW_MARK_LACP 0x7fffffff
+#define FLOW_MARK_MAX 0x7ffffffe
+/*
+ * Flow eth dev profile determines how the FPGA module resources are
+ * managed and what features are available
+ */
+enum flow_eth_dev_profile {
+ FLOW_ETH_DEV_PROFILE_VSWITCH = 0,
+ FLOW_ETH_DEV_PROFILE_INLINE = 1,
+};
+
+/*
+ * Flow rule attributes
+ */
+struct flow_attr {
+ uint32_t group; /* Priority group. */
+ uint32_t priority; /* Rule priority level within group. */
+ uint16_t forced_vlan_vid; /* Forced VLAN VID that filter must match. Ignored if 0. */
+ uint8_t caller_id; /* Unique ID of caller application. */
+};
+
+struct flow_queue_id_s {
+ int id;
+ int hw_id;
+};
+
+/* NT Private rte flow items. */
+
+/* NT Private rte flow actions. */
+
+enum flow_elem_type {
+ FLOW_ELEM_TYPE_END,
+ FLOW_ELEM_TYPE_ANY,
+ FLOW_ELEM_TYPE_ETH,
+ FLOW_ELEM_TYPE_VLAN,
+ FLOW_ELEM_TYPE_IPV4,
+ FLOW_ELEM_TYPE_IPV6,
+ FLOW_ELEM_TYPE_SCTP,
+ FLOW_ELEM_TYPE_TCP,
+ FLOW_ELEM_TYPE_UDP,
+ FLOW_ELEM_TYPE_ICMP,
+ FLOW_ELEM_TYPE_VXLAN,
+ FLOW_ELEM_TYPE_GTP,
+ FLOW_ELEM_TYPE_PORT_ID,
+ FLOW_ELEM_TYPE_TAG,
+ FLOW_ELEM_TYPE_VOID,
+
+ /*
+	 * Not associated with a RTE_ITEM..., but rather a restoration API
+	 * device-specific extension
+ */
+ FLOW_ELEM_TYPE_TUNNEL
+};
+
+enum flow_action_type { /* conf structure */
+ FLOW_ACTION_TYPE_END, /* -none- : End tag for action list */
+	FLOW_ACTION_TYPE_POP_VLAN, /* -none- : Pops the outer VLAN tag */
+ FLOW_ACTION_TYPE_PUSH_VLAN, /* struct flow_action_push_vlan : Push VLAN TAG */
+ FLOW_ACTION_TYPE_SET_VLAN_VID, /* struct flow_action_set_vlan_vid : Set VLAN VID */
+ FLOW_ACTION_TYPE_SET_VLAN_PCP, /* struct flow_action_set_vlan_pcp : Set VLAN PCP */
+	/* -none- : Decapsulate outermost VXLAN tunnel from matched flow */
+ FLOW_ACTION_TYPE_VXLAN_DECAP,
+ FLOW_ACTION_TYPE_VXLAN_ENCAP, /* struct flow_action_vxlan_encap */
+ FLOW_ACTION_TYPE_DROP, /* -none- : Drop packets of this flow */
+ FLOW_ACTION_TYPE_COUNT, /* struct flow_action_count : Used for "query" flow function */
+ FLOW_ACTION_TYPE_MARK, /* struct flow_action_mark : Used to tag a flow in HW with a MARK */
+ FLOW_ACTION_TYPE_SET_TAG, /* struct flow_action_tag : Used to tag a flow in HW with a TAG */
+ /* struct flow_action_port_id : Destination port ID - HW port ID */
+ FLOW_ACTION_TYPE_PORT_ID,
+ FLOW_ACTION_TYPE_RSS, /* struct flow_action_rss : */
+ FLOW_ACTION_TYPE_QUEUE, /* struct flow_action_queue : */
+ FLOW_ACTION_TYPE_JUMP, /* struct flow_action_jump : */
+ /* struct flow_action_meter : Used to set MBR record ids in FLM learn records */
+ FLOW_ACTION_TYPE_METER,
+ FLOW_ACTION_TYPE_RAW_ENCAP, /* struct flow_action_raw_encap : */
+ FLOW_ACTION_TYPE_RAW_DECAP, /* struct flow_action_raw_decap : */
+ FLOW_ACTION_TYPE_MODIFY_FIELD, /* struct flow_action_modify_field : */
+
+ /*
+	 * -none- : not associated with a RTE_ACTION..., but rather a
+	 * restoration API device-specific extension
+ */
+ FLOW_ACTION_TYPE_TUNNEL_SET
+};
+
+#pragma pack(1)
+struct ether_addr_s {
+ uint8_t addr_b[6];
+};
+
+#pragma pack()
+
+static inline void flow_ether_format_addr(char *buf, uint16_t size,
+ const struct ether_addr_s *eth_addr)
+{
+ snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+ eth_addr->addr_b[0], eth_addr->addr_b[1], eth_addr->addr_b[2],
+ eth_addr->addr_b[3], eth_addr->addr_b[4], eth_addr->addr_b[5]);
+}
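+
+/*
+ * Usage sketch (illustrative only; 'mac' is a caller-supplied
+ * struct ether_addr_s): 18 bytes fit the formatted address, i.e.
+ * 17 characters plus the terminating NUL:
+ *
+ *	char buf[18];
+ *	flow_ether_format_addr(buf, sizeof(buf), &mac);
+ */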
+
+/*
+ * IPv4 Header
+ */
+#pragma pack(1)
+struct ipv4_hdr_s {
+ uint8_t version_ihl;
+ uint8_t tos;
+ be16_t length;
+ be16_t id;
+ be16_t frag_offset;
+ uint8_t ttl;
+ uint8_t next_proto_id;
+ be16_t hdr_csum;
+ be32_t src_ip;
+ be32_t dst_ip;
+};
+
+#pragma pack()
+/*
+ * IPv6 Header
+ */
+#pragma pack(1)
+struct ipv6_hdr_s {
+ be32_t vtc_flow; /* IP version, traffic class & flow label */
+	be16_t payload_len; /* Payload length - excludes the base IPv6 header */
+ uint8_t proto;
+ uint8_t hop_limits;
+ uint8_t src_addr[16];
+ uint8_t dst_addr[16];
+};
+
+#pragma pack()
+
+/*
+ * SCTP Header
+ */
+#pragma pack(1)
+struct sctp_hdr_s {
+ be16_t src_port;
+ be16_t dst_port;
+ be32_t tag; /* Validation tag */
+ be32_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * TCP Header
+ */
+#pragma pack(1)
+struct tcp_hdr_s {
+ be16_t src_port;
+ be16_t dst_port;
+ be32_t sent_seq;
+ be32_t recv_ack;
+ uint8_t data_off;
+ uint8_t tcp_flags;
+ be16_t rx_win;
+ be16_t cksum;
+ be16_t tcp_urp;
+};
+
+#pragma pack()
+
+/*
+ * UDP Header
+ */
+#pragma pack(1)
+struct udp_hdr_s {
+ be16_t src_port;
+ be16_t dst_port;
+ be16_t len;
+ be16_t cksum;
+};
+
+#pragma pack()
+
+/*
+ * ICMP Header
+ */
+#pragma pack(1)
+struct icmp_hdr_s {
+ uint8_t type;
+ uint8_t code;
+ be16_t cksum;
+ be16_t ident;
+ be16_t seq_nb;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_ETH specification
+ */
+#pragma pack(1)
+struct flow_elem_eth {
+ struct ether_addr_s d_addr; /* DMAC */
+ struct ether_addr_s s_addr; /* SMAC */
+ be16_t ether_type; /* Frame type */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VLAN specification
+ */
+#pragma pack(1)
+struct flow_elem_vlan {
+ be16_t tci; /* Tag control information */
+ be16_t inner_type; /* Inner EtherType or TPID */
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_IPV4 specification
+ */
+struct flow_elem_ipv4 {
+ struct ipv4_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_IPV6 specification
+ */
+struct flow_elem_ipv6 {
+ struct ipv6_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_SCTP specification
+ */
+struct flow_elem_sctp {
+ struct sctp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_TCP specification
+ */
+struct flow_elem_tcp {
+ struct tcp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_UDP specification
+ */
+struct flow_elem_udp {
+ struct udp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ICMP specification
+ */
+struct flow_elem_icmp {
+ struct icmp_hdr_s hdr;
+};
+
+/*
+ * FLOW_ELEM_TYPE_GTP specification
+ */
+#pragma pack(1)
+struct flow_elem_gtp {
+ uint8_t v_pt_rsv_flags;
+ uint8_t msg_type;
+ be16_t msg_len;
+ be32_t teid;
+};
+
+#pragma pack()
+
+/*
+ * FLOW_ELEM_TYPE_VXLAN specification (RFC 7348)
+ */
+#pragma pack(1)
+struct flow_elem_vxlan {
+ uint8_t flags; /* Normally 0x08 (I flag) */
+ uint8_t rsvd0[3];
+ uint8_t vni[3];
+ uint8_t rsvd1;
+};
+
+#pragma pack()
+/*
+ * FLOW_ELEM_TYPE_PORT_ID specification
+ */
+struct flow_elem_port_id {
+ uint32_t id; /* HW port no */
+};
+
+/*
+ * FLOW_ELEM_TYPE_TAG specification
+ */
+struct flow_elem_tag {
+ uint32_t data;
+ uint8_t index;
+};
+
+/*
+ * FLOW_ELEM_TYPE_ANY specification
+ */
+struct flow_elem_any {
+	uint32_t num; /* Number of layers covered. */
+};
+
+struct flow_elem {
+ enum flow_elem_type type; /* element type */
+ const void *spec; /* Pointer to element specification structure */
+ const void *mask; /* Bitmask applied to spec - same type */
+};
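+
+/*
+ * Usage sketch (illustrative only): a match pattern is an array of
+ * flow_elem entries terminated by FLOW_ELEM_TYPE_END; the mask selects
+ * which bits of spec must match. E.g. IPv4 packets carrying UDP:
+ *
+ *	struct flow_elem_ipv4 ip_spec = { .hdr.next_proto_id = 17 };
+ *	struct flow_elem_ipv4 ip_mask = { .hdr.next_proto_id = 0xff };
+ *	struct flow_elem items[] = {
+ *		{ FLOW_ELEM_TYPE_ETH, NULL, NULL },
+ *		{ FLOW_ELEM_TYPE_IPV4, &ip_spec, &ip_mask },
+ *		{ FLOW_ELEM_TYPE_UDP, NULL, NULL },
+ *		{ FLOW_ELEM_TYPE_END, NULL, NULL },
+ *	};
+ *
+ * Entries with NULL spec/mask are assumed to only require the layer to
+ * be present, mirroring rte_flow defaults.
+ */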
+
+/*
+ * FLOW_ACTION_TYPE_RSS
+ */
+enum flow_hash_function {
+ FLOW_HASH_FUNCTION_DEFAULT = 0,
+	FLOW_HASH_FUNCTION_TOEPLITZ, /* Not supported by the current FPGA */
+ FLOW_HASH_FUNCTION_SIMPLE_XOR /* Simple XOR - not supported */
+};
+
+struct flow_action_rss {
+ enum flow_hash_function func;
+ uint32_t level; /* only level 0 supported */
+	/* Specific RSS hash types (like DPDK ETH_RSS_*) */
+ uint64_t types;
+ uint32_t key_len; /* Not supported yet - Hash key length in bytes */
+	uint32_t queue_num; /* Number of entries in the queue array */
+ const uint8_t *key; /* Not supported yet - Hash key */
+ const uint16_t *queue; /* Queue indices to use */
+};
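+
+/*
+ * Usage sketch (illustrative only): spread matching flows over four
+ * queues with the default hash function; 'rss_types' stands in for a
+ * caller-chosen DPDK-style ETH_RSS_* bit mask:
+ *
+ *	uint16_t queues[4] = { 0, 1, 2, 3 };
+ *	struct flow_action_rss rss = {
+ *		.func = FLOW_HASH_FUNCTION_DEFAULT,
+ *		.level = 0,
+ *		.types = rss_types,
+ *		.queue_num = 4,
+ *		.queue = queues,
+ *	};
+ */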
+
+/*
+ * FLOW_ACTION_TYPE_PUSH_VLAN
+ * Push a new VLAN tag
+ */
+struct flow_action_push_vlan {
+ be16_t ethertype;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_VID
+ */
+struct flow_action_set_vlan_vid {
+ be16_t vlan_vid;
+};
+
+/*
+ * FLOW_ACTION_TYPE_SET_VLAN_PCP
+ */
+struct flow_action_set_vlan_pcp {
+	uint8_t vlan_pcp; /* VLAN priority. */
+};
+
+/*
+ * FLOW_ACTION_TYPE_VXLAN_ENCAP specification
+ * Valid flow definition:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct flow_action_vxlan_encap {
+ /* Encapsulating vxlan tunnel definition */
+ struct flow_elem *vxlan_tunnel;
+};
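+
+/*
+ * Usage sketch (illustrative only): the tunnel definition is itself a
+ * flow_elem list following one of the layouts above, each spec filled
+ * with the header values to encapsulate with ('eth', 'ip', 'udp' and
+ * 'vxlan' are caller-filled flow_elem_* structures):
+ *
+ *	struct flow_elem tun[] = {
+ *		{ FLOW_ELEM_TYPE_ETH, &eth, NULL },
+ *		{ FLOW_ELEM_TYPE_IPV4, &ip, NULL },
+ *		{ FLOW_ELEM_TYPE_UDP, &udp, NULL },
+ *		{ FLOW_ELEM_TYPE_VXLAN, &vxlan, NULL },
+ *		{ FLOW_ELEM_TYPE_END, NULL, NULL },
+ *	};
+ *	struct flow_action_vxlan_encap encap = { .vxlan_tunnel = tun };
+ */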
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification
+ */
+struct flow_action_count {
+	uint32_t id; /* counter ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_COUNT specification (query)
+ */
+struct flow_query_count {
+ uint32_t reset : 1;
+ uint32_t hits_set : 1;
+ uint32_t bytes_set : 1;
+
+ uint32_t tcp_flags : 9;
+
+ uint32_t reserved : 20;
+ uint64_t hits;
+ uint64_t bytes;
+};
+
+/*
+ * FLOW_ACTION_TYPE_MARK specification
+ */
+struct flow_action_mark {
+ uint32_t id; /* mark flow with this ID */
+};
+
+/*
+ * FLOW_ACTION_TYPE_TAG specification
+ */
+struct flow_action_tag {
+ uint32_t data; /* tag flow with this value */
+ uint32_t mask; /* bit-mask applied to "data" */
+ uint8_t index; /* index of tag to set */
+};
+
+/*
+ * FLOW_ACTION_TYPE_PORT_ID specification
+ */
+struct flow_action_port_id {
+	uint32_t rte_flags; /* unused; kept for binary compatibility with rte_flow */
+ uint32_t id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_QUEUE
+ */
+struct flow_action_queue {
+ uint16_t index;
+};
+
+/*
+ * FLOW_ACTION_TYPE_JUMP
+ */
+struct flow_action_jump {
+ uint32_t group;
+};
+
+/*
+ * FLOW_ACTION_TYPE_METER
+ */
+struct flow_action_meter {
+ uint32_t mtr_id;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_ENCAP
+ */
+struct flow_action_raw_encap {
+ uint8_t *data;
+ uint8_t *preserve;
+ size_t size;
+ struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+ int item_count;
+};
+
+/*
+ * FLOW_ACTION_TYPE_RAW_DECAP
+ */
+struct flow_action_raw_decap {
+ uint8_t *data;
+ size_t size;
+ struct flow_elem items[RAW_ENCAP_DECAP_ELEMS_MAX];
+ int item_count;
+};
+
+/*
+ * Field IDs for MODIFY_FIELD action.
+ */
+enum flow_field_id {
+ FLOW_FIELD_START = 0, /* Start of a packet. */
+ FLOW_FIELD_MAC_DST, /* Destination MAC Address. */
+ FLOW_FIELD_MAC_SRC, /* Source MAC Address. */
+ FLOW_FIELD_VLAN_TYPE, /* 802.1Q Tag Identifier. */
+ FLOW_FIELD_VLAN_ID, /* 802.1Q VLAN Identifier. */
+ FLOW_FIELD_MAC_TYPE, /* EtherType. */
+ FLOW_FIELD_IPV4_DSCP, /* IPv4 DSCP. */
+ FLOW_FIELD_IPV4_TTL, /* IPv4 Time To Live. */
+ FLOW_FIELD_IPV4_SRC, /* IPv4 Source Address. */
+ FLOW_FIELD_IPV4_DST, /* IPv4 Destination Address. */
+ FLOW_FIELD_IPV6_DSCP, /* IPv6 DSCP. */
+ FLOW_FIELD_IPV6_HOPLIMIT, /* IPv6 Hop Limit. */
+ FLOW_FIELD_IPV6_SRC, /* IPv6 Source Address. */
+ FLOW_FIELD_IPV6_DST, /* IPv6 Destination Address. */
+ FLOW_FIELD_TCP_PORT_SRC, /* TCP Source Port Number. */
+ FLOW_FIELD_TCP_PORT_DST, /* TCP Destination Port Number. */
+ FLOW_FIELD_TCP_SEQ_NUM, /* TCP Sequence Number. */
+ FLOW_FIELD_TCP_ACK_NUM, /* TCP Acknowledgment Number. */
+ FLOW_FIELD_TCP_FLAGS, /* TCP Flags. */
+ FLOW_FIELD_UDP_PORT_SRC, /* UDP Source Port Number. */
+ FLOW_FIELD_UDP_PORT_DST, /* UDP Destination Port Number. */
+ FLOW_FIELD_VXLAN_VNI, /* VXLAN Network Identifier. */
+ FLOW_FIELD_GENEVE_VNI, /* GENEVE Network Identifier. */
+ FLOW_FIELD_GTP_TEID, /* GTP Tunnel Endpoint Identifier. */
+ FLOW_FIELD_TAG, /* Tag value. */
+ FLOW_FIELD_MARK, /* Mark value. */
+ FLOW_FIELD_META, /* Metadata value. */
+ FLOW_FIELD_POINTER, /* Memory pointer. */
+ FLOW_FIELD_VALUE, /* Immediate value. */
+ FLOW_FIELD_IPV4_ECN, /* IPv4 ECN. */
+ FLOW_FIELD_IPV6_ECN, /* IPv6 ECN. */
+ FLOW_FIELD_GTP_PSC_QFI, /* GTP QFI. */
+ FLOW_FIELD_METER_COLOR, /* Meter color marker. */
+};
+
+/*
+ * Field description for MODIFY_FIELD action.
+ */
+struct flow_action_modify_data {
+ enum flow_field_id field; /* Field or memory type ID. */
+ union {
+ struct {
+ /* Encapsulation level or tag index. */
+ uint32_t level;
+ /* Number of bits to skip from a field. */
+ uint32_t offset;
+ };
+ /*
+ * Immediate value for FLOW_FIELD_VALUE, presented in the
+	 * same byte order and length as in the relevant rte_flow_item_xxx.
+ */
+ uint8_t value[16];
+ /*
+ * Memory address for FLOW_FIELD_POINTER, memory layout
+	 * should be the same as for the relevant field in the
+ * rte_flow_item_xxx structure.
+ */
+ void *pvalue;
+ };
+};
+
+/*
+ * Operation types for MODIFY_FIELD action.
+ */
+enum flow_modify_op {
+ FLOW_MODIFY_SET = 0,
+ FLOW_MODIFY_ADD,
+ FLOW_MODIFY_SUB,
+};
+
+/*
+ * FLOW_ACTION_TYPE_MODIFY_FIELD
+ */
+struct flow_action_modify_field {
+ enum flow_modify_op operation;
+ struct flow_action_modify_data dst;
+ struct flow_action_modify_data src;
+ uint32_t width;
+};
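+
+/*
+ * Usage sketch (illustrative only): overwrite the IPv4 TTL with an
+ * immediate value of 64. The width is assumed to be in bits, mirroring
+ * rte_flow_action_modify_field:
+ *
+ *	struct flow_action_modify_field mf = {
+ *		.operation = FLOW_MODIFY_SET,
+ *		.dst = { .field = FLOW_FIELD_IPV4_TTL },
+ *		.src = { .field = FLOW_FIELD_VALUE, .value = { 64 } },
+ *		.width = 8,
+ *	};
+ */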
+
+struct flow_action {
+ enum flow_action_type type;
+ const void *conf;
+};
+
+enum flow_error_e { FLOW_ERROR_NONE, FLOW_ERROR_SUCCESS, FLOW_ERROR_GENERAL };
+
+struct flow_error {
+ enum flow_error_e type;
+ const char *message;
+};
+
+enum flow_lag_cmd {
+ FLOW_LAG_SET_ENTRY,
+ FLOW_LAG_SET_ALL,
+ FLOW_LAG_SET_BALANCE,
+};
+
+/*
+ * Tunnel definition for DPDK RTE tunnel helper function support
+ */
+struct tunnel_cfg_s {
+ union {
+ struct {
+ uint32_t src_ip; /* BE */
+ uint32_t dst_ip; /* BE */
+ } v4;
+ struct {
+ uint8_t src_ip[16];
+ uint8_t dst_ip[16];
+ } v6;
+ struct {
+ uint64_t src_ip[2];
+ uint64_t dst_ip[2];
+ } v6_long;
+ };
+ int ipversion;
+ uint16_t s_port; /* BE */
+ uint16_t d_port; /* BE */
+ int tun_type;
+};
+
+struct flow_eth_dev; /* port device */
+struct flow_handle;
+
+/*
+ * Device Management API
+ */
+int flow_reset_nic_dev(uint8_t adapter_no);
+
+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t hw_port_no,
+ uint32_t port_id, int alloc_queues,
+ struct flow_queue_id_s queue_ids[],
+ int *rss_target_id,
+ enum flow_eth_dev_profile flow_profile,
+ uint32_t exception_path);
+
+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,
+ struct flow_queue_id_s *queue_id);
+
+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev);
+
+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,
+ uint8_t vport);
+
+/*
+ * NT Flow API
+ */
+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],
+ const struct flow_action action[], struct flow_error *error);
+
+struct flow_handle *flow_create(struct flow_eth_dev *dev,
+ const struct flow_attr *attr,
+ const struct flow_elem item[],
+ const struct flow_action action[],
+ struct flow_error *error);
+
+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,
+ struct flow_error *error);
+
+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error);
+
+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,
+ const struct flow_action *action, void **data, uint32_t *length,
+ struct flow_error *error);
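+
+/*
+ * Usage sketch (illustrative only): create a flow from END-terminated
+ * 'items' and 'actions' arrays (as sketched above) on an opened
+ * flow_eth_dev, then tear it down again; error handling is minimal:
+ *
+ *	struct flow_attr attr = { .group = 0, .priority = 1 };
+ *	struct flow_error err;
+ *	struct flow_handle *fh =
+ *		flow_create(dev, &attr, items, actions, &err);
+ *	if (fh)
+ *		flow_destroy(dev, fh, &err);
+ */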
+
+/*
+ * NT Flow FLM Meter API
+ */
+int flow_mtr_supported(struct flow_eth_dev *dev);
+
+uint64_t flow_mtr_meter_policy_n_max(void);
+
+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,
+ uint64_t bucket_rate_a, uint64_t bucket_size_a,
+ uint64_t bucket_rate_b, uint64_t bucket_size_b);
+
+int flow_mtr_set_policy(struct flow_eth_dev *dev, uint32_t policy_id, int drop);
+
+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,
+ uint32_t profile_id, uint32_t policy_id,
+ uint64_t stats_mask);
+
+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id);
+
+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,
+ uint32_t adjust_value);
+
+uint32_t flow_mtr_meters_supported(void);
+
+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev);
+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,
+ uint64_t *stats_mask, uint64_t *green_pkt,
+ uint64_t *green_bytes, int clear);
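+
+/*
+ * Usage sketch (illustrative only): a meter combines a two-bucket
+ * profile with a policy, both referenced by ID when the meter is
+ * created. All IDs, rates and sizes below are caller-chosen, and the
+ * policy's 'drop' argument is assumed to be 0 for "do not drop":
+ *
+ *	flow_mtr_set_profile(dev, prof_id, rate_a, size_a, rate_b, size_b);
+ *	flow_mtr_set_policy(dev, pol_id, 0);
+ *	flow_mtr_create_meter(dev, mtr_id, prof_id, pol_id, stats_mask);
+ */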
+
+/*
+ * Config API
+ */
+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);
+
+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,
+ uint32_t value);
+
+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STREAM_BINARY_FLOW_API_H_ */