@@ -991,6 +991,7 @@ struct bnxt {
uint8_t ecpri_port_cnt;
uint8_t l2_etype_tunnel_cnt;
uint16_t vxlan_port;
+ uint16_t vxlan_ip_port;
uint16_t geneve_port;
uint16_t ecpri_port;
uint16_t vxlan_fw_dst_port_id;
@@ -1000,6 +1001,7 @@ struct bnxt {
uint16_t l2_etype_tunnel_id;
uint16_t ecpri_upar_in_use;
uint8_t l2_etype_upar_in_use;
+ uint8_t vxlan_ip_upar_in_use;
uint32_t fw_ver;
uint32_t hwrm_spec_code;
@@ -4847,6 +4847,12 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
bp->l2_etype_tunnel_id = port;
bp->l2_etype_upar_in_use = resp->upar_in_use;
break;
+ case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4:
+ bp->vxlan_ip_upar_in_use = resp->upar_in_use;
+ bp->vxlan_ip_port = port;
+ PMD_DRV_LOG_LINE(DEBUG, "vxlan_ip_upar_in_use %x port %x",
+ bp->vxlan_ip_upar_in_use, bp->vxlan_ip_port);
+ break;
default:
break;
}
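For context, this HWRM helper is normally reached through the generic ethdev UDP-tunnel configuration path; the sketch below is a hedged illustration (app_register_vxlan_port is a hypothetical application helper, 4790 is an example port) of how a tunnel destination port typically arrives at bnxt_hwrm_tunnel_dst_port_alloc(), while the new VXLAN_V4 case is fed from the ULP vxlan_ip_port application capability handled later in this patch.

#include <rte_ethdev.h>

static int app_register_vxlan_port(uint16_t port_id, uint16_t udp_port)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = udp_port,                  /* e.g. 4790 */
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	/* The bnxt udp_tunnel_port_add ethdev op turns this into the
	 * HWRM tunnel dst-port allocation handled above.
	 */
	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}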
@@ -558,6 +558,9 @@ bnxt_pmd_global_tunnel_set(uint16_t port_id, uint8_t type,
case BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN_GPE:
hwtype = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE;
break;
+ case BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN_GPE_V6:
+ hwtype = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6;
+ break;
default:
BNXT_TF_DBG(ERR, "Tunnel Type (%d) invalid\n", type);
return -EINVAL;
@@ -625,13 +628,41 @@ bnxt_pmd_global_tunnel_set(uint16_t port_id, uint8_t type,
* If BNXT_ULP_T_HA_SUPPORT is set to zero explicitly then
* hotupgrade is disabled.
*/
-int32_t bnxt_pmd_get_hot_upgrade_env(void)
+static bool bnxt_pmd_get_hot_upgrade_env(void)
{
char *env;
- int32_t hot_up = 1;
+ bool hot_up = true;
env = getenv(BNXT_ULP_HOT_UP_DYNAMIC_ENV_VAR);
if (env && strcmp(env, "0") == 0)
hot_up = 0;
return hot_up;
}
+
+static bool hot_up_api;
+static bool hot_up_configured_by_api;
+/* There are two ways to configure hot upgrade: by calling the
+ * bnxt_pmd_configure_hot_upgrade API or by setting the
+ * BNXT_ULP_T_HA_SUPPORT environment variable.
+ * bnxt_pmd_configure_hot_upgrade takes precedence over the
+ * environment variable. Once hot upgrade has been configured
+ * through bnxt_pmd_configure_hot_upgrade, the setting cannot be
+ * reverted to the environment variable.
+ *
+ * bnxt_pmd_configure_hot_upgrade must be called before the
+ * dev_start eth_dev_ops callback runs for the configuration to
+ * take effect.
+ */
+void bnxt_pmd_configure_hot_upgrade(bool enable)
+{
+ hot_up_configured_by_api = true;
+ hot_up_api = enable;
+}
+
+bool bnxt_pmd_get_hot_up_config(void)
+{
+ if (hot_up_configured_by_api)
+ return hot_up_api;
+
+ return bnxt_pmd_get_hot_upgrade_env();
+}
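A minimal usage sketch (app_start_port is a hypothetical application helper; the prototype is the one exported by the bnxt TF PMD above): the call has to land before rte_eth_dev_start() so the dev_start handler sees the API setting instead of the environment variable.

#include <stdbool.h>
#include <rte_ethdev.h>

void bnxt_pmd_configure_hot_upgrade(bool enable);	/* exported by the bnxt TF PMD */

static int app_start_port(uint16_t port_id, bool want_hot_upgrade)
{
	/* Overrides BNXT_ULP_T_HA_SUPPORT from this point on. */
	bnxt_pmd_configure_hot_upgrade(want_hot_upgrade);
	return rte_eth_dev_start(port_id);
}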
@@ -21,6 +21,7 @@ enum bnxt_global_register_tunnel_type {
BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN,
BNXT_GLOBAL_REGISTER_TUNNEL_ECPRI,
BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN_GPE,
+ BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN_GPE_V6,
BNXT_GLOBAL_REGISTER_TUNNEL_MAX
};
@@ -59,5 +60,5 @@ int32_t
bnxt_tunnel_upar_id_get(struct bnxt *bp,
uint8_t type,
uint8_t *upar_id);
-int32_t bnxt_pmd_get_hot_upgrade_env(void);
+bool bnxt_pmd_get_hot_up_config(void);
#endif /* _BNXT_TF_PMD_ABSTRACT_H_ */
@@ -48,6 +48,14 @@ static int32_t bnxt_ulp_cntxt_list_init(void);
static int32_t bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx);
static void bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx);
+bool
+ulp_is_default_session_active(struct bnxt_ulp_context *ulp_ctx)
+{
+ if (ulp_ctx == NULL || ulp_ctx->g_tfp[0] == NULL)
+ return false;
+
+ return true;
+}
/*
* Allow the deletion of context only for the bnxt device that
* created the session.
@@ -294,7 +302,7 @@ bnxt_ulp_tf_shared_session_resources_get(struct bnxt_ulp_context *ulp_ctx,
{
struct bnxt_ulp_resource_resv_info *unnamed;
struct bnxt_ulp_glb_resource_info *named;
- uint32_t unum, nnum;
+ uint32_t unum = 0, nnum = 0;
int32_t rc;
if (ulp_ctx == NULL || res == NULL) {
@@ -315,24 +323,21 @@ bnxt_ulp_tf_shared_session_resources_get(struct bnxt_ulp_context *ulp_ctx,
*/
/* Get the baseline counts */
unnamed = bnxt_ulp_app_resource_resv_list_get(&unum);
- if (unnamed == NULL) {
- BNXT_TF_DBG(ERR, "Unable to get shared resource resv list.\n");
- return -EINVAL;
- }
- rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, stype,
- res);
- if (rc) {
- BNXT_TF_DBG(ERR,
- "Unable to calc resources for shared session.\n");
- return -EINVAL;
+ if (unum) {
+ rc = bnxt_ulp_unnamed_resources_calc(ulp_ctx, unnamed, unum, stype,
+ res);
+ if (rc) {
+ BNXT_TF_DBG(ERR,
+ "Unable to calc resources for shared session.\n");
+ return -EINVAL;
+ }
}
/* Get the named list and add the totals */
named = bnxt_ulp_app_glb_resource_info_list_get(&nnum);
- if (named == NULL) {
- BNXT_TF_DBG(ERR, "Unable to get app global resource list\n");
- return -EINVAL;
- }
+ if (!nnum)
+ return 0;
+
rc = bnxt_ulp_named_resources_calc(ulp_ctx, named, nnum, stype, res);
if (rc)
BNXT_TF_DBG(ERR, "Unable to calc named resources\n");
@@ -442,7 +447,7 @@ bnxt_ulp_cntxt_app_caps_init(struct bnxt *bp,
}
if (info[i].flags & BNXT_ULP_APP_CAP_HA_DYNAMIC) {
/* Read the environment variable to determine hot up */
- if (!bnxt_pmd_get_hot_upgrade_env()) {
+ if (!bnxt_pmd_get_hot_up_config()) {
ulp_ctx->cfg_data->ulp_flags |=
BNXT_ULP_APP_HA_DYNAMIC;
/* reset Hot upgrade, dynamically disabled */
@@ -458,9 +463,14 @@ bnxt_ulp_cntxt_app_caps_init(struct bnxt *bp,
ulp_ctx->cfg_data->ulp_flags |=
BNXT_ULP_APP_L2_ETYPE;
+ if (info[i].flags & BNXT_ULP_APP_CAP_CUST_VXLAN)
+ ulp_ctx->cfg_data->ulp_flags |=
+ BNXT_ULP_CUST_VXLAN_SUPPORT;
+
bnxt_ulp_vxlan_ip_port_set(ulp_ctx, info[i].vxlan_ip_port);
bnxt_ulp_vxlan_port_set(ulp_ctx, info[i].vxlan_port);
bnxt_ulp_ecpri_udp_port_set(ulp_ctx, info[i].ecpri_udp_port);
+ bnxt_ulp_vxlan_gpe_next_proto_set(ulp_ctx, info[i].tunnel_next_proto);
/* set the shared session support from firmware */
fw = info[i].upgrade_fw_update;
@@ -531,6 +541,29 @@ bnxt_ulp_vxlan_ip_port_get(struct bnxt_ulp_context *ulp_ctx)
return (unsigned int)ulp_ctx->cfg_data->vxlan_ip_port;
}
+/* Function to set the vxlan_gpe next_proto into the context */
+uint32_t
+bnxt_ulp_vxlan_gpe_next_proto_set(struct bnxt_ulp_context *ulp_ctx,
+ uint8_t tunnel_next_proto)
+{
+ if (!ulp_ctx || !ulp_ctx->cfg_data)
+ return -EINVAL;
+
+ ulp_ctx->cfg_data->tunnel_next_proto = tunnel_next_proto;
+
+ return 0;
+}
+
+/* Function to retrieve the vxlan_gpe next_proto from the context. */
+uint8_t
+bnxt_ulp_vxlan_gpe_next_proto_get(struct bnxt_ulp_context *ulp_ctx)
+{
+ if (!ulp_ctx || !ulp_ctx->cfg_data)
+ return 0;
+
+ return ulp_ctx->cfg_data->tunnel_next_proto;
+}
+
/* Function to set the number for vxlan port into the context */
int
bnxt_ulp_vxlan_port_set(struct bnxt_ulp_context *ulp_ctx,
@@ -1602,6 +1635,28 @@ bnxt_ulp_destroy_vfr_default_rules(struct bnxt *bp, bool global)
}
}
+static void
+ulp_cust_vxlan_free(struct bnxt *bp)
+{
+ int rc;
+
+ if (ULP_APP_CUST_VXLAN_SUPPORT(bp->ulp_ctx)) {
+ rc = bnxt_tunnel_dst_port_free(bp,
+ bp->ulp_ctx->cfg_data->vxlan_port,
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
+ if (rc)
+ BNXT_TF_DBG(ERR, "Failed to clear global vxlan port\n");
+ }
+
+ if (ULP_APP_CUST_VXLAN_IP_SUPPORT(bp->ulp_ctx)) {
+ rc = bnxt_tunnel_dst_port_free(bp,
+ bp->ulp_ctx->cfg_data->vxlan_ip_port,
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4);
+ if (rc)
+ BNXT_TF_DBG(ERR, "Failed to clear global custom vxlan port\n");
+ }
+}
+
/*
* When a port is deinit'ed by dpdk. This function is called
* and this function clears the ULP context and rest of the
@@ -1624,6 +1679,9 @@ bnxt_ulp_deinit(struct bnxt *bp,
BNXT_TF_DBG(ERR, "Failed to close HA (%d)\n", rc);
}
+ /* Free tunnel configuration */
+ ulp_cust_vxlan_free(bp);
+
/* clean up default flows */
bnxt_ulp_destroy_df_rules(bp, true);
@@ -2008,28 +2066,6 @@ ulp_l2_etype_tunnel_free(struct bnxt *bp)
bp->l2_etype_tunnel_cnt--;
}
-static void
-ulp_cust_vxlan_free(struct bnxt *bp)
-{
- int rc;
-
- if (ULP_APP_CUST_VXLAN_SUPPORT(bp->ulp_ctx)) {
- rc = bnxt_tunnel_dst_port_free(bp,
- bp->ulp_ctx->cfg_data->vxlan_port,
- HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
- if (rc)
- BNXT_TF_DBG(ERR, "Failed to clear global vxlan port\n");
- }
-
- if (ULP_APP_CUST_VXLAN_IP_SUPPORT(bp->ulp_ctx)) {
- rc = bnxt_tunnel_dst_port_free(bp,
- bp->ulp_ctx->cfg_data->vxlan_ip_port,
- HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4);
- if (rc)
- BNXT_TF_DBG(ERR, "Failed to clear global custom vxlan port\n");
- }
-}
-
/*
* When a port is de-initialized by dpdk. This functions clears up
* the port specific details.
@@ -2081,16 +2117,17 @@ bnxt_ulp_port_deinit(struct bnxt *bp)
/* Check the reference count to deinit or deattach*/
if (bp->ulp_ctx->cfg_data && bp->ulp_ctx->cfg_data->ref_cnt) {
bp->ulp_ctx->cfg_data->ref_cnt--;
+ /* Free tunnels for each port */
+ ulp_l2_etype_tunnel_free(bp);
if (bp->ulp_ctx->cfg_data->ref_cnt) {
- /* Free tunnel configurations */
- ulp_cust_vxlan_free(bp);
- ulp_l2_etype_tunnel_free(bp);
-
/* free the port details */
/* Free the default flow rule associated to this port */
bnxt_ulp_destroy_df_rules(bp, false);
bnxt_ulp_destroy_vfr_default_rules(bp, false);
+ /* Free the ulp context in the context entry list */
+ bnxt_ulp_cntxt_list_del(bp->ulp_ctx);
+
/* free flows associated with this port */
bnxt_ulp_flush_port_flows(bp);
@@ -2105,9 +2142,6 @@ bnxt_ulp_port_deinit(struct bnxt *bp)
}
}
- /* Free the ulp context in the context entry list */
- bnxt_ulp_cntxt_list_del(bp->ulp_ctx);
-
/* clean up the session */
ulp_session_deinit(session);
@@ -58,11 +58,15 @@
#define ULP_APP_HA_IS_DYNAMIC(ctx) ((ctx)->cfg_data->ulp_flags &\
BNXT_ULP_APP_HA_DYNAMIC)
-#define ULP_APP_CUST_VXLAN_SUPPORT(ctx) ((ctx)->cfg_data->vxlan_port != 0)
+#define ULP_APP_CUST_VXLAN_EN(ctx) ((ctx)->cfg_data->ulp_flags &\
+ BNXT_ULP_CUST_VXLAN_SUPPORT)
#define ULP_APP_VXLAN_GPE_SUPPORT(ctx) ((ctx)->cfg_data->vxlan_gpe_port != 0)
-#define ULP_APP_CUST_VXLAN_IP_SUPPORT(ctx) ((ctx)->cfg_data->vxlan_ip_port != 0)
#define ULP_APP_L2_ETYPE_SUPPORT(ctx) ((ctx)->cfg_data->ulp_flags &\
BNXT_ULP_APP_L2_ETYPE)
+#define ULP_APP_CUST_VXLAN_SUPPORT(ctx) \
+ ((ctx) && (ctx)->cfg_data && (ctx)->cfg_data->vxlan_port != 0)
+#define ULP_APP_CUST_VXLAN_IP_SUPPORT(ctx)\
+ ((ctx) && (ctx)->cfg_data && (ctx)->cfg_data->vxlan_ip_port != 0)
enum bnxt_ulp_flow_mem_type {
BNXT_ULP_FLOW_MEM_TYPE_INT = 0,
@@ -120,10 +124,11 @@ struct bnxt_ulp_data {
uint32_t vxlan_gpe_port;
uint32_t vxlan_ip_port;
uint32_t ecpri_udp_port;
+ uint32_t hu_session_type;
uint8_t hu_reg_state;
uint8_t hu_reg_cnt;
- uint32_t hu_session_type;
uint8_t ha_pool_id;
+ uint8_t tunnel_next_proto;
enum bnxt_ulp_session_type def_session_type;
};
@@ -166,6 +171,9 @@ struct ulp_context_list_entry {
struct bnxt_ulp_context *ulp_ctx;
};
+bool
+ulp_is_default_session_active(struct bnxt_ulp_context *ulp_ctx);
+
/*
* Allow the deletion of context only for the bnxt device that
* created the session
@@ -381,15 +389,24 @@ bnxt_ulp_default_app_priority_get(struct bnxt_ulp_context *ulp_ctx);
int
bnxt_ulp_vxlan_ip_port_set(struct bnxt_ulp_context *ulp_ctx,
uint32_t vxlan_ip_port);
+
unsigned int
bnxt_ulp_vxlan_ip_port_get(struct bnxt_ulp_context *ulp_ctx);
int
bnxt_ulp_ecpri_udp_port_set(struct bnxt_ulp_context *ulp_ctx,
uint32_t ecpri_udp_port);
+
unsigned int
bnxt_ulp_ecpri_udp_port_get(struct bnxt_ulp_context *ulp_ctx);
+uint32_t
+bnxt_ulp_vxlan_gpe_next_proto_set(struct bnxt_ulp_context *ulp_ctx,
+ uint8_t tunnel_next_proto);
+
+uint8_t
+bnxt_ulp_vxlan_gpe_next_proto_get(struct bnxt_ulp_context *ulp_ctx);
+
int32_t
bnxt_flow_meter_init(struct bnxt *bp);
@@ -183,6 +183,7 @@ ulp_df_dev_port_handler(struct bnxt_ulp_context *ulp_ctx,
struct bnxt_ulp_mapper_create_parms *mapper_params)
{
uint16_t port_id;
+ uint16_t parif;
uint32_t ifindex;
int rc;
@@ -243,6 +244,18 @@ ulp_df_dev_port_handler(struct bnxt_ulp_context *ulp_ctx,
if (rc)
return rc;
+ /* Note:
+ * We save the drv_func_parif into the CF_IDX of phy_port_parif,
+ * since that index is currently referenced by the ingress templates
+ * for datapath flows. If in the future the parser is changed to
+ * save it in the CF_IDX of drv_func_parif, the templates must be
+ * updated as well.
+ * WARNING: Two VFs on the same parent PF will not work, as the parif
+ * is based on the fw fid of the parent PF.
+ */
+ parif = ULP_COMP_FLD_IDX_RD(mapper_params, BNXT_ULP_CF_IDX_DRV_FUNC_PARIF);
+ ULP_COMP_FLD_IDX_WR(mapper_params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF, parif);
+
/* Set VF Func PARIF */
rc = ulp_set_parif_in_comp_fld(ulp_ctx, ifindex, BNXT_ULP_VF_FUNC_PARIF,
mapper_params);
@@ -1179,7 +1179,7 @@ ulp_default_flow_db_cfa_action_get(struct bnxt_ulp_context *ulp_ctx,
BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE,
sub_typ, ¶ms);
if (rc) {
- BNXT_TF_DBG(INFO, "CFA Action ptr not found for flow id %u\n",
+ BNXT_TF_DBG(DEBUG, "CFA Action ptr not found for flow id %u\n",
flow_id);
return -ENOENT;
}
@@ -56,6 +56,12 @@ ulp_mapper_glb_resource_info_list_get(uint32_t *num_entries)
return ulp_glb_resource_tbl;
}
+uint32_t bnxt_ulp_glb_app_id_sig_get(uint8_t app_id)
+{
+ if (app_id >= BNXT_ULP_GLB_SIG_TBL_SIZE)
+ return 0;
+ return ulp_glb_app_sig_tbl[app_id];
+}
/*
* Read the global resource from the mapper global resource list
*
@@ -230,9 +236,12 @@ ulp_mapper_glb_field_tbl_get(struct bnxt_ulp_mapper_parms *parms,
uint32_t operand,
uint8_t *val)
{
+ uint8_t app_id_sig;
uint32_t t_idx;
- t_idx = parms->app_id << (BNXT_ULP_APP_ID_SHIFT +
+ app_id_sig = bnxt_ulp_glb_app_id_sig_get(parms->app_id);
+
+ t_idx = app_id_sig << (BNXT_ULP_APP_ID_SHIFT +
BNXT_ULP_HDR_SIG_ID_SHIFT +
BNXT_ULP_GLB_FIELD_TBL_SHIFT);
t_idx += parms->class_tid << (BNXT_ULP_HDR_SIG_ID_SHIFT +
@@ -3314,6 +3323,11 @@ ulp_mapper_global_res_free(struct bnxt_ulp_context *ulp __rte_unused,
rc = bnxt_pmd_global_tunnel_set(port_id, ttype, dport,
&handle);
break;
+ case BNXT_ULP_RESOURCE_SUB_TYPE_GLOBAL_REGISTER_CUST_VXLAN_GPE_V6:
+ ttype = BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN_GPE_V6;
+ rc = bnxt_pmd_global_tunnel_set(port_id, ttype, dport,
+ &handle);
+ break;
default:
rc = -EINVAL;
BNXT_TF_DBG(ERR, "Invalid ulp global resource type %d\n",
@@ -3374,6 +3388,9 @@ ulp_mapper_global_register_tbl_process(struct bnxt_ulp_mapper_parms *parms,
case BNXT_ULP_RESOURCE_SUB_TYPE_GLOBAL_REGISTER_CUST_VXLAN_GPE:
ttype = BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN_GPE;
break;
+ case BNXT_ULP_RESOURCE_SUB_TYPE_GLOBAL_REGISTER_CUST_VXLAN_GPE_V6:
+ ttype = BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN_GPE_V6;
+ break;
default:
rc = -EINVAL;
BNXT_TF_DBG(ERR, "Invalid ulp global resource type %d\n",
@@ -3421,15 +3438,14 @@ ulp_mapper_glb_resource_info_init(struct bnxt_ulp_context *ulp_ctx,
struct bnxt_ulp_mapper_data *mapper_data)
{
struct bnxt_ulp_glb_resource_info *glb_res;
- uint32_t num_glb_res_ids, idx, dev_id;
+ uint32_t num_entries = 0, idx, dev_id;
uint8_t app_id;
int32_t rc = 0;
- glb_res = ulp_mapper_glb_resource_info_list_get(&num_glb_res_ids);
- if (!glb_res || !num_glb_res_ids) {
- BNXT_TF_DBG(ERR, "Invalid Arguments\n");
- return -EINVAL;
- }
+ glb_res = ulp_mapper_glb_resource_info_list_get(&num_entries);
+ /* Check if there are no resources */
+ if (!num_entries)
+ return 0;
rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
if (rc) {
@@ -3446,7 +3462,7 @@ ulp_mapper_glb_resource_info_init(struct bnxt_ulp_context *ulp_ctx,
}
/* Iterate the global resources and process each one */
- for (idx = 0; idx < num_glb_res_ids; idx++) {
+ for (idx = 0; idx < num_entries; idx++) {
if (dev_id != glb_res[idx].device_id ||
glb_res[idx].app_id != app_id)
continue;
@@ -3480,15 +3496,14 @@ ulp_mapper_app_glb_resource_info_init(struct bnxt_ulp_context *ulp_ctx,
struct bnxt_ulp_mapper_data *mapper_data)
{
struct bnxt_ulp_glb_resource_info *glb_res;
- uint32_t num_glb_res_ids, idx, dev_id;
+ uint32_t num_entries, idx, dev_id;
uint8_t app_id;
int32_t rc = 0;
- glb_res = bnxt_ulp_app_glb_resource_info_list_get(&num_glb_res_ids);
- if (!glb_res || !num_glb_res_ids) {
- BNXT_TF_DBG(ERR, "Invalid Arguments\n");
- return -EINVAL;
- }
+ glb_res = bnxt_ulp_app_glb_resource_info_list_get(&num_entries);
+ /* Check if there are no resources */
+ if (!num_entries)
+ return 0;
rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id);
if (rc) {
@@ -3505,7 +3520,7 @@ ulp_mapper_app_glb_resource_info_init(struct bnxt_ulp_context *ulp_ctx,
}
/* Iterate the global resources and process each one */
- for (idx = 0; idx < num_glb_res_ids; idx++) {
+ for (idx = 0; idx < num_entries; idx++) {
if (dev_id != glb_res[idx].device_id ||
glb_res[idx].app_id != app_id)
continue;
@@ -98,6 +98,8 @@ struct bnxt_ulp_mapper_create_parms {
uint16_t port_id;
};
+uint32_t bnxt_ulp_glb_app_id_sig_get(uint8_t app_id);
+
/* Function to initialize any dynamic mapper data. */
int32_t
ulp_mapper_init(struct bnxt_ulp_context *ulp_ctx);
@@ -4,6 +4,7 @@
*/
#include "ulp_matcher.h"
+#include "ulp_mapper.h"
#include "ulp_utils.h"
/* Utility function to calculate the class matcher hash */
@@ -47,10 +48,13 @@ ulp_matcher_pattern_match(struct ulp_rte_parser_params *params,
struct bnxt_ulp_class_match_info *class_match;
uint32_t class_hid;
uint16_t tmpl_id;
+ uint32_t app_id_sig;
+
+ app_id_sig = bnxt_ulp_glb_app_id_sig_get(params->app_id);
/* calculate the hash of the given flow */
class_hid = ulp_matcher_class_hash_calculate((params->hdr_bitmap.bits ^
- params->app_id),
+ app_id_sig),
params->fld_s_bitmap.bits);
/* validate the calculate hash values */
@@ -71,9 +75,9 @@ ulp_matcher_pattern_match(struct ulp_rte_parser_params *params,
}
/* Match the application id before proceeding */
- if (params->app_id != class_match->app_sig) {
+ if (app_id_sig != class_match->app_sig) {
BNXT_TF_DBG(DEBUG, "Field to match the app id %u:%u\n",
- params->app_id, class_match->app_sig);
+ app_id_sig, class_match->app_sig);
goto error;
}
@@ -99,13 +103,16 @@ int32_t
ulp_matcher_action_match(struct ulp_rte_parser_params *params,
uint32_t *act_id)
{
+ struct bnxt_ulp_act_match_info *act_match;
+ uint32_t app_id_sig;
uint32_t act_hid;
uint16_t tmpl_id;
- struct bnxt_ulp_act_match_info *act_match;
+
+ app_id_sig = bnxt_ulp_glb_app_id_sig_get(params->app_id);
/* calculate the hash of the given flow action */
act_hid = ulp_matcher_action_hash_calculate(params->act_bitmap.bits,
- params->app_id);
+ app_id_sig);
/* validate the calculate hash values */
if (act_hid >= BNXT_ULP_ACT_SIG_TBL_MAX_SZ)
@@ -121,9 +128,9 @@ ulp_matcher_action_match(struct ulp_rte_parser_params *params,
}
/* Match the application id before proceeding */
- if (params->app_id != act_match->app_sig) {
+ if (app_id_sig != act_match->app_sig) {
BNXT_TF_DBG(DEBUG, "Field to match the app id %u:%u\n",
- params->app_id, act_match->app_sig);
+ app_id_sig, act_match->app_sig);
goto error;
}
@@ -50,8 +50,8 @@ int32_t ulp_port_db_init(struct bnxt_ulp_context *ulp_ctxt, uint8_t port_cnt)
/* Attach the port database to the ulp context. */
bnxt_ulp_cntxt_ptr2_port_db_set(ulp_ctxt, port_db);
- /* index 0 is not being used hence add 1 to size */
- port_db->ulp_intf_list_size = BNXT_PORT_DB_MAX_INTF_LIST + 1;
+ /* 256 VFs plus PFs etc., so double the list size to 512 */
+ port_db->ulp_intf_list_size = BNXT_PORT_DB_MAX_INTF_LIST * 2;
/* Allocate the port tables */
port_db->ulp_intf_list = rte_zmalloc("bnxt_ulp_port_db_intf_list",
port_db->ulp_intf_list_size *
@@ -249,10 +249,19 @@ bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
if (dir == BNXT_ULP_DIR_INGRESS) {
/* Set port PARIF */
if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
- BNXT_ULP_PHY_PORT_PARIF, &parif)) {
+ BNXT_ULP_DRV_FUNC_PARIF, &parif)) {
BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
return;
}
+ /* Note:
+ * We save the drv_func_parif into the CF_IDX of phy_port_parif,
+ * since that index is currently referenced by the ingress templates
+ * for datapath flows. If in the future the parser is changed to
+ * save it in the CF_IDX of drv_func_parif, the templates must be
+ * updated as well.
+ * WARNING: Two VFs on the same parent PF will not work, as the parif
+ * is based on the fw fid of the parent PF.
+ */
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
parif);
/* Set port SVIF */
@@ -1232,6 +1241,8 @@ ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
uint16_t dst_port, uint16_t dst_mask,
enum bnxt_ulp_hdr_bit hdr_bit)
{
+ struct bnxt *bp;
+
switch (hdr_bit) {
case BNXT_ULP_HDR_BIT_I_UDP:
case BNXT_ULP_HDR_BIT_I_TCP:
@@ -1281,19 +1292,53 @@ ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
break;
}
- if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
- tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
- ULP_BITMAP_SET(params->hdr_fp_bit.bits,
- BNXT_ULP_HDR_BIT_T_VXLAN);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
+ bp = bnxt_pmd_get_bp(params->port_id);
+ if (bp == NULL) {
+ BNXT_TF_DBG(ERR, "Invalid bp\n");
+ return;
}
- if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
- tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN_GPE)) {
+ /* vxlan dynamic customized port */
+ if (ULP_APP_CUST_VXLAN_EN(params->ulp_ctx)) {
+ /* ulp_rte_vxlan_hdr_handler will parse it further */
+ return;
+ }
+ /* vxlan static customized port */
+ else if (ULP_APP_CUST_VXLAN_SUPPORT(bp->ulp_ctx)) {
+ if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP &&
+ dst_port == tfp_cpu_to_be_16(bp->ulp_ctx->cfg_data->vxlan_port)) {
+ ULP_BITMAP_SET(params->hdr_fp_bit.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
+ }
+ }
+ /* vxlan ip port */
+ else if (ULP_APP_CUST_VXLAN_IP_SUPPORT(bp->ulp_ctx)) {
+ if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP &&
+ dst_port == tfp_cpu_to_be_16(bp->ulp_ctx->cfg_data->vxlan_ip_port)) {
+ ULP_BITMAP_SET(params->hdr_fp_bit.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
+ if (bp->vxlan_ip_upar_in_use &
+ HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR0) {
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_VXLAN_IP_UPAR_ID,
+ ULP_WP_SYM_TUN_HDR_TYPE_UPAR1);
+ }
+ }
+ }
+ /* vxlan gpe port */
+ else if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP &&
+ dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN_GPE)) {
ULP_BITMAP_SET(params->hdr_fp_bit.bits,
BNXT_ULP_HDR_BIT_T_VXLAN_GPE);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
}
+ /* vxlan standard port */
+ else if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP &&
+ dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
+ ULP_BITMAP_SET(params->hdr_fp_bit.bits,
+ BNXT_ULP_HDR_BIT_T_VXLAN);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
+ }
}
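With the classification above, the outer UDP destination port of a flow pattern is compared against whichever VXLAN port the application registered rather than only the IANA default. A minimal rte_flow pattern sketch follows (a hedged illustration, assuming 4790 has been registered as the customized VXLAN port on this device):

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Outer UDP destination port equal to the customized VXLAN port, so the
 * parser above sets BNXT_ULP_HDR_BIT_T_VXLAN for this pattern.
 */
static const struct rte_flow_item_udp udp_spec = {
	.hdr.dst_port = RTE_BE16(4790),
};
static const struct rte_flow_item_udp udp_mask = {
	.hdr.dst_port = RTE_BE16(0xffff),
};

static const struct rte_flow_item vxlan_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec, .mask = &udp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};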
/* Function to handle the parsing of RTE Flow item UDP Header. */
@@ -1536,6 +1581,17 @@ ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
ULP_UDP_PORT_VXLAN_MASK);
}
+ /* No need to check vxlan port for these conditions here */
+ if (ULP_APP_CUST_VXLAN_EN(params->ulp_ctx) ||
+ ULP_APP_CUST_VXLAN_SUPPORT(params->ulp_ctx) ||
+ ULP_APP_CUST_VXLAN_IP_SUPPORT(params->ulp_ctx))
+ return BNXT_TF_RC_SUCCESS;
+
+ /* Verify vxlan port */
+ if (dport != 0 && dport != ULP_UDP_PORT_VXLAN) {
+ BNXT_TF_DBG(ERR, "ParseErr:vxlan port is not valid\n");
+ return BNXT_TF_RC_PARSE_ERR;
+ }
return BNXT_TF_RC_SUCCESS;
}
@@ -1603,6 +1659,18 @@ ulp_rte_vxlan_gpe_hdr_handler(const struct rte_flow_item *item,
ULP_UDP_PORT_VXLAN_GPE_MASK);
}
+ if (ULP_APP_CUST_VXLAN_EN(params->ulp_ctx) ||
+ ULP_APP_CUST_VXLAN_SUPPORT(params->ulp_ctx) ||
+ ULP_APP_CUST_VXLAN_IP_SUPPORT(params->ulp_ctx)) {
+ BNXT_TF_DBG(ERR, "ParseErr:vxlan setting is not valid\n");
+ return BNXT_TF_RC_PARSE_ERR;
+ }
+
+ /* Verify the vxlan gpe port */
+ if (dport != 0 && dport != ULP_UDP_PORT_VXLAN_GPE) {
+ BNXT_TF_DBG(ERR, "ParseErr:vxlan gpe port is not valid\n");
+ return BNXT_TF_RC_PARSE_ERR;
+ }
return BNXT_TF_RC_SUCCESS;
}
@@ -373,6 +373,7 @@ struct bnxt_ulp_app_capabilities_info {
uint8_t ha_pool_id;
uint8_t ha_reg_state;
uint8_t ha_reg_cnt;
+ uint8_t tunnel_next_proto;
uint32_t flags;
};