get:
Show a patch.

patch:
Update a patch (partial update: only the fields present in the request are changed).

put:
Update a patch (full update: the request is expected to supply all writable fields).
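
For example, these operations can be driven with any HTTP client. Below is a minimal sketch using Python's requests library; the token value and the fields being written are illustrative, and write access requires an account with maintainer rights on the project. The captured exchange that follows shows what the GET returns.

import requests

BASE = "http://patchwork.dpdk.org/api"
TOKEN = "0123456789abcdef"  # hypothetical API token

# get: fetch the patch as JSON
resp = requests.get(BASE + "/patches/60588/")
resp.raise_for_status()
patch = resp.json()
print(patch["state"], patch["archived"])

# patch: partial update -- only the fields listed here are changed
resp = requests.patch(
    BASE + "/patches/60588/",
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()

A PUT to the same URL differs only in being a full update, so the request should carry every writable field rather than a subset.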

GET /api/patches/60588/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 60588,
    "url": "http://patchwork.dpdk.org/api/patches/60588/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20191006201409.8770-9-rmody@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20191006201409.8770-9-rmody@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20191006201409.8770-9-rmody@marvell.com",
    "date": "2019-10-06T20:14:08",
    "name": "[v2,8/9] net/qede/base: update the FW to 8.40.25.0",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "9624b50664bcf8a3bb1ad2543aeb87285ac5fef4",
    "submitter": {
        "id": 1211,
        "url": "http://patchwork.dpdk.org/api/people/1211/?format=api",
        "name": "Rasesh Mody",
        "email": "rmody@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patchwork.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20191006201409.8770-9-rmody@marvell.com/mbox/",
    "series": [
        {
            "id": 6714,
            "url": "http://patchwork.dpdk.org/api/series/6714/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=6714",
            "date": "2019-10-06T20:14:08",
            "name": null,
            "version": 2,
            "mbox": "http://patchwork.dpdk.org/series/6714/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/60588/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/60588/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 0DB901D420;\n\tSun,  6 Oct 2019 22:14:46 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n\t[67.231.148.174]) by dpdk.org (Postfix) with ESMTP id E99E61D17D\n\tfor <dev@dpdk.org>; Sun,  6 Oct 2019 22:14:34 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n\tby mx0a-0016f401.pphosted.com (8.16.0.42/8.16.0.42) with SMTP id\n\tx96KEXVC010615; Sun, 6 Oct 2019 13:14:34 -0700",
            "from sc-exch03.marvell.com ([199.233.58.183])\n\tby mx0a-0016f401.pphosted.com with ESMTP id 2verhrc816-1\n\t(version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); \n\tSun, 06 Oct 2019 13:14:33 -0700",
            "from SC-EXCH01.marvell.com (10.93.176.81) by SC-EXCH03.marvell.com\n\t(10.93.176.83) with Microsoft SMTP Server (TLS) id 15.0.1367.3;\n\tSun, 6 Oct 2019 13:14:31 -0700",
            "from maili.marvell.com (10.93.176.43) by SC-EXCH01.marvell.com\n\t(10.93.176.81) with Microsoft SMTP Server id 15.0.1367.3 via Frontend\n\tTransport; Sun, 6 Oct 2019 13:14:31 -0700",
            "from irv1user08.caveonetworks.com (unknown [10.104.116.105])\n\tby maili.marvell.com (Postfix) with ESMTP id 37E863F7040;\n\tSun,  6 Oct 2019 13:14:30 -0700 (PDT)",
            "(from rmody@localhost)\n\tby irv1user08.caveonetworks.com (8.14.4/8.14.4/Submit) id\n\tx96KEToM008869; Sun, 6 Oct 2019 13:14:29 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n\th=from : to : cc :\n\tsubject : date : message-id : in-reply-to : references : mime-version\n\t: content-type; s=pfpt0818;\n\tbh=IPYBuSxh9xvEiVs3fVVqq8ULYcLnC1F+ah3+IyPaJ7Q=; \n\tb=f2z5NqN7GXnARWPW30GPVrPVKv9RVMPryf5mR5CQWFpkYtrzs0s/aXc3A+poxaTTh1HP\n\t+ycVga3zlT4SP/HgEiiMa8uAdRG7HeXEL7xYl4Tol0i5aya6otCnDnGDUyYxudLq6moi\n\tE9QD5FWj0Yl8kwS/XbfgWrE/7A6sFU9e/8h5Nx6mxIv3/fuM7VpBoF5GjDzpZ9yFr2i2\n\t4MFIaKpJTSmZNQ5LEVU4Tib9Vhi7tADZ1Z4FLLv8ONFlwFEx6MMT5N/Cdqgym9bh4zoN\n\tEvLMsyYkbN5UwwcxGNdQ2b3d4i9IC5oW4T0cTMzkO7vFg4TAl5gS+l8Tj7Ess0N8W0Hq\n\tBg== ",
        "X-Authentication-Warning": "irv1user08.caveonetworks.com: rmody set sender to\n\trmody@marvell.com using -f",
        "From": "Rasesh Mody <rmody@marvell.com>",
        "To": "<dev@dpdk.org>, <jerinj@marvell.com>, <ferruh.yigit@intel.com>",
        "CC": "Rasesh Mody <rmody@marvell.com>, <GR-Everest-DPDK-Dev@marvell.com>",
        "Date": "Sun, 6 Oct 2019 13:14:08 -0700",
        "Message-ID": "<20191006201409.8770-9-rmody@marvell.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<20190930024921.21818-1-rmody@marvell.com>",
        "References": "<20190930024921.21818-1-rmody@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.95,1.0.8\n\tdefinitions=2019-10-06_08:2019-10-03,2019-10-06 signatures=0",
        "Subject": "[dpdk-dev] [PATCH v2 8/9] net/qede/base: update the FW to 8.40.25.0",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch updates the FW to 8.40.25.0 and corresponding base driver\nchanges. It also updates the PMD version to 2.11.0.1. The FW updates\nconsists of enhancements and fixes as described below.\n\n - VF RX queue start ramrod can get stuck due to completion error.\n   Return EQ completion with error, when fail to load VF data. Use VF\n   FID in RX queue start ramrod\n - Fix big receive buffer initialization for 100G to address failure\n   leading to BRB hardware assertion\n - GRE tunnel traffic doesn't run when non-L2 ethernet protocol is enabled,\n   fix FW to not forward tunneled SYN packets to LL2.\n - Fix the FW assert that is caused during vport_update when\n   tx-switching is enabled\n - Add initial FW support for VF Representors\n - Add ecore_get_hsi_def_val() API to get default HSI values\n - Move following from .c to .h files:\n   TSTORM_QZONE_START and MSTORM_QZONE_START\n   enum ilt_clients\n   renamed struct ecore_dma_mem to phys_mem_desc and moved\n - Add ecore_cxt_set_cli() and ecore_cxt_set_blk() APIs to set client\n   config and block details\n - Use SET_FIELD() macro where appropriate\n - Address spell check and code alignment issues\n\nSigned-off-by: Rasesh Mody <rmody@marvell.com>\n---\n drivers/net/qede/base/ecore.h               |  73 ++-\n drivers/net/qede/base/ecore_cxt.c           | 497 ++++++++------\n drivers/net/qede/base/ecore_cxt.h           |  12 +\n drivers/net/qede/base/ecore_dcbx.c          |   5 +-\n drivers/net/qede/base/ecore_dev.c           | 586 ++++++++++-------\n drivers/net/qede/base/ecore_init_fw_funcs.c | 681 ++++++++++----------\n drivers/net/qede/base/ecore_init_fw_funcs.h | 107 ++-\n drivers/net/qede/base/ecore_init_ops.c      |  15 +-\n drivers/net/qede/base/ecore_init_ops.h      |   2 +-\n drivers/net/qede/base/ecore_int.c           | 129 ++--\n drivers/net/qede/base/ecore_int_api.h       |  11 +-\n drivers/net/qede/base/ecore_l2.c            |  10 +-\n drivers/net/qede/base/ecore_l2_api.h        |   2 +\n drivers/net/qede/base/ecore_mcp.c           | 287 +++++----\n drivers/net/qede/base/ecore_mcp.h           |   9 +-\n drivers/net/qede/base/ecore_proto_if.h      |   1 +\n drivers/net/qede/base/ecore_sp_commands.c   |  15 +-\n drivers/net/qede/base/ecore_spq.c           |  53 +-\n drivers/net/qede/base/ecore_sriov.c         | 157 +++--\n drivers/net/qede/base/ecore_vf.c            |  18 +-\n drivers/net/qede/qede_ethdev.h              |   2 +-\n drivers/net/qede/qede_main.c                |   2 +-\n drivers/net/qede/qede_rxtx.c                |   4 +-\n 23 files changed, 1584 insertions(+), 1094 deletions(-)",
    "diff": "diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h\nindex b1d8706c9..925b75cb9 100644\n--- a/drivers/net/qede/base/ecore.h\n+++ b/drivers/net/qede/base/ecore.h\n@@ -28,8 +28,8 @@\n #include \"mcp_public.h\"\n \n #define ECORE_MAJOR_VERSION\t\t8\n-#define ECORE_MINOR_VERSION\t\t37\n-#define ECORE_REVISION_VERSION\t\t20\n+#define ECORE_MINOR_VERSION\t\t40\n+#define ECORE_REVISION_VERSION\t\t18\n #define ECORE_ENGINEERING_VERSION\t0\n \n #define ECORE_VERSION\t\t\t\t\t\t\t\\\n@@ -467,6 +467,8 @@ struct ecore_wfq_data {\n \tbool configured;\n };\n \n+#define OFLD_GRP_SIZE 4\n+\n struct ecore_qm_info {\n \tstruct init_qm_pq_params    *qm_pq_params;\n \tstruct init_qm_vport_params *qm_vport_params;\n@@ -513,6 +515,8 @@ struct ecore_fw_data {\n \tconst u8 *modes_tree_buf;\n \tunion init_op *init_ops;\n \tconst u32 *arr_data;\n+\tconst u32 *fw_overlays;\n+\tu32 fw_overlays_len;\n \tu32 init_ops_size;\n };\n \n@@ -592,6 +596,7 @@ struct ecore_hwfn {\n \n \tu8\t\t\t\tnum_funcs_on_engine;\n \tu8\t\t\t\tenabled_func_idx;\n+\tu8\t\t\t\tnum_funcs_on_port;\n \n \t/* BAR access */\n \tvoid OSAL_IOMEM\t\t\t*regview;\n@@ -745,7 +750,6 @@ struct ecore_dev {\n #endif\n #define ECORE_IS_AH(dev)\t((dev)->type == ECORE_DEV_TYPE_AH)\n #define ECORE_IS_K2(dev)\tECORE_IS_AH(dev)\n-#define ECORE_IS_E4(dev)\t(ECORE_IS_BB(dev) || ECORE_IS_AH(dev))\n \n \tu16 vendor_id;\n \tu16 device_id;\n@@ -893,6 +897,7 @@ struct ecore_dev {\n \n #ifndef ASIC_ONLY\n \tbool\t\t\t\tb_is_emul_full;\n+\tbool\t\t\t\tb_is_emul_mac;\n #endif\n \t/* LLH info */\n \tu8\t\t\t\tppfid_bitmap;\n@@ -911,16 +916,52 @@ struct ecore_dev {\n \tu8\t\t\t\tengine_for_debug;\n };\n \n-#define NUM_OF_VFS(dev)\t\t(ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \\\n-\t\t\t\t\t\t  : MAX_NUM_VFS_K2)\n-#define NUM_OF_L2_QUEUES(dev)\t(ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \\\n-\t\t\t\t\t\t  : MAX_NUM_L2_QUEUES_K2)\n-#define NUM_OF_PORTS(dev)\t(ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \\\n-\t\t\t\t\t\t  : MAX_NUM_PORTS_K2)\n-#define NUM_OF_SBS(dev)\t\t(ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \\\n-\t\t\t\t\t\t  : MAX_SB_PER_PATH_K2)\n-#define NUM_OF_ENG_PFS(dev)\t(ECORE_IS_BB(dev) ? 
MAX_NUM_PFS_BB \\\n-\t\t\t\t\t\t  : MAX_NUM_PFS_K2)\n+enum ecore_hsi_def_type {\n+\tECORE_HSI_DEF_MAX_NUM_VFS,\n+\tECORE_HSI_DEF_MAX_NUM_L2_QUEUES,\n+\tECORE_HSI_DEF_MAX_NUM_PORTS,\n+\tECORE_HSI_DEF_MAX_SB_PER_PATH,\n+\tECORE_HSI_DEF_MAX_NUM_PFS,\n+\tECORE_HSI_DEF_MAX_NUM_VPORTS,\n+\tECORE_HSI_DEF_NUM_ETH_RSS_ENGINE,\n+\tECORE_HSI_DEF_MAX_QM_TX_QUEUES,\n+\tECORE_HSI_DEF_NUM_PXP_ILT_RECORDS,\n+\tECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,\n+\tECORE_HSI_DEF_MAX_QM_GLOBAL_RLS,\n+\tECORE_HSI_DEF_MAX_PBF_CMD_LINES,\n+\tECORE_HSI_DEF_MAX_BTB_BLOCKS,\n+\tECORE_NUM_HSI_DEFS\n+};\n+\n+u32 ecore_get_hsi_def_val(struct ecore_dev *p_dev,\n+\t\t\t  enum ecore_hsi_def_type type);\n+\n+#define NUM_OF_VFS(dev) \\\n+\tecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_VFS)\n+#define NUM_OF_L2_QUEUES(dev) \\\n+\tecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_L2_QUEUES)\n+#define NUM_OF_PORTS(dev) \\\n+\tecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_PORTS)\n+#define NUM_OF_SBS(dev) \\\n+\tecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_SB_PER_PATH)\n+#define NUM_OF_ENG_PFS(dev) \\\n+\tecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_PFS)\n+#define NUM_OF_VPORTS(dev) \\\n+\tecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_VPORTS)\n+#define NUM_OF_RSS_ENGINES(dev) \\\n+\tecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_ETH_RSS_ENGINE)\n+#define NUM_OF_QM_TX_QUEUES(dev) \\\n+\tecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_QM_TX_QUEUES)\n+#define NUM_OF_PXP_ILT_RECORDS(dev) \\\n+\tecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_PXP_ILT_RECORDS)\n+#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \\\n+\tecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)\n+#define NUM_OF_QM_GLOBAL_RLS(dev) \\\n+\tecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_QM_GLOBAL_RLS)\n+#define NUM_OF_PBF_CMD_LINES(dev) \\\n+\tecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_PBF_CMD_LINES)\n+#define NUM_OF_BTB_BLOCKS(dev) \\\n+\tecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_BTB_BLOCKS)\n \n #define CRC8_TABLE_SIZE 256\n \n@@ -948,7 +989,6 @@ static OSAL_INLINE u8 ecore_concrete_to_sw_fid(u32 concrete_fid)\n }\n \n #define PKT_LB_TC 9\n-#define MAX_NUM_VOQS_E4 20\n \n int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);\n void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,\n@@ -1023,4 +1063,9 @@ enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,\n enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid);\n enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev);\n \n+#define TSTORM_QZONE_START\tPXP_VF_BAR0_START_SDM_ZONE_A\n+\n+#define MSTORM_QZONE_START(dev) \\\n+\t(TSTORM_QZONE_START + (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))\n+\n #endif /* __ECORE_H */\ndiff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c\nindex 0f04c9447..773b75ecd 100644\n--- a/drivers/net/qede/base/ecore_cxt.c\n+++ b/drivers/net/qede/base/ecore_cxt.c\n@@ -33,6 +33,10 @@\n /* Searcher constants */\n #define SRC_MIN_NUM_ELEMS 256\n \n+/* GFS constants */\n+#define RGFS_MIN_NUM_ELEMS\t256\n+#define TGFS_MIN_NUM_ELEMS\t256\n+\n /* Timers constants */\n #define TM_SHIFT\t7\n #define TM_ALIGN\t(1 << TM_SHIFT)\n@@ -114,16 +118,6 @@ struct ecore_conn_type_cfg {\n #define CDUT_SEG_BLK(n)\t\t(1 + (u8)(n))\n #define CDUT_FL_SEG_BLK(n, X)\t(1 + (n) + NUM_TASK_##X##_SEGMENTS)\n \n-enum ilt_clients {\n-\tILT_CLI_CDUC,\n-\tILT_CLI_CDUT,\n-\tILT_CLI_QM,\n-\tILT_CLI_TM,\n-\tILT_CLI_SRC,\n-\tILT_CLI_TSDM,\n-\tILT_CLI_MAX\n-};\n-\n struct ilt_cfg_pair {\n \tu32 reg;\n \tu32 
val;\n@@ -133,6 +127,7 @@ struct ecore_ilt_cli_blk {\n \tu32 total_size;\t\t/* 0 means not active */\n \tu32 real_size_in_page;\n \tu32 start_line;\n+\tu32 dynamic_line_offset;\n \tu32 dynamic_line_cnt;\n };\n \n@@ -153,17 +148,6 @@ struct ecore_ilt_client_cfg {\n \tu32 vf_total_lines;\n };\n \n-/* Per Path -\n- *      ILT shadow table\n- *      Protocol acquired CID lists\n- *      PF start line in ILT\n- */\n-struct ecore_dma_mem {\n-\tdma_addr_t p_phys;\n-\tvoid *p_virt;\n-\tosal_size_t size;\n-};\n-\n #define MAP_WORD_SIZE\t\tsizeof(unsigned long)\n #define BITS_PER_MAP_WORD\t(MAP_WORD_SIZE * 8)\n \n@@ -173,6 +157,13 @@ struct ecore_cid_acquired_map {\n \tunsigned long *cid_map;\n };\n \n+struct ecore_src_t2 {\n+\tstruct phys_mem_desc\t*dma_mem;\n+\tu32\t\t\tnum_pages;\n+\tu64\t\t\tfirst_free;\n+\tu64\t\t\tlast_free;\n+};\n+\n struct ecore_cxt_mngr {\n \t/* Per protocl configuration */\n \tstruct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];\n@@ -193,17 +184,14 @@ struct ecore_cxt_mngr {\n \tstruct ecore_cid_acquired_map *acquired_vf[MAX_CONN_TYPES];\n \n \t/* ILT  shadow table */\n-\tstruct ecore_dma_mem *ilt_shadow;\n+\tstruct phys_mem_desc\t\t*ilt_shadow;\n \tu32 pf_start_line;\n \n \t/* Mutex for a dynamic ILT allocation */\n \tosal_mutex_t mutex;\n \n \t/* SRC T2 */\n-\tstruct ecore_dma_mem *t2;\n-\tu32 t2_num_pages;\n-\tu64 first_free;\n-\tu64 last_free;\n+\tstruct ecore_src_t2\t\tsrc_t2;\n \n \t/* The infrastructure originally was very generic and context/task\n \t * oriented - per connection-type we would set how many of those\n@@ -280,15 +268,17 @@ struct ecore_tm_iids {\n \tu32 per_vf_tids;\n };\n \n-static void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,\n+static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn,\n+\t\t\t      struct ecore_cxt_mngr *p_mngr,\n \t\t\t      struct ecore_tm_iids *iids)\n {\n+\tstruct ecore_conn_type_cfg *p_cfg;\n \tbool tm_vf_required = false;\n \tbool tm_required = false;\n \tu32 i, j;\n \n \tfor (i = 0; i < MAX_CONN_TYPES; i++) {\n-\t\tstruct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];\n+\t\tp_cfg = &p_mngr->conn_cfg[i];\n \n \t\tif (tm_cid_proto(i) || tm_required) {\n \t\t\tif (p_cfg->cid_count)\n@@ -490,43 +480,84 @@ static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,\n \t\t   p_blk->start_line);\n }\n \n-static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn,\n-\t\t\t\t\t  enum ilt_clients ilt_client)\n+static void ecore_ilt_get_dynamic_line_range(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t     enum ilt_clients ilt_client,\n+\t\t\t\t\t     u32 *dynamic_line_offset,\n+\t\t\t\t\t     u32 *dynamic_line_cnt)\n {\n-\tu32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;\n \tstruct ecore_ilt_client_cfg *p_cli;\n-\tu32 lines_to_skip = 0;\n+\tstruct ecore_conn_type_cfg *p_cfg;\n \tu32 cxts_per_p;\n \n \t/* TBD MK: ILT code should be simplified once PROTO enum is changed */\n \n+\t*dynamic_line_offset = 0;\n+\t*dynamic_line_cnt = 0;\n+\n \tif (ilt_client == ILT_CLI_CDUC) {\n \t\tp_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];\n+\t\tp_cfg = &p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE];\n \n \t\tcxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /\n \t\t    (u32)CONN_CXT_SIZE(p_hwfn);\n \n-\t\tlines_to_skip = cid_count / cxts_per_p;\n+\t\t*dynamic_line_cnt = p_cfg->cid_count / cxts_per_p;\n+\t}\n+}\n+\n+static struct ecore_ilt_client_cfg *\n+ecore_cxt_set_cli(struct ecore_ilt_client_cfg *p_cli)\n+{\n+\tp_cli->active = false;\n+\tp_cli->first.val = 0;\n+\tp_cli->last.val = 0;\n+\treturn 
p_cli;\n+}\n+\n+static struct ecore_ilt_cli_blk *\n+ecore_cxt_set_blk(struct ecore_ilt_cli_blk *p_blk)\n+{\n+\tp_blk->total_size = 0;\n+\treturn p_blk;\n \t}\n \n-\treturn lines_to_skip;\n+static u32\n+ecore_cxt_src_elements(struct ecore_cxt_mngr *p_mngr)\n+{\n+\tstruct ecore_src_iids src_iids;\n+\tu32 elem_num = 0;\n+\n+\tOSAL_MEM_ZERO(&src_iids, sizeof(src_iids));\n+\tecore_cxt_src_iids(p_mngr, &src_iids);\n+\n+\t/* Both the PF and VFs searcher connections are stored in the per PF\n+\t * database. Thus sum the PF searcher cids and all the VFs searcher\n+\t * cids.\n+\t */\n+\telem_num = src_iids.pf_cids +\n+\t\t   src_iids.per_vf_cids * p_mngr->vf_count;\n+\tif (elem_num == 0)\n+\t\treturn elem_num;\n+\n+\telem_num = OSAL_MAX_T(u32, elem_num, SRC_MIN_NUM_ELEMS);\n+\telem_num = OSAL_ROUNDUP_POW_OF_TWO(elem_num);\n+\n+\treturn elem_num;\n }\n \n enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n {\n+\tu32 curr_line, total, i, task_size, line, total_size, elem_size;\n \tstruct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;\n-\tu32 curr_line, total, i, task_size, line;\n \tstruct ecore_ilt_client_cfg *p_cli;\n \tstruct ecore_ilt_cli_blk *p_blk;\n \tstruct ecore_cdu_iids cdu_iids;\n-\tstruct ecore_src_iids src_iids;\n \tstruct ecore_qm_iids qm_iids;\n \tstruct ecore_tm_iids tm_iids;\n \tstruct ecore_tid_seg *p_seg;\n \n \tOSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));\n \tOSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));\n-\tOSAL_MEM_ZERO(&src_iids, sizeof(src_iids));\n \tOSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));\n \n \tp_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);\n@@ -536,7 +567,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \t\t   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);\n \n \t/* CDUC */\n-\tp_cli = &p_mngr->clients[ILT_CLI_CDUC];\n+\tp_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);\n \n \tcurr_line = p_mngr->pf_start_line;\n \n@@ -546,7 +577,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \t/* get the counters for the CDUC,CDUC and QM clients  */\n \tecore_cxt_cdu_iids(p_mngr, &cdu_iids);\n \n-\tp_blk = &p_cli->pf_blks[CDUC_BLK];\n+\tp_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);\n \n \ttotal = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);\n \n@@ -556,11 +587,12 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \tecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);\n \tp_cli->pf_total_lines = curr_line - p_blk->start_line;\n \n-\tp_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn,\n-\t\t\t\t\t\t\t\t ILT_CLI_CDUC);\n+\tecore_ilt_get_dynamic_line_range(p_hwfn, ILT_CLI_CDUC,\n+\t\t\t\t\t &p_blk->dynamic_line_offset,\n+\t\t\t\t\t &p_blk->dynamic_line_cnt);\n \n \t/* CDUC VF */\n-\tp_blk = &p_cli->vf_blks[CDUC_BLK];\n+\tp_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);\n \ttotal = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);\n \n \tecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,\n@@ -574,7 +606,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \t\t\t\t       ILT_CLI_CDUC);\n \n \t/* CDUT PF */\n-\tp_cli = &p_mngr->clients[ILT_CLI_CDUT];\n+\tp_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);\n \tp_cli->first.val = curr_line;\n \n \t/* first the 'working' task memory */\n@@ -583,7 +615,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \t\tif (!p_seg || p_seg->count == 0)\n \t\t\tcontinue;\n \n-\t\tp_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];\n+\t\tp_blk = 
ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);\n \t\ttotal = p_seg->count * p_mngr->task_type_size[p_seg->type];\n \t\tecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,\n \t\t\t\t       p_mngr->task_type_size[p_seg->type]);\n@@ -598,7 +630,8 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \t\tif (!p_seg || p_seg->count == 0)\n \t\t\tcontinue;\n \n-\t\tp_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];\n+\t\tp_blk =\n+\t\t     ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);\n \n \t\tif (!p_seg->has_fl_mem) {\n \t\t\t/* The segment is active (total size pf 'working'\n@@ -631,7 +664,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \t\tecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,\n \t\t\t\t       ILT_CLI_CDUT);\n \t}\n-\tp_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;\n+\tp_cli->pf_total_lines = curr_line - p_cli->first.val;\n \n \t/* CDUT VF */\n \tp_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);\n@@ -643,7 +676,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \t\t/* 'working' memory */\n \t\ttotal = p_seg->count * p_mngr->task_type_size[p_seg->type];\n \n-\t\tp_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];\n+\t\tp_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);\n \t\tecore_ilt_cli_blk_fill(p_cli, p_blk,\n \t\t\t\t       curr_line, total,\n \t\t\t\t       p_mngr->task_type_size[p_seg->type]);\n@@ -652,7 +685,8 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \t\t\t\t       ILT_CLI_CDUT);\n \n \t\t/* 'init' memory */\n-\t\tp_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];\n+\t\tp_blk =\n+\t\t     ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);\n \t\tif (!p_seg->has_fl_mem) {\n \t\t\t/* see comment above */\n \t\t\tline = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;\n@@ -664,15 +698,17 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \t\t\tecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,\n \t\t\t\t\t       ILT_CLI_CDUT);\n \t\t}\n-\t\tp_cli->vf_total_lines = curr_line -\n-\t\t    p_cli->vf_blks[0].start_line;\n+\t\tp_cli->vf_total_lines = curr_line - (p_cli->first.val +\n+\t\t\t\t\t\t     p_cli->pf_total_lines);\n \n \t\t/* Now for the rest of the VFs */\n \t\tfor (i = 1; i < p_mngr->vf_count; i++) {\n+\t\t\t/* don't set p_blk i.e. don't clear total_size */\n \t\t\tp_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];\n \t\t\tecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,\n \t\t\t\t\t       ILT_CLI_CDUT);\n \n+\t\t\t/* don't set p_blk i.e. don't clear total_size */\n \t\t\tp_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];\n \t\t\tecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,\n \t\t\t\t\t       ILT_CLI_CDUT);\n@@ -680,13 +716,19 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \t}\n \n \t/* QM */\n-\tp_cli = &p_mngr->clients[ILT_CLI_QM];\n-\tp_blk = &p_cli->pf_blks[0];\n-\n+\tp_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);\n+\tp_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);\n+\n+\t/* At this stage, after the first QM configuration, the PF PQs amount\n+\t * is the highest possible. 
Save this value at qm_info->ilt_pf_pqs to\n+\t * detect overflows in the future.\n+\t * Even though VF PQs amount can be larger than VF count, use vf_count\n+\t * because each VF requires only the full amount of CIDs.\n+\t */\n \tecore_cxt_qm_iids(p_hwfn, &qm_iids);\n-\ttotal = ecore_qm_pf_mem_size(qm_iids.cids,\n+\ttotal = ecore_qm_pf_mem_size(p_hwfn, qm_iids.cids,\n \t\t\t\t     qm_iids.vf_cids, qm_iids.tids,\n-\t\t\t\t     p_hwfn->qm_info.num_pqs,\n+\t\t\t\t     p_hwfn->qm_info.num_pqs + OFLD_GRP_SIZE,\n \t\t\t\t     p_hwfn->qm_info.num_vf_pqs);\n \n \tDP_VERBOSE(p_hwfn, ECORE_MSG_ILT,\n@@ -701,39 +743,15 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \tecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);\n \tp_cli->pf_total_lines = curr_line - p_blk->start_line;\n \n-\t/* SRC */\n-\tp_cli = &p_mngr->clients[ILT_CLI_SRC];\n-\tecore_cxt_src_iids(p_mngr, &src_iids);\n-\n-\t/* Both the PF and VFs searcher connections are stored in the per PF\n-\t * database. Thus sum the PF searcher cids and all the VFs searcher\n-\t * cids.\n-\t */\n-\ttotal = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;\n-\tif (total) {\n-\t\tu32 local_max = OSAL_MAX_T(u32, total,\n-\t\t\t\t\t   SRC_MIN_NUM_ELEMS);\n-\n-\t\ttotal = OSAL_ROUNDUP_POW_OF_TWO(local_max);\n-\n-\t\tp_blk = &p_cli->pf_blks[0];\n-\t\tecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,\n-\t\t\t\t       total * sizeof(struct src_ent),\n-\t\t\t\t       sizeof(struct src_ent));\n-\n-\t\tecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,\n-\t\t\t\t       ILT_CLI_SRC);\n-\t\tp_cli->pf_total_lines = curr_line - p_blk->start_line;\n-\t}\n-\n \t/* TM PF */\n-\tp_cli = &p_mngr->clients[ILT_CLI_TM];\n-\tecore_cxt_tm_iids(p_mngr, &tm_iids);\n+\tp_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);\n+\tecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);\n \ttotal = tm_iids.pf_cids + tm_iids.pf_tids_total;\n \tif (total) {\n-\t\tp_blk = &p_cli->pf_blks[0];\n+\t\tp_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);\n \t\tecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,\n-\t\t\t\t       total * TM_ELEM_SIZE, TM_ELEM_SIZE);\n+\t\t\t\t       total * TM_ELEM_SIZE,\n+\t\t\t\t       TM_ELEM_SIZE);\n \n \t\tecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,\n \t\t\t\t       ILT_CLI_TM);\n@@ -743,7 +761,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \t/* TM VF */\n \ttotal = tm_iids.per_vf_cids + tm_iids.per_vf_tids;\n \tif (total) {\n-\t\tp_blk = &p_cli->vf_blks[0];\n+\t\tp_blk = ecore_cxt_set_blk(&p_cli->vf_blks[0]);\n \t\tecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,\n \t\t\t\t       total * TM_ELEM_SIZE, TM_ELEM_SIZE);\n \n@@ -757,12 +775,28 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \t\t}\n \t}\n \n+\t/* SRC */\n+\tp_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);\n+\ttotal = ecore_cxt_src_elements(p_mngr);\n+\n+\tif (total) {\n+\t\ttotal_size = total * sizeof(struct src_ent);\n+\t\telem_size = sizeof(struct src_ent);\n+\n+\t\tp_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);\n+\t\tecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,\n+\t\t\t\t       total_size, elem_size);\n+\t\tecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,\n+\t\t\t\t       ILT_CLI_SRC);\n+\t\tp_cli->pf_total_lines = curr_line - p_blk->start_line;\n+\t}\n+\n \t/* TSDM (SRQ CONTEXT) */\n \ttotal = ecore_cxt_get_srq_count(p_hwfn);\n \n \tif (total) {\n-\t\tp_cli = &p_mngr->clients[ILT_CLI_TSDM];\n-\t\tp_blk = &p_cli->pf_blks[SRQ_BLK];\n+\t\tp_cli = 
ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);\n+\t\tp_blk = ecore_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);\n \t\tecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,\n \t\t\t\t       total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);\n \n@@ -783,29 +817,60 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)\n \n static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)\n {\n-\tstruct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;\n+\tstruct ecore_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;\n \tu32 i;\n \n-\tif (!p_mngr->t2)\n+\tif (!p_t2 || !p_t2->dma_mem)\n \t\treturn;\n \n-\tfor (i = 0; i < p_mngr->t2_num_pages; i++)\n-\t\tif (p_mngr->t2[i].p_virt)\n+\tfor (i = 0; i < p_t2->num_pages; i++)\n+\t\tif (p_t2->dma_mem[i].virt_addr)\n \t\t\tOSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,\n-\t\t\t\t\t       p_mngr->t2[i].p_virt,\n-\t\t\t\t\t       p_mngr->t2[i].p_phys,\n-\t\t\t\t\t       p_mngr->t2[i].size);\n+\t\t\t\t\t       p_t2->dma_mem[i].virt_addr,\n+\t\t\t\t\t       p_t2->dma_mem[i].phys_addr,\n+\t\t\t\t\t       p_t2->dma_mem[i].size);\n \n-\tOSAL_FREE(p_hwfn->p_dev, p_mngr->t2);\n+\tOSAL_FREE(p_hwfn->p_dev, p_t2->dma_mem);\n+\tp_t2->dma_mem = OSAL_NULL;\n+}\n+\n+static enum _ecore_status_t\n+ecore_cxt_t2_alloc_pages(struct ecore_hwfn *p_hwfn,\n+\t\t\t struct ecore_src_t2 *p_t2,\n+\t\t\t u32 total_size, u32 page_size)\n+{\n+\tvoid **p_virt;\n+\tu32 size, i;\n+\n+\tif (!p_t2 || !p_t2->dma_mem)\n+\t\treturn ECORE_INVAL;\n+\n+\tfor (i = 0; i < p_t2->num_pages; i++) {\n+\t\tsize = OSAL_MIN_T(u32, total_size, page_size);\n+\t\tp_virt = &p_t2->dma_mem[i].virt_addr;\n+\n+\t\t*p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,\n+\t\t\t\t\t\t  &p_t2->dma_mem[i].phys_addr,\n+\t\t\t\t\t\t  size);\n+\t\tif (!p_t2->dma_mem[i].virt_addr)\n+\t\t\treturn ECORE_NOMEM;\n+\n+\t\tOSAL_MEM_ZERO(*p_virt, size);\n+\t\tp_t2->dma_mem[i].size = size;\n+\t\ttotal_size -= size;\n+\t}\n+\n+\treturn ECORE_SUCCESS;\n }\n \n static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)\n {\n \tstruct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;\n \tu32 conn_num, total_size, ent_per_page, psz, i;\n+\tstruct phys_mem_desc *p_t2_last_page;\n \tstruct ecore_ilt_client_cfg *p_src;\n \tstruct ecore_src_iids src_iids;\n-\tstruct ecore_dma_mem *p_t2;\n+\tstruct ecore_src_t2 *p_t2;\n \tenum _ecore_status_t rc;\n \n \tOSAL_MEM_ZERO(&src_iids, sizeof(src_iids));\n@@ -823,49 +888,39 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)\n \n \t/* use the same page size as the SRC ILT client */\n \tpsz = ILT_PAGE_IN_BYTES(p_src->p_size.val);\n-\tp_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);\n+\tp_t2 = &p_mngr->src_t2;\n+\tp_t2->num_pages = DIV_ROUND_UP(total_size, psz);\n \n \t/* allocate t2 */\n-\tp_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,\n-\t\t\t\t p_mngr->t2_num_pages *\n-\t\t\t\t sizeof(struct ecore_dma_mem));\n-\tif (!p_mngr->t2) {\n+\tp_t2->dma_mem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,\n+\t\t\t\t    p_t2->num_pages *\n+\t\t\t\t    sizeof(struct phys_mem_desc));\n+\tif (!p_t2->dma_mem) {\n \t\tDP_NOTICE(p_hwfn, false, \"Failed to allocate t2 table\\n\");\n \t\trc = ECORE_NOMEM;\n \t\tgoto t2_fail;\n \t}\n \n-\t/* allocate t2 pages */\n-\tfor (i = 0; i < p_mngr->t2_num_pages; i++) {\n-\t\tu32 size = OSAL_MIN_T(u32, total_size, psz);\n-\t\tvoid **p_virt = &p_mngr->t2[i].p_virt;\n-\n-\t\t*p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,\n-\t\t\t\t\t\t  &p_mngr->t2[i].p_phys, size);\n-\t\tif (!p_mngr->t2[i].p_virt) {\n-\t\t\trc = ECORE_NOMEM;\n-\t\t\tgoto 
t2_fail;\n-\t\t}\n-\t\tOSAL_MEM_ZERO(*p_virt, size);\n-\t\tp_mngr->t2[i].size = size;\n-\t\ttotal_size -= size;\n-\t}\n+\trc = ecore_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);\n+\tif (rc)\n+\t\tgoto t2_fail;\n \n \t/* Set the t2 pointers */\n \n \t/* entries per page - must be a power of two */\n \tent_per_page = psz / sizeof(struct src_ent);\n \n-\tp_mngr->first_free = (u64)p_mngr->t2[0].p_phys;\n+\tp_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;\n \n-\tp_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];\n-\tp_mngr->last_free = (u64)p_t2->p_phys +\n-\t    ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);\n+\tp_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];\n+\tp_t2->last_free = (u64)p_t2_last_page->phys_addr +\n+\t\t\t  ((conn_num - 1) & (ent_per_page - 1)) *\n+\t\t\t  sizeof(struct src_ent);\n \n-\tfor (i = 0; i < p_mngr->t2_num_pages; i++) {\n+\tfor (i = 0; i < p_t2->num_pages; i++) {\n \t\tu32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num);\n-\t\tstruct src_ent *entries = p_mngr->t2[i].p_virt;\n-\t\tu64 p_ent_phys = (u64)p_mngr->t2[i].p_phys, val;\n+\t\tstruct src_ent *entries = p_t2->dma_mem[i].virt_addr;\n+\t\tu64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;\n \t\tu32 j;\n \n \t\tfor (j = 0; j < ent_num - 1; j++) {\n@@ -873,8 +928,8 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)\n \t\t\tentries[j].next = OSAL_CPU_TO_BE64(val);\n \t\t}\n \n-\t\tif (i < p_mngr->t2_num_pages - 1)\n-\t\t\tval = (u64)p_mngr->t2[i + 1].p_phys;\n+\t\tif (i < p_t2->num_pages - 1)\n+\t\t\tval = (u64)p_t2->dma_mem[i + 1].phys_addr;\n \t\telse\n \t\t\tval = 0;\n \t\tentries[j].next = OSAL_CPU_TO_BE64(val);\n@@ -921,13 +976,13 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)\n \tilt_size = ecore_cxt_ilt_shadow_size(p_cli);\n \n \tfor (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {\n-\t\tstruct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i];\n+\t\tstruct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];\n \n-\t\tif (p_dma->p_virt)\n+\t\tif (p_dma->virt_addr)\n \t\t\tOSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,\n \t\t\t\t\t       p_dma->p_virt,\n-\t\t\t\t\t       p_dma->p_phys, p_dma->size);\n-\t\tp_dma->p_virt = OSAL_NULL;\n+\t\t\t\t\t       p_dma->phys_addr, p_dma->size);\n+\t\tp_dma->virt_addr = OSAL_NULL;\n \t}\n \tOSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);\n \tp_mngr->ilt_shadow = OSAL_NULL;\n@@ -938,28 +993,33 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,\n \t\t    struct ecore_ilt_cli_blk *p_blk,\n \t\t    enum ilt_clients ilt_client, u32 start_line_offset)\n {\n-\tstruct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;\n-\tu32 lines, line, sz_left, lines_to_skip = 0;\n+\tstruct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;\n+\tu32 lines, line, sz_left, lines_to_skip, first_skipped_line;\n \n \t/* Special handling for RoCE that supports dynamic allocation */\n \tif (ilt_client == ILT_CLI_CDUT || ilt_client == ILT_CLI_TSDM)\n \t\treturn ECORE_SUCCESS;\n \n-\tlines_to_skip = p_blk->dynamic_line_cnt;\n-\n \tif (!p_blk->total_size)\n \t\treturn ECORE_SUCCESS;\n \n \tsz_left = p_blk->total_size;\n+\tlines_to_skip = p_blk->dynamic_line_cnt;\n \tlines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;\n \tline = p_blk->start_line + start_line_offset -\n-\t    p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;\n+\t       p_hwfn->p_cxt_mngr->pf_start_line;\n+\tfirst_skipped_line = line + p_blk->dynamic_line_offset;\n \n-\tfor (; lines; lines--) {\n+\twhile (lines) {\n 
\t\tdma_addr_t p_phys;\n \t\tvoid *p_virt;\n \t\tu32 size;\n \n+\t\tif (lines_to_skip && (line == first_skipped_line)) {\n+\t\t\tline += lines_to_skip;\n+\t\t\tcontinue;\n+\t\t}\n+\n \t\tsize = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);\n \n /* @DPDK */\n@@ -971,8 +1031,8 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,\n \t\t\treturn ECORE_NOMEM;\n \t\tOSAL_MEM_ZERO(p_virt, size);\n \n-\t\tilt_shadow[line].p_phys = p_phys;\n-\t\tilt_shadow[line].p_virt = p_virt;\n+\t\tilt_shadow[line].phys_addr = p_phys;\n+\t\tilt_shadow[line].virt_addr = p_virt;\n \t\tilt_shadow[line].size = size;\n \n \t\tDP_VERBOSE(p_hwfn, ECORE_MSG_ILT,\n@@ -982,6 +1042,7 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,\n \n \t\tsz_left -= size;\n \t\tline++;\n+\t\tlines--;\n \t}\n \n \treturn ECORE_SUCCESS;\n@@ -997,7 +1058,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)\n \n \tsize = ecore_cxt_ilt_shadow_size(clients);\n \tp_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,\n-\t\t\t\t\t size * sizeof(struct ecore_dma_mem));\n+\t\t\t\t\t size * sizeof(struct phys_mem_desc));\n \n \tif (!p_mngr->ilt_shadow) {\n \t\tDP_NOTICE(p_hwfn, false, \"Failed to allocate ilt shadow table\\n\");\n@@ -1007,7 +1068,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)\n \n \tDP_VERBOSE(p_hwfn, ECORE_MSG_ILT,\n \t\t   \"Allocated 0x%x bytes for ilt shadow\\n\",\n-\t\t   (u32)(size * sizeof(struct ecore_dma_mem)));\n+\t\t   (u32)(size * sizeof(struct phys_mem_desc)));\n \n \tfor_each_ilt_valid_client(i, clients) {\n \t\tfor (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {\n@@ -1058,7 +1119,7 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)\n }\n \n static enum _ecore_status_t\n-ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,\n+__ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,\n \t\t\t   u32 cid_start, u32 cid_count,\n \t\t\t   struct ecore_cid_acquired_map *p_map)\n {\n@@ -1082,49 +1143,67 @@ ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,\n \treturn ECORE_SUCCESS;\n }\n \n-static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)\n+static enum _ecore_status_t\n+ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type, u32 start_cid,\n+\t\t\t   u32 vf_start_cid)\n {\n \tstruct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;\n-\tu32 max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);\n-\tu32 start_cid = 0, vf_start_cid = 0;\n-\tu32 type, vf;\n+\tu32 vf, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);\n+\tstruct ecore_cid_acquired_map *p_map;\n+\tstruct ecore_conn_type_cfg *p_cfg;\n+\tenum _ecore_status_t rc;\n \n-\tfor (type = 0; type < MAX_CONN_TYPES; type++) {\n-\t\tstruct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];\n-\t\tstruct ecore_cid_acquired_map *p_map;\n+\tp_cfg = &p_mngr->conn_cfg[type];\n \n \t\t/* Handle PF maps */\n \t\tp_map = &p_mngr->acquired[type];\n-\t\tif (ecore_cid_map_alloc_single(p_hwfn, type, start_cid,\n-\t\t\t\t\t       p_cfg->cid_count, p_map))\n-\t\t\tgoto cid_map_fail;\n+\trc = __ecore_cid_map_alloc_single(p_hwfn, type, start_cid,\n+\t\t\t\t\t  p_cfg->cid_count, p_map);\n+\tif (rc != ECORE_SUCCESS)\n+\t\treturn rc;\n+\n+\t/* Handle VF maps */\n+\tfor (vf = 0; vf < max_num_vfs; vf++) {\n+\t\tp_map = &p_mngr->acquired_vf[type][vf];\n+\t\trc = __ecore_cid_map_alloc_single(p_hwfn, type, vf_start_cid,\n+\t\t\t\t\t\t  p_cfg->cids_per_vf, p_map);\n+\t\tif (rc != ECORE_SUCCESS)\n+\t\t\treturn rc;\n+\t}\n \n-\t\t/* Handle VF maps */\n-\t\tfor (vf = 0; vf < max_num_vfs; vf++) 
{\n-\t\t\tp_map = &p_mngr->acquired_vf[type][vf];\n-\t\t\tif (ecore_cid_map_alloc_single(p_hwfn, type,\n-\t\t\t\t\t\t       vf_start_cid,\n-\t\t\t\t\t\t       p_cfg->cids_per_vf,\n-\t\t\t\t\t\t       p_map))\n-\t\t\t\tgoto cid_map_fail;\n-\t\t}\n+\treturn ECORE_SUCCESS;\n+}\n+\n+static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)\n+{\n+\tstruct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;\n+\tu32 start_cid = 0, vf_start_cid = 0;\n+\tu32 type;\n+\tenum _ecore_status_t rc;\n+\n+\tfor (type = 0; type < MAX_CONN_TYPES; type++) {\n+\t\trc = ecore_cid_map_alloc_single(p_hwfn, type, start_cid,\n+\t\t\t\t\t\tvf_start_cid);\n+\t\tif (rc != ECORE_SUCCESS)\n+\t\t\tgoto cid_map_fail;\n \n-\t\tstart_cid += p_cfg->cid_count;\n-\t\tvf_start_cid += p_cfg->cids_per_vf;\n+\t\tstart_cid += p_mngr->conn_cfg[type].cid_count;\n+\t\tvf_start_cid += p_mngr->conn_cfg[type].cids_per_vf;\n \t}\n \n \treturn ECORE_SUCCESS;\n \n cid_map_fail:\n \tecore_cid_map_free(p_hwfn);\n-\treturn ECORE_NOMEM;\n+\treturn rc;\n }\n \n enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)\n {\n+\tstruct ecore_cid_acquired_map *acquired_vf;\n \tstruct ecore_ilt_client_cfg *clients;\n \tstruct ecore_cxt_mngr *p_mngr;\n-\tu32 i;\n+\tu32 i, max_num_vfs;\n \n \tp_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));\n \tif (!p_mngr) {\n@@ -1132,9 +1211,6 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)\n \t\treturn ECORE_NOMEM;\n \t}\n \n-\t/* Set the cxt mangr pointer prior to further allocations */\n-\tp_hwfn->p_cxt_mngr = p_mngr;\n-\n \t/* Initialize ILT client registers */\n \tclients = p_mngr->clients;\n \tclients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);\n@@ -1183,6 +1259,22 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)\n #endif\n \tOSAL_MUTEX_INIT(&p_mngr->mutex);\n \n+\t/* Set the cxt mangr pointer prior to further allocations */\n+\tp_hwfn->p_cxt_mngr = p_mngr;\n+\n+\tmax_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);\n+\tfor (i = 0; i < MAX_CONN_TYPES; i++) {\n+\t\tacquired_vf = OSAL_CALLOC(p_hwfn->p_dev, GFP_KERNEL,\n+\t\t\t\t\t  max_num_vfs, sizeof(*acquired_vf));\n+\t\tif (!acquired_vf) {\n+\t\t\tDP_NOTICE(p_hwfn, false,\n+\t\t\t\t  \"Failed to allocate an array of `struct ecore_cid_acquired_map'\\n\");\n+\t\t\treturn ECORE_NOMEM;\n+\t\t}\n+\n+\t\tp_mngr->acquired_vf[i] = acquired_vf;\n+\t}\n+\n \treturn ECORE_SUCCESS;\n }\n \n@@ -1220,6 +1312,8 @@ enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)\n \n void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)\n {\n+\tu32 i;\n+\n \tif (!p_hwfn->p_cxt_mngr)\n \t\treturn;\n \n@@ -1229,7 +1323,11 @@ void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)\n #ifdef CONFIG_ECORE_LOCK_ALLOC\n \tOSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);\n #endif\n+\tfor (i = 0; i < MAX_CONN_TYPES; i++)\n+\t\tOSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr->acquired_vf[i]);\n \tOSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);\n+\n+\tp_hwfn->p_cxt_mngr = OSAL_NULL;\n }\n \n void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)\n@@ -1435,14 +1533,10 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n \t\t      bool is_pf_loading)\n {\n \tstruct ecore_qm_info *qm_info = &p_hwfn->qm_info;\n-\tstruct ecore_mcp_link_state *p_link;\n \tstruct ecore_qm_iids iids;\n \n \tOSAL_MEM_ZERO(&iids, sizeof(iids));\n \tecore_cxt_qm_iids(p_hwfn, &iids);\n-\n-\tp_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;\n-\n \tecore_qm_pf_rt_init(p_hwfn, p_ptt, 
p_hwfn->rel_pf_id,\n \t\t\t    qm_info->max_phys_tcs_per_port,\n \t\t\t    is_pf_loading,\n@@ -1452,7 +1546,7 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n \t\t\t    qm_info->num_vf_pqs,\n \t\t\t    qm_info->start_vport,\n \t\t\t    qm_info->num_vports, qm_info->pf_wfq,\n-\t\t\t    qm_info->pf_rl, p_link->speed,\n+\t\t\t    qm_info->pf_rl,\n \t\t\t    p_hwfn->qm_info.qm_pq_params,\n \t\t\t    p_hwfn->qm_info.qm_vport_params);\n }\n@@ -1601,7 +1695,7 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)\n {\n \tstruct ecore_ilt_client_cfg *clients;\n \tstruct ecore_cxt_mngr *p_mngr;\n-\tstruct ecore_dma_mem *p_shdw;\n+\tstruct phys_mem_desc *p_shdw;\n \tu32 line, rt_offst, i;\n \n \tecore_ilt_bounds_init(p_hwfn);\n@@ -1626,10 +1720,10 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)\n \t\t\t/** p_virt could be OSAL_NULL incase of dynamic\n \t\t\t *  allocation\n \t\t\t */\n-\t\t\tif (p_shdw[line].p_virt != OSAL_NULL) {\n+\t\t\tif (p_shdw[line].virt_addr != OSAL_NULL) {\n \t\t\t\tSET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);\n \t\t\t\tSET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,\n-\t\t\t\t\t  (p_shdw[line].p_phys >> 12));\n+\t\t\t\t\t  (p_shdw[line].phys_addr >> 12));\n \n \t\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_ILT,\n \t\t\t\t\t\"Setting RT[0x%08x] from\"\n@@ -1637,7 +1731,7 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)\n \t\t\t\t\t\" Physical addr: 0x%lx\\n\",\n \t\t\t\t\trt_offst, line, i,\n \t\t\t\t\t(unsigned long)(p_shdw[line].\n-\t\t\t\t\t\t\tp_phys >> 12));\n+\t\t\t\t\t\t\tphys_addr >> 12));\n \t\t\t}\n \n \t\t\tSTORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);\n@@ -1666,9 +1760,9 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)\n \t\t     OSAL_LOG2(rounded_conn_num));\n \n \tSTORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,\n-\t\t\t p_hwfn->p_cxt_mngr->first_free);\n+\t\t\t p_hwfn->p_cxt_mngr->src_t2.first_free);\n \tSTORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,\n-\t\t\t p_hwfn->p_cxt_mngr->last_free);\n+\t\t\t p_hwfn->p_cxt_mngr->src_t2.last_free);\n \tDP_VERBOSE(p_hwfn, ECORE_MSG_ILT,\n \t\t   \"Configured SEARCHER for 0x%08x connections\\n\",\n \t\t   conn_num);\n@@ -1699,18 +1793,18 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)\n \tu8 i;\n \n \tOSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));\n-\tecore_cxt_tm_iids(p_mngr, &tm_iids);\n+\tecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);\n \n \t/* @@@TBD No pre-scan for now */\n \n-\t/* Note: We assume consecutive VFs for a PF */\n-\tfor (i = 0; i < p_mngr->vf_count; i++) {\n \t\tcfg_word = 0;\n \t\tSET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);\n-\t\tSET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);\n \t\tSET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);\n+\tSET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);\n \t\tSET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */\n \n+\t/* Note: We assume consecutive VFs for a PF */\n+\tfor (i = 0; i < p_mngr->vf_count; i++) {\n \t\trt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +\n \t\t    (sizeof(cfg_word) / sizeof(u32)) *\n \t\t    (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);\n@@ -1728,7 +1822,7 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)\n \t    (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);\n \tSTORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);\n \n-\t/* enale scan */\n+\t/* enable scan */\n \tSTORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,\n \t\t     tm_iids.pf_cids ? 
0x1 : 0x0);\n \n@@ -1972,10 +2066,10 @@ enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,\n \tline = p_info->iid / cxts_per_p;\n \n \t/* Make sure context is allocated (dynamic allocation) */\n-\tif (!p_mngr->ilt_shadow[line].p_virt)\n+\tif (!p_mngr->ilt_shadow[line].virt_addr)\n \t\treturn ECORE_INVAL;\n \n-\tp_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].p_virt +\n+\tp_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].virt_addr +\n \t    p_info->iid % cxts_per_p * conn_cxt_size;\n \n \tDP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),\n@@ -2074,7 +2168,7 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,\n \n \tOSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex);\n \n-\tif (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)\n+\tif (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)\n \t\tgoto out0;\n \n \tp_ptt = ecore_ptt_acquire(p_hwfn);\n@@ -2094,8 +2188,8 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,\n \t}\n \tOSAL_MEM_ZERO(p_virt, p_blk->real_size_in_page);\n \n-\tp_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;\n-\tp_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;\n+\tp_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;\n+\tp_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;\n \tp_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =\n \t\tp_blk->real_size_in_page;\n \n@@ -2107,7 +2201,7 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,\n \tSET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);\n \tSET_FIELD(ilt_hw_entry,\n \t\t  ILT_ENTRY_PHY_ADDR,\n-\t\t  (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));\n+\t\t (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr >> 12));\n \n /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */\n \n@@ -2115,21 +2209,6 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,\n \t\t\t    reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),\n \t\t\t    OSAL_NULL /* default parameters */);\n \n-\tif (elem_type == ECORE_ELEM_CXT) {\n-\t\tu32 last_cid_allocated = (1 + (iid / elems_per_p)) *\n-\t\t\t\t\t elems_per_p;\n-\n-\t\t/* Update the relevant register in the parser */\n-\t\tecore_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,\n-\t\t\t last_cid_allocated - 1);\n-\n-\t\tif (!p_hwfn->b_rdma_enabled_in_prs) {\n-\t\t\t/* Enable RoCE search */\n-\t\t\tecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);\n-\t\t\tp_hwfn->b_rdma_enabled_in_prs = true;\n-\t\t}\n-\t}\n-\n out1:\n \tecore_ptt_release(p_hwfn, p_ptt);\n out0:\n@@ -2196,16 +2275,16 @@ ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,\n \t}\n \n \tfor (i = shadow_start_line; i < shadow_end_line; i++) {\n-\t\tif (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)\n+\t\tif (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)\n \t\t\tcontinue;\n \n \t\tOSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,\n-\t\t\t\t       p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,\n-\t\t\t\t       p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys,\n-\t\t\t\t       p_hwfn->p_cxt_mngr->ilt_shadow[i].size);\n+\t\t\t\t    p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,\n+\t\t\t\t    p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr,\n+\t\t\t\t    p_hwfn->p_cxt_mngr->ilt_shadow[i].size);\n \n-\t\tp_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL;\n-\t\tp_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;\n+\t\tp_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = OSAL_NULL;\n+\t\tp_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;\n \t\tp_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;\n \n \t\t/* compute absolute offset */\ndiff --git a/drivers/net/qede/base/ecore_cxt.h 
b/drivers/net/qede/base/ecore_cxt.h\nindex f8c955cac..55f08027d 100644\n--- a/drivers/net/qede/base/ecore_cxt.h\n+++ b/drivers/net/qede/base/ecore_cxt.h\n@@ -22,6 +22,18 @@ enum ecore_cxt_elem_type {\n \tECORE_ELEM_TASK\n };\n \n+enum ilt_clients {\n+\tILT_CLI_CDUC,\n+\tILT_CLI_CDUT,\n+\tILT_CLI_QM,\n+\tILT_CLI_TM,\n+\tILT_CLI_SRC,\n+\tILT_CLI_TSDM,\n+\tILT_CLI_RGFS,\n+\tILT_CLI_TGFS,\n+\tILT_CLI_MAX\n+};\n+\n u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,\n \t\t\t\t  enum protocol_type type,\n \t\t\t\t  u32 *vf_cid);\ndiff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c\nindex b82ca49ff..ccd4383bb 100644\n--- a/drivers/net/qede/base/ecore_dcbx.c\n+++ b/drivers/net/qede/base/ecore_dcbx.c\n@@ -310,8 +310,9 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n \t\t\tcontinue;\n \n \t\t/* if no app tlv was present, don't override in FW */\n-\t\tecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false,\n-\t\t\t\t\t   priority, tc, type);\n+\t\tecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt,\n+\t\t\t\t\t  p_data->arr[DCBX_PROTOCOL_ETH].enable,\n+\t\t\t\t\t  priority, tc, type);\n \t}\n \n \treturn ECORE_SUCCESS;\ndiff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c\nindex 2a11b4d29..2c47aba48 100644\n--- a/drivers/net/qede/base/ecore_dev.c\n+++ b/drivers/net/qede/base/ecore_dev.c\n@@ -39,6 +39,10 @@\n static osal_spinlock_t qm_lock;\n static u32 qm_lock_ref_cnt;\n \n+#ifndef ASIC_ONLY\n+static bool b_ptt_gtt_init;\n+#endif\n+\n /******************** Doorbell Recovery *******************/\n /* The doorbell recovery mechanism consists of a list of entries which represent\n  * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each\n@@ -963,13 +967,13 @@ ecore_llh_access_filter(struct ecore_hwfn *p_hwfn,\n \n \t/* Filter enable - should be done first when removing a filter */\n \tif (b_write_access && !p_details->enable) {\n-\t\taddr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;\n+\t\taddr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;\n \t\tecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,\n \t\t\t       p_details->enable);\n \t}\n \n \t/* Filter value */\n-\taddr = NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 2 * filter_idx * 0x4;\n+\taddr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;\n \tOSAL_MEMSET(&params, 0, sizeof(params));\n \n \tif (b_write_access) {\n@@ -991,7 +995,7 @@ ecore_llh_access_filter(struct ecore_hwfn *p_hwfn,\n \t\treturn rc;\n \n \t/* Filter mode */\n-\taddr = NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + filter_idx * 0x4;\n+\taddr = NIG_REG_LLH_FUNC_FILTER_MODE + filter_idx * 0x4;\n \tif (b_write_access)\n \t\tecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, p_details->mode);\n \telse\n@@ -999,7 +1003,7 @@ ecore_llh_access_filter(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t\t addr);\n \n \t/* Filter protocol type */\n-\taddr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + filter_idx * 0x4;\n+\taddr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + filter_idx * 0x4;\n \tif (b_write_access)\n \t\tecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,\n \t\t\t       p_details->protocol_type);\n@@ -1018,7 +1022,7 @@ ecore_llh_access_filter(struct ecore_hwfn *p_hwfn,\n \n \t/* Filter enable - should be done last when adding a filter */\n \tif (!b_write_access || p_details->enable) {\n-\t\taddr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;\n+\t\taddr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;\n \t\tif (b_write_access)\n \t\t\tecore_ppfid_wr(p_hwfn, p_ptt, 
abs_ppfid, addr,\n \t\t\t\t       p_details->enable);\n@@ -1031,7 +1035,7 @@ ecore_llh_access_filter(struct ecore_hwfn *p_hwfn,\n }\n \n static enum _ecore_status_t\n-ecore_llh_add_filter_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n+ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n \t\t\tu8 abs_ppfid, u8 filter_idx, u8 filter_prot_type,\n \t\t\tu32 high, u32 low)\n {\n@@ -1054,7 +1058,7 @@ ecore_llh_add_filter_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n }\n \n static enum _ecore_status_t\n-ecore_llh_remove_filter_e4(struct ecore_hwfn *p_hwfn,\n+ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn,\n \t\t\t   struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)\n {\n \tstruct ecore_llh_filter_details filter_details;\n@@ -1066,24 +1070,6 @@ ecore_llh_remove_filter_e4(struct ecore_hwfn *p_hwfn,\n \t\t\t\t       true /* write access */);\n }\n \n-static enum _ecore_status_t\n-ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n-\t\t     u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, u32 high,\n-\t\t     u32 low)\n-{\n-\treturn ecore_llh_add_filter_e4(p_hwfn, p_ptt, abs_ppfid,\n-\t\t\t\t       filter_idx, filter_prot_type,\n-\t\t\t\t       high, low);\n-}\n-\n-static enum _ecore_status_t\n-ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n-\t\t\tu8 abs_ppfid, u8 filter_idx)\n-{\n-\treturn ecore_llh_remove_filter_e4(p_hwfn, p_ptt, abs_ppfid,\n-\t\t\t\t\t  filter_idx);\n-}\n-\n enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,\n \t\t\t\t\t      u8 mac_addr[ETH_ALEN])\n {\n@@ -1424,7 +1410,7 @@ void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid)\n \n \tfor (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;\n \t     filter_idx++) {\n-\t\trc = ecore_llh_remove_filter_e4(p_hwfn, p_ptt,\n+\t\trc = ecore_llh_remove_filter(p_hwfn, p_ptt,\n \t\t\t\t\t\tabs_ppfid, filter_idx);\n \t\tif (rc != ECORE_SUCCESS)\n \t\t\tgoto out;\n@@ -1464,18 +1450,22 @@ enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,\n \treturn ECORE_SUCCESS;\n }\n \n-static enum _ecore_status_t\n-ecore_llh_dump_ppfid_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n-\t\t\tu8 ppfid)\n+enum _ecore_status_t\n+ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)\n {\n+\tstruct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);\n+\tstruct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);\n \tstruct ecore_llh_filter_details filter_details;\n \tu8 abs_ppfid, filter_idx;\n \tu32 addr;\n \tenum _ecore_status_t rc;\n \n+\tif (!p_ptt)\n+\t\treturn ECORE_AGAIN;\n+\n \trc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid);\n \tif (rc != ECORE_SUCCESS)\n-\t\treturn rc;\n+\t\tgoto out;\n \n \taddr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;\n \tDP_NOTICE(p_hwfn, false,\n@@ -1490,7 +1480,7 @@ ecore_llh_dump_ppfid_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n \t\t\t\t\t      filter_idx, &filter_details,\n \t\t\t\t\t      false /* read access */);\n \t\tif (rc != ECORE_SUCCESS)\n-\t\t\treturn rc;\n+\t\t\tgoto out;\n \n \t\tDP_NOTICE(p_hwfn, false,\n \t\t\t  \"filter %2hhd: enable %d, value 0x%016lx, mode %d, protocol_type 0x%x, hdr_sel 0x%x\\n\",\n@@ -1500,20 +1490,8 @@ ecore_llh_dump_ppfid_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n \t\t\t  filter_details.protocol_type, filter_details.hdr_sel);\n \t}\n \n-\treturn ECORE_SUCCESS;\n-}\n-\n-enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)\n-{\n-\tstruct ecore_hwfn *p_hwfn = 
ECORE_LEADING_HWFN(p_dev);\n-\tstruct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);\n-\tenum _ecore_status_t rc;\n-\n-\tif (p_ptt == OSAL_NULL)\n-\t\treturn ECORE_AGAIN;\n-\n-\trc = ecore_llh_dump_ppfid_e4(p_hwfn, p_ptt, ppfid);\n \n+out:\n \tecore_ptt_release(p_hwfn, p_ptt);\n \n \treturn rc;\n@@ -1851,6 +1829,7 @@ static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)\n {\n \t/* Initialize qm port parameters */\n \tu8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine;\n+\tstruct ecore_dev *p_dev = p_hwfn->p_dev;\n \n \t/* indicate how ooo and high pri traffic is dealt with */\n \tactive_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?\n@@ -1859,11 +1838,14 @@ static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)\n \tfor (i = 0; i < num_ports; i++) {\n \t\tstruct init_qm_port_params *p_qm_port =\n \t\t\t&p_hwfn->qm_info.qm_port_params[i];\n+\t\tu16 pbf_max_cmd_lines;\n \n \t\tp_qm_port->active = 1;\n \t\tp_qm_port->active_phys_tcs = active_phys_tcs;\n-\t\tp_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;\n-\t\tp_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;\n+\t\tpbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(p_dev);\n+\t\tp_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports;\n+\t\tp_qm_port->num_btb_blocks =\n+\t\t\tNUM_OF_BTB_BLOCKS(p_dev) / num_ports;\n \t}\n }\n \n@@ -1938,6 +1920,10 @@ static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,\n \t\t(pq_init_flags & PQ_INIT_PF_RL ||\n \t\t pq_init_flags & PQ_INIT_VF_RL);\n \n+\t/* The \"rl_id\" is set as the \"vport_id\" */\n+\tqm_info->qm_pq_params[pq_idx].rl_id =\n+\t\tqm_info->qm_pq_params[pq_idx].vport_id;\n+\n \t/* qm params accounting */\n \tqm_info->num_pqs++;\n \tif (!(pq_init_flags & PQ_INIT_SHARE_VPORT))\n@@ -2247,10 +2233,10 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)\n \t/* pq table */\n \tfor (i = 0; i < qm_info->num_pqs; i++) {\n \t\tpq = &qm_info->qm_pq_params[i];\n-\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_HW,\n-\t\t\t   \"pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\\n\",\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_SP,\n+\t\t\t   \"pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d, rl_id %d\\n\",\n \t\t\t   qm_info->start_pq + i, pq->port_id, pq->vport_id,\n-\t\t\t   pq->tc_id, pq->wrr_group, pq->rl_valid);\n+\t\t\t   pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id);\n \t}\n }\n \n@@ -2531,6 +2517,13 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)\n \t\t\t\t  \"Failed to allocate dbg user info structure\\n\");\n \t\t\tgoto alloc_err;\n \t\t}\n+\n+\t\trc = OSAL_DBG_ALLOC_USER_DATA(p_hwfn, &p_hwfn->dbg_user_info);\n+\t\tif (rc) {\n+\t\t\tDP_NOTICE(p_hwfn, false,\n+\t\t\t\t  \"Failed to allocate dbg user info structure\\n\");\n+\t\t\tgoto alloc_err;\n+\t\t}\n \t} /* hwfn loop */\n \n \trc = ecore_llh_alloc(p_dev);\n@@ -2652,7 +2645,7 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)\n {\n \tint hw_mode = 0;\n \n-\tif (ECORE_IS_BB_B0(p_hwfn->p_dev)) {\n+\tif (ECORE_IS_BB(p_hwfn->p_dev)) {\n \t\thw_mode |= 1 << MODE_BB;\n \t} else if (ECORE_IS_AH(p_hwfn->p_dev)) {\n \t\thw_mode |= 1 << MODE_K2;\n@@ -2712,50 +2705,88 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)\n }\n \n #ifndef ASIC_ONLY\n-/* MFW-replacement initializations for non-ASIC */\n-static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,\n+/* MFW-replacement initializations for emulation */\n+static enum _ecore_status_t ecore_hw_init_chip(struct ecore_dev *p_dev,\n \t\t\t\t\t   
    struct ecore_ptt *p_ptt)\n {\n-\tstruct ecore_dev *p_dev = p_hwfn->p_dev;\n-\tu32 pl_hv = 1;\n-\tint i;\n+\tstruct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);\n+\tu32 pl_hv, wr_mbs;\n+\tint i, pos;\n+\tu16 ctrl = 0;\n \n-\tif (CHIP_REV_IS_EMUL(p_dev)) {\n-\t\tif (ECORE_IS_AH(p_dev))\n-\t\t\tpl_hv |= 0x600;\n+\tif (!CHIP_REV_IS_EMUL(p_dev)) {\n+\t\tDP_NOTICE(p_dev, false,\n+\t\t\t  \"ecore_hw_init_chip() shouldn't be called in a non-emulation environment\\n\");\n+\t\treturn ECORE_INVAL;\n \t}\n \n+\tpl_hv = ECORE_IS_BB(p_dev) ? 0x1 : 0x401;\n \tecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);\n \n \tif (ECORE_IS_AH(p_dev))\n \t\tecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2, 0x3ffffff);\n \n-\t/* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */\n-\t/* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */\n-\tif (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev))\n+\t/* Initialize port mode to 4x10G_E (10G with 4x10 SERDES) */\n+\tif (ECORE_IS_BB(p_dev))\n \t\tecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);\n \n-\tif (CHIP_REV_IS_EMUL(p_dev)) {\n-\t\tif (ECORE_IS_AH(p_dev)) {\n-\t\t\t/* 2 for 4-port, 1 for 2-port, 0 for 1-port */\n-\t\t\tecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,\n-\t\t\t\t (p_dev->num_ports_in_engine >> 1));\n+\tif (ECORE_IS_AH(p_dev)) {\n+\t\t/* 2 for 4-port, 1 for 2-port, 0 for 1-port */\n+\t\tecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,\n+\t\t\t p_dev->num_ports_in_engine >> 1);\n \n-\t\t\tecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,\n-\t\t\t\t p_dev->num_ports_in_engine == 4 ? 0 : 3);\n-\t\t}\n+\t\tecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,\n+\t\t\t p_dev->num_ports_in_engine == 4 ? 0 : 3);\n \t}\n \n-\t/* Poll on RBC */\n+\t/* Signal the PSWRQ block to start initializing internal memories */\n \tecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);\n \tfor (i = 0; i < 100; i++) {\n \t\tOSAL_UDELAY(50);\n \t\tif (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)\n \t\t\tbreak;\n \t}\n-\tif (i == 100)\n+\tif (i == 100) {\n \t\tDP_NOTICE(p_hwfn, true,\n \t\t\t  \"RBC done failed to complete in PSWRQ2\\n\");\n+\t\treturn ECORE_TIMEOUT;\n+\t}\n+\n+\t/* Indicate PSWRQ to initialize steering tag table with zeros */\n+\tecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RESET_STT, 1);\n+\tfor (i = 0; i < 100; i++) {\n+\t\tOSAL_UDELAY(50);\n+\t\tif (!ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_RESET_STT))\n+\t\t\tbreak;\n+\t}\n+\tif (i == 100) {\n+\t\tDP_NOTICE(p_hwfn, true,\n+\t\t\t  \"Steering tag table initialization failed to complete in PSWRQ2\\n\");\n+\t\treturn ECORE_TIMEOUT;\n+\t}\n+\n+\t/* Clear a possible PSWRQ2 STT parity which might have been generated by\n+\t * a previous MSI-X read.\n+\t */\n+\tecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_PRTY_STS_WR_H_0, 0x8);\n+\n+\t/* Configure PSWRQ2_REG_WR_MBS0 according to the MaxPayloadSize field in\n+\t * the PCI configuration space. 
The value is common for all PFs, so it\n+\t * is okay to do it according to the first loading PF.\n+\t */\n+\tpos = OSAL_PCI_FIND_CAPABILITY(p_dev, PCI_CAP_ID_EXP);\n+\tif (!pos) {\n+\t\tDP_NOTICE(p_dev, true,\n+\t\t\t  \"Failed to find the PCI Express Capability structure in the PCI config space\\n\");\n+\t\treturn ECORE_IO;\n+\t}\n+\n+\tOSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_EXP_DEVCTL, &ctrl);\n+\twr_mbs = (ctrl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;\n+\tecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0, wr_mbs);\n+\n+\t/* Configure the PGLUE_B to discard mode */\n+\tecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_DISCARD_NBLOCK, 0x3f);\n \n \treturn ECORE_SUCCESS;\n }\n@@ -2768,7 +2799,8 @@ static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,\n static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)\n {\n \tu32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;\n-\tint i, igu_sb_id;\n+\tu32 igu_sb_id;\n+\tint i;\n \n \tfor_each_hwfn(p_dev, i) {\n \t\tstruct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];\n@@ -2866,8 +2898,8 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,\n \tecore_gtt_init(p_hwfn);\n \n #ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_EMUL(p_dev)) {\n-\t\trc = ecore_hw_init_chip(p_hwfn, p_ptt);\n+\tif (CHIP_REV_IS_EMUL(p_dev) && IS_LEAD_HWFN(p_hwfn)) {\n+\t\trc = ecore_hw_init_chip(p_dev, p_ptt);\n \t\tif (rc != ECORE_SUCCESS)\n \t\t\treturn rc;\n \t}\n@@ -2885,7 +2917,8 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,\n \t\t\t\tqm_info->max_phys_tcs_per_port,\n \t\t\t\tqm_info->pf_rl_en, qm_info->pf_wfq_en,\n \t\t\t\tqm_info->vport_rl_en, qm_info->vport_wfq_en,\n-\t\t\t\tqm_info->qm_port_params);\n+\t\t\t\tqm_info->qm_port_params,\n+\t\t\t\tOSAL_NULL /* global RLs are not configured */);\n \n \tecore_cxt_hw_init_common(p_hwfn);\n \n@@ -2906,7 +2939,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,\n \t\t/* Workaround clears ROCE search for all functions to prevent\n \t\t * involving non initialized function in processing ROCE packet.\n \t\t */\n-\t\tnum_pfs = NUM_OF_ENG_PFS(p_dev);\n+\t\tnum_pfs = (u16)NUM_OF_ENG_PFS(p_dev);\n \t\tfor (pf_id = 0; pf_id < num_pfs; pf_id++) {\n \t\t\tecore_fid_pretend(p_hwfn, p_ptt, pf_id);\n \t\t\tecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);\n@@ -2922,7 +2955,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,\n \t * This is not done inside the init tool since it currently can't\n \t * perform a pretending to VFs.\n \t */\n-\tmax_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;\n+\tmax_num_vfs = (u8)NUM_OF_VFS(p_dev);\n \tfor (vf_id = 0; vf_id < max_num_vfs; vf_id++) {\n \t\tconcrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);\n \t\tecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);\n@@ -2982,8 +3015,6 @@ static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn,\n {\n \tu8 loopback = 0, port = p_hwfn->port_id * 2;\n \n-\tDP_INFO(p_hwfn->p_dev, \"Configurating Emulation Link %02x\\n\", port);\n-\n \t/* XLPORT MAC MODE *//* 0 Quad, 4 Single... */\n \tecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,\n \t\t\t port);\n@@ -3113,6 +3144,25 @@ static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,\n }\n #endif\n \n+static u32 ecore_hw_norm_region_conn(struct ecore_hwfn *p_hwfn)\n+{\n+\tu32 norm_region_conn;\n+\n+\t/* The order of CIDs allocation is according to the order of\n+\t * 'enum protocol_type'. 
Therefore, the number of CIDs for the normal\n+\t * region is calculated based on the CORE CIDs, in case of non-ETH\n+\t * personality, and otherwise - based on the ETH CIDs.\n+\t */\n+\tnorm_region_conn =\n+\t\tecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +\n+\t\tecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,\n+\t\t\t\t\t      OSAL_NULL) +\n+\t\tecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,\n+\t\t\t\t\t      OSAL_NULL);\n+\n+\treturn norm_region_conn;\n+}\n+\n static enum _ecore_status_t\n ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,\n \t\t       struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)\n@@ -3183,8 +3233,8 @@ static enum _ecore_status_t\n ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,\n \t\t\t      struct ecore_ptt *p_ptt)\n {\n+\tu32 norm_region_conn, min_addr_reg1;\n \tu32 pwm_regsize, norm_regsize;\n-\tu32 non_pwm_conn, min_addr_reg1;\n \tu32 db_bar_size, n_cpus;\n \tu32 roce_edpm_mode;\n \tu32 pf_dems_shift;\n@@ -3209,11 +3259,8 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,\n \t * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is\n \t * in units of 4,096 bytes.\n \t */\n-\tnon_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +\n-\t    ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,\n-\t\t\t\t\t  OSAL_NULL) +\n-\t    ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);\n-\tnorm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn,\n+\tnorm_region_conn = ecore_hw_norm_region_conn(p_hwfn);\n+\tnorm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * norm_region_conn,\n \t\t\t       OSAL_PAGE_SIZE);\n \tmin_addr_reg1 = norm_regsize / 4096;\n \tpwm_regsize = db_bar_size - norm_regsize;\n@@ -3292,10 +3339,11 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t       struct ecore_ptt *p_ptt,\n \t\t\t\t\t       int hw_mode)\n {\n+\tstruct ecore_dev *p_dev = p_hwfn->p_dev;\n \tenum _ecore_status_t rc\t= ECORE_SUCCESS;\n \n \t/* In CMT the gate should be cleared by the 2nd hwfn */\n-\tif (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn))\n+\tif (!ECORE_IS_CMT(p_dev) || !IS_LEAD_HWFN(p_hwfn))\n \t\tSTORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);\n \n \trc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,\n@@ -3306,16 +3354,11 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,\n \tecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);\n \n #ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_ASIC(p_hwfn->p_dev))\n-\t\treturn ECORE_SUCCESS;\n+\tif (CHIP_REV_IS_FPGA(p_dev) && ECORE_IS_BB(p_dev))\n+\t\tecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);\n \n-\tif (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {\n-\t\tif (ECORE_IS_AH(p_hwfn->p_dev))\n-\t\t\treturn ECORE_SUCCESS;\n-\t\telse if (ECORE_IS_BB(p_hwfn->p_dev))\n-\t\t\tecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);\n-\t} else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {\n-\t\tif (ECORE_IS_CMT(p_hwfn->p_dev)) {\n+\tif (CHIP_REV_IS_EMUL(p_dev)) {\n+\t\tif (ECORE_IS_CMT(p_dev)) {\n \t\t\t/* Activate OPTE in CMT */\n \t\t\tu32 val;\n \n@@ -3334,13 +3377,24 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,\n \t\t\t\t 0x55555555);\n \t\t}\n \n+\t\t/* Set the TAGMAC default function on the port if needed.\n+\t\t * The ppfid should be set in the vector, except in BB which has\n+\t\t * a bug in the LLH where the ppfid is actually engine based.\n+\t\t */\n+\t\tif (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_dev->mf_bits)) {\n+\t\t\tu8 pf_id = 
p_hwfn->rel_pf_id;\n+\n+\t\t\tif (!ECORE_IS_BB(p_dev))\n+\t\t\t\tpf_id /= p_dev->num_ports_in_engine;\n+\t\t\tecore_wr(p_hwfn, p_ptt,\n+\t\t\t\t NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);\n+\t\t}\n+\n \t\tecore_emul_link_init(p_hwfn, p_ptt);\n-\t} else {\n-\t\tDP_INFO(p_hwfn->p_dev, \"link is not being configured\\n\");\n \t}\n #endif\n \n-\treturn rc;\n+\treturn ECORE_SUCCESS;\n }\n \n static enum _ecore_status_t\n@@ -3755,9 +3809,9 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,\n \t\t\tgoto load_err;\n \n \t\t/* Clear the pglue_b was_error indication.\n-\t\t * In E4 it must be done after the BME and the internal\n-\t\t * FID_enable for the PF are set, since VDMs may cause the\n-\t\t * indication to be set again.\n+\t\t * It must be done after the BME and the internal FID_enable for\n+\t\t * the PF are set, since VDMs may cause the indication to be set\n+\t\t * again.\n \t\t */\n \t\tecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);\n \n@@ -4361,11 +4415,41 @@ __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,\n \treturn ECORE_SUCCESS;\n }\n \n+#define RDMA_NUM_STATISTIC_COUNTERS_K2                  MAX_NUM_VPORTS_K2\n+#define RDMA_NUM_STATISTIC_COUNTERS_BB                  MAX_NUM_VPORTS_BB\n+\n+static u32 ecore_hsi_def_val[][MAX_CHIP_IDS] = {\n+\t{MAX_NUM_VFS_BB, MAX_NUM_VFS_K2},\n+\t{MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2},\n+\t{MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2},\n+\t{MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2, },\n+\t{MAX_NUM_PFS_BB, MAX_NUM_PFS_K2},\n+\t{MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2},\n+\t{ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2},\n+\t{MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2},\n+\t{PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2},\n+\t{RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2},\n+\t{MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS},\n+\t{PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES},\n+\t{BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2},\n+};\n+\n+u32 ecore_get_hsi_def_val(struct ecore_dev *p_dev, enum ecore_hsi_def_type type)\n+{\n+\tenum chip_ids chip_id = ECORE_IS_BB(p_dev) ? CHIP_BB : CHIP_K2;\n+\n+\tif (type >= ECORE_NUM_HSI_DEFS) {\n+\t\tDP_ERR(p_dev, \"Unexpected HSI definition type [%d]\\n\", type);\n+\t\treturn 0;\n+\t}\n+\n+\treturn ecore_hsi_def_val[type][chip_id];\n+}\n+\n static enum _ecore_status_t\n ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,\n \t\t\t    struct ecore_ptt *p_ptt)\n {\n-\tbool b_ah = ECORE_IS_AH(p_hwfn->p_dev);\n \tu32 resc_max_val, mcp_resp;\n \tu8 res_id;\n \tenum _ecore_status_t rc;\n@@ -4407,27 +4491,24 @@ enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t    u32 *p_resc_num, u32 *p_resc_start)\n {\n \tu8 num_funcs = p_hwfn->num_funcs_on_engine;\n-\tbool b_ah = ECORE_IS_AH(p_hwfn->p_dev);\n+\tstruct ecore_dev *p_dev = p_hwfn->p_dev;\n \n \tswitch (res_id) {\n \tcase ECORE_L2_QUEUE:\n-\t\t*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :\n-\t\t\t\t MAX_NUM_L2_QUEUES_BB) / num_funcs;\n+\t\t*p_resc_num = NUM_OF_L2_QUEUES(p_dev) / num_funcs;\n \t\tbreak;\n \tcase ECORE_VPORT:\n-\t\t*p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :\n-\t\t\t\t MAX_NUM_VPORTS_BB) / num_funcs;\n+\t\t*p_resc_num = NUM_OF_VPORTS(p_dev) / num_funcs;\n \t\tbreak;\n \tcase ECORE_RSS_ENG:\n-\t\t*p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :\n-\t\t\t\t ETH_RSS_ENGINE_NUM_BB) / num_funcs;\n+\t\t*p_resc_num = NUM_OF_RSS_ENGINES(p_dev) / num_funcs;\n \t\tbreak;\n \tcase ECORE_PQ:\n-\t\t*p_resc_num = (b_ah ? 
MAX_QM_TX_QUEUES_K2 :\n-\t\t\t\t MAX_QM_TX_QUEUES_BB) / num_funcs;\n+\t\t*p_resc_num = NUM_OF_QM_TX_QUEUES(p_dev) / num_funcs;\n+\t\t*p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */\n \t\tbreak;\n \tcase ECORE_RL:\n-\t\t*p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;\n+\t\t*p_resc_num = NUM_OF_QM_GLOBAL_RLS(p_dev) / num_funcs;\n \t\tbreak;\n \tcase ECORE_MAC:\n \tcase ECORE_VLAN:\n@@ -4435,11 +4516,10 @@ enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,\n \t\t*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;\n \t\tbreak;\n \tcase ECORE_ILT:\n-\t\t*p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :\n-\t\t\t\t PXP_NUM_ILT_RECORDS_BB) / num_funcs;\n+\t\t*p_resc_num = NUM_OF_PXP_ILT_RECORDS(p_dev) / num_funcs;\n \t\tbreak;\n \tcase ECORE_LL2_QUEUE:\n-\t\t*p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;\n+\t\t*p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs;\n \t\tbreak;\n \tcase ECORE_RDMA_CNQ_RAM:\n \tcase ECORE_CMDQS_CQS:\n@@ -4448,9 +4528,7 @@ enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,\n \t\t*p_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs;\n \t\tbreak;\n \tcase ECORE_RDMA_STATS_QUEUE:\n-\t\t/* @DPDK */\n-\t\t*p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :\n-\t\t\t\t MAX_NUM_VPORTS_BB) / num_funcs;\n+\t\t*p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(p_dev) / num_funcs;\n \t\tbreak;\n \tcase ECORE_BDQ:\n \t\t/* @DPDK */\n@@ -4588,7 +4666,7 @@ static enum _ecore_status_t ecore_hw_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,\n \t/* 4-ports mode has limitations that should be enforced:\n \t * - BB: the MFW can access only PPFIDs which their corresponding PFIDs\n \t *       belong to this certain port.\n-\t * - AH/E5: only 4 PPFIDs per port are available.\n+\t * - AH: only 4 PPFIDs per port are available.\n \t */\n \tif (ecore_device_num_ports(p_dev) == 4) {\n \t\tu8 mask;\n@@ -4627,7 +4705,8 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,\n {\n \tstruct ecore_resc_unlock_params resc_unlock_params;\n \tstruct ecore_resc_lock_params resc_lock_params;\n-\tbool b_ah = ECORE_IS_AH(p_hwfn->p_dev);\n+\tstruct ecore_dev *p_dev = p_hwfn->p_dev;\n+\tu32 max_ilt_lines;\n \tu8 res_id;\n \tenum _ecore_status_t rc;\n #ifndef ASIC_ONLY\n@@ -4703,9 +4782,9 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,\n \t}\n \n #ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {\n+\tif (CHIP_REV_IS_EMUL(p_dev)) {\n \t\t/* Reduced build contains less PQs */\n-\t\tif (!(p_hwfn->p_dev->b_is_emul_full)) {\n+\t\tif (!(p_dev->b_is_emul_full)) {\n \t\t\tresc_num[ECORE_PQ] = 32;\n \t\t\tresc_start[ECORE_PQ] = resc_num[ECORE_PQ] *\n \t\t\t    p_hwfn->enabled_func_idx;\n@@ -4713,26 +4792,27 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,\n \n \t\t/* For AH emulation, since we have a possible maximal number of\n \t\t * 16 enabled PFs, in case there are not enough ILT lines -\n-\t\t * allocate only first PF as RoCE and have all the other ETH\n-\t\t * only with less ILT lines.\n+\t\t * allocate only first PF as RoCE and have all the other as\n+\t\t * ETH-only with less ILT lines.\n+\t\t * In case we increase the number of ILT lines for PF0, we need\n+\t\t * also to correct the start value for PF1-15.\n \t\t */\n-\t\tif (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)\n-\t\t\tresc_num[ECORE_ILT] = OSAL_MAX_T(u32,\n-\t\t\t\t\t\t\t resc_num[ECORE_ILT],\n+\t\tif (ECORE_IS_AH(p_dev) && p_dev->b_is_emul_full) {\n+\t\t\tif (!p_hwfn->rel_pf_id) {\n+\t\t\t\tresc_num[ECORE_ILT] 
=\n+\t\t\t\t\tOSAL_MAX_T(u32, resc_num[ECORE_ILT],\n \t\t\t\t\t\t\t roce_min_ilt_lines);\n+\t\t\t} else if (resc_num[ECORE_ILT] < roce_min_ilt_lines) {\n+\t\t\t\tresc_start[ECORE_ILT] += roce_min_ilt_lines -\n+\t\t\t\t\t\t\t resc_num[ECORE_ILT];\n+\t\t\t}\n+\t\t}\n \t}\n-\n-\t/* Correct the common ILT calculation if PF0 has more */\n-\tif (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&\n-\t    p_hwfn->p_dev->b_is_emul_full &&\n-\t    p_hwfn->rel_pf_id && resc_num[ECORE_ILT] < roce_min_ilt_lines)\n-\t\tresc_start[ECORE_ILT] += roce_min_ilt_lines -\n-\t\t    resc_num[ECORE_ILT];\n #endif\n \n \t/* Sanity for ILT */\n-\tif ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||\n-\t    (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {\n+\tmax_ilt_lines = NUM_OF_PXP_ILT_RECORDS(p_dev);\n+\tif (RESC_END(p_hwfn, ECORE_ILT) > max_ilt_lines) {\n \t\tDP_NOTICE(p_hwfn, true,\n \t\t\t  \"Can't assign ILT pages [%08x,...,%08x]\\n\",\n \t\t\t  RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn,\n@@ -4764,6 +4844,28 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,\n \treturn rc;\n }\n \n+#ifndef ASIC_ONLY\n+static enum _ecore_status_t\n+ecore_emul_hw_get_nvm_info(struct ecore_hwfn *p_hwfn)\n+{\n+\tif (IS_LEAD_HWFN(p_hwfn)) {\n+\t\tstruct ecore_dev *p_dev = p_hwfn->p_dev;\n+\n+\t\t/* The MF mode on emulation is either default or NPAR 1.0 */\n+\t\tp_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |\n+\t\t\t\t 1 << ECORE_MF_LLH_PROTO_CLSS |\n+\t\t\t\t 1 << ECORE_MF_LL2_NON_UNICAST;\n+\t\tif (p_hwfn->num_funcs_on_port > 1)\n+\t\t\tp_dev->mf_bits |= 1 << ECORE_MF_INTER_PF_SWITCH |\n+\t\t\t\t\t  1 << ECORE_MF_DISABLE_ARFS;\n+\t\telse\n+\t\t\tp_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF;\n+\t}\n+\n+\treturn ECORE_SUCCESS;\n+}\n+#endif\n+\n static enum _ecore_status_t\n ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,\n \t\t      struct ecore_ptt *p_ptt,\n@@ -4775,6 +4877,11 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,\n \tstruct ecore_mcp_link_params *link;\n \tenum _ecore_status_t rc;\n \n+#ifndef ASIC_ONLY\n+\tif (CHIP_REV_IS_SLOW(p_hwfn->p_dev))\n+\t\treturn ecore_emul_hw_get_nvm_info(p_hwfn);\n+#endif\n+\n \t/* Read global nvm_cfg address */\n \tnvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);\n \n@@ -5122,49 +5229,17 @@ static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,\n \t\t   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);\n }\n \n-static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,\n-\t\t\t\t      struct ecore_ptt *p_ptt)\n-{\n-\tstruct ecore_dev *p_dev = p_hwfn->p_dev;\n-\tu32 port_mode;\n-\n #ifndef ASIC_ONLY\n-\t/* Read the port mode */\n-\tif (CHIP_REV_IS_FPGA(p_dev))\n-\t\tport_mode = 4;\n-\telse if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev))\n-\t\t/* In CMT on emulation, assume 1 port */\n-\t\tport_mode = 1;\n-\telse\n-#endif\n-\tport_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);\n-\n-\tif (port_mode < 3) {\n-\t\tp_dev->num_ports_in_engine = 1;\n-\t} else if (port_mode <= 5) {\n-\t\tp_dev->num_ports_in_engine = 2;\n-\t} else {\n-\t\tDP_NOTICE(p_hwfn, true, \"PORT MODE: %d not supported\\n\",\n-\t\t\t  p_dev->num_ports_in_engine);\n-\n-\t\t/* Default num_ports_in_engine to something */\n-\t\tp_dev->num_ports_in_engine = 1;\n-\t}\n-}\n-\n-static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,\n+static void ecore_emul_hw_info_port_num(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t struct ecore_ptt *p_ptt)\n {\n \tstruct ecore_dev *p_dev = p_hwfn->p_dev;\n-\tu32 port;\n-\tint i;\n+\tu32 
eco_reserved;\n \n-\tp_dev->num_ports_in_engine = 0;\n+\t/* MISCS_REG_ECO_RESERVED[15:12]: num of ports in an engine */\n+\teco_reserved = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);\n \n-#ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_EMUL(p_dev)) {\n-\t\tport = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);\n-\t\tswitch ((port & 0xf000) >> 12) {\n+\tswitch ((eco_reserved & 0xf000) >> 12) {\n \t\tcase 1:\n \t\t\tp_dev->num_ports_in_engine = 1;\n \t\t\tbreak;\n@@ -5176,49 +5251,43 @@ static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,\n \t\t\tbreak;\n \t\tdefault:\n \t\t\tDP_NOTICE(p_hwfn, false,\n-\t\t\t\t  \"Unknown port mode in ECO_RESERVED %08x\\n\",\n-\t\t\t\t  port);\n-\t\t}\n-\t} else\n-#endif\n-\t\tfor (i = 0; i < MAX_NUM_PORTS_K2; i++) {\n-\t\t\tport = ecore_rd(p_hwfn, p_ptt,\n-\t\t\t\t\tCNIG_REG_NIG_PORT0_CONF_K2 +\n-\t\t\t\t\t(i * 4));\n-\t\t\tif (port & 1)\n-\t\t\t\tp_dev->num_ports_in_engine++;\n+\t\t\t  \"Emulation: Unknown port mode [ECO_RESERVED 0x%08x]\\n\",\n+\t\t\t  eco_reserved);\n+\t\tp_dev->num_ports_in_engine = 2; /* Default to something */\n+\t\tbreak;\n \t\t}\n \n-\tif (!p_dev->num_ports_in_engine) {\n-\t\tDP_NOTICE(p_hwfn, true, \"All NIG ports are inactive\\n\");\n-\n-\t\t/* Default num_ports_in_engine to something */\n-\t\tp_dev->num_ports_in_engine = 1;\n-\t}\n+\tp_dev->num_ports = p_dev->num_ports_in_engine *\n+\t\t\t   ecore_device_num_engines(p_dev);\n }\n+#endif\n \n+/* Determine the number of ports of the device and per engine */\n static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,\n \t\t\t\t   struct ecore_ptt *p_ptt)\n {\n \tstruct ecore_dev *p_dev = p_hwfn->p_dev;\n+\tu32 addr, global_offsize, global_addr;\n \n-\t/* Determine the number of ports per engine */\n-\tif (ECORE_IS_BB(p_dev))\n-\t\tecore_hw_info_port_num_bb(p_hwfn, p_ptt);\n-\telse\n-\t\tecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);\n+#ifndef ASIC_ONLY\n+\tif (CHIP_REV_IS_TEDIBEAR(p_dev)) {\n+\t\tp_dev->num_ports_in_engine = 1;\n+\t\tp_dev->num_ports = 2;\n+\t\treturn;\n+\t}\n+\n+\tif (CHIP_REV_IS_EMUL(p_dev)) {\n+\t\tecore_emul_hw_info_port_num(p_hwfn, p_ptt);\n+\t\treturn;\n+\t}\n+#endif\n \n-\t/* Get the total number of ports of the device */\n-\tif (ECORE_IS_CMT(p_dev)) {\n \t\t/* In CMT there is always only one port */\n+\tif (ECORE_IS_CMT(p_dev)) {\n+\t\tp_dev->num_ports_in_engine = 1;\n \t\tp_dev->num_ports = 1;\n-#ifndef ASIC_ONLY\n-\t} else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) {\n-\t\tp_dev->num_ports = p_dev->num_ports_in_engine *\n-\t\t\t\t   ecore_device_num_engines(p_dev);\n-#endif\n-\t} else {\n-\t\tu32 addr, global_offsize, global_addr;\n+\t\treturn;\n+\t}\n \n \t\taddr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,\n \t\t\t\t\t    PUBLIC_GLOBAL);\n@@ -5226,7 +5295,9 @@ static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,\n \t\tglobal_addr = SECTION_ADDR(global_offsize, 0);\n \t\taddr = global_addr + OFFSETOF(struct public_global, max_ports);\n \t\tp_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr);\n-\t}\n+\n+\tp_dev->num_ports_in_engine = p_dev->num_ports >>\n+\t\t\t\t     (ecore_device_num_engines(p_dev) - 1);\n }\n \n static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,\n@@ -5280,15 +5351,9 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n \n \tecore_mcp_get_capabilities(p_hwfn, p_ptt);\n \n-#ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {\n-#endif\n \trc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);\n \tif (rc != ECORE_SUCCESS)\n \t\treturn rc;\n-#ifndef 
ASIC_ONLY\n-\t}\n-#endif\n \n \trc = ecore_int_igu_read_cam(p_hwfn, p_ptt);\n \tif (rc != ECORE_SUCCESS) {\n@@ -5332,16 +5397,15 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n \t\tprotocol = p_hwfn->mcp_info->func_info.protocol;\n \t\tp_hwfn->hw_info.personality = protocol;\n \t}\n-\n #ifndef ASIC_ONLY\n-\t/* To overcome ILT lack for emulation, until at least until we'll have\n-\t * a definite answer from system about it, allow only PF0 to be RoCE.\n+\telse if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {\n+\t\t/* AH emulation:\n+\t\t * Allow only PF0 to be RoCE to overcome a lack of ILT lines.\n \t */\n-\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {\n-\t\tif (!p_hwfn->rel_pf_id)\n-\t\t\tp_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;\n-\t\telse\n+\t\tif (ECORE_IS_AH(p_hwfn->p_dev) && p_hwfn->rel_pf_id)\n \t\t\tp_hwfn->hw_info.personality = ECORE_PCI_ETH;\n+\t\telse\n+\t\t\tp_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;\n \t}\n #endif\n \n@@ -5379,6 +5443,18 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,\n \treturn rc;\n }\n \n+#define ECORE_MAX_DEVICE_NAME_LEN (8)\n+\n+void ecore_get_dev_name(struct ecore_dev *p_dev, u8 *name, u8 max_chars)\n+{\n+\tu8 n;\n+\n+\tn = OSAL_MIN_T(u8, max_chars, ECORE_MAX_DEVICE_NAME_LEN);\n+\tOSAL_SNPRINTF((char *)name, n, \"%s %c%d\",\n+\t\t      ECORE_IS_BB(p_dev) ? \"BB\" : \"AH\",\n+\t\t      'A' + p_dev->chip_rev, (int)p_dev->chip_metal);\n+}\n+\n static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t       struct ecore_ptt *p_ptt)\n {\n@@ -5423,9 +5499,9 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,\n \t}\n \n #ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_EMUL(p_dev)) {\n+\tif (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_BB(p_dev)) {\n \t\t/* For some reason we have problems with this register\n-\t\t * in B0 emulation; Simply assume no CMT\n+\t\t * in BB B0 emulation; Simply assume no CMT\n \t\t */\n \t\tDP_NOTICE(p_dev->hwfns, false,\n \t\t\t  \"device on emul - assume no CMT\\n\");\n@@ -5456,14 +5532,17 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,\n \n \tif (CHIP_REV_IS_EMUL(p_dev)) {\n \t\ttmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);\n-\t\tif (tmp & (1 << 29)) {\n-\t\t\tDP_NOTICE(p_hwfn, false,\n-\t\t\t\t  \"Emulation: Running on a FULL build\\n\");\n-\t\t\tp_dev->b_is_emul_full = true;\n-\t\t} else {\n+\n+\t\t/* MISCS_REG_ECO_RESERVED[29]: full/reduced emulation build */\n+\t\tp_dev->b_is_emul_full = !!(tmp & (1 << 29));\n+\n+\t\t/* MISCS_REG_ECO_RESERVED[28]: emulation build w/ or w/o MAC */\n+\t\tp_dev->b_is_emul_mac = !!(tmp & (1 << 28));\n+\n \t\t\tDP_NOTICE(p_hwfn, false,\n-\t\t\t\t  \"Emulation: Running on a REDUCED build\\n\");\n-\t\t}\n+\t\t\t  \"Emulation: Running on a %s build %s MAC\\n\",\n+\t\t\t  p_dev->b_is_emul_full ? \"full\" : \"reduced\",\n+\t\t\t  p_dev->b_is_emul_mac ? 
\"with\" : \"without\");\n \t}\n #endif\n \n@@ -5533,7 +5612,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,\n \tp_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);\n \n \t/* First hwfn learns basic information, e.g., number of hwfns */\n-\tif (!p_hwfn->my_id) {\n+\tif (IS_LEAD_HWFN(p_hwfn)) {\n \t\trc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);\n \t\tif (rc != ECORE_SUCCESS) {\n \t\t\tif (p_params->b_relaxed_probe)\n@@ -5543,6 +5622,33 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,\n \t\t}\n \t}\n \n+#ifndef ASIC_ONLY\n+\tif (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && !b_ptt_gtt_init) {\n+\t\tstruct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;\n+\t\tu32 val;\n+\n+\t\t/* Initialize PTT/GTT (done by MFW on ASIC) */\n+\t\tecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_START_INIT_PTT_GTT, 1);\n+\t\tOSAL_MSLEEP(10);\n+\t\tecore_ptt_invalidate(p_hwfn);\n+\t\tval = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_INIT_DONE_PTT_GTT);\n+\t\tif (val != 1) {\n+\t\t\tDP_ERR(p_hwfn,\n+\t\t\t       \"PTT and GTT init in PGLUE_B didn't complete\\n\");\n+\t\t\tgoto err1;\n+\t\t}\n+\n+\t\t/* Clear a possible PGLUE_B parity from a previous GRC access */\n+\t\tecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_PRTY_STS_WR_H_0, 0x380);\n+\n+\t\tb_ptt_gtt_init = true;\n+\t}\n+#endif\n+\n+\t/* Store the precompiled init data ptrs */\n+\tif (IS_LEAD_HWFN(p_hwfn))\n+\t\tecore_init_iro_array(p_hwfn->p_dev);\n+\n \tecore_hw_hwfn_prepare(p_hwfn);\n \n \t/* Initialize MCP structure */\n@@ -5581,9 +5687,6 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,\n \n \t/* Check if mdump logs/data are present and update the epoch value */\n \tif (IS_LEAD_HWFN(p_hwfn)) {\n-#ifndef ASIC_ONLY\n-\t\tif (!CHIP_REV_IS_EMUL(p_dev)) {\n-#endif\n \t\trc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,\n \t\t\t\t\t      &mdump_info);\n \t\tif (rc == ECORE_SUCCESS && mdump_info.num_of_logs)\n@@ -5600,9 +5703,6 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,\n \n \t\tecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,\n \t\t\t\t\t   p_params->epoch);\n-#ifndef ASIC_ONLY\n-\t\t}\n-#endif\n \t}\n \n \t/* Allocate the init RT array and initialize the init-ops engine */\n@@ -5615,10 +5715,12 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,\n \t}\n #ifndef ASIC_ONLY\n \tif (CHIP_REV_IS_FPGA(p_dev)) {\n-\t\tDP_NOTICE(p_hwfn, false,\n-\t\t\t  \"FPGA: workaround; Prevent DMAE parities\\n\");\n-\t\tecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2,\n-\t\t\t 7);\n+\t\tif (ECORE_IS_AH(p_dev)) {\n+\t\t\tDP_NOTICE(p_hwfn, false,\n+\t\t\t\t  \"FPGA: workaround; Prevent DMAE parities\\n\");\n+\t\t\tecore_wr(p_hwfn, p_hwfn->p_main_ptt,\n+\t\t\t\t PCIE_REG_PRTY_MASK_K2, 7);\n+\t\t}\n \n \t\tDP_NOTICE(p_hwfn, false,\n \t\t\t  \"FPGA: workaround: Set VF bar0 size\\n\");\n@@ -5652,10 +5754,6 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,\n \tif (p_params->b_relaxed_probe)\n \t\tp_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;\n \n-\t/* Store the precompiled init data ptrs */\n-\tif (IS_PF(p_dev))\n-\t\tecore_init_iro_array(p_dev);\n-\n \t/* Initialize the first hwfn - will learn number of hwfns */\n \trc = ecore_hw_prepare_single(p_hwfn, p_dev->regview,\n \t\t\t\t     p_dev->doorbells, p_dev->db_phys_addr,\n@@ -5665,7 +5763,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,\n \n \tp_params->personality = p_hwfn->hw_info.personality;\n \n-\t/* initilalize 2nd 
hwfn if necessary */\n+\t/* Initialize 2nd hwfn if necessary */\n \tif (ECORE_IS_CMT(p_dev)) {\n \t\tvoid OSAL_IOMEM *p_regview, *p_doorbell;\n \t\tu8 OSAL_IOMEM *addr;\n@@ -6382,7 +6480,7 @@ static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,\n \tstruct ecore_mcp_link_state *p_link;\n \tint rc = ECORE_SUCCESS;\n \n-\tp_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;\n+\tp_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;\n \n \tif (!p_link->min_pf_rate) {\n \t\tp_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;\ndiff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c\nindex a0a6e3aba..d746aaed1 100644\n--- a/drivers/net/qede/base/ecore_init_fw_funcs.c\n+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c\n@@ -9,31 +9,29 @@\n #include \"ecore_init_ops.h\"\n #include \"reg_addr.h\"\n #include \"ecore_rt_defs.h\"\n-#include \"ecore_hsi_common.h\"\n #include \"ecore_hsi_init_func.h\"\n-#include \"ecore_hsi_eth.h\"\n #include \"ecore_hsi_init_tool.h\"\n #include \"ecore_iro.h\"\n #include \"ecore_init_fw_funcs.h\"\n-\n-#define CDU_VALIDATION_DEFAULT_CFG 61\n-\n static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {\n-\t{ 400,  336,  352,  304,  304,  384,  416,  352}, /* region 3 offsets */\n-\t{ 528,  496,  416,  448,  448,  512,  544,  480}, /* region 4 offsets */\n-\t{ 608,  544,  496,  512,  576,  592,  624,  560}  /* region 5 offsets */\n+\t{ 400,  336,  352,  368,  304,  384,  416,  352}, /* region 3 offsets */\n+\t{ 528,  496,  416,  512,  448,  512,  544,  480}, /* region 4 offsets */\n+\t{ 608,  544,  496,  576,  576,  592,  624,  560}  /* region 5 offsets */\n };\n static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {\n \t{ 240,  240,  112,    0,    0,    0,    0,   96}  /* region 1 offsets */\n };\n \n /* General constants */\n-#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \\\n-\t\t\t\tQM_PQ_ELEMENT_SIZE, 0x1000) : 0)\n-#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \\\n-\t\t\t\t  0)\n+#define QM_PQ_MEM_4KB(pq_size) \\\n+\t(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)\n+#define QM_PQ_SIZE_256B(pq_size) \\\n+\t(pq_size ? 
DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)\n #define QM_INVALID_PQ_ID\t\t0xffff\n \n+/* Max link speed (in Mbps) */\n+#define QM_MAX_LINK_SPEED\t\t100000\n+\n /* Feature enable */\n #define QM_BYPASS_EN\t\t\t1\n #define QM_BYTE_CRD_EN\t\t\t1\n@@ -42,7 +40,8 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {\n #define QM_OTHER_PQS_PER_PF\t\t4\n \n /* VOQ constants */\n-#define QM_E5_NUM_EXT_VOQ\t\t(MAX_NUM_PORTS_E5 * NUM_OF_TCS)\n+#define MAX_NUM_VOQS\t\t\t(MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)\n+#define VOQS_BIT_MASK\t\t\t((1 << MAX_NUM_VOQS) - 1)\n \n /* WFQ constants: */\n \n@@ -53,8 +52,7 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {\n #define QM_WFQ_VP_PQ_VOQ_SHIFT\t\t0\n \n /* Bit  of PF in WFQ VP PQ map */\n-#define QM_WFQ_VP_PQ_PF_E4_SHIFT\t5\n-#define QM_WFQ_VP_PQ_PF_E5_SHIFT\t6\n+#define QM_WFQ_VP_PQ_PF_SHIFT\t\t5\n \n /* 0x9000 = 4*9*1024 */\n #define QM_WFQ_INC_VAL(weight)\t\t((weight) * 0x9000)\n@@ -62,9 +60,6 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {\n /* Max WFQ increment value is 0.7 * upper bound */\n #define QM_WFQ_MAX_INC_VAL\t\t((QM_WFQ_UPPER_BOUND * 7) / 10)\n \n-/* Number of VOQs in E5 QmWfqCrd register */\n-#define QM_WFQ_CRD_E5_NUM_VOQS\t\t16\n-\n /* RL constants: */\n \n /* Period in us */\n@@ -110,8 +105,6 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {\n /* Pure LB CmdQ lines (+spare) */\n #define PBF_CMDQ_PURE_LB_LINES\t\t150\n \n-#define PBF_CMDQ_LINES_E5_RSVD_RATIO\t8\n-\n #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \\\n \t(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \\\n \t ext_voq * \\\n@@ -175,42 +168,25 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {\n \t} while (0)\n \n #define WRITE_PQ_INFO_TO_RAM\t\t1\n-#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl)\t\\\n-\t(((vp) << 0) | ((pf) << 12) | ((tc) << 16) |    \\\n-\t ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))\n-#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \\\n-\t(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)\n \n-/******************** INTERNAL IMPLEMENTATION *********************/\n+#define PQ_INFO_ELEMENT(vp_pq_id, pf, tc, port, rl_valid, rl_id) \\\n+\t(((vp_pq_id) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \\\n+\t ((rl_valid ? 1 : 0) << 22) | (((rl_id) & 255) << 24) | \\\n+\t (((rl_id) >> 8) << 9))\n \n-/* Returns the external VOQ number */\n-static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,\n-\t\t\t    u8 port_id,\n-\t\t\t    u8 tc,\n-\t\t\t    u8 max_phys_tcs_per_port)\n-{\n-\tif (tc == PURE_LB_TC)\n-\t\treturn NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;\n-\telse\n-\t\treturn port_id * (max_phys_tcs_per_port) + tc;\n-}\n+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) (XSEM_REG_FAST_MEMORY + \\\n+\tSEM_FAST_REG_INT_RAM + XSTORM_PQ_INFO_OFFSET(pq_id))\n+\n+/******************** INTERNAL IMPLEMENTATION *********************/\n \n /* Prepare PF RL enable/disable runtime init values */\n static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)\n {\n \tSTORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 
1 : 0);\n \tif (pf_rl_en) {\n-\t\tu8 num_ext_voqs = MAX_NUM_VOQS_E4;\n-\t\tu64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;\n-\n \t\t/* Enable RLs for all VOQs */\n \t\tSTORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,\n-\t\t\t     (u32)voq_bit_mask);\n-#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET\n-\t\tif (num_ext_voqs >= 32)\n-\t\t\tSTORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,\n-\t\t\t\t     (u32)(voq_bit_mask >> 32));\n-#endif\n+\t\t\t     VOQS_BIT_MASK);\n \n \t\t/* Write RL period */\n \t\tSTORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,\n@@ -236,12 +212,13 @@ static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)\n \t\t\t     QM_WFQ_UPPER_BOUND);\n }\n \n-/* Prepare VPORT RL enable/disable runtime init values */\n-static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)\n+/* Prepare global RL enable/disable runtime init values */\n+static void ecore_enable_global_rl(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t   bool global_rl_en)\n {\n \tSTORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,\n-\t\t     vport_rl_en ? 1 : 0);\n-\tif (vport_rl_en) {\n+\t\t     global_rl_en ? 1 : 0);\n+\tif (global_rl_en) {\n \t\t/* Write RL period (use timer 0 only) */\n \t\tSTORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,\n \t\t\t     QM_RL_PERIOD_CLK_25M);\n@@ -272,19 +249,16 @@ static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)\n  * the specified VOQ\n  */\n static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,\n-\t\t\t\t\t u8 ext_voq,\n+\t\t\t\t\t u8 voq,\n \t\t\t\t\t u16 cmdq_lines)\n {\n-\tu32 qm_line_crd;\n+\tu32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);\n \n-\tqm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);\n-\n-\tOVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),\n+\tOVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),\n \t\t\t (u32)cmdq_lines);\n-\tSTORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,\n-\t\t\t qm_line_crd);\n-\tSTORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,\n-\t\t\t qm_line_crd);\n+\tSTORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);\n+\tSTORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,\n+\t\t     qm_line_crd);\n }\n \n /* Prepare runtime init values to allocate PBF command queue lines. */\n@@ -294,12 +268,11 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,\n \t\t\t\t     struct init_qm_port_params\n \t\t\t\t     port_params[MAX_NUM_PORTS])\n {\n-\tu8 tc, ext_voq, port_id, num_tcs_in_port;\n-\tu8 num_ext_voqs = MAX_NUM_VOQS_E4;\n+\tu8 tc, voq, port_id, num_tcs_in_port;\n \n \t/* Clear PBF lines of all VOQs */\n-\tfor (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)\n-\t\tSTORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);\n+\tfor (voq = 0; voq < MAX_NUM_VOQS; voq++)\n+\t\tSTORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);\n \n \tfor (port_id = 0; port_id < max_ports_per_engine; port_id++) {\n \t\tu16 phys_lines, phys_lines_per_tc;\n@@ -308,8 +281,7 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,\n \t\t\tcontinue;\n \n \t\t/* Find number of command queue lines to divide between the\n-\t\t * active physical TCs. 
In E5, 1/8 of the lines are reserved.\n-\t\t * the lines for pure LB TC are subtracted.\n+\t\t * active physical TCs.\n \t\t */\n \t\tphys_lines = port_params[port_id].num_pbf_cmd_lines;\n \t\tphys_lines -= PBF_CMDQ_PURE_LB_LINES;\n@@ -324,18 +296,16 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,\n \n \t\t/* Init registers per active TC */\n \t\tfor (tc = 0; tc < max_phys_tcs_per_port; tc++) {\n-\t\t\text_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,\n-\t\t\t\t\t\t    max_phys_tcs_per_port);\n-\t\t\tif (((port_params[port_id].active_phys_tcs >> tc) &\n-\t\t\t    0x1) == 1)\n-\t\t\t\tecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,\n+\t\t\tvoq = VOQ(port_id, tc, max_phys_tcs_per_port);\n+\t\t\tif (((port_params[port_id].active_phys_tcs >>\n+\t\t\t      tc) & 0x1) == 1)\n+\t\t\t\tecore_cmdq_lines_voq_rt_init(p_hwfn, voq,\n \t\t\t\t\t\t\t     phys_lines_per_tc);\n \t\t}\n \n \t\t/* Init registers for pure LB TC */\n-\t\text_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,\n-\t\t\t\t\t    max_phys_tcs_per_port);\n-\t\tecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,\n+\t\tvoq = VOQ(port_id, PURE_LB_TC, max_phys_tcs_per_port);\n+\t\tecore_cmdq_lines_voq_rt_init(p_hwfn, voq,\n \t\t\t\t\t     PBF_CMDQ_PURE_LB_LINES);\n \t}\n }\n@@ -367,7 +337,7 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,\n \t\t\t\t     port_params[MAX_NUM_PORTS])\n {\n \tu32 usable_blocks, pure_lb_blocks, phys_blocks;\n-\tu8 tc, ext_voq, port_id, num_tcs_in_port;\n+\tu8 tc, voq, port_id, num_tcs_in_port;\n \n \tfor (port_id = 0; port_id < max_ports_per_engine; port_id++) {\n \t\tif (!port_params[port_id].active)\n@@ -399,24 +369,58 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,\n \t\tfor (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {\n \t\t\tif (((port_params[port_id].active_phys_tcs >> tc) &\n \t\t\t     0x1) == 1) {\n-\t\t\t\text_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,\n-\t\t\t\t\t\t\t max_phys_tcs_per_port);\n+\t\t\t\tvoq = VOQ(port_id, tc, max_phys_tcs_per_port);\n \t\t\t\tSTORE_RT_REG(p_hwfn,\n-\t\t\t\t\tPBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),\n+\t\t\t\t\tPBF_BTB_GUARANTEED_RT_OFFSET(voq),\n \t\t\t\t\tphys_blocks);\n \t\t\t}\n \t\t}\n \n \t\t/* Init pure LB TC */\n-\t\text_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,\n-\t\t\t\t\t    max_phys_tcs_per_port);\n-\t\tSTORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),\n+\t\tvoq = VOQ(port_id, PURE_LB_TC, max_phys_tcs_per_port);\n+\t\tSTORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),\n \t\t\t     pure_lb_blocks);\n \t}\n }\n \n+/* Prepare runtime init values for the specified RL.\n+ * If global_rl_params is OSAL_NULL, max link speed (100Gbps) is used instead.\n+ * Return -1 on error.\n+ */\n+static int ecore_global_rl_rt_init(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t   struct init_qm_global_rl_params\n+\t\t\t\t     global_rl_params[COMMON_MAX_QM_GLOBAL_RLS])\n+{\n+\tu32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |\n+\t\t\t  (u32)QM_RL_CRD_REG_SIGN_BIT;\n+\tu32 inc_val;\n+\tu16 rl_id;\n+\n+\t/* Go over all global RLs */\n+\tfor (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {\n+\t\tu32 rate_limit = global_rl_params ?\n+\t\t\t\t global_rl_params[rl_id].rate_limit : 0;\n+\n+\t\tinc_val = QM_RL_INC_VAL(rate_limit ?\n+\t\t\t\t\trate_limit : QM_MAX_LINK_SPEED);\n+\t\tif (inc_val > QM_VP_RL_MAX_INC_VAL(QM_MAX_LINK_SPEED)) {\n+\t\t\tDP_NOTICE(p_hwfn, true, \"Invalid rate limit configuration.\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tSTORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,\n+\t\t\t     
(u32)QM_RL_CRD_REG_SIGN_BIT);\n+\t\tSTORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,\n+\t\t\t     upper_bound);\n+\t\tSTORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,\n+\t\t\t     inc_val);\n+\t}\n+\n+\treturn 0;\n+}\n+\n /* Prepare Tx PQ mapping runtime init values for the specified PF */\n-static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,\n+static int ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,\n \t\t\t\t    struct ecore_ptt *p_ptt,\n \t\t\t\t    u8 pf_id,\n \t\t\t\t    u8 max_phys_tcs_per_port,\n@@ -426,7 +430,7 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,\n \t\t\t\t    u16 start_pq,\n \t\t\t\t    u16 num_pf_pqs,\n \t\t\t\t    u16 num_vf_pqs,\n-\t\t\t\t    u8 start_vport,\n+\t\t\t\t   u16 start_vport,\n \t\t\t\t    u32 base_mem_addr_4kb,\n \t\t\t\t    struct init_qm_pq_params *pq_params,\n \t\t\t\t    struct init_qm_vport_params *vport_params)\n@@ -436,6 +440,9 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,\n \tu32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;\n \tu16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;\n \tu32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;\n+\t#if (WRITE_PQ_INFO_TO_RAM != 0)\n+\t\tu32 pq_info = 0;\n+\t#endif\n \n \tnum_pqs = num_pf_pqs + num_vf_pqs;\n \n@@ -459,24 +466,22 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,\n \n \t/* Go over all Tx PQs */\n \tfor (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {\n-\t\tu32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;\n-\t\tu8 ext_voq, vport_id_in_pf;\n-\t\tbool is_vf_pq, rl_valid;\n-\t\tu16 first_tx_pq_id;\n-\n-\t\text_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,\n-\t\t\t\t\t    pq_params[i].tc_id,\n-\t\t\t\t\t    max_phys_tcs_per_port);\n+\t\tu16 first_tx_pq_id, vport_id_in_pf;\n+\t\tstruct qm_rf_pq_map tx_pq_map;\n+\t\tbool is_vf_pq;\n+\t\tu8 voq;\n+\n+\t\tvoq = VOQ(pq_params[i].port_id, pq_params[i].tc_id,\n+\t\t\t  max_phys_tcs_per_port);\n \t\tis_vf_pq = (i >= num_pf_pqs);\n-\t\trl_valid = pq_params[i].rl_valid > 0;\n \n \t\t/* Update first Tx PQ of VPORT/TC */\n \t\tvport_id_in_pf = pq_params[i].vport_id - start_vport;\n \t\tfirst_tx_pq_id =\n \t\tvport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];\n \t\tif (first_tx_pq_id == QM_INVALID_PQ_ID) {\n-\t\t\tu32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |\n-\t\t\t\t       (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));\n+\t\t\tu32 map_val = (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |\n+\t\t\t\t      (pf_id << QM_WFQ_VP_PQ_PF_SHIFT);\n \n \t\t\t/* Create new VP PQ */\n \t\t\tvport_params[vport_id_in_pf].\n@@ -488,20 +493,10 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,\n \t\t\t\t     first_tx_pq_id, map_val);\n \t\t}\n \n-\t\t/* Check RL ID */\n-\t\tif (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {\n-\t\t\tDP_NOTICE(p_hwfn, true,\n-\t\t\t\t  \"Invalid VPORT ID for rate limiter config\\n\");\n-\t\t\trl_valid = false;\n-\t\t}\n-\n \t\t/* Prepare PQ map entry */\n-\t\tstruct qm_rf_pq_map tx_pq_map;\n-\n \t\tQM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, pq_id, first_tx_pq_id,\n-\t\t\t\t  rl_valid ? 1 : 0,\n-\t\t\t\t  rl_valid ? 
pq_params[i].vport_id : 0,\n-\t\t\t\t  ext_voq, pq_params[i].wrr_group);\n+\t\t\t\t  pq_params[i].rl_valid, pq_params[i].rl_id,\n+\t\t\t\t  voq, pq_params[i].wrr_group);\n \n \t\t/* Set PQ base address */\n \t\tSTORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,\n@@ -514,17 +509,15 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t     (pq_id * 2) + j, 0);\n \n \t\t/* Write PQ info to RAM */\n-\t\tif (WRITE_PQ_INFO_TO_RAM != 0) {\n-\t\t\tu32 pq_info = 0;\n-\n-\t\t\tpq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,\n-\t\t\t\t\t\t  pq_params[i].tc_id,\n-\t\t\t\t\t\t  pq_params[i].port_id,\n-\t\t\t\t\t\t  rl_valid ? 1 : 0, rl_valid ?\n-\t\t\t\t\t\t  pq_params[i].vport_id : 0);\n-\t\t\tecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),\n-\t\t\t\t pq_info);\n-\t\t}\n+#if (WRITE_PQ_INFO_TO_RAM != 0)\n+\t\tpq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,\n+\t\t\t\t\t  pq_params[i].tc_id,\n+\t\t\t\t\t  pq_params[i].port_id,\n+\t\t\t\t\t  pq_params[i].rl_valid,\n+\t\t\t\t\t  pq_params[i].rl_id);\n+\t\tecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),\n+\t\t\t pq_info);\n+#endif\n \n \t\t/* If VF PQ, add indication to PQ VF mask */\n \t\tif (is_vf_pq) {\n@@ -541,6 +534,8 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,\n \t\tif (tx_pq_vf_mask[i])\n \t\t\tSTORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +\n \t\t\t\t     i, tx_pq_vf_mask[i]);\n+\n+\treturn 0;\n }\n \n /* Prepare Other PQ mapping runtime init values for the specified PF */\n@@ -598,7 +593,7 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,\n \t\t\t\tstruct init_qm_pq_params *pq_params)\n {\n \tu32 inc_val, crd_reg_offset;\n-\tu8 ext_voq;\n+\tu8 voq;\n \tu16 i;\n \n \tinc_val = QM_WFQ_INC_VAL(pf_wfq);\n@@ -609,13 +604,12 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,\n \t}\n \n \tfor (i = 0; i < num_tx_pqs; i++) {\n-\t\text_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,\n-\t\t\t\t\t    pq_params[i].tc_id,\n-\t\t\t\t\t    max_phys_tcs_per_port);\n+\t\tvoq = VOQ(pq_params[i].port_id, pq_params[i].tc_id,\n+\t\t\t  max_phys_tcs_per_port);\n \t\tcrd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?\n \t\t\t\t  QM_REG_WFQPFCRD_RT_OFFSET :\n \t\t\t\t  QM_REG_WFQPFCRD_MSB_RT_OFFSET) +\n-\t\t\t\t ext_voq * MAX_NUM_PFS_BB +\n+\t\t\t\t voq * MAX_NUM_PFS_BB +\n \t\t\t\t (pf_id % MAX_NUM_PFS_BB);\n \t\tOVERWRITE_RT_REG(p_hwfn, crd_reg_offset,\n \t\t\t\t (u32)QM_WFQ_CRD_REG_SIGN_BIT);\n@@ -655,19 +649,19 @@ static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)\n  * Return -1 on error.\n  */\n static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,\n-\t\t\t\tu8 num_vports,\n+\t\t\t\tu16 num_vports,\n \t\t\t\tstruct init_qm_vport_params *vport_params)\n {\n-\tu16 vport_pq_id;\n+\tu16 vp_pq_id, vport_id;\n \tu32 inc_val;\n-\tu8 tc, i;\n+\tu8 tc;\n \n \t/* Go over all PF VPORTs */\n-\tfor (i = 0; i < num_vports; i++) {\n-\t\tif (!vport_params[i].wfq)\n+\tfor (vport_id = 0; vport_id < num_vports; vport_id++) {\n+\t\tif (!vport_params[vport_id].wfq)\n \t\t\tcontinue;\n \n-\t\tinc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);\n+\t\tinc_val = QM_WFQ_INC_VAL(vport_params[vport_id].wfq);\n \t\tif (inc_val > QM_WFQ_MAX_INC_VAL) {\n \t\t\tDP_NOTICE(p_hwfn, true,\n \t\t\t\t  \"Invalid VPORT WFQ weight configuration\\n\");\n@@ -676,56 +670,16 @@ static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,\n \n \t\t/* Each VPORT can have several VPORT PQ IDs for various TCs */\n \t\tfor (tc = 0; tc < NUM_OF_TCS; tc++) {\n-\t\t\tvport_pq_id = 
vport_params[i].first_tx_pq_id[tc];\n-\t\t\tif (vport_pq_id != QM_INVALID_PQ_ID) {\n-\t\t\t\tSTORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +\n-\t\t\t\t\t     vport_pq_id,\n-\t\t\t\t\t     (u32)QM_WFQ_CRD_REG_SIGN_BIT);\n-\t\t\t\tSTORE_RT_REG(p_hwfn,\n-\t\t\t\t\t     QM_REG_WFQVPWEIGHT_RT_OFFSET +\n-\t\t\t\t\t     vport_pq_id, inc_val);\n-\t\t\t}\n+\t\t\tvp_pq_id = vport_params[vport_id].first_tx_pq_id[tc];\n+\t\t\tif (vp_pq_id == QM_INVALID_PQ_ID)\n+\t\t\t\tcontinue;\n+\n+\t\t\tSTORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +\n+\t\t\t\t     vp_pq_id, (u32)QM_WFQ_CRD_REG_SIGN_BIT);\n+\t\t\tSTORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +\n+\t\t\t\t     vp_pq_id, inc_val);\n \t\t}\n \t}\n-\treturn 0;\n-}\n-\n-/* Prepare VPORT RL runtime init values for the specified VPORTs.\n- * Return -1 on error.\n- */\n-static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,\n-\t\t\t\t  u8 start_vport,\n-\t\t\t\t  u8 num_vports,\n-\t\t\t\t  u32 link_speed,\n-\t\t\t\t  struct init_qm_vport_params *vport_params)\n-{\n-\tu8 i, vport_id;\n-\tu32 inc_val;\n-\n-\tif (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {\n-\t\tDP_NOTICE(p_hwfn, true,\n-\t\t\t  \"Invalid VPORT ID for rate limiter configuration\\n\");\n-\t\treturn -1;\n-\t}\n-\n-\t/* Go over all PF VPORTs */\n-\tfor (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {\n-\t\tinc_val = QM_RL_INC_VAL(link_speed);\n-\t\tif (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {\n-\t\t\tDP_NOTICE(p_hwfn, true,\n-\t\t\t\t  \"Invalid VPORT rate-limit configuration\\n\");\n-\t\t\treturn -1;\n-\t\t}\n-\n-\t\tSTORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,\n-\t\t\t     (u32)QM_RL_CRD_REG_SIGN_BIT);\n-\t\tSTORE_RT_REG(p_hwfn,\n-\t\t\t     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,\n-\t\t\t     QM_VP_RL_UPPER_BOUND(link_speed) |\n-\t\t\t     (u32)QM_RL_CRD_REG_SIGN_BIT);\n-\t\tSTORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,\n-\t\t\t     inc_val);\n-\t}\n \n \treturn 0;\n }\n@@ -769,10 +723,10 @@ static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,\n \treturn ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);\n }\n \n-\n /******************** INTERFACE IMPLEMENTATION *********************/\n \n-u32 ecore_qm_pf_mem_size(u32 num_pf_cids,\n+u32 ecore_qm_pf_mem_size(struct ecore_hwfn *p_hwfn,\n+\t\t\t u32 num_pf_cids,\n \t\t\t\t\t\t u32 num_vf_cids,\n \t\t\t\t\t\t u32 num_tids,\n \t\t\t\t\t\t u16 num_pf_pqs,\n@@ -788,25 +742,26 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,\n \t\t\t    u8 max_phys_tcs_per_port,\n \t\t\t    bool pf_rl_en,\n \t\t\t    bool pf_wfq_en,\n-\t\t\t    bool vport_rl_en,\n+\t\t\t    bool global_rl_en,\n \t\t\t    bool vport_wfq_en,\n \t\t\t    struct init_qm_port_params\n-\t\t\t    port_params[MAX_NUM_PORTS])\n+\t\t\t\t   port_params[MAX_NUM_PORTS],\n+\t\t\t    struct init_qm_global_rl_params\n+\t\t\t\t   global_rl_params[COMMON_MAX_QM_GLOBAL_RLS])\n {\n-\tu32 mask;\n+\tu32 mask = 0;\n \n \t/* Init AFullOprtnstcCrdMask */\n-\tmask = (QM_OPPOR_LINE_VOQ_DEF <<\n-\t\tQM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |\n-\t\t(QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |\n-\t\t(pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |\n-\t\t(vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |\n-\t\t(pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |\n-\t\t(vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |\n-\t\t(QM_OPPOR_FW_STOP_DEF <<\n-\t\t QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |\n-\t\t(QM_OPPOR_PQ_EMPTY_DEF <<\n-\t\t QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);\n+\tSET_FIELD(mask, 
QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,\n+\t\t  QM_OPPOR_LINE_VOQ_DEF);\n+\tSET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);\n+\tSET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, pf_wfq_en);\n+\tSET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, vport_wfq_en);\n+\tSET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, pf_rl_en);\n+\tSET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, global_rl_en);\n+\tSET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);\n+\tSET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY,\n+\t\t  QM_OPPOR_PQ_EMPTY_DEF);\n \tSTORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);\n \n \t/* Enable/disable PF RL */\n@@ -815,8 +770,8 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,\n \t/* Enable/disable PF WFQ */\n \tecore_enable_pf_wfq(p_hwfn, pf_wfq_en);\n \n-\t/* Enable/disable VPORT RL */\n-\tecore_enable_vport_rl(p_hwfn, vport_rl_en);\n+\t/* Enable/disable global RL */\n+\tecore_enable_global_rl(p_hwfn, global_rl_en);\n \n \t/* Enable/disable VPORT WFQ */\n \tecore_enable_vport_wfq(p_hwfn, vport_wfq_en);\n@@ -829,6 +784,8 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,\n \tecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,\n \t\t\t\t max_phys_tcs_per_port, port_params);\n \n+\tecore_global_rl_rt_init(p_hwfn, global_rl_params);\n+\n \treturn 0;\n }\n \n@@ -843,24 +800,25 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,\n \t\t\tu16 start_pq,\n \t\t\tu16 num_pf_pqs,\n \t\t\tu16 num_vf_pqs,\n-\t\t\tu8 start_vport,\n-\t\t\tu8 num_vports,\n+\t\t\tu16 start_vport,\n+\t\t\tu16 num_vports,\n \t\t\tu16 pf_wfq,\n \t\t\tu32 pf_rl,\n-\t\t\tu32 link_speed,\n \t\t\tstruct init_qm_pq_params *pq_params,\n \t\t\tstruct init_qm_vport_params *vport_params)\n {\n \tu32 other_mem_size_4kb;\n-\tu8 tc, i;\n+\tu16 vport_id;\n+\tu8 tc;\n \n \tother_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *\n \t\t\t     QM_OTHER_PQS_PER_PF;\n \n \t/* Clear first Tx PQ ID array for each VPORT */\n-\tfor (i = 0; i < num_vports; i++)\n+\tfor (vport_id = 0; vport_id < num_vports; vport_id++)\n \t\tfor (tc = 0; tc < NUM_OF_TCS; tc++)\n-\t\t\tvport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;\n+\t\t\tvport_params[vport_id].first_tx_pq_id[tc] =\n+\t\t\t\tQM_INVALID_PQ_ID;\n \n \t/* Map Other PQs (if any) */\n #if QM_OTHER_PQS_PER_PF > 0\n@@ -869,10 +827,12 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,\n #endif\n \n \t/* Map Tx PQs */\n-\tecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,\n-\t\t\t\tis_pf_loading, num_pf_cids, num_vf_cids,\n-\t\t\t\tstart_pq, num_pf_pqs, num_vf_pqs, start_vport,\n-\t\t\t\tother_mem_size_4kb, pq_params, vport_params);\n+\tif (ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,\n+\t\t\t\t    is_pf_loading, num_pf_cids, num_vf_cids,\n+\t\t\t\t    start_pq, num_pf_pqs, num_vf_pqs,\n+\t\t\t\t    start_vport, other_mem_size_4kb, pq_params,\n+\t\t\t\t    vport_params))\n+\t\treturn -1;\n \n \t/* Init PF WFQ */\n \tif (pf_wfq)\n@@ -885,15 +845,10 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,\n \tif (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))\n \t\treturn -1;\n \n-\t/* Set VPORT WFQ */\n+\t/* Init VPORT WFQ */\n \tif (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))\n \t\treturn -1;\n \n-\t/* Set VPORT RL */\n-\tif (ecore_vport_rl_rt_init\n-\t    (p_hwfn, start_vport, num_vports, link_speed, vport_params))\n-\t\treturn -1;\n-\n \treturn 0;\n }\n \n@@ -935,27 +890,49 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,\n \n int ecore_init_vport_wfq(struct ecore_hwfn 
*p_hwfn,\n\t\t\t struct ecore_ptt *p_ptt,\n-\t\t\t u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)\n+\t\t\t u16 first_tx_pq_id[NUM_OF_TCS],\n+\t\t\t u16 wfq)\n {\n-\tu16 vport_pq_id;\n+\tu16 vp_pq_id;\n \tu32 inc_val;\n \tu8 tc;\n \n-\tinc_val = QM_WFQ_INC_VAL(vport_wfq);\n+\tinc_val = QM_WFQ_INC_VAL(wfq);\n \tif (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {\n \t\tDP_NOTICE(p_hwfn, true,\n \t\t\t  \"Invalid VPORT WFQ weight configuration\\n\");\n \t\treturn -1;\n \t}\n \n+\t/* A VPORT can have several VPORT PQ IDs for various TCs */\n \tfor (tc = 0; tc < NUM_OF_TCS; tc++) {\n-\t\tvport_pq_id = first_tx_pq_id[tc];\n-\t\tif (vport_pq_id != QM_INVALID_PQ_ID) {\n+\t\tvp_pq_id = first_tx_pq_id[tc];\n+\t\tif (vp_pq_id != QM_INVALID_PQ_ID) {\n \t\t\tecore_wr(p_hwfn, p_ptt,\n-\t\t\t\t QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);\n+\t\t\t\t QM_REG_WFQVPWEIGHT + vp_pq_id * 4, inc_val);\n \t\t}\n \t}\n \n+\treturn 0;\n+}\n+\n+int ecore_init_global_rl(struct ecore_hwfn *p_hwfn,\n+\t\t\t struct ecore_ptt *p_ptt,\n+\t\t\t u16 rl_id,\n+\t\t\t u32 rate_limit)\n+{\n+\tu32 inc_val;\n+\n+\tinc_val = QM_RL_INC_VAL(rate_limit);\n+\tif (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {\n+\t\tDP_NOTICE(p_hwfn, true, \"Invalid rate limit configuration.\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + rl_id * 4,\n+\t\t (u32)QM_RL_CRD_REG_SIGN_BIT);\n+\tecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);\n+\n \treturn 0;\n }\n \n@@ -1024,6 +1001,7 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,\n \treturn true;\n }\n \n+#ifndef UNUSED_HSI_FUNC\n \n /* NIG: ETS configuration constants */\n #define NIG_TX_ETS_CLIENT_OFFSET\t4\n@@ -1247,6 +1225,9 @@ void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,\n \t}\n }\n \n+#endif /* UNUSED_HSI_FUNC */\n+\n+#ifndef UNUSED_HSI_FUNC\n \n /* PRS: ETS configuration constants */\n #define PRS_ETS_MIN_WFQ_BYTES\t\t1600\n@@ -1313,6 +1294,8 @@ void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,\n \t}\n }\n \n+#endif /* UNUSED_HSI_FUNC */\n+#ifndef UNUSED_HSI_FUNC\n \n /* BRB: RAM configuration constants */\n #define BRB_TOTAL_RAM_BLOCKS_BB\t4800\n@@ -1425,13 +1408,74 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,\n \t}\n }\n \n-/* In MF should be called once per port to set EtherType of OuterTag */\n+#endif /* UNUSED_HSI_FUNC */\n+#ifndef UNUSED_HSI_FUNC\n+\n+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size)\t\t\\\n+\tdo {\t\t\t\t\t\t\t\\\n+\t\tu32 i;\t\t\t\t\t\t\\\n+\t\tfor (i = 0; i < (arr_size); i++)\t\t\\\n+\t\t\tecore_wr(dev, ptt, ((addr) + (4 * i)),\t\\\n+\t\t\t\t ((u32 *)&(arr))[i]);\t\t\\\n+\t} while (0)\n+\n+#ifndef DWORDS_TO_BYTES\n+#define DWORDS_TO_BYTES(dwords)\t\t((dwords) * REG_SIZE)\n+#endif\n+\n+\n+/**\n+ * @brief ecore_dmae_to_grc - is an internal function - writes from host to\n+ * wide-bus registers (split registers are not supported yet)\n+ *\n+ * @param p_hwfn -       HW device data\n+ * @param p_ptt -       ptt window used for writing the registers.\n+ * @param pData - pointer to source data.\n+ * @param addr - Destination register address.\n+ * @param len_in_dwords - data length in DWORDS (u32)\n+ */\n+static int ecore_dmae_to_grc(struct ecore_hwfn *p_hwfn,\n+\t\t\t     struct ecore_ptt *p_ptt,\n+\t\t\t     u32 *pData,\n+\t\t\t     u32 addr,\n+\t\t\t     u32 len_in_dwords)\n+{\n+\tstruct dmae_params params;\n+\tbool read_using_dmae = false;\n+\n+\tif (!pData)\n+\t\treturn -1;\n+\n+\t/* Set DMAE params */\n+\tOSAL_MEMSET(&params, 0, sizeof(params));\n+\n+\tSET_FIELD(params.flags, 
DMAE_PARAMS_COMPLETION_DST, 1);\n+\n+\t/* Execute DMAE command */\n+\tread_using_dmae = !ecore_dmae_host2grc(p_hwfn, p_ptt,\n+\t\t\t\t\t       (u64)(osal_uintptr_t)(pData),\n+\t\t\t\t\t       addr, len_in_dwords, &params);\n+\tif (!read_using_dmae)\n+\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,\n+\t\t\t   \"Failed writing to chip using DMAE, using GRC instead\\n\");\n+\n+\t/* If not read using DMAE, read using GRC */\n+\tif (!read_using_dmae)\n+\t\t/* write to registers using GRC */\n+\t\tARR_REG_WR(p_hwfn, p_ptt, addr, pData, len_in_dwords);\n+\n+\treturn len_in_dwords;\n+}\n+\n+/* In MF, should be called once per port to set EtherType of OuterTag */\n void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)\n {\n \t/* Update DORQ register */\n \tSTORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);\n }\n \n+#endif /* UNUSED_HSI_FUNC */\n+\n #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \\\n (var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))\n #define PRS_ETH_TUNN_OUTPUT_FORMAT        -188897008\n@@ -1580,8 +1624,8 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,\n \t\t ip_geneve_enable ? 1 : 0);\n }\n \n-#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET   4\n-#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT      -927094512\n+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET      3\n+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT   -925189872\n \n void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,\n \t\t\t\t  struct ecore_ptt *p_ptt,\n@@ -1599,10 +1643,9 @@ void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,\n \t\t/* set VXLAN_NO_L2_ENABLE flag */\n \t\treg_val |= cfg_mask;\n \n-\t\t/* update PRS FIC  register */\n+\t\t/* update PRS FIC Format register */\n \t\tecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,\n \t\t (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);\n-\t} else  {\n \t\t/* clear VXLAN_NO_L2_ENABLE flag */\n \t\treg_val &= ~cfg_mask;\n \t}\n@@ -1611,6 +1654,8 @@ void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,\n \tecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);\n }\n \n+#ifndef UNUSED_HSI_FUNC\n+\n #define T_ETH_PACKET_ACTION_GFT_EVENTID  23\n #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR  272\n #define T_ETH_PACKET_MATCH_RFS_EVENTID 25\n@@ -1623,6 +1668,9 @@ void ecore_gft_disable(struct ecore_hwfn *p_hwfn,\n \t\t       struct ecore_ptt *p_ptt,\n \t\t       u16 pf_id)\n {\n+\tstruct regpair ram_line;\n+\tOSAL_MEMSET(&ram_line, 0, sizeof(ram_line));\n+\n \t/* disable gft search for PF */\n \tecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);\n \n@@ -1632,10 +1680,10 @@ void ecore_gft_disable(struct ecore_hwfn *p_hwfn,\n \tecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);\n \n \t/* Zero ramline */\n-\tecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +\n-\t\t\t\tRAM_LINE_SIZE * pf_id, 0);\n-\tecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +\n-\t\t\t\tRAM_LINE_SIZE * pf_id + REG_SIZE, 0);\n+\tecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,\n+\t\t\t  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,\n+\t\t\t  sizeof(ram_line) / REG_SIZE);\n+\n }\n \n \n@@ -1662,7 +1710,8 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,\n \t\t\t       bool ipv6,\n \t\t\t       enum gft_profile_type profile_type)\n {\n-\tu32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;\n+\tu32 reg_val, cam_line, search_non_ip_as_gft;\n+\tstruct regpair ram_line = { 0 };\n \n \tif (!ipv6 && !ipv4)\n \t\tDP_NOTICE(p_hwfn, true, \"gft_config: must accept at least on of - ipv4 or 
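
/* Illustrative sketch (not part of the patch) of the write path introduced by
 * ecore_dmae_to_grc() above: try one DMAE transaction for the whole buffer
 * first and, if that fails, fall back to one GRC register write per dword
 * (the ARR_REG_WR loop). dmae_write()/grc_write32() are hypothetical
 * stand-ins for ecore_dmae_host2grc()/ecore_wr().
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define REG_SIZE 4	/* one GRC register is one dword */

static int dmae_write(uint32_t addr, const uint32_t *data, uint32_t dwords)
{
	(void)addr; (void)data; (void)dwords;
	return -1;	/* simulate "DMAE not ready" to exercise the fallback */
}

static void grc_write32(uint32_t addr, uint32_t val)
{
	printf("GRC wr 0x%08x = 0x%08x\n", addr, val);
}

static int write_wide_bus(uint32_t addr, const uint32_t *data,
			  uint32_t len_in_dwords)
{
	uint32_t i;

	if (data == NULL)
		return -1;

	/* Fast path: a single DMAE transaction covers the whole buffer */
	if (dmae_write(addr, data, len_in_dwords) == 0)
		return (int)len_in_dwords;

	/* Slow path: dword-by-dword GRC writes */
	for (i = 0; i < len_in_dwords; i++)
		grc_write32(addr + i * REG_SIZE, data[i]);

	return (int)len_in_dwords;
}

int main(void)
{
	uint32_t ram_line[2] = { 0xffffffffu, 0x3ffu };

	return write_wide_bus(0x1000, ram_line, 2) < 0;
}
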
ipv6'\\n\");\n@@ -1723,35 +1772,33 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,\n \t\t\t    PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);\n \n \t/* Write line to RAM - compare to filter 4 tuple */\n-\tram_line_lo = 0;\n-\tram_line_hi = 0;\n \n \t/* Search no IP as GFT */\n \tsearch_non_ip_as_gft = 0;\n \n \t/* Tunnel type */\n-\tSET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);\n-\tSET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);\n+\tSET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);\n+\tSET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);\n \n \tif (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {\n-\t\tSET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);\n-\t\tSET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);\n-\t\tSET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);\n-\t\tSET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);\n-\t\tSET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);\n-\t\tSET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);\n+\t\tSET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);\n+\t\tSET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);\n+\t\tSET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);\n+\t\tSET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);\n+\t\tSET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);\n+\t\tSET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);\n \t} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {\n-\t\tSET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);\n-\t\tSET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);\n-\t\tSET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);\n+\t\tSET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);\n+\t\tSET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);\n+\t\tSET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);\n \t} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {\n-\t\tSET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);\n-\t\tSET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);\n+\t\tSET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);\n+\t\tSET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);\n \t} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {\n-\t\tSET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);\n-\t\tSET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);\n+\t\tSET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);\n+\t\tSET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);\n \t} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {\n-\t\tSET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);\n+\t\tSET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);\n \n \t\t/* Allow tunneled traffic without inner IP */\n \t\tsearch_non_ip_as_gft = 1;\n@@ -1759,23 +1806,25 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,\n \n \tecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT,\n \t\t search_non_ip_as_gft);\n-\tecore_wr(p_hwfn, p_ptt,\n-\t\t PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,\n-\t\t ram_line_lo);\n-\tecore_wr(p_hwfn, p_ptt,\n-\t\t PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +\n-\t\t REG_SIZE, ram_line_hi);\n+\tecore_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,\n+\t\t\t  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,\n+\t\t\t  sizeof(ram_line) / REG_SIZE);\n \n \t/* Set default profile so that no filter match will happen */\n-\tecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *\n-\t\t PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);\n-\tecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *\n-\t\t PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);\n+\tram_line.lo = 0xffffffff;\n+\tram_line.hi = 0x3ff;\n+\tecore_dmae_to_grc(p_hwfn, 
p_ptt, (u32 *)&ram_line,\n+\t\t\t  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *\n+\t\t\t  PRS_GFT_CAM_LINES_NO_MATCH,\n+\t\t\t  sizeof(ram_line) / REG_SIZE);\n \n \t/* Enable gft search */\n \tecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);\n }\n \n+\n+#endif /* UNUSED_HSI_FUNC */\n+\n /* Configure VF zone size mode */\n void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,\n \t\t\t\t    struct ecore_ptt *p_ptt, u16 mode,\n@@ -1854,10 +1903,9 @@ static u8 cdu_crc8_table[CRC8_TABLE_SIZE];\n /* Calculate and return CDU validation byte per connection type / region /\n  * cid\n  */\n-static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)\n+static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t u8 conn_type, u8 region, u32 cid)\n {\n-\tconst u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;\n-\n \tstatic u8 crc8_table_valid;\t/*automatically initialized to 0*/\n \tu8 crc, validation_byte = 0;\n \tu32 validation_string = 0;\n@@ -1874,15 +1922,20 @@ static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)\n \t * [7:4]   = Region\n \t * [3:0]   = Type\n \t */\n-\tif ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)\n-\t\tvalidation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);\n-\n-\tif ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)\n-\t\tvalidation_string |= ((region & 0xF) << 4);\n+#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \\\n+\tCDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)\n+\tvalidation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);\n+#endif\n \n-\tif ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)\n-\t\tvalidation_string |= (conn_type & 0xF);\n+#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \\\n+\tCDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)\n+\tvalidation_string |= ((region & 0xF) << 4);\n+#endif\n \n+#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \\\n+\tCDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)\n+\tvalidation_string |= (conn_type & 0xF);\n+#endif\n \t/* Convert to big-endian and calculate CRC8*/\n \tdata_to_crc = OSAL_BE32_TO_CPU(validation_string);\n \n@@ -1899,40 +1952,41 @@ static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)\n \t * [6:3]\t= connection_type[3:0]\n \t * [2:0]\t= crc[2:0]\n \t */\n-\n-\tvalidation_byte |= ((validation_cfg >>\n+\tvalidation_byte |= ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >>\n \t\t\t     CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;\n \n-\tif ((validation_cfg >>\n-\t     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)\n-\t\tvalidation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);\n-\telse\n-\t\tvalidation_byte |= crc & 0x7F;\n-\n+#if ((CDU_CONTEXT_VALIDATION_DEFAULT_CFG >> \\\n+\tCDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)\n+\tvalidation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);\n+#else\n+\tvalidation_byte |= crc & 0x7F;\n+#endif\n \treturn validation_byte;\n }\n \n /* Calcualte and set validation bytes for session context */\n-void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,\n+void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       void *p_ctx_mem, u16 ctx_size,\n \t\t\t\t       u8 ctx_type, u32 cid)\n {\n \tu8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;\n \n-\tp_ctx = (u8 *)p_ctx_mem;\n+\tp_ctx = (u8 * const)p_ctx_mem;\n+\n \tx_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];\n \tt_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];\n \tu_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];\n \n \tOSAL_MEMSET(p_ctx, 0, ctx_size);\n 
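
/* Illustrative sketch (not part of the patch): the gft_config hunk above
 * replaces the two independent ram_line_lo/ram_line_hi words with one
 * `struct regpair`, so the whole RAM line can be zeroed and pushed with a
 * single ecore_dmae_to_grc() call. The flag bit positions below are
 * hypothetical, not the real GFT_RAM_LINE_* layout.
 */
#include <stdint.h>
#include <stdio.h>

struct regpair {
	uint32_t lo;
	uint32_t hi;
};

enum gft_profile { PROFILE_4_TUPLE, PROFILE_L4_DST_PORT };

/* hypothetical flag bits */
#define LO_ETHERTYPE	(1u << 0)
#define LO_SRC_PORT	(1u << 1)
#define LO_DST_PORT	(1u << 2)
#define HI_SRC_IP	(1u << 0)
#define HI_DST_IP	(1u << 1)
#define HI_IP_PROTO	(1u << 2)

static struct regpair build_ram_line(enum gft_profile prof)
{
	struct regpair line = { 0, 0 };	/* zeroed, like OSAL_MEMSET above */

	switch (prof) {
	case PROFILE_4_TUPLE:
		line.hi |= HI_DST_IP | HI_SRC_IP | HI_IP_PROTO;
		line.lo |= LO_ETHERTYPE | LO_SRC_PORT | LO_DST_PORT;
		break;
	case PROFILE_L4_DST_PORT:
		line.hi |= HI_IP_PROTO;
		line.lo |= LO_ETHERTYPE | LO_DST_PORT;
		break;
	}
	return line;
}

int main(void)
{
	struct regpair line = build_ram_line(PROFILE_4_TUPLE);

	printf("lo=0x%x hi=0x%x\n", line.lo, line.hi);
	return 0;
}
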
\n-\t*x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);\n-\t*t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);\n-\t*u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);\n+\t*x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid);\n+\t*t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid);\n+\t*u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid);\n }\n \n /* Calcualte and set validation bytes for task context */\n-void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,\n-\t\t\t\t    u32 tid)\n+void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,\n+\t\t\t\t    u16 ctx_size, u8 ctx_type, u32 tid)\n {\n \tu8 *p_ctx, *region1_val_ptr;\n \n@@ -1941,16 +1995,19 @@ void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,\n \n \tOSAL_MEMSET(p_ctx, 0, ctx_size);\n \n-\t*region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);\n+\t*region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 1,\n+\t\t\t\t\t\t\t  tid);\n }\n \n /* Memset session context to 0 while preserving validation bytes */\n-void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)\n+void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,\n+\t\t\t      u32 ctx_size, u8 ctx_type)\n {\n \tu8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;\n \tu8 x_val, t_val, u_val;\n \n-\tp_ctx = (u8 *)p_ctx_mem;\n+\tp_ctx = (u8 * const)p_ctx_mem;\n+\n \tx_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];\n \tt_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];\n \tu_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];\n@@ -1967,7 +2024,8 @@ void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)\n }\n \n /* Memset task context to 0 while preserving validation bytes */\n-void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)\n+void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,\n+\t\t\t   u32 ctx_size, u8 ctx_type)\n {\n \tu8 *p_ctx, *region1_val_ptr;\n \tu8 region1_val;\n@@ -1988,62 +2046,15 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,\n {\n \tu32 ctx_validation;\n \n-\t/* Enable validation for connection region 3 - bits [31:24] */\n-\tctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;\n+\t/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */\n+\tctx_validation = CDU_CONTEXT_VALIDATION_DEFAULT_CFG << 24;\n \tecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);\n \n-\t/* Enable validation for connection region 5 - bits [15: 8] */\n-\tctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;\n+\t/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */\n+\tctx_validation = CDU_CONTEXT_VALIDATION_DEFAULT_CFG << 8;\n \tecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);\n \n-\t/* Enable validation for connection region 1 - bits [15: 8] */\n-\tctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;\n+\t/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */\n+\tctx_validation = CDU_CONTEXT_VALIDATION_DEFAULT_CFG << 8;\n \tecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);\n }\n-\n-\n-/*******************************************************************************\n- * File name : rdma_init.c\n- * Author    : Michael Shteinbok\n- *******************************************************************************\n- *******************************************************************************\n- * Description:\n- 
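
/* Illustrative sketch (not part of the patch) of the CDU validation-byte
 * recipe documented in the comments above: build a 32-bit validation string
 * from cid/region/type, CRC8 it in big-endian order, then assemble the final
 * byte. The bit layouts follow the patch's own comments; the CRC polynomial
 * and seed here (0x07, 0xff) are assumptions -- the driver builds its table
 * with OSAL_CRC8_POPULATE.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t crc8(uint8_t crc, const uint8_t *buf, int len)
{
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x07)
					   : (uint8_t)(crc << 1);
	}
	return crc;
}

static uint8_t calc_validation_byte(uint8_t conn_type, uint8_t region,
				    uint32_t cid)
{
	/* [31:8] = CID, [7:4] = region, [3:0] = type (as documented above) */
	uint32_t vs = (cid & 0xFFF00000u) | ((cid & 0xFFFu) << 8)
		    | ((uint32_t)(region & 0xF) << 4)
		    | (conn_type & 0xF);
	/* convert to big-endian byte order before the CRC, as the code does */
	uint8_t be[4] = { vs >> 24, vs >> 16, vs >> 8, vs };
	uint8_t crc = crc8(0xff, be, 4);

	/* [7] = active, [6:3] = connection type, [2:0] = crc[2:0] */
	return 0x80 | (uint8_t)((conn_type & 0xF) << 3) | (crc & 0x7);
}

int main(void)
{
	printf("0x%02x\n", calc_validation_byte(1, 3, 0x1234));
	return 0;
}
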
* RDMA HSI functions\n- *\n- *******************************************************************************\n- * Notes: This is the input to the auto generated file drv_init_fw_funcs.c\n- *\n- *******************************************************************************\n- */\n-static u32 ecore_get_rdma_assert_ram_addr(struct ecore_hwfn *p_hwfn,\n-\t\t\t\t\t  u8 storm_id)\n-{\n-\tswitch (storm_id) {\n-\tcase 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +\n-\t\t       TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);\n-\tcase 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +\n-\t\t       MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);\n-\tcase 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +\n-\t\t       USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);\n-\tcase 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +\n-\t\t       XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);\n-\tcase 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +\n-\t\t       YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);\n-\tcase 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +\n-\t\t       PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);\n-\n-\tdefault: return 0;\n-\t}\n-}\n-\n-void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,\n-\t\t\t\tstruct ecore_ptt *p_ptt,\n-\t\t\t\tu8 assert_level[NUM_STORMS])\n-{\n-\tu8 storm_id;\n-\tfor (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {\n-\t\tu32 ram_addr = ecore_get_rdma_assert_ram_addr(p_hwfn, storm_id);\n-\n-\t\tecore_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);\n-\t}\n-}\ndiff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h\nindex 3503a90c1..1d1b107c4 100644\n--- a/drivers/net/qede/base/ecore_init_fw_funcs.h\n+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h\n@@ -6,7 +6,20 @@\n \n #ifndef _INIT_FW_FUNCS_H\n #define _INIT_FW_FUNCS_H\n-/* Forward declarations */\n+#include \"ecore_hsi_common.h\"\n+#include \"ecore_hsi_eth.h\"\n+\n+/* Physical memory descriptor */\n+struct phys_mem_desc {\n+\tdma_addr_t phys_addr;\n+\tvoid *virt_addr;\n+\tu32 size; /* In bytes */\n+};\n+\n+/* Returns the VOQ based on port and TC */\n+#define VOQ(port, tc, max_phys_tcs_per_port) \\\n+\t((tc) == PURE_LB_TC ? 
NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + (port) : \\\n+\t (port) * (max_phys_tcs_per_port) + (tc))\n \n struct init_qm_pq_params;\n \n@@ -16,6 +29,7 @@ struct init_qm_pq_params;\n  * Returns the required host memory size in 4KB units.\n  * Must be called before all QM init HSI functions.\n  *\n+ * @param p_hwfn -\t\tHW device data\n  * @param num_pf_cids - number of connections used by this PF\n  * @param num_vf_cids -\tnumber of connections used by VFs of this PF\n  * @param num_tids -\tnumber of tasks used by this PF\n@@ -24,7 +38,8 @@ struct init_qm_pq_params;\n  *\n  * @return The required host memory size in 4KB units.\n  */\n-u32 ecore_qm_pf_mem_size(u32 num_pf_cids,\n+u32 ecore_qm_pf_mem_size(struct ecore_hwfn *p_hwfn,\n+\t\t\t u32 num_pf_cids,\n \t\t\t\t\t\t u32 num_vf_cids,\n \t\t\t\t\t\t u32 num_tids,\n \t\t\t\t\t\t u16 num_pf_pqs,\n@@ -39,20 +54,24 @@ u32 ecore_qm_pf_mem_size(u32 num_pf_cids,\n  * @param max_phys_tcs_per_port\t- max number of physical TCs per port in HW\n  * @param pf_rl_en\t\t- enable per-PF rate limiters\n  * @param pf_wfq_en\t\t- enable per-PF WFQ\n- * @param vport_rl_en\t\t- enable per-VPORT rate limiters\n+ * @param global_rl_en -\t  enable global rate limiters\n  * @param vport_wfq_en\t\t- enable per-VPORT WFQ\n- * @param port_params - array of size MAX_NUM_PORTS with params for each port\n+ * @param port_params -\t\t  array with parameters for each port.\n+ * @param global_rl_params -\t  array with parameters for each global RL.\n+ *\t\t\t\t  If OSAL_NULL, global RLs are not configured.\n  *\n  * @return 0 on success, -1 on error.\n  */\n int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,\n-\t\t\t u8 max_ports_per_engine,\n-\t\t\t u8 max_phys_tcs_per_port,\n-\t\t\t bool pf_rl_en,\n-\t\t\t bool pf_wfq_en,\n-\t\t\t bool vport_rl_en,\n-\t\t\t bool vport_wfq_en,\n-\t\t\t struct init_qm_port_params port_params[MAX_NUM_PORTS]);\n+\t\t\t    u8 max_ports_per_engine,\n+\t\t\t    u8 max_phys_tcs_per_port,\n+\t\t\t    bool pf_rl_en,\n+\t\t\t    bool pf_wfq_en,\n+\t\t\t    bool global_rl_en,\n+\t\t\t    bool vport_wfq_en,\n+\t\t\t  struct init_qm_port_params port_params[MAX_NUM_PORTS],\n+\t\t\t  struct init_qm_global_rl_params\n+\t\t\t\t global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]);\n \n /**\n  * @brief ecore_qm_pf_rt_init  Prepare QM runtime init values for the PF phase\n@@ -76,7 +95,6 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,\n  *\t\t   be 0. otherwise, the weight must be non-zero.\n  * @param pf_rl - rate limit in Mb/sec units. a value of 0 means don't\n  *                configure. ignored if PF RL is globally disabled.\n- * @param link_speed -\t\t  link speed in Mbps.\n  * @param pq_params - array of size (num_pf_pqs+num_vf_pqs) with parameters for\n  *                    each Tx PQ associated with the specified PF.\n  * @param vport_params - array of size num_vports with parameters for each\n@@ -95,11 +113,10 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,\n \t\t\tu16 start_pq,\n \t\t\tu16 num_pf_pqs,\n \t\t\tu16 num_vf_pqs,\n-\t\t\tu8 start_vport,\n-\t\t\tu8 num_vports,\n+\t\t\tu16 start_vport,\n+\t\t\tu16 num_vports,\n \t\t\tu16 pf_wfq,\n \t\t\tu32 pf_rl,\n-\t\t\tu32 link_speed,\n \t\t\tstruct init_qm_pq_params *pq_params,\n \t\t\tstruct init_qm_vport_params *vport_params);\n \n@@ -141,14 +158,30 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,\n  * @param first_tx_pq_id- An array containing the first Tx PQ ID associated\n  *                        with the VPORT for each TC. 
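
/* Illustrative sketch (not part of the patch) of the VOQ() mapping defined
 * just above: physical TCs get port-major VOQ numbers, while the pure
 * loopback TC gets one dedicated VOQ per port past all physical ones. The
 * constant values below are assumptions for the demo, not the real HSI
 * values.
 */
#include <stdio.h>

#define PURE_LB_TC		8
#define NUM_OF_PHYS_TCS		8
#define MAX_NUM_PORTS_BB	2

#define VOQ(port, tc, max_phys_tcs_per_port)				  \
	((tc) == PURE_LB_TC ? NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + (port) : \
	 (port) * (max_phys_tcs_per_port) + (tc))

int main(void)
{
	printf("port0/tc0 -> VOQ %d\n", VOQ(0, 0, 4));		/* 0 */
	printf("port1/tc3 -> VOQ %d\n", VOQ(1, 3, 4));		/* 7 */
	printf("port1/LB  -> VOQ %d\n", VOQ(1, PURE_LB_TC, 4));	/* 17 */
	return 0;
}
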
This array is filled by\n  *                        ecore_qm_pf_rt_init\n- * @param vport_wfq\t\t- WFQ weight. Must be non-zero.\n+ * @param wfq -\t\t   WFQ weight. Must be non-zero.\n  *\n  * @return 0 on success, -1 on error.\n  */\n int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t\t struct ecore_ptt *p_ptt,\n \t\t\t\t\t\t u16 first_tx_pq_id[NUM_OF_TCS],\n-\t\t\t\t\t\t u16 vport_wfq);\n+\t\t\t u16 wfq);\n+\n+/**\n+ * @brief ecore_init_global_rl - Initializes the rate limit of the specified\n+ * rate limiter.\n+ *\n+ * @param p_hwfn -\t\tHW device data\n+ * @param p_ptt -\t\tptt window used for writing the registers\n+ * @param rl_id -\tRL ID\n+ * @param rate_limit -\trate limit in Mb/sec units\n+ *\n+ * @return 0 on success, -1 on error.\n+ */\n+int ecore_init_global_rl(struct ecore_hwfn *p_hwfn,\n+\t\t\t struct ecore_ptt *p_ptt,\n+\t\t\t u16 rl_id,\n+\t\t\t u32 rate_limit);\n \n /**\n  * @brief ecore_init_vport_rl - Initializes the rate limit of the specified\n@@ -283,8 +316,9 @@ void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,\n \n /**\n  * @brief ecore_set_vxlan_dest_port - initializes vxlan tunnel destination udp\n- *                                    port\n+ * port.\n  *\n+ * @param p_hwfn -       HW device data\n  * @param p_ptt     - ptt window used for writing the registers.\n  * @param dest_port - vxlan destination udp port.\n  */\n@@ -295,6 +329,7 @@ void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,\n /**\n  * @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW\n  *\n+ * @param p_hwfn -      HW device data\n  * @param p_ptt\t\t- ptt window used for writing the registers.\n  * @param vxlan_enable\t- vxlan enable flag.\n  */\n@@ -305,6 +340,7 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,\n /**\n  * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW\n  *\n+ * @param p_hwfn -        HW device data\n  * @param p_ptt          - ptt window used for writing the registers.\n  * @param eth_gre_enable - eth GRE enable enable flag.\n  * @param ip_gre_enable  - IP GRE enable enable flag.\n@@ -318,6 +354,7 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,\n  * @brief ecore_set_geneve_dest_port - initializes geneve tunnel destination\n  *                                     udp port\n  *\n+ * @param p_hwfn -       HW device data\n  * @param p_ptt     - ptt window used for writing the registers.\n  * @param dest_port - geneve destination udp port.\n  */\n@@ -326,8 +363,9 @@ void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,\n \t\t\t\tu16 dest_port);\n \n /**\n- * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW\n+ * @brief ecore_set_geneve_enable - enable or disable GRE tunnel in HW\n  *\n+ * @param p_hwfn -         HW device data\n  * @param p_ptt             - ptt window used for writing the registers.\n  * @param eth_geneve_enable - eth GENEVE enable enable flag.\n  * @param ip_geneve_enable  - IP GENEVE enable enable flag.\n@@ -347,7 +385,7 @@ void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,\n \t\t\t\t   struct ecore_ptt *p_ptt);\n \n /**\n- * @brief ecore_gft_disable - Disable and GFT\n+ * @brief ecore_gft_disable - Disable GFT\n  *\n  * @param p_hwfn -   HW device data\n  * @param p_ptt -   ptt window used for writing the registers.\n@@ -360,6 +398,7 @@ void ecore_gft_disable(struct ecore_hwfn *p_hwfn,\n /**\n  * @brief ecore_gft_config - Enable and configure HW for GFT\n *\n+ * @param p_hwfn -   HW device data\n * @param p_ptt\t- ptt window used for writing the 
registers.\n  * @param pf_id - pf on which to enable GFT.\n * @param tcp\t- set profile tcp packets.\n@@ -382,12 +421,13 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,\n * @brief ecore_config_vf_zone_size_mode - Configure VF zone size mode. Must be\n *                                         used before first ETH queue started.\n *\n-*\n+ * @param p_hwfn -      HW device data\n * @param p_ptt        -  ptt window used for writing the registers. Don't care\n-*                        if runtime_init used\n+ *           if runtime_init used.\n * @param mode         -  VF zone size mode. Use enum vf_zone_size_mode.\n-* @param runtime_init -  Set 1 to init runtime registers in engine phase. Set 0\n-*                        if VF zone size mode configured after engine phase.\n+ * @param runtime_init - Set 1 to init runtime registers in engine phase.\n+ *           Set 0 if VF zone size mode configured after engine\n+ *           phase.\n */\n void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt\n \t\t\t\t    *p_ptt, u16 mode, bool runtime_init);\n@@ -396,6 +436,7 @@ void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt\n  * @brief ecore_get_mstorm_queue_stat_offset - Get mstorm statistics offset by\n  * VF zone size mode.\n *\n+ * @param p_hwfn -         HW device data\n * @param stat_cnt_id         -  statistic counter id\n * @param vf_zone_size_mode   -  VF zone size mode. Use enum vf_zone_size_mode.\n */\n@@ -406,6 +447,7 @@ u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,\n  * @brief ecore_get_mstorm_eth_vf_prods_offset - VF producer offset by VF zone\n  * size mode.\n *\n+ * @param p_hwfn -           HW device data\n * @param vf_id               -  vf id.\n * @param vf_queue_id         -  per VF rx queue id.\n * @param vf_zone_size_mode   -  vf zone size mode. 
Use enum vf_zone_size_mode.\n@@ -416,6 +458,7 @@ u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8\n  * @brief ecore_enable_context_validation - Enable and configure context\n  *                                          validation.\n  *\n+ * @param p_hwfn -   HW device data\n  * @param p_ptt - ptt window used for writing the registers.\n  */\n void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,\n@@ -424,12 +467,14 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,\n  * @brief ecore_calc_session_ctx_validation - Calcualte validation byte for\n  * session context.\n  *\n+ * @param p_hwfn -\t\tHW device data\n  * @param p_ctx_mem -\tpointer to context memory.\n  * @param ctx_size -\tcontext size.\n  * @param ctx_type -\tcontext type.\n  * @param cid -\t\tcontext cid.\n  */\n-void ecore_calc_session_ctx_validation(void *p_ctx_mem,\n+void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t       void *p_ctx_mem,\n \t\t\t\t       u16 ctx_size,\n \t\t\t\t       u8 ctx_type,\n \t\t\t\t       u32 cid);\n@@ -438,12 +483,14 @@ void ecore_calc_session_ctx_validation(void *p_ctx_mem,\n  * @brief ecore_calc_task_ctx_validation - Calcualte validation byte for task\n  * context.\n  *\n+ * @param p_hwfn -\t\tHW device data\n  * @param p_ctx_mem -\tpointer to context memory.\n  * @param ctx_size -\tcontext size.\n  * @param ctx_type -\tcontext type.\n  * @param tid -\t\t    context tid.\n  */\n-void ecore_calc_task_ctx_validation(void *p_ctx_mem,\n+void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t    void *p_ctx_mem,\n \t\t\t\t    u16 ctx_size,\n \t\t\t\t    u8 ctx_type,\n \t\t\t\t    u32 tid);\n@@ -457,18 +504,22 @@ void ecore_calc_task_ctx_validation(void *p_ctx_mem,\n  * @param ctx_size -  size to initialzie.\n  * @param ctx_type -  context type.\n  */\n-void ecore_memset_session_ctx(void *p_ctx_mem,\n+void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn,\n+\t\t\t      void *p_ctx_mem,\n \t\t\t      u32 ctx_size,\n \t\t\t      u8 ctx_type);\n+\n /**\n  * @brief ecore_memset_task_ctx - Memset task context to 0 while preserving\n  * validation bytes.\n  *\n+ * @param p_hwfn -\t\tHW device data\n  * @param p_ctx_mem - pointer to context memory.\n  * @param ctx_size -  size to initialzie.\n  * @param ctx_type -  context type.\n  */\n-void ecore_memset_task_ctx(void *p_ctx_mem,\n+void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn,\n+\t\t\t   void *p_ctx_mem,\n \t\t\t   u32 ctx_size,\n \t\t\t   u8 ctx_type);\n \ndiff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c\nindex ad8570a08..ea964ea2f 100644\n--- a/drivers/net/qede/base/ecore_init_ops.c\n+++ b/drivers/net/qede/base/ecore_init_ops.c\n@@ -15,7 +15,6 @@\n \n #include \"ecore_iro_values.h\"\n #include \"ecore_sriov.h\"\n-#include \"ecore_gtt_values.h\"\n #include \"reg_addr.h\"\n #include \"ecore_init_ops.h\"\n \n@@ -24,7 +23,7 @@\n \n void ecore_init_iro_array(struct ecore_dev *p_dev)\n {\n-\tp_dev->iro_arr = iro_arr;\n+\tp_dev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET;\n }\n \n /* Runtime configuration helpers */\n@@ -473,9 +472,9 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,\n \t\t\t\t    int phase, int phase_id, int modes)\n {\n \tstruct ecore_dev *p_dev = p_hwfn->p_dev;\n+\tbool b_dmae = (phase != PHASE_ENGINE);\n \tu32 cmd_num, num_init_ops;\n \tunion init_op *init;\n-\tbool b_dmae = false;\n \tenum _ecore_status_t rc = ECORE_SUCCESS;\n \n \tnum_init_ops = 
p_dev->fw_data->init_ops_size;\n@@ -511,7 +510,6 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,\n \t\tcase INIT_OP_IF_PHASE:\n \t\t\tcmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,\n \t\t\t\t\t\t\tphase_id);\n-\t\t\tb_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);\n \t\t\tbreak;\n \t\tcase INIT_OP_DELAY:\n \t\t\t/* ecore_init_run is always invoked from\n@@ -522,6 +520,9 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,\n \n \t\tcase INIT_OP_CALLBACK:\n \t\t\trc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);\n+\t\t\tif (phase == PHASE_ENGINE &&\n+\t\t\t    cmd->callback.callback_id == DMAE_READY_CB)\n+\t\t\t\tb_dmae = true;\n \t\t\tbreak;\n \t\t}\n \n@@ -567,11 +568,17 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,\n \tfw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));\n \tlen = buf_hdr[BIN_BUF_INIT_CMD].length;\n \tfw->init_ops_size = len / sizeof(struct init_raw_op);\n+\toffset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;\n+\tfw->fw_overlays = (u32 *)(fw_data + offset);\n+\tlen = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;\n+\tfw->fw_overlays_len = len;\n #else\n \tfw->init_ops = (union init_op *)init_ops;\n \tfw->arr_data = (u32 *)init_val;\n \tfw->modes_tree_buf = (u8 *)modes_tree_buf;\n \tfw->init_ops_size = init_ops_size;\n+\tfw->fw_overlays = fw_overlays;\n+\tfw->fw_overlays_len = sizeof(fw_overlays);\n #endif\n \n \treturn ECORE_SUCCESS;\ndiff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h\nindex 21e433309..0cbf293b3 100644\n--- a/drivers/net/qede/base/ecore_init_ops.h\n+++ b/drivers/net/qede/base/ecore_init_ops.h\n@@ -95,6 +95,6 @@ void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,\n \t\t\t     osal_size_t       size);\n \n #define STORE_RT_REG_AGG(hwfn, offset, val)\t\t\t\\\n-\tecore_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))\n+\tecore_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val))\n \n #endif /* __ECORE_INIT_OPS__ */\ndiff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c\nindex c8536380c..b1e127849 100644\n--- a/drivers/net/qede/base/ecore_int.c\n+++ b/drivers/net/qede/base/ecore_int.c\n@@ -28,8 +28,10 @@ struct ecore_pi_info {\n \n struct ecore_sb_sp_info {\n \tstruct ecore_sb_info sb_info;\n-\t/* per protocol index data */\n+\n+\t/* Per protocol index data */\n \tstruct ecore_pi_info pi_info_arr[MAX_PIS_PER_SB];\n+\tosal_size_t pi_info_arr_size;\n };\n \n enum ecore_attention_type {\n@@ -58,10 +60,10 @@ struct aeu_invert_reg_bit {\n #define ATTENTION_OFFSET_MASK\t\t(0x000ff000)\n #define ATTENTION_OFFSET_SHIFT\t\t(12)\n \n-#define ATTENTION_BB_MASK\t\t(0x00700000)\n+#define ATTENTION_BB_MASK\t\t(0xf)\n #define ATTENTION_BB_SHIFT\t\t(20)\n #define ATTENTION_BB(value)\t\t((value) << ATTENTION_BB_SHIFT)\n-#define ATTENTION_BB_DIFFERENT\t\t(1 << 23)\n+#define ATTENTION_BB_DIFFERENT\t\t(1 << 24)\n \n #define\tATTENTION_CLEAR_ENABLE\t\t(1 << 28)\n \tunsigned int flags;\n@@ -606,6 +608,8 @@ enum aeu_invert_reg_special_type {\n \tAEU_INVERT_REG_SPECIAL_CNIG_1,\n \tAEU_INVERT_REG_SPECIAL_CNIG_2,\n \tAEU_INVERT_REG_SPECIAL_CNIG_3,\n+\tAEU_INVERT_REG_SPECIAL_MCP_UMP_TX,\n+\tAEU_INVERT_REG_SPECIAL_MCP_SCPAD,\n \tAEU_INVERT_REG_SPECIAL_MAX,\n };\n \n@@ -615,6 +619,8 @@ aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {\n \t{\"CNIG port 1\", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},\n \t{\"CNIG port 2\", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},\n \t{\"CNIG port 3\", ATTENTION_SINGLE, OSAL_NULL, 
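
/* Illustrative sketch (not part of the patch) of why the STORE_RT_REG_AGG
 * hunk above adds parentheses around `val`: with an expression argument,
 * unary `&` binds to the first operand only, so the unparenthesized form can
 * silently misparse instead of failing to compile.
 */
#include <stdio.h>

#define ADDR_BAD(val)	(&val)
#define ADDR_GOOD(val)	(&(val))

int main(void)
{
	int flag = 1, a = 10, b = 20;

	/* ADDR_BAD(flag ? a : b) expands to (&flag ? a : b): it takes the
	 * address of `flag` (always non-NULL) and yields `a` -- an int, not
	 * the pointer the caller expected.
	 */
	int v = ADDR_BAD(flag ? a : b);

	/* ADDR_GOOD(flag ? a : b) would expand to (&(flag ? a : b)) and fail
	 * to compile, surfacing the misuse instead of hiding it.
	 */
	printf("%d\n", v);	/* prints 10 */
	return 0;
}
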
BLOCK_CNIG},\n+\t{\"MCP Latched ump_tx\", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},\n+\t{\"MCP Latched scratchpad\", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},\n };\n \n /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */\n@@ -678,10 +684,15 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {\n \t  {\"AVS stop status ready\", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},\n \t  {\"MSTAT\", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},\n \t  {\"MSTAT per-path\", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},\n-\t  {\"Reserved %d\", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,\n-\t   MAX_BLOCK_ID},\n+\t\t\t{\"OPTE\", ATTENTION_PAR, OSAL_NULL, BLOCK_OPTE},\n+\t\t\t{\"MCP\", ATTENTION_PAR, OSAL_NULL, BLOCK_MCP},\n+\t\t\t{\"MS\", ATTENTION_SINGLE, OSAL_NULL, BLOCK_MS},\n+\t\t\t{\"UMAC\", ATTENTION_SINGLE, OSAL_NULL, BLOCK_UMAC},\n+\t\t\t{\"LED\", ATTENTION_SINGLE, OSAL_NULL, BLOCK_LED},\n+\t\t\t{\"BMBN\", ATTENTION_SINGLE, OSAL_NULL, BLOCK_BMBN},\n \t  {\"NIG\", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},\n \t  {\"BMB/OPTE/MCP\", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},\n+\t\t\t{\"BMB\", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},\n \t  {\"BTB\", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},\n \t  {\"BRB\", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},\n \t  {\"PRS\", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},\n@@ -784,10 +795,17 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {\n \t  {\"MCP Latched memory\", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},\n \t  {\"MCP Latched scratchpad cache\", ATTENTION_SINGLE, OSAL_NULL,\n \t   MAX_BLOCK_ID},\n-\t  {\"MCP Latched ump_tx\", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},\n-\t  {\"MCP Latched scratchpad\", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},\n-\t  {\"Reserved %d\", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,\n-\t   MAX_BLOCK_ID},\n+\t  {\"AVS\", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |\n+\t   ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_UMP_TX), OSAL_NULL,\n+\t   BLOCK_AVS_WRAP},\n+\t  {\"AVS\", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |\n+\t   ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_SCPAD), OSAL_NULL,\n+\t   BLOCK_AVS_WRAP},\n+\t  {\"PCIe core\", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},\n+\t  {\"PCIe link up\", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},\n+\t  {\"PCIe hot reset\", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},\n+\t  {\"Reserved %d\", (9 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,\n+\t    MAX_BLOCK_ID},\n \t  }\n \t },\n \n@@ -955,14 +973,22 @@ ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,\n \t/* @DPDK */\n \t/* Reach assertion if attention is fatal */\n \tif (b_fatal || (strcmp(p_bit_name, \"PGLUE B RBC\") == 0)) {\n+#ifndef ASIC_ONLY\n+\t\tDP_NOTICE(p_hwfn, !CHIP_REV_IS_EMUL(p_hwfn->p_dev),\n+\t\t\t  \"`%s': Fatal attention\\n\", p_bit_name);\n+#else\n \t\tDP_NOTICE(p_hwfn, true, \"`%s': Fatal attention\\n\",\n \t\t\t  p_bit_name);\n+#endif\n \n \t\tecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);\n \t}\n \n \t/* Prevent this Attention from being asserted in the future */\n \tif (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||\n+#ifndef ASIC_ONLY\n+\t    CHIP_REV_IS_EMUL(p_hwfn->p_dev) ||\n+#endif\n \t    p_hwfn->p_dev->attn_clr_en) {\n \t\tu32 val;\n \t\tu32 mask = ~bitmask;\n@@ -1013,6 +1039,13 @@ static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,\n \t\tp_aeu->bit_name);\n }\n \n+#define MISC_REG_AEU_AFTER_INVERT_IGU(n) \\\n+\t(MISC_REG_AEU_AFTER_INVERT_1_IGU + (n) * 0x4)\n+\n+#define MISC_REG_AEU_ENABLE_IGU_OUT(n, group) \\\n+\t(MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (n) * 0x4 + \\\n+\t (group) * 0x4 * 
NUM_ATTN_REGS)\n+\n /**\n  * @brief - handles deassertion of previously asserted attentions.\n  *\n@@ -1032,8 +1065,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,\n \t/* Read the attention registers in the AEU */\n \tfor (i = 0; i < NUM_ATTN_REGS; i++) {\n \t\taeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,\n-\t\t\t\t\t  MISC_REG_AEU_AFTER_INVERT_1_IGU +\n-\t\t\t\t\t  i * 0x4);\n+\t\t\t\t\t  MISC_REG_AEU_AFTER_INVERT_IGU(i));\n \t\tDP_VERBOSE(p_hwfn, ECORE_MSG_INTR,\n \t\t\t   \"Deasserted bits [%d]: %08x\\n\", i, aeu_inv_arr[i]);\n \t}\n@@ -1043,7 +1075,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,\n \t\tstruct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];\n \t\tu32 parities;\n \n-\t\taeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);\n+\t\taeu_en = MISC_REG_AEU_ENABLE_IGU_OUT(i, 0);\n \t\ten = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);\n \t\tparities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;\n \n@@ -1074,9 +1106,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,\n \t\tfor (i = 0; i < NUM_ATTN_REGS; i++) {\n \t\t\tu32 bits;\n \n-\t\t\taeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +\n-\t\t\t\t i * sizeof(u32) +\n-\t\t\t\t k * sizeof(u32) * NUM_ATTN_REGS;\n+\t\t\taeu_en = MISC_REG_AEU_ENABLE_IGU_OUT(i, k);\n \t\t\ten = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);\n \t\t\tbits = aeu_inv_arr[i] & en;\n \n@@ -1249,7 +1279,6 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)\n \tstruct ecore_pi_info *pi_info = OSAL_NULL;\n \tstruct ecore_sb_attn_info *sb_attn;\n \tstruct ecore_sb_info *sb_info;\n-\tint arr_size;\n \tu16 rc = 0;\n \n \tif (!p_hwfn)\n@@ -1261,7 +1290,6 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)\n \t}\n \n \tsb_info = &p_hwfn->p_sp_sb->sb_info;\n-\tarr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);\n \tif (!sb_info) {\n \t\tDP_ERR(p_hwfn->p_dev,\n \t\t       \"Status block is NULL - cannot ack interrupts\\n\");\n@@ -1326,14 +1354,14 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)\n \t\tecore_int_attentions(p_hwfn);\n \n \tif (rc & ECORE_SB_IDX) {\n-\t\tint pi;\n+\t\tosal_size_t pi;\n \n \t\t/* Since we only looked at the SB index, it's possible more\n \t\t * than a single protocol-index on the SB incremented.\n \t\t * Iterate over all configured protocol indices and check\n \t\t * whether something happened for each.\n \t\t */\n-\t\tfor (pi = 0; pi < arr_size; pi++) {\n+\t\tfor (pi = 0; pi < p_hwfn->p_sp_sb->pi_info_arr_size; pi++) {\n \t\t\tpi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];\n \t\t\tif (pi_info->comp_cb != OSAL_NULL)\n \t\t\t\tpi_info->comp_cb(p_hwfn, pi_info->cookie);\n@@ -1514,7 +1542,7 @@ static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,\n \tif (IS_VF(p_hwfn->p_dev))\n \t\treturn;/* @@@TBD MichalK- VF CAU... 
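
/* Illustrative sketch (not part of the patch): the MISC_REG_AEU_ENABLE_IGU_OUT()
 * helper introduced above folds the two open-coded address computations in
 * the deassertion loops into one place. The base address and NUM_ATTN_REGS
 * value below are assumptions for the demo.
 */
#include <stdio.h>

#define AEU_ENABLE_BASE	0x9000u	/* hypothetical MISC_REG_AEU_ENABLE1_IGU_OUT_0 */
#define NUM_ATTN_REGS	9

#define AEU_ENABLE_IGU_OUT(n, group) \
	(AEU_ENABLE_BASE + (n) * 0x4 + (group) * 0x4 * NUM_ATTN_REGS)

int main(void)
{
	/* register #2 of group 0, then the same register in group 1 */
	printf("0x%x 0x%x\n",
	       AEU_ENABLE_IGU_OUT(2, 0),	/* 0x9008 */
	       AEU_ENABLE_IGU_OUT(2, 1));	/* 0x902c */
	return 0;
}
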
*/\n \n-\tsb_offset = igu_sb_id * MAX_PIS_PER_SB;\n+\tsb_offset = igu_sb_id * PIS_PER_SB;\n \tOSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));\n \n \tSET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);\n@@ -1623,7 +1651,7 @@ void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,\n {\n \t/* zero status block and ack counter */\n \tsb_info->sb_ack = 0;\n-\tOSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));\n+\tOSAL_MEMSET(sb_info->sb_virt, 0, sb_info->sb_size);\n \n \tif (IS_PF(p_hwfn->p_dev))\n \t\tecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,\n@@ -1706,6 +1734,14 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,\n \t\t\t\t       dma_addr_t sb_phy_addr, u16 sb_id)\n {\n \tsb_info->sb_virt = sb_virt_addr;\n+\tstruct status_block *sb_virt;\n+\n+\tsb_virt = (struct status_block *)sb_info->sb_virt;\n+\n+\tsb_info->sb_size = sizeof(*sb_virt);\n+\tsb_info->sb_pi_array = sb_virt->pi_array;\n+\tsb_info->sb_prod_index = &sb_virt->prod_index;\n+\n \tsb_info->sb_phys = sb_phy_addr;\n \n \tsb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);\n@@ -1737,16 +1773,16 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,\n \t/* The igu address will hold the absolute address that needs to be\n \t * written to for a specific status block\n \t */\n-\tif (IS_PF(p_hwfn->p_dev)) {\n+\tif (IS_PF(p_hwfn->p_dev))\n \t\tsb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +\n-\t\t    GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);\n+\t\t\t\t     GTT_BAR0_MAP_REG_IGU_CMD +\n+\t\t\t\t     (sb_info->igu_sb_id << 3);\n \n-\t} else {\n-\t\tsb_info->igu_addr =\n-\t\t    (u8 OSAL_IOMEM *)p_hwfn->regview +\n+\telse\n+\t\tsb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +\n \t\t    PXP_VF_BAR0_START_IGU +\n-\t\t    ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);\n-\t}\n+\t\t\t\t     ((IGU_CMD_INT_ACK_BASE +\n+\t\t\t\t       sb_info->igu_sb_id) << 3);\n \n \tsb_info->flags |= ECORE_SB_INFO_INIT;\n \n@@ -1767,7 +1803,7 @@ enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,\n \n \t/* zero status block and ack counter */\n \tsb_info->sb_ack = 0;\n-\tOSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));\n+\tOSAL_MEMSET(sb_info->sb_virt, 0, sb_info->sb_size);\n \n \tif (IS_VF(p_hwfn->p_dev)) {\n \t\tecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);\n@@ -1816,11 +1852,10 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,\n \tvoid *p_virt;\n \n \t/* SB struct */\n-\tp_sb =\n-\t    OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,\n-\t\t       sizeof(*p_sb));\n+\tp_sb = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));\n \tif (!p_sb) {\n-\t\tDP_NOTICE(p_hwfn, false, \"Failed to allocate `struct ecore_sb_info'\\n\");\n+\t\tDP_NOTICE(p_hwfn, false,\n+\t\t\t  \"Failed to allocate `struct ecore_sb_info'\\n\");\n \t\treturn ECORE_NOMEM;\n \t}\n \n@@ -1838,7 +1873,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,\n \tecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,\n \t\t\t  p_virt, p_phys, ECORE_SP_SB_ID);\n \n-\tOSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));\n+\tp_sb->pi_info_arr_size = PIS_PER_SB;\n \n \treturn ECORE_SUCCESS;\n }\n@@ -1853,14 +1888,14 @@ enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,\n \tu8 pi;\n \n \t/* Look for a free index */\n-\tfor (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {\n+\tfor (pi = 0; pi < p_sp_sb->pi_info_arr_size; pi++) {\n \t\tif (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)\n \t\t\tcontinue;\n 
\n \t\tp_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;\n \t\tp_sp_sb->pi_info_arr[pi].cookie = cookie;\n \t\t*sb_idx = pi;\n-\t\t*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];\n+\t\t*p_fw_cons = &p_sp_sb->sb_info.sb_pi_array[pi];\n \t\trc = ECORE_SUCCESS;\n \t\tbreak;\n \t}\n@@ -1988,10 +2023,9 @@ static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,\n \t\t\t\t     bool cleanup_set,\n \t\t\t\t     u16 opaque_fid)\n {\n-\tu32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;\n-\tu32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;\n-\tu32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;\n-\tu8 type = 0;\t\t/* FIXME MichalS type??? */\n+\tu32 data = 0, cmd_ctrl = 0, sb_bit, sb_bit_addr, pxp_addr;\n+\tu32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH, val;\n+\tu8 type = 0;\n \n \tOSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -\n \t\t\t   IGU_REG_CLEANUP_STATUS_0) != 0x200);\n@@ -2006,6 +2040,7 @@ static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,\n \tSET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);\n \n \t/* Set the control register */\n+\tpxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;\n \tSET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);\n \tSET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);\n \tSET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);\n@@ -2077,9 +2112,11 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,\n \t\t\t  igu_sb_id);\n \n \t/* Clear the CAU for the SB */\n-\tfor (pi = 0; pi < 12; pi++)\n+\tfor (pi = 0; pi < PIS_PER_SB; pi++)\n \t\tecore_wr(p_hwfn, p_ptt,\n-\t\t\t CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);\n+\t\t\t CAU_REG_PI_MEMORY +\n+\t\t\t (igu_sb_id * PIS_PER_SB + pi) * 4,\n+\t\t\t 0);\n }\n \n void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,\n@@ -2679,12 +2716,12 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t  struct ecore_sb_info_dbg *p_info)\n {\n \tu16 sbid = p_sb->igu_sb_id;\n-\tint i;\n+\tu32 i;\n \n \tif (IS_VF(p_hwfn->p_dev))\n \t\treturn ECORE_INVAL;\n \n-\tif (sbid > NUM_OF_SBS(p_hwfn->p_dev))\n+\tif (sbid >= NUM_OF_SBS(p_hwfn->p_dev))\n \t\treturn ECORE_INVAL;\n \n \tp_info->igu_prod = ecore_rd(p_hwfn, p_ptt,\n@@ -2692,10 +2729,10 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,\n \tp_info->igu_cons = ecore_rd(p_hwfn, p_ptt,\n \t\t\t\t    IGU_REG_CONSUMER_MEM + sbid * 4);\n \n-\tfor (i = 0; i < MAX_PIS_PER_SB; i++)\n+\tfor (i = 0; i < PIS_PER_SB; i++)\n \t\tp_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,\n \t\t\t\t\t      CAU_REG_PI_MEMORY +\n-\t\t\t\t\t      sbid * 4 * MAX_PIS_PER_SB +\n+\t\t\t\t\t      sbid * 4 * PIS_PER_SB +\n \t\t\t\t\t      i * 4);\n \n \treturn ECORE_SUCCESS;\ndiff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h\nindex abea2a716..d7b6b86cc 100644\n--- a/drivers/net/qede/base/ecore_int_api.h\n+++ b/drivers/net/qede/base/ecore_int_api.h\n@@ -24,7 +24,12 @@ enum ecore_int_mode {\n #endif\n \n struct ecore_sb_info {\n-\tstruct status_block *sb_virt;\n+\tvoid *sb_virt; /* ptr to \"struct status_block_e{4,5}\" */\n+\tu32 sb_size; /* size of \"struct status_block_e{4,5}\" */\n+\t__le16 *sb_pi_array; /* ptr to \"sb_virt->pi_array\" */\n+\t__le32 *sb_prod_index; /* ptr to \"sb_virt->prod_index\" */\n+#define STATUS_BLOCK_PROD_INDEX_MASK\t0xFFFFFF\n+\n \tdma_addr_t sb_phys;\n \tu32 sb_ack;\t\t/* Last given ack */\n \tu16 igu_sb_id;\n@@ -42,7 +47,7 @@ struct ecore_sb_info {\n struct ecore_sb_info_dbg {\n \tu32 igu_prod;\n \tu32 igu_cons;\n-\tu16 pi[MAX_PIS_PER_SB];\n+\tu16 
pi[PIS_PER_SB];\n };\n \n struct ecore_sb_cnt_info {\n@@ -64,7 +69,7 @@ static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)\n \n \t/* barrier(); status block is written to by the chip */\n \t/* FIXME: need some sort of barrier. */\n-\tprod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &\n+\tprod = OSAL_LE32_TO_CPU(*sb_info->sb_prod_index) &\n \t       STATUS_BLOCK_PROD_INDEX_MASK;\n \tif (sb_info->sb_ack != prod) {\n \t\tsb_info->sb_ack = prod;\ndiff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c\nindex 5dcdc84fc..b20d83762 100644\n--- a/drivers/net/qede/base/ecore_l2.c\n+++ b/drivers/net/qede/base/ecore_l2.c\n@@ -2323,18 +2323,17 @@ ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn,\n \t\t\t   struct ecore_ptt *p_ptt,\n \t\t\t   struct ecore_queue_cid *p_cid, u32 rate)\n {\n-\tstruct ecore_mcp_link_state *p_link;\n+\tu16 rl_id;\n \tu8 vport;\n \n \tvport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id);\n-\tp_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;\n \n \tDP_VERBOSE(p_hwfn, ECORE_MSG_LINK,\n \t\t   \"About to rate limit qm vport %d for queue %d with rate %d\\n\",\n \t\t   vport, p_cid->rel.queue_id, rate);\n \n-\treturn ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,\n-\t\t\t\t   p_link->speed);\n+\trl_id = vport; /* The \"rl_id\" is set as the \"vport_id\" */\n+\treturn ecore_init_global_rl(p_hwfn, p_ptt, rl_id, rate);\n }\n \n #define RSS_TSTORM_UPDATE_STATUS_MAX_POLL_COUNT    100\n@@ -2358,8 +2357,7 @@ ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,\n \tif (rc != ECORE_SUCCESS)\n \t\treturn rc;\n \n-\taddr = (u8 OSAL_IOMEM *)p_hwfn->regview +\n-\t       GTT_BAR0_MAP_REG_TSDM_RAM +\n+\taddr = (u8 *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +\n \t       TSTORM_ETH_RSS_UPDATE_OFFSET(p_hwfn->rel_pf_id);\n \n \t*(u64 *)(&update_data) = DIRECT_REG_RD64(p_hwfn, addr);\ndiff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h\nindex acde81fad..bebf412ed 100644\n--- a/drivers/net/qede/base/ecore_l2_api.h\n+++ b/drivers/net/qede/base/ecore_l2_api.h\n@@ -302,6 +302,8 @@ struct ecore_sp_vport_start_params {\n \tbool b_err_big_pkt;\n \tbool b_err_anti_spoof;\n \tbool b_err_ctrl_frame;\n+\tbool b_en_rgfs;\n+\tbool b_en_tgfs;\n };\n \n /**\ndiff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c\nindex 6559d8040..a5aa07438 100644\n--- a/drivers/net/qede/base/ecore_mcp.c\n+++ b/drivers/net/qede/base/ecore_mcp.c\n@@ -22,13 +22,23 @@\n #include \"ecore_sp_commands.h\"\n #include \"ecore_cxt.h\"\n \n-#define CHIP_MCP_RESP_ITER_US 10\n-#define EMUL_MCP_RESP_ITER_US (1000 * 1000)\n #define GRCBASE_MCP\t0xe00000\n \n+#define ECORE_MCP_RESP_ITER_US\t\t10\n #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)\t/* Account for 5 sec */\n #define ECORE_MCP_RESET_RETRIES (50 * 1000)\t/* Account for 500 msec */\n \n+#ifndef ASIC_ONLY\n+/* Non-ASIC:\n+ * The waiting interval is multiplied by 100 to reduce the impact of the\n+ * built-in delay of 100usec in each ecore_rd().\n+ * In addition, a factor of 4 comparing to ASIC is applied.\n+ */\n+#define ECORE_EMUL_MCP_RESP_ITER_US\t(ECORE_MCP_RESP_ITER_US * 100)\n+#define ECORE_EMUL_DRV_MB_MAX_RETRIES\t((ECORE_DRV_MB_MAX_RETRIES / 100) * 4)\n+#define ECORE_EMUL_MCP_RESET_RETRIES\t((ECORE_MCP_RESET_RETRIES / 100) * 4)\n+#endif\n+\n #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \\\n \tecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \\\n \t\t _val)\n@@ -186,22 +196,23 @@ static 
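
/* Illustrative sketch (not part of the patch) of the layout-independent
 * status block handling introduced above: sb_virt becomes a void pointer and
 * the hot fields (pi_array, prod_index) are cached once at init, so the fast
 * path never dereferences a chip-specific struct. The status_block layout
 * below is hypothetical, standing in for status_block_e4/_e5.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define PROD_INDEX_MASK	0xFFFFFFu	/* as STATUS_BLOCK_PROD_INDEX_MASK */

struct status_block {			/* hypothetical chip layout */
	uint16_t pi_array[12];
	uint32_t prod_index;
};

struct sb_info {
	void *sb_virt;
	uint32_t sb_size;
	uint16_t *sb_pi_array;		/* cached &sb_virt->pi_array */
	uint32_t *sb_prod_index;	/* cached &sb_virt->prod_index */
	uint32_t sb_ack;		/* last acknowledged producer */
};

static void sb_init(struct sb_info *info, void *virt)
{
	struct status_block *sb = virt;

	info->sb_virt = virt;
	info->sb_size = sizeof(*sb);
	info->sb_pi_array = sb->pi_array;
	info->sb_prod_index = &sb->prod_index;
	info->sb_ack = 0;
	memset(virt, 0, info->sb_size);	/* zero by cached size, not by type */
}

static uint16_t sb_update_idx(struct sb_info *info)
{
	uint32_t prod = *info->sb_prod_index & PROD_INDEX_MASK;

	if (info->sb_ack == prod)
		return 0;
	info->sb_ack = prod;
	return 1;	/* something changed on the SB */
}

int main(void)
{
	struct status_block sb;
	struct sb_info info;
	unsigned int changed;

	sb_init(&info, &sb);
	sb.prod_index = 5;		/* simulate a chip update */
	changed = sb_update_idx(&info);
	printf("changed=%u ack=%u\n", changed, (unsigned)info.sb_ack);
	return 0;
}
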
enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t\t   struct ecore_ptt *p_ptt)\n {\n \tstruct ecore_mcp_info *p_info = p_hwfn->mcp_info;\n+\tu32 drv_mb_offsize, mfw_mb_offsize, val;\n \tu8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;\n \tu8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;\n-\tu32 drv_mb_offsize, mfw_mb_offsize;\n \tu32 mcp_pf_id = MCP_PF_ID(p_hwfn);\n \n+\tval = ecore_rd(p_hwfn, p_ptt, MCP_REG_CACHE_PAGING_ENABLE);\n+\tp_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);\n+\tif (!p_info->public_base) {\n+\t\tDP_NOTICE(p_hwfn, false,\n+\t\t\t  \"The address of the MCP scratch-pad is not configured\\n\");\n #ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {\n-\t\tDP_NOTICE(p_hwfn, false, \"Emulation - assume no MFW\\n\");\n-\t\tp_info->public_base = 0;\n-\t\treturn ECORE_INVAL;\n-\t}\n+\t\t/* Zeroed \"public_base\" implies no MFW */\n+\t\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev))\n+\t\t\tDP_INFO(p_hwfn, \"Emulation: Assume no MFW\\n\");\n #endif\n-\n-\tp_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);\n-\tif (!p_info->public_base)\n \t\treturn ECORE_INVAL;\n+\t}\n \n \tp_info->public_base |= GRCBASE_MCP;\n \n@@ -293,7 +304,7 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,\n \n \tif (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {\n \t\tDP_NOTICE(p_hwfn, false, \"MCP is not initialized\\n\");\n-\t\t/* Do not free mcp_info here, since public_base indicate that\n+\t\t/* Do not free mcp_info here, since \"public_base\" indicates that\n \t\t * the MCP is not initialized\n \t\t */\n \t\treturn ECORE_SUCCESS;\n@@ -334,14 +345,16 @@ static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,\n enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,\n \t\t\t\t     struct ecore_ptt *p_ptt)\n {\n-\tu32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;\n+\tu32 prev_generic_por_0, seq, delay = ECORE_MCP_RESP_ITER_US, cnt = 0;\n+\tu32 retries = ECORE_MCP_RESET_RETRIES;\n \tenum _ecore_status_t rc = ECORE_SUCCESS;\n \n #ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev))\n-\t\tdelay = EMUL_MCP_RESP_ITER_US;\n+\tif (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {\n+\t\tdelay = ECORE_EMUL_MCP_RESP_ITER_US;\n+\t\tretries = ECORE_EMUL_MCP_RESET_RETRIES;\n+\t}\n #endif\n-\n \tif (p_hwfn->mcp_info->b_block_cmd) {\n \t\tDP_NOTICE(p_hwfn, false,\n \t\t\t  \"The MFW is not responsive. 
Avoid sending MCP_RESET mailbox command.\\n\");\n@@ -351,23 +364,24 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,\n \t/* Ensure that only a single thread is accessing the mailbox */\n \tOSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);\n \n-\torg_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);\n+\tprev_generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);\n \n \t/* Set drv command along with the updated sequence */\n \tecore_mcp_reread_offsets(p_hwfn, p_ptt);\n \tseq = ++p_hwfn->mcp_info->drv_mb_seq;\n \tDRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));\n \n+\t/* Give the MFW up to 500 second (50*1000*10usec) to resume */\n \tdo {\n-\t\t/* Wait for MFW response */\n \t\tOSAL_UDELAY(delay);\n-\t\t/* Give the FW up to 500 second (50*1000*10usec) */\n-\t} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,\n-\t\t\t\t\t\tMISCS_REG_GENERIC_POR_0)) &&\n-\t\t (cnt++ < ECORE_MCP_RESET_RETRIES));\n \n-\tif (org_mcp_reset_seq !=\n-\t    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {\n+\t\tif (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=\n+\t\t    prev_generic_por_0)\n+\t\t\tbreak;\n+\t} while (cnt++ < retries);\n+\n+\tif (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=\n+\t    prev_generic_por_0) {\n \t\tDP_VERBOSE(p_hwfn, ECORE_MSG_SP,\n \t\t\t   \"MCP was reset after %d usec\\n\", cnt * delay);\n \t} else {\n@@ -380,6 +394,71 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,\n \treturn rc;\n }\n \n+#ifndef ASIC_ONLY\n+static void ecore_emul_mcp_load_req(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t    struct ecore_mcp_mb_params *p_mb_params)\n+{\n+\tif (GET_MFW_FIELD(p_mb_params->param, DRV_ID_MCP_HSI_VER) !=\n+\t    1 /* ECORE_LOAD_REQ_HSI_VER_1 */) {\n+\t\tp_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1;\n+\t\treturn;\n+\t}\n+\n+\tif (!loaded)\n+\t\tp_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;\n+\telse if (!loaded_port[p_hwfn->port_id])\n+\t\tp_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_PORT;\n+\telse\n+\t\tp_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_FUNCTION;\n+\n+\t/* On CMT, always tell that it's engine */\n+\tif (ECORE_IS_CMT(p_hwfn->p_dev))\n+\t\tp_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;\n+\n+\tloaded++;\n+\tloaded_port[p_hwfn->port_id]++;\n+\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_SP,\n+\t\t   \"Load phase: 0x%08x load cnt: 0x%x port id=%d port_load=%d\\n\",\n+\t\t   p_mb_params->mcp_resp, loaded, p_hwfn->port_id,\n+\t\t   loaded_port[p_hwfn->port_id]);\n+}\n+\n+static void ecore_emul_mcp_unload_req(struct ecore_hwfn *p_hwfn)\n+{\n+\tloaded--;\n+\tloaded_port[p_hwfn->port_id]--;\n+\tDP_VERBOSE(p_hwfn, ECORE_MSG_SP, \"Unload cnt: 0x%x\\n\", loaded);\n+}\n+\n+static enum _ecore_status_t\n+ecore_emul_mcp_cmd(struct ecore_hwfn *p_hwfn,\n+\t\t   struct ecore_mcp_mb_params *p_mb_params)\n+{\n+\tif (!CHIP_REV_IS_EMUL(p_hwfn->p_dev))\n+\t\treturn ECORE_INVAL;\n+\n+\tswitch (p_mb_params->cmd) {\n+\tcase DRV_MSG_CODE_LOAD_REQ:\n+\t\tecore_emul_mcp_load_req(p_hwfn, p_mb_params);\n+\t\tbreak;\n+\tcase DRV_MSG_CODE_UNLOAD_REQ:\n+\t\tecore_emul_mcp_unload_req(p_hwfn);\n+\t\tbreak;\n+\tcase DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT:\n+\tcase DRV_MSG_CODE_RESOURCE_CMD:\n+\tcase DRV_MSG_CODE_MDUMP_CMD:\n+\tcase DRV_MSG_CODE_GET_ENGINE_CONFIG:\n+\tcase DRV_MSG_CODE_GET_PPFID_BITMAP:\n+\t\treturn ECORE_NOTIMPL;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\treturn ECORE_SUCCESS;\n+}\n+#endif\n+\n /* Must be called while cmd_lock is acquired */\n static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn 
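
/* Illustrative sketch (not part of the patch) of the load-phase bookkeeping
 * in ecore_emul_mcp_load_req() above: the first function to load owns the
 * engine init, the first on each port owns the port init, and everyone else
 * is function-only. MAX_PORTS and the phase codes are stand-ins for the
 * FW_MSG_CODE_DRV_LOAD_* responses.
 */
#include <stdio.h>

#define MAX_PORTS 4

enum load_phase { LOAD_ENGINE, LOAD_PORT, LOAD_FUNCTION };

static int loaded;
static int loaded_port[MAX_PORTS];

static enum load_phase emul_load_req(int port_id)
{
	enum load_phase phase;

	if (!loaded)
		phase = LOAD_ENGINE;
	else if (!loaded_port[port_id])
		phase = LOAD_PORT;
	else
		phase = LOAD_FUNCTION;

	loaded++;
	loaded_port[port_id]++;
	return phase;
}

int main(void)
{
	printf("%d\n", emul_load_req(0));	/* 0: LOAD_ENGINE */
	printf("%d\n", emul_load_req(1));	/* 1: LOAD_PORT */
	printf("%d\n", emul_load_req(0));	/* 2: LOAD_FUNCTION */
	return 0;
}
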
*p_hwfn)\n {\n@@ -488,13 +567,18 @@ void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,\n \t\t\t      struct ecore_ptt *p_ptt)\n {\n \tu32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;\n+\tu32 delay = ECORE_MCP_RESP_ITER_US;\n \n+#ifndef ASIC_ONLY\n+\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev))\n+\t\tdelay = ECORE_EMUL_MCP_RESP_ITER_US;\n+#endif\n \tcpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);\n \tcpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);\n \tcpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);\n-\tOSAL_UDELAY(CHIP_MCP_RESP_ITER_US);\n+\tOSAL_UDELAY(delay);\n \tcpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);\n-\tOSAL_UDELAY(CHIP_MCP_RESP_ITER_US);\n+\tOSAL_UDELAY(delay);\n \tcpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);\n \n \tDP_NOTICE(p_hwfn, false,\n@@ -617,15 +701,21 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,\n {\n \tosal_size_t union_data_size = sizeof(union drv_union_data);\n \tu32 max_retries = ECORE_DRV_MB_MAX_RETRIES;\n-\tu32 delay = CHIP_MCP_RESP_ITER_US;\n+\tu32 usecs = ECORE_MCP_RESP_ITER_US;\n \n #ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev))\n-\t\tdelay = EMUL_MCP_RESP_ITER_US;\n-\t/* There is a built-in delay of 100usec in each MFW response read */\n-\tif (CHIP_REV_IS_FPGA(p_hwfn->p_dev))\n-\t\tmax_retries /= 10;\n+\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn))\n+\t\treturn ecore_emul_mcp_cmd(p_hwfn, p_mb_params);\n+\n+\tif (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {\n+\t\tmax_retries = ECORE_EMUL_DRV_MB_MAX_RETRIES;\n+\t\tusecs = ECORE_EMUL_MCP_RESP_ITER_US;\n+\t}\n #endif\n+\tif (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {\n+\t\tmax_retries = DIV_ROUND_UP(max_retries, 1000);\n+\t\tusecs *= 1000;\n+\t}\n \n \t/* MCP not initialized */\n \tif (!ecore_mcp_is_init(p_hwfn)) {\n@@ -650,7 +740,7 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,\n \t}\n \n \treturn _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,\n-\t\t\t\t\tdelay);\n+\t\t\t\t\tusecs);\n }\n \n enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,\n@@ -660,18 +750,6 @@ enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,\n \tstruct ecore_mcp_mb_params mb_params;\n \tenum _ecore_status_t rc;\n \n-#ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {\n-\t\tif (cmd == DRV_MSG_CODE_UNLOAD_REQ) {\n-\t\t\tloaded--;\n-\t\t\tloaded_port[p_hwfn->port_id]--;\n-\t\t\tDP_VERBOSE(p_hwfn, ECORE_MSG_SP, \"Unload cnt: 0x%x\\n\",\n-\t\t\t\t   loaded);\n-\t\t}\n-\t\treturn ECORE_SUCCESS;\n-\t}\n-#endif\n-\n \tOSAL_MEM_ZERO(&mb_params, sizeof(mb_params));\n \tmb_params.cmd = cmd;\n \tmb_params.param = param;\n@@ -745,34 +823,6 @@ enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,\n \treturn ECORE_SUCCESS;\n }\n \n-#ifndef ASIC_ONLY\n-static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,\n-\t\t\t\t    u32 *p_load_code)\n-{\n-\tstatic int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;\n-\n-\tif (!loaded)\n-\t\tload_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;\n-\telse if (!loaded_port[p_hwfn->port_id])\n-\t\tload_phase = FW_MSG_CODE_DRV_LOAD_PORT;\n-\telse\n-\t\tload_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;\n-\n-\t/* On CMT, always tell that it's engine */\n-\tif (ECORE_IS_CMT(p_hwfn->p_dev))\n-\t\tload_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;\n-\n-\t*p_load_code = load_phase;\n-\tloaded++;\n-\tloaded_port[p_hwfn->port_id]++;\n-\n-\tDP_VERBOSE(p_hwfn, ECORE_MSG_SP,\n-\t\t   \"Load phase: %x load cnt: 0x%x port id=%d port_load=%d\\n\",\n-\t\t   *p_load_code, loaded, 
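
/* Illustrative sketch (not part of the patch) of the CAN_SLEEP rescaling in
 * ecore_mcp_cmd_and_union() above: the total wait budget (retries * interval)
 * is preserved while the polling granularity changes from 10us busy spins to
 * ~10ms sleeps.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int max_retries = 500 * 1000;	/* as ECORE_DRV_MB_MAX_RETRIES */
	unsigned int usecs = 10;		/* as ECORE_MCP_RESP_ITER_US */

	printf("busy-poll budget: %u us\n", max_retries * usecs);

	/* sleeping variant: 1000x longer interval, 1000x fewer retries */
	max_retries = DIV_ROUND_UP(max_retries, 1000);
	usecs *= 1000;
	printf("sleeping budget:  %u us (%u retries of %u us)\n",
	       max_retries * usecs, max_retries, usecs);
	return 0;
}
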
p_hwfn->port_id,\n-\t\t   loaded_port[p_hwfn->port_id]);\n-}\n-#endif\n-\n static bool\n ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,\n \t\t\t enum ecore_override_force_load override_force_load)\n@@ -1004,13 +1054,6 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,\n \tu8 mfw_drv_role = 0, mfw_force_cmd;\n \tenum _ecore_status_t rc;\n \n-#ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {\n-\t\tecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);\n-\t\treturn ECORE_SUCCESS;\n-\t}\n-#endif\n-\n \tOSAL_MEM_ZERO(&in_params, sizeof(in_params));\n \tin_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;\n \tin_params.drv_ver_0 = ECORE_VERSION;\n@@ -1166,15 +1209,17 @@ static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,\n \tu32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);\n \tu32 path_addr = SECTION_ADDR(mfw_path_offsize,\n \t\t\t\t     ECORE_PATH_ID(p_hwfn));\n-\tu32 disabled_vfs[VF_MAX_STATIC / 32];\n+\tu32 disabled_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];\n \tint i;\n \n+\tOSAL_MEM_ZERO(disabled_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);\n+\n \tDP_VERBOSE(p_hwfn, ECORE_MSG_SP,\n \t\t   \"Reading Disabled VF information from [offset %08x],\"\n \t\t   \" path_addr %08x\\n\",\n \t\t   mfw_path_offsize, path_addr);\n \n-\tfor (i = 0; i < (VF_MAX_STATIC / 32); i++) {\n+\tfor (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++) {\n \t\tdisabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,\n \t\t\t\t\t   path_addr +\n \t\t\t\t\t   OFFSETOF(struct public_path,\n@@ -1193,16 +1238,11 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t  struct ecore_ptt *p_ptt,\n \t\t\t\t\t  u32 *vfs_to_ack)\n {\n-\tu32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,\n-\t\t\t\t\tPUBLIC_FUNC);\n-\tu32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);\n-\tu32 func_addr = SECTION_ADDR(mfw_func_offsize,\n-\t\t\t\t     MCP_PF_ID(p_hwfn));\n \tstruct ecore_mcp_mb_params mb_params;\n \tenum _ecore_status_t rc;\n-\tint i;\n+\tu16 i;\n \n-\tfor (i = 0; i < (VF_MAX_STATIC / 32); i++)\n+\tfor (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)\n \t\tDP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),\n \t\t\t   \"Acking VFs [%08x,...,%08x] - %08x\\n\",\n \t\t\t   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);\n@@ -1210,7 +1250,7 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,\n \tOSAL_MEM_ZERO(&mb_params, sizeof(mb_params));\n \tmb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;\n \tmb_params.p_data_src = vfs_to_ack;\n-\tmb_params.data_src_size = VF_MAX_STATIC / 8;\n+\tmb_params.data_src_size = (u8)VF_BITMAP_SIZE_IN_BYTES;\n \trc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,\n \t\t\t\t     &mb_params);\n \tif (rc != ECORE_SUCCESS) {\n@@ -1219,13 +1259,6 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,\n \t\treturn ECORE_TIMEOUT;\n \t}\n \n-\t/* TMP - clear the ACK bits; should be done by MFW */\n-\tfor (i = 0; i < (VF_MAX_STATIC / 32); i++)\n-\t\tecore_wr(p_hwfn, p_ptt,\n-\t\t\t func_addr +\n-\t\t\t OFFSETOF(struct public_func, drv_ack_vf_disabled) +\n-\t\t\t i * sizeof(u32), 0);\n-\n \treturn rc;\n }\n \n@@ -1471,8 +1504,11 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,\n \tu32 cmd;\n \n #ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev))\n+\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {\n+\t\tif (b_up)\n+\t\t\tOSAL_LINK_UPDATE(p_hwfn);\n \t\treturn ECORE_SUCCESS;\n+\t}\n #endif\n \n \t/* Set the shmem configuration according to params */\n@@ -1853,6 +1889,13 @@ ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct 
ecore_ptt *p_ptt,\n \tstruct mdump_config_stc mdump_config;\n \tenum _ecore_status_t rc;\n \n+#ifndef ASIC_ONLY\n+\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {\n+\t\tDP_INFO(p_hwfn, \"Emulation: Can't get mdump info\\n\");\n+\t\treturn ECORE_NOTIMPL;\n+\t}\n+#endif\n+\n \tOSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));\n \n \taddr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,\n@@ -2042,6 +2085,9 @@ ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)\n \t/* update storm FW with negotiation results */\n \tecore_sp_pf_update_ufp(p_hwfn);\n \n+\t/* update stag pcp value */\n+\tecore_sp_pf_update_stag(p_hwfn);\n+\n \treturn ECORE_SUCCESS;\n }\n \n@@ -2159,9 +2205,9 @@ enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,\n \tu32 global_offsize;\n \n #ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {\n-\t\tDP_NOTICE(p_hwfn, false, \"Emulation - can't get MFW version\\n\");\n-\t\treturn ECORE_SUCCESS;\n+\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {\n+\t\tDP_INFO(p_hwfn, \"Emulation: Can't get MFW version\\n\");\n+\t\treturn ECORE_NOTIMPL;\n \t}\n #endif\n \n@@ -2203,26 +2249,29 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t      struct ecore_ptt *p_ptt,\n \t\t\t\t\t      u32 *p_media_type)\n {\n-\tenum _ecore_status_t rc = ECORE_SUCCESS;\n+\t*p_media_type = MEDIA_UNSPECIFIED;\n \n \t/* TODO - Add support for VFs */\n \tif (IS_VF(p_hwfn->p_dev))\n \t\treturn ECORE_INVAL;\n \n \tif (!ecore_mcp_is_init(p_hwfn)) {\n+#ifndef ASIC_ONLY\n+\t\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {\n+\t\t\tDP_INFO(p_hwfn, \"Emulation: Can't get media type\\n\");\n+\t\t\treturn ECORE_NOTIMPL;\n+\t\t}\n+#endif\n \t\tDP_NOTICE(p_hwfn, false, \"MFW is not initialized!\\n\");\n \t\treturn ECORE_BUSY;\n \t}\n \n-\tif (!p_ptt) {\n-\t\t*p_media_type = MEDIA_UNSPECIFIED;\n-\t\trc = ECORE_INVAL;\n-\t} else {\n-\t\t*p_media_type = ecore_rd(p_hwfn, p_ptt,\n-\t\t\t\t\t p_hwfn->mcp_info->port_addr +\n-\t\t\t\t\t OFFSETOF(struct public_port,\n-\t\t\t\t\t\t  media_type));\n-\t}\n+\tif (!p_ptt)\n+\t\treturn ECORE_INVAL;\n+\n+\t*p_media_type = ecore_rd(p_hwfn, p_ptt,\n+\t\t\t\t p_hwfn->mcp_info->port_addr +\n+\t\t\t\t OFFSETOF(struct public_port, media_type));\n \n \treturn ECORE_SUCCESS;\n }\n@@ -2626,9 +2675,9 @@ enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,\n \tu32 flash_size;\n \n #ifndef ASIC_ONLY\n-\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {\n-\t\tDP_NOTICE(p_hwfn, false, \"Emulation - can't get flash size\\n\");\n-\t\treturn ECORE_INVAL;\n+\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {\n+\t\tDP_INFO(p_hwfn, \"Emulation: Can't get flash size\\n\");\n+\t\treturn ECORE_NOTIMPL;\n \t}\n #endif\n \n@@ -2725,6 +2774,16 @@ enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t      struct ecore_ptt *p_ptt,\n \t\t\t\t\t      u8 vf_id, u8 num)\n {\n+#ifndef ASIC_ONLY\n+\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {\n+\t\tDP_INFO(p_hwfn,\n+\t\t\t\"Emulation: Avoid sending the %s mailbox command\\n\",\n+\t\t\tECORE_IS_BB(p_hwfn->p_dev) ? 
\"CFG_VF_MSIX\" :\n+\t\t\t\t\t\t     \"CFG_PF_VFS_MSIX\");\n+\t\treturn ECORE_SUCCESS;\n+\t}\n+#endif\n+\n \tif (ECORE_IS_BB(p_hwfn->p_dev))\n \t\treturn ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);\n \telse\ndiff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h\nindex 2c052b7fa..185cc2339 100644\n--- a/drivers/net/qede/base/ecore_mcp.h\n+++ b/drivers/net/qede/base/ecore_mcp.h\n@@ -75,11 +75,16 @@ struct ecore_mcp_mb_params {\n \tu32 cmd;\n \tu32 param;\n \tvoid *p_data_src;\n-\tu8 data_src_size;\n \tvoid *p_data_dst;\n-\tu8 data_dst_size;\n \tu32 mcp_resp;\n \tu32 mcp_param;\n+\tu8 data_src_size;\n+\tu8 data_dst_size;\n+\tu32 flags;\n+#define ECORE_MB_FLAG_CAN_SLEEP         (0x1 << 0)\n+#define ECORE_MB_FLAG_AVOID_BLOCK       (0x1 << 1)\n+#define ECORE_MB_FLAGS_IS_SET(params, flag) \\\n+\t((params) != OSAL_NULL && ((params)->flags & ECORE_MB_FLAG_##flag))\n };\n \n struct ecore_drv_tlv_hdr {\ndiff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h\nindex f91b25e20..64509f7cc 100644\n--- a/drivers/net/qede/base/ecore_proto_if.h\n+++ b/drivers/net/qede/base/ecore_proto_if.h\n@@ -62,6 +62,7 @@ struct ecore_iscsi_pf_params {\n \tu8\t\tnum_uhq_pages_in_ring;\n \tu8\t\tnum_queues;\n \tu8\t\tlog_page_size;\n+\tu8\t\tlog_page_size_conn;\n \tu8\t\trqe_log_size;\n \tu8\t\tmax_fin_rt;\n \tu8\t\tgl_rq_pi;\ndiff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c\nindex 49a5ff552..9860a62b5 100644\n--- a/drivers/net/qede/base/ecore_sp_commands.c\n+++ b/drivers/net/qede/base/ecore_sp_commands.c\n@@ -355,14 +355,16 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,\n \t\tp_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;\n \n \t/* enable_stag_pri_change should be set if port is in BD mode or,\n-\t * UFP with Host Control mode or, UFP with DCB over base interface.\n+\t * UFP with Host Control mode.\n \t */\n \tif (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {\n-\t\tif ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||\n-\t\t    (p_hwfn->p_dcbx_info->results.dcbx_enabled))\n+\t\tif (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)\n \t\t\tp_ramrod->outer_tag_config.enable_stag_pri_change = 1;\n \t\telse\n \t\t\tp_ramrod->outer_tag_config.enable_stag_pri_change = 0;\n+\n+\t\tp_ramrod->outer_tag_config.outer_tag.tci |=\n+\t\t\tOSAL_CPU_TO_LE16(((u16)p_hwfn->ufp_info.tc << 13));\n \t}\n \n \t/* Place EQ address in RAMROD */\n@@ -459,8 +461,7 @@ enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)\n \t\treturn rc;\n \n \tp_ent->ramrod.pf_update.update_enable_stag_pri_change = true;\n-\tif ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||\n-\t    (p_hwfn->p_dcbx_info->results.dcbx_enabled))\n+\tif (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)\n \t\tp_ent->ramrod.pf_update.enable_stag_pri_change = 1;\n \telse\n \t\tp_ent->ramrod.pf_update.enable_stag_pri_change = 0;\n@@ -637,6 +638,10 @@ enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)\n \tif (rc != ECORE_SUCCESS)\n \t\treturn rc;\n \n+\tif (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))\n+\t\tp_ent->ramrod.pf_update.mf_vlan |=\n+\t\t\tOSAL_CPU_TO_LE16(((u16)p_hwfn->ufp_info.tc << 13));\n+\n \treturn ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);\n }\n \ndiff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c\nindex 486b21dd9..6c386821f 100644\n--- a/drivers/net/qede/base/ecore_spq.c\n+++ 
b/drivers/net/qede/base/ecore_spq.c\n@@ -185,11 +185,26 @@ ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)\n /***************************************************************************\n  * HSI access\n  ***************************************************************************/\n+\n+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK\t\t\t0x1\n+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT\t\t\t0\n+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK\t\t0x1\n+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT\t\t7\n+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK\t\t0x1\n+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT\t\t4\n+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK\t0x1\n+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT\t6\n+\n static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,\n \t\t\t\t    struct ecore_spq *p_spq)\n {\n+\t__le32 *p_spq_base_lo, *p_spq_base_hi;\n+\tstruct regpair *p_consolid_base_addr;\n+\tu8 *p_flags1, *p_flags9, *p_flags10;\n \tstruct core_conn_context *p_cxt;\n \tstruct ecore_cxt_info cxt_info;\n+\tu32 core_conn_context_size;\n+\t__le16 *p_physical_q0;\n \tu16 physical_q;\n \tenum _ecore_status_t rc;\n \n@@ -197,41 +212,39 @@ static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,\n \n \trc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);\n \n-\tif (rc < 0) {\n+\tif (rc != ECORE_SUCCESS) {\n \t\tDP_NOTICE(p_hwfn, true, \"Cannot find context info for cid=%d\\n\",\n \t\t\t  p_spq->cid);\n \t\treturn;\n \t}\n \n \tp_cxt = cxt_info.p_cxt;\n+\tcore_conn_context_size = sizeof(*p_cxt);\n+\tp_flags1 = &p_cxt->xstorm_ag_context.flags1;\n+\tp_flags9 = &p_cxt->xstorm_ag_context.flags9;\n+\tp_flags10 = &p_cxt->xstorm_ag_context.flags10;\n+\tp_physical_q0 = &p_cxt->xstorm_ag_context.physical_q0;\n+\tp_spq_base_lo = &p_cxt->xstorm_st_context.spq_base_lo;\n+\tp_spq_base_hi = &p_cxt->xstorm_st_context.spq_base_hi;\n+\tp_consolid_base_addr = &p_cxt->xstorm_st_context.consolid_base_addr;\n \n \t/* @@@TBD we zero the context until we have ilt_reset implemented. 
*/\n-\tOSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));\n-\n-\tif (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {\n-\t\tSET_FIELD(p_cxt->xstorm_ag_context.flags10,\n-\t\t\t  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);\n-\t\tSET_FIELD(p_cxt->xstorm_ag_context.flags1,\n-\t\t\t  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);\n-\t\t/* SET_FIELD(p_cxt->xstorm_ag_context.flags10,\n-\t\t *\t  E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);\n-\t\t */\n-\t\tSET_FIELD(p_cxt->xstorm_ag_context.flags9,\n-\t\t\t  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);\n-\t}\n+\tOSAL_MEM_ZERO(p_cxt, core_conn_context_size);\n+\n+\tSET_FIELD(*p_flags10, XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);\n+\tSET_FIELD(*p_flags1, XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);\n+\tSET_FIELD(*p_flags9, XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);\n \n \t/* CDU validation - FIXME currently disabled */\n \n \t/* QM physical queue */\n \tphysical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);\n-\tp_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);\n+\t*p_physical_q0 = OSAL_CPU_TO_LE16(physical_q);\n \n-\tp_cxt->xstorm_st_context.spq_base_lo =\n-\t    DMA_LO_LE(p_spq->chain.p_phys_addr);\n-\tp_cxt->xstorm_st_context.spq_base_hi =\n-\t    DMA_HI_LE(p_spq->chain.p_phys_addr);\n+\t*p_spq_base_lo = DMA_LO_LE(p_spq->chain.p_phys_addr);\n+\t*p_spq_base_hi = DMA_HI_LE(p_spq->chain.p_phys_addr);\n \n-\tDMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,\n+\tDMA_REGPAIR_LE(*p_consolid_base_addr,\n \t\t       p_hwfn->p_consq->chain.p_phys_addr);\n }\n \ndiff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c\nindex 264217252..deee04ac4 100644\n--- a/drivers/net/qede/base/ecore_sriov.c\n+++ b/drivers/net/qede/base/ecore_sriov.c\n@@ -906,7 +906,7 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,\n  *\n  * @brief ecore_iov_config_perm_table - configure the permission\n  *      zone table.\n- *      In E4, queue zone permission table size is 320x9. There\n+ *      The queue zone permission table size is 320x9. 
There\n  *      are 320 VF queues for single engine device (256 for dual\n  *      engine device), and each entry has the following format:\n  *      {Valid, VF[7:0]}\n@@ -967,6 +967,9 @@ static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,\n \n \tfor (qid = 0; qid < num_rx_queues; qid++) {\n \t\tp_block = ecore_get_igu_free_sb(p_hwfn, false);\n+\t\tif (!p_block)\n+\t\t\tcontinue;\n+\n \t\tvf->igu_sbs[qid] = p_block->igu_sb_id;\n \t\tp_block->status &= ~ECORE_IGU_STATUS_FREE;\n \t\tSET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);\n@@ -1064,6 +1067,15 @@ void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,\n \tp_bulletin->capability_speed = p_caps->speed_capabilities;\n }\n \n+#ifndef ASIC_ONLY\n+static void ecore_emul_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t  struct ecore_ptt *p_ptt)\n+{\n+\t/* Increase the maximum number of DORQ FIFO entries used by child VFs */\n+\tecore_wr(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT_LIM, 0x3ec);\n+}\n+#endif\n+\n enum _ecore_status_t\n ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,\n \t\t\t struct ecore_ptt *p_ptt,\n@@ -1188,18 +1200,39 @@ ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,\n \t\t\t   &link_params, &link_state, &link_caps);\n \n \trc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);\n+\tif (rc != ECORE_SUCCESS)\n+\t\treturn rc;\n \n-\tif (rc == ECORE_SUCCESS) {\n-\t\tvf->b_init = true;\n-\t\tp_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=\n+\tvf->b_init = true;\n+#ifndef REMOVE_DBG\n+\tp_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=\n \t\t\t(1ULL << (vf->relative_vf_id % 64));\n+#endif\n \n-\t\tif (IS_LEAD_HWFN(p_hwfn))\n-\t\t\tp_hwfn->p_dev->p_iov_info->num_vfs++;\n+\tif (IS_LEAD_HWFN(p_hwfn))\n+\t\tp_hwfn->p_dev->p_iov_info->num_vfs++;\n+\n+#ifndef ASIC_ONLY\n+\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev))\n+\t\tecore_emul_iov_init_hw_for_vf(p_hwfn, p_ptt);\n+#endif\n+\n+\treturn ECORE_SUCCESS;\n \t}\n \n-\treturn rc;\n+#ifndef ASIC_ONLY\n+static void ecore_emul_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,\n+\t\t\t\t\t  struct ecore_ptt *p_ptt)\n+{\n+\tif (!ecore_mcp_is_init(p_hwfn)) {\n+\t\tu32 sriov_dis = ecore_rd(p_hwfn, p_ptt,\n+\t\t\t\t\t PGLUE_B_REG_SR_IOV_DISABLED_REQUEST);\n+\n+\t\tecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST_CLR,\n+\t\t\t sriov_dis);\n }\n+}\n+#endif\n \n enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t\t struct ecore_ptt *p_ptt,\n@@ -1257,6 +1290,11 @@ enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,\n \t\t\tp_hwfn->p_dev->p_iov_info->num_vfs--;\n \t}\n \n+#ifndef ASIC_ONLY\n+\tif (CHIP_REV_IS_EMUL(p_hwfn->p_dev))\n+\t\tecore_emul_iov_release_hw_for_vf(p_hwfn, p_ptt);\n+#endif\n+\n \treturn ECORE_SUCCESS;\n }\n \n@@ -1391,7 +1429,7 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,\n \n \teng_vf_id = p_vf->abs_vf_id;\n \n-\tOSAL_MEMSET(&params, 0, sizeof(struct dmae_params));\n+\tOSAL_MEMSET(&params, 0, sizeof(params));\n \tSET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1);\n \tparams.dst_vf_id = eng_vf_id;\n \n@@ -1787,7 +1825,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,\n \t/* fill in pfdev info */\n \tpfdev_info->chip_num = p_hwfn->p_dev->chip_num;\n \tpfdev_info->db_size = 0;\t/* @@@ TBD MichalK Vf Doorbells */\n-\tpfdev_info->indices_per_sb = MAX_PIS_PER_SB;\n+\tpfdev_info->indices_per_sb = PIS_PER_SB;\n \n \tpfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |\n \t\t\t\t   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;\n@@ 
-2247,10 +2285,14 @@ static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,\n \tecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,\n \t\t      sizeof(struct channel_list_end_tlv));\n \n-\t/* Update the TLV with the response */\n+\t/* Update the TLV with the response.\n+\t * The VF Rx producers are located in the vf zone.\n+\t */\n \tif ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {\n \t\treq = &mbx->req_virt->start_rxq;\n-\t\tp_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +\n+\n+\t\tp_tlv->offset =\n+\t\t\tPXP_VF_BAR0_START_MSDM_ZONE_B +\n \t\t\t\tOFFSETOF(struct mstorm_vf_zone,\n \t\t\t\t\t non_trigger.eth_rx_queue_producers) +\n \t\t\t\tsizeof(struct eth_rx_prod_data) * req->rx_qid;\n@@ -2350,13 +2392,15 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,\n \tif (p_cid == OSAL_NULL)\n \t\tgoto out;\n \n-\t/* Legacy VFs have their Producers in a different location, which they\n-\t * calculate on their own and clean the producer prior to this.\n+\t/* The VF Rx producers are located in the vf zone.\n+\t * Legacy VFs have their producers in the queue zone, but they\n+\t * calculate the location by their own and clean them prior to this.\n \t */\n \tif (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))\n \t\tREG_WR(p_hwfn,\n \t\t       GTT_BAR0_MAP_REG_MSDM_RAM +\n-\t\t       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),\n+\t\t       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id,\n+\t\t\t\t\t\t  req->rx_qid),\n \t\t       0);\n \n \trc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,\n@@ -3855,48 +3899,70 @@ ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,\n \treturn ECORE_SUCCESS;\n }\n \n+#define MAX_NUM_EXT_VOQS\t(MAX_NUM_PORTS * NUM_OF_TCS)\n+\n static enum _ecore_status_t\n ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,\n \t\t\t  struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)\n {\n-\tu32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];\n-\tint i, cnt;\n+\tu32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp;\n+\tu8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port;\n+\tu8 max_ports_per_engine = p_hwfn->p_dev->num_ports_in_engine;\n+\tu32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0;\n+\tu32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0;\n+\tu8 port_id, tc, tc_id = 0, voq = 0;\n+\tint cnt;\n \n \t/* Read initial consumers & producers */\n-\tfor (i = 0; i < MAX_NUM_VOQS_E4; i++) {\n-\t\tu32 prod;\n-\n-\t\tcons[i] = ecore_rd(p_hwfn, p_ptt,\n-\t\t\t\t   PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +\n-\t\t\t\t   i * 0x40);\n+\tfor (port_id = 0; port_id < max_ports_per_engine; port_id++) {\n+\t\t/* \"max_phys_tcs_per_port\" active TCs + 1 pure LB TC */\n+\t\tfor (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) {\n+\t\t\ttc_id = (tc < max_phys_tcs_per_port) ?\n+\t\t\t\ttc :\n+\t\t\t\tPURE_LB_TC;\n+\t\t\tvoq = VOQ(port_id, tc_id, max_phys_tcs_per_port);\n+\t\t\tcons[voq] = ecore_rd(p_hwfn, p_ptt,\n+\t\t\t\t\t     cons_voq0_addr + voq * 0x40);\n \t\tprod = ecore_rd(p_hwfn, p_ptt,\n-\t\t\t\tPBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +\n-\t\t\t\ti * 0x40);\n-\t\tdistance[i] = prod - cons[i];\n+\t\t\t\t\tprod_voq0_addr + voq * 0x40);\n+\t\t\tdistance[voq] = prod - cons[voq];\n+\t\t}\n \t}\n \n \t/* Wait for consumers to pass the producers */\n-\ti = 0;\n+\tport_id = 0;\n+\ttc = 0;\n \tfor (cnt = 0; cnt < 50; cnt++) {\n-\t\tfor (; i < MAX_NUM_VOQS_E4; i++) {\n-\t\t\tu32 tmp;\n-\n+\t\tfor (; port_id < max_ports_per_engine; port_id++) {\n+\t\t\t/* \"max_phys_tcs_per_port\" active TCs + 1 pure LB TC */\n+\t\t\tfor (; tc < 
max_phys_tcs_per_port + 1; tc++) {\n+\t\t\t\ttc_id = (tc < max_phys_tcs_per_port) ?\n+\t\t\t\t\ttc :\n+\t\t\t\t\tPURE_LB_TC;\n+\t\t\t\tvoq = VOQ(port_id, tc_id,\n+\t\t\t\t\t  max_phys_tcs_per_port);\n \t\t\ttmp = ecore_rd(p_hwfn, p_ptt,\n-\t\t\t\t       PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +\n-\t\t\t\t       i * 0x40);\n-\t\t\tif (distance[i] > tmp - cons[i])\n+\t\t\t\t\t       cons_voq0_addr + voq * 0x40);\n+\t\t\tif (distance[voq] > tmp - cons[voq])\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\t\tif (tc == max_phys_tcs_per_port + 1)\n+\t\t\t\ttc = 0;\n+\t\t\telse\n \t\t\t\tbreak;\n \t\t}\n \n-\t\tif (i == MAX_NUM_VOQS_E4)\n+\t\tif (port_id == max_ports_per_engine)\n \t\t\tbreak;\n \n \t\tOSAL_MSLEEP(20);\n \t}\n \n \tif (cnt == 50) {\n-\t\tDP_ERR(p_hwfn, \"VF[%d] - pbf polling failed on VOQ %d\\n\",\n-\t\t       p_vf->abs_vf_id, i);\n+\t\tDP_ERR(p_hwfn,\n+\t\t       \"VF[%d] - pbf polling failed on VOQ %d [port_id %d, tc_id %d]\\n\",\n+\t\t       p_vf->abs_vf_id, voq, port_id, tc_id);\n \t\treturn ECORE_TIMEOUT;\n \t}\n \n@@ -3996,11 +4062,11 @@ ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,\n enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t      struct ecore_ptt *p_ptt)\n {\n-\tu32 ack_vfs[VF_MAX_STATIC / 32];\n+\tu32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];\n \tenum _ecore_status_t rc = ECORE_SUCCESS;\n \tu16 i;\n \n-\tOSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));\n+\tOSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);\n \n \t/* Since BRB <-> PRS interface can't be tested as part of the flr\n \t * polling due to HW limitations, simply sleep a bit. And since\n@@ -4019,10 +4085,10 @@ enum _ecore_status_t\n ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,\n \t\t\t\tstruct ecore_ptt *p_ptt, u16 rel_vf_id)\n {\n-\tu32 ack_vfs[VF_MAX_STATIC / 32];\n+\tu32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];\n \tenum _ecore_status_t rc = ECORE_SUCCESS;\n \n-\tOSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));\n+\tOSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);\n \n \t/* Wait instead of polling the BRB <-> PRS interface */\n \tOSAL_MSLEEP(100);\n@@ -4039,7 +4105,8 @@ bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)\n \tu16 i;\n \n \tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV, \"Marking FLR-ed VFs\\n\");\n-\tfor (i = 0; i < (VF_MAX_STATIC / 32); i++)\n+\n+\tfor (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)\n \t\tDP_VERBOSE(p_hwfn, ECORE_MSG_IOV,\n \t\t\t   \"[%08x,...,%08x]: %08x\\n\",\n \t\t\t   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);\n@@ -4396,7 +4463,7 @@ enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,\n \tif (!vf_info)\n \t\treturn ECORE_INVAL;\n \n-\tOSAL_MEMSET(&params, 0, sizeof(struct dmae_params));\n+\tOSAL_MEMSET(&params, 0, sizeof(params));\n \tSET_FIELD(params.flags, DMAE_PARAMS_SRC_VF_VALID, 0x1);\n \tSET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 0x1);\n \tparams.src_vf_id = vf_info->abs_vf_id;\n@@ -4785,9 +4852,9 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,\n \t\t\t\t\t\t struct ecore_ptt *p_ptt,\n \t\t\t\t\t\t int vfid, int val)\n {\n-\tstruct ecore_mcp_link_state *p_link;\n \tstruct ecore_vf_info *vf;\n \tu8 abs_vp_id = 0;\n+\tu16 rl_id;\n \tenum _ecore_status_t rc;\n \n \tvf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);\n@@ -4799,10 +4866,8 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,\n \tif (rc != ECORE_SUCCESS)\n \t\treturn rc;\n \n-\tp_link = 
&ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;\n-\n-\treturn ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,\n-\t\t\t\t   p_link->speed);\n+\trl_id = abs_vp_id; /* The \"rl_id\" is set as the \"vport_id\" */\n+\treturn ecore_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);\n }\n \n enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,\ndiff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c\nindex 3ba6a0cf2..24846cfb5 100644\n--- a/drivers/net/qede/base/ecore_vf.c\n+++ b/drivers/net/qede/base/ecore_vf.c\n@@ -257,6 +257,7 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)\n \tstruct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;\n \tstruct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;\n \tstruct ecore_vf_acquire_sw_info vf_sw_info;\n+\tstruct ecore_dev *p_dev = p_hwfn->p_dev;\n \tstruct vf_pf_resc_request *p_resc;\n \tbool resources_acquired = false;\n \tstruct vfpf_acquire_tlv *req;\n@@ -427,20 +428,20 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)\n \tp_iov->bulletin.size = resp->bulletin_size;\n \n \t/* get HW info */\n-\tp_hwfn->p_dev->type = resp->pfdev_info.dev_type;\n-\tp_hwfn->p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;\n+\tp_dev->type = resp->pfdev_info.dev_type;\n+\tp_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;\n \n \tDP_INFO(p_hwfn, \"Chip details - %s%d\\n\",\n-\t\tECORE_IS_BB(p_hwfn->p_dev) ? \"BB\" : \"AH\",\n+\t\tECORE_IS_BB(p_dev) ? \"BB\" : \"AH\",\n \t\tCHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);\n \n-\tp_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;\n+\tp_dev->chip_num = pfdev_info->chip_num & 0xffff;\n \n \t/* Learn of the possibility of CMT */\n \tif (IS_LEAD_HWFN(p_hwfn)) {\n \t\tif (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {\n \t\t\tDP_INFO(p_hwfn, \"100g VF\\n\");\n-\t\t\tp_hwfn->p_dev->num_hwfns = 2;\n+\t\t\tp_dev->num_hwfns = 2;\n \t\t}\n \t}\n \n@@ -636,10 +637,6 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)\n \treturn ECORE_NOMEM;\n }\n \n-#define TSTORM_QZONE_START   PXP_VF_BAR0_START_SDM_ZONE_A\n-#define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START + \\\n-\t\t\t\t   (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))\n-\n /* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */\n static void\n __ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,\n@@ -828,8 +825,7 @@ ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,\n \t\tu8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];\n \t\tu32 init_prod_val = 0;\n \n-\t\t*pp_prod = (u8 OSAL_IOMEM *)\n-\t\t\t   p_hwfn->regview +\n+\t\t*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +\n \t\t\t   MSTORM_QZONE_START(p_hwfn->p_dev) +\n \t\t\t   (hw_qid) * MSTORM_QZONE_SIZE;\n \ndiff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h\nindex b5f93e9fa..559638508 100644\n--- a/drivers/net/qede/qede_ethdev.h\n+++ b/drivers/net/qede/qede_ethdev.h\n@@ -44,7 +44,7 @@\n /* Driver versions */\n #define QEDE_PMD_VER_PREFIX\t\t\"QEDE PMD\"\n #define QEDE_PMD_VERSION_MAJOR\t\t2\n-#define QEDE_PMD_VERSION_MINOR\t        10\n+#define QEDE_PMD_VERSION_MINOR\t        11\n #define QEDE_PMD_VERSION_REVISION       0\n #define QEDE_PMD_VERSION_PATCH\t        1\n \ndiff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c\nindex 8a108f99c..c9caec645 100644\n--- a/drivers/net/qede/qede_main.c\n+++ b/drivers/net/qede/qede_main.c\n@@ -18,7 +18,7 @@\n char qede_fw_file[PATH_MAX];\n \n static const char 
* const QEDE_DEFAULT_FIRMWARE =\n-\t\"/lib/firmware/qed/qed_init_values-8.37.7.0.bin\";\n+\t\"/lib/firmware/qed/qed_init_values-8.40.25.0.bin\";\n \n static void\n qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)\ndiff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c\nindex 40a246229..a28dd0a07 100644\n--- a/drivers/net/qede/qede_rxtx.c\n+++ b/drivers/net/qede/qede_rxtx.c\n@@ -805,7 +805,7 @@ qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)\n \t\tfp->rxq->hw_rxq_prod_addr = ret_params.p_prod;\n \t\tfp->rxq->handle = ret_params.p_handle;\n \n-\t\tfp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];\n+\t\tfp->rxq->hw_cons_ptr = &fp->sb_info->sb_pi_array[RX_PI];\n \t\tqede_update_rx_prod(qdev, fp->rxq);\n \t\teth_dev->data->rx_queue_state[rx_queue_id] =\n \t\t\tRTE_ETH_QUEUE_STATE_STARTED;\n@@ -863,7 +863,7 @@ qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)\n \t\ttxq->doorbell_addr = ret_params.p_doorbell;\n \t\ttxq->handle = ret_params.p_handle;\n \n-\t\ttxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];\n+\t\ttxq->hw_cons_ptr = &fp->sb_info->sb_pi_array[TX_PI(0)];\n \t\tSET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,\n \t\t\t\tDB_DEST_XCM);\n \t\tSET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,\n",
    "prefixes": [
        "v2",
        "8/9"
    ]
}