get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch.
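
A minimal usage sketch, assuming a Python 3 client with the third-party requests package installed; the endpoint URL and field names are taken from the response shown below, everything else is illustrative:

import requests

# Fetch the patch detail as JSON from the Patchwork REST API.
resp = requests.get("http://patchwork.dpdk.org/api/patches/130921/")
resp.raise_for_status()
patch = resp.json()

# Read a few of the fields documented in the response below.
print(patch["name"])                 # "[v10,6/8] net/ntnic: adds flow logic"
print(patch["state"])                # "superseded"
print(patch["submitter"]["email"])   # "mko-plv@napatech.com"

# The "mbox" field links the raw patch suitable for `git am`.
print(patch["mbox"])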

GET /api/patches/130921/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 130921,
    "url": "http://patchwork.dpdk.org/api/patches/130921/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20230830165135.3540627-6-mko-plv@napatech.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230830165135.3540627-6-mko-plv@napatech.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230830165135.3540627-6-mko-plv@napatech.com",
    "date": "2023-08-30T16:51:33",
    "name": "[v10,6/8] net/ntnic: adds flow logic",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "9d6c36e267dc67e1569d99881c26ada20886c1b1",
    "submitter": {
        "id": 3153,
        "url": "http://patchwork.dpdk.org/api/people/3153/?format=api",
        "name": "Mykola Kostenok",
        "email": "mko-plv@napatech.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20230830165135.3540627-6-mko-plv@napatech.com/mbox/",
    "series": [
        {
            "id": 29378,
            "url": "http://patchwork.dpdk.org/api/series/29378/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=29378",
            "date": "2023-08-30T16:51:31",
            "name": "[v10,1/8] net/ntnic: initial commit which adds register defines",
            "version": 10,
            "mbox": "http://patchwork.dpdk.org/series/29378/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/130921/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/130921/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 5A10941FD1;\n\tWed, 30 Aug 2023 18:51:30 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id C6CEA40298;\n\tWed, 30 Aug 2023 18:51:21 +0200 (CEST)",
            "from egress-ip54b.ess.de.barracuda.com\n (egress-ip54b.ess.de.barracuda.com [35.157.190.245])\n by mails.dpdk.org (Postfix) with ESMTP id 0476B40288\n for <dev@dpdk.org>; Wed, 30 Aug 2023 18:51:19 +0200 (CEST)",
            "from EUR04-DB3-obe.outbound.protection.outlook.com\n (mail-db3eur04lp2050.outbound.protection.outlook.com [104.47.12.50]) by\n mx-outbound46-73.eu-central-1c.ess.aws.cudaops.com (version=TLSv1.2\n cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NO);\n Wed, 30 Aug 2023 16:51:18 +0000",
            "from DU7PR01CA0032.eurprd01.prod.exchangelabs.com\n (2603:10a6:10:50e::8) by AS8P190MB2007.EURP190.PROD.OUTLOOK.COM\n (2603:10a6:20b:529::13) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6745.20; Wed, 30 Aug\n 2023 16:51:09 +0000",
            "from DB5PEPF00014B96.eurprd02.prod.outlook.com\n (2603:10a6:10:50e:cafe::1a) by DU7PR01CA0032.outlook.office365.com\n (2603:10a6:10:50e::8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6745.20 via Frontend\n Transport; Wed, 30 Aug 2023 16:51:09 +0000",
            "from k8s-node.default.svc.cluster.local (178.72.21.4) by\n DB5PEPF00014B96.mail.protection.outlook.com (10.167.8.234) with Microsoft\n SMTP Server id 15.20.6745.17 via Frontend Transport; Wed, 30 Aug 2023\n 16:51:09 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=IHveu2n5zSoCmHYFejRPaqe1i/crdAO0T0ZogSQac+8HhQVQhitsTekmckfJlWYH9S9oD1uDpzIXNFHttM3BeSrPR0vAA7Gg7NtKnAqq7jTuBJJcgPeZvpG0C8rPsi/z8wx77uc4SDs9uNiJqwNIXo+kcE/UQ4eF72M1nTP+E1J+TaunZlgocNb2DqgLk+q2GdZOlw4+z8QQsI1Ct7raJXxUwJKh/zXALrJaRZ4w7JCQaaOFOZlwT5HnoHBxu19ZiMleRW6vqcmw7Z9W7bC5MoXDB3DXlHf8toIJe78C5Ku41U/+EIgcckflrgtZ6amYfCjcCorKI6K68+npgv7rBQ==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=t0C0D2l9lPlyIFkTK65D/eWdp6Dz4qzXNY/NtHyiDi8=;\n b=gjnwmqr36MLMGE4DE9jJPDC953E96LsJolyQEC3UnKiTp2e5fxCNsBB9w3+uKlwfkJBSzmTwA5+E2UB9reL6C9OOARVR1l8FB+q6RgoJAcnpX/c/3TuSUF+WPe/a+hCzPW57nTpzirLhc/mg8WItINcfdfuSJubJKScg2/cOj/zUJ6Gare+6501XwQyDYIbndTNdR0OvnfQSJIh4obhZz4kHyUEercrHSpy4R8/VFMXhd5vFNKP9L3k9d9DIYcyJc6GO/vqKFjz5BI666h2/0YMongnbDGzeNHuY8pWatPBWj6BWKszkr2A6Ss4e8QZuOOExQto7L5BGD3g3zgIYgA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=fail (sender ip is\n 178.72.21.4) smtp.rcpttodomain=dpdk.org smtp.mailfrom=napatech.com;\n dmarc=fail (p=none sp=none pct=100) action=none header.from=napatech.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=napatech.com;\n s=selector1;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=t0C0D2l9lPlyIFkTK65D/eWdp6Dz4qzXNY/NtHyiDi8=;\n b=lRGchvMx6DZCayrnXGwJR5FB0s89jAH+35SvKnIFyFpmcUCtqsEZvNC3kJS7G64/UQ06wzs3DDGz00a5WGWGNuTHMplBjcsjLaSUXZNfBveoN3iW6P9E+bYN582ey68j26+RGdsHsEcI5TyvADEIZ+vaBJzJD8tAwSdu4SrLwtM=",
        "X-MS-Exchange-Authentication-Results": "spf=fail (sender IP is 178.72.21.4)\n smtp.mailfrom=napatech.com; dkim=none (message not signed)\n header.d=none;dmarc=fail action=none header.from=napatech.com;",
        "Received-SPF": "Fail (protection.outlook.com: domain of napatech.com does not\n designate 178.72.21.4 as permitted sender) receiver=protection.outlook.com;\n client-ip=178.72.21.4; helo=k8s-node.default.svc.cluster.local;",
        "From": "Mykola Kostenok <mko-plv@napatech.com>",
        "To": "dev@dpdk.org,\n\tmko-plv@napatech.com,\n\tthomas@monjalon.net",
        "Cc": "ckm@napatech.com",
        "Subject": "[PATCH v10 6/8] net/ntnic: adds flow logic",
        "Date": "Wed, 30 Aug 2023 18:51:33 +0200",
        "Message-Id": "<20230830165135.3540627-6-mko-plv@napatech.com>",
        "X-Mailer": "git-send-email 2.39.3",
        "In-Reply-To": "<20230830165135.3540627-1-mko-plv@napatech.com>",
        "References": "<20230816132552.2483752-1-mko-plv@napatech.com>\n <20230830165135.3540627-1-mko-plv@napatech.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "DB5PEPF00014B96:EE_|AS8P190MB2007:EE_",
        "Content-Type": "text/plain",
        "X-MS-Office365-Filtering-Correlation-Id": "81b3dc71-ab90-401e-4ced-08dba9794f7a",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n m+m2zxFgYFALk7hkWpkYVeCVjC6vM2tf7BxBpvX5d3FT1ilm8qK5lsPDSlZigC12YZ95+ApiuhDG6KC+e08RmMI/9hBGacSli91SVHfmJ/5p0HQOAPUEfY9L4yt56VwZCO1WRTKMaxHrgZ0JOwszQmvA8N7NjmHJc63TvNaGRjCvcJa3n07jg1qTaG4vdE9UXMTZTN5qsKA3z9YY3F5i0w707QvBedPdq7myQwVgaowBzKIn/rpGOqyUjwmzQuZ+gHTiPy0+qqSxbW6JcX35SYUuOD89GgvvYi2fOgrzgxO9pf5BRXpBiQDCCAyZs0lGJR28p6fkkLyxtm2aTY07d7r2Rt9/zd39O6hJdWkxfV5bgRpuVnn5ov5WnOrnK8pAHk48egW3G5M9ZutiADLUZg6iN65hXtx9m2iDlhC/QW9FxlA2PI3fnIr7W4CAmZS3JdlcdNNfwKbuC4ePhFIVXaTGVe+Z1M77h2IDlRNiPk9a3vWfIyAWHhRWbyGYVlnrJVBnn+EXjjgnjY47+av+/Cr0pj03KPLZPsnVssxsOkTiKmzTnIdCVMYu241cu8U3eap/5tZPMuSsWDn1DVxfcUJwp1PQ3F9Gfgom7sf1fjHfg87aa6D6eF57fDxSvo62L+JxejTWc8r6cm9/bmNRvzKWf4TaGlvZB+7eyEqcvPU6rG6ockP5IZtHTPf9brCXrCe0dTUmqsTvbyZdKT23cmkQmcSCcjoUFZXISMbiMsO0P9uhb+X3W0GQHvt9ZNFUo0PIenyB3gZUIwHS+9lDTw==",
        "X-Forefront-Antispam-Report": "CIP:178.72.21.4; CTRY:DK; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:k8s-node.default.svc.cluster.local;\n PTR:InfoDomainNonexistent; CAT:NONE;\n SFS:(13230031)(376002)(136003)(39840400004)(396003)(346002)(186009)(451199024)(82310400011)(1800799009)(46966006)(36840700001)(6512007)(6486002)(6506007)(6666004)(83380400001)(478600001)(9316004)(30864003)(336012)(26005)(2906002)(36736006)(316002)(107886003)(4326008)(8676002)(70586007)(41300700001)(5660300002)(70206006)(8936002)(36756003)(2616005)(36860700001)(47076005)(956004)(40480700001)(1076003)(86362001)(81166007)(356005)(36900700001)(559001)(579004)(309714004);\n DIR:OUT; SFP:1101;",
        "X-MS-Exchange-AntiSpam-ExternalHop-MessageData-ChunkCount": "1",
        "X-MS-Exchange-AntiSpam-ExternalHop-MessageData-0": "\n /zY4XvuUbCZo8Tl5Pu1xdPanKclaoJc6+1+KmDDbWraoE56JgZrDi9tjBhez7tjHd36TiYrnjM77DcyDf8r02Y9cMFO1sHIWRjZD6CwEnhh2neQURpcBZsI1lRaeWzn0P6dI4IusosdypvwFDm3Yf4UENGpFvnqsA5URWmuIX2knhE9Yj7NBhU5FEFYZGiSrCh64B/OTJuB5O/wLRjRX9gMTFIiSu30q2w7xm1S8lBGnXaD+yfbvb56lp+uaHHOyNjJ02FQhW/gviwJfub2UXzhZVoSU7v0KtbZo7/PI+Z7tbHJ/AQFz9cL0sxXF6PVjrrsvJvu9onPHR1lkkOIoYTjYxwkW7UmvghdC+wUbwjX4NQqHTZofZVkbcwSjyBc3443QLY0uOW870WL7+TL53It3TZ12SoQA92LbTNchOa2UXAMwfB8JgSBB6h+rJ9Q4yGQzIn8Jzif0tXScBo7FK6mfzNRIi9Whfl53RlwrR/Qf5LRofgzNqUtnZ+PUpdDA1gnXS6iYIpiLUud0UdcPr1ArbltVXqqv/o3Ebc0sbIqCfU5vqzHdoz8DXykzSsLF8kNN5PF8FR9Mk1G6QffiYpS2jYAT3VzYjCOiI/Cl5hXhVUYf09VE/hCVUGOeR73IBzGV6nNldcnEzEtmZLqEJGZV47QotXprNfmo1a5+gRTd2iSSIH2KjCMHkaSMq/2JlImwYeRwCXA3hN4N90RWZ5mrAZ0Nmc/kUpvnx6XVrYmxBE0SiTAcpB4xcGj3LJk8LDJFmO9hrK9nDXX36H/jyxX1lCOU5c0VvELd5oog/Rc=",
        "X-OriginatorOrg": "napatech.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "30 Aug 2023 16:51:09.0577 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 81b3dc71-ab90-401e-4ced-08dba9794f7a",
        "X-MS-Exchange-CrossTenant-Id": "c4540d0b-728a-4233-9da5-9ea30c7ec3ed",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=c4540d0b-728a-4233-9da5-9ea30c7ec3ed; Ip=[178.72.21.4];\n Helo=[k8s-node.default.svc.cluster.local]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DB5PEPF00014B96.eurprd02.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "AS8P190MB2007",
        "X-BESS-ID": "1693414278-311849-12341-38032-1",
        "X-BESS-VER": "2019.1_20230829.1833",
        "X-BESS-Apparent-Source-IP": "104.47.12.50",
        "X-BESS-Parts": "H4sIAAAAAAACA4uuVkqtKFGyUioBkjpK+cVKVsaGxmbmRkB2BlDYMM3cJMXcMN\n XcyMA8NTHJNNkk1cjSJMXS3NLE1CjFMEWpNhYA3cHkQEMAAAA=",
        "X-BESS-Outbound-Spam-Score": "0.00",
        "X-BESS-Outbound-Spam-Report": "Code version 3.2,\n rules version 3.2.2.250494 [from\n cloudscan8-197.eu-central-1a.ess.aws.cudaops.com]\n Rule breakdown below\n pts rule name              description\n ---- ---------------------- --------------------------------\n 0.00 LARGE_BODY_SHORTCUT    META:  ",
        "X-BESS-Outbound-Spam-Status": "SCORE=0.00 using account:ESS113687 scores of\n KILL_LEVEL=7.0 tests=LARGE_BODY_SHORTCUT",
        "X-BESS-BRTS-Status": "1",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Christian Koue Muf <ckm@napatech.com>\n\nThis logic layer takes rte_flow style patterns and actions as input,\nand programs the FPGA accordingly.\n\nSigned-off-by: Christian Koue Muf <ckm@napatech.com>\nReviewed-by: Mykola Kostenok <mko-plv@napatech.com>\n---\nv2:\n* Fixed WARNING:TYPO_SPELLING\n---\n drivers/net/ntnic/adapter/nt4ga_adapter.c     |   13 +\n drivers/net/ntnic/adapter/nt4ga_stat.c        |   20 +-\n drivers/net/ntnic/meson.build                 |    4 +\n drivers/net/ntnic/nthw/flow_api/flow_api.c    | 1306 +++++\n drivers/net/ntnic/nthw/flow_api/flow_api.h    |  291 +\n .../nthw/flow_api/flow_api_profile_inline.c   | 5118 +++++++++++++++++\n .../nthw/flow_api/flow_api_profile_inline.h   |   56 +\n .../net/ntnic/nthw/flow_filter/flow_backend.c | 3205 +++++++++++\n .../net/ntnic/nthw/flow_filter/flow_backend.h |   15 +\n .../net/ntnic/nthw/flow_filter/flow_filter.c  |   39 +\n .../net/ntnic/nthw/flow_filter/flow_filter.h  |   16 +\n 11 files changed, 10078 insertions(+), 5 deletions(-)\n create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.c\n create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api.h\n create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c\n create mode 100644 drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h\n create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.c\n create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_backend.h\n create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.c\n create mode 100644 drivers/net/ntnic/nthw/flow_filter/flow_filter.h",
    "diff": "diff --git a/drivers/net/ntnic/adapter/nt4ga_adapter.c b/drivers/net/ntnic/adapter/nt4ga_adapter.c\nindex 259aae2831..f9493202c3 100644\n--- a/drivers/net/ntnic/adapter/nt4ga_adapter.c\n+++ b/drivers/net/ntnic/adapter/nt4ga_adapter.c\n@@ -10,6 +10,8 @@\n #include \"nt4ga_pci_ta_tg.h\"\n #include \"nt4ga_link_100g.h\"\n \n+#include \"flow_filter.h\"\n+\n /* Sensors includes */\n #include \"board_sensors.h\"\n #include \"avr_sensors.h\"\n@@ -306,6 +308,17 @@ int nt4ga_adapter_init(struct adapter_info_s *p_adapter_info)\n \tn_nim_ports = fpga_info->n_nims;\n \tassert(n_nim_ports >= 1);\n \n+\t/* Nt4ga Init Filter */\n+\tnt4ga_filter_t *p_filter = &p_adapter_info->nt4ga_filter;\n+\n+\tres = flow_filter_init(p_fpga, &p_filter->mp_flow_device,\n+\t\t\t     p_adapter_info->adapter_no);\n+\tif (res != 0) {\n+\t\tNT_LOG(ERR, ETHDEV, \"%s: Cannot initialize filter\\n\",\n+\t\t       p_adapter_id_str);\n+\t\treturn res;\n+\t}\n+\n \t/*\n \t * HIF/PCI TA/TG\n \t */\ndiff --git a/drivers/net/ntnic/adapter/nt4ga_stat.c b/drivers/net/ntnic/adapter/nt4ga_stat.c\nindex b61c73ea12..2c822c6b97 100644\n--- a/drivers/net/ntnic/adapter/nt4ga_stat.c\n+++ b/drivers/net/ntnic/adapter/nt4ga_stat.c\n@@ -7,6 +7,7 @@\n #include \"nthw_drv.h\"\n #include \"nthw_fpga.h\"\n #include \"nt4ga_adapter.h\"\n+#include \"flow_filter.h\"\n \n #define NO_FLAGS 0\n \n@@ -16,12 +17,13 @@ static inline uint64_t timestamp2ns(uint64_t ts)\n \treturn ((ts >> 32) * 1000000000) + (ts & 0xffffffff);\n }\n \n-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,\n+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,\n+\t\t\t\t   nt4ga_stat_t *p_nt4ga_stat,\n \t\t\t\t   uint32_t *p_stat_dma_virtual);\n static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,\n \t\t\t\t    uint32_t *p_stat_dma_virtual);\n \n-int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,\n+int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info,\n \t\t      nt4ga_stat_t *p_nt4ga_stat)\n {\n \tnthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;\n@@ -39,7 +41,7 @@ int nt4ga_stat_collect(struct adapter_info_s *p_adapter_info _unused,\n \t} else {\n \t\tp_nt4ga_stat->last_timestamp =\n \t\t\ttimestamp2ns(*p_nthw_stat->mp_timestamp);\n-\t\tnt4ga_stat_collect_cap_v1_stats(p_nt4ga_stat,\n+\t\tnt4ga_stat_collect_cap_v1_stats(p_adapter_info, p_nt4ga_stat,\n \t\t\t\t\t       p_nt4ga_stat->p_stat_dma_virtual);\n \t}\n \treturn 0;\n@@ -198,7 +200,9 @@ int nt4ga_stat_setup(struct adapter_info_s *p_adapter_info)\n \t\t\treturn -1;\n \t\t}\n \n-\t\tp_nt4ga_stat->flm_stat_ver = 0;\n+\t\tstruct flow_nic_dev *ndev =\n+\t\t\t\tp_adapter_info->nt4ga_filter.mp_flow_device;\n+\t\tp_nt4ga_stat->flm_stat_ver = ndev->be.flm.ver;\n \n \t\tp_nt4ga_stat->mp_stat_structs_flm =\n \t\t\tcalloc(1, sizeof(struct flm_counters_v1));\n@@ -394,10 +398,12 @@ static int nt4ga_stat_collect_virt_v1_stats(nt4ga_stat_t *p_nt4ga_stat,\n }\n \n /* Called with stat mutex locked */\n-static int nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,\n+static int nt4ga_stat_collect_cap_v1_stats(struct adapter_info_s *p_adapter_info,\n+\t\t\t\t\t   nt4ga_stat_t *p_nt4ga_stat,\n \t\t\t\t\t   uint32_t *p_stat_dma_virtual)\n {\n \tnthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;\n+\tstruct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;\n \n \tconst int n_rx_ports = p_nt4ga_stat->mn_rx_ports;\n \tconst int n_tx_ports = p_nt4ga_stat->mn_tx_ports;\n@@ -701,5 +707,9 @@ static int 
nt4ga_stat_collect_cap_v1_stats(nt4ga_stat_t *p_nt4ga_stat,\n \t\tp_nt4ga_stat->a_port_tx_drops_total[p] += new_drop_events_sum;\n \t}\n \n+\t/* _update and get FLM stats */\n+\tflow_get_flm_stats(ndev, (uint64_t *)p_nt4ga_stat->mp_stat_structs_flm,\n+\t\t\t   sizeof(struct flm_counters_v1) / sizeof(uint64_t));\n+\n \treturn 0;\n }\ndiff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build\nindex 8a5a3d5deb..0ae574f9ca 100644\n--- a/drivers/net/ntnic/meson.build\n+++ b/drivers/net/ntnic/meson.build\n@@ -61,8 +61,10 @@ sources = files(\n     'nthw/core/nthw_spim.c',\n     'nthw/core/nthw_spis.c',\n     'nthw/core/nthw_tsm.c',\n+    'nthw/flow_api/flow_api.c',\n     'nthw/flow_api/flow_api_actions.c',\n     'nthw/flow_api/flow_api_backend.c',\n+    'nthw/flow_api/flow_api_profile_inline.c',\n     'nthw/flow_api/flow_engine/flow_group.c',\n     'nthw/flow_api/flow_engine/flow_hasher.c',\n     'nthw/flow_api/flow_engine/flow_kcc.c',\n@@ -81,6 +83,8 @@ sources = files(\n     'nthw/flow_api/hw_mod/hw_mod_slc.c',\n     'nthw/flow_api/hw_mod/hw_mod_slc_lr.c',\n     'nthw/flow_api/hw_mod/hw_mod_tpe.c',\n+    'nthw/flow_filter/flow_backend.c',\n+    'nthw/flow_filter/flow_filter.c',\n     'nthw/flow_filter/flow_nthw_cat.c',\n     'nthw/flow_filter/flow_nthw_csu.c',\n     'nthw/flow_filter/flow_nthw_flm.c',\ndiff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c\nnew file mode 100644\nindex 0000000000..2598e1e27b\n--- /dev/null\n+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c\n@@ -0,0 +1,1306 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include <stdio.h>\n+#include <stdarg.h>\n+#include <string.h>\n+#include <stdlib.h>\n+#include <assert.h>\n+#include <inttypes.h>\n+#include <pthread.h>\n+#include <arpa/inet.h> /* htons, htonl, ntohs */\n+\n+#include \"ntlog.h\"\n+\n+#include \"flow_api.h\"\n+\n+#include \"flow_api_nic_setup.h\"\n+#include \"stream_binary_flow_api.h\"\n+#include \"flow_api_actions.h\"\n+#include \"flow_api_backend.h\"\n+#include \"flow_api_engine.h\"\n+\n+#include \"flow_api_profile_inline.h\"\n+\n+#define SCATTER_GATHER\n+\n+const char *dbg_res_descr[] = {\n+\t/* RES_QUEUE */ \"RES_QUEUE\",\n+\t/* RES_CAT_CFN */ \"RES_CAT_CFN\",\n+\t/* RES_CAT_COT */ \"RES_CAT_COT\",\n+\t/* RES_CAT_EXO */ \"RES_CAT_EXO\",\n+\t/* RES_CAT_LEN */ \"RES_CAT_LEN\",\n+\t/* RES_KM_FLOW_TYPE */ \"RES_KM_FLOW_TYPE\",\n+\t/* RES_KM_CATEGORY */ \"RES_KM_CATEGORY\",\n+\t/* RES_HSH_RCP */ \"RES_HSH_RCP\",\n+\t/* RES_PDB_RCP */ \"RES_PDB_RCP\",\n+\t/* RES_QSL_RCP */ \"RES_QSL_RCP\",\n+\t/* RES_QSL_LTX */ \"RES_QSL_LTX\",\n+\t/* RES_QSL_QST */ \"RES_QSL_QST\",\n+\t/* RES_SLC_RCP */ \"RES_SLC_RCP\",\n+\t/* RES_IOA_RCP */ \"RES_IOA_RCP\",\n+\t/* RES_ROA_RCP */ \"RES_ROA_RCP\",\n+\t/* RES_FLM_FLOW_TYPE */ \"RES_FLM_FLOW_TYPE\",\n+\t/* RES_FLM_RCP */ \"RES_FLM_RCP\",\n+\t/* RES_HST_RCP */ \"RES_HST_RCP\",\n+\t/* RES_TPE_RCP */ \"RES_TPE_RCP\",\n+\t/* RES_TPE_EXT */ \"RES_TPE_EXT\",\n+\t/* RES_TPE_RPL */ \"RES_TPE_RPL\",\n+\t/* RES_COUNT */ \"RES_COUNT\",\n+\t/* RES_INVALID */ \"RES_INVALID\"\n+};\n+\n+static struct flow_nic_dev *dev_base;\n+static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;\n+\n+/*\n+ * *****************************************************************************\n+ * Error handling\n+ * *****************************************************************************\n+ */\n+\n+static const struct {\n+\tconst char *message;\n+} err_msg[] = {\n+\t/* 00 */ { \"Operation successfully 
completed\" },\n+\t/* 01 */ { \"Operation failed\" },\n+\t/* 02 */ { \"Memory allocation failed\" },\n+\t/* 03 */ { \"Too many output destinations\" },\n+\t/* 04 */ { \"Too many output queues for RSS\" },\n+\t/* 05 */ { \"The VLAN TPID specified is not supported\" },\n+\t/* 06 */ { \"The VxLan Push header specified is not accepted\" },\n+\t/* 07 */\n+\t{ \"While interpreting VxLan Pop action, could not find a destination port\" },\n+\t/* 08 */ { \"Failed in creating a HW-internal VTEP port\" },\n+\t/* 09 */ { \"Too many VLAN tag matches\" },\n+\t/* 10 */ { \"IPv6 invalid header specified\" },\n+\t/* 11 */ { \"Too many tunnel ports. HW limit reached\" },\n+\t/* 12 */ { \"Unknown or unsupported flow match element received\" },\n+\t/* 13 */ { \"Match failed because of HW limitations\" },\n+\t/* 14 */ { \"Match failed because of HW resource limitations\" },\n+\t/* 15 */ { \"Match failed because of too complex element definitions\" },\n+\t/* 16 */ { \"Action failed. To too many output destinations\" },\n+\t/* 17 */ { \"Action Output failed, due to HW resource exhaustion\" },\n+\t/* 18 */\n+\t{ \"Push Tunnel Header action cannot output to multiple destination queues\" },\n+\t/* 19 */ { \"Inline action HW resource exhaustion\" },\n+\t/* 20 */ { \"Action retransmit/recirculate HW resource exhaustion\" },\n+\t/* 21 */ { \"Flow counter HW resource exhaustion\" },\n+\t/* 22 */ { \"Internal HW resource exhaustion to handle Actions\" },\n+\t/* 23 */ { \"Internal HW QSL compare failed\" },\n+\t/* 24 */ { \"Internal CAT CFN reuse failed\" },\n+\t/* 25 */ { \"Match variations too complex\" },\n+\t/* 26 */ { \"Match failed because of CAM/TCAM full\" },\n+\t/* 27 */ { \"Internal creation of a tunnel end point port failed\" },\n+\t/* 28 */ { \"Unknown or unsupported flow action received\" },\n+\t/* 29 */ { \"Removing flow failed\" },\n+\t/* 30 */\n+\t{ \"No output queue specified. Ignore this flow offload and uses default queue\" },\n+\t/* 31 */ { \"No output queue found\" },\n+\t/* 32 */ { \"Unsupported EtherType or rejected caused by offload policy\" },\n+\t/* 33 */\n+\t{ \"Destination port specified is invalid or not reachable from this NIC\" },\n+\t/* 34 */ { \"Partial offload is not supported in this configuration\" },\n+\t/* 35 */ { \"Match failed because of CAT CAM exhausted\" },\n+\t/* 36 */\n+\t{ \"Match failed because of CAT CAM Key clashed with an existing KCC Key\" },\n+\t/* 37 */ { \"Match failed because of CAT CAM write failed\" },\n+\t/* 38 */ { \"Partial flow mark too big for device\" },\n+};\n+\n+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error)\n+{\n+\tassert(msg < ERR_MSG_NO_MSG);\n+\tif (error) {\n+\t\terror->message = err_msg[msg].message;\n+\t\terror->type = (msg == ERR_SUCCESS) ? 
FLOW_ERROR_SUCCESS :\n+\t\t\t      FLOW_ERROR_GENERAL;\n+\t}\n+}\n+\n+/*\n+ * *****************************************************************************\n+ * Resources\n+ * *****************************************************************************\n+ */\n+\n+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,\n+\t\t\t    uint32_t alignment)\n+{\n+\tfor (unsigned int i = 0; i < ndev->res[res_type].resource_count;\n+\t\t\ti += alignment) {\n+\t\tif (!flow_nic_is_resource_used(ndev, res_type, i)) {\n+\t\t\tflow_nic_mark_resource_used(ndev, res_type, i);\n+\t\t\tndev->res[res_type].ref[i] = 1;\n+\t\t\treturn i;\n+\t\t}\n+\t}\n+\treturn -1;\n+}\n+\n+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,\n+\t\t\t\t  enum res_type_e res_type)\n+{\n+\tif (!flow_nic_is_resource_used(ndev, res_type, idx)) {\n+\t\tflow_nic_mark_resource_used(ndev, res_type, idx);\n+\t\tndev->res[res_type].ref[idx] = 1;\n+\t\treturn 0;\n+\t}\n+\treturn -1;\n+}\n+\n+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,\n+\t\t\t\t   enum res_type_e res_type, unsigned int num,\n+\t\t\t\t   uint32_t alignment)\n+{\n+\tunsigned int idx_offs;\n+\n+\tfor (unsigned int res_idx = 0;\n+\t\t\tres_idx < ndev->res[res_type].resource_count - (num - 1);\n+\t\t\tres_idx += alignment) {\n+\t\tif (!flow_nic_is_resource_used(ndev, res_type, res_idx)) {\n+\t\t\tfor (idx_offs = 1; idx_offs < num; idx_offs++) {\n+\t\t\t\tif (flow_nic_is_resource_used(ndev, res_type,\n+\t\t\t\t\t\t\t      res_idx + idx_offs))\n+\t\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tif (idx_offs < num)\n+\t\t\t\tcontinue;\n+\n+\t\t\t/* found a contiguous number of \"num\" res_type elements - allocate them */\n+\t\t\tfor (idx_offs = 0; idx_offs < num; idx_offs++) {\n+\t\t\t\tflow_nic_mark_resource_used(ndev, res_type,\n+\t\t\t\t\t\t\t    res_idx + idx_offs);\n+\t\t\t\tndev->res[res_type].ref[res_idx + idx_offs] = 1;\n+\t\t\t}\n+\t\t\treturn res_idx;\n+\t\t}\n+\t}\n+\treturn -1;\n+}\n+\n+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,\n+\t\t\t    int idx)\n+{\n+\tflow_nic_mark_resource_unused(ndev, res_type, idx);\n+}\n+\n+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,\n+\t\t\t  int index)\n+{\n+\tNT_LOG(DBG, FILTER,\n+\t       \"Reference resource %s idx %i (before ref cnt %i)\\n\",\n+\t       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);\n+\tassert(flow_nic_is_resource_used(ndev, res_type, index));\n+\tif (ndev->res[res_type].ref[index] == (uint32_t)-1)\n+\t\treturn -1;\n+\tndev->res[res_type].ref[index]++;\n+\treturn 0;\n+}\n+\n+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,\n+\t\t\t    int index)\n+{\n+\tNT_LOG(DBG, FILTER,\n+\t       \"De-reference resource %s idx %i (before ref cnt %i)\\n\",\n+\t       dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);\n+\tassert(flow_nic_is_resource_used(ndev, res_type, index));\n+\tassert(ndev->res[res_type].ref[index]);\n+\t/* deref */\n+\tndev->res[res_type].ref[index]--;\n+\tif (!ndev->res[res_type].ref[index])\n+\t\tflow_nic_free_resource(ndev, res_type, index);\n+\treturn !!ndev->res[res_type]\n+\t       .ref[index]; /* if 0 resource has been freed */\n+}\n+\n+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,\n+\t\t\t\t     enum res_type_e res_type, int idx_start)\n+{\n+\tfor (unsigned int i = idx_start; i < ndev->res[res_type].resource_count;\n+\t\t\ti++) {\n+\t\tif (flow_nic_is_resource_used(ndev, res_type, i))\n+\t\t\treturn 
i;\n+\t}\n+\treturn -1;\n+}\n+\n+/*\n+ * Allocate a number flow resources.\n+ *\n+ * Arguments:\n+ *   ndev       : device\n+ *   res_type   : resource type\n+ *   fh         : flow handle\n+ *   count      : number of (contiguous) resources to be allocated\n+ *   alignment  : start index alignment\n+ *                  1: the allocation can start at any index\n+ *                  2: the allocation must start at index modulus 2 (0, 2, 4, 6, ...)\n+ *                  3: the allocation must start at index modulus 3 (0, 3, 6, 9, ...)\n+ *                  etc.\n+ * Returns:\n+ *          0   : success\n+ *         -1   : failure\n+ */\n+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,\n+\t\t\t\t  enum res_type_e res_type,\n+\t\t\t\t  struct flow_handle *fh, uint32_t count,\n+\t\t\t\t  uint32_t alignment)\n+{\n+\tif (count > 1) {\n+\t\t/* Contiguous */\n+\t\tfh->resource[res_type].index =\n+\t\t\tflow_nic_alloc_resource_contig(ndev, res_type, count, alignment);\n+\t} else {\n+\t\tfh->resource[res_type].index =\n+\t\t\tflow_nic_alloc_resource(ndev, res_type, alignment);\n+\t}\n+\n+\tif (fh->resource[res_type].index < 0)\n+\t\treturn -1;\n+\tfh->resource[res_type].count = count;\n+\treturn 0;\n+}\n+\n+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,\n+\t\t\t\t\tenum res_type_e res_type, int idx,\n+\t\t\t\t\tstruct flow_handle *fh)\n+{\n+\tint err = flow_nic_alloc_resource_index(ndev, idx, res_type);\n+\n+\tif (err)\n+\t\treturn err;\n+\n+\tfh->resource[res_type].index = idx;\n+\tif (fh->resource[res_type].index < 0)\n+\t\treturn -1;\n+\tfh->resource[res_type].count = 1;\n+\treturn 0;\n+}\n+\n+/*\n+ * *****************************************************************************\n+ * Hash\n+ * *****************************************************************************\n+ */\n+\n+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,\n+\t\t\tenum flow_nic_hash_e algorithm)\n+{\n+\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0, 0);\n+\tswitch (algorithm) {\n+\tcase HASH_ALGO_5TUPLE:\n+\t\t/* need to create an IPv6 hashing and enable the adaptive ip mask bit */\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE,\n+\t\t\t\t   hsh_idx, 0, 2);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,\n+\t\t\t\t   DYN_FINAL_IP_DST);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,\n+\t\t\t\t   -16);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,\n+\t\t\t\t   DYN_FINAL_IP_DST);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,\n+\t\t\t\t   0);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx, 0,\n+\t\t\t\t   DYN_L4);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx, 0, 0);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_PE, hsh_idx, 0, 0);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_OFS, hsh_idx, 0, 0);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W9_P, hsh_idx, 0, 0);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_P_MASK, hsh_idx, 0, 1);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,\n+\t\t\t\t   
0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 8,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 9,\n+\t\t\t\t   0);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_SEED, hsh_idx, 0,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_VALID, hsh_idx, 0,\n+\t\t\t\t   1);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,\n+\t\t\t\t   HASH_5TUPLE);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,\n+\t\t\t\t   hsh_idx, 0, 1);\n+\n+\t\tNT_LOG(DBG, FILTER,\n+\t\t       \"Set IPv6 5-tuple hasher with adaptive IPv4 hashing\\n\");\n+\t\tbreak;\n+\tdefault:\n+\tcase HASH_ALGO_ROUND_ROBIN:\n+\t\t/* zero is round-robin */\n+\t\tbreak;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,\n+\t\t\t       struct nt_eth_rss f)\n+{\n+\tuint64_t fields = f.fields;\n+\n+\tint res = 0;\n+\n+\tres |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, hsh_idx, 0,\n+\t\t\t\t  0);\n+\tres |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_LOAD_DIST_TYPE, hsh_idx,\n+\t\t\t\t  0, 2);\n+\tswitch (fields) {\n+\tcase NT_ETH_RSS_C_VLAN:\n+\t\t/*\n+\t\t * Here we are using 1st VLAN to point C-VLAN which is only try for the single VLAN\n+\t\t * provider\n+\t\t */\n+\t\tres |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_PE, hsh_idx,\n+\t\t\t\t\t  0, DYN_FIRST_VLAN);\n+\t\tres |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_W8_OFS, hsh_idx,\n+\t\t\t\t\t  0, 0);\n+\t\tres |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK,\n+\t\t\t\t\t  hsh_idx, 8, 0xffffffff);\n+\t\tres |= hw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE,\n+\t\t\t\t\t  hsh_idx, 0, HASH_LAST_VLAN_ID);\n+\t\tif (res) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"VLAN hasher is not set hardware communication problem has \"\n+\t\t\t       \"occurred. The cardware could be in inconsistent state. Rerun.\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\tNT_LOG(DBG, FILTER, \"Set VLAN hasher.\\n\");\n+\t\treturn 0;\n+\tcase NT_ETH_RSS_LEVEL_OUTERMOST | NT_ETH_RSS_L3_DST_ONLY | NT_ETH_RSS_IP:\n+\t\t/* need to create an IPv6 hashing and enable the adaptive ip mask bit */\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_PE, hsh_idx, 0,\n+\t\t\t\t   DYN_FINAL_IP_DST);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW4_OFS, hsh_idx, 0,\n+\t\t\t\t   0);\n+\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 4,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 5,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 6,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 7,\n+\t\t\t\t   0xffffffff);\n+\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,\n+\t\t\t\t   HASH_OUTER_DST_IP);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,\n+\t\t\t\t   hsh_idx, 0, 1);\n+\t\tif (res) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"Outer dst IP hasher is not set hardware communication problem has \"\n+\t\t\t       \"occurred. The cardware could be in inconsistent state. 
Rerun.\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\tNT_LOG(DBG, FILTER, \"Set outer dst IP hasher.\\n\");\n+\t\treturn 0;\n+\tcase NT_ETH_RSS_LEVEL_INNERMOST | NT_ETH_RSS_L3_SRC_ONLY | NT_ETH_RSS_IP:\n+\t\t/* need to create an IPv6 hashing and enable the adaptive ip mask bit */\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_PE, hsh_idx, 0,\n+\t\t\t\t   DYN_TUN_FINAL_IP_DST);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_QW0_OFS, hsh_idx, 0,\n+\t\t\t\t   -16);\n+\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 0,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 1,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 2,\n+\t\t\t\t   0xffffffff);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_WORD_MASK, hsh_idx, 3,\n+\t\t\t\t   0xffffffff);\n+\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_HSH_TYPE, hsh_idx, 0,\n+\t\t\t\t   HASH_INNER_SRC_IP);\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_AUTO_IPV4_MASK,\n+\t\t\t\t   hsh_idx, 0, 1);\n+\t\tif (res) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"Inner (depth = 1) src IP hasher is not set hardware communication \"\n+\t\t\t       \"problem has occurred. The cardware could be in inconsistent state. \"\n+\t\t\t       \"Rerun.\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\tNT_LOG(DBG, FILTER, \"Set outer dst IP hasher.\\n\");\n+\t\treturn 0;\n+\tdefault:\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t       \"RSS bit flags can't be set up. \"\n+\t\t       \"Flags combination is not supported.\");\n+\t\treturn -1;\n+\t}\n+}\n+\n+/*\n+ * *****************************************************************************\n+ * Nic port/adapter lookup\n+ * *****************************************************************************\n+ */\n+\n+struct flow_eth_dev *nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port)\n+{\n+\tstruct flow_nic_dev *nic_dev = dev_base;\n+\n+\twhile (nic_dev) {\n+\t\tif (nic_dev->adapter_no == adapter_no)\n+\t\t\tbreak;\n+\t\tnic_dev = nic_dev->next;\n+\t}\n+\n+\tif (!nic_dev)\n+\t\treturn NULL;\n+\n+\tstruct flow_eth_dev *dev = nic_dev->eth_base;\n+\n+\twhile (dev) {\n+\t\tif (port == dev->port)\n+\t\t\treturn dev;\n+\t\tdev = dev->next;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no)\n+{\n+\tstruct flow_nic_dev *ndev = dev_base;\n+\n+\twhile (ndev) {\n+\t\tif (adapter_no == ndev->adapter_no)\n+\t\t\tbreak;\n+\t\tndev = ndev->next;\n+\t}\n+\treturn ndev;\n+}\n+\n+/*\n+ * *****************************************************************************\n+ * LAG control implementation\n+ * *****************************************************************************\n+ */\n+\n+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask)\n+{\n+\tpthread_mutex_lock(&base_mtx);\n+\tstruct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);\n+\n+\tif (!ndev) {\n+\t\t/* Error invalid nic device */\n+\t\tpthread_mutex_unlock(&base_mtx);\n+\t\treturn -1;\n+\t}\n+\t/*\n+\t * Sets each 2 ports for each bit N as Lag. 
Ports N*2+N*2+1 are merged together\n+\t * and reported as N*2 incoming port\n+\t */\n+\thw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_LAG_PHY_ODD_EVEN, port_mask);\n+\thw_mod_rmc_ctrl_flush(&ndev->be);\n+\tpthread_mutex_unlock(&base_mtx);\n+\treturn 0;\n+}\n+\n+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask)\n+{\n+\tpthread_mutex_lock(&base_mtx);\n+\tstruct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);\n+\n+\tif (!ndev) {\n+\t\t/* Error invalid nic device */\n+\t\tpthread_mutex_unlock(&base_mtx);\n+\t\treturn -1;\n+\t}\n+\t/* Blocks for traffic from port */\n+\thw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, port_mask);\n+\thw_mod_rmc_ctrl_flush(&ndev->be);\n+\tpthread_mutex_unlock(&base_mtx);\n+\treturn 0;\n+}\n+\n+static void write_lag_entry(struct flow_api_backend_s *be, uint32_t index,\n+\t\t\t    uint32_t value)\n+{\n+\thw_mod_roa_lagcfg_set(be, HW_ROA_LAGCFG_TXPHY_PORT, index, value);\n+\thw_mod_roa_lagcfg_flush(be, index, 1);\n+}\n+\n+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,\n+\t\t   uint32_t value)\n+{\n+\tpthread_mutex_lock(&base_mtx);\n+\tstruct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);\n+\n+\tif (!ndev) {\n+\t\t/* Error invalid nic device */\n+\t\tpthread_mutex_unlock(&base_mtx);\n+\t\treturn -1;\n+\t}\n+\n+\tswitch (cmd) {\n+\tcase FLOW_LAG_SET_ENTRY:\n+\t\twrite_lag_entry(&ndev->be, index, value);\n+\t\tbreak;\n+\n+\tcase FLOW_LAG_SET_ALL:\n+\t\tindex &= 3;\n+\t\tfor (unsigned int i = 0; i < ndev->be.roa.nb_lag_entries;\n+\t\t\t\ti += 4)\n+\t\t\twrite_lag_entry(&ndev->be, i + index, value);\n+\t\tbreak;\n+\n+\tcase FLOW_LAG_SET_BALANCE:\n+\t\t/*\n+\t\t * This function will balance the output port\n+\t\t * value: The balance of the distribution:\n+\t\t * port  P0 / P1\n+\t\t * 0:    0  / 100    port 0 is disabled\n+\t\t * 25:   25 / 75\n+\t\t * 50:   50 / 50\n+\t\t * 75:   75 / 25\n+\t\t * 100:  100/  0     port 1 is disabled\n+\t\t */\n+\t{\n+\t\t/* Clamp the balance to 100% output on port 1 */\n+\t\tif (value > 100)\n+\t\t\tvalue = 100;\n+\t\tdouble balance = ((double)value / 100.0);\n+\t\tdouble block_count =\n+\t\t\t(double)ndev->be.roa.nb_lag_entries / 4.0;\n+\n+\t\tint output_port = 1;\n+\t\tint port0_output_block_count =\n+\t\t\t(int)(block_count * balance);\n+\n+\t\tfor (int block = 0; block < block_count; block++) {\n+\t\t\t/* When the target port0 balance is reached. */\n+\t\t\tif (block >= port0_output_block_count)\n+\t\t\t\toutput_port = 2;\n+\t\t\t/* Write an entire hash block to a given output port. 
*/\n+\t\t\tfor (int idx = 0; idx < 4; idx++) {\n+\t\t\t\twrite_lag_entry(&ndev->be,\n+\t\t\t\t\t\tblock * 4 + idx,\n+\t\t\t\t\t\toutput_port);\n+\t\t\t} /* for each index in hash block */\n+\t\t} /* for each hash block */\n+\t}\n+\n+\tbreak;\n+\tdefault:\n+\t\tpthread_mutex_unlock(&base_mtx);\n+\t\treturn -1;\n+\t}\n+\n+\tpthread_mutex_unlock(&base_mtx);\n+\treturn 0;\n+}\n+\n+/*\n+ * *****************************************************************************\n+ * Flow API\n+ * *****************************************************************************\n+ */\n+\n+int flow_validate(struct flow_eth_dev *dev, const struct flow_elem item[],\n+\t\t  const struct flow_action action[], struct flow_error *error)\n+{\n+\tif (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {\n+\t\tNT_LOG(ERR, FILTER, \"vSwitch profile not supported\");\n+\t\treturn -1;\n+\t}\n+\treturn flow_validate_profile_inline(dev, item, action, error);\n+}\n+\n+struct flow_handle *flow_create(struct flow_eth_dev *dev,\n+\t\t\t\tconst struct flow_attr *attr,\n+\t\t\t\tconst struct flow_elem item[],\n+\t\t\t\tconst struct flow_action action[],\n+\t\t\t\tstruct flow_error *error)\n+{\n+\tif (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {\n+\t\tNT_LOG(ERR, FILTER, \"vSwitch profile not supported\");\n+\t\treturn NULL;\n+\t}\n+\treturn flow_create_profile_inline(dev, attr, item, action, error);\n+}\n+\n+int flow_destroy(struct flow_eth_dev *dev, struct flow_handle *flow,\n+\t\t struct flow_error *error)\n+{\n+\tif (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {\n+\t\tNT_LOG(ERR, FILTER, \"vSwitch profile not supported\");\n+\t\treturn -1;\n+\t}\n+\treturn flow_destroy_profile_inline(dev, flow, error);\n+}\n+\n+int flow_flush(struct flow_eth_dev *dev, struct flow_error *error)\n+{\n+\tif (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {\n+\t\tNT_LOG(ERR, FILTER, \"vSwitch profile not supported\");\n+\t\treturn -1;\n+\t}\n+\treturn flow_flush_profile_inline(dev, error);\n+}\n+\n+int flow_query(struct flow_eth_dev *dev, struct flow_handle *flow,\n+\t       const struct flow_action *action, void **data, uint32_t *length,\n+\t       struct flow_error *error)\n+{\n+\tif (dev->ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {\n+\t\tNT_LOG(ERR, FILTER, \"vSwitch profile not supported\");\n+\t\treturn -1;\n+\t}\n+\treturn flow_query_profile_inline(dev, flow, action, data, length,\n+\t\t\t\t\t error);\n+}\n+\n+/*\n+ * *****************************************************************************\n+ * Device Management API\n+ * *****************************************************************************\n+ */\n+\n+static void nic_insert_eth_port_dev(struct flow_nic_dev *ndev,\n+\t\t\t\t    struct flow_eth_dev *dev)\n+{\n+\tdev->next = ndev->eth_base;\n+\tndev->eth_base = dev;\n+}\n+\n+static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev,\n+\t\t\t\t   struct flow_eth_dev *eth_dev)\n+{\n+\tstruct flow_eth_dev *dev = ndev->eth_base, *prev = NULL;\n+\n+\twhile (dev) {\n+\t\tif (dev == eth_dev) {\n+\t\t\tif (prev)\n+\t\t\t\tprev->next = dev->next;\n+\n+\t\t\telse\n+\t\t\t\tndev->eth_base = dev->next;\n+\t\t\treturn 0;\n+\t\t}\n+\t\tprev = dev;\n+\t\tdev = dev->next;\n+\t}\n+\treturn -1;\n+}\n+\n+static void flow_ndev_reset(struct flow_nic_dev *ndev)\n+{\n+\t/* Delete all eth-port devices created on this NIC device */\n+\twhile (ndev->eth_base)\n+\t\tflow_delete_eth_dev(ndev->eth_base);\n+\n+\t/* Error check */\n+\twhile (ndev->flow_base) {\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t       \"ERROR : 
Flows still defined but all eth-ports deleted. Flow %p\\n\",\n+\t\t       ndev->flow_base);\n+\n+\t\tif (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH)  {\n+\t\t\tNT_LOG(ERR, FILTER, \"vSwitch profile not supported\");\n+\t\t\treturn;\n+\t\t}\n+\t\tflow_destroy_profile_inline(ndev->flow_base->dev,\n+\t\t\t\t\t    ndev->flow_base, NULL);\n+\t}\n+\n+\tif (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {\n+\t\tNT_LOG(ERR, FILTER, \"vSwitch profile not supported\");\n+\t\treturn;\n+\t}\n+\tdone_flow_management_of_ndev_profile_inline(ndev);\n+\n+\tkm_free_ndev_resource_management(&ndev->km_res_handle);\n+\tkcc_free_ndev_resource_management(&ndev->kcc_res_handle);\n+\n+#ifdef FLOW_DEBUG\n+\t/*\n+\t * free all resources default allocated, initially for this NIC DEV\n+\t * Is not really needed since the bitmap will be freed in a sec. Therefore\n+\t * only in debug mode\n+\t */\n+\n+\t/* Check if all resources has been released */\n+\tNT_LOG(DBG, FILTER, \"Delete NIC DEV Adaptor %i\\n\", ndev->adapter_no);\n+\tfor (unsigned int i = 0; i < RES_COUNT; i++) {\n+\t\tint err = 0;\n+#if defined(FLOW_DEBUG)\n+\t\tNT_LOG(DBG, FILTER, \"RES state for: %s\\n\", dbg_res_descr[i]);\n+#endif\n+\t\tfor (unsigned int ii = 0; ii < ndev->res[i].resource_count;\n+\t\t\t\tii++) {\n+\t\t\tint ref = ndev->res[i].ref[ii];\n+\t\t\tint used = flow_nic_is_resource_used(ndev, i, ii);\n+\n+\t\t\tif (ref || used) {\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"  [%i]: ref cnt %i, used %i\\n\", ii, ref,\n+\t\t\t\t       used);\n+\t\t\t\terr = 1;\n+\t\t\t}\n+\t\t}\n+\t\tif (err)\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"ERROR - some resources not freed\\n\");\n+\t}\n+#endif\n+}\n+\n+int flow_reset_nic_dev(uint8_t adapter_no)\n+{\n+\tstruct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);\n+\n+\tif (!ndev)\n+\t\treturn -1;\n+\tflow_ndev_reset(ndev);\n+\tflow_api_backend_reset(&ndev->be);\n+\treturn 0;\n+}\n+\n+/*\n+ * adapter_no       physical adapter no\n+ * port_no          local port no\n+ * alloc_rx_queues  number of rx-queues to allocate for this eth_dev\n+ */\n+struct flow_eth_dev *flow_get_eth_dev(uint8_t adapter_no, uint8_t port_no,\n+\t\t\t\t      uint32_t port_id, int alloc_rx_queues,\n+\t\t\t\t      struct flow_queue_id_s queue_ids[],\n+\t\t\t\t      int *rss_target_id,\n+\t\t\t\t      enum flow_eth_dev_profile flow_profile,\n+\t\t\t\t      uint32_t exception_path)\n+{\n+\tint i;\n+\tstruct flow_eth_dev *eth_dev = NULL;\n+\n+\tNT_LOG(DBG, FILTER,\n+\t       \"Get eth-port adapter %i, port %i, port_id %u, rx queues %i, profile %i\\n\",\n+\t       adapter_no, port_no, port_id, alloc_rx_queues, flow_profile);\n+\n+\tif (MAX_OUTPUT_DEST < FLOW_MAX_QUEUES) {\n+\t\tassert(0);\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t       \"ERROR: Internal array for multiple queues too small for API\\n\");\n+\t}\n+\n+\tpthread_mutex_lock(&base_mtx);\n+\tstruct flow_nic_dev *ndev = get_nic_dev_from_adapter_no(adapter_no);\n+\n+\tif (!ndev) {\n+\t\t/* Error - no flow api found on specified adapter */\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t       \"ERROR: no flow interface registered for adapter %d\\n\",\n+\t\t       adapter_no);\n+\t\tpthread_mutex_unlock(&base_mtx);\n+\t\treturn NULL;\n+\t}\n+\n+\tif (ndev->ports < ((uint16_t)port_no + 1)) {\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t       \"ERROR: port exceeds supported port range for adapter\\n\");\n+\t\tpthread_mutex_unlock(&base_mtx);\n+\t\treturn NULL;\n+\t}\n+\n+\tif ((alloc_rx_queues - 1) >\n+\t\t\tFLOW_MAX_QUEUES) { /* 0th is exception so +1 */\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t    
   \"ERROR: Exceeds supported number of rx queues per eth device\\n\");\n+\t\tpthread_mutex_unlock(&base_mtx);\n+\t\treturn NULL;\n+\t}\n+\n+\t/* don't accept multiple eth_dev's on same NIC and same port */\n+\teth_dev = nic_and_port_to_eth_dev(adapter_no, port_no);\n+\tif (eth_dev) {\n+\t\tNT_LOG(DBG, FILTER,\n+\t\t       \"Re-opening existing NIC port device: NIC DEV: %i Port %i\\n\",\n+\t\t       adapter_no, port_no);\n+\t\tpthread_mutex_unlock(&base_mtx);\n+\t\tflow_delete_eth_dev(eth_dev);\n+\t\teth_dev = NULL;\n+\t}\n+\n+\teth_dev = calloc(1, sizeof(struct flow_eth_dev));\n+\tif (!eth_dev) {\n+\t\tNT_LOG(ERR, FILTER, \"ERROR: calloc failed\\n\");\n+\t\tgoto err_exit1;\n+\t}\n+\n+\tpthread_mutex_lock(&ndev->mtx);\n+\n+\teth_dev->ndev = ndev;\n+\teth_dev->port = port_no;\n+\teth_dev->port_id = port_id;\n+\n+#ifdef FLOW_DEBUG\n+\tndev->be.iface->set_debug_mode(ndev->be.be_dev,\n+\t\t\t\t       FLOW_BACKEND_DEBUG_MODE_WRITE);\n+#endif\n+\n+\t/* First time then NIC is initialized */\n+\tif (!ndev->flow_mgnt_prepared) {\n+\t\tndev->flow_profile = flow_profile;\n+\t\t/* Initialize modules if needed - recipe 0 is used as no-match and must be setup */\n+\t\tif (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH) {\n+\t\t\tNT_LOG(ERR, FILTER, \"vSwitch profile not supported\");\n+\t\t\tgoto err_exit0;\n+\t\t} else if (initialize_flow_management_of_ndev_profile_inline(ndev)\n+\t\t\t!= 0) {\n+\t\t\tgoto err_exit0;\n+\t\t}\n+\t} else {\n+\t\t/* check if same flow type is requested, otherwise fail */\n+\t\tif (ndev->flow_profile != flow_profile) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"ERROR: Different flow types requested on same NIC device. \"\n+\t\t\t       \"Not supported.\\n\");\n+\t\t\tgoto err_exit0;\n+\t\t}\n+\t}\n+\n+\t/* Allocate the requested queues in HW for this dev */\n+\n+\tfor (i = 0; i < alloc_rx_queues; i++) {\n+#ifdef SCATTER_GATHER\n+\t\teth_dev->rx_queue[i] = queue_ids[i];\n+#else\n+\t\tint queue_id = flow_nic_alloc_resource(ndev, RES_QUEUE, 1);\n+\n+\t\tif (queue_id < 0) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"ERROR: no more free queue IDs in NIC\\n\");\n+\t\t\tgoto err_exit0;\n+\t\t}\n+\n+\t\teth_dev->rx_queue[eth_dev->num_queues].id = (uint8_t)queue_id;\n+\t\teth_dev->rx_queue[eth_dev->num_queues].hw_id =\n+\t\t\tndev->be.iface->alloc_rx_queue(ndev->be.be_dev,\n+\t\t\t\teth_dev->rx_queue[eth_dev->num_queues].id);\n+\t\tif (eth_dev->rx_queue[eth_dev->num_queues].hw_id < 0) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"ERROR: could not allocate a new queue\\n\");\n+\t\t\tgoto err_exit0;\n+\t\t}\n+\n+\t\tif (queue_ids) {\n+\t\t\tqueue_ids[eth_dev->num_queues] =\n+\t\t\t\teth_dev->rx_queue[eth_dev->num_queues];\n+\t\t}\n+#endif\n+\t\tif (i == 0 && (flow_profile == FLOW_ETH_DEV_PROFILE_VSWITCH ||\n+\t\t\t\t(flow_profile == FLOW_ETH_DEV_PROFILE_INLINE &&\n+\t\t\t\t exception_path))) {\n+\t\t\t/*\n+\t\t\t * Init QSL UNM - unmatched - redirects otherwise discarded packets in QSL\n+\t\t\t */\n+\t\t\tif (hw_mod_qsl_unmq_set(&ndev->be,\n+\t\t\t\t\t\tHW_QSL_UNMQ_DEST_QUEUE,\n+\t\t\t\t\t\teth_dev->port,\n+\t\t\t\t\t\teth_dev->rx_queue[0].hw_id) < 0)\n+\t\t\t\tgoto err_exit0;\n+\t\t\tif (hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN,\n+\t\t\t\t\t\teth_dev->port, 1) < 0)\n+\t\t\t\tgoto err_exit0;\n+\t\t\tif (hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1) <\n+\t\t\t\t\t0)\n+\t\t\t\tgoto err_exit0;\n+\t\t}\n+\n+\t\teth_dev->num_queues++;\n+\t}\n+\n+\teth_dev->rss_target_id = -1;\n+\n+\tif (flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {\n+\t\tfor (i = 0; i < 
eth_dev->num_queues; i++) {\n+\t\t\tuint32_t qen_value = 0;\n+\t\t\tuint32_t queue_id =\n+\t\t\t\t(uint32_t)eth_dev->rx_queue[i].hw_id;\n+\n+\t\t\thw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,\n+\t\t\t\t\t   queue_id / 4, &qen_value);\n+\t\t\thw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,\n+\t\t\t\t\t   queue_id / 4,\n+\t\t\t\t\t   qen_value | (1 << (queue_id % 4)));\n+\t\t\thw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);\n+\t\t}\n+\t}\n+\n+\t*rss_target_id = eth_dev->rss_target_id;\n+\n+#ifdef FLOW_DEBUG\n+\tndev->be.iface->set_debug_mode(ndev->be.be_dev,\n+\t\t\t\t       FLOW_BACKEND_DEBUG_MODE_NONE);\n+#endif\n+\n+\tnic_insert_eth_port_dev(ndev, eth_dev);\n+\n+\tpthread_mutex_unlock(&ndev->mtx);\n+\tpthread_mutex_unlock(&base_mtx);\n+\treturn eth_dev;\n+\n+err_exit0:\n+\tpthread_mutex_unlock(&ndev->mtx);\n+\tpthread_mutex_unlock(&base_mtx);\n+\n+err_exit1:\n+\tif (eth_dev)\n+\t\tfree(eth_dev);\n+\n+#ifdef FLOW_DEBUG\n+\tndev->be.iface->set_debug_mode(ndev->be.be_dev,\n+\t\t\t\t       FLOW_BACKEND_DEBUG_MODE_NONE);\n+#endif\n+\n+\tNT_LOG(DBG, FILTER, \"ERR in %s\\n\", __func__);\n+\treturn NULL; /* Error exit */\n+}\n+\n+int flow_eth_dev_add_queue(struct flow_eth_dev *eth_dev,\n+\t\t\t   struct flow_queue_id_s *queue_id)\n+{\n+\tuint32_t qen_value = 0;\n+\n+\teth_dev->rx_queue[eth_dev->num_queues].id = queue_id->id;\n+\teth_dev->rx_queue[eth_dev->num_queues].hw_id = queue_id->hw_id;\n+\teth_dev->num_queues += 1;\n+\n+\thw_mod_qsl_qen_get(&eth_dev->ndev->be, HW_QSL_QEN_EN,\n+\t\t\t   queue_id->hw_id / 4, &qen_value);\n+\thw_mod_qsl_qen_set(&eth_dev->ndev->be, HW_QSL_QEN_EN,\n+\t\t\t   queue_id->hw_id / 4,\n+\t\t\t   qen_value | (1 << (queue_id->hw_id % 4)));\n+\thw_mod_qsl_qen_flush(&eth_dev->ndev->be, queue_id->hw_id / 4, 1);\n+\n+\treturn 0;\n+}\n+\n+int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)\n+{\n+\tstruct flow_nic_dev *ndev = eth_dev->ndev;\n+\n+\tif (!ndev) {\n+\t\t/* Error invalid nic device */\n+\t\treturn -1;\n+\t}\n+\n+\tNT_LOG(DBG, FILTER, \"Delete eth-port device %p, port %i\\n\", eth_dev,\n+\t       eth_dev->port);\n+\n+#ifdef FLOW_DEBUG\n+\tndev->be.iface->set_debug_mode(ndev->be.be_dev,\n+\t\t\t\t       FLOW_BACKEND_DEBUG_MODE_WRITE);\n+#endif\n+\n+\t/* delete all created flows from this device */\n+\tpthread_mutex_lock(&ndev->mtx);\n+\n+\tstruct flow_handle *flow = ndev->flow_base;\n+\n+\twhile (flow) {\n+\t\tif (flow->dev == eth_dev) {\n+\t\t\tstruct flow_handle *flow_next = flow->next;\n+\n+\t\t\tif (ndev->flow_profile ==\n+\t\t\t\t\tFLOW_ETH_DEV_PROFILE_VSWITCH) {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"vSwitch profile not supported\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tflow_destroy_locked_profile_inline(eth_dev,\n+\t\t\t\t\t\t\t   flow, NULL);\n+\t\t\tflow = flow_next;\n+\t\t} else {\n+\t\t\tflow = flow->next;\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * remove unmatched queue if setup in QSL\n+\t * remove exception queue setting in QSL UNM\n+\t */\n+\thw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port,\n+\t\t\t    0);\n+\thw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);\n+\thw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);\n+\n+\tif (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE) {\n+\t\tfor (int i = 0; i < eth_dev->num_queues; ++i) {\n+\t\t\tuint32_t qen_value = 0;\n+\t\t\tuint32_t queue_id =\n+\t\t\t\t(uint32_t)eth_dev->rx_queue[i].hw_id;\n+\n+\t\t\thw_mod_qsl_qen_get(&ndev->be, HW_QSL_QEN_EN,\n+\t\t\t\t\t   queue_id / 4, &qen_value);\n+\t\t\thw_mod_qsl_qen_set(&ndev->be, HW_QSL_QEN_EN,\n+\t\t\t\t\t   queue_id / 4,\n+\t\t\t\t\t   
qen_value & ~(1U << (queue_id % 4)));\n+\t\t\thw_mod_qsl_qen_flush(&ndev->be, queue_id / 4, 1);\n+\t\t}\n+\t}\n+\n+#ifdef FLOW_DEBUG\n+\tndev->be.iface->set_debug_mode(ndev->be.be_dev,\n+\t\t\t\t       FLOW_BACKEND_DEBUG_MODE_NONE);\n+#endif\n+\n+#ifndef SCATTER_GATHER\n+\t/* free rx queues */\n+\tfor (int i = 0; i < eth_dev->num_queues; i++) {\n+\t\tndev->be.iface->free_rx_queue(ndev->be.be_dev,\n+\t\t\t\t\t      eth_dev->rx_queue[i].hw_id);\n+\t\tflow_nic_deref_resource(ndev, RES_QUEUE,\n+\t\t\t\t\teth_dev->rx_queue[i].id);\n+\t}\n+#endif\n+\n+\t/* take eth_dev out of ndev list */\n+\tif (nic_remove_eth_port_dev(ndev, eth_dev) != 0)\n+\t\tNT_LOG(ERR, FILTER, \"ERROR : eth_dev %p not found\\n\", eth_dev);\n+\n+\tpthread_mutex_unlock(&ndev->mtx);\n+\n+\t/* free eth_dev */\n+\tfree(eth_dev);\n+\treturn 0;\n+}\n+\n+int flow_get_tunnel_definition(struct tunnel_cfg_s *tun, uint32_t flow_stat_id,\n+\t\t\t       uint8_t vport)\n+{\n+\treturn tunnel_get_definition(tun, flow_stat_id, vport);\n+}\n+\n+/*\n+ * *****************************  Flow API NIC Setup  ***************************************\n+ * Flow backend creation function - register and initialize common backend API to FPA modules\n+ * ******************************************************************************************\n+ */\n+\n+static int init_resource_elements(struct flow_nic_dev *ndev,\n+\t\t\t\t  enum res_type_e res_type, uint32_t count)\n+{\n+\tassert(ndev->res[res_type].alloc_bm == NULL);\n+\t/* allocate bitmap and ref counter */\n+\tndev->res[res_type].alloc_bm =\n+\t\tcalloc(1, BIT_CONTAINER_8_ALIGN(count) + count * sizeof(uint32_t));\n+\tif (ndev->res[res_type].alloc_bm) {\n+\t\tndev->res[res_type].ref =\n+\t\t\t(uint32_t *)&ndev->res[res_type]\n+\t\t\t.alloc_bm[BIT_CONTAINER_8_ALIGN(count)];\n+\t\tndev->res[res_type].resource_count = count;\n+\t\treturn 0;\n+\t}\n+\treturn -1;\n+}\n+\n+static void done_resource_elements(struct flow_nic_dev *ndev,\n+\t\t\t\t   enum res_type_e res_type)\n+{\n+\tassert(ndev);\n+\tif (ndev->res[res_type].alloc_bm)\n+\t\tfree(ndev->res[res_type].alloc_bm);\n+}\n+\n+static void list_insert_flow_nic(struct flow_nic_dev *ndev)\n+{\n+\tpthread_mutex_lock(&base_mtx);\n+\tndev->next = dev_base;\n+\tdev_base = ndev;\n+\tpthread_mutex_unlock(&base_mtx);\n+}\n+\n+static int list_remove_flow_nic(struct flow_nic_dev *ndev)\n+{\n+\tpthread_mutex_lock(&base_mtx);\n+\tstruct flow_nic_dev *nic_dev = dev_base, *prev = NULL;\n+\n+\twhile (nic_dev) {\n+\t\tif (nic_dev == ndev) {\n+\t\t\tif (prev)\n+\t\t\t\tprev->next = nic_dev->next;\n+\t\t\telse\n+\t\t\t\tdev_base = nic_dev->next;\n+\t\t\tpthread_mutex_unlock(&base_mtx);\n+\t\t\treturn 0;\n+\t\t}\n+\t\tprev = nic_dev;\n+\t\tnic_dev = nic_dev->next;\n+\t}\n+\n+\tpthread_mutex_unlock(&base_mtx);\n+\treturn -1;\n+}\n+\n+struct flow_nic_dev *flow_api_create(uint8_t adapter_no,\n+\t\t\t\t     const struct flow_api_backend_ops *be_if,\n+\t\t\t\t     void *be_dev)\n+{\n+\tif (!be_if || be_if->version != 1) {\n+\t\tNT_LOG(DBG, FILTER, \"ERR: %s\\n\", __func__);\n+\t\treturn NULL;\n+\t}\n+\n+\tstruct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));\n+\n+\tif (!ndev) {\n+\t\tNT_LOG(ERR, FILTER, \"ERROR: calloc failed\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\t/*\n+\t * To dump module initialization writes use\n+\t * FLOW_BACKEND_DEBUG_MODE_WRITE\n+\t * then remember to set it ...NONE afterwards again\n+\t */\n+\tbe_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);\n+\n+\tif (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)\n+\t\tgoto 
err_exit;\n+\tndev->adapter_no = adapter_no;\n+\n+\tndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ?\n+\t\t\t\t 256 :\n+\t\t\t\t ndev->be.num_rx_ports);\n+\n+\t/*\n+\t * Free resources in NIC must be managed by this module\n+\t * Get resource sizes and create resource manager elements\n+\t */\n+\tif (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_CAT_CFN,\n+\t\t\t\t   ndev->be.cat.nb_cat_funcs))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_KM_FLOW_TYPE,\n+\t\t\t\t   ndev->be.cat.nb_flow_types))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_KM_CATEGORY,\n+\t\t\t\t   ndev->be.km.nb_categories))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_PDB_RCP,\n+\t\t\t\t   ndev->be.pdb.nb_pdb_rcp_categories))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_QSL_RCP,\n+\t\t\t\t   ndev->be.qsl.nb_rcp_categories))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_QSL_QST,\n+\t\t\t\t   ndev->be.qsl.nb_qst_entries))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_SLC_RCP, ndev->be.max_categories))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_IOA_RCP,\n+\t\t\t\t   ndev->be.ioa.nb_rcp_categories))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_ROA_RCP,\n+\t\t\t\t   ndev->be.roa.nb_tun_categories))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_FLM_FLOW_TYPE,\n+\t\t\t\t   ndev->be.cat.nb_flow_types))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_FLM_RCP,\n+\t\t\t\t   ndev->be.flm.nb_categories))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_HST_RCP,\n+\t\t\t\t   ndev->be.hst.nb_hst_rcp_categories))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_TPE_RCP,\n+\t\t\t\t   ndev->be.tpe.nb_rcp_categories))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_TPE_EXT,\n+\t\t\t\t   ndev->be.tpe.nb_rpl_ext_categories))\n+\t\tgoto err_exit;\n+\tif (init_resource_elements(ndev, RES_TPE_RPL,\n+\t\t\t\t   ndev->be.tpe.nb_rpl_depth))\n+\t\tgoto err_exit;\n+\n+\t/* may need IPF, COR */\n+\n+\t/* check all defined has been initialized */\n+\tfor (int i = 0; i < RES_COUNT; i++)\n+\t\tassert(ndev->res[i].alloc_bm);\n+\n+\tpthread_mutex_init(&ndev->mtx, NULL);\n+\tlist_insert_flow_nic(ndev);\n+\n+\treturn ndev;\n+\n+err_exit:\n+\tif (ndev)\n+\t\tflow_api_done(ndev);\n+\tNT_LOG(DBG, FILTER, \"ERR: %s\\n\", __func__);\n+\treturn NULL;\n+}\n+\n+int flow_api_done(struct flow_nic_dev *ndev)\n+{\n+\tNT_LOG(DBG, FILTER, \"FLOW API DONE\\n\");\n+\tif (ndev) {\n+\t\tflow_ndev_reset(ndev);\n+\n+\t\t/* delete resource management allocations for this ndev */\n+\t\tfor (int i = 0; i < RES_COUNT; i++)\n+\t\t\tdone_resource_elements(ndev, i);\n+\n+\t\tflow_api_backend_done(&ndev->be);\n+\t\tlist_remove_flow_nic(ndev);\n+\t\tfree(ndev);\n+\t}\n+\treturn 0;\n+}\n+\n+void *flow_api_get_be_dev(struct flow_nic_dev *ndev)\n+{\n+\tif (!ndev) {\n+\t\tNT_LOG(DBG, FILTER, \"ERR: %s\\n\", __func__);\n+\t\treturn NULL;\n+\t}\n+\treturn ndev->be.be_dev;\n+}\n+\n+int flow_get_num_queues(uint8_t adapter_no, uint8_t 
port_no)\n+{\n+\tstruct flow_eth_dev *eth_dev =\n+\t\tnic_and_port_to_eth_dev(adapter_no, port_no);\n+\treturn eth_dev->num_queues;\n+}\n+\n+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no)\n+{\n+\tstruct flow_eth_dev *eth_dev =\n+\t\tnic_and_port_to_eth_dev(adapter_no, port_no);\n+\treturn eth_dev->rx_queue[queue_no].hw_id;\n+}\n+\n+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)\n+{\n+\tif (ndev->flow_profile == FLOW_ETH_DEV_PROFILE_INLINE)\n+\t\treturn flow_get_flm_stats_profile_inline(ndev, data, size);\n+\treturn -1;\n+}\ndiff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.h b/drivers/net/ntnic/nthw/flow_api/flow_api.h\nnew file mode 100644\nindex 0000000000..9dbaac49e8\n--- /dev/null\n+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.h\n@@ -0,0 +1,291 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef _FLOW_API_H_\n+#define _FLOW_API_H_\n+\n+#include <pthread.h>\n+\n+#include \"ntlog.h\"\n+#include \"stream_binary_flow_api.h\"\n+\n+#include \"flow_api_actions.h\"\n+#include \"flow_api_backend.h\"\n+#include \"flow_api_engine.h\"\n+\n+/*\n+ * ****************************************************\n+ *        Flow NIC and Eth port device management\n+ * ****************************************************\n+ */\n+\n+struct hw_mod_resource_s {\n+\tuint8_t *alloc_bm; /* allocation bitmap */\n+\tuint32_t *ref; /* reference counter for each resource element */\n+\tuint32_t resource_count; /* number of total available entries */\n+};\n+\n+/*\n+ * Set of definitions to be used to map desirable fields for RSS\n+ * hash functions. Supposed to be used with dpdk, so the values\n+ * correspond to dpdk definitions, but we avoid dependency to\n+ * dpdk headers here.\n+ */\n+\n+#define NT_ETH_RSS_IPV4 (UINT64_C(1) << 2)\n+#define NT_ETH_RSS_FRAG_IPV4 (UINT64_C(1) << 3)\n+#define NT_ETH_RSS_NONFRAG_IPV4_OTHER (UINT64_C(1) << 7)\n+#define NT_ETH_RSS_IPV6 (UINT64_C(1) << 8)\n+#define NT_ETH_RSS_FRAG_IPV6 (UINT64_C(1) << 9)\n+#define NT_ETH_RSS_NONFRAG_IPV6_OTHER (UINT64_C(1) << 13)\n+#define NT_ETH_RSS_IPV6_EX (UINT64_C(1) << 15)\n+#define NT_ETH_RSS_C_VLAN (UINT64_C(1) << 26)\n+#define NT_ETH_RSS_L3_DST_ONLY (UINT64_C(1) << 62)\n+#define NT_ETH_RSS_L3_SRC_ONLY (UINT64_C(1) << 63)\n+\n+#define NT_ETH_RSS_IP                                           \\\n+\t(NT_ETH_RSS_IPV4 | NT_ETH_RSS_FRAG_IPV4 |               \\\n+\t NT_ETH_RSS_NONFRAG_IPV4_OTHER | NT_ETH_RSS_IPV6 |      \\\n+\t NT_ETH_RSS_FRAG_IPV6 | NT_ETH_RSS_NONFRAG_IPV6_OTHER | \\\n+\t NT_ETH_RSS_IPV6_EX)\n+\n+/*\n+ * level 1, requests RSS to be performed on the outermost packet\n+ * encapsulation level.\n+ */\n+#define NT_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)\n+\n+/*\n+ * level 2, requests RSS to be performed on the specified inner packet\n+ * encapsulation level, from outermost to innermost (lower to higher values).\n+ */\n+#define NT_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)\n+\n+/*\n+ * Struct wrapping unsigned 64 bit integer carry RSS hash option bits\n+ * to avoid occasional incorrect usage interfacing with higher level\n+ * framework (e.g. 
DPDK)\n+ */\n+struct nt_eth_rss {\n+\tuint64_t fields;\n+};\n+\n+struct flow_eth_dev {\n+\tstruct flow_nic_dev *ndev; /* NIC that owns this port device */\n+\tuint8_t port; /* NIC port id */\n+\tuint32_t port_id; /* App assigned port_id - may be DPDK port_id */\n+\n+\tstruct flow_queue_id_s\n+\t\trx_queue[FLOW_MAX_QUEUES + 1]; /* 0th for exception */\n+\tint num_queues; /* VSWITCH has exceptions sent on queue 0 per design */\n+\n+\tint rss_target_id; /* QSL_HSH index if RSS needed QSL v6+ */\n+\tstruct flow_eth_dev *next;\n+};\n+\n+enum flow_nic_hash_e {\n+\tHASH_ALGO_ROUND_ROBIN = 0,\n+\tHASH_ALGO_5TUPLE,\n+};\n+\n+/* registered NIC backends */\n+struct flow_nic_dev {\n+\tuint8_t adapter_no; /* physical adapter no in the host system */\n+\tuint16_t ports; /* number of in-ports addressable on this NIC */\n+\tenum flow_eth_dev_profile\n+\tflow_profile; /* flow profile this NIC is initially prepared for */\n+\tint flow_mgnt_prepared;\n+\n+\tstruct hw_mod_resource_s\n+\t\tres[RES_COUNT]; /* raw NIC resource allocation table */\n+\tvoid *flm_res_handle;\n+\tvoid *km_res_handle;\n+\tvoid *kcc_res_handle;\n+\n+\tvoid *flm_mtr_handle;\n+\tvoid *ft_res_handle;\n+\tvoid *mtr_stat_handle;\n+\tvoid *group_handle;\n+\n+\t/* statistics */\n+\tuint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];\n+\n+\tstruct flow_handle\n+\t\t*flow_base; /* linked list of all flows created on this NIC */\n+\tstruct flow_handle *\n+\t\tflow_base_flm; /* linked list of all FLM flows created on this NIC */\n+\n+\tstruct flow_api_backend_s be; /* NIC backend API */\n+\tstruct flow_eth_dev *\n+\t\teth_base; /* linked list of created eth-port devices on this NIC */\n+\tpthread_mutex_t mtx;\n+\n+\tint default_qsl_drop_index; /* pre allocated default QSL Drop */\n+\tint default_qsl_discard_index; /* pre allocated default QSL Discard */\n+\t/* RSS hash function settings bitfields correspond to data used for hashing */\n+\tstruct nt_eth_rss\n+\t\trss_hash_config;\n+\tstruct flow_nic_dev *next; /* next NIC linked list */\n+};\n+\n+/*\n+ * ****************************************************\n+ * Error\n+ * ****************************************************\n+ */\n+\n+enum flow_nic_err_msg_e {\n+\tERR_SUCCESS = 0,\n+\tERR_FAILED = 1,\n+\tERR_MEMORY = 2,\n+\tERR_OUTPUT_TOO_MANY = 3,\n+\tERR_RSS_TOO_MANY_QUEUES = 4,\n+\tERR_VLAN_TYPE_NOT_SUPPORTED = 5,\n+\tERR_VXLAN_HEADER_NOT_ACCEPTED = 6,\n+\tERR_VXLAN_POP_INVALID_RECIRC_PORT = 7,\n+\tERR_VXLAN_POP_FAILED_CREATING_VTEP = 8,\n+\tERR_MATCH_VLAN_TOO_MANY = 9,\n+\tERR_MATCH_INVALID_IPV6_HDR = 10,\n+\tERR_MATCH_TOO_MANY_TUNNEL_PORTS = 11,\n+\tERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM = 12,\n+\tERR_MATCH_FAILED_BY_HW_LIMITS = 13,\n+\tERR_MATCH_RESOURCE_EXHAUSTION = 14,\n+\tERR_MATCH_FAILED_TOO_COMPLEX = 15,\n+\tERR_ACTION_REPLICATION_FAILED = 16,\n+\tERR_ACTION_OUTPUT_RESOURCE_EXHAUSTION = 17,\n+\tERR_ACTION_TUNNEL_HEADER_PUSH_OUTPUT_LIMIT = 18,\n+\tERR_ACTION_INLINE_MOD_RESOURCE_EXHAUSTION = 19,\n+\tERR_ACTION_RETRANSMIT_RESOURCE_EXHAUSTION = 20,\n+\tERR_ACTION_FLOW_COUNTER_EXHAUSTION = 21,\n+\tERR_ACTION_INTERNAL_RESOURCE_EXHAUSTION = 22,\n+\tERR_INTERNAL_QSL_COMPARE_FAILED = 23,\n+\tERR_INTERNAL_CAT_FUNC_REUSE_FAILED = 24,\n+\tERR_MATCH_ENTROPY_FAILED = 25,\n+\tERR_MATCH_CAM_EXHAUSTED = 26,\n+\tERR_INTERNAL_VIRTUAL_PORT_CREATION_FAILED = 27,\n+\tERR_ACTION_UNSUPPORTED = 28,\n+\tERR_REMOVE_FLOW_FAILED = 29,\n+\tERR_ACTION_NO_OUTPUT_DEFINED_USE_DEFAULT = 30,\n+\tERR_ACTION_NO_OUTPUT_QUEUE_FOUND = 31,\n+\tERR_MATCH_UNSUPPORTED_ETHER_TYPE = 32,\n+\tERR_OUTPUT_INVALID = 
33,\n+\tERR_MATCH_PARTIAL_OFFLOAD_NOT_SUPPORTED = 34,\n+\tERR_MATCH_CAT_CAM_EXHAUSTED = 35,\n+\tERR_MATCH_KCC_KEY_CLASH = 36,\n+\tERR_MATCH_CAT_CAM_FAILED = 37,\n+\tERR_PARTIAL_FLOW_MARK_TOO_BIG = 38,\n+\tERR_FLOW_PRIORITY_VALUE_INVALID = 39,\n+\tERR_MSG_NO_MSG\n+};\n+\n+void flow_nic_set_error(enum flow_nic_err_msg_e msg, struct flow_error *error);\n+\n+/*\n+ * ****************************************************\n+ * Resources\n+ * ****************************************************\n+ */\n+\n+extern const char *dbg_res_descr[];\n+\n+#define flow_nic_set_bit(arr, x) \\\n+\tdo { \\\n+\t\tuint8_t *_temp_arr = (arr); \\\n+\t\tsize_t _temp_x = (x); \\\n+\t\t_temp_arr[_temp_x / 8] = (uint8_t)(_temp_arr[_temp_x / 8] | \\\n+\t\t(uint8_t)(1 << (_temp_x % 8))); \\\n+\t} while (0)\n+\n+\n+\n+#define flow_nic_unset_bit(arr, x) \\\n+\tdo { \\\n+\t\tsize_t _temp_x = (x); \\\n+\t\tarr[_temp_x / 8] &= (uint8_t)~(1 << (_temp_x % 8)); \\\n+\t} while (0)\n+\n+#define flow_nic_is_bit_set(arr, x) \\\n+\t({ \\\n+\t\tsize_t _temp_x = (x); \\\n+\t\t(arr[_temp_x / 8] & (uint8_t)(1 << (_temp_x % 8))); \\\n+\t})\n+\n+#define flow_nic_mark_resource_used(_ndev, res_type, index) \\\n+\tdo { \\\n+\t\tstruct flow_nic_dev *_temp_ndev = (_ndev); \\\n+\t\t__typeof__(res_type) _temp_res_type = (res_type); \\\n+\t\tsize_t _temp_index = (index); \\\n+\t\tNT_LOG(DBG, FILTER, \"mark resource used: %s idx %zu\\n\", \\\n+\t\tdbg_res_descr[_temp_res_type], _temp_index); \\\n+\t\tassert(flow_nic_is_bit_set(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index) \\\n+\t\t== 0); \\\n+\t\tflow_nic_set_bit(_temp_ndev->res[_temp_res_type].alloc_bm, _temp_index); \\\n+\t} while (0)\n+\n+\n+\n+#define flow_nic_mark_resource_unused(_ndev, res_type, index) \\\n+\tdo { \\\n+\t\t__typeof__(res_type) _temp_res_type = (res_type); \\\n+\t\tsize_t _temp_index = (index); \\\n+\t\tNT_LOG(DBG, FILTER, \"mark resource unused: %s idx %zu\\n\", \\\n+\t\tdbg_res_descr[_temp_res_type], _temp_index); \\\n+\t\tflow_nic_unset_bit((_ndev)->res[_temp_res_type].alloc_bm, _temp_index); \\\n+\t} while (0)\n+\n+\n+#define flow_nic_is_resource_used(_ndev, res_type, index) \\\n+\t(!!flow_nic_is_bit_set((_ndev)->res[res_type].alloc_bm, index))\n+\n+int flow_nic_alloc_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,\n+\t\t\t    uint32_t alignment);\n+int flow_nic_alloc_resource_index(struct flow_nic_dev *ndev, int idx,\n+\t\t\t\t  enum res_type_e res_type);\n+int flow_nic_alloc_resource_contig(struct flow_nic_dev *ndev,\n+\t\t\t\t   enum res_type_e res_type, unsigned int num,\n+\t\t\t\t   uint32_t alignment);\n+void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,\n+\t\t\t    int idx);\n+\n+int flow_nic_ref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,\n+\t\t\t  int index);\n+int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type,\n+\t\t\t    int index);\n+int flow_nic_find_next_used_resource(struct flow_nic_dev *ndev,\n+\t\t\t\t     enum res_type_e res_type, int idx_start);\n+\n+int flow_nic_allocate_fh_resource(struct flow_nic_dev *ndev,\n+\t\t\t\t  enum res_type_e res_type,\n+\t\t\t\t  struct flow_handle *fh, uint32_t count,\n+\t\t\t\t  uint32_t alignment);\n+int flow_nic_allocate_fh_resource_index(struct flow_nic_dev *ndev,\n+\t\t\t\t\tenum res_type_e res_type, int idx,\n+\t\t\t\t\tstruct flow_handle *fh);\n+\n+/*\n+ * ****************************************************\n+ * Other\n+ * ****************************************************\n+ */\n+\n+struct flow_eth_dev 
*nic_and_port_to_eth_dev(uint8_t adapter_no, uint8_t port);\n+struct flow_nic_dev *get_nic_dev_from_adapter_no(uint8_t adapter_no);\n+\n+int flow_nic_set_hasher(struct flow_nic_dev *ndev, int hsh_idx,\n+\t\t\tenum flow_nic_hash_e algorithm);\n+int flow_nic_set_hasher_fields(struct flow_nic_dev *ndev, int hsh_idx,\n+\t\t\t       struct nt_eth_rss fields);\n+\n+int lag_set_config(uint8_t adapter_no, enum flow_lag_cmd cmd, uint32_t index,\n+\t\t   uint32_t value);\n+int lag_set_port_block(uint8_t adapter_no, uint32_t port_mask);\n+int lag_set_port_group(uint8_t adapter_no, uint32_t port_mask);\n+\n+int flow_get_num_queues(uint8_t adapter_no, uint8_t port_no);\n+int flow_get_hw_id(uint8_t adapter_no, uint8_t port_no, uint8_t queue_no);\n+\n+int flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data,\n+\t\t       uint64_t size);\n+\n+#endif\ndiff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c\nnew file mode 100644\nindex 0000000000..fa9240cb7b\n--- /dev/null\n+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.c\n@@ -0,0 +1,5118 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include <stdio.h>\n+#include <stdarg.h>\n+#include <string.h>\n+#include <stdlib.h>\n+#include <assert.h>\n+#include <pthread.h>\n+#include <unistd.h> /* sleep() */\n+#include <arpa/inet.h> /* htons, htonl, ntohs */\n+#include <stdatomic.h>\n+\n+#include <inttypes.h>\n+\n+#include \"ntlog.h\"\n+\n+#include \"flow_api_nic_setup.h\"\n+#include \"stream_binary_flow_api.h\"\n+#include \"flow_api.h\"\n+#include \"flow_api_actions.h\"\n+#include \"flow_api_backend.h\"\n+#include \"flow_api_engine.h\"\n+#include \"flow_api_profile_inline.h\"\n+\n+#include <rte_spinlock.h>\n+\n+#define UNUSED __rte_unused\n+\n+/*\n+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.\n+ */\n+static int set_flow_type_km(struct flow_nic_dev *ndev, int cfn_index,\n+\t\t\t    int flow_type, int lookup, int enable)\n+{\n+\tconst int max_lookups = 4;\n+\tconst int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;\n+\n+\tint fte_index =\n+\t\t(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;\n+\tint fte_field = cfn_index % cat_funcs;\n+\n+\tuint32_t current_bm = 0;\n+\tuint32_t fte_field_bm = 1 << fte_field;\n+\n+\thw_mod_cat_fte_km_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,\n+\t\t\t      fte_index, &current_bm);\n+\n+\tuint32_t final_bm = enable ? (fte_field_bm | current_bm) :\n+\t\t\t    (~fte_field_bm & current_bm);\n+\n+\tif (current_bm != final_bm) {\n+\t\thw_mod_cat_fte_km_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,\n+\t\t\t\t      KM_FLM_IF_FIRST, fte_index, final_bm);\n+\t\thw_mod_cat_fte_km_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,\n+\t\t\t\t\t1);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * lookup refers to key A/B/C/D, and can have values 0, 1, 2, and 3.\n+ */\n+static int set_flow_type_flm(struct flow_nic_dev *ndev, int cfn_index,\n+\t\t\t     int flow_type, int lookup, int enable)\n+{\n+\tconst int max_lookups = 4;\n+\tconst int cat_funcs = (int)ndev->be.cat.nb_cat_funcs / 8;\n+\n+\tint fte_index =\n+\t\t(8 * flow_type + cfn_index / cat_funcs) * max_lookups + lookup;\n+\tint fte_field = cfn_index % cat_funcs;\n+\n+\tuint32_t current_bm = 0;\n+\tuint32_t fte_field_bm = 1 << fte_field;\n+\n+\thw_mod_cat_fte_flm_get(&ndev->be, HW_CAT_FTE_ENABLE_BM, KM_FLM_IF_FIRST,\n+\t\t\t       fte_index, &current_bm);\n+\n+\tuint32_t final_bm = enable ? 
(fte_field_bm | current_bm) :\n+\t\t\t    (~fte_field_bm & current_bm);\n+\n+\tif (current_bm != final_bm) {\n+\t\thw_mod_cat_fte_flm_set(&ndev->be, HW_CAT_FTE_ENABLE_BM,\n+\t\t\t\t       KM_FLM_IF_FIRST, fte_index, final_bm);\n+\t\thw_mod_cat_fte_flm_flush(&ndev->be, KM_FLM_IF_FIRST, fte_index,\n+\t\t\t\t\t 1);\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int rx_queue_idx_to_hw_id(struct flow_eth_dev *dev, int id)\n+{\n+\tfor (int i = 0; i < dev->num_queues; ++i) {\n+\t\tif (dev->rx_queue[i].id == id)\n+\t\t\treturn dev->rx_queue[i].hw_id;\n+\t}\n+\treturn -1;\n+}\n+\n+/*\n+ * Flow Matcher functionality\n+ */\n+\n+static int flm_sdram_calibrate(struct flow_nic_dev *ndev)\n+{\n+\tint success = 0;\n+\n+\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_PRESET_ALL, 0x0);\n+\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_SPLIT_SDRAM_USAGE,\n+\t\t\t       0x10);\n+\thw_mod_flm_control_flush(&ndev->be);\n+\n+\t/* Wait for ddr4 calibration/init done */\n+\tfor (uint32_t i = 0; i < 1000000; ++i) {\n+\t\tuint32_t value = 0;\n+\n+\t\thw_mod_flm_status_update(&ndev->be);\n+\t\thw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_CALIBDONE,\n+\t\t\t\t      &value);\n+\t\tif (value) {\n+\t\t\tsuccess = 1;\n+\t\t\tbreak;\n+\t\t}\n+\t\tusleep(1);\n+\t}\n+\n+\tif (!success) {\n+\t\t/* \"Flow matcher initialization failed - SDRAM calibration failed\"; */\n+\t\treturn -1;\n+\t}\n+\n+\t/* Set the flow scrubber and timeout settings */\n+\thw_mod_flm_timeout_set(&ndev->be, HW_FLM_TIMEOUT_T, 0);\n+\thw_mod_flm_timeout_flush(&ndev->be);\n+\n+\thw_mod_flm_scrub_set(&ndev->be, HW_FLM_SCRUB_I, 100);\n+\thw_mod_flm_scrub_flush(&ndev->be);\n+\n+\treturn 0;\n+}\n+\n+static int flm_sdram_reset(struct flow_nic_dev *ndev, int enable)\n+{\n+\tint success = 0;\n+\n+\t/*\n+\t * Make sure no lookup is performed during init, i.e.\n+\t * disable every category and disable FLM\n+\t */\n+\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, 0x0);\n+\thw_mod_flm_control_flush(&ndev->be);\n+\n+\tfor (uint32_t i = 1; i < ndev->be.flm.nb_categories; ++i)\n+\t\thw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, i, 0x0);\n+\thw_mod_flm_rcp_flush(&ndev->be, 1, ndev->be.flm.nb_categories - 1);\n+\n+\t/* Wait for FLM to enter Idle state */\n+\tfor (uint32_t i = 0; i < 1000000; ++i) {\n+\t\tuint32_t value = 0;\n+\n+\t\thw_mod_flm_status_update(&ndev->be);\n+\t\thw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_IDLE, &value);\n+\t\tif (value) {\n+\t\t\tsuccess = 1;\n+\t\t\tbreak;\n+\t\t}\n+\t\tusleep(1);\n+\t}\n+\n+\tif (!success) {\n+\t\t/* \"Flow matcher initialization failed - never idle\"; */\n+\t\treturn -1;\n+\t}\n+\n+\tsuccess = 0;\n+\n+\t/* Start SDRAM initialization */\n+\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x1);\n+\thw_mod_flm_control_flush(&ndev->be);\n+\n+\tfor (uint32_t i = 0; i < 1000000; ++i) {\n+\t\tuint32_t value = 0;\n+\n+\t\thw_mod_flm_status_update(&ndev->be);\n+\t\thw_mod_flm_status_get(&ndev->be, HW_FLM_STATUS_INITDONE,\n+\t\t\t\t      &value);\n+\t\tif (value) {\n+\t\t\tsuccess = 1;\n+\t\t\tbreak;\n+\t\t}\n+\t\tusleep(1);\n+\t}\n+\n+\tif (!success) {\n+\t\t/* \"Flow matcher initialization failed - SDRAM initialization incomplete\"; */\n+\t\treturn -1;\n+\t}\n+\n+\t/* Set the INIT value back to zero to clear the bit in the SW register cache */\n+\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_INIT, 0x0);\n+\thw_mod_flm_control_flush(&ndev->be);\n+\n+\t/* Enable FLM */\n+\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_ENABLE, enable);\n+\thw_mod_flm_control_flush(&ndev->be);\n+\n+\treturn 
0;\n+}\n+\n+#define FLM_FLOW_RCP_MAX 32\n+#define FLM_FLOW_FT_MAX 16\n+\n+struct flm_flow_ft_ident_s {\n+\tunion {\n+\t\tstruct {\n+\t\t\tuint64_t in_use : 1;\n+\t\t\tuint64_t drop : 1;\n+\t\t\tuint64_t ltx_en : 1;\n+\t\t\tuint64_t ltx_port : 1;\n+\t\t\tuint64_t queue_en : 1;\n+\t\t\tuint64_t queue : 8;\n+\t\t\tuint64_t encap_len : 8;\n+\t\t\tuint64_t encap_vlans : 2;\n+\t\t\tuint64_t encap_ip : 1;\n+\t\t\tuint64_t decap_end : 5;\n+\t\t\tuint64_t jump_to_group : 8;\n+\t\t\tuint64_t pad : 27;\n+\t\t};\n+\t\tuint64_t data;\n+\t};\n+};\n+\n+struct flm_flow_key_def_s {\n+\tunion {\n+\t\tstruct {\n+\t\t\tuint64_t qw0_dyn : 7;\n+\t\t\tuint64_t qw0_ofs : 8;\n+\t\t\tuint64_t qw4_dyn : 7;\n+\t\t\tuint64_t qw4_ofs : 8;\n+\t\t\tuint64_t sw8_dyn : 7;\n+\t\t\tuint64_t sw8_ofs : 8;\n+\t\t\tuint64_t sw9_dyn : 7;\n+\t\t\tuint64_t sw9_ofs : 8;\n+\t\t\tuint64_t outer_proto : 1;\n+\t\t\tuint64_t inner_proto : 1;\n+\t\t\tuint64_t pad : 2;\n+\t\t};\n+\t\tuint64_t data;\n+\t};\n+};\n+\n+static struct flm_flow_ft_ident_s flow_def_to_ft_ident(struct nic_flow_def *fd)\n+{\n+\tstruct flm_flow_ft_ident_s ft_ident;\n+\n+\tassert(sizeof(struct flm_flow_ft_ident_s) == sizeof(uint64_t));\n+\n+\tmemset(&ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));\n+\tft_ident.in_use = 1;\n+\n+\tif (fd->dst_num_avail == 0) {\n+\t\tft_ident.drop = 1;\n+\t} else {\n+\t\tfor (int i = 0; i < fd->dst_num_avail; ++i) {\n+\t\t\tif (fd->dst_id[i].type == PORT_PHY) {\n+\t\t\t\tft_ident.ltx_en = 1;\n+\t\t\t\tft_ident.ltx_port = fd->dst_id[i].id;\n+\t\t\t} else if (fd->dst_id[i].type == PORT_VIRT) {\n+\t\t\t\tft_ident.queue_en = 1;\n+\t\t\t\tft_ident.queue = fd->dst_id[i].id;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tif (fd->tun_hdr.len > 0) {\n+\t\tft_ident.encap_len = fd->tun_hdr.len;\n+\t\tft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;\n+\t\tft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 
0 : 1;\n+\t}\n+\n+\tft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;\n+\n+\tif (fd->jump_to_group != UINT32_MAX)\n+\t\tft_ident.jump_to_group = fd->jump_to_group & 0xff;\n+\n+\treturn ft_ident;\n+}\n+\n+static inline void set_key_def_qw(struct flm_flow_key_def_s *key_def,\n+\t\t\t\t  unsigned int qw, unsigned int dyn,\n+\t\t\t\t  unsigned int ofs)\n+{\n+\tassert(qw < 2);\n+\tif (qw == 0) {\n+\t\tkey_def->qw0_dyn = dyn & 0x7f;\n+\t\tkey_def->qw0_ofs = ofs & 0xff;\n+\t} else {\n+\t\tkey_def->qw4_dyn = dyn & 0x7f;\n+\t\tkey_def->qw4_ofs = ofs & 0xff;\n+\t}\n+}\n+\n+static inline void set_key_def_sw(struct flm_flow_key_def_s *key_def,\n+\t\t\t\t  unsigned int sw, unsigned int dyn,\n+\t\t\t\t  unsigned int ofs)\n+{\n+\tassert(sw < 2);\n+\tif (sw == 0) {\n+\t\tkey_def->sw8_dyn = dyn & 0x7f;\n+\t\tkey_def->sw8_ofs = ofs & 0xff;\n+\t} else {\n+\t\tkey_def->sw9_dyn = dyn & 0x7f;\n+\t\tkey_def->sw9_ofs = ofs & 0xff;\n+\t}\n+}\n+\n+struct flm_flow_group_s {\n+\tint cfn_group0;\n+\tint km_ft_group0;\n+\tstruct flow_handle *fh_group0;\n+\n+\tstruct flm_flow_key_def_s key_def;\n+\n+\tint miss_enabled;\n+\n+\tstruct flm_flow_group_ft_s {\n+\t\tstruct flm_flow_ft_ident_s ident;\n+\t\tstruct flow_handle *fh;\n+\t} ft[FLM_FLOW_FT_MAX];\n+\n+\tuint32_t cashed_ft_index;\n+};\n+\n+struct flm_flow_handle_s {\n+\tstruct flm_flow_group_s groups[FLM_FLOW_RCP_MAX];\n+};\n+\n+static void flm_flow_handle_create(void **handle)\n+{\n+\tstruct flm_flow_handle_s *flm_handle;\n+\n+\tif (!*handle)\n+\t\t*handle = calloc(1, sizeof(struct flm_flow_handle_s));\n+\n+\telse\n+\t\tmemset(*handle, 0x0, sizeof(struct flm_flow_handle_s));\n+\n+\tflm_handle = (struct flm_flow_handle_s *)*handle;\n+\n+\tfor (int i = 0; i < FLM_FLOW_RCP_MAX; ++i) {\n+\t\tflm_handle->groups[i].cfn_group0 = -1;\n+\t\tflm_handle->groups[i].fh_group0 = NULL;\n+\t}\n+}\n+\n+static void flm_flow_handle_remove(void **handle)\n+{\n+\tfree(*handle);\n+\t*handle = NULL;\n+}\n+\n+static int flm_flow_setup_group(struct flow_eth_dev *dev, uint32_t group_index,\n+\t\t\t\tint cfn, int km_ft, struct flow_handle *fh)\n+{\n+\tstruct flm_flow_handle_s *flm_handle =\n+\t\t(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;\n+\tstruct flm_flow_group_s *flm_group;\n+\n+\tif (group_index >= FLM_FLOW_RCP_MAX) {\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t       \"FLM: Invalid index for FLM programming: Group=%d\\n\",\n+\t\t       (int)group_index);\n+\t\treturn -1;\n+\t}\n+\n+\tflm_group = &flm_handle->groups[group_index];\n+\n+\tflm_group->cfn_group0 = cfn;\n+\tflm_group->km_ft_group0 = km_ft;\n+\tflm_group->fh_group0 = fh;\n+\tflm_group->miss_enabled = 0;\n+\n+\treturn 0;\n+}\n+\n+static int flm_flow_destroy_group(struct flow_eth_dev *dev,\n+\t\t\t\t  uint32_t group_index)\n+{\n+\tstruct flm_flow_handle_s *flm_handle =\n+\t\t(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;\n+\tstruct flm_flow_group_s *flm_group;\n+\n+\tif (group_index >= FLM_FLOW_RCP_MAX) {\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t       \"FLM: Invalid index for FLM programming: Group=%d\\n\",\n+\t\t       (int)group_index);\n+\t\treturn -1;\n+\t}\n+\n+\tflm_group = &flm_handle->groups[group_index];\n+\n+\tmemset(flm_group, 0x0, sizeof(struct flm_flow_group_s));\n+\tflm_group->cfn_group0 = -1;\n+\n+\treturn 0;\n+}\n+\n+static int flm_flow_get_group_miss_fh(struct flow_eth_dev *dev,\n+\t\t\t\t      uint32_t group_index,\n+\t\t\t\t      struct flow_handle **fh_miss)\n+{\n+\tstruct flm_flow_handle_s *flm_handle =\n+\t\t(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;\n+\n+\tif (group_index >= FLM_FLOW_RCP_MAX) 
{\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t       \"FLM: Invalid index for FLM programming: Group=%d\\n\",\n+\t\t       (int)group_index);\n+\t\treturn -1;\n+\t}\n+\n+\t*fh_miss = flm_handle->groups[group_index].fh_group0;\n+\n+\treturn 0;\n+}\n+\n+static int flm_flow_setup_rcp(struct flow_eth_dev *dev,\n+\t\t\t      struct flm_flow_key_def_s *key_def,\n+\t\t\t      uint32_t *packet_mask, uint32_t group_index)\n+{\n+\tif (group_index >= FLM_FLOW_RCP_MAX) {\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t       \"FLM: Invalid index for FLM programming: Group=%d\\n\",\n+\t\t       (int)group_index);\n+\t\treturn -1;\n+\t}\n+\n+\tuint32_t flm_mask[10] = {\n+\t\tpacket_mask[0], /* SW9 */\n+\t\tpacket_mask[1], /* SW8 */\n+\t\tpacket_mask[5], packet_mask[4],\n+\t\tpacket_mask[3], packet_mask[2], /* QW4 */\n+\t\tpacket_mask[9], packet_mask[8],\n+\t\tpacket_mask[7], packet_mask[6], /* QW0 */\n+\t};\n+\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_LOOKUP, group_index, 1);\n+\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_DYN, group_index,\n+\t\t\t   key_def->qw0_dyn);\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_OFS, group_index,\n+\t\t\t   key_def->qw0_ofs);\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW0_SEL, group_index, 0);\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_DYN, group_index,\n+\t\t\t   key_def->qw4_dyn);\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_QW4_OFS, group_index,\n+\t\t\t   key_def->qw4_ofs);\n+\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_DYN, group_index,\n+\t\t\t   key_def->sw8_dyn);\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_OFS, group_index,\n+\t\t\t   key_def->sw8_ofs);\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW8_SEL, group_index, 0);\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_DYN, group_index,\n+\t\t\t   key_def->sw9_dyn);\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_SW9_OFS, group_index,\n+\t\t\t   key_def->sw9_ofs);\n+\n+\thw_mod_flm_rcp_set_mask(&dev->ndev->be, HW_FLM_RCP_MASK, group_index,\n+\t\t\t\tflm_mask);\n+\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_KID, group_index,\n+\t\t\t   group_index + 2);\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_OPN, group_index,\n+\t\t\t   key_def->outer_proto);\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_IPN, group_index,\n+\t\t\t   key_def->inner_proto);\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_DYN, group_index, 0);\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_BYT_OFS, group_index,\n+\t\t\t   -20);\n+\n+\thw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);\n+\n+\treturn 0;\n+}\n+\n+static int flm_flow_destroy_rcp(struct flow_eth_dev *dev, uint32_t group_index)\n+{\n+\tstruct flm_flow_handle_s *flm_handle =\n+\t\t(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;\n+\tstruct flm_flow_group_s *flm_group;\n+\n+\tif (group_index >= FLM_FLOW_RCP_MAX) {\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t       \"FLM: Invalid index for FLM programming: Group=%d\\n\",\n+\t\t       (int)group_index);\n+\t\treturn -1;\n+\t}\n+\n+\tflm_group = &flm_handle->groups[group_index];\n+\n+\thw_mod_flm_rcp_set(&dev->ndev->be, HW_FLM_RCP_PRESET_ALL, group_index,\n+\t\t\t   0);\n+\thw_mod_flm_rcp_flush(&dev->ndev->be, group_index, 1);\n+\n+\tif (flm_group->miss_enabled) {\n+\t\tuint32_t bm = 0;\n+\n+\t\t/* Change group 0 FLM RCP selection to point to 0 */\n+\t\thw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,\n+\t\t\t\t       KM_FLM_IF_FIRST, flm_group->cfn_group0,\n+\t\t\t\t       0);\n+\t\thw_mod_cat_kcs_flm_flush(&dev->ndev->be, 
KM_FLM_IF_FIRST,\n+\t\t\t\t\t flm_group->cfn_group0, 1);\n+\n+\t\t/* Change group 0 FT MISS to FT UNHANDLED */\n+\t\tset_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 0);\n+\t\tset_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 1);\n+\n+\t\t/* Finally, disable FLM for group 0 */\n+\t\thw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,\n+\t\t\t\t       KM_FLM_IF_FIRST,\n+\t\t\t\t       flm_group->cfn_group0 / 8, &bm);\n+\t\thw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,\n+\t\t\t\t       KM_FLM_IF_FIRST, flm_group->cfn_group0 / 8,\n+\t\t\t\t       bm & ~(1 << (flm_group->cfn_group0 % 8)));\n+\t\thw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,\n+\t\t\t\t\t flm_group->cfn_group0 / 8, 1);\n+\n+\t\tmemset(&flm_group->key_def, 0x0,\n+\t\t       sizeof(struct flm_flow_key_def_s));\n+\t\tflm_group->miss_enabled = 0;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int flm_flow_learn_prepare(struct flow_eth_dev *dev,\n+\t\t\t\t  struct flow_handle *fh, uint32_t group_index,\n+\t\t\t\t  struct flm_flow_key_def_s *key_def,\n+\t\t\t\t  uint32_t *packet_mask,\n+\t\t\t\t  /* Return values */\n+\t\t\t\t  uint32_t *kid, uint32_t *ft, int *cfn_to_copy,\n+\t\t\t\t  int *cfn_to_copy_km_ft,\n+\t\t\t\t  struct flow_handle **fh_existing)\n+{\n+\tstruct flm_flow_handle_s *flm_handle =\n+\t\t(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;\n+\tstruct flm_flow_group_s *flm_group;\n+\tstruct flm_flow_ft_ident_s temp_ft_ident;\n+\tstruct nic_flow_def *fd = fh->fd;\n+\n+\tif (group_index >= FLM_FLOW_RCP_MAX) {\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t       \"FLM: Invalid index for FLM programming: Group=%d\\n\",\n+\t\t       (int)group_index);\n+\t\treturn -1;\n+\t}\n+\n+\tflm_group = &flm_handle->groups[group_index];\n+\n+\tif (flm_group->cfn_group0 < 0) {\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t       \"FLM: Attempt to program to a unset CFN: Group=%d\\n\",\n+\t\t       (int)group_index);\n+\t\treturn -1;\n+\t}\n+\n+\tif (!flm_group->miss_enabled) {\n+\t\tuint32_t bm = 0;\n+\n+\t\tif (flow_nic_allocate_fh_resource_index(dev->ndev, RES_FLM_RCP,\n+\t\t\t\t\t\t\t(int)group_index, fh)) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"ERROR: Could not get FLM RCP resource\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t/* Change group 0 FLM RCP selection to point to \"group_index\" */\n+\t\thw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,\n+\t\t\t\t       KM_FLM_IF_FIRST, flm_group->cfn_group0,\n+\t\t\t\t       group_index);\n+\t\thw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,\n+\t\t\t\t\t flm_group->cfn_group0, 1);\n+\n+\t\t/* Setup FLM RCP \"group_index\" */\n+\t\tflm_flow_setup_rcp(dev, key_def, packet_mask, group_index);\n+\n+\t\t/*\n+\t\t * Change group 0 FT UNHANDLED to FT MISS\n+\t\t * Note: Once this step is done, the filter is invalid until the KCE step is done\n+\t\t */\n+\t\tset_flow_type_flm(dev->ndev, flm_group->cfn_group0, 1, 2, 0);\n+\t\tset_flow_type_flm(dev->ndev, flm_group->cfn_group0, 0, 2, 1);\n+\n+\t\t/* Finally, enable FLM for group 0 */\n+\t\thw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,\n+\t\t\t\t       KM_FLM_IF_FIRST,\n+\t\t\t\t       flm_group->cfn_group0 / 8, &bm);\n+\t\thw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,\n+\t\t\t\t       KM_FLM_IF_FIRST,\n+\t\t\t\t       flm_group->cfn_group0 / 8,\n+\t\t\t\t       bm | (1 << (flm_group->cfn_group0 % 8)));\n+\t\thw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,\n+\t\t\t\t\t flm_group->cfn_group0 / 8, 1);\n+\n+\t\tflm_group->key_def.data = 
key_def->data;\n+\t\tflm_group->miss_enabled = 1;\n+\t}\n+\n+\tif (flm_group->key_def.data != key_def->data) {\n+\t\tNT_LOG(ERR, FILTER,\n+\t\t       \"FLM: Attempt to program 2 different types of flows into group=%d\\n\",\n+\t\t       (int)group_index);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Create action set */\n+\tmemset(&temp_ft_ident, 0x0, sizeof(struct flm_flow_ft_ident_s));\n+\ttemp_ft_ident.in_use = 1;\n+\n+\tif (fd->dst_num_avail == 0) {\n+\t\ttemp_ft_ident.drop = 1;\n+\t} else {\n+\t\tfor (int i = 0; i < fd->dst_num_avail; ++i) {\n+\t\t\tif (fd->dst_id[i].type == PORT_PHY) {\n+\t\t\t\ttemp_ft_ident.ltx_en = 1;\n+\t\t\t\ttemp_ft_ident.ltx_port = fd->dst_id[i].id;\n+\t\t\t} else if (fd->dst_id[i].type == PORT_VIRT) {\n+\t\t\t\ttemp_ft_ident.queue_en = 1;\n+\t\t\t\ttemp_ft_ident.queue = fd->dst_id[i].id;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\t/* Set encap/decap data */\n+\tif (fd->tun_hdr.len > 0) {\n+\t\ttemp_ft_ident.encap_len = fd->tun_hdr.len;\n+\t\ttemp_ft_ident.encap_vlans = fd->tun_hdr.nb_vlans & 0x3;\n+\t\ttemp_ft_ident.encap_ip = fd->tun_hdr.ip_version == 4 ? 0 : 1;\n+\t}\n+\n+\ttemp_ft_ident.decap_end = fd->header_strip_end_dyn & 0x1f;\n+\n+\t/* Find ft ident or create a new one */\n+\tuint32_t ft_index = 0;\n+\n+\tif (flm_group->cashed_ft_index > 0 &&\n+\t\t\tflm_group->ft[flm_group->cashed_ft_index].ident.data ==\n+\t\t\ttemp_ft_ident.data) {\n+\t\tft_index = flm_group->cashed_ft_index;\n+\t\t*fh_existing = flm_group->ft[ft_index].fh;\n+\t} else {\n+\t\tfor (ft_index = 2; ft_index < FLM_FLOW_FT_MAX; ++ft_index) {\n+\t\t\tstruct flm_flow_ft_ident_s *ft_ident =\n+\t\t\t\t\t&flm_group->ft[ft_index].ident;\n+\t\t\tif (ft_ident->data == 0) {\n+\t\t\t\tft_ident->data = temp_ft_ident.data;\n+\t\t\t\t*cfn_to_copy = flm_group->cfn_group0;\n+\t\t\t\t*cfn_to_copy_km_ft = flm_group->km_ft_group0;\n+\t\t\t\tflm_group->ft[ft_index].fh = fh;\n+\t\t\t\tfh->flm_group_index = (uint8_t)group_index;\n+\t\t\t\tfh->flm_ft_index = (uint8_t)ft_index;\n+\t\t\t\tbreak;\n+\t\t\t} else if (ft_ident->data == temp_ft_ident.data) {\n+\t\t\t\t*fh_existing = flm_group->ft[ft_index].fh;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\n+\t\tif (ft_index >= FLM_FLOW_FT_MAX) {\n+\t\t\tNT_LOG(ERR, FILTER, \"FLM: FT resource not available\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tflm_group->cashed_ft_index = ft_index;\n+\t}\n+\n+\t/* Set return values */\n+\t *kid = group_index + 2;\n+\t *ft = ft_index;\n+\n+\treturn 0;\n+}\n+\n+static int flow_flm_destroy_owner(struct flow_eth_dev *dev,\n+\t\t\t\t  struct flow_handle *fh)\n+{\n+\tint error = 0;\n+\n+\tstruct flm_flow_handle_s *flm_handle =\n+\t\t(struct flm_flow_handle_s *)dev->ndev->flm_res_handle;\n+\tstruct flm_flow_group_s *flm_group =\n+\t\t\t&flm_handle->groups[fh->flm_group_index];\n+\n+\tmemset(&flm_group->ft[fh->flm_ft_index], 0x0,\n+\t       sizeof(struct flm_flow_group_ft_s));\n+\n+\terror |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t   flm_group->km_ft_group0, 0, 0);\n+\terror |= set_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t   (int)fh->flm_ft_index, 2, 0);\n+\n+\treturn error;\n+}\n+\n+#define FLM_MTR_PROFILE_SIZE 0x100000\n+\n+struct flm_flow_mtr_handle_s {\n+\tstruct dual_buckets_s {\n+\t\tuint16_t rate_a;\n+\t\tuint16_t rate_b;\n+\t\tuint16_t size_a;\n+\t\tuint16_t size_b;\n+\t} dual_buckets[FLM_MTR_PROFILE_SIZE];\n+};\n+\n+int flow_mtr_supported(struct flow_eth_dev *dev)\n+{\n+\treturn hw_mod_flm_present(&dev->ndev->be) &&\n+\t       dev->ndev->be.flm.nb_variant == 2;\n+}\n+\n+uint64_t 
flow_mtr_meter_policy_n_max(void)\n+{\n+\treturn FLM_MTR_PROFILE_SIZE;\n+}\n+\n+static inline uint64_t convert_to_bucket_size_units(uint64_t value)\n+{\n+\t/* Assumes a 40-bit int as input */\n+\tuint64_t lo_bits = (value & 0xfffff) * 1000000000;\n+\tuint64_t hi_bits = ((value >> 20) & 0xfffff) * 1000000000;\n+\tuint64_t round_up =\n+\t\t(hi_bits & 0xfffff) || (lo_bits & 0xffffffffff) ? 1 : 0;\n+\treturn (hi_bits >> 20) + (lo_bits >> 40) + round_up;\n+}\n+\n+int flow_mtr_set_profile(struct flow_eth_dev *dev, uint32_t profile_id,\n+\t\t\t uint64_t bucket_rate_a, uint64_t bucket_size_a,\n+\t\t\t uint64_t bucket_rate_b, uint64_t bucket_size_b)\n+{\n+\tstruct flow_nic_dev *ndev = dev->ndev;\n+\tstruct flm_flow_mtr_handle_s *handle =\n+\t\t(struct flm_flow_mtr_handle_s *)ndev->flm_mtr_handle;\n+\tstruct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];\n+\n+\tuint64_t bucket_rate_shift_a = 0;\n+\tuint64_t bucket_rate_shift_b = 0;\n+\n+\tuint64_t bucket_size_shift_a = 0;\n+\tuint64_t bucket_size_shift_b = 0;\n+\n+\t/* Round rates up to nearest 128 bytes/sec and shift to 128 bytes/sec units */\n+\tbucket_rate_a = (bucket_rate_a & 0x7f) ? (bucket_rate_a >> 7) + 1 :\n+\t\t\t(bucket_rate_a >> 7);\n+\tbucket_rate_b = (bucket_rate_b & 0x7f) ? (bucket_rate_b >> 7) + 1 :\n+\t\t\t(bucket_rate_b >> 7);\n+\n+\t/* Round rate down to max rate supported */\n+\tif (bucket_rate_a > 0x7ff8000)\n+\t\tbucket_rate_a = 0x7ff8000;\n+\tif (bucket_rate_b > 0x7ff8000)\n+\t\tbucket_rate_b = 0x7ff8000;\n+\n+\t/* Find shift to convert into 12-bit int */\n+\twhile ((bucket_rate_a >> bucket_rate_shift_a) > 0xfff)\n+\t\tbucket_rate_shift_a += 1;\n+\twhile ((bucket_rate_b >> bucket_rate_shift_b) > 0xfff)\n+\t\tbucket_rate_shift_b += 1;\n+\n+\t/* Store in format [11:0] shift-left [15:12] */\n+\tbuckets->rate_a = (bucket_rate_a >> bucket_rate_shift_a) |\n+\t\t\t  (bucket_rate_shift_a << 12);\n+\tbuckets->rate_b = (bucket_rate_b >> bucket_rate_shift_b) |\n+\t\t\t  (bucket_rate_shift_b << 12);\n+\n+\t/* Round size down to 38-bit int */\n+\tif (bucket_size_a > 0x3fffffffff)\n+\t\tbucket_size_a = 0x3fffffffff;\n+\tif (bucket_size_b > 0x3fffffffff)\n+\t\tbucket_size_b = 0x3fffffffff;\n+\n+\t/* Convert size to units of 2^40 / 10^9. Output is a 28-bit int. 
*/\n+\tbucket_size_a = convert_to_bucket_size_units(bucket_size_a);\n+\tbucket_size_b = convert_to_bucket_size_units(bucket_size_b);\n+\n+\t/* Round rate down to max rate supported */\n+\tif (bucket_size_a > 0x7ff8000)\n+\t\tbucket_size_a = 0x7ff8000;\n+\tif (bucket_size_b > 0x7ff8000)\n+\t\tbucket_size_b = 0x7ff8000;\n+\n+\t/* Find shift to convert into 12-bit int */\n+\twhile ((bucket_size_a >> bucket_size_shift_a) > 0xfff)\n+\t\tbucket_size_shift_a += 1;\n+\twhile ((bucket_size_b >> bucket_size_shift_b) > 0xfff)\n+\t\tbucket_size_shift_b += 1;\n+\n+\t/* Store in format [11:0] shift-left [15:12] */\n+\tbuckets->size_a = (bucket_size_a >> bucket_size_shift_a) |\n+\t\t\t  (bucket_size_shift_a << 12);\n+\tbuckets->size_b = (bucket_size_b >> bucket_size_shift_b) |\n+\t\t\t  (bucket_size_shift_b << 12);\n+\n+\treturn 0;\n+}\n+\n+int flow_mtr_set_policy(UNUSED struct flow_eth_dev *dev,\n+\t\t\tUNUSED uint32_t policy_id, UNUSED int drop)\n+{\n+\treturn 0;\n+}\n+\n+#define FLM_MTR_STAT_SIZE 0x1000000\n+#define WORDS_PER_INF_DATA \\\n+\t(sizeof(struct flm_v17_inf_data_s) / sizeof(uint32_t))\n+#define MAX_INF_DATA_RECORDS_PER_READ 20\n+#define UINT64_MSB ((uint64_t)1 << 63)\n+\n+/* 2^23 bytes ~ 8MB */\n+#define FLM_PERIODIC_STATS_BYTE_LIMIT 8\n+/* 2^16 pkt ~ 64K pkt */\n+#define FLM_PERIODIC_STATS_PKT_LIMIT 5\n+/* 2^38 ns ~ 275 sec */\n+#define FLM_PERIODIC_STATS_BYTE_TIMEOUT 23\n+\n+uint32_t flow_mtr_meters_supported(void)\n+{\n+\treturn FLM_MTR_STAT_SIZE;\n+}\n+\n+struct mtr_stat_s {\n+\tstruct dual_buckets_s *buckets;\n+\n+\tvolatile atomic_uint_fast64_t n_pkt;\n+\tvolatile atomic_uint_fast64_t n_bytes;\n+\tuint64_t n_pkt_base;\n+\tuint64_t n_bytes_base;\n+\tvolatile atomic_uint_fast64_t stats_mask;\n+};\n+\n+#define WORDS_PER_LEARN_DATA sizeof(struct flm_v17_lrn_data_s)\n+#define FLM_PROG_MAX_RETRY 100\n+\n+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,\n+\t\t\t\t\tuint32_t *data);\n+\n+static int flow_flm_apply(struct flow_eth_dev *dev,\n+\t\t\t  struct flm_v17_lrn_data_s *learn_record)\n+{\n+\tuint32_t lrn_ready;\n+\tuint32_t retry = 0;\n+\tuint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];\n+\n+\thw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_LRN_FREE,\n+\t\t\t\t&lrn_ready);\n+\tif (lrn_ready < WORDS_PER_LEARN_DATA) {\n+\t\thw_mod_flm_buf_ctrl_update(&dev->ndev->be);\n+\t\thw_mod_flm_buf_ctrl_get(&dev->ndev->be,\n+\t\t\t\t\tHW_FLM_BUF_CTRL_LRN_FREE, &lrn_ready);\n+\t\twhile (lrn_ready < WORDS_PER_LEARN_DATA) {\n+\t\t\t++retry;\n+\t\t\tif (retry > FLM_PROG_MAX_RETRY)\n+\t\t\t\treturn 1;\n+\n+\t\t\tflm_read_inf_rec_locked(dev, data);\n+\n+\t\t\thw_mod_flm_buf_ctrl_update(&dev->ndev->be);\n+\t\t\thw_mod_flm_buf_ctrl_get(&dev->ndev->be,\n+\t\t\t\t\t\tHW_FLM_BUF_CTRL_LRN_FREE,\n+\t\t\t\t\t\t&lrn_ready);\n+\t\t}\n+\t}\n+\n+\tint res = hw_mod_flm_lrn_data_set_flush(&dev->ndev->be,\n+\t\t\t\t\t\tHW_FLM_FLOW_LRN_DATA_V17,\n+\t\t\t\t\t\t(uint32_t *)learn_record);\n+\treturn res;\n+}\n+\n+int flow_mtr_create_meter(struct flow_eth_dev *dev, uint32_t mtr_id,\n+\t\t\t  uint32_t profile_id, UNUSED uint32_t policy_id,\n+\t\t\t  uint64_t stats_mask)\n+{\n+\tpthread_mutex_lock(&dev->ndev->mtx);\n+\n+\tstruct flm_flow_mtr_handle_s *handle =\n+\t\t(struct flm_flow_mtr_handle_s *)dev->ndev->flm_mtr_handle;\n+\tstruct dual_buckets_s *buckets = &handle->dual_buckets[profile_id];\n+\n+\tstruct flm_v17_lrn_data_s learn_record;\n+\n+\tmemset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));\n+\n+\tlearn_record.sw9 = mtr_id + 1;\n+\tlearn_record.kid = 
1;\n+\n+\tlearn_record.rate = buckets->rate_a;\n+\tlearn_record.size = buckets->size_a;\n+\tlearn_record.fill = buckets->size_a & 0x0fff;\n+\n+\tlearn_record.ft_mbr = 15; /* FT to assign if MBR has been exceeded */\n+\n+\tlearn_record.ent = 1;\n+\tlearn_record.op = 1;\n+\tlearn_record.eor = 1;\n+\n+\tlearn_record.id[0] = mtr_id & 0xff;\n+\tlearn_record.id[1] = (mtr_id >> 8) & 0xff;\n+\tlearn_record.id[2] = (mtr_id >> 16) & 0xff;\n+\tlearn_record.id[3] = (mtr_id >> 24) & 0xff;\n+\tlearn_record.id[8] = 1U << 7;\n+\n+\tif (stats_mask)\n+\t\tlearn_record.vol_idx = 1;\n+\n+\tint res = flow_flm_apply(dev, &learn_record);\n+\n+\tif (res == 0) {\n+\t\tstruct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;\n+\n+\t\tmtr_stat[mtr_id].buckets = buckets;\n+\t\tatomic_store(&mtr_stat[mtr_id].stats_mask, stats_mask);\n+\t}\n+\n+\tpthread_mutex_unlock(&dev->ndev->mtx);\n+\n+\treturn res;\n+}\n+\n+int flow_mtr_destroy_meter(struct flow_eth_dev *dev, uint32_t mtr_id)\n+{\n+\tpthread_mutex_lock(&dev->ndev->mtx);\n+\n+\tstruct flm_v17_lrn_data_s learn_record;\n+\n+\tmemset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));\n+\n+\tlearn_record.sw9 = mtr_id + 1;\n+\tlearn_record.kid = 1;\n+\n+\tlearn_record.ent = 1;\n+\tlearn_record.op = 0;\n+\tlearn_record.eor = 1;\n+\n+\tlearn_record.id[0] = mtr_id & 0xff;\n+\tlearn_record.id[1] = (mtr_id >> 8) & 0xff;\n+\tlearn_record.id[2] = (mtr_id >> 16) & 0xff;\n+\tlearn_record.id[3] = (mtr_id >> 24) & 0xff;\n+\tlearn_record.id[8] = 1U << 7;\n+\n+\t/* Clear statistics so stats_mask prevents updates of counters on deleted meters */\n+\tstruct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;\n+\n+\tatomic_store(&mtr_stat[mtr_id].stats_mask, 0);\n+\tatomic_store(&mtr_stat[mtr_id].n_bytes, 0);\n+\tatomic_store(&mtr_stat[mtr_id].n_pkt, 0);\n+\tmtr_stat[mtr_id].n_bytes_base = 0;\n+\tmtr_stat[mtr_id].n_pkt_base = 0;\n+\tmtr_stat[mtr_id].buckets = NULL;\n+\n+\tint res = flow_flm_apply(dev, &learn_record);\n+\n+\tpthread_mutex_unlock(&dev->ndev->mtx);\n+\n+\treturn res;\n+}\n+\n+int flm_mtr_adjust_stats(struct flow_eth_dev *dev, uint32_t mtr_id,\n+\t\t\t uint32_t adjust_value)\n+{\n+\tpthread_mutex_lock(&dev->ndev->mtx);\n+\n+\tstruct mtr_stat_s *mtr_stat =\n+\t\t&((struct mtr_stat_s *)dev->ndev->mtr_stat_handle)[mtr_id];\n+\n+\tstruct flm_v17_lrn_data_s learn_record;\n+\n+\tmemset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));\n+\n+\tlearn_record.sw9 = mtr_id + 1;\n+\tlearn_record.kid = 1;\n+\n+\tlearn_record.rate = mtr_stat->buckets->rate_a;\n+\tlearn_record.size = mtr_stat->buckets->size_a;\n+\tlearn_record.adj = adjust_value;\n+\n+\tlearn_record.ft_mbr = 15;\n+\n+\tlearn_record.ent = 1;\n+\tlearn_record.op = 2;\n+\tlearn_record.eor = 1;\n+\n+\tif (atomic_load(&mtr_stat->stats_mask))\n+\t\tlearn_record.vol_idx = 1;\n+\n+\tint res = flow_flm_apply(dev, &learn_record);\n+\n+\tpthread_mutex_unlock(&dev->ndev->mtx);\n+\n+\treturn res;\n+}\n+\n+static uint32_t flm_read_inf_rec_locked(struct flow_eth_dev *dev,\n+\t\t\t\t\tuint32_t *data)\n+{\n+\tuint32_t inf_cnt = 0;\n+\n+\thw_mod_flm_buf_ctrl_get(&dev->ndev->be, HW_FLM_BUF_CTRL_INF_AVAIL,\n+\t\t\t\t&inf_cnt);\n+\tif (inf_cnt < WORDS_PER_INF_DATA) {\n+\t\thw_mod_flm_buf_ctrl_update(&dev->ndev->be);\n+\t\thw_mod_flm_buf_ctrl_get(&dev->ndev->be,\n+\t\t\t\t\tHW_FLM_BUF_CTRL_INF_AVAIL, &inf_cnt);\n+\t}\n+\n+\tuint32_t records_to_read = inf_cnt / WORDS_PER_INF_DATA;\n+\n+\tif (records_to_read == 0)\n+\t\treturn 0;\n+\tif (records_to_read > MAX_INF_DATA_RECORDS_PER_READ)\n+\t\trecords_to_read = 
MAX_INF_DATA_RECORDS_PER_READ;\n+\n+\thw_mod_flm_inf_data_update_get(&dev->ndev->be, HW_FLM_FLOW_INF_DATA_V17,\n+\t\t\t\t       data,\n+\t\t\t\t       records_to_read * WORDS_PER_INF_DATA);\n+\n+\treturn records_to_read;\n+}\n+\n+uint32_t flm_mtr_update_stats(struct flow_eth_dev *dev)\n+{\n+\tuint32_t data[WORDS_PER_INF_DATA * MAX_INF_DATA_RECORDS_PER_READ];\n+\n+\tpthread_mutex_lock(&dev->ndev->mtx);\n+\tuint32_t records = flm_read_inf_rec_locked(dev, data);\n+\n+\tpthread_mutex_unlock(&dev->ndev->mtx);\n+\n+\tstruct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;\n+\n+\tfor (uint32_t i = 0; i < records; ++i) {\n+\t\tuint32_t *p_record = &data[i * WORDS_PER_INF_DATA];\n+\n+\t\t/* Check that received record hold valid meter statistics */\n+\t\tif ((p_record[6] < flow_mtr_meters_supported() &&\n+\t\t\t\tp_record[7] == 0 && (p_record[8] >> 31) == 1)) {\n+\t\t\tuint32_t id = p_record[6];\n+\n+\t\t\t/* Don't update a deleted meter */\n+\t\t\tuint64_t stats_mask =\n+\t\t\t\tatomic_load(&mtr_stat[id].stats_mask);\n+\t\t\tif (stats_mask) {\n+\t\t\t\tuint64_t nb = ((uint64_t)p_record[1] << 32) |\n+\t\t\t\t\t      p_record[0];\n+\t\t\t\tuint64_t np = ((uint64_t)p_record[3] << 32) |\n+\t\t\t\t\t      p_record[2];\n+\n+\t\t\t\tatomic_store(&mtr_stat[id].n_pkt,\n+\t\t\t\t\t     np | UINT64_MSB);\n+\t\t\t\tatomic_store(&mtr_stat[id].n_bytes, nb);\n+\t\t\t\tatomic_store(&mtr_stat[id].n_pkt, np);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn records;\n+}\n+\n+void flm_mtr_read_stats(struct flow_eth_dev *dev, uint32_t id,\n+\t\t\tuint64_t *stats_mask, uint64_t *green_pkt,\n+\t\t\tuint64_t *green_bytes, int clear)\n+{\n+\tstruct mtr_stat_s *mtr_stat = dev->ndev->mtr_stat_handle;\n+\t*stats_mask = atomic_load(&mtr_stat[id].stats_mask);\n+\tif (*stats_mask) {\n+\t\tuint64_t pkt_1;\n+\t\tuint64_t pkt_2;\n+\t\tuint64_t nb;\n+\n+\t\tdo {\n+\t\t\tdo {\n+\t\t\t\tpkt_1 = atomic_load(&mtr_stat[id].n_pkt);\n+\t\t\t} while (pkt_1 & UINT64_MSB);\n+\t\t\tnb = atomic_load(&mtr_stat[id].n_bytes);\n+\t\t\tpkt_2 = atomic_load(&mtr_stat[id].n_pkt);\n+\t\t} while (pkt_1 != pkt_2);\n+\n+\t\t*green_pkt = pkt_1 - mtr_stat[id].n_pkt_base;\n+\t\t*green_bytes = nb - mtr_stat[id].n_bytes_base;\n+\t\tif (clear) {\n+\t\t\tmtr_stat[id].n_pkt_base = pkt_1;\n+\t\t\tmtr_stat[id].n_bytes_base = nb;\n+\t\t}\n+\t}\n+}\n+\n+static inline uint8_t convert_port_to_ifr_mtu_recipe(uint32_t port)\n+{\n+\treturn port + 1;\n+}\n+\n+static uint8_t get_port_from_port_id(struct flow_nic_dev *ndev,\n+\t\t\t\t     uint32_t port_id)\n+{\n+\tstruct flow_eth_dev *dev = ndev->eth_base;\n+\n+\twhile (dev) {\n+\t\tif (dev->port_id == port_id)\n+\t\t\treturn dev->port;\n+\t\tdev = dev->next;\n+\t}\n+\n+\treturn UINT8_MAX;\n+}\n+\n+static void nic_insert_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)\n+{\n+\tif (ndev->flow_base)\n+\t\tndev->flow_base->prev = fh;\n+\tfh->next = ndev->flow_base;\n+\tfh->prev = NULL;\n+\tndev->flow_base = fh;\n+}\n+\n+static void nic_remove_flow(struct flow_nic_dev *ndev, struct flow_handle *fh)\n+{\n+\tstruct flow_handle *next = fh->next;\n+\tstruct flow_handle *prev = fh->prev;\n+\n+\tif (next && prev) {\n+\t\tprev->next = next;\n+\t\tnext->prev = prev;\n+\t} else if (next) {\n+\t\tndev->flow_base = next;\n+\t\tnext->prev = NULL;\n+\t} else if (prev) {\n+\t\tprev->next = NULL;\n+\t} else if (ndev->flow_base == fh) {\n+\t\tndev->flow_base = NULL;\n+\t}\n+}\n+\n+static void nic_insert_flow_flm(struct flow_nic_dev *ndev,\n+\t\t\t\tstruct flow_handle *fh)\n+{\n+\tif (ndev->flow_base_flm)\n+\t\tndev->flow_base_flm->prev = 
fh;\n+\tfh->next = ndev->flow_base_flm;\n+\tfh->prev = NULL;\n+\tndev->flow_base_flm = fh;\n+}\n+\n+static void nic_remove_flow_flm(struct flow_nic_dev *ndev,\n+\t\t\t\tstruct flow_handle *fh_flm)\n+{\n+\tstruct flow_handle *next = fh_flm->next;\n+\tstruct flow_handle *prev = fh_flm->prev;\n+\n+\tif (next && prev) {\n+\t\tprev->next = next;\n+\t\tnext->prev = prev;\n+\t} else if (next) {\n+\t\tndev->flow_base_flm = next;\n+\t\tnext->prev = NULL;\n+\t} else if (prev) {\n+\t\tprev->next = NULL;\n+\t} else if (ndev->flow_base_flm == fh_flm) {\n+\t\tndev->flow_base_flm = NULL;\n+\t}\n+}\n+\n+static int flow_elem_type_vlan(const struct flow_elem elem[], int eidx, uint16_t implicit_vlan_vid,\n+\tstruct flow_error *error, struct nic_flow_def *fd, unsigned int sw_counter,\n+\tuint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)\n+{\n+\tconst struct flow_elem_vlan *vlan_spec = (const struct flow_elem_vlan *)elem[eidx].spec;\n+\tconst struct flow_elem_vlan *vlan_mask = (const struct flow_elem_vlan *)elem[eidx].mask;\n+\n+\tif (vlan_spec != NULL && vlan_mask != NULL) {\n+\t\tif (vlan_mask->tci) {\n+\t\t\tif (implicit_vlan_vid > 0) {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"Multiple VLANs not supported \"\n+\t\t\t\t\t\"for implicit VLAN patterns.\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM, error);\n+\t\t\t\tfree(fd);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\n+\t\t\tif (sw_counter >= 2) {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"Key size too big. Out of SW resources.\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_FAILED, error);\n+\t\t\t\tfree(fd);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\n+\t\t\tuint32_t *sw_data = &packet_data[1 - sw_counter];\n+\t\t\tuint32_t *sw_mask = &packet_mask[1 - sw_counter];\n+\n+\t\t\tsw_mask[0] = ntohs(vlan_mask->tci);\n+\t\t\tsw_data[0] = ntohs(vlan_spec->tci) & sw_mask[0];\n+\n+\t\t\tkm_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_FIRST_VLAN, 0);\n+\t\t\tset_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);\n+\t\t\tsw_counter += 1;\n+\t\t}\n+\t}\n+\n+\tfd->vlans += 1;\n+\treturn 0;\n+}\n+\n+static int flow_elem_type_ipv4(const struct flow_elem elem[], int eidx, struct flow_error *error,\n+\tstruct nic_flow_def *fd, unsigned int qw_counter, unsigned int sw_counter,\n+\tuint32_t *packet_data, uint32_t *packet_mask, struct flm_flow_key_def_s *key_def,\n+\tuint32_t any_count)\n+{\n+\tconst struct flow_elem_ipv4 *ipv4_spec = (const struct flow_elem_ipv4 *)elem[eidx].spec;\n+\tconst struct flow_elem_ipv4 *ipv4_mask = (const struct flow_elem_ipv4 *)elem[eidx].mask;\n+\n+\tif (ipv4_spec != NULL && ipv4_mask != NULL) {\n+\t\tif (ipv4_spec->hdr.frag_offset == 0xffff && ipv4_mask->hdr.frag_offset == 0xffff)\n+\t\t\tfd->fragmentation = 0xfe;\n+\n+\t\tif (qw_counter < 2 && (ipv4_mask->hdr.src_ip || ipv4_mask->hdr.dst_ip)) {\n+\t\t\tuint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];\n+\t\t\tuint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];\n+\n+\t\t\tqw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);\n+\t\t\tqw_mask[1] = ntohl(ipv4_mask->hdr.dst_ip);\n+\n+\t\t\tqw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & qw_mask[0];\n+\t\t\tqw_data[1] = ntohl(ipv4_spec->hdr.dst_ip) & qw_mask[1];\n+\n+\t\t\tkm_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 2, DYN_L3, 12);\n+\t\t\tset_key_def_qw(key_def, qw_counter, DYN_L3, 12);\n+\t\t\tqw_counter += 1;\n+\t\t} else {\n+\t\t\tif (2 - sw_counter < ((ipv4_mask->hdr.src_ip ? 1U : 0U) +\n+\t\t\t\t(ipv4_mask->hdr.dst_ip ? 1U : 0U))) {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"Key size too big. 
Out of SW resources.\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_FAILED, error);\n+\t\t\t\tfree(fd);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\n+\t\t\tif (ipv4_mask->hdr.src_ip) {\n+\t\t\t\tuint32_t *sw_data = &packet_data[1 - sw_counter];\n+\t\t\t\tuint32_t *sw_mask = &packet_mask[1 - sw_counter];\n+\n+\t\t\t\tsw_mask[0] = ntohl(ipv4_mask->hdr.src_ip);\n+\t\t\t\tsw_data[0] = ntohl(ipv4_spec->hdr.src_ip) & sw_mask[0];\n+\n+\t\t\t\tkm_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 12);\n+\t\t\t\tset_key_def_sw(key_def, sw_counter, DYN_L3, 12);\n+\t\t\t\tsw_counter += 1;\n+\t\t\t}\n+\n+\t\t\tif (ipv4_mask->hdr.dst_ip) {\n+\t\t\t\tuint32_t *sw_data = &packet_data[1 - sw_counter];\n+\t\t\t\tuint32_t *sw_mask = &packet_mask[1 - sw_counter];\n+\n+\t\t\t\tsw_mask[0] = ntohl(ipv4_mask->hdr.dst_ip);\n+\t\t\t\tsw_data[0] = ntohl(ipv4_spec->hdr.dst_ip) & sw_mask[0];\n+\n+\t\t\t\tkm_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L3, 16);\n+\t\t\t\tset_key_def_sw(key_def, sw_counter, DYN_L3, 16);\n+\t\t\t\tsw_counter += 1;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tif (any_count > 0 || fd->l3_prot != -1)\n+\t\tfd->tunnel_l3_prot = PROT_TUN_L3_IPV4;\n+\telse\n+\t\tfd->l3_prot = PROT_L3_IPV4;\n+\treturn 0;\n+}\n+\n+static int flow_elem_type_ipv6(const struct flow_elem elem[], int eidx, struct flow_error *error,\n+\tstruct nic_flow_def *fd, unsigned int qw_counter, uint32_t *packet_data,\n+\tuint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)\n+{\n+\tconst struct flow_elem_ipv6 *ipv6_spec = (const struct flow_elem_ipv6 *)elem[eidx].spec;\n+\tconst struct flow_elem_ipv6 *ipv6_mask = (const struct flow_elem_ipv6 *)elem[eidx].mask;\n+\n+\tif (ipv6_spec != NULL && ipv6_mask != NULL) {\n+\t\tif (is_non_zero(ipv6_spec->hdr.src_addr, 16)) {\n+\t\t\tif (qw_counter >= 2) {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"Key size too big. Out of QW resources.\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_FAILED, error);\n+\t\t\t\tfree(fd);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\n+\t\t\tuint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];\n+\t\t\tuint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];\n+\n+\t\t\tmemcpy(&qw_data[0], ipv6_spec->hdr.src_addr, 16);\n+\t\t\tmemcpy(&qw_mask[0], ipv6_mask->hdr.src_addr, 16);\n+\n+\t\t\tqw_data[0] = ntohl(qw_data[0]);\n+\t\t\tqw_data[1] = ntohl(qw_data[1]);\n+\t\t\tqw_data[2] = ntohl(qw_data[2]);\n+\t\t\tqw_data[3] = ntohl(qw_data[3]);\n+\n+\t\t\tqw_mask[0] = ntohl(qw_mask[0]);\n+\t\t\tqw_mask[1] = ntohl(qw_mask[1]);\n+\t\t\tqw_mask[2] = ntohl(qw_mask[2]);\n+\t\t\tqw_mask[3] = ntohl(qw_mask[3]);\n+\n+\t\t\tqw_data[0] &= qw_mask[0];\n+\t\t\tqw_data[1] &= qw_mask[1];\n+\t\t\tqw_data[2] &= qw_mask[2];\n+\t\t\tqw_data[3] &= qw_mask[3];\n+\n+\t\t\tkm_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 8);\n+\t\t\tset_key_def_qw(key_def, qw_counter, DYN_L3, 8);\n+\t\t\tqw_counter += 1;\n+\t\t}\n+\n+\t\tif (is_non_zero(ipv6_spec->hdr.dst_addr, 16)) {\n+\t\t\tif (qw_counter >= 2) {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"Key size too big. 
Out of QW resources.\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_FAILED, error);\n+\t\t\t\tfree(fd);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\n+\t\t\tuint32_t *qw_data = &packet_data[2 + 4 - qw_counter * 4];\n+\t\t\tuint32_t *qw_mask = &packet_mask[2 + 4 - qw_counter * 4];\n+\n+\t\t\tmemcpy(&qw_data[0], ipv6_spec->hdr.dst_addr, 16);\n+\t\t\tmemcpy(&qw_mask[0], ipv6_mask->hdr.dst_addr, 16);\n+\n+\t\t\tqw_data[0] = ntohl(qw_data[0]);\n+\t\t\tqw_data[1] = ntohl(qw_data[1]);\n+\t\t\tqw_data[2] = ntohl(qw_data[2]);\n+\t\t\tqw_data[3] = ntohl(qw_data[3]);\n+\n+\t\t\tqw_mask[0] = ntohl(qw_mask[0]);\n+\t\t\tqw_mask[1] = ntohl(qw_mask[1]);\n+\t\t\tqw_mask[2] = ntohl(qw_mask[2]);\n+\t\t\tqw_mask[3] = ntohl(qw_mask[3]);\n+\t\t\tqw_data[0] &= qw_mask[0];\n+\t\t\tqw_data[1] &= qw_mask[1];\n+\t\t\tqw_data[2] &= qw_mask[2];\n+\t\t\tqw_data[3] &= qw_mask[3];\n+\n+\t\t\tkm_add_match_elem(&fd->km, &qw_data[0], &qw_mask[0], 4, DYN_L3, 24);\n+\t\t\tset_key_def_qw(key_def, qw_counter, DYN_L3, 24);\n+\t\t\tqw_counter += 1;\n+\t\t}\n+\t}\n+\n+\tif (any_count > 0 || fd->l3_prot != -1)\n+\t\tfd->tunnel_l3_prot = PROT_TUN_L3_IPV6;\n+\telse\n+\t\tfd->l3_prot = PROT_L3_IPV6;\n+\treturn 0;\n+}\n+\n+static int flow_elem_type_upd(const struct flow_elem elem[], int eidx, struct flow_error *error,\n+\tstruct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,\n+\tuint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)\n+{\n+\tconst struct flow_elem_udp *udp_spec = (const struct flow_elem_udp *)elem[eidx].spec;\n+\tconst struct flow_elem_udp *udp_mask = (const struct flow_elem_udp *)elem[eidx].mask;\n+\n+\tif (udp_spec != NULL && udp_mask != NULL) {\n+\t\tif (udp_mask->hdr.src_port || udp_mask->hdr.dst_port) {\n+\t\t\tif (sw_counter >= 2) {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"Key size too big. Out of SW resources.\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_FAILED, error);\n+\t\t\t\tfree(fd);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\n+\t\t\tuint32_t *sw_data = &packet_data[1 - sw_counter];\n+\t\t\tuint32_t *sw_mask = &packet_mask[1 - sw_counter];\n+\n+\t\t\tsw_mask[0] = (ntohs(udp_mask->hdr.src_port) << 16) |\n+\t\t\t\tntohs(udp_mask->hdr.dst_port);\n+\t\t\tsw_data[0] = ((ntohs(udp_spec->hdr.src_port) << 16) |\n+\t\t\t\tntohs(udp_spec->hdr.dst_port)) & sw_mask[0];\n+\n+\t\t\tkm_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);\n+\t\t\tset_key_def_sw(key_def, sw_counter, DYN_L4, 0);\n+\t\t\tsw_counter += 1;\n+\t\t}\n+\t}\n+\n+\tif (any_count > 0 || fd->l4_prot != -1) {\n+\t\tfd->tunnel_l4_prot = PROT_TUN_L4_UDP;\n+\t\tkey_def->inner_proto = 1;\n+\t} else {\n+\t\tfd->l4_prot = PROT_L4_UDP;\n+\t\tkey_def->outer_proto = 1;\n+\t}\n+\treturn 0;\n+}\n+\n+static int flow_elem_type_sctp(const struct flow_elem elem[], int eidx, struct flow_error *error,\n+\tstruct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,\n+\tuint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)\n+{\n+\tconst struct flow_elem_sctp *sctp_spec = (const struct flow_elem_sctp *)elem[eidx].spec;\n+\tconst struct flow_elem_sctp *sctp_mask = (const struct flow_elem_sctp *)elem[eidx].mask;\n+\n+\tif (sctp_spec != NULL && sctp_mask != NULL) {\n+\t\tif (sctp_mask->hdr.src_port || sctp_mask->hdr.dst_port) {\n+\t\t\tif (sw_counter >= 2) {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"Key size too big. 
Out of SW resources.\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_FAILED, error);\n+\t\t\t\tfree(fd);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\n+\t\t\tuint32_t *sw_data = &packet_data[1 - sw_counter];\n+\t\t\tuint32_t *sw_mask = &packet_mask[1 - sw_counter];\n+\n+\t\t\tsw_mask[0] = (ntohs(sctp_mask->hdr.src_port) << 16) |\n+\t\t\t\tntohs(sctp_mask->hdr.dst_port);\n+\t\t\tsw_data[0] = ((ntohs(sctp_spec->hdr.src_port) << 16) |\n+\t\t\t\tntohs(sctp_spec->hdr.dst_port)) & sw_mask[0];\n+\n+\t\t\tkm_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);\n+\t\t\tset_key_def_sw(key_def, sw_counter, DYN_L4, 0);\n+\t\t\tsw_counter += 1;\n+\t\t}\n+\t}\n+\n+\tif (any_count > 0 || fd->l4_prot != -1) {\n+\t\tfd->tunnel_l4_prot = PROT_TUN_L4_SCTP;\n+\t\tkey_def->inner_proto = 1;\n+\t} else {\n+\t\tfd->l4_prot = PROT_L4_SCTP;\n+\t\tkey_def->outer_proto = 1;\n+\t}\n+\treturn 0;\n+}\n+\n+static int flow_elem_type_tcp(const struct flow_elem elem[], int eidx, struct flow_error *error,\n+\tstruct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,\n+\tuint32_t *packet_mask, struct flm_flow_key_def_s *key_def, uint32_t any_count)\n+{\n+\tconst struct flow_elem_tcp *tcp_spec = (const struct flow_elem_tcp *)elem[eidx].spec;\n+\tconst struct flow_elem_tcp *tcp_mask = (const struct flow_elem_tcp *)elem[eidx].mask;\n+\n+\tif (tcp_spec != NULL && tcp_mask != NULL) {\n+\t\tif (tcp_mask->hdr.src_port || tcp_mask->hdr.dst_port) {\n+\t\t\tif (sw_counter >= 2) {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"Key size too big. Out of SW resources.\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_FAILED, error);\n+\t\t\t\tfree(fd);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\n+\t\t\tuint32_t *sw_data = &packet_data[1 - sw_counter];\n+\t\t\tuint32_t *sw_mask = &packet_mask[1 - sw_counter];\n+\n+\t\t\tsw_mask[0] = (ntohs(tcp_mask->hdr.src_port) << 16) |\n+\t\t\t\tntohs(tcp_mask->hdr.dst_port);\n+\t\t\tsw_data[0] = ((ntohs(tcp_spec->hdr.src_port) << 16) |\n+\t\t\t\tntohs(tcp_spec->hdr.dst_port)) & sw_mask[0];\n+\n+\t\t\tkm_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4, 0);\n+\t\t\tset_key_def_sw(key_def, sw_counter, DYN_L4, 0);\n+\t\t\tsw_counter += 1;\n+\t\t}\n+\t}\n+\n+\tif (any_count > 0 || fd->l4_prot != -1) {\n+\t\tfd->tunnel_l4_prot = PROT_TUN_L4_TCP;\n+\t\tkey_def->inner_proto = 1;\n+\t} else {\n+\t\tfd->l4_prot = PROT_L4_TCP;\n+\t\tkey_def->outer_proto = 1;\n+\t}\n+\treturn 0;\n+}\n+\n+static int flow_elem_type_gtp(const struct flow_elem elem[], int eidx, struct flow_error *error,\n+\tstruct nic_flow_def *fd, unsigned int sw_counter, uint32_t *packet_data,\n+\tuint32_t *packet_mask, struct flm_flow_key_def_s *key_def)\n+{\n+\tconst struct flow_elem_gtp *gtp_spec = (const struct flow_elem_gtp *)elem[eidx].spec;\n+\tconst struct flow_elem_gtp *gtp_mask = (const struct flow_elem_gtp *)elem[eidx].mask;\n+\n+\tif (gtp_spec != NULL && gtp_mask != NULL) {\n+\t\tif (gtp_mask->teid) {\n+\t\t\tif (sw_counter >= 2) {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"Key size too big. 
Out of SW resources.\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_FAILED, error);\n+\t\t\t\tfree(fd);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\n+\t\t\tuint32_t *sw_data = &packet_data[1 - sw_counter];\n+\t\t\tuint32_t *sw_mask = &packet_mask[1 - sw_counter];\n+\n+\t\t\tsw_mask[0] = ntohl(gtp_mask->teid);\n+\t\t\tsw_data[0] = ntohl(gtp_spec->teid) & sw_mask[0];\n+\n+\t\t\tkm_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1, DYN_L4_PAYLOAD, 4);\n+\t\t\tset_key_def_sw(key_def, sw_counter, DYN_L4_PAYLOAD, 4);\n+\t\t\tsw_counter += 1;\n+\t\t}\n+\t}\n+\n+\tfd->tunnel_prot = PROT_TUN_GTPV1U;\n+\treturn 0;\n+}\n+\n+static struct nic_flow_def *interpret_flow_elements(struct flow_eth_dev *dev,\n+\tconst struct flow_elem elem[], const struct flow_action action[],\n+\tstruct flow_error *error, uint16_t implicit_vlan_vid,\n+\tuint32_t *in_port_id, uint32_t *num_dest_port,\n+\tuint32_t *num_queues, uint32_t *packet_data,\n+\tuint32_t *packet_mask, struct flm_flow_key_def_s *key_def)\n+{\n+\tuint32_t any_count = 0;\n+\tint mtr_count = 0;\n+\n+\tunsigned int encap_decap_order = 0;\n+\n+\tunsigned int qw_counter = 0;\n+\tunsigned int sw_counter = 0;\n+\n+\tuint64_t modify_field_use_flags = 0x0;\n+\n+\t*in_port_id = UINT32_MAX;\n+\t*num_dest_port = 0;\n+\t*num_queues = 0;\n+\n+\tmemset(packet_data, 0x0, sizeof(uint32_t) * 10);\n+\tmemset(packet_mask, 0x0, sizeof(uint32_t) * 10);\n+\tkey_def->data = 0;\n+\n+\tif (action == NULL || elem == NULL) {\n+\t\tflow_nic_set_error(ERR_FAILED, error);\n+\t\tNT_LOG(ERR, FILTER, \"Flow items / actions missing\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\tstruct nic_flow_def *fd = calloc(1, sizeof(struct nic_flow_def));\n+\n+\tif (!fd) {\n+\t\tflow_nic_set_error(ERR_MEMORY, error);\n+\t\tNT_LOG(ERR, FILTER, \"ERR Memory\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\t/* Set default values for fd */\n+\tfd->full_offload = -1;\n+\tfd->in_port_override = -1;\n+\tfd->mark = UINT32_MAX;\n+\tfd->jump_to_group = UINT32_MAX;\n+\n+\tfd->l2_prot = -1;\n+\tfd->l3_prot = -1;\n+\tfd->l4_prot = -1;\n+\tfd->vlans = 0;\n+\tfd->tunnel_prot = -1;\n+\tfd->tunnel_l3_prot = -1;\n+\tfd->tunnel_l4_prot = -1;\n+\tfd->fragmentation = -1;\n+\n+\tNT_LOG(DBG, FILTER,\n+\t       \">>>>> [Dev %p] Nic %i, Port %i: fd %p - FLOW Interpretation <<<<<\\n\",\n+\t       dev, dev->ndev->adapter_no, dev->port, fd);\n+\n+\t/*\n+\t * Gather flow match + actions and convert into internal flow definition structure\n+\t * (struct nic_flow_def_s)\n+\t * This is the 1st step in the flow creation - validate, convert and prepare\n+\t */\n+\tfor (int aidx = 0; action[aidx].type != FLOW_ACTION_TYPE_END; ++aidx) {\n+\t\tswitch (action[aidx].type) {\n+\t\tcase FLOW_ACTION_TYPE_PORT_ID:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Dev:%p: FLOW_ACTION_TYPE_PORT_ID\\n\", dev);\n+\t\t\tif (action[aidx].conf) {\n+\t\t\t\tuint32_t port_id =\n+\t\t\t\t\t((const struct flow_action_port_id *)\n+\t\t\t\t\t action[aidx]\n+\t\t\t\t\t .conf)\n+\t\t\t\t\t->id;\n+\t\t\t\tuint8_t port = get_port_from_port_id(dev->ndev,\n+\t\t\t\t\t\t\t\t     port_id);\n+\n+\t\t\t\tif (fd->dst_num_avail == MAX_OUTPUT_DEST) {\n+\t\t\t\t\t/* ERROR too many output destinations */\n+\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t       \"Too many output destinations\\n\");\n+\t\t\t\t\tflow_nic_set_error(ERR_OUTPUT_TOO_MANY,\n+\t\t\t\t\t\t\t   error);\n+\t\t\t\t\tfree(fd);\n+\t\t\t\t\treturn NULL;\n+\t\t\t\t}\n+\n+\t\t\t\tif (port >= dev->ndev->be.num_phy_ports) {\n+\t\t\t\t\t/* ERROR phy port out of range */\n+\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t       \"Phy port out of 
range\\n\");\n+\t\t\t\t\tflow_nic_set_error(ERR_OUTPUT_INVALID,\n+\t\t\t\t\t\t\t   error);\n+\t\t\t\t\tfree(fd);\n+\t\t\t\t\treturn NULL;\n+\t\t\t\t}\n+\n+\t\t\t\t/* New destination port to add */\n+\t\t\t\tfd->dst_id[fd->dst_num_avail].owning_port_id =\n+\t\t\t\t\tport_id;\n+\t\t\t\tfd->dst_id[fd->dst_num_avail].type = PORT_PHY;\n+\t\t\t\tfd->dst_id[fd->dst_num_avail].id = (int)port;\n+\t\t\t\tfd->dst_id[fd->dst_num_avail].active = 1;\n+\t\t\t\tfd->dst_num_avail++;\n+\n+\t\t\t\tif (fd->flm_mtu_fragmentation_recipe == 0) {\n+\t\t\t\t\tfd->flm_mtu_fragmentation_recipe =\n+\t\t\t\t\t\tconvert_port_to_ifr_mtu_recipe(port);\n+\t\t\t\t}\n+\n+\t\t\t\tif (fd->full_offload < 0)\n+\t\t\t\t\tfd->full_offload = 1;\n+\n+\t\t\t\t*num_dest_port += 1;\n+\n+\t\t\t\tNT_LOG(DBG, FILTER, \"Phy port ID: %i\\n\",\n+\t\t\t\t       (int)port);\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ACTION_TYPE_QUEUE:\n+\t\t\tNT_LOG(DBG, FILTER, \"Dev:%p: FLOW_ACTION_TYPE_QUEUE\\n\",\n+\t\t\t       dev);\n+\t\t\tif (action[aidx].conf) {\n+\t\t\t\tconst struct flow_action_queue *queue =\n+\t\t\t\t\t(const struct flow_action_queue *)\n+\t\t\t\t\taction[aidx]\n+\t\t\t\t\t.conf;\n+\n+\t\t\t\tint hw_id = rx_queue_idx_to_hw_id(dev,\n+\t\t\t\t\t\t\t\t  queue->index);\n+\n+\t\t\t\tfd->dst_id[fd->dst_num_avail].owning_port_id =\n+\t\t\t\t\tdev->port;\n+\t\t\t\tfd->dst_id[fd->dst_num_avail].id = hw_id;\n+\t\t\t\tfd->dst_id[fd->dst_num_avail].type = PORT_VIRT;\n+\t\t\t\tfd->dst_id[fd->dst_num_avail].active = 1;\n+\t\t\t\tfd->dst_num_avail++;\n+\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"Dev:%p: FLOW_ACTION_TYPE_QUEUE port %u, queue index: %u, hw id %u\\n\",\n+\t\t\t\t       dev, dev->port, queue->index, hw_id);\n+\n+\t\t\t\tfd->full_offload = 0;\n+\t\t\t\t*num_queues += 1;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ACTION_TYPE_RSS:\n+\t\t\tNT_LOG(DBG, FILTER, \"Dev:%p: FLOW_ACTION_TYPE_RSS\\n\",\n+\t\t\t       dev);\n+\t\t\tif (action[aidx].conf) {\n+\t\t\t\tconst struct flow_action_rss *rss =\n+\t\t\t\t\t(const struct flow_action_rss *)\n+\t\t\t\t\taction[aidx]\n+\t\t\t\t\t.conf;\n+\n+\t\t\t\tfor (uint32_t i = 0; i < rss->queue_num; ++i) {\n+\t\t\t\t\tint hw_id = rx_queue_idx_to_hw_id(dev, rss->queue[i]);\n+\n+\t\t\t\t\tfd->dst_id[fd->dst_num_avail]\n+\t\t\t\t\t.owning_port_id = dev->port;\n+\t\t\t\t\tfd->dst_id[fd->dst_num_avail].id =\n+\t\t\t\t\t\thw_id;\n+\t\t\t\t\tfd->dst_id[fd->dst_num_avail].type =\n+\t\t\t\t\t\tPORT_VIRT;\n+\t\t\t\t\tfd->dst_id[fd->dst_num_avail].active =\n+\t\t\t\t\t\t1;\n+\t\t\t\t\tfd->dst_num_avail++;\n+\t\t\t\t}\n+\n+\t\t\t\tfd->full_offload = 0;\n+\t\t\t\t*num_queues += rss->queue_num;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ACTION_TYPE_MARK:\n+\t\t\tNT_LOG(DBG, FILTER, \"Dev:%p: FLOW_ACTION_TYPE_MARK\\n\",\n+\t\t\t       dev);\n+\t\t\tif (action[aidx].conf) {\n+\t\t\t\tfd->mark = ((const struct flow_action_mark *)\n+\t\t\t\t\t    action[aidx]\n+\t\t\t\t\t    .conf)\n+\t\t\t\t\t   ->id;\n+\t\t\t\tNT_LOG(DBG, FILTER, \"Mark: %i\\n\", fd->mark);\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ACTION_TYPE_JUMP:\n+\t\t\tNT_LOG(DBG, FILTER, \"Dev:%p: FLOW_ACTION_TYPE_JUMP\\n\",\n+\t\t\t       dev);\n+\t\t\tif (action[aidx].conf) {\n+\t\t\t\tconst struct flow_action_jump *jump =\n+\t\t\t\t\t(const struct flow_action_jump *)\n+\t\t\t\t\taction[aidx]\n+\t\t\t\t\t.conf;\n+\t\t\t\tfd->jump_to_group = jump->group;\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"Dev:%p: FLOW_ACTION_TYPE_JUMP: group %u\\n\",\n+\t\t\t\t       dev, jump->group);\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase 
FLOW_ACTION_TYPE_DROP:\n+\t\t\tNT_LOG(DBG, FILTER, \"Dev:%p: FLOW_ACTION_TYPE_DROP\\n\",\n+\t\t\t       dev);\n+\t\t\tif (action[aidx].conf) {\n+\t\t\t\tfd->dst_id[fd->dst_num_avail].owning_port_id =\n+\t\t\t\t\t0;\n+\t\t\t\tfd->dst_id[fd->dst_num_avail].id = 0;\n+\t\t\t\tfd->dst_id[fd->dst_num_avail].type = PORT_NONE;\n+\t\t\t\tfd->dst_num_avail++;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ACTION_TYPE_METER:\n+\t\t\tNT_LOG(DBG, FILTER, \"Dev:%p: FLOW_ACTION_TYPE_METER\\n\",\n+\t\t\t       dev);\n+\t\t\tif (action[aidx].conf) {\n+\t\t\t\tconst struct flow_action_meter *meter =\n+\t\t\t\t\t(const struct flow_action_meter *)\n+\t\t\t\t\taction[aidx]\n+\t\t\t\t\t.conf;\n+\t\t\t\tif (mtr_count >= MAX_FLM_MTRS_SUPPORTED) {\n+\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t       \"ERROR: - Number of METER actions exceeds %d.\\n\",\n+\t\t\t\t\t       MAX_FLM_MTRS_SUPPORTED);\n+\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t   error);\n+\t\t\t\t\tfree(fd);\n+\t\t\t\t\treturn NULL;\n+\t\t\t\t}\n+\t\t\t\tfd->mtr_ids[mtr_count++] = meter->mtr_id + 1;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ACTION_TYPE_RAW_ENCAP:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Dev:%p: FLOW_ACTION_TYPE_RAW_ENCAP\\n\", dev);\n+\t\t\tif (action[aidx].conf) {\n+\t\t\t\tconst struct flow_action_raw_encap *encap =\n+\t\t\t\t\t(const struct flow_action_raw_encap *)\n+\t\t\t\t\taction[aidx]\n+\t\t\t\t\t.conf;\n+\t\t\t\tconst struct flow_elem *items = encap->items;\n+\n+\t\t\t\tif (encap_decap_order != 1) {\n+\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t       \"ERROR: - RAW_ENCAP must follow RAW_DECAP.\\n\");\n+\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t   error);\n+\t\t\t\t\tfree(fd);\n+\t\t\t\t\treturn NULL;\n+\t\t\t\t}\n+\n+\t\t\t\tif (encap->size == 0 || encap->size > 255 ||\n+\t\t\t\t\t\tencap->item_count < 2) {\n+\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t       \"ERROR: - RAW_ENCAP data/size invalid.\\n\");\n+\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t   error);\n+\t\t\t\t\tfree(fd);\n+\t\t\t\t\treturn NULL;\n+\t\t\t\t}\n+\n+\t\t\t\tencap_decap_order = 2;\n+\n+\t\t\t\tfd->tun_hdr.len = (uint8_t)encap->size;\n+\t\t\t\tmemcpy(fd->tun_hdr.d.hdr8, encap->data,\n+\t\t\t\t       fd->tun_hdr.len);\n+\n+\t\t\t\twhile (items->type != FLOW_ELEM_TYPE_END) {\n+\t\t\t\t\tswitch (items->type) {\n+\t\t\t\t\tcase FLOW_ELEM_TYPE_ETH:\n+\t\t\t\t\t\tfd->tun_hdr.l2_len = 14;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_ELEM_TYPE_VLAN:\n+\t\t\t\t\t\tfd->tun_hdr.nb_vlans += 1;\n+\t\t\t\t\t\tfd->tun_hdr.l2_len += 4;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_ELEM_TYPE_IPV4:\n+\t\t\t\t\t\tfd->tun_hdr.ip_version = 4;\n+\t\t\t\t\t\tfd->tun_hdr.l3_len = sizeof(struct ipv4_hdr_s);\n+\t\t\t\t\t\tfd->tun_hdr.new_outer = 1;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_ELEM_TYPE_IPV6:\n+\t\t\t\t\t\tfd->tun_hdr.ip_version = 6;\n+\t\t\t\t\t\tfd->tun_hdr.l3_len = sizeof(struct ipv6_hdr_s);\n+\t\t\t\t\t\tfd->tun_hdr.new_outer = 1;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_ELEM_TYPE_SCTP:\n+\t\t\t\t\t\tfd->tun_hdr.l4_len = sizeof(struct sctp_hdr_s);\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_ELEM_TYPE_TCP:\n+\t\t\t\t\t\tfd->tun_hdr.l4_len = sizeof(struct tcp_hdr_s);\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_ELEM_TYPE_UDP:\n+\t\t\t\t\t\tfd->tun_hdr.l4_len = sizeof(struct udp_hdr_s);\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_ELEM_TYPE_ICMP:\n+\t\t\t\t\t\tfd->tun_hdr.l4_len = sizeof(struct 
icmp_hdr_s);\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tdefault:\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\t}\n+\t\t\t\t\titems++;\n+\t\t\t\t}\n+\n+\t\t\t\tif (fd->tun_hdr.nb_vlans > 3) {\n+\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t       \"ERROR: - Encapsulation with %d vlans not supported.\\n\",\n+\t\t\t\t\t       (int)fd->tun_hdr.nb_vlans);\n+\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t   error);\n+\t\t\t\t\tfree(fd);\n+\t\t\t\t\treturn NULL;\n+\t\t\t\t}\n+\n+\t\t\t\t/* Convert encap data to 128-bit little endian */\n+\t\t\t\tfor (size_t i = 0; i < (encap->size + 15) / 16;\n+\t\t\t\t\t\t++i) {\n+\t\t\t\t\tuint8_t *data =\n+\t\t\t\t\t\tfd->tun_hdr.d.hdr8 + i * 16;\n+\t\t\t\t\tfor (unsigned int j = 0; j < 8; ++j) {\n+\t\t\t\t\t\tuint8_t t = data[j];\n+\n+\t\t\t\t\t\tdata[j] = data[15 - j];\n+\t\t\t\t\t\tdata[15 - j] = t;\n+\t\t\t\t\t}\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ACTION_TYPE_RAW_DECAP:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Dev:%p: FLOW_ACTION_TYPE_RAW_DECAP\\n\", dev);\n+\t\t\tif (action[aidx].conf) {\n+\t\t\t\tconst struct flow_action_raw_decap *decap =\n+\t\t\t\t\t(const struct flow_action_raw_decap *)\n+\t\t\t\t\taction[aidx]\n+\t\t\t\t\t.conf;\n+\n+\t\t\t\tif (encap_decap_order != 0) {\n+\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t       \"ERROR: - RAW_ENCAP must follow RAW_DECAP.\\n\");\n+\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t   error);\n+\t\t\t\t\tfree(fd);\n+\t\t\t\t\treturn NULL;\n+\t\t\t\t}\n+\n+\t\t\t\tif (decap->item_count < 2) {\n+\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t       \"ERROR: - RAW_DECAP must decap something.\\n\");\n+\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t   error);\n+\t\t\t\t\tfree(fd);\n+\t\t\t\t\treturn NULL;\n+\t\t\t\t}\n+\n+\t\t\t\tencap_decap_order = 1;\n+\n+\t\t\t\tfd->header_strip_start_dyn = 2;\n+\t\t\t\tfd->header_strip_start_ofs = 2;\n+\n+\t\t\t\tswitch (decap->items[decap->item_count - 2]\n+\t\t\t\t\t\t.type) {\n+\t\t\t\tcase FLOW_ELEM_TYPE_ETH:\n+\t\t\t\tcase FLOW_ELEM_TYPE_VLAN:\n+\t\t\t\t\tfd->header_strip_end_dyn = 4;\n+\t\t\t\t\tfd->header_strip_end_ofs = 0;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase FLOW_ELEM_TYPE_IPV4:\n+\t\t\t\tcase FLOW_ELEM_TYPE_IPV6:\n+\t\t\t\t\tfd->header_strip_end_dyn = 7;\n+\t\t\t\t\tfd->header_strip_end_ofs = 0;\n+\t\t\t\t\tfd->header_strip_removed_outer_ip = 1;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase FLOW_ELEM_TYPE_SCTP:\n+\t\t\t\tcase FLOW_ELEM_TYPE_TCP:\n+\t\t\t\tcase FLOW_ELEM_TYPE_UDP:\n+\t\t\t\tcase FLOW_ELEM_TYPE_ICMP:\n+\t\t\t\t\tfd->header_strip_end_dyn = 8;\n+\t\t\t\t\tfd->header_strip_end_ofs = 0;\n+\t\t\t\t\tfd->header_strip_removed_outer_ip = 1;\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase FLOW_ELEM_TYPE_GTP:\n+\t\t\t\t\tfd->header_strip_end_dyn = 13;\n+\t\t\t\t\tfd->header_strip_end_ofs = 0;\n+\t\t\t\t\tfd->header_strip_removed_outer_ip = 1;\n+\t\t\t\t\tbreak;\n+\t\t\t\tdefault:\n+\t\t\t\t\tfd->header_strip_end_dyn = 1;\n+\t\t\t\t\tfd->header_strip_end_ofs = 0;\n+\t\t\t\t\tfd->header_strip_removed_outer_ip = 1;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ACTION_TYPE_MODIFY_FIELD:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Dev:%p: FLOW_ACTION_TYPE_MODIFY_FIELD\\n\", dev);\n+\t\t\t{\n+\t\t\t\tconst struct flow_action_modify_field *modify_field =\n+\t\t\t\t\t(const struct flow_action_modify_field *)\n+\t\t\t\t\taction[aidx]\n+\t\t\t\t\t.conf;\n+\t\t\t\tuint64_t modify_field_use_flag = 0;\n+\n+\t\t\t\tif (modify_field->src.field !=\n+\t\t\t\t\t\tFLOW_FIELD_VALUE) {\n+\t\t\t\t\tNT_LOG(ERR, 
FILTER,\n+\t\t\t\t\t       \"MODIFY_FIELD only src type VALUE is supported.\\n\");\n+\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t   error);\n+\t\t\t\t\tfree(fd);\n+\t\t\t\t\treturn NULL;\n+\t\t\t\t}\n+\n+\t\t\t\tif (modify_field->dst.level > 2) {\n+\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t       \"MODIFY_FIELD only dst level 0, 1, and 2 is supported.\\n\");\n+\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t   error);\n+\t\t\t\t\tfree(fd);\n+\t\t\t\t\treturn NULL;\n+\t\t\t\t}\n+\n+\t\t\t\tif (modify_field->dst.field ==\n+\t\t\t\t\t\tFLOW_FIELD_IPV4_TTL ||\n+\t\t\t\t\t\tmodify_field->dst.field ==\n+\t\t\t\t\t\tFLOW_FIELD_IPV6_HOPLIMIT) {\n+\t\t\t\t\tif (modify_field->operation !=\n+\t\t\t\t\t\t\tFLOW_MODIFY_SUB) {\n+\t\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t\t       \"MODIFY_FIELD only operation SUB is supported for TTL/HOPLIMIT.\\n\");\n+\t\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t\t   error);\n+\t\t\t\t\t\tfree(fd);\n+\t\t\t\t\t\treturn NULL;\n+\t\t\t\t\t}\n+\n+\t\t\t\t\tif (fd->ttl_sub_enable) {\n+\t\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t\t       \"MODIFY_FIELD TTL/HOPLIMIT resource already in use.\\n\");\n+\t\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t\t   error);\n+\t\t\t\t\t\tfree(fd);\n+\t\t\t\t\t\treturn NULL;\n+\t\t\t\t\t}\n+\n+\t\t\t\t\tfd->ttl_sub_enable = 1;\n+\t\t\t\t\tfd->ttl_sub_ipv4 =\n+\t\t\t\t\t\t(modify_field->dst.field ==\n+\t\t\t\t\t\t FLOW_FIELD_IPV4_TTL) ?\n+\t\t\t\t\t\t1 :\n+\t\t\t\t\t\t0;\n+\t\t\t\t\tfd->ttl_sub_outer =\n+\t\t\t\t\t\t(modify_field->dst.level <= 1) ?\n+\t\t\t\t\t\t1 :\n+\t\t\t\t\t\t0;\n+\t\t\t\t} else {\n+\t\t\t\t\tif (modify_field->operation !=\n+\t\t\t\t\t\t\tFLOW_MODIFY_SET) {\n+\t\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t\t       \"MODIFY_FIELD only operation SET \"\n+\t\t\t\t\t\t       \"is supported in general.\\n\");\n+\t\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t\t   error);\n+\t\t\t\t\t\tfree(fd);\n+\t\t\t\t\t\treturn NULL;\n+\t\t\t\t\t}\n+\n+\t\t\t\t\tif (fd->modify_field_count >=\n+\t\t\t\t\t\t\tdev->ndev->be.tpe.nb_cpy_writers) {\n+\t\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t\t       \"MODIFY_FIELD exceeded maximum of %u\"\n+\t\t\t\t\t\t       \" MODIFY_FIELD actions.\\n\",\n+\t\t\t\t\t\t       dev->ndev->be.tpe\n+\t\t\t\t\t\t       .nb_cpy_writers);\n+\t\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t\t   error);\n+\t\t\t\t\t\tfree(fd);\n+\t\t\t\t\t\treturn NULL;\n+\t\t\t\t\t}\n+\n+\t\t\t\t\tswitch (modify_field->dst.field) {\n+\t\t\t\t\tcase FLOW_FIELD_IPV4_DSCP:\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.select =\n+\t\t\t\t\t\t\tCPY_SELECT_DSCP_IPV4;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.dyn = DYN_L3;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.ofs = 1;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.len = 1;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_FIELD_IPV6_DSCP:\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.select =\n+\t\t\t\t\t\t\tCPY_SELECT_DSCP_IPV6;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.dyn = DYN_L3;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.ofs = 0;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t/*\n+\t\t\t\t\t\t * len=2 is needed because IPv6 DSCP overlaps 2\n+\t\t\t\t\t\t * 
bytes.\n+\t\t\t\t\t\t */\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.len = 2;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_FIELD_GTP_PSC_QFI:\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.select =\n+\t\t\t\t\t\t\tCPY_SELECT_RQI_QFI;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.dyn =\n+\t\t\t\t\t\t\tDYN_L4_PAYLOAD;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.ofs = 14;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.len = 1;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_FIELD_IPV4_SRC:\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.select =\n+\t\t\t\t\t\t\tCPY_SELECT_IPV4;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.dyn = DYN_L3;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.ofs = 12;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.len = 4;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_FIELD_IPV4_DST:\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.select =\n+\t\t\t\t\t\t\tCPY_SELECT_IPV4;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.dyn = DYN_L3;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.ofs = 16;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.len = 4;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_FIELD_TCP_PORT_SRC:\n+\t\t\t\t\t/* fallthrough */\n+\t\t\t\t\tcase FLOW_FIELD_UDP_PORT_SRC:\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.select =\n+\t\t\t\t\t\t\tCPY_SELECT_PORT;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.dyn = DYN_L4;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.ofs = 0;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.len = 2;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_FIELD_TCP_PORT_DST:\n+\t\t\t\t\t/* fallthrough */\n+\t\t\t\t\tcase FLOW_FIELD_UDP_PORT_DST:\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.select =\n+\t\t\t\t\t\t\tCPY_SELECT_PORT;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.dyn = DYN_L4;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.ofs = 2;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.len = 2;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tcase FLOW_FIELD_GTP_TEID:\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.select =\n+\t\t\t\t\t\t\tCPY_SELECT_TEID;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.dyn =\n+\t\t\t\t\t\t\tDYN_L4_PAYLOAD;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.ofs = 4;\n+\t\t\t\t\t\tfd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.len = 4;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tdefault:\n+\t\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t\t       \"MODIFY_FIELD dst type is not supported.\\n\");\n+\t\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t\t   error);\n+\t\t\t\t\t\tfree(fd);\n+\t\t\t\t\t\treturn NULL;\n+\t\t\t\t\t}\n+\n+\t\t\t\t\tmodify_field_use_flag =\n+\t\t\t\t\t\t1\n+\t\t\t\t\t\t<< 
fd->modify_field\n+\t\t\t\t\t\t[fd->modify_field_count]\n+\t\t\t\t\t\t.select;\n+\t\t\t\t\tif (modify_field_use_flag &\n+\t\t\t\t\t\t\tmodify_field_use_flags) {\n+\t\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t\t       \"MODIFY_FIELD dst type hardware \"\n+\t\t\t\t\t\t       \"resource already used.\\n\");\n+\t\t\t\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED,\n+\t\t\t\t\t\t\t\t   error);\n+\t\t\t\t\t\tfree(fd);\n+\t\t\t\t\t\treturn NULL;\n+\t\t\t\t\t}\n+\n+\t\t\t\t\tmemcpy(fd->modify_field\n+\t\t\t\t\t       [fd->modify_field_count]\n+\t\t\t\t\t       .value8,\n+\t\t\t\t\t       modify_field->src.value, 16);\n+\n+\t\t\t\t\tfd->modify_field[fd->modify_field_count]\n+\t\t\t\t\t.level =\n+\t\t\t\t\t\tmodify_field->dst.level;\n+\n+\t\t\t\t\tmodify_field_use_flags |=\n+\t\t\t\t\t\tmodify_field_use_flag;\n+\t\t\t\t\tfd->modify_field_count += 1;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tdefault:\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"Invalid or unsupported flow action received - %i\\n\",\n+\t\t\t       action[aidx].type);\n+\t\t\tflow_nic_set_error(ERR_ACTION_UNSUPPORTED, error);\n+\t\t\tfree(fd);\n+\t\t\treturn NULL;\n+\t\t}\n+\t}\n+\n+\tif (!(encap_decap_order == 0 || encap_decap_order == 2)) {\n+\t\tNT_LOG(ERR, FILTER, \"Invalid encap/decap actions\\n\");\n+\t\tfree(fd);\n+\t\treturn NULL;\n+\t}\n+\n+\tif (implicit_vlan_vid > 0) {\n+\t\tuint32_t *sw_data = &packet_data[1 - sw_counter];\n+\t\tuint32_t *sw_mask = &packet_mask[1 - sw_counter];\n+\n+\t\tsw_mask[0] = 0x0fff;\n+\t\tsw_data[0] = implicit_vlan_vid & sw_mask[0];\n+\n+\t\tkm_add_match_elem(&fd->km, &sw_data[0], &sw_mask[0], 1,\n+\t\t\t\t  DYN_FIRST_VLAN, 0);\n+\t\tset_key_def_sw(key_def, sw_counter, DYN_FIRST_VLAN, 0);\n+\t\tsw_counter += 1;\n+\n+\t\tfd->vlans += 1;\n+\t}\n+\n+\t/*\n+\t * All Actions interpreted\n+\t */\n+\tfor (int eidx = 0; elem[eidx].type != FLOW_ELEM_TYPE_END; ++eidx) {\n+\t\tswitch (elem[eidx].type) {\n+\t\tcase FLOW_ELEM_TYPE_ANY:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Adap %i, Port %i: FLOW_ELEM_TYPE_ANY\\n\",\n+\t\t\t       dev->ndev->adapter_no, dev->port);\n+\t\t\t{\n+\t\t\t\tconst struct flow_elem_any *any_spec =\n+\t\t\t\t\t(const struct flow_elem_any *)elem[eidx]\n+\t\t\t\t\t.spec;\n+\t\t\t\tconst struct flow_elem_any *any_mask =\n+\t\t\t\t\t(const struct flow_elem_any *)elem[eidx]\n+\t\t\t\t\t.mask;\n+\n+\t\t\t\tif (any_spec && any_mask) {\n+\t\t\t\t\tany_count += any_spec->num &\n+\t\t\t\t\t\t     any_mask->num;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ELEM_TYPE_ETH:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Adap %i, Port %i: FLOW_ELEM_TYPE_ETH\\n\",\n+\t\t\t       dev->ndev->adapter_no, dev->port);\n+\t\t\t{\n+\t\t\t\tconst struct flow_elem_eth *eth_spec =\n+\t\t\t\t\t(const struct flow_elem_eth *)elem[eidx]\n+\t\t\t\t\t.spec;\n+\t\t\t\tconst struct flow_elem_eth *eth_mask =\n+\t\t\t\t\t(const struct flow_elem_eth *)elem[eidx]\n+\t\t\t\t\t.mask;\n+\n+\t\t\t\tif (any_count > 0) {\n+\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t       \"Tunneled L2 ethernet not supported\\n\");\n+\t\t\t\t\tflow_nic_set_error(ERR_FAILED, error);\n+\t\t\t\t\tfree(fd);\n+\t\t\t\t\treturn NULL;\n+\t\t\t\t}\n+\n+\t\t\t\tif (qw_counter >= 2) {\n+\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t       \"Key size too big. 
Out of QW resources.\\n\");\n+\t\t\t\t\tflow_nic_set_error(ERR_FAILED, error);\n+\t\t\t\t\tfree(fd);\n+\t\t\t\t\treturn NULL;\n+\t\t\t\t}\n+\n+\t\t\t\tif (eth_spec != NULL && eth_mask != NULL) {\n+\t\t\t\t\tif (is_non_zero(eth_mask->d_addr.addr_b,\n+\t\t\t\t\t\t\t6) ||\n+\t\t\t\t\t\t\tis_non_zero(eth_mask->s_addr.addr_b,\n+\t\t\t\t\t\t\t\t    6)) {\n+\t\t\t\t\t\tuint32_t *qw_data =\n+\t\t\t\t\t\t\t&packet_data[2 + 4 -\n+\t\t\t\t\t\t\t\t       qw_counter *\n+\t\t\t\t\t\t\t\t       4];\n+\t\t\t\t\t\tuint32_t *qw_mask =\n+\t\t\t\t\t\t\t&packet_mask[2 + 4 -\n+\t\t\t\t\t\t\t\t       qw_counter *\n+\t\t\t\t\t\t\t\t       4];\n+\n+\t\t\t\t\t\tqw_data[0] =\n+\t\t\t\t\t\t\t((eth_spec->d_addr\n+\t\t\t\t\t\t\t  .addr_b[0] &\n+\t\t\t\t\t\t\t  eth_mask->d_addr\n+\t\t\t\t\t\t\t  .addr_b[0])\n+\t\t\t\t\t\t\t << 24) +\n+\t\t\t\t\t\t\t((eth_spec->d_addr\n+\t\t\t\t\t\t\t  .addr_b[1] &\n+\t\t\t\t\t\t\t  eth_mask->d_addr\n+\t\t\t\t\t\t\t  .addr_b[1])\n+\t\t\t\t\t\t\t << 16) +\n+\t\t\t\t\t\t\t((eth_spec->d_addr\n+\t\t\t\t\t\t\t  .addr_b[2] &\n+\t\t\t\t\t\t\t  eth_mask->d_addr\n+\t\t\t\t\t\t\t  .addr_b[2])\n+\t\t\t\t\t\t\t << 8) +\n+\t\t\t\t\t\t\t(eth_spec->d_addr\n+\t\t\t\t\t\t\t .addr_b[3] &\n+\t\t\t\t\t\t\t eth_mask->d_addr\n+\t\t\t\t\t\t\t .addr_b[3]);\n+\n+\t\t\t\t\t\tqw_data[1] =\n+\t\t\t\t\t\t\t((eth_spec->d_addr\n+\t\t\t\t\t\t\t  .addr_b[4] &\n+\t\t\t\t\t\t\t  eth_mask->d_addr\n+\t\t\t\t\t\t\t  .addr_b[4])\n+\t\t\t\t\t\t\t << 24) +\n+\t\t\t\t\t\t\t((eth_spec->d_addr\n+\t\t\t\t\t\t\t  .addr_b[5] &\n+\t\t\t\t\t\t\t  eth_mask->d_addr\n+\t\t\t\t\t\t\t  .addr_b[5])\n+\t\t\t\t\t\t\t << 16) +\n+\t\t\t\t\t\t\t((eth_spec->s_addr\n+\t\t\t\t\t\t\t  .addr_b[0] &\n+\t\t\t\t\t\t\t  eth_mask->s_addr\n+\t\t\t\t\t\t\t  .addr_b[0])\n+\t\t\t\t\t\t\t << 8) +\n+\t\t\t\t\t\t\t(eth_spec->s_addr\n+\t\t\t\t\t\t\t .addr_b[1] &\n+\t\t\t\t\t\t\t eth_mask->s_addr\n+\t\t\t\t\t\t\t .addr_b[1]);\n+\n+\t\t\t\t\t\tqw_data[2] =\n+\t\t\t\t\t\t\t((eth_spec->s_addr\n+\t\t\t\t\t\t\t  .addr_b[2] &\n+\t\t\t\t\t\t\t  eth_mask->s_addr\n+\t\t\t\t\t\t\t  .addr_b[2])\n+\t\t\t\t\t\t\t << 24) +\n+\t\t\t\t\t\t\t((eth_spec->s_addr\n+\t\t\t\t\t\t\t  .addr_b[3] &\n+\t\t\t\t\t\t\t  eth_mask->s_addr\n+\t\t\t\t\t\t\t  .addr_b[3])\n+\t\t\t\t\t\t\t << 16) +\n+\t\t\t\t\t\t\t((eth_spec->s_addr\n+\t\t\t\t\t\t\t  .addr_b[4] &\n+\t\t\t\t\t\t\t  eth_mask->s_addr\n+\t\t\t\t\t\t\t  .addr_b[4])\n+\t\t\t\t\t\t\t << 8) +\n+\t\t\t\t\t\t\t(eth_spec->s_addr\n+\t\t\t\t\t\t\t .addr_b[5] &\n+\t\t\t\t\t\t\t eth_mask->s_addr\n+\t\t\t\t\t\t\t .addr_b[5]);\n+\n+\t\t\t\t\t\tqw_mask[0] = (eth_mask->d_addr\n+\t\t\t\t\t\t\t      .addr_b[0]\n+\t\t\t\t\t\t\t      << 24) +\n+\t\t\t\t\t\t\t     (eth_mask->d_addr\n+\t\t\t\t\t\t\t      .addr_b[1]\n+\t\t\t\t\t\t\t      << 16) +\n+\t\t\t\t\t\t\t     (eth_mask->d_addr\n+\t\t\t\t\t\t\t      .addr_b[2]\n+\t\t\t\t\t\t\t      << 8) +\n+\t\t\t\t\t\t\t     eth_mask->d_addr\n+\t\t\t\t\t\t\t     .addr_b[3];\n+\n+\t\t\t\t\t\tqw_mask[1] = (eth_mask->d_addr\n+\t\t\t\t\t\t\t      .addr_b[4]\n+\t\t\t\t\t\t\t      << 24) +\n+\t\t\t\t\t\t\t     (eth_mask->d_addr\n+\t\t\t\t\t\t\t      .addr_b[5]\n+\t\t\t\t\t\t\t      << 16) +\n+\t\t\t\t\t\t\t     (eth_mask->s_addr\n+\t\t\t\t\t\t\t      .addr_b[0]\n+\t\t\t\t\t\t\t      << 8) +\n+\t\t\t\t\t\t\t     eth_mask->s_addr\n+\t\t\t\t\t\t\t     .addr_b[1];\n+\n+\t\t\t\t\t\tqw_mask[2] = (eth_mask->s_addr\n+\t\t\t\t\t\t\t      .addr_b[2]\n+\t\t\t\t\t\t\t      << 24) +\n+\t\t\t\t\t\t\t     (eth_mask->s_addr\n+\t\t\t\t\t\t\t      .addr_b[3]\n+\t\t\t\t\t\t\t      << 16) +\n+\t\t\t\t\t\t\t     
(eth_mask->s_addr\n+\t\t\t\t\t\t\t      .addr_b[4]\n+\t\t\t\t\t\t\t      << 8) +\n+\t\t\t\t\t\t\t     eth_mask->s_addr\n+\t\t\t\t\t\t\t     .addr_b[5];\n+\n+\t\t\t\t\t\tkm_add_match_elem(&fd->km,\n+\t\t\t\t\t\t\t\t  &qw_data[(size_t)(qw_counter *\n+\t\t\t\t\t\t\t\t  4)],\n+\t\t\t\t\t\t\t\t  &qw_mask[(size_t)(qw_counter *\n+\t\t\t\t\t\t\t\t  4)],\n+\t\t\t\t\t\t\t\t  3, DYN_L2, 0);\n+\t\t\t\t\t\tset_key_def_qw(key_def,\n+\t\t\t\t\t\t\t       qw_counter,\n+\t\t\t\t\t\t\t       DYN_L2, 0);\n+\t\t\t\t\t\tqw_counter += 1;\n+\t\t\t\t\t}\n+\t\t\t\t}\n+\n+\t\t\t\tfd->l2_prot = PROT_L2_ETH2;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ELEM_TYPE_VLAN:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Adap %i, Port %i: FLOW_ELEM_TYPE_VLAN\\n\",\n+\t\t\t       dev->ndev->adapter_no, dev->port);\n+\t\t\t{\n+\t\t\t\tif (flow_elem_type_vlan(elem, eidx, implicit_vlan_vid, error, fd,\n+\t\t\t\t\tsw_counter, packet_data, packet_mask, key_def))\n+\t\t\t\t\treturn NULL;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ELEM_TYPE_IPV4:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Adap %i, Port %i: FLOW_ELEM_TYPE_IPV4\\n\",\n+\t\t\t       dev->ndev->adapter_no, dev->port);\n+\t\t\t{\n+\t\t\t\tif (flow_elem_type_ipv4(elem, eidx, error, fd, qw_counter,\n+\t\t\t\t\tsw_counter, packet_data, packet_mask, key_def, any_count))\n+\t\t\t\t\treturn NULL;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ELEM_TYPE_IPV6:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Adap %i, Port %i: FLOW_ELEM_TYPE_IPV6\\n\",\n+\t\t\t       dev->ndev->adapter_no, dev->port);\n+\t\t\t{\n+\t\t\t\tif (flow_elem_type_ipv6(elem, eidx, error, fd, qw_counter,\n+\t\t\t\t\tpacket_data, packet_mask, key_def, any_count))\n+\t\t\t\t\treturn NULL;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ELEM_TYPE_UDP:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Adap %i, Port %i: FLOW_ELEM_TYPE_UDP\\n\",\n+\t\t\t       dev->ndev->adapter_no, dev->port);\n+\t\t\t{\n+\t\t\t\tif (flow_elem_type_upd(elem, eidx, error, fd, sw_counter,\n+\t\t\t\t\tpacket_data, packet_mask, key_def, any_count))\n+\t\t\t\t\treturn NULL;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ELEM_TYPE_SCTP:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Adap %i,Port %i:FLOW_ELEM_TYPE_SCTP\\n\",\n+\t\t\t       dev->ndev->adapter_no, dev->port);\n+\t\t\t{\n+\t\t\t\tif (flow_elem_type_sctp(elem, eidx, error, fd, sw_counter,\n+\t\t\t\t\tpacket_data, packet_mask, key_def, any_count))\n+\t\t\t\t\treturn NULL;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ELEM_TYPE_TCP:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Adap %i, Port %i: FLOW_ELEM_TYPE_TCP\\n\",\n+\t\t\t       dev->ndev->adapter_no, dev->port);\n+\t\t\t{\n+\t\t\t\tif (flow_elem_type_tcp(elem, eidx, error, fd, sw_counter,\n+\t\t\t\t\tpacket_data, packet_mask, key_def, any_count))\n+\t\t\t\t\treturn NULL;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ELEM_TYPE_GTP:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Adap %i, Port %i: FLOW_ELEM_TYPE_GTP\\n\",\n+\t\t\t       dev->ndev->adapter_no, dev->port);\n+\t\t\t{\n+\t\t\t\tif (flow_elem_type_gtp(elem, eidx, error, fd, sw_counter,\n+\t\t\t\t\tpacket_data, packet_mask, key_def))\n+\t\t\t\t\treturn NULL;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase FLOW_ELEM_TYPE_PORT_ID:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Adap %i, Port %i: FLOW_ELEM_TYPE_PORT_ID\\n\",\n+\t\t\t       dev->ndev->adapter_no, dev->port);\n+\t\t\tif (elem[eidx].spec) {\n+\t\t\t\t*in_port_id =\n+\t\t\t\t\t((const struct flow_elem_port_id *)\n+\t\t\t\t\t elem[eidx]\n+\t\t\t\t\t .spec)\n+\t\t\t\t\t->id;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\tcase 
FLOW_ELEM_TYPE_VOID:\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"Adap %i, Port %i: FLOW_ELEM_TYPE_VOID\\n\",\n+\t\t\t       dev->ndev->adapter_no, dev->port);\n+\t\t\tbreak;\n+\n+\t\tdefault:\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"Invalid or unsupported flow request: %d\\n\",\n+\t\t\t       (int)elem[eidx].type);\n+\t\t\tflow_nic_set_error(ERR_MATCH_INVALID_OR_UNSUPPORTED_ELEM,\n+\t\t\t\t\t   error);\n+\t\t\tfree(fd);\n+\t\t\treturn NULL;\n+\t\t}\n+\t}\n+\n+\treturn fd;\n+}\n+\n+static int reset_cat_function_setup(struct flow_eth_dev *dev, int cfn)\n+{\n+\t/* CFN */\n+\t{\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PRESET_ALL, cfn,\n+\t\t\t\t   0, 0);\n+\t\thw_mod_cat_cfn_flush(&dev->ndev->be, cfn, 1);\n+\t}\n+\n+\t/* KM */\n+\t{\n+\t\tuint32_t bm = 0;\n+\n+\t\thw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,\n+\t\t\t\t      KM_FLM_IF_FIRST, cfn / 8, &bm);\n+\t\thw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,\n+\t\t\t\t      KM_FLM_IF_FIRST, cfn / 8,\n+\t\t\t\t      bm & ~(1 << (cfn % 8)));\n+\t\thw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,\n+\t\t\t\t      KM_FLM_IF_FIRST, cfn, 0);\n+\n+\t\thw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,\n+\t\t\t\t\tcfn / 8, 1);\n+\t\thw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,\n+\t\t\t\t\t1);\n+\n+\t\tfor (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;\n+\t\t\t\tft++) {\n+\t\t\tset_flow_type_km(dev->ndev, cfn, ft, 0, 0);\n+\t\t\tset_flow_type_km(dev->ndev, cfn, ft, 1, 0);\n+\t\t\tset_flow_type_km(dev->ndev, cfn, ft, 2, 0);\n+\t\t\tset_flow_type_km(dev->ndev, cfn, ft, 3, 0);\n+\t\t}\n+\t}\n+\n+\t/* FLM */\n+\t{\n+\t\tuint32_t bm = 0;\n+\n+\t\thw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,\n+\t\t\t\t       KM_FLM_IF_FIRST, cfn / 8, &bm);\n+\t\thw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,\n+\t\t\t\t       KM_FLM_IF_FIRST, cfn / 8,\n+\t\t\t\t       bm & ~(1 << (cfn % 8)));\n+\t\thw_mod_cat_kcs_flm_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,\n+\t\t\t\t       KM_FLM_IF_FIRST, cfn, 0);\n+\n+\t\thw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,\n+\t\t\t\t\t cfn / 8, 1);\n+\t\thw_mod_cat_kcs_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST, cfn,\n+\t\t\t\t\t 1);\n+\n+\t\tfor (unsigned int ft = 0; ft < dev->ndev->be.cat.nb_flow_types;\n+\t\t\t\tft++) {\n+\t\t\tset_flow_type_flm(dev->ndev, cfn, ft, 0, 0);\n+\t\t\tset_flow_type_flm(dev->ndev, cfn, ft, 1, 0);\n+\t\t\tset_flow_type_flm(dev->ndev, cfn, ft, 2, 0);\n+\t\t\tset_flow_type_flm(dev->ndev, cfn, ft, 3, 0);\n+\t\t}\n+\t}\n+\n+\t/* CTE / CTS */\n+\t{\n+\t\tuint32_t cte = 0;\n+\n+\t\thw_mod_cat_cte_get(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM, cfn,\n+\t\t\t\t   &cte);\n+\n+\t\tif (cte) {\n+\t\t\tconst int cts_offset =\n+\t\t\t\t((int)dev->ndev->be.cat.cts_num + 1) / 2;\n+\n+\t\t\thw_mod_cat_cte_set(&dev->ndev->be, HW_CAT_CTE_ENABLE_BM,\n+\t\t\t\t\t   cfn, 0);\n+\t\t\thw_mod_cat_cte_flush(&dev->ndev->be, cfn, 1);\n+\n+\t\t\tfor (int cte_type = 0; cte_type < cts_offset;\n+\t\t\t\t\t++cte_type) {\n+\t\t\t\thw_mod_cat_cts_set(&dev->ndev->be,\n+\t\t\t\t\t\t   HW_CAT_CTS_CAT_A,\n+\t\t\t\t\t\t   cts_offset * cfn + cte_type,\n+\t\t\t\t\t\t   0);\n+\t\t\t\thw_mod_cat_cts_set(&dev->ndev->be,\n+\t\t\t\t\t\t   HW_CAT_CTS_CAT_B,\n+\t\t\t\t\t\t   cts_offset * cfn + cte_type,\n+\t\t\t\t\t\t   0);\n+\t\t\t}\n+\n+\t\t\thw_mod_cat_cts_flush(&dev->ndev->be, cts_offset * cfn,\n+\t\t\t\t\t     cts_offset);\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int convert_fd_to_flm(struct flow_handle *fh, struct nic_flow_def 
*fd,\n+\t\t\t     const uint32_t *packet_data, uint32_t flm_key_id,\n+\t\t\t     uint16_t rpl_ext_ptr, uint32_t priority)\n+{\n+\tif (fh->type != FLOW_HANDLE_TYPE_FLM)\n+\t\treturn -1;\n+\n+\tswitch (fd->l4_prot) {\n+\tcase PROT_L4_TCP:\n+\t\tfh->flm_prot = 6;\n+\t\tbreak;\n+\tcase PROT_L4_UDP:\n+\t\tfh->flm_prot = 17;\n+\t\tbreak;\n+\tcase PROT_L4_SCTP:\n+\t\tfh->flm_prot = 132;\n+\t\tbreak;\n+\tcase PROT_L4_ICMP:\n+\t\tfh->flm_prot = 1;\n+\t\tbreak;\n+\tdefault:\n+\t\tswitch (fd->tunnel_l4_prot) {\n+\t\tcase PROT_TUN_L4_TCP:\n+\t\t\tfh->flm_prot = 6;\n+\t\t\tbreak;\n+\t\tcase PROT_TUN_L4_UDP:\n+\t\t\tfh->flm_prot = 17;\n+\t\t\tbreak;\n+\t\tcase PROT_TUN_L4_SCTP:\n+\t\t\tfh->flm_prot = 132;\n+\t\t\tbreak;\n+\t\tcase PROT_TUN_L4_ICMP:\n+\t\t\tfh->flm_prot = 1;\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tfh->flm_prot = 0;\n+\t\t\tbreak;\n+\t\t}\n+\t\tbreak;\n+\t}\n+\n+\tmemcpy(fh->flm_data, packet_data, sizeof(uint32_t) * 10);\n+\n+\tfh->flm_kid = flm_key_id;\n+\tfh->flm_rpl_ext_ptr = rpl_ext_ptr;\n+\tfh->flm_prio = (uint8_t)priority;\n+\n+\tfor (unsigned int i = 0; i < fd->modify_field_count; ++i) {\n+\t\tswitch (fd->modify_field[i].select) {\n+\t\tcase CPY_SELECT_DSCP_IPV4:\n+\t\t/* fallthrough */\n+\t\tcase CPY_SELECT_DSCP_IPV6:\n+\t\t\tfh->flm_dscp = fd->modify_field[i].value8[0];\n+\t\t\tbreak;\n+\t\tcase CPY_SELECT_RQI_QFI:\n+\t\t\tfh->flm_rqi = (fd->modify_field[i].value8[0] >> 6) &\n+\t\t\t\t      0x1;\n+\t\t\tfh->flm_qfi = fd->modify_field[i].value8[0] & 0x3f;\n+\t\t\tbreak;\n+\t\tcase CPY_SELECT_IPV4:\n+\t\t\tfh->flm_nat_ipv4 =\n+\t\t\t\tntohl(fd->modify_field[i].value32[0]);\n+\t\t\tbreak;\n+\t\tcase CPY_SELECT_PORT:\n+\t\t\tfh->flm_nat_port =\n+\t\t\t\tntohs(fd->modify_field[i].value16[0]);\n+\t\t\tbreak;\n+\t\tcase CPY_SELECT_TEID:\n+\t\t\tfh->flm_teid = ntohl(fd->modify_field[i].value32[0]);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tfh->flm_mtu_fragmentation_recipe = fd->flm_mtu_fragmentation_recipe;\n+\n+\treturn 0;\n+}\n+\n+static int flm_flow_programming(struct flow_eth_dev *dev,\n+\t\t\t\tstruct flow_handle *fh, uint32_t *mtr_ids,\n+\t\t\t\tuint32_t flm_ft, uint32_t flm_op)\n+{\n+\tstruct flm_v17_lrn_data_s learn_record;\n+\n+\tif (fh->type != FLOW_HANDLE_TYPE_FLM)\n+\t\treturn -1;\n+\n+\tmemset(&learn_record, 0x0, sizeof(struct flm_v17_lrn_data_s));\n+\n+\tlearn_record.qw0[0] = fh->flm_data[9];\n+\tlearn_record.qw0[1] = fh->flm_data[8];\n+\tlearn_record.qw0[2] = fh->flm_data[7];\n+\tlearn_record.qw0[3] = fh->flm_data[6];\n+\tlearn_record.qw4[0] = fh->flm_data[5];\n+\tlearn_record.qw4[1] = fh->flm_data[4];\n+\tlearn_record.qw4[2] = fh->flm_data[3];\n+\tlearn_record.qw4[3] = fh->flm_data[2];\n+\tlearn_record.sw8 = fh->flm_data[1];\n+\tlearn_record.sw9 = fh->flm_data[0];\n+\tlearn_record.prot = fh->flm_prot;\n+\n+\tif (mtr_ids) {\n+\t\tFLM_V17_MBR_ID1(learn_record.mbr_idx) = mtr_ids[0];\n+\t\tFLM_V17_MBR_ID2(learn_record.mbr_idx) = mtr_ids[1];\n+\t\tFLM_V17_MBR_ID3(learn_record.mbr_idx) = mtr_ids[2];\n+\t\tFLM_V17_MBR_ID4(learn_record.mbr_idx) = mtr_ids[3];\n+\n+\t\t/* Last non-zero mtr is used for statistics */\n+\t\tuint8_t mbrs = 0;\n+\n+\t\twhile (mbrs < MAX_FLM_MTRS_SUPPORTED && mtr_ids[mbrs] != 0)\n+\t\t\t++mbrs;\n+\t\tlearn_record.vol_idx = mbrs;\n+\t}\n+\n+\tlearn_record.nat_ip = fh->flm_nat_ipv4;\n+\tlearn_record.nat_port = fh->flm_nat_port;\n+\tlearn_record.nat_en = fh->flm_nat_ipv4 || fh->flm_nat_port ? 
1 : 0;\n+\n+\tlearn_record.dscp = fh->flm_dscp;\n+\tlearn_record.teid = fh->flm_teid;\n+\tlearn_record.qfi = fh->flm_qfi;\n+\tlearn_record.rqi = fh->flm_rqi;\n+\tlearn_record.color = fh->flm_rpl_ext_ptr &\n+\t\t\t     0x3ff; /* Lower 10 bits used for RPL EXT PTR */\n+\tlearn_record.color |= (fh->flm_mtu_fragmentation_recipe & 0xf)\n+\t\t\t      << 10; /* Bit [13:10] used for MTU recipe */\n+\n+\tlearn_record.ent = 0;\n+\tlearn_record.op = flm_op & 0xf;\n+\tlearn_record.prio = fh->flm_prio & 0x3;\n+\tlearn_record.ft = flm_ft;\n+\tlearn_record.kid = fh->flm_kid;\n+\tlearn_record.eor = 1;\n+\n+\tint res = flow_flm_apply(dev, &learn_record);\n+\treturn res;\n+}\n+\n+static int km_ft_handler(int *setup_km_ft, int *setup_km_rcp, int *setup_km,\n+\tstruct flow_handle *found_flow, int identical_flow_found, struct flow_eth_dev *dev,\n+\tstruct nic_flow_def *fd, struct flow_error *error, struct flow_handle *fh,\n+\tstruct flow_handle *flow)\n+{\n+\tif (!identical_flow_found) {\n+\t\t\t\t/* Find existing KM FT that can be reused */\n+\t\t{\n+\t\t\tint found_ft = 0, found_zero = 0;\n+\n+\t\t\tstruct flm_flow_ft_ident_s *ft_idents =\n+\t\t\t\t(struct flm_flow_ft_ident_s *)dev->ndev->ft_res_handle;\n+\t\t\tstruct flm_flow_ft_ident_s ft_ident = flow_def_to_ft_ident(fd);\n+\n+\t\t\tfor (int i = 1; i < FLM_FLOW_FT_MAX; ++i) {\n+\t\t\t\tif (ft_ident.data == ft_idents[i].data) {\n+\t\t\t\t\tfound_ft = i;\n+\t\t\t\t\tbreak;\n+\t\t\t\t} else if (found_zero == 0 && ft_idents[i].data == 0) {\n+\t\t\t\t\tfound_zero = i;\n+\t\t\t\t}\n+\t\t\t}\n+\n+\t\t\tif (found_ft) {\n+\t\t\t\tif (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE, found_ft)) {\n+\t\t\t\t\tNT_LOG(ERR, FILTER, \"ERROR: Could not reference \"\n+\t\t\t\t\t       \"KM FLOW TYPE resource\\n\");\n+\t\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);\n+\t\t\t\t\treturn 1;\n+\t\t\t\t}\n+\n+\t\t\t\tfh->resource[RES_KM_FLOW_TYPE].count = 1;\n+\t\t\t\tfh->resource[RES_KM_FLOW_TYPE].index = found_ft;\n+\t\t\t\tfh->resource[RES_KM_FLOW_TYPE].referenced = 1;\n+\t\t\t} else if (found_zero) {\n+\t\t\t\tif (flow_nic_allocate_fh_resource_index(dev->ndev, RES_KM_FLOW_TYPE,\n+\t\t\t\tfound_zero, fh)) {\n+\t\t\t\t\tNT_LOG(ERR, FILTER, \"ERROR: Could not get \"\n+\t\t\t\t\t       \"KM FLOW TYPE resource\\n\");\n+\t\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);\n+\t\t\t\t\treturn 1;\n+\t\t\t\t}\n+\n+\t\t\t\tft_idents[found_zero].data = ft_ident.data;\n+\t\t\t} else {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"ERROR: Could not get KM FLOW TYPE resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\t\t}\n+\t\t/* Attach resources to KM entry */\n+\t\tkm_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);\n+\t\tfd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;\n+\n+\t\t/* _update existing KM RCP or allocate a new RCP */\n+\t\tif (found_flow != NULL) {\n+\t\t\tif (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY, found_flow\n+\t\t\t\t->resource[RES_KM_CATEGORY].index)) {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"ERROR: Could not reference \"\n+\t\t\t\t       \"KM CATEGORY resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\n+\t\t\tfh->resource[RES_KM_CATEGORY].count = 1;\n+\t\t\tfh->resource[RES_KM_CATEGORY].index =\n+\t\t\t\tfound_flow->resource[RES_KM_CATEGORY].index;\n+\t\t\tfh->resource[RES_KM_CATEGORY].referenced = 1;\n+\n+\t\t\tif (fd->km.target == KM_CAM) {\n+\t\t\t\tuint32_t ft_a_mask = 
0;\n+\n+\t\t\t\thw_mod_km_rcp_get(&dev->ndev->be, HW_KM_RCP_FTM_A,\n+\t\t\t\t\tfh->resource[RES_KM_CATEGORY].index, 0, &ft_a_mask);\n+\t\t\t\thw_mod_km_rcp_set(&dev->ndev->be, HW_KM_RCP_FTM_A,\n+\t\t\t\t\tfh->resource[RES_KM_CATEGORY].index, 0,\n+\t\t\t\t\tft_a_mask | (1 << fd->km.flow_type));\n+\t\t\t}\n+\t\t} else {\n+\t\t\tif (flow_nic_allocate_fh_resource(dev->ndev, RES_KM_CATEGORY, fh, 1, 1)) {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"ERROR: Could not get KM CATEGORY resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\n+\t\t\t/* Note: km_rcp_set clears existing RCPs */\n+\t\t\tkm_rcp_set(&fd->km, fh->resource[RES_KM_CATEGORY].index);\n+\t\t}\n+\n+\t\t/* Set filter setup variables */\n+\t\t*setup_km = 1;\n+\t\t*setup_km_ft = fh->resource[RES_KM_FLOW_TYPE].index;\n+\t\t*setup_km_rcp = fh->resource[RES_KM_CATEGORY].index;\n+\n+\t\t/* _flush KM RCP and entry */\n+\t\thw_mod_km_rcp_flush(&dev->ndev->be, fh->resource[RES_KM_CATEGORY].index, 1);\n+\n+\t\tkm_write_data_match_entry(&fd->km, 0);\n+\t} else {\n+\t\tif (flow_nic_ref_resource(dev->ndev, RES_KM_FLOW_TYPE,\n+\t\t\tfound_flow->resource[RES_KM_FLOW_TYPE].index)) {\n+\t\t\tNT_LOG(ERR, FILTER, \"ERROR: Could not reference KM FLOW TYPE resource\\n\");\n+\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);\n+\t\t\treturn 1;\n+\t\t}\n+\n+\t\tfh->resource[RES_KM_FLOW_TYPE].count = 1;\n+\t\tfh->resource[RES_KM_FLOW_TYPE].index = found_flow->resource[RES_KM_FLOW_TYPE].index;\n+\t\tfh->resource[RES_KM_FLOW_TYPE].referenced = 1;\n+\n+\t\tif (flow_nic_ref_resource(dev->ndev, RES_KM_CATEGORY,\n+\t\t\tfound_flow->resource[RES_KM_CATEGORY].index)) {\n+\t\t\tNT_LOG(ERR, FILTER, \"ERROR: Could not reference KM CATEGORY resource\\n\");\n+\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);\n+\t\t\treturn 1;\n+\t\t}\n+\n+\t\tfh->resource[RES_KM_CATEGORY].count = 1;\n+\t\tfh->resource[RES_KM_CATEGORY].index = found_flow->resource[RES_KM_CATEGORY].index;\n+\t\tfh->resource[RES_KM_CATEGORY].referenced = 1;\n+\n+\t\tkm_attach_ndev_resource_management(&fd->km, &dev->ndev->km_res_handle);\n+\t\tfd->km.flow_type = fh->resource[RES_KM_FLOW_TYPE].index;\n+\n+\t\tkm_refer_data_match_entry(&fd->km, &found_flow->fd->km);\n+\n+\t\t*setup_km = 1;\n+\t\t*setup_km_ft = flow->resource[RES_KM_FLOW_TYPE].index;\n+\t\t*setup_km_rcp = flow->resource[RES_KM_CATEGORY].index;\n+\t}\n+\treturn 0;\n+}\n+\n+/*\n+ * Tunneling invalidates dynamic offsets, so change them to static\n+ * offsets starting at beginning of L2.\n+ */\n+static void align_tun_offset(struct nic_flow_def *fd, const uint32_t eth_length, int i,\n+\tuint32_t *ofs, uint32_t select, const uint32_t l2_length, const uint32_t l3_length,\n+\tconst uint32_t l4_length, uint32_t *dyn)\n+{\n+\tif (fd->tun_hdr.len > eth_length) {\n+\t\tif (!fd->tun_hdr.new_outer || fd->modify_field[i].level > 1) {\n+\t\t\tofs += fd->tun_hdr.len - eth_length;\n+\t\t} else {\n+\t\t\tswitch (select) {\n+\t\t\tcase CPY_SELECT_IPV4:\n+\t\t\tcase CPY_SELECT_DSCP_IPV4:\n+\t\t\tcase CPY_SELECT_DSCP_IPV6:\n+\t\t\t\t*ofs += l2_length;\n+\t\t\t\tbreak;\n+\t\t\tcase CPY_SELECT_PORT:\n+\t\t\t\t*ofs += l2_length + l3_length;\n+\t\t\t\tbreak;\n+\t\t\tcase CPY_SELECT_TEID:\n+\t\t\tcase CPY_SELECT_RQI_QFI:\n+\t\t\t\t*ofs += l2_length + l3_length + l4_length;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\t*dyn = 1;\n+\t\t}\n+\t}\n+}\n+\n+static struct flow_handle *\n+create_flow_filter(struct flow_eth_dev *dev, struct nic_flow_def *fd,\n+\t\t   const struct flow_attr *attr, struct flow_error 
*error,\n+\t\t   uint32_t port_id, uint32_t num_dest_port,\n+\t\t   uint32_t num_queues, uint32_t *packet_data,\n+\t\t   uint32_t *packet_mask, struct flm_flow_key_def_s *key_def)\n+{\n+\tuint32_t qsl_size = num_dest_port > num_queues ? num_dest_port :\n+\t\t\t    num_queues;\n+\tuint32_t flm_key_id = 0;\n+\tuint32_t flm_ft = 0;\n+\tuint16_t flm_rpl_ext_ptr = 0;\n+\n+\tstruct flow_handle *fh_flm = NULL;\n+\tstruct flow_handle *fh = calloc(1, sizeof(struct flow_handle));\n+\n+\tif (!fh) {\n+\t\tNT_LOG(ERR, FILTER, \"ERR memory\\n\");\n+\t\tflow_nic_set_error(ERR_MEMORY, error);\n+\t\treturn NULL;\n+\t}\n+\n+\tfh->type = FLOW_HANDLE_TYPE_FLOW;\n+\tfh->port_id = port_id;\n+\tfh->dev = dev;\n+\tfh->fd = fd;\n+\n+\tint setup_cat_cfn = 0;\n+\tint setup_cat_cot = 0;\n+\tint setup_cat_cts = 0;\n+\tint setup_qsl_rcp = 0;\n+\n+\tint setup_flm = 0;\n+\tint setup_flm_ft = 0;\n+\n+\tint setup_km = 0;\n+\tint setup_km_ft = 0;\n+\tint setup_km_rcp = 0;\n+\n+\tint setup_default_ft = 0;\n+\n+\tint setup_hst = 0;\n+\tint setup_tpe = 0;\n+\tint setup_tpe_encap_data = 0;\n+\n+\tint free_fd = 0;\n+\n+\tconst int empty_pattern =\n+\t\tfd->l2_prot < 0 && fd->l3_prot < 0 && fd->l4_prot < 0 &&\n+\t\tfd->vlans == 0 && fd->tunnel_prot < 0 &&\n+\t\tfd->tunnel_l3_prot < 0 && fd->tunnel_l4_prot < 0;\n+\n+\tif (attr->group > 0 && empty_pattern) {\n+\t\t/*\n+\t\t * Group 0 default filter actions\n+\t\t */\n+\t\tstruct flow_handle *fh_miss = NULL;\n+\n+\t\tif (flm_flow_get_group_miss_fh(dev, attr->group, &fh_miss)) {\n+\t\t\t/* Error was printed to log by flm_flow_get_group_miss_fh */\n+\t\t\tflow_nic_set_error(ERR_FAILED, error);\n+\t\t\tfree(fh);\n+\t\t\treturn NULL;\n+\t\t}\n+\n+\t\tif (fh_miss == NULL) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"ERROR: Could not setup default action for uninitialized group\\n\");\n+\t\t\tflow_nic_set_error(ERR_FAILED, error);\n+\t\t\tfree(fh);\n+\t\t\treturn NULL;\n+\t\t}\n+\n+\t\tif (qsl_size > 0 &&\n+\t\t\t\tflow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST, fh,\n+\t\t\t\t\t\tqsl_size, 1)) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"ERROR: Could not get QSL QST resource\\n\");\n+\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t   error);\n+\t\t\tfree(fh);\n+\t\t\treturn NULL;\n+\t\t}\n+\n+\t\tif (flow_nic_ref_resource(dev->ndev, RES_QSL_RCP,\n+\t\t\t\t\t  fh_miss->resource[RES_QSL_RCP].index)) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"ERROR: Could not reference QSL RCP resource\\n\");\n+\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t   error);\n+\t\t\tfree(fh);\n+\t\t\treturn NULL;\n+\t\t}\n+\n+\t\tfh->resource[RES_QSL_RCP].count = 1;\n+\t\tfh->resource[RES_QSL_RCP].index =\n+\t\t\tfh_miss->resource[RES_QSL_RCP].index;\n+\t\tfh->resource[RES_QSL_RCP].referenced = 1;\n+\n+\t\tnic_insert_flow(dev->ndev, fh);\n+\n+\t\tsetup_qsl_rcp = 1;\n+\t} else if (attr->group > 0) {\n+\t\t/*\n+\t\t * FLM programming\n+\t\t */\n+\t\tstruct flow_handle *fh_existing = NULL;\n+\t\tint cfn_to_copy = -1;\n+\n+\t\tif (attr->priority >= dev->ndev->be.flm.nb_prios) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"ERROR: Priority value of FLM flow exceeds %u\"\n+\t\t\t       \"\\n\",\n+\t\t\t       dev->ndev->be.flm.nb_prios);\n+\t\t\tflow_nic_set_error(ERR_FLOW_PRIORITY_VALUE_INVALID,\n+\t\t\t\t\t   error);\n+\t\t\tfree(fh);\n+\t\t\treturn NULL;\n+\t\t}\n+\n+\t\tif (flm_flow_learn_prepare(dev, fh, attr->group, key_def,\n+\t\t\t\t\t   packet_mask, &flm_key_id, &flm_ft,\n+\t\t\t\t\t   &cfn_to_copy, &setup_km_ft,\n+\t\t\t\t\t   &fh_existing)) {\n+\t\t\t/* 
Error was printed to log by flm_flow_learn_prepare */\n+\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t   error);\n+\t\t\tfree(fh);\n+\t\t\treturn NULL;\n+\t\t}\n+\n+\t\tsetup_tpe_encap_data = (fd->tun_hdr.len > 0);\n+\t\tsetup_tpe =\n+\t\t\t(fd->modify_field_count > 0 || fd->ttl_sub_enable > 0);\n+\n+\t\t/* Create HIT filter for new FLM FT */\n+\t\tif (cfn_to_copy >= 0) {\n+\t\t\tuint32_t value = 0;\n+\n+\t\t\tnic_insert_flow(dev->ndev, fh);\n+\n+\t\t\tsetup_qsl_rcp = 1;\n+\t\t\tsetup_cat_cot = 1;\n+\t\t\tsetup_cat_cts = 1;\n+\n+\t\t\tsetup_default_ft = 1;\n+\n+\t\t\tsetup_flm = 1;\n+\t\t\tsetup_flm_ft = (int)flm_ft;\n+\n+\t\t\tsetup_tpe |= setup_tpe_encap_data;\n+\n+\t\t\tif (fd->header_strip_start_dyn != fd->header_strip_end_dyn ||\n+\t\t\t\t\tfd->header_strip_start_ofs != fd->header_strip_end_ofs)\n+\t\t\t\tsetup_hst = 1;\n+\n+\t\t\tif (flow_nic_allocate_fh_resource(dev->ndev,\n+\t\t\t\t\t\t\t  RES_CAT_CFN,\n+\t\t\t\t\t\t\t  fh, 1, 1)) {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR: Could not get CAT CFN resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t\t   error);\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\n+\t\t\tif (flow_nic_allocate_fh_resource(dev->ndev,\n+\t\t\t\t\t\t\t  RES_CAT_COT,\n+\t\t\t\t\t\t\t  fh, 1, 1)) {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR: Could not get CAT COT resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t\t   error);\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\n+\t\t\tif (flow_nic_allocate_fh_resource(dev->ndev,\n+\t\t\t\t\t\t\t  RES_QSL_RCP,\n+\t\t\t\t\t\t\t  fh, 1, 1)) {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR: Could not get QSL RCP resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t\t   error);\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\n+\t\t\tif (qsl_size > 0 &&\n+\t\t\t\t\tflow_nic_allocate_fh_resource(dev->ndev,\n+\t\t\t\t\t\t\t\t      RES_QSL_QST,\n+\t\t\t\t\t\t\t\t      fh, qsl_size, 1)) {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR: Could not get QSL QST resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t\t   error);\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\n+\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t       \"FLM: Creating new CFN %d as a copy of CFN %d with FT %d\\n\",\n+\t\t\t       fh->resource[RES_CAT_CFN].index, cfn_to_copy,\n+\t\t\t       setup_flm_ft);\n+\n+\t\t\t/* Copy parts from base MISS filter */\n+\t\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_COPY_FROM,\n+\t\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0,\n+\t\t\t\t\t   cfn_to_copy);\n+\t\t\thw_mod_cat_cfn_flush(&dev->ndev->be,\n+\t\t\t\t\t     fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t\t     1);\n+\n+\t\t\thw_mod_cat_kcs_km_get(&dev->ndev->be,\n+\t\t\t\t\t      HW_CAT_KCS_CATEGORY,\n+\t\t\t\t\t      KM_FLM_IF_FIRST, cfn_to_copy,\n+\t\t\t\t\t      &value);\n+\t\t\tif (value > 0) {\n+\t\t\t\tsetup_km = 1;\n+\t\t\t\tsetup_km_rcp = (int)value;\n+\t\t\t}\n+\n+\t\t\thw_mod_cat_kcs_flm_get(&dev->ndev->be,\n+\t\t\t\t\t       HW_CAT_KCS_CATEGORY,\n+\t\t\t\t\t       KM_FLM_IF_FIRST, cfn_to_copy,\n+\t\t\t\t\t       &value);\n+\t\t\thw_mod_cat_kcs_flm_set(&dev->ndev->be,\n+\t\t\t\t\t       HW_CAT_KCS_CATEGORY,\n+\t\t\t\t\t       KM_FLM_IF_FIRST,\n+\t\t\t\t\t       fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t\t       value);\n+\t\t\thw_mod_cat_kcs_flm_flush(&dev->ndev->be,\n+\t\t\t\t\t\t KM_FLM_IF_FIRST,\n+\t\t\t\t\t\t fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t\t\t 1);\n+\n+\t\t\tfh_flm = calloc(1, 
sizeof(struct flow_handle));\n+\t\t\tif (!fh_flm) {\n+\t\t\t\tflow_nic_set_error(ERR_MEMORY, error);\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\n+\t\t\tnic_insert_flow_flm(dev->ndev, fh_flm);\n+\n+\t\t\tfh_flm->type = FLOW_HANDLE_TYPE_FLM;\n+\t\t\tfh_flm->dev = dev;\n+\t\t\tfh_flm->flm_owner = fh;\n+\t\t} else {\n+\t\t\t/* Reuse allocated memory */\n+\t\t\tfh_flm = fh;\n+\t\t\tfh = fh_existing;\n+\n+\t\t\tnic_insert_flow_flm(dev->ndev, fh_flm);\n+\n+\t\t\tfh_flm->type = FLOW_HANDLE_TYPE_FLM;\n+\t\t\tfh_flm->dev = dev;\n+\t\t\tfh_flm->flm_owner = fh_existing;\n+\n+\t\t\tfree_fd = 1;\n+\t\t}\n+\n+\t\tfh_flm->flm_owner->flm_ref_count += 1;\n+\t} else {\n+\t\t/*\n+\t\t * Filter creation\n+\t\t */\n+\t\tnic_insert_flow(dev->ndev, fh);\n+\n+\t\tsetup_cat_cfn = 1;\n+\t\tsetup_cat_cts = 1;\n+\t\tsetup_qsl_rcp = 1;\n+\n+\t\tif (fd->km.num_ftype_elem) {\n+\t\t\tstruct flow_handle *flow = dev->ndev->flow_base,\n+\t\t\t\t\t\t    *found_flow = NULL;\n+\t\t\tint identical_flow_found = 0;\n+\n+\t\t\t/* Compute new KM key */\n+\t\t\tif (km_key_create(&fd->km, fh->port_id)) {\n+\t\t\t\tNT_LOG(ERR, FILTER, \"KM creation failed\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_FAILED_BY_HW_LIMITS,\n+\t\t\t\t\t\t   error);\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\n+\t\t\tfd->km.be = &dev->ndev->be;\n+\n+\t\t\t/* Find existing KM key that can be reused */\n+\t\t\twhile (flow) {\n+\t\t\t\tif (flow->type == FLOW_HANDLE_TYPE_FLOW &&\n+\t\t\t\t\t\tflow->fd->km\n+\t\t\t\t\t\t.flow_type && /* This check also skips self */\n+\t\t\t\t\t\tflow->resource[RES_KM_CATEGORY].count) {\n+\t\t\t\t\tint res = km_key_compare(&fd->km,\n+\t\t\t\t\t\t\t\t &flow->fd->km);\n+\t\t\t\t\tif (res < 0) {\n+\t\t\t\t\t\tidentical_flow_found = 1;\n+\t\t\t\t\t\tfound_flow = flow;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\t} else if (res > 0 &&\n+\t\t\t\t\t\t\t!flow->resource[RES_KM_CATEGORY]\n+\t\t\t\t\t\t\t.referenced &&\n+\t\t\t\t\t\t\tfound_flow == NULL)\n+\t\t\t\t\t\tfound_flow = flow;\n+\t\t\t\t}\n+\t\t\t\tflow = flow->next;\n+\t\t\t}\n+\t\t\t\tif (km_ft_handler(&setup_km_ft, &setup_km_rcp, &setup_km,\n+\t\t\t\t\tfound_flow, identical_flow_found, dev, fd, error, fh, flow))\n+\t\t\t\t\treturn NULL;\n+\t\t}\n+\n+\t\tsetup_default_ft = 1;\n+\n+\t\tif (flow_nic_allocate_fh_resource(dev->ndev, RES_CAT_CFN,\n+\t\t\t\t\t\t  fh, 1, 1)) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"ERROR: Could not get CAT CFN resource\\n\");\n+\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t   error);\n+\t\t\treturn NULL;\n+\t\t}\n+\n+\t\tif (flow_nic_allocate_fh_resource(dev->ndev, RES_QSL_RCP, fh, 1,\n+\t\t\t\t\t\t  1)) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"ERROR: Could not get QSL RCP resource\\n\");\n+\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t   error);\n+\t\t\treturn NULL;\n+\t\t}\n+\n+\t\tif (qsl_size > 0 &&\n+\t\t\t\tflow_nic_allocate_fh_resource(dev->ndev, RES_QSL_QST,\n+\t\t\t\t\t\t\t      fh, qsl_size, 1)) {\n+\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t       \"ERROR: Could not get QSL QST resource\\n\");\n+\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t   error);\n+\t\t\treturn NULL;\n+\t\t}\n+\n+\t\t/* Check if filter is set up for FLM */\n+\t\tif (fd->jump_to_group != UINT32_MAX) {\n+\t\t\tflm_flow_setup_group(dev, fd->jump_to_group,\n+\t\t\t\t\t     fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t\t     fh->resource[RES_KM_FLOW_TYPE].index,\n+\t\t\t\t\t     fh);\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * Setup QSL\n+\t */\n+\tif (setup_qsl_rcp) {\n+\t\tif (qsl_size == 0) {\n+\t\t\t/* Create drop filter 
*/\n+\t\t\thw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,\n+\t\t\t\t\t   fh->resource[RES_QSL_RCP].index,\n+\t\t\t\t\t   0x0);\n+\t\t\thw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,\n+\t\t\t\t\t   fh->resource[RES_QSL_RCP].index,\n+\t\t\t\t\t   0x3);\n+\t\t\thw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,\n+\t\t\t\t\t   fh->resource[RES_QSL_RCP].index,\n+\t\t\t\t\t   0x0);\n+\n+\t\t\thw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,\n+\t\t\t\t\t   fh->resource[RES_QSL_RCP].index, 0);\n+\t\t\thw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,\n+\t\t\t\t\t   fh->resource[RES_QSL_RCP].index, 0);\n+\n+\t\t\thw_mod_qsl_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t     fh->resource[RES_QSL_RCP].index,\n+\t\t\t\t\t     1);\n+\t\t} else {\n+\t\t\tconst int table_start = fh->resource[RES_QSL_QST].index;\n+\t\t\tconst int table_end = table_start +\n+\t\t\t\t\t      fh->resource[RES_QSL_QST].count -\n+\t\t\t\t\t      1;\n+\n+\t\t\t/* Use 0x0 for pure retransmit */\n+\t\t\thw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DISCARD,\n+\t\t\t\t\t   fh->resource[RES_QSL_RCP].index,\n+\t\t\t\t\t   0x0);\n+\t\t\thw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_DROP,\n+\t\t\t\t\t   fh->resource[RES_QSL_RCP].index,\n+\t\t\t\t\t   0x0);\n+\t\t\thw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_LR,\n+\t\t\t\t\t   fh->resource[RES_QSL_RCP].index,\n+\t\t\t\t\t   num_dest_port > 0 ? 0x3 : 0x0);\n+\n+\t\t\thw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_LO,\n+\t\t\t\t\t   fh->resource[RES_QSL_RCP].index,\n+\t\t\t\t\t   table_start);\n+\t\t\thw_mod_qsl_rcp_set(&dev->ndev->be, HW_QSL_RCP_TBL_HI,\n+\t\t\t\t\t   fh->resource[RES_QSL_RCP].index,\n+\t\t\t\t\t   table_end);\n+\n+\t\t\thw_mod_qsl_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t     fh->resource[RES_QSL_RCP].index,\n+\t\t\t\t\t     1);\n+\n+\t\t\t/* Setup QSL QST/QEN */\n+\t\t\tif (num_dest_port > 0 && num_queues > 0) {\n+\t\t\t\tint ports[num_dest_port];\n+\t\t\t\tint queues[num_queues];\n+\n+\t\t\t\tint port_index = 0;\n+\t\t\t\tint queue_index = 0;\n+\n+\t\t\t\tfor (int i = 0; i < fd->dst_num_avail; ++i) {\n+\t\t\t\t\tif (fd->dst_id[i].type == PORT_PHY) {\n+\t\t\t\t\t\tports[port_index++] =\n+\t\t\t\t\t\t\tfd->dst_id[i].id;\n+\t\t\t\t\t} else if (fd->dst_id[i].type ==\n+\t\t\t\t\t\t\tPORT_VIRT) {\n+\t\t\t\t\t\tqueues[queue_index++] =\n+\t\t\t\t\t\t\tfd->dst_id[i].id;\n+\t\t\t\t\t}\n+\t\t\t\t}\n+\n+\t\t\t\tfor (int i = 0; i < fd->dst_num_avail; ++i) {\n+\t\t\t\t\thw_mod_qsl_qst_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t   HW_QSL_QST_TX_PORT,\n+\t\t\t\t\t\t\t   table_start + i,\n+\t\t\t\t\t\t\t   ports[i % num_dest_port]);\n+\t\t\t\t\thw_mod_qsl_qst_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t   HW_QSL_QST_LRE,\n+\t\t\t\t\t\t\t   table_start + i, 1);\n+\n+\t\t\t\t\thw_mod_qsl_qst_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t   HW_QSL_QST_QUEUE,\n+\t\t\t\t\t\t\t   table_start + i,\n+\t\t\t\t\t\t\t   queues[i % num_queues]);\n+\t\t\t\t\thw_mod_qsl_qst_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t   HW_QSL_QST_EN,\n+\t\t\t\t\t\t\t   table_start + i, 1);\n+\t\t\t\t}\n+\t\t\t} else if (num_dest_port > 0) {\n+\t\t\t\tfor (int i = 0; i < fd->dst_num_avail; ++i) {\n+\t\t\t\t\thw_mod_qsl_qst_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t   HW_QSL_QST_TX_PORT,\n+\t\t\t\t\t\t\t   table_start + i,\n+\t\t\t\t\t\t\t   fd->dst_id[i].id);\n+\t\t\t\t\thw_mod_qsl_qst_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t   HW_QSL_QST_LRE,\n+\t\t\t\t\t\t\t   table_start + i, 1);\n+\t\t\t\t}\n+\t\t\t} else if (num_queues > 0) {\n+\t\t\t\tfor (int i = 0; i < fd->dst_num_avail; ++i) 
{\n+\t\t\t\t\thw_mod_qsl_qst_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t   HW_QSL_QST_QUEUE,\n+\t\t\t\t\t\t\t   table_start + i,\n+\t\t\t\t\t\t\t   fd->dst_id[i].id);\n+\t\t\t\t\thw_mod_qsl_qst_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t   HW_QSL_QST_EN,\n+\t\t\t\t\t\t\t   table_start + i, 1);\n+\t\t\t\t}\n+\t\t\t}\n+\n+\t\t\thw_mod_qsl_qst_flush(&dev->ndev->be, table_start,\n+\t\t\t\t\t     fd->dst_num_avail);\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * Setup CAT KM functionality\n+\t */\n+\tif (setup_km) {\n+\t\tuint32_t bm = 0;\n+\n+\t\t/* Enable KM match FS for key A */\n+\t\tset_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t setup_km_ft, 0, 1);\n+\n+\t\t/* KM function select */\n+\t\thw_mod_cat_kcs_km_set(&dev->ndev->be, HW_CAT_KCS_CATEGORY,\n+\t\t\t\t      KM_FLM_IF_FIRST,\n+\t\t\t\t      fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t      setup_km_rcp);\n+\t\thw_mod_cat_kcs_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,\n+\t\t\t\t\tfh->resource[RES_CAT_CFN].index, 1);\n+\n+\t\t/* KM function enable */\n+\t\thw_mod_cat_kce_km_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,\n+\t\t\t\t      KM_FLM_IF_FIRST,\n+\t\t\t\t      fh->resource[RES_CAT_CFN].index / 8, &bm);\n+\t\thw_mod_cat_kce_km_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,\n+\t\t\t\t      KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,\n+\t\t\t\t      bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));\n+\t\thw_mod_cat_kce_km_flush(&dev->ndev->be, KM_FLM_IF_FIRST,\n+\t\t\t\t\tfh->resource[RES_CAT_CFN].index / 8, 1);\n+\t} else if (setup_default_ft) {\n+\t\t/* Enable \"no KM match\" FT for key A */\n+\t\tset_flow_type_km(dev->ndev, fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t 0, 0, 1);\n+\t}\n+\n+\t/*\n+\t * Setup CAT FLM functionality\n+\t */\n+\tif (setup_flm) {\n+\t\tuint32_t bm = 0;\n+\n+\t\t/* Enable KM match FT for key A, and FLM match FT for key C */\n+\t\tset_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t  setup_km_ft, 0, 1); /* KM FT A */\n+\t\tset_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t  setup_flm_ft, 2, 1); /* FLM FT C */\n+\n+\t\t/* FLM function enable */\n+\t\thw_mod_cat_kce_flm_get(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,\n+\t\t\t\t       KM_FLM_IF_FIRST,\n+\t\t\t\t       fh->resource[RES_CAT_CFN].index / 8,\n+\t\t\t\t       &bm);\n+\t\thw_mod_cat_kce_flm_set(&dev->ndev->be, HW_CAT_KCE_ENABLE_BM,\n+\t\t\t\t       KM_FLM_IF_FIRST, fh->resource[RES_CAT_CFN].index / 8,\n+\t\t\t\t       bm | (1 << (fh->resource[RES_CAT_CFN].index % 8)));\n+\t\thw_mod_cat_kce_flm_flush(&dev->ndev->be, KM_FLM_IF_FIRST,\n+\t\t\t\t\t fh->resource[RES_CAT_CFN].index / 8,\n+\t\t\t\t\t 1);\n+\t} else if (setup_default_ft) {\n+\t\t/* Enable KM for key A and UNHANDLED for key C */\n+\t\tset_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t  setup_km_ft, 0, 1);\n+\t\tset_flow_type_flm(dev->ndev, fh->resource[RES_CAT_CFN].index, 1,\n+\t\t\t\t  2, 1);\n+\t}\n+\n+\t/*\n+\t * Setup HST\n+\t */\n+\tif (setup_hst) {\n+\t\tint hst_index = -1;\n+\n+\t\tfor (int i = 1;\n+\t\t\t\ti < (int)dev->ndev->res[RES_HST_RCP].resource_count; ++i) {\n+\t\t\tuint32_t values[] = { 0, 0, 0, 0, 0 };\n+\n+\t\t\tif (!flow_nic_is_resource_used(dev->ndev, RES_HST_RCP,\n+\t\t\t\t\t\t       i))\n+\t\t\t\tcontinue;\n+\n+\t\t\thw_mod_hst_rcp_get(&dev->ndev->be,\n+\t\t\t\t\t   HW_HST_RCP_STRIP_MODE, i,\n+\t\t\t\t\t   &values[0]);\n+\t\t\thw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_DYN,\n+\t\t\t\t\t   i, &values[1]);\n+\t\t\thw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_START_OFS,\n+\t\t\t\t\t   i, 
&values[2]);\n+\t\t\thw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_DYN,\n+\t\t\t\t\t   i, &values[3]);\n+\t\t\thw_mod_hst_rcp_get(&dev->ndev->be, HW_HST_RCP_END_OFS,\n+\t\t\t\t\t   i, &values[4]);\n+\n+\t\t\tif ((int)values[0] == 1 &&\n+\t\t\t\t\t(int)values[1] == fd->header_strip_start_dyn &&\n+\t\t\t\t\t(int)values[2] == fd->header_strip_start_ofs &&\n+\t\t\t\t\t(int)values[3] == fd->header_strip_end_dyn &&\n+\t\t\t\t\t(int)values[4] == fd->header_strip_end_ofs) {\n+\t\t\t\thst_index = i;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\n+\t\tif (hst_index >= 0) {\n+\t\t\tif (flow_nic_ref_resource(dev->ndev, RES_HST_RCP,\n+\t\t\t\t\t\t  hst_index)) {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR: Could not reference HST RCP resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t\t   error);\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\n+\t\t\tfh->resource[RES_HST_RCP].count = 1;\n+\t\t\tfh->resource[RES_HST_RCP].index = hst_index;\n+\t\t\tfh->resource[RES_HST_RCP].referenced = 1;\n+\t\t} else {\n+\t\t\tif (flow_nic_allocate_fh_resource(dev->ndev,\n+\t\t\t\t\t\t\t  RES_HST_RCP,\n+\t\t\t\t\t\t\t  fh, 1, 1)) {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR: Could not get HST RCP resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t\t   error);\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\n+\t\t\thw_mod_hst_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t   HW_HST_RCP_STRIP_MODE,\n+\t\t\t\t\t   fh->resource[RES_HST_RCP].index, 1);\n+\t\t\thw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_DYN,\n+\t\t\t\t\t   fh->resource[RES_HST_RCP].index,\n+\t\t\t\t\t   fd->header_strip_start_dyn);\n+\t\t\thw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_START_OFS,\n+\t\t\t\t\t   fh->resource[RES_HST_RCP].index,\n+\t\t\t\t\t   fd->header_strip_start_ofs);\n+\t\t\thw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_DYN,\n+\t\t\t\t\t   fh->resource[RES_HST_RCP].index,\n+\t\t\t\t\t   fd->header_strip_end_dyn);\n+\t\t\thw_mod_hst_rcp_set(&dev->ndev->be, HW_HST_RCP_END_OFS,\n+\t\t\t\t\t   fh->resource[RES_HST_RCP].index,\n+\t\t\t\t\t   fd->header_strip_end_ofs);\n+\n+\t\t\thw_mod_hst_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t   HW_HST_RCP_MODIF0_CMD,\n+\t\t\t\t\t   fh->resource[RES_HST_RCP].index,\n+\t\t\t\t\t   fd->header_strip_removed_outer_ip ? 
7 : 6);\n+\t\t\thw_mod_hst_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t   HW_HST_RCP_MODIF0_DYN,\n+\t\t\t\t\t   fh->resource[RES_HST_RCP].index, 2);\n+\t\t\thw_mod_hst_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t   HW_HST_RCP_MODIF0_OFS,\n+\t\t\t\t\t   fh->resource[RES_HST_RCP].index, 0);\n+\n+\t\t\thw_mod_hst_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t     fh->resource[RES_HST_RCP].index, 1);\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * Setup TPE\n+\t */\n+\tif (setup_tpe_encap_data) {\n+\t\tint ext_rpl_index = -1;\n+\t\tint rpl_rpl_index = -1;\n+\t\tint rpl_rpl_length = -1;\n+\n+\t\t/* Find existing RPL */\n+\t\tfor (int i = 1;\n+\t\t\t\ti < (int)dev->ndev->res[RES_TPE_EXT].resource_count; ++i) {\n+\t\t\tint found = 1;\n+\t\t\tuint32_t len;\n+\t\t\tuint32_t ptr;\n+\n+\t\t\tif (!flow_nic_is_resource_used(dev->ndev, RES_TPE_EXT,\n+\t\t\t\t\t\t       i))\n+\t\t\t\tcontinue;\n+\n+\t\t\thw_mod_tpe_rpl_ext_get(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_RPL_EXT_META_RPL_LEN, i,\n+\t\t\t\t\t       &len);\n+\t\t\tif (len != fd->tun_hdr.len)\n+\t\t\t\tcontinue;\n+\n+\t\t\thw_mod_tpe_rpl_ext_get(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_RPL_EXT_RPL_PTR, i, &ptr);\n+\n+\t\t\tfor (uint32_t ptr_it = 0; ptr_it < (len + 15) / 16;\n+\t\t\t\t\t++ptr_it) {\n+\t\t\t\tuint32_t data[4];\n+\n+\t\t\t\thw_mod_tpe_rpl_rpl_get(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_RPL_RPL_VALUE,\n+\t\t\t\t\t\t       ptr + ptr_it, data);\n+\n+\t\t\t\tif (fd->tun_hdr.d.hdr32[ptr_it * 4 + 0] !=\n+\t\t\t\t\t\tdata[0] ||\n+\t\t\t\t\t\tfd->tun_hdr.d.hdr32[ptr_it * 4 + 1] !=\n+\t\t\t\t\t\tdata[1] ||\n+\t\t\t\t\t\tfd->tun_hdr.d.hdr32[ptr_it * 4 + 2] !=\n+\t\t\t\t\t\tdata[2] ||\n+\t\t\t\t\t\tfd->tun_hdr.d.hdr32[ptr_it * 4 + 3] !=\n+\t\t\t\t\t\tdata[3]) {\n+\t\t\t\t\tfound = 0;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t}\n+\n+\t\t\tif (found) {\n+\t\t\t\text_rpl_index = i;\n+\t\t\t\trpl_rpl_index = (int)ptr;\n+\t\t\t\trpl_rpl_length = (int)len;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* Set RPL data */\n+\t\tif (ext_rpl_index >= 0) {\n+\t\t\tif (flow_nic_ref_resource(dev->ndev, RES_TPE_EXT,\n+\t\t\t\t\t\t  ext_rpl_index)) {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR: Could not reference TPE EXT resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t\t   error);\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\n+\t\t\tfor (int i = 0; i < (rpl_rpl_length + 15) / 16; ++i) {\n+\t\t\t\tif (flow_nic_ref_resource(dev->ndev,\n+\t\t\t\t\t\t\t  RES_TPE_RPL,\n+\t\t\t\t\t\t\t  rpl_rpl_index + i)) {\n+\t\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t\t       \"ERROR: Could not reference TPE RPL resource\\n\");\n+\t\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t\t\t   error);\n+\t\t\t\t\treturn NULL;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t} else {\n+\t\t\text_rpl_index = flow_nic_alloc_resource(dev->ndev,\n+\t\t\t\t\t\t\t\tRES_TPE_EXT, 1);\n+\t\t\tif (ext_rpl_index < 0) {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR: Could not get TPE EXT resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t\t   error);\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\n+\t\t\trpl_rpl_length = ((int)fd->tun_hdr.len + 15) / 16;\n+\t\t\trpl_rpl_index = flow_nic_alloc_resource_contig(dev->ndev,\n+\t\t\t\t\t\t\t\t       RES_TPE_RPL,\n+\t\t\t\t\t\t\t\t       rpl_rpl_length,\n+\t\t\t\t\t\t\t\t       1);\n+\t\t\tif (rpl_rpl_index < 0) {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR: Could not get TPE RPL resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t\t   error);\n+\t\t\t\treturn 
NULL;\n+\t\t\t}\n+\n+\t\t\t/* Program new encap header data */\n+\t\t\thw_mod_tpe_rpl_ext_set(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_RPL_EXT_RPL_PTR,\n+\t\t\t\t\t       ext_rpl_index, rpl_rpl_index);\n+\t\t\thw_mod_tpe_rpl_ext_set(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_RPL_EXT_META_RPL_LEN,\n+\t\t\t\t\t       ext_rpl_index, fd->tun_hdr.len);\n+\t\t\thw_mod_tpe_rpl_ext_flush(&dev->ndev->be, ext_rpl_index,\n+\t\t\t\t\t\t 1);\n+\n+\t\t\tfor (int i = 0; i < rpl_rpl_length; ++i) {\n+\t\t\t\thw_mod_tpe_rpl_rpl_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_RPL_RPL_VALUE,\n+\t\t\t\t\t\t       rpl_rpl_index + i,\n+\t\t\t\t\t\t       fd->tun_hdr.d.hdr32 + i * 4);\n+\t\t\t}\n+\t\t\thw_mod_tpe_rpl_rpl_flush(&dev->ndev->be, rpl_rpl_index,\n+\t\t\t\t\t\t rpl_rpl_length);\n+\t\t}\n+\n+\t\tflm_rpl_ext_ptr = ext_rpl_index;\n+\t}\n+\n+\tif (setup_tpe) {\n+\t\tconst uint32_t eth_length = 14;\n+\t\tconst uint32_t l2_length = fd->tun_hdr.l2_len;\n+\t\tconst uint32_t l3_length = fd->tun_hdr.l3_len;\n+\t\tconst uint32_t l4_length = fd->tun_hdr.l4_len;\n+\t\tconst uint32_t fcs_length = 4;\n+\n+\t\tint tpe_index = -1;\n+\n+\t\t/* Find existing RCP */\n+\t\tfor (int i = 1;\n+\t\t\t\ti < (int)dev->ndev->res[RES_TPE_RCP].resource_count; ++i) {\n+\t\t\tuint32_t value;\n+\n+\t\t\tif (!flow_nic_is_resource_used(dev->ndev, RES_TPE_RCP,\n+\t\t\t\t\t\t       i))\n+\t\t\t\tcontinue;\n+\n+\t\t\thw_mod_tpe_rpl_rcp_get(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_RPL_RCP_LEN, i, &value);\n+\t\t\tif (value != fd->tun_hdr.len)\n+\t\t\t\tcontinue;\n+\t\t\thw_mod_tpe_rpl_rcp_get(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_RPL_RCP_DYN, i, &value);\n+\t\t\tif (value != 1)\n+\t\t\t\tcontinue;\n+\t\t\thw_mod_tpe_rpl_rcp_get(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_RPL_RCP_OFS, i, &value);\n+\t\t\tif (value != 0)\n+\t\t\t\tcontinue;\n+\t\t\thw_mod_tpe_hfu_rcp_get(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_HFU_RCP_L3_PRT, i,\n+\t\t\t\t\t       &value);\n+\t\t\tif (value != (fd->tun_hdr.ip_version == 4 ? 1 : 2))\n+\t\t\t\tcontinue;\n+\t\t\thw_mod_tpe_hfu_rcp_get(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_HFU_RCP_OUTER_L3_OFS, i,\n+\t\t\t\t\t       &value);\n+\t\t\tif (value != l2_length)\n+\t\t\t\tcontinue;\n+\n+\t\t\ttpe_index = i;\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\t/* Set RCP data */\n+\t\tif (tpe_index >= 0) {\n+\t\t\tif (flow_nic_ref_resource(dev->ndev, RES_TPE_RCP,\n+\t\t\t\t\t\t  tpe_index)) {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR: Could not reference TPE RCP resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t\t   error);\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\n+\t\t\tfh->resource[RES_TPE_RCP].count = 1;\n+\t\t\tfh->resource[RES_TPE_RCP].index = tpe_index;\n+\t\t\tfh->resource[RES_TPE_RCP].referenced = 1;\n+\t\t} else {\n+\t\t\tif (flow_nic_allocate_fh_resource(dev->ndev,\n+\t\t\t\t\t\t\t  RES_TPE_RCP,\n+\t\t\t\t\t\t\t  fh, 1, 1)) {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR: Could not get TPE RCP resource\\n\");\n+\t\t\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION,\n+\t\t\t\t\t\t   error);\n+\t\t\t\treturn NULL;\n+\t\t\t}\n+\n+\t\t\t/* Extend packet if needed. 
*/\n+\t\t\tif (fd->tun_hdr.len > eth_length) {\n+\t\t\t\t/* Extend FPGA packet buffer */\n+\t\t\t\thw_mod_tpe_rpp_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_RPP_RCP_EXP,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       fd->tun_hdr.len - eth_length);\n+\t\t\t\thw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t 1);\n+\n+\t\t\t\t/*\n+\t\t\t\t * Insert 0's into packet\n+\t\t\t\t * After this step DYN offsets are shifted by encap length,\n+\t\t\t\t * so only DYN offset 1 and 18 should be used\n+\t\t\t\t */\n+\t\t\t\thw_mod_tpe_ins_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_INS_RCP_DYN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index, 1);\n+\t\t\t\thw_mod_tpe_ins_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_INS_RCP_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index, 0);\n+\t\t\t\thw_mod_tpe_ins_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_INS_RCP_LEN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       fd->tun_hdr.len - eth_length);\n+\t\t\t\thw_mod_tpe_ins_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t 1);\n+\t\t\t}\n+\n+\t\t\tif (fd->tun_hdr.len > 0) {\n+\t\t\t\t/* Write header data to beginning of packet */\n+\t\t\t\thw_mod_tpe_rpl_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_RPL_RCP_DYN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       1);\n+\t\t\t\thw_mod_tpe_rpl_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_RPL_RCP_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       0);\n+\t\t\t\thw_mod_tpe_rpl_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_RPL_RCP_LEN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       fd->tun_hdr.len);\n+\t\t\t\thw_mod_tpe_rpl_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_RPL_RCP_RPL_PTR,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       0);\n+\t\t\t\thw_mod_tpe_rpl_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_RPL_RCP_EXT_PRIO,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       1);\n+\t\t\t\thw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t 1);\n+\t\t\t}\n+\n+\t\t\tfor (unsigned int i = 0; i < fd->modify_field_count;\n+\t\t\t\t\t++i) {\n+\t\t\t\tuint32_t select = fd->modify_field[i].select;\n+\t\t\t\tuint32_t dyn = fd->modify_field[i].dyn;\n+\t\t\t\tuint32_t ofs = fd->modify_field[i].ofs;\n+\t\t\t\tuint32_t len = fd->modify_field[i].len;\n+\n+\t\t\t\talign_tun_offset(fd, eth_length, i, &ofs, select, l2_length,\n+\t\t\t\t\tl3_length, l4_length, &dyn);\n+\n+\t\t\t\thw_mod_tpe_cpy_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_CPY_RCP_READER_SELECT,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index +\n+\t\t\t\t\t\t       16 * i,\n+\t\t\t\t\t\t       select);\n+\t\t\t\thw_mod_tpe_cpy_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_CPY_RCP_DYN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index +\n+\t\t\t\t\t\t       16 * i,\n+\t\t\t\t\t\t       dyn);\n+\t\t\t\thw_mod_tpe_cpy_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_CPY_RCP_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index +\n+\t\t\t\t\t\t       16 * i,\n+\t\t\t\t\t\t       ofs);\n+\t\t\t\thw_mod_tpe_cpy_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_CPY_RCP_LEN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index +\n+\t\t\t\t\t\t       16 * i,\n+\t\t\t\t\t\t       
len);\n+\t\t\t\thw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t fh->resource[RES_TPE_RCP].index +\n+\t\t\t\t\t\t\t 16 * i,\n+\t\t\t\t\t\t\t 1);\n+\t\t\t}\n+\n+\t\t\tif (fd->tun_hdr.new_outer) {\n+\t\t\t\t/*\n+\t\t\t\t * UDP length\n+\t\t\t\t * dyn_ofs[ADD_DYN] - dyn_ofs[SUB_DYN] + ADD_OFS\n+\t\t\t\t */\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_A_WR,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       1);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       1);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_A_POS_DYN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       1);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_A_POS_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       l2_length + l3_length + 4);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_A_ADD_DYN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       18);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_A_ADD_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       -(l2_length + l3_length + fcs_length));\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_A_SUB_DYN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       1);\n+\n+\t\t\t\t/* IPv4/IPv6 length */\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_B_WR,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       1);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_B_POS_DYN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       1);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_B_POS_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       l2_length +\n+\t\t\t\t\t\t       (fd->tun_hdr.ip_version == 4 ? 
2 : 4));\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_B_ADD_DYN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       18);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_B_ADD_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       -(l2_length +\n+\t\t\t\t\t\t       (fd->tun_hdr.ip_version == 4 ?\n+\t\t\t\t\t\t       0 : l3_length) + fcs_length));\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_B_SUB_DYN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       1);\n+\n+\t\t\t\t/* GTP length */\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_C_WR,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       1);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_C_POS_DYN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       1);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_C_POS_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       l2_length + l3_length + l4_length + 2);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_C_ADD_DYN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       18);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_C_ADD_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       -(l2_length + l3_length + l4_length +\n+\t\t\t\t\t\t       8 + fcs_length));\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_LEN_C_SUB_DYN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       1);\n+\n+\t\t\t\t/* _update TTL */\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_TTL_WR,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       fd->ttl_sub_enable);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_TTL_POS_DYN,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       fd->ttl_sub_outer ? 1 : DYN_L3);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_TTL_POS_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       (fd->ttl_sub_outer ?\n+\t\t\t\t\t\t       l2_length :\n+\t\t\t\t\t\t       fd->tun_hdr.len - eth_length) +\n+\t\t\t\t\t\t       (fd->ttl_sub_ipv4 ? 8 : 7));\n+\n+\t\t\t\t/* _update FPGA DYN offsets */\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_CS_INF,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       1);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_L3_PRT,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       (fd->tun_hdr.ip_version == 4 ? 
1 : 2));\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_L3_FRAG,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       0);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_TUNNEL,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       6);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_L4_PRT,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       2);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_OUTER_L3_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       l2_length);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_OUTER_L4_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       l2_length + l3_length);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_INNER_L3_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       fd->tun_hdr.len - eth_length);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_INNER_L4_OFS,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       fd->tun_hdr.len - eth_length);\n+\n+\t\t\t\thw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t 1);\n+\t\t\t} else {\n+\t\t\t\t/* _update TTL */\n+\t\t\t\tif (fd->ttl_sub_enable) {\n+\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_TTL_WR,\n+\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t       fd->ttl_sub_enable);\n+\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_TTL_POS_DYN,\n+\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t       fd->ttl_sub_outer ? DYN_L3 :\n+\t\t\t\t\t\t\t       DYN_TUN_L3);\n+\t\t\t\t\tif (fd->tun_hdr.len == 0) {\n+\t\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_TTL_POS_OFS,\n+\t\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP]\n+\t\t\t\t\t\t\t\t       .index,\n+\t\t\t\t\t\t\t\t       fd->ttl_sub_ipv4 ? 
8 : 7);\n+\t\t\t\t\t} else {\n+\t\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_TTL_POS_OFS,\n+\t\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP]\n+\t\t\t\t\t\t\t\t       .index,\n+\t\t\t\t\t\t\t\t       (fd->tun_hdr.len -\n+\t\t\t\t\t\t\t\t       eth_length) +\n+\t\t\t\t\t\t\t\t       (fd->ttl_sub_ipv4 ?\n+\t\t\t\t\t\t\t\t       8 : 7));\n+\t\t\t\t\t}\n+\t\t\t\t} else {\n+\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_TTL_WR,\n+\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t       0);\n+\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_TTL_POS_DYN,\n+\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t       0);\n+\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_TTL_POS_OFS,\n+\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t       0);\n+\t\t\t\t}\n+\n+\t\t\t\t/* _update FPGA DYN offsets */\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_CS_INF,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       0);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_L3_PRT,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       0);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_L3_FRAG,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       0);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_TUNNEL,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       0);\n+\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t       HW_TPE_HFU_RCP_L4_PRT,\n+\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t       0);\n+\t\t\t\tif (fd->tun_hdr.len == 0) {\n+\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_OUTER_L3_OFS,\n+\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t       0);\n+\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_OUTER_L4_OFS,\n+\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t       0);\n+\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_INNER_L3_OFS,\n+\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t       0);\n+\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_INNER_L4_OFS,\n+\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t       0);\n+\t\t\t\t} else {\n+\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_OUTER_L3_OFS,\n+\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t       fd->tun_hdr.len - eth_length);\n+\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_OUTER_L4_OFS,\n+\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t       fd->tun_hdr.len - eth_length);\n+\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_INNER_L3_OFS,\n+\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t       fd->tun_hdr.len - eth_length);\n+\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_HFU_RCP_INNER_L4_OFS,\n+\t\t\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t       fd->tun_hdr.len - 
eth_length);\n+\t\t\t\t}\n+\n+\t\t\t\thw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t\t 1);\n+\t\t\t}\n+\n+\t\t\t/* Calculate valid outer and inner checksums */\n+\t\t\thw_mod_tpe_csu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_CSU_RCP_OUTER_L3_CMD,\n+\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t       3);\n+\t\t\thw_mod_tpe_csu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_CSU_RCP_OUTER_L4_CMD,\n+\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t       3);\n+\t\t\thw_mod_tpe_csu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_CSU_RCP_INNER_L3_CMD,\n+\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t       3);\n+\t\t\thw_mod_tpe_csu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_CSU_RCP_INNER_L4_CMD,\n+\t\t\t\t\t       fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t       3);\n+\t\t\thw_mod_tpe_csu_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t fh->resource[RES_TPE_RCP].index,\n+\t\t\t\t\t\t 1);\n+\t\t}\n+\t}\n+\n+\t/*\n+\t * Setup CAT Color Table functionality\n+\t */\n+\tif (setup_cat_cot) {\n+\t\thw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_COLOR,\n+\t\t\t\t   fh->resource[RES_CAT_COT].index, 0);\n+\t\thw_mod_cat_cot_set(&dev->ndev->be, HW_CAT_COT_KM,\n+\t\t\t\t   fh->resource[RES_CAT_COT].index, 0x4);\n+\t\thw_mod_cat_cot_flush(&dev->ndev->be,\n+\t\t\t\t     fh->resource[RES_CAT_COT].index, 1);\n+\t}\n+\n+\t/*\n+\t * Setup CAT action functionality\n+\t */\n+\tif (setup_cat_cts) {\n+\t\t/* Setup CAT CTS */\n+\t\tconst int offset = ((int)dev->ndev->be.cat.cts_num + 1) / 2;\n+\n+\t\thw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,\n+\t\t\t\t   offset * fh->resource[RES_CAT_CFN].index + 0,\n+\t\t\t\t   fh->resource[RES_CAT_COT].index);\n+\t\thw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,\n+\t\t\t\t   offset * fh->resource[RES_CAT_CFN].index + 0,\n+\t\t\t\t   0);\n+\t\thw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,\n+\t\t\t\t   offset * fh->resource[RES_CAT_CFN].index + 1,\n+\t\t\t\t   0);\n+\t\thw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,\n+\t\t\t\t   offset * fh->resource[RES_CAT_CFN].index + 1,\n+\t\t\t\t   fh->resource[RES_QSL_RCP].index);\n+\t\thw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,\n+\t\t\t\t   offset * fh->resource[RES_CAT_CFN].index + 2,\n+\t\t\t\t   0);\n+\t\thw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,\n+\t\t\t\t   offset * fh->resource[RES_CAT_CFN].index + 2,\n+\t\t\t\t   0);\n+\t\thw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,\n+\t\t\t\t   offset * fh->resource[RES_CAT_CFN].index + 3,\n+\t\t\t\t   0);\n+\t\thw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,\n+\t\t\t\t   offset * fh->resource[RES_CAT_CFN].index + 3,\n+\t\t\t\t   0);\n+\t\thw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,\n+\t\t\t\t   offset * fh->resource[RES_CAT_CFN].index + 4,\n+\t\t\t\t   fh->resource[RES_HST_RCP].index);\n+\t\thw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,\n+\t\t\t\t   offset * fh->resource[RES_CAT_CFN].index + 4,\n+\t\t\t\t   0);\n+\t\thw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_A,\n+\t\t\t\t   offset * fh->resource[RES_CAT_CFN].index + 5,\n+\t\t\t\t   fh->resource[RES_TPE_RCP].index);\n+\t\thw_mod_cat_cts_set(&dev->ndev->be, HW_CAT_CTS_CAT_B,\n+\t\t\t\t   offset * fh->resource[RES_CAT_CFN].index + 5,\n+\t\t\t\t   0);\n+\n+\t\thw_mod_cat_cts_flush(&dev->ndev->be,\n+\t\t\t\t     offset * fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t     6);\n+\t\thw_mod_cat_cts_flush(&dev->ndev->be,\n+\t\t\t\t     offset * 
fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t     6);\n+\n+\t\t/* Setup CAT CTE */\n+\t\thw_mod_cat_cte_set(&dev->ndev->be,\n+\t\t\t\t   HW_CAT_CTE_ENABLE_BM,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index,\n+\t\t\t\t   (fh->resource[RES_CAT_COT].index ? 0x001 : 0) | 0x004 |\n+\t\t\t\t   (fh->resource[RES_QSL_RCP].index ? 0x008 : 0) |\n+\t\t\t\t   0x040 |\n+\t\t\t\t   (fh->resource[RES_HST_RCP].index ? 0x100 : 0) |\n+\t\t\t\t   (fh->resource[RES_TPE_RCP].index ? 0x400 : 0));\n+\t\thw_mod_cat_cte_flush(&dev->ndev->be,\n+\t\t\t\t     fh->resource[RES_CAT_CFN].index, 1);\n+\t}\n+\n+\t/*\n+\t * Setup CAT CFN\n+\t *\n+\t * Once CAT CFN has been programmed traffic will start match the filter,\n+\t * so CAT CFN must be the last thing to be programmed.\n+\t */\n+\tif (setup_cat_cfn) {\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x0);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ENABLE,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_INV,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x0);\n+\n+\t\t/* Protocol checks */\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_INV,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x0);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_ISL,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_CFP,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MAC,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L2,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0,\n+\t\t\t\t   fd->l2_prot != -1 ? (1 << fd->l2_prot) : -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VNTAG,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_VLAN,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0,\n+\t\t\t\t   (0xf << fd->vlans) & 0xf);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_MPLS,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L3,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0,\n+\t\t\t\t   fd->l3_prot != -1 ? (1 << fd->l3_prot) : -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_FRAG,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0,\n+\t\t\t\t   fd->fragmentation);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_IP_PROT,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_L4,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0,\n+\t\t\t\t   fd->l4_prot != -1 ? (1 << fd->l4_prot) : -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be,\n+\t\t\t\t   HW_CAT_CFN_PTC_TUNNEL,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0,\n+\t\t\t\t   fd->tunnel_prot != -1 ? 
(1 << fd->tunnel_prot) : -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L2,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_VLAN,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_MPLS,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L3,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0,\n+\t\t\t\t   fd->tunnel_l3_prot != -1 ?\n+\t\t\t\t   (1 << fd->tunnel_l3_prot) : -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_FRAG,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_IP_PROT,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PTC_TNL_L4,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0,\n+\t\t\t\t   fd->tunnel_l4_prot != -1 ?\n+\t\t\t\t   (1 << fd->tunnel_l4_prot) : -1);\n+\n+\t\t/* Error checks */\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_INV,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x0);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_CV,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_FCS,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TRUNC,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L3_CS,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_L4_CS,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L3_CS,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_ERR_TNL_L4_CS,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be,\n+\t\t\t\t   HW_CAT_CFN_ERR_TTL_EXP,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0,\n+\t\t\t\t   (fd->ttl_sub_enable && fd->ttl_sub_outer) ? -1 : 0x1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be,\n+\t\t\t\t   HW_CAT_CFN_ERR_TNL_TTL_EXP,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0,\n+\t\t\t\t   (fd->ttl_sub_enable && !fd->ttl_sub_outer) ? 
-1 : 0x1);\n+\n+\t\t/* MAC port check */\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_MAC_PORT,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0,\n+\t\t\t\t   1 << fh->port_id);\n+\n+\t\t/* Pattern match checks */\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMP,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x0);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_DCT,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x0);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_EXT_INV,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x0);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_CMB,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x0);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_AND_INV,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_OR_INV,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_PM_INV,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\n+\t\t/* Length checks */\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x0);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_LC_INV,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, -1);\n+\n+\t\t/* KM and FLM */\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM0_OR,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x1);\n+\t\thw_mod_cat_cfn_set(&dev->ndev->be, HW_CAT_CFN_KM1_OR,\n+\t\t\t\t   fh->resource[RES_CAT_CFN].index, 0, 0x3);\n+\n+\t\thw_mod_cat_cfn_flush(&dev->ndev->be,\n+\t\t\t\t     fh->resource[RES_CAT_CFN].index, 1);\n+\t}\n+\n+\t/* Program FLM flow */\n+\tif (fh_flm) {\n+\t\tconvert_fd_to_flm(fh_flm, fd, packet_data, flm_key_id,\n+\t\t\t\t  flm_rpl_ext_ptr, attr->priority);\n+\t\tflm_flow_programming(dev, fh_flm, fd->mtr_ids, flm_ft, 1);\n+\t}\n+\n+\tif (free_fd)\n+\t\tfree(fd);\n+\n+\treturn (fh_flm) ? 
fh_flm : fh;\n+}\n+\n+/*\n+ * Public functions\n+ */\n+\n+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)\n+{\n+\tif (!ndev->flow_mgnt_prepared) {\n+\t\t/* Check static arrays are big enough */\n+\t\tassert(ndev->be.tpe.nb_cpy_writers <=\n+\t\t       MAX_CPY_WRITERS_SUPPORTED);\n+\n+\t\t/* KM Flow Type 0 is reserved */\n+\t\tflow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);\n+\t\tflow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);\n+\n+\t\t/* FLM Flow Type 0 and 1 is reserved */\n+\t\tflow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 0);\n+\t\tflow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, 1);\n+\t\tflow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);\n+\n+\t\t/* CAT CFN 0 is reserved as a low priority catch all filter */\n+\t\thw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_SET_ALL_DEFAULTS,\n+\t\t\t\t   0, 0, 0);\n+\t\thw_mod_cat_cfn_flush(&ndev->be, 0, 1);\n+\t\tflow_nic_mark_resource_used(ndev, RES_CAT_CFN, 0);\n+\n+\t\t/* Initialize QSL with unmatched recipe index 0 - discard */\n+\t\tif (hw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_DISCARD, 0, 0x1) < 0)\n+\t\t\tgoto err_exit0;\n+\t\tif (hw_mod_qsl_rcp_flush(&ndev->be, 0, 1) < 0)\n+\t\t\tgoto err_exit0;\n+\n+\t\tflow_nic_mark_resource_used(ndev, RES_QSL_RCP, 0);\n+\n+\t\t/* Initialize QST with default index 0 */\n+\t\tif (hw_mod_qsl_qst_set(&ndev->be, HW_QSL_QST_PRESET_ALL, 0,\n+\t\t\t\t       0x0) < 0)\n+\t\t\tgoto err_exit0;\n+\t\tif (hw_mod_qsl_qst_flush(&ndev->be, 0, 1) < 0)\n+\t\t\tgoto err_exit0;\n+\n+\t\tflow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);\n+\n+\t\t/* HST & TPE index 0 is reserved */\n+\t\tflow_nic_mark_resource_used(ndev, RES_HST_RCP, 0);\n+\t\tflow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);\n+\t\tflow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);\n+\t\tflow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);\n+\n+\t\t/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0 */\n+\t\tif (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESCRIPTOR, 0, 7) <\n+\t\t\t\t0)\n+\t\t\tgoto err_exit0;\n+\t\tif (hw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_DESC_LEN, 0, 6) <\n+\t\t\t\t0)\n+\t\t\tgoto err_exit0;\n+\n+\t\tif (hw_mod_pdb_rcp_flush(&ndev->be, 0, 1) < 0)\n+\t\t\tgoto err_exit0;\n+\n+\t\tflow_nic_mark_resource_used(ndev, RES_PDB_RCP, 0);\n+\n+\t\t/* Set default hasher recipe to 5-tuple */\n+\t\tflow_nic_set_hasher(ndev, 0, HASH_ALGO_5TUPLE);\n+\t\thw_mod_hsh_rcp_flush(&ndev->be, 0, 1);\n+\n+\t\tflow_nic_mark_resource_used(ndev, RES_HSH_RCP, 0);\n+\n+\t\t/*\n+\t\t * COT - set color to 0 for unmatched - color encoding must not have CAO enabled for\n+\t\t * this entry\n+\t\t */\n+\t\thw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);\n+\t\tif (hw_mod_cat_cot_flush(&ndev->be, 0, 1) < 0)\n+\t\t\tgoto err_exit0;\n+\n+\t\tflow_nic_mark_resource_used(ndev, RES_CAT_COT, 0);\n+\n+\t\t/* Unblock MAC and MAC statistics on this NIC */\n+\t\tif (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_STATT, 0) < 0)\n+\t\t\tgoto err_exit0;\n+\t\t/* block keep alive - not needed */\n+\t\tif (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_KEEPA, 1) < 0)\n+\t\t\tgoto err_exit0;\n+\t\t/*\n+\t\t * Unblock all MAC ports\n+\t\t */\n+\t\tif (hw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_MAC_PORT, 0) < 0)\n+\t\t\tgoto err_exit0;\n+\n+\t\t/*\n+\t\t *  unblock RPP slices\n+\t\t */\n+\t\thw_mod_rmc_ctrl_set(&ndev->be, HW_RMC_BLOCK_RPP_SLICE, 0);\n+\n+\t\tif (hw_mod_rmc_ctrl_flush(&ndev->be) < 0)\n+\t\t\tgoto err_exit0;\n+\n+\t\t/* FLM */\n+\t\tif (flm_sdram_calibrate(ndev) < 0)\n+\t\t\tgoto 
err_exit0;\n+\t\tif (flm_sdram_reset(ndev, 1) < 0)\n+\t\t\tgoto err_exit0;\n+\t\tflm_flow_handle_create(&ndev->flm_res_handle);\n+\n+\t\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LDS,\n+\t\t\t\t       0); /* Learn done status */\n+\t\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LFS,\n+\t\t\t\t       0); /* Learn fail status */\n+\t\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_LIS,\n+\t\t\t\t       0); /* Learn ignore status */\n+\t\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UDS,\n+\t\t\t\t       0); /* Unlearn done status */\n+\t\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_UIS,\n+\t\t\t\t       0); /* Unlearn ignore status */\n+\t\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RDS,\n+\t\t\t\t       0); /* Relearn done status */\n+\t\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RIS,\n+\t\t\t\t       0); /* Relearn ignore status */\n+\t\thw_mod_flm_control_set(&ndev->be, HW_FLM_CONTROL_RBL, 4);\n+\t\thw_mod_flm_control_flush(&ndev->be);\n+\n+\t\thw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT0,\n+\t\t\t\t    0); /* Drop at 100% FIFO fill level */\n+\t\thw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT0, 1);\n+\t\thw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT1,\n+\t\t\t\t    6); /* Drop at 37,5% FIFO fill level */\n+\t\thw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT1, 1);\n+\t\thw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT2,\n+\t\t\t\t    4); /* Drop at 25% FIFO fill level */\n+\t\thw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT2, 1);\n+\t\thw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_LIMIT3,\n+\t\t\t\t    2); /* Drop at 12,5% FIFO fill level */\n+\t\thw_mod_flm_prio_set(&ndev->be, HW_FLM_PRIO_FT3, 1);\n+\t\thw_mod_flm_prio_flush(&ndev->be);\n+\n+\t\tfor (uint32_t i = 0; i < ndev->be.flm.nb_pst_profiles; ++i) {\n+\t\t\thw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_BP, i,\n+\t\t\t\t\t   FLM_PERIODIC_STATS_BYTE_LIMIT);\n+\t\t\thw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_PP, i,\n+\t\t\t\t\t   FLM_PERIODIC_STATS_PKT_LIMIT);\n+\t\t\thw_mod_flm_pst_set(&ndev->be, HW_FLM_PST_TP, i,\n+\t\t\t\t\t   FLM_PERIODIC_STATS_BYTE_TIMEOUT);\n+\t\t}\n+\t\thw_mod_flm_pst_flush(&ndev->be, 0, ALL_ENTRIES);\n+\n+\t\thw_mod_flm_stat_update(&ndev->be);\n+\n+\t\tndev->flm_mtr_handle =\n+\t\t\tcalloc(1, sizeof(struct flm_flow_mtr_handle_s));\n+\t\tndev->ft_res_handle =\n+\t\t\tcalloc(FLM_FLOW_FT_MAX, sizeof(struct flm_flow_ft_ident_s));\n+\t\tndev->mtr_stat_handle =\n+\t\t\tcalloc(FLM_MTR_STAT_SIZE, sizeof(struct mtr_stat_s));\n+\n+\t\tif (ndev->flm_mtr_handle == NULL ||\n+\t\t\t\tndev->ft_res_handle == NULL ||\n+\t\t\t\tndev->mtr_stat_handle == NULL)\n+\t\t\tgoto err_exit0;\n+\n+\t\tstruct mtr_stat_s *mtr_stat = ndev->mtr_stat_handle;\n+\n+\t\tfor (uint32_t i = 0; i < FLM_MTR_STAT_SIZE; ++i) {\n+\t\t\tatomic_init(&mtr_stat[i].n_pkt, 0);\n+\t\t\tatomic_init(&mtr_stat[i].n_bytes, 0);\n+\t\t\tatomic_init(&mtr_stat[i].stats_mask, 0);\n+\t\t}\n+\n+\t\tif (flow_group_handle_create(&ndev->group_handle,\n+\t\t\t\t\t     FLM_FLOW_RCP_MAX))\n+\t\t\tgoto err_exit0;\n+\n+\t\tndev->flow_mgnt_prepared = 1;\n+\t}\n+\treturn 0;\n+\n+err_exit0:\n+\tdone_flow_management_of_ndev_profile_inline(ndev);\n+\treturn -1;\n+}\n+\n+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)\n+{\n+#ifdef FLOW_DEBUG\n+\tndev->be.iface->set_debug_mode(ndev->be.be_dev,\n+\t\t\t\t       FLOW_BACKEND_DEBUG_MODE_WRITE);\n+#endif\n+\n+\tif (ndev->flow_mgnt_prepared) {\n+\t\tflm_sdram_reset(ndev, 0);\n+\t\tflm_flow_handle_remove(&ndev->flm_res_handle);\n+\n+\t\tflow_nic_free_resource(ndev, RES_KM_FLOW_TYPE, 
0);\n+\t\tflow_nic_free_resource(ndev, RES_KM_CATEGORY, 0);\n+\n+\t\thw_mod_flm_rcp_set(&ndev->be, HW_FLM_RCP_PRESET_ALL, 0, 0);\n+\t\thw_mod_flm_rcp_flush(&ndev->be, 0, 1);\n+\t\tflow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 0);\n+\t\tflow_nic_free_resource(ndev, RES_FLM_FLOW_TYPE, 1);\n+\t\tflow_nic_free_resource(ndev, RES_FLM_RCP, 0);\n+\n+\t\tfree(ndev->flm_mtr_handle);\n+\t\tfree(ndev->ft_res_handle);\n+\t\tfree(ndev->mtr_stat_handle);\n+\t\tflow_group_handle_destroy(&ndev->group_handle);\n+\n+\t\thw_mod_cat_cfn_set(&ndev->be, HW_CAT_CFN_PRESET_ALL, 0, 0, 0);\n+\t\thw_mod_cat_cfn_flush(&ndev->be, 0, 1);\n+\t\tflow_nic_free_resource(ndev, RES_CAT_CFN, 0);\n+\n+\t\thw_mod_qsl_rcp_set(&ndev->be, HW_QSL_RCP_PRESET_ALL, 0, 0);\n+\t\thw_mod_qsl_rcp_flush(&ndev->be, 0, 1);\n+\t\tflow_nic_free_resource(ndev, RES_QSL_RCP, 0);\n+\n+\t\thw_mod_hst_rcp_set(&ndev->be, HW_HST_RCP_PRESET_ALL, 0, 0);\n+\t\thw_mod_hst_rcp_flush(&ndev->be, 0, 1);\n+\t\tflow_nic_free_resource(ndev, RES_HST_RCP, 0);\n+\n+\t\thw_mod_tpe_reset(&ndev->be);\n+\t\tflow_nic_free_resource(ndev, RES_TPE_RCP, 0);\n+\t\tflow_nic_free_resource(ndev, RES_TPE_EXT, 0);\n+\t\tflow_nic_free_resource(ndev, RES_TPE_RPL, 0);\n+\n+\t\thw_mod_pdb_rcp_set(&ndev->be, HW_PDB_RCP_PRESET_ALL, 0, 0);\n+\t\thw_mod_pdb_rcp_flush(&ndev->be, 0, 1);\n+\t\tflow_nic_free_resource(ndev, RES_PDB_RCP, 0);\n+\n+\t\thw_mod_hsh_rcp_set(&ndev->be, HW_HSH_RCP_PRESET_ALL, 0, 0, 0);\n+\t\thw_mod_hsh_rcp_flush(&ndev->be, 0, 1);\n+\t\tflow_nic_free_resource(ndev, RES_HSH_RCP, 0);\n+\n+\t\thw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);\n+\t\thw_mod_cat_cot_flush(&ndev->be, 0, 1);\n+\t\tflow_nic_free_resource(ndev, RES_CAT_COT, 0);\n+\n+#ifdef FLOW_DEBUG\n+\t\tndev->be.iface->set_debug_mode(ndev->be.be_dev,\n+\t\t\t\t\t       FLOW_BACKEND_DEBUG_MODE_NONE);\n+#endif\n+\n+\t\tndev->flow_mgnt_prepared = 0;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int flow_validate_profile_inline(struct flow_eth_dev *dev,\n+\t\t\t\t const struct flow_elem elem[],\n+\t\t\t\t const struct flow_action action[],\n+\t\t\t\t struct flow_error *error)\n+{\n+\tuint32_t port_id = 0;\n+\tuint32_t num_dest_port = 0;\n+\tuint32_t num_queues = 0;\n+\n+\tuint32_t packet_data[10];\n+\tuint32_t packet_mask[10];\n+\tstruct flm_flow_key_def_s key_def;\n+\n+\tflow_nic_set_error(ERR_SUCCESS, error);\n+\n+\tpthread_mutex_lock(&dev->ndev->mtx);\n+\tstruct nic_flow_def *fd = interpret_flow_elements(dev, elem, action,\n+\t\t\t\t\t\t\t  error, 0, &port_id,\n+\t\t\t\t\t\t\t  &num_dest_port, &num_queues,\n+\t\t\t\t\t\t\t  packet_data, packet_mask,\n+\t\t\t\t\t\t\t  &key_def);\n+\tpthread_mutex_unlock(&dev->ndev->mtx);\n+\n+\tif (!fd)\n+\t\treturn -1;\n+\n+\tfree(fd);\n+\treturn 0;\n+}\n+\n+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,\n+\tconst struct flow_attr *attr, const struct flow_elem elem[],\n+\tconst struct flow_action action[], struct flow_error *error)\n+{\n+\tstruct flow_handle *fh = NULL;\n+\n+\tuint32_t port_id = UINT32_MAX;\n+\tuint32_t num_dest_port;\n+\tuint32_t num_queues;\n+\n+\tuint32_t packet_data[10];\n+\tuint32_t packet_mask[10];\n+\tstruct flm_flow_key_def_s key_def;\n+\n+\tstruct flow_attr attr_local;\n+\n+\tmemcpy(&attr_local, attr, sizeof(struct flow_attr));\n+\tif (attr_local.group > 0)\n+\t\tattr_local.forced_vlan_vid = 0;\n+\n+\tflow_nic_set_error(ERR_SUCCESS, error);\n+\n+\tpthread_mutex_lock(&dev->ndev->mtx);\n+\n+\tstruct nic_flow_def *fd = interpret_flow_elements(dev, elem, action, error,\n+\t\t\t\t\t\t\t  attr_local.forced_vlan_vid,\n+\t\t\t\t\t\t\t 
 &port_id, &num_dest_port,\n+\t\t\t\t\t\t\t  &num_queues, packet_data,\n+\t\t\t\t\t\t\t  packet_mask, &key_def);\n+\tif (!fd)\n+\t\tgoto err_exit;\n+\n+\t/* Translate group IDs */\n+\tif (fd->jump_to_group != UINT32_MAX &&\n+\t\t\tflow_group_translate_get(dev->ndev->group_handle,\n+\t\t\t\t\tattr_local.caller_id, fd->jump_to_group,\n+\t\t\t\t\t&fd->jump_to_group)) {\n+\t\tNT_LOG(ERR, FILTER, \"ERROR: Could not get group resource\\n\");\n+\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);\n+\t\tgoto err_exit;\n+\t}\n+\tif (attr_local.group > 0 &&\n+\t\t\tflow_group_translate_get(dev->ndev->group_handle,\n+\t\t\t\t\tattr_local.caller_id, attr_local.group,\n+\t\t\t\t\t&attr_local.group)) {\n+\t\tNT_LOG(ERR, FILTER, \"ERROR: Could not get group resource\\n\");\n+\t\tflow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);\n+\t\tgoto err_exit;\n+\t}\n+\n+\tif (port_id == UINT32_MAX)\n+\t\tport_id = dev->port_id;\n+\n+\t/* Create and flush filter to NIC */\n+\tfh = create_flow_filter(dev, fd, &attr_local, error, port_id,\n+\t\t\t\tnum_dest_port, num_queues, packet_data,\n+\t\t\t\tpacket_mask, &key_def);\n+\tif (!fh)\n+\t\tgoto err_exit;\n+\n+\tNT_LOG(DBG, FILTER,\n+\t       \"New FlOW: fh (flow handle) %p, fd (flow definition) %p\\n\", fh,\n+\t       fd);\n+\tNT_LOG(DBG, FILTER,\n+\t       \">>>>> [Dev %p] Nic %i, Port %i: fh %p fd %p - implementation <<<<<\\n\",\n+\t       dev, dev->ndev->adapter_no, dev->port, fh, fd);\n+\n+\tpthread_mutex_unlock(&dev->ndev->mtx);\n+\n+\treturn fh;\n+\n+err_exit:\n+\tif (fh)\n+\t\tflow_destroy_locked_profile_inline(dev, fh, NULL);\n+\n+\tpthread_mutex_unlock(&dev->ndev->mtx);\n+\n+\tNT_LOG(ERR, FILTER, \"ERR: %s\\n\", __func__);\n+\treturn NULL;\n+}\n+\n+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,\n+\t\t\t\t       struct flow_handle *fh,\n+\t\t\t\t       struct flow_error *error)\n+{\n+\tassert(dev);\n+\tassert(fh);\n+\n+\tint err = 0;\n+\n+\tflow_nic_set_error(ERR_SUCCESS, error);\n+\n+\t/* take flow out of ndev list - may not have been put there yet */\n+\tif (fh->type == FLOW_HANDLE_TYPE_FLM)\n+\t\tnic_remove_flow_flm(dev->ndev, fh);\n+\n+\telse\n+\t\tnic_remove_flow(dev->ndev, fh);\n+\n+#ifdef FLOW_DEBUG\n+\tdev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,\n+\t\t\t\t\t    FLOW_BACKEND_DEBUG_MODE_WRITE);\n+#endif\n+\n+\tif (fh->type == FLOW_HANDLE_TYPE_FLM) {\n+\t\terr |= flm_flow_programming(dev, fh, NULL, 0, 0);\n+\n+\t\tif (fh->flm_rpl_ext_ptr > 0 &&\n+\t\t\t\tflow_nic_deref_resource(dev->ndev, RES_TPE_EXT,\n+\t\t\t\t\t\t\t(int)fh->flm_rpl_ext_ptr) == 0) {\n+\t\t\tuint32_t ptr = 0;\n+\t\t\tuint32_t len = 0;\n+\n+\t\t\thw_mod_tpe_rpl_ext_get(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_RPL_EXT_RPL_PTR,\n+\t\t\t\t\t       (int)fh->flm_rpl_ext_ptr, &ptr);\n+\t\t\thw_mod_tpe_rpl_ext_get(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_RPL_EXT_META_RPL_LEN,\n+\t\t\t\t\t       (int)fh->flm_rpl_ext_ptr, &len);\n+\n+\t\t\thw_mod_tpe_rpl_ext_set(&dev->ndev->be,\n+\t\t\t\t\t       HW_TPE_PRESET_ALL,\n+\t\t\t\t\t       (int)fh->flm_rpl_ext_ptr, 0);\n+\t\t\thw_mod_tpe_rpl_ext_flush(&dev->ndev->be,\n+\t\t\t\t\t\t (int)fh->flm_rpl_ext_ptr, 1);\n+\n+\t\t\tfor (uint32_t ii = 0; ii < (len + 15) / 16; ii++) {\n+\t\t\t\tif (flow_nic_deref_resource(dev->ndev,\n+\t\t\t\t\t\t\t    RES_TPE_RPL,\n+\t\t\t\t\t\t\t    (int)(ptr + ii)) == 0) {\n+\t\t\t\t\tuint32_t rpl_zero[] = { 0, 0, 0, 0 };\n+\n+\t\t\t\t\thw_mod_tpe_rpl_rpl_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t       HW_TPE_PRESET_ALL,\n+\t\t\t\t\t\t\t       (int)(ptr + ii),\n+\t\t\t\t\t\t\t       
rpl_zero);\n+\t\t\t\t\thw_mod_tpe_rpl_rpl_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t (int)(ptr + ii),\n+\t\t\t\t\t\t\t\t 1);\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\n+\t\tflow_group_translate_release(dev->ndev->group_handle,\n+\t\t\t\t\t     fh->flm_owner->flm_group_index);\n+\n+\t\tfh->flm_owner->flm_ref_count -= 1;\n+\t\tif (fh->flm_owner->flm_ref_count == 0) {\n+\t\t\terr |= flow_flm_destroy_owner(dev, fh->flm_owner);\n+\t\t\terr |= flow_destroy_locked_profile_inline(dev,\n+\t\t\t\t\t\t\t\t  fh->flm_owner,\n+\t\t\t\t\t\t\t\t  error);\n+\t\t}\n+\t} else {\n+\t\tNT_LOG(DBG, FILTER, \"removing flow :%p\\n\", fh);\n+\n+\t\tif (fh->fd) {\n+\t\t\tif (fh->fd->km.num_ftype_elem)\n+\t\t\t\tkm_clear_data_match_entry(&fh->fd->km);\n+\n+\t\t\tif (fh->fd->jump_to_group != UINT32_MAX) {\n+\t\t\t\terr |= flm_flow_destroy_group(dev,\n+\t\t\t\t\t\t\t      fh->fd->jump_to_group);\n+\t\t\t\tflow_group_translate_release(dev->ndev->group_handle,\n+\t\t\t\t\t\t\t     fh->fd->jump_to_group);\n+\t\t\t}\n+\t\t}\n+\n+\t\tfor (int res_type = 0; res_type < RES_COUNT; res_type++) {\n+\t\t\tif (fh->resource[res_type].count < 1)\n+\t\t\t\tcontinue;\n+\n+\t\t\tfor (int ii = 0; ii < fh->resource[res_type].count;\n+\t\t\t\t\tii++) {\n+\t\t\t\t/* If last ref count of this resource, free it */\n+\t\t\t\tif (flow_nic_deref_resource(dev->ndev,\n+\t\t\t\t\t\t\t    res_type,\n+\t\t\t\t\t\t\t    fh->resource[res_type].index +\n+\t\t\t\t\t\t\t    ii) == 0) {\n+\t\t\t\t\t/* Free resource up in NIC */\n+\t\t\t\t\tswitch (res_type) {\n+\t\t\t\t\tcase RES_CAT_CFN:\n+\t\t\t\t\t\tassert(ii == 0);\n+\t\t\t\t\t\terr |= reset_cat_function_setup(dev,\n+\t\t\t\t\t\t\tfh->resource[RES_CAT_CFN]\n+\t\t\t\t\t\t\t.index + ii);\n+\t\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tcase RES_QSL_QST:\n+\t\t\t\t\t\thw_mod_qsl_qst_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t   HW_QSL_QST_PRESET_ALL,\n+\t\t\t\t\t\t\t\t   fh->resource[RES_QSL_QST]\n+\t\t\t\t\t\t\t\t   .index + ii,\n+\t\t\t\t\t\t\t\t   0);\n+\t\t\t\t\t\thw_mod_qsl_qst_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t     fh->resource[RES_QSL_QST]\n+\t\t\t\t\t\t\t\t     .index + ii,\n+\t\t\t\t\t\t\t\t     1);\n+\t\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tcase RES_QSL_RCP:\n+\t\t\t\t\t\thw_mod_qsl_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t   HW_QSL_RCP_PRESET_ALL,\n+\t\t\t\t\t\t\t\t   fh->resource[RES_QSL_RCP]\n+\t\t\t\t\t\t\t\t   .index + ii,\n+\t\t\t\t\t\t\t\t   0);\n+\t\t\t\t\t\thw_mod_qsl_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t     fh->resource[RES_QSL_RCP]\n+\t\t\t\t\t\t\t\t     .index + ii,\n+\t\t\t\t\t\t\t\t     1);\n+\t\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tcase RES_CAT_COT:\n+\t\t\t\t\t\thw_mod_cat_cot_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t   HW_CAT_COT_PRESET_ALL,\n+\t\t\t\t\t\t\t\t   fh->resource[res_type]\n+\t\t\t\t\t\t\t\t   .index + ii,\n+\t\t\t\t\t\t\t\t   0);\n+\t\t\t\t\t\thw_mod_cat_cot_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t     fh->resource[res_type]\n+\t\t\t\t\t\t\t\t     .index + ii,\n+\t\t\t\t\t\t\t\t     1);\n+\t\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tcase RES_KM_CATEGORY:\n+\t\t\t\t\t\tassert(ii == 0);\n+\t\t\t\t\t\thw_mod_km_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t  HW_KM_RCP_PRESET_ALL,\n+\t\t\t\t\t\t\t\t  fh->resource[res_type]\n+\t\t\t\t\t\t\t\t  .index + ii,\n+\t\t\t\t\t\t\t\t  0, 0);\n+\t\t\t\t\t\thw_mod_km_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t    fh->resource[res_type]\n+\t\t\t\t\t\t\t\t    .index + ii,\n+\t\t\t\t\t\t\t\t    1);\n+\t\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tcase RES_KM_FLOW_TYPE: {\n+\t\t\t\t\t\tstruct flm_flow_ft_ident_s *ft_idents =\n+\t\t\t\t\t\t\t(struct flm_flow_ft_ident_s\n+\t\t\t\t\t\t\t 
*)dev->ndev\n+\t\t\t\t\t\t\t->ft_res_handle;\n+\t\t\t\t\t\tft_idents[fh->resource[res_type]\n+\t\t\t\t\t\t\t  .index +\n+\t\t\t\t\t\t\t  ii]\n+\t\t\t\t\t\t.data = 0;\n+\t\t\t\t\t}\n+\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tcase RES_FLM_RCP:\n+\t\t\t\t\t\tassert(ii == 0);\n+\t\t\t\t\t\terr |= flm_flow_destroy_rcp(dev,\n+\t\t\t\t\t\t\t\t\t    fh->resource[res_type]\n+\t\t\t\t\t\t\t\t\t    .index + ii);\n+\t\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tcase RES_FLM_FLOW_TYPE:\n+\t\t\t\t\t\t/* Nothing needed */\n+\t\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tcase RES_HSH_RCP:\n+\t\t\t\t\t\thw_mod_hsh_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t   HW_HSH_RCP_PRESET_ALL,\n+\t\t\t\t\t\t\t\t   fh->resource[res_type]\n+\t\t\t\t\t\t\t\t   .index + ii,\n+\t\t\t\t\t\t\t\t   0, 0);\n+\t\t\t\t\t\thw_mod_hsh_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t     fh->resource[res_type]\n+\t\t\t\t\t\t\t\t     .index +\n+\t\t\t\t\t\t\t\t     ii,\n+\t\t\t\t\t\t\t\t     1);\n+\t\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tcase RES_PDB_RCP:\n+\t\t\t\t\t\thw_mod_pdb_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t   HW_PDB_RCP_PRESET_ALL,\n+\t\t\t\t\t\t\t\t   fh->resource[res_type]\n+\t\t\t\t\t\t\t\t   .index + ii,\n+\t\t\t\t\t\t\t\t   0);\n+\t\t\t\t\t\thw_mod_pdb_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t     fh->resource[res_type]\n+\t\t\t\t\t\t\t\t     .index + ii,\n+\t\t\t\t\t\t\t\t     1);\n+\t\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tcase RES_HST_RCP:\n+\t\t\t\t\t\thw_mod_hst_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t   HW_HST_RCP_PRESET_ALL,\n+\t\t\t\t\t\t\t\t   fh->resource[res_type]\n+\t\t\t\t\t\t\t\t   .index + ii,\n+\t\t\t\t\t\t\t\t   0);\n+\t\t\t\t\t\thw_mod_hst_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t     fh->resource[res_type]\n+\t\t\t\t\t\t\t\t     .index + ii,\n+\t\t\t\t\t\t\t\t     1);\n+\t\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tcase RES_TPE_RCP:\n+\t\t\t\t\t\thw_mod_tpe_rpp_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t       HW_TPE_PRESET_ALL,\n+\t\t\t\t\t\t\t\t       fh->resource[res_type]\n+\t\t\t\t\t\t\t\t       .index + ii,\n+\t\t\t\t\t\t\t\t       0);\n+\t\t\t\t\t\thw_mod_tpe_rpp_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t\t fh->resource[res_type]\n+\t\t\t\t\t\t\t\t\t .index + ii,\n+\t\t\t\t\t\t\t\t\t 1);\n+\t\t\t\t\t\thw_mod_tpe_ins_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t       HW_TPE_PRESET_ALL,\n+\t\t\t\t\t\t\t\t       fh->resource[res_type]\n+\t\t\t\t\t\t\t\t       .index + ii,\n+\t\t\t\t\t\t\t\t       0);\n+\t\t\t\t\t\thw_mod_tpe_ins_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t\t fh->resource[res_type]\n+\t\t\t\t\t\t\t\t\t .index + ii,\n+\t\t\t\t\t\t\t\t\t 1);\n+\t\t\t\t\t\thw_mod_tpe_rpl_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t       HW_TPE_PRESET_ALL,\n+\t\t\t\t\t\t\t\t       fh->resource[res_type]\n+\t\t\t\t\t\t\t\t       .index + ii,\n+\t\t\t\t\t\t\t\t       0);\n+\t\t\t\t\t\thw_mod_tpe_rpl_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t\t fh->resource[res_type]\n+\t\t\t\t\t\t\t\t\t .index + ii,\n+\t\t\t\t\t\t\t\t\t 1);\n+\t\t\t\t\t\thw_mod_tpe_rpl_ext_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t       HW_TPE_PRESET_ALL,\n+\t\t\t\t\t\t\t\t       fh->resource[res_type]\n+\t\t\t\t\t\t\t\t       .index + ii,\n+\t\t\t\t\t\t\t\t       0);\n+\t\t\t\t\t\thw_mod_tpe_rpl_ext_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t\t fh->resource[res_type]\n+\t\t\t\t\t\t\t\t\t .index + ii,\n+\t\t\t\t\t\t\t\t\t 1);\n+\t\t\t\t\t\thw_mod_tpe_cpy_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t       HW_TPE_PRESET_ALL,\n+\t\t\t\t\t\t\t\t       fh->resource[res_type]\n+\t\t\t\t\t\t\t\t       .index + ii,\n+\t\t\t\t\t\t\t\t       
0);\n+\t\t\t\t\t\thw_mod_tpe_cpy_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t\t fh->resource[res_type]\n+\t\t\t\t\t\t\t\t\t .index + ii,\n+\t\t\t\t\t\t\t\t\t 1);\n+\t\t\t\t\t\thw_mod_tpe_hfu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t       HW_TPE_PRESET_ALL,\n+\t\t\t\t\t\t\t\t       fh->resource[res_type]\n+\t\t\t\t\t\t\t\t       .index + ii,\n+\t\t\t\t\t\t\t\t       0);\n+\t\t\t\t\t\thw_mod_tpe_hfu_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t\t fh->resource[res_type]\n+\t\t\t\t\t\t\t\t\t .index + ii,\n+\t\t\t\t\t\t\t\t\t 1);\n+\t\t\t\t\t\thw_mod_tpe_csu_rcp_set(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t       HW_TPE_PRESET_ALL,\n+\t\t\t\t\t\t\t\t       fh->resource[res_type]\n+\t\t\t\t\t\t\t\t       .index + ii,\n+\t\t\t\t\t\t\t\t       0);\n+\t\t\t\t\t\thw_mod_tpe_csu_rcp_flush(&dev->ndev->be,\n+\t\t\t\t\t\t\t\t\t fh->resource[res_type]\n+\t\t\t\t\t\t\t\t\t .index + ii,\n+\t\t\t\t\t\t\t\t\t 1);\n+\t\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tcase RES_TPE_EXT:\n+\t\t\t\t\t\t/* Nothing needed */\n+\t\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tcase RES_TPE_RPL:\n+\t\t\t\t\t\t/* Nothing needed */\n+\t\t\t\t\t\tbreak;\n+\n+\t\t\t\t\tdefault:\n+\t\t\t\t\t\terr |= -1;\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\t}\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\t\tfree(fh->fd);\n+\t}\n+\n+\tif (err) {\n+\t\tNT_LOG(ERR, FILTER, \"FAILED removing flow: %p\\n\", fh);\n+\t\tflow_nic_set_error(ERR_REMOVE_FLOW_FAILED, error);\n+\t}\n+\n+\tfree(fh);\n+\n+#ifdef FLOW_DEBUG\n+\tdev->ndev->be.iface->set_debug_mode(dev->ndev->be.be_dev,\n+\t\t\t\t\t    FLOW_BACKEND_DEBUG_MODE_NONE);\n+#endif\n+\n+\treturn err;\n+}\n+\n+int flow_destroy_profile_inline(struct flow_eth_dev *dev,\n+\t\t\t\tstruct flow_handle *flow,\n+\t\t\t\tstruct flow_error *error)\n+{\n+\tint err = 0;\n+\n+\tflow_nic_set_error(ERR_SUCCESS, error);\n+\n+\tpthread_mutex_lock(&dev->ndev->mtx);\n+\tif (flow) {\n+\t\t/* Delete this flow */\n+\t\terr = flow_destroy_locked_profile_inline(dev, flow, error);\n+\t} else {\n+\t\t/* Delete all created flows from this eth device */\n+\t\tflow = dev->ndev->flow_base;\n+\n+\t\twhile (flow && !err) {\n+\t\t\tif (flow->dev == dev) {\n+\t\t\t\tstruct flow_handle *flow_next = flow->next;\n+\n+\t\t\t\terr = flow_destroy_locked_profile_inline(dev,\n+\t\t\t\t\t\t\t\t\t flow,\n+\t\t\t\t\t\t\t\t\t NULL);\n+\t\t\t\tflow = flow_next;\n+\t\t\t} else {\n+\t\t\t\tflow = flow->next;\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* Delete all created FLM flows from this eth device */\n+\t\tflow = dev->ndev->flow_base_flm;\n+\n+\t\twhile (flow && !err) {\n+\t\t\tif (flow->dev == dev) {\n+\t\t\t\tstruct flow_handle *flow_next = flow->next;\n+\n+\t\t\t\terr = flow_destroy_locked_profile_inline(dev,\n+\t\t\t\t\t\t\t\t\t flow,\n+\t\t\t\t\t\t\t\t\t NULL);\n+\t\t\t\tflow = flow_next;\n+\t\t\t} else {\n+\t\t\t\tflow = flow->next;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tpthread_mutex_unlock(&dev->ndev->mtx);\n+\n+\treturn err;\n+}\n+\n+int flow_flush_profile_inline(UNUSED struct flow_eth_dev *dev,\n+\t\t\t      struct flow_error *error)\n+{\n+\tNT_LOG(ERR, FILTER, \"ERROR: Not implemented yet\\n\");\n+\terror->type = FLOW_ERROR_GENERAL;\n+\terror->message = \"rte_flow_flush is not supported\";\n+\treturn -1;\n+}\n+\n+int flow_query_profile_inline(UNUSED struct flow_eth_dev *dev,\n+\t\t\t      UNUSED struct flow_handle *flow,\n+\t\t\t      UNUSED const struct flow_action *action,\n+\t\t\t      void **data, uint32_t *length,\n+\t\t\t      struct flow_error *error)\n+{\n+\tNT_LOG(ERR, FILTER, \"ERROR: Not implemented yet\\n\");\n+\n+\t*length = 0;\n+\t*data = NULL;\n+\terror->type = 
FLOW_ERROR_GENERAL;\n+\terror->message = \"rte_flow_query is not supported\";\n+\treturn -1;\n+}\n+\n+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,\n+\t\t\t\t      uint64_t size)\n+{\n+\tconst enum hw_flm_e fields[] = {\n+\t\tHW_FLM_STAT_FLOWS,\tHW_FLM_STAT_LRN_DONE,\n+\t\tHW_FLM_STAT_LRN_IGNORE, HW_FLM_STAT_LRN_FAIL,\n+\t\tHW_FLM_STAT_UNL_DONE,\tHW_FLM_STAT_UNL_IGNORE,\n+\t\tHW_FLM_STAT_AUL_DONE,\tHW_FLM_STAT_AUL_IGNORE,\n+\t\tHW_FLM_STAT_AUL_FAIL,\tHW_FLM_STAT_TUL_DONE,\n+\t\tHW_FLM_STAT_REL_DONE,\tHW_FLM_STAT_REL_IGNORE,\n+\t\tHW_FLM_STAT_PRB_DONE,\tHW_FLM_STAT_PRB_IGNORE,\n+\n+\t\tHW_FLM_STAT_STA_DONE,\tHW_FLM_STAT_INF_DONE,\n+\t\tHW_FLM_STAT_INF_SKIP,\tHW_FLM_STAT_PCK_HIT,\n+\t\tHW_FLM_STAT_PCK_MISS,\tHW_FLM_STAT_PCK_UNH,\n+\t\tHW_FLM_STAT_PCK_DIS,\tHW_FLM_STAT_CSH_HIT,\n+\t\tHW_FLM_STAT_CSH_MISS,\tHW_FLM_STAT_CSH_UNH,\n+\t\tHW_FLM_STAT_CUC_START,\tHW_FLM_STAT_CUC_MOVE,\n+\t};\n+\n+\tconst uint64_t fields_cnt = sizeof(fields) / sizeof(enum hw_flm_e);\n+\n+\tif (size < fields_cnt)\n+\t\treturn -1;\n+\n+\thw_mod_flm_stat_update(&ndev->be);\n+\n+\tfor (uint64_t i = 0; i < fields_cnt; ++i) {\n+\t\tuint32_t value = 0;\n+\n+\t\thw_mod_flm_stat_get(&ndev->be, fields[i], &value);\n+\t\tdata[i] = (fields[i] == HW_FLM_STAT_FLOWS) ? value :\n+\t\t\t  data[i] + value;\n+\t\tif (ndev->be.flm.ver < 18 &&\n+\t\t\t\tfields[i] == HW_FLM_STAT_PRB_IGNORE)\n+\t\t\tbreak;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int flow_set_mtu_inline(struct flow_eth_dev *dev, uint32_t port, uint16_t mtu)\n+{\n+\tif (port >= 255)\n+\t\treturn -1;\n+\n+\tint err = 0;\n+\tuint8_t ifr_mtu_recipe = convert_port_to_ifr_mtu_recipe(port);\n+\tstruct flow_nic_dev *ndev = dev->ndev;\n+\n+\terr |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,\n+\t\t\t\t\t  ifr_mtu_recipe, 1);\n+\terr |= hw_mod_tpe_rpp_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,\n+\t\t\t\t\t  ifr_mtu_recipe, mtu);\n+\terr |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_EN,\n+\t\t\t\t      ifr_mtu_recipe, 1);\n+\terr |= hw_mod_tpe_ifr_rcp_set(&ndev->be, HW_TPE_IFR_RCP_MTU,\n+\t\t\t\t      ifr_mtu_recipe, mtu);\n+\n+\tif (err == 0) {\n+\t\terr |= hw_mod_tpe_rpp_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe,\n+\t\t\t\t\t\t    1);\n+\t\terr |= hw_mod_tpe_ifr_rcp_flush(&ndev->be, ifr_mtu_recipe, 1);\n+\t}\n+\n+\treturn err;\n+}\ndiff --git a/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h\nnew file mode 100644\nindex 0000000000..330cc39db6\n--- /dev/null\n+++ b/drivers/net/ntnic/nthw/flow_api/flow_api_profile_inline.h\n@@ -0,0 +1,56 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef _FLOW_API_PROFILE_INLINE_H_\n+#define _FLOW_API_PROFILE_INLINE_H_\n+\n+#include \"stream_binary_flow_api.h\"\n+#include \"flow_api.h\"\n+\n+/*\n+ * Management\n+ */\n+\n+int done_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);\n+\n+int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev);\n+\n+/*\n+ * Flow functionality\n+ */\n+\n+int flow_destroy_locked_profile_inline(struct flow_eth_dev *dev,\n+\t\t\t\t       struct flow_handle *flow,\n+\t\t\t\t       struct flow_error *error);\n+\n+int flow_validate_profile_inline(struct flow_eth_dev *dev,\n+\t\t\t\t const struct flow_elem elem[],\n+\t\t\t\t const struct flow_action action[],\n+\t\t\t\t struct flow_error *error);\n+\n+struct flow_handle *flow_create_profile_inline(struct flow_eth_dev *dev,\n+\tconst struct flow_attr *attr,\n+\tconst struct 
flow_elem elem[], const struct flow_action action[],\n+\tstruct flow_error *error);\n+\n+int flow_destroy_profile_inline(struct flow_eth_dev *dev,\n+\t\t\t\tstruct flow_handle *flow,\n+\t\t\t\tstruct flow_error *error);\n+\n+int flow_flush_profile_inline(struct flow_eth_dev *dev,\n+\t\t\t      struct flow_error *error);\n+\n+int flow_query_profile_inline(struct flow_eth_dev *dev,\n+\t\t\t      struct flow_handle *flow,\n+\t\t\t      const struct flow_action *action, void **data,\n+\t\t\t      uint32_t *length, struct flow_error *error);\n+\n+/*\n+ * Stats\n+ */\n+\n+int flow_get_flm_stats_profile_inline(struct flow_nic_dev *ndev, uint64_t *data,\n+\t\t\t\t      uint64_t size);\n+\n+#endif /* _FLOW_API_PROFILE_INLINE_H_ */\ndiff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.c b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c\nnew file mode 100644\nindex 0000000000..1214b32666\n--- /dev/null\n+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.c\n@@ -0,0 +1,3205 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include \"nthw_drv.h\"\n+\n+#include \"flow_nthw_info.h\"\n+#include \"flow_nthw_ifr.h\"\n+#include \"flow_nthw_cat.h\"\n+#include \"flow_nthw_csu.h\"\n+#include \"flow_nthw_km.h\"\n+#include \"flow_nthw_flm.h\"\n+#include \"flow_nthw_hfu.h\"\n+#include \"flow_nthw_hsh.h\"\n+#include \"flow_nthw_hst.h\"\n+#include \"flow_nthw_qsl.h\"\n+#include \"flow_nthw_slc.h\"\n+#include \"flow_nthw_slc_lr.h\"\n+#include \"flow_nthw_pdb.h\"\n+#include \"flow_nthw_ioa.h\"\n+#include \"flow_nthw_rpp_lr.h\"\n+#include \"flow_nthw_roa.h\"\n+#include \"flow_nthw_rmc.h\"\n+#include \"flow_nthw_tx_cpy.h\"\n+#include \"flow_nthw_tx_ins.h\"\n+#include \"flow_nthw_tx_rpl.h\"\n+#include \"flow_backend.h\"\n+#include \"flow_api_backend.h\"\n+\n+#include <stdio.h> /* printf */\n+\n+#if !defined(MAX_PHYS_ADAPTERS)\n+#define MAX_PHYS_ADAPTERS (8)\n+#endif\n+\n+/*\n+ * Binary Flow API backend implementation into ntservice driver\n+ *\n+ * General note on this backend implementation:\n+ * Maybe use shadow class to combine multiple writes. 
However, this backend is only for dev/testing\n+ */\n+\n+static struct backend_dev_s {\n+\tuint8_t adapter_no;\n+\tenum debug_mode_e dmode;\n+\tstruct info_nthw *p_info_nthw;\n+\tstruct cat_nthw *p_cat_nthw;\n+\tstruct km_nthw *p_km_nthw;\n+\tstruct flm_nthw *p_flm_nthw;\n+\tstruct hsh_nthw *p_hsh_nthw;\n+\tstruct hst_nthw *p_hst_nthw;\n+\tstruct qsl_nthw *p_qsl_nthw;\n+\tstruct slc_nthw *p_slc_nthw;\n+\tstruct slc_lr_nthw *p_slc_lr_nthw;\n+\tstruct pdb_nthw *p_pdb_nthw;\n+\tstruct ioa_nthw *p_ioa_nthw;\n+\tstruct roa_nthw *p_roa_nthw;\n+\tstruct rmc_nthw *p_rmc_nthw;\n+\tstruct hfu_nthw *p_hfu_nthw; /* TPE module */\n+\tstruct rpp_lr_nthw *p_rpp_lr_nthw; /* TPE module */\n+\tstruct tx_cpy_nthw *p_tx_cpy_nthw; /* TPE module */\n+\tstruct tx_ins_nthw *p_tx_ins_nthw; /* TPE module */\n+\tstruct tx_rpl_nthw *p_tx_rpl_nthw; /* TPE module */\n+\tstruct csu_nthw *p_csu_nthw; /* TPE module */\n+\tstruct ifr_nthw *p_ifr_nthw; /* TPE module */\n+} be_devs[MAX_PHYS_ADAPTERS];\n+\n+#define _CHECK_DEBUG_ON(be, mod, inst)                                 \\\n+\tint __debug__ = 0;                                             \\\n+\tif (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \\\n+\t\tdo {                                                   \\\n+\t\t\tmod##_nthw_set_debug_mode(inst, 0xFF);            \\\n+\t\t\t__debug__ = 1;                                 \\\n+\t} while (0)\n+\n+#define _CHECK_DEBUG_OFF(mod, inst)                      \\\n+\tdo {                                             \\\n+\t\tif (__debug__)                           \\\n+\t\t\tmod##_nthw_set_debug_mode(inst, 0); \\\n+\t} while (0)\n+\n+static int set_debug_mode(void *be_dev, enum debug_mode_e mode)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\tbe->dmode = mode;\n+\treturn 0;\n+}\n+\n+/*\n+ *  *****************  INFO  *******************\n+ */\n+\n+static int get_nb_phy_ports(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_phy_ports(be->p_info_nthw);\n+}\n+\n+static int get_nb_rx_ports(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_rx_ports(be->p_info_nthw);\n+}\n+\n+static int get_ltx_avail(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_ltx_avail(be->p_info_nthw);\n+}\n+\n+static int get_nb_cat_funcs(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_cat_funcs(be->p_info_nthw);\n+}\n+\n+static int get_nb_categories(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_categories(be->p_info_nthw);\n+}\n+\n+static int get_nb_cat_km_if_cnt(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);\n+}\n+\n+static int get_nb_cat_km_if_m0(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);\n+}\n+\n+static int get_nb_cat_km_if_m1(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);\n+}\n+\n+static int get_nb_queues(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_queues(be->p_info_nthw);\n+}\n+\n+static int get_nb_km_flow_types(void *be_dev)\n+{\n+\tstruct 
backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_km_flow_types(be->p_info_nthw);\n+}\n+\n+static int get_nb_pm_ext(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_pm_ext(be->p_info_nthw);\n+}\n+\n+static int get_nb_len(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_len(be->p_info_nthw);\n+}\n+\n+static int get_kcc_size(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_kcc_size(be->p_info_nthw);\n+}\n+\n+static int get_kcc_banks(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_kcc_banks(be->p_info_nthw);\n+}\n+\n+static int get_nb_km_categories(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_km_categories(be->p_info_nthw);\n+}\n+\n+static int get_nb_km_cam_banks(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_km_cam_banks(be->p_info_nthw);\n+}\n+\n+static int get_nb_km_cam_record_words(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);\n+}\n+\n+static int get_nb_km_cam_records(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_km_cam_records(be->p_info_nthw);\n+}\n+\n+static int get_nb_km_tcam_banks(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);\n+}\n+\n+static int get_nb_km_tcam_bank_width(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);\n+}\n+\n+static int get_nb_flm_categories(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_flm_categories(be->p_info_nthw);\n+}\n+\n+static int get_nb_flm_size_mb(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_flm_size_mb(be->p_info_nthw);\n+}\n+\n+static int get_nb_flm_entry_size(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_flm_entry_size(be->p_info_nthw);\n+}\n+\n+static int get_nb_flm_variant(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_flm_variant(be->p_info_nthw);\n+}\n+\n+static int get_nb_flm_prios(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_flm_prios(be->p_info_nthw);\n+}\n+\n+static int get_nb_flm_pst_profiles(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);\n+}\n+\n+static int get_nb_hst_categories(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_hst_categories(be->p_info_nthw);\n+}\n+\n+static int get_nb_qsl_categories(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_qsl_categories(be->p_info_nthw);\n+}\n+\n+static int get_nb_qsl_qst_entries(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn 
info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);\n+}\n+\n+static int get_nb_pdb_categories(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_pdb_categories(be->p_info_nthw);\n+}\n+\n+static int get_nb_ioa_categories(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_ioa_categories(be->p_info_nthw);\n+}\n+\n+static int get_nb_roa_categories(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_roa_categories(be->p_info_nthw);\n+}\n+\n+static int get_nb_tpe_categories(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_tpe_categories(be->p_info_nthw);\n+}\n+\n+static int get_nb_tx_cpy_writers(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);\n+}\n+\n+static int get_nb_tx_cpy_mask_mem(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);\n+}\n+\n+static int get_nb_tx_rpl_depth(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);\n+}\n+\n+static int get_nb_tx_rpl_ext_categories(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);\n+}\n+\n+static int get_nb_tpe_ifr_categories(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);\n+}\n+\n+/*\n+ *  *****************  CAT  *******************\n+ */\n+\n+static bool cat_get_present(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn be->p_cat_nthw != NULL;\n+}\n+\n+static uint32_t cat_get_version(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn (uint32_t)((module_get_major_version(be->p_cat_nthw->m_cat) << 16) |\n+\t\t\t  (module_get_minor_version(be->p_cat_nthw->m_cat) &\n+\t\t\t   0xffff));\n+}\n+\n+static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat,\n+\t\t\t int cat_func, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 18) {\n+\t\tr(be->p_cat_nthw, 1U);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_cfn_select(be->p_cat_nthw, cat_func);\n+\t\t\tcat_nthw_cfn_enable(be->p_cat_nthw,\n+\t\t\t\t\t  cat->v18.cfn[cat_func].enable);\n+\t\t\tcat_nthw_cfn_inv(be->p_cat_nthw,\n+\t\t\t\t       cat->v18.cfn[cat_func].inv);\n+\t\t\tcat_nthw_cfn_ptc_inv(be->p_cat_nthw,\n+\t\t\t\t\t  cat->v18.cfn[cat_func].ptc_inv);\n+\t\t\tcat_nthw_cfn_ptc_isl(be->p_cat_nthw,\n+\t\t\t\t\t  cat->v18.cfn[cat_func].ptc_isl);\n+\t\t\tcat_nthw_cfn_ptc_cfp(be->p_cat_nthw,\n+\t\t\t\t\t  cat->v18.cfn[cat_func].ptc_cfp);\n+\t\t\tcat_nthw_cfn_ptc_mac(be->p_cat_nthw,\n+\t\t\t\t\t  cat->v18.cfn[cat_func].ptc_mac);\n+\t\t\tcat_nthw_cfn_ptc_l2(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.cfn[cat_func].ptc_l2);\n+\t\t\tcat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v18.cfn[cat_func].ptc_vntag);\n+\t\t\tcat_nthw_cfn_ptc_vlan(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v18.cfn[cat_func].ptc_vlan);\n+\t\t\tcat_nthw_cfn_ptc_mpls(be->p_cat_nthw,\n+\t\t\t\t\t   
cat->v18.cfn[cat_func].ptc_mpls);\n+\t\t\tcat_nthw_cfn_ptc_l3(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.cfn[cat_func].ptc_l3);\n+\t\t\tcat_nthw_cfn_ptc_frag(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v18.cfn[cat_func].ptc_frag);\n+\t\t\tcat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v18.cfn[cat_func].ptc_ip_prot);\n+\t\t\tcat_nthw_cfn_ptc_l4(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.cfn[cat_func].ptc_l4);\n+\t\t\tcat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v18.cfn[cat_func].ptc_tunnel);\n+\t\t\tcat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v18.cfn[cat_func].ptc_tnl_l2);\n+\t\t\tcat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,\n+\t\t\t\t\t\t  cat->v18.cfn[cat_func].ptc_tnl_vlan);\n+\t\t\tcat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,\n+\t\t\t\t\t\t  cat->v18.cfn[cat_func].ptc_tnl_mpls);\n+\t\t\tcat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v18.cfn[cat_func].ptc_tnl_l3);\n+\t\t\tcat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,\n+\t\t\t\t\t\t  cat->v18.cfn[cat_func].ptc_tnl_frag);\n+\t\t\tcat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,\n+\t\t\t\t\t\t     cat->v18.cfn[cat_func].ptc_tnl_ip_prot);\n+\t\t\tcat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v18.cfn[cat_func].ptc_tnl_l4);\n+\n+\t\t\tcat_nthw_cfn_err_inv(be->p_cat_nthw,\n+\t\t\t\t\t  cat->v18.cfn[cat_func].err_inv);\n+\t\t\tcat_nthw_cfn_err_cv(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.cfn[cat_func].err_cv);\n+\t\t\tcat_nthw_cfn_err_fcs(be->p_cat_nthw,\n+\t\t\t\t\t  cat->v18.cfn[cat_func].err_fcs);\n+\t\t\tcat_nthw_cfn_err_trunc(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v18.cfn[cat_func].err_trunc);\n+\t\t\tcat_nthw_cfn_err_l3_cs(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v18.cfn[cat_func].err_l3_cs);\n+\t\t\tcat_nthw_cfn_err_l4_cs(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v18.cfn[cat_func].err_l4_cs);\n+\n+\t\t\tcat_nthw_cfn_mac_port(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v18.cfn[cat_func].mac_port);\n+\n+\t\t\tcat_nthw_cfn_pm_cmp(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.cfn[cat_func].pm_cmp);\n+\t\t\tcat_nthw_cfn_pm_dct(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.cfn[cat_func].pm_dct);\n+\t\t\tcat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v18.cfn[cat_func].pm_ext_inv);\n+\t\t\tcat_nthw_cfn_pm_cmb(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.cfn[cat_func].pm_cmb);\n+\t\t\tcat_nthw_cfn_pm_and_inv(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v18.cfn[cat_func].pm_and_inv);\n+\t\t\tcat_nthw_cfn_pm_or_inv(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v18.cfn[cat_func].pm_or_inv);\n+\t\t\tcat_nthw_cfn_pm_inv(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.cfn[cat_func].pm_inv);\n+\n+\t\t\tcat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);\n+\t\t\tcat_nthw_cfn_lc_inv(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.cfn[cat_func].lc_inv);\n+\t\t\tcat_nthw_cfn_km0_or(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.cfn[cat_func].km_or);\n+\t\t\tcat_nthw_cfn_flush(be->p_cat_nthw);\n+\t\t\tcat_func++;\n+\t\t}\n+\t} else if (cat->ver == 21 || cat->ver == 22) {\n+\t\tr(be->p_cat_nthw, 1U);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_cfn_select(be->p_cat_nthw, cat_func);\n+\t\t\tcat_nthw_cfn_enable(be->p_cat_nthw,\n+\t\t\t\t\t  cat->v21.cfn[cat_func].enable);\n+\t\t\tcat_nthw_cfn_inv(be->p_cat_nthw,\n+\t\t\t\t       cat->v21.cfn[cat_func].inv);\n+\t\t\tcat_nthw_cfn_ptc_inv(be->p_cat_nthw,\n+\t\t\t\t\t  cat->v21.cfn[cat_func].ptc_inv);\n+\t\t\tcat_nthw_cfn_ptc_isl(be->p_cat_nthw,\n+\t\t\t\t\t  cat->v21.cfn[cat_func].ptc_isl);\n+\t\t\tcat_nthw_cfn_ptc_cfp(be->p_cat_nthw,\n+\t\t\t\t\t  
cat->v21.cfn[cat_func].ptc_cfp);\n+\t\t\tcat_nthw_cfn_ptc_mac(be->p_cat_nthw,\n+\t\t\t\t\t  cat->v21.cfn[cat_func].ptc_mac);\n+\t\t\tcat_nthw_cfn_ptc_l2(be->p_cat_nthw,\n+\t\t\t\t\t cat->v21.cfn[cat_func].ptc_l2);\n+\t\t\tcat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v21.cfn[cat_func].ptc_vntag);\n+\t\t\tcat_nthw_cfn_ptc_vlan(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v21.cfn[cat_func].ptc_vlan);\n+\t\t\tcat_nthw_cfn_ptc_mpls(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v21.cfn[cat_func].ptc_mpls);\n+\t\t\tcat_nthw_cfn_ptc_l3(be->p_cat_nthw,\n+\t\t\t\t\t cat->v21.cfn[cat_func].ptc_l3);\n+\t\t\tcat_nthw_cfn_ptc_frag(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v21.cfn[cat_func].ptc_frag);\n+\t\t\tcat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,\n+\t\t\t\tcat->v21.cfn[cat_func].ptc_ip_prot);\n+\t\t\tcat_nthw_cfn_ptc_l4(be->p_cat_nthw,\n+\t\t\t\t\t cat->v21.cfn[cat_func].ptc_l4);\n+\t\t\tcat_nthw_cfn_ptc_tunnel(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v21.cfn[cat_func].ptc_tunnel);\n+\t\t\tcat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v21.cfn[cat_func].ptc_tnl_l2);\n+\t\t\tcat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,\n+\t\t\t\t\t\t  cat->v21.cfn[cat_func].ptc_tnl_vlan);\n+\t\t\tcat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,\n+\t\t\t\t\t\t  cat->v21.cfn[cat_func].ptc_tnl_mpls);\n+\t\t\tcat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v21.cfn[cat_func].ptc_tnl_l3);\n+\t\t\tcat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,\n+\t\t\t\t\t\t  cat->v21.cfn[cat_func].ptc_tnl_frag);\n+\t\t\tcat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,\n+\t\t\t\t\t\t     cat->v21.cfn[cat_func].ptc_tnl_ip_prot);\n+\t\t\tcat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v21.cfn[cat_func].ptc_tnl_l4);\n+\n+\t\t\tcat_nthw_cfn_err_inv(be->p_cat_nthw,\n+\t\t\t\t\t  cat->v21.cfn[cat_func].err_inv);\n+\t\t\tcat_nthw_cfn_err_cv(be->p_cat_nthw,\n+\t\t\t\t\t cat->v21.cfn[cat_func].err_cv);\n+\t\t\tcat_nthw_cfn_err_fcs(be->p_cat_nthw,\n+\t\t\t\t\t  cat->v21.cfn[cat_func].err_fcs);\n+\t\t\tcat_nthw_cfn_err_trunc(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v21.cfn[cat_func].err_trunc);\n+\t\t\tcat_nthw_cfn_err_l3_cs(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v21.cfn[cat_func].err_l3_cs);\n+\t\t\tcat_nthw_cfn_err_l4_cs(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v21.cfn[cat_func].err_l4_cs);\n+\t\t\tcat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,\n+\t\t\t\t\t\t   cat->v21.cfn[cat_func].err_tnl_l3_cs);\n+\t\t\tcat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,\n+\t\t\t\t\t\t   cat->v21.cfn[cat_func].err_tnl_l4_cs);\n+\t\t\tcat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v21.cfn[cat_func].err_ttl_exp);\n+\t\t\tcat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,\n+\t\t\t\t\t\t     cat->v21.cfn[cat_func].err_tnl_ttl_exp);\n+\n+\t\t\tcat_nthw_cfn_mac_port(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v21.cfn[cat_func].mac_port);\n+\n+\t\t\tcat_nthw_cfn_pm_cmp(be->p_cat_nthw,\n+\t\t\t\t\t cat->v21.cfn[cat_func].pm_cmp);\n+\t\t\tcat_nthw_cfn_pm_dct(be->p_cat_nthw,\n+\t\t\t\t\t cat->v21.cfn[cat_func].pm_dct);\n+\t\t\tcat_nthw_cfn_pm_ext_inv(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v21.cfn[cat_func].pm_ext_inv);\n+\t\t\tcat_nthw_cfn_pm_cmb(be->p_cat_nthw,\n+\t\t\t\t\t cat->v21.cfn[cat_func].pm_cmb);\n+\t\t\tcat_nthw_cfn_pm_and_inv(be->p_cat_nthw,\n+\t\t\t\t\t    cat->v21.cfn[cat_func].pm_and_inv);\n+\t\t\tcat_nthw_cfn_pm_or_inv(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v21.cfn[cat_func].pm_or_inv);\n+\t\t\tcat_nthw_cfn_pm_inv(be->p_cat_nthw,\n+\t\t\t\t\t cat->v21.cfn[cat_func].pm_inv);\n+\n+\t\t\tcat_nthw_cfn_lc(be->p_cat_nthw, 
cat->v21.cfn[cat_func].lc);\n+\t\t\tcat_nthw_cfn_lc_inv(be->p_cat_nthw,\n+\t\t\t\t\t cat->v21.cfn[cat_func].lc_inv);\n+\t\t\tcat_nthw_cfn_km0_or(be->p_cat_nthw,\n+\t\t\t\t\t cat->v21.cfn[cat_func].km0_or);\n+\t\t\tif (be->p_cat_nthw->m_km_if_cnt > 1) {\n+\t\t\t\tcat_nthw_cfn_km1_or(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v21.cfn[cat_func].km1_or);\n+\t\t\t}\n+\t\t\tcat_nthw_cfn_flush(be->p_cat_nthw);\n+\t\t\tcat_func++;\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat,\n+\t\t\t int km_if_idx, int index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 18) {\n+\t\tcat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_kce_select(be->p_cat_nthw, 0, index + i);\n+\t\t\tcat_nthw_kce_enable(be->p_cat_nthw, 0,\n+\t\t\t\t\t  cat->v18.kce[index + i].enable_bm);\n+\t\t\tcat_nthw_kce_flush(be->p_cat_nthw, 0);\n+\t\t}\n+\t} else if (cat->ver == 21 || cat->ver == 22) {\n+\t\tcat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);\n+\t\t\tcat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,\n+\t\t\t\t\t    cat->v21.kce[index + i].enable_bm[km_if_idx]);\n+\t\t\tcat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat,\n+\t\t\t int km_if_idx, int cat_func, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 18) {\n+\t\tcat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);\n+\t\t\tcat_nthw_kcs_category(be->p_cat_nthw, 0,\n+\t\t\t\t\t    cat->v18.kcs[cat_func].category);\n+\t\t\tcat_nthw_kcs_flush(be->p_cat_nthw, 0);\n+\t\t\tcat_func++;\n+\t\t}\n+\t} else if (cat->ver == 21 || cat->ver == 22) {\n+\t\tcat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);\n+\t\t\tcat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,\n+\t\t\t\t\t      cat->v21.kcs[cat_func].category[km_if_idx]);\n+\t\t\tcat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);\n+\t\t\tcat_func++;\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat,\n+\t\t\t int km_if_idx, int index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 18) {\n+\t\tcat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_fte_select(be->p_cat_nthw, 0, index + i);\n+\t\t\tcat_nthw_fte_enable(be->p_cat_nthw, 0,\n+\t\t\t\t\t  cat->v18.fte[index + i].enable_bm);\n+\t\t\tcat_nthw_fte_flush(be->p_cat_nthw, 0);\n+\t\t}\n+\t} else if (cat->ver == 21 || cat->ver == 22) {\n+\t\tcat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);\n+\t\t\tcat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,\n+\t\t\t\t\t    cat->v21.fte[index + i].enable_bm[km_if_idx]);\n+\t\t\tcat_nthw_fte_flush(be->p_cat_nthw, 
km_if_idx);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat,\n+\t\t\t int cat_func, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 18 || cat->ver == 21) {\n+\t\tcat_nthw_cte_cnt(be->p_cat_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_cte_select(be->p_cat_nthw, cat_func);\n+\t\t\tcat_nthw_cte_enable_col(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v18.cte[cat_func].b.col);\n+\t\t\tcat_nthw_cte_enable_cor(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v18.cte[cat_func].b.cor);\n+\t\t\tcat_nthw_cte_enable_hsh(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v18.cte[cat_func].b.hsh);\n+\t\t\tcat_nthw_cte_enable_qsl(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v18.cte[cat_func].b.qsl);\n+\t\t\tcat_nthw_cte_enable_ipf(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v18.cte[cat_func].b.ipf);\n+\t\t\tcat_nthw_cte_enable_slc(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v18.cte[cat_func].b.slc);\n+\t\t\tcat_nthw_cte_enable_pdb(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v18.cte[cat_func].b.pdb);\n+\t\t\tcat_nthw_cte_enable_msk(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v18.cte[cat_func].b.msk);\n+\t\t\tcat_nthw_cte_enable_hst(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v18.cte[cat_func].b.hst);\n+\t\t\tcat_nthw_cte_enable_epp(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v18.cte[cat_func].b.epp);\n+\t\t\tcat_nthw_cte_enable_tpe(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v18.cte[cat_func].b.tpe);\n+\n+\t\t\tcat_nthw_cte_flush(be->p_cat_nthw);\n+\t\t\tcat_func++;\n+\t\t}\n+\t} else if (cat->ver == 22) {\n+\t\tcat_nthw_cte_cnt(be->p_cat_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_cte_select(be->p_cat_nthw, cat_func);\n+\t\t\tcat_nthw_cte_enable_col(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v22.cte[cat_func].b.col);\n+\t\t\tcat_nthw_cte_enable_cor(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v22.cte[cat_func].b.cor);\n+\t\t\tcat_nthw_cte_enable_hsh(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v22.cte[cat_func].b.hsh);\n+\t\t\tcat_nthw_cte_enable_qsl(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v22.cte[cat_func].b.qsl);\n+\t\t\tcat_nthw_cte_enable_ipf(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v22.cte[cat_func].b.ipf);\n+\t\t\tcat_nthw_cte_enable_slc(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v22.cte[cat_func].b.slc);\n+\t\t\tcat_nthw_cte_enable_pdb(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v22.cte[cat_func].b.pdb);\n+\t\t\tcat_nthw_cte_enable_msk(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v22.cte[cat_func].b.msk);\n+\t\t\tcat_nthw_cte_enable_hst(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v22.cte[cat_func].b.hst);\n+\t\t\tcat_nthw_cte_enable_epp(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v22.cte[cat_func].b.epp);\n+\t\t\tcat_nthw_cte_enable_tpe(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v22.cte[cat_func].b.tpe);\n+\t\t\tcat_nthw_cte_enable_tpe(be->p_cat_nthw,\n+\t\t\t\t\t     cat->v22.cte[cat_func].b.rrb);\n+\n+\t\t\tcat_nthw_cte_flush(be->p_cat_nthw);\n+\t\t\tcat_func++;\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index,\n+\t\t\t int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {\n+\t\tcat_nthw_cts_cnt(be->p_cat_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_cts_select(be->p_cat_nthw, index + 
i);\n+\t\t\tcat_nthw_cts_cat_a(be->p_cat_nthw,\n+\t\t\t\t\tcat->v18.cts[index + i].cat_a);\n+\t\t\tcat_nthw_cts_cat_b(be->p_cat_nthw,\n+\t\t\t\t\tcat->v18.cts[index + i].cat_b);\n+\t\t\tcat_nthw_cts_flush(be->p_cat_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat,\n+\t\t\t int cat_func, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {\n+\t\tcat_nthw_cot_cnt(be->p_cat_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_cot_select(be->p_cat_nthw, cat_func + i);\n+\t\t\tcat_nthw_cot_color(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.cot[cat_func + i].color);\n+\t\t\tcat_nthw_cot_km(be->p_cat_nthw,\n+\t\t\t\t      cat->v18.cot[cat_func + i].km);\n+\t\t\tcat_nthw_cot_flush(be->p_cat_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index,\n+\t\t\t int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {\n+\t\tcat_nthw_cct_cnt(be->p_cat_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_cct_select(be->p_cat_nthw, index + i);\n+\t\t\tcat_nthw_cct_color(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.cct[index + i].color);\n+\t\t\tcat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);\n+\t\t\tcat_nthw_cct_flush(be->p_cat_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat,\n+\t\t\t int ext_index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {\n+\t\tcat_nthw_exo_cnt(be->p_cat_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_exo_select(be->p_cat_nthw, ext_index + i);\n+\t\t\tcat_nthw_exo_dyn(be->p_cat_nthw,\n+\t\t\t\t       cat->v18.exo[ext_index + i].dyn);\n+\t\t\tcat_nthw_exo_ofs(be->p_cat_nthw,\n+\t\t\t\t       cat->v18.exo[ext_index + i].ofs);\n+\t\t\tcat_nthw_exo_flush(be->p_cat_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index,\n+\t\t\t int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {\n+\t\tcat_nthw_rck_cnt(be->p_cat_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_rck_select(be->p_cat_nthw, index + i);\n+\t\t\tcat_nthw_rck_data(be->p_cat_nthw,\n+\t\t\t\t\tcat->v18.rck[index + i].rck_data);\n+\t\t\tcat_nthw_rck_flush(be->p_cat_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+static int cat_len_flush(void *be_dev, const struct cat_func_s *cat,\n+\t\t\t int len_index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {\n+\t\tcat_nthw_len_cnt(be->p_cat_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_len_select(be->p_cat_nthw, len_index + 
i);\n+\t\t\tcat_nthw_len_lower(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.len[len_index + i].lower);\n+\t\t\tcat_nthw_len_upper(be->p_cat_nthw,\n+\t\t\t\t\t cat->v18.len[len_index + i].upper);\n+\t\t\tcat_nthw_len_dyn1(be->p_cat_nthw,\n+\t\t\t\t\tcat->v18.len[len_index + i].dyn1);\n+\t\t\tcat_nthw_len_dyn2(be->p_cat_nthw,\n+\t\t\t\t\tcat->v18.len[len_index + i].dyn2);\n+\t\t\tcat_nthw_len_inv(be->p_cat_nthw,\n+\t\t\t\t       cat->v18.len[len_index + i].inv);\n+\t\t\tcat_nthw_len_flush(be->p_cat_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat,\n+\t\t\t int len_index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 18 || cat->ver == 21 || cat->ver == 22) {\n+\t\tcat_nthw_kcc_cnt(be->p_cat_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_kcc_select(be->p_cat_nthw, len_index + i);\n+\t\t\tcat_nthw_kcc_key(be->p_cat_nthw,\n+\t\t\t\t       cat->v18.kcc_cam[len_index + i].key);\n+\t\t\tcat_nthw_kcc_category(be->p_cat_nthw,\n+\t\t\t\t\t      cat->v18.kcc_cam[len_index + i].category);\n+\t\t\tcat_nthw_kcc_id(be->p_cat_nthw,\n+\t\t\t\t      cat->v18.kcc_cam[len_index + i].id);\n+\t\t\tcat_nthw_kcc_flush(be->p_cat_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+static int cat_cce_flush(void *be_dev, const struct cat_func_s *cat,\n+\t\t\t int len_index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 22) {\n+\t\tcat_nthw_cce_cnt(be->p_cat_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_cce_select(be->p_cat_nthw, len_index + i);\n+\t\t\tcat_nthw_cce_data_imm(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v22.cce[len_index + i].imm);\n+\t\t\tcat_nthw_cce_data_ind(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v22.cce[len_index + i].ind);\n+\t\t\tcat_nthw_cce_flush(be->p_cat_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+static int cat_ccs_flush(void *be_dev, const struct cat_func_s *cat,\n+\t\t\t int len_index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);\n+\n+\tif (cat->ver == 22) {\n+\t\tcat_nthw_ccs_cnt(be->p_cat_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcat_nthw_ccs_select(be->p_cat_nthw, len_index + i);\n+\t\t\tcat_nthw_ccs_data_cor_en(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v22.ccs[len_index + i].cor_en);\n+\t\t\tcat_nthw_ccs_data_cor(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v22.ccs[len_index + i].cor);\n+\t\t\tcat_nthw_ccs_data_hsh_en(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v22.ccs[len_index + i].hsh_en);\n+\t\t\tcat_nthw_ccs_data_hsh(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v22.ccs[len_index + i].hsh);\n+\t\t\tcat_nthw_ccs_data_qsl_en(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v22.ccs[len_index + i].qsl_en);\n+\t\t\tcat_nthw_ccs_data_qsl(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v22.ccs[len_index + i].qsl);\n+\t\t\tcat_nthw_ccs_data_ipf_en(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v22.ccs[len_index + i].ipf_en);\n+\t\t\tcat_nthw_ccs_data_ipf(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v22.ccs[len_index + i].ipf);\n+\t\t\tcat_nthw_ccs_data_slc_en(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v22.ccs[len_index + i].slc_en);\n+\t\t\tcat_nthw_ccs_data_slc(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v22.ccs[len_index + 
i].slc);\n+\t\t\tcat_nthw_ccs_data_pdb_en(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v22.ccs[len_index + i].pdb_en);\n+\t\t\tcat_nthw_ccs_data_pdb(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v22.ccs[len_index + i].pdb);\n+\t\t\tcat_nthw_ccs_data_msk_en(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v22.ccs[len_index + i].msk_en);\n+\t\t\tcat_nthw_ccs_data_msk(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v22.ccs[len_index + i].msk);\n+\t\t\tcat_nthw_ccs_data_hst_en(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v22.ccs[len_index + i].hst_en);\n+\t\t\tcat_nthw_ccs_data_hst(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v22.ccs[len_index + i].hst);\n+\t\t\tcat_nthw_ccs_data_epp_en(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v22.ccs[len_index + i].epp_en);\n+\t\t\tcat_nthw_ccs_data_epp(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v22.ccs[len_index + i].epp);\n+\t\t\tcat_nthw_ccs_data_tpe_en(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v22.ccs[len_index + i].tpe_en);\n+\t\t\tcat_nthw_ccs_data_tpe(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v22.ccs[len_index + i].tpe);\n+\t\t\tcat_nthw_ccs_data_rrb_en(be->p_cat_nthw,\n+\t\t\t\t\t\t cat->v22.ccs[len_index + i].rrb_en);\n+\t\t\tcat_nthw_ccs_data_rrb(be->p_cat_nthw,\n+\t\t\t\t\t   cat->v22.ccs[len_index + i].rrb);\n+\t\t\tcat_nthw_ccs_data_sb0_type(be->p_cat_nthw,\n+\t\t\t\t\t\t   cat->v22.ccs[len_index + i].sb0_type);\n+\t\t\tcat_nthw_ccs_data_sb0_data(be->p_cat_nthw,\n+\t\t\t\t\t\t   cat->v22.ccs[len_index + i].sb0_data);\n+\t\t\tcat_nthw_ccs_data_sb1_type(be->p_cat_nthw,\n+\t\t\t\t\t\t   cat->v22.ccs[len_index + i].sb1_type);\n+\t\t\tcat_nthw_ccs_data_sb1_data(be->p_cat_nthw,\n+\t\t\t\t\t\t   cat->v22.ccs[len_index + i].sb1_data);\n+\t\t\tcat_nthw_ccs_data_sb2_type(be->p_cat_nthw,\n+\t\t\t\t\t\t   cat->v22.ccs[len_index + i].sb2_type);\n+\t\t\tcat_nthw_ccs_data_sb2_data(be->p_cat_nthw,\n+\t\t\t\t\t\t   cat->v22.ccs[len_index + i].sb2_data);\n+\t\t\tcat_nthw_ccs_flush(be->p_cat_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(cat, be->p_cat_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ *  *****************  KM  *******************\n+ */\n+\n+static bool km_get_present(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn be->p_km_nthw != NULL;\n+}\n+\n+static uint32_t km_get_version(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn (uint32_t)((module_get_major_version(be->p_km_nthw->m_km) << 16) |\n+\t\t\t  (module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));\n+}\n+\n+static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category,\n+\t\t\tint cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, km, be->p_km_nthw);\n+\n+\tif (km->ver == 7) {\n+\t\tkm_nthw_rcp_cnt(be->p_km_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tkm_nthw_rcp_select(be->p_km_nthw, category + i);\n+\t\t\tkm_nthw_rcp_qw0_dyn(be->p_km_nthw,\n+\t\t\t\t\t km->v7.rcp[category + i].qw0_dyn);\n+\t\t\tkm_nthw_rcp_qw0_ofs(be->p_km_nthw,\n+\t\t\t\t\t km->v7.rcp[category + i].qw0_ofs);\n+\t\t\tkm_nthw_rcp_qw0_sel_a(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].qw0_sel_a);\n+\t\t\tkm_nthw_rcp_qw0_sel_b(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].qw0_sel_b);\n+\t\t\tkm_nthw_rcp_qw4_dyn(be->p_km_nthw,\n+\t\t\t\t\t km->v7.rcp[category + i].qw4_dyn);\n+\t\t\tkm_nthw_rcp_qw4_ofs(be->p_km_nthw,\n+\t\t\t\t\t km->v7.rcp[category + i].qw4_ofs);\n+\t\t\tkm_nthw_rcp_qw4_sel_a(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].qw4_sel_a);\n+\t\t\tkm_nthw_rcp_qw4_sel_b(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + 
i].qw4_sel_b);\n+\t\t\tkm_nthw_rcp_dw8_dyn(be->p_km_nthw,\n+\t\t\t\t\t km->v7.rcp[category + i].dw8_dyn);\n+\t\t\tkm_nthw_rcp_dw8_ofs(be->p_km_nthw,\n+\t\t\t\t\t km->v7.rcp[category + i].dw8_ofs);\n+\t\t\tkm_nthw_rcp_dw8_sel_a(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].dw8_sel_a);\n+\t\t\tkm_nthw_rcp_dw8_sel_b(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].dw8_sel_b);\n+\t\t\tkm_nthw_rcp_dw10_dyn(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].dw10_dyn);\n+\t\t\tkm_nthw_rcp_dw10_ofs(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].dw10_ofs);\n+\t\t\tkm_nthw_rcp_dw10_sel_a(be->p_km_nthw,\n+\t\t\t\t\t   km->v7.rcp[category + i].dw10_sel_a);\n+\t\t\tkm_nthw_rcp_dw10_sel_b(be->p_km_nthw,\n+\t\t\t\t\t   km->v7.rcp[category + i].dw10_sel_b);\n+\t\t\tkm_nthw_rcp_swx_cch(be->p_km_nthw,\n+\t\t\t\t\t km->v7.rcp[category + i].swx_cch);\n+\t\t\tkm_nthw_rcp_swx_sel_a(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].swx_sel_a);\n+\t\t\tkm_nthw_rcp_swx_sel_b(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].swx_sel_b);\n+\t\t\tkm_nthw_rcp_mask_d_a(be->p_km_nthw,\n+\t\t\t\t\t km->v7.rcp[category + i].mask_d_a);\n+\t\t\tkm_nthw_rcp_mask_b(be->p_km_nthw,\n+\t\t\t\t\tkm->v7.rcp[category + i].mask_b);\n+\t\t\tkm_nthw_rcp_dual(be->p_km_nthw,\n+\t\t\t\t       km->v7.rcp[category + i].dual);\n+\t\t\tkm_nthw_rcp_paired(be->p_km_nthw,\n+\t\t\t\t\t km->v7.rcp[category + i].paired);\n+\t\t\tkm_nthw_rcp_el_a(be->p_km_nthw,\n+\t\t\t\t      km->v7.rcp[category + i].el_a);\n+\t\t\tkm_nthw_rcp_el_b(be->p_km_nthw,\n+\t\t\t\t      km->v7.rcp[category + i].el_b);\n+\t\t\tkm_nthw_rcp_info_a(be->p_km_nthw,\n+\t\t\t\t\tkm->v7.rcp[category + i].info_a);\n+\t\t\tkm_nthw_rcp_info_b(be->p_km_nthw,\n+\t\t\t\t\tkm->v7.rcp[category + i].info_b);\n+\t\t\tkm_nthw_rcp_ftm_a(be->p_km_nthw,\n+\t\t\t\t       km->v7.rcp[category + i].ftm_a);\n+\t\t\tkm_nthw_rcp_ftm_b(be->p_km_nthw,\n+\t\t\t\t       km->v7.rcp[category + i].ftm_b);\n+\t\t\tkm_nthw_rcp_bank_a(be->p_km_nthw,\n+\t\t\t\t\tkm->v7.rcp[category + i].bank_a);\n+\t\t\tkm_nthw_rcp_bank_b(be->p_km_nthw,\n+\t\t\t\t\tkm->v7.rcp[category + i].bank_b);\n+\t\t\tkm_nthw_rcp_kl_a(be->p_km_nthw,\n+\t\t\t\t      km->v7.rcp[category + i].kl_a);\n+\t\t\tkm_nthw_rcp_kl_b(be->p_km_nthw,\n+\t\t\t\t      km->v7.rcp[category + i].kl_b);\n+\t\t\tkm_nthw_rcp_keyway_a(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].keyway_a);\n+\t\t\tkm_nthw_rcp_keyway_b(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].keyway_b);\n+\t\t\tkm_nthw_rcp_synergy_mode(be->p_km_nthw,\n+\t\t\t\t\t\t km->v7.rcp[category + i].synergy_mode);\n+\t\t\tkm_nthw_rcp_dw0_b_dyn(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].dw0_b_dyn);\n+\t\t\tkm_nthw_rcp_dw0_b_ofs(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].dw0_b_ofs);\n+\t\t\tkm_nthw_rcp_dw2_b_dyn(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].dw2_b_dyn);\n+\t\t\tkm_nthw_rcp_dw2_b_ofs(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].dw2_b_ofs);\n+\t\t\tkm_nthw_rcp_sw4_b_dyn(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].sw4_b_dyn);\n+\t\t\tkm_nthw_rcp_sw4_b_ofs(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].sw4_b_ofs);\n+\t\t\tkm_nthw_rcp_sw5_b_dyn(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].sw5_b_dyn);\n+\t\t\tkm_nthw_rcp_sw5_b_ofs(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.rcp[category + i].sw5_b_ofs);\n+\t\t\tkm_nthw_rcp_flush(be->p_km_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(km, be->p_km_nthw);\n+\treturn 0;\n+}\n+\n+static int km_cam_flush(void *be_dev, const struct km_func_s *km, int 
bank,\n+\t\t\tint record, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, km, be->p_km_nthw);\n+\n+\tif (km->ver == 7) {\n+\t\tkm_nthw_cam_cnt(be->p_km_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tkm_nthw_cam_select(be->p_km_nthw,\n+\t\t\t\t\t (bank << 11) + record + i);\n+\t\t\tkm_nthw_cam_w0(be->p_km_nthw,\n+\t\t\t\t     km->v7.cam[(bank << 11) + record + i].w0);\n+\t\t\tkm_nthw_cam_w1(be->p_km_nthw,\n+\t\t\t\t     km->v7.cam[(bank << 11) + record + i].w1);\n+\t\t\tkm_nthw_cam_w2(be->p_km_nthw,\n+\t\t\t\t     km->v7.cam[(bank << 11) + record + i].w2);\n+\t\t\tkm_nthw_cam_w3(be->p_km_nthw,\n+\t\t\t\t     km->v7.cam[(bank << 11) + record + i].w3);\n+\t\t\tkm_nthw_cam_w4(be->p_km_nthw,\n+\t\t\t\t     km->v7.cam[(bank << 11) + record + i].w4);\n+\t\t\tkm_nthw_cam_w5(be->p_km_nthw,\n+\t\t\t\t     km->v7.cam[(bank << 11) + record + i].w5);\n+\t\t\tkm_nthw_cam_ft0(be->p_km_nthw,\n+\t\t\t\t\tkm->v7.cam[(bank << 11) + record + i].ft0);\n+\t\t\tkm_nthw_cam_ft1(be->p_km_nthw,\n+\t\t\t\t\tkm->v7.cam[(bank << 11) + record + i].ft1);\n+\t\t\tkm_nthw_cam_ft2(be->p_km_nthw,\n+\t\t\t\t\tkm->v7.cam[(bank << 11) + record + i].ft2);\n+\t\t\tkm_nthw_cam_ft3(be->p_km_nthw,\n+\t\t\t\t\tkm->v7.cam[(bank << 11) + record + i].ft3);\n+\t\t\tkm_nthw_cam_ft4(be->p_km_nthw,\n+\t\t\t\t\tkm->v7.cam[(bank << 11) + record + i].ft4);\n+\t\t\tkm_nthw_cam_ft5(be->p_km_nthw,\n+\t\t\t\t\tkm->v7.cam[(bank << 11) + record + i].ft5);\n+\t\t\tkm_nthw_cam_flush(be->p_km_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(km, be->p_km_nthw);\n+\treturn 0;\n+}\n+\n+static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank,\n+\t\t\t int byte, int value, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, km, be->p_km_nthw);\n+\n+\tif (km->ver == 7) {\n+\t\tint start_idx = bank * 4 * 256 + byte * 256 + value;\n+\n+\t\tkm_nthw_tcam_cnt(be->p_km_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tif (km->v7.tcam[start_idx + i].dirty) {\n+\t\t\t\tkm_nthw_tcam_select(be->p_km_nthw, start_idx + i);\n+\t\t\t\tkm_nthw_tcam_t(be->p_km_nthw,\n+\t\t\t\t\t     km->v7.tcam[start_idx + i].t);\n+\t\t\t\tkm_nthw_tcam_flush(be->p_km_nthw);\n+\t\t\t\tkm->v7.tcam[start_idx + i].dirty = 0;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(km, be->p_km_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ * bank is the TCAM bank, index is the index within the bank (0..71)\n+ */\n+static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank,\n+\t\t\tint index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, km, be->p_km_nthw);\n+\n+\tif (km->ver == 7) {\n+\t\t/* TCAM bank width in version 3 = 72 */\n+\t\tkm_nthw_tci_cnt(be->p_km_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tkm_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);\n+\t\t\tkm_nthw_tci_color(be->p_km_nthw,\n+\t\t\t\t\t  km->v7.tci[bank * 72 + index + i].color);\n+\t\t\tkm_nthw_tci_ft(be->p_km_nthw,\n+\t\t\t\t     km->v7.tci[bank * 72 + index + i].ft);\n+\t\t\tkm_nthw_tci_flush(be->p_km_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(km, be->p_km_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ * bank is the TCAM bank, index is the index within the bank (0..71)\n+ */\n+static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank,\n+\t\t\tint index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, km, be->p_km_nthw);\n+\n+\tif (km->ver == 7) {\n+\t\t/* 
TCAM bank width in version 3 = 72 */\n+\t\tkm_nthw_tcq_cnt(be->p_km_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\t/* adr = lover 4 bits = bank, upper 7 bits = index */\n+\t\t\tkm_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);\n+\t\t\tkm_nthw_tcq_bank_mask(be->p_km_nthw,\n+\t\t\t\t\t      km->v7.tcq[bank + (index << 4) + i].bank_mask);\n+\t\t\tkm_nthw_tcq_qual(be->p_km_nthw,\n+\t\t\t\t\t km->v7.tcq[bank + (index << 4) + i].qual);\n+\t\t\tkm_nthw_tcq_flush(be->p_km_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(km, be->p_km_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ *  *****************  FLM  *******************\n+ */\n+\n+static bool flm_get_present(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn be->p_flm_nthw != NULL;\n+}\n+\n+static uint32_t flm_get_version(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn (uint32_t)((module_get_major_version(be->p_flm_nthw->m_flm) << 16) |\n+\t\t\t  (module_get_minor_version(be->p_flm_nthw->m_flm) &\n+\t\t\t   0xffff));\n+}\n+\n+static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) {\n+\t\tflm_nthw_control_enable(be->p_flm_nthw, flm->v17.control->enable);\n+\t\tflm_nthw_control_init(be->p_flm_nthw, flm->v17.control->init);\n+\t\tflm_nthw_control_lds(be->p_flm_nthw, flm->v17.control->lds);\n+\t\tflm_nthw_control_lfs(be->p_flm_nthw, flm->v17.control->lfs);\n+\t\tflm_nthw_control_lis(be->p_flm_nthw, flm->v17.control->lis);\n+\t\tflm_nthw_control_uds(be->p_flm_nthw, flm->v17.control->uds);\n+\t\tflm_nthw_control_uis(be->p_flm_nthw, flm->v17.control->uis);\n+\t\tflm_nthw_control_rds(be->p_flm_nthw, flm->v17.control->rds);\n+\t\tflm_nthw_control_ris(be->p_flm_nthw, flm->v17.control->ris);\n+\t\tflm_nthw_control_pds(be->p_flm_nthw, flm->v17.control->pds);\n+\t\tflm_nthw_control_pis(be->p_flm_nthw, flm->v17.control->pis);\n+\t\tflm_nthw_control_crcwr(be->p_flm_nthw, flm->v17.control->crcwr);\n+\t\tflm_nthw_control_crcrd(be->p_flm_nthw, flm->v17.control->crcrd);\n+\t\tflm_nthw_control_rbl(be->p_flm_nthw, flm->v17.control->rbl);\n+\t\tflm_nthw_control_eab(be->p_flm_nthw, flm->v17.control->eab);\n+\t\tflm_nthw_control_split_sdram_usage(be->p_flm_nthw,\n+\t\t\t\t\t\t   flm->v17.control->split_sdram_usage);\n+\t\tflm_nthw_control_flush(be->p_flm_nthw);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) {\n+\t\t/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */\n+\t\tflm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,\n+\t\t\t\t       0);\n+\t\tflm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 0);\n+\t\tflm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 0);\n+\t\tflm_nthw_status_flush(be->p_flm_nthw);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_status_update(void *be_dev, const struct flm_func_s *flm)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) {\n+\t\tflm_nthw_status_update(be->p_flm_nthw);\n+\t\tflm_nthw_status_calibdone(be->p_flm_nthw,\n+\t\t\t\t\t&flm->v17.status->calibdone, 
1);\n+\t\tflm_nthw_status_initdone(be->p_flm_nthw, &flm->v17.status->initdone,\n+\t\t\t\t       1);\n+\t\tflm_nthw_status_idle(be->p_flm_nthw, &flm->v17.status->idle, 1);\n+\t\tflm_nthw_status_critical(be->p_flm_nthw, &flm->v17.status->critical,\n+\t\t\t\t       1);\n+\t\tflm_nthw_status_panic(be->p_flm_nthw, &flm->v17.status->panic, 1);\n+\t\tflm_nthw_status_crcerr(be->p_flm_nthw, &flm->v17.status->crcerr, 1);\n+\t\tflm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v17.status->eft_bp, 1);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_timeout_flush(void *be_dev, const struct flm_func_s *flm)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) {\n+\t\tflm_nthw_timeout_t(be->p_flm_nthw, flm->v17.timeout->t);\n+\t\tflm_nthw_timeout_flush(be->p_flm_nthw);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) {\n+\t\tflm_nthw_scrub_i(be->p_flm_nthw, flm->v17.scrub->i);\n+\t\tflm_nthw_scrub_flush(be->p_flm_nthw);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) {\n+\t\tflm_nthw_load_bin(be->p_flm_nthw, flm->v17.load_bin->bin);\n+\t\tflm_nthw_load_bin_flush(be->p_flm_nthw);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_load_pps_flush(void *be_dev, const struct flm_func_s *flm)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) {\n+\t\tflm_nthw_load_pps(be->p_flm_nthw, flm->v17.load_pps->pps);\n+\t\tflm_nthw_load_pps_flush(be->p_flm_nthw);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_load_lps_flush(void *be_dev, const struct flm_func_s *flm)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) {\n+\t\tflm_nthw_load_lps(be->p_flm_nthw, flm->v17.load_lps->lps);\n+\t\tflm_nthw_load_lps_flush(be->p_flm_nthw);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_load_aps_flush(void *be_dev, const struct flm_func_s *flm)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) {\n+\t\tflm_nthw_load_aps(be->p_flm_nthw, flm->v17.load_aps->aps);\n+\t\tflm_nthw_load_aps_flush(be->p_flm_nthw);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) {\n+\t\tflm_nthw_prio_limit0(be->p_flm_nthw, flm->v17.prio->limit0);\n+\t\tflm_nthw_prio_ft0(be->p_flm_nthw, flm->v17.prio->ft0);\n+\t\tflm_nthw_prio_limit1(be->p_flm_nthw, flm->v17.prio->limit1);\n+\t\tflm_nthw_prio_ft1(be->p_flm_nthw, flm->v17.prio->ft1);\n+\t\tflm_nthw_prio_limit2(be->p_flm_nthw, 
flm->v17.prio->limit2);\n+\t\tflm_nthw_prio_ft2(be->p_flm_nthw, flm->v17.prio->ft2);\n+\t\tflm_nthw_prio_limit3(be->p_flm_nthw, flm->v17.prio->limit3);\n+\t\tflm_nthw_prio_ft3(be->p_flm_nthw, flm->v17.prio->ft3);\n+\t\tflm_nthw_prio_flush(be->p_flm_nthw);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index,\n+\t\t\t int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) {\n+\t\tflm_nthw_pst_cnt(be->p_flm_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tflm_nthw_pst_select(be->p_flm_nthw, index + i);\n+\t\t\tflm_nthw_pst_bp(be->p_flm_nthw, flm->v17.pst[index + i].bp);\n+\t\t\tflm_nthw_pst_pp(be->p_flm_nthw, flm->v17.pst[index + i].pp);\n+\t\t\tflm_nthw_pst_tp(be->p_flm_nthw, flm->v17.pst[index + i].tp);\n+\t\t\tflm_nthw_pst_flush(be->p_flm_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index,\n+\t\t\t int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) {\n+\t\tflm_nthw_rcp_cnt(be->p_flm_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tflm_nthw_rcp_select(be->p_flm_nthw, index + i);\n+\t\t\tflm_nthw_rcp_lookup(be->p_flm_nthw,\n+\t\t\t\t\t  flm->v17.rcp[index + i].lookup);\n+\t\t\tflm_nthw_rcp_qw0_dyn(be->p_flm_nthw,\n+\t\t\t\t\t  flm->v17.rcp[index + i].qw0_dyn);\n+\t\t\tflm_nthw_rcp_qw0_ofs(be->p_flm_nthw,\n+\t\t\t\t\t  flm->v17.rcp[index + i].qw0_ofs);\n+\t\t\tflm_nthw_rcp_qw0_sel(be->p_flm_nthw,\n+\t\t\t\t\t  flm->v17.rcp[index + i].qw0_sel);\n+\t\t\tflm_nthw_rcp_qw4_dyn(be->p_flm_nthw,\n+\t\t\t\t\t  flm->v17.rcp[index + i].qw4_dyn);\n+\t\t\tflm_nthw_rcp_qw4_ofs(be->p_flm_nthw,\n+\t\t\t\t\t  flm->v17.rcp[index + i].qw4_ofs);\n+\t\t\tflm_nthw_rcp_sw8_dyn(be->p_flm_nthw,\n+\t\t\t\t\t  flm->v17.rcp[index + i].sw8_dyn);\n+\t\t\tflm_nthw_rcp_sw8_ofs(be->p_flm_nthw,\n+\t\t\t\t\t  flm->v17.rcp[index + i].sw8_ofs);\n+\t\t\tflm_nthw_rcp_sw8_sel(be->p_flm_nthw,\n+\t\t\t\t\t  flm->v17.rcp[index + i].sw8_sel);\n+\t\t\tflm_nthw_rcp_sw9_dyn(be->p_flm_nthw,\n+\t\t\t\t\t  flm->v17.rcp[index + i].sw9_dyn);\n+\t\t\tflm_nthw_rcp_sw9_ofs(be->p_flm_nthw,\n+\t\t\t\t\t  flm->v17.rcp[index + i].sw9_ofs);\n+\t\t\tflm_nthw_rcp_mask(be->p_flm_nthw,\n+\t\t\t\t\tflm->v17.rcp[index + i].mask);\n+\t\t\tflm_nthw_rcp_kid(be->p_flm_nthw,\n+\t\t\t\t       flm->v17.rcp[index + i].kid);\n+\t\t\tflm_nthw_rcp_opn(be->p_flm_nthw,\n+\t\t\t\t       flm->v17.rcp[index + i].opn);\n+\t\t\tflm_nthw_rcp_ipn(be->p_flm_nthw,\n+\t\t\t\t       flm->v17.rcp[index + i].ipn);\n+\t\t\tflm_nthw_rcp_byt_dyn(be->p_flm_nthw,\n+\t\t\t\t\t  flm->v17.rcp[index + i].byt_dyn);\n+\t\t\tflm_nthw_rcp_byt_ofs(be->p_flm_nthw,\n+\t\t\t\t\t  flm->v17.rcp[index + i].byt_ofs);\n+\t\t\tflm_nthw_rcp_txplm(be->p_flm_nthw,\n+\t\t\t\t\t flm->v17.rcp[index + i].txplm);\n+\t\t\tflm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,\n+\t\t\t\t\t\t    flm->v17.rcp[index + i].auto_ipv4_mask);\n+\t\t\tflm_nthw_rcp_flush(be->p_flm_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) 
{\n+\t\tflm_nthw_buf_ctrl_update(be->p_flm_nthw,\n+\t\t\t\t      &flm->v17.buf_ctrl->lrn_free,\n+\t\t\t\t      &flm->v17.buf_ctrl->inf_avail,\n+\t\t\t\t      &flm->v17.buf_ctrl->sta_avail);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tif (flm->ver >= 17) {\n+\t\tflm_nthw_stat_lrn_done_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_lrn_fail_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_unl_done_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_unl_ignore_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_rel_done_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_rel_ignore_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_aul_done_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_aul_ignore_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_aul_fail_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_tul_done_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_flows_update(be->p_flm_nthw);\n+\n+\t\tflm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v17.lrn_done->cnt,\n+\t\t\t\t       1);\n+\t\tflm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw,\n+\t\t\t\t\t &flm->v17.lrn_ignore->cnt, 1);\n+\t\tflm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v17.lrn_fail->cnt,\n+\t\t\t\t       1);\n+\t\tflm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v17.unl_done->cnt,\n+\t\t\t\t       1);\n+\t\tflm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw,\n+\t\t\t\t\t &flm->v17.unl_ignore->cnt, 1);\n+\t\tflm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v17.rel_done->cnt,\n+\t\t\t\t       1);\n+\t\tflm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw,\n+\t\t\t\t\t &flm->v17.rel_ignore->cnt, 1);\n+\t\tflm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v17.aul_done->cnt,\n+\t\t\t\t       1);\n+\t\tflm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw,\n+\t\t\t\t\t &flm->v17.aul_ignore->cnt, 1);\n+\t\tflm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v17.aul_fail->cnt,\n+\t\t\t\t       1);\n+\t\tflm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v17.tul_done->cnt,\n+\t\t\t\t       1);\n+\t\tflm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v17.flows->cnt, 1);\n+\n+\t\tflm_nthw_stat_prb_done_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_prb_ignore_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v17.prb_done->cnt,\n+\t\t\t\t       1);\n+\t\tflm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw,\n+\t\t\t\t\t &flm->v17.prb_ignore->cnt, 1);\n+\t}\n+\tif (flm->ver >= 20) {\n+\t\tflm_nthw_stat_sta_done_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_inf_done_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_inf_skip_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_pck_hit_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_pck_miss_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_pck_unh_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_pck_dis_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_csh_hit_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_csh_miss_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_csh_unh_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_cuc_start_update(be->p_flm_nthw);\n+\t\tflm_nthw_stat_cuc_move_update(be->p_flm_nthw);\n+\n+\t\tflm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v20.sta_done->cnt,\n+\t\t\t\t       1);\n+\t\tflm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v20.inf_done->cnt,\n+\t\t\t\t       1);\n+\t\tflm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v20.inf_skip->cnt,\n+\t\t\t\t       1);\n+\t\tflm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, 
&flm->v20.pck_hit->cnt, 1);\n+\t\tflm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v20.pck_miss->cnt,\n+\t\t\t\t       1);\n+\t\tflm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v20.pck_unh->cnt, 1);\n+\t\tflm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v20.pck_dis->cnt, 1);\n+\t\tflm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v20.csh_hit->cnt, 1);\n+\t\tflm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v20.csh_miss->cnt,\n+\t\t\t\t       1);\n+\t\tflm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v20.csh_unh->cnt, 1);\n+\t\tflm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v20.cuc_start->cnt,\n+\t\t\t\t\t1);\n+\t\tflm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v20.cuc_move->cnt,\n+\t\t\t\t       1);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn 0;\n+}\n+\n+static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm,\n+\t\t\t      const uint32_t *lrn_data, uint32_t size)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tint ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, size,\n+\t\t\t\t       &flm->v17.buf_ctrl->lrn_free,\n+\t\t\t\t       &flm->v17.buf_ctrl->inf_avail,\n+\t\t\t\t       &flm->v17.buf_ctrl->sta_avail);\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn ret;\n+}\n+\n+static int flm_inf_data_update(void *be_dev, const struct flm_func_s *flm,\n+\t\t\t       uint32_t *inf_data, uint32_t size)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tint ret = flm_nthw_inf_data_update(be->p_flm_nthw, inf_data, size,\n+\t\t\t\t\t&flm->v17.buf_ctrl->lrn_free,\n+\t\t\t\t\t&flm->v17.buf_ctrl->inf_avail,\n+\t\t\t\t\t&flm->v17.buf_ctrl->sta_avail);\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn ret;\n+}\n+\n+static int flm_sta_data_update(void *be_dev, const struct flm_func_s *flm,\n+\t\t\t       uint32_t *sta_data, uint32_t size)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);\n+\n+\tint ret = flm_nthw_sta_data_update(be->p_flm_nthw, sta_data, size,\n+\t\t\t\t\t&flm->v17.buf_ctrl->lrn_free,\n+\t\t\t\t\t&flm->v17.buf_ctrl->inf_avail,\n+\t\t\t\t\t&flm->v17.buf_ctrl->sta_avail);\n+\n+\t_CHECK_DEBUG_OFF(flm, be->p_flm_nthw);\n+\treturn ret;\n+}\n+\n+/*\n+ *  *****************  HSH  *******************\n+ */\n+\n+static bool hsh_get_present(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn be->p_hsh_nthw != NULL;\n+}\n+\n+static uint32_t hsh_get_version(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn (uint32_t)((module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |\n+\t\t\t  (module_get_minor_version(be->p_hsh_nthw->m_hsh) &\n+\t\t\t   0xffff));\n+}\n+\n+static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh,\n+\t\t\t int category, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);\n+\n+\tif (hsh->ver == 5) {\n+\t\thsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\thsh_nthw_rcp_select(be->p_hsh_nthw, category + i);\n+\t\t\thsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,\n+\t\t\t\t\t\t    hsh->v5.rcp[category + i].load_dist_type);\n+\t\t\thsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,\n+\t\t\t\t\t\t   hsh->v5.rcp[category + i].mac_port_mask);\n+\t\t\thsh_nthw_rcp_sort(be->p_hsh_nthw,\n+\t\t\t\t\thsh->v5.rcp[category + 
i].sort);\n+\t\t\thsh_nthw_rcp_qw0_pe(be->p_hsh_nthw,\n+\t\t\t\t\t hsh->v5.rcp[category + i].qw0_pe);\n+\t\t\thsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw,\n+\t\t\t\t\t  hsh->v5.rcp[category + i].qw0_ofs);\n+\t\t\thsh_nthw_rcp_qw4_pe(be->p_hsh_nthw,\n+\t\t\t\t\t hsh->v5.rcp[category + i].qw4_pe);\n+\t\t\thsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw,\n+\t\t\t\t\t  hsh->v5.rcp[category + i].qw4_ofs);\n+\t\t\thsh_nthw_rcp_w8_pe(be->p_hsh_nthw,\n+\t\t\t\t\thsh->v5.rcp[category + i].w8_pe);\n+\t\t\thsh_nthw_rcp_w8_ofs(be->p_hsh_nthw,\n+\t\t\t\t\t hsh->v5.rcp[category + i].w8_ofs);\n+\t\t\thsh_nthw_rcp_w8_sort(be->p_hsh_nthw,\n+\t\t\t\t\t  hsh->v5.rcp[category + i].w8_sort);\n+\t\t\thsh_nthw_rcp_w9_pe(be->p_hsh_nthw,\n+\t\t\t\t\thsh->v5.rcp[category + i].w9_pe);\n+\t\t\thsh_nthw_rcp_w9_ofs(be->p_hsh_nthw,\n+\t\t\t\t\t hsh->v5.rcp[category + i].w9_ofs);\n+\t\t\thsh_nthw_rcp_w9_sort(be->p_hsh_nthw,\n+\t\t\t\t\t  hsh->v5.rcp[category + i].w9_sort);\n+\t\t\thsh_nthw_rcp_w9_p(be->p_hsh_nthw,\n+\t\t\t\t       hsh->v5.rcp[category + i].w9_p);\n+\t\t\thsh_nthw_rcp_p_mask(be->p_hsh_nthw,\n+\t\t\t\t\t hsh->v5.rcp[category + i].p_mask);\n+\t\t\thsh_nthw_rcp_word_mask(be->p_hsh_nthw,\n+\t\t\t\t\t       hsh->v5.rcp[category + i].word_mask);\n+\t\t\thsh_nthw_rcp_seed(be->p_hsh_nthw,\n+\t\t\t\t\thsh->v5.rcp[category + i].seed);\n+\t\t\thsh_nthw_rcp_tnl_p(be->p_hsh_nthw,\n+\t\t\t\t\thsh->v5.rcp[category + i].tnl_p);\n+\t\t\thsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,\n+\t\t\t\t\t       hsh->v5.rcp[category + i].hsh_valid);\n+\t\t\thsh_nthw_rcp_hsh_type(be->p_hsh_nthw,\n+\t\t\t\t\t   hsh->v5.rcp[category + i].hsh_type);\n+\t\t\thsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,\n+\t\t\t\t\t\t    hsh->v5.rcp[category + i].auto_ipv4_mask);\n+\t\t\thsh_nthw_rcp_flush(be->p_hsh_nthw);\n+\t\t}\n+\t}\n+\t_CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ *  *****************  HST  *******************\n+ */\n+\n+static bool hst_get_present(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn be->p_hst_nthw != NULL;\n+}\n+\n+static uint32_t hst_get_version(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn (uint32_t)((module_get_major_version(be->p_hst_nthw->m_hst) << 16) |\n+\t\t\t  (module_get_minor_version(be->p_hst_nthw->m_hst) &\n+\t\t\t   0xffff));\n+}\n+\n+static int hst_rcp_flush(void *be_dev, const struct hst_func_s *hst,\n+\t\t\t int category, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, hst, be->p_hst_nthw);\n+\n+\tif (hst->ver == 2) {\n+\t\thst_nthw_rcp_cnt(be->p_hst_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\thst_nthw_rcp_select(be->p_hst_nthw, category + i);\n+\t\t\thst_nthw_rcp_strip_mode(be->p_hst_nthw,\n+\t\t\t\t\t\thst->v2.rcp[category + i].strip_mode);\n+\t\t\thst_nthw_rcp_start_dyn(be->p_hst_nthw,\n+\t\t\t\t\t       hst->v2.rcp[category + i].start_dyn);\n+\t\t\thst_nthw_rcp_start_ofs(be->p_hst_nthw,\n+\t\t\t\t\t       hst->v2.rcp[category + i].start_ofs);\n+\t\t\thst_nthw_rcp_end_dyn(be->p_hst_nthw,\n+\t\t\t\t\t  hst->v2.rcp[category + i].end_dyn);\n+\t\t\thst_nthw_rcp_end_ofs(be->p_hst_nthw,\n+\t\t\t\t\t  hst->v2.rcp[category + i].end_ofs);\n+\t\t\thst_nthw_rcp_modif0_cmd(be->p_hst_nthw,\n+\t\t\t\t\t\thst->v2.rcp[category + i].modif0_cmd);\n+\t\t\thst_nthw_rcp_modif0_dyn(be->p_hst_nthw,\n+\t\t\t\t\t\thst->v2.rcp[category + i].modif0_dyn);\n+\t\t\thst_nthw_rcp_modif0_ofs(be->p_hst_nthw,\n+\t\t\t\t\t\thst->v2.rcp[category + 
i].modif0_ofs);\n+\t\t\thst_nthw_rcp_modif0_value(be->p_hst_nthw,\n+\t\t\t\t\t\t  hst->v2.rcp[category + i].modif0_value);\n+\t\t\thst_nthw_rcp_modif1_cmd(be->p_hst_nthw,\n+\t\t\t\t\t\thst->v2.rcp[category + i].modif1_cmd);\n+\t\t\thst_nthw_rcp_modif1_dyn(be->p_hst_nthw,\n+\t\t\t\t\t\thst->v2.rcp[category + i].modif1_dyn);\n+\t\t\thst_nthw_rcp_modif1_ofs(be->p_hst_nthw,\n+\t\t\t\t\t\thst->v2.rcp[category + i].modif1_ofs);\n+\t\t\thst_nthw_rcp_modif1_value(be->p_hst_nthw,\n+\t\t\t\t\t\t  hst->v2.rcp[category + i].modif1_value);\n+\t\t\thst_nthw_rcp_modif2_cmd(be->p_hst_nthw,\n+\t\t\t\t\t\thst->v2.rcp[category + i].modif2_cmd);\n+\t\t\thst_nthw_rcp_modif2_dyn(be->p_hst_nthw,\n+\t\t\t\t\t\thst->v2.rcp[category + i].modif2_dyn);\n+\t\t\thst_nthw_rcp_modif2_ofs(be->p_hst_nthw,\n+\t\t\t\t\t\thst->v2.rcp[category + i].modif2_ofs);\n+\t\t\thst_nthw_rcp_modif2_value(be->p_hst_nthw,\n+\t\t\t\t\t\t  hst->v2.rcp[category + i].modif2_value);\n+\t\t\thst_nthw_rcp_flush(be->p_hst_nthw);\n+\t\t}\n+\t}\n+\t_CHECK_DEBUG_OFF(hst, be->p_hst_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ *  *****************  QSL  *******************\n+ */\n+\n+static bool qsl_get_present(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn be->p_qsl_nthw != NULL;\n+}\n+\n+static uint32_t qsl_get_version(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn (uint32_t)((module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |\n+\t\t\t  (module_get_minor_version(be->p_qsl_nthw->m_qsl) &\n+\t\t\t   0xffff));\n+}\n+\n+static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl,\n+\t\t\t int category, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);\n+\n+\tif (qsl->ver == 7) {\n+\t\tqsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tqsl_nthw_rcp_select(be->p_qsl_nthw, category + i);\n+\t\t\tqsl_nthw_rcp_discard(be->p_qsl_nthw,\n+\t\t\t\t\t   qsl->v7.rcp[category + i].discard);\n+\t\t\tqsl_nthw_rcp_drop(be->p_qsl_nthw,\n+\t\t\t\t\tqsl->v7.rcp[category + i].drop);\n+\t\t\tqsl_nthw_rcp_tbl_lo(be->p_qsl_nthw,\n+\t\t\t\t\t qsl->v7.rcp[category + i].tbl_lo);\n+\t\t\tqsl_nthw_rcp_tbl_hi(be->p_qsl_nthw,\n+\t\t\t\t\t qsl->v7.rcp[category + i].tbl_hi);\n+\t\t\tqsl_nthw_rcp_tbl_idx(be->p_qsl_nthw,\n+\t\t\t\t\t  qsl->v7.rcp[category + i].tbl_idx);\n+\t\t\tqsl_nthw_rcp_tbl_msk(be->p_qsl_nthw,\n+\t\t\t\t\t  qsl->v7.rcp[category + i].tbl_msk);\n+\t\t\tqsl_nthw_rcp_lr(be->p_qsl_nthw,\n+\t\t\t\t      qsl->v7.rcp[category + i].lr);\n+\t\t\tqsl_nthw_rcp_tsa(be->p_qsl_nthw,\n+\t\t\t\t       qsl->v7.rcp[category + i].tsa);\n+\t\t\tqsl_nthw_rcp_vli(be->p_qsl_nthw,\n+\t\t\t\t       qsl->v7.rcp[category + i].vli);\n+\t\t\tqsl_nthw_rcp_flush(be->p_qsl_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);\n+\treturn 0;\n+}\n+\n+static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,\n+\t\t\t int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);\n+\n+\tif (qsl->ver == 7) {\n+\t\tqsl_nthw_qst_cnt(be->p_qsl_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tqsl_nthw_qst_select(be->p_qsl_nthw, entry + i);\n+\t\t\tqsl_nthw_qst_queue(be->p_qsl_nthw,\n+\t\t\t\t\t qsl->v7.qst[entry + i].queue);\n+\t\t\tqsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);\n+\n+\t\t\tqsl_nthw_qst_tx_port(be->p_qsl_nthw,\n+\t\t\t\t\t  qsl->v7.qst[entry + 
i].tx_port);\n+\t\t\tqsl_nthw_qst_lre(be->p_qsl_nthw,\n+\t\t\t\t       qsl->v7.qst[entry + i].lre);\n+\t\t\tqsl_nthw_qst_tci(be->p_qsl_nthw,\n+\t\t\t\t       qsl->v7.qst[entry + i].tci);\n+\t\t\tqsl_nthw_qst_ven(be->p_qsl_nthw,\n+\t\t\t\t       qsl->v7.qst[entry + i].ven);\n+\t\t\tqsl_nthw_qst_flush(be->p_qsl_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);\n+\treturn 0;\n+}\n+\n+static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,\n+\t\t\t int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);\n+\n+\tif (qsl->ver == 7) {\n+\t\tqsl_nthw_qen_cnt(be->p_qsl_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tqsl_nthw_qen_select(be->p_qsl_nthw, entry + i);\n+\t\t\tqsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);\n+\t\t\tqsl_nthw_qen_flush(be->p_qsl_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);\n+\treturn 0;\n+}\n+\n+static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry,\n+\t\t\t  int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);\n+\n+\tif (qsl->ver == 7) {\n+\t\tqsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tqsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);\n+\t\t\tqsl_nthw_unmq_dest_queue(be->p_qsl_nthw,\n+\t\t\t\t\t\t qsl->v7.unmq[entry + i].dest_queue);\n+\t\t\tqsl_nthw_unmq_en(be->p_qsl_nthw,\n+\t\t\t\t       qsl->v7.unmq[entry + i].en);\n+\t\t\tqsl_nthw_unmq_flush(be->p_qsl_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ *  *****************  SLC  *******************\n+ */\n+\n+static bool slc_get_present(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn be->p_slc_nthw != NULL;\n+}\n+\n+static uint32_t slc_get_version(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn (uint32_t)((module_get_major_version(be->p_slc_nthw->m_slc) << 16) |\n+\t\t\t  (module_get_minor_version(be->p_slc_nthw->m_slc) &\n+\t\t\t   0xffff));\n+}\n+\n+static int slc_rcp_flush(void *be_dev, const struct slc_func_s *slc,\n+\t\t\t int category, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, slc, be->p_slc_nthw);\n+\n+\tif (slc->ver == 1) {\n+\t\tslc_nthw_rcp_cnt(be->p_slc_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tslc_nthw_rcp_select(be->p_slc_nthw, category + i);\n+\t\t\tslc_nthw_rcp_tail_slc_en(be->p_slc_nthw,\n+\t\t\t\t\t\t slc->v1.rcp[category + i].tail_slc_en);\n+\t\t\tslc_nthw_rcp_tail_dyn(be->p_slc_nthw,\n+\t\t\t\t\t   slc->v1.rcp[category + i].tail_dyn);\n+\t\t\tslc_nthw_rcp_tail_ofs(be->p_slc_nthw,\n+\t\t\t\t\t   slc->v1.rcp[category + i].tail_ofs);\n+\t\t\tslc_nthw_rcp_pcap(be->p_slc_nthw,\n+\t\t\t\t\tslc->v1.rcp[category + i].pcap);\n+\t\t\tslc_nthw_rcp_flush(be->p_slc_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(slc, be->p_slc_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ *  *****************  SLC LR *******************\n+ */\n+\n+static bool slc_lr_get_present(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn be->p_slc_lr_nthw != NULL;\n+}\n+\n+static uint32_t slc_lr_get_version(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn (uint32_t)((module_get_major_version(be->p_slc_lr_nthw->m_slc_lr)\n+\t\t\t   << 16) |\n+\t\t\t  
(module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr) &\n+\t\t\t   0xffff));\n+}\n+\n+static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr,\n+\t\t\t    int category, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);\n+\n+\tif (slc_lr->ver == 2) {\n+\t\tslc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tslc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);\n+\t\t\tslc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,\n+\t\t\t\t\t\t    slc_lr->v2.rcp[category + i].tail_slc_en);\n+\t\t\tslc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,\n+\t\t\t\t\t\t slc_lr->v2.rcp[category + i].tail_dyn);\n+\t\t\tslc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,\n+\t\t\t\t\t\t slc_lr->v2.rcp[category + i].tail_ofs);\n+\t\t\tslc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw,\n+\t\t\t\t\t  slc_lr->v2.rcp[category + i].pcap);\n+\t\t\tslc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ *  *****************  PDB  *******************\n+ */\n+\n+static bool pdb_get_present(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn be->p_pdb_nthw != NULL;\n+}\n+\n+static uint32_t pdb_get_version(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn (uint32_t)((module_get_major_version(be->p_pdb_nthw->m_pdb) << 16) |\n+\t\t\t  (module_get_minor_version(be->p_pdb_nthw->m_pdb) &\n+\t\t\t   0xffff));\n+}\n+\n+static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb,\n+\t\t\t int category, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);\n+\n+\tif (pdb->ver == 9) {\n+\t\tpdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tpdb_nthw_rcp_select(be->p_pdb_nthw, category + i);\n+\t\t\tpdb_nthw_rcp_descriptor(be->p_pdb_nthw,\n+\t\t\t\t\t\tpdb->v9.rcp[category + i].descriptor);\n+\t\t\tpdb_nthw_rcp_desc_len(be->p_pdb_nthw,\n+\t\t\t\t\t   pdb->v9.rcp[category + i].desc_len);\n+\t\t\tpdb_nthw_rcp_tx_port(be->p_pdb_nthw,\n+\t\t\t\t\t  pdb->v9.rcp[category + i].tx_port);\n+\t\t\tpdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,\n+\t\t\t\t\t       pdb->v9.rcp[category + i].tx_ignore);\n+\t\t\tpdb_nthw_rcp_tx_now(be->p_pdb_nthw,\n+\t\t\t\t\t pdb->v9.rcp[category + i].tx_now);\n+\t\t\tpdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,\n+\t\t\t\t\t\t   pdb->v9.rcp[category + i].crc_overwrite);\n+\t\t\tpdb_nthw_rcp_align(be->p_pdb_nthw,\n+\t\t\t\t\t pdb->v9.rcp[category + i].align);\n+\t\t\tpdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw,\n+\t\t\t\t\t   pdb->v9.rcp[category + i].ofs0_dyn);\n+\t\t\tpdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw,\n+\t\t\t\t\t   pdb->v9.rcp[category + i].ofs0_rel);\n+\t\t\tpdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw,\n+\t\t\t\t\t   pdb->v9.rcp[category + i].ofs1_dyn);\n+\t\t\tpdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw,\n+\t\t\t\t\t   pdb->v9.rcp[category + i].ofs1_rel);\n+\t\t\tpdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw,\n+\t\t\t\t\t   pdb->v9.rcp[category + i].ofs2_dyn);\n+\t\t\tpdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw,\n+\t\t\t\t\t   pdb->v9.rcp[category + i].ofs2_rel);\n+\t\t\tpdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,\n+\t\t\t\t\t\t pdb->v9.rcp[category + i].ip_prot_tnl);\n+\t\t\tpdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw,\n+\t\t\t\t\t  pdb->v9.rcp[category + i].ppc_hsh);\n+\t\t\tpdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,\n+\t\t\t\t\t\t  pdb->v9.rcp[category + 
i].duplicate_en);\n+\t\t\tpdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,\n+\t\t\t\t\t\t   pdb->v9.rcp[category + i].duplicate_bit);\n+\t\t\tpdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,\n+\t\t\t\t\t\t   pdb->v9.rcp[category + i].pcap_keep_fcs);\n+\t\t\tpdb_nthw_rcp_flush(be->p_pdb_nthw);\n+\t\t}\n+\t}\n+\t_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);\n+\treturn 0;\n+}\n+\n+static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);\n+\n+\tif (pdb->ver == 9) {\n+\t\tpdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);\n+\t\tpdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);\n+\t\tpdb_nthw_config_flush(be->p_pdb_nthw);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ *  *****************  IOA  *******************\n+ */\n+\n+static bool ioa_get_present(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn be->p_ioa_nthw != NULL;\n+}\n+\n+static uint32_t ioa_get_version(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn (uint32_t)((module_get_major_version(be->p_ioa_nthw->m_ioa) << 16) |\n+\t\t\t  (module_get_minor_version(be->p_ioa_nthw->m_ioa) &\n+\t\t\t   0xffff));\n+}\n+\n+static int ioa_rcp_flush(void *be_dev, const struct ioa_func_s *ioa,\n+\t\t\t int category, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);\n+\n+\tif (ioa->ver == 4) {\n+\t\tioa_nthw_rcp_cnt(be->p_ioa_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tioa_nthw_rcp_select(be->p_ioa_nthw, category + i);\n+\t\t\tioa_nthw_rcp_tunnel_pop(be->p_ioa_nthw,\n+\t\t\t\t\t\tioa->v4.rcp[category + i].tunnel_pop);\n+\t\t\tioa_nthw_rcp_vlan_pop(be->p_ioa_nthw,\n+\t\t\t\t\t   ioa->v4.rcp[category + i].vlan_pop);\n+\t\t\tioa_nthw_rcp_vlan_push(be->p_ioa_nthw,\n+\t\t\t\t\t       ioa->v4.rcp[category + i].vlan_push);\n+\t\t\tioa_nthw_rcp_vlan_vid(be->p_ioa_nthw,\n+\t\t\t\t\t   ioa->v4.rcp[category + i].vlan_vid);\n+\t\t\tioa_nthw_rcp_vlan_dei(be->p_ioa_nthw,\n+\t\t\t\t\t   ioa->v4.rcp[category + i].vlan_dei);\n+\t\t\tioa_nthw_rcp_vlan_pcp(be->p_ioa_nthw,\n+\t\t\t\t\t   ioa->v4.rcp[category + i].vlan_pcp);\n+\t\t\tioa_nthw_rcp_vlan_tpid_sel(be->p_ioa_nthw,\n+\t\t\t\t\t\t   ioa->v4.rcp[category + i].vlan_tpid_sel);\n+\t\t\tioa_nthw_rcp_queue_override_en(be->p_ioa_nthw,\n+\t\t\t\t\t\t       ioa->v4.rcp[category + i].queue_override_en);\n+\t\t\tioa_nthw_rcp_queue_id(be->p_ioa_nthw,\n+\t\t\t\t\t   ioa->v4.rcp[category + i].queue_id);\n+\t\t\tioa_nthw_rcp_flush(be->p_ioa_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);\n+\treturn 0;\n+}\n+\n+static int ioa_special_tpid_flush(void *be_dev, const struct ioa_func_s *ioa)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, ioa, be->p_ioa_nthw);\n+\n+\tif (ioa->ver == 4) {\n+\t\tioa_nthw_special_vlan_tpid_cust_tpid0(be->p_ioa_nthw,\n+\t\t\t\t\t\t ioa->v4.tpid->cust_tpid_0);\n+\t\tioa_nthw_special_vlan_tpid_cust_tpid1(be->p_ioa_nthw,\n+\t\t\t\t\t\t ioa->v4.tpid->cust_tpid_1);\n+\t\tioa_nthw_special_vlan_tpid_flush(be->p_ioa_nthw);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);\n+\treturn 0;\n+}\n+\n+static int ioa_roa_epp_flush(void *be_dev, const struct ioa_func_s *ioa,\n+\t\t\t     int index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, 
ioa, be->p_ioa_nthw);\n+\n+\tif (ioa->ver == 4) {\n+\t\tioa_nthw_roa_epp_cnt(be->p_ioa_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tioa_nthw_roa_epp_select(be->p_ioa_nthw, index + i);\n+\t\t\tioa_nthw_roa_epp_push_tunnel(be->p_ioa_nthw,\n+\t\t\t\t\t\t     ioa->v4.roa_epp[index + i].push_tunnel);\n+\t\t\tioa_nthw_roa_epp_tx_port(be->p_ioa_nthw,\n+\t\t\t\t\t\t ioa->v4.roa_epp[index + i].tx_port);\n+\t\t\tioa_nthw_roa_epp_flush(be->p_ioa_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(ioa, be->p_ioa_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ *  *****************  ROA  *******************\n+ */\n+\n+static bool roa_get_present(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn be->p_roa_nthw != NULL;\n+}\n+\n+static uint32_t roa_get_version(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn (uint32_t)((module_get_major_version(be->p_roa_nthw->m_roa) << 16) |\n+\t\t\t  (module_get_minor_version(be->p_roa_nthw->m_roa) &\n+\t\t\t   0xffff));\n+}\n+\n+static int roa_tunhdr_flush(void *be_dev, const struct roa_func_s *roa,\n+\t\t\t    int index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);\n+\n+\tif (roa->ver == 6) {\n+\t\troa_nthw_tun_hdr_cnt(be->p_roa_nthw, 4);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tfor (int ii = 0; ii < 4; ii++) {\n+\t\t\t\troa_nthw_tun_hdr_select(be->p_roa_nthw,\n+\t\t\t\t\t\t     index + (i * 4) + ii);\n+\t\t\t\troa_nthw_tun_hdr_tunnel_hdr(be->p_roa_nthw,\n+\t\t\t\t\t\t\t    &roa->v6.tunhdr[index / 4 + i]\n+\t\t\t\t\t\t\t    .tunnel_hdr[ii * 4]);\n+\t\t\t\troa_nthw_tun_hdr_flush(be->p_roa_nthw);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);\n+\treturn 0;\n+}\n+\n+static int roa_tuncfg_flush(void *be_dev, const struct roa_func_s *roa,\n+\t\t\t    int category, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);\n+\n+\tif (roa->ver == 6) {\n+\t\troa_nthw_tun_cfg_cnt(be->p_roa_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\troa_nthw_tun_cfg_select(be->p_roa_nthw, category + i);\n+\t\t\troa_nthw_tun_cfg_tun_len(be->p_roa_nthw,\n+\t\t\t\t\t\t roa->v6.tuncfg[category + i].tun_len);\n+\t\t\troa_nthw_tun_cfg_tun_type(be->p_roa_nthw,\n+\t\t\t\t\t\t  roa->v6.tuncfg[category + i].tun_type);\n+\t\t\troa_nthw_tun_cfg_tun_vlan(be->p_roa_nthw,\n+\t\t\t\t\t\t  roa->v6.tuncfg[category + i].tun_vlan);\n+\t\t\troa_nthw_tun_cfg_ip_type(be->p_roa_nthw,\n+\t\t\t\t\t\t roa->v6.tuncfg[category + i].ip_type);\n+\t\t\troa_nthw_tun_cfg_ipcs_upd(be->p_roa_nthw,\n+\t\t\t\t\t\t  roa->v6.tuncfg[category + i].ipcs_upd);\n+\t\t\troa_nthw_tun_cfg_ipcs_precalc(be->p_roa_nthw,\n+\t\t\t\t\t\t      roa->v6.tuncfg[category + i].ipcs_precalc);\n+\t\t\troa_nthw_tun_cfg_iptl_upd(be->p_roa_nthw,\n+\t\t\t\t\t\t  roa->v6.tuncfg[category + i].iptl_upd);\n+\t\t\troa_nthw_tun_cfg_iptl_precalc(be->p_roa_nthw,\n+\t\t\t\t\t\t      roa->v6.tuncfg[category + i].iptl_precalc);\n+\t\t\troa_nthw_tun_cfg_vxlan_udp_len_upd(be->p_roa_nthw,\n+\t\t\t\troa->v6.tuncfg[category + i].vxlan_udp_len_upd);\n+\t\t\troa_nthw_tun_cfg_tx_lag_ix(be->p_roa_nthw,\n+\t\t\t\t\t\t   roa->v6.tuncfg[category + i].tx_lag_ix);\n+\t\t\troa_nthw_tun_cfg_recirculate(be->p_roa_nthw,\n+\t\t\t\t\t\t     roa->v6.tuncfg[category + i].recirculate);\n+\t\t\troa_nthw_tun_cfg_push_tunnel(be->p_roa_nthw,\n+\t\t\t\t\t\t     roa->v6.tuncfg[category + 
i].push_tunnel);\n+\t\t\troa_nthw_tun_cfg_recirc_port(be->p_roa_nthw,\n+\t\t\t\t\t\t     roa->v6.tuncfg[category + i].recirc_port);\n+\t\t\troa_nthw_tun_cfg_recirc_bypass(be->p_roa_nthw,\n+\t\t\t\t\t\t       roa->v6.tuncfg[category + i].recirc_bypass);\n+\t\t\troa_nthw_tun_cfg_flush(be->p_roa_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);\n+\treturn 0;\n+}\n+\n+static int roa_config_flush(void *be_dev, const struct roa_func_s *roa)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);\n+\n+\tif (roa->ver == 6) {\n+\t\troa_nthw_config_fwd_recirculate(be->p_roa_nthw,\n+\t\t\t\t\t     roa->v6.config->fwd_recirculate);\n+\t\troa_nthw_config_fwd_normal_pcks(be->p_roa_nthw,\n+\t\t\t\t\t    roa->v6.config->fwd_normal_pcks);\n+\t\troa_nthw_config_fwd_tx_port0(be->p_roa_nthw,\n+\t\t\t\t\t roa->v6.config->fwd_txport0);\n+\t\troa_nthw_config_fwd_tx_port1(be->p_roa_nthw,\n+\t\t\t\t\t roa->v6.config->fwd_txport1);\n+\t\troa_nthw_config_fwd_cell_builder_pcks(be->p_roa_nthw,\n+\t\t\t\t\t\t      roa->v6.config->fwd_cellbuilder_pcks);\n+\t\troa_nthw_config_fwd_non_normal_pcks(be->p_roa_nthw,\n+\t\t\t\t\t\t    roa->v6.config->fwd_non_normal_pcks);\n+\t\troa_nthw_config_flush(be->p_roa_nthw);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);\n+\treturn 0;\n+}\n+\n+static int roa_lagcfg_flush(void *be_dev, const struct roa_func_s *roa,\n+\t\t\t    int index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, roa, be->p_roa_nthw);\n+\n+\tif (roa->ver == 6) {\n+\t\troa_nthw_lag_cfg_cnt(be->p_roa_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\troa_nthw_lag_cfg_select(be->p_roa_nthw, index + i);\n+\t\t\troa_nthw_lag_cfg_tx_phy_port(be->p_roa_nthw,\n+\t\t\t\t\t\t     roa->v6.lagcfg[index + i].txphy_port);\n+\t\t\troa_nthw_lag_cfg_flush(be->p_roa_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(roa, be->p_roa_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ *  *****************  RMC  *******************\n+ */\n+\n+static bool rmc_get_present(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn be->p_rmc_nthw != NULL;\n+}\n+\n+static uint32_t rmc_get_version(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn (uint32_t)((module_get_major_version(be->p_rmc_nthw->m_rmc) << 16) |\n+\t\t\t  (module_get_minor_version(be->p_rmc_nthw->m_rmc) &\n+\t\t\t   0xffff));\n+}\n+\n+static int rmc_ctrl_flush(void *be_dev, const struct rmc_func_s *rmc)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, rmc, be->p_rmc_nthw);\n+\n+\tif (rmc->ver == 0x10003) {\n+\t\trmc_nthw_ctrl_block_statt(be->p_rmc_nthw,\n+\t\t\t\t       rmc->v1_3.ctrl->block_statt);\n+\t\trmc_nthw_ctrl_block_keep_a(be->p_rmc_nthw,\n+\t\t\t\t       rmc->v1_3.ctrl->block_keepa);\n+\t\trmc_nthw_ctrl_block_rpp_slice(be->p_rmc_nthw,\n+\t\t\t\t\t  rmc->v1_3.ctrl->block_rpp_slice);\n+\t\trmc_nthw_ctrl_block_mac_port(be->p_rmc_nthw,\n+\t\t\t\t\t rmc->v1_3.ctrl->block_mac_port);\n+\t\trmc_nthw_ctrl_lag_phy_odd_even(be->p_rmc_nthw,\n+\t\t\t\t\t  rmc->v1_3.ctrl->lag_phy_odd_even);\n+\t\trmc_nthw_ctrl_flush(be->p_rmc_nthw);\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(rmc, be->p_rmc_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ *  *****************  TPE  *******************\n+ */\n+\n+static bool tpe_get_present(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\treturn be->p_csu_nthw != NULL && be->p_hfu_nthw != 
NULL &&\n+\t       be->p_rpp_lr_nthw != NULL && be->p_tx_cpy_nthw != NULL &&\n+\t       be->p_tx_ins_nthw != NULL && be->p_tx_rpl_nthw != NULL;\n+}\n+\n+static uint32_t tpe_get_version(void *be_dev)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\tconst uint32_t csu_version =\n+\t\t(uint32_t)((module_get_major_version(be->p_csu_nthw->m_csu) << 16) |\n+\t\t\t   (module_get_minor_version(be->p_csu_nthw->m_csu) &\n+\t\t\t    0xffff));\n+\n+\tconst uint32_t hfu_version =\n+\t\t(uint32_t)((module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |\n+\t\t\t   (module_get_minor_version(be->p_hfu_nthw->m_hfu) &\n+\t\t\t    0xffff));\n+\n+\tconst uint32_t rpp_lr_version =\n+\t\t(uint32_t)((module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr)\n+\t\t\t    << 16) |\n+\t\t\t   (module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) &\n+\t\t\t    0xffff));\n+\n+\tconst uint32_t tx_cpy_version =\n+\t\t(uint32_t)((module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy)\n+\t\t\t    << 16) |\n+\t\t\t   (module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) &\n+\t\t\t    0xffff));\n+\n+\tconst uint32_t tx_ins_version =\n+\t\t(uint32_t)((module_get_major_version(be->p_tx_ins_nthw->m_tx_ins)\n+\t\t\t    << 16) |\n+\t\t\t   (module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) &\n+\t\t\t    0xffff));\n+\n+\tconst uint32_t tx_rpl_version =\n+\t\t(uint32_t)((module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl)\n+\t\t\t    << 16) |\n+\t\t\t   (module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) &\n+\t\t\t    0xffff));\n+\n+\tif (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 0 &&\n+\t\t\ttx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)\n+\t\treturn 1;\n+\n+\tif (csu_version == 0 && hfu_version == 1 && rpp_lr_version == 1 &&\n+\t\t\ttx_cpy_version == 1 && tx_ins_version == 1 && tx_rpl_version == 2)\n+\t\treturn 2;\n+\n+\tassert(false);\n+\treturn 0;\n+}\n+\n+static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,\n+\t\t\t     int index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);\n+\n+\tif (rpp_lr->ver >= 1) {\n+\t\trpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\trpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);\n+\t\t\trpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw,\n+\t\t\t\t\t rpp_lr->v1.rpp_rcp[index + i].exp);\n+\t\t\trpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);\n+\treturn 0;\n+}\n+\n+static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr,\n+\t\t\t\t int index, int cnt)\n+{\n+\tint res = 0;\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);\n+\n+\tif (rpp_lr->ver >= 2) {\n+\t\trpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\trpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);\n+\t\t\trpp_lr_nthw_ifr_rcp_en(be->p_rpp_lr_nthw,\n+\t\t\t\t\t   rpp_lr->v2.rpp_ifr_rcp[index + i].en);\n+\t\t\trpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,\n+\t\t\t\t\t\trpp_lr->v2.rpp_ifr_rcp[index + i].mtu);\n+\t\t\trpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);\n+\t\t}\n+\t} else {\n+\t\tres = -1;\n+\t}\n+\t_CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);\n+\treturn res;\n+}\n+\n+static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr,\n+\t\t\t     int index, int cnt)\n+{\n+\tint res = 0;\n+\tstruct backend_dev_s *be = (struct 
backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);\n+\n+\tif (ifr->ver >= 2) {\n+\t\tifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tifr_nthw_rcp_select(be->p_ifr_nthw, index + i);\n+\t\t\tifr_nthw_rcp_en(be->p_ifr_nthw,\n+\t\t\t\t      ifr->v2.ifr_rcp[index + i].en);\n+\t\t\tifr_nthw_rcp_mtu(be->p_ifr_nthw,\n+\t\t\t\t       ifr->v2.ifr_rcp[index + i].mtu);\n+\t\t\tifr_nthw_rcp_flush(be->p_ifr_nthw);\n+\t\t}\n+\t} else {\n+\t\tres = -1;\n+\t}\n+\t_CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);\n+\treturn res;\n+}\n+\n+static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins,\n+\t\t\t     int index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);\n+\n+\tif (tx_ins->ver >= 1) {\n+\t\ttx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\ttx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);\n+\t\t\ttx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw,\n+\t\t\t\t\t tx_ins->v1.ins_rcp[index + i].dyn);\n+\t\t\ttx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw,\n+\t\t\t\t\t tx_ins->v1.ins_rcp[index + i].ofs);\n+\t\t\ttx_ins_nthw_rcp_len(be->p_tx_ins_nthw,\n+\t\t\t\t\t tx_ins->v1.ins_rcp[index + i].len);\n+\t\t\ttx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);\n+\treturn 0;\n+}\n+\n+static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl,\n+\t\t\t     int index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);\n+\n+\tif (tx_rpl->ver >= 1) {\n+\t\ttx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\ttx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);\n+\t\t\ttx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw,\n+\t\t\t\t\t tx_rpl->v1.rpl_rcp[index + i].dyn);\n+\t\t\ttx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw,\n+\t\t\t\t\t tx_rpl->v1.rpl_rcp[index + i].ofs);\n+\t\t\ttx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw,\n+\t\t\t\t\t tx_rpl->v1.rpl_rcp[index + i].len);\n+\t\t\ttx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,\n+\t\t\t\t\t\ttx_rpl->v1.rpl_rcp[index + i].rpl_ptr);\n+\t\t\ttx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,\n+\t\t\t\t\t\t tx_rpl->v1.rpl_rcp[index + i].ext_prio);\n+\t\t\ttx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);\n+\treturn 0;\n+}\n+\n+static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl,\n+\t\t\t     int index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);\n+\n+\tif (tx_rpl->ver >= 1) {\n+\t\ttx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\ttx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);\n+\t\t\ttx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,\n+\t\t\t\t\t\ttx_rpl->v1.rpl_ext[index + i].rpl_ptr);\n+\t\t\ttx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);\n+\treturn 0;\n+}\n+\n+static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl,\n+\t\t\t     int index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);\n+\n+\tif (tx_rpl->ver >= 1) {\n+\t\ttx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\ttx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + 
i);\n+\t\t\ttx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,\n+\t\t\t\t\t   tx_rpl->v1.rpl_rpl[index + i].value);\n+\t\t\ttx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);\n+\treturn 0;\n+}\n+\n+static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy,\n+\t\t\t     int index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\tunsigned int wr_index = -1;\n+\n+\t_CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);\n+\n+\tif (tx_cpy->ver >= 1) {\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tif (wr_index !=\n+\t\t\t\t\t(index + i) / tx_cpy->nb_rcp_categories) {\n+\t\t\t\twr_index =\n+\t\t\t\t\t(index + i) / tx_cpy->nb_rcp_categories;\n+\t\t\t\ttx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index,\n+\t\t\t\t\t\t    1);\n+\t\t\t}\n+\n+\t\t\ttx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,\n+\t\t\t\t\t\t  (index + i) % tx_cpy->nb_rcp_categories);\n+\t\t\ttx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,\n+\t\t\t\ttx_cpy->v1.cpy_rcp[index + i].reader_select);\n+\t\t\ttx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,\n+\t\t\t\t\t    tx_cpy->v1.cpy_rcp[index + i].dyn);\n+\t\t\ttx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,\n+\t\t\t\t\t    tx_cpy->v1.cpy_rcp[index + i].ofs);\n+\t\t\ttx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,\n+\t\t\t\t\t    tx_cpy->v1.cpy_rcp[index + i].len);\n+\t\t\ttx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);\n+\treturn 0;\n+}\n+\n+static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu,\n+\t\t\t     int index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);\n+\n+\tif (hfu->ver >= 1) {\n+\t\thfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\thfu_nthw_rcp_select(be->p_hfu_nthw, index + i);\n+\t\t\thfu_nthw_rcp_len_a_wr(be->p_hfu_nthw,\n+\t\t\t\t\t  hfu->v1.hfu_rcp[index + i].len_a_wr);\n+\t\t\thfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,\n+\t\t\t\t\t\t  hfu->v1.hfu_rcp[index + i].len_a_outer_l4_len);\n+\t\t\thfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_a_pos_dyn);\n+\t\t\thfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_a_pos_ofs);\n+\t\t\thfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_a_add_dyn);\n+\t\t\thfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_a_add_ofs);\n+\t\t\thfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_a_sub_dyn);\n+\t\t\thfu_nthw_rcp_len_b_wr(be->p_hfu_nthw,\n+\t\t\t\t\t      hfu->v1.hfu_rcp[index + i].len_b_wr);\n+\t\t\thfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_b_pos_dyn);\n+\t\t\thfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_b_pos_ofs);\n+\t\t\thfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_b_add_dyn);\n+\t\t\thfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_b_add_ofs);\n+\t\t\thfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_b_sub_dyn);\n+\t\t\thfu_nthw_rcp_len_c_wr(be->p_hfu_nthw,\n+\t\t\t\t\t      hfu->v1.hfu_rcp[index + i].len_c_wr);\n+\t\t\thfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,\n+\t\t\t\t\t\t   
hfu->v1.hfu_rcp[index + i].len_c_pos_dyn);\n+\t\t\thfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_c_pos_ofs);\n+\t\t\thfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_c_add_dyn);\n+\t\t\thfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_c_add_ofs);\n+\t\t\thfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,\n+\t\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].len_c_sub_dyn);\n+\t\t\thfu_nthw_rcp_ttl_wr(be->p_hfu_nthw,\n+\t\t\t\t\t    hfu->v1.hfu_rcp[index + i].ttl_wr);\n+\t\t\thfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,\n+\t\t\t\t\t\t hfu->v1.hfu_rcp[index + i].ttl_pos_dyn);\n+\t\t\thfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,\n+\t\t\t\t\t\t hfu->v1.hfu_rcp[index + i].ttl_pos_ofs);\n+\t\t\thfu_nthw_rcp_csinf(be->p_hfu_nthw,\n+\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].cs_inf);\n+\t\t\thfu_nthw_rcp_l3prt(be->p_hfu_nthw,\n+\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].l3_prt);\n+\t\t\thfu_nthw_rcp_l3frag(be->p_hfu_nthw,\n+\t\t\t\t\t    hfu->v1.hfu_rcp[index + i].l3_frag);\n+\t\t\thfu_nthw_rcp_tunnel(be->p_hfu_nthw,\n+\t\t\t\t\t    hfu->v1.hfu_rcp[index + i].tunnel);\n+\t\t\thfu_nthw_rcp_l4prt(be->p_hfu_nthw,\n+\t\t\t\t\t   hfu->v1.hfu_rcp[index + i].l4_prt);\n+\t\t\thfu_nthw_rcp_ol3ofs(be->p_hfu_nthw,\n+\t\t\t\t\t    hfu->v1.hfu_rcp[index + i].outer_l3_ofs);\n+\t\t\thfu_nthw_rcp_ol4ofs(be->p_hfu_nthw,\n+\t\t\t\t\t    hfu->v1.hfu_rcp[index + i].outer_l4_ofs);\n+\t\t\thfu_nthw_rcp_il3ofs(be->p_hfu_nthw,\n+\t\t\t\t\t    hfu->v1.hfu_rcp[index + i].inner_l3_ofs);\n+\t\t\thfu_nthw_rcp_il4ofs(be->p_hfu_nthw,\n+\t\t\t\t\t    hfu->v1.hfu_rcp[index + i].inner_l4_ofs);\n+\t\t\thfu_nthw_rcp_flush(be->p_hfu_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);\n+\treturn 0;\n+}\n+\n+static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu,\n+\t\t\t     int index, int cnt)\n+{\n+\tstruct backend_dev_s *be = (struct backend_dev_s *)be_dev;\n+\n+\t_CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);\n+\n+\tif (csu->ver >= 1) {\n+\t\tcsu_nthw_rcp_cnt(be->p_csu_nthw, 1);\n+\t\tfor (int i = 0; i < cnt; i++) {\n+\t\t\tcsu_nthw_rcp_select(be->p_csu_nthw, index + i);\n+\t\t\tcsu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,\n+\t\t\t\t\t\t  csu->v1.csu_rcp[index + i].ol3_cmd);\n+\t\t\tcsu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,\n+\t\t\t\t\t\t  csu->v1.csu_rcp[index + i].ol4_cmd);\n+\t\t\tcsu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,\n+\t\t\t\t\t\t  csu->v1.csu_rcp[index + i].il3_cmd);\n+\t\t\tcsu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,\n+\t\t\t\t\t\t  csu->v1.csu_rcp[index + i].il4_cmd);\n+\t\t\tcsu_nthw_rcp_flush(be->p_csu_nthw);\n+\t\t}\n+\t}\n+\n+\t_CHECK_DEBUG_OFF(csu, be->p_csu_nthw);\n+\treturn 0;\n+}\n+\n+/*\n+ *  *****************  DBS  *******************\n+ */\n+\n+static int alloc_rx_queue(void *be_dev, int queue_id)\n+{\n+\t(void)be_dev;\n+\t(void)queue_id;\n+\tprintf(\"ERROR alloc Rx queue\\n\");\n+\treturn -1;\n+}\n+\n+static int free_rx_queue(void *be_dev, int hw_queue)\n+{\n+\t(void)be_dev;\n+\t(void)hw_queue;\n+\tprintf(\"ERROR free Rx queue\\n\");\n+\treturn 0;\n+}\n+\n+const struct flow_api_backend_ops flow_be_iface = 
{\n+\t1,\n+\n+\tset_debug_mode,\n+\tget_nb_phy_ports,\n+\tget_nb_rx_ports,\n+\tget_ltx_avail,\n+\tget_nb_cat_funcs,\n+\tget_nb_categories,\n+\tget_nb_cat_km_if_cnt,\n+\tget_nb_cat_km_if_m0,\n+\tget_nb_cat_km_if_m1,\n+\tget_nb_queues,\n+\tget_nb_km_flow_types,\n+\tget_nb_pm_ext,\n+\tget_nb_len,\n+\tget_kcc_size,\n+\tget_kcc_banks,\n+\tget_nb_km_categories,\n+\tget_nb_km_cam_banks,\n+\tget_nb_km_cam_record_words,\n+\tget_nb_km_cam_records,\n+\tget_nb_km_tcam_banks,\n+\tget_nb_km_tcam_bank_width,\n+\tget_nb_flm_categories,\n+\tget_nb_flm_size_mb,\n+\tget_nb_flm_entry_size,\n+\tget_nb_flm_variant,\n+\tget_nb_flm_prios,\n+\tget_nb_flm_pst_profiles,\n+\tget_nb_hst_categories,\n+\tget_nb_qsl_categories,\n+\tget_nb_qsl_qst_entries,\n+\tget_nb_pdb_categories,\n+\tget_nb_ioa_categories,\n+\tget_nb_roa_categories,\n+\tget_nb_tpe_categories,\n+\tget_nb_tx_cpy_writers,\n+\tget_nb_tx_cpy_mask_mem,\n+\tget_nb_tx_rpl_depth,\n+\tget_nb_tx_rpl_ext_categories,\n+\tget_nb_tpe_ifr_categories,\n+\n+\talloc_rx_queue,\n+\tfree_rx_queue,\n+\n+\tcat_get_present,\n+\tcat_get_version,\n+\tcat_cfn_flush,\n+\n+\tcat_kce_flush,\n+\tcat_kcs_flush,\n+\tcat_fte_flush,\n+\n+\tcat_cte_flush,\n+\tcat_cts_flush,\n+\tcat_cot_flush,\n+\tcat_cct_flush,\n+\tcat_exo_flush,\n+\tcat_rck_flush,\n+\tcat_len_flush,\n+\tcat_kcc_flush,\n+\tcat_cce_flush,\n+\tcat_ccs_flush,\n+\n+\tkm_get_present,\n+\tkm_get_version,\n+\tkm_rcp_flush,\n+\tkm_cam_flush,\n+\tkm_tcam_flush,\n+\tkm_tci_flush,\n+\tkm_tcq_flush,\n+\n+\tflm_get_present,\n+\tflm_get_version,\n+\tflm_control_flush,\n+\tflm_status_flush,\n+\tflm_status_update,\n+\tflm_timeout_flush,\n+\tflm_scrub_flush,\n+\tflm_load_bin_flush,\n+\tflm_load_pps_flush,\n+\tflm_load_lps_flush,\n+\tflm_load_aps_flush,\n+\tflm_prio_flush,\n+\tflm_pst_flush,\n+\tflm_rcp_flush,\n+\tflm_buf_ctrl_update,\n+\tflm_stat_update,\n+\tflm_lrn_data_flush,\n+\tflm_inf_data_update,\n+\tflm_sta_data_update,\n+\n+\thsh_get_present,\n+\thsh_get_version,\n+\thsh_rcp_flush,\n+\n+\thst_get_present,\n+\thst_get_version,\n+\thst_rcp_flush,\n+\n+\tqsl_get_present,\n+\tqsl_get_version,\n+\tqsl_rcp_flush,\n+\tqsl_qst_flush,\n+\tqsl_qen_flush,\n+\tqsl_unmq_flush,\n+\n+\tslc_get_present,\n+\tslc_get_version,\n+\tslc_rcp_flush,\n+\n+\tslc_lr_get_present,\n+\tslc_lr_get_version,\n+\tslc_lr_rcp_flush,\n+\n+\tpdb_get_present,\n+\tpdb_get_version,\n+\tpdb_rcp_flush,\n+\tpdb_config_flush,\n+\n+\tioa_get_present,\n+\tioa_get_version,\n+\tioa_rcp_flush,\n+\tioa_special_tpid_flush,\n+\tioa_roa_epp_flush,\n+\n+\troa_get_present,\n+\troa_get_version,\n+\troa_tunhdr_flush,\n+\troa_tuncfg_flush,\n+\troa_config_flush,\n+\troa_lagcfg_flush,\n+\n+\trmc_get_present,\n+\trmc_get_version,\n+\trmc_ctrl_flush,\n+\n+\ttpe_get_present,\n+\ttpe_get_version,\n+\ttpe_rpp_rcp_flush,\n+\ttpe_rpp_ifr_rcp_flush,\n+\ttpe_ifr_rcp_flush,\n+\ttpe_ins_rcp_flush,\n+\ttpe_rpl_rcp_flush,\n+\ttpe_rpl_ext_flush,\n+\ttpe_rpl_rpl_flush,\n+\ttpe_cpy_rcp_flush,\n+\ttpe_hfu_rcp_flush,\n+\ttpe_csu_rcp_flush,\n+};\n+\n+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,\n+\t\tvoid **dev)\n+{\n+\tuint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;\n+\n+\tstruct info_nthw *pinfonthw = info_nthw_new();\n+\n+\tinfo_nthw_init(pinfonthw, p_fpga, physical_adapter_no);\n+\tbe_devs[physical_adapter_no].p_info_nthw = pinfonthw;\n+\n+\t/* Init nthw CAT */\n+\tif (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct cat_nthw *pcatnthw = cat_nthw_new();\n+\n+\t\tcat_nthw_init(pcatnthw, p_fpga, 
physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_cat_nthw = pcatnthw;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_cat_nthw = NULL;\n+\t}\n+\t/* Init nthw KM */\n+\tif (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct km_nthw *pkmnthw = km_nthw_new();\n+\n+\t\tkm_nthw_init(pkmnthw, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_km_nthw = pkmnthw;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_km_nthw = NULL;\n+\t}\n+\t/* Init nthw FLM */\n+\tif (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct flm_nthw *pflmnthw = flm_nthw_new();\n+\n+\t\tflm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_flm_nthw = pflmnthw;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_flm_nthw = NULL;\n+\t}\n+\t/* Init nthw IFR */\n+\tif (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct ifr_nthw *ifrnthw = ifr_nthw_new();\n+\n+\t\tifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_ifr_nthw = NULL;\n+\t}\n+\t/* Init nthw HSH */\n+\tif (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct hsh_nthw *phshnthw = hsh_nthw_new();\n+\n+\t\thsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_hsh_nthw = phshnthw;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_hsh_nthw = NULL;\n+\t}\n+\t/* Init nthw HST */\n+\tif (hst_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct hst_nthw *phstnthw = hst_nthw_new();\n+\n+\t\thst_nthw_init(phstnthw, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_hst_nthw = phstnthw;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_hst_nthw = NULL;\n+\t}\n+\t/* Init nthw QSL */\n+\tif (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct qsl_nthw *pqslnthw = qsl_nthw_new();\n+\n+\t\tqsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_qsl_nthw = NULL;\n+\t}\n+\t/* Init nthw SLC */\n+\tif (slc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct slc_nthw *pslcnthw = slc_nthw_new();\n+\n+\t\tslc_nthw_init(pslcnthw, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_slc_nthw = pslcnthw;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_slc_nthw = NULL;\n+\t}\n+\t/* Init nthw SLC LR */\n+\tif (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();\n+\n+\t\tslc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_slc_lr_nthw = NULL;\n+\t}\n+\t/* Init nthw PDB */\n+\tif (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct pdb_nthw *ppdbnthw = pdb_nthw_new();\n+\n+\t\tpdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_pdb_nthw = NULL;\n+\t}\n+\t/* Init nthw IOA */\n+\tif (ioa_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct ioa_nthw *pioanthw = ioa_nthw_new();\n+\n+\t\tioa_nthw_init(pioanthw, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_ioa_nthw = pioanthw;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_ioa_nthw = NULL;\n+\t}\n+\t/* Init nthw ROA */\n+\tif (roa_nthw_init(NULL, 
p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct roa_nthw *proanthw = roa_nthw_new();\n+\n+\t\troa_nthw_init(proanthw, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_roa_nthw = proanthw;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_roa_nthw = NULL;\n+\t}\n+\t/* Init nthw RMC */\n+\tif (rmc_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct rmc_nthw *prmcnthw = rmc_nthw_new();\n+\n+\t\trmc_nthw_init(prmcnthw, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_rmc_nthw = prmcnthw;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_rmc_nthw = NULL;\n+\t}\n+\t/* Init nthw HFU */\n+\tif (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct hfu_nthw *ptr = hfu_nthw_new();\n+\n+\t\thfu_nthw_init(ptr, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_hfu_nthw = ptr;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_hfu_nthw = NULL;\n+\t}\n+\t/* Init nthw RPP_LR */\n+\tif (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct rpp_lr_nthw *ptr = rpp_lr_nthw_new();\n+\n+\t\trpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;\n+\t}\n+\t/* Init nthw TX_CPY */\n+\tif (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct tx_cpy_nthw *ptr = tx_cpy_nthw_new();\n+\n+\t\ttx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;\n+\t}\n+\t/* Init nthw CSU */\n+\tif (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct csu_nthw *ptr = csu_nthw_new();\n+\n+\t\tcsu_nthw_init(ptr, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_csu_nthw = ptr;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_csu_nthw = NULL;\n+\t}\n+\t/* Init nthw TX_INS */\n+\tif (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct tx_ins_nthw *ptr = tx_ins_nthw_new();\n+\n+\t\ttx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_tx_ins_nthw = ptr;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_tx_ins_nthw = NULL;\n+\t}\n+\t/* Init nthw TX_RPL */\n+\tif (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {\n+\t\tstruct tx_rpl_nthw *ptr = tx_rpl_nthw_new();\n+\n+\t\ttx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);\n+\t\tbe_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;\n+\t} else {\n+\t\tbe_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;\n+\t}\n+\tbe_devs[physical_adapter_no].adapter_no = physical_adapter_no;\n+\t*dev = (void *)&be_devs[physical_adapter_no];\n+\n+\treturn &flow_be_iface;\n+}\n+\n+void bin_flow_backend_done(void *dev)\n+{\n+\tstruct backend_dev_s *be_dev = (struct backend_dev_s 
*)dev;\n+\n+\tinfo_nthw_delete(be_dev->p_info_nthw);\n+\tcat_nthw_delete(be_dev->p_cat_nthw);\n+\tkm_nthw_delete(be_dev->p_km_nthw);\n+\tflm_nthw_delete(be_dev->p_flm_nthw);\n+\thsh_nthw_delete(be_dev->p_hsh_nthw);\n+\thst_nthw_delete(be_dev->p_hst_nthw);\n+\tqsl_nthw_delete(be_dev->p_qsl_nthw);\n+\tslc_nthw_delete(be_dev->p_slc_nthw);\n+\tslc_lr_nthw_delete(be_dev->p_slc_lr_nthw);\n+\tpdb_nthw_delete(be_dev->p_pdb_nthw);\n+\tioa_nthw_delete(be_dev->p_ioa_nthw);\n+\troa_nthw_delete(be_dev->p_roa_nthw);\n+\trmc_nthw_delete(be_dev->p_rmc_nthw);\n+\tcsu_nthw_delete(be_dev->p_csu_nthw);\n+\thfu_nthw_delete(be_dev->p_hfu_nthw);\n+\trpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);\n+\ttx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);\n+\ttx_ins_nthw_delete(be_dev->p_tx_ins_nthw);\n+\ttx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);\n+}\ndiff --git a/drivers/net/ntnic/nthw/flow_filter/flow_backend.h b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h\nnew file mode 100644\nindex 0000000000..17fdcada3f\n--- /dev/null\n+++ b/drivers/net/ntnic/nthw/flow_filter/flow_backend.h\n@@ -0,0 +1,15 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef __FLOW_BACKEND_H__\n+#define __FLOW_BACKEND_H__\n+\n+#include <stdint.h> /* uint8_t */\n+#include \"nthw_fpga_model.h\"\n+\n+const struct flow_api_backend_ops *bin_flow_backend_init(nt_fpga_t *p_fpga,\n+\t\tvoid **be_dev);\n+void bin_flow_backend_done(void *be_dev);\n+\n+#endif /* __FLOW_BACKEND_H__ */\ndiff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.c b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c\nnew file mode 100644\nindex 0000000000..90aeb71bd7\n--- /dev/null\n+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.c\n@@ -0,0 +1,39 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include \"nthw_drv.h\"\n+#include \"flow_filter.h\"\n+#include \"flow_api_backend.h\"\n+#include \"flow_backend.h\"\n+#include \"flow_api_nic_setup.h\"\n+\n+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,\n+\t\t   int adapter_no)\n+{\n+\tvoid *be_dev = NULL;\n+\tstruct flow_nic_dev *flow_nic;\n+\n+\tNT_LOG(DBG, FILTER, \"Initializing flow filter api\\n\");\n+\tconst struct flow_api_backend_ops *iface =\n+\t\tbin_flow_backend_init(p_fpga, &be_dev);\n+\n+\tflow_nic = flow_api_create((uint8_t)adapter_no, iface, be_dev);\n+\tif (!flow_nic) {\n+\t\t*p_flow_device = NULL;\n+\t\treturn -1;\n+\t}\n+\t*p_flow_device = flow_nic;\n+\treturn 0;\n+}\n+\n+int flow_filter_done(struct flow_nic_dev *dev)\n+{\n+\tvoid *be_dev = flow_api_get_be_dev(dev);\n+\n+\tint res = flow_api_done(dev);\n+\n+\tif (be_dev)\n+\t\tbin_flow_backend_done(be_dev);\n+\treturn res;\n+}\ndiff --git a/drivers/net/ntnic/nthw/flow_filter/flow_filter.h b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h\nnew file mode 100644\nindex 0000000000..8ea21a614a\n--- /dev/null\n+++ b/drivers/net/ntnic/nthw/flow_filter/flow_filter.h\n@@ -0,0 +1,16 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef __FLOW_FILTER_HPP__\n+#define __FLOW_FILTER_HPP__\n+#undef USE_OPAE\n+\n+#include \"nthw_fpga_model.h\"\n+#include \"flow_api.h\"\n+\n+int flow_filter_init(nt_fpga_t *p_fpga, struct flow_nic_dev **p_flow_device,\n+\t\t   int adapter_no);\n+int flow_filter_done(struct flow_nic_dev *dev);\n+\n+#endif /* __FLOW_FILTER_HPP__ */\n",
    "prefixes": [
        "v10",
        "6/8"
    ]
}
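
Editor's note (not part of the API record or of the recorded patch): the diff above adds two public entry points, flow_filter_init() and flow_filter_done(), whose bodies show that they wrap bin_flow_backend_init()/flow_api_create() and flow_api_done()/bin_flow_backend_done() respectively. The following is a minimal, hypothetical C sketch of how a caller might drive that pair during adapter bring-up and teardown; the wrapper functions, their names, and the origin of the p_fpga handle and adapter_no are illustrative assumptions, not taken from the patch.

/*
 * Hypothetical usage sketch only -- grounded solely in the signatures and
 * call flow visible in flow_filter.c/flow_filter.h from the patch above.
 */
#include "flow_filter.h"   /* flow_filter_init(), flow_filter_done() */

static struct flow_nic_dev *example_attach_flow_filter(nt_fpga_t *p_fpga,
		int adapter_no)
{
	struct flow_nic_dev *flow_dev = NULL;

	/*
	 * flow_filter_init() probes and sets up the nthw backend via
	 * bin_flow_backend_init() (CAT, KM, FLM, ...), then calls
	 * flow_api_create(); on success it returns 0 and fills in flow_dev.
	 */
	if (flow_filter_init(p_fpga, &flow_dev, adapter_no) != 0)
		return NULL; /* backend or flow API setup failed */

	return flow_dev;
}

static void example_detach_flow_filter(struct flow_nic_dev *flow_dev)
{
	/*
	 * flow_filter_done() tears down the flow API first (flow_api_done())
	 * and then releases the backend device via bin_flow_backend_done().
	 */
	if (flow_dev)
		(void)flow_filter_done(flow_dev);
}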