get:
Show a patch.

patch:
Update a patch (partial update: send only the fields to change).

put:
Update a patch (full update: send the complete writable representation).
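
For illustration, a minimal sketch (using Python's requests library) of how these methods might be driven against this instance. The base URL matches the record below; the API token and the new state value are hypothetical and depend on your account's rights and the project's configured states:

    import requests

    BASE = "http://patchwork.dpdk.org/api"
    PATCH_ID = 131310                       # the patch shown in the response below
    TOKEN = "0123456789abcdef"              # hypothetical API token (Patchwork uses DRF token auth)

    # get: show a patch -- read access needs no authentication
    resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # patch: update selected fields -- requires a token with maintainer rights on the project
    resp = requests.patch(
        f"{BASE}/patches/{PATCH_ID}/",
        headers={"Authorization": f"Token {TOKEN}"},
        json={"state": "accepted"},         # hypothetical new state; send only the fields to change
    )
    resp.raise_for_status()

    # put: same endpoint, but the body should carry the full writable representation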

GET /api/patches/131310/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 131310,
    "url": "http://patchwork.dpdk.org/api/patches/131310/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20230908160715.2498821-7-mko-plv@napatech.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230908160715.2498821-7-mko-plv@napatech.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230908160715.2498821-7-mko-plv@napatech.com",
    "date": "2023-09-08T16:07:14",
    "name": "[v16,7/8] net/ntnic: adds ethdev and makes PMD available",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "e21b406494906abf259e8a3c566560c812ee1f69",
    "submitter": {
        "id": 3153,
        "url": "http://patchwork.dpdk.org/api/people/3153/?format=api",
        "name": "Mykola Kostenok",
        "email": "mko-plv@napatech.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patchwork.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20230908160715.2498821-7-mko-plv@napatech.com/mbox/",
    "series": [
        {
            "id": 29464,
            "url": "http://patchwork.dpdk.org/api/series/29464/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=29464",
            "date": "2023-09-08T16:07:10",
            "name": "[v16,1/8] net/ntnic: initial commit which adds register defines",
            "version": 16,
            "mbox": "http://patchwork.dpdk.org/series/29464/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/131310/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/131310/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3F7994254A;\n\tFri,  8 Sep 2023 18:07:53 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 3887C4067A;\n\tFri,  8 Sep 2023 18:07:06 +0200 (CEST)",
            "from egress-ip40a.ess.de.barracuda.com\n (egress-ip40a.ess.de.barracuda.com [18.185.115.199])\n by mails.dpdk.org (Postfix) with ESMTP id 741EA402F2\n for <dev@dpdk.org>; Fri,  8 Sep 2023 18:07:04 +0200 (CEST)",
            "from EUR05-VI1-obe.outbound.protection.outlook.com\n (mail-vi1eur05lp2172.outbound.protection.outlook.com [104.47.17.172]) by\n mx-outbound10-0.eu-central-1a.ess.aws.cudaops.com (version=TLSv1.2\n cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NO);\n Fri, 08 Sep 2023 16:07:02 +0000",
            "from AM5PR1001CA0040.EURPRD10.PROD.OUTLOOK.COM\n (2603:10a6:206:15::17) by PA4P190MB1232.EURP190.PROD.OUTLOOK.COM\n (2603:10a6:102:101::6) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6768.30; Fri, 8 Sep\n 2023 16:06:48 +0000",
            "from AMS0EPF000001B4.eurprd05.prod.outlook.com\n (2603:10a6:206:15:cafe::7d) by AM5PR1001CA0040.outlook.office365.com\n (2603:10a6:206:15::17) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6768.30 via Frontend\n Transport; Fri, 8 Sep 2023 16:06:48 +0000",
            "from k8s-node.default.svc.cluster.local (178.72.21.4) by\n AMS0EPF000001B4.mail.protection.outlook.com (10.167.16.168) with Microsoft\n SMTP Server id 15.20.6792.11 via Frontend Transport; Fri, 8 Sep 2023 16:06:48\n +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=hpnXxKj6mDUBx9k8+vBwLXle6UmnGzDI7qXaaU/fatrfp7Ba9sw2youo3/v/bHu54zeKkzij5qH+xruabwdcYsGSVz17iZoS54IvnbhdhaIkex9XpzDaMTkuU3lrgDygPL7MQVFXkqKfOn4W8D9nWvgZTDXX3tTZUBy8qbzt84A/Mfx3uyn7TMhL00oLXwiBZ81PSh98ixWUsfulwfrfNmdDVey24V8331ucGGmNyXmSlAWD2BWXcsKS6EzFl7dpJqpK00tV9/ZWtarrotVzNsupxUc4UxakZu1bygHXc73pHH5y4XFp9o7hBGXck6rXDBKvo4RgfNT1yh5pdEelag==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=6JLmTfVcfGzqIIwogmcyZcr5ZUgLxON0tIASRbHIDyM=;\n b=GiRFWid6EJqQbKSApyz+9O6z+8Zg+YPo6E2p1SKhvVFgTU1xZdaSApJItiCrNtb4zJi1XXSNWGV0uovUlDR7GumQl4/bKQZiCG3XyiBJM8kCtiMX6VdELqkleG8QfT6trQyIcnh97QY/lsuxuqH3/xLJkybM3BaOlCQr5hsHWgP2s5xNkhJ19LAcrf8+Mf91nwBD2OtT5OZHxpmT587p02zEfFXmQ12mOWAH3ERrvQ+zt5gkU9VjTe4rP1BrikCJJbdjTNr4GqtSXma1PaKlmYbij+RAhwWofJZkJwLu4GIriBoDoF9JBi3JjpKjnC9sh7ofMLV1d8ljbxeT0eIWjA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=fail (sender ip is\n 178.72.21.4) smtp.rcpttodomain=dpdk.org smtp.mailfrom=napatech.com;\n dmarc=fail (p=none sp=none pct=100) action=none header.from=napatech.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=napatech.com;\n s=selector1;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=6JLmTfVcfGzqIIwogmcyZcr5ZUgLxON0tIASRbHIDyM=;\n b=YeVFoyGYCtKQWNge47s/ZYSUD9rQKwz/xMJp+N/w2CoxTbRiI5cVgv8lyeOV8LUnoVLX6ILmy5UUtP/6FPVNmuO7I1mt8S1gKpMKbqEt1YgbDkJfKGC3msPtPLO8loOS9Q1OL7EJYl53/o4jW4uMBz4mCJ7rpnzuWh0KO4+ufNw=",
        "X-MS-Exchange-Authentication-Results": "spf=fail (sender IP is 178.72.21.4)\n smtp.mailfrom=napatech.com; dkim=none (message not signed)\n header.d=none;dmarc=fail action=none header.from=napatech.com;",
        "Received-SPF": "Fail (protection.outlook.com: domain of napatech.com does not\n designate 178.72.21.4 as permitted sender) receiver=protection.outlook.com;\n client-ip=178.72.21.4; helo=k8s-node.default.svc.cluster.local;",
        "From": "Mykola Kostenok <mko-plv@napatech.com>",
        "To": "dev@dpdk.org",
        "Cc": "mko-plv@napatech.com, thomas@monjalon.net, ckm@napatech.com,\n andrew.rybchenko@oktetlabs.ru, ferruh.yigit@amd.com",
        "Subject": "[PATCH v16 7/8] net/ntnic: adds ethdev and makes PMD available",
        "Date": "Fri,  8 Sep 2023 18:07:14 +0200",
        "Message-Id": "<20230908160715.2498821-7-mko-plv@napatech.com>",
        "X-Mailer": "git-send-email 2.39.3",
        "In-Reply-To": "<20230908160715.2498821-1-mko-plv@napatech.com>",
        "References": "<20230816132552.2483752-1-mko-plv@napatech.com>\n <20230908160715.2498821-1-mko-plv@napatech.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "AMS0EPF000001B4:EE_|PA4P190MB1232:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "ef108e14-f550-4167-e8c8-08dbb0859b3a",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n ryG2/HOYtUOV1me5knlB/Cyaxz1d7PrU2ABqqfJqS3ecGhTHklzbPrGbHPqWiUxE65hrte366KY/t8MRKk8hl9ADOdEOl8keNs3zZ7BMADEMI8s/cOxyfz+wS0ZQIjY62kFcIfup4NGXPyiT7ZMJZgoTQsFNBFGwuRM1fsL3RYg5ykHit2HFtJfk1wRrQLKbXs4gbgY9a+4AIbrYBBDvNj96PqPToDtM75FODXrt7i/6oEuWwBrYHRDriKG4pE6eJTnvAIJPrru2ArR70Y3KDPr+hFpBbiYwKULFg7+Mcp5yf62EBTyXzO0REJuuZrLINaEVQP3eBx+3WqNkVPyIdNI9N3UOlbBOdLyaa4WXxlbkmB83SK3uxLkKaf6aXku3ZC2V4HqFwiE+gDoutmt96PVO3/UlZnYW0jvZ65SCvhz+SMiFkmFX8zwfKQrGtDswDBe+1FXU0Iw7xAbrot0eGgs/YEtyLu0FhVrlXvnYzG5RSDCrn1VvwqW3rqO5UlRnscfHTqiu8ZDdVqWY05kmfsdoNyARtp3WjSlWnwQ+sZJE8s+AsXzAs/BnnITE3N4AiKAv25dlNvJ/+SgEz3Yba7OcjrCLwrbdN3/y5li+s7j/QBN0jkSmM4k3/zsqSteSzCfi+jQFaxUFA9iI1XFsVRZH1JDIMjxLDfqhsEQgtyHJayQ6FX5p/NWhXhBTUbPtYe3liRDB+n78ntBZss9GHyGqTuIq//DmH5ycSgTC6+GAp0CNvpUwGlFM8BaJActOdUSipLi5ikKpJPa+hPxUxzk7SjZ/97o5rIUqpG/evTg=",
        "X-Forefront-Antispam-Report": "CIP:178.72.21.4; CTRY:DK; LANG:en; SCL:1; SRV:;\n IPV:NLI; SFV:NSPM; H:k8s-node.default.svc.cluster.local;\n PTR:InfoDomainNonexistent; CAT:NONE;\n SFS:(13230031)(39840400004)(346002)(376002)(396003)(136003)(82310400011)(186009)(1800799009)(451199024)(36840700001)(46966006)(2906002)(66899024)(86362001)(2616005)(956004)(1076003)(336012)(26005)(36756003)(6486002)(6666004)(478600001)(6506007)(6512007)(9316004)(36860700001)(83380400001)(47076005)(356005)(81166007)(40480700001)(4326008)(41300700001)(8936002)(8676002)(30864003)(36736006)(6916009)(70206006)(70586007)(316002)(5660300002)(21314003)(36900700001)(579004)(559001)(309714004);\n DIR:OUT; SFP:1101;",
        "X-MS-Exchange-AntiSpam-ExternalHop-MessageData-ChunkCount": "1",
        "X-MS-Exchange-AntiSpam-ExternalHop-MessageData-0": "\n mMpKpg9amWvs4696u8vPlWk9D5iiTkdG1MW/pIteDARd+bAfn3hC1yobsZS46eEHuXSHZ4wemtktGYSBQKJRwen1GVziSrXKOON1DwXhWigA9DNKnj6ZHO12mVR+VTfTPq1pJ7tzhsNwMRQnLrIgMeALVkfRSCLQmwsvO86iLmf9IlXRwo2ww8xaWEUY5eLtuT/FR9soQThVZ4AYi7+7qevTr4dVljh4zG6u7d5iMftV4KnUsxIwnDd/Su6HIbX7uIBrkIGeGsvoJ3kvYR9JC6kMLAJfKPGCdH8WANC81WQzqoOitpmLRWKCY2ljETmLl5KpbmqT6ZDb3Rxq56Uwf0aVBHrs+AWQM+aUjK1bcMebpiml9jN9WKv9M0otzFrFAuC0fC3Riaei4pPhBO5XZiKIkAXLdiUvT8ie4ZVagIx8fEGVn7OjFW7jGkhUlUBTYPdHCDCtnD4lIG6+oS3aroR13Gj0ODJDUGKp3vUmtw2dwvB5eCY2YRwT5sNyNpN1cpGQ5g3TtwMsmYlm/OSQeTqB1xNYHFqTE5DaG7x93LRUTYhgrcoUvXQ5nY9paJYuM8o3up1JY5DFk4tVnVftWDmWxJ21Dzzf+l0WHSQUCZqxwwBGwLOTGNapRarFtUqmND8S7yzKrUeCaK0jYjTuDTjZ8Do6UvyXccyyyhODDvASHNb2SAHcwBP1wPdNMo9tqOAAmj1EIHxfIsYLvspp8T/01t1lEiYNWUQFnpNEzEiWjobJoPO5Gp7Z8jposYUqrpYYFnsetJVVQO6Kb3PdUqHwf18sE8FdzYTXbeP4nNE=",
        "X-OriginatorOrg": "napatech.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "08 Sep 2023 16:06:48.3275 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n ef108e14-f550-4167-e8c8-08dbb0859b3a",
        "X-MS-Exchange-CrossTenant-Id": "c4540d0b-728a-4233-9da5-9ea30c7ec3ed",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=c4540d0b-728a-4233-9da5-9ea30c7ec3ed; Ip=[178.72.21.4];\n Helo=[k8s-node.default.svc.cluster.local]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n AMS0EPF000001B4.eurprd05.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "PA4P190MB1232",
        "X-BESS-ID": "1694189222-302560-12337-38002-1",
        "X-BESS-VER": "2019.1_20230901.1930",
        "X-BESS-Apparent-Source-IP": "104.47.17.172",
        "X-BESS-Parts": "H4sIAAAAAAACA4uuVkqtKFGyUioBkjpK+cVKVsbmRiaGRkB2BlA4xcAozdQw0S\n Ap0TLR2DIl0cLS0sQo0TDR0NwkKcky0VCpNhYAazvuWkMAAAA=",
        "X-BESS-Outbound-Spam-Score": "0.00",
        "X-BESS-Outbound-Spam-Report": "Code version 3.2,\n rules version 3.2.2.250691 [from\n cloudscan21-152.eu-central-1b.ess.aws.cudaops.com]\n Rule breakdown below\n pts rule name              description\n ---- ---------------------- --------------------------------\n 0.00 LARGE_BODY_SHORTCUT    META:  ",
        "X-BESS-Outbound-Spam-Status": "SCORE=0.00 using account:ESS113687 scores of\n KILL_LEVEL=7.0 tests=LARGE_BODY_SHORTCUT",
        "X-BESS-BRTS-Status": "1",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Christian Koue Muf <ckm@napatech.com>\n\nHooks into the DPDK API, and make the PMD available to use.\nAlso adds documentation as .rst and .ini files.\n\nSigned-off-by: Christian Koue Muf <ckm@napatech.com>\nReviewed-by: Mykola Kostenok <mko-plv@napatech.com>\n---\nv2:\n* Fixed WARNING:TYPO_SPELLING\n* Fix supported platform list\nv3:\n* Fix Fedora 38 compilation issues\nv5:\n* Add ntnic.rst to index file\nv10:\n* Fix wrong queue id range.\nv11:\n* Repace stdatomic by compiler build-in atomic.\nv13:\n* Fix typo spelling warnings\n---\n .mailmap                                      |    2 +\n MAINTAINERS                                   |    7 +\n doc/guides/nics/features/ntnic.ini            |   50 +\n doc/guides/nics/index.rst                     |    1 +\n doc/guides/nics/ntnic.rst                     |  235 +\n drivers/net/ntnic/include/ntdrv_4ga.h         |   23 +\n drivers/net/ntnic/include/ntos_system.h       |   23 +\n drivers/net/ntnic/meson.build                 |   13 +\n drivers/net/ntnic/ntnic_dbsconfig.c           | 1670 +++++++\n drivers/net/ntnic/ntnic_dbsconfig.h           |  251 +\n drivers/net/ntnic/ntnic_ethdev.c              | 4256 +++++++++++++++++\n drivers/net/ntnic/ntnic_ethdev.h              |  355 ++\n .../net/ntnic/ntnic_filter/create_elements.h  | 1190 +++++\n drivers/net/ntnic/ntnic_filter/ntnic_filter.c |  656 +++\n drivers/net/ntnic/ntnic_filter/ntnic_filter.h |   14 +\n drivers/net/ntnic/ntnic_hshconfig.c           |  102 +\n drivers/net/ntnic/ntnic_hshconfig.h           |    9 +\n drivers/net/ntnic/ntnic_meter.c               |  811 ++++\n drivers/net/ntnic/ntnic_meter.h               |   10 +\n drivers/net/ntnic/ntnic_vdpa.c                |  365 ++\n drivers/net/ntnic/ntnic_vdpa.h                |   21 +\n drivers/net/ntnic/ntnic_vf.c                  |   83 +\n drivers/net/ntnic/ntnic_vf.h                  |   17 +\n drivers/net/ntnic/ntnic_vf_vdpa.c             | 1235 +++++\n drivers/net/ntnic/ntnic_vf_vdpa.h             |   25 +\n drivers/net/ntnic/ntnic_vfio.c                |  321 ++\n drivers/net/ntnic/ntnic_vfio.h                |   31 +\n drivers/net/ntnic/ntnic_xstats.c              |  703 +++\n drivers/net/ntnic/ntnic_xstats.h              |   22 +\n 29 files changed, 12501 insertions(+)\n create mode 100644 doc/guides/nics/features/ntnic.ini\n create mode 100644 doc/guides/nics/ntnic.rst\n create mode 100644 drivers/net/ntnic/include/ntdrv_4ga.h\n create mode 100644 drivers/net/ntnic/include/ntos_system.h\n create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.c\n create mode 100644 drivers/net/ntnic/ntnic_dbsconfig.h\n create mode 100644 drivers/net/ntnic/ntnic_ethdev.c\n create mode 100644 drivers/net/ntnic/ntnic_ethdev.h\n create mode 100644 drivers/net/ntnic/ntnic_filter/create_elements.h\n create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.c\n create mode 100644 drivers/net/ntnic/ntnic_filter/ntnic_filter.h\n create mode 100644 drivers/net/ntnic/ntnic_hshconfig.c\n create mode 100644 drivers/net/ntnic/ntnic_hshconfig.h\n create mode 100644 drivers/net/ntnic/ntnic_meter.c\n create mode 100644 drivers/net/ntnic/ntnic_meter.h\n create mode 100644 drivers/net/ntnic/ntnic_vdpa.c\n create mode 100644 drivers/net/ntnic/ntnic_vdpa.h\n create mode 100644 drivers/net/ntnic/ntnic_vf.c\n create mode 100644 drivers/net/ntnic/ntnic_vf.h\n create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.c\n create mode 100644 drivers/net/ntnic/ntnic_vf_vdpa.h\n create mode 100644 drivers/net/ntnic/ntnic_vfio.c\n create mode 100644 
drivers/net/ntnic/ntnic_vfio.h\n create mode 100644 drivers/net/ntnic/ntnic_xstats.c\n create mode 100644 drivers/net/ntnic/ntnic_xstats.h",
    "diff": "diff --git a/.mailmap b/.mailmap\nindex 864d33ee46..be8880971d 100644\n--- a/.mailmap\n+++ b/.mailmap\n@@ -227,6 +227,7 @@ Chintu Hetam <rometoroam@gmail.com>\n Choonho Son <choonho.son@gmail.com>\n Chris Metcalf <cmetcalf@mellanox.com>\n Christian Ehrhardt <christian.ehrhardt@canonical.com>\n+Christian Koue Muf <ckm@napatech.com>\n Christian Maciocco <christian.maciocco@intel.com>\n Christophe Fontaine <cfontain@redhat.com>\n Christophe Grosse <christophe.grosse@6wind.com>\n@@ -967,6 +968,7 @@ Mukesh Dua <mukesh.dua81@gmail.com>\n Murphy Yang <murphyx.yang@intel.com>\n Murthy NSSR <nidadavolu.murthy@caviumnetworks.com>\n Muthurajan Jayakumar <muthurajan.jayakumar@intel.com>\n+Mykola Kostenok <mko-plv@napatech.com>\n Nachiketa Prachanda <nprachan@brocade.com> <nprachan@vyatta.att-mail.com>\n Nagadheeraj Rottela <rnagadheeraj@marvell.com>\n Naga Harish K S V <s.v.naga.harish.k@intel.com>\ndiff --git a/MAINTAINERS b/MAINTAINERS\nindex 698608cdb2..fbe19449c2 100644\n--- a/MAINTAINERS\n+++ b/MAINTAINERS\n@@ -1053,6 +1053,13 @@ F: drivers/net/memif/\n F: doc/guides/nics/memif.rst\n F: doc/guides/nics/features/memif.ini\n \n+NTNIC PMD\n+M: Mykola Kostenok <mko-plv@napatech.com>\n+M: Christiam Muf <ckm@napatech.com>\n+F: drivers/net/ntnic/\n+F: doc/guides/nics/ntnic.rst\n+F: doc/guides/nics/features/ntnic.ini\n+\n \n Crypto Drivers\n --------------\ndiff --git a/doc/guides/nics/features/ntnic.ini b/doc/guides/nics/features/ntnic.ini\nnew file mode 100644\nindex 0000000000..2583e12b1f\n--- /dev/null\n+++ b/doc/guides/nics/features/ntnic.ini\n@@ -0,0 +1,50 @@\n+;\n+; Supported features of the 'ntnic' network poll mode driver.\n+;\n+; Refer to default.ini for the full list of available PMD features.\n+;\n+[Features]\n+Speed capabilities   = Y\n+Link status          = Y\n+Queue start/stop     = Y\n+Shared Rx queue      = Y\n+MTU update           = Y\n+Promiscuous mode     = Y\n+Unicast MAC filter   = Y\n+Multicast MAC filter = Y\n+RSS hash             = Y\n+RSS key update       = Y\n+Inner RSS            = Y\n+CRC offload          = Y\n+L3 checksum offload  = Y\n+L4 checksum offload  = Y\n+Inner L3 checksum    = Y\n+Inner L4 checksum    = Y\n+Basic stats          = Y\n+Extended stats       = Y\n+FW version           = Y\n+Linux                = Y\n+x86-64               = Y\n+\n+[rte_flow items]\n+any                  = Y\n+eth                  = Y\n+gtp                  = Y\n+ipv4                 = Y\n+ipv6                 = Y\n+port_id              = Y\n+sctp                 = Y\n+tcp                  = Y\n+udp                  = Y\n+vlan                 = Y\n+\n+[rte_flow actions]\n+drop                 = Y\n+jump                 = Y\n+meter                = Y\n+modify_field         = Y\n+port_id              = Y\n+queue                = Y\n+raw_decap            = Y\n+raw_encap            = Y\n+rss                  = Y\ndiff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst\nindex 7bfcac880f..c14bc7988a 100644\n--- a/doc/guides/nics/index.rst\n+++ b/doc/guides/nics/index.rst\n@@ -53,6 +53,7 @@ Network Interface Controller Drivers\n     nfb\n     nfp\n     ngbe\n+    ntnic\n     null\n     octeon_ep\n     octeontx\ndiff --git a/doc/guides/nics/ntnic.rst b/doc/guides/nics/ntnic.rst\nnew file mode 100644\nindex 0000000000..85c58543dd\n--- /dev/null\n+++ b/doc/guides/nics/ntnic.rst\n@@ -0,0 +1,235 @@\n+..  
SPDX-License-Identifier: BSD-3-Clause\n+    Copyright(c) 2023 Napatech A/S\n+\n+NTNIC Poll Mode Driver\n+======================\n+\n+The NTNIC PMD provides poll mode driver support for Napatech smartNICs.\n+\n+\n+Design\n+------\n+\n+The NTNIC PMD is designed as a pure user-space driver, and requires no special\n+Napatech kernel modules.\n+\n+The Napatech smartNIC presents one control PCI device (PF0). NTNIC PMD accesses\n+smartNIC PF0 via vfio-pci kernel driver. Access to PF0 for all purposes is\n+exclusive, so only one process should access it. The physical ports are located\n+behind PF0 as DPDK port 0 and 1. These ports can be configured with one or more\n+TX and RX queues each.\n+\n+Virtual ports can be added by creating VFs via SR-IOV. The vfio-pci kernel\n+driver is bound to the VFs. The VFs implement virtio data plane only and the VF\n+configuration is done by NTNIC PMD through PF0. Each VF can be configured with\n+one or more TX and RX queue pairs. The VF’s are numbered starting from VF 4.\n+The number of VFs is limited by the number of queues supported by the FPGA,\n+and the number of queue pairs allocated for each VF. Current FPGA supports 128\n+queues in each TX and RX direction. A maximum of 63 VFs is supported (VF4-VF66).\n+\n+As the Napatech smartNICs supports sensors and monitoring beyond what is\n+available in the DPDK API, the PMD includes the ntconnect socket interface.\n+ntconnect additionally allows Napatech to implement specific customer requests\n+that are not supported by the DPDK API.\n+\n+\n+Supported NICs\n+--------------\n+\n+- NT200A02 2x100G SmartNIC\n+\n+    - FPGA ID 9563 (Inline Flow Management)\n+\n+\n+Features\n+--------\n+\n+- Multiple TX and RX queues.\n+- Scattered and gather for TX and RX.\n+- RSS based on VLAN or 5-tuple.\n+- RSS using different combinations of fields: L3 only, L4 only or both, and\n+    source only, destination only or both.\n+- Several RSS hash keys, one for each flow type.\n+- Default RSS operation with no hash key specification.\n+- VLAN filtering.\n+- RX VLAN stripping via raw decap.\n+- TX VLAN insertion via raw encap.\n+- Hairpin.\n+- HW checksum offload of RX and hairpin.\n+- Promiscuous mode on PF and VF.\n+- Flow API.\n+- Multiple process.\n+- Tunnel types: GTP.\n+- Tunnel HW offload: Packet type, inner/outer RSS, IP and UDP checksum\n+    verification.\n+- Support for multiple rte_flow groups.\n+- Encapsulation and decapsulation of GTP data.\n+- Packet modification: NAT, TTL decrement, DSCP tagging\n+- Traffic mirroring.\n+- Jumbo frame support.\n+- Port and queue statistics.\n+- RMON statistics in extended stats.\n+- Flow metering, including meter policy API.\n+- Link state information.\n+- CAM and TCAM based matching.\n+- Exact match of 140 million flows and policies.\n+\n+\n+Limitations\n+~~~~~~~~~~~\n+\n+Kernel versions before 5.7 are not supported. Kernel version 5.7 added vfio-pci\n+support for creating VFs from the PF which is required for the PMD to use\n+vfio-pci on the PF. This support has been back-ported to older Linux\n+distributions and they are also supported. 
If vfio-pci is not required kernel\n+version 4.18 is supported.\n+\n+Current NTNIC PMD implementation only supports one active adapter.\n+\n+\n+Configuration\n+-------------\n+\n+Command line arguments\n+~~~~~~~~~~~~~~~~~~~~~~\n+\n+Following standard DPDK command line arguments are used by the PMD:\n+\n+    -a: Used to specifically define the NT adapter by PCI ID.\n+    --iova-mode: Must be set to ‘pa’ for Physical Address mode.\n+\n+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::\n+\n+    <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]\n+\n+The NTNIC specific argument format is::\n+\n+    <object>.<attribute>=[<object-ids>:]<value>\n+\n+Multiple arguments for the same device are separated by ‘,’ comma.\n+<object-ids> can be a single value or a range.\n+\n+\n+- ``rxqs`` parameter [int]\n+\n+    Specify number of RX queues to use.\n+\n+    To specify number of RX queues::\n+\n+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4\n+\n+    By default, the value is set to 1.\n+\n+- ``txqs`` parameter [int]\n+\n+    Specify number of TX queues to use.\n+\n+    To specify number of TX queues::\n+\n+        -a <domain>:<bus>:00.0,rxqs=4,txqs=4\n+\n+    By default, the value is set to 1.\n+\n+- ``exception_path`` parameter [int]\n+\n+    Enable exception path for unmatched packets to go through queue 0.\n+\n+    To enable exception_path::\n+\n+        -a <domain>:<bus>:00.0,exception_path=1\n+\n+    By default, the value is set to 0.\n+\n+- ``port.link_speed`` parameter [list]\n+\n+    This parameter is used to set the link speed on physical ports in the format::\n+\n+        port.link_speed=<port>:<link speed in Mbps>\n+\n+    To set up link speeds::\n+\n+        -a <domain>:<bus>:00.0,port.link_speed=0:10000,port.link_speed=1:25000\n+\n+    By default, set to the maximum corresponding to the NIM bit rate.\n+\n+- ``supported-fpgas`` parameter [str]\n+\n+    List the supported FPGAs for a compiled NTNIC DPDK-driver.\n+\n+    This parameter has two options::\n+\n+        - list.\n+        - verbose.\n+\n+    Example usages::\n+\n+        -a <domain>:<bus>:00.0,supported-fpgas=list\n+        -a <domain>:<bus>:00.0,supported-fpgas=verbose\n+\n+- ``help`` parameter [none]\n+\n+    List all available NTNIC PMD parameters.\n+\n+\n+Build options\n+~~~~~~~~~~~~~\n+\n+- ``NT_TOOLS``\n+\n+    Define that enables the PMD ntconnect source code.\n+\n+    Default: Enabled.\n+\n+- ``NT_VF_VDPA``\n+\n+    Define that enables the PMD VF VDPA source code.\n+\n+    Default: Enabled.\n+\n+- ``NT_RELAY_CORE``\n+\n+    Define that enables the PMD replay core source code. The relay core is used\n+    by Napatech's vSwitch PMD profile in an OVS environment.\n+\n+    Default: Disabled.\n+\n+\n+Logging and Debugging\n+---------------------\n+\n+NTNIC supports several groups of logging that can be enabled with ``log-level``\n+parameter:\n+\n+- ETHDEV.\n+\n+    Logging info from the main PMD code. i.e. code that is related to DPDK::\n+\n+        --log-level=ntnic.ethdev,8\n+\n+- NTHW.\n+\n+    Logging info from NTHW. i.e. code that is related to the FPGA and the Adapter::\n+\n+        --log-level=ntnic.nthw,8\n+\n+- vDPA.\n+\n+    Logging info from vDPA. i.e. code that is related to VFIO and vDPA::\n+\n+        --log-level=ntnic.vdpa,8\n+\n+- FILTER.\n+\n+    Logging info from filter. i.e. 
code that is related to the binary filter::\n+\n+        --log-level=ntnic.filter,8\n+\n+- FPGA.\n+\n+    Logging related to FPGA::\n+\n+        --log-level=ntnic.fpga,8\n+\n+To enable logging on all levels use wildcard in the following way::\n+\n+    --log-level=ntnic.*,8\ndiff --git a/drivers/net/ntnic/include/ntdrv_4ga.h b/drivers/net/ntnic/include/ntdrv_4ga.h\nnew file mode 100644\nindex 0000000000..e9c38fc330\n--- /dev/null\n+++ b/drivers/net/ntnic/include/ntdrv_4ga.h\n@@ -0,0 +1,23 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef __NTDRV_4GA_H__\n+#define __NTDRV_4GA_H__\n+\n+#include \"nthw_drv.h\"\n+#include \"nt4ga_adapter.h\"\n+#include \"nthw_platform_drv.h\"\n+\n+typedef struct ntdrv_4ga_s {\n+\tuint32_t pciident;\n+\tstruct adapter_info_s adapter_info;\n+\tchar *p_drv_name;\n+\n+\tvolatile bool b_shutdown;\n+\tpthread_mutex_t stat_lck;\n+\tpthread_t stat_thread;\n+\tpthread_t flm_thread;\n+} ntdrv_4ga_t;\n+\n+#endif /* __NTDRV_4GA_H__ */\ndiff --git a/drivers/net/ntnic/include/ntos_system.h b/drivers/net/ntnic/include/ntos_system.h\nnew file mode 100644\nindex 0000000000..0adfe86cc3\n--- /dev/null\n+++ b/drivers/net/ntnic/include/ntos_system.h\n@@ -0,0 +1,23 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef __NTOS_SYSTEM_H__\n+#define __NTOS_SYSTEM_H__\n+\n+#include \"include/ntdrv_4ga.h\"\n+\n+/*\n+ * struct drv_s for DPDK (clone of kernel struct)\n+ * keep it as close as possible to original kernel struct\n+ */\n+struct drv_s {\n+\tint adapter_no;\n+\tstruct rte_pci_device *p_dev;\n+\tstruct ntdrv_4ga_s ntdrv;\n+\n+\tint n_eth_dev_init_count;\n+\tint probe_finished;\n+};\n+\n+#endif /* __NTOS_SYSTEM_H__ */\ndiff --git a/drivers/net/ntnic/meson.build b/drivers/net/ntnic/meson.build\nindex 0ae574f9ca..f7454ffb79 100644\n--- a/drivers/net/ntnic/meson.build\n+++ b/drivers/net/ntnic/meson.build\n@@ -27,6 +27,9 @@ includes = [\n     include_directories('sensors/ntavr'),\n ]\n \n+# deps\n+deps += 'vhost'\n+\n # all sources\n sources = files(\n     'adapter/nt4ga_adapter.c',\n@@ -113,6 +116,16 @@ sources = files(\n     'nthw/nthw_stat.c',\n     'nthw/supported/nthw_fpga_9563_055_024_0000.c',\n     'ntlog/ntlog.c',\n+    'ntnic_dbsconfig.c',\n+    'ntnic_ethdev.c',\n+    'ntnic_filter/ntnic_filter.c',\n+    'ntnic_hshconfig.c',\n+    'ntnic_meter.c',\n+    'ntnic_vdpa.c',\n+    'ntnic_vf.c',\n+    'ntnic_vfio.c',\n+    'ntnic_vf_vdpa.c',\n+    'ntnic_xstats.c',\n     'ntutil/nt_util.c',\n     'sensors/avr_sensors/avr_sensors.c',\n     'sensors/board_sensors/board_sensors.c',\ndiff --git a/drivers/net/ntnic/ntnic_dbsconfig.c b/drivers/net/ntnic/ntnic_dbsconfig.c\nnew file mode 100644\nindex 0000000000..2217c163ad\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_dbsconfig.c\n@@ -0,0 +1,1670 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include <unistd.h>\n+#include <time.h>\n+#include <rte_mbuf.h>\n+#include <rte_pci.h>\n+\n+#include \"ntdrv_4ga.h\"\n+#include \"nt_util.h\"\n+#include \"ntnic_dbsconfig.h\"\n+#include \"ntnic_ethdev.h\"\n+#include \"ntlog.h\"\n+\n+#define STRUCT_ALIGNMENT (4 * 1024LU)\n+#define MAX_VIRT_QUEUES 128\n+\n+#define LAST_QUEUE 127\n+#define DISABLE 0\n+#define ENABLE 1\n+#define RX_AM_DISABLE DISABLE\n+#define RX_AM_ENABLE ENABLE\n+#define RX_UW_DISABLE DISABLE\n+#define RX_UW_ENABLE ENABLE\n+#define RX_Q_DISABLE DISABLE\n+#define RX_Q_ENABLE ENABLE\n+#define RX_AM_POLL_SPEED 5\n+#define RX_UW_POLL_SPEED 
9\n+#define HOST_ID 0\n+#define INIT_QUEUE 1\n+\n+#define TX_AM_DISABLE DISABLE\n+#define TX_AM_ENABLE ENABLE\n+#define TX_UW_DISABLE DISABLE\n+#define TX_UW_ENABLE ENABLE\n+#define TX_Q_DISABLE DISABLE\n+#define TX_Q_ENABLE ENABLE\n+#define TX_AM_POLL_SPEED 5\n+#define TX_UW_POLL_SPEED 8\n+\n+/**************************************************************************/\n+\n+#define le64 uint64_t\n+#define le32 uint32_t\n+#define le16 uint16_t\n+\n+/**************************************************************************/\n+\n+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1\n+#pragma pack(1)\n+struct virtq_avail {\n+\tle16 flags;\n+\tle16 idx;\n+\tle16 ring[]; /* Queue size */\n+};\n+\n+#pragma pack()\n+/**************************************************************************/\n+\n+/* le32 is used here for ids for padding reasons. */\n+#pragma pack(1)\n+struct virtq_used_elem {\n+\t/* Index of start of used descriptor chain. */\n+\tle32 id;\n+\t/* Total length of the descriptor chain which was used (written to) */\n+\tle32 len;\n+};\n+\n+#pragma pack()\n+\n+#define VIRTQ_USED_F_NO_NOTIFY 1\n+\n+#pragma pack(1)\n+struct virtq_used {\n+\tle16 flags;\n+\tle16 idx;\n+\tstruct virtq_used_elem ring[]; /* Queue size */\n+};\n+\n+#pragma pack()\n+\n+struct virtq_struct_layout_s {\n+\tsize_t used_offset;\n+\tsize_t desc_offset;\n+};\n+\n+enum nthw_virt_queue_usage { UNUSED = 0, UNMANAGED, MANAGED };\n+\n+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)\n+\n+struct nthw_virt_queue {\n+\t/* Pointers to virt-queue structs */\n+\tunion {\n+\t\tstruct {\n+\t\t\t/* SPLIT virtqueue */\n+\t\t\tstruct virtq_avail *p_avail;\n+\t\t\tstruct virtq_used *p_used;\n+\t\t\tstruct virtq_desc *p_desc;\n+\t\t\t/* Control variables for virt-queue structs */\n+\t\t\tle16 am_idx;\n+\t\t\tle16 used_idx;\n+\t\t\tle16 cached_idx;\n+\t\t\tle16 tx_descr_avail_idx;\n+\t\t};\n+\t\tstruct {\n+\t\t\t/* PACKED virtqueue */\n+\t\t\tstruct pvirtq_event_suppress *driver_event;\n+\t\t\tstruct pvirtq_event_suppress *device_event;\n+\t\t\tstruct pvirtq_desc *desc;\n+\t\t\tstruct {\n+\t\t\t\tle16 next;\n+\t\t\t\tle16 num;\n+\t\t\t} outs;\n+\t\t\t/*\n+\t\t\t * when in-order release used Tx packets from FPGA it may collapse\n+\t\t\t * into a batch. 
When getting new Tx buffers we may only need\n+\t\t\t * partial\n+\t\t\t */\n+\t\t\tle16 next_avail;\n+\t\t\tle16 next_used;\n+\t\t\tle16 avail_wrap_count;\n+\t\t\tle16 used_wrap_count;\n+\t\t};\n+\t};\n+\n+\t/* Array with packet buffers */\n+\tstruct nthw_memory_descriptor *p_virtual_addr;\n+\n+\t/* Queue configuration info */\n+\tenum nthw_virt_queue_usage usage;\n+\tuint16_t vq_type;\n+\tuint16_t in_order;\n+\tint irq_vector;\n+\n+\tnthw_dbs_t *mp_nthw_dbs;\n+\tuint32_t index;\n+\tle16 queue_size;\n+\tuint32_t am_enable;\n+\tuint32_t host_id;\n+\tuint32_t port; /* Only used by TX queues */\n+\tuint32_t virtual_port; /* Only used by TX queues */\n+\tuint32_t header;\n+\t/*\n+\t * Only used by TX queues:\n+\t *   0: VirtIO-Net header (12 bytes).\n+\t *   1: Napatech DVIO0 descriptor (12 bytes).\n+\t */\n+\tvoid *avail_struct_phys_addr;\n+\tvoid *used_struct_phys_addr;\n+\tvoid *desc_struct_phys_addr;\n+};\n+\n+struct pvirtq_struct_layout_s {\n+\tsize_t driver_event_offset;\n+\tsize_t device_event_offset;\n+};\n+\n+static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];\n+static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];\n+\n+static void dbs_init_rx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,\n+\t\t\t      uint32_t start_idx, uint32_t start_ptr)\n+{\n+\tuint32_t busy;\n+\tuint32_t init;\n+\tuint32_t dummy;\n+\n+\tdo {\n+\t\tget_rx_init(p_nthw_dbs, &init, &dummy, &busy);\n+\t} while (busy != 0);\n+\n+\tset_rx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);\n+\n+\tdo {\n+\t\tget_rx_init(p_nthw_dbs, &init, &dummy, &busy);\n+\t} while (busy != 0);\n+}\n+\n+static void dbs_init_tx_queue(nthw_dbs_t *p_nthw_dbs, uint32_t queue,\n+\t\t\t      uint32_t start_idx, uint32_t start_ptr)\n+{\n+\tuint32_t busy;\n+\tuint32_t init;\n+\tuint32_t dummy;\n+\n+\tdo {\n+\t\tget_tx_init(p_nthw_dbs, &init, &dummy, &busy);\n+\t} while (busy != 0);\n+\n+\tset_tx_init(p_nthw_dbs, start_idx, start_ptr, INIT_QUEUE, queue);\n+\n+\tdo {\n+\t\tget_tx_init(p_nthw_dbs, &init, &dummy, &busy);\n+\t} while (busy != 0);\n+}\n+\n+int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)\n+{\n+\tassert(p_fpga_info);\n+\n+\tnt_fpga_t *const p_fpga = p_fpga_info->mp_fpga;\n+\tnthw_dbs_t *p_nthw_dbs;\n+\tint res = 0;\n+\tuint32_t i;\n+\n+\tp_fpga_info->mp_nthw_dbs = NULL;\n+\n+\tp_nthw_dbs = nthw_dbs_new();\n+\tif (p_nthw_dbs == NULL)\n+\t\treturn -1;\n+\n+\tres = dbs_init(NULL, p_fpga, 0); /* Check that DBS exists in FPGA */\n+\tif (res) {\n+\t\tfree(p_nthw_dbs);\n+\t\treturn res;\n+\t}\n+\n+\tres = dbs_init(p_nthw_dbs, p_fpga, 0); /* Create DBS module */\n+\tif (res) {\n+\t\tfree(p_nthw_dbs);\n+\t\treturn res;\n+\t}\n+\n+\tp_fpga_info->mp_nthw_dbs = p_nthw_dbs;\n+\n+\tfor (i = 0; i < MAX_VIRT_QUEUES; ++i) {\n+\t\trxvq[i].usage = UNUSED;\n+\t\ttxvq[i].usage = UNUSED;\n+\t}\n+\n+\tdbs_reset(p_nthw_dbs);\n+\n+\tfor (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i)\n+\t\tdbs_init_rx_queue(p_nthw_dbs, i, 0, 0);\n+\n+\tfor (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i)\n+\t\tdbs_init_tx_queue(p_nthw_dbs, i, 0, 0);\n+\n+\tset_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_DISABLE, RX_AM_POLL_SPEED,\n+\t\t       RX_UW_DISABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);\n+\tset_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,\n+\t\t       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_DISABLE);\n+\tset_rx_control(p_nthw_dbs, LAST_QUEUE, RX_AM_ENABLE, RX_AM_POLL_SPEED,\n+\t\t       RX_UW_ENABLE, RX_UW_POLL_SPEED, RX_Q_ENABLE);\n+\n+\tset_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_DISABLE, TX_AM_POLL_SPEED,\n+\t\t       TX_UW_DISABLE, TX_UW_POLL_SPEED, 
TX_Q_DISABLE);\n+\tset_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,\n+\t\t       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_DISABLE);\n+\tset_tx_control(p_nthw_dbs, LAST_QUEUE, TX_AM_ENABLE, TX_AM_POLL_SPEED,\n+\t\t       TX_UW_ENABLE, TX_UW_POLL_SPEED, TX_Q_ENABLE);\n+\n+\treturn 0;\n+}\n+\n+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)\n+{\n+\tsize_t avail_mem =\n+\t\tsizeof(struct virtq_avail) +\n+\t\tqueue_size *\n+\t\tsizeof(le16); /* + sizeof(le16); (\"avail->used_event\" is not used) */\n+\tsize_t avail_mem_aligned =\n+\t\t((avail_mem % STRUCT_ALIGNMENT) == 0) ?\n+\t\tavail_mem :\n+\t\tSTRUCT_ALIGNMENT * (avail_mem / STRUCT_ALIGNMENT + 1);\n+\n+\t/* + sizeof(le16); (\"used->avail_event\" is not used) */\n+\tsize_t used_mem =\n+\t\tsizeof(struct virtq_used) +\n+\t\tqueue_size *\n+\t\tsizeof(struct virtq_used_elem);\n+\tsize_t used_mem_aligned =\n+\t\t((used_mem % STRUCT_ALIGNMENT) == 0) ?\n+\t\tused_mem :\n+\t\tSTRUCT_ALIGNMENT * (used_mem / STRUCT_ALIGNMENT + 1);\n+\n+\tstruct virtq_struct_layout_s virtq_layout;\n+\n+\tvirtq_layout.used_offset = avail_mem_aligned;\n+\tvirtq_layout.desc_offset = avail_mem_aligned + used_mem_aligned;\n+\n+\treturn virtq_layout;\n+}\n+\n+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,\n+\t\t\t\t\tuint16_t initial_avail_idx)\n+{\n+\tuint16_t i;\n+\tstruct virtq_avail *p_avail = (struct virtq_avail *)addr;\n+\n+\tp_avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;\n+\tp_avail->idx = initial_avail_idx;\n+\tfor (i = 0; i < queue_size; ++i)\n+\t\tp_avail->ring[i] = i;\n+}\n+\n+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)\n+{\n+\tint i;\n+\tstruct virtq_used *p_used = (struct virtq_used *)addr;\n+\n+\tp_used->flags = 1;\n+\tp_used->idx = 0;\n+\tfor (i = 0; i < queue_size; ++i) {\n+\t\tp_used->ring[i].id = 0;\n+\t\tp_used->ring[i].len = 0;\n+\t}\n+}\n+\n+static void dbs_initialize_descriptor_struct(void *addr,\n+\tstruct nthw_memory_descriptor *packet_buffer_descriptors,\n+\tuint16_t queue_size, ule16 flgs)\n+{\n+\tif (packet_buffer_descriptors) {\n+\t\tint i;\n+\t\tstruct virtq_desc *p_desc = (struct virtq_desc *)addr;\n+\n+\t\tfor (i = 0; i < queue_size; ++i) {\n+\t\t\tp_desc[i].addr =\n+\t\t\t\t(uint64_t)packet_buffer_descriptors[i].phys_addr;\n+\t\t\tp_desc[i].len = packet_buffer_descriptors[i].len;\n+\t\t\tp_desc[i].flags = flgs;\n+\t\t\tp_desc[i].next = 0;\n+\t\t}\n+\t}\n+}\n+\n+static void dbs_initialize_virt_queue_structs(void *avail_struct_addr,\n+\tvoid *used_struct_addr, void *desc_struct_addr,\n+\tstruct nthw_memory_descriptor *packet_buffer_descriptors,\n+\tuint16_t queue_size, uint16_t initial_avail_idx, ule16 flgs)\n+{\n+\tdbs_initialize_avail_struct(avail_struct_addr, queue_size,\n+\t\t\t\t    initial_avail_idx);\n+\tdbs_initialize_used_struct(used_struct_addr, queue_size);\n+\tdbs_initialize_descriptor_struct(desc_struct_addr,\n+\t\t\t\t\t packet_buffer_descriptors,\n+\t\t\t\t\t queue_size, flgs);\n+}\n+\n+static le16 dbs_qsize_log2(le16 qsize)\n+{\n+\tuint32_t qs = 0;\n+\n+\twhile (qsize) {\n+\t\tqsize = qsize >> 1;\n+\t\t++qs;\n+\t}\n+\t--qs;\n+\treturn qs;\n+}\n+\n+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,\n+\tuint32_t index, uint16_t start_idx,\n+\tuint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,\n+\tvoid *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,\n+\tuint32_t header, uint32_t vq_type, int irq_vector)\n+{\n+\tuint32_t qs = dbs_qsize_log2(queue_size);\n+\tuint32_t 
int_enable;\n+\tuint32_t vec;\n+\tuint32_t istk;\n+\n+\t/*\n+\t * Setup DBS module - DSF00094\n+\t * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all\n+\t * DBS_RX_QUEUES entries.\n+\t */\n+\tif (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,\n+\t\t\t   host_id, qs, header, PACKED(vq_type)) != 0)\n+\t\treturn NULL;\n+\n+\t/*\n+\t * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all\n+\t *   DBS_RX_QUEUES entries.\n+\t *   Notice: We always start out with interrupts disabled (by setting the\n+\t *     \"irq_vector\" argument to -1). Queues that require interrupts will have\n+\t *     it enabled at a later time (after we have enabled vfio interrupts in\n+\t *     the kernel).\n+\t */\n+\tint_enable = 0;\n+\tvec = 0;\n+\tistk = 0;\n+\tNT_LOG(DBG, ETHDEV, \"%s: set_rx_uw_data int=0 irq_vector=%u\\n\",\n+\t       __func__, irq_vector);\n+\tif (set_rx_uw_data(p_nthw_dbs, index,\n+\t\t\t   vq_type == PACKED_RING ? (uint64_t)desc_struct_phys_addr :\n+\t\t\t   (uint64_t)used_struct_phys_addr,\n+\t\t\t   host_id, qs, PACKED(vq_type), int_enable, vec, istk) != 0)\n+\t\treturn NULL;\n+\n+\t/*\n+\t * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;\n+\t *  good idea to initialize all DBS_RX_QUEUES entries.\n+\t *  Notice: We do this only for queues that don't require interrupts (i.e. if\n+\t *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled\n+\t *    at a later time (after we have enabled vfio interrupts in the kernel).\n+\t */\n+\tif (irq_vector < 0) {\n+\t\tif (set_rx_am_data(p_nthw_dbs, index,\n+\t\t\t\t   (uint64_t)avail_struct_phys_addr, RX_AM_DISABLE,\n+\t\t\t\t   host_id, PACKED(vq_type),\n+\t\t\t\t   irq_vector >= 0 ? 1 : 0) != 0)\n+\t\t\treturn NULL;\n+\t}\n+\n+\t/*\n+\t * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the\n+\t *   DBS.RX_INIT register.\n+\t */\n+\tdbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);\n+\n+\t/*\n+\t * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;\n+\t *  good idea to initialize all DBS_RX_QUEUES entries.\n+\t */\n+\tif (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,\n+\t\t\t   RX_AM_ENABLE, host_id, PACKED(vq_type),\n+\t\t\t   irq_vector >= 0 ? 1 : 0) != 0)\n+\t\treturn NULL;\n+\n+\t/* Save queue state */\n+\trxvq[index].usage = UNMANAGED;\n+\trxvq[index].mp_nthw_dbs = p_nthw_dbs;\n+\trxvq[index].index = index;\n+\trxvq[index].queue_size = queue_size;\n+\trxvq[index].am_enable = (irq_vector < 0) ? 
RX_AM_ENABLE : RX_AM_DISABLE;\n+\trxvq[index].host_id = host_id;\n+\trxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;\n+\trxvq[index].used_struct_phys_addr = used_struct_phys_addr;\n+\trxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;\n+\trxvq[index].vq_type = vq_type;\n+\trxvq[index].in_order = 0; /* not used */\n+\trxvq[index].irq_vector = irq_vector;\n+\n+\t/* Return queue handle */\n+\treturn &rxvq[index];\n+}\n+\n+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);\n+\n+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq)\n+{\n+\tif (!rx_vq) {\n+\t\tNT_LOG(ERR, ETHDEV, \"%s: Invalid queue\\n\", __func__);\n+\t\treturn -1;\n+\t}\n+\n+\tnthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;\n+\n+\tif (rx_vq->index >= MAX_VIRT_QUEUES)\n+\t\treturn -1;\n+\n+\tif (rx_vq->usage != UNMANAGED)\n+\t\treturn -1;\n+\n+\tuint32_t qs = dbs_qsize_log2(rx_vq->queue_size);\n+\n+\t/* If ISTK is set, make sure to unset it */\n+\tif (set_rx_uw_data(p_nthw_dbs, rx_vq->index,\n+\t\t\t   rx_vq->vq_type == PACKED_RING ?\n+\t\t\t   (uint64_t)rx_vq->desc_struct_phys_addr :\n+\t\t\t   (uint64_t)rx_vq->used_struct_phys_addr,\n+\t\t\t   rx_vq->host_id, qs, PACKED(rx_vq->vq_type), 0, 0,\n+\t\t\t   0) != 0)\n+\t\treturn -1;\n+\n+\t/* Disable AM */\n+\trx_vq->am_enable = RX_AM_DISABLE;\n+\tif (set_rx_am_data(p_nthw_dbs, rx_vq->index,\n+\t\t\t   (uint64_t)rx_vq->avail_struct_phys_addr,\n+\t\t\t   rx_vq->am_enable, rx_vq->host_id,\n+\t\t\t   PACKED(rx_vq->vq_type), 0) != 0)\n+\t\treturn -1;\n+\n+\t/* let the FPGA finish packet processing */\n+\tif (dbs_wait_hw_queue_shutdown(rx_vq, 1) != 0)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq)\n+{\n+\tuint32_t int_enable;\n+\tuint32_t vec;\n+\tuint32_t istk;\n+\n+\tif (!rx_vq) {\n+\t\tNT_LOG(ERR, ETHDEV, \"%s: Invalid queue\\n\", __func__);\n+\t\treturn -1;\n+\t}\n+\n+\tnthw_dbs_t *p_nthw_dbs = rx_vq->mp_nthw_dbs;\n+\n+\tif (rx_vq->index >= MAX_VIRT_QUEUES)\n+\t\treturn -1;\n+\n+\tif (rx_vq->usage != UNMANAGED)\n+\t\treturn -1;\n+\n+\tuint32_t qs = dbs_qsize_log2(rx_vq->queue_size);\n+\n+\t/* Set ISTK if */\n+\tif (rx_vq->irq_vector >= 0 &&\n+\t\t\trx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {\n+\t\tint_enable = 1;\n+\t\tvec = rx_vq->irq_vector;\n+\t\tistk = 1;\n+\t} else {\n+\t\tint_enable = 0;\n+\t\tvec = 0;\n+\t\tistk = 0;\n+\t}\n+\tNT_LOG(DBG, ETHDEV, \"%s: set_rx_uw_data irq_vector=%u\\n\", __func__,\n+\t       rx_vq->irq_vector);\n+\tif (set_rx_uw_data(p_nthw_dbs, rx_vq->index,\n+\t\t\t   rx_vq->vq_type == PACKED_RING ?\n+\t\t\t   (uint64_t)rx_vq->desc_struct_phys_addr :\n+\t\t\t   (uint64_t)rx_vq->used_struct_phys_addr,\n+\t\t\t   rx_vq->host_id, qs, PACKED(rx_vq->vq_type),\n+\t\t\t   int_enable, vec, istk) != 0)\n+\t\treturn -1;\n+\n+\t/* Enable AM */\n+\trx_vq->am_enable = RX_AM_ENABLE;\n+\tif (set_rx_am_data(p_nthw_dbs, rx_vq->index,\n+\t\t\t   (uint64_t)rx_vq->avail_struct_phys_addr,\n+\t\t\t   rx_vq->am_enable, rx_vq->host_id,\n+\t\t\t   PACKED(rx_vq->vq_type),\n+\t\t\t   rx_vq->irq_vector >= 0 ? 
1 : 0) != 0)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq)\n+{\n+\tif (!tx_vq) {\n+\t\tNT_LOG(ERR, ETHDEV, \"%s: Invalid queue\\n\", __func__);\n+\t\treturn -1;\n+\t}\n+\n+\tnthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;\n+\n+\tif (tx_vq->index >= MAX_VIRT_QUEUES)\n+\t\treturn -1;\n+\n+\tif (tx_vq->usage != UNMANAGED)\n+\t\treturn -1;\n+\n+\tuint32_t qs = dbs_qsize_log2(tx_vq->queue_size);\n+\n+\t/* If ISTK is set, make sure to unset it */\n+\tif (set_tx_uw_data(p_nthw_dbs, tx_vq->index,\n+\t\t\t   tx_vq->vq_type == PACKED_RING ?\n+\t\t\t   (uint64_t)tx_vq->desc_struct_phys_addr :\n+\t\t\t   (uint64_t)tx_vq->used_struct_phys_addr,\n+\t\t\t   tx_vq->host_id, qs, PACKED(tx_vq->vq_type), 0, 0, 0,\n+\t\t\t   tx_vq->in_order) != 0)\n+\t\treturn -1;\n+\n+\t/* Disable AM */\n+\ttx_vq->am_enable = TX_AM_DISABLE;\n+\tif (set_tx_am_data(p_nthw_dbs, tx_vq->index,\n+\t\t\t   (uint64_t)tx_vq->avail_struct_phys_addr,\n+\t\t\t   tx_vq->am_enable, tx_vq->host_id,\n+\t\t\t   PACKED(tx_vq->vq_type), 0) != 0)\n+\t\treturn -1;\n+\n+\t/* let the FPGA finish packet processing */\n+\tif (dbs_wait_hw_queue_shutdown(tx_vq, 0) != 0)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq)\n+{\n+\tuint32_t int_enable;\n+\tuint32_t vec;\n+\tuint32_t istk;\n+\n+\tif (!tx_vq) {\n+\t\tNT_LOG(ERR, ETHDEV, \"%s: Invalid queue\\n\", __func__);\n+\t\treturn -1;\n+\t}\n+\n+\tnthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;\n+\n+\tif (tx_vq->index >= MAX_VIRT_QUEUES)\n+\t\treturn -1;\n+\n+\tif (tx_vq->usage != UNMANAGED)\n+\t\treturn -1;\n+\n+\tuint32_t qs = dbs_qsize_log2(tx_vq->queue_size);\n+\n+\t/* Set ISTK if irq_vector is used */\n+\tif (tx_vq->irq_vector >= 0 &&\n+\t\t\ttx_vq->irq_vector < MAX_MSIX_VECTORS_PR_VF) {\n+\t\tint_enable = 1;\n+\t\tvec = tx_vq->irq_vector;\n+\t\tistk = 1; /* Use sticky interrupt */\n+\t} else {\n+\t\tint_enable = 0;\n+\t\tvec = 0;\n+\t\tistk = 0;\n+\t}\n+\tif (set_tx_uw_data(p_nthw_dbs, tx_vq->index,\n+\t\t\t   tx_vq->vq_type == PACKED_RING ?\n+\t\t\t   (uint64_t)tx_vq->desc_struct_phys_addr :\n+\t\t\t   (uint64_t)tx_vq->used_struct_phys_addr,\n+\t\t\t   tx_vq->host_id, qs, PACKED(tx_vq->vq_type),\n+\t\t\t   int_enable, vec, istk, tx_vq->in_order) != 0)\n+\t\treturn -1;\n+\n+\t/* Enable AM */\n+\ttx_vq->am_enable = TX_AM_ENABLE;\n+\tif (set_tx_am_data(p_nthw_dbs, tx_vq->index,\n+\t\t\t   (uint64_t)tx_vq->avail_struct_phys_addr,\n+\t\t\t   tx_vq->am_enable, tx_vq->host_id,\n+\t\t\t   PACKED(tx_vq->vq_type),\n+\t\t\t   tx_vq->irq_vector >= 0 ? 
1 : 0) != 0)\n+\t\treturn -1;\n+\n+\treturn 0;\n+}\n+\n+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,\n+\t\tuint32_t outport)\n+{\n+\tnthw_dbs_t *p_nthw_dbs = tx_vq->mp_nthw_dbs;\n+\tuint32_t qs = dbs_qsize_log2(tx_vq->queue_size);\n+\n+\tif (set_tx_dr_data(p_nthw_dbs, tx_vq->index,\n+\t\t\t   (uint64_t)tx_vq->desc_struct_phys_addr, tx_vq->host_id,\n+\t\t\t   qs, outport, 0, PACKED(tx_vq->vq_type)) != 0)\n+\t\treturn -1;\n+\treturn nthw_enable_tx_virt_queue(tx_vq);\n+}\n+\n+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,\n+\t\t\t   uint32_t ir, uint32_t bs)\n+{\n+\treturn set_tx_qos_data(p_nthw_dbs, port, enable, ir, bs);\n+}\n+\n+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,\n+\t\t\t\tuint32_t divider)\n+{\n+\treturn set_tx_qos_rate(p_nthw_dbs, multiplier, divider);\n+}\n+\n+#define INDEX_PTR_NOT_VALID 0x80000000\n+static int dbs_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)\n+{\n+\tuint32_t ptr;\n+\tuint32_t queue;\n+\tuint32_t valid;\n+\n+\tconst int status = get_rx_ptr(p_nthw_dbs, &ptr, &queue, &valid);\n+\n+\tif (status == 0) {\n+\t\tif (valid)\n+\t\t\t*p_index = ptr;\n+\t\telse\n+\t\t\t*p_index = INDEX_PTR_NOT_VALID;\n+\t}\n+\treturn status;\n+}\n+\n+static int dbs_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t *p_index)\n+{\n+\tuint32_t ptr;\n+\tuint32_t queue;\n+\tuint32_t valid;\n+\n+\tconst int status = get_tx_ptr(p_nthw_dbs, &ptr, &queue, &valid);\n+\n+\tif (status == 0) {\n+\t\tif (valid)\n+\t\t\t*p_index = ptr;\n+\t\telse\n+\t\t\t*p_index = INDEX_PTR_NOT_VALID;\n+\t}\n+\treturn status;\n+}\n+\n+static int dbs_initialize_get_rx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)\n+{\n+\treturn set_rx_ptr_queue(p_nthw_dbs, queue);\n+}\n+\n+static int dbs_initialize_get_tx_ptr(nthw_dbs_t *p_nthw_dbs, uint32_t queue)\n+{\n+\treturn set_tx_ptr_queue(p_nthw_dbs, queue);\n+}\n+\n+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)\n+{\n+\tuint32_t busy;\n+\tuint32_t queue;\n+\tint err = 0;\n+\tnthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;\n+\n+\tdo {\n+\t\tif (rx)\n+\t\t\terr = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);\n+\t\telse\n+\t\t\terr = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);\n+\t} while (!err && busy);\n+\n+\treturn err;\n+}\n+\n+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)\n+{\n+\tint err = 0;\n+\tuint32_t idle = 0;\n+\tnthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;\n+\n+\terr = dbs_wait_on_busy(vq, &idle, rx);\n+\tif (err) {\n+\t\tif (err == -ENOTSUP) {\n+\t\t\tNT_OS_WAIT_USEC(200000);\n+\t\t\treturn 0;\n+\t\t}\n+\t\treturn -1;\n+\t}\n+\n+\tdo {\n+\t\tif (rx)\n+\t\t\terr = set_rx_idle(p_nthw_dbs, 1, vq->index);\n+\t\telse\n+\t\t\terr = set_tx_idle(p_nthw_dbs, 1, vq->index);\n+\n+\t\tif (err)\n+\t\t\treturn -1;\n+\n+\t\tif (dbs_wait_on_busy(vq, &idle, rx) != 0)\n+\t\t\treturn -1;\n+\n+\t} while (idle == 0);\n+\n+\treturn 0;\n+}\n+\n+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)\n+{\n+\tnthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;\n+\n+\tif (rxvq == NULL)\n+\t\treturn -1;\n+\n+\t/* Clear UW */\n+\trxvq->used_struct_phys_addr = NULL;\n+\tif (set_rx_uw_data(p_nthw_dbs, rxvq->index,\n+\t\t\t   (uint64_t)rxvq->used_struct_phys_addr, rxvq->host_id, 0,\n+\t\t\t   PACKED(rxvq->vq_type), 0, 0, 0) != 0)\n+\t\treturn -1;\n+\n+\t/* Disable AM */\n+\trxvq->am_enable = RX_AM_DISABLE;\n+\tif (set_rx_am_data(p_nthw_dbs, rxvq->index,\n+\t\t\t   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,\n+\t\t\t   
rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)\n+\t\treturn -1;\n+\n+\t/* Let the FPGA finish packet processing */\n+\tif (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)\n+\t\treturn -1;\n+\n+\t/* Clear rest of AM */\n+\trxvq->avail_struct_phys_addr = NULL;\n+\trxvq->host_id = 0;\n+\tif (set_rx_am_data(p_nthw_dbs, rxvq->index,\n+\t\t\t   (uint64_t)rxvq->avail_struct_phys_addr, rxvq->am_enable,\n+\t\t\t   rxvq->host_id, PACKED(rxvq->vq_type), 0) != 0)\n+\t\treturn -1;\n+\n+\t/* Clear DR */\n+\trxvq->desc_struct_phys_addr = NULL;\n+\tif (set_rx_dr_data(p_nthw_dbs, rxvq->index,\n+\t\t\t   (uint64_t)rxvq->desc_struct_phys_addr, rxvq->host_id, 0,\n+\t\t\t   rxvq->header, PACKED(rxvq->vq_type)) != 0)\n+\t\treturn -1;\n+\n+\t/* Initialize queue */\n+\tdbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);\n+\n+\t/* Reset queue state */\n+\trxvq->usage = UNUSED;\n+\trxvq->mp_nthw_dbs = p_nthw_dbs;\n+\trxvq->index = 0;\n+\trxvq->queue_size = 0;\n+\n+\treturn 0;\n+}\n+\n+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq)\n+{\n+\tif (rxvq == NULL || rxvq->usage != UNMANAGED)\n+\t\treturn -1;\n+\n+\treturn dbs_internal_release_rx_virt_queue(rxvq);\n+}\n+\n+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq)\n+{\n+\tif (rxvq == NULL || rxvq->usage != MANAGED)\n+\t\treturn -1;\n+\n+\tif (rxvq->p_virtual_addr) {\n+\t\tfree(rxvq->p_virtual_addr);\n+\t\trxvq->p_virtual_addr = NULL;\n+\t}\n+\n+\treturn dbs_internal_release_rx_virt_queue(rxvq);\n+}\n+\n+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)\n+{\n+\tnthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;\n+\n+\tif (txvq == NULL)\n+\t\treturn -1;\n+\n+\t/* Clear UW */\n+\ttxvq->used_struct_phys_addr = NULL;\n+\tif (set_tx_uw_data(p_nthw_dbs, txvq->index,\n+\t\t\t   (uint64_t)txvq->used_struct_phys_addr, txvq->host_id, 0,\n+\t\t\t   PACKED(txvq->vq_type), 0, 0, 0,\n+\t\t\t   txvq->in_order) != 0)\n+\t\treturn -1;\n+\n+\t/* Disable AM */\n+\ttxvq->am_enable = TX_AM_DISABLE;\n+\tif (set_tx_am_data(p_nthw_dbs, txvq->index,\n+\t\t\t   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,\n+\t\t\t   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)\n+\t\treturn -1;\n+\n+\t/* Let the FPGA finish packet processing */\n+\tif (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)\n+\t\treturn -1;\n+\n+\t/* Clear rest of AM */\n+\ttxvq->avail_struct_phys_addr = NULL;\n+\ttxvq->host_id = 0;\n+\tif (set_tx_am_data(p_nthw_dbs, txvq->index,\n+\t\t\t   (uint64_t)txvq->avail_struct_phys_addr, txvq->am_enable,\n+\t\t\t   txvq->host_id, PACKED(txvq->vq_type), 0) != 0)\n+\t\treturn -1;\n+\n+\t/* Clear DR */\n+\ttxvq->desc_struct_phys_addr = NULL;\n+\ttxvq->port = 0;\n+\ttxvq->header = 0;\n+\tif (set_tx_dr_data(p_nthw_dbs, txvq->index,\n+\t\t\t   (uint64_t)txvq->desc_struct_phys_addr, txvq->host_id, 0,\n+\t\t\t   txvq->port, txvq->header,\n+\t\t\t   PACKED(txvq->vq_type)) != 0)\n+\t\treturn -1;\n+\n+\t/* Clear QP */\n+\ttxvq->virtual_port = 0;\n+\tif (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) !=\n+\t\t\t0)\n+\t\treturn -1;\n+\n+\t/* Initialize queue */\n+\tdbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);\n+\n+\t/* Reset queue state */\n+\ttxvq->usage = UNUSED;\n+\ttxvq->mp_nthw_dbs = p_nthw_dbs;\n+\ttxvq->index = 0;\n+\ttxvq->queue_size = 0;\n+\n+\treturn 0;\n+}\n+\n+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq)\n+{\n+\tif (txvq == NULL || txvq->usage != UNMANAGED)\n+\t\treturn -1;\n+\n+\treturn dbs_internal_release_tx_virt_queue(txvq);\n+}\n+\n+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue 
*txvq)\n+{\n+\tif (txvq == NULL || txvq->usage != MANAGED)\n+\t\treturn -1;\n+\n+\tif (txvq->p_virtual_addr) {\n+\t\tfree(txvq->p_virtual_addr);\n+\t\ttxvq->p_virtual_addr = NULL;\n+\t}\n+\n+\treturn dbs_internal_release_tx_virt_queue(txvq);\n+}\n+\n+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,\n+\tuint32_t index, uint16_t start_idx,\n+\tuint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,\n+\tvoid *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,\n+\tuint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,\n+\tint irq_vector, uint32_t in_order)\n+{\n+\tuint32_t int_enable;\n+\tuint32_t vec;\n+\tuint32_t istk;\n+\tuint32_t qs = dbs_qsize_log2(queue_size);\n+\n+\t/*\n+\t * Setup DBS module - DSF00094\n+\t * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all\n+\t *    DBS_TX_QUEUES entries.\n+\t */\n+\tif (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr,\n+\t\t\t   host_id, qs, port, header, PACKED(vq_type)) != 0)\n+\t\treturn NULL;\n+\n+\t/*\n+\t * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all\n+\t *    DBS_TX_QUEUES entries.\n+\t *    Notice: We always start out with interrupts disabled (by setting the\n+\t *            \"irq_vector\" argument to -1). Queues that require interrupts will have\n+\t *             it enabled at a later time (after we have enabled vfio interrupts in the\n+\t *             kernel).\n+\t */\n+\tint_enable = 0;\n+\tvec = 0;\n+\tistk = 0;\n+\n+\tif (set_tx_uw_data(p_nthw_dbs, index,\n+\t\t\t   vq_type == PACKED_RING ?\n+\t\t\t   (uint64_t)desc_struct_phys_addr :\n+\t\t\t   (uint64_t)used_struct_phys_addr,\n+\t\t\t   host_id, qs, PACKED(vq_type), int_enable, vec, istk,\n+\t\t\t   in_order) != 0)\n+\t\treturn NULL;\n+\n+\t/*\n+\t * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;\n+\t *    good idea to initialize all DBS_TX_QUEUES entries.\n+\t */\n+\tif (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,\n+\t\t\t   TX_AM_DISABLE, host_id, PACKED(vq_type),\n+\t\t\t   irq_vector >= 0 ? 1 : 0) != 0)\n+\t\treturn NULL;\n+\n+\t/*\n+\t * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the\n+\t *    DBS.TX_INIT register.\n+\t */\n+\tdbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);\n+\n+\tif (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)\n+\t\treturn NULL;\n+\n+\t/*\n+\t * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;\n+\t *    good idea to initialize all DBS_TX_QUEUES entries.\n+\t *    Notice: We do this only for queues that don't require interrupts (i.e. if\n+\t *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA\n+\t *            enabled at a later time (after we have enabled vfio interrupts in the\n+\t *            kernel).\n+\t */\n+\tif (irq_vector < 0) {\n+\t\tif (set_tx_am_data(p_nthw_dbs, index,\n+\t\t\t\t   (uint64_t)avail_struct_phys_addr, TX_AM_ENABLE,\n+\t\t\t\t   host_id, PACKED(vq_type),\n+\t\t\t\t   irq_vector >= 0 ? 1 : 0) != 0)\n+\t\t\treturn NULL;\n+\t}\n+\n+\t/* Save queue state */\n+\ttxvq[index].usage = UNMANAGED;\n+\ttxvq[index].mp_nthw_dbs = p_nthw_dbs;\n+\ttxvq[index].index = index;\n+\ttxvq[index].queue_size = queue_size;\n+\ttxvq[index].am_enable = (irq_vector < 0) ? 
TX_AM_ENABLE : TX_AM_DISABLE;\n+\ttxvq[index].host_id = host_id;\n+\ttxvq[index].port = port;\n+\ttxvq[index].virtual_port = virtual_port;\n+\ttxvq[index].header = header;\n+\ttxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;\n+\ttxvq[index].used_struct_phys_addr = used_struct_phys_addr;\n+\ttxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;\n+\ttxvq[index].vq_type = vq_type;\n+\ttxvq[index].in_order = in_order;\n+\ttxvq[index].irq_vector = irq_vector;\n+\n+\t/* Return queue handle */\n+\treturn &txvq[index];\n+}\n+\n+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,\n+\tuint32_t index, uint32_t queue_size,\n+\tuint32_t host_id, uint32_t header,\n+\tstruct nthw_memory_descriptor *p_virt_struct_area,\n+\tstruct nthw_memory_descriptor *p_packet_buffers, int irq_vector)\n+{\n+\tstruct virtq_struct_layout_s virtq_struct_layout =\n+\t\t\tdbs_calc_struct_layout(queue_size);\n+\n+\tdbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,\n+\t\t\t\t\t  (char *)p_virt_struct_area->virt_addr +\n+\t\t\t\t\t  virtq_struct_layout.used_offset,\n+\t\t\t\t\t  (char *)p_virt_struct_area->virt_addr +\n+\t\t\t\t\t  virtq_struct_layout.desc_offset,\n+\t\t\t\t\t  p_packet_buffers, (uint16_t)queue_size,\n+\t\t\t\t\t  p_packet_buffers ? (uint16_t)queue_size : 0,\n+\t\t\t\t\t  VIRTQ_DESC_F_WRITE /* Rx */);\n+\n+\trxvq[index].p_avail = p_virt_struct_area->virt_addr;\n+\trxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +\n+\t\t\t\t     virtq_struct_layout.used_offset);\n+\trxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +\n+\t\t\t\t     virtq_struct_layout.desc_offset);\n+\n+\trxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;\n+\trxvq[index].used_idx = 0;\n+\trxvq[index].cached_idx = 0;\n+\trxvq[index].p_virtual_addr = NULL;\n+\n+\tif (p_packet_buffers) {\n+\t\trxvq[index].p_virtual_addr =\n+\t\t\tmalloc(queue_size * sizeof(*p_packet_buffers));\n+\t\tmemcpy(rxvq[index].p_virtual_addr, p_packet_buffers,\n+\t\t       queue_size * sizeof(*p_packet_buffers));\n+\t}\n+\n+\tnthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0,\n+\t\t\t\t (void *)p_virt_struct_area->phys_addr,\n+\t\t\t\t (char *)p_virt_struct_area->phys_addr +\n+\t\t\t\t virtq_struct_layout.used_offset,\n+\t\t\t\t (char *)p_virt_struct_area->phys_addr +\n+\t\t\t\t virtq_struct_layout.desc_offset,\n+\t\t\t\t (uint16_t)queue_size, host_id, header,\n+\t\t\t\t SPLIT_RING, irq_vector);\n+\n+\trxvq[index].usage = MANAGED;\n+\n+\treturn &rxvq[index];\n+}\n+\n+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,\n+\tuint32_t index, uint32_t queue_size,\n+\tuint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,\n+\tint irq_vector, uint32_t in_order,\n+\tstruct nthw_memory_descriptor *p_virt_struct_area,\n+\tstruct nthw_memory_descriptor *p_packet_buffers)\n+{\n+\tstruct virtq_struct_layout_s virtq_struct_layout =\n+\t\t\tdbs_calc_struct_layout(queue_size);\n+\n+\tdbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,\n+\t\t\t\t\t  (char *)p_virt_struct_area->virt_addr +\n+\t\t\t\t\t  virtq_struct_layout.used_offset,\n+\t\t\t\t\t  (char *)p_virt_struct_area->virt_addr +\n+\t\t\t\t\t  virtq_struct_layout.desc_offset,\n+\t\t\t\t\t  p_packet_buffers, (uint16_t)queue_size, 0, 0 /* Tx */);\n+\n+\ttxvq[index].p_avail = p_virt_struct_area->virt_addr;\n+\ttxvq[index].p_used = (void *)((char *)p_virt_struct_area->virt_addr +\n+\t\t\t\t     
virtq_struct_layout.used_offset);\n+\ttxvq[index].p_desc = (void *)((char *)p_virt_struct_area->virt_addr +\n+\t\t\t\t     virtq_struct_layout.desc_offset);\n+\ttxvq[index].queue_size = (le16)queue_size;\n+\ttxvq[index].am_idx = 0;\n+\ttxvq[index].used_idx = 0;\n+\ttxvq[index].cached_idx = 0;\n+\ttxvq[index].p_virtual_addr = NULL;\n+\n+\ttxvq[index].tx_descr_avail_idx = 0;\n+\n+\tif (p_packet_buffers) {\n+\t\ttxvq[index].p_virtual_addr =\n+\t\t\tmalloc(queue_size * sizeof(*p_packet_buffers));\n+\t\tmemcpy(txvq[index].p_virtual_addr, p_packet_buffers,\n+\t\t       queue_size * sizeof(*p_packet_buffers));\n+\t}\n+\n+\tnthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0,\n+\t\t\t\t (void *)p_virt_struct_area->phys_addr,\n+\t\t\t\t (char *)p_virt_struct_area->phys_addr +\n+\t\t\t\t virtq_struct_layout.used_offset,\n+\t\t\t\t (char *)p_virt_struct_area->phys_addr +\n+\t\t\t\t virtq_struct_layout.desc_offset,\n+\t\t\t\t (uint16_t)queue_size, host_id, port, virtual_port,\n+\t\t\t\t header, SPLIT_RING, irq_vector, in_order);\n+\n+\ttxvq[index].usage = MANAGED;\n+\n+\treturn &txvq[index];\n+}\n+\n+/*\n+ * Packed Ring\n+ */\n+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,\n+\tstruct pvirtq_struct_layout_s *pvirtq_layout,\n+\tstruct nthw_memory_descriptor *p_virt_struct_area,\n+\tstruct nthw_memory_descriptor *p_packet_buffers, ule16 flags, int rx)\n+{\n+\t/* page aligned */\n+\tassert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);\n+\tassert(p_packet_buffers);\n+\n+\t/* clean canvas */\n+\tmemset(p_virt_struct_area->virt_addr, 0,\n+\t       sizeof(struct pvirtq_desc) * vq->queue_size +\n+\t       sizeof(struct pvirtq_event_suppress) * 2 +\n+\t       sizeof(int) * vq->queue_size);\n+\n+\tpvirtq_layout->device_event_offset =\n+\t\tsizeof(struct pvirtq_desc) * vq->queue_size;\n+\tpvirtq_layout->driver_event_offset =\n+\t\tpvirtq_layout->device_event_offset +\n+\t\tsizeof(struct pvirtq_event_suppress);\n+\n+\tvq->desc = p_virt_struct_area->virt_addr;\n+\tvq->device_event = (void *)((uintptr_t)vq->desc +\n+\t\t\t\t    pvirtq_layout->device_event_offset);\n+\tvq->driver_event = (void *)((uintptr_t)vq->desc +\n+\t\t\t\t    pvirtq_layout->driver_event_offset);\n+\n+\tvq->next_avail = 0;\n+\tvq->next_used = 0;\n+\tvq->avail_wrap_count = 1;\n+\tvq->used_wrap_count = 1;\n+\n+\t/*\n+\t * Only possible if FPGA always delivers in-order\n+\t * Buffer ID used is the index in the pPacketBuffers array\n+\t */\n+\tunsigned int i;\n+\tstruct pvirtq_desc *p_desc = vq->desc;\n+\n+\tfor (i = 0; i < vq->queue_size; i++) {\n+\t\tif (rx) {\n+\t\t\tp_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;\n+\t\t\tp_desc[i].len = p_packet_buffers[i].len;\n+\t\t}\n+\t\tp_desc[i].id = i;\n+\t\tp_desc[i].flags = flags;\n+\t}\n+\n+\tif (rx)\n+\t\tvq->avail_wrap_count ^=\n+\t\t\t1; /* filled up available buffers for Rx */\n+\telse\n+\t\tvq->used_wrap_count ^= 1; /* pre-fill free buffer IDs */\n+\n+\tif (vq->queue_size == 0)\n+\t\treturn -1; /* don't allocate memory with size of 0 bytes */\n+\tvq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));\n+\tif (vq->p_virtual_addr == NULL)\n+\t\treturn -1;\n+\n+\tmemcpy(vq->p_virtual_addr, p_packet_buffers,\n+\t       vq->queue_size * sizeof(*p_packet_buffers));\n+\n+\t/* Not used yet by FPGA - make sure we disable */\n+\tvq->device_event->flags = RING_EVENT_FLAGS_DISABLE;\n+\n+\treturn 0;\n+}\n+\n+static struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,\n+\tuint32_t index, uint32_t 
queue_size,\n+\tuint32_t host_id, uint32_t header,\n+\tstruct nthw_memory_descriptor *p_virt_struct_area,\n+\tstruct nthw_memory_descriptor *p_packet_buffers, int irq_vector)\n+{\n+\tstruct pvirtq_struct_layout_s pvirtq_layout;\n+\tstruct nthw_virt_queue *vq = &rxvq[index];\n+\t/* Set size and setup packed vq ring */\n+\tvq->queue_size = queue_size;\n+\t/* Use Avail flag bit == 1 because wrap bit is initially set to 1 - and Used is inverse */\n+\tif (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,\n+\t\t\t\t\t\t p_virt_struct_area, p_packet_buffers,\n+\t\t\t\t\t\t VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)\n+\t\treturn NULL;\n+\n+\tnthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000,\n+\t\t\t\t 0, /* start wrap ring counter as 1 */\n+\t\t\t\t (void *)((uintptr_t)p_virt_struct_area->phys_addr +\n+\t\t\t\t\t  pvirtq_layout.driver_event_offset),\n+\t\t\t\t (void *)((uintptr_t)p_virt_struct_area->phys_addr +\n+\t\t\t\t\t  pvirtq_layout.device_event_offset),\n+\t\t\t\t p_virt_struct_area->phys_addr, (uint16_t)queue_size,\n+\t\t\t\t host_id, header, PACKED_RING, irq_vector);\n+\n+\tvq->usage = MANAGED;\n+\treturn vq;\n+}\n+\n+static struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,\n+\tuint32_t index, uint32_t queue_size,\n+\tuint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,\n+\tint irq_vector, uint32_t in_order,\n+\tstruct nthw_memory_descriptor *p_virt_struct_area,\n+\tstruct nthw_memory_descriptor *p_packet_buffers)\n+{\n+\tstruct pvirtq_struct_layout_s pvirtq_layout;\n+\tstruct nthw_virt_queue *vq = &txvq[index];\n+\t/* Set size and setup packed vq ring */\n+\tvq->queue_size = queue_size;\n+\tif (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout,\n+\t\t\tp_virt_struct_area,\n+\t\t\tp_packet_buffers, 0, 0) != 0)\n+\t\treturn NULL;\n+\n+\tnthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000,\n+\t\t\t\t 0, /* start wrap ring counter as 1 */\n+\t\t\t\t (void *)((uintptr_t)p_virt_struct_area->phys_addr +\n+\t\t\t\t\t  pvirtq_layout.driver_event_offset),\n+\t\t\t\t (void *)((uintptr_t)p_virt_struct_area->phys_addr +\n+\t\t\t\t\t  pvirtq_layout.device_event_offset),\n+\t\t\t\t p_virt_struct_area->phys_addr, (uint16_t)queue_size,\n+\t\t\t\t host_id, port, virtual_port, header, PACKED_RING,\n+\t\t\t\t irq_vector, in_order);\n+\n+\tvq->usage = MANAGED;\n+\treturn vq;\n+}\n+\n+/*\n+ * Create a Managed Rx Virt Queue\n+ *\n+ * p_virt_struct_area - Memory that can be used for virtQueue structs\n+ * p_packet_buffers - Memory that can be used for packet buffers. 
Array must have queue_size entries\n+ *\n+ * Notice: The queue will be created with interrupts disabled.\n+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()\n+ *   afterwards.\n+ */\n+struct nthw_virt_queue *\n+nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs, uint32_t index,\n+\t\t\t\t uint32_t queue_size, uint32_t host_id,\n+\t\t\t\t uint32_t header,\n+\t\t\t\t struct nthw_memory_descriptor *p_virt_struct_area,\n+\t\t\t\t struct nthw_memory_descriptor *p_packet_buffers,\n+\t\t\t\t uint32_t vq_type, int irq_vector)\n+{\n+\tswitch (vq_type) {\n+\tcase SPLIT_RING:\n+\t\treturn nthw_setup_managed_rx_virt_queue_split(p_nthw_dbs,\n+\t\t\tindex, queue_size, host_id, header,\n+\t\t\tp_virt_struct_area, p_packet_buffers, irq_vector);\n+\tcase PACKED_RING:\n+\t\treturn nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs,\n+\t\t\tindex, queue_size, host_id, header,\n+\t\t\tp_virt_struct_area, p_packet_buffers, irq_vector);\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\treturn NULL;\n+}\n+\n+/*\n+ * Create a Managed Tx Virt Queue\n+ *\n+ * p_virt_struct_area - Memory that can be used for virtQueue structs\n+ * p_packet_buffers - Memory that can be used for packet buffers. Array must have queue_size entries\n+ *\n+ * Notice: The queue will be created with interrupts disabled.\n+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()\n+ *   afterwards.\n+ */\n+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,\n+\tuint32_t index, uint32_t queue_size,\n+\tuint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,\n+\tstruct nthw_memory_descriptor *p_virt_struct_area,\n+\tstruct nthw_memory_descriptor *p_packet_buffers, uint32_t vq_type,\n+\tint irq_vector, uint32_t in_order)\n+{\n+\tswitch (vq_type) {\n+\tcase SPLIT_RING:\n+\t\treturn nthw_setup_managed_tx_virt_queue_split(p_nthw_dbs, index,\n+\t\t\tqueue_size, host_id, port, virtual_port,\n+\t\t\theader, irq_vector, in_order, p_virt_struct_area,\n+\t\t\tp_packet_buffers);\n+\tcase PACKED_RING:\n+\t\treturn nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index,\n+\t\t\tqueue_size, host_id, port, virtual_port,\n+\t\t\theader, irq_vector, in_order, p_virt_struct_area,\n+\t\t\tp_packet_buffers);\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\treturn NULL;\n+}\n+\n+/*\n+ * Packed Ring helper macros\n+ */\n+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)\n+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 
0 : VIRTQ_DESC_F_USED)\n+\n+#define inc_avail(_vq, _num)                               \\\n+\tdo {                                             \\\n+\t\t__typeof__(_vq) (vq) = (_vq); \\\n+\t\t__typeof__(_num) (num) = (_num); \\\n+\t\t(vq)->next_avail += num;                   \\\n+\t\tif ((vq)->next_avail >= (vq)->queue_size) {   \\\n+\t\t\t(vq)->next_avail -= (vq)->queue_size; \\\n+\t\t\t(vq)->avail_wrap_count ^= 1;       \\\n+\t\t}                                        \\\n+\t} while (0)\n+\n+#define inc_used(_vq, _num)                               \\\n+\tdo {                                            \\\n+\t\t__typeof__(_vq) (vq) = (_vq); \\\n+\t\t__typeof__(_num) (num) = (_num); \\\n+\t\t(vq)->next_used += num;                   \\\n+\t\tif ((vq)->next_used >= (vq)->queue_size) {   \\\n+\t\t\t(vq)->next_used -= (vq)->queue_size; \\\n+\t\t\t(vq)->used_wrap_count ^= 1;       \\\n+\t\t}                                       \\\n+\t} while (0)\n+\n+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,\n+\t\t\t     struct nthw_received_packets *rp, uint16_t *nb_pkts)\n+{\n+\tle16 segs = 0;\n+\tuint16_t pkts = 0;\n+\n+\tif (rxvq->vq_type == SPLIT_RING) {\n+\t\tle16 i;\n+\t\tle16 entries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);\n+\n+\t\tif (entries_ready < n) {\n+\t\t\t/* Look for more packets */\n+\t\t\trxvq->cached_idx = rxvq->p_used->idx;\n+\t\t\tentries_ready = (le16)(rxvq->cached_idx - rxvq->used_idx);\n+\t\t\tif (entries_ready == 0) {\n+\t\t\t\t*nb_pkts = 0;\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\n+\t\t\tif (n > entries_ready)\n+\t\t\t\tn = entries_ready;\n+\t\t}\n+\n+\t\t/* Give packets - make sure all packets are whole packets.\n+\t\t * Valid because queue_size is always 2^n\n+\t\t */\n+\t\tconst le16 queue_mask = (le16)(rxvq->queue_size - 1);\n+\t\tconst ule32 buf_len = rxvq->p_desc[0].len;\n+\n+\t\tle16 used = rxvq->used_idx;\n+\n+\t\tfor (i = 0; i < n; ++i) {\n+\t\t\tle32 id = rxvq->p_used->ring[used & queue_mask].id;\n+\n+\t\t\trp[i].addr = rxvq->p_virtual_addr[id].virt_addr;\n+\t\t\trp[i].len = rxvq->p_used->ring[used & queue_mask].len;\n+\n+\t\t\tuint32_t pkt_len =\n+\t\t\t\t((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;\n+\n+\t\t\tif (pkt_len > buf_len) {\n+\t\t\t\t/* segmented */\n+\t\t\t\tint nbsegs = (pkt_len + buf_len - 1) / buf_len;\n+\n+\t\t\t\tif (((int)i + nbsegs) > n) {\n+\t\t\t\t\t/* don't have enough segments - break out */\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\n+\t\t\t\tint ii;\n+\n+\t\t\t\tfor (ii = 1; ii < nbsegs; ii++) {\n+\t\t\t\t\t++i;\n+\t\t\t\t\tid = rxvq->p_used\n+\t\t\t\t\t     ->ring[(used + ii) &\n+\t\t\t\t\t\t\t\tqueue_mask]\n+\t\t\t\t\t     .id;\n+\t\t\t\t\trp[i].addr =\n+\t\t\t\t\t\trxvq->p_virtual_addr[id].virt_addr;\n+\t\t\t\t\trp[i].len = rxvq->p_used\n+\t\t\t\t\t\t    ->ring[(used + ii) &\n+\t\t\t\t\t\t\t\t       queue_mask]\n+\t\t\t\t\t\t    .len;\n+\t\t\t\t}\n+\t\t\t\tused += nbsegs;\n+\t\t\t} else {\n+\t\t\t\t++used;\n+\t\t\t}\n+\n+\t\t\tpkts++;\n+\t\t\tsegs = i + 1;\n+\t\t}\n+\n+\t\trxvq->used_idx = used;\n+\t} else if (rxvq->vq_type == PACKED_RING) {\n+\t\t/* This requires in-order behavior from FPGA */\n+\t\tint i;\n+\n+\t\tfor (i = 0; i < n; i++) {\n+\t\t\tstruct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];\n+\n+\t\t\tule16 flags = desc->flags;\n+\t\t\tuint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);\n+\t\t\tuint8_t used = !!(flags & VIRTQ_DESC_F_USED);\n+\n+\t\t\tif (avail != rxvq->used_wrap_count ||\n+\t\t\t\t\tused != rxvq->used_wrap_count)\n+\t\t\t\tbreak;\n+\n+\t\t\trp[pkts].addr = 
rxvq->p_virtual_addr[desc->id].virt_addr;\n+\t\t\trp[pkts].len = desc->len;\n+\t\t\tpkts++;\n+\n+\t\t\tinc_used(rxvq, 1);\n+\t\t}\n+\n+\t\tsegs = pkts;\n+\t}\n+\n+\t*nb_pkts = pkts;\n+\treturn segs;\n+}\n+\n+/*\n+ * Put buffers back into Avail Ring\n+ */\n+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, le16 n)\n+{\n+\tif (rxvq->vq_type == SPLIT_RING) {\n+\t\trxvq->am_idx = (le16)(rxvq->am_idx + n);\n+\t\trxvq->p_avail->idx = rxvq->am_idx;\n+\t} else if (rxvq->vq_type == PACKED_RING) {\n+\t\tint i;\n+\t\t/*\n+\t\t * Defer flags update on first segment - due to serialization towards HW and\n+\t\t * when jumbo segments are added\n+\t\t */\n+\n+\t\tule16 first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |\n+\t\t\t\t    used_flag_inv(rxvq);\n+\t\tstruct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];\n+\n+\t\tuint32_t len = rxvq->p_virtual_addr[0].len; /* all same size */\n+\n+\t\t/* Optimization point: use in-order release */\n+\n+\t\tfor (i = 0; i < n; i++) {\n+\t\t\tstruct pvirtq_desc *desc =\n+\t\t\t\t\t&rxvq->desc[rxvq->next_avail];\n+\n+\t\t\tdesc->id = rxvq->next_avail;\n+\t\t\tdesc->addr =\n+\t\t\t\t(ule64)rxvq->p_virtual_addr[desc->id].phys_addr;\n+\t\t\tdesc->len = len;\n+\t\t\tif (i)\n+\t\t\t\tdesc->flags = VIRTQ_DESC_F_WRITE |\n+\t\t\t\t\t      avail_flag(rxvq) |\n+\t\t\t\t\t      used_flag_inv(rxvq);\n+\n+\t\t\tinc_avail(rxvq, 1);\n+\t\t}\n+\t\trte_rmb();\n+\t\tfirst_desc->flags = first_flags;\n+\t}\n+}\n+\n+#define vq_log_arg(vq, format, ...)\n+\n+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,\n+\t\t\t     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,\n+\t\t\t     struct nthw_memory_descriptor **p_virt_addr)\n+{\n+\tint m = 0;\n+\tle16 queue_mask = (le16)(txvq->queue_size -\n+\t\t\t\t1); /* Valid because queue_size is always 2^n */\n+\t*p_virt_addr = txvq->p_virtual_addr;\n+\n+\tif (txvq->vq_type == SPLIT_RING) {\n+\t\tcvq->s = txvq->p_desc;\n+\t\tcvq->vq_type = SPLIT_RING;\n+\n+\t\t*first_idx = txvq->tx_descr_avail_idx;\n+\n+\t\tle16 entries_used =\n+\t\t\t(le16)((txvq->tx_descr_avail_idx - txvq->cached_idx) &\n+\t\t\t       queue_mask);\n+\t\tle16 entries_ready = (le16)(txvq->queue_size - 1 - entries_used);\n+\n+\t\tvq_log_arg(txvq,\n+\t\t\t   \"ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i\\n\",\n+\t\t\t   n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used,\n+\t\t\t   entries_ready, txvq->p_used->idx);\n+\n+\t\tif (entries_ready < n) {\n+\t\t\t/*\n+\t\t\t * Look for more packets.\n+\t\t\t * Using the used_idx in the avail ring since they are held synchronous\n+\t\t\t * because of in-order\n+\t\t\t */\n+\t\t\ttxvq->cached_idx =\n+\t\t\t\ttxvq->p_avail->ring[(txvq->p_used->idx - 1) &\n+\t\t\t\t\t\t\t\t\t  queue_mask];\n+\n+\t\t\tvq_log_arg(txvq,\n+\t\t\t\t   \"_update: get cachedidx %i (used_idx-1 %i)\\n\",\n+\t\t\t\t   txvq->cached_idx,\n+\t\t\t\t   (txvq->p_used->idx - 1) & queue_mask);\n+\t\t\tentries_used = (le16)((txvq->tx_descr_avail_idx -\n+\t\t\t\t\t      txvq->cached_idx) &\n+\t\t\t\t\t     queue_mask);\n+\t\t\tentries_ready =\n+\t\t\t\t(le16)(txvq->queue_size - 1 - entries_used);\n+\t\t\tvq_log_arg(txvq, \"new used: %i, ready %i\\n\",\n+\t\t\t\t   entries_used, entries_ready);\n+\t\t\tif (n > entries_ready)\n+\t\t\t\tn = entries_ready;\n+\t\t}\n+\t} else if (txvq->vq_type == PACKED_RING) {\n+\t\tint i;\n+\n+\t\tcvq->p = txvq->desc;\n+\t\tcvq->vq_type = PACKED_RING;\n+\n+\t\tif (txvq->outs.num) {\n+\t\t\t*first_idx = txvq->outs.next;\n+\t\t\tuint16_t num = RTE_MIN(n, 
txvq->outs.num);\n+\n+\t\t\ttxvq->outs.next = (txvq->outs.next + num) & queue_mask;\n+\t\t\ttxvq->outs.num -= num;\n+\n+\t\t\tif (n == num)\n+\t\t\t\treturn n;\n+\n+\t\t\tm = num;\n+\t\t\tn -= num;\n+\t\t} else {\n+\t\t\t*first_idx = txvq->next_used;\n+\t\t}\n+\t\t/* iterate the ring - this requires in-order behavior from FPGA */\n+\t\tfor (i = 0; i < n; i++) {\n+\t\t\tstruct pvirtq_desc *desc = &txvq->desc[txvq->next_used];\n+\n+\t\t\tule16 flags = desc->flags;\n+\t\t\tuint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);\n+\t\t\tuint8_t used = !!(flags & VIRTQ_DESC_F_USED);\n+\n+\t\t\tif (avail != txvq->used_wrap_count ||\n+\t\t\t\t\tused != txvq->used_wrap_count) {\n+\t\t\t\tn = i;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tle16 incr = (desc->id - txvq->next_used) & queue_mask;\n+\n+\t\t\ti += incr;\n+\t\t\tinc_used(txvq, incr + 1);\n+\t\t}\n+\n+\t\tif (i > n) {\n+\t\t\tint outs_num = i - n;\n+\n+\t\t\ttxvq->outs.next = (txvq->next_used - outs_num) &\n+\t\t\t\t\t  queue_mask;\n+\t\t\ttxvq->outs.num = outs_num;\n+\t\t}\n+\n+\t} else {\n+\t\treturn 0;\n+\t}\n+\treturn m + n;\n+}\n+\n+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,\n+\t\t\t     uint16_t n_segs[])\n+{\n+\tint i;\n+\n+\tif (txvq->vq_type == SPLIT_RING) {\n+\t\t/* Valid because queue_size is always 2^n */\n+\t\tle16 queue_mask = (le16)(txvq->queue_size - 1);\n+\n+\t\tvq_log_arg(txvq, \"pkts %i, avail idx %i, start at %i\\n\", n,\n+\t\t\t   txvq->am_idx, txvq->tx_descr_avail_idx);\n+\t\tfor (i = 0; i < n; i++) {\n+\t\t\tint idx = txvq->am_idx & queue_mask;\n+\n+\t\t\ttxvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;\n+\t\t\ttxvq->tx_descr_avail_idx =\n+\t\t\t\t(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;\n+\t\t\ttxvq->am_idx++;\n+\t\t}\n+\t\t/* Make sure the ring has been updated before HW reads index update */\n+\t\trte_mb();\n+\t\ttxvq->p_avail->idx = txvq->am_idx;\n+\t\tvq_log_arg(txvq, \"new avail idx %i, descr_idx %i\\n\",\n+\t\t\t   txvq->p_avail->idx, txvq->tx_descr_avail_idx);\n+\n+\t} else if (txvq->vq_type == PACKED_RING) {\n+\t\t/*\n+\t\t * Defer flags update on first segment - due to serialization towards HW and\n+\t\t * when jumbo segments are added\n+\t\t */\n+\n+\t\tule16 first_flags = avail_flag(txvq) | used_flag_inv(txvq);\n+\t\tstruct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];\n+\n+\t\tfor (i = 0; i < n; i++) {\n+\t\t\tstruct pvirtq_desc *desc =\n+\t\t\t\t\t&txvq->desc[txvq->next_avail];\n+\n+\t\t\tdesc->id = txvq->next_avail;\n+\t\t\tdesc->addr =\n+\t\t\t\t(ule64)txvq->p_virtual_addr[desc->id].phys_addr;\n+\n+\t\t\tif (i)\n+\t\t\t\t/* bitwise-or here because next flags may already have been setup */\n+\t\t\t\tdesc->flags |= avail_flag(txvq) |\n+\t\t\t\t\t       used_flag_inv(txvq);\n+\n+\t\t\tinc_avail(txvq, 1);\n+\t\t}\n+\t\t/* Proper read barrier before FPGA may see first flags */\n+\t\trte_rmb();\n+\t\tfirst_desc->flags = first_flags;\n+\t}\n+}\n+\n+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index)\n+{\n+\tuint32_t rx_ptr;\n+\tuint32_t loops = 100000;\n+\n+\tdbs_initialize_get_rx_ptr(rxvq->mp_nthw_dbs, rxvq->index);\n+\tdo {\n+\t\tif (dbs_get_rx_ptr(rxvq->mp_nthw_dbs, &rx_ptr) != 0)\n+\t\t\treturn -1;\n+\t\tif (--loops == 0)\n+\t\t\treturn -1;\n+\t\tusleep(10);\n+\t} while (rx_ptr == INDEX_PTR_NOT_VALID);\n+\n+\t*index = (uint16_t)(rx_ptr & 0xffff);\n+\treturn 0;\n+}\n+\n+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index)\n+{\n+\tuint32_t tx_ptr;\n+\tuint32_t loops = 
100000;\n+\n+\tdbs_initialize_get_tx_ptr(txvq->mp_nthw_dbs, txvq->index);\n+\tdo {\n+\t\tif (dbs_get_tx_ptr(txvq->mp_nthw_dbs, &tx_ptr) != 0)\n+\t\t\treturn -1;\n+\t\tif (--loops == 0)\n+\t\t\treturn -1;\n+\t\tusleep(10);\n+\t} while (tx_ptr == INDEX_PTR_NOT_VALID);\n+\n+\t*index = (uint16_t)(tx_ptr & 0xffff);\n+\treturn 0;\n+}\ndiff --git a/drivers/net/ntnic/ntnic_dbsconfig.h b/drivers/net/ntnic/ntnic_dbsconfig.h\nnew file mode 100644\nindex 0000000000..ceae535741\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_dbsconfig.h\n@@ -0,0 +1,251 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef NTNIC_DBS_CONFIG_H\n+#define NTNIC_DBS_CONFIG_H\n+\n+#include <stdint.h>\n+#include \"nthw_drv.h\"\n+\n+struct nthw_virt_queue;\n+\n+struct nthw_memory_descriptor {\n+\tvoid *phys_addr;\n+\tvoid *virt_addr;\n+\tuint32_t len;\n+};\n+\n+#define ule64 uint64_t\n+#define ule32 uint32_t\n+#define ule16 uint16_t\n+\n+#define MAX_MSIX_VECTORS_PR_VF 8\n+\n+#define SPLIT_RING 0\n+#define PACKED_RING 1\n+#define IN_ORDER 1\n+#define NO_ORDER_REQUIRED 0\n+\n+/*\n+ * SPLIT : This marks a buffer as continuing via the next field.\n+ * PACKED: This marks a buffer as continuing. (packed does not have a next field, so must be\n+ * contiguous) In Used descriptors it must be ignored\n+ */\n+#define VIRTQ_DESC_F_NEXT 1\n+/*\n+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).\n+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).\n+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by\n+ * the device into any parts of the buffer.\n+ */\n+#define VIRTQ_DESC_F_WRITE 2\n+/*\n+ * SPLIT : This means the buffer contains a list of buffer descriptors.\n+ * PACKED: This means the element contains a table of descriptors.\n+ */\n+#define VIRTQ_DESC_F_INDIRECT 4\n+\n+/*\n+ * Split Ring virtq Descriptor\n+ */\n+#pragma pack(1)\n+struct virtq_desc {\n+\t/* Address (guest-physical). */\n+\tule64 addr;\n+\t/* Length. */\n+\tule32 len;\n+\t/* The flags as indicated above. */\n+\tule16 flags;\n+\t/* Next field if flags & NEXT */\n+\tule16 next;\n+};\n+\n+#pragma pack()\n+\n+/*\n+ * Packed Ring special structures and defines\n+ *\n+ */\n+\n+#define MAX_PACKED_RING_ELEMENTS (1 << 15) /* 32768 */\n+\n+/* additional packed ring flags */\n+#define VIRTQ_DESC_F_AVAIL (1 << 7)\n+#define VIRTQ_DESC_F_USED (1 << 15)\n+\n+/* descr phys address must be 16 byte aligned */\n+#pragma pack(push, 16)\n+struct pvirtq_desc {\n+\t/* Buffer Address. */\n+\tule64 addr;\n+\t/* Buffer Length. */\n+\tule32 len;\n+\t/* Buffer ID. */\n+\tule16 id;\n+\t/* The flags depending on descriptor type. 
*/\n+\tule16 flags;\n+};\n+\n+#pragma pack(pop)\n+\n+/* Enable events */\n+#define RING_EVENT_FLAGS_ENABLE 0x0\n+/* Disable events */\n+#define RING_EVENT_FLAGS_DISABLE 0x1\n+/*\n+ * Enable events for a specific descriptor\n+ * (as specified by Descriptor Ring Change Event offset/Wrap Counter).\n+ * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.\n+ */\n+#define RING_EVENT_FLAGS_DESC 0x2\n+/* The value 0x3 is reserved */\n+\n+struct pvirtq_event_suppress {\n+\tunion {\n+\t\tstruct {\n+\t\t\tule16 desc_event_off : 15; /* Descriptor Ring Change Event offset */\n+\t\t\tule16 desc_event_wrap : 1; /* Descriptor Ring Change Event Wrap Counter */\n+\t\t};\n+\t\tule16 desc; /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */\n+\t};\n+\n+\t/* phys address must be 4 byte aligned */\n+#pragma pack(push, 16)\n+\tunion {\n+\t\tstruct {\n+\t\t\tule16 desc_event_flags : 2; /* Descriptor Ring Change Event Flags */\n+\t\t\tule16 reserved : 14; /* Reserved, set to 0 */\n+\t\t};\n+\t\tule16 flags;\n+\t};\n+};\n+\n+#pragma pack(pop)\n+\n+/*\n+ * Common virtq descr\n+ */\n+#define vq_set_next(_vq, index, nxt)                \\\n+\tdo {                                       \\\n+\t\t__typeof__(_vq) (vq) = (_vq); \\\n+\t\tif ((vq)->vq_type == SPLIT_RING)   \\\n+\t\t\t(vq)->s[index].next = nxt; \\\n+\t} while (0)\n+#define vq_add_flags(_vq, _index, _flgs)                  \\\n+\tdo {                                           \\\n+\t\t__typeof__(_vq) (vq) = (_vq); \\\n+\t\t__typeof__(_index) (index) = (_index); \\\n+\t\t__typeof__(_flgs) (flgs) = (_flgs); \\\n+\t\tif ((vq)->vq_type == SPLIT_RING)       \\\n+\t\t\t(vq)->s[index].flags |= flgs;  \\\n+\t\telse if ((vq)->vq_type == PACKED_RING) \\\n+\t\t\t(vq)->p[index].flags |= flgs;  \\\n+\t} while (0)\n+#define vq_set_flags(_vq, _index, _flgs)                  \\\n+\tdo {                                           \\\n+\t\t__typeof__(_vq) (vq) = (_vq); \\\n+\t\t__typeof__(_index) (index) = (_index); \\\n+\t\t__typeof__(_flgs) (flgs) = (_flgs); \\\n+\t\tif ((vq)->vq_type == SPLIT_RING)       \\\n+\t\t\t(vq)->s[index].flags = flgs;   \\\n+\t\telse if ((vq)->vq_type == PACKED_RING) \\\n+\t\t\t(vq)->p[index].flags = flgs;   \\\n+\t} while (0)\n+\n+struct nthw_virtq_desc_buf {\n+\t/* Address (guest-physical). */\n+\tule64 addr;\n+\t/* Length. 
*/\n+\tule32 len;\n+} __rte_aligned(16);\n+\n+struct nthw_cvirtq_desc {\n+\tunion {\n+\t\tstruct nthw_virtq_desc_buf *b; /* buffer part as is common */\n+\t\tstruct virtq_desc *s; /* SPLIT */\n+\t\tstruct pvirtq_desc *p; /* PACKED */\n+\t};\n+\tuint16_t vq_type;\n+};\n+\n+/* Setup a virt_queue for a VM */\n+struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,\n+\tuint32_t index, uint16_t start_idx,\n+\tuint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,\n+\tvoid *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,\n+\tuint32_t header, uint32_t vq_type, int irq_vector);\n+\n+int nthw_enable_rx_virt_queue(struct nthw_virt_queue *rx_vq);\n+int nthw_disable_rx_virt_queue(struct nthw_virt_queue *rx_vq);\n+int nthw_release_rx_virt_queue(struct nthw_virt_queue *rxvq);\n+\n+struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,\n+\tuint32_t index, uint16_t start_idx,\n+\tuint16_t start_ptr, void *avail_struct_phys_addr, void *used_struct_phys_addr,\n+\tvoid *desc_struct_phys_addr, uint16_t queue_size, uint32_t host_id,\n+\tuint32_t port, uint32_t virtual_port, uint32_t header, uint32_t vq_type,\n+\tint irq_vector, uint32_t in_order);\n+\n+int nthw_enable_tx_virt_queue(struct nthw_virt_queue *tx_vq);\n+int nthw_disable_tx_virt_queue(struct nthw_virt_queue *tx_vq);\n+int nthw_release_tx_virt_queue(struct nthw_virt_queue *txvq);\n+int nthw_enable_and_change_port_tx_virt_queue(struct nthw_virt_queue *tx_vq,\n+\t\tuint32_t outport);\n+\n+struct nthw_virt_queue *nthw_setup_managed_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,\n+\tuint32_t index, uint32_t queue_size,\n+\tuint32_t host_id, uint32_t header,\n+\tstruct nthw_memory_descriptor *\n+\tp_virt_struct_area,\n+\tstruct nthw_memory_descriptor *\n+\tp_packet_buffers,\n+\tuint32_t vq_type, int irq_vector);\n+\n+int nthw_release_managed_rx_virt_queue(struct nthw_virt_queue *rxvq);\n+\n+struct nthw_virt_queue *nthw_setup_managed_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,\n+\tuint32_t index, uint32_t queue_size,\n+\tuint32_t host_id, uint32_t port, uint32_t virtual_port, uint32_t header,\n+\tstruct nthw_memory_descriptor *\n+\tp_virt_struct_area,\n+\tstruct nthw_memory_descriptor *\n+\tp_packet_buffers,\n+\tuint32_t vq_type, int irq_vector, uint32_t in_order);\n+\n+int nthw_release_managed_tx_virt_queue(struct nthw_virt_queue *txvq);\n+\n+int nthw_set_tx_qos_config(nthw_dbs_t *p_nthw_dbs, uint32_t port, uint32_t enable,\n+\t\t\t   uint32_t ir, uint32_t bs);\n+\n+int nthw_set_tx_qos_rate_global(nthw_dbs_t *p_nthw_dbs, uint32_t multiplier,\n+\t\t\t\tuint32_t divider);\n+\n+struct nthw_received_packets {\n+\tvoid *addr;\n+\tuint32_t len;\n+};\n+\n+/*\n+ * These functions handles both Split and Packed including merged buffers (jumbo)\n+ */\n+uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n,\n+\t\t\t     struct nthw_received_packets *rp,\n+\t\t\t     uint16_t *nb_pkts);\n+\n+void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n);\n+\n+uint16_t nthw_get_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,\n+\t\t\t     uint16_t *first_idx, struct nthw_cvirtq_desc *cvq,\n+\t\t\t     struct nthw_memory_descriptor **p_virt_addr);\n+\n+void nthw_release_tx_buffers(struct nthw_virt_queue *txvq, uint16_t n,\n+\t\t\t     uint16_t n_segs[]);\n+\n+int nthw_get_rx_queue_ptr(struct nthw_virt_queue *rxvq, uint16_t *index);\n+\n+int nthw_get_tx_queue_ptr(struct nthw_virt_queue *txvq, uint16_t *index);\n+\n+int nthw_virt_queue_init(struct fpga_info_s 
*p_fpga_info);\n+\n+#endif\ndiff --git a/drivers/net/ntnic/ntnic_ethdev.c b/drivers/net/ntnic/ntnic_ethdev.c\nnew file mode 100644\nindex 0000000000..d547926453\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_ethdev.c\n@@ -0,0 +1,4256 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include <unistd.h> /* sleep() */\n+#include <stdio.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <stdarg.h>\n+\n+#include <sys/queue.h>\n+\n+#include \"ntdrv_4ga.h\"\n+\n+#include <rte_common.h>\n+#include <rte_kvargs.h>\n+#include <rte_interrupts.h>\n+#include <rte_byteorder.h>\n+#include <rte_debug.h>\n+#include <rte_pci.h>\n+#include <rte_bus_pci.h>\n+#include <rte_bus_vdev.h>\n+#include <rte_ether.h>\n+#include <ethdev_pci.h>\n+#include <ethdev_driver.h>\n+#include <rte_memory.h>\n+#include <rte_eal.h>\n+#include <rte_malloc.h>\n+#include <rte_dev.h>\n+#include <rte_vfio.h>\n+#include <rte_flow_driver.h>\n+#include <vdpa_driver.h>\n+\n+#include \"ntlog.h\"\n+\n+#include \"stream_binary_flow_api.h\"\n+#include \"nthw_fpga.h\"\n+#include \"ntnic_xstats.h\"\n+#include \"ntnic_hshconfig.h\"\n+#include \"ntnic_ethdev.h\"\n+#include \"ntnic_vdpa.h\"\n+#include \"ntnic_vf.h\"\n+#include \"ntnic_vfio.h\"\n+#include \"ntnic_meter.h\"\n+\n+#include \"flow_api.h\"\n+\n+#ifdef NT_TOOLS\n+#include \"ntconnect.h\"\n+#include \"ntconnect_api.h\"\n+#include \"ntconnect_modules/ntconn_modules.h\"\n+#endif\n+\n+/* Defines: */\n+\n+#define HW_MAX_PKT_LEN (10000)\n+#define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)\n+#define MIN_MTU 46\n+#define MIN_MTU_INLINE 512\n+\n+#include \"ntnic_dbsconfig.h\"\n+\n+#define EXCEPTION_PATH_HID 0\n+\n+#define MAX_TOTAL_QUEUES 128\n+\n+#define ONE_G_SIZE 0x40000000\n+#define ONE_G_MASK (ONE_G_SIZE - 1)\n+\n+#define VIRTUAL_TUNNEL_PORT_OFFSET 72\n+\n+int lag_active;\n+\n+static struct {\n+\tstruct nthw_virt_queue *vq;\n+\tint managed;\n+\tint rx;\n+} rel_virt_queue[MAX_REL_VQS];\n+\n+#define MAX_RX_PACKETS 128\n+#define MAX_TX_PACKETS 128\n+\n+#if defined(RX_SRC_DUMP_PKTS_DEBUG) || defined(RX_DST_DUMP_PKTS_DEBUG) || \\\n+\tdefined(TX_SRC_DUMP_PKTS_DEBUG) || defined(TX_DST_DUMP_PKTS_DEBUG)\n+static void dump_packet_seg(const char *text, uint8_t *data, int len)\n+{\n+\tint x;\n+\n+\tif (text)\n+\t\tprintf(\"%s (%p, len %i)\", text, data, len);\n+\tfor (x = 0; x < len; x++) {\n+\t\tif (!(x % 16))\n+\t\t\tprintf(\"\\n%04X:\", x);\n+\t\tprintf(\" %02X\", *(data + x));\n+\t}\n+\tprintf(\"\\n\");\n+}\n+#endif\n+\n+/* Global statistics: */\n+extern const struct rte_flow_ops _dev_flow_ops;\n+struct pmd_internals *pmd_intern_base;\n+uint64_t rte_tsc_freq;\n+\n+/*------- Tables to store DPDK EAL log levels for nt log modules----------*/\n+static int nt_log_module_logtype[NT_LOG_MODULE_COUNT] = { -1 };\n+/*Register the custom module binding to EAL --log-level option here*/\n+static const char *nt_log_module_eal_name[NT_LOG_MODULE_COUNT] = {\n+\t[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_GENERAL)] = \"pmd.net.ntnic.general\",\n+\t[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)] = \"pmd.net.ntnic.nthw\",\n+\t[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FILTER)] = \"pmd.net.ntnic.filter\",\n+\t[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_VDPA)] = \"pmd.net.ntnic.vdpa\",\n+\t[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_FPGA)] = \"pmd.net.ntnic.fpga\",\n+\t[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTCONNECT)] =\n+\t\"pmd.net.ntnic.ntconnect\",\n+\t[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_ETHDEV)] = 
\"pmd.net.ntnic.ethdev\"\n+};\n+\n+/*--------------------------------------------------------------------------*/\n+\n+rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;\n+\n+static void *lag_management(void *arg);\n+static void (*previous_handler)(int sig);\n+static pthread_t shutdown_tid;\n+int kill_pmd;\n+\n+#define ETH_DEV_NTNIC_HELP_ARG \"help\"\n+#define ETH_DEV_NTHW_PORTMASK_ARG \"portmask\"\n+#define ETH_DEV_NTHW_RXQUEUES_ARG \"rxqs\"\n+#define ETH_DEV_NTHW_TXQUEUES_ARG \"txqs\"\n+#define ETH_DEV_NTHW_PORTQUEUES_ARG \"portqueues\"\n+#define ETH_DEV_NTHW_REPRESENTOR_ARG \"representor\"\n+#define ETH_DEV_NTHW_EXCEPTION_PATH_ARG \"exception_path\"\n+#define ETH_NTNIC_LAG_PRIMARY_ARG \"primary\"\n+#define ETH_NTNIC_LAG_BACKUP_ARG \"backup\"\n+#define ETH_NTNIC_LAG_MODE_ARG \"mode\"\n+#define ETH_DEV_NTHW_LINK_SPEED_ARG \"port.link_speed\"\n+#define ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG \"supported-fpgas\"\n+\n+#define DVIO_VHOST_DIR_NAME \"/usr/local/var/run/\"\n+\n+static const char *const valid_arguments[] = {\n+\tETH_DEV_NTNIC_HELP_ARG,\n+\tETH_DEV_NTHW_PORTMASK_ARG,\n+\tETH_DEV_NTHW_RXQUEUES_ARG,\n+\tETH_DEV_NTHW_TXQUEUES_ARG,\n+\tETH_DEV_NTHW_PORTQUEUES_ARG,\n+\tETH_DEV_NTHW_REPRESENTOR_ARG,\n+\tETH_DEV_NTHW_EXCEPTION_PATH_ARG,\n+\tETH_NTNIC_LAG_PRIMARY_ARG,\n+\tETH_NTNIC_LAG_BACKUP_ARG,\n+\tETH_NTNIC_LAG_MODE_ARG,\n+\tETH_DEV_NTHW_LINK_SPEED_ARG,\n+\tETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,\n+\tNULL,\n+};\n+\n+static struct rte_ether_addr eth_addr_vp[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];\n+\n+/* Functions: */\n+\n+/*\n+ * The set of PCI devices this driver supports\n+ */\n+static const struct rte_pci_id nthw_pci_id_map[] = {\n+\t{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },\n+\t{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT50B01) },\n+\t{\n+\t\t.vendor_id = 0,\n+\t}, /* sentinel */\n+};\n+\n+/*\n+ * Store and get adapter info\n+ */\n+\n+static struct drv_s *g_p_drv[NUM_ADAPTER_MAX] = { NULL };\n+\n+static void store_pdrv(struct drv_s *p_drv)\n+{\n+\tif (p_drv->adapter_no > NUM_ADAPTER_MAX) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"Internal error adapter number %u out of range. 
Max number of adapters: %u\\n\",\n+\t\t       p_drv->adapter_no, NUM_ADAPTER_MAX);\n+\t\treturn;\n+\t}\n+\tif (g_p_drv[p_drv->adapter_no] != 0) {\n+\t\tNT_LOG(WRN, ETHDEV,\n+\t\t       \"Overwriting adapter structure for PCI  \" PCIIDENT_PRINT_STR\n+\t\t       \" with adapter structure for PCI  \" PCIIDENT_PRINT_STR\n+\t\t       \"\\n\",\n+\t\t       PCIIDENT_TO_DOMAIN(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),\n+\t\t       PCIIDENT_TO_BUSNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),\n+\t\t       PCIIDENT_TO_DEVNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),\n+\t\t       PCIIDENT_TO_FUNCNR(g_p_drv[p_drv->adapter_no]->ntdrv.pciident),\n+\t\t       PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),\n+\t\t       PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),\n+\t\t       PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),\n+\t\t       PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));\n+\t}\n+\trte_spinlock_lock(&hwlock);\n+\tg_p_drv[p_drv->adapter_no] = p_drv;\n+\trte_spinlock_unlock(&hwlock);\n+}\n+\n+static struct drv_s *get_pdrv_from_pci(struct rte_pci_addr addr)\n+{\n+\tint i;\n+\tstruct drv_s *p_drv = NULL;\n+\n+\trte_spinlock_lock(&hwlock);\n+\tfor (i = 0; i < NUM_ADAPTER_MAX; i++) {\n+\t\tif (g_p_drv[i]) {\n+\t\t\tif (PCIIDENT_TO_DOMAIN(g_p_drv[i]->ntdrv.pciident) ==\n+\t\t\t\t\taddr.domain &&\n+\t\t\t\t\tPCIIDENT_TO_BUSNR(g_p_drv[i]->ntdrv.pciident) ==\n+\t\t\t\t\taddr.bus) {\n+\t\t\t\tp_drv = g_p_drv[i];\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n+\t}\n+\trte_spinlock_unlock(&hwlock);\n+\treturn p_drv;\n+}\n+\n+static struct drv_s *get_pdrv_from_pciident(uint32_t pciident)\n+{\n+\tstruct rte_pci_addr addr;\n+\n+\taddr.domain = PCIIDENT_TO_DOMAIN(pciident);\n+\taddr.bus = PCIIDENT_TO_BUSNR(pciident);\n+\taddr.devid = PCIIDENT_TO_DEVNR(pciident);\n+\taddr.function = PCIIDENT_TO_FUNCNR(pciident);\n+\treturn get_pdrv_from_pci(addr);\n+}\n+\n+int debug_adapter_show_info(uint32_t pciident, FILE *pfh)\n+{\n+\tstruct drv_s *p_drv = get_pdrv_from_pciident(pciident);\n+\n+\treturn nt4ga_adapter_show_info(&p_drv->ntdrv.adapter_info, pfh);\n+}\n+\n+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr)\n+{\n+\tnthw_dbs_t *p_nthw_dbs = NULL;\n+\tstruct drv_s *p_drv;\n+\n+\tp_drv = get_pdrv_from_pci(pci_addr);\n+\tif (p_drv) {\n+\t\tp_nthw_dbs = p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;\n+\t}\telse {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"Adapter DBS %p (p_drv=%p) info for adapter with PCI \" PCIIDENT_PRINT_STR\n+\t\t       \" is not found\\n\",\n+\t\t       p_nthw_dbs, p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid,\n+\t\t       pci_addr.function);\n+\t}\n+\treturn p_nthw_dbs;\n+}\n+\n+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr)\n+{\n+\tenum fpga_info_profile fpga_profile = FPGA_INFO_PROFILE_UNKNOWN;\n+\tstruct drv_s *p_drv;\n+\n+\tp_drv = get_pdrv_from_pci(pci_addr);\n+\tif (p_drv) {\n+\t\tfpga_profile = p_drv->ntdrv.adapter_info.fpga_info.profile;\n+\t} else {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"FPGA profile (p_drv=%p) for adapter with PCI \" PCIIDENT_PRINT_STR\n+\t\t       \" is not found\\n\",\n+\t\t       p_drv, pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);\n+\t}\n+\treturn fpga_profile;\n+}\n+\n+static int string_to_u32(const char *key_str __rte_unused,\n+\t\t\t const char *value_str, void *extra_args)\n+{\n+\tif (!value_str || !extra_args)\n+\t\treturn -1;\n+\tconst uint32_t value = strtol(value_str, NULL, 0);\n+\t*(uint32_t *)extra_args = value;\n+\treturn 0;\n+}\n+\n+struct port_link_speed {\n+\tint port_id;\n+\tint 
link_speed;\n+};\n+\n+/* Parse <port>:<link speed Mbps>, e.g 1:10000 */\n+static int string_to_port_link_speed(const char *key_str __rte_unused,\n+\t\t\t\t     const char *value_str, void *extra_args)\n+{\n+\tif (!value_str || !extra_args)\n+\t\treturn -1;\n+\tchar *semicol;\n+\tconst uint32_t pid = strtol(value_str, &semicol, 10);\n+\n+\tif (*semicol != ':')\n+\t\treturn -1;\n+\tconst uint32_t lspeed = strtol(++semicol, NULL, 10);\n+\tstruct port_link_speed *pls = *(struct port_link_speed **)extra_args;\n+\n+\tpls->port_id = pid;\n+\tpls->link_speed = lspeed;\n+\t++(*((struct port_link_speed **)(extra_args)));\n+\treturn 0;\n+}\n+\n+static int dpdk_stats_collect(struct pmd_internals *internals,\n+\t\t\t      struct rte_eth_stats *stats)\n+{\n+\tunsigned int i;\n+\tstruct drv_s *p_drv = internals->p_drv;\n+\tstruct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;\n+\tnt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;\n+\tnthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;\n+\tconst int if_index = internals->if_index;\n+\tuint64_t rx_total = 0;\n+\tuint64_t rx_total_b = 0;\n+\tuint64_t tx_total = 0;\n+\tuint64_t tx_total_b = 0;\n+\tuint64_t tx_err_total = 0;\n+\n+\tif (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||\n+\t\t\tif_index > NUM_ADAPTER_PORTS_MAX) {\n+\t\tNT_LOG(WRN, ETHDEV, \"%s - error exit\\n\", __func__);\n+\t\treturn -1;\n+\t}\n+\n+\t/*\n+\t * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)\n+\t * Return values are in the \"internals->rxq_scg[]\" and \"internals->txq_scg[]\" arrays\n+\t */\n+\tpoll_statistics(internals);\n+\n+\tmemset(stats, 0, sizeof(*stats));\n+\tfor (i = 0;\n+\t\t\ti < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues;\n+\t\t\ti++) {\n+\t\tstats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;\n+\t\tstats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;\n+\t\trx_total += stats->q_ipackets[i];\n+\t\trx_total_b += stats->q_ibytes[i];\n+\t}\n+\n+\tfor (i = 0;\n+\t\t\ti < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues;\n+\t\t\ti++) {\n+\t\tstats->q_opackets[i] = internals->txq_scg[i].tx_pkts;\n+\t\tstats->q_obytes[i] = internals->txq_scg[i].tx_bytes;\n+\t\tstats->q_errors[i] = internals->txq_scg[i].err_pkts;\n+\t\ttx_total += stats->q_opackets[i];\n+\t\ttx_total_b += stats->q_obytes[i];\n+\t\ttx_err_total += stats->q_errors[i];\n+\t}\n+\n+\tstats->imissed = internals->rx_missed;\n+\tstats->ipackets = rx_total;\n+\tstats->ibytes = rx_total_b;\n+\tstats->opackets = tx_total;\n+\tstats->obytes = tx_total_b;\n+\tstats->oerrors = tx_err_total;\n+\n+\treturn 0;\n+}\n+\n+static int dpdk_stats_reset(struct pmd_internals *internals,\n+\t\t\t    struct ntdrv_4ga_s *p_nt_drv, int n_intf_no)\n+{\n+\tnt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;\n+\tnthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;\n+\tunsigned int i;\n+\n+\tif (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 ||\n+\t\t\tn_intf_no > NUM_ADAPTER_PORTS_MAX)\n+\t\treturn -1;\n+\n+\tpthread_mutex_lock(&p_nt_drv->stat_lck);\n+\n+\t/* Rx */\n+\tfor (i = 0; i < internals->nb_rx_queues; i++) {\n+\t\tinternals->rxq_scg[i].rx_pkts = 0;\n+\t\tinternals->rxq_scg[i].rx_bytes = 0;\n+\t\tinternals->rxq_scg[i].err_pkts = 0;\n+\t}\n+\n+\tinternals->rx_missed = 0;\n+\n+\t/* Tx */\n+\tfor (i = 0; i < internals->nb_tx_queues; i++) {\n+\t\tinternals->txq_scg[i].tx_pkts = 0;\n+\t\tinternals->txq_scg[i].tx_bytes = 0;\n+\t\tinternals->txq_scg[i].err_pkts = 0;\n+\t}\n+\n+\tp_nt4ga_stat->n_totals_reset_timestamp = 
time(NULL);\n+\n+\tpthread_mutex_unlock(&p_nt_drv->stat_lck);\n+\n+\treturn 0;\n+}\n+\n+/* NOTE: please note the difference between ETH_SPEED_NUM_xxx and ETH_LINK_SPEED_xxx */\n+static int nt_link_speed_to_eth_speed_num(enum nt_link_speed_e nt_link_speed)\n+{\n+\tint eth_speed_num = ETH_SPEED_NUM_NONE;\n+\n+\tswitch (nt_link_speed) {\n+\tcase NT_LINK_SPEED_10M:\n+\t\teth_speed_num = ETH_SPEED_NUM_10M;\n+\t\tbreak;\n+\tcase NT_LINK_SPEED_100M:\n+\t\teth_speed_num = ETH_SPEED_NUM_100M;\n+\t\tbreak;\n+\tcase NT_LINK_SPEED_1G:\n+\t\teth_speed_num = ETH_SPEED_NUM_1G;\n+\t\tbreak;\n+\tcase NT_LINK_SPEED_10G:\n+\t\teth_speed_num = ETH_SPEED_NUM_10G;\n+\t\tbreak;\n+\tcase NT_LINK_SPEED_25G:\n+\t\teth_speed_num = ETH_SPEED_NUM_25G;\n+\t\tbreak;\n+\tcase NT_LINK_SPEED_40G:\n+\t\teth_speed_num = ETH_SPEED_NUM_40G;\n+\t\tbreak;\n+\tcase NT_LINK_SPEED_50G:\n+\t\teth_speed_num = ETH_SPEED_NUM_50G;\n+\t\tbreak;\n+\tcase NT_LINK_SPEED_100G:\n+\t\teth_speed_num = ETH_SPEED_NUM_100G;\n+\t\tbreak;\n+\tdefault:\n+\t\teth_speed_num = ETH_SPEED_NUM_NONE;\n+\t\tbreak;\n+\t}\n+\n+\treturn eth_speed_num;\n+}\n+\n+static int nt_link_duplex_to_eth_duplex(enum nt_link_duplex_e nt_link_duplex)\n+{\n+\tint eth_link_duplex = 0;\n+\n+\tswitch (nt_link_duplex) {\n+\tcase NT_LINK_DUPLEX_FULL:\n+\t\teth_link_duplex = ETH_LINK_FULL_DUPLEX;\n+\t\tbreak;\n+\tcase NT_LINK_DUPLEX_HALF:\n+\t\teth_link_duplex = ETH_LINK_HALF_DUPLEX;\n+\t\tbreak;\n+\tcase NT_LINK_DUPLEX_UNKNOWN: /* fall-through */\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\treturn eth_link_duplex;\n+}\n+\n+static int eth_link_update(struct rte_eth_dev *eth_dev,\n+\t\t\t   int wait_to_complete __rte_unused)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tconst int n_intf_no = internals->if_index;\n+\tstruct adapter_info_s *p_adapter_info =\n+\t\t\t&internals->p_drv->ntdrv.adapter_info;\n+\n+\tif (eth_dev->data->dev_started) {\n+\t\tif (internals->type == PORT_TYPE_VIRTUAL ||\n+\t\t\t\tinternals->type == PORT_TYPE_OVERRIDE) {\n+\t\t\teth_dev->data->dev_link.link_status =\n+\t\t\t\t((internals->vport_comm ==\n+\t\t\t\t  VIRT_PORT_NEGOTIATED_NONE) ?\n+\t\t\t\t ETH_LINK_DOWN :\n+\t\t\t\t ETH_LINK_UP);\n+\t\t\teth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;\n+\t\t\teth_dev->data->dev_link.link_duplex =\n+\t\t\t\tETH_LINK_FULL_DUPLEX;\n+\t\t\treturn 0;\n+\t\t}\n+\n+\t\tconst bool port_link_status =\n+\t\t\tnt4ga_port_get_link_status(p_adapter_info, n_intf_no);\n+\t\teth_dev->data->dev_link.link_status =\n+\t\t\tport_link_status ? 
ETH_LINK_UP : ETH_LINK_DOWN;\n+\n+\t\tnt_link_speed_t port_link_speed =\n+\t\t\tnt4ga_port_get_link_speed(p_adapter_info, n_intf_no);\n+\t\teth_dev->data->dev_link.link_speed =\n+\t\t\tnt_link_speed_to_eth_speed_num(port_link_speed);\n+\n+\t\tnt_link_duplex_t nt_link_duplex =\n+\t\t\tnt4ga_port_get_link_duplex(p_adapter_info, n_intf_no);\n+\t\teth_dev->data->dev_link.link_duplex =\n+\t\t\tnt_link_duplex_to_eth_duplex(nt_link_duplex);\n+\t} else {\n+\t\teth_dev->data->dev_link.link_status = ETH_LINK_DOWN;\n+\t\teth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;\n+\t\teth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;\n+\t}\n+\treturn 0;\n+}\n+\n+static int eth_stats_get(struct rte_eth_dev *eth_dev,\n+\t\t\t struct rte_eth_stats *stats)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tdpdk_stats_collect(internals, stats);\n+\treturn 0;\n+}\n+\n+static int eth_stats_reset(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tstruct drv_s *p_drv = internals->p_drv;\n+\tstruct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;\n+\tconst int if_index = internals->if_index;\n+\n+\tdpdk_stats_reset(internals, p_nt_drv, if_index);\n+\treturn 0;\n+}\n+\n+static uint32_t nt_link_speed_capa_to_eth_speed_capa(int nt_link_speed_capa)\n+{\n+\tuint32_t eth_speed_capa = 0;\n+\n+\tif (nt_link_speed_capa & NT_LINK_SPEED_10M)\n+\t\teth_speed_capa |= ETH_LINK_SPEED_10M;\n+\tif (nt_link_speed_capa & NT_LINK_SPEED_100M)\n+\t\teth_speed_capa |= ETH_LINK_SPEED_100M;\n+\tif (nt_link_speed_capa & NT_LINK_SPEED_1G)\n+\t\teth_speed_capa |= ETH_LINK_SPEED_1G;\n+\tif (nt_link_speed_capa & NT_LINK_SPEED_10G)\n+\t\teth_speed_capa |= ETH_LINK_SPEED_10G;\n+\tif (nt_link_speed_capa & NT_LINK_SPEED_25G)\n+\t\teth_speed_capa |= ETH_LINK_SPEED_25G;\n+\tif (nt_link_speed_capa & NT_LINK_SPEED_40G)\n+\t\teth_speed_capa |= ETH_LINK_SPEED_40G;\n+\tif (nt_link_speed_capa & NT_LINK_SPEED_50G)\n+\t\teth_speed_capa |= ETH_LINK_SPEED_50G;\n+\tif (nt_link_speed_capa & NT_LINK_SPEED_100G)\n+\t\teth_speed_capa |= ETH_LINK_SPEED_100G;\n+\n+\treturn eth_speed_capa;\n+}\n+\n+#define RTE_RSS_5TUPLE (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)\n+\n+static int eth_dev_infos_get(struct rte_eth_dev *eth_dev,\n+\t\t\t     struct rte_eth_dev_info *dev_info)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tconst int n_intf_no = internals->if_index;\n+\tstruct adapter_info_s *p_adapter_info =\n+\t\t\t&internals->p_drv->ntdrv.adapter_info;\n+\n+\tdev_info->if_index = internals->if_index;\n+\tdev_info->driver_name = internals->name;\n+\tdev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;\n+\tdev_info->max_rx_pktlen = HW_MAX_PKT_LEN;\n+\tdev_info->max_mtu = MAX_MTU;\n+\tif (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE)\n+\t\tdev_info->min_mtu = MIN_MTU_INLINE;\n+\n+\telse\n+\t\tdev_info->min_mtu = MIN_MTU;\n+\n+\tif (internals->p_drv) {\n+\t\tdev_info->max_rx_queues = internals->nb_rx_queues;\n+\t\tdev_info->max_tx_queues = internals->nb_tx_queues;\n+\n+\t\tdev_info->min_rx_bufsize = 64;\n+\n+\t\tconst uint32_t nt_port_speed_capa =\n+\t\t\tnt4ga_port_get_link_speed_capabilities(p_adapter_info,\n+\t\t\t\t\t\t\t       n_intf_no);\n+\t\tdev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);\n+\t}\n+\n+\tdev_info->flow_type_rss_offloads =\n+\t\tRTE_RSS_5TUPLE | RTE_ETH_RSS_C_VLAN |\n+\t\tRTE_ETH_RSS_LEVEL_INNERMOST | 
RTE_ETH_RSS_L3_SRC_ONLY |\n+\t\tRTE_ETH_RSS_LEVEL_OUTERMOST | RTE_ETH_RSS_L3_DST_ONLY;\n+\t/*\n+\t * NT hashing algorithm doesn't use key, so it is just a fake key length to\n+\t * feet testpmd requirements.\n+\t */\n+\tdev_info->hash_key_size = 1;\n+\n+\treturn 0;\n+}\n+\n+static __rte_always_inline int\n+copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf, struct rte_mempool *mb_pool,\n+\t\t       struct nthw_received_packets *hw_recv, int max_segs,\n+\t\t       uint16_t data_len)\n+{\n+\tint src_pkt = 0;\n+\t/*\n+\t * 1. virtqueue packets may be segmented\n+\t * 2. the mbuf size may be too small and may need to be segmented\n+\t */\n+\tchar *data = (char *)hw_recv->addr + SG_HDR_SIZE;\n+\tchar *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;\n+\n+\t/* set packet length */\n+\tmbuf->pkt_len = data_len - SG_HDR_SIZE;\n+\n+#ifdef RX_MERGE_SEGMENT_DEBUG\n+\tvoid *dbg_src_start = hw_recv->addr;\n+\tvoid *dbg_dst_start = dst;\n+#endif\n+\n+\tint remain = mbuf->pkt_len;\n+\t/* First cpy_size is without header */\n+\tint cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE) ?\n+\t\t       SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE :\n+\t\t       remain;\n+\n+\tstruct rte_mbuf *m = mbuf; /* if mbuf segmentation is needed */\n+\n+\twhile (++src_pkt <= max_segs) {\n+\t\t/* keep track of space in dst */\n+\t\tint cpto_size = rte_pktmbuf_tailroom(m);\n+\n+#ifdef RX_MERGE_SEGMENT_DEBUG\n+\t\tprintf(\"src copy size %i\\n\", cpy_size);\n+#endif\n+\n+\t\tif (cpy_size > cpto_size) {\n+\t\t\tint new_cpy_size = cpto_size;\n+\n+#ifdef RX_MERGE_SEGMENT_DEBUG\n+\t\t\tprintf(\"Seg %i: mbuf first cpy src off 0x%\" PRIX64 \", dst off 0x%\" PRIX64 \", size %i\\n\",\n+\t\t\t       mbuf->nb_segs - 1,\n+\t\t\t       (uint64_t)data - (uint64_t)dbg_src_start,\n+\t\t\t       (uint64_t)dst - (uint64_t)dbg_dst_start,\n+\t\t\t       new_cpy_size);\n+#endif\n+\t\t\trte_memcpy((void *)dst, (void *)data, new_cpy_size);\n+\t\t\tm->data_len += new_cpy_size;\n+\t\t\tremain -= new_cpy_size;\n+\t\t\tcpy_size -= new_cpy_size;\n+\n+\t\t\tdata += new_cpy_size;\n+\n+\t\t\t/*\n+\t\t\t * Loop if remaining data from this virtqueue seg cannot fit in one extra\n+\t\t\t * mbuf\n+\t\t\t */\n+\t\t\tdo {\n+\t\t\t\tm->next = rte_pktmbuf_alloc(mb_pool);\n+\t\t\t\tif (unlikely(!m->next))\n+\t\t\t\t\treturn -1;\n+\t\t\t\tm = m->next;\n+\n+\t\t\t\t/* Headroom is not needed in chained mbufs */\n+\t\t\t\trte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));\n+\t\t\t\tdst = (char *)m->buf_addr;\n+\t\t\t\tm->data_len = 0;\n+\t\t\t\tm->pkt_len = 0;\n+\n+#ifdef RX_MERGE_SEGMENT_DEBUG\n+\t\t\t\tdbg_dst_start = dst;\n+#endif\n+\t\t\t\tcpto_size = rte_pktmbuf_tailroom(m);\n+\n+\t\t\t\tint actual_cpy_size = (cpy_size > cpto_size) ?\n+\t\t\t\t\t\t      cpto_size :\n+\t\t\t\t\t\t      cpy_size;\n+#ifdef RX_MERGE_SEGMENT_DEBUG\n+\t\t\t\tprintf(\"new dst mbuf seg - size %i\\n\",\n+\t\t\t\t       cpto_size);\n+\t\t\t\tprintf(\"Seg %i: mbuf cpy src off 0x%\" PRIX64 \", dst off 0x%\" PRIX64 \", size %i\\n\",\n+\t\t\t\t       mbuf->nb_segs,\n+\t\t\t\t       (uint64_t)data - (uint64_t)dbg_src_start,\n+\t\t\t\t       (uint64_t)dst - (uint64_t)dbg_dst_start,\n+\t\t\t\t       actual_cpy_size);\n+#endif\n+\n+\t\t\t\trte_memcpy((void *)dst, (void *)data,\n+\t\t\t\t\t   actual_cpy_size);\n+\t\t\t\tm->pkt_len += actual_cpy_size;\n+\t\t\t\tm->data_len += actual_cpy_size;\n+\n+\t\t\t\tremain -= actual_cpy_size;\n+\t\t\t\tcpy_size -= actual_cpy_size;\n+\n+\t\t\t\tdata += actual_cpy_size;\n+\n+\t\t\t\tmbuf->nb_segs++;\n+\n+\t\t\t} while (cpy_size && remain);\n+\n+\t\t} else {\n+\t\t\t/* all 
data from this virtqueue segment can fit in current mbuf */\n+#ifdef RX_MERGE_SEGMENT_DEBUG\n+\t\t\tprintf(\"Copy all into Seg %i: %i bytes, src off 0x%\" PRIX64\n+\t\t\t       \", dst off 0x%\" PRIX64 \"\\n\",\n+\t\t\t       mbuf->nb_segs - 1, cpy_size,\n+\t\t\t       (uint64_t)data - (uint64_t)dbg_src_start,\n+\t\t\t       (uint64_t)dst - (uint64_t)dbg_dst_start);\n+#endif\n+\t\t\trte_memcpy((void *)dst, (void *)data, cpy_size);\n+\t\t\tm->data_len += cpy_size;\n+\t\t\tif (mbuf->nb_segs > 1)\n+\t\t\t\tm->pkt_len += cpy_size;\n+\t\t\tremain -= cpy_size;\n+\t\t}\n+\n+\t\t/* packet complete - all data from current virtqueue packet has been copied */\n+\t\tif (remain == 0)\n+\t\t\tbreak;\n+\t\t/* increment dst to data end */\n+\t\tdst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);\n+\t\t/* prepare for next virtqueue segment */\n+\t\tdata = (char *)hw_recv[src_pkt]\n+\t\t       .addr; /* following packets are full data */\n+\n+#ifdef RX_MERGE_SEGMENT_DEBUG\n+\t\tdbg_src_start = data;\n+#endif\n+\t\tcpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ?\n+\t\t\t   SG_HW_RX_PKT_BUFFER_SIZE :\n+\t\t\t   remain;\n+#ifdef RX_MERGE_SEGMENT_DEBUG\n+\t\tprintf(\"next src buf\\n\");\n+#endif\n+\t};\n+\n+\tif (src_pkt > max_segs) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"Did not receive correct number of segment for a whole packet\");\n+\t\treturn -1;\n+\t}\n+\n+\treturn src_pkt;\n+}\n+\n+static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs,\n+\t\t\t       uint16_t nb_pkts)\n+{\n+\tunsigned int i;\n+\tstruct rte_mbuf *mbuf;\n+\tstruct ntnic_rx_queue *rx_q = queue;\n+\tuint16_t num_rx = 0;\n+\n+\tstruct nthw_received_packets hw_recv[MAX_RX_PACKETS];\n+\n+\tif (kill_pmd)\n+\t\treturn 0;\n+\n+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD\n+\tdbg_print_approx_cpu_load_rx_called(rx_q->port);\n+#endif\n+\n+\tif (unlikely(nb_pkts == 0))\n+\t\treturn 0;\n+\n+\tif (nb_pkts > MAX_RX_PACKETS)\n+\t\tnb_pkts = MAX_RX_PACKETS;\n+\n+\tuint16_t whole_pkts;\n+\tuint16_t hw_recv_pkt_segs =\n+\t\tnthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);\n+\n+\tif (!hw_recv_pkt_segs) {\n+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD\n+\t\tdbg_print_approx_cpu_load_rx_done(rx_q->port, 0);\n+#endif\n+\n+\t\treturn 0;\n+\t}\n+\n+#ifdef NT_DEBUG_STAT\n+\tdbg_rx_queue(rx_q,\n+\t\t     hw_recv_pkt_segs); /* _update debug statistics with new rx packet count */\n+#endif\n+\n+\tnb_pkts = whole_pkts;\n+\n+#ifdef RX_MERGE_SEGMENT_DEBUG\n+\tprintf(\"\\n---------- DPDK Rx ------------\\n\");\n+\tprintf(\"[Port %i] Pkts recv %i on hw queue index %i: tot segs %i, \"\n+\t       \"vq buf %i, vq header size %i\\n\",\n+\t       rx_q->port, nb_pkts, rx_q->queue.hw_id, hw_recv_pkt_segs,\n+\t       SG_HW_RX_PKT_BUFFER_SIZE, SG_HDR_SIZE);\n+#endif\n+\n+\tint src_pkt = 0; /* from 0 to hw_recv_pkt_segs */\n+\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tbufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);\n+\t\tif (!bufs[i]) {\n+\t\t\tprintf(\"ERROR - no more buffers mbuf in mempool\\n\");\n+\t\t\tgoto err_exit;\n+\t\t}\n+\t\tmbuf = bufs[i];\n+\n+\t\tstruct _pkt_hdr_rx *phdr =\n+\t\t\t(struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;\n+\n+#ifdef RX_MERGE_SEGMENT_DEBUG\n+\t\tprintf(\"\\nRx pkt #%i: vq pkt len %i, segs %i -> mbuf size %i, headroom size %i\\n\",\n+\t\t       i, phdr->cap_len - SG_HDR_SIZE,\n+\t\t       (phdr->cap_len + SG_HW_RX_PKT_BUFFER_SIZE - 1) /\n+\t\t       SG_HW_RX_PKT_BUFFER_SIZE,\n+\t\t       rte_pktmbuf_tailroom(mbuf), rte_pktmbuf_headroom(mbuf));\n+#endif\n+\n+#ifdef RX_SRC_DUMP_PKTS_DEBUG\n+\t\t{\n+\t\t\tint d, _segs = (phdr->cap_len 
+\n+\t\t\t\t\tSG_HW_RX_PKT_BUFFER_SIZE - 1) /\n+\t\t\t\t       SG_HW_RX_PKT_BUFFER_SIZE;\n+\t\t\tint _size = phdr->cap_len;\n+\n+\t\t\tprintf(\"Rx packet dump: pkt #%i hdr rx port %i, pkt len %i, segs %i\\n\",\n+\t\t\t       i, phdr->port, phdr->cap_len - SG_HDR_SIZE,\n+\t\t\t       _segs);\n+\t\t\tfor (d = 0; d < _segs; d++) {\n+\t\t\t\tprintf(\"Dump seg %i:\\n\", d);\n+\t\t\t\tdump_packet_seg(\"Vq seg:\", hw_recv[src_pkt + d].addr,\n+\t\t\t\t\t\t_size > SG_HW_RX_PKT_BUFFER_SIZE ?\n+\t\t\t\t\t\tSG_HW_RX_PKT_BUFFER_SIZE :\n+\t\t\t\t\t\t_size);\n+\t\t\t\t_size -= SG_HW_RX_PKT_BUFFER_SIZE;\n+\t\t\t}\n+\t\t}\n+#endif\n+\n+\t\tif (phdr->cap_len < SG_HDR_SIZE) {\n+\t\t\tprintf(\"Pkt len of zero received. No header!! - dropping packets\\n\");\n+\t\t\trte_pktmbuf_free(mbuf);\n+\t\t\tgoto err_exit;\n+\t\t}\n+\n+\t\t{\n+\t\t\tif (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&\n+\t\t\t\t\t(phdr->cap_len - SG_HDR_SIZE) <=\n+\t\t\t\t\trte_pktmbuf_tailroom(mbuf)) {\n+#ifdef RX_MERGE_SEGMENT_DEBUG\n+\t\t\t\tprintf(\"Simple copy vq -> mbuf %p size %i\\n\",\n+\t\t\t\t       rte_pktmbuf_mtod(mbuf, void *),\n+\t\t\t\t       phdr->cap_len);\n+#endif\n+\t\t\t\tmbuf->data_len = phdr->cap_len - SG_HDR_SIZE;\n+\t\t\t\trte_memcpy(rte_pktmbuf_mtod(mbuf, char *),\n+\t\t\t\t\t   (char *)hw_recv[src_pkt].addr +\n+\t\t\t\t\t   SG_HDR_SIZE,\n+\t\t\t\t\t   mbuf->data_len);\n+\n+\t\t\t\tmbuf->pkt_len = mbuf->data_len;\n+\t\t\t\tsrc_pkt++;\n+\t\t\t} else {\n+\t\t\t\tint cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,\n+\t\t\t\t\t\t\t\t      &hw_recv[src_pkt],\n+\t\t\t\t\t\t\t\t      hw_recv_pkt_segs - src_pkt,\n+\t\t\t\t\t\t\t\t      phdr->cap_len);\n+\t\t\t\tif (cpy_segs < 0) {\n+\t\t\t\t\t/* Error */\n+\t\t\t\t\trte_pktmbuf_free(mbuf);\n+\t\t\t\t\tgoto err_exit;\n+\t\t\t\t}\n+\t\t\t\tsrc_pkt += cpy_segs;\n+\t\t\t}\n+\n+#ifdef RX_DST_DUMP_PKTS_DEBUG\n+\t\t\t{\n+\t\t\t\tstruct rte_mbuf *m = mbuf;\n+\n+\t\t\t\tprintf(\"\\nRx final mbuf:\\n\");\n+\t\t\t\tfor (int ii = 0; m && ii < m->nb_segs; ii++) {\n+\t\t\t\t\tprintf(\"  seg %i len %i\\n\", ii,\n+\t\t\t\t\t       m->data_len);\n+\t\t\t\t\tprintf(\"  seg dump:\\n\");\n+\t\t\t\t\tdump_packet_seg(\"mbuf seg:\",\n+\t\t\t\t\t\t\trte_pktmbuf_mtod(m, uint8_t *),\n+\t\t\t\t\t\t\tm->data_len);\n+\t\t\t\t\tm = m->next;\n+\t\t\t\t}\n+\t\t\t}\n+#endif\n+\n+\t\t\tnum_rx++;\n+\n+\t\t\tmbuf->ol_flags &=\n+\t\t\t\t~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);\n+\t\t\tmbuf->port = (uint16_t)-1;\n+\n+\t\t\tif (phdr->color_type == 0) {\n+\t\t\t\tif (phdr->port >= VIRTUAL_TUNNEL_PORT_OFFSET &&\n+\t\t\t\t\t\t((phdr->color >> 24) == 0x02)) {\n+\t\t\t\t\t/* VNI in color of descriptor add port as well */\n+\t\t\t\t\tmbuf->hash.fdir.hi =\n+\t\t\t\t\t\t((uint32_t)phdr->color &\n+\t\t\t\t\t\t 0xffffff) |\n+\t\t\t\t\t\t((uint32_t)phdr->port\n+\t\t\t\t\t\t << 24);\n+\t\t\t\t\tmbuf->hash.fdir.lo =\n+\t\t\t\t\t\t(uint32_t)phdr->fid;\n+\t\t\t\t\tmbuf->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;\n+\n+\t\t\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t\t\t       \"POP'ed packet received that missed on inner match. 
color = %08x, port %i, tunnel-match flow stat id %i\",\n+\t\t\t\t\t       phdr->color, phdr->port,\n+\t\t\t\t\t       phdr->fid);\n+\t\t\t\t}\n+\n+\t\t\t} else {\n+\t\t\t\tif (phdr->color) {\n+\t\t\t\t\tmbuf->hash.fdir.hi =\n+\t\t\t\t\t\tphdr->color &\n+\t\t\t\t\t\t(NT_MAX_COLOR_FLOW_STATS - 1);\n+\t\t\t\t\tmbuf->ol_flags |=\n+\t\t\t\t\t\tRTE_MBUF_F_RX_FDIR_ID |\n+\t\t\t\t\t\tRTE_MBUF_F_RX_FDIR;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+err_exit:\n+\tnthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);\n+\n+#ifdef DEBUG_PRINT_APPROX_CPU_LOAD\n+\tdbg_print_approx_cpu_load_rx_done(rx_q->port, num_rx);\n+#endif\n+\n+#ifdef RX_MERGE_SEGMENT_DEBUG\n+\t/*\n+\t * When the application double frees a mbuf, it will become a doublet in the memory pool\n+\t * This is obvious a bug in application, but can be verified here to some extend at least\n+\t */\n+\tuint64_t addr = (uint64_t)bufs[0]->buf_addr;\n+\n+\tfor (int i = 1; i < num_rx; i++) {\n+\t\tif (bufs[i]->buf_addr == addr) {\n+\t\t\tprintf(\"Duplicate packet addresses! num_rx %i\\n\",\n+\t\t\t       num_rx);\n+\t\t\tfor (int ii = 0; ii < num_rx; ii++) {\n+\t\t\t\tprintf(\"bufs[%i]->buf_addr %p\\n\", ii,\n+\t\t\t\t       bufs[ii]->buf_addr);\n+\t\t\t}\n+\t\t}\n+\t}\n+#endif\n+\n+\treturn num_rx;\n+}\n+\n+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,\n+\t\t\t   uint16_t vq_descr_idx,\n+\t\t\t   struct nthw_memory_descriptor *vq_bufs, int max_segs,\n+\t\t\t   struct rte_mbuf *mbuf)\n+{\n+\t/*\n+\t * 1. mbuf packet may be segmented\n+\t * 2. the virtqueue buffer size may be too small and may need to be segmented\n+\t */\n+\n+\tchar *data = rte_pktmbuf_mtod(mbuf, char *);\n+\tchar *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;\n+\n+\tint remain = mbuf->pkt_len;\n+\tint cpy_size = mbuf->data_len;\n+\n+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG\n+\tprintf(\"src copy size %i\\n\", cpy_size);\n+#endif\n+\n+\tstruct rte_mbuf *m = mbuf;\n+\tint cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;\n+\n+\tcvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;\n+\n+\tint cur_seg_num = 0; /* start from 0 */\n+\n+\twhile (m) {\n+\t\t/* Can all data in current src segment be in current dest segment */\n+\t\tif (cpy_size > cpto_size) {\n+\t\t\tint new_cpy_size = cpto_size;\n+\n+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG\n+\t\t\tprintf(\"Seg %i: virtq buf first cpy src offs %u, dst offs 0x%\" PRIX64 \", size %i\\n\",\n+\t\t\t       cur_seg_num,\n+\t\t\t       (uint64_t)data - rte_pktmbuf_mtod(m, uint64_t),\n+\t\t\t       (uint64_t)dst -\n+\t\t\t       (uint64_t)vq_bufs[vq_descr_idx].virt_addr,\n+\t\t\t       new_cpy_size);\n+#endif\n+\t\t\trte_memcpy((void *)dst, (void *)data, new_cpy_size);\n+\n+\t\t\tcvq_desc->b[vq_descr_idx].len += new_cpy_size;\n+\n+\t\t\tremain -= new_cpy_size;\n+\t\t\tcpy_size -= new_cpy_size;\n+\n+\t\t\tdata += new_cpy_size;\n+\n+\t\t\t/*\n+\t\t\t * Loop if remaining data from this virtqueue seg cannot fit in one extra\n+\t\t\t * mbuf\n+\t\t\t */\n+\t\t\tdo {\n+\t\t\t\tvq_add_flags(cvq_desc, vq_descr_idx,\n+\t\t\t\t\t     VIRTQ_DESC_F_NEXT);\n+\n+\t\t\t\tint next_vq_descr_idx =\n+\t\t\t\t\tVIRTQ_DESCR_IDX_NEXT(vq_descr_idx);\n+\n+\t\t\t\tvq_set_next(cvq_desc, vq_descr_idx,\n+\t\t\t\t\t    next_vq_descr_idx);\n+\n+\t\t\t\tvq_descr_idx = next_vq_descr_idx;\n+\n+\t\t\t\tvq_set_flags(cvq_desc, vq_descr_idx, 0);\n+\t\t\t\tvq_set_next(cvq_desc, vq_descr_idx, 0);\n+\n+\t\t\t\tif (++cur_seg_num > max_segs)\n+\t\t\t\t\tbreak;\n+\n+\t\t\t\tdst = (char *)vq_bufs[vq_descr_idx].virt_addr;\n+\t\t\t\tcpto_size = SG_HW_TX_PKT_BUFFER_SIZE;\n+\n+\t\t\t\tint 
actual_cpy_size = (cpy_size > cpto_size) ?\n+\t\t\t\t\t\t      cpto_size :\n+\t\t\t\t\t\t      cpy_size;\n+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG\n+\t\t\t\tprintf(\"Tx vq buf seg %i: virtq cpy %i - offset 0x%\" PRIX64 \"\\n\",\n+\t\t\t\t       cur_seg_num, actual_cpy_size,\n+\t\t\t\t       (uint64_t)dst -\n+\t\t\t\t       (uint64_t)vq_bufs[vq_descr_idx]\n+\t\t\t\t       .virt_addr);\n+#endif\n+\t\t\t\trte_memcpy((void *)dst, (void *)data,\n+\t\t\t\t\t   actual_cpy_size);\n+\n+\t\t\t\tcvq_desc->b[vq_descr_idx].len = actual_cpy_size;\n+\n+\t\t\t\tremain -= actual_cpy_size;\n+\t\t\t\tcpy_size -= actual_cpy_size;\n+\t\t\t\tcpto_size -= actual_cpy_size;\n+\n+\t\t\t\tdata += actual_cpy_size;\n+\n+\t\t\t} while (cpy_size && remain);\n+\n+\t\t} else {\n+\t\t\t/* All data from this segment can fit in current virtqueue buffer */\n+#ifdef CPY_MBUF_TO_VQUEUE_DEBUG\n+\t\t\tprintf(\"Tx vq buf seg %i: Copy %i bytes - offset %u\\n\",\n+\t\t\t       cur_seg_num, cpy_size,\n+\t\t\t       (uint64_t)dst -\n+\t\t\t       (uint64_t)vq_bufs[vq_descr_idx].virt_addr);\n+#endif\n+\n+\t\t\trte_memcpy((void *)dst, (void *)data, cpy_size);\n+\n+\t\t\tcvq_desc->b[vq_descr_idx].len += cpy_size;\n+\n+\t\t\tremain -= cpy_size;\n+\t\t\tcpto_size -= cpy_size;\n+\t\t}\n+\n+\t\t/* Packet complete - all segments from the current mbuf have been copied */\n+\t\tif (remain == 0)\n+\t\t\tbreak;\n+\t\t/* increment dst to data end */\n+\t\tdst = (char *)vq_bufs[vq_descr_idx].virt_addr +\n+\t\t      cvq_desc->b[vq_descr_idx].len;\n+\n+\t\tm = m->next;\n+\t\tif (!m) {\n+\t\t\tNT_LOG(ERR, ETHDEV, \"ERROR: invalid packet size\\n\");\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\t/* Prepare for next mbuf segment */\n+\t\tdata = rte_pktmbuf_mtod(m, char *);\n+\t\tcpy_size = m->data_len;\n+\t};\n+\n+\tcur_seg_num++;\n+\tif (cur_seg_num > max_segs) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"Did not receive correct number of segments for a whole packet\");\n+\t\treturn -1;\n+\t}\n+\n+\treturn cur_seg_num;\n+}\n+\n+static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs,\n+\t\t\t       uint16_t nb_pkts)\n+{\n+\tuint16_t pkt;\n+\tuint16_t first_vq_descr_idx = 0;\n+\n+\tstruct nthw_cvirtq_desc cvq_desc;\n+\n+\tstruct nthw_memory_descriptor *vq_bufs;\n+\n+\tstruct ntnic_tx_queue *tx_q = queue;\n+\n+\tint nb_segs = 0, i;\n+\tint pkts_sent = 0;\n+\tuint16_t nb_segs_arr[MAX_TX_PACKETS];\n+\n+\tif (kill_pmd)\n+\t\treturn 0;\n+\n+\tif (nb_pkts > MAX_TX_PACKETS)\n+\t\tnb_pkts = MAX_TX_PACKETS;\n+\n+#ifdef TX_CHAINING_DEBUG\n+\tprintf(\"\\n---------- DPDK Tx ------------\\n\");\n+#endif\n+\n+\t/*\n+\t * count all segments needed to contain all packets in vq buffers\n+\t */\n+\tfor (i = 0; i < nb_pkts; i++) {\n+\t\tif (bufs[i]->pkt_len < 60) {\n+\t\t\tbufs[i]->pkt_len = 60;\n+\t\t\tbufs[i]->data_len = 60;\n+\t\t}\n+\n+\t\t/* build the num segments array for segmentation control and release function */\n+\t\tint vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);\n+\n+\t\tnb_segs_arr[i] = vq_segs;\n+\t\tnb_segs += vq_segs;\n+\t}\n+\tif (!nb_segs)\n+\t\tgoto exit_out;\n+\n+#ifdef TX_CHAINING_DEBUG\n+\tprintf(\"[Port %i] Mbufs for Tx: tot segs %i, packets %i, mbuf size %i, headroom size %i\\n\",\n+\t       tx_q->port, nb_segs, nb_pkts,\n+\t       bufs[0]->buf_len - rte_pktmbuf_headroom(bufs[0]),\n+\t       rte_pktmbuf_headroom(bufs[0]));\n+#endif\n+\n+\tint got_nb_segs =\n+\t\tnthw_get_tx_buffers(tx_q->vq, nb_segs, &first_vq_descr_idx,\n+\t\t\t\t    &cvq_desc /*&vq_descr,*/, &vq_bufs);\n+\tif (!got_nb_segs) {\n+#ifdef TX_CHAINING_DEBUG\n+\t\tprintf(\"Got zero segments - back pressure 
from HW\\n\");\n+#endif\n+\t\tgoto exit_out;\n+\t}\n+\n+\t/*\n+\t * we may get less vq buffers than we have asked for\n+\t * calculate last whole packet that can fit into what\n+\t * we have got\n+\t */\n+\twhile (got_nb_segs < nb_segs) {\n+\t\tif (!--nb_pkts)\n+\t\t\tgoto exit_out;\n+\t\tnb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);\n+\t\tif (nb_segs <= 0)\n+\t\t\tgoto exit_out;\n+\t}\n+\n+\t/*\n+\t * nb_pkts & nb_segs, got it all, ready to copy\n+\t */\n+\tint seg_idx = 0;\n+\tint last_seg_idx = seg_idx;\n+\n+\tfor (pkt = 0; pkt < nb_pkts; ++pkt) {\n+\t\tuint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);\n+\n+\t\tvq_set_flags(&cvq_desc, vq_descr_idx, 0);\n+\t\tvq_set_next(&cvq_desc, vq_descr_idx, 0);\n+\n+\t\tstruct _pkt_hdr_tx *hdr_tx =\n+\t\t\t(struct _pkt_hdr_tx *)vq_bufs[vq_descr_idx].virt_addr;\n+\t\t/* Set the header to all zeros */\n+\t\tmemset(hdr_tx, 0, SG_HDR_SIZE);\n+\n+\t\t/*\n+\t\t * Set the NT DVIO0 header fields\n+\t\t *\n+\t\t * Applicable for Vswitch only.\n+\t\t * For other product types the header values are \"don't care\" and we leave them as\n+\t\t * all zeros.\n+\t\t */\n+\t\tif (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {\n+\t\t\thdr_tx->bypass_port = tx_q->target_id;\n+\n+\t\t\t/* set packet length */\n+\t\t\thdr_tx->cap_len = bufs[pkt]->pkt_len + SG_HDR_SIZE;\n+\t\t}\n+\n+#ifdef TX_CHAINING_DEBUG\n+\t\tprintf(\"\\nTx pkt #%i: pkt segs %i, pkt len %i -> vq buf size %i, vq header size %i\\n\",\n+\t\t       pkt, bufs[pkt]->nb_segs, bufs[pkt]->pkt_len,\n+\t\t       SG_HW_TX_PKT_BUFFER_SIZE, SG_HDR_SIZE);\n+\n+#ifdef TX_SRC_DUMP_PKTS_DEBUG\n+\t\t{\n+\t\t\tstruct rte_mbuf *m = bufs[pkt];\n+\t\t\tint ii;\n+\n+\t\t\tprintf(\"Dump src mbuf:\\n\");\n+\t\t\tfor (ii = 0; ii < bufs[pkt]->nb_segs; ii++) {\n+\t\t\t\tprintf(\"  seg %i len %i\\n\", ii, m->data_len);\n+\t\t\t\tprintf(\"  seg dump:\\n\");\n+\t\t\t\tdump_packet_seg(\"mbuf seg:\",\n+\t\t\t\t\t\trte_pktmbuf_mtod(m, uint8_t *),\n+\t\t\t\t\t\tm->data_len);\n+\t\t\t\tm = m->next;\n+\t\t\t}\n+\t\t}\n+#endif\n+\n+#endif\n+\n+\t\tif (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {\n+#ifdef TX_CHAINING_DEBUG\n+\t\t\tprintf(\"Simple copy %i bytes - mbuf -> vq\\n\",\n+\t\t\t       bufs[pkt]->pkt_len);\n+#endif\n+\t\t\trte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr +\n+\t\t\t\tSG_HDR_SIZE),\n+\t\t\t\trte_pktmbuf_mtod(bufs[pkt], void *),\n+\t\t\t\tbufs[pkt]->pkt_len);\n+\n+\t\t\tcvq_desc.b[vq_descr_idx].len =\n+\t\t\t\tbufs[pkt]->pkt_len + SG_HDR_SIZE;\n+\n+\t\t\tseg_idx++;\n+\t\t} else {\n+\t\t\tint cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc,\n+\t\t\t\t\t\t\t      vq_descr_idx, vq_bufs,\n+\t\t\t\t\t\t\t      nb_segs - last_seg_idx, bufs[pkt]);\n+\t\t\tif (cpy_segs < 0)\n+\t\t\t\tbreak;\n+\t\t\tseg_idx += cpy_segs;\n+\t\t}\n+\n+#ifdef TX_DST_DUMP_PKTS_DEBUG\n+\t\tint d, tot_size = 0;\n+\n+\t\tfor (d = last_seg_idx; d < seg_idx; d++)\n+\t\t\ttot_size += cvq_desc.b[VIRTQ_DESCR_IDX(d)].len;\n+\t\tprintf(\"\\nDump final Tx vq pkt %i, size %i, tx port %i, bypass id %i, using hw queue index %i\\n\",\n+\t\t       pkt, tot_size, tx_q->port, hdr_tx->bypass_port,\n+\t\t       tx_q->queue.hw_id);\n+\t\tfor (d = last_seg_idx; d < seg_idx; d++) {\n+\t\t\tchar str[32];\n+\n+\t\t\tsprintf(str, \"Vq seg %i:\", d - last_seg_idx);\n+\t\t\tdump_packet_seg(str,\n+\t\t\t\t\tvq_bufs[VIRTQ_DESCR_IDX(d)].virt_addr,\n+\t\t\t\t\tcvq_desc.b[VIRTQ_DESCR_IDX(d)].len);\n+\t\t}\n+#endif\n+\n+\t\tlast_seg_idx = seg_idx;\n+\t\trte_pktmbuf_free(bufs[pkt]);\n+\t\tpkts_sent++;\n+\t}\n+\n+#ifdef TX_CHAINING_DEBUG\n+\tprintf(\"\\nTx final vq 
setup:\\n\");\n+\tfor (int i = 0; i < nb_segs; i++) {\n+\t\tint idx = VIRTQ_DESCR_IDX(i);\n+\n+\t\tif (cvq_desc.vq_type == SPLIT_RING) {\n+\t\t\tprintf(\"virtq descr %i, len %i, flags %04x, next %i\\n\",\n+\t\t\t       idx, cvq_desc.b[idx].len, cvq_desc.s[idx].flags,\n+\t\t\t       cvq_desc.s[idx].next);\n+\t\t}\n+\t}\n+#endif\n+\n+exit_out:\n+\n+\tif (pkts_sent) {\n+#ifdef TX_CHAINING_DEBUG\n+\t\tprintf(\"Release virtq segs %i\\n\", nb_segs);\n+#endif\n+\t\tnthw_release_tx_buffers(tx_q->vq, pkts_sent, nb_segs_arr);\n+\t}\n+\treturn pkts_sent;\n+}\n+\n+static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num,\n+\t\t\t\t     struct hwq_s *hwq, int num_descr,\n+\t\t\t\t     int buf_size)\n+{\n+\tint i, res;\n+\tuint32_t size;\n+\tuint64_t iova_addr;\n+\n+\tNT_LOG(DBG, ETHDEV,\n+\t       \"***** Configure IOMMU for HW queues on VF %i *****\\n\", vf_num);\n+\n+\t/* Just allocate 1MB to hold all combined descr rings */\n+\tuint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;\n+\n+\tvoid *virt = rte_malloc_socket(\"VirtQDescr\", tot_alloc_size,\n+\t\t\t\t       ALIGN_SIZE(tot_alloc_size),\n+\t\t\t\t       eth_dev->data->numa_node);\n+\tif (!virt)\n+\t\treturn -1;\n+\n+\tuint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;\n+\trte_iova_t hpa = rte_malloc_virt2iova(virt);\n+\n+\tNT_LOG(DBG, ETHDEV,\n+\t       \"Allocated virtio descr rings : virt %p [0x%\" PRIX64\n+\t       \"], hpa %p [0x%\" PRIX64 \"]\\n\",\n+\t       virt, gp_offset, hpa, hpa & ONE_G_MASK);\n+\n+\t/*\n+\t * Same offset on both HPA and IOVA\n+\t * Make sure 1G boundary is never crossed\n+\t */\n+\tif (((hpa & ONE_G_MASK) != gp_offset) ||\n+\t\t\t(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=\n+\t\t\t((uint64_t)virt & ~ONE_G_MASK)) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"*********************************************************\\n\");\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"ERROR, no optimal IOMMU mapping available hpa : %016lx (%016lx), gp_offset : %016lx size %u\\n\",\n+\t\t       hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"*********************************************************\\n\");\n+\n+\t\trte_free(virt);\n+\n+\t\t/* Just allocate 1MB to hold all combined descr rings */\n+\t\tsize = 0x100000;\n+\t\tvoid *virt = rte_malloc_socket(\"VirtQDescr\", size, 4096,\n+\t\t\t\t\t       eth_dev->data->numa_node);\n+\t\tif (!virt)\n+\t\t\treturn -1;\n+\n+\t\tres = nt_vfio_dma_map(vf_num, virt, &iova_addr, size);\n+\n+\t\tNT_LOG(DBG, ETHDEV, \"VFIO MMAP res %i, vf_num %i\\n\", res,\n+\t\t       vf_num);\n+\t\tif (res != 0)\n+\t\t\treturn -1;\n+\n+\t\thwq->vf_num = vf_num;\n+\t\thwq->virt_queues_ctrl.virt_addr = virt;\n+\t\thwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;\n+\t\thwq->virt_queues_ctrl.len = size;\n+\n+\t\tNT_LOG(DBG, ETHDEV,\n+\t\t       \"Allocated for virtio descr rings combined 1MB : %p, IOVA %016lx\\n\",\n+\t\t       virt, iova_addr);\n+\n+\t\tsize = num_descr * sizeof(struct nthw_memory_descriptor);\n+\t\thwq->pkt_buffers = rte_zmalloc_socket(\"rx_pkt_buffers\", size,\n+\t\t\t\t\t\t      64, eth_dev->data->numa_node);\n+\t\tif (!hwq->pkt_buffers) {\n+\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t       \"Failed to allocated buffer array for hw-queue %p, \"\n+\t\t\t       \"total size %i, elements %i\\n\",\n+\t\t\t       hwq->pkt_buffers, size, num_descr);\n+\t\t\trte_free(virt);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tsize = buf_size * num_descr;\n+\t\tvoid *virt_addr = rte_malloc_socket(\"pkt_buffer_pkts\", size,\n+\t\t\t\t\t\t    
4096,\n+\t\t\t\t\t\t    eth_dev->data->numa_node);\n+\t\tif (!virt_addr) {\n+\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t       \"Failed to allocate packet buffers for hw-queue %p, \"\n+\t\t\t       \"buf size %i, elements %i\\n\",\n+\t\t\t       hwq->pkt_buffers, buf_size, num_descr);\n+\t\t\trte_free(hwq->pkt_buffers);\n+\t\t\trte_free(virt);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tres = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);\n+\n+\t\tNT_LOG(DBG, ETHDEV,\n+\t\t       \"VFIO MMAP res %i, virt %p, iova %016lx, vf_num %i, num \"\n+\t\t       \"pkt bufs %i, tot size %i\\n\",\n+\t\t       res, virt_addr, iova_addr, vf_num, num_descr, size);\n+\n+\t\tif (res != 0)\n+\t\t\treturn -1;\n+\n+\t\tfor (i = 0; i < num_descr; i++) {\n+\t\t\thwq->pkt_buffers[i].virt_addr =\n+\t\t\t\t(void *)((char *)virt_addr +\n+\t\t\t\t\t ((uint64_t)(i) * buf_size));\n+\t\t\thwq->pkt_buffers[i].phys_addr =\n+\t\t\t\t(void *)(iova_addr + ((uint64_t)(i) * buf_size));\n+\t\t\thwq->pkt_buffers[i].len = buf_size;\n+\t\t}\n+\n+\t\treturn 0;\n+\t} /* End of: no optimal IOMMU mapping available */\n+\n+\tres = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);\n+\tif (res != 0) {\n+\t\tNT_LOG(ERR, ETHDEV, \"VFIO MMAP FAILED! res %i, vf_num %i\\n\",\n+\t\t       res, vf_num);\n+\t\treturn -1;\n+\t}\n+\n+\thwq->vf_num = vf_num;\n+\thwq->virt_queues_ctrl.virt_addr = virt;\n+\thwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);\n+\thwq->virt_queues_ctrl.len = 0x100000;\n+\tiova_addr += 0x100000;\n+\n+\tNT_LOG(DBG, ETHDEV,\n+\t       \"VFIO MMAP: virt_addr=%\" PRIX64 \" phys_addr=%\" PRIX64\n+\t       \" size=%\" PRIX64 \" hpa=%\" PRIX64 \"\\n\",\n+\t       hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,\n+\t       hwq->virt_queues_ctrl.len,\n+\t       rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));\n+\n+\tsize = num_descr * sizeof(struct nthw_memory_descriptor);\n+\thwq->pkt_buffers = rte_zmalloc_socket(\"rx_pkt_buffers\", size, 64,\n+\t\t\t\t\t      eth_dev->data->numa_node);\n+\tif (!hwq->pkt_buffers) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"Failed to allocate buffer array for hw-queue %p, total size %i, elements %i\\n\",\n+\t\t       hwq->pkt_buffers, size, num_descr);\n+\t\trte_free(virt);\n+\t\treturn -1;\n+\t}\n+\n+\tvoid *virt_addr = (void *)((uint64_t)virt + 0x100000);\n+\n+\tfor (i = 0; i < num_descr; i++) {\n+\t\thwq->pkt_buffers[i].virt_addr =\n+\t\t\t(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));\n+\t\thwq->pkt_buffers[i].phys_addr =\n+\t\t\t(void *)(iova_addr + ((uint64_t)(i) * buf_size));\n+\t\thwq->pkt_buffers[i].len = buf_size;\n+\t}\n+\treturn 0;\n+}\n+\n+static void release_hw_virtio_queues(struct hwq_s *hwq)\n+{\n+\tif (!hwq || hwq->vf_num == 0)\n+\t\treturn;\n+\thwq->vf_num = 0;\n+}\n+\n+static int deallocate_hw_virtio_queues(struct hwq_s *hwq)\n+{\n+\tint vf_num = hwq->vf_num;\n+\n+\tvoid *virt = hwq->virt_queues_ctrl.virt_addr;\n+\n+\tint res = nt_vfio_dma_unmap(vf_num, hwq->virt_queues_ctrl.virt_addr,\n+\t\t\t\t    (uint64_t)hwq->virt_queues_ctrl.phys_addr,\n+\t\t\t\t    ONE_G_SIZE);\n+\tif (res != 0) {\n+\t\tNT_LOG(ERR, ETHDEV, \"VFIO UNMAP FAILED! 
res %i, vf_num %i\\n\",\n+\t\t       res, vf_num);\n+\t\treturn -1;\n+\t}\n+\n+\trelease_hw_virtio_queues(hwq);\n+\trte_free(hwq->pkt_buffers);\n+\trte_free(virt);\n+\treturn 0;\n+}\n+\n+static void eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)\n+{\n+\tstruct pmd_internals *internals = dev->data->dev_private;\n+\tstruct ntnic_tx_queue *tx_q = &internals->txq_scg[queue_id];\n+\n+\tdeallocate_hw_virtio_queues(&tx_q->hwq);\n+\tNT_LOG(DBG, ETHDEV, \"NTNIC: %s\\n\", __func__);\n+}\n+\n+static void eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)\n+{\n+\tstruct pmd_internals *internals = dev->data->dev_private;\n+\tstruct ntnic_rx_queue *rx_q = &internals->rxq_scg[queue_id];\n+\n+\tdeallocate_hw_virtio_queues(&rx_q->hwq);\n+\tNT_LOG(DBG, ETHDEV, \"NTNIC: %s\\n\", __func__);\n+}\n+\n+static int num_queues_allocated;\n+\n+/* Returns num queue starting at returned queue num or -1 on fail */\n+static int allocate_queue(int num)\n+{\n+\tint next_free = num_queues_allocated;\n+\n+\tNT_LOG(DBG, ETHDEV,\n+\t       \"%s: num_queues_allocated=%u, New queues=%u, Max queues=%u\\n\",\n+\t       __func__, num_queues_allocated, num, MAX_TOTAL_QUEUES);\n+\tif (num_queues_allocated + num > MAX_TOTAL_QUEUES)\n+\t\treturn -1;\n+\tnum_queues_allocated += num;\n+\treturn next_free;\n+}\n+\n+static int\n+eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id,\n+\t\t       uint16_t nb_rx_desc __rte_unused,\n+\t\t       unsigned int socket_id __rte_unused,\n+\t\t       const struct rte_eth_rxconf *rx_conf __rte_unused,\n+\t\t       struct rte_mempool *mb_pool)\n+{\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u]\\n\", __func__, __func__, __LINE__);\n+\tstruct rte_pktmbuf_pool_private *mbp_priv;\n+\tstruct pmd_internals *internals = eth_dev->data->dev_private;\n+\tstruct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];\n+\tstruct drv_s *p_drv = internals->p_drv;\n+\tstruct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;\n+\n+\tif (internals->type == PORT_TYPE_OVERRIDE) {\n+\t\trx_q->mb_pool = mb_pool;\n+\t\teth_dev->data->rx_queues[rx_queue_id] = rx_q;\n+\t\tmbp_priv = rte_mempool_get_priv(rx_q->mb_pool);\n+\t\trx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -\n+\t\t\t\t\t    RTE_PKTMBUF_HEADROOM);\n+\t\trx_q->enabled = 1;\n+\t\treturn 0;\n+\t}\n+\n+\tNT_LOG(DBG, ETHDEV,\n+\t       \"(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i\\n\",\n+\t       internals->port, rx_queue_id, rx_q->queue.hw_id);\n+\n+\trx_q->mb_pool = mb_pool;\n+\n+\teth_dev->data->rx_queues[rx_queue_id] = rx_q;\n+\n+\tmbp_priv = rte_mempool_get_priv(rx_q->mb_pool);\n+\trx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -\n+\t\t\t\t    RTE_PKTMBUF_HEADROOM);\n+\trx_q->enabled = 1;\n+\n+\tif (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,\n+\t\t\t\t      SG_NB_HW_RX_DESCRIPTORS,\n+\t\t\t\t      SG_HW_RX_PKT_BUFFER_SIZE) < 0)\n+\t\treturn -1;\n+\n+\trx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;\n+\n+\trx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;\n+\n+\trx_q->vq = nthw_setup_managed_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,\n+\t\trx_q->queue.hw_id, /* index */\n+\t\trx_q->nb_hw_rx_descr, EXCEPTION_PATH_HID, /* host_id */\n+\t\t1, /* header NT DVIO header for exception path */\n+\t\t&rx_q->hwq.virt_queues_ctrl, rx_q->hwq.pkt_buffers, SPLIT_RING, -1);\n+\n+\tNT_LOG(DBG, ETHDEV, \"(%i) NTNIC RX OVS-SW queues successfully setup\\n\",\n+\t       internals->port);\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_tx_scg_queue_setup(struct 
rte_eth_dev *eth_dev, uint16_t tx_queue_id,\n+\t\t       uint16_t nb_tx_desc __rte_unused,\n+\t\t       unsigned int socket_id __rte_unused,\n+\t\t       const struct rte_eth_txconf *tx_conf __rte_unused)\n+{\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u]\\n\", __func__, __func__, __LINE__);\n+\tstruct pmd_internals *internals = eth_dev->data->dev_private;\n+\tstruct drv_s *p_drv = internals->p_drv;\n+\tstruct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;\n+\tstruct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];\n+\n+\tif (internals->type == PORT_TYPE_OVERRIDE) {\n+\t\teth_dev->data->tx_queues[tx_queue_id] = tx_q;\n+\t\treturn 0;\n+\t}\n+\n+\tNT_LOG(DBG, ETHDEV,\n+\t       \"(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i\\n\",\n+\t       tx_q->port, tx_queue_id, tx_q->queue.hw_id);\n+\n+\tif (tx_queue_id > internals->nb_tx_queues) {\n+\t\tprintf(\"Error invalid tx queue id\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\teth_dev->data->tx_queues[tx_queue_id] = tx_q;\n+\n+\t/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */\n+\tif (tx_q->rss_target_id >= 0) {\n+\t\t/* bypass to a multiqueue port - qsl-hsh index */\n+\t\ttx_q->target_id = tx_q->rss_target_id + 0x90;\n+\t} else {\n+\t\tif (internals->vpq[tx_queue_id].hw_id > -1) {\n+\t\t\t/* virtual port - queue index */\n+\t\t\ttx_q->target_id = internals->vpq[tx_queue_id].hw_id;\n+\t\t} else {\n+\t\t\t/* Phy port - phy port identifier */\n+\t\t\tif (lag_active) {\n+\t\t\t\t/* If in LAG mode use bypass 0x90 mode */\n+\t\t\t\ttx_q->target_id = 0x90;\n+\t\t\t} else {\n+\t\t\t\t/* output/bypass to MAC */\n+\t\t\t\ttx_q->target_id = (int)(tx_q->port + 0x80);\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tif (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,\n+\t\t\t\t      SG_NB_HW_TX_DESCRIPTORS,\n+\t\t\t\t      SG_HW_TX_PKT_BUFFER_SIZE) < 0)\n+\t\treturn -1;\n+\n+\ttx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;\n+\n+\ttx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;\n+\n+\tuint32_t port, header;\n+\n+\tif (tx_q->profile == FPGA_INFO_PROFILE_VSWITCH) {\n+\t\t/* transmit port - not used in vswitch enabled mode - using bypass */\n+\t\tport = 0;\n+\t\theader = 1; /* header type DVIO0 Always for exception path */\n+\t} else {\n+\t\tport = tx_q->port; /* transmit port */\n+\t\theader = 0; /* header type VirtIO-Net */\n+\t}\n+\t/*\n+\t * in_port - in vswitch mode has to move tx port from OVS excep. Away\n+\t * from VM tx port, because of QoS is matched by port id!\n+\t */\n+\ttx_q->vq = nthw_setup_managed_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,\n+\t\ttx_q->queue.hw_id, /* index */\n+\t\ttx_q->nb_hw_tx_descr, /* queue size */\n+\t\tEXCEPTION_PATH_HID, /* host_id always VF4 */\n+\t\tport,\n+\t\ttx_q->port +\n+\t\t128,\n+\t\theader, &tx_q->hwq.virt_queues_ctrl, tx_q->hwq.pkt_buffers,\n+\t\tSPLIT_RING, -1, IN_ORDER);\n+\n+\ttx_q->enabled = 1;\n+\tfor (uint32_t i = 0; i < internals->vpq_nb_vq; i++) {\n+\t\tnthw_epp_set_queue_to_vport(p_nt_drv->adapter_info.fpga_info.mp_nthw_epp,\n+\t\t\t\t\t    internals->vpq[i].hw_id, tx_q->port);\n+\t}\n+\n+\tNT_LOG(DBG, ETHDEV, \"(%i) NTNIC TX OVS-SW queues successfully setup\\n\",\n+\t       internals->port);\n+\n+\tif (internals->type == PORT_TYPE_PHYSICAL) {\n+\t\tstruct adapter_info_s *p_adapter_info =\n+\t\t\t\t&internals->p_drv->ntdrv.adapter_info;\n+\t\tNT_LOG(DBG, ETHDEV, \"Port %i is ready for data. 
Enable port\\n\",\n+\t\t       internals->if_index);\n+\t\tnt4ga_port_set_adm_state(p_adapter_info, internals->if_index,\n+\t\t\t\t\t true);\n+\t\tif (lag_active && internals->if_index == 0) {\n+\t\t\t/*\n+\t\t\t * Special case for link aggregation where the second phy interface (port 1)\n+\t\t\t * is \"hidden\" from DPDK and therefore doesn't get enabled through normal\n+\t\t\t * interface probing\n+\t\t\t */\n+\t\t\tNT_LOG(DBG, ETHDEV, \"LAG: Enable port %i\\n\",\n+\t\t\t       internals->if_index + 1);\n+\t\t\tnt4ga_port_set_adm_state(p_adapter_info,\n+\t\t\t\t\t\t internals->if_index + 1, true);\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int dev_set_mtu_inline(struct rte_eth_dev *dev, uint16_t mtu)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)dev->data->dev_private;\n+\tstruct flow_eth_dev *flw_dev = internals->flw_dev;\n+\tint ret = -1;\n+\n+\tif (internals->type == PORT_TYPE_PHYSICAL && mtu >= MIN_MTU_INLINE &&\n+\t\t\tmtu <= MAX_MTU)\n+\t\tret = flow_set_mtu_inline(flw_dev, internals->port, mtu);\n+\treturn ret ? -EINVAL : 0;\n+}\n+\n+static int dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)\n+{\n+\tstruct pmd_internals *internals = dev->data->dev_private;\n+\t/*struct ntnic_tx_queue *tx_q = internals->txq; */\n+\tstruct drv_s *p_drv = internals->p_drv;\n+\tstruct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;\n+\tfpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;\n+\tint retval = EINVAL;\n+\n+\tif (mtu < MIN_MTU || mtu > MAX_MTU)\n+\t\treturn -EINVAL;\n+\n+\tif (internals->type == PORT_TYPE_VIRTUAL) {\n+\t\t/* set MTU on exception to MAX_MTU */\n+\t\tretval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,\n+\t\t\tinternals->rxq_scg[0]\n+\t\t\t.queue\n+\t\t\t.hw_id, /* exception tx queue hw_id to OVS */\n+\t\t\tMAX_MTU, /* max number of bytes allowed for a given port. */\n+\t\t\tinternals->type); /* port type */\n+\n+\t\tif (retval)\n+\t\t\treturn retval;\n+\n+\t\tuint i;\n+\n+\t\tfor (i = 0; i < internals->vpq_nb_vq; i++) {\n+\t\t\tretval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,\n+\t\t\t\tinternals->vpq[i].hw_id, /* tx queue hw_id */\n+\t\t\t\tmtu, /* max number of bytes allowed for a given port. */\n+\t\t\t\tinternals->type); /* port type */\n+\t\t\tif (retval)\n+\t\t\t\treturn retval;\n+\n+\t\t\tNT_LOG(DBG, ETHDEV, \"SET MTU SIZE %d queue hw_id %d\\n\",\n+\t\t\t       mtu, internals->vpq[i].hw_id);\n+\t\t}\n+\t} else if (internals->type == PORT_TYPE_PHYSICAL) {\n+\t\t/* set MTU on exception to MAX_MTU */\n+\t\tretval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,\n+\t\t\tinternals->rxq_scg[0]\n+\t\t\t.queue\n+\t\t\t.hw_id, /* exception tx queue hw_id to OVS */\n+\t\t\tMAX_MTU, /* max number of bytes allowed for a given port. */\n+\t\t\tPORT_TYPE_VIRTUAL); /* port type */\n+\t\tif (retval)\n+\t\t\treturn retval;\n+\n+\t\tretval = nthw_epp_set_mtu(fpga_info->mp_nthw_epp,\n+\t\t\tinternals->port, /* port number */\n+\t\t\tmtu, /* max number of bytes allowed for a given port. 
*/\n+\t\t\tinternals->type); /* port type */\n+\n+\t\tNT_LOG(DBG, ETHDEV, \"SET MTU SIZE %d port %d\\n\", mtu,\n+\t\t       internals->port);\n+\t} else {\n+\t\tNT_LOG(DBG, ETHDEV,\n+\t\t       \"COULD NOT SET MTU SIZE %d port %d type %d\\n\", mtu,\n+\t\t       internals->port, internals->type);\n+\t\tretval = -EINVAL;\n+\t}\n+\treturn retval;\n+}\n+\n+static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)\n+{\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u]\\n\", __func__, __func__, __LINE__);\n+\teth_dev->data->rx_queue_state[rx_queue_id] =\n+\t\tRTE_ETH_QUEUE_STATE_STARTED;\n+\treturn 0;\n+}\n+\n+static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)\n+{\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u]\\n\", __func__, __func__, __LINE__);\n+\teth_dev->data->rx_queue_state[rx_queue_id] =\n+\t\tRTE_ETH_QUEUE_STATE_STOPPED;\n+\treturn 0;\n+}\n+\n+static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)\n+{\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u]\\n\", __func__, __func__, __LINE__);\n+\teth_dev->data->tx_queue_state[rx_queue_id] =\n+\t\tRTE_ETH_QUEUE_STATE_STARTED;\n+\treturn 0;\n+}\n+\n+static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)\n+{\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u]\\n\", __func__, __func__, __LINE__);\n+\teth_dev->data->tx_queue_state[rx_queue_id] =\n+\t\tRTE_ETH_QUEUE_STATE_STOPPED;\n+\treturn 0;\n+}\n+\n+static void eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)\n+{\n+\tstruct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;\n+\n+\tassert(index < NUM_MAC_ADDRS_PER_PORT);\n+\n+\tif (index >= NUM_MAC_ADDRS_PER_PORT) {\n+\t\tconst struct pmd_internals *const internals =\n+\t\t\t\tdev->data->dev_private;\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"%s: [%s:%i]: Port %i: illegal index %u (>= %u)\\n\",\n+\t\t       __FILE__, __func__, __LINE__, internals->if_index, index,\n+\t\t       NUM_MAC_ADDRS_PER_PORT);\n+\t\treturn;\n+\t}\n+\t(void)memset(&eth_addrs[index], 0, sizeof(eth_addrs[index]));\n+}\n+\n+static int eth_mac_addr_add(struct rte_eth_dev *dev,\n+\t\t\t    struct rte_ether_addr *mac_addr, uint32_t index,\n+\t\t\t    uint32_t vmdq __rte_unused)\n+{\n+\tstruct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;\n+\n+\tassert(index < NUM_MAC_ADDRS_PER_PORT);\n+\n+\tif (index >= NUM_MAC_ADDRS_PER_PORT) {\n+\t\tconst struct pmd_internals *const internals =\n+\t\t\t\tdev->data->dev_private;\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"%s: [%s:%i]: Port %i: illegal index %u (>= %u)\\n\",\n+\t\t       __FILE__, __func__, __LINE__, internals->if_index, index,\n+\t\t       NUM_MAC_ADDRS_PER_PORT);\n+\t\treturn -1;\n+\t}\n+\n+\teth_addrs[index] = *mac_addr;\n+\n+\treturn 0;\n+}\n+\n+static int eth_mac_addr_set(struct rte_eth_dev *dev,\n+\t\t\t    struct rte_ether_addr *mac_addr)\n+{\n+\tstruct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;\n+\n+\teth_addrs[0U] = *mac_addr;\n+\n+\treturn 0;\n+}\n+\n+static int eth_set_mc_addr_list(struct rte_eth_dev *dev,\n+\t\t\t\tstruct rte_ether_addr *mc_addr_set,\n+\t\t\t\tuint32_t nb_mc_addr)\n+{\n+\tstruct pmd_internals *const internals = dev->data->dev_private;\n+\tstruct rte_ether_addr *const mc_addrs = internals->mc_addrs;\n+\tsize_t i;\n+\n+\tif (nb_mc_addr >= NUM_MULTICAST_ADDRS_PER_PORT) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"%s: [%s:%i]: Port %i: too many multicast addresses %u (>= %u)\\n\",\n+\t\t       __FILE__, __func__, __LINE__, internals->if_index,\n+\t\t       nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);\n+\t\treturn 
-1;\n+\t}\n+\n+\tfor (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++) {\n+\t\tif (i < nb_mc_addr)\n+\t\t\tmc_addrs[i] = mc_addr_set[i];\n+\n+\t\telse\n+\t\t\t(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int eth_dev_configure(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tstruct drv_s *p_drv = internals->p_drv;\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u] Called for eth_dev %p\\n\", __func__,\n+\t       __func__, __LINE__, eth_dev);\n+\n+\tp_drv->probe_finished = 1;\n+\n+\t/* The device is ALWAYS running promiscuous mode. */\n+\teth_dev->data->promiscuous ^= ~eth_dev->data->promiscuous;\n+\treturn 0;\n+}\n+\n+static int eth_dev_start(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tconst int n_intf_no = internals->if_index;\n+\tstruct adapter_info_s *p_adapter_info =\n+\t\t\t&internals->p_drv->ntdrv.adapter_info;\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u] - Port %u, %u\\n\", __func__, __func__,\n+\t       __LINE__, internals->n_intf_no, internals->if_index);\n+\n+\tif (internals->type == PORT_TYPE_VIRTUAL ||\n+\t\t\tinternals->type == PORT_TYPE_OVERRIDE) {\n+\t\teth_dev->data->dev_link.link_status = ETH_LINK_UP;\n+\t} else {\n+\t\t/*\n+\t\t * wait for link on port\n+\t\t * If application starts sending too soon before FPGA port is ready, garbage is\n+\t\t * produced\n+\t\t */\n+\t\tint loop = 0;\n+\n+\t\twhile (nt4ga_port_get_link_status(p_adapter_info, n_intf_no) ==\n+\t\t\t\tETH_LINK_DOWN) {\n+\t\t\t/* break out after 5 sec */\n+\t\t\tif (++loop >= 50) {\n+\t\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t\t       \"%s: TIMEOUT No link on port %i (5sec timeout)\\n\",\n+\t\t\t\t       __func__, internals->n_intf_no);\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tusleep(100000);\n+\t\t}\n+\t\tassert(internals->n_intf_no ==\n+\t\t       internals->if_index); /* Sanity check */\n+\t\tif (internals->lpbk_mode) {\n+\t\t\tif (internals->lpbk_mode & 1 << 0) {\n+\t\t\t\tnt4ga_port_set_loopback_mode(p_adapter_info,\n+\t\t\t\t\t\t\t     n_intf_no,\n+\t\t\t\t\t\t\t     NT_LINK_LOOPBACK_HOST);\n+\t\t\t}\n+\t\t\tif (internals->lpbk_mode & 1 << 1) {\n+\t\t\t\tnt4ga_port_set_loopback_mode(p_adapter_info,\n+\t\t\t\t\t\t\t     n_intf_no,\n+\t\t\t\t\t\t\t     NT_LINK_LOOPBACK_LINE);\n+\t\t\t}\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n+static int eth_dev_stop(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tconst int n_intf_no = internals->if_index;\n+\tstruct adapter_info_s *p_adapter_info =\n+\t\t\t&internals->p_drv->ntdrv.adapter_info;\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u] - Port %u, %u, type %u\\n\", __func__,\n+\t       __func__, __LINE__, internals->n_intf_no, internals->if_index,\n+\t       internals->type);\n+\n+\tif (internals->type != PORT_TYPE_VIRTUAL) {\n+\t\tstruct ntnic_rx_queue *rx_q = internals->rxq_scg;\n+\t\tstruct ntnic_tx_queue *tx_q = internals->txq_scg;\n+\n+\t\tuint q;\n+\n+\t\tfor (q = 0; q < internals->nb_rx_queues; q++)\n+\t\t\tnthw_release_managed_rx_virt_queue(rx_q[q].vq);\n+\n+\t\tfor (q = 0; q < internals->nb_tx_queues; q++)\n+\t\t\tnthw_release_managed_tx_virt_queue(tx_q[q].vq);\n+\n+\t\tnt4ga_port_set_adm_state(p_adapter_info, n_intf_no, 0);\n+\t\tnt4ga_port_set_link_status(p_adapter_info, n_intf_no, 0);\n+\t\tnt4ga_port_set_link_speed(p_adapter_info, n_intf_no,\n+\t\t\t\t\t  
NT_LINK_SPEED_UNKNOWN);\n+\t\tnt4ga_port_set_loopback_mode(p_adapter_info, n_intf_no,\n+\t\t\t\t\t     NT_LINK_LOOPBACK_OFF);\n+\t}\n+\n+\teth_dev->data->dev_link.link_status = ETH_LINK_DOWN;\n+\treturn 0;\n+}\n+\n+static int eth_dev_set_link_up(struct rte_eth_dev *dev)\n+{\n+\tstruct pmd_internals *const internals = dev->data->dev_private;\n+\tstruct adapter_info_s *p_adapter_info =\n+\t\t\t&internals->p_drv->ntdrv.adapter_info;\n+\tconst int port = internals->if_index;\n+\n+\tif (internals->type == PORT_TYPE_VIRTUAL ||\n+\t\t\tinternals->type == PORT_TYPE_OVERRIDE)\n+\t\treturn 0;\n+\n+\tassert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);\n+\tassert(port == internals->n_intf_no);\n+\n+\tnt4ga_port_set_adm_state(p_adapter_info, port, true);\n+\n+\treturn 0;\n+}\n+\n+static int eth_dev_set_link_down(struct rte_eth_dev *dev)\n+{\n+\tstruct pmd_internals *const internals = dev->data->dev_private;\n+\tstruct adapter_info_s *p_adapter_info =\n+\t\t\t&internals->p_drv->ntdrv.adapter_info;\n+\tconst int port = internals->if_index;\n+\n+\tif (internals->type == PORT_TYPE_VIRTUAL ||\n+\t\t\tinternals->type == PORT_TYPE_OVERRIDE)\n+\t\treturn 0;\n+\n+\tassert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);\n+\tassert(port == internals->n_intf_no);\n+\n+\tnt4ga_port_set_link_status(p_adapter_info, port, false);\n+\n+\treturn 0;\n+}\n+\n+static int eth_dev_close(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tstruct drv_s *p_drv = internals->p_drv;\n+\tntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;\n+\tfpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;\n+\tstruct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);\n+\t(void)pci_dev; /* UNUSED */\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: enter [%s:%u]\\n\", __func__, __func__,\n+\t       __LINE__);\n+\n+\tinternals->p_drv = NULL;\n+\n+\t/* LAG cleanup */\n+\tif (internals->lag_config) {\n+\t\tif (internals->lag_config->lag_tid) {\n+\t\t\tinternals->lag_config->lag_thread_active = 0;\n+\t\t\tpthread_join(internals->lag_config->lag_tid, NULL);\n+\t\t}\n+\t\tlag_active = 0;\n+\t\trte_free(internals->lag_config);\n+\t}\n+\n+\t/* free */\n+\trte_free(internals);\n+\tinternals = NULL;\n+\n+\teth_dev->data->dev_private = NULL;\n+\teth_dev->data->mac_addrs = NULL;\n+\n+\t/* release */\n+\trte_eth_dev_release_port(eth_dev);\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: %d [%s:%u]\\n\", __func__,\n+\t       p_drv->n_eth_dev_init_count, __func__, __LINE__);\n+\tp_drv->n_eth_dev_init_count--;\n+\n+\t/*\n+\t * rte_pci_dev has no private member for p_drv\n+\t * wait until all rte_eth_dev's are closed - then close adapters via p_drv\n+\t */\n+\tif (!p_drv->n_eth_dev_init_count && p_drv) {\n+\t\tNT_LOG(DBG, ETHDEV, \"%s: %d [%s:%u]\\n\", __func__,\n+\t\t       p_drv->n_eth_dev_init_count, __func__, __LINE__);\n+\t\tp_drv->ntdrv.b_shutdown = true;\n+\t\tvoid *p_ret_val = NULL;\n+\n+\t\tpthread_join(p_nt_drv->stat_thread, &p_ret_val);\n+\t\tif (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {\n+\t\t\tp_ret_val = NULL;\n+\t\t\tpthread_join(p_nt_drv->flm_thread, &p_ret_val);\n+\t\t}\n+\t\tnt4ga_adapter_deinit(&p_nt_drv->adapter_info);\n+\t\trte_free(p_drv);\n+\t}\n+\tNT_LOG(DBG, ETHDEV, \"%s: leave [%s:%u]\\n\", __func__, __func__,\n+\t       __LINE__);\n+\treturn 0;\n+}\n+\n+static int eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,\n+\t\t\t      size_t fw_size)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\n+\tif (internals->type == 
PORT_TYPE_VIRTUAL ||\n+\t\t\tinternals->type == PORT_TYPE_OVERRIDE)\n+\t\treturn 0;\n+\n+\tfpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;\n+\tconst int length =\n+\t\tsnprintf(fw_version, fw_size, \"%03d-%04d-%02d-%02d\",\n+\t\t\t fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,\n+\t\t\t fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);\n+\tif ((size_t)length < fw_size) {\n+\t\t/* We have space for the version string */\n+\t\treturn 0;\n+\t}\n+\t/* We do not have space for the version string -return the needed space */\n+\treturn length + 1;\n+}\n+\n+static int eth_xstats_get(struct rte_eth_dev *eth_dev,\n+\t\t\t  struct rte_eth_xstat *stats, unsigned int n)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tstruct drv_s *p_drv = internals->p_drv;\n+\tntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;\n+\tnt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;\n+\tnthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;\n+\tint if_index = internals->if_index;\n+\tint nb_xstats;\n+\n+\tpthread_mutex_lock(&p_nt_drv->stat_lck);\n+\tnb_xstats = nthw_xstats_get(p_nt4ga_stat, stats, n,\n+\t\t\t\t    p_nthw_stat->mb_is_vswitch, if_index);\n+\tpthread_mutex_unlock(&p_nt_drv->stat_lck);\n+\treturn nb_xstats;\n+}\n+\n+static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,\n+\t\t\t\tconst uint64_t *ids, uint64_t *values,\n+\t\t\t\tunsigned int n)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tstruct drv_s *p_drv = internals->p_drv;\n+\tntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;\n+\tnt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;\n+\tnthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;\n+\tint if_index = internals->if_index;\n+\tint nb_xstats;\n+\n+\tpthread_mutex_lock(&p_nt_drv->stat_lck);\n+\tnb_xstats = nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n,\n+\t\t\t\t\t  p_nthw_stat->mb_is_vswitch, if_index);\n+\tpthread_mutex_unlock(&p_nt_drv->stat_lck);\n+\treturn nb_xstats;\n+}\n+\n+static int eth_xstats_reset(struct rte_eth_dev *eth_dev)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tstruct drv_s *p_drv = internals->p_drv;\n+\tntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;\n+\tnt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;\n+\tnthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;\n+\tint if_index = internals->if_index;\n+\n+\tpthread_mutex_lock(&p_nt_drv->stat_lck);\n+\tnthw_xstats_reset(p_nt4ga_stat, p_nthw_stat->mb_is_vswitch, if_index);\n+\tpthread_mutex_unlock(&p_nt_drv->stat_lck);\n+\treturn dpdk_stats_reset(internals, p_nt_drv, if_index);\n+}\n+\n+static int eth_xstats_get_names(struct rte_eth_dev *eth_dev __rte_unused,\n+\t\t\t\tstruct rte_eth_xstat_name *xstats_names,\n+\t\t\t\tunsigned int size)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tstruct drv_s *p_drv = internals->p_drv;\n+\tntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;\n+\tnt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;\n+\tnthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;\n+\n+\treturn nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size,\n+\t\t\t\t     p_nthw_stat->mb_is_vswitch);\n+}\n+\n+static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,\n+\t\t\t\t      const uint64_t *ids,\n+\t\t\t\t      struct rte_eth_xstat_name *xstats_names,\n+\t\t\t\t      unsigned int size)\n+{\n+\tstruct pmd_internals *internals 
=\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tstruct drv_s *p_drv = internals->p_drv;\n+\tntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;\n+\tnt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;\n+\tnthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;\n+\n+\treturn nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids, size,\n+\t\t\t\t\t   p_nthw_stat->mb_is_vswitch);\n+}\n+\n+static int _dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,\n+\t\t\t     const struct rte_flow_ops **ops)\n+{\n+\t*ops = &_dev_flow_ops;\n+\treturn 0;\n+}\n+\n+static int promiscuous_enable(struct rte_eth_dev __rte_unused * dev)\n+{\n+\tNT_LOG(DBG, NTHW, \"The device always run promiscuous mode.\");\n+\treturn 0;\n+}\n+\n+static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev,\n+\t\t\t\t   struct rte_eth_rss_conf *rss_conf)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tstruct flow_eth_dev *fedev = internals->flw_dev;\n+\tstruct flow_nic_dev *ndev = fedev->ndev;\n+\tconst int hsh_idx =\n+\t\t0; /* hsh index 0 means the default receipt in HSH module */\n+\tint res = flow_nic_set_hasher_fields(ndev, hsh_idx,\n+\t\t\t\t\t     nt_rss_hash_field_from_dpdk(rss_conf->rss_hf));\n+\tres |= hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);\n+\treturn res;\n+}\n+\n+static int rss_hash_conf_get(struct rte_eth_dev *eth_dev,\n+\t\t\t     struct rte_eth_rss_conf *rss_conf)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\tstruct flow_eth_dev *fedev = internals->flw_dev;\n+\tstruct flow_nic_dev *ndev = fedev->ndev;\n+\n+\trss_conf->rss_key = NULL;\n+\trss_conf->rss_key_len = 0;\n+\trss_conf->rss_hf |=\n+\t\tdpdk_rss_hash_define_from_nt_rss(ndev->rss_hash_config);\n+\treturn 0;\n+}\n+\n+static struct eth_dev_ops nthw_eth_dev_ops = {\n+\t.dev_configure = eth_dev_configure,\n+\t.dev_start = eth_dev_start,\n+\t.dev_stop = eth_dev_stop,\n+\t.dev_set_link_up = eth_dev_set_link_up,\n+\t.dev_set_link_down = eth_dev_set_link_down,\n+\t.dev_close = eth_dev_close,\n+\t.link_update = eth_link_update,\n+\t.stats_get = eth_stats_get,\n+\t.stats_reset = eth_stats_reset,\n+\t.dev_infos_get = eth_dev_infos_get,\n+\t.fw_version_get = eth_fw_version_get,\n+\t.rx_queue_setup = eth_rx_scg_queue_setup,\n+\t.rx_queue_start = eth_rx_queue_start,\n+\t.rx_queue_stop = eth_rx_queue_stop,\n+\t.rx_queue_release = eth_rx_queue_release,\n+\t.tx_queue_setup = eth_tx_scg_queue_setup,\n+\t.tx_queue_start = eth_tx_queue_start,\n+\t.tx_queue_stop = eth_tx_queue_stop,\n+\t.tx_queue_release = eth_tx_queue_release,\n+\t.mac_addr_remove = eth_mac_addr_remove,\n+\t.mac_addr_add = eth_mac_addr_add,\n+\t.mac_addr_set = eth_mac_addr_set,\n+\t.set_mc_addr_list = eth_set_mc_addr_list,\n+\t.xstats_get = eth_xstats_get,\n+\t.xstats_get_names = eth_xstats_get_names,\n+\t.xstats_reset = eth_xstats_reset,\n+\t.xstats_get_by_id = eth_xstats_get_by_id,\n+\t.xstats_get_names_by_id = eth_xstats_get_names_by_id,\n+\t.mtu_set = NULL,\n+\t.mtr_ops_get = eth_mtr_ops_get,\n+\t.flow_ops_get = _dev_flow_ops_get,\n+\t.promiscuous_disable = NULL,\n+\t.promiscuous_enable = promiscuous_enable,\n+\t.rss_hash_update = eth_dev_rss_hash_update,\n+\t.rss_hash_conf_get = rss_hash_conf_get,\n+};\n+\n+/* Converts link speed provided in Mbps to NT specific definitions.*/\n+static nt_link_speed_t convert_link_speed(int link_speed_mbps)\n+{\n+\tswitch (link_speed_mbps) {\n+\tcase 10:\n+\t\treturn NT_LINK_SPEED_10M;\n+\tcase 100:\n+\t\treturn 
NT_LINK_SPEED_100M;\n+\tcase 1000:\n+\t\treturn NT_LINK_SPEED_1G;\n+\tcase 10000:\n+\t\treturn NT_LINK_SPEED_10G;\n+\tcase 40000:\n+\t\treturn NT_LINK_SPEED_40G;\n+\tcase 100000:\n+\t\treturn NT_LINK_SPEED_100G;\n+\tcase 50000:\n+\t\treturn NT_LINK_SPEED_50G;\n+\tcase 25000:\n+\t\treturn NT_LINK_SPEED_25G;\n+\tdefault:\n+\t\treturn NT_LINK_SPEED_UNKNOWN;\n+\t}\n+}\n+\n+/*\n+ * Adapter flm stat thread\n+ */\n+static void *adapter_flm_thread_fn(void *context)\n+{\n+\tstruct drv_s *p_drv = context;\n+\tstruct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;\n+\tstruct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;\n+\tstruct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;\n+\tstruct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: %s: waiting for port configuration\\n\",\n+\t       p_adapter_info->mp_adapter_id_str, __func__);\n+\n+\twhile (p_flow_nic_dev->eth_base == NULL)\n+\t\tusleep(1000000);\n+\tstruct flow_eth_dev *dev = p_flow_nic_dev->eth_base;\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: %s: begin\\n\", p_adapter_info->mp_adapter_id_str,\n+\t       __func__);\n+\n+\twhile (!p_drv->ntdrv.b_shutdown) {\n+\t\tif (flm_mtr_update_stats(dev) == 0)\n+\t\t\tusleep(10);\n+\t}\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: %s: end\\n\", p_adapter_info->mp_adapter_id_str,\n+\t       __func__);\n+\n+\treturn NULL;\n+}\n+\n+/*\n+ * Adapter stat thread\n+ */\n+static void *adapter_stat_thread_fn(void *context)\n+{\n+\tstruct drv_s *p_drv = context;\n+\tntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;\n+\tnt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;\n+\tnthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;\n+\n+\tconst char *const p_adapter_id_str _unused =\n+\t\tp_nt_drv->adapter_info.mp_adapter_id_str;\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: %s: begin\\n\", p_adapter_id_str, __func__);\n+\n+\tassert(p_nthw_stat);\n+\n+\twhile (!p_drv->ntdrv.b_shutdown) {\n+\t\tusleep(100 * 100);\n+\n+\t\tnthw_stat_trigger(p_nthw_stat);\n+\n+\t\tuint32_t loop = 0;\n+\n+\t\twhile ((!p_drv->ntdrv.b_shutdown) &&\n+\t\t\t\t(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {\n+\t\t\tusleep(1 * 100);\n+\n+\t\t\tif (nt_log_is_debug(NT_LOG_MODULE_ETHDEV) &&\n+\t\t\t\t\t(++loop & 0x3fff) == 0) {\n+\t\t\t\tuint32_t sf_ram_of =\n+\t\t\t\t\tnthw_rmc_get_status_sf_ram_of(p_nt4ga_stat->mp_nthw_rmc);\n+\t\t\t\tuint32_t descr_fifo_of =\n+\t\t\t\tnthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat->mp_nthw_rmc);\n+\n+\t\t\t\tuint32_t dbg_merge =\n+\t\t\t\t\tnthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);\n+\t\t\t\tuint32_t mac_if_err =\n+\t\t\t\t\tnthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);\n+\n+\t\t\t\tNT_LOG(ERR, ETHDEV, \"Statistics DMA frozen\\n\");\n+\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t       \"SF RAM Overflow     : %08x\\n\",\n+\t\t\t\t       sf_ram_of);\n+\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t       \"Descr Fifo Overflow : %08x\\n\",\n+\t\t\t\t       descr_fifo_of);\n+\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t       \"DBG Merge           : %08x\\n\",\n+\t\t\t\t       dbg_merge);\n+\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t       \"MAC If Errors       : %08x\\n\",\n+\t\t\t\t       mac_if_err);\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* Check then collect */\n+\t\t{\n+\t\t\tpthread_mutex_lock(&p_nt_drv->stat_lck);\n+\t\t\tnt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);\n+\t\t\tpthread_mutex_unlock(&p_nt_drv->stat_lck);\n+\t\t}\n+\t}\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: %s: end\\n\", p_adapter_id_str, __func__);\n+\n+\treturn NULL;\n+}\n+\n+static struct {\n+\tstruct rte_pci_device 
*vpf_dev;\n+\tstruct rte_eth_devargs eth_da;\n+\tint portqueues[MAX_FPGA_VIRTUAL_PORTS_SUPPORTED];\n+\tuint16_t pf_backer_port_id;\n+} rep;\n+\n+static int nthw_pci_dev_init(struct rte_pci_device *pci_dev)\n+{\n+\tint res;\n+\tstruct drv_s *p_drv;\n+\tntdrv_4ga_t *p_nt_drv;\n+\tfpga_info_t *fpga_info;\n+\n+\thw_info_t *p_hw_info _unused;\n+\tuint32_t n_port_mask = -1; /* All ports enabled by default */\n+\tuint32_t nb_rx_queues = 1;\n+\tuint32_t nb_tx_queues = 1;\n+\tuint32_t exception_path = 0;\n+\tstruct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES];\n+\tlag_config_t *lag_config = NULL;\n+\tint n_phy_ports;\n+\tstruct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = {{ 0 }};\n+\tint num_port_speeds = 0;\n+\tenum flow_eth_dev_profile profile;\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u] enter\\n\", __func__, __FILE__, __LINE__);\n+\tNT_LOG(DBG, ETHDEV, \"Dev %s PF #%i Init : %02x:%02x:%i\\n\",\n+\t       pci_dev->name, pci_dev->addr.function, pci_dev->addr.bus,\n+\t       pci_dev->addr.devid, pci_dev->addr.function);\n+\n+\t/*\n+\t * Process options/arguments\n+\t */\n+\tif (pci_dev->device.devargs && pci_dev->device.devargs->args) {\n+\t\tint kvargs_count;\n+\t\tstruct rte_kvargs *kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,\n+\t\t\t\t\t\t\t     valid_arguments);\n+\t\tif (kvlist == NULL)\n+\t\t\treturn -1;\n+\n+\t\t/*\n+\t\t * Argument: help\n+\t\t * NOTE: this argument/option check should be the first as it will stop\n+\t\t * execution after producing its output\n+\t\t */\n+\t\t{\n+\t\t\tif (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {\n+\t\t\t\tsize_t i;\n+\n+\t\t\t\tprintf(\"NTNIC supported arguments:\\n\\n\");\n+\t\t\t\tfor (i = 0; i < RTE_DIM(valid_arguments); i++) {\n+\t\t\t\t\tif (valid_arguments[i] == NULL)\n+\t\t\t\t\t\tbreak;\n+\t\t\t\t\tprintf(\"  %s\\n\", valid_arguments[i]);\n+\t\t\t\t}\n+\t\t\t\tprintf(\"\\n\");\n+\t\t\t\texit(0);\n+\t\t\t}\n+\t\t}\n+\n+\t\t/*\n+\t\t * Argument: supported-fpgas=list|verbose\n+\t\t * NOTE: this argument/option check should be the first as it will stop\n+\t\t * execution after producing its output\n+\t\t */\n+\t\t{\n+\t\t\tconst char *val_str;\n+\n+\t\t\tval_str = rte_kvargs_get(kvlist,\n+\t\t\t\t\t\t ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);\n+\t\t\tif (val_str) {\n+\t\t\t\tint detail_level = 0;\n+\t\t\t\tnt_fpga_mgr_t *p_fpga_mgr = NULL;\n+\n+\t\t\t\tif (strcmp(val_str, \"list\") == 0) {\n+\t\t\t\t\tdetail_level = 0;\n+\t\t\t\t} else if (strcmp(val_str, \"verbose\") == 0) {\n+\t\t\t\t\tdetail_level = 1;\n+\t\t\t\t} else {\n+\t\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t\t       \"%s: argument '%s': '%s': unsupported value\\n\",\n+\t\t\t\t\t       __func__,\n+\t\t\t\t\t       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG,\n+\t\t\t\t\t       val_str);\n+\t\t\t\t\texit(1);\n+\t\t\t\t}\n+\t\t\t\t/* Produce fpgamgr output and exit hard */\n+\t\t\t\tp_fpga_mgr = fpga_mgr_new();\n+\t\t\t\tif (p_fpga_mgr) {\n+\t\t\t\t\tfpga_mgr_init(p_fpga_mgr);\n+\t\t\t\t\tfpga_mgr_show(p_fpga_mgr, stdout,\n+\t\t\t\t\t\t     detail_level);\n+\t\t\t\t\tfpga_mgr_delete(p_fpga_mgr);\n+\t\t\t\t\tp_fpga_mgr = NULL;\n+\t\t\t\t} else {\n+\t\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t\t       \"%s: %s cannot complete\\n\",\n+\t\t\t\t\t       __func__,\n+\t\t\t\t\t       ETH_DEV_NTNIC_SUPPORTED_FPGAS_ARG);\n+\t\t\t\t\texit(1);\n+\t\t\t\t}\n+\t\t\t\texit(0);\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* link_speed options/argument only applicable for physical ports. 
*/\n+\t\tnum_port_speeds =\n+\t\t\trte_kvargs_count(kvlist, ETH_DEV_NTHW_LINK_SPEED_ARG);\n+\t\tif (num_port_speeds) {\n+\t\t\tassert(num_port_speeds <= NUM_ADAPTER_PORTS_MAX);\n+\t\t\tvoid *pls_mbps_ptr = &pls_mbps[0];\n+\n+\t\t\tres = rte_kvargs_process(kvlist,\n+\t\t\t\t\t\t ETH_DEV_NTHW_LINK_SPEED_ARG,\n+\t\t\t\t\t\t &string_to_port_link_speed,\n+\t\t\t\t\t\t &pls_mbps_ptr);\n+\t\t\tif (res < 0) {\n+\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t       \"%s: problem with port link speed command \"\n+\t\t\t\t       \"line arguments: res=%d\\n\",\n+\t\t\t\t       __func__, res);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tfor (int i = 0; i < num_port_speeds; ++i) {\n+\t\t\t\tint pid = pls_mbps[i].port_id;\n+\n+\t\t\t\tint lspeed _unused = pls_mbps[i].link_speed;\n+\n+\t\t\t\tNT_LOG(DBG, ETHDEV, \"%s: devargs: %s=%d.%d\\n\",\n+\t\t\t\t       __func__, ETH_DEV_NTHW_LINK_SPEED_ARG,\n+\t\t\t\t       pid, lspeed);\n+\t\t\t\tif (pls_mbps[i].port_id >=\n+\t\t\t\t\t\tNUM_ADAPTER_PORTS_MAX) {\n+\t\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t\t       \"%s: problem with port link speed command line \"\n+\t\t\t\t\t       \"arguments: port id should be 0 to %d, got %d\\n\",\n+\t\t\t\t\t       __func__, NUM_ADAPTER_PORTS_MAX,\n+\t\t\t\t\t       pid);\n+\t\t\t\t\treturn -1;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\n+\t\t/*\n+\t\t * portmask option/argument\n+\t\t * It is intentional that portmask is only used to decide if DPDK eth_dev\n+\t\t * should be created for testing we would still keep the nthw subsystems\n+\t\t * running for all interfaces\n+\t\t */\n+\t\tkvargs_count =\n+\t\t\trte_kvargs_count(kvlist, ETH_DEV_NTHW_PORTMASK_ARG);\n+\t\tif (kvargs_count) {\n+\t\t\tassert(kvargs_count == 1);\n+\t\t\tres = rte_kvargs_process(kvlist,\n+\t\t\t\t\t\t ETH_DEV_NTHW_PORTMASK_ARG,\n+\t\t\t\t\t\t &string_to_u32, &n_port_mask);\n+\t\t\tif (res < 0) {\n+\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t       \"%s: problem with command line arguments: res=%d\\n\",\n+\t\t\t\t       __func__, res);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tNT_LOG(DBG, ETHDEV, \"%s: devargs: %s=%u\\n\", __func__,\n+\t\t\t       ETH_DEV_NTHW_PORTMASK_ARG, n_port_mask);\n+\t\t}\n+\n+\t\t/*\n+\t\t * rxq option/argument\n+\t\t * The number of rxq (hostbuffers) allocated in memory.\n+\t\t * Default is 32 RX Hostbuffers\n+\t\t */\n+\t\tkvargs_count =\n+\t\t\trte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);\n+\t\tif (kvargs_count) {\n+\t\t\tassert(kvargs_count == 1);\n+\t\t\tres = rte_kvargs_process(kvlist,\n+\t\t\t\t\t\t ETH_DEV_NTHW_RXQUEUES_ARG,\n+\t\t\t\t\t\t &string_to_u32, &nb_rx_queues);\n+\t\t\tif (res < 0) {\n+\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t       \"%s: problem with command line arguments: res=%d\\n\",\n+\t\t\t\t       __func__, res);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tNT_LOG(DBG, ETHDEV, \"%s: devargs: %s=%u\\n\", __func__,\n+\t\t\t       ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);\n+\t\t}\n+\n+\t\t/*\n+\t\t * txq option/argument\n+\t\t * The number of txq (hostbuffers) allocated in memory.\n+\t\t * Default is 32 TX Hostbuffers\n+\t\t */\n+\t\tkvargs_count =\n+\t\t\trte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);\n+\t\tif (kvargs_count) {\n+\t\t\tassert(kvargs_count == 1);\n+\t\t\tres = rte_kvargs_process(kvlist,\n+\t\t\t\t\t\t ETH_DEV_NTHW_TXQUEUES_ARG,\n+\t\t\t\t\t\t &string_to_u32, &nb_tx_queues);\n+\t\t\tif (res < 0) {\n+\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t       \"%s: problem with command line arguments: res=%d\\n\",\n+\t\t\t\t       __func__, res);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tNT_LOG(DBG, ETHDEV, \"%s: devargs: %s=%u\\n\", 
__func__,\n+\t\t\t       ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);\n+\t\t}\n+\n+\t\tkvargs_count = rte_kvargs_count(kvlist, ETH_NTNIC_LAG_MODE_ARG);\n+\t\tif (kvargs_count) {\n+\t\t\tlag_config = (lag_config_t *)rte_zmalloc(NULL, sizeof(lag_config_t), 0);\n+\t\t\tif (lag_config == NULL) {\n+\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t       \"Failed to alloc lag_config data\\n\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tassert(kvargs_count == 1);\n+\t\t\tres = rte_kvargs_process(kvlist, ETH_NTNIC_LAG_MODE_ARG,\n+\t\t\t\t\t\t &string_to_u32,\n+\t\t\t\t\t\t &lag_config->mode);\n+\t\t\tif (res < 0) {\n+\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t       \"%s: problem with command line arguments: res=%d\\n\",\n+\t\t\t\t       __func__, res);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tNT_LOG(DBG, ETHDEV, \"%s: devargs: %s=%u\\n\", __func__,\n+\t\t\t       ETH_NTNIC_LAG_MODE_ARG, nb_tx_queues);\n+\t\t\tlag_active = 1;\n+\t\t}\n+\n+\t\tkvargs_count = rte_kvargs_count(kvlist,\n+\t\t\t\t\t\tETH_DEV_NTHW_EXCEPTION_PATH_ARG);\n+\t\tif (kvargs_count) {\n+\t\t\tassert(kvargs_count == 1);\n+\t\t\tres = rte_kvargs_process(kvlist,\n+\t\t\t\t\t\t ETH_DEV_NTHW_EXCEPTION_PATH_ARG,\n+\t\t\t\t\t\t &string_to_u32, &exception_path);\n+\t\t\tif (res < 0) {\n+\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t       \"%s: problem with command line arguments: res=%d\\n\",\n+\t\t\t\t       __func__, res);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t\tNT_LOG(DBG, ETHDEV, \"%s: devargs: %s=%u\\n\", __func__,\n+\t\t\t       ETH_DEV_NTHW_EXCEPTION_PATH_ARG, exception_path);\n+\t\t}\n+\n+\t\tif (lag_active && lag_config) {\n+\t\t\tswitch (lag_config->mode) {\n+\t\t\tcase BONDING_MODE_ACTIVE_BACKUP:\n+\t\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t\t       \"Active / Backup LAG mode\\n\");\n+\t\t\t\tkvargs_count = rte_kvargs_count(kvlist,\n+\t\t\t\t\t\t\t\tETH_NTNIC_LAG_PRIMARY_ARG);\n+\t\t\t\tif (kvargs_count) {\n+\t\t\t\t\tassert(kvargs_count == 1);\n+\t\t\t\t\tres = rte_kvargs_process(kvlist,\n+\t\t\t\t\t\t\t\t ETH_NTNIC_LAG_PRIMARY_ARG,\n+\t\t\t\t\t\t\t\t &string_to_u32,\n+\t\t\t\t\t\t\t\t &lag_config->primary_port);\n+\t\t\t\t\tif (res < 0) {\n+\t\t\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t\t\t       \"%s: problem with command line \"\n+\t\t\t\t\t\t       \"arguments: res=%d\\n\",\n+\t\t\t\t\t\t       __func__, res);\n+\t\t\t\t\t\treturn -1;\n+\t\t\t\t\t}\n+\t\t\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t\t\t       \"%s: devargs: %s=%u\\n\", __func__,\n+\t\t\t\t\t       ETH_NTNIC_LAG_MODE_ARG,\n+\t\t\t\t\t       nb_tx_queues);\n+\t\t\t\t} else {\n+\t\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t\t       \"LAG must define a primary port\\n\");\n+\t\t\t\t\treturn -1;\n+\t\t\t\t}\n+\n+\t\t\t\tkvargs_count = rte_kvargs_count(kvlist,\n+\t\t\t\t\t\t\t\tETH_NTNIC_LAG_BACKUP_ARG);\n+\t\t\t\tif (kvargs_count) {\n+\t\t\t\t\tassert(kvargs_count == 1);\n+\t\t\t\t\tres = rte_kvargs_process(kvlist,\n+\t\t\t\t\t\t\t\t ETH_NTNIC_LAG_BACKUP_ARG,\n+\t\t\t\t\t\t\t\t &string_to_u32,\n+\t\t\t\t\t\t\t\t &lag_config->backup_port);\n+\t\t\t\t\tif (res != 0) {\n+\t\t\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t\t\t       \"%s: problem with command line \"\n+\t\t\t\t\t\t       \"arguments: res=%d\\n\",\n+\t\t\t\t\t\t       __func__, res);\n+\t\t\t\t\t\treturn -1;\n+\t\t\t\t\t}\n+\t\t\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t\t\t       \"%s: devargs: %s=%u\\n\", __func__,\n+\t\t\t\t\t       ETH_NTNIC_LAG_MODE_ARG,\n+\t\t\t\t\t       nb_tx_queues);\n+\t\t\t\t} else {\n+\t\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t\t       \"LAG must define a backup port\\n\");\n+\t\t\t\t\treturn -1;\n+\t\t\t\t}\n+\t\t\t\tbreak;\n+\n+\t\t\tcase 
BONDING_MODE_8023AD:\n+\t\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t\t       \"Active / Active LAG mode\\n\");\n+\t\t\t\tlag_config->primary_port = 0;\n+\t\t\t\tlag_config->backup_port = 0;\n+\t\t\t\tbreak;\n+\n+\t\t\tdefault:\n+\t\t\t\tNT_LOG(ERR, ETHDEV, \"Unsupported LAG mode\\n\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\n+\t\trte_kvargs_free(kvlist);\n+\t}\n+\n+\t/* parse representor args */\n+\tif (setup_virtual_pf_representor_base(pci_dev) == -1) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"%s: setup_virtual_pf_representor_base error %d (%s:%u)\\n\",\n+\t\t       (pci_dev->name[0] ? pci_dev->name : \"NA\"), -1, __func__,\n+\t\t       __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* alloc */\n+\tp_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s),\n+\t\t\t\t  RTE_CACHE_LINE_SIZE,\n+\t\t\t\t  pci_dev->device.numa_node);\n+\tif (!p_drv) {\n+\t\tNT_LOG(ERR, ETHDEV, \"%s: error %d (%s:%u)\\n\",\n+\t\t       (pci_dev->name[0] ? pci_dev->name : \"NA\"), -1, __func__,\n+\t\t       __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\t/* Setup VFIO context */\n+\tint vfio = nt_vfio_setup(pci_dev);\n+\n+\tif (vfio < 0) {\n+\t\tNT_LOG(ERR, ETHDEV, \"%s: vfio_setup error %d (%s:%u)\\n\",\n+\t\t       (pci_dev->name[0] ? pci_dev->name : \"NA\"), -1, __func__,\n+\t\t       __LINE__);\n+\t\trte_free(p_drv);\n+\t\treturn -1;\n+\t}\n+\n+\tp_drv->probe_finished = 0;\n+\t/* context */\n+\tp_nt_drv = &p_drv->ntdrv;\n+\tfpga_info = &p_nt_drv->adapter_info.fpga_info;\n+\tp_hw_info = &p_nt_drv->adapter_info.hw_info;\n+\n+\tp_drv->p_dev = pci_dev;\n+\n+\t/* Set context for NtDrv */\n+\tp_nt_drv->pciident =\n+\t\tBDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,\n+\t\t\t\tpci_dev->addr.devid, pci_dev->addr.function);\n+\tp_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;\n+\tp_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;\n+\n+\tfpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;\n+\tfpga_info->bar0_size = pci_dev->mem_resource[0].len;\n+\tNT_LOG(DBG, ETHDEV, \"bar0=0x%\" PRIX64 \" len=%d\\n\", fpga_info->bar0_addr,\n+\t       fpga_info->bar0_size);\n+\tfpga_info->numa_node = pci_dev->device.numa_node;\n+\tfpga_info->pciident = p_nt_drv->pciident;\n+\tfpga_info->adapter_no = p_drv->adapter_no;\n+\n+\tp_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;\n+\tp_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;\n+\tp_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;\n+\tp_nt_drv->adapter_info.hw_info.pci_sub_vendor_id =\n+\t\tpci_dev->id.subsystem_vendor_id;\n+\tp_nt_drv->adapter_info.hw_info.pci_sub_device_id =\n+\t\tpci_dev->id.subsystem_device_id;\n+\n+\tNT_LOG(DBG, ETHDEV,\n+\t       \"%s: \" PCIIDENT_PRINT_STR \" %04X:%04X: %04X:%04X:\\n\",\n+\t       p_nt_drv->adapter_info.mp_adapter_id_str,\n+\t       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),\n+\t       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),\n+\t       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),\n+\t       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),\n+\t       p_nt_drv->adapter_info.hw_info.pci_vendor_id,\n+\t       p_nt_drv->adapter_info.hw_info.pci_device_id,\n+\t       p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,\n+\t       p_nt_drv->adapter_info.hw_info.pci_sub_device_id);\n+\n+\tp_nt_drv->b_shutdown = false;\n+\tp_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;\n+\n+\tfor (int i = 0; i < num_port_speeds; ++i) {\n+\t\tstruct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;\n+\t\tnt_link_speed_t link_speed 
=\n+\t\t\tconvert_link_speed(pls_mbps[i].link_speed);\n+\t\tnt4ga_port_set_link_speed(p_adapter_info, i, link_speed);\n+\t}\n+\n+\t/* store context */\n+\tstore_pdrv(p_drv);\n+\n+\t/* initialize nt4ga nthw fpga module instance in drv */\n+\tint err = nt4ga_adapter_init(&p_nt_drv->adapter_info);\n+\n+\tif (err != 0) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"%s: Cannot initialize the adapter instance\\n\",\n+\t\t       p_nt_drv->adapter_info.mp_adapter_id_str);\n+\t\treturn -1;\n+\t}\n+\n+\tif (fpga_info->mp_nthw_epp != NULL)\n+\t\tnthw_eth_dev_ops.mtu_set = dev_set_mtu;\n+\n+\t/* Initialize the queue system */\n+\tif (err == 0) {\n+\t\terr = nthw_virt_queue_init(fpga_info);\n+\t\tif (err != 0) {\n+\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t       \"%s: Cannot initialize scatter-gather queues\\n\",\n+\t\t\t       p_nt_drv->adapter_info.mp_adapter_id_str);\n+\t\t} else {\n+\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t       \"%s: Initialized scatter-gather queues\\n\",\n+\t\t\t       p_nt_drv->adapter_info.mp_adapter_id_str);\n+\t\t}\n+\t}\n+\n+\tswitch (fpga_info->profile) {\n+\tcase FPGA_INFO_PROFILE_VSWITCH:\n+\t\tprofile = FLOW_ETH_DEV_PROFILE_VSWITCH;\n+\t\tbreak;\n+\tcase FPGA_INFO_PROFILE_INLINE:\n+\t\tprofile = FLOW_ETH_DEV_PROFILE_INLINE;\n+\t\tbreak;\n+\tcase FPGA_INFO_PROFILE_UNKNOWN:\n+\t/* fallthrough */\n+\tcase FPGA_INFO_PROFILE_CAPTURE:\n+\t/* fallthrough */\n+\tdefault:\n+\t\tNT_LOG(ERR, ETHDEV, \"%s: fpga profile not supported [%s:%u]\\n\",\n+\t\t       (pci_dev->name[0] ? pci_dev->name : \"NA\"), __func__,\n+\t\t       __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (err == 0) {\n+\t\t/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */\n+\t\tconst char *const p_adapter_id_str _unused =\n+\t\t\tp_nt_drv->adapter_info.mp_adapter_id_str;\n+\t\tNT_LOG(DBG, ETHDEV,\n+\t\t       \"%s: %s: AdapterPCI=\" PCIIDENT_PRINT_STR\n+\t\t       \" Hw=0x%02X_rev%d PhyPorts=%d\\n\",\n+\t\t       (pci_dev->name[0] ? pci_dev->name : \"NA\"), p_adapter_id_str,\n+\t\t       PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),\n+\t\t       PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),\n+\t\t       PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),\n+\t\t       PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),\n+\t\t       p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,\n+\t\t       fpga_info->n_phy_ports);\n+\t} else {\n+\t\tNT_LOG(ERR, ETHDEV, \"%s: error=%d [%s:%u]\\n\",\n+\t\t       (pci_dev->name[0] ? pci_dev->name : \"NA\"), err, __func__,\n+\t\t       __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tpthread_mutex_init(&p_nt_drv->stat_lck, NULL);\n+\tres = rte_ctrl_thread_create(&p_nt_drv->stat_thread, \"nt4ga_stat_thr\",\n+\t\t\t\t     NULL, adapter_stat_thread_fn,\n+\t\t\t\t     (void *)p_drv);\n+\tif (res) {\n+\t\tNT_LOG(ERR, ETHDEV, \"%s: error=%d [%s:%u]\\n\",\n+\t\t       (pci_dev->name[0] ? pci_dev->name : \"NA\"), res, __func__,\n+\t\t       __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\tif (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {\n+\t\tres = rte_ctrl_thread_create(&p_nt_drv->flm_thread,\n+\t\t\t\t\t     \"nt_flm_stat_thr\", NULL,\n+\t\t\t\t\t     adapter_flm_thread_fn,\n+\t\t\t\t\t     (void *)p_drv);\n+\t\tif (res) {\n+\t\t\tNT_LOG(ERR, ETHDEV, \"%s: error=%d [%s:%u]\\n\",\n+\t\t\t       (pci_dev->name[0] ? 
pci_dev->name : \"NA\"), res,\n+\t\t\t       __func__, __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tif (lag_config) {\n+\t\t/* LAG is activated, so only use port 0 */\n+\t\tn_phy_ports = 1;\n+\t} else {\n+\t\tn_phy_ports = fpga_info->n_phy_ports;\n+\t}\n+\tfor (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {\n+\t\tconst char *const p_port_id_str _unused =\n+\t\t\tp_nt_drv->adapter_info.mp_port_id_str[n_intf_no];\n+\t\tstruct pmd_internals *internals = NULL;\n+\t\tstruct rte_eth_dev *eth_dev;\n+\t\tchar name[32];\n+\t\tint i;\n+\n+\t\tif ((1 << n_intf_no) & ~n_port_mask) {\n+\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t       \"%s: %s: interface #%d: skipping due to portmask 0x%02X\\n\",\n+\t\t\t       __func__, p_port_id_str, n_intf_no, n_port_mask);\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tsnprintf(name, sizeof(name), \"ntnic%d\", n_intf_no);\n+\t\tNT_LOG(DBG, ETHDEV, \"%s: %s: interface #%d: %s: '%s'\\n\",\n+\t\t       __func__, p_port_id_str, n_intf_no,\n+\t\t       (pci_dev->name[0] ? pci_dev->name : \"NA\"), name);\n+\n+\t\tinternals = rte_zmalloc_socket(name,\n+\t\t\t\t\t       sizeof(struct pmd_internals),\n+\t\t\t\t\t       RTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t       pci_dev->device.numa_node);\n+\t\tif (!internals) {\n+\t\t\tNT_LOG(ERR, ETHDEV, \"%s: %s: error=%d [%s:%u]\\n\",\n+\t\t\t       (pci_dev->name[0] ? pci_dev->name : \"NA\"), name,\n+\t\t\t       -1, __func__, __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tinternals->pci_dev = pci_dev;\n+\t\tinternals->n_intf_no = n_intf_no;\n+\t\tinternals->if_index = n_intf_no;\n+\t\tinternals->min_tx_pkt_size = 64;\n+\t\tinternals->max_tx_pkt_size = 10000;\n+\t\tinternals->type = PORT_TYPE_PHYSICAL;\n+\t\tinternals->vhid = -1;\n+\t\tinternals->port = n_intf_no;\n+\t\tinternals->nb_rx_queues = nb_rx_queues;\n+\t\tinternals->nb_tx_queues = nb_tx_queues;\n+\n+\t\t/* Not used queue index as dest port in bypass - use 0x80 + port nr */\n+\t\tfor (i = 0; i < MAX_QUEUES; i++)\n+\t\t\tinternals->vpq[i].hw_id = -1;\n+\n+\t\t/* Setup queue_ids */\n+\t\tif (nb_rx_queues > 1) {\n+\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t       \"(%i) NTNIC configured with Rx multi queues. %i queues\\n\",\n+\t\t\t       0 /*port*/, nb_rx_queues);\n+\t\t}\n+\n+\t\tif (nb_tx_queues > 1) {\n+\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t       \"(%i) NTNIC configured with Tx multi queues. 
%i queues\\n\",\n+\t\t\t       0 /*port*/, nb_tx_queues);\n+\t\t}\n+\n+\t\tint max_num_queues = (nb_rx_queues > nb_tx_queues) ?\n+\t\t\t\t     nb_rx_queues :\n+\t\t\t\t     nb_tx_queues;\n+\t\tint start_queue = allocate_queue(max_num_queues);\n+\n+\t\tif (start_queue < 0)\n+\t\t\treturn -1;\n+\n+\t\tfor (i = 0; i < (int)max_num_queues; i++) {\n+\t\t\tqueue_ids[i].id    = i;\n+\t\t\tqueue_ids[i].hw_id = start_queue + i;\n+\n+\t\t\tinternals->rxq_scg[i].queue = queue_ids[i];\n+\t\t\t/* use same index in Rx and Tx rings */\n+\t\t\tinternals->txq_scg[i].queue = queue_ids[i];\n+\t\t\tinternals->rxq_scg[i].enabled = 0;\n+\t\t\tinternals->txq_scg[i].type = internals->type;\n+\t\t\tinternals->rxq_scg[i].type = internals->type;\n+\t\t\tinternals->rxq_scg[i].port = internals->port;\n+\t\t}\n+\n+\t\t/* no tx queues - tx data goes out on phy */\n+\t\tinternals->vpq_nb_vq = 0;\n+\n+\t\tfor (i = 0; i < (int)nb_tx_queues; i++) {\n+\t\t\tinternals->txq_scg[i].port = internals->port;\n+\t\t\tinternals->txq_scg[i].enabled = 0;\n+\t\t}\n+\n+\t\t/* Set MAC address (but only if the MAC address is permitted) */\n+\t\tif (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {\n+\t\t\tconst uint64_t mac =\n+\t\t\t\tfpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value +\n+\t\t\t\tn_intf_no;\n+\t\t\tinternals->eth_addrs[0].addr_bytes[0] = (mac >> 40) &\n+\t\t\t\t\t\t\t\t0xFFu;\n+\t\t\tinternals->eth_addrs[0].addr_bytes[1] = (mac >> 32) &\n+\t\t\t\t\t\t\t\t0xFFu;\n+\t\t\tinternals->eth_addrs[0].addr_bytes[2] = (mac >> 24) &\n+\t\t\t\t\t\t\t\t0xFFu;\n+\t\t\tinternals->eth_addrs[0].addr_bytes[3] = (mac >> 16) &\n+\t\t\t\t\t\t\t\t0xFFu;\n+\t\t\tinternals->eth_addrs[0].addr_bytes[4] = (mac >> 8) &\n+\t\t\t\t\t\t\t\t0xFFu;\n+\t\t\tinternals->eth_addrs[0].addr_bytes[5] = (mac >> 0) &\n+\t\t\t\t\t\t\t\t0xFFu;\n+\t\t}\n+\n+\t\teth_dev = rte_eth_dev_allocate(name);\n+\t\tif (!eth_dev) {\n+\t\t\tNT_LOG(ERR, ETHDEV, \"%s: %s: error=%d [%s:%u]\\n\",\n+\t\t\t       (pci_dev->name[0] ? pci_dev->name : \"NA\"), name,\n+\t\t\t       -1, __func__, __LINE__);\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tinternals->flw_dev = flow_get_eth_dev(0, n_intf_no,\n+\t\t\t\t\t\t      eth_dev->data->port_id,\n+\t\t\t\t\t\t      nb_rx_queues,\n+\t\t\t\t\t\t      queue_ids,\n+\t\t\t\t\t\t      &internals->txq_scg[0].rss_target_id,\n+\t\t\t\t\t\t      profile, exception_path);\n+\t\tif (!internals->flw_dev) {\n+\t\t\tNT_LOG(ERR, VDPA,\n+\t\t\t       \"Error creating port. 
Resource exhaustion in HW\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tNT_LOG(DBG, ETHDEV,\n+\t\t       \"%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\\n\",\n+\t\t       __func__, __func__, __LINE__, eth_dev,\n+\t\t       eth_dev->data->port_id, n_intf_no);\n+\n+\t\t/* connect structs */\n+\t\tinternals->p_drv = p_drv;\n+\t\teth_dev->data->dev_private = internals;\n+\t\teth_dev->data->mac_addrs = internals->eth_addrs;\n+\n+\t\tinternals->port_id = eth_dev->data->port_id;\n+\n+\t\t/*\n+\t\t * if representor ports defined on this PF set the assigned port_id as the\n+\t\t * backer_port_id for the VFs\n+\t\t */\n+\t\tif (rep.vpf_dev == pci_dev)\n+\t\t\trep.pf_backer_port_id = eth_dev->data->port_id;\n+\t\tNT_LOG(DBG, ETHDEV,\n+\t\t       \"%s: [%s:%u] Setting up RX functions for SCG\\n\",\n+\t\t       __func__, __func__, __LINE__);\n+\t\teth_dev->rx_pkt_burst = eth_dev_rx_scg;\n+\t\teth_dev->tx_pkt_burst = eth_dev_tx_scg;\n+\t\teth_dev->tx_pkt_prepare = NULL;\n+\n+\t\tstruct rte_eth_link pmd_link;\n+\n+\t\tpmd_link.link_speed = ETH_SPEED_NUM_NONE;\n+\t\tpmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;\n+\t\tpmd_link.link_status = ETH_LINK_DOWN;\n+\t\tpmd_link.link_autoneg = ETH_LINK_AUTONEG;\n+\n+\t\teth_dev->device = &pci_dev->device;\n+\t\teth_dev->data->dev_link = pmd_link;\n+\t\teth_dev->data->numa_node = pci_dev->device.numa_node;\n+\t\teth_dev->dev_ops = &nthw_eth_dev_ops;\n+\t\teth_dev->state = RTE_ETH_DEV_ATTACHED;\n+\n+\t\trte_eth_copy_pci_info(eth_dev, pci_dev);\n+\t\teth_dev_pci_specific_init(eth_dev,\n+\t\t\t\t\t  pci_dev); /* performs rte_eth_copy_pci_info() */\n+\n+\t\tp_drv->n_eth_dev_init_count++;\n+\n+\t\tif (lag_config) {\n+\t\t\tinternals->lag_config = lag_config;\n+\t\t\tlag_config->internals = internals;\n+\n+\t\t\t/* Always merge port 0 and port 1 on a LAG bond */\n+\t\t\tlag_set_port_group(0, (uint32_t)0x01);\n+\t\t\tlag_config->lag_thread_active = 1;\n+\t\t\tpthread_create(&lag_config->lag_tid, NULL,\n+\t\t\t\t       lag_management, lag_config);\n+\t\t}\n+\n+\t\tif (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&\n+\t\t\t\tinternals->flw_dev->ndev->be.tpe.ver >= 2) {\n+\t\t\tassert(nthw_eth_dev_ops.mtu_set ==\n+\t\t\t       dev_set_mtu_inline ||\n+\t\t\t       nthw_eth_dev_ops.mtu_set == NULL);\n+\t\t\tnthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;\n+\t\t\tdev_set_mtu_inline(eth_dev, MTUINITVAL);\n+\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t       \"%s INLINE MTU supported, tpe version %d\\n\",\n+\t\t\t       __func__, internals->flw_dev->ndev->be.tpe.ver);\n+\t\t} else {\n+\t\t\tNT_LOG(DBG, ETHDEV, \"INLINE MTU not supported\");\n+\t\t}\n+\t}\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u] leave\\n\", __func__, __FILE__, __LINE__);\n+\n+#ifdef NT_TOOLS\n+\t/*\n+\t * If NtConnect interface must be started for external tools\n+\t */\n+\tntconn_adap_register(p_drv);\n+\tntconn_stat_register(p_drv);\n+\n+\t/* Determine CPU used by the DPDK */\n+\tcpu_set_t cpuset;\n+\tunsigned int lcore_id;\n+\n+\tCPU_ZERO(&cpuset);\n+\tfor (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {\n+\t\tif (rte_lcore_has_role(lcore_id, ROLE_OFF))\n+\t\t\tcontinue;\n+\t\trte_cpuset_t lcore_cpuset = rte_lcore_cpuset(lcore_id);\n+\n+\t\tRTE_CPU_OR(&cpuset, &cpuset, &lcore_cpuset);\n+\t}\n+\t/* Set available CPU for ntconnect */\n+\tRTE_CPU_NOT(&cpuset, &cpuset);\n+\n+\tntconn_flow_register(p_drv);\n+\tntconn_meter_register(p_drv);\n+#ifdef NTCONNECT_TEST\n+\tntconn_test_register(p_drv);\n+#endif\n+\tntconnect_init(NTCONNECT_SOCKET, cpuset);\n+#endif\n+\n+\treturn 0;\n+}\n+\n+static int nthw_pci_dev_deinit(struct 
rte_eth_dev *eth_dev __rte_unused)\n+{\n+\tint i;\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u] start\\n\", __func__, __FILE__, __LINE__);\n+\n+\tstruct pmd_internals *internals = pmd_intern_base;\n+\n+\tsleep(1); /* let running threads end Rx and Tx activity */\n+\n+\twhile (internals) {\n+\t\tfor (i = internals->nb_tx_queues - 1; i >= 0; i--) {\n+\t\t\tnthw_release_managed_tx_virt_queue(internals->txq_scg[i].vq);\n+\t\t\trelease_hw_virtio_queues(&internals->txq_scg[i].hwq);\n+\t\t}\n+\n+\t\tfor (i = internals->nb_rx_queues - 1; i >= 0; i--) {\n+\t\t\tnthw_release_managed_rx_virt_queue(internals->rxq_scg[i].vq);\n+\t\t\trelease_hw_virtio_queues(&internals->rxq_scg[i].hwq);\n+\t\t}\n+\t\tinternals = internals->next;\n+\t}\n+\n+\tfor (i = 0; i < MAX_REL_VQS; i++) {\n+\t\tif (rel_virt_queue[i].vq != NULL) {\n+\t\t\tif (rel_virt_queue[i].rx) {\n+\t\t\t\tif (rel_virt_queue[i].managed)\n+\t\t\t\t\tnthw_release_managed_rx_virt_queue(rel_virt_queue[i].vq);\n+\t\t\t\telse\n+\t\t\t\t\tnthw_release_rx_virt_queue(rel_virt_queue[i].vq);\n+\t\t\t} else {\n+\t\t\t\tif (rel_virt_queue[i].managed)\n+\t\t\t\t\tnthw_release_managed_tx_virt_queue(rel_virt_queue[i].vq);\n+\t\t\t\telse\n+\t\t\t\t\tnthw_release_tx_virt_queue(rel_virt_queue[i].vq);\n+\t\t\t}\n+\t\t\trel_virt_queue[i].vq = NULL;\n+\t\t}\n+\t}\n+\n+\tnt_vfio_remove(EXCEPTION_PATH_HID);\n+\n+\treturn 0;\n+}\n+\n+static void signal_handler_func_int(int sig)\n+{\n+\tif (sig != SIGINT) {\n+\t\tsignal(sig, previous_handler);\n+\t\traise(sig);\n+\t\treturn;\n+\t}\n+\tkill_pmd = 1;\n+}\n+\n+static void *shutdown_thread(void *arg __rte_unused)\n+{\n+\tstruct rte_eth_dev dummy;\n+\n+\twhile (!kill_pmd)\n+\t\tusleep(100000);\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: Shutting down because of ctrl+C\\n\", __func__);\n+\tnthw_pci_dev_deinit(&dummy);\n+\n+\tsignal(SIGINT, previous_handler);\n+\traise(SIGINT);\n+\n+\treturn NULL;\n+}\n+\n+static int init_shutdown(void)\n+{\n+\tNT_LOG(DBG, ETHDEV, \"%s: Starting shutdown handler\\n\", __func__);\n+\tkill_pmd = 0;\n+\tprevious_handler = signal(SIGINT, signal_handler_func_int);\n+\tpthread_create(&shutdown_tid, NULL, shutdown_thread, NULL);\n+\n+\t/*\n+\t * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll\n+\t * flooding by OVS from multiple virtual port threads - no need to be precise\n+\t */\n+\tuint64_t now_rtc = rte_get_tsc_cycles();\n+\n+\tusleep(10000);\n+\trte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);\n+\n+\treturn 0;\n+}\n+\n+static int nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n+\t\t\t  struct rte_pci_device *pci_dev)\n+{\n+\tint res;\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u] start\\n\", __func__, __FILE__, __LINE__);\n+\n+#if defined(DEBUG)\n+\tNT_LOG(DBG, NTHW, \"Testing NTHW %u [%s:%u]\\n\",\n+\t       nt_log_module_logtype[NT_LOG_MODULE_INDEX(NT_LOG_MODULE_NTHW)],\n+\t       __func__, __LINE__);\n+#endif\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: pcidev: name: '%s'\\n\", __func__,\n+\t       pci_dev->name);\n+\tNT_LOG(DBG, ETHDEV, \"%s: devargs: name: '%s'\\n\", __func__,\n+\t       pci_dev->device.name);\n+\tif (pci_dev->device.devargs) {\n+\t\tNT_LOG(DBG, ETHDEV, \"%s: devargs: args: '%s'\\n\", __func__,\n+\t\t       (pci_dev->device.devargs->args ?\n+\t\t\tpci_dev->device.devargs->args :\n+\t\t\t\"NULL\"));\n+\t\tNT_LOG(DBG, ETHDEV, \"%s: devargs: data: '%s'\\n\", __func__,\n+\t\t       (pci_dev->device.devargs->data ?\n+\t\t\tpci_dev->device.devargs->data :\n+\t\t\t\"NULL\"));\n+\t}\n+\n+\tconst int n_rte_has_pci = rte_eal_has_pci();\n+\n+\tNT_LOG(DBG, ETHDEV, 
\"has_pci=%d\\n\", n_rte_has_pci);\n+\tif (n_rte_has_pci == 0) {\n+\t\tNT_LOG(ERR, ETHDEV, \"has_pci=%d: this PMD needs hugepages\\n\",\n+\t\t       n_rte_has_pci);\n+\t\treturn -1;\n+\t}\n+\n+\tconst int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();\n+\n+\tNT_LOG(DBG, ETHDEV, \"vfio_no_iommu_enabled=%d\\n\",\n+\t       n_rte_vfio_no_io_mmu_enabled);\n+\tif (n_rte_vfio_no_io_mmu_enabled) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU\\n\",\n+\t\t       n_rte_vfio_no_io_mmu_enabled);\n+\t\treturn -1;\n+\t}\n+\n+\tconst enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();\n+\n+\tNT_LOG(DBG, ETHDEV, \"iova mode=%d\\n\", n_rte_io_va_mode);\n+\tif (n_rte_io_va_mode != RTE_IOVA_PA) {\n+\t\tNT_LOG(WRN, ETHDEV,\n+\t\t       \"iova mode (%d) should be PA for performance reasons\\n\",\n+\t\t       n_rte_io_va_mode);\n+\t}\n+\n+\tconst int n_rte_has_huge_pages = rte_eal_has_hugepages();\n+\n+\tNT_LOG(DBG, ETHDEV, \"has_hugepages=%d\\n\", n_rte_has_huge_pages);\n+\tif (n_rte_has_huge_pages == 0) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"has_hugepages=%d: this PMD needs hugepages\\n\",\n+\t\t       n_rte_has_huge_pages);\n+\t\treturn -1;\n+\t}\n+\n+\tNT_LOG(DBG, ETHDEV,\n+\t       \"busid=\" PCI_PRI_FMT\n+\t       \" pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s\\n\",\n+\t       pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,\n+\t       pci_dev->addr.function, pci_dev->id.vendor_id,\n+\t       pci_dev->id.device_id, pci_dev->id.subsystem_vendor_id,\n+\t       pci_dev->id.subsystem_device_id,\n+\t       pci_dev->name[0] ? pci_dev->name : \"NA\", /* locstr */\n+\t       pci_dev->device.numa_node,\n+\t       pci_dev->driver->driver.name ? pci_dev->driver->driver.name :\n+\t       \"NA\",\n+\t       pci_dev->driver->driver.alias ? 
pci_dev->driver->driver.alias :\n+\t       \"NA\");\n+\n+\tif (pci_dev->id.vendor_id == NT_HW_PCI_VENDOR_ID) {\n+\t\tif (pci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT200A01 ||\n+\t\t\t\tpci_dev->id.device_id == NT_HW_PCI_DEVICE_ID_NT50B01) {\n+\t\t\tif (pci_dev->id.subsystem_device_id != 0x01) {\n+\t\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t\t       \"%s: PCIe bifurcation - secondary endpoint \"\n+\t\t\t\t       \"found - leaving probe\\n\",\n+\t\t\t\t       __func__);\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tres = nthw_pci_dev_init(pci_dev);\n+\n+\tinit_shutdown();\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: leave: res=%d\\n\", __func__, res);\n+\treturn res;\n+}\n+\n+static int nthw_pci_remove(struct rte_pci_device *pci_dev)\n+{\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u]\\n\", __func__, __func__, __LINE__);\n+\n+\treturn rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);\n+}\n+\n+static int nt_log_init_impl(void)\n+{\n+\trte_log_set_global_level(RTE_LOG_DEBUG);\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u]\\n\", __func__, __func__, __LINE__);\n+\n+\tfor (int i = NT_LOG_MODULE_GENERAL; i < NT_LOG_MODULE_END; ++i) {\n+\t\tint index = NT_LOG_MODULE_INDEX(i);\n+\n+\t\tnt_log_module_logtype[index] =\n+\t\t\trte_log_register_type_and_pick_level(nt_log_module_eal_name[index],\n+\t\t\t\t\t\t\t     RTE_LOG_INFO);\n+\t}\n+\n+\tNT_LOG(DBG, ETHDEV, \"%s: [%s:%u]\\n\", __func__, __func__, __LINE__);\n+\n+\treturn 0;\n+}\n+\n+__rte_format_printf(3, 0)\n+static int nt_log_log_impl(enum nt_log_level level, uint32_t module,\n+\t\t\t   const char *format, va_list args)\n+{\n+\tuint32_t rte_level = 0;\n+\tuint32_t rte_module = 0;\n+\n+\tswitch (level) {\n+\tcase NT_LOG_ERR:\n+\t\trte_level = RTE_LOG_ERR;\n+\t\tbreak;\n+\tcase NT_LOG_WRN:\n+\t\trte_level = RTE_LOG_WARNING;\n+\t\tbreak;\n+\tcase NT_LOG_INF:\n+\t\trte_level = RTE_LOG_INFO;\n+\t\tbreak;\n+\tdefault:\n+\t\trte_level = RTE_LOG_DEBUG;\n+\t}\n+\n+\trte_module =\n+\t\t(module >= NT_LOG_MODULE_GENERAL &&\n+\t\t module < NT_LOG_MODULE_END) ?\n+\t\t(uint32_t)nt_log_module_logtype[NT_LOG_MODULE_INDEX(module)] : module;\n+\n+\treturn (int)rte_vlog(rte_level, rte_module, format, args);\n+}\n+\n+static int nt_log_is_debug_impl(uint32_t module)\n+{\n+\tif (module < NT_LOG_MODULE_GENERAL || module >= NT_LOG_MODULE_END)\n+\t\treturn -1;\n+\tint index = NT_LOG_MODULE_INDEX(module);\n+\n+\treturn rte_log_get_level(nt_log_module_logtype[index]) == RTE_LOG_DEBUG;\n+}\n+\n+RTE_INIT(ntnic_rte_init); /* must go before function */\n+\n+static void ntnic_rte_init(void)\n+{\n+\tstatic struct nt_log_impl impl = { .init = &nt_log_init_impl,\n+\t\t       .log = &nt_log_log_impl,\n+\t\t       .is_debug = &nt_log_is_debug_impl\n+\t};\n+\n+\tnt_log_init(&impl);\n+}\n+\n+static struct rte_pci_driver rte_nthw_pmd = {\n+\t.driver = {\n+\t\t.name = \"net_ntnic\",\n+\t},\n+\n+\t.id_table = nthw_pci_id_map,\n+\t.drv_flags = RTE_PCI_DRV_NEED_MAPPING,\n+\t.probe = nthw_pci_probe,\n+\t.remove = nthw_pci_remove,\n+};\n+\n+RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);\n+RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);\n+RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, \"* vfio-pci\");\n+\n+/*\n+ * VF and VDPA code\n+ */\n+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,\n+\t\t\t\t    int managed)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < MAX_REL_VQS; i++) {\n+\t\tif (rel_virt_queue[i].vq == NULL) {\n+\t\t\trel_virt_queue[i].vq = vq;\n+\t\t\trel_virt_queue[i].rx = rx;\n+\t\t\trel_virt_queue[i].managed = managed;\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\treturn -1;\n+}\n+\n+int 
de_register_release_virtqueue_info(struct nthw_virt_queue *vq)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < MAX_REL_VQS; i++) {\n+\t\tif (rel_virt_queue[i].vq == vq) {\n+\t\t\trel_virt_queue[i].vq = NULL;\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\treturn -1;\n+}\n+\n+struct pmd_internals *vp_vhid_instance_ready(int vhid)\n+{\n+\tstruct pmd_internals *intern = pmd_intern_base;\n+\n+\twhile (intern) {\n+\t\tif (intern->vhid == vhid)\n+\t\t\treturn intern;\n+\t\tintern = intern->next;\n+\t}\n+\treturn NULL;\n+}\n+\n+struct pmd_internals *vp_path_instance_ready(const char *path)\n+{\n+\tstruct pmd_internals *intern = pmd_intern_base;\n+\n+\twhile (intern) {\n+\t\tprintf(\"Searching for path: \\\"%s\\\" == \\\"%s\\\" (%d)\\n\",\n+\t\t       intern->vhost_path, path,\n+\t\t       strcmp(intern->vhost_path, path));\n+\t\tif (strcmp(intern->vhost_path, path) == 0)\n+\t\t\treturn intern;\n+\t\tintern = intern->next;\n+\t}\n+\treturn NULL;\n+}\n+\n+static void read_port_queues_mapping(char *str, int *portq)\n+{\n+\tint len;\n+\tchar *tok;\n+\n+\twhile (*str != '[' && *str != '\\0')\n+\t\tstr++;\n+\n+\tif (*str == '\\0')\n+\t\treturn;\n+\tstr++;\n+\tlen = strlen(str);\n+\tchar *str_e = &str[len];\n+\n+\twhile (*str_e != ']' && str_e != str)\n+\t\tstr_e--;\n+\tif (*str_e != ']')\n+\t\treturn;\n+\t*str_e = '\\0';\n+\n+\ttok = strtok(str, \",;\");\n+\twhile (tok) {\n+\t\tchar *ch = strchr(tok, ':');\n+\n+\t\tif (ch) {\n+\t\t\t*ch = '\\0';\n+\t\t\tint port = atoi(tok);\n+\t\t\tint nvq = atoi(ch + 1);\n+\n+\t\t\tif (port >= 0 &&\n+\t\t\t\t\tport < MAX_FPGA_VIRTUAL_PORTS_SUPPORTED &&\n+\t\t\t\t\tnvq > 0 && nvq < MAX_QUEUES)\n+\t\t\t\tportq[port] = nvq;\n+\t\t}\n+\n+\t\ttok = strtok(NULL, \",;\");\n+\t}\n+}\n+\n+int setup_virtual_pf_representor_base(struct rte_pci_device *dev)\n+{\n+\tstruct rte_eth_devargs eth_da;\n+\n+\teth_da.nb_representor_ports = 0U;\n+\tif (dev->device.devargs && dev->device.devargs->args) {\n+\t\tchar *ch = strstr(dev->device.devargs->args, \"portqueues\");\n+\n+\t\tif (ch) {\n+\t\t\tread_port_queues_mapping(ch, rep.portqueues);\n+\t\t\t/*\n+\t\t\t * Remove this extension. 
DPDK cannot read representor=[x] if added\n+\t\t\t * parameter to the end\n+\t\t\t */\n+\t\t\t *ch = '\\0';\n+\t\t}\n+\n+\t\tint err = rte_eth_devargs_parse(dev->device.devargs->args,\n+\t\t\t\t\t\t&eth_da);\n+\t\tif (err) {\n+\t\t\trte_errno = -err;\n+\t\t\tNT_LOG(ERR, VDPA,\n+\t\t\t       \"failed to process device arguments: %s\",\n+\t\t\t       strerror(rte_errno));\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tif (eth_da.nb_representor_ports) {\n+\t\t\trep.vpf_dev = dev;\n+\t\t\trep.eth_da = eth_da;\n+\t\t}\n+\t}\n+\t/* Will be set later when assigned to this PF */\n+\trep.pf_backer_port_id = RTE_MAX_ETHPORTS;\n+\treturn eth_da.nb_representor_ports;\n+}\n+\n+static inline struct rte_eth_dev *\n+rte_eth_vdev_allocate(struct rte_pci_device *dev, const char *name,\n+\t\t       size_t private_data_size, int *n_vq)\n+{\n+\tstatic int next_rep_p;\n+\tstruct rte_eth_dev *eth_dev = NULL;\n+\n+\teth_dev = rte_eth_dev_allocate(name);\n+\tif (!eth_dev)\n+\t\treturn NULL;\n+\n+\tNT_LOG(DBG, VDPA, \"%s: [%s:%u] eth_dev %p, port_id %u\\n\", __func__,\n+\t       __func__, __LINE__, eth_dev, eth_dev->data->port_id);\n+\n+\tif (private_data_size) {\n+\t\teth_dev->data->dev_private = rte_zmalloc_socket(name, private_data_size,\n+\t\t\t\t\t\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t\t\t\tdev->device.numa_node);\n+\t\tif (!eth_dev->data->dev_private) {\n+\t\t\trte_eth_dev_release_port(eth_dev);\n+\t\t\treturn NULL;\n+\t\t}\n+\t}\n+\n+\teth_dev->intr_handle = NULL;\n+\teth_dev->data->numa_node = dev->device.numa_node;\n+\teth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;\n+\n+\tif (rep.vpf_dev && rep.eth_da.nb_representor_ports > next_rep_p) {\n+\t\teth_dev->data->representor_id =\n+\t\t\trep.eth_da.representor_ports[next_rep_p++];\n+\t\teth_dev->device = &rep.vpf_dev->device;\n+\t\teth_dev->data->backer_port_id = rep.pf_backer_port_id;\n+\t} else {\n+\t\teth_dev->data->representor_id = nt_vfio_vf_num(dev);\n+\t\teth_dev->device = &dev->device;\n+\t}\n+\n+\tif (rep.portqueues[eth_dev->data->representor_id])\n+\t\t*n_vq = rep.portqueues[eth_dev->data->representor_id];\n+\n+\telse\n+\t\t*n_vq = 1;\n+\treturn eth_dev;\n+}\n+\n+static inline const char *\n+rte_vdev_device_name(const struct rte_pci_device *dev)\n+{\n+\tif (dev && dev->device.name)\n+\t\treturn dev->device.name;\n+\treturn NULL;\n+}\n+\n+static const char *const valid_args[] = {\n+#define VP_VLAN_ID \"vlan\"\n+\tVP_VLAN_ID,\n+#define VP_SEPARATE_SOCKET \"sep\"\n+\tVP_SEPARATE_SOCKET, NULL\n+};\n+\n+static int rte_pmd_vp_init_internals(struct rte_pci_device *vdev,\n+\t\t\t\t     struct rte_eth_dev **eth_dev)\n+{\n+\tstruct pmd_internals *internals = NULL;\n+\tstruct rte_eth_dev_data *data = NULL;\n+\tint i;\n+\tstruct rte_eth_link pmd_link;\n+\tint numa_node = vdev->device.numa_node;\n+\tconst char *name;\n+\tint n_vq;\n+\tint num_queues;\n+\tuint8_t port;\n+\tuint32_t vlan = 0;\n+\tuint32_t separate_socket = 0;\n+\n+\tenum fpga_info_profile fpga_profile =\n+\t\tget_fpga_profile_from_pci(vdev->addr);\n+\n+\tname = rte_vdev_device_name(vdev);\n+\n+\t/*\n+\t * Now do all data allocation - for eth_dev structure\n+\t * and internal (private) data\n+\t */\n+\n+\tif (vdev && vdev->device.devargs) {\n+\t\tstruct rte_kvargs *kvlist = NULL;\n+\n+\t\tkvlist = rte_kvargs_parse(vdev->device.devargs->args,\n+\t\t\t\t\t  valid_args);\n+\t\tif (!kvlist) {\n+\t\t\tNT_LOG(ERR, VDPA, \"error when parsing param\");\n+\t\t\tgoto error;\n+\t\t}\n+\n+\t\tif (rte_kvargs_count(kvlist, VP_VLAN_ID) == 1) {\n+\t\t\tif (rte_kvargs_process(kvlist, VP_VLAN_ID,\n+\t\t\t\t\t       
&string_to_u32, &vlan) < 0) {\n+\t\t\t\tNT_LOG(ERR, VDPA, \"error to parse %s\",\n+\t\t\t\t       VP_VLAN_ID);\n+\t\t\t\tgoto error;\n+\t\t\t}\n+\t\t}\n+\n+\t\tif (rte_kvargs_count(kvlist, VP_SEPARATE_SOCKET) == 1) {\n+\t\t\tif (rte_kvargs_process(kvlist, VP_SEPARATE_SOCKET,\n+\t\t\t\t\t       &string_to_u32,\n+\t\t\t\t\t       &separate_socket) < 0) {\n+\t\t\t\tNT_LOG(ERR, VDPA, \"error to parse %s\",\n+\t\t\t\t       VP_SEPARATE_SOCKET);\n+\t\t\t\tgoto error;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tn_vq = 0;\n+\t*eth_dev =\n+\t\trte_eth_vdev_allocate(vdev, name, sizeof(*internals), &n_vq);\n+\tif (*eth_dev == NULL)\n+\t\tgoto error;\n+\n+\tdata = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);\n+\tif (data == NULL)\n+\t\tgoto error;\n+\n+\tNT_LOG(DBG, VDPA, \"%s: [%s:%u] eth_dev %p, port_id %u, if_index %u\\n\",\n+\t       __func__, __func__, __LINE__, *eth_dev,\n+\t       (*eth_dev)->data->port_id, (*eth_dev)->data->representor_id);\n+\n+\tport = (*eth_dev)->data->representor_id;\n+\n+\tif (port < MAX_NTNIC_PORTS || port >= VIRTUAL_TUNNEL_PORT_OFFSET) {\n+\t\tNT_LOG(ERR, VDPA,\n+\t\t       \"(%i) Creating ntvp-backend ethdev on numa socket %i has invalid representor port\\n\",\n+\t\t       port, numa_node);\n+\t\treturn -1;\n+\t}\n+\tNT_LOG(DBG, VDPA,\n+\t       \"(%i) Creating ntnic-backend ethdev on numa socket %i\\n\", port,\n+\t       numa_node);\n+\n+\t/* Build up private dev data */\n+\tinternals = (*eth_dev)->data->dev_private;\n+\tinternals->pci_dev = vdev;\n+\tif (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {\n+\t\tinternals->type = PORT_TYPE_VIRTUAL;\n+\t\tinternals->nb_rx_queues = 1;\n+\t\tinternals->nb_tx_queues = 1;\n+\t} else {\n+\t\tinternals->type = PORT_TYPE_OVERRIDE;\n+\t\tinternals->nb_rx_queues = n_vq;\n+\t\tinternals->nb_tx_queues = n_vq;\n+\t}\n+\tinternals->p_drv = get_pdrv_from_pci(vdev->addr);\n+\n+\tif (n_vq > MAX_QUEUES) {\n+\t\tNT_LOG(ERR, VDPA,\n+\t\t       \"Error creating virtual port. Too many rx or tx queues. Max is %i\\n\",\n+\t\t       MAX_QUEUES);\n+\t\tgoto error;\n+\t}\n+\n+\tif (n_vq > FLOW_MAX_QUEUES) {\n+\t\tNT_LOG(ERR, VDPA,\n+\t\t       \"Error creating virtual port. Too many rx or tx queues for NIC. Max reported %i\\n\",\n+\t\t       FLOW_MAX_QUEUES);\n+\t\tgoto error;\n+\t}\n+\n+\t/* Initialize HB output dest to none */\n+\tfor (i = 0; i < MAX_QUEUES; i++)\n+\t\tinternals->vpq[i].hw_id = -1;\n+\n+\tinternals->vhid = -1;\n+\tinternals->port = port;\n+\tinternals->if_index = port;\n+\tinternals->port_id = (*eth_dev)->data->port_id;\n+\tinternals->vlan = vlan;\n+\n+\t/*\n+\t * Create first time all queues in HW\n+\t */\n+\tstruct flow_queue_id_s queue_ids[FLOW_MAX_QUEUES + 1];\n+\n+\tif (fpga_profile == FPGA_INFO_PROFILE_VSWITCH)\n+\t\tnum_queues = n_vq + 1; /* add 1: 0th for exception */\n+\telse\n+\t\tnum_queues = n_vq;\n+\n+\tint start_queue = allocate_queue(num_queues);\n+\n+\tif (start_queue < 0) {\n+\t\tNT_LOG(ERR, VDPA,\n+\t\t       \"Error creating virtual port. Too many rx queues. 
Could not allocate %i\\n\",\n+\t\t       num_queues);\n+\t\tgoto error;\n+\t}\n+\n+\tint vhid = -1;\n+\n+\tfor (i = 0; i < num_queues; i++) {\n+\t\tqueue_ids[i].id    = start_queue + i; /* 0th is exception queue */\n+\t\tqueue_ids[i].hw_id = start_queue + i;\n+\t}\n+\n+\tif (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {\n+\t\tinternals->txq_scg[0].rss_target_id = -1;\n+\t\tinternals->flw_dev = flow_get_eth_dev(0, internals->port,\n+\t\t\t\t\t\t      internals->port_id, num_queues,\n+\t\t\t\t\t\t      queue_ids,\n+\t\t\t\t\t\t      &internals->txq_scg[0].rss_target_id,\n+\t\t\t\t\t\t      FLOW_ETH_DEV_PROFILE_VSWITCH, 0);\n+\t} else {\n+\t\tuint16_t in_port = internals->port & 1;\n+\t\tchar name[RTE_ETH_NAME_MAX_LEN];\n+\t\tstruct pmd_internals *main_internals;\n+\t\tstruct rte_eth_dev *eth_dev;\n+\t\tint i;\n+\t\tint status;\n+\n+\t\t/* Get name of in_port */\n+\t\tstatus = rte_eth_dev_get_name_by_port(in_port, name);\n+\t\tif (status != 0) {\n+\t\t\tNT_LOG(ERR, VDPA, \"Name of port not found\");\n+\t\t\tgoto error;\n+\t\t}\n+\t\tNT_LOG(DBG, VDPA, \"Name of port %u = %s\\n\", in_port, name);\n+\n+\t\t/* Get ether device for in_port */\n+\t\teth_dev = rte_eth_dev_get_by_name(name);\n+\t\tif (eth_dev == NULL) {\n+\t\t\tNT_LOG(ERR, VDPA, \"Failed to get eth device\");\n+\t\t\tgoto error;\n+\t\t}\n+\n+\t\t/* Get internals for in_port */\n+\t\tmain_internals =\n+\t\t\t(struct pmd_internals *)eth_dev->data->dev_private;\n+\t\tNT_LOG(DBG, VDPA, \"internals port   %u\\n\\n\",\n+\t\t       main_internals->port);\n+\t\tif (main_internals->port != in_port) {\n+\t\t\tNT_LOG(ERR, VDPA, \"Port did not match\");\n+\t\t\tgoto error;\n+\t\t}\n+\n+\t\t/* Get flow device for in_port */\n+\t\tinternals->flw_dev = main_internals->flw_dev;\n+\n+\t\tfor (i = 0; i < num_queues && i < MAX_QUEUES; i++) {\n+\t\t\tNT_LOG(DBG, VDPA, \"Queue:            %u\\n\",\n+\t\t\t       queue_ids[i].id);\n+\t\t\tNT_LOG(DBG, VDPA, \"HW ID:            %u\\n\",\n+\t\t\t       queue_ids[i].hw_id);\n+\t\t\tif (flow_eth_dev_add_queue(main_internals->flw_dev,\n+\t\t\t\t\t\t   &queue_ids[i])) {\n+\t\t\t\tNT_LOG(ERR, VDPA, \"Could not add queue\");\n+\t\t\t\tgoto error;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tif (!internals->flw_dev) {\n+\t\tNT_LOG(ERR, VDPA,\n+\t\t       \"Error creating virtual port. 
Resource exhaustion in HW\\n\");\n+\t\tgoto error;\n+\t}\n+\n+\tchar path[128];\n+\n+\tif (!separate_socket) {\n+\t\tsprintf(path, \"%sstdvio%i\", DVIO_VHOST_DIR_NAME, port);\n+\t} else {\n+\t\tsprintf(path, \"%sstdvio%i/stdvio%i\", DVIO_VHOST_DIR_NAME, port,\n+\t\t\tport);\n+\t}\n+\n+\tinternals->vpq_nb_vq = n_vq;\n+\tif (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {\n+\t\tif (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,\n+\t\t\t\t   queue_ids[1].hw_id, n_vq, n_vq,\n+\t\t\t\t   internals->port, &vhid)) {\n+\t\t\tNT_LOG(ERR, VDPA,\n+\t\t\t       \"*********** ERROR *********** vDPA RELAY INIT\\n\");\n+\t\t\tgoto error;\n+\t\t}\n+\t\tfor (i = 0; i < n_vq; i++) {\n+\t\t\tinternals->vpq[i] =\n+\t\t\t\tqueue_ids[i + 1]; /* queue 0 is for exception */\n+\t\t}\n+\t} else {\n+\t\tif (nthw_vdpa_init(vdev, (*eth_dev)->device->name, path,\n+\t\t\t\t   queue_ids[0].hw_id, n_vq, n_vq,\n+\t\t\t\t   internals->port, &vhid)) {\n+\t\t\tNT_LOG(ERR, VDPA,\n+\t\t\t       \"*********** ERROR *********** vDPA RELAY INIT\\n\");\n+\t\t\tgoto error;\n+\t\t}\n+\t\tfor (i = 0; i < n_vq; i++)\n+\t\t\tinternals->vpq[i] = queue_ids[i];\n+\t}\n+\n+\t/*\n+\t * Exception queue for OVS SW path\n+\t */\n+\tinternals->rxq_scg[0].queue = queue_ids[0];\n+\tinternals->txq_scg[0].queue =\n+\t\tqueue_ids[0]; /* use same index in Rx and Tx rings */\n+\tinternals->rxq_scg[0].enabled = 0;\n+\tinternals->txq_scg[0].port = port;\n+\n+\tinternals->txq_scg[0].type = internals->type;\n+\tinternals->rxq_scg[0].type = internals->type;\n+\tinternals->rxq_scg[0].port = internals->port;\n+\n+\t/* Setup pmd_link info */\n+\tpmd_link.link_speed = ETH_SPEED_NUM_NONE;\n+\tpmd_link.link_duplex = ETH_LINK_FULL_DUPLEX;\n+\tpmd_link.link_status = ETH_LINK_DOWN;\n+\n+\trte_memcpy(data, (*eth_dev)->data, sizeof(*data));\n+\tdata->dev_private = internals;\n+\tdata->port_id = (*eth_dev)->data->port_id;\n+\n+\tdata->nb_rx_queues = 1; /* this is exception */\n+\tdata->nb_tx_queues = 1;\n+\n+\tdata->dev_link = pmd_link;\n+\tdata->mac_addrs = &eth_addr_vp[port - MAX_NTNIC_PORTS];\n+\tdata->numa_node = numa_node;\n+\n+\t(*eth_dev)->data = data;\n+\t(*eth_dev)->dev_ops = &nthw_eth_dev_ops;\n+\n+\tif (pmd_intern_base) {\n+\t\tstruct pmd_internals *intern = pmd_intern_base;\n+\n+\t\twhile (intern->next)\n+\t\t\tintern = intern->next;\n+\t\tintern->next = internals;\n+\t} else {\n+\t\tpmd_intern_base = internals;\n+\t}\n+\tinternals->next = NULL;\n+\n+\t__atomic_store_n(&internals->vhid, vhid, __ATOMIC_RELAXED);\n+\n+\tLIST_INIT(&internals->mtr_profiles);\n+\tLIST_INIT(&internals->mtrs);\n+\treturn 0;\n+\n+error:\n+\tif (data)\n+\t\trte_free(data);\n+\tif (internals)\n+\t\trte_free(internals);\n+\treturn -1;\n+}\n+\n+/*\n+ * PORT_TYPE_OVERRIDE cannot receive data through SCG as the queues\n+ * are going to VF/vDPA\n+ */\n+static uint16_t eth_dev_rx_scg_dummy(void *queue __rte_unused,\n+\t\t\t\t     struct rte_mbuf **bufs __rte_unused,\n+\t\t\t\t     uint16_t nb_pkts __rte_unused)\n+{\n+\treturn 0;\n+}\n+\n+/*\n+ * PORT_TYPE_OVERRIDE cannot transmit data through SCG as the queues\n+ * are coming from VF/vDPA\n+ */\n+static uint16_t eth_dev_tx_scg_dummy(void *queue __rte_unused,\n+\t\t\t\t     struct rte_mbuf **bufs __rte_unused,\n+\t\t\t\t     uint16_t nb_pkts __rte_unused)\n+{\n+\treturn 0;\n+}\n+\n+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev)\n+{\n+\tstruct pmd_internals *internals;\n+\tstruct rte_eth_dev *eth_dev;\n+\n+\t/* Create virtual function DPDK PCI devices.*/\n+\tif (rte_pmd_vp_init_internals(pci_dev, &eth_dev) < 
0)\n+\t\treturn -1;\n+\n+\tinternals = (struct pmd_internals *)eth_dev->data->dev_private;\n+\n+\tif (internals->type == PORT_TYPE_OVERRIDE) {\n+\t\teth_dev->rx_pkt_burst = eth_dev_rx_scg_dummy;\n+\t\teth_dev->tx_pkt_burst = eth_dev_tx_scg_dummy;\n+\t} else {\n+\t\teth_dev->rx_pkt_burst = eth_dev_rx_scg;\n+\t\teth_dev->tx_pkt_burst = eth_dev_tx_scg;\n+\t}\n+\n+\trte_eth_dev_probing_finish(eth_dev);\n+\n+\treturn 0;\n+}\n+\n+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev)\n+{\n+\tstruct rte_eth_dev *eth_dev = NULL;\n+\n+\tNT_LOG(DBG, VDPA, \"Closing ntvp pmd on numa socket %u\\n\",\n+\t       rte_socket_id());\n+\n+\tif (!pci_dev)\n+\t\treturn -1;\n+\n+\t/* Clean up all vDPA devices */\n+\tnthw_vdpa_close();\n+\n+\t/* reserve an ethdev entry */\n+\teth_dev = rte_eth_dev_allocated(rte_vdev_device_name(pci_dev));\n+\tif (eth_dev == NULL)\n+\t\treturn -1;\n+\n+\trte_free(eth_dev->data->dev_private);\n+\trte_free(eth_dev->data);\n+\n+\trte_eth_dev_release_port(eth_dev);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ * LAG\n+ */\n+\n+#define LAG_PORT0_ONLY (100)\n+#define LAG_BALANCED_50_50 (50)\n+#define LAG_PORT1_ONLY (0)\n+\n+#define LAG_NO_TX (0)\n+#define LAG_PORT0_INDEX (1)\n+#define LAG_PORT1_INDEX (2)\n+#define LAG_HASH_INDEX (3)\n+\n+static int lag_nop(lag_config_t *config __rte_unused)\n+{\n+\treturn 0;\n+}\n+\n+static int lag_balance(lag_config_t *config __rte_unused)\n+{\n+\tNT_LOG(DBG, ETHDEV, \"AA LAG: balanced output\\n\");\n+\treturn lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_BALANCED_50_50);\n+}\n+\n+static int lag_port0_active(lag_config_t *config __rte_unused)\n+{\n+\tNT_LOG(DBG, ETHDEV, \"AA LAG: port 0 output only\\n\");\n+\treturn lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT0_ONLY);\n+}\n+\n+static int lag_port1_active(lag_config_t *config __rte_unused)\n+{\n+\tNT_LOG(DBG, ETHDEV, \"AA LAG: port 1 output only\\n\");\n+\treturn lag_set_config(0, FLOW_LAG_SET_BALANCE, 0, LAG_PORT1_ONLY);\n+}\n+\n+static int lag_notx(lag_config_t *config __rte_unused)\n+{\n+\tNT_LOG(DBG, ETHDEV, \"AA LAG: no link\\n\");\n+\n+\tint retval = 0;\n+\n+\tretval +=\n+\t\tlag_set_config(0, FLOW_LAG_SET_ALL, LAG_PORT0_INDEX, LAG_NO_TX);\n+\tretval +=\n+\t\tlag_set_config(0, FLOW_LAG_SET_ALL, LAG_HASH_INDEX, LAG_NO_TX);\n+\treturn retval;\n+}\n+\n+static bool lag_get_link_status(lag_config_t *lag_config, uint8_t port)\n+{\n+\tstruct adapter_info_s *p_adapter_info =\n+\t\t\t&lag_config->internals->p_drv->ntdrv.adapter_info;\n+\tconst bool link_up = nt4ga_port_get_link_status(p_adapter_info, port);\n+\n+\tNT_LOG(DBG, ETHDEV, \"port %d status: %d\\n\", port, link_up);\n+\treturn link_up;\n+}\n+\n+static int lag_get_status(lag_config_t *config)\n+{\n+\tuint8_t port0 = lag_get_link_status(config, 0);\n+\n+\tuint8_t port1 = lag_get_link_status(config, 1);\n+\n+\tuint8_t status = (port1 << 1 | port0);\n+\treturn status;\n+}\n+\n+static int lag_activate_primary(lag_config_t *config)\n+{\n+\tint retval;\n+\n+\tuint8_t port_0_distribution;\n+\tuint8_t blocked_port;\n+\n+\tif (config->primary_port == 0) {\n+\t\t/* If port 0 is the active primary, then it take 100% of the hash distribution. */\n+\t\tport_0_distribution = 100;\n+\t\tblocked_port = LAG_PORT1_INDEX;\n+\t} else {\n+\t\t/* If port 1 is the active primary, then port 0 take 0% of the hash distribution. 
*/\n+\t\tport_0_distribution = 0;\n+\t\tblocked_port = LAG_PORT0_INDEX;\n+\t}\n+\n+\tretval =\n+\t\tlag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);\n+\n+\t/* Block Rx on the backup port */\n+\tretval += lag_set_port_block(0, blocked_port);\n+\n+\treturn retval;\n+}\n+\n+static int lag_activate_backup(lag_config_t *config)\n+{\n+\tint retval;\n+\n+\tuint8_t port_0_distribution;\n+\tuint8_t blocked_port;\n+\n+\tif (config->backup_port == 0) {\n+\t\t/* If port 0 is the active backup, then it take 100% of the hash distribution. */\n+\t\tport_0_distribution = 100;\n+\t\tblocked_port = LAG_PORT1_INDEX;\n+\t} else {\n+\t\t/* If port 1 is the active backup, then port 0 take 0% of the hash distribution. */\n+\t\tport_0_distribution = 0;\n+\t\tblocked_port = LAG_PORT0_INDEX;\n+\t}\n+\n+\t/* Tx only on the backup port */\n+\tretval =\n+\t\tlag_set_config(0, FLOW_LAG_SET_BALANCE, 0, port_0_distribution);\n+\n+\t/* Block Rx on the primary port */\n+\tretval += lag_set_port_block(0, blocked_port);\n+\n+\treturn retval;\n+}\n+\n+static int lag_active_backup(lag_config_t *config)\n+{\n+\tuint8_t backup_port_active = 0;\n+\n+\t/* Initialize with the primary port active */\n+\tlag_activate_primary(config);\n+\n+\twhile (config->lag_thread_active) {\n+\t\tusleep(500 *\n+\t\t       1000); /* 500 ms sleep between testing the link status. */\n+\n+\t\tbool primary_port_status =\n+\t\t\tlag_get_link_status(config, config->primary_port);\n+\n+\t\tif (!primary_port_status) {\n+\t\t\tbool backup_port_status =\n+\t\t\t\tlag_get_link_status(config, config->backup_port);\n+\t\t\t/* If the backup port has been activated, no need to do more. */\n+\t\t\tif (backup_port_active)\n+\t\t\t\tcontinue;\n+\n+\t\t\t/* If the backup port is up, flip to it. */\n+\t\t\tif (backup_port_status) {\n+\t\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t\t       \"LAG: primary port down => swapping to backup port\\n\");\n+\t\t\t\tlag_activate_backup(config);\n+\t\t\t\tbackup_port_active = 1;\n+\t\t\t}\n+\t\t} else {\n+\t\t\t/* If using the backup port and primary come back. */\n+\t\t\tif (backup_port_active) {\n+\t\t\t\tNT_LOG(DBG, ETHDEV,\n+\t\t\t\t       \"LAG: primary port restored => swapping to primary port\\n\");\n+\t\t\t\tlag_activate_primary(config);\n+\t\t\t\tbackup_port_active = 0;\n+\t\t\t} /* Backup is active, while primary is restored. 
*/\n+\t\t} /* Primary port status */\n+\t}\n+\n+\treturn 0;\n+}\n+\n+typedef int (*lag_aa_action)(lag_config_t *config);\n+\n+/* port 0 is LSB and port 1 is MSB */\n+enum lag_state_e {\n+\tP0DOWN_P1DOWN = 0b00,\n+\tP0UP_P1DOWN = 0b01,\n+\tP0DOWN_P1UP = 0b10,\n+\tP0UP_P1UP = 0b11\n+};\n+\n+struct lag_action_s {\n+\tenum lag_state_e src_state;\n+\tenum lag_state_e dst_state;\n+\tlag_aa_action action;\n+};\n+\n+struct lag_action_s actions[] = {\n+\t/* No action in same state */\n+\t{ P0UP_P1UP, P0UP_P1UP, lag_nop },\n+\t{ P0UP_P1DOWN, P0UP_P1DOWN, lag_nop },\n+\t{ P0DOWN_P1UP, P0DOWN_P1UP, lag_nop },\n+\t{ P0DOWN_P1DOWN, P0DOWN_P1DOWN, lag_nop },\n+\n+\t/* UU start */\n+\t{ P0UP_P1UP, P0UP_P1DOWN, lag_port0_active },\n+\t{ P0UP_P1UP, P0DOWN_P1UP, lag_port1_active },\n+\t{ P0UP_P1UP, P0DOWN_P1DOWN, lag_notx },\n+\n+\t/* UD start */\n+\t{ P0UP_P1DOWN, P0DOWN_P1DOWN, lag_notx },\n+\t{ P0UP_P1DOWN, P0DOWN_P1UP, lag_port1_active },\n+\t{ P0UP_P1DOWN, P0UP_P1UP, lag_balance },\n+\n+\t/* DU start */\n+\t{ P0DOWN_P1UP, P0DOWN_P1DOWN, lag_notx },\n+\t{ P0DOWN_P1UP, P0UP_P1DOWN, lag_port0_active },\n+\t{ P0DOWN_P1UP, P0UP_P1UP, lag_balance },\n+\n+\t/* DD start */\n+\t{ P0DOWN_P1DOWN, P0DOWN_P1UP, lag_port1_active },\n+\t{ P0DOWN_P1DOWN, P0UP_P1DOWN, lag_port0_active },\n+\t{ P0DOWN_P1DOWN, P0UP_P1UP, lag_balance },\n+};\n+\n+static lag_aa_action lookup_action(enum lag_state_e current_state,\n+\t\t\t\t   enum lag_state_e new_state)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < sizeof(actions) / sizeof(struct lag_action_s); i++) {\n+\t\tif (actions[i].src_state == current_state &&\n+\t\t\t\tactions[i].dst_state == new_state)\n+\t\t\treturn actions[i].action;\n+\t}\n+\treturn NULL;\n+}\n+\n+static int lag_active_active(lag_config_t *config)\n+{\n+\tenum lag_state_e ports_status;\n+\n+\t/* Set the initial state to 50/50% */\n+\tenum lag_state_e current_state = P0UP_P1UP;\n+\n+\tlag_balance(config);\n+\t/* No ports are blocked in active/active */\n+\tlag_set_port_block(0, 0);\n+\n+\tlag_aa_action action;\n+\n+\twhile (config->lag_thread_active) {\n+\t\t/* 500 ms sleep between testing the link status. 
*/\n+\t\tusleep(500 * 1000);\n+\n+\t\tports_status = lag_get_status(config);\n+\n+\t\taction = lookup_action(current_state, ports_status);\n+\t\taction(config);\n+\n+\t\tcurrent_state = ports_status;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static void *lag_management(void *arg)\n+{\n+\tlag_config_t *config = (lag_config_t *)arg;\n+\n+\tswitch (config->mode) {\n+\tcase BONDING_MODE_ACTIVE_BACKUP:\n+\t\tlag_active_backup(config);\n+\t\tbreak;\n+\n+\tcase BONDING_MODE_8023AD:\n+\t\tlag_active_active(config);\n+\t\tbreak;\n+\n+\tdefault:\n+\t\tfprintf(stderr, \"Unsupported NTbond mode\\n\");\n+\t\treturn NULL;\n+\t}\n+\n+\treturn NULL;\n+}\ndiff --git a/drivers/net/ntnic/ntnic_ethdev.h b/drivers/net/ntnic/ntnic_ethdev.h\nnew file mode 100644\nindex 0000000000..ee0d84ce82\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_ethdev.h\n@@ -0,0 +1,355 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef __NTNIC_ETHDEV_H__\n+#define __NTNIC_ETHDEV_H__\n+\n+#include <rte_ether.h>\n+#include <rte_version.h> /* RTE_VERSION, RTE_VERSION_NUM */\n+#include <rte_mtr_driver.h>\n+#include <rte_mbuf.h>\n+#include <rte_pci.h>\n+#include <ethdev_pci.h>\n+\n+#include \"ntos_system.h\"\n+#include \"ntnic_dbsconfig.h\"\n+#include \"stream_binary_flow_api.h\"\n+\n+#if (RTE_VERSION_NUM(22, 07, 0, 0) <= RTE_VERSION)\n+#undef ETH_LINK_HALF_DUPLEX\n+#undef ETH_LINK_FULL_DUPLEX\n+#undef ETH_LINK_DOWN\n+#undef ETH_LINK_UP\n+#undef ETH_LINK_FIXED\n+#undef ETH_LINK_AUTONEG\n+#undef ETH_SPEED_NUM_NONE\n+#undef ETH_SPEED_NUM_10M\n+#undef ETH_SPEED_NUM_100M\n+#undef ETH_SPEED_NUM_1G\n+#undef ETH_SPEED_NUM_2_5G\n+#undef ETH_SPEED_NUM_5G\n+#undef ETH_SPEED_NUM_10G\n+#undef ETH_SPEED_NUM_20G\n+#undef ETH_SPEED_NUM_25G\n+#undef ETH_SPEED_NUM_40G\n+#undef ETH_SPEED_NUM_50G\n+#undef ETH_SPEED_NUM_56G\n+#undef ETH_SPEED_NUM_100G\n+#undef ETH_SPEED_NUM_200G\n+#undef ETH_SPEED_NUM_UNKNOWN\n+#undef ETH_LINK_SPEED_AUTONEG\n+#undef ETH_LINK_SPEED_FIXED\n+#undef ETH_LINK_SPEED_10M_HD\n+#undef ETH_LINK_SPEED_10M\n+#undef ETH_LINK_SPEED_100M_HD\n+#undef ETH_LINK_SPEED_100M\n+#undef ETH_LINK_SPEED_1G\n+#undef ETH_LINK_SPEED_2_5G\n+#undef ETH_LINK_SPEED_5G\n+#undef ETH_LINK_SPEED_10G\n+#undef ETH_LINK_SPEED_20G\n+#undef ETH_LINK_SPEED_25G\n+#undef ETH_LINK_SPEED_40G\n+#undef ETH_LINK_SPEED_50G\n+#undef ETH_LINK_SPEED_56G\n+#undef ETH_LINK_SPEED_100G\n+#undef ETH_LINK_SPEED_200G\n+#undef ETH_RSS_IP\n+#undef ETH_RSS_UDP\n+#undef ETH_RSS_TCP\n+#undef ETH_RSS_SCTP\n+#define ETH_LINK_HALF_DUPLEX RTE_ETH_LINK_HALF_DUPLEX\n+#define ETH_LINK_FULL_DUPLEX RTE_ETH_LINK_FULL_DUPLEX\n+#define ETH_LINK_DOWN RTE_ETH_LINK_DOWN\n+#define ETH_LINK_UP RTE_ETH_LINK_UP\n+#define ETH_LINK_FIXED RTE_ETH_LINK_FIXED\n+#define ETH_LINK_AUTONEG RTE_ETH_LINK_AUTONEG\n+#define ETH_SPEED_NUM_NONE RTE_ETH_SPEED_NUM_NONE\n+#define ETH_SPEED_NUM_10M RTE_ETH_SPEED_NUM_10M\n+#define ETH_SPEED_NUM_100M RTE_ETH_SPEED_NUM_100M\n+#define ETH_SPEED_NUM_1G RTE_ETH_SPEED_NUM_1G\n+#define ETH_SPEED_NUM_2_5G RTE_ETH_SPEED_NUM_2_5G\n+#define ETH_SPEED_NUM_5G RTE_ETH_SPEED_NUM_5G\n+#define ETH_SPEED_NUM_10G RTE_ETH_SPEED_NUM_10G\n+#define ETH_SPEED_NUM_20G RTE_ETH_SPEED_NUM_20G\n+#define ETH_SPEED_NUM_25G RTE_ETH_SPEED_NUM_25G\n+#define ETH_SPEED_NUM_40G RTE_ETH_SPEED_NUM_40G\n+#define ETH_SPEED_NUM_50G RTE_ETH_SPEED_NUM_50G\n+#define ETH_SPEED_NUM_56G RTE_ETH_SPEED_NUM_56G\n+#define ETH_SPEED_NUM_100G RTE_ETH_SPEED_NUM_100G\n+#define ETH_SPEED_NUM_200G RTE_ETH_SPEED_NUM_200G\n+#define ETH_SPEED_NUM_UNKNOWN RTE_ETH_SPEED_NUM_UNKNOWN\n+#define 
ETH_LINK_SPEED_AUTONEG RTE_ETH_LINK_SPEED_AUTONEG\n+#define ETH_LINK_SPEED_FIXED RTE_ETH_LINK_SPEED_FIXED\n+#define ETH_LINK_SPEED_10M_HD RTE_ETH_LINK_SPEED_10M_HD\n+#define ETH_LINK_SPEED_10M RTE_ETH_LINK_SPEED_10M\n+#define ETH_LINK_SPEED_100M_HD RTE_ETH_LINK_SPEED_100M_HD\n+#define ETH_LINK_SPEED_100M RTE_ETH_LINK_SPEED_100M\n+#define ETH_LINK_SPEED_1G RTE_ETH_LINK_SPEED_1G\n+#define ETH_LINK_SPEED_2_5G RTE_ETH_LINK_SPEED_2_5G\n+#define ETH_LINK_SPEED_5G RTE_ETH_LINK_SPEED_5G\n+#define ETH_LINK_SPEED_10G RTE_ETH_LINK_SPEED_10G\n+#define ETH_LINK_SPEED_20G RTE_ETH_LINK_SPEED_20G\n+#define ETH_LINK_SPEED_25G RTE_ETH_LINK_SPEED_25G\n+#define ETH_LINK_SPEED_40G RTE_ETH_LINK_SPEED_40G\n+#define ETH_LINK_SPEED_50G RTE_ETH_LINK_SPEED_50G\n+#define ETH_LINK_SPEED_56G RTE_ETH_LINK_SPEED_56G\n+#define ETH_LINK_SPEED_100G RTE_ETH_LINK_SPEED_100G\n+#define ETH_LINK_SPEED_200G RTE_ETH_LINK_SPEED_200G\n+#define ETH_RSS_IP RTE_ETH_RSS_IP\n+#define ETH_RSS_UDP RTE_ETH_RSS_UDP\n+#define ETH_RSS_TCP RTE_ETH_RSS_TCP\n+#define ETH_RSS_SCTP RTE_ETH_RSS_SCTP\n+#define ETH_RSS_IPV4 RTE_ETH_RSS_IPV4\n+#define ETH_RSS_FRAG_IPV4 RTE_ETH_RSS_FRAG_IPV4\n+#define ETH_RSS_NONFRAG_IPV4_OTHER RTE_ETH_RSS_NONFRAG_IPV4_OTHER\n+#define ETH_RSS_IPV6 RTE_ETH_RSS_IPV6\n+#define ETH_RSS_FRAG_IPV6 RTE_ETH_RSS_FRAG_IPV6\n+#define ETH_RSS_NONFRAG_IPV6_OTHER RTE_ETH_RSS_NONFRAG_IPV6_OTHER\n+#define ETH_RSS_IPV6_EX RTE_ETH_RSS_IPV6_EX\n+#define ETH_RSS_C_VLAN RTE_ETH_RSS_C_VLAN\n+#define ETH_RSS_L3_DST_ONLY RTE_ETH_RSS_L3_DST_ONLY\n+#define ETH_RSS_L3_SRC_ONLY RTE_ETH_RSS_L3_SRC_ONLY\n+#endif\n+\n+#define NUM_MAC_ADDRS_PER_PORT (16U)\n+#define NUM_MULTICAST_ADDRS_PER_PORT (16U)\n+\n+#define MAX_FPGA_VIRTUAL_PORTS_SUPPORTED 256\n+\n+/* Total max ports per NT NFV NIC */\n+#define MAX_NTNIC_PORTS 2\n+\n+/* Max RSS queues */\n+#define MAX_QUEUES 125\n+\n+#define SG_NB_HW_RX_DESCRIPTORS 1024\n+#define SG_NB_HW_TX_DESCRIPTORS 1024\n+#define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)\n+#define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)\n+\n+#define SG_HDR_SIZE 12\n+\n+/* VQ buffers needed to fit all data in packet + header */\n+#define NUM_VQ_SEGS(_data_size_) \\\n+\t({ \\\n+\t\tsize_t _size = (_data_size_); \\\n+\t\tsize_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE) ? 
\\\n+\t\t(((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) / \\\n+\t\tSG_HW_TX_PKT_BUFFER_SIZE) : 1; \\\n+\t\t_segment_count; \\\n+\t})\n+\n+\n+#define VIRTQ_DESCR_IDX(_tx_pkt_idx_) \\\n+\t(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)\n+\n+#define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) \\\n+\t(((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)\n+\n+#define MAX_REL_VQS 128\n+\n+/* Functions: */\n+struct pmd_internals *vp_vhid_instance_ready(int vhid);\n+struct pmd_internals *vp_path_instance_ready(const char *path);\n+int setup_virtual_pf_representor_base(struct rte_pci_device *dev);\n+int nthw_create_vf_interface_dpdk(struct rte_pci_device *pci_dev);\n+int nthw_remove_vf_interface_dpdk(struct rte_pci_device *pci_dev);\n+nthw_dbs_t *get_pdbs_from_pci(struct rte_pci_addr pci_addr);\n+enum fpga_info_profile get_fpga_profile_from_pci(struct rte_pci_addr pci_addr);\n+int register_release_virtqueue_info(struct nthw_virt_queue *vq, int rx,\n+\t\t\t\t    int managed);\n+int de_register_release_virtqueue_info(struct nthw_virt_queue *vq);\n+int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,\n+\t\t\t   uint16_t vq_descr_idx,\n+\t\t\t   struct nthw_memory_descriptor *vq_bufs, int max_segs,\n+\t\t\t   struct rte_mbuf *mbuf);\n+\n+extern int lag_active;\n+extern uint64_t rte_tsc_freq;\n+extern rte_spinlock_t hwlock;\n+\n+/* Structs: */\n+\n+#define SG_HDR_SIZE 12\n+\n+struct _pkt_hdr_rx {\n+\tuint32_t cap_len : 14;\n+\tuint32_t fid : 10;\n+\tuint32_t ofs1 : 8;\n+\tuint32_t ip_prot : 8;\n+\tuint32_t port : 13;\n+\tuint32_t descr : 8;\n+\tuint32_t descr_12b : 1;\n+\tuint32_t color_type : 2;\n+\tuint32_t color : 32;\n+};\n+\n+struct _pkt_hdr_tx {\n+\tuint32_t cap_len : 14;\n+\tuint32_t lso_cso0 : 9;\n+\tuint32_t lso_cso1 : 9;\n+\tuint32_t lso_cso2 : 8;\n+\t/* all 1's : use implicit in-port. 0-127 queue index. 0x80 + phy-port to phy */\n+\tuint32_t bypass_port : 13;\n+\tuint32_t descr : 8;\n+\tuint32_t descr_12b : 1;\n+\tuint32_t color_type : 2;\n+\tuint32_t color : 32;\n+};\n+\n+/* Compile time verification of scatter gather header size. */\n+typedef char check_sg_pkt_rx_hdr_size\n+[(sizeof(struct _pkt_hdr_rx) == SG_HDR_SIZE) ? 1 : -1];\n+typedef char check_sg_pkt_tx_hdr_size\n+[(sizeof(struct _pkt_hdr_tx) == SG_HDR_SIZE) ? 
1 : -1];\n+\n+typedef void *handle_t;\n+\n+struct hwq_s {\n+\tint vf_num;\n+\tstruct nthw_memory_descriptor virt_queues_ctrl;\n+\tstruct nthw_memory_descriptor *pkt_buffers;\n+};\n+\n+struct ntnic_rx_queue {\n+\tstruct flow_queue_id_s\n+\t\tqueue; /* queue info - user id and hw queue index */\n+\n+\tstruct rte_mempool *mb_pool; /* mbuf memory pool */\n+\tuint16_t buf_size; /* size of data area in mbuf */\n+\tunsigned long rx_pkts; /* Rx packet statistics */\n+\tunsigned long rx_bytes; /* Rx bytes statistics */\n+\tunsigned long err_pkts; /* Rx error packet statistics */\n+\tint enabled; /* Enabling/disabling of this queue */\n+\n+\tstruct hwq_s hwq;\n+\tstruct nthw_virt_queue *vq;\n+\tint nb_hw_rx_descr;\n+\tnt_meta_port_type_t type;\n+\tuint32_t port; /* Rx port for this queue */\n+\tenum fpga_info_profile profile; /* Vswitch / Inline / Capture */\n+\n+} __rte_cache_aligned;\n+\n+struct ntnic_tx_queue {\n+\tstruct flow_queue_id_s\n+\t\tqueue; /* queue info - user id and hw queue index */\n+\tstruct hwq_s hwq;\n+\tstruct nthw_virt_queue *vq;\n+\tint nb_hw_tx_descr;\n+\t/* Used for bypass in NTDVIO0 header on  Tx - pre calculated */\n+\tint target_id;\n+\tnt_meta_port_type_t type;\n+\t/* only used for exception tx queue from OVS SW switching */\n+\tint rss_target_id;\n+\n+\tuint32_t port; /* Tx port for this queue */\n+\tunsigned long tx_pkts; /* Tx packet statistics */\n+\tunsigned long tx_bytes; /* Tx bytes statistics */\n+\tunsigned long err_pkts; /* Tx error packet stat */\n+\tint enabled; /* Enabling/disabling of this queue */\n+\tenum fpga_info_profile profile; /* Vswitch / Inline / Capture */\n+} __rte_cache_aligned;\n+\n+#define MAX_ARRAY_ENTRIES MAX_QUEUES\n+struct array_s {\n+\tuint32_t value[MAX_ARRAY_ENTRIES];\n+\tint count;\n+};\n+\n+/* Configurations related to LAG management */\n+typedef struct {\n+\tuint8_t mode;\n+\n+\tint8_t primary_port;\n+\tint8_t backup_port;\n+\n+\tuint32_t ntpl_rx_id;\n+\n+\tpthread_t lag_tid;\n+\tuint8_t lag_thread_active;\n+\n+\tstruct pmd_internals *internals;\n+} lag_config_t;\n+\n+#define BONDING_MODE_ACTIVE_BACKUP (1)\n+#define BONDING_MODE_8023AD (4)\n+struct nt_mtr_profile {\n+\tLIST_ENTRY(nt_mtr_profile) next;\n+\tuint32_t profile_id;\n+\tstruct rte_mtr_meter_profile profile;\n+};\n+\n+struct nt_mtr {\n+\tLIST_ENTRY(nt_mtr) next;\n+\tuint32_t mtr_id;\n+\tint shared;\n+\tstruct nt_mtr_profile *profile;\n+};\n+\n+enum virt_port_comm {\n+\tVIRT_PORT_NEGOTIATED_NONE,\n+\tVIRT_PORT_NEGOTIATED_SPLIT,\n+\tVIRT_PORT_NEGOTIATED_PACKED,\n+\tVIRT_PORT_USE_RELAY\n+};\n+\n+#define MAX_PATH_LEN 128\n+\n+struct pmd_internals {\n+\tconst struct rte_pci_device *pci_dev;\n+\n+\tstruct flow_eth_dev *flw_dev;\n+\n+\tchar name[20];\n+\tchar vhost_path[MAX_PATH_LEN];\n+\n+\tint n_intf_no;\n+\tint if_index;\n+\n+\tint lpbk_mode;\n+\n+\tuint8_t nb_ports_on_adapter;\n+\tuint8_t ts_multiplier;\n+\tuint16_t min_tx_pkt_size;\n+\tuint16_t max_tx_pkt_size;\n+\n+\tunsigned int nb_rx_queues; /* Number of Rx queues configured */\n+\tunsigned int nb_tx_queues; /* Number of Tx queues configured */\n+\tuint32_t port;\n+\tuint8_t port_id;\n+\n+\tnt_meta_port_type_t type;\n+\tstruct flow_queue_id_s vpq[MAX_QUEUES];\n+\tunsigned int vpq_nb_vq;\n+\tint vhid; /* if a virtual port type - the vhid */\n+\tenum virt_port_comm vport_comm; /* link and how split,packed,relay */\n+\tuint32_t vlan;\n+\n+\tlag_config_t *lag_config;\n+\n+\tstruct ntnic_rx_queue rxq_scg[MAX_QUEUES]; /* Array of Rx queues */\n+\tstruct ntnic_tx_queue txq_scg[MAX_QUEUES]; /* Array of Tx queues */\n+\n+\tstruct 
drv_s *p_drv;\n+\t/* Ethernet (MAC) addresses. Element number zero denotes default address. */\n+\tstruct rte_ether_addr eth_addrs[NUM_MAC_ADDRS_PER_PORT];\n+\t/* Multicast ethernet (MAC) addresses. */\n+\tstruct rte_ether_addr mc_addrs[NUM_MULTICAST_ADDRS_PER_PORT];\n+\n+\tLIST_HEAD(_profiles, nt_mtr_profile) mtr_profiles;\n+\tLIST_HEAD(_mtrs, nt_mtr) mtrs;\n+\n+\tuint64_t last_stat_rtc;\n+\tuint64_t rx_missed;\n+\n+\tstruct pmd_internals *next;\n+};\n+\n+void cleanup_flows(struct pmd_internals *internals);\n+int poll_statistics(struct pmd_internals *internals);\n+int debug_adapter_show_info(uint32_t pciident, FILE *pfh);\n+\n+#endif /* __NTNIC_ETHDEV_H__ */\ndiff --git a/drivers/net/ntnic/ntnic_filter/create_elements.h b/drivers/net/ntnic/ntnic_filter/create_elements.h\nnew file mode 100644\nindex 0000000000..e90643ec6b\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_filter/create_elements.h\n@@ -0,0 +1,1190 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef __CREATE_ELEMENTS_H__\n+#define __CREATE_ELEMENTS_H__\n+\n+#include \"stream_binary_flow_api.h\"\n+\n+#define MAX_ELEMENTS 64\n+#define MAX_ACTIONS 32\n+\n+#define MAX_COLOR_FLOW_STATS 0x400\n+\n+struct cnv_match_s {\n+\tstruct flow_elem flow_elem[MAX_ELEMENTS];\n+};\n+\n+struct tun_def_s {\n+\tstruct flow_elem *tun_definition;\n+\tstruct cnv_match_s match;\n+};\n+\n+struct cnv_attr_s {\n+\tstruct cnv_match_s match;\n+\tstruct flow_attr attr;\n+};\n+\n+struct cnv_action_s {\n+\tstruct flow_action flow_actions[MAX_ACTIONS];\n+\tstruct tun_def_s tun_def;\n+\tstruct flow_action_rss flow_rss;\n+\tstruct rte_flow_action_mark mark;\n+\tstruct flow_action_raw_encap encap;\n+\tstruct flow_action_raw_decap decap;\n+\tstruct flow_action_queue queue;\n+};\n+\n+/*\n+ * Only needed because it eases the use of statistics through NTAPI\n+ * for faster integration into NTAPI version of driver\n+ * Therefore, this is only a good idea when running on a temporary NTAPI\n+ * The query() functionality must go to flow engine, when moved to Open Source driver\n+ */\n+\n+struct rte_flow {\n+\tvoid *flw_hdl;\n+\tint used;\n+\tuint32_t flow_stat_id;\n+\n+\tuint64_t stat_pkts;\n+\tuint64_t stat_bytes;\n+\tuint8_t stat_tcp_flags;\n+};\n+\n+enum nt_rte_flow_item_type {\n+\tNT_RTE_FLOW_ITEM_TYPE_END = INT_MIN,\n+\tNT_RTE_FLOW_ITEM_TYPE_TAG,\n+\tNT_RTE_FLOW_ITEM_TYPE_TUNNEL,\n+};\n+\n+enum nt_rte_flow_action_type {\n+\tNT_RTE_FLOW_ACTION_TYPE_END = INT_MIN,\n+\tNT_RTE_FLOW_ACTION_TYPE_TAG,\n+\tNT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,\n+\tNT_RTE_FLOW_ACTION_TYPE_JUMP,\n+};\n+\n+static int convert_tables_initialized;\n+\n+#define MAX_RTE_ENUM_INDEX 127\n+\n+static int elem_list[MAX_RTE_ENUM_INDEX + 1];\n+static int action_list[MAX_RTE_ENUM_INDEX + 1];\n+\n+#ifdef RTE_FLOW_DEBUG\n+static const char *elem_list_str[MAX_RTE_ENUM_INDEX + 1];\n+static const char *action_list_str[MAX_RTE_ENUM_INDEX + 1];\n+#endif\n+\n+#define CNV_TO_ELEM(item) \\\n+\t({ \\\n+\t\tint _temp_item = (item); \\\n+\t\t((_temp_item >= 0 && _temp_item <= MAX_RTE_ENUM_INDEX) ? \\\n+\t\telem_list[_temp_item] : -1); \\\n+\t})\n+\n+\n+#define CNV_TO_ACTION(action)                                   \\\n+\t({                                                          \\\n+\t\tint _temp_action = (action);                            \\\n+\t\t(_temp_action >= 0 && _temp_action <= MAX_RTE_ENUM_INDEX) ? 
\\\n+\t\taction_list[_temp_action] : -1; \\\n+\t})\n+\n+\n+static uint32_t flow_stat_id_map[MAX_COLOR_FLOW_STATS];\n+static rte_spinlock_t flow_lock = RTE_SPINLOCK_INITIALIZER;\n+\n+static int convert_error(struct rte_flow_error *error,\n+\t\t\t struct flow_error *flow_error)\n+{\n+\tif (error) {\n+\t\terror->cause = NULL;\n+\t\terror->message = flow_error->message;\n+\n+\t\tif (flow_error->type == FLOW_ERROR_NONE ||\n+\t\t\t\tflow_error->type == FLOW_ERROR_SUCCESS)\n+\t\t\terror->type = RTE_FLOW_ERROR_TYPE_NONE;\n+\n+\t\telse\n+\t\t\terror->type = RTE_FLOW_ERROR_TYPE_UNSPECIFIED;\n+\t}\n+\treturn 0;\n+}\n+\n+/*\n+ * Map Flow MARK to flow stat id\n+ */\n+static uint32_t create_flow_stat_id_locked(uint32_t mark)\n+{\n+\tuint32_t flow_stat_id = mark & (MAX_COLOR_FLOW_STATS - 1);\n+\n+\twhile (flow_stat_id_map[flow_stat_id])\n+\t\tflow_stat_id = (flow_stat_id + 1) & (MAX_COLOR_FLOW_STATS - 1);\n+\n+\tflow_stat_id_map[flow_stat_id] = mark + 1;\n+\treturn flow_stat_id;\n+}\n+\n+static uint32_t create_flow_stat_id(uint32_t mark)\n+{\n+\trte_spinlock_lock(&flow_lock);\n+\tuint32_t ret = create_flow_stat_id_locked(mark);\n+\n+\trte_spinlock_unlock(&flow_lock);\n+\treturn ret;\n+}\n+\n+static void delete_flow_stat_id_locked(uint32_t flow_stat_id)\n+{\n+\tif (flow_stat_id < MAX_COLOR_FLOW_STATS)\n+\t\tflow_stat_id_map[flow_stat_id] = 0;\n+}\n+\n+static void initialize_global_cnv_tables(void)\n+{\n+\tif (convert_tables_initialized)\n+\t\treturn;\n+\n+\tmemset(elem_list, -1, sizeof(elem_list));\n+\telem_list[RTE_FLOW_ITEM_TYPE_END] = FLOW_ELEM_TYPE_END;\n+\telem_list[RTE_FLOW_ITEM_TYPE_ANY] = FLOW_ELEM_TYPE_ANY;\n+\telem_list[RTE_FLOW_ITEM_TYPE_ETH] = FLOW_ELEM_TYPE_ETH;\n+\telem_list[RTE_FLOW_ITEM_TYPE_VLAN] = FLOW_ELEM_TYPE_VLAN;\n+\telem_list[RTE_FLOW_ITEM_TYPE_IPV4] = FLOW_ELEM_TYPE_IPV4;\n+\telem_list[RTE_FLOW_ITEM_TYPE_IPV6] = FLOW_ELEM_TYPE_IPV6;\n+\telem_list[RTE_FLOW_ITEM_TYPE_UDP] = FLOW_ELEM_TYPE_UDP;\n+\telem_list[RTE_FLOW_ITEM_TYPE_SCTP] = FLOW_ELEM_TYPE_SCTP;\n+\telem_list[RTE_FLOW_ITEM_TYPE_TCP] = FLOW_ELEM_TYPE_TCP;\n+\telem_list[RTE_FLOW_ITEM_TYPE_ICMP] = FLOW_ELEM_TYPE_ICMP;\n+\telem_list[RTE_FLOW_ITEM_TYPE_VXLAN] = FLOW_ELEM_TYPE_VXLAN;\n+\telem_list[RTE_FLOW_ITEM_TYPE_GTP] = FLOW_ELEM_TYPE_GTP;\n+\telem_list[RTE_FLOW_ITEM_TYPE_PORT_ID] = FLOW_ELEM_TYPE_PORT_ID;\n+\telem_list[RTE_FLOW_ITEM_TYPE_TAG] = FLOW_ELEM_TYPE_TAG;\n+\telem_list[RTE_FLOW_ITEM_TYPE_VOID] = FLOW_ELEM_TYPE_VOID;\n+\n+#ifdef RTE_FLOW_DEBUG\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_END] = \"FLOW_ELEM_TYPE_END\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_ANY] = \"FLOW_ELEM_TYPE_ANY\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_ETH] = \"FLOW_ELEM_TYPE_ETH\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_VLAN] = \"FLOW_ELEM_TYPE_VLAN\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_IPV4] = \"FLOW_ELEM_TYPE_IPV4\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_IPV6] = \"FLOW_ELEM_TYPE_IPV6\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_UDP] = \"FLOW_ELEM_TYPE_UDP\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_SCTP] = \"FLOW_ELEM_TYPE_SCTP\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_TCP] = \"FLOW_ELEM_TYPE_TCP\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_ICMP] = \"FLOW_ELEM_TYPE_ICMP\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_VXLAN] = \"FLOW_ELEM_TYPE_VXLAN\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_GTP] = \"FLOW_ELEM_TYPE_GTP\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_PORT_ID] = \"FLOW_ELEM_TYPE_PORT_ID\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_TAG] = \"FLOW_ELEM_TYPE_TAG\";\n+\telem_list_str[RTE_FLOW_ITEM_TYPE_VOID] = \"FLOW_ELEM_TYPE_VOID\";\n+#endif\n+\n+\tmemset(action_list, -1, 
sizeof(action_list));\n+\taction_list[RTE_FLOW_ACTION_TYPE_END] = FLOW_ACTION_TYPE_END;\n+\taction_list[RTE_FLOW_ACTION_TYPE_MARK] = FLOW_ACTION_TYPE_MARK;\n+\taction_list[RTE_FLOW_ACTION_TYPE_SET_TAG] = FLOW_ACTION_TYPE_SET_TAG;\n+\taction_list[RTE_FLOW_ACTION_TYPE_DROP] = FLOW_ACTION_TYPE_DROP;\n+\taction_list[RTE_FLOW_ACTION_TYPE_COUNT] = FLOW_ACTION_TYPE_COUNT;\n+\taction_list[RTE_FLOW_ACTION_TYPE_RSS] = FLOW_ACTION_TYPE_RSS;\n+\taction_list[RTE_FLOW_ACTION_TYPE_PORT_ID] = FLOW_ACTION_TYPE_PORT_ID;\n+\taction_list[RTE_FLOW_ACTION_TYPE_QUEUE] = FLOW_ACTION_TYPE_QUEUE;\n+\taction_list[RTE_FLOW_ACTION_TYPE_JUMP] = FLOW_ACTION_TYPE_JUMP;\n+\taction_list[RTE_FLOW_ACTION_TYPE_METER] = FLOW_ACTION_TYPE_METER;\n+\taction_list[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =\n+\t\tFLOW_ACTION_TYPE_VXLAN_ENCAP;\n+\taction_list[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =\n+\t\tFLOW_ACTION_TYPE_VXLAN_DECAP;\n+\taction_list[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =\n+\t\tFLOW_ACTION_TYPE_PUSH_VLAN;\n+\taction_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =\n+\t\tFLOW_ACTION_TYPE_SET_VLAN_VID;\n+\taction_list[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =\n+\t\tFLOW_ACTION_TYPE_SET_VLAN_PCP;\n+\taction_list[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =\n+\t\tFLOW_ACTION_TYPE_POP_VLAN;\n+\taction_list[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =\n+\t\tFLOW_ACTION_TYPE_RAW_ENCAP;\n+\taction_list[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =\n+\t\tFLOW_ACTION_TYPE_RAW_DECAP;\n+\taction_list[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =\n+\t\tFLOW_ACTION_TYPE_MODIFY_FIELD;\n+\n+#ifdef RTE_FLOW_DEBUG\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_END] = \"FLOW_ACTION_TYPE_END\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_MARK] = \"FLOW_ACTION_TYPE_MARK\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_SET_TAG] =\n+\t\t\"FLOW_ACTION_TYPE_SET_TAG\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_DROP] = \"FLOW_ACTION_TYPE_DROP\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_COUNT] = \"FLOW_ACTION_TYPE_COUNT\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_RSS] = \"FLOW_ACTION_TYPE_RSS\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_PORT_ID] =\n+\t\t\"FLOW_ACTION_TYPE_PORT_ID\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_QUEUE] = \"FLOW_ACTION_TYPE_QUEUE\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_JUMP] = \"FLOW_ACTION_TYPE_JUMP\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_METER] = \"FLOW_ACTION_TYPE_METER\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] =\n+\t\t\"FLOW_ACTION_TYPE_VXLAN_ENCAP\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] =\n+\t\t\"FLOW_ACTION_TYPE_VXLAN_DECAP\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] =\n+\t\t\"FLOW_ACTION_TYPE_PUSH_VLAN\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] =\n+\t\t\"FLOW_ACTION_TYPE_SET_VLAN_VID\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] =\n+\t\t\"FLOW_ACTION_TYPE_SET_VLAN_PCP\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] =\n+\t\t\"FLOW_ACTION_TYPE_POP_VLAN\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] =\n+\t\t\"FLOW_ACTION_TYPE_RAW_ENCAP\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_RAW_DECAP] =\n+\t\t\"FLOW_ACTION_TYPE_RAW_DECAP\";\n+\taction_list_str[RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] =\n+\t\t\"FLOW_ACTION_TYPE_MODIFY_FIELD\";\n+#endif\n+\n+\tconvert_tables_initialized = 1;\n+}\n+\n+static int interpret_raw_data(uint8_t *data, uint8_t *preserve, int size,\n+\t\t\t      struct flow_elem *out)\n+{\n+\tint hdri = 0;\n+\tint pkti = 0;\n+\n+\t/* Ethernet */\n+\tif (size - pkti == 0)\n+\t\tgoto interpret_end;\n+\tif (size - pkti < (int)sizeof(struct rte_ether_hdr))\n+\t\treturn 
-1;\n+\n+\tout[hdri].type = FLOW_ELEM_TYPE_ETH;\n+\tout[hdri].spec = &data[pkti];\n+\tout[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;\n+\n+\trte_be16_t ether_type =\n+\t\t((struct rte_ether_hdr *)&data[pkti])->ether_type;\n+\n+\thdri += 1;\n+\tpkti += sizeof(struct rte_ether_hdr);\n+\n+\tif (size - pkti == 0)\n+\t\tgoto interpret_end;\n+\n+\t/* VLAN */\n+\twhile (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||\n+\t\t\tether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ) ||\n+\t\t\tether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ1)) {\n+\t\tif (size - pkti == 0)\n+\t\t\tgoto interpret_end;\n+\t\tif (size - pkti < (int)sizeof(struct rte_vlan_hdr))\n+\t\t\treturn -1;\n+\n+\t\tout[hdri].type = FLOW_ELEM_TYPE_VLAN;\n+\t\tout[hdri].spec = &data[pkti];\n+\t\tout[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;\n+\n+\t\tether_type = ((struct rte_vlan_hdr *)&data[pkti])->eth_proto;\n+\n+\t\thdri += 1;\n+\t\tpkti += sizeof(struct rte_vlan_hdr);\n+\t}\n+\n+\tif (size - pkti == 0)\n+\t\tgoto interpret_end;\n+\n+\t/* Layer 3 */\n+\tuint8_t next_header = 0;\n+\n+\tif (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) &&\n+\t\t\t(data[pkti] & 0xF0) == 0x40) {\n+\t\tif (size - pkti < (int)sizeof(struct rte_ipv4_hdr))\n+\t\t\treturn -1;\n+\n+\t\tout[hdri].type = FLOW_ELEM_TYPE_IPV4;\n+\t\tout[hdri].spec = &data[pkti];\n+\t\tout[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;\n+\n+\t\tnext_header = data[pkti + 9];\n+\n+\t\thdri += 1;\n+\t\tpkti += sizeof(struct rte_ipv4_hdr);\n+\t} else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) &&\n+\t\t\t(data[pkti] & 0xF0) == 0x60) {\n+\t\tif (size - pkti < (int)sizeof(struct rte_ipv6_hdr))\n+\t\t\treturn -1;\n+\n+\t\tout[hdri].type = FLOW_ELEM_TYPE_IPV6;\n+\t\tout[hdri].spec = &data[pkti];\n+\t\tout[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;\n+\n+\t\tnext_header = data[pkti + 6];\n+\n+\t\thdri += 1;\n+\t\tpkti += sizeof(struct rte_ipv6_hdr);\n+\n+\t} else {\n+\t\treturn -1;\n+\t}\n+\n+\tif (size - pkti == 0)\n+\t\tgoto interpret_end;\n+\n+\t/* Layer 4 */\n+\tint gtpu_encap = 0;\n+\n+\tif (next_header == 1) { /* ICMP */\n+\t\tif (size - pkti < (int)sizeof(struct rte_icmp_hdr))\n+\t\t\treturn -1;\n+\n+\t\tout[hdri].type = FLOW_ELEM_TYPE_ICMP;\n+\t\tout[hdri].spec = &data[pkti];\n+\t\tout[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;\n+\n+\t\thdri += 1;\n+\t\tpkti += sizeof(struct rte_icmp_hdr);\n+\t} else if (next_header == 6) { /* TCP */\n+\t\tif (size - pkti < (int)sizeof(struct rte_tcp_hdr))\n+\t\t\treturn -1;\n+\n+\t\tout[hdri].type = FLOW_ELEM_TYPE_TCP;\n+\t\tout[hdri].spec = &data[pkti];\n+\t\tout[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;\n+\n+\t\thdri += 1;\n+\t\tpkti += sizeof(struct rte_tcp_hdr);\n+\t} else if (next_header == 17) { /* UDP */\n+\t\tif (size - pkti < (int)sizeof(struct rte_udp_hdr))\n+\t\t\treturn -1;\n+\n+\t\tout[hdri].type = FLOW_ELEM_TYPE_UDP;\n+\t\tout[hdri].spec = &data[pkti];\n+\t\tout[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;\n+\n+\t\tgtpu_encap = ((struct rte_udp_hdr *)&data[pkti])->dst_port ==\n+\t\t\t     rte_cpu_to_be_16(RTE_GTPU_UDP_PORT);\n+\n+\t\thdri += 1;\n+\t\tpkti += sizeof(struct rte_udp_hdr);\n+\t} else if (next_header == 132) { /* SCTP */\n+\t\tif (size - pkti < (int)sizeof(struct rte_sctp_hdr))\n+\t\t\treturn -1;\n+\n+\t\tout[hdri].type = FLOW_ELEM_TYPE_SCTP;\n+\t\tout[hdri].spec = &data[pkti];\n+\t\tout[hdri].mask = (preserve != NULL) ? 
&preserve[pkti] : NULL;\n+\n+\t\thdri += 1;\n+\t\tpkti += sizeof(struct rte_sctp_hdr);\n+\t} else {\n+\t\treturn -1;\n+\t}\n+\n+\tif (size - pkti == 0)\n+\t\tgoto interpret_end;\n+\n+\t/* GTPv1-U */\n+\tif (gtpu_encap) {\n+\t\tif (size - pkti < (int)sizeof(struct rte_gtp_hdr))\n+\t\t\treturn -1;\n+\n+\t\tout[hdri].type = FLOW_ELEM_TYPE_GTP;\n+\t\tout[hdri].spec = &data[pkti];\n+\t\tout[hdri].mask = (preserve != NULL) ? &preserve[pkti] : NULL;\n+\n+\t\tint extension_present_bit =\n+\t\t\t((struct rte_gtp_hdr *)&data[pkti])->e;\n+\n+\t\thdri += 1;\n+\t\tpkti += sizeof(struct rte_gtp_hdr);\n+\n+\t\tif (extension_present_bit) {\n+\t\t\tif (size - pkti <\n+\t\t\t\t\t(int)sizeof(struct rte_gtp_hdr_ext_word))\n+\t\t\t\treturn -1;\n+\n+\t\t\tout[hdri].type = FLOW_ELEM_TYPE_GTP;\n+\t\t\tout[hdri].spec = &data[pkti];\n+\t\t\tout[hdri].mask = (preserve != NULL) ? &preserve[pkti] :\n+\t\t\t\t\t NULL;\n+\n+\t\t\tuint8_t next_ext =\n+\t\t\t\t((struct rte_gtp_hdr_ext_word *)&data[pkti])\n+\t\t\t\t->next_ext;\n+\n+\t\t\thdri += 1;\n+\t\t\tpkti += sizeof(struct rte_gtp_hdr_ext_word);\n+\n+\t\t\twhile (next_ext) {\n+\t\t\t\tsize_t ext_len = data[pkti] * 4;\n+\n+\t\t\t\tif (size - pkti < (int)ext_len)\n+\t\t\t\t\treturn -1;\n+\n+\t\t\t\tout[hdri].type = FLOW_ELEM_TYPE_GTP;\n+\t\t\t\tout[hdri].spec = &data[pkti];\n+\t\t\t\tout[hdri].mask = (preserve != NULL) ?\n+\t\t\t\t\t\t &preserve[pkti] :\n+\t\t\t\t\t\t NULL;\n+\n+\t\t\t\tnext_ext = data[pkti + ext_len - 1];\n+\n+\t\t\t\thdri += 1;\n+\t\t\t\tpkti += ext_len;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tif (size - pkti != 0)\n+\t\treturn -1;\n+\n+interpret_end:\n+\tout[hdri].type = FLOW_ELEM_TYPE_END;\n+\tout[hdri].spec = NULL;\n+\tout[hdri].mask = NULL;\n+\n+\treturn hdri + 1;\n+}\n+\n+static int create_attr(struct cnv_attr_s *attribute,\n+\t\t       const struct rte_flow_attr *attr)\n+{\n+\tmemset(&attribute->attr, 0x0, sizeof(struct flow_attr));\n+\tif (attr) {\n+\t\tattribute->attr.group = attr->group;\n+\t\tattribute->attr.priority = attr->priority;\n+\t}\n+\treturn 0;\n+}\n+\n+static int create_match_elements(struct cnv_match_s *match,\n+\t\t\t\t const struct rte_flow_item items[],\n+\t\t\t\t int max_elem)\n+{\n+\tint eidx = 0;\n+\tint iter_idx = 0;\n+\tint type = -1;\n+\n+\tif (!items) {\n+\t\tNT_LOG(ERR, FILTER, \"ERROR no items to iterate!\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tif (!convert_tables_initialized)\n+\t\tinitialize_global_cnv_tables();\n+\n+\tdo {\n+\t\ttype = CNV_TO_ELEM(items[iter_idx].type);\n+\t\tif (type < 0) {\n+\t\t\tif ((int)items[iter_idx].type ==\n+\t\t\t\t\tNT_RTE_FLOW_ITEM_TYPE_TUNNEL) {\n+\t\t\t\ttype = FLOW_ELEM_TYPE_TUNNEL;\n+\t\t\t} else {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR unknown item type received!\\n\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\n+\t\tif (type >= 0) {\n+\t\t\tif (items[iter_idx].last) {\n+\t\t\t\t/* Ranges are not supported yet */\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR ITEM-RANGE SETUP - NOT SUPPORTED!\\n\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\n+\t\t\tif (eidx == max_elem) {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR TOO MANY ELEMENTS ENCOUNTERED!\\n\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\n+#ifdef RTE_FLOW_DEBUG\n+\t\t\tNT_LOG(INF, FILTER,\n+\t\t\t       \"RTE ITEM -> FILTER FLOW ELEM - %i -> %i - %s\\n\",\n+\t\t\t       items[iter_idx].type, type,\n+\t\t\t       ((int)items[iter_idx].type >= 0) ?\n+\t\t\t       elem_list_str[items[iter_idx].type] :\n+\t\t\t       \"FLOW_ELEM_TYPE_TUNNEL\");\n+\n+\t\t\tswitch (type) {\n+\t\t\tcase FLOW_ELEM_TYPE_ETH:\n+\t\t\t\tif 
(items[iter_idx].spec) {\n+\t\t\t\t\tconst struct flow_elem_eth *eth =\n+\t\t\t\t\t\t\titems[iter_idx].spec;\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"FLOW_ELEM_TYPE_ETH SPEC: dst=%02X:%02X:%02X:%02X:%02X:%02X\\n\",\n+\t\t\t\t\t       eth->d_addr.addr_b[0] & 0xFF,\n+\t\t\t\t\t       eth->d_addr.addr_b[1] & 0xFF,\n+\t\t\t\t\t       eth->d_addr.addr_b[2] & 0xFF,\n+\t\t\t\t\t       eth->d_addr.addr_b[3] & 0xFF,\n+\t\t\t\t\t       eth->d_addr.addr_b[4] & 0xFF,\n+\t\t\t\t\t       eth->d_addr.addr_b[5] & 0xFF);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                         src=%02X:%02X:%02X:%02X:%02X:%02X\\n\",\n+\t\t\t\t\t       eth->s_addr.addr_b[0] & 0xFF,\n+\t\t\t\t\t       eth->s_addr.addr_b[1] & 0xFF,\n+\t\t\t\t\t       eth->s_addr.addr_b[2] & 0xFF,\n+\t\t\t\t\t       eth->s_addr.addr_b[3] & 0xFF,\n+\t\t\t\t\t       eth->s_addr.addr_b[4] & 0xFF,\n+\t\t\t\t\t       eth->s_addr.addr_b[5] & 0xFF);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                         type=%04x\\n\",\n+\t\t\t\t\t       htons(eth->ether_type));\n+\t\t\t\t}\n+\t\t\t\tif (items[iter_idx].mask) {\n+\t\t\t\t\tconst struct flow_elem_eth *eth =\n+\t\t\t\t\t\t\titems[iter_idx].mask;\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"FLOW_ELEM_TYPE_ETH MASK: dst=%02X:%02X:%02X:%02X:%02X:%02X\\n\",\n+\t\t\t\t\t       eth->d_addr.addr_b[0] & 0xFF,\n+\t\t\t\t\t       eth->d_addr.addr_b[1] & 0xFF,\n+\t\t\t\t\t       eth->d_addr.addr_b[2] & 0xFF,\n+\t\t\t\t\t       eth->d_addr.addr_b[3] & 0xFF,\n+\t\t\t\t\t       eth->d_addr.addr_b[4] & 0xFF,\n+\t\t\t\t\t       eth->d_addr.addr_b[5] & 0xFF);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                         src=%02X:%02X:%02X:%02X:%02X:%02X\\n\",\n+\t\t\t\t\t       eth->s_addr.addr_b[0] & 0xFF,\n+\t\t\t\t\t       eth->s_addr.addr_b[1] & 0xFF,\n+\t\t\t\t\t       eth->s_addr.addr_b[2] & 0xFF,\n+\t\t\t\t\t       eth->s_addr.addr_b[3] & 0xFF,\n+\t\t\t\t\t       eth->s_addr.addr_b[4] & 0xFF,\n+\t\t\t\t\t       eth->s_addr.addr_b[5] & 0xFF);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                         type=%04x\\n\",\n+\t\t\t\t\t       htons(eth->ether_type));\n+\t\t\t\t}\n+\t\t\t\tbreak;\n+\t\t\tcase FLOW_ELEM_TYPE_VLAN:\n+\t\t\t\tif (items[iter_idx].spec) {\n+\t\t\t\t\tconst struct flow_elem_vlan *vlan =\n+\t\t\t\t\t\t(const struct flow_elem_vlan *)\n+\t\t\t\t\t\titems[iter_idx]\n+\t\t\t\t\t\t.spec;\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"FLOW_ELEM_TYPE_VLAN SPEC: tci=%04x\\n\",\n+\t\t\t\t\t       htons(vlan->tci));\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          inner type=%04x\\n\",\n+\t\t\t\t\t       htons(vlan->inner_type));\n+\t\t\t\t}\n+\t\t\t\tif (items[iter_idx].mask) {\n+\t\t\t\t\tconst struct flow_elem_vlan *vlan =\n+\t\t\t\t\t\t(const struct flow_elem_vlan *)\n+\t\t\t\t\t\titems[iter_idx]\n+\t\t\t\t\t\t.mask;\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"FLOW_ELEM_TYPE_VLAN MASK: tci=%04x\\n\",\n+\t\t\t\t\t       htons(vlan->tci));\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          inner type=%04x\\n\",\n+\t\t\t\t\t       htons(vlan->inner_type));\n+\t\t\t\t}\n+\t\t\t\tbreak;\n+\t\t\tcase FLOW_ELEM_TYPE_IPV4:\n+\t\t\t\tif (items[iter_idx].spec) {\n+\t\t\t\t\tconst struct flow_elem_ipv4 *ip =\n+\t\t\t\t\t\t\titems[iter_idx].spec;\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"FLOW_ELEM_TYPE_IPV4 SPEC: dst=%d.%d.%d.%d\\n\",\n+\t\t\t\t\t       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,\n+\t\t\t\t\t       ((const char 
*)&ip->hdr.dst_ip)[1] & 0xFF,\n+\t\t\t\t\t       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,\n+\t\t\t\t\t       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          src=%d.%d.%d.%d\\n\",\n+\t\t\t\t\t       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,\n+\t\t\t\t\t       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,\n+\t\t\t\t\t       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,\n+\t\t\t\t\t       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          fragment_offset=%u\\n\",\n+\t\t\t\t\t       ip->hdr.frag_offset);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          next_proto_id=%u\\n\",\n+\t\t\t\t\t       ip->hdr.next_proto_id);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          packet_id=%u\\n\",\n+\t\t\t\t\t       ip->hdr.id);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          time_to_live=%u\\n\",\n+\t\t\t\t\t       ip->hdr.ttl);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          type_of_service=%u\\n\",\n+\t\t\t\t\t       ip->hdr.tos);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          version_ihl=%u\\n\",\n+\t\t\t\t\t       ip->hdr.version_ihl);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          total_length=%u\\n\",\n+\t\t\t\t\t       ip->hdr.length);\n+\t\t\t\t}\n+\t\t\t\tif (items[iter_idx].mask) {\n+\t\t\t\t\tconst struct flow_elem_ipv4 *ip =\n+\t\t\t\t\t\t\titems[iter_idx].mask;\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"FLOW_ELEM_TYPE_IPV4 MASK: dst=%d.%d.%d.%d\\n\",\n+\t\t\t\t\t       ((const char *)&ip->hdr.dst_ip)[0] & 0xFF,\n+\t\t\t\t\t       ((const char *)&ip->hdr.dst_ip)[1] & 0xFF,\n+\t\t\t\t\t       ((const char *)&ip->hdr.dst_ip)[2] & 0xFF,\n+\t\t\t\t\t       ((const char *)&ip->hdr.dst_ip)[3] & 0xFF);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          src=%d.%d.%d.%d\\n\",\n+\t\t\t\t\t       ((const char *)&ip->hdr.src_ip)[0] & 0xFF,\n+\t\t\t\t\t       ((const char *)&ip->hdr.src_ip)[1] & 0xFF,\n+\t\t\t\t\t       ((const char *)&ip->hdr.src_ip)[2] & 0xFF,\n+\t\t\t\t\t       ((const char *)&ip->hdr.src_ip)[3] & 0xFF);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          fragment_offset=%x\\n\",\n+\t\t\t\t\t       ip->hdr.frag_offset);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          next_proto_id=%x\\n\",\n+\t\t\t\t\t       ip->hdr.next_proto_id);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          packet_id=%x\\n\",\n+\t\t\t\t\t       ip->hdr.id);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          time_to_live=%x\\n\",\n+\t\t\t\t\t       ip->hdr.ttl);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          type_of_service=%x\\n\",\n+\t\t\t\t\t       ip->hdr.tos);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          version_ihl=%x\\n\",\n+\t\t\t\t\t       ip->hdr.version_ihl);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          total_length=%x\\n\",\n+\t\t\t\t\t       ip->hdr.length);\n+\t\t\t\t}\n+\t\t\t\tbreak;\n+\t\t\tcase FLOW_ELEM_TYPE_UDP:\n+\t\t\t\tif (items[iter_idx].spec) {\n+\t\t\t\t\tconst struct flow_elem_udp *udp =\n+\t\t\t\t\t\t(const struct flow_elem_udp *)\n+\t\t\t\t\t\titems[iter_idx]\n+\t\t\t\t\t\t.spec;\n+\t\t\t\t\tNT_LOG(DBG, 
FILTER,\n+\t\t\t\t\t       \"FLOW_ELEM_TYPE_UDP SPEC: src port=%04x\\n\",\n+\t\t\t\t\t       htons(udp->hdr.src_port));\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                         dst port=%04x\\n\",\n+\t\t\t\t\t       htons(udp->hdr.dst_port));\n+\t\t\t\t}\n+\t\t\t\tif (items[iter_idx].mask) {\n+\t\t\t\t\tconst struct flow_elem_udp *udp =\n+\t\t\t\t\t\t(const struct flow_elem_udp *)\n+\t\t\t\t\t\titems[iter_idx]\n+\t\t\t\t\t\t.mask;\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"FLOW_ELEM_TYPE_UDP MASK: src port=%04x\\n\",\n+\t\t\t\t\t       htons(udp->hdr.src_port));\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                         dst port=%04x\\n\",\n+\t\t\t\t\t       htons(udp->hdr.dst_port));\n+\t\t\t\t}\n+\t\t\t\tbreak;\n+\t\t\tcase FLOW_ELEM_TYPE_TAG:\n+\t\t\t\tif (items[iter_idx].spec) {\n+\t\t\t\t\tconst struct flow_elem_tag *tag =\n+\t\t\t\t\t\t(const struct flow_elem_tag *)\n+\t\t\t\t\t\titems[iter_idx]\n+\t\t\t\t\t\t.spec;\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"FLOW_ELEM_TYPE_TAG SPEC: data=%u\\n\",\n+\t\t\t\t\t       tag->data);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                         index=%u\\n\",\n+\t\t\t\t\t       tag->index);\n+\t\t\t\t}\n+\t\t\t\tif (items[iter_idx].mask) {\n+\t\t\t\t\tconst struct flow_elem_tag *tag =\n+\t\t\t\t\t\t(const struct flow_elem_tag *)\n+\t\t\t\t\t\titems[iter_idx]\n+\t\t\t\t\t\t.mask;\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"FLOW_ELEM_TYPE_TAG MASK: data=%u\\n\",\n+\t\t\t\t\t       tag->data);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                         index=%u\\n\",\n+\t\t\t\t\t       tag->index);\n+\t\t\t\t}\n+\t\t\t\tbreak;\n+\t\t\tcase FLOW_ELEM_TYPE_VXLAN: {\n+\t\t\t\tconst struct flow_elem_vxlan *vxlan =\n+\t\t\t\t\t(const struct flow_elem_vxlan *)\n+\t\t\t\t\titems[iter_idx]\n+\t\t\t\t\t.spec;\n+\t\t\t\tconst struct flow_elem_vxlan *mask =\n+\t\t\t\t\t(const struct flow_elem_vxlan *)\n+\t\t\t\t\titems[iter_idx]\n+\t\t\t\t\t.mask;\n+\n+\t\t\t\tuint32_t vni =\n+\t\t\t\t\t(uint32_t)(((uint32_t)vxlan->vni[0]\n+\t\t\t\t\t\t    << 16) |\n+\t\t\t\t\t\t   ((uint32_t)vxlan->vni[1]\n+\t\t\t\t\t\t    << 8) |\n+\t\t\t\t\t\t   ((uint32_t)vxlan->vni[2]));\n+\t\t\t\tuint32_t vni_mask =\n+\t\t\t\t\t(uint32_t)(((uint32_t)mask->vni[0]\n+\t\t\t\t\t\t    << 16) |\n+\t\t\t\t\t\t   ((uint32_t)mask->vni[1]\n+\t\t\t\t\t\t    << 8) |\n+\t\t\t\t\t\t   ((uint32_t)mask->vni[2]));\n+\n+\t\t\t\tNT_LOG(INF, FILTER, \"VNI: %08x / %08x\\n\", vni,\n+\t\t\t\t       vni_mask);\n+\t\t\t}\n+\t\t\tbreak;\n+\t\t\t}\n+#endif\n+\n+\t\t\tmatch->flow_elem[eidx].type = type;\n+\t\t\tmatch->flow_elem[eidx].spec = items[iter_idx].spec;\n+\t\t\tmatch->flow_elem[eidx].mask = items[iter_idx].mask;\n+\n+\t\t\teidx++;\n+\t\t\titer_idx++;\n+\t\t}\n+\n+\t} while (type >= 0 && type != FLOW_ELEM_TYPE_END);\n+\n+\treturn (type >= 0) ? 
0 : -1;\n+}\n+\n+static int\n+create_action_elements_vswitch(struct cnv_action_s *action,\n+\t\t\t       const struct rte_flow_action actions[],\n+\t\t\t       int max_elem, uint32_t *flow_stat_id)\n+{\n+\tint aidx = 0;\n+\tint iter_idx = 0;\n+\tint type = -1;\n+\n+\tif (!actions)\n+\t\treturn -1;\n+\n+\tif (!convert_tables_initialized)\n+\t\tinitialize_global_cnv_tables();\n+\n+\t*flow_stat_id = MAX_COLOR_FLOW_STATS;\n+\tdo {\n+\t\ttype = CNV_TO_ACTION(actions[iter_idx].type);\n+\t\tif (type < 0) {\n+\t\t\tif ((int)actions[iter_idx].type ==\n+\t\t\t\t\tNT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET) {\n+\t\t\t\ttype = FLOW_ACTION_TYPE_TUNNEL_SET;\n+\t\t\t} else {\n+\t\t\t\tNT_LOG(ERR, FILTER,\n+\t\t\t\t       \"ERROR unknown action type received!\\n\");\n+\t\t\t\treturn -1;\n+\t\t\t}\n+\t\t}\n+\n+#ifdef RTE_FLOW_DEBUG\n+\t\tNT_LOG(INF, FILTER,\n+\t\t       \"RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\\n\",\n+\t\t       actions[iter_idx].type, type,\n+\t\t       ((int)actions[iter_idx].type >= 0) ?\n+\t\t       action_list_str[actions[iter_idx].type] :\n+\t\t       \"FLOW_ACTION_TYPE_TUNNEL_SET\");\n+#endif\n+\n+\t\tif (type >= 0) {\n+\t\t\taction->flow_actions[aidx].type = type;\n+\n+\t\t\t/*\n+\t\t\t * Non-compatible actions handled here\n+\t\t\t */\n+\t\t\tswitch (type) {\n+\t\t\tcase -1:\n+#ifdef RTE_FLOW_DEBUG\n+\t\t\t\tNT_LOG(INF, FILTER,\n+\t\t\t\t       \"RTE ACTION UNSUPPORTED %i\\n\",\n+\t\t\t\t       actions[iter_idx].type);\n+#endif\n+\t\t\t\treturn -1;\n+\n+\t\t\tcase FLOW_ACTION_TYPE_RSS: {\n+\t\t\t\tconst struct rte_flow_action_rss *rss =\n+\t\t\t\t\t(const struct rte_flow_action_rss *)\n+\t\t\t\t\tactions[iter_idx]\n+\t\t\t\t\t.conf;\n+\t\t\t\taction->flow_rss.func =\n+\t\t\t\t\tFLOW_HASH_FUNCTION_DEFAULT;\n+\n+\t\t\t\tif (rss->func !=\n+\t\t\t\t\t\tRTE_ETH_HASH_FUNCTION_DEFAULT)\n+\t\t\t\t\treturn -1;\n+\t\t\t\taction->flow_rss.level = rss->level;\n+\t\t\t\taction->flow_rss.types = rss->types;\n+\t\t\t\taction->flow_rss.key_len = rss->key_len;\n+\t\t\t\taction->flow_rss.queue_num = rss->queue_num;\n+\t\t\t\taction->flow_rss.key = rss->key;\n+\t\t\t\taction->flow_rss.queue = rss->queue;\n+#ifdef RTE_FLOW_DEBUG\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"FLOW_ACTION_TYPE_RSS: rss->level = %u\\n\",\n+\t\t\t\t       rss->level);\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"                      rss->types = 0x%\" PRIX64 \"\\n\",\n+\t\t\t\t       (unsigned long long)rss->types);\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"                      rss->key_len = %u\\n\",\n+\t\t\t\t       rss->key_len);\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"                      rss->queue_num = %u\\n\",\n+\t\t\t\t       rss->queue_num);\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"                      rss->key = %p\\n\",\n+\t\t\t\t       rss->key);\n+\t\t\t\tunsigned int i;\n+\n+\t\t\t\tfor (i = 0; i < rss->queue_num; i++) {\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                      rss->queue[%u] = %u\\n\",\n+\t\t\t\t\t       i, rss->queue[i]);\n+\t\t\t\t}\n+#endif\n+\t\t\t\taction->flow_actions[aidx].conf =\n+\t\t\t\t\t&action->flow_rss;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\tcase FLOW_ACTION_TYPE_VXLAN_ENCAP: {\n+\t\t\t\tconst struct rte_flow_action_vxlan_encap *tun =\n+\t\t\t\t\t(const struct rte_flow_action_vxlan_encap\n+\t\t\t\t\t *)actions[iter_idx]\n+\t\t\t\t\t.conf;\n+\t\t\t\tif (!tun || create_match_elements(&action->tun_def.match,\n+\t\t\t\t\t\t\t\t  tun->definition,\n+\t\t\t\t\t\t\t\t  MAX_ELEMENTS) != 0)\n+\t\t\t\t\treturn 
-1;\n+\t\t\t\taction->tun_def.tun_definition =\n+\t\t\t\t\taction->tun_def.match.flow_elem;\n+\t\t\t\taction->flow_actions[aidx].conf =\n+\t\t\t\t\t&action->tun_def;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\t\tcase FLOW_ACTION_TYPE_MARK: {\n+\t\t\t\tconst struct rte_flow_action_mark *mark_id =\n+\t\t\t\t\t(const struct rte_flow_action_mark *)\n+\t\t\t\t\tactions[iter_idx]\n+\t\t\t\t\t.conf;\n+\t\t\t\tif (mark_id) {\n+#ifdef RTE_FLOW_DEBUG\n+\t\t\t\t\tNT_LOG(DBG, FILTER, \"Mark ID=%u\\n\",\n+\t\t\t\t\t       mark_id->id);\n+#endif\n+\t\t\t\t\t*flow_stat_id = create_flow_stat_id(mark_id->id);\n+\t\t\t\t\taction->mark.id = *flow_stat_id;\n+\t\t\t\t\taction->flow_actions[aidx].conf =\n+\t\t\t\t\t\t&action->mark;\n+\n+\t\t\t\t} else {\n+\t\t\t\t\taction->flow_actions[aidx].conf =\n+\t\t\t\t\t\tactions[iter_idx].conf;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\t\tdefault:\n+\t\t\t\t/* Compatible */\n+\n+\t\t\t\t/*\n+\t\t\t\t * OVS Full offload does not add mark in RTE Flow\n+\t\t\t\t * We need one in FPGA to control flow(color) statistics\n+\t\t\t\t */\n+\t\t\t\tif (type == FLOW_ACTION_TYPE_END &&\n+\t\t\t\t\t\t*flow_stat_id == MAX_COLOR_FLOW_STATS) {\n+\t\t\t\t\t/* We need to insert a mark for our FPGA */\n+\t\t\t\t\t*flow_stat_id = create_flow_stat_id(0);\n+\t\t\t\t\taction->mark.id = *flow_stat_id;\n+\n+\t\t\t\t\taction->flow_actions[aidx].type =\n+\t\t\t\t\t\tFLOW_ACTION_TYPE_MARK;\n+\t\t\t\t\taction->flow_actions[aidx].conf =\n+\t\t\t\t\t\t&action->mark;\n+\t\t\t\t\taidx++;\n+\n+\t\t\t\t\t/* Move end type */\n+\t\t\t\t\taction->flow_actions[aidx].type =\n+\t\t\t\t\t\tFLOW_ACTION_TYPE_END;\n+\t\t\t\t}\n+\n+#ifdef RTE_FLOW_DEBUG\n+\t\t\t\tswitch (type) {\n+\t\t\t\tcase FLOW_ACTION_TYPE_PORT_ID:\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"Port ID=%u, Original=%u\\n\",\n+\t\t\t\t\t       ((const struct rte_flow_action_port_id\n+\t\t\t\t\t\t *)actions[iter_idx]\n+\t\t\t\t\t\t.conf)\n+\t\t\t\t\t       ->id,\n+\t\t\t\t\t       ((const struct rte_flow_action_port_id\n+\t\t\t\t\t\t *)actions[iter_idx]\n+\t\t\t\t\t\t.conf)\n+\t\t\t\t\t       ->original);\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase FLOW_ACTION_TYPE_COUNT:\n+\t\t\t\t\tNT_LOG(DBG, FILTER, \"Count ID=%u\\n\",\n+\t\t\t\t\t       ((const struct rte_flow_action_count\n+\t\t\t\t\t\t *)actions[iter_idx]\n+\t\t\t\t\t\t.conf)\n+\t\t\t\t\t       ->id);\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase FLOW_ACTION_TYPE_SET_TAG:\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"FLOW_ACTION_TYPE_SET_TAG: data=%u\\n\",\n+\t\t\t\t\t       ((const struct flow_action_tag *)\n+\t\t\t\t\t\tactions[iter_idx]\n+\t\t\t\t\t\t.conf)\n+\t\t\t\t\t       ->data);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          mask=%u\\n\",\n+\t\t\t\t\t       ((const struct flow_action_tag *)\n+\t\t\t\t\t\tactions[iter_idx]\n+\t\t\t\t\t\t.conf)\n+\t\t\t\t\t       ->mask);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          index=%u\\n\",\n+\t\t\t\t\t       ((const struct flow_action_tag *)\n+\t\t\t\t\t\tactions[iter_idx]\n+\t\t\t\t\t\t.conf)\n+\t\t\t\t\t       ->index);\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+#endif\n+\n+\t\t\t\taction->flow_actions[aidx].conf =\n+\t\t\t\t\tactions[iter_idx].conf;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\taidx++;\n+\t\t\tif (aidx == max_elem)\n+\t\t\t\treturn -1;\n+\t\t\titer_idx++;\n+\t\t}\n+\n+\t} while (type >= 0 && type != FLOW_ACTION_TYPE_END);\n+\n+\treturn (type >= 0) ? 
0 : -1;\n+}\n+\n+static int create_action_elements_inline(struct cnv_action_s *action,\n+\t\tconst struct rte_flow_action actions[],\n+\t\tint max_elem, uint32_t queue_offset)\n+{\n+\tint aidx = 0;\n+\tint type = -1;\n+\n+\tdo {\n+\t\ttype = CNV_TO_ACTION(actions[aidx].type);\n+\n+#ifdef RTE_FLOW_DEBUG\n+\t\tNT_LOG(INF, FILTER,\n+\t\t       \"RTE ACTION -> FILTER FLOW ACTION - %i -> %i - %s\\n\",\n+\t\t       actions[aidx].type, type,\n+\t\t       ((int)actions[aidx].type >= 0) ?\n+\t\t       action_list_str[actions[aidx].type] :\n+\t\t       \"FLOW_ACTION_TYPE_TUNNEL_SET\");\n+#endif\n+\n+\t\tif (type >= 0) {\n+\t\t\taction->flow_actions[aidx].type = type;\n+\n+\t\t\t/*\n+\t\t\t * Non-compatible actions handled here\n+\t\t\t */\n+\t\t\tswitch (type) {\n+\t\t\tcase FLOW_ACTION_TYPE_RSS: {\n+\t\t\t\tconst struct rte_flow_action_rss *rss =\n+\t\t\t\t\t(const struct rte_flow_action_rss *)\n+\t\t\t\t\tactions[aidx]\n+\t\t\t\t\t.conf;\n+\t\t\t\taction->flow_rss.func =\n+\t\t\t\t\tFLOW_HASH_FUNCTION_DEFAULT;\n+\n+\t\t\t\tif (rss->func !=\n+\t\t\t\t\t\tRTE_ETH_HASH_FUNCTION_DEFAULT)\n+\t\t\t\t\treturn -1;\n+\t\t\t\taction->flow_rss.level = rss->level;\n+\t\t\t\taction->flow_rss.types = rss->types;\n+\t\t\t\taction->flow_rss.key_len = rss->key_len;\n+\t\t\t\taction->flow_rss.queue_num = rss->queue_num;\n+\t\t\t\taction->flow_rss.key = rss->key;\n+\t\t\t\taction->flow_rss.queue = rss->queue;\n+\t\t\t\taction->flow_actions[aidx].conf =\n+\t\t\t\t\t&action->flow_rss;\n+#ifdef RTE_FLOW_DEBUG\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"FLOW_ACTION_TYPE_RSS: rss->level = %u\\n\",\n+\t\t\t\t       rss->level);\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"                      rss->types = 0x%\" PRIX64 \"\\n\",\n+\t\t\t\t       (unsigned long long)rss->types);\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"                      rss->key_len = %u\\n\",\n+\t\t\t\t       rss->key_len);\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"                      rss->queue_num = %u\\n\",\n+\t\t\t\t       rss->queue_num);\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"                      rss->key = %p\\n\",\n+\t\t\t\t       rss->key);\n+\t\t\t\tunsigned int i;\n+\n+\t\t\t\tfor (i = 0; i < rss->queue_num; i++) {\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                      rss->queue[%u] = %u\\n\",\n+\t\t\t\t\t       i, rss->queue[i]);\n+\t\t\t\t}\n+#endif\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\t\tcase FLOW_ACTION_TYPE_RAW_DECAP: {\n+\t\t\t\tconst struct rte_flow_action_raw_decap *decap =\n+\t\t\t\t\t(const struct rte_flow_action_raw_decap\n+\t\t\t\t\t *)actions[aidx]\n+\t\t\t\t\t.conf;\n+\t\t\t\tint item_count = interpret_raw_data(decap->data,\n+\t\t\t\t\t\t\t\t    NULL, decap->size,\n+\t\t\t\t\t\t\t\t    action->decap.items);\n+\t\t\t\tif (item_count < 0)\n+\t\t\t\t\treturn item_count;\n+#ifdef RTE_FLOW_DEBUG\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"FLOW_ACTION_TYPE_RAW_DECAP: size = %u\\n\",\n+\t\t\t\t       decap->size);\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"FLOW_ACTION_TYPE_RAW_DECAP: item_count = %u\\n\",\n+\t\t\t\t       item_count);\n+\t\t\t\tfor (int i = 0; i < item_count; i++) {\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"FLOW_ACTION_TYPE_RAW_DECAP: item = %u\\n\",\n+\t\t\t\t\t       action->decap.items[i].type);\n+\t\t\t\t}\n+#endif\n+\t\t\t\taction->decap.data = decap->data;\n+\t\t\t\taction->decap.size = decap->size;\n+\t\t\t\taction->decap.item_count = item_count;\n+\t\t\t\taction->flow_actions[aidx].conf 
=\n+\t\t\t\t\t&action->decap;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\t\tcase FLOW_ACTION_TYPE_RAW_ENCAP: {\n+\t\t\t\tconst struct rte_flow_action_raw_encap *encap =\n+\t\t\t\t\t(const struct rte_flow_action_raw_encap\n+\t\t\t\t\t *)actions[aidx]\n+\t\t\t\t\t.conf;\n+\t\t\t\tint item_count = interpret_raw_data(encap->data,\n+\t\t\t\t\t\t\t\t    encap->preserve,\n+\t\t\t\t\t\t\t\t    encap->size,\n+\t\t\t\t\t\t\t\t    action->encap.items);\n+\t\t\t\tif (item_count < 0)\n+\t\t\t\t\treturn item_count;\n+#ifdef RTE_FLOW_DEBUG\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"FLOW_ACTION_TYPE_RAW_ENCAP: size = %u\\n\",\n+\t\t\t\t       encap->size);\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"FLOW_ACTION_TYPE_RAW_ENCAP: item_count = %u\\n\",\n+\t\t\t\t       item_count);\n+#endif\n+\t\t\t\taction->encap.data = encap->data;\n+\t\t\t\taction->encap.preserve = encap->preserve;\n+\t\t\t\taction->encap.size = encap->size;\n+\t\t\t\taction->encap.item_count = item_count;\n+\t\t\t\taction->flow_actions[aidx].conf =\n+\t\t\t\t\t&action->encap;\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\t\tcase FLOW_ACTION_TYPE_QUEUE: {\n+\t\t\t\tconst struct rte_flow_action_queue *queue =\n+\t\t\t\t\t(const struct rte_flow_action_queue *)\n+\t\t\t\t\tactions[aidx]\n+\t\t\t\t\t.conf;\n+\t\t\t\taction->queue.index =\n+\t\t\t\t\tqueue->index + queue_offset;\n+\t\t\t\taction->flow_actions[aidx].conf =\n+\t\t\t\t\t&action->queue;\n+#ifdef RTE_FLOW_DEBUG\n+\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t       \"FLOW_ACTION_TYPE_QUEUE: queue = %u\\n\",\n+\t\t\t\t       action->queue.index);\n+#endif\n+\t\t\t}\n+\t\t\tbreak;\n+\n+\t\t\tdefault: {\n+\t\t\t\taction->flow_actions[aidx].conf =\n+\t\t\t\t\tactions[aidx].conf;\n+\n+#ifdef RTE_FLOW_DEBUG\n+\t\t\t\tswitch (type) {\n+\t\t\t\tcase FLOW_ACTION_TYPE_PORT_ID:\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"Port ID=%u, Original=%u\\n\",\n+\t\t\t\t\t       ((const struct rte_flow_action_port_id\n+\t\t\t\t\t\t *)actions[aidx]\n+\t\t\t\t\t\t.conf)\n+\t\t\t\t\t       ->id,\n+\t\t\t\t\t       ((const struct rte_flow_action_port_id\n+\t\t\t\t\t\t *)actions[aidx]\n+\t\t\t\t\t\t.conf)\n+\t\t\t\t\t       ->original);\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase FLOW_ACTION_TYPE_COUNT:\n+\t\t\t\t\tNT_LOG(DBG, FILTER, \"Count ID=%u\\n\",\n+\t\t\t\t\t       ((const struct rte_flow_action_count\n+\t\t\t\t\t\t *)actions[aidx]\n+\t\t\t\t\t\t.conf)\n+\t\t\t\t\t       ->id);\n+\t\t\t\t\tbreak;\n+\t\t\t\tcase FLOW_ACTION_TYPE_SET_TAG:\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"FLOW_ACTION_TYPE_SET_TAG: data=%u\\n\",\n+\t\t\t\t\t       ((const struct flow_action_tag *)\n+\t\t\t\t\t\tactions[aidx]\n+\t\t\t\t\t\t.conf)\n+\t\t\t\t\t       ->data);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          mask=%u\\n\",\n+\t\t\t\t\t       ((const struct flow_action_tag *)\n+\t\t\t\t\t\tactions[aidx]\n+\t\t\t\t\t\t.conf)\n+\t\t\t\t\t       ->mask);\n+\t\t\t\t\tNT_LOG(DBG, FILTER,\n+\t\t\t\t\t       \"                          index=%u\\n\",\n+\t\t\t\t\t       ((const struct flow_action_tag *)\n+\t\t\t\t\t\tactions[aidx]\n+\t\t\t\t\t\t.conf)\n+\t\t\t\t\t       ->index);\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+#endif\n+\t\t\t}\n+\t\t\tbreak;\n+\t\t\t}\n+\n+\t\t\taidx++;\n+\t\t\tif (aidx == max_elem)\n+\t\t\t\treturn -1;\n+\t\t}\n+\n+\t} while (type >= 0 && type != FLOW_ACTION_TYPE_END);\n+\n+\treturn (type >= 0) ? 
0 : -1;\n+}\n+\n+#endif /* __CREATE_ELEMENTS_H__ */\ndiff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c\nnew file mode 100644\nindex 0000000000..6b19c2308e\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c\n@@ -0,0 +1,656 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include <stdio.h>\n+#include <stdint.h>\n+#include <malloc.h>\n+\n+#include \"ntdrv_4ga.h\"\n+#include <rte_flow_driver.h>\n+#include <rte_pci.h>\n+#include \"ntnic_ethdev.h\"\n+\n+#include \"ntlog.h\"\n+#include \"nt_util.h\"\n+#include \"create_elements.h\"\n+#include \"ntnic_filter.h\"\n+\n+#define MAX_RTE_FLOWS 8192\n+#define MAX_PORTIDS 64\n+\n+#if (MAX_COLOR_FLOW_STATS != NT_MAX_COLOR_FLOW_STATS)\n+#error Difference in COLOR_FLOW_STATS. Please synchronize the defines.\n+#endif\n+\n+struct rte_flow nt_flows[MAX_RTE_FLOWS];\n+\n+static int is_flow_handle_typecast(struct rte_flow *flow)\n+{\n+\tconst void *first_element = &nt_flows[0];\n+\tconst void *last_element = &nt_flows[MAX_RTE_FLOWS - 1];\n+\n+\treturn (void *)flow < first_element || (void *)flow > last_element;\n+}\n+\n+static int convert_flow(struct rte_eth_dev *eth_dev,\n+\t\t\tconst struct rte_flow_attr *attr,\n+\t\t\tconst struct rte_flow_item items[],\n+\t\t\tconst struct rte_flow_action actions[],\n+\t\t\tstruct cnv_attr_s *attribute, struct cnv_match_s *match,\n+\t\t\tstruct cnv_action_s *action,\n+\t\t\tstruct rte_flow_error *error, uint32_t *flow_stat_id)\n+{\n+\tstruct pmd_internals *dev = eth_dev->data->dev_private;\n+\tstruct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;\n+\n+\tstatic struct flow_error flow_error = { .type = FLOW_ERROR_NONE,\n+\t\t       .message = \"none\"\n+\t};\n+\tuint32_t queue_offset = 0;\n+\n+#ifdef RTE_FLOW_DEBUG\n+\tNT_LOG(DBG, FILTER, \"ntnic_flow_create port_id %u - %s\\n\",\n+\t       eth_dev->data->port_id, eth_dev->data->name);\n+#endif\n+\n+\tif (dev->type == PORT_TYPE_OVERRIDE && dev->vpq_nb_vq > 0) {\n+\t\t/*\n+\t\t * The queues coming from the main PMD will always start from 0\n+\t\t * When the port is a the VF/vDPA port the queues must be changed\n+\t\t * to match the queues allocated for VF/vDPA.\n+\t\t */\n+\t\tqueue_offset = dev->vpq[0].id;\n+\t}\n+\n+\t/* Set initial error */\n+\tconvert_error(error, &flow_error);\n+\n+\tif (!dev) {\n+\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t   \"Missing eth_dev\");\n+\t\treturn -1;\n+\t}\n+\n+\tif (create_attr(attribute, attr) != 0) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,\n+\t\t\t\t   NULL, \"Error in attr\");\n+\t\treturn -1;\n+\t}\n+\tif (create_match_elements(match, items, MAX_ELEMENTS) != 0) {\n+\t\trte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,\n+\t\t\t\t   NULL, \"Error in items\");\n+\t\treturn -1;\n+\t}\n+\tif (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {\n+\t\tif (create_action_elements_inline(action, actions, MAX_ACTIONS,\n+\t\t\t\t\t\t  queue_offset) != 0) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, NULL,\n+\t\t\t\t\t   \"Error in actions\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\tif (attribute->attr.group > 0)\n+\t\t\treturn 0;\n+\t} else if (fpga_info->profile == FPGA_INFO_PROFILE_VSWITCH) {\n+\t\tif (create_action_elements_vswitch(action, actions, MAX_ACTIONS,\n+\t\t\t\t\t\t   flow_stat_id) != 0) {\n+\t\t\trte_flow_error_set(error, EINVAL,\n+\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION, 
NULL,\n+\t\t\t\t\t   \"Error in actions\");\n+\t\t\treturn -1;\n+\t\t}\n+\t} else {\n+\t\trte_flow_error_set(error, EPERM,\n+\t\t\t\t   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t   \"Unsupported adapter profile\");\n+\t\treturn -1;\n+\t}\n+\treturn 0;\n+}\n+\n+static int eth_flow_destroy(struct rte_eth_dev *eth_dev, struct rte_flow *flow,\n+\t\t\t    struct rte_flow_error *error)\n+{\n+\tstruct pmd_internals *dev = eth_dev->data->dev_private;\n+\tstatic struct flow_error flow_error = { .type = FLOW_ERROR_NONE,\n+\t\t       .message = \"none\"\n+\t};\n+\n+\tint res = 0;\n+\n+\t/* Set initial error */\n+\tconvert_error(error, &flow_error);\n+\n+\tif (!flow)\n+\t\treturn 0;\n+\n+\tif (is_flow_handle_typecast(flow)) {\n+\t\tres = flow_destroy(dev->flw_dev, (void *)flow, &flow_error);\n+\t\tconvert_error(error, &flow_error);\n+\t} else {\n+\t\tres = flow_destroy(dev->flw_dev, flow->flw_hdl, &flow_error);\n+\t\tconvert_error(error, &flow_error);\n+\n+\t\trte_spinlock_lock(&flow_lock);\n+\t\tdelete_flow_stat_id_locked(flow->flow_stat_id);\n+\t\tflow->used = 0;\n+\t\trte_spinlock_unlock(&flow_lock);\n+\t}\n+\n+\t/* Clear the flow statistics if successfully destroyed */\n+\tif (res == 0) {\n+\t\tflow->stat_pkts = 0UL;\n+\t\tflow->stat_bytes = 0UL;\n+\t\tflow->stat_tcp_flags = 0;\n+\t}\n+\n+\treturn res;\n+}\n+\n+static int eth_flow_validate(struct rte_eth_dev *eth_dev,\n+\t\t\t     const struct rte_flow_attr *attr,\n+\t\t\t     const struct rte_flow_item items[],\n+\t\t\t     const struct rte_flow_action actions[],\n+\t\t\t     struct rte_flow_error *error)\n+{\n+\tstatic struct flow_error flow_error = { .type = FLOW_ERROR_NONE,\n+\t\t       .message = \"none\"\n+\t};\n+\tstruct pmd_internals *dev = eth_dev->data->dev_private;\n+\tstruct cnv_attr_s attribute;\n+\tstruct cnv_match_s match;\n+\tstruct cnv_action_s action;\n+\tuint32_t flow_stat_id = 0;\n+\tint res;\n+\n+\tif (convert_flow(eth_dev, attr, items, actions, &attribute, &match,\n+\t\t\t &action, error, &flow_stat_id) < 0)\n+\t\treturn -EINVAL;\n+\n+\tres = flow_validate(dev->flw_dev, match.flow_elem, action.flow_actions,\n+\t\t\t    &flow_error);\n+\n+\tif (res < 0)\n+\t\tconvert_error(error, &flow_error);\n+\n+\treturn res;\n+}\n+\n+static struct rte_flow *eth_flow_create(struct rte_eth_dev *eth_dev,\n+\t\t\t\t\tconst struct rte_flow_attr *attr,\n+\t\t\t\t\tconst struct rte_flow_item items[],\n+\t\t\t\t\tconst struct rte_flow_action actions[],\n+\t\t\t\t\tstruct rte_flow_error *error)\n+{\n+\tstruct pmd_internals *dev = eth_dev->data->dev_private;\n+\tstruct fpga_info_s *fpga_info = &dev->p_drv->ntdrv.adapter_info.fpga_info;\n+\n+\tstruct cnv_attr_s attribute;\n+\tstruct cnv_match_s match;\n+\tstruct cnv_action_s action;\n+\n+\tstatic struct flow_error flow_error = { .type = FLOW_ERROR_NONE,\n+\t\t       .message = \"none\"\n+\t};\n+\tuint32_t flow_stat_id = 0;\n+\n+#ifdef RTE_FLOW_DEBUG\n+\tNT_LOG(DBG, FILTER, \"ntnic_flow_create port_id %u - %s\\n\",\n+\t       eth_dev->data->port_id, eth_dev->data->name);\n+#endif\n+\n+\tif (convert_flow(eth_dev, attr, items, actions, &attribute, &match,\n+\t\t\t &action, error, &flow_stat_id) < 0)\n+\t\treturn NULL;\n+\n+\tif (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&\n+\t\t\tattribute.attr.group > 0) {\n+\t\tvoid *flw_hdl = flow_create(dev->flw_dev, &attribute.attr,\n+\t\t\t\t\t    match.flow_elem,\n+\t\t\t\t\t    action.flow_actions, &flow_error);\n+\t\tconvert_error(error, &flow_error);\n+\t\treturn (struct rte_flow *)flw_hdl;\n+\t}\n+\n+\tstruct rte_flow *flow = 
NULL;\n+\n+\trte_spinlock_lock(&flow_lock);\n+\tint i;\n+\n+\tfor (i = 0; i < MAX_RTE_FLOWS; i++) {\n+\t\tif (!nt_flows[i].used) {\n+\t\t\tnt_flows[i].flow_stat_id = flow_stat_id;\n+\t\t\tif (nt_flows[i].flow_stat_id <\n+\t\t\t\t\tNT_MAX_COLOR_FLOW_STATS) {\n+\t\t\t\tnt_flows[i].used = 1;\n+\t\t\t\tflow = &nt_flows[i];\n+\t\t\t}\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\trte_spinlock_unlock(&flow_lock);\n+\tif (flow) {\n+\t\tflow->flw_hdl = flow_create(dev->flw_dev, &attribute.attr,\n+\t\t\t\t\t    match.flow_elem,\n+\t\t\t\t\t    action.flow_actions, &flow_error);\n+\t\tconvert_error(error, &flow_error);\n+\t\tif (!flow->flw_hdl) {\n+\t\t\trte_spinlock_lock(&flow_lock);\n+\t\t\tdelete_flow_stat_id_locked(flow->flow_stat_id);\n+\t\t\tflow->used = 0;\n+\t\t\tflow = NULL;\n+\t\t\trte_spinlock_unlock(&flow_lock);\n+\t\t} else {\n+#ifdef RTE_FLOW_DEBUG\n+\t\t\tNT_LOG(INF, FILTER, \"Create Flow %p using stat_id %i\\n\",\n+\t\t\t       flow, flow->flow_stat_id);\n+#endif\n+\t\t}\n+\t}\n+\treturn flow;\n+}\n+\n+uint64_t last_stat_rtc;\n+\n+int poll_statistics(struct pmd_internals *internals)\n+{\n+\tint flow;\n+\tstruct drv_s *p_drv = internals->p_drv;\n+\tstruct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;\n+\tnt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;\n+\tconst int if_index = internals->if_index;\n+\n+\tif (!p_nt4ga_stat || if_index < 0 || if_index > NUM_ADAPTER_PORTS_MAX)\n+\t\treturn -1;\n+\n+\tassert(rte_tsc_freq > 0);\n+\n+\trte_spinlock_lock(&hwlock);\n+\n+\tuint64_t now_rtc = rte_get_tsc_cycles();\n+\n+\t/*\n+\t * Check per port max once a second\n+\t * if more than a second since last stat read, do a new one\n+\t */\n+\tif ((now_rtc - internals->last_stat_rtc) < rte_tsc_freq) {\n+\t\trte_spinlock_unlock(&hwlock);\n+\t\treturn 0;\n+\t}\n+\n+\tinternals->last_stat_rtc = now_rtc;\n+\n+\tpthread_mutex_lock(&p_nt_drv->stat_lck);\n+\n+\t/*\n+\t * Add the RX statistics increments since last time we polled.\n+\t * (No difference if physical or virtual port)\n+\t */\n+\tinternals->rxq_scg[0].rx_pkts +=\n+\t\tp_nt4ga_stat->a_port_rx_packets_total[if_index] -\n+\t\tp_nt4ga_stat->a_port_rx_packets_base[if_index];\n+\tinternals->rxq_scg[0].rx_bytes +=\n+\t\tp_nt4ga_stat->a_port_rx_octets_total[if_index] -\n+\t\tp_nt4ga_stat->a_port_rx_octets_base[if_index];\n+\tinternals->rxq_scg[0].err_pkts += 0;\n+\tinternals->rx_missed += p_nt4ga_stat->a_port_rx_drops_total[if_index] -\n+\t\t\t\tp_nt4ga_stat->a_port_rx_drops_base[if_index];\n+\n+\t/* _update the increment bases */\n+\tp_nt4ga_stat->a_port_rx_packets_base[if_index] =\n+\t\tp_nt4ga_stat->a_port_rx_packets_total[if_index];\n+\tp_nt4ga_stat->a_port_rx_octets_base[if_index] =\n+\t\tp_nt4ga_stat->a_port_rx_octets_total[if_index];\n+\tp_nt4ga_stat->a_port_rx_drops_base[if_index] =\n+\t\tp_nt4ga_stat->a_port_rx_drops_total[if_index];\n+\n+\t/* Tx (here we must distinguish between physical and virtual ports) */\n+\tif (internals->type == PORT_TYPE_PHYSICAL) {\n+\t\t/* LAG management of Tx stats. */\n+\t\tif (lag_active && if_index == 0) {\n+\t\t\tunsigned int i;\n+\t\t\t/*\n+\t\t\t * Collect all LAG ports Tx stat into this one. 
Simplified to only collect\n+\t\t\t * from port 0 and 1.\n+\t\t\t */\n+\t\t\tfor (i = 0; i < 2; i++) {\n+\t\t\t\t/* Add the statistics increments since last time we polled */\n+\t\t\t\tinternals->txq_scg[0].tx_pkts +=\n+\t\t\t\t\tp_nt4ga_stat->a_port_tx_packets_total[i] -\n+\t\t\t\t\tp_nt4ga_stat->a_port_tx_packets_base[i];\n+\t\t\t\tinternals->txq_scg[0].tx_bytes +=\n+\t\t\t\t\tp_nt4ga_stat->a_port_tx_octets_total[i] -\n+\t\t\t\t\tp_nt4ga_stat->a_port_tx_octets_base[i];\n+\t\t\t\tinternals->txq_scg[0].err_pkts += 0;\n+\n+\t\t\t\t/* _update the increment bases */\n+\t\t\t\tp_nt4ga_stat->a_port_tx_packets_base[i] =\n+\t\t\t\t\tp_nt4ga_stat->a_port_tx_packets_total[i];\n+\t\t\t\tp_nt4ga_stat->a_port_tx_octets_base[i] =\n+\t\t\t\t\tp_nt4ga_stat->a_port_tx_octets_total[i];\n+\t\t\t}\n+\t\t} else {\n+\t\t\t/* Add the statistics increments since last time we polled */\n+\t\t\tinternals->txq_scg[0].tx_pkts +=\n+\t\t\t\tp_nt4ga_stat->a_port_tx_packets_total[if_index] -\n+\t\t\t\tp_nt4ga_stat->a_port_tx_packets_base[if_index];\n+\t\t\tinternals->txq_scg[0].tx_bytes +=\n+\t\t\t\tp_nt4ga_stat->a_port_tx_octets_total[if_index] -\n+\t\t\t\tp_nt4ga_stat->a_port_tx_octets_base[if_index];\n+\t\t\tinternals->txq_scg[0].err_pkts += 0;\n+\n+\t\t\t/* _update the increment bases */\n+\t\t\tp_nt4ga_stat->a_port_tx_packets_base[if_index] =\n+\t\t\t\tp_nt4ga_stat->a_port_tx_packets_total[if_index];\n+\t\t\tp_nt4ga_stat->a_port_tx_octets_base[if_index] =\n+\t\t\t\tp_nt4ga_stat->a_port_tx_octets_total[if_index];\n+\t\t}\n+\t}\n+\tif (internals->type == PORT_TYPE_VIRTUAL) {\n+\t\t/* _update TX counters from HB queue counter */\n+\t\tunsigned int i;\n+\t\tstruct host_buffer_counters *const p_hb_counters =\n+\t\t\t\tp_nt4ga_stat->mp_stat_structs_hb;\n+\t\tuint64_t v_port_packets_total = 0, v_port_octets_total = 0;\n+\n+\t\t/*\n+\t\t * This is a bit odd. But typically nb_tx_queues must be only 1 since it denotes\n+\t\t * the number of exception queues which must be 1 - for now. The code is kept if we\n+\t\t * want it in future, but it will not be likely.\n+\t\t * Therefore adding all vPorts queue tx counters into Tx[0] is ok for now.\n+\t\t *\n+\t\t * Only use the vPort Tx counter to update OVS, since these are the real ones.\n+\t\t * The rep port into OVS that represents this port will always replicate the traffic\n+\t\t * here, also when no offload occurs\n+\t\t */\n+\t\tfor (i = 0; i < internals->vpq_nb_vq; ++i) {\n+\t\t\tv_port_packets_total +=\n+\t\t\t\tp_hb_counters[internals->vpq[i].id].fwd_packets;\n+\t\t\tv_port_octets_total +=\n+\t\t\t\tp_hb_counters[internals->vpq[i].id].fwd_bytes;\n+\t\t}\n+\t\t/* Add the statistics increments since last time we polled */\n+\t\tinternals->txq_scg[0].tx_pkts +=\n+\t\t\tv_port_packets_total -\n+\t\t\tp_nt4ga_stat->a_port_tx_packets_base[if_index];\n+\t\tinternals->txq_scg[0].tx_bytes +=\n+\t\t\tv_port_octets_total -\n+\t\t\tp_nt4ga_stat->a_port_tx_octets_base[if_index];\n+\t\tinternals->txq_scg[0].err_pkts += 0; /* What to use here ?? 
*/\n+\n+\t\t/* _update the increment bases */\n+\t\tp_nt4ga_stat->a_port_tx_packets_base[if_index] = v_port_packets_total;\n+\t\tp_nt4ga_stat->a_port_tx_octets_base[if_index] = v_port_octets_total;\n+\t}\n+\n+\t/* Globally only once a second */\n+\tif ((now_rtc - last_stat_rtc) < rte_tsc_freq) {\n+\t\trte_spinlock_unlock(&hwlock);\n+\t\tpthread_mutex_unlock(&p_nt_drv->stat_lck);\n+\t\treturn 0;\n+\t}\n+\n+\tlast_stat_rtc = now_rtc;\n+\n+\t/* All color counter are global, therefore only 1 pmd must update them */\n+\tconst struct color_counters *p_color_counters =\n+\t\t\tp_nt4ga_stat->mp_stat_structs_color;\n+\tstruct color_counters *p_color_counters_base =\n+\t\t\tp_nt4ga_stat->a_stat_structs_color_base;\n+\tuint64_t color_packets_accumulated, color_bytes_accumulated;\n+\n+\tfor (flow = 0; flow < MAX_RTE_FLOWS; flow++) {\n+\t\tif (nt_flows[flow].used) {\n+\t\t\tunsigned int color = nt_flows[flow].flow_stat_id;\n+\n+\t\t\tif (color < NT_MAX_COLOR_FLOW_STATS) {\n+\t\t\t\tcolor_packets_accumulated =\n+\t\t\t\t\tp_color_counters[color].color_packets;\n+\t\t\t\tnt_flows[flow].stat_pkts +=\n+\t\t\t\t\t(color_packets_accumulated -\n+\t\t\t\t\t p_color_counters_base[color].color_packets);\n+\n+\t\t\t\tnt_flows[flow].stat_tcp_flags |=\n+\t\t\t\t\tp_color_counters[color].tcp_flags;\n+\n+\t\t\t\tcolor_bytes_accumulated =\n+\t\t\t\t\tp_color_counters[color].color_bytes;\n+\t\t\t\tnt_flows[flow].stat_bytes +=\n+\t\t\t\t\t(color_bytes_accumulated -\n+\t\t\t\t\t p_color_counters_base[color].color_bytes);\n+\n+\t\t\t\t/* _update the counter bases */\n+\t\t\t\tp_color_counters_base[color].color_packets =\n+\t\t\t\t\tcolor_packets_accumulated;\n+\t\t\t\tp_color_counters_base[color].color_bytes =\n+\t\t\t\t\tcolor_bytes_accumulated;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\trte_spinlock_unlock(&hwlock);\n+\tpthread_mutex_unlock(&p_nt_drv->stat_lck);\n+\n+\treturn 0;\n+}\n+\n+static int eth_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,\n+\t\t\t  const struct rte_flow_action *action, void *data,\n+\t\t\t  struct rte_flow_error *err)\n+{\n+\tstruct pmd_internals *internals = dev->data->dev_private;\n+\n+\terr->cause = NULL;\n+\terr->message = NULL;\n+\n+\tif (is_flow_handle_typecast(flow)) {\n+\t\trte_flow_error_set(err, EFAULT, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t   NULL, \"Error in flow handle\");\n+\t\treturn -1;\n+\t}\n+\n+\tpoll_statistics(internals);\n+\n+\tif (action->type == RTE_FLOW_ACTION_TYPE_COUNT) {\n+\t\tstruct rte_flow_query_count *qcnt =\n+\t\t\t(struct rte_flow_query_count *)data;\n+\t\tif (qcnt) {\n+\t\t\tif (flow) {\n+\t\t\t\tqcnt->hits = flow->stat_pkts;\n+\t\t\t\tqcnt->hits_set = 1;\n+\t\t\t\tqcnt->bytes = flow->stat_bytes;\n+\t\t\t\tqcnt->bytes_set = 1;\n+\n+\t\t\t\tif (qcnt->reset) {\n+\t\t\t\t\tflow->stat_pkts = 0UL;\n+\t\t\t\t\tflow->stat_bytes = 0UL;\n+\t\t\t\t\tflow->stat_tcp_flags = 0;\n+\t\t\t\t}\n+\t\t\t} else {\n+\t\t\t\tqcnt->hits_set = 0;\n+\t\t\t\tqcnt->bytes_set = 0;\n+\t\t\t}\n+\t\t}\n+\t} else {\n+\t\trte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,\n+\t\t\t\t   NULL, \"Unsupported query\");\n+\t\treturn -1;\n+\t}\n+\trte_flow_error_set(err, 0, RTE_FLOW_ERROR_TYPE_NONE, NULL, \"Success\");\n+\treturn 0;\n+}\n+\n+#ifdef DEBUGGING\n+\n+static void _print_tunnel(struct rte_flow_tunnel *tunnel)\n+{\n+\tstruct in_addr addr;\n+\n+\tNT_LOG(DBG, FILTER, \"    tun type: %i\\n\", tunnel->type);\n+\tNT_LOG(DBG, FILTER, \"    tun ID: %016lx\\n\", tunnel->tun_id);\n+\taddr.s_addr = tunnel->ipv4.src_addr;\n+\tNT_LOG(DBG, FILTER, \"    tun src IP: %s\\n\", 
inet_ntoa(addr));\n+\taddr.s_addr = tunnel->ipv4.dst_addr;\n+\tNT_LOG(DBG, FILTER, \"    tun dst IP: %s\\n\", inet_ntoa(addr));\n+\tNT_LOG(DBG, FILTER, \"    tun tp_src: %i\\n\", htons(tunnel->tp_src));\n+\tNT_LOG(DBG, FILTER, \"    tun tp_dst: %i\\n\", htons(tunnel->tp_dst));\n+\tNT_LOG(DBG, FILTER, \"    tun flags:  %i\\n\", tunnel->tun_flags);\n+\tNT_LOG(DBG, FILTER, \"    tun ipv6:  %i\\n\", tunnel->is_ipv6);\n+\n+\tNT_LOG(DBG, FILTER, \"    tun tos:   %i\\n\", tunnel->tos);\n+\tNT_LOG(DBG, FILTER, \"    tun ttl:   %i\\n\", tunnel->ttl);\n+}\n+#endif\n+\n+static struct rte_flow_action _pmd_actions[] = {\n+\t{\t.type = (enum rte_flow_action_type)NT_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,\n+\t\t.conf = NULL\n+\t},\n+\t{ .type = 0, .conf = NULL }\n+};\n+\n+static int ntnic_tunnel_decap_set(struct rte_eth_dev *dev _unused,\n+\t\t\t\t  struct rte_flow_tunnel *tunnel,\n+\t\t\t\t  struct rte_flow_action **pmd_actions,\n+\t\t\t\t  uint32_t *num_of_actions,\n+\t\t\t\t  struct rte_flow_error *err _unused)\n+{\n+#ifdef DEBUGGING\n+\tNT_LOG(DBG, FILTER, \"%s: [%s:%u] start\\n\", __func__, __FILE__, __LINE__);\n+#endif\n+\n+\tif (tunnel->type == RTE_FLOW_ITEM_TYPE_VXLAN)\n+\t\t_pmd_actions[1].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;\n+\telse\n+\t\treturn -ENOTSUP;\n+\n+\t*pmd_actions = _pmd_actions;\n+\t*num_of_actions = 2;\n+\n+\treturn 0;\n+}\n+\n+static struct rte_flow_item _pmd_items = {\n+\t.type = (enum rte_flow_item_type)NT_RTE_FLOW_ITEM_TYPE_TUNNEL,\n+\t.spec = NULL,\n+\t.last = NULL,\n+\t.mask = NULL\n+};\n+\n+static int ntnic_tunnel_match(struct rte_eth_dev *dev _unused,\n+\t\t\t      struct rte_flow_tunnel *tunnel _unused,\n+\t\t\t      struct rte_flow_item **pmd_items,\n+\t\t\t      uint32_t *num_of_items,\n+\t\t\t      struct rte_flow_error *err _unused)\n+{\n+#ifdef DEBUGGING\n+\tNT_LOG(DBG, FILTER, \"%s: [%s:%u] start\\n\", __func__, __FILE__, __LINE__);\n+#endif\n+\n+\t*pmd_items = &_pmd_items;\n+\t*num_of_items = 1;\n+\treturn 0;\n+}\n+\n+/*\n+ * Restoration API support\n+ */\n+static int ntnic_get_restore_info(struct rte_eth_dev *dev _unused,\n+\t\t\t\t  struct rte_mbuf *m,\n+\t\t\t\t  struct rte_flow_restore_info *info,\n+\t\t\t\t  struct rte_flow_error *err _unused)\n+{\n+#ifdef DEBUGGING\n+\tNT_LOG(DBG, FILTER, \"%s: [%s:%u]\\n\", __func__, __FILE__, __LINE__);\n+\tNT_LOG(DBG, FILTER, \"dev name: %s - port_id %i\\n\", dev->data->name, dev->data->port_id);\n+\tNT_LOG(DBG, FILTER, \"dpdk tunnel mark %08x\\n\", m->hash.fdir.hi);\n+#endif\n+\n+\tif ((m->ol_flags & RTE_MBUF_F_RX_FDIR_ID) && m->hash.fdir.hi) {\n+\t\tuint8_t port_id = (m->hash.fdir.hi >> 24) & 0xff;\n+\t\tuint32_t stat_id = m->hash.fdir.lo & 0xffffff;\n+\n+\t\tstruct tunnel_cfg_s tuncfg;\n+\t\tint ret = flow_get_tunnel_definition(&tuncfg, stat_id, port_id);\n+\n+\t\tif (ret)\n+\t\t\treturn -EINVAL;\n+\n+\t\tif (tuncfg.ipversion == 4) {\n+\t\t\tinfo->tunnel.ipv4.dst_addr = tuncfg.v4.dst_ip;\n+\t\t\tinfo->tunnel.ipv4.src_addr = tuncfg.v4.src_ip;\n+\t\t\tinfo->tunnel.is_ipv6 = 0;\n+\t\t} else {\n+\t\t\t/* IPv6 */\n+\t\t\tfor (int i = 0; i < 16; i++) {\n+\t\t\t\tinfo->tunnel.ipv6.src_addr[i] =\n+\t\t\t\t\ttuncfg.v6.src_ip[i];\n+\t\t\t\tinfo->tunnel.ipv6.dst_addr[i] =\n+\t\t\t\t\ttuncfg.v6.dst_ip[i];\n+\t\t\t}\n+\t\t\tinfo->tunnel.is_ipv6 = 1;\n+\t\t}\n+\n+\t\tinfo->tunnel.tp_dst = tuncfg.d_port;\n+\t\tinfo->tunnel.tp_src = tuncfg.s_port;\n+\n+\t\tinfo->tunnel.ttl = 64;\n+\t\tinfo->tunnel.tos = 0;\n+\n+\t\t/* FLOW_TNL_F_KEY | FLOW_TNL_F_DO_NOT_FRAGMENT */\n+\t\tinfo->tunnel.tun_flags = (1 << 3) | (1 << 
1);\n+\n+\t\tinfo->tunnel.type = RTE_FLOW_ITEM_TYPE_VXLAN;\n+\t\tinfo->tunnel.tun_id = m->hash.fdir.hi & 0xffffff;\n+\n+\t\tinfo->flags = RTE_FLOW_RESTORE_INFO_TUNNEL;\n+\t\t/* | RTE_FLOW_RESTORE_INFO_ENCAPSULATED; if restored packet is sent back */\n+\t\tinfo->group_id = 0;\n+\n+#ifdef DEBUGGING\n+\t\t_print_tunnel(&info->tunnel);\n+#endif\n+\n+\t\treturn 0;\n+\t}\n+\treturn -EINVAL; /* Supported, but no hit found */\n+}\n+\n+static int\n+ntnic_tunnel_action_decap_release(struct rte_eth_dev *dev _unused,\n+\t\t\t\t  struct rte_flow_action *pmd_actions _unused,\n+\t\t\t\t  uint32_t num_of_actions _unused,\n+\t\t\t\t  struct rte_flow_error *err _unused)\n+{\n+#ifdef DEBUGGING\n+\tNT_LOG(DBG, FILTER, \"%s: [%s:%u] start\\n\", __func__, __FILE__, __LINE__);\n+#endif\n+\treturn 0;\n+}\n+\n+static int ntnic_tunnel_item_release(struct rte_eth_dev *dev _unused,\n+\t\t\t\t     struct rte_flow_item *pmd_items _unused,\n+\t\t\t\t     uint32_t num_of_items _unused,\n+\t\t\t\t     struct rte_flow_error *err _unused)\n+{\n+#ifdef DEBUGGING\n+\tNT_LOG(DBG, FILTER, \"%s: [%s:%u] start\\n\", __func__, __FILE__, __LINE__);\n+#endif\n+\treturn 0;\n+}\n+\n+const struct rte_flow_ops _dev_flow_ops = {\n+\t.validate = eth_flow_validate,\n+\t.create = eth_flow_create,\n+\t.destroy = eth_flow_destroy,\n+\t.flush = NULL,\n+\t.query = eth_flow_query,\n+\t.tunnel_decap_set = ntnic_tunnel_decap_set,\n+\t.tunnel_match = ntnic_tunnel_match,\n+\t.get_restore_info = ntnic_get_restore_info,\n+\t.tunnel_action_decap_release = ntnic_tunnel_action_decap_release,\n+\t.tunnel_item_release = ntnic_tunnel_item_release\n+\n+};\ndiff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.h b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h\nnew file mode 100644\nindex 0000000000..cf4207e5de\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.h\n@@ -0,0 +1,14 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef __NTNIC_FILTER_H__\n+#define __NTNIC_FILTER_H__\n+\n+struct rte_flow *\n+client_flow_create(struct flow_eth_dev *flw_dev, enum fpga_info_profile profile,\n+\t\t   struct cnv_attr_s *attribute, struct cnv_match_s *match,\n+\t\t   struct cnv_action_s *action, uint32_t flow_stat_id,\n+\t\t   struct rte_flow_error *error);\n+\n+#endif /* __NTNIC_FILTER_H__ */\ndiff --git a/drivers/net/ntnic/ntnic_hshconfig.c b/drivers/net/ntnic/ntnic_hshconfig.c\nnew file mode 100644\nindex 0000000000..a8eff76528\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_hshconfig.c\n@@ -0,0 +1,102 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include <ntlog.h>\n+#include <flow_api.h>\n+\n+#include \"ntnic_hshconfig.h\"\n+\n+#include <rte_ethdev.h>\n+#include <nthw_helper.h>\n+\n+struct pair_uint64_t {\n+\tuint64_t first;\n+\tuint64_t second;\n+};\n+\n+#define PAIR_NT(name)                 \\\n+\t{                             \\\n+\t\tRTE_##name, NT_##name \\\n+\t}\n+\n+struct pair_uint64_t rte_eth_rss_to_nt[] = {\n+\tPAIR_NT(ETH_RSS_IPV4),\n+\tPAIR_NT(ETH_RSS_FRAG_IPV4),\n+\tPAIR_NT(ETH_RSS_NONFRAG_IPV4_OTHER),\n+\tPAIR_NT(ETH_RSS_IPV6),\n+\tPAIR_NT(ETH_RSS_FRAG_IPV6),\n+\tPAIR_NT(ETH_RSS_NONFRAG_IPV6_OTHER),\n+\tPAIR_NT(ETH_RSS_IPV6_EX),\n+\tPAIR_NT(ETH_RSS_C_VLAN),\n+\tPAIR_NT(ETH_RSS_L3_DST_ONLY),\n+\tPAIR_NT(ETH_RSS_L3_SRC_ONLY),\n+\tPAIR_NT(ETH_RSS_LEVEL_OUTERMOST),\n+\tPAIR_NT(ETH_RSS_LEVEL_INNERMOST),\n+};\n+\n+static const uint64_t *rte_to_nt_rss_flag(const uint64_t rte_flag)\n+{\n+\tconst struct pair_uint64_t *start = 
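Illustrative sketch, not from the submitted patch: ntnic_get_restore_info() above rebuilds tunnel restore data from the 32-bit FDIR mark attached to the mbuf — the top 8 bits of hash.fdir.hi carry a port id, the low 24 bits of hash.fdir.lo a stat id, and the low 24 bits of hash.fdir.hi the tunnel id. A standalone decode of that layout (field and function names here are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical container for the decoded mark (illustration only). */
struct decoded_mark {
	uint8_t  port_id;  /* fdir.hi bits 31..24 */
	uint32_t tun_id;   /* fdir.hi bits 23..0  */
	uint32_t stat_id;  /* fdir.lo bits 23..0  */
};

static struct decoded_mark decode_fdir_mark(uint32_t fdir_hi, uint32_t fdir_lo)
{
	struct decoded_mark m;

	m.port_id = (fdir_hi >> 24) & 0xff;
	m.tun_id  = fdir_hi & 0xffffff;
	m.stat_id = fdir_lo & 0xffffff;
	return m;
}

int main(void)
{
	struct decoded_mark m = decode_fdir_mark(0x02000123u, 0x00000123u);

	printf("port %u, tun_id 0x%06x, stat_id 0x%06x\n",
	       (unsigned)m.port_id, (unsigned)m.tun_id, (unsigned)m.stat_id);
	return 0;
}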
rte_eth_rss_to_nt;\n+\n+\tfor (const struct pair_uint64_t *p = start;\n+\t\t\tp != start + ARRAY_SIZE(rte_eth_rss_to_nt); ++p) {\n+\t\tif (p->first == rte_flag)\n+\t\t\treturn &p->second;\n+\t}\n+\treturn NULL; /* NOT found */\n+}\n+\n+static const uint64_t *nt_to_rte_rss_flag(const uint64_t nt_flag)\n+{\n+\tconst struct pair_uint64_t *start = rte_eth_rss_to_nt;\n+\n+\tfor (const struct pair_uint64_t *p = start;\n+\t\t\tp != start + ARRAY_SIZE(rte_eth_rss_to_nt); ++p) {\n+\t\tif (p->second == nt_flag)\n+\t\t\treturn &p->first;\n+\t}\n+\treturn NULL; /* NOT found */\n+}\n+\n+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits)\n+{\n+\tstruct nt_eth_rss res = { 0 };\n+\n+\tfor (uint i = 0; i < sizeof(rte_hash_bits) * CHAR_BIT; ++i) {\n+\t\tuint64_t rte_bit = (UINT64_C(1) << i);\n+\n+\t\tif (rte_hash_bits & rte_bit) {\n+\t\t\tconst uint64_t *nt_bit_p = rte_to_nt_rss_flag(rte_bit);\n+\n+\t\t\tif (!nt_bit_p) {\n+\t\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t\t       \"RSS hash function field number %d is not supported. Only supported fields will be used in RSS hash function.\",\n+\t\t\t\t       i);\n+\t\t\t} else {\n+\t\t\t\tres.fields |= *nt_bit_p;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\treturn res;\n+}\n+\n+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh)\n+{\n+\tuint64_t res = 0;\n+\n+\tfor (uint i = 0; i < sizeof(nt_hsh.fields) * CHAR_BIT; ++i) {\n+\t\tuint64_t nt_bit = (UINT64_C(1) << i);\n+\n+\t\tif (nt_hsh.fields & nt_bit) {\n+\t\t\tconst uint64_t *rte_bit_p = nt_to_rte_rss_flag(nt_bit);\n+\n+\t\t\tassert(rte_bit_p &&\n+\t\t\t       \"All nt rss bit flags should be mapped to rte rss bit fields, as nt rss is a subset of rte options\");\n+\t\t\tres |= *rte_bit_p;\n+\t\t}\n+\t}\n+\n+\treturn res;\n+}\ndiff --git a/drivers/net/ntnic/ntnic_hshconfig.h b/drivers/net/ntnic/ntnic_hshconfig.h\nnew file mode 100644\nindex 0000000000..d4d7337d23\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_hshconfig.h\n@@ -0,0 +1,9 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include <flow_api.h>\n+\n+/* Mapping from dpdk rss hash defines to nt hash defines */\n+struct nt_eth_rss nt_rss_hash_field_from_dpdk(uint64_t rte_hash_bits);\n+uint64_t dpdk_rss_hash_define_from_nt_rss(struct nt_eth_rss nt_hsh);\ndiff --git a/drivers/net/ntnic/ntnic_meter.c b/drivers/net/ntnic/ntnic_meter.c\nnew file mode 100644\nindex 0000000000..027ae073dd\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_meter.c\n@@ -0,0 +1,811 @@\n+/*\n+ * SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include <stdint.h>\n+\n+#include <rte_common.h>\n+#include <rte_meter.h>\n+#include <ethdev_pci.h>\n+#include <ethdev_driver.h>\n+\n+#include \"ntdrv_4ga.h\"\n+#include \"nthw_fpga.h\"\n+#include \"ntnic_ethdev.h\"\n+#include \"ntnic_meter.h\"\n+#include \"ntlog.h\"\n+\n+/*\n+ *******************************************************************************\n+ * Vswitch metering\n+ *******************************************************************************\n+ */\n+\n+static const uint32_t highest_bit_mask = (~(~0u >> 1));\n+\n+static struct nt_mtr_profile *\n+nt_mtr_profile_find(struct pmd_internals *dev_priv, uint32_t meter_profile_id)\n+{\n+\tstruct nt_mtr_profile *profile = NULL;\n+\n+\tLIST_FOREACH(profile, &dev_priv->mtr_profiles, next)\n+\tif (profile->profile_id == meter_profile_id)\n+\t\tbreak;\n+\n+\treturn profile;\n+}\n+\n+static int eth_meter_profile_add(struct rte_eth_dev *dev,\n+\t\t\t\t uint32_t meter_profile_id,\n+\t\t\t\t struct 
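Illustrative sketch, not from the submitted patch: the pair table in ntnic_hshconfig.c above translates single RSS flag bits between the RTE and NT domains by linear search, skipping bits that have no mapping. The same bit-by-bit translation, reduced to a self-contained example with a made-up two-entry table:

#include <stdint.h>
#include <stdio.h>

/* Stand-in mapping table; the real driver pairs RTE_ETH_RSS_* with NT_* flags. */
struct pair { uint64_t from; uint64_t to; };

static const struct pair demo_map[] = {
	{ 1ULL << 2, 1ULL << 0 },   /* made-up values for illustration */
	{ 1ULL << 8, 1ULL << 1 },
};

static uint64_t translate_bits(uint64_t in)
{
	uint64_t out = 0;

	for (unsigned int i = 0; i < 64; i++) {
		uint64_t bit = 1ULL << i;
		int found = 0;

		if (!(in & bit))
			continue;

		for (size_t j = 0; j < sizeof(demo_map) / sizeof(demo_map[0]); j++) {
			if (demo_map[j].from == bit) {
				out |= demo_map[j].to;
				found = 1;
				break;
			}
		}
		if (!found)
			printf("bit %u has no mapping - ignored\n", i);
	}
	return out;
}

int main(void)
{
	/* Bits 2 and 8 map; bit 5 is dropped with a notice. */
	printf("mapped mask: 0x%llx\n",
	       (unsigned long long)translate_bits((1ULL << 2) | (1ULL << 5) | (1ULL << 8)));
	return 0;
}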
rte_mtr_meter_profile *profile,\n+\t\t\t\t struct rte_mtr_error *error)\n+{\n+\tstruct pmd_internals *dev_priv = dev->data->dev_private;\n+\n+\tNT_LOG(DBG, NTHW, \"%s: [%s:%u] adapter: \" PCIIDENT_PRINT_STR \"\\n\",\n+\t       __func__, __func__, __LINE__,\n+\t       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));\n+\n+\tconst bool is_egress = meter_profile_id & highest_bit_mask;\n+\n+\tif (dev_priv->type == PORT_TYPE_VIRTUAL || is_egress) {\n+\t\tstruct nt_mtr_profile *prof;\n+\n+\t\tprof = nt_mtr_profile_find(dev_priv, meter_profile_id);\n+\t\tif (prof)\n+\t\t\treturn -rte_mtr_error_set(error, EEXIST,\n+\t\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,\n+\t\t\t\t\t\t  NULL,\n+\t\t\t\t\t\t  \"Profile id already exists\\n\");\n+\n+\t\tprof = rte_zmalloc(NULL, sizeof(*prof), 0);\n+\t\tif (!prof) {\n+\t\t\treturn -rte_mtr_error_set(error,\n+\t\t\t\t\t\t  ENOMEM, RTE_MTR_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\t\t\t  NULL, NULL);\n+\t\t}\n+\n+\t\tprof->profile_id = meter_profile_id;\n+\t\tmemcpy(&prof->profile, profile,\n+\t\t       sizeof(struct rte_mtr_meter_profile));\n+\n+\t\tLIST_INSERT_HEAD(&dev_priv->mtr_profiles, prof, next);\n+\n+\t\treturn 0;\n+\t}\n+\t/* Ingress is not possible yet on phy ports */\n+\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\tRTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\"Traffic ingress metering/policing is not supported on physical ports\\n\");\n+}\n+\n+static int eth_meter_profile_delete(struct rte_eth_dev *dev,\n+\t\t\t\t    uint32_t meter_profile_id,\n+\t\t\t\t    struct rte_mtr_error *error)\n+{\n+\tstruct pmd_internals *dev_priv = dev->data->dev_private;\n+\tstruct nt_mtr_profile *profile;\n+\n+\tNT_LOG(DBG, NTHW, \"%s: [%s:%u] adapter: \" PCIIDENT_PRINT_STR \"\\n\",\n+\t       __func__, __func__, __LINE__,\n+\t       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));\n+\n+\tprofile = nt_mtr_profile_find(dev_priv, meter_profile_id);\n+\tif (!profile)\n+\t\treturn -rte_mtr_error_set(error, ENODEV,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,\n+\t\t\t\t\t  NULL, \"Profile id does not exist\\n\");\n+\n+\tLIST_REMOVE(profile, next);\n+\trte_free(profile);\n+\treturn 0;\n+}\n+\n+static struct nt_mtr *nt_mtr_find(struct pmd_internals *dev_priv,\n+\t\t\t\t  uint32_t mtr_id)\n+{\n+\tstruct nt_mtr *mtr = NULL;\n+\n+\tLIST_FOREACH(mtr, &dev_priv->mtrs, next)\n+\tif (mtr->mtr_id == mtr_id)\n+\t\tbreak;\n+\n+\treturn mtr;\n+}\n+\n+struct qos_integer_fractional {\n+\tuint32_t integer;\n+\tuint32_t fractional; /* 1/1024 */\n+};\n+\n+/*\n+ * Converts byte/s to byte/period if form of integer + 1/1024*fractional\n+ * the period depends on the clock friquency and other parameters which\n+ * being combined give multiplier. 
The resulting formula is:\n+ *     f[bytes/period] = x[byte/s] * period_ps / 10^-12\n+ */\n+static struct qos_integer_fractional\n+byte_per_second_to_qo_s_ri(uint64_t byte_per_second, uint64_t period_ps)\n+{\n+\tstruct qos_integer_fractional res;\n+\tconst uint64_t dividend = byte_per_second * period_ps;\n+\tconst uint64_t divisor = 1000000000000ull; /*10^12 pico second*/\n+\n+\tres.integer = dividend / divisor;\n+\tconst uint64_t reminder = dividend % divisor;\n+\n+\tres.fractional = 1024ull * reminder / divisor;\n+\treturn res;\n+}\n+\n+static struct qos_integer_fractional\n+byte_per_second_to_physical_qo_s_ri(uint64_t byte_per_second)\n+{\n+\treturn byte_per_second_to_qo_s_ri(byte_per_second, 8 * 3333ul);\n+}\n+\n+static struct qos_integer_fractional\n+byte_per_second_to_virtual_qo_s_ri(uint64_t byte_per_second)\n+{\n+\treturn byte_per_second_to_qo_s_ri(byte_per_second, 512 * 3333ul);\n+}\n+\n+static int eth_meter_enable(struct rte_eth_dev *dev, uint32_t mtr_id,\n+\t\t\t    struct rte_mtr_error *error)\n+{\n+\tstruct pmd_internals *dev_priv = dev->data->dev_private;\n+\tint res;\n+\tstatic int ingress_initial;\n+\n+\tNT_LOG(DBG, NTHW, \"%s: [%s:%u] adapter: \" PCIIDENT_PRINT_STR \"\\n\",\n+\t       __func__, __func__, __LINE__,\n+\t       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));\n+\n+\tnthw_dbs_t *p_nthw_dbs =\n+\t\tdev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;\n+\tnthw_epp_t *p_nthw_epp =\n+\t\tdev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;\n+\n+\t/*\n+\t *  FPGA is based on FRC 4115 so CIR,EIR and CBS/EBS are used\n+\t *   rfc4115.cir = rfc2697.cir\n+\t *   rfc4115.eir = rfc2697.cir\n+\t *   rfc4115.cbs = rfc2697.cbs\n+\t *   rfc4115.ebs = rfc2697.ebs\n+\t */\n+\tstruct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);\n+\n+\tif (!mtr) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,\n+\t\t\t\t\t  \"Meter id not found\\n\");\n+\t}\n+\n+\tif (!mtr->profile) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,\n+\t\t\t\t\t  NULL, \"Meter profile id not found\\n\");\n+\t}\n+\n+\tconst uint32_t profile_id = mtr->profile->profile_id;\n+\tconst bool is_egress = profile_id & highest_bit_mask;\n+\tuint32_t burst = mtr->profile->profile.srtcm_rfc2697.cbs;\n+\n+\tif (is_egress) {\n+\t\tconst bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);\n+\t\tstruct qos_integer_fractional cir = { 0 };\n+\n+\t\tif (is_virtual) {\n+\t\t\tcir =\n+\t\t\tbyte_per_second_to_virtual_qo_s_ri(mtr->profile->profile.srtcm_rfc2697.cir);\n+\t\t\tif (cir.integer == 0 && cir.fractional == 0)\n+\t\t\t\tcir.fractional = 1;\n+\t\t\tres = nthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port,\n+\t\t\t\t\t\t  cir.integer, cir.fractional,\n+\t\t\t\t\t\t  burst);\n+\t\t} else {\n+\t\t\tcir =\n+\t\t\t\tbyte_per_second_to_physical_qo_s_ri(mtr->profile->profile\n+\t\t\t\t\t\t\t\t    .srtcm_rfc2697.cir);\n+\t\t\tif (cir.integer == 0 && cir.fractional == 0)\n+\t\t\t\tcir.fractional = 1;\n+\t\t\tres = nthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port,\n+\t\t\t\t\t\tcir.integer, cir.fractional,\n+\t\t\t\t\t\tburst);\n+\t\t}\n+\t\tif (res) {\n+\t\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\tRTE_MTR_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"Applying meter profile for setting egress policy 
failed\\n\");\n+\t\t}\n+\t} else {\n+\t\tif (!ingress_initial) {\n+\t\t\t/*\n+\t\t\t * based on a 250Mhz FPGA\n+\t\t\t * _update refresh rate interval calculation:\n+\t\t\t * multiplier / (divider * 4ns)\n+\t\t\t * 1 / (2000 * 4ns) = 8,000*10-6 => refresh rate interval = 8000ns\n+\t\t\t *\n+\t\t\t * results in resolution of IR is 1Mbps\n+\t\t\t */\n+\t\t\tres = nthw_set_tx_qos_rate_global(p_nthw_dbs, 1, 2000);\n+\n+\t\t\tif (res) {\n+\t\t\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\tRTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t\"Applying meter profile for setting ingress \"\n+\t\t\t\t\t\"global QoS rate failed\\n\");\n+\t\t\t}\n+\t\t\tingress_initial = 1;\n+\t\t}\n+\n+\t\tif (mtr->profile->profile.srtcm_rfc2697.cbs >= (1 << 27)) {\n+\t\t\t/* max burst 1,074Mb (27 bits) */\n+\t\t\tmtr->profile->profile.srtcm_rfc2697.cbs = (1 << 27) - 1;\n+\t\t}\n+\t\t/* IR - fill x bytes each 8000ns -> 1B/8000ns => 1000Kbps => 125000Bps / x */\n+\t\tres = nthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */\n+\t\t\t\t\t     1, /* enable */\n+\t\t\t\t\t     mtr->profile->profile.srtcm_rfc2697.cir /\n+\t\t\t\t\t     125000,\n+\t\t\t\t\t     mtr->profile->profile.srtcm_rfc2697\n+\t\t\t\t\t     .cbs); /* BS - burst size in Bytes */\n+\t\tif (res) {\n+\t\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\tRTE_MTR_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL, \"Applying meter profile failed\\n\");\n+\t\t}\n+\t}\n+\treturn 0;\n+}\n+\n+static void disable(struct pmd_internals *dev_priv)\n+{\n+\tNT_LOG(DBG, NTHW, \"%s: [%s:%u] adapter: \" PCIIDENT_PRINT_STR \"\\n\",\n+\t       __func__, __func__, __LINE__,\n+\t       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));\n+\n+\tnthw_dbs_t *p_nthw_dbs =\n+\t\tdev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_dbs;\n+\tnthw_set_tx_qos_config(p_nthw_dbs, dev_priv->port, /* vport */\n+\t\t\t       0, /* disable */\n+\t\t\t       0, /* IR */\n+\t\t\t       0); /* BS */\n+}\n+\n+static int eth_meter_disable(struct rte_eth_dev *dev, uint32_t mtr_id,\n+\t\t\t     struct rte_mtr_error *error)\n+{\n+\tstruct pmd_internals *dev_priv = dev->data->dev_private;\n+\tstruct nt_mtr *mtr = nt_mtr_find(dev_priv, mtr_id);\n+\n+\tNT_LOG(DBG, NTHW, \"%s: [%s:%u] adapter: \" PCIIDENT_PRINT_STR \"\\n\",\n+\t       __func__, __func__, __LINE__,\n+\t       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));\n+\n+\tnthw_epp_t *p_nthw_epp =\n+\t\tdev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;\n+\n+\tif (!mtr) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,\n+\t\t\t\t\t  \"Meter id not found\\n\");\n+\t}\n+\n+\tconst bool is_egress = mtr_id & highest_bit_mask;\n+\n+\tif (is_egress) {\n+\t\tconst bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);\n+\n+\t\tif (is_virtual)\n+\t\t\tnthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);\n+\t\telse\n+\t\t\tnthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);\n+\t} else {\n+\t\tdisable(dev_priv);\n+\t}\n+\treturn 0;\n+}\n+\n+/* MTR object create */\n+static int eth_mtr_create(struct rte_eth_dev *dev, uint32_t mtr_id,\n+\t\t\t  struct rte_mtr_params *params, int 
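Illustrative sketch, not from the submitted patch: byte_per_second_to_qo_s_ri() above converts a rate in bytes per second into bytes per hardware period, expressed as an integer plus a 1/1024 fraction, where the period is given in picoseconds (8 * 3333 ps for physical ports, 512 * 3333 ps for virtual ports). The same arithmetic as a standalone function with one worked value:

#include <stdint.h>
#include <stdio.h>

struct int_frac {
	uint32_t integer;
	uint32_t fractional; /* in units of 1/1024 */
};

/* bytes/period = bytes/s * period_ps / 10^12, since period_ps is in picoseconds */
static struct int_frac bytes_per_period(uint64_t bytes_per_second, uint64_t period_ps)
{
	const uint64_t divisor = 1000000000000ULL; /* 10^12 ps per second */
	const uint64_t dividend = bytes_per_second * period_ps;
	struct int_frac r;

	r.integer = dividend / divisor;
	r.fractional = 1024ULL * (dividend % divisor) / divisor;
	return r;
}

int main(void)
{
	/* 1 Gbit/s = 125,000,000 B/s on a physical-port period of 8 * 3333 ps:
	 * 125e6 * 26664 = 3.333e12, i.e. 3 whole bytes plus 340/1024 per period.
	 */
	struct int_frac r = bytes_per_period(125000000ULL, 8 * 3333ULL);

	printf("%u + %u/1024 bytes per period\n", r.integer, r.fractional);
	return 0;
}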
shared,\n+\t\t\t  struct rte_mtr_error *error)\n+{\n+\tstruct pmd_internals *dev_priv = dev->data->dev_private;\n+\tstruct nt_mtr *mtr = NULL;\n+\tstruct nt_mtr_profile *profile;\n+\n+\tNT_LOG(DBG, NTHW, \"%s: [%s:%u] adapter: \" PCIIDENT_PRINT_STR \"\\n\",\n+\t       __func__, __func__, __LINE__,\n+\t       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));\n+\n+\tconst bool is_egress = mtr_id & highest_bit_mask;\n+\n+\tif (dev_priv->type == PORT_TYPE_PHYSICAL && !is_egress) {\n+\t\tNT_LOG(ERR, NTHW,\n+\t\t       \"ERROR try to create ingress meter object on a phy port. Not supported\\n\");\n+\n+\t\treturn -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\"Traffic ingress metering/policing is not supported on physical ports\\n\");\n+\t}\n+\n+\tmtr = nt_mtr_find(dev_priv, mtr_id);\n+\tif (mtr)\n+\t\treturn -rte_mtr_error_set(error, EEXIST,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,\n+\t\t\t\t\t  \"Meter id already exists\\n\");\n+\n+\tprofile = nt_mtr_profile_find(dev_priv, params->meter_profile_id);\n+\tif (!profile) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,\n+\t\t\t\t\t  NULL, \"Profile id does not exist\\n\");\n+\t}\n+\n+\tmtr = rte_zmalloc(NULL, sizeof(struct nt_mtr), 0);\n+\tif (!mtr)\n+\t\treturn -rte_mtr_error_set(error, ENOMEM,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t  NULL);\n+\n+\tmtr->shared = shared;\n+\tmtr->mtr_id = mtr_id;\n+\tmtr->profile = profile;\n+\tLIST_INSERT_HEAD(&dev_priv->mtrs, mtr, next);\n+\n+\tif (params->meter_enable)\n+\t\treturn eth_meter_enable(dev, mtr_id, error);\n+\n+\treturn 0;\n+}\n+\n+/* MTR object destroy */\n+static int eth_mtr_destroy(struct rte_eth_dev *dev, uint32_t mtr_id,\n+\t\t\t   struct rte_mtr_error *error)\n+{\n+\tstruct pmd_internals *dev_priv = dev->data->dev_private;\n+\tstruct nt_mtr *mtr;\n+\n+\tNT_LOG(DBG, NTHW, \"%s: [%s:%u] adapter: \" PCIIDENT_PRINT_STR \"\\n\",\n+\t       __func__, __func__, __LINE__,\n+\t       PCIIDENT_TO_DOMAIN(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_BUSNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_DEVNR(dev_priv->p_drv->ntdrv.pciident),\n+\t       PCIIDENT_TO_FUNCNR(dev_priv->p_drv->ntdrv.pciident));\n+\n+\tnthw_epp_t *p_nthw_epp =\n+\t\tdev_priv->p_drv->ntdrv.adapter_info.fpga_info.mp_nthw_epp;\n+\n+\tmtr = nt_mtr_find(dev_priv, mtr_id);\n+\tif (!mtr)\n+\t\treturn -rte_mtr_error_set(error, EEXIST,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_MTR_ID, NULL,\n+\t\t\t\t\t  \"Meter id does not exist\\n\");\n+\n+\tconst bool is_egress = mtr_id & highest_bit_mask;\n+\n+\tif (is_egress) {\n+\t\tconst bool is_virtual = (dev_priv->type == PORT_TYPE_VIRTUAL);\n+\n+\t\tif (is_virtual)\n+\t\t\tnthw_epp_set_vport_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);\n+\t\telse\n+\t\t\tnthw_epp_set_txp_qos(p_nthw_epp, dev_priv->port, 0, 0, 0);\n+\t} else {\n+\t\tdisable(dev_priv);\n+\t}\n+\tLIST_REMOVE(mtr, next);\n+\trte_free(mtr);\n+\treturn 0;\n+}\n+\n+/*\n+ *******************************************************************************\n+ * Inline FLM metering\n+ *******************************************************************************\n+ */\n+\n+static int eth_mtr_capabilities_get_inline(struct rte_eth_dev *dev,\n+\t\tstruct rte_mtr_capabilities *cap,\n+\t\tstruct rte_mtr_error *error)\n+{\n+\tstruct 
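Illustrative sketch, not from the submitted patch: in the vswitch meter path above, the most significant bit of the 32-bit meter (and profile) id acts as an egress marker — ids with bit 31 set are handled as egress meters, all others as ingress, which is rejected on physical ports. A minimal illustration of that convention:

#include <stdint.h>
#include <stdio.h>

/* Same mask expression as the driver: only the top bit of a 32-bit word. */
static const uint32_t highest_bit_mask = ~(~0u >> 1); /* 0x80000000 */

static int is_egress_meter_id(uint32_t mtr_id)
{
	return (mtr_id & highest_bit_mask) != 0;
}

int main(void)
{
	uint32_t ingress_id = 7;                   /* bit 31 clear -> ingress */
	uint32_t egress_id = 7 | highest_bit_mask; /* bit 31 set   -> egress  */

	printf("id 0x%08x egress? %d\n", ingress_id, is_egress_meter_id(ingress_id));
	printf("id 0x%08x egress? %d\n", egress_id, is_egress_meter_id(egress_id));
	return 0;
}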
pmd_internals *dev_priv = dev->data->dev_private;\n+\n+\tif (!flow_mtr_supported(dev_priv->flw_dev)) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\tRTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\"Ethernet device does not support metering\\n\");\n+\t}\n+\n+\tmemset(cap, 0x0, sizeof(struct rte_mtr_capabilities));\n+\n+\t/* MBR records use 28-bit integers */\n+\tcap->n_max = flow_mtr_meters_supported();\n+\tcap->n_shared_max = cap->n_max;\n+\n+\tcap->identical = 0;\n+\tcap->shared_identical = 0;\n+\n+\tcap->shared_n_flows_per_mtr_max = UINT32_MAX;\n+\n+\t/* Limited by number of MBR record ids per FLM learn record */\n+\tcap->chaining_n_mtrs_per_flow_max = 4;\n+\n+\tcap->chaining_use_prev_mtr_color_supported = 0;\n+\tcap->chaining_use_prev_mtr_color_enforced = 0;\n+\n+\tcap->meter_rate_max = (uint64_t)(0xfff << 0xf) * 1099;\n+\n+\tcap->stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |\n+\t\t\t  RTE_MTR_STATS_N_BYTES_GREEN;\n+\n+\t/* Only color-blind mode is supported */\n+\tcap->color_aware_srtcm_rfc2697_supported = 0;\n+\tcap->color_aware_trtcm_rfc2698_supported = 0;\n+\tcap->color_aware_trtcm_rfc4115_supported = 0;\n+\n+\t/* Focused on RFC2698 for now */\n+\tcap->meter_srtcm_rfc2697_n_max = 0;\n+\tcap->meter_trtcm_rfc2698_n_max = cap->n_max;\n+\tcap->meter_trtcm_rfc4115_n_max = 0;\n+\n+\tcap->meter_policy_n_max = flow_mtr_meter_policy_n_max();\n+\n+\t/* Byte mode is supported */\n+\tcap->srtcm_rfc2697_byte_mode_supported = 0;\n+\tcap->trtcm_rfc2698_byte_mode_supported = 1;\n+\tcap->trtcm_rfc4115_byte_mode_supported = 0;\n+\n+\t/* Packet mode not supported */\n+\tcap->srtcm_rfc2697_packet_mode_supported = 0;\n+\tcap->trtcm_rfc2698_packet_mode_supported = 0;\n+\tcap->trtcm_rfc4115_packet_mode_supported = 0;\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_mtr_meter_profile_add_inline(struct rte_eth_dev *dev,\n+\t\t\t\t uint32_t meter_profile_id,\n+\t\t\t\t struct rte_mtr_meter_profile *profile,\n+\t\t\t\t struct rte_mtr_error *error __rte_unused)\n+{\n+\tstruct pmd_internals *dev_priv = dev->data->dev_private;\n+\n+\tif (meter_profile_id >= flow_mtr_meter_policy_n_max())\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,\n+\t\t\t\t\t  NULL, \"Profile id out of range\\n\");\n+\n+\tif (profile->packet_mode != 0) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_PROFILE_PACKET_MODE, NULL,\n+\t\t\t\t\t  \"Profile packet mode not supported\\n\");\n+\t}\n+\n+\tif (profile->alg == RTE_MTR_SRTCM_RFC2697) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_PROFILE,\n+\t\t\t\t\t  NULL, \"RFC 2697 not supported\\n\");\n+\t}\n+\n+\tif (profile->alg == RTE_MTR_TRTCM_RFC4115) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_PROFILE,\n+\t\t\t\t\t  NULL, \"RFC 4115 not supported\\n\");\n+\t}\n+\n+\tif (profile->trtcm_rfc2698.cir != profile->trtcm_rfc2698.pir ||\n+\t\t\tprofile->trtcm_rfc2698.cbs != profile->trtcm_rfc2698.pbs) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_PROFILE, NULL,\n+\t\t\t\t\t  \"Profile committed and peak rates must be equal\\n\");\n+\t}\n+\n+\tint res = flow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id,\n+\t\t\t\t       profile->trtcm_rfc2698.cir,\n+\t\t\t\t       profile->trtcm_rfc2698.cbs, 0, 0);\n+\n+\tif (res) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_PROFILE,\n+\t\t\t\t\t  NULL,\n+\t\t\t\t\t  \"Profile could not be added.\\n\");\n+\t}\n+\n+\treturn 
0;\n+}\n+\n+static int\n+eth_mtr_meter_profile_delete_inline(struct rte_eth_dev *dev __rte_unused,\n+\t\t\t\t    uint32_t meter_profile_id __rte_unused,\n+\t\t\t\t    struct rte_mtr_error *error __rte_unused)\n+{\n+\tstruct pmd_internals *dev_priv = dev->data->dev_private;\n+\n+\tif (meter_profile_id >= flow_mtr_meter_policy_n_max())\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,\n+\t\t\t\t\t  NULL, \"Profile id out of range\\n\");\n+\n+\tflow_mtr_set_profile(dev_priv->flw_dev, meter_profile_id, 0, 0, 0, 0);\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_mtr_meter_policy_add_inline(struct rte_eth_dev *dev, uint32_t policy_id,\n+\t\t\t\tstruct rte_mtr_meter_policy_params *policy,\n+\t\t\t\tstruct rte_mtr_error *error)\n+{\n+\tstruct pmd_internals *dev_priv = dev->data->dev_private;\n+\n+\tif (policy_id >= flow_mtr_meter_policy_n_max())\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,\n+\t\t\t\t\t  NULL, \"Policy id out of range\\n\");\n+\n+\tconst struct rte_flow_action *actions =\n+\t\t\tpolicy->actions[RTE_COLOR_GREEN];\n+\tint green_action_supported =\n+\t\t(actions[0].type == RTE_FLOW_ACTION_TYPE_END) ||\n+\t\t(actions[0].type == RTE_FLOW_ACTION_TYPE_VOID &&\n+\t\t actions[1].type == RTE_FLOW_ACTION_TYPE_END) ||\n+\t\t(actions[0].type == RTE_FLOW_ACTION_TYPE_PASSTHRU &&\n+\t\t actions[1].type == RTE_FLOW_ACTION_TYPE_END);\n+\n+\tactions = policy->actions[RTE_COLOR_YELLOW];\n+\tint yellow_action_supported =\n+\t\tactions[0].type == RTE_FLOW_ACTION_TYPE_DROP &&\n+\t\tactions[1].type == RTE_FLOW_ACTION_TYPE_END;\n+\n+\tactions = policy->actions[RTE_COLOR_RED];\n+\tint red_action_supported = actions[0].type ==\n+\t\t\t\t   RTE_FLOW_ACTION_TYPE_DROP &&\n+\t\t\t\t   actions[1].type == RTE_FLOW_ACTION_TYPE_END;\n+\n+\tif (green_action_supported == 0 || yellow_action_supported == 0 ||\n+\t\t\tred_action_supported == 0) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,\n+\t\t\t\t\t  \"Unsupported meter policy actions\\n\");\n+\t}\n+\n+\tif (flow_mtr_set_policy(dev_priv->flw_dev, policy_id, 1)) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,\n+\t\t\t\t\t  \"Policy could not be added\\n\");\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+eth_mtr_meter_policy_delete_inline(struct rte_eth_dev *dev __rte_unused,\n+\t\t\t\t   uint32_t policy_id __rte_unused,\n+\t\t\t\t   struct rte_mtr_error *error __rte_unused)\n+{\n+\tif (policy_id >= flow_mtr_meter_policy_n_max())\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,\n+\t\t\t\t\t  NULL, \"Policy id out of range\\n\");\n+\n+\treturn 0;\n+}\n+\n+static int eth_mtr_create_inline(struct rte_eth_dev *dev, uint32_t mtr_id,\n+\t\t\t\t struct rte_mtr_params *params, int shared,\n+\t\t\t\t struct rte_mtr_error *error)\n+{\n+\tstruct pmd_internals *dev_priv = dev->data->dev_private;\n+\n+\tif (params->use_prev_mtr_color != 0 || params->dscp_table != NULL) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,\n+\t\t\t\t\t  \"Only color blind mode is supported\\n\");\n+\t}\n+\n+\tuint64_t allowed_stats_mask = RTE_MTR_STATS_N_PKTS_GREEN |\n+\t\t\t\t      RTE_MTR_STATS_N_BYTES_GREEN;\n+\tif ((params->stats_mask & ~allowed_stats_mask) != 0) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,\n+\t\t\t\t\t  \"Requested color stats not 
supported\\n\");\n+\t}\n+\n+\tif (params->meter_enable == 0) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,\n+\t\t\t\t\t  \"Disabled meters not supported\\n\");\n+\t}\n+\n+\tif (shared == 0) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,\n+\t\t\t\t\t  \"Only shared mtrs are supported\\n\");\n+\t}\n+\n+\tif (params->meter_profile_id >= flow_mtr_meter_policy_n_max())\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,\n+\t\t\t\t\t  NULL, \"Profile id out of range\\n\");\n+\n+\tif (params->meter_policy_id >= flow_mtr_meter_policy_n_max())\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_METER_POLICY_ID,\n+\t\t\t\t\t  NULL, \"Policy id out of range\\n\");\n+\n+\tif (mtr_id >= flow_mtr_meters_supported()) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,\n+\t\t\t\t\t  \"MTR id is out of range\\n\");\n+\t}\n+\n+\tint res = flow_mtr_create_meter(dev_priv->flw_dev, mtr_id,\n+\t\t\t\t\tparams->meter_profile_id,\n+\t\t\t\t\tparams->meter_policy_id,\n+\t\t\t\t\tparams->stats_mask);\n+\n+\tif (res) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t  \"Failed to offload to hardware\\n\");\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int eth_mtr_destroy_inline(struct rte_eth_dev *dev, uint32_t mtr_id,\n+\t\t\t\t  struct rte_mtr_error *error __rte_unused)\n+{\n+\tstruct pmd_internals *dev_priv = dev->data->dev_private;\n+\n+\tif (mtr_id >= flow_mtr_meters_supported()) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,\n+\t\t\t\t\t  \"MTR id is out of range\\n\");\n+\t}\n+\n+\tif (flow_mtr_destroy_meter(dev_priv->flw_dev, mtr_id)) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t  \"Failed to offload to hardware\\n\");\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int eth_mtr_stats_adjust_inline(struct rte_eth_dev *dev, uint32_t mtr_id,\n+\t\t\t\t       uint64_t adjust_value,\n+\t\t\t\t       struct rte_mtr_error *error)\n+{\n+\tconst uint64_t adjust_bit = 1ULL << 63;\n+\tstruct pmd_internals *dev_priv = dev->data->dev_private;\n+\n+\tif (mtr_id >= flow_mtr_meters_supported()) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,\n+\t\t\t\t\t  \"MTR id is out of range\\n\");\n+\t}\n+\n+\tif ((adjust_value & adjust_bit) == 0) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,\n+\t\t\t\"To adjust a MTR bucket value, bit 63 of \\\"stats_mask\\\" must be 1\\n\");\n+\t}\n+\n+\tadjust_value &= adjust_bit - 1;\n+\n+\tif (adjust_value > (uint64_t)UINT32_MAX) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,\n+\t\t\t\t\t  \"Adjust value is out of range\\n\");\n+\t}\n+\n+\tif (flm_mtr_adjust_stats(dev_priv->flw_dev, mtr_id,\n+\t\t\t\t (uint32_t)adjust_value)) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t\t  \"Failed to adjust offloaded MTR\\n\");\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int eth_mtr_stats_read_inline(struct rte_eth_dev *dev, uint32_t mtr_id,\n+\t\t\t\t     struct rte_mtr_stats *stats,\n+\t\t\t\t     uint64_t *stats_mask, int clear,\n+\t\t\t\t     struct rte_mtr_error *error)\n+{\n+\tstruct pmd_internals *dev_priv = 
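Illustrative sketch, not from the submitted patch: eth_mtr_stats_adjust_inline() above overloads the 64-bit value passed through the stats_update callback — bit 63 must be set to request a bucket adjustment, and the remaining low bits (limited to a 32-bit range) carry the adjustment value. A caller-side encode plus driver-side decode of that convention:

#include <stdint.h>
#include <stdio.h>

#define ADJUST_BIT (1ULL << 63)

/* Encode a 32-bit bucket adjustment the way the driver expects it. */
static uint64_t encode_adjust(uint32_t value)
{
	return ADJUST_BIT | value;
}

/* Decode: reject if bit 63 is missing, strip it, then range-check. */
static int decode_adjust(uint64_t raw, uint32_t *value)
{
	if (!(raw & ADJUST_BIT))
		return -1;              /* not an adjustment request */
	raw &= ADJUST_BIT - 1;          /* keep bits 62..0 */
	if (raw > UINT32_MAX)
		return -1;              /* out of range */
	*value = (uint32_t)raw;
	return 0;
}

int main(void)
{
	uint32_t out = 0;
	uint64_t raw = encode_adjust(4096);

	printf("encoded 0x%016llx, decode rc %d, value %u\n",
	       (unsigned long long)raw, decode_adjust(raw, &out), out);
	return 0;
}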
dev->data->dev_private;\n+\n+\tif (mtr_id >= flow_mtr_meters_supported()) {\n+\t\treturn -rte_mtr_error_set(error, EINVAL,\n+\t\t\t\t\t  RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL,\n+\t\t\t\t\t  \"MTR id is out of range\\n\");\n+\t}\n+\n+\tmemset(stats, 0x0, sizeof(struct rte_mtr_stats));\n+\tflm_mtr_read_stats(dev_priv->flw_dev, mtr_id, stats_mask,\n+\t\t\t   &stats->n_pkts[RTE_COLOR_GREEN],\n+\t\t\t   &stats->n_bytes[RTE_COLOR_GREEN], clear);\n+\n+\treturn 0;\n+}\n+\n+/*\n+ *******************************************************************************\n+ * Ops setup\n+ *******************************************************************************\n+ */\n+\n+static const struct rte_mtr_ops mtr_ops_vswitch = {\n+\t.meter_profile_add = eth_meter_profile_add,\n+\t.meter_profile_delete = eth_meter_profile_delete,\n+\t.create = eth_mtr_create,\n+\t.destroy = eth_mtr_destroy,\n+\t.meter_enable = eth_meter_enable,\n+\t.meter_disable = eth_meter_disable,\n+};\n+\n+static const struct rte_mtr_ops mtr_ops_inline = {\n+\t.capabilities_get = eth_mtr_capabilities_get_inline,\n+\t.meter_profile_add = eth_mtr_meter_profile_add_inline,\n+\t.meter_profile_delete = eth_mtr_meter_profile_delete_inline,\n+\t.create = eth_mtr_create_inline,\n+\t.destroy = eth_mtr_destroy_inline,\n+\t.meter_policy_add = eth_mtr_meter_policy_add_inline,\n+\t.meter_policy_delete = eth_mtr_meter_policy_delete_inline,\n+\t.stats_update = eth_mtr_stats_adjust_inline,\n+\t.stats_read = eth_mtr_stats_read_inline,\n+};\n+\n+int eth_mtr_ops_get(struct rte_eth_dev *dev, void *ops)\n+{\n+\tstruct pmd_internals *internals =\n+\t\t(struct pmd_internals *)dev->data->dev_private;\n+\tntdrv_4ga_t *p_nt_drv = &internals->p_drv->ntdrv;\n+\tenum fpga_info_profile profile = p_nt_drv->adapter_info.fpga_info.profile;\n+\n+\tswitch (profile) {\n+\tcase FPGA_INFO_PROFILE_VSWITCH:\n+\t\t*(const struct rte_mtr_ops **)ops = &mtr_ops_vswitch;\n+\t\tbreak;\n+\tcase FPGA_INFO_PROFILE_INLINE:\n+\t\t*(const struct rte_mtr_ops **)ops = &mtr_ops_inline;\n+\t\tbreak;\n+\tcase FPGA_INFO_PROFILE_UNKNOWN:\n+\t/* fallthrough */\n+\tcase FPGA_INFO_PROFILE_CAPTURE:\n+\t/* fallthrough */\n+\tdefault:\n+\t\tNT_LOG(ERR, NTHW,\n+\t\t       \"\" PCIIDENT_PRINT_STR\n+\t\t       \": fpga profile not supported [%s:%u]\\n\",\n+\t\t       PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),\n+\t\t       PCIIDENT_TO_BUSNR(p_nt_drv->pciident),\n+\t\t       PCIIDENT_TO_DEVNR(p_nt_drv->pciident),\n+\t\t       PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),\n+\t\t       __func__, __LINE__);\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/net/ntnic/ntnic_meter.h b/drivers/net/ntnic/ntnic_meter.h\nnew file mode 100644\nindex 0000000000..9484c9ee20\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_meter.h\n@@ -0,0 +1,10 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef __NTNIC_METER_H__\n+#define __NTNIC_METER_H__\n+\n+int eth_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops);\n+\n+#endif /* __NTNIC_METER_H__ */\ndiff --git a/drivers/net/ntnic/ntnic_vdpa.c b/drivers/net/ntnic/ntnic_vdpa.c\nnew file mode 100644\nindex 0000000000..6372514527\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_vdpa.c\n@@ -0,0 +1,365 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include <stdio.h>\n+#include <unistd.h>\n+\n+#include <rte_ethdev.h>\n+#include <rte_malloc.h>\n+#include <rte_vhost.h>\n+#include <linux/virtio_net.h>\n+#include <rte_vdpa.h>\n+#include <rte_pci.h>\n+#include <rte_string_fns.h>\n+#include 
<rte_bus_pci.h>\n+#include <vhost.h>\n+#include \"ntnic_vf_vdpa.h\"\n+#include \"ntnic_vdpa.h\"\n+#include \"ntnic_ethdev.h\"\n+#include \"nt_util.h\"\n+#include \"ntlog.h\"\n+#include \"ntnic_vfio.h\"\n+\n+#define MAX_PATH_LEN 128\n+#define MAX_VDPA_PORTS 128UL\n+\n+struct vdpa_port {\n+\tchar ifname[MAX_PATH_LEN];\n+\tstruct rte_vdpa_device *vdev;\n+\tint vid;\n+\tuint32_t index;\n+\tuint32_t host_id;\n+\tuint32_t rep_port;\n+\tint rxqs;\n+\tint txqs;\n+\tuint64_t flags;\n+\tstruct rte_pci_addr addr;\n+};\n+\n+static struct vdpa_port vport[MAX_VDPA_PORTS];\n+static uint32_t nb_vpda_devcnt;\n+\n+static int nthw_vdpa_start(struct vdpa_port *vport);\n+\n+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,\n+\t\t\t\tint queue_id, uint32_t *hw_index,\n+\t\t\t\tuint32_t *host_id, uint32_t *rep_port)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < nb_vpda_devcnt; i++) {\n+\t\tif (vport[i].vdev == vdpa_dev) {\n+\t\t\tif (rx) {\n+\t\t\t\tif (queue_id >= vport[i].rxqs) {\n+\t\t\t\t\tNT_LOG(ERR, VDPA,\n+\t\t\t\t\t       \"Failed: %s: Queue ID not configured. vDPA dev %p, rx queue_id %i, rxqs %i\\n\",\n+\t\t\t\t\t       __func__, vdpa_dev, queue_id,\n+\t\t\t\t\t       vport[i].rxqs);\n+\t\t\t\t\treturn -1;\n+\t\t\t\t}\n+\t\t\t\t*hw_index = vport[i].index + queue_id;\n+\t\t\t} else {\n+\t\t\t\tif (queue_id >= vport[i].txqs) {\n+\t\t\t\t\tNT_LOG(ERR, VDPA,\n+\t\t\t\t\t       \"Failed: %s: Queue ID not configured. vDPA dev %p, tx queue_id %i, rxqs %i\\n\",\n+\t\t\t\t\t       __func__, vdpa_dev, queue_id,\n+\t\t\t\t\t       vport[i].rxqs);\n+\t\t\t\t\treturn -1;\n+\t\t\t\t}\n+\t\t\t\t*hw_index = vport[i].index + queue_id;\n+\t\t\t}\n+\n+\t\t\t*host_id = vport[i].host_id;\n+\t\t\t*rep_port = vport[i].rep_port;\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\n+\tNT_LOG(ERR, VDPA,\n+\t       \"Failed: %s: Ask on vDPA dev %p, queue_id %i, nb_vpda_devcnt %i\\n\",\n+\t       __func__, vdpa_dev, queue_id, nb_vpda_devcnt);\n+\treturn -1;\n+}\n+\n+int nthw_vdpa_init(const struct rte_pci_device *vdev,\n+\t\t   const char *backing_devname _unused, const char *socket_path,\n+\t\t   uint32_t index, int rxqs, int txqs, uint32_t rep_port,\n+\t\t   int *vhid)\n+{\n+\tint ret;\n+\tuint32_t host_id = nt_vfio_vf_num(vdev);\n+\n+\tstruct rte_vdpa_device *vdpa_dev =\n+\t\trte_vdpa_find_device_by_name(vdev->name);\n+\tif (!vdpa_dev) {\n+\t\tNT_LOG(ERR, VDPA, \"vDPA device with name %s - not found\\n\",\n+\t\t       vdev->name);\n+\t\treturn -1;\n+\t}\n+\n+\tvport[nb_vpda_devcnt].vdev = vdpa_dev;\n+\tvport[nb_vpda_devcnt].host_id = host_id; /* VF # */\n+\tvport[nb_vpda_devcnt].index = index; /* HW ring index */\n+\tvport[nb_vpda_devcnt].rep_port = rep_port; /* in port override on Tx */\n+\tvport[nb_vpda_devcnt].rxqs = rxqs;\n+\tvport[nb_vpda_devcnt].txqs = txqs;\n+\tvport[nb_vpda_devcnt].addr = vdev->addr;\n+\n+\tvport[nb_vpda_devcnt].flags = RTE_VHOST_USER_CLIENT;\n+\tstrlcpy(vport[nb_vpda_devcnt].ifname, socket_path, MAX_PATH_LEN);\n+\n+\tNT_LOG(INF, VDPA,\n+\t       \"vDPA%u: device %s (host_id %u), backing device %s, index %u, queues %i, rep port %u, ifname %s\\n\",\n+\t       nb_vpda_devcnt, vdev->name, host_id, backing_devname, index,\n+\t       rxqs, rep_port, vport[nb_vpda_devcnt].ifname);\n+\n+\tret = nthw_vdpa_start(&vport[nb_vpda_devcnt]);\n+\n+\t*vhid = nb_vpda_devcnt;\n+\tnb_vpda_devcnt++;\n+\treturn ret;\n+}\n+\n+void nthw_vdpa_close(void)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < MAX_VDPA_PORTS; i++) {\n+\t\tif (vport[i].ifname[0] != '\\0') {\n+\t\t\tint ret;\n+\t\t\tchar *socket_path = 
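Illustrative sketch, not from the submitted patch: nthw_vdpa_get_queue_id_info() above resolves a per-device virtqueue number into a global hardware ring index by adding the port's base index, after bounds-checking against the queue count configured for that direction. The core of the lookup, with a stand-in port structure:

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the fields the lookup uses. */
struct demo_vdpa_port {
	uint32_t index;   /* base HW ring index for this port */
	int rxqs;
	int txqs;
};

static int queue_to_hw_index(const struct demo_vdpa_port *p, int rx,
			     int queue_id, uint32_t *hw_index)
{
	int limit = rx ? p->rxqs : p->txqs;

	if (queue_id < 0 || queue_id >= limit)
		return -1;              /* queue not configured */
	*hw_index = p->index + queue_id;
	return 0;
}

int main(void)
{
	struct demo_vdpa_port port = { .index = 32, .rxqs = 4, .txqs = 4 };
	uint32_t hw = 0;

	if (queue_to_hw_index(&port, 1, 2, &hw) == 0)
		printf("rx queue 2 -> hw ring %u\n", hw); /* prints 34 */
	return 0;
}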
vport[i].ifname;\n+\n+\t\t\tret = rte_vhost_driver_detach_vdpa_device(socket_path);\n+\t\t\tif (ret != 0) {\n+\t\t\t\tNT_LOG(ERR, VDPA,\n+\t\t\t\t       \"detach vdpa device failed: %s\\n\",\n+\t\t\t\t       socket_path);\n+\t\t\t}\n+\n+\t\t\tret = rte_vhost_driver_unregister(socket_path);\n+\t\t\tif (ret != 0) {\n+\t\t\t\tNT_LOG(ERR, VDPA,\n+\t\t\t\t       \"Fail to unregister vhost driver for %s.\\n\",\n+\t\t\t\t       socket_path);\n+\t\t\t}\n+\n+\t\t\tvport[i].ifname[0] = '\\0';\n+\t\t\treturn;\n+\t\t}\n+\t}\n+}\n+\n+#ifdef DUMP_VIRTIO_FEATURES\n+#define VIRTIO_F_NOTIFICATION_DATA 38\n+#define NUM_FEATURES 40\n+struct {\n+\tuint64_t id;\n+\tconst char *name;\n+} virt_features[NUM_FEATURES] = {\n+\t{ VIRTIO_NET_F_CSUM, \"VIRTIO_NET_F_CSUM\" },\n+\t{ VIRTIO_NET_F_GUEST_CSUM, \"VIRTIO_NET_F_GUEST_CSUM\" },\n+\t{\tVIRTIO_NET_F_CTRL_GUEST_OFFLOADS,\n+\t\t\"  VIRTIO_NET_F_CTRL_GUEST_OFFLOADS\"\n+\t},\n+\t{ VIRTIO_NET_F_MTU, \"  VIRTIO_NET_F_MTU\" },\n+\t{ VIRTIO_NET_F_MAC, \"  VIRTIO_NET_F_MAC\" },\n+\t{ VIRTIO_NET_F_GSO, \"  VIRTIO_NET_F_GSO\" },\n+\t{ VIRTIO_NET_F_GUEST_TSO4, \"  VIRTIO_NET_F_GUEST_TSO4\" },\n+\t{ VIRTIO_NET_F_GUEST_TSO6, \"  VIRTIO_NET_F_GUEST_TSO6\" },\n+\t{ VIRTIO_NET_F_GUEST_ECN, \"  VIRTIO_NET_F_GUEST_ECN\" },\n+\t{ VIRTIO_NET_F_GUEST_UFO, \"  VIRTIO_NET_F_GUEST_UFO\" },\n+\t{ VIRTIO_NET_F_HOST_TSO4, \"  VIRTIO_NET_F_HOST_TSO4\" },\n+\t{ VIRTIO_NET_F_HOST_TSO6, \"  VIRTIO_NET_F_HOST_TSO6\" },\n+\t{ VIRTIO_NET_F_HOST_ECN, \"  VIRTIO_NET_F_HOST_ECN\" },\n+\t{ VIRTIO_NET_F_HOST_UFO, \"  VIRTIO_NET_F_HOST_UFO\" },\n+\t{ VIRTIO_NET_F_MRG_RXBUF, \"  VIRTIO_NET_F_MRG_RXBUF\" },\n+\t{ VIRTIO_NET_F_STATUS, \"  VIRTIO_NET_F_STATUS\" },\n+\t{ VIRTIO_NET_F_CTRL_VQ, \"  VIRTIO_NET_F_CTRL_VQ\" },\n+\t{ VIRTIO_NET_F_CTRL_RX, \"  VIRTIO_NET_F_CTRL_RX\" },\n+\t{ VIRTIO_NET_F_CTRL_VLAN, \"  VIRTIO_NET_F_CTRL_VLAN\" },\n+\t{ VIRTIO_NET_F_CTRL_RX_EXTRA, \"  VIRTIO_NET_F_CTRL_RX_EXTRA\" },\n+\t{ VIRTIO_NET_F_GUEST_ANNOUNCE, \"  VIRTIO_NET_F_GUEST_ANNOUNCE\" },\n+\t{ VIRTIO_NET_F_MQ, \"  VIRTIO_NET_F_MQ\" },\n+\t{ VIRTIO_NET_F_CTRL_MAC_ADDR, \"  VIRTIO_NET_F_CTRL_MAC_ADDR\" },\n+\t{ VIRTIO_NET_F_HASH_REPORT, \"  VIRTIO_NET_F_HASH_REPORT\" },\n+\t{ VIRTIO_NET_F_RSS, \"  VIRTIO_NET_F_RSS\" },\n+\t{ VIRTIO_NET_F_RSC_EXT, \"  VIRTIO_NET_F_RSC_EXT\" },\n+\t{ VIRTIO_NET_F_STANDBY, \"  VIRTIO_NET_F_STANDBY\" },\n+\t{ VIRTIO_NET_F_SPEED_DUPLEX, \"  VIRTIO_NET_F_SPEED_DUPLEX\" },\n+\t{ VIRTIO_F_NOTIFY_ON_EMPTY, \"  VIRTIO_F_NOTIFY_ON_EMPTY\" },\n+\t{ VIRTIO_F_ANY_LAYOUT, \"  VIRTIO_F_ANY_LAYOUT\" },\n+\t{ VIRTIO_RING_F_INDIRECT_DESC, \"  VIRTIO_RING_F_INDIRECT_DESC\" },\n+\t{ VIRTIO_F_VERSION_1, \"  VIRTIO_F_VERSION_1\" },\n+\t{ VIRTIO_F_IOMMU_PLATFORM, \"  VIRTIO_F_IOMMU_PLATFORM\" },\n+\t{ VIRTIO_F_RING_PACKED, \"  VIRTIO_F_RING_PACKED\" },\n+\t{ VIRTIO_TRANSPORT_F_START, \"  VIRTIO_TRANSPORT_F_START\" },\n+\t{ VIRTIO_TRANSPORT_F_END, \"  VIRTIO_TRANSPORT_F_END\" },\n+\t{ VIRTIO_F_IN_ORDER, \"  VIRTIO_F_IN_ORDER\" },\n+\t{ VIRTIO_F_ORDER_PLATFORM, \"  VIRTIO_F_ORDER_PLATFORM\" },\n+\t{ VIRTIO_F_NOTIFICATION_DATA, \"  VIRTIO_F_NOTIFICATION_DATA\" },\n+};\n+\n+static void dump_virtio_features(uint64_t features)\n+{\n+\tint i;\n+\n+\tfor (i = 0; i < NUM_FEATURES; i++) {\n+\t\tif ((1ULL << virt_features[i].id) ==\n+\t\t\t\t(features & (1ULL << virt_features[i].id)))\n+\t\t\tprintf(\"Virtio feature: %s\\n\", virt_features[i].name);\n+\t}\n+}\n+#endif\n+\n+static int nthw_vdpa_new_device(int vid)\n+{\n+\tchar ifname[MAX_PATH_LEN];\n+\tuint64_t negotiated_features = 0;\n+\tunsigned int vhid = 
-1;\n+\n+\trte_vhost_get_ifname(vid, ifname, sizeof(ifname));\n+\n+\tfor (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {\n+\t\tif (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {\n+\t\t\tvport[vhid].vid = vid;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tif (vhid >= MAX_VDPA_PORTS)\n+\t\treturn -1;\n+\n+\tint max_loops = 2000;\n+\tstruct pmd_internals *intern;\n+\n+\twhile ((intern = vp_vhid_instance_ready(vhid)) == NULL) {\n+\t\tusleep(1000);\n+\t\tif (--max_loops == 0) {\n+\t\t\tNT_LOG(INF, VDPA,\n+\t\t\t       \"FAILED CREATING (vhost could not get ready) New port %s, vDPA dev: %s\\n\",\n+\t\t\t       ifname, vport[vhid].vdev->device->name);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\t/* set link up on virtual port */\n+\tintern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;\n+\n+\t/* Store ifname (vhost_path) */\n+\tstrlcpy(intern->vhost_path, ifname, MAX_PATH_LEN);\n+\n+\tNT_LOG(INF, VDPA, \"New port %s, vDPA dev: %s\\n\", ifname,\n+\t       vport[vhid].vdev->device->name);\n+\trte_vhost_get_negotiated_features(vid, &negotiated_features);\n+\tNT_LOG(INF, VDPA, \"Virtio Negotiated features %016lx\\n\",\n+\t       negotiated_features);\n+\n+#ifdef DUMP_VIRTIO_FEATURES\n+\tdump_virtio_features(negotiated_features);\n+#endif\n+\n+\tif ((((negotiated_features & (1ULL << VIRTIO_F_IN_ORDER))) ||\n+\t\t\t((negotiated_features & (1ULL << VIRTIO_F_RING_PACKED))))) {\n+\t\t/* IN_ORDER negotiated - we can run HW-virtio directly (vDPA) */\n+\t\tNT_LOG(INF, VDPA, \"Running virtio in vDPA mode : %s  %s\\n\",\n+\t\t       (negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?\n+\t\t       \"\\\"Packed-Ring\\\"\" :\n+\t\t       \"\\\"Split-Ring\\\"\",\n+\t\t       (negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ?\n+\t\t       \"\\\"In-Order\\\"\" :\n+\t\t       \"\\\"No In-Order Requested\\\"\");\n+\n+\t\tintern->vport_comm =\n+\t\t\t(negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) ?\n+\t\t\tVIRT_PORT_NEGOTIATED_PACKED :\n+\t\t\tVIRT_PORT_NEGOTIATED_SPLIT;\n+\t} else {\n+\t\tNT_LOG(ERR, VDPA, \"Incompatible virtio negotiated features.\\n\");\n+\t\treturn -1;\n+\t}\n+\treturn 0;\n+}\n+\n+static void nthw_vdpa_destroy_device(int vid)\n+{\n+\tchar ifname[MAX_PATH_LEN];\n+\tuint32_t i;\n+\tunsigned int vhid;\n+\n+\trte_vhost_get_ifname(vid, ifname, sizeof(ifname));\n+\tfor (i = 0; i < MAX_VDPA_PORTS; i++) {\n+\t\tif (strcmp(ifname, vport[i].ifname) == 0) {\n+\t\t\tNT_LOG(INF, VDPA, \"\\ndestroy port %s, vDPA dev: %s\\n\",\n+\t\t\t       ifname, vport[i].vdev->device->name);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tstruct pmd_internals *intern;\n+\n+\t/* set link down on virtual port */\n+\tfor (vhid = 0; vhid < MAX_VDPA_PORTS; vhid++) {\n+\t\tif (strncmp(ifname, vport[vhid].ifname, MAX_PATH_LEN) == 0) {\n+\t\t\tintern = vp_vhid_instance_ready(vhid);\n+\t\t\tif (intern)\n+\t\t\t\tintern->vport_comm = VIRT_PORT_NEGOTIATED_NONE;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+}\n+\n+static const struct rte_vhost_device_ops vdpa_devops = {\n+\t.new_device = nthw_vdpa_new_device,\n+\t.destroy_device = nthw_vdpa_destroy_device,\n+};\n+\n+static int nthw_vdpa_start(struct vdpa_port *vport)\n+{\n+\tint ret;\n+\tchar *socket_path = vport->ifname;\n+\n+\tret = rte_vhost_driver_register(socket_path, vport->flags);\n+\tif (ret != 0) {\n+\t\tNT_LOG(ERR, VDPA, \"register driver failed: %s\\n\", socket_path);\n+\t\treturn -1;\n+\t}\n+\n+\tret = rte_vhost_driver_callback_register(socket_path, &vdpa_devops);\n+\tif (ret != 0) {\n+\t\tNT_LOG(ERR, VDPA, \"register driver ops failed: %s\\n\",\n+\t\t       socket_path);\n+\t\treturn 
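Illustrative sketch, not from the submitted patch: nthw_vdpa_new_device() above only runs the queues in vDPA mode when the guest negotiated VIRTIO_F_IN_ORDER or VIRTIO_F_RING_PACKED, and it selects packed-ring handling whenever the packed bit is present. The decision reduces to two bit tests on the negotiated feature word (feature bit numbers below follow the virtio spec, prefixed DEMO_ to mark them as local stand-ins):

#include <stdint.h>
#include <stdio.h>

#define DEMO_VIRTIO_F_RING_PACKED 34
#define DEMO_VIRTIO_F_IN_ORDER    35

enum ring_mode { RING_NONE, RING_SPLIT, RING_PACKED };

static enum ring_mode pick_ring_mode(uint64_t negotiated)
{
	int packed   = !!(negotiated & (1ULL << DEMO_VIRTIO_F_RING_PACKED));
	int in_order = !!(negotiated & (1ULL << DEMO_VIRTIO_F_IN_ORDER));

	if (!packed && !in_order)
		return RING_NONE;       /* cannot run HW virtio directly */
	return packed ? RING_PACKED : RING_SPLIT;
}

int main(void)
{
	printf("%d\n", pick_ring_mode(1ULL << DEMO_VIRTIO_F_IN_ORDER));    /* split  */
	printf("%d\n", pick_ring_mode(1ULL << DEMO_VIRTIO_F_RING_PACKED)); /* packed */
	return 0;
}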
-1;\n+\t}\n+\n+\tret = rte_vhost_driver_disable_features(socket_path, (1ULL << VIRTIO_NET_F_HOST_TSO4) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_NET_F_HOST_TSO6) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_NET_F_CSUM) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_RING_F_EVENT_IDX) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_NET_F_HOST_UFO) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_NET_F_HOST_ECN) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_NET_F_GUEST_CSUM) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_NET_F_GUEST_TSO4) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_NET_F_GUEST_TSO6) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_NET_F_GUEST_UFO) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_NET_F_GUEST_ECN) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_NET_F_CTRL_VQ) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_NET_F_CTRL_RX) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_NET_F_GSO) |\n+\t\t\t\t\t\t(1ULL << VIRTIO_NET_F_MTU));\n+\n+\tif (ret != 0) {\n+\t\tNT_LOG(INF, VDPA,\n+\t\t       \"rte_vhost_driver_disable_features failed for vhost user client port: %s\\n\",\n+\t\t       socket_path);\n+\t\treturn -1;\n+\t}\n+\n+\tif (rte_vhost_driver_start(socket_path) < 0) {\n+\t\tNT_LOG(ERR, VDPA, \"start vhost driver failed: %s\\n\",\n+\t\t       socket_path);\n+\t\treturn -1;\n+\t}\n+\treturn 0;\n+}\ndiff --git a/drivers/net/ntnic/ntnic_vdpa.h b/drivers/net/ntnic/ntnic_vdpa.h\nnew file mode 100644\nindex 0000000000..7acc2c8e4b\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_vdpa.h\n@@ -0,0 +1,21 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef _NTNIC_VDPA_H_\n+#define _NTNIC_VDPA_H_\n+\n+#include <stdint.h>\n+\n+int nthw_vdpa_get_queue_id_info(struct rte_vdpa_device *vdpa_dev, int rx,\n+\t\t\t\tint queue_id, uint32_t *hw_index,\n+\t\t\t\tuint32_t *host_id, uint32_t *rep_port);\n+\n+int nthw_vdpa_init(const struct rte_pci_device *vdev,\n+\t\t   const char *backing_devname, const char *socket_path,\n+\t\t   uint32_t index, int rxqs, int txqs, uint32_t rep_port,\n+\t\t   int *vhid);\n+\n+void nthw_vdpa_close(void);\n+\n+#endif /* _NTNIC_VDPA_H_ */\ndiff --git a/drivers/net/ntnic/ntnic_vf.c b/drivers/net/ntnic/ntnic_vf.c\nnew file mode 100644\nindex 0000000000..0724b040c3\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_vf.c\n@@ -0,0 +1,83 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include <rte_malloc.h>\n+#include <rte_memory.h>\n+#include <rte_interrupts.h>\n+#include <eal_interrupts.h>\n+#include <rte_bus_pci.h>\n+#include <rte_vfio.h>\n+#include <rte_spinlock.h>\n+#include <rte_log.h>\n+\n+#include \"ntnic_ethdev.h\"\n+#include \"ntnic_vf.h\"\n+#include \"ntnic_vf_vdpa.h\"\n+#include \"nt_util.h\"\n+#include \"ntlog.h\"\n+\n+#define NT_HW_NAPATECH_PCI_VENDOR_ID (0x18F4)\n+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF (0x051A)\n+#define NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF (0x051B)\n+\n+static const char *get_adapter_name(struct rte_pci_device *pci_dev)\n+{\n+\tswitch (pci_dev->id.vendor_id) {\n+\tcase NT_HW_NAPATECH_PCI_VENDOR_ID:\n+\t\tswitch (pci_dev->id.device_id) {\n+\t\tcase NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF:\n+\t\t\treturn \"NT200A02\";\n+\t\tcase NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF:\n+\t\t\treturn \"NT50B01\";\n+\t\t}\n+\t\tbreak;\n+\t}\n+\n+\treturn \"Unknown\";\n+}\n+\n+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv,\n+\t\t    struct rte_pci_device *pci_dev)\n+{\n+\tconst char *adapter_name _unused = get_adapter_name(pci_dev);\n+\n+\tNT_LOG(INF, VDPA, \"Probe %s VF : %02x:%02x:%i\\n\", adapter_name,\n+\t       pci_dev->addr.bus, pci_dev->addr.devid, 
pci_dev->addr.function);\n+\n+\t/* Create vDPA device for the virtual function interface.*/\n+\n+\tif (ntvf_vdpa_pci_probe(pci_drv, pci_dev) != 0)\n+\t\treturn -1;\n+\n+\treturn nthw_create_vf_interface_dpdk(pci_dev);\n+}\n+\n+int nt_vf_pci_remove(struct rte_pci_device *pci_dev)\n+{\n+\tif (ntvf_vdpa_pci_remove(pci_dev) != 0)\n+\t\treturn -1;\n+\n+\treturn nthw_remove_vf_interface_dpdk(pci_dev);\n+}\n+\n+static const struct rte_pci_id pci_id_nt_vf_map[] = {\n+\t{\tRTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,\n+\t\t\t       NT_HW_NAPATECH_PCI_DEVICE_ID_NT200A02_VF)\n+\t},\n+\t{\tRTE_PCI_DEVICE(NT_HW_NAPATECH_PCI_VENDOR_ID,\n+\t\t\t       NT_HW_NAPATECH_PCI_DEVICE_ID_NT50B01_VF)\n+\t},\n+\t{ .vendor_id = 0, /* sentinel */ },\n+};\n+\n+static struct rte_pci_driver rte_nt_vf = {\n+\t.id_table = pci_id_nt_vf_map,\n+\t.drv_flags = 0,\n+\t.probe = nt_vf_pci_probe,\n+\t.remove = nt_vf_pci_remove,\n+};\n+\n+RTE_PMD_REGISTER_PCI(net_nt_vf, rte_nt_vf);\n+RTE_PMD_REGISTER_PCI_TABLE(net_nt_vf, pci_id_nt_vf_map);\n+RTE_PMD_REGISTER_KMOD_DEP(net_nt_vf, \"* vfio-pci\");\ndiff --git a/drivers/net/ntnic/ntnic_vf.h b/drivers/net/ntnic/ntnic_vf.h\nnew file mode 100644\nindex 0000000000..84be3bd71f\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_vf.h\n@@ -0,0 +1,17 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef _NTNIC_VF_H_\n+#define _NTNIC_VF_H_\n+\n+#include \"rte_bus_pci.h\"\n+\n+int nt_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n+\t\t    struct rte_pci_device *pci_dev);\n+int nt_vf_pci_remove(struct rte_pci_device *pci_dev __rte_unused);\n+\n+int get_container_fd(int vf_num);\n+int close_vf_mem_mapping(int vf_num);\n+\n+#endif /* _NTNIC_VF_H_ */\ndiff --git a/drivers/net/ntnic/ntnic_vf_vdpa.c b/drivers/net/ntnic/ntnic_vf_vdpa.c\nnew file mode 100644\nindex 0000000000..4125bc50c9\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_vf_vdpa.c\n@@ -0,0 +1,1235 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 Intel Corporation\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include <unistd.h>\n+#include <stdint.h>\n+\n+#include <pthread.h>\n+#include <fcntl.h>\n+#include <sys/ioctl.h>\n+#include <sys/epoll.h>\n+\n+#include <linux/virtio_net.h>\n+#include <linux/pci_regs.h>\n+\n+#include <rte_interrupts.h>\n+#include <eal_interrupts.h>\n+\n+#include <rte_malloc.h>\n+#include <rte_memory.h>\n+#include <rte_bus_pci.h>\n+#include <rte_vhost.h>\n+#include <rte_vdpa.h>\n+#include <rte_vfio.h>\n+#include <rte_spinlock.h>\n+#include <rte_log.h>\n+\n+#include <vhost.h>\n+\n+#include \"ntdrv_4ga.h\"\n+#include \"ntnic_ethdev.h\"\n+#include \"ntnic_vdpa.h\"\n+#include \"ntnic_vf_vdpa.h\"\n+#include \"ntnic_vf.h\"\n+#include \"ntnic_vfio.h\"\n+#include \"ntnic_dbsconfig.h\"\n+#include \"ntlog.h\"\n+\n+#define NTVF_VDPA_MAX_QUEUES (MAX_QUEUES)\n+#define NTVF_VDPA_MAX_INTR_VECTORS 8\n+\n+#define NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES              \\\n+\t((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) |       \\\n+\t (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) |     \\\n+\t (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \\\n+\t (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) |   \\\n+\t (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |       \\\n+\t (1ULL << VHOST_USER_PROTOCOL_F_MQ))\n+\n+#define NTVF_VIRTIO_NET_SUPPORTED_FEATURES                                 \\\n+\t((1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) |    \\\n+\t (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) |    \\\n+\t (1ULL << VIRTIO_F_IOMMU_PLATFORM) | (1ULL 
<< VIRTIO_F_IN_ORDER) | \\\n+\t (1ULL << VIRTIO_F_RING_PACKED) |                                  \\\n+\t (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |                           \\\n+\t (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))\n+\n+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state);\n+\n+struct vring_info {\n+\tuint64_t desc;\n+\tuint64_t avail;\n+\tuint64_t used;\n+\tuint16_t size;\n+\n+\tuint16_t last_avail_idx;\n+\tuint16_t last_used_idx;\n+\n+\tint vq_type;\n+\tstruct nthw_virt_queue *p_vq;\n+\n+\tint enable;\n+};\n+\n+struct ntvf_vdpa_hw {\n+\tuint64_t negotiated_features;\n+\n+\tuint8_t nr_vring;\n+\n+\tstruct vring_info vring[NTVF_VDPA_MAX_QUEUES * 2];\n+};\n+\n+struct ntvf_vdpa_internal {\n+\tstruct rte_pci_device *pdev;\n+\tstruct rte_vdpa_device *vdev;\n+\n+\tint vfio_container_fd;\n+\tint vfio_group_fd;\n+\tint vfio_dev_fd;\n+\n+\tint vid;\n+\n+\tuint32_t outport;\n+\n+\tuint16_t max_queues;\n+\n+\tuint64_t features;\n+\n+\tstruct ntvf_vdpa_hw hw;\n+\n+\tvolatile int32_t started;\n+\tvolatile int32_t dev_attached;\n+\tvolatile int32_t running;\n+\n+\trte_spinlock_t lock;\n+\n+\tvolatile int32_t dma_mapped;\n+\tvolatile int32_t intr_enabled;\n+};\n+\n+#ifndef PAGE_SIZE\n+#define PAGE_SIZE 4096\n+#endif\n+\n+#define NTVF_USED_RING_LEN(size) \\\n+\t((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)\n+\n+#define NTVF_MEDIATED_VRING 0x210000000000\n+\n+struct internal_list {\n+\tTAILQ_ENTRY(internal_list) next;\n+\tstruct ntvf_vdpa_internal *internal;\n+};\n+\n+TAILQ_HEAD(internal_list_head, internal_list);\n+\n+static struct internal_list_head internal_list =\n+\tTAILQ_HEAD_INITIALIZER(internal_list);\n+\n+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;\n+\n+int ntvf_vdpa_logtype;\n+\n+static struct internal_list *\n+find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)\n+{\n+\tint found = 0;\n+\tstruct internal_list *list;\n+\n+\tNT_LOG(DBG, VDPA, \"%s: vDPA dev=%p\\n\", __func__, vdev);\n+\n+\tpthread_mutex_lock(&internal_list_lock);\n+\n+\tTAILQ_FOREACH(list, &internal_list, next)\n+\t{\n+\t\tif (vdev == list->internal->vdev) {\n+\t\t\tfound = 1;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tpthread_mutex_unlock(&internal_list_lock);\n+\n+\tif (!found)\n+\t\treturn NULL;\n+\n+\treturn list;\n+}\n+\n+static struct internal_list *\n+ntvf_vdpa_find_internal_resource_by_dev(const struct rte_pci_device *pdev)\n+{\n+\tint found = 0;\n+\tstruct internal_list *list;\n+\n+\tNT_LOG(DBG, VDPA, \"%s: [%s:%u]\\n\", __func__, __FILE__, __LINE__);\n+\n+\tpthread_mutex_lock(&internal_list_lock);\n+\n+\tTAILQ_FOREACH(list, &internal_list, next)\n+\t{\n+\t\tif (pdev == list->internal->pdev) {\n+\t\t\tfound = 1;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tpthread_mutex_unlock(&internal_list_lock);\n+\n+\tif (!found)\n+\t\treturn NULL;\n+\n+\treturn list;\n+}\n+\n+static int ntvf_vdpa_vfio_setup(struct ntvf_vdpa_internal *internal)\n+{\n+\tint vfio;\n+\n+\tLOG_FUNC_ENTER();\n+\n+\tinternal->vfio_dev_fd = -1;\n+\tinternal->vfio_group_fd = -1;\n+\tinternal->vfio_container_fd = -1;\n+\n+\tvfio = nt_vfio_setup(internal->pdev);\n+\tif (vfio == -1) {\n+\t\tNT_LOG(ERR, VDPA, \"%s: [%s:%u]\\n\", __func__, __FILE__, __LINE__);\n+\t\treturn -1;\n+\t}\n+\tinternal->vfio_container_fd = nt_vfio_get_container_fd(vfio);\n+\tinternal->vfio_group_fd = nt_vfio_get_group_fd(vfio);\n+\tinternal->vfio_dev_fd = nt_vfio_get_dev_fd(vfio);\n+\treturn 0;\n+}\n+\n+static int ntvf_vdpa_dma_map(struct ntvf_vdpa_internal *internal, int do_map)\n+{\n+\tuint32_t i;\n+\tint ret = 0;\n+\tstruct 
rte_vhost_memory *mem = NULL;\n+\tint vf_num = nt_vfio_vf_num(internal->pdev);\n+\n+\tLOG_FUNC_ENTER();\n+\n+\tNT_LOG(DBG, VDPA, \"%s: vid=%d vDPA dev=%p\\n\", __func__, internal->vid,\n+\t       internal->vdev);\n+\n+\tif ((do_map && __atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED)) ||\n+\t\t\t(!do_map && !__atomic_load_n(&internal->dma_mapped, __ATOMIC_RELAXED))) {\n+\t\tret = -1;\n+\t\tgoto exit;\n+\t}\n+\tret = rte_vhost_get_mem_table(internal->vid, &mem);\n+\tif (ret < 0) {\n+\t\tNT_LOG(ERR, VDPA, \"failed to get VM memory layout.\\n\");\n+\t\tgoto exit;\n+\t}\n+\n+\tfor (i = 0; i < mem->nregions; i++) {\n+\t\tstruct rte_vhost_mem_region *reg = &mem->regions[i];\n+\n+\t\tNT_LOG(INF, VDPA,\n+\t\t       \"%s, region %u: HVA 0x%\" PRIX64 \", GPA 0x%\" PRIX64 \", size 0x%\" PRIX64 \".\\n\",\n+\t\t       (do_map ? \"DMA map\" : \"DMA unmap\"), i,\n+\t\t       reg->host_user_addr, reg->guest_phys_addr, reg->size);\n+\n+\t\tif (do_map) {\n+\t\t\tret = nt_vfio_dma_map_vdpa(vf_num, reg->host_user_addr,\n+\t\t\t\t\t\t   reg->guest_phys_addr,\n+\t\t\t\t\t\t   reg->size);\n+\t\t\tif (ret < 0) {\n+\t\t\t\tNT_LOG(ERR, VDPA, \"%s: DMA map failed.\\n\",\n+\t\t\t\t       __func__);\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\t\t\t__atomic_store_n(&internal->dma_mapped, 1, __ATOMIC_RELAXED);\n+\t\t} else {\n+\t\t\tret = nt_vfio_dma_unmap_vdpa(vf_num,\n+\t\t\t\t\t\t     reg->host_user_addr,\n+\t\t\t\t\t\t     reg->guest_phys_addr,\n+\t\t\t\t\t\t     reg->size);\n+\t\t\tif (ret < 0) {\n+\t\t\t\tNT_LOG(ERR, VDPA, \"%s: DMA unmap failed.\\n\", __func__);\n+\t\t\t\tgoto exit;\n+\t\t\t}\n+\t\t\t__atomic_store_n(&internal->dma_mapped, 0, __ATOMIC_RELAXED);\n+\t\t}\n+\t}\n+\n+exit:\n+\tif (mem)\n+\t\tfree(mem);\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn ret;\n+}\n+\n+static uint64_t _hva_to_gpa(int vid, uint64_t hva)\n+{\n+\tstruct rte_vhost_memory *mem = NULL;\n+\tstruct rte_vhost_mem_region *reg;\n+\tuint64_t gpa = 0;\n+\tuint32_t i;\n+\n+\tif (rte_vhost_get_mem_table(vid, &mem) < 0)\n+\t\tgoto exit;\n+\n+\tfor (i = 0; i < mem->nregions; i++) {\n+\t\treg = &mem->regions[i];\n+\t\tif (hva >= reg->host_user_addr &&\n+\t\t\t\thva < reg->host_user_addr + reg->size) {\n+\t\t\tgpa = hva - reg->host_user_addr + reg->guest_phys_addr;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+exit:\n+\tif (mem)\n+\t\tfree(mem);\n+\n+\treturn gpa;\n+}\n+\n+static int ntvf_vdpa_create_vring(struct ntvf_vdpa_internal *internal,\n+\t\t\t\t  int vring)\n+{\n+\tstruct ntvf_vdpa_hw *hw = &internal->hw;\n+\tstruct rte_vhost_vring vq;\n+\tint vid = internal->vid;\n+\tuint64_t gpa;\n+\n+\trte_vhost_get_vhost_vring(vid, vring, &vq);\n+\n+\tNT_LOG(INF, VDPA, \"%s: idx=%d: vq.desc %p\\n\", __func__, vring, vq.desc);\n+\n+\tgpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);\n+\tif (gpa == 0) {\n+\t\tNT_LOG(ERR, VDPA,\n+\t\t       \"%s: idx=%d: failed to get GPA for descriptor ring: vq.desc %p\\n\",\n+\t\t       __func__, vring, vq.desc);\n+\t\treturn -1;\n+\t}\n+\thw->vring[vring].desc = gpa;\n+\n+\tgpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);\n+\tif (gpa == 0) {\n+\t\tNT_LOG(ERR, VDPA,\n+\t\t       \"%s: idx=%d: failed to get GPA for available ring\\n\",\n+\t\t       __func__, vring);\n+\t\treturn -1;\n+\t}\n+\thw->vring[vring].avail = gpa;\n+\n+\tgpa = _hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);\n+\tif (gpa == 0) {\n+\t\tNT_LOG(ERR, VDPA, \"%s: idx=%d: failed to get GPA for used ring\\n\",\n+\t\t       __func__, vring);\n+\t\treturn -1;\n+\t}\n+\n+\thw->vring[vring].used = gpa;\n+\thw->vring[vring].size = vq.size;\n+\n+\trte_vhost_get_vring_base(vid, vring, 
&hw->vring[vring].last_avail_idx,\n+\t\t\t\t &hw->vring[vring].last_used_idx);\n+\n+\t/* Prevent multiple creations */\n+\t{\n+\t\tconst int index = vring;\n+\t\tuint32_t hw_index = 0;\n+\t\tuint32_t host_id = 0;\n+\t\tconst uint32_t header = 0; /* 0=VirtIO hdr, 1=NT virtio hdr */\n+\t\tuint32_t vport = 0;\n+\t\tuint32_t port = internal->outport;\n+\t\tstruct vring_info *p_vr_inf = &hw->vring[vring];\n+\t\tnthw_dbs_t *p_nthw_dbs = get_pdbs_from_pci(internal->pdev->addr);\n+\n+\t\tint res = nthw_vdpa_get_queue_id_info(internal->vdev,\n+\t\t\t\t\t\t      !(vring & 1), vring >> 1,\n+\t\t\t\t\t\t      &hw_index, &host_id,\n+\t\t\t\t\t\t      &vport);\n+\t\tif (res) {\n+\t\t\tNT_LOG(ERR, VDPA, \"HW info received failed\\n\");\n+\t\t\tp_vr_inf->p_vq = NULL; /* Failed to create the vring */\n+\t\t\treturn res;\n+\t\t}\n+\n+\t\tif (!(vring & 1)) {\n+\t\t\tNT_LOG(DBG, VDPA,\n+\t\t\t       \"Rx: idx %u, host_id %u, vport %u, queue %i\\n\",\n+\t\t\t       hw_index, host_id, vport, vring >> 1);\n+\t\t} else {\n+\t\t\tNT_LOG(DBG, VDPA,\n+\t\t\t       \"Tx: idx %u, host_id %u, vport %u, queue %i\\n\",\n+\t\t\t       hw_index, host_id, vport, vring >> 1);\n+\t\t}\n+\t\tNT_LOG(DBG, VDPA,\n+\t\t       \"%s: idx=%d: avail=%p used=%p desc=%p: %X: %d %d %d\\n\",\n+\t\t       __func__, index, (void *)p_vr_inf->avail,\n+\t\t       (void *)p_vr_inf->used, (void *)p_vr_inf->desc,\n+\t\t       p_vr_inf->size, host_id, port, header);\n+\n+\t\tif ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||\n+\t\t\t\t(hw->negotiated_features &\n+\t\t\t\t (1ULL << VIRTIO_F_RING_PACKED))) {\n+\t\t\tint res;\n+\n+\t\t\tNT_LOG(DBG, VDPA,\n+\t\t\t       \"%s: idx=%d: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\\n\",\n+\t\t\t       __func__, index, hw->negotiated_features);\n+\n+\t\t\tif (!(vring & 1)) {\n+\t\t\t\tstruct nthw_virt_queue *rx_vq;\n+\n+\t\t\t\tuint16_t start_idx =\n+\t\t\t\t\thw->vring[vring].last_avail_idx;\n+\t\t\t\tuint16_t next_ptr =\n+\t\t\t\t\t(start_idx & 0x7fff) % vq.size;\n+\n+\t\t\t\t/* disable doorbell not needed by FPGA */\n+\t\t\t\t((struct pvirtq_event_suppress *)vq.used)\n+\t\t\t\t->flags = RING_EVENT_FLAGS_DISABLE;\n+\t\t\t\trte_wmb();\n+\t\t\t\tif (hw->negotiated_features &\n+\t\t\t\t\t\t(1ULL << VIRTIO_F_RING_PACKED)) {\n+\t\t\t\t\tNT_LOG(DBG, VDPA,\n+\t\t\t\t\t       \"Rx: hw_index %u, host_id %u, start_idx %u, header %u, vring %u, vport %u\\n\",\n+\t\t\t\t\t       hw_index, host_id, start_idx,\n+\t\t\t\t\t       header, vring, vport);\n+\t\t\t\t\t/*  irq_vector 1,3,5... for Rx we support max 8 pr VF */\n+\t\t\t\t\trx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,\n+\t\t\t\t\t\thw_index, start_idx,\n+\t\t\t\t\t\tnext_ptr,\n+\t\t\t\t\t\t(void *)p_vr_inf\n+\t\t\t\t\t\t->avail, /* -> driver_event */\n+\t\t\t\t\t\t(void *)p_vr_inf\n+\t\t\t\t\t\t->used, /* -> device_event */\n+\t\t\t\t\t\t(void *)p_vr_inf->desc,\n+\t\t\t\t\t\tp_vr_inf->size, host_id, header,\n+\t\t\t\t\t\tPACKED_RING,\n+\t\t\t\t\t\tvring + 1);\n+\n+\t\t\t\t} else {\n+\t\t\t\t\trx_vq = nthw_setup_rx_virt_queue(p_nthw_dbs,\n+\t\t\t\t\t\thw_index, start_idx,\n+\t\t\t\t\t\tnext_ptr,\n+\t\t\t\t\t\t(void *)p_vr_inf->avail,\n+\t\t\t\t\t\t(void *)p_vr_inf->used,\n+\t\t\t\t\t\t(void *)p_vr_inf->desc,\n+\t\t\t\t\t\tp_vr_inf->size, host_id, header,\n+\t\t\t\t\t\tSPLIT_RING,\n+\t\t\t\t\t\t-1); /* no interrupt enabled */\n+\t\t\t\t}\n+\n+\t\t\t\tp_vr_inf->p_vq = rx_vq;\n+\t\t\t\tp_vr_inf->vq_type = 0;\n+\t\t\t\tres = (rx_vq ? 
0 : -1);\n+\t\t\t\tif (res == 0)\n+\t\t\t\t\tregister_release_virtqueue_info(rx_vq,\n+\t\t\t\t\t\t\t\t\t1, 0);\n+\n+\t\t\t\tNT_LOG(DBG, VDPA, \"[%i] Rx Queue size %i\\n\",\n+\t\t\t\t       hw_index, p_vr_inf->size);\n+\t\t\t} else if (vring & 1) {\n+\t\t\t\t/*\n+\t\t\t\t * transmit virt queue\n+\t\t\t\t */\n+\t\t\t\tstruct nthw_virt_queue *tx_vq;\n+\t\t\t\tuint16_t start_idx =\n+\t\t\t\t\thw->vring[vring].last_avail_idx;\n+\t\t\t\tuint16_t next_ptr;\n+\n+\t\t\t\tif (hw->negotiated_features &\n+\t\t\t\t\t\t(1ULL << VIRTIO_F_RING_PACKED)) {\n+\t\t\t\t\tnext_ptr =\n+\t\t\t\t\t\t(start_idx & 0x7fff) % vq.size;\n+\n+\t\t\t\t\t/* disable doorbell not needed by FPGA */\n+\t\t\t\t\t((struct pvirtq_event_suppress *)vq.used)\n+\t\t\t\t\t->flags =\n+\t\t\t\t\t\tRING_EVENT_FLAGS_DISABLE;\n+\t\t\t\t\trte_wmb();\n+\t\t\t\t\ttx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,\n+\t\t\t\t\t\thw_index, start_idx,\n+\t\t\t\t\t\tnext_ptr,\n+\t\t\t\t\t\t(void *)p_vr_inf->avail, /* driver_event */\n+\t\t\t\t\t\t(void *)p_vr_inf->used, /* device_event */\n+\t\t\t\t\t\t(void *)p_vr_inf->desc,\n+\t\t\t\t\t\tp_vr_inf->size, host_id, port,\n+\t\t\t\t\t\tvport, header, PACKED_RING,\n+\t\t\t\t\t\tvring + 1, /* interrupt 2,4,6... */\n+\t\t\t\t\t\t!!(hw->negotiated_features &\n+\t\t\t\t\t\t\t(1ULL << VIRTIO_F_IN_ORDER)));\n+\n+\t\t\t\t} else {\n+\t\t\t\t\t/*\n+\t\t\t\t\t * In Live Migration restart scenario:\n+\t\t\t\t\t * This only works if no jumbo packets have been sent from the VM\n+\t\t\t\t\t * on the LM source side. This pointer points to the next\n+\t\t\t\t\t * free descriptor and may be pushed ahead by the next flag; if\n+\t\t\t\t\t * so, this pointer calculation is incorrect\n+\t\t\t\t\t *\n+\t\t\t\t\t * NOTE: THEREFORE, THIS DOES NOT WORK WITH JUMBO PACKETS\n+\t\t\t\t\t *       SUPPORT IN VM\n+\t\t\t\t\t */\n+\t\t\t\t\tnext_ptr =\n+\t\t\t\t\t\t(start_idx & 0x7fff) % vq.size;\n+\t\t\t\t\ttx_vq = nthw_setup_tx_virt_queue(p_nthw_dbs,\n+\t\t\t\t\t\thw_index, start_idx,\n+\t\t\t\t\t\tnext_ptr,\n+\t\t\t\t\t\t(void *)p_vr_inf->avail,\n+\t\t\t\t\t\t(void *)p_vr_inf->used,\n+\t\t\t\t\t\t(void *)p_vr_inf->desc,\n+\t\t\t\t\t\tp_vr_inf->size, host_id, port,\n+\t\t\t\t\t\tvport, header, SPLIT_RING,\n+\t\t\t\t\t\t-1, /* no interrupt enabled */\n+\t\t\t\t\t\tIN_ORDER);\n+\t\t\t\t}\n+\n+\t\t\t\tp_vr_inf->p_vq = tx_vq;\n+\t\t\t\tp_vr_inf->vq_type = 1;\n+\t\t\t\tres = (tx_vq ? 
0 : -1);\n+\t\t\t\tif (res == 0)\n+\t\t\t\t\tregister_release_virtqueue_info(tx_vq,\n+\t\t\t\t\t\t\t\t\t0, 0);\n+\n+\t\t\t\tNT_LOG(DBG, VDPA, \"[%i] Tx Queue size %i\\n\",\n+\t\t\t\t       hw_index, p_vr_inf->size);\n+\t\t\t} else {\n+\t\t\t\tNT_LOG(ERR, VDPA,\n+\t\t\t\t       \"%s: idx=%d: unexpected index: %d\\n\",\n+\t\t\t\t       __func__, index, vring);\n+\t\t\t\tres = -1;\n+\t\t\t}\n+\t\t\tif (res != 0) {\n+\t\t\t\tNT_LOG(ERR, VDPA,\n+\t\t\t\t       \"%s: idx=%d: vring error: res=%d\\n\",\n+\t\t\t\t       __func__, index, res);\n+\t\t\t}\n+\n+\t\t} else {\n+\t\t\tNT_LOG(WRN, VDPA,\n+\t\t\t       \"%s: idx=%d: for SPLIT RING: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\\n\",\n+\t\t\t       __func__, index, hw->negotiated_features);\n+\t\t\treturn 0;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int ntvf_vdpa_start(struct ntvf_vdpa_internal *internal)\n+{\n+\tenum fpga_info_profile fpga_profile =\n+\t\tget_fpga_profile_from_pci(internal->pdev->addr);\n+\tstruct ntvf_vdpa_hw *hw = &internal->hw;\n+\tint vid;\n+\n+\tLOG_FUNC_ENTER();\n+\n+\tvid = internal->vid;\n+\thw->nr_vring = rte_vhost_get_vring_num(vid);\n+\trte_vhost_get_negotiated_features(vid, &hw->negotiated_features);\n+\n+\tif (fpga_profile == FPGA_INFO_PROFILE_INLINE) {\n+\t\tNT_LOG(INF, VDPA, \"%s: Number of VRINGs=%u\\n\", __func__,\n+\t\t       hw->nr_vring);\n+\n+\t\tfor (int i = 0; i < hw->nr_vring && i < 2; i++) {\n+\t\t\tif (!hw->vring[i].enable) {\n+\t\t\t\tntvf_vdpa_dma_map(internal, 1);\n+\t\t\t\tntvf_vdpa_create_vring(internal, i);\n+\t\t\t\tif (hw->vring[i].desc && hw->vring[i].p_vq) {\n+\t\t\t\t\tif (hw->vring[i].vq_type == 0)\n+\t\t\t\t\t\tnthw_enable_rx_virt_queue(hw->vring[i].p_vq);\n+\t\t\t\t\telse\n+\t\t\t\t\t\tnthw_enable_tx_virt_queue(hw->vring[i].p_vq);\n+\t\t\t\t\thw->vring[i].enable = 1;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\t} else {\n+\t\t/*\n+\t\t * Initially vring 0 must be enabled/created here - it is not later\n+\t\t * enabled in vring state\n+\t\t */\n+\t\tif (!hw->vring[0].enable) {\n+\t\t\tntvf_vdpa_dma_map(internal, 1);\n+\t\t\tntvf_vdpa_create_vring(internal, 0);\n+\t\t\thw->vring[0].enable = 1;\n+\t\t}\n+\t}\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn 0;\n+}\n+\n+static int ntvf_vdpa_stop(struct ntvf_vdpa_internal *internal)\n+{\n+\tstruct ntvf_vdpa_hw *hw = &internal->hw;\n+\tuint64_t features;\n+\tuint32_t i;\n+\tint vid;\n+\tint res;\n+\n+\tLOG_FUNC_ENTER();\n+\n+\tvid = internal->vid;\n+\n+\tfor (i = 0; i < hw->nr_vring; i++) {\n+\t\trte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,\n+\t\t\t\t\t hw->vring[i].last_used_idx);\n+\t}\n+\n+\trte_vhost_get_negotiated_features(vid, &features);\n+\n+\tfor (i = 0; i < hw->nr_vring; i++) {\n+\t\tstruct vring_info *p_vr_inf = &hw->vring[i];\n+\n+\t\tif ((hw->negotiated_features & (1ULL << VIRTIO_F_IN_ORDER)) ||\n+\t\t\t\t(hw->negotiated_features &\n+\t\t\t\t (1ULL << VIRTIO_F_RING_PACKED))) {\n+\t\t\tNT_LOG(DBG, VDPA,\n+\t\t\t       \"%s: feature VIRTIO_F_IN_ORDER is set: 0x%016lX\\n\",\n+\t\t\t       __func__, hw->negotiated_features);\n+\t\t\tif (p_vr_inf->vq_type == 0) {\n+\t\t\t\tde_register_release_virtqueue_info(p_vr_inf->p_vq);\n+\t\t\t\tres = nthw_release_rx_virt_queue(p_vr_inf->p_vq);\n+\t\t\t} else if (p_vr_inf->vq_type == 1) {\n+\t\t\t\tde_register_release_virtqueue_info(p_vr_inf->p_vq);\n+\t\t\t\tres = nthw_release_tx_virt_queue(p_vr_inf->p_vq);\n+\t\t\t} else {\n+\t\t\t\tNT_LOG(ERR, VDPA,\n+\t\t\t\t       \"%s: vring #%d: unknown type %d\\n\",\n+\t\t\t\t       __func__, i, p_vr_inf->vq_type);\n+\t\t\t\tres = 
-1;\n+\t\t\t}\n+\t\t\tif (res != 0) {\n+\t\t\t\tNT_LOG(ERR, VDPA, \"%s: vring #%d: res=%d\\n\",\n+\t\t\t\t       __func__, i, res);\n+\t\t\t}\n+\t\t} else {\n+\t\t\tNT_LOG(WRN, VDPA,\n+\t\t\t       \"%s: feature VIRTIO_F_IN_ORDER is *NOT* set: 0x%016lX\\n\",\n+\t\t\t       __func__, hw->negotiated_features);\n+\t\t}\n+\t\tp_vr_inf->desc = 0UL;\n+\t}\n+\n+\tif (RTE_VHOST_NEED_LOG(features)) {\n+\t\tNT_LOG(WRN, VDPA,\n+\t\t       \"%s: vid %d: vhost logging feature needed - currently not supported\\n\",\n+\t\t       __func__, vid);\n+\t}\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn 0;\n+}\n+\n+#define MSIX_IRQ_SET_BUF_LEN           \\\n+\t(sizeof(struct vfio_irq_set) + \\\n+\t sizeof(int) * NTVF_VDPA_MAX_QUEUES * 2 + 1)\n+\n+static int ntvf_vdpa_enable_vfio_intr(struct ntvf_vdpa_internal *internal)\n+{\n+\tint ret;\n+\tuint32_t i, nr_vring;\n+\tchar irq_set_buf[MSIX_IRQ_SET_BUF_LEN];\n+\tstruct vfio_irq_set *irq_set;\n+\tint *fd_ptr;\n+\tstruct rte_vhost_vring vring;\n+\n+\tif (__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))\n+\t\treturn 0;\n+\n+\tLOG_FUNC_ENTER();\n+\tvring.callfd = -1;\n+\n+\tnr_vring = rte_vhost_get_vring_num(internal->vid);\n+\n+\tNT_LOG(INF, VDPA,\n+\t       \"Enable VFIO interrupt MSI-X num rings %i on VID %i (%02x:%02x.%x)\\n\",\n+\t       nr_vring, internal->vid, internal->pdev->addr.bus,\n+\t       internal->pdev->addr.devid, internal->pdev->addr.function);\n+\n+\tif (nr_vring + 1 > NTVF_VDPA_MAX_INTR_VECTORS) {\n+\t\tNT_LOG(WRN, VDPA,\n+\t\t       \"Can't enable MSI interrupts. Too many vectors requested: \"\n+\t\t       \"%i (max: %i) only poll mode drivers will work\",\n+\t\t       nr_vring + 1, NTVF_VDPA_MAX_INTR_VECTORS);\n+\t\t/*\n+\t\t * Return success, because polling drivers in VM still works without\n+\t\t * interrupts (i.e. 
DPDK PMDs)\n+\t\t */\n+\t\treturn 0;\n+\t}\n+\n+\tirq_set = (struct vfio_irq_set *)irq_set_buf;\n+\tirq_set->argsz = sizeof(irq_set_buf);\n+\tirq_set->count = nr_vring + 1;\n+\tirq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |\n+\t\t\t VFIO_IRQ_SET_ACTION_TRIGGER;\n+\tirq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;\n+\tirq_set->start = 0;\n+\tfd_ptr = (int *)&irq_set->data;\n+\n+\tfd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle->fd;\n+\n+\tfor (i = 0; i < nr_vring; i += 2) {\n+\t\trte_vhost_get_vhost_vring(internal->vid, i, &vring);\n+\t\tfd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;\n+\n+\t\trte_vhost_get_vhost_vring(internal->vid, i + 1, &vring);\n+\t\tfd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i + 1] = vring.callfd;\n+\t}\n+\n+\tret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);\n+\tif (ret) {\n+\t\tNT_LOG(ERR, VDPA, \"Error enabling MSI-X interrupts: %s\",\n+\t\t       strerror(errno));\n+\t\treturn -1;\n+\t}\n+\n+\t__atomic_store_n(&internal->intr_enabled, 1, __ATOMIC_RELAXED);\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn 0;\n+}\n+\n+static int ntvf_vdpa_disable_vfio_intr(struct ntvf_vdpa_internal *internal)\n+{\n+\tint ret;\n+\tchar irq_set_buf[MSIX_IRQ_SET_BUF_LEN];\n+\tstruct vfio_irq_set *irq_set;\n+\tint len;\n+\n+\tif (!__atomic_load_n(&internal->intr_enabled, __ATOMIC_RELAXED))\n+\t\treturn 0;\n+\tLOG_FUNC_ENTER();\n+\n+\tNT_LOG(INF, VDPA, \"Disable VFIO interrupt on VID %i (%02x:%02x.%x)\\n\",\n+\t       internal->vid, internal->pdev->addr.bus,\n+\t       internal->pdev->addr.devid, internal->pdev->addr.function);\n+\n+\tlen = sizeof(struct vfio_irq_set);\n+\tirq_set = (struct vfio_irq_set *)irq_set_buf;\n+\tirq_set->argsz = len;\n+\tirq_set->count = 0;\n+\tirq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;\n+\tirq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;\n+\tirq_set->start = 0;\n+\n+\tret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);\n+\tif (ret) {\n+\t\tNT_LOG(ERR, VDPA, \"Error disabling MSI-X interrupts: %s\",\n+\t\t       strerror(errno));\n+\t\treturn -1;\n+\t}\n+\n+\t__atomic_store_n(&internal->intr_enabled, 0, __ATOMIC_RELAXED);\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn 0;\n+}\n+\n+static int ntvf_vdpa_update_datapath(struct ntvf_vdpa_internal *internal)\n+{\n+\tint ret;\n+\n+\tLOG_FUNC_ENTER();\n+\n+\trte_spinlock_lock(&internal->lock);\n+\n+\tif (!__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&\n+\t\t\t(__atomic_load_n(&internal->started, __ATOMIC_RELAXED) &&\n+\t\t\t __atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {\n+\t\tNT_LOG(DBG, VDPA, \"%s: [%s:%u] start\\n\", __func__, __FILE__,\n+\t\t\t       __LINE__);\n+\n+\t\tret = ntvf_vdpa_start(internal);\n+\t\tif (ret) {\n+\t\t\tNT_LOG(ERR, VDPA, \"%s: [%s:%u]\\n\", __func__, __FILE__,\n+\t\t\t       __LINE__);\n+\t\t\tgoto err;\n+\t\t}\n+\n+\t\t__atomic_store_n(&internal->running, 1, __ATOMIC_RELAXED);\n+\t} else if (__atomic_load_n(&internal->running, __ATOMIC_RELAXED) &&\n+\t\t\t(!__atomic_load_n(&internal->started, __ATOMIC_RELAXED) ||\n+\t\t\t !__atomic_load_n(&internal->dev_attached, __ATOMIC_RELAXED))) {\n+\t\tNT_LOG(DBG, VDPA, \"%s: stop\\n\", __func__);\n+\n+\t\tret = ntvf_vdpa_stop(internal);\n+\t\tif (ret) {\n+\t\t\tNT_LOG(ERR, VDPA, \"%s: [%s:%u]\\n\", __func__, __FILE__,\n+\t\t\t       __LINE__);\n+\t\t\tgoto err;\n+\t\t}\n+\n+\t\tret = ntvf_vdpa_disable_vfio_intr(internal);\n+\t\tif (ret) {\n+\t\t\tNT_LOG(ERR, VDPA, \"%s: [%s:%u]\\n\", __func__, __FILE__,\n+\t\t\t       __LINE__);\n+\t\t\tgoto err;\n+\t\t}\n+\n+\t\tret = 
ntvf_vdpa_dma_map(internal, 0);\n+\t\tif (ret) {\n+\t\t\tNT_LOG(ERR, VDPA, \"%s: [%s:%u]\\n\", __func__, __FILE__,\n+\t\t\t       __LINE__);\n+\t\t\tgoto err;\n+\t\t}\n+\n+\t\t__atomic_store_n(&internal->running, 0, __ATOMIC_RELAXED);\n+\t} else {\n+\t\tNT_LOG(INF, VDPA, \"%s: unhandled state [%s:%u]\\n\", __func__,\n+\t\t       __FILE__, __LINE__);\n+\t}\n+\n+\trte_spinlock_unlock(&internal->lock);\n+\tLOG_FUNC_LEAVE();\n+\treturn 0;\n+\n+err:\n+\trte_spinlock_unlock(&internal->lock);\n+\tNT_LOG(ERR, VDPA, \"%s: leave [%s:%u]\\n\", __func__, __FILE__, __LINE__);\n+\treturn ret;\n+}\n+\n+static int ntvf_vdpa_dev_config(int vid)\n+{\n+\tstruct rte_vdpa_device *vdev;\n+\tstruct internal_list *list;\n+\tstruct ntvf_vdpa_internal *internal;\n+\n+\tLOG_FUNC_ENTER();\n+\n+\tvdev = rte_vhost_get_vdpa_device(vid);\n+\tlist = find_internal_resource_by_vdev(vdev);\n+\tif (list == NULL) {\n+\t\tNT_LOG(ERR, VDPA, \"Invalid vDPA device: %p\", vdev);\n+\t\treturn -1;\n+\t}\n+\n+\tinternal = list->internal;\n+\tinternal->vid = vid;\n+\n+\t__atomic_store_n(&internal->dev_attached, 1, __ATOMIC_RELAXED);\n+\n+\tntvf_vdpa_update_datapath(internal);\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn 0;\n+}\n+\n+static int ntvf_vdpa_dev_close(int vid)\n+{\n+\tstruct rte_vdpa_device *vdev;\n+\tstruct internal_list *list;\n+\tstruct ntvf_vdpa_internal *internal;\n+\n+\tLOG_FUNC_ENTER();\n+\n+\tvdev = rte_vhost_get_vdpa_device(vid);\n+\tlist = find_internal_resource_by_vdev(vdev);\n+\tif (list == NULL) {\n+\t\tNT_LOG(ERR, VDPA, \"Invalid vDPA device: %p\", vdev);\n+\t\treturn -1;\n+\t}\n+\n+\tinternal = list->internal;\n+\n+\t__atomic_store_n(&internal->dev_attached, 0, __ATOMIC_RELAXED);\n+\tntvf_vdpa_update_datapath(internal);\n+\n+\t/* Invalidate the virt queue pointers */\n+\tuint32_t i;\n+\tstruct ntvf_vdpa_hw *hw = &internal->hw;\n+\n+\tfor (i = 0; i < hw->nr_vring; i++)\n+\t\thw->vring[i].p_vq = NULL;\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn 0;\n+}\n+\n+static int ntvf_vdpa_set_features(int vid)\n+{\n+\tuint64_t features;\n+\tstruct rte_vdpa_device *vdev;\n+\tstruct internal_list *list;\n+\n+\tLOG_FUNC_ENTER();\n+\n+\tvdev = rte_vhost_get_vdpa_device(vid);\n+\tlist = find_internal_resource_by_vdev(vdev);\n+\tif (list == NULL) {\n+\t\tNT_LOG(ERR, VDPA, \"Invalid vDPA device: %p\", vdev);\n+\t\treturn -1;\n+\t}\n+\n+\trte_vhost_get_negotiated_features(vid, &features);\n+\tNT_LOG(DBG, VDPA, \"%s: vid %d: vDPA dev %p: features=0x%016lX\\n\",\n+\t       __func__, vid, vdev, features);\n+\n+\tif (!RTE_VHOST_NEED_LOG(features))\n+\t\treturn 0;\n+\n+\tNT_LOG(INF, VDPA,\n+\t       \"%s: Starting Live Migration for vid=%d vDPA dev=%p\\n\", __func__,\n+\t       vid, vdev);\n+\n+\t/* Relay core feature not present. We cannot do live migration then. */\n+\tNT_LOG(ERR, VDPA,\n+\t       \"%s: Live Migration not possible. 
Relay core feature required.\\n\",\n+\t       __func__);\n+\treturn -1;\n+}\n+\n+static int ntvf_vdpa_get_vfio_group_fd(int vid)\n+{\n+\tstruct rte_vdpa_device *vdev;\n+\tstruct internal_list *list;\n+\n+\tLOG_FUNC_ENTER();\n+\n+\tvdev = rte_vhost_get_vdpa_device(vid);\n+\tlist = find_internal_resource_by_vdev(vdev);\n+\tif (list == NULL) {\n+\t\tNT_LOG(ERR, VDPA, \"Invalid vDPA device: %p\", vdev);\n+\t\treturn -1;\n+\t}\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn list->internal->vfio_group_fd;\n+}\n+\n+static int ntvf_vdpa_get_vfio_device_fd(int vid)\n+{\n+\tstruct rte_vdpa_device *vdev;\n+\tstruct internal_list *list;\n+\n+\tLOG_FUNC_ENTER();\n+\n+\tvdev = rte_vhost_get_vdpa_device(vid);\n+\tlist = find_internal_resource_by_vdev(vdev);\n+\tif (list == NULL) {\n+\t\tNT_LOG(ERR, VDPA, \"Invalid vDPA device: %p\", vdev);\n+\t\treturn -1;\n+\t}\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn list->internal->vfio_dev_fd;\n+}\n+\n+static int ntvf_vdpa_get_queue_num(struct rte_vdpa_device *vdev,\n+\t\t\t\t   uint32_t *queue_num)\n+{\n+\tstruct internal_list *list;\n+\n+\tLOG_FUNC_ENTER();\n+\n+\tlist = find_internal_resource_by_vdev(vdev);\n+\tif (list == NULL) {\n+\t\tNT_LOG(ERR, VDPA, \"%s: Invalid device : %p\\n\", __func__, vdev);\n+\t\treturn -1;\n+\t}\n+\t*queue_num = list->internal->max_queues;\n+\tNT_LOG(DBG, VDPA, \"%s: vDPA dev=%p queue_num=%d\\n\", __func__, vdev,\n+\t       *queue_num);\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn 0;\n+}\n+\n+static int ntvf_vdpa_get_vdpa_features(struct rte_vdpa_device *vdev,\n+\t\t\t\t       uint64_t *features)\n+{\n+\tstruct internal_list *list;\n+\n+\tLOG_FUNC_ENTER();\n+\n+\tlist = find_internal_resource_by_vdev(vdev);\n+\tif (list == NULL) {\n+\t\tNT_LOG(ERR, VDPA, \"%s: Invalid device : %p\\n\", __func__, vdev);\n+\t\treturn -1;\n+\t}\n+\n+\tif (!features) {\n+\t\tNT_LOG(ERR, VDPA, \"%s: vDPA dev=%p: no ptr to feature\\n\",\n+\t\t       __func__, vdev);\n+\t\treturn -1;\n+\t}\n+\n+\t*features = list->internal->features;\n+\tNT_LOG(DBG, VDPA, \"%s: vDPA dev=%p: features=0x%016lX\\n\", __func__,\n+\t       vdev, *features);\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn 0;\n+}\n+\n+static int\n+ntvf_vdpa_get_protocol_features(struct rte_vdpa_device *vdev __rte_unused,\n+\t\t\t\tuint64_t *features)\n+{\n+\tLOG_FUNC_ENTER();\n+\n+\tif (!features) {\n+\t\tNT_LOG(ERR, VDPA, \"%s: vDPA dev=%p: no ptr to feature\\n\",\n+\t\t       __func__, vdev);\n+\t\treturn -1;\n+\t}\n+\n+\t*features = NTVF_VDPA_SUPPORTED_PROTOCOL_FEATURES;\n+\tNT_LOG(DBG, VDPA, \"%s: vDPA dev=%p: features=0x%016lX\\n\", __func__,\n+\t       vdev, *features);\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn 0;\n+}\n+\n+static int ntvf_vdpa_configure_queue(struct ntvf_vdpa_hw *hw,\n+\tstruct ntvf_vdpa_internal *internal)\n+{\n+\tint ret = 0;\n+\n+\tret = ntvf_vdpa_enable_vfio_intr(internal);\n+\tif (ret) {\n+\t\tprintf(\"ERROR - ENABLE INTERRUPT via VFIO\\n\");\n+\t\treturn ret;\n+\t}\n+\t/* Enable Rx and Tx for all vrings */\n+\tfor (int i = 0; i < hw->nr_vring; i++) {\n+\t\tif (i & 1)\n+\t\t\tnthw_enable_tx_virt_queue(hw->vring[i].p_vq);\n+\t\telse\n+\t\t\tnthw_enable_rx_virt_queue(hw->vring[i].p_vq);\n+\t}\n+\treturn ret;\n+}\n+static int ntvf_vdpa_set_vring_state(int vid, int vring, int state)\n+{\n+\tstruct rte_vdpa_device *vdev;\n+\tstruct internal_list *list;\n+\n+\tstruct ntvf_vdpa_internal *internal;\n+\tstruct ntvf_vdpa_hw *hw;\n+\tint ret = 0;\n+\n+\tLOG_FUNC_ENTER();\n+\n+\tvdev = rte_vhost_get_vdpa_device(vid);\n+\tlist = find_internal_resource_by_vdev(vdev);\n+\tif (list == NULL) {\n+\t\tNT_LOG(ERR, VDPA, \"Invalid 
vDPA device: %p\", vdev);\n+\t\treturn -1;\n+\t}\n+\n+\tinternal = list->internal;\n+\tif (vring < 0 || vring >= internal->max_queues * 2) {\n+\t\tNT_LOG(ERR, VDPA, \"Vring index %d not correct\", vring);\n+\t\treturn -1;\n+\t}\n+\n+\thw = &internal->hw;\n+\tenum fpga_info_profile fpga_profile =\n+\t\tget_fpga_profile_from_pci(internal->pdev->addr);\n+\n+\tif (!state && hw->vring[vring].enable) {\n+\t\t/* Disable vring */\n+\t\tif (hw->vring[vring].desc && hw->vring[vring].p_vq) {\n+\t\t\tif (hw->vring[vring].vq_type == 0)\n+\t\t\t\tnthw_disable_rx_virt_queue(hw->vring[vring].p_vq);\n+\t\t\telse\n+\t\t\t\tnthw_disable_tx_virt_queue(hw->vring[vring].p_vq);\n+\t\t}\n+\t}\n+\n+\tif (state && !hw->vring[vring].enable) {\n+\t\t/* Enable/Create vring */\n+\t\tif (hw->vring[vring].desc && hw->vring[vring].p_vq) {\n+\t\t\tif (hw->vring[vring].vq_type == 0)\n+\t\t\t\tnthw_enable_rx_virt_queue(hw->vring[vring].p_vq);\n+\t\t\telse\n+\t\t\t\tnthw_enable_tx_virt_queue(hw->vring[vring].p_vq);\n+\t\t} else {\n+\t\t\tntvf_vdpa_dma_map(internal, 1);\n+\t\t\tntvf_vdpa_create_vring(internal, vring);\n+\n+\t\t\tif (fpga_profile != FPGA_INFO_PROFILE_INLINE) {\n+\t\t\t\t/*\n+\t\t\t\t * After last vq enable VFIO interrupt IOMMU re-mapping and enable\n+\t\t\t\t * FPGA Rx/Tx\n+\t\t\t\t */\n+\t\t\t\tif (vring == hw->nr_vring - 1) {\n+\t\t\t\t\tret = ntvf_vdpa_configure_queue(hw, internal);\n+\t\t\t\t\tif (ret)\n+\t\t\t\t\t\treturn ret;\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\tif (fpga_profile == FPGA_INFO_PROFILE_INLINE) {\n+\t\thw->vring[vring].enable = !!state;\n+\t\t/* after last vq enable VFIO interrupt IOMMU re-mapping */\n+\t\tif (hw->vring[vring].enable && vring == hw->nr_vring - 1) {\n+\t\t\tret = ntvf_vdpa_configure_queue(hw, internal);\n+\t\t\tif (ret)\n+\t\t\t\treturn ret;\n+\t\t}\n+\t} else {\n+\t\thw->vring[vring].enable = !!state;\n+\t}\n+\tLOG_FUNC_LEAVE();\n+\treturn 0;\n+}\n+\n+static struct rte_vdpa_dev_ops ntvf_vdpa_vdpa_ops = {\n+\t.get_queue_num = ntvf_vdpa_get_queue_num,\n+\t.get_features = ntvf_vdpa_get_vdpa_features,\n+\t.get_protocol_features = ntvf_vdpa_get_protocol_features,\n+\t.dev_conf = ntvf_vdpa_dev_config,\n+\t.dev_close = ntvf_vdpa_dev_close,\n+\t.set_vring_state = ntvf_vdpa_set_vring_state,\n+\t.set_features = ntvf_vdpa_set_features,\n+\t.migration_done = NULL,\n+\t.get_vfio_group_fd = ntvf_vdpa_get_vfio_group_fd,\n+\t.get_vfio_device_fd = ntvf_vdpa_get_vfio_device_fd,\n+\t.get_notify_area = NULL,\n+};\n+\n+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n+\t\t\tstruct rte_pci_device *pci_dev)\n+{\n+\tstruct ntvf_vdpa_internal *internal = NULL;\n+\tstruct internal_list *list = NULL;\n+\tenum fpga_info_profile fpga_profile;\n+\n+\tLOG_FUNC_ENTER();\n+\n+\tNT_LOG(INF, VDPA, \"%s: [%s:%u] %04x:%02x:%02x.%x\\n\", __func__, __FILE__,\n+\t       __LINE__, pci_dev->addr.domain, pci_dev->addr.bus,\n+\t       pci_dev->addr.devid, pci_dev->addr.function);\n+\tlist = rte_zmalloc(\"ntvf_vdpa\", sizeof(*list), 0);\n+\tif (list == NULL) {\n+\t\tNT_LOG(ERR, VDPA, \"%s: [%s:%u]\\n\", __func__, __FILE__,\n+\t\t       __LINE__);\n+\t\tgoto error;\n+\t}\n+\n+\tinternal = rte_zmalloc(\"ntvf_vdpa\", sizeof(*internal), 0);\n+\tif (internal == NULL) {\n+\t\tNT_LOG(ERR, VDPA, \"%s: [%s:%u]\\n\", __func__, __FILE__,\n+\t\t       __LINE__);\n+\t\tgoto error;\n+\t}\n+\n+\tinternal->pdev = pci_dev;\n+\trte_spinlock_init(&internal->lock);\n+\n+\tif (ntvf_vdpa_vfio_setup(internal) < 0) {\n+\t\tNT_LOG(ERR, VDPA, \"%s: [%s:%u]\\n\", __func__, __FILE__,\n+\t\t       __LINE__);\n+\t\treturn 
-1;\n+\t}\n+\n+\tinternal->max_queues = NTVF_VDPA_MAX_QUEUES;\n+\n+\tinternal->features = NTVF_VIRTIO_NET_SUPPORTED_FEATURES;\n+\n+\tNT_LOG(DBG, VDPA, \"%s: masked features=0x%016lX [%s:%u]\\n\", __func__,\n+\t       internal->features, __FILE__, __LINE__);\n+\n+\tfpga_profile = get_fpga_profile_from_pci(internal->pdev->addr);\n+\tif (fpga_profile == FPGA_INFO_PROFILE_VSWITCH) {\n+\t\tinternal->outport = 0;\n+\t} else {\n+\t\t/* VF4 output port 0, VF5 output port 1, VF6 output port 0, ....... */\n+\t\tinternal->outport = internal->pdev->addr.function & 1;\n+\t}\n+\n+\tlist->internal = internal;\n+\n+\tinternal->vdev =\n+\t\trte_vdpa_register_device(&pci_dev->device, &ntvf_vdpa_vdpa_ops);\n+\tNT_LOG(DBG, VDPA, \"%s: vDPA dev=%p\\n\", __func__, internal->vdev);\n+\n+\tif (!internal->vdev) {\n+\t\tNT_LOG(ERR, VDPA, \"%s: [%s:%u] Register vDPA device failed\\n\",\n+\t\t       __func__, __FILE__, __LINE__);\n+\t\tgoto error;\n+\t}\n+\n+\tpthread_mutex_lock(&internal_list_lock);\n+\tTAILQ_INSERT_TAIL(&internal_list, list, next);\n+\tpthread_mutex_unlock(&internal_list_lock);\n+\n+\t__atomic_store_n(&internal->started, 1, __ATOMIC_RELAXED);\n+\n+\tntvf_vdpa_update_datapath(internal);\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn 0;\n+\n+error:\n+\trte_free(list);\n+\trte_free(internal);\n+\treturn -1;\n+}\n+\n+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev)\n+{\n+\tstruct ntvf_vdpa_internal *internal;\n+\tstruct internal_list *list;\n+\tint vf_num = nt_vfio_vf_num(pci_dev);\n+\n+\tLOG_FUNC_ENTER();\n+\tlist = ntvf_vdpa_find_internal_resource_by_dev(pci_dev);\n+\tif (list == NULL) {\n+\t\tNT_LOG(ERR, VDPA, \"%s: Invalid device: %s\", __func__,\n+\t\t       pci_dev->name);\n+\t\treturn -1;\n+\t}\n+\n+\tinternal = list->internal;\n+\t__atomic_store_n(&internal->started, 0, __ATOMIC_RELAXED);\n+\n+\tntvf_vdpa_update_datapath(internal);\n+\n+\trte_pci_unmap_device(internal->pdev);\n+\tnt_vfio_remove(vf_num);\n+\trte_vdpa_unregister_device(internal->vdev);\n+\n+\tpthread_mutex_lock(&internal_list_lock);\n+\tTAILQ_REMOVE(&internal_list, list, next);\n+\tpthread_mutex_unlock(&internal_list_lock);\n+\n+\trte_free(list);\n+\trte_free(internal);\n+\n+\tLOG_FUNC_LEAVE();\n+\treturn 0;\n+}\n+\n+static const struct rte_pci_id pci_id_ntvf_vdpa_map[] = {\n+\t{\n+\t\t.vendor_id = 0,\n+\t},\n+};\n+\n+static struct rte_pci_driver rte_ntvf_vdpa = {\n+\t.id_table = pci_id_ntvf_vdpa_map,\n+\t.drv_flags = 0,\n+\t.probe = ntvf_vdpa_pci_probe,\n+\t.remove = ntvf_vdpa_pci_remove,\n+};\n+\n+RTE_PMD_REGISTER_PCI(net_ntvf_vdpa, rte_ntvf_vdpa);\n+RTE_PMD_REGISTER_PCI_TABLE(net_ntvf_vdpa, pci_id_ntvf_vdpa_map);\n+RTE_PMD_REGISTER_KMOD_DEP(net_ntvf_vdpa, \"* vfio-pci\");\n+\ndiff --git a/drivers/net/ntnic/ntnic_vf_vdpa.h b/drivers/net/ntnic/ntnic_vf_vdpa.h\nnew file mode 100644\nindex 0000000000..561e3bf7cf\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_vf_vdpa.h\n@@ -0,0 +1,25 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef __NTNIC_VF_VDPA_H__\n+#define __NTNIC_VF_VDPA_H__\n+\n+extern int ntvf_vdpa_logtype;\n+\n+#define LOG_FUNC_TRACE\n+#ifdef LOG_FUNC_TRACE\n+#define LOG_FUNC_ENTER() NT_LOG(DBG, VDPA, \"%s: enter\\n\", __func__)\n+#define LOG_FUNC_LEAVE() NT_LOG(DBG, VDPA, \"%s: leave\\n\", __func__)\n+#else\n+#define LOG_FUNC_ENTER()\n+#define LOG_FUNC_LEAVE()\n+#endif\n+\n+int ntvf_vdpa_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,\n+\t\t\tstruct rte_pci_device *pci_dev);\n+int ntvf_vdpa_pci_remove(struct rte_pci_device *pci_dev);\n+\n+void ntvf_vdpa_reset_hw(int 
vid);\n+\n+#endif /* __NTNIC_VF_VDPA_H__ */\ndiff --git a/drivers/net/ntnic/ntnic_vfio.c b/drivers/net/ntnic/ntnic_vfio.c\nnew file mode 100644\nindex 0000000000..1390383c55\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_vfio.c\n@@ -0,0 +1,321 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include <sys/ioctl.h>\n+\n+#include <rte_malloc.h>\n+#include <rte_memory.h>\n+#include <rte_vfio.h>\n+#include <rte_dev.h>\n+#include <rte_bus_pci.h>\n+#include <rte_spinlock.h>\n+\n+#include <ntlog.h>\n+#include <nt_util.h>\n+#include \"ntnic_vfio.h\"\n+\n+#define ONE_G_SIZE 0x40000000\n+#define ONE_G_MASK (ONE_G_SIZE - 1)\n+#define START_VF_IOVA 0x220000000000\n+\n+int nt_vfio_vf_num(const struct rte_pci_device *pdev)\n+{\n+\treturn ((pdev->addr.devid & 0x1f) << 3) + ((pdev->addr.function) & 0x7);\n+}\n+\n+/* Internal API */\n+struct vfio_dev {\n+\tint container_fd;\n+\tint group_fd;\n+\tint dev_fd;\n+\tuint64_t iova_addr;\n+};\n+\n+static struct vfio_dev vfio_list[256];\n+\n+static struct vfio_dev *vfio_get(int vf_num)\n+{\n+\tif (vf_num < 0 || vf_num > 255)\n+\t\treturn NULL;\n+\treturn &vfio_list[vf_num];\n+}\n+\n+/* External API */\n+int nt_vfio_setup(struct rte_pci_device *dev)\n+{\n+\tchar devname[RTE_DEV_NAME_MAX_LEN] = { 0 };\n+\tint iommu_group_num;\n+\tint vf_num;\n+\tstruct vfio_dev *vfio;\n+\n+\tNT_LOG(INF, ETHDEV, \"NT VFIO device setup %s\\n\", dev->name);\n+\n+\tvf_num = nt_vfio_vf_num(dev);\n+\n+\tvfio = vfio_get(vf_num);\n+\tif (vfio == NULL) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"VFIO device setup failed. Illegal device id\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tvfio->dev_fd = -1;\n+\tvfio->group_fd = -1;\n+\tvfio->container_fd = -1;\n+\tvfio->iova_addr = START_VF_IOVA;\n+\n+\trte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);\n+\trte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,\n+\t\t\t       &iommu_group_num);\n+\n+\tif (vf_num == 0) {\n+\t\t/* use default container for pf0 */\n+\t\tvfio->container_fd = RTE_VFIO_DEFAULT_CONTAINER_FD;\n+\t} else {\n+\t\tvfio->container_fd = rte_vfio_container_create();\n+\t\tif (vfio->container_fd < 0) {\n+\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t       \"VFIO device setup failed. VFIO container creation failed.\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tvfio->group_fd = rte_vfio_container_group_bind(vfio->container_fd,\n+\t\t\t iommu_group_num);\n+\tif (vfio->group_fd < 0) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"VFIO device setup failed. VFIO container group bind failed.\\n\");\n+\t\tgoto err;\n+\t}\n+\n+\tif (vf_num > 0) {\n+\t\tif (rte_pci_map_device(dev)) {\n+\t\t\tNT_LOG(ERR, ETHDEV,\n+\t\t\t       \"Map VFIO device failed. is the vfio-pci driver loaded?\\n\");\n+\t\t\tgoto err;\n+\t\t}\n+\t}\n+\n+\tvfio->dev_fd = rte_intr_dev_fd_get(dev->intr_handle);\n+\n+\tNT_LOG(DBG, ETHDEV,\n+\t       \"%s: VFIO id=%d, dev_fd=%d, container_fd=%d, group_fd=%d, iommu_group_num=%d\\n\",\n+\t       dev->name, vf_num, vfio->dev_fd, vfio->container_fd,\n+\t       vfio->group_fd, iommu_group_num);\n+\n+\treturn vf_num;\n+\n+err:\n+\tif (vfio->container_fd != RTE_VFIO_DEFAULT_CONTAINER_FD)\n+\t\trte_vfio_container_destroy(vfio->container_fd);\n+\treturn -1;\n+}\n+\n+int nt_vfio_remove(int vf_num)\n+{\n+\tstruct vfio_dev *vfio;\n+\n+\tNT_LOG(DBG, ETHDEV, \"NT VFIO device remove VF=%d\\n\", vf_num);\n+\n+\tvfio = vfio_get(vf_num);\n+\tif (!vfio) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"VFIO device remove failed. 
Illegal device id\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\trte_vfio_container_destroy(vfio->container_fd);\n+\treturn 0;\n+}\n+\n+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,\n+\t\t    uint64_t size)\n+{\n+\tuint64_t gp_virt_base;\n+\tuint64_t gp_offset;\n+\n+\tif (size == ONE_G_SIZE) {\n+\t\tgp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;\n+\t\tgp_offset = (uint64_t)virt_addr & ONE_G_MASK;\n+\t} else {\n+\t\tgp_virt_base = (uint64_t)virt_addr;\n+\t\tgp_offset = 0;\n+\t}\n+\n+\tstruct vfio_dev *vfio;\n+\n+\tvfio = vfio_get(vf_num);\n+\tif (vfio == NULL) {\n+\t\tNT_LOG(ERR, ETHDEV, \"VFIO MAP: VF number %d invalid\\n\", vf_num);\n+\t\treturn -1;\n+\t}\n+\n+\tNT_LOG(DBG, ETHDEV,\n+\t       \"VFIO MMAP VF=%d VirtAddr=%\" PRIX64 \" HPA=%\" PRIX64\n+\t       \" VirtBase=%\" PRIX64 \" IOVA Addr=%\" PRIX64 \" size=%d\\n\",\n+\t       vf_num, virt_addr, rte_malloc_virt2iova(virt_addr), gp_virt_base,\n+\t       vfio->iova_addr, size);\n+\n+\tint res = rte_vfio_container_dma_map(vfio->container_fd, gp_virt_base,\n+\t\t\t\t\t     vfio->iova_addr, size);\n+\n+\tNT_LOG(DBG, ETHDEV, \"VFIO MMAP res %i, container_fd %i, vf_num %i\\n\",\n+\t       res, vfio->container_fd, vf_num);\n+\tif (res) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"rte_vfio_container_dma_map failed: res %d\\n\", res);\n+\t\treturn -1;\n+\t}\n+\n+\t*iova_addr = vfio->iova_addr + gp_offset;\n+\n+\tvfio->iova_addr += ONE_G_SIZE;\n+\n+\treturn 0;\n+}\n+\n+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,\n+\t\t      uint64_t size)\n+{\n+\tuint64_t gp_virt_base;\n+\tstruct vfio_dev *vfio;\n+\n+\tif (size == ONE_G_SIZE) {\n+\t\tuint64_t gp_offset;\n+\n+\t\tgp_virt_base = (uint64_t)virt_addr & ~ONE_G_MASK;\n+\t\tgp_offset = (uint64_t)virt_addr & ONE_G_MASK;\n+\t\tiova_addr -= gp_offset;\n+\t} else {\n+\t\tgp_virt_base = (uint64_t)virt_addr;\n+\t}\n+\n+\tvfio = vfio_get(vf_num);\n+\n+\tif (vfio == NULL) {\n+\t\tNT_LOG(ERR, ETHDEV, \"VFIO UNMAP: VF number %d invalid\\n\",\n+\t\t       vf_num);\n+\t\treturn -1;\n+\t}\n+\n+\tif (vfio->container_fd == -1)\n+\t\treturn 0;\n+\n+\tint res = rte_vfio_container_dma_unmap(vfio->container_fd, gp_virt_base,\n+\t\t\t\t\t       iova_addr, size);\n+\tif (res != 0) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"VFIO UNMMAP FAILED! 
res %i, container_fd %i, vf_num %i, virt_base=%\" PRIX64\n+\t\t       \", IOVA=%\" PRIX64 \", size=%i\\n\",\n+\t\t       res, vfio->container_fd, vf_num, gp_virt_base, iova_addr,\n+\t\t       (int)size);\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+/* vDPA mapping with Guest Phy addresses as IOVA */\n+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,\n+\t\t\t uint64_t size)\n+{\n+\tstruct vfio_dev *vfio = vfio_get(vf_num);\n+\n+\tif (vfio == NULL) {\n+\t\tNT_LOG(ERR, ETHDEV, \"VFIO MAP: VF number %d invalid\\n\", vf_num);\n+\t\treturn -1;\n+\t}\n+\n+\tNT_LOG(DBG, ETHDEV,\n+\t       \"VFIO vDPA MMAP VF=%d VirtAddr=%\" PRIX64 \" IOVA Addr=%\" PRIX64\n+\t       \" size=%d\\n\",\n+\t       vf_num, virt_addr, iova_addr, size);\n+\n+\tint res = rte_vfio_container_dma_map(vfio->container_fd, virt_addr,\n+\t\t\t\t\t     iova_addr, size);\n+\n+\tNT_LOG(DBG, ETHDEV,\n+\t       \"VFIO vDPA MMAP res %i, container_fd %i, vf_num %i\\n\", res,\n+\t       vfio->container_fd, vf_num);\n+\tif (res) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"rte_vfio_container_dma_map failed: res %d\\n\", res);\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,\n+\t\t\t   uint64_t size)\n+{\n+\tstruct vfio_dev *vfio = vfio_get(vf_num);\n+\n+\tif (vfio == NULL) {\n+\t\tNT_LOG(ERR, ETHDEV, \"VFIO vDPA UNMAP: VF number %d invalid\\n\",\n+\t\t       vf_num);\n+\t\treturn -1;\n+\t}\n+\tint res = rte_vfio_container_dma_unmap(vfio->container_fd, virt_addr,\n+\t\t\t\t\t       iova_addr, size);\n+\tif (res != 0) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"VFIO vDPA UNMMAP FAILED! res %i, container_fd %i, vf_num %i\\n\",\n+\t\t       res, vfio->container_fd, vf_num);\n+\t\treturn -1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+int nt_vfio_get_container_fd(int vf_num)\n+{\n+\tstruct vfio_dev *vfio;\n+\n+\tvfio = vfio_get(vf_num);\n+\tif (!vfio) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"VFIO device remove failed. Illegal device id\\n\");\n+\t\treturn -1;\n+\t}\n+\treturn vfio->container_fd;\n+}\n+\n+int nt_vfio_get_group_fd(int vf_num)\n+{\n+\tstruct vfio_dev *vfio;\n+\n+\tvfio = vfio_get(vf_num);\n+\tif (!vfio) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"VFIO device remove failed. Illegal device id\\n\");\n+\t\treturn -1;\n+\t}\n+\treturn vfio->group_fd;\n+}\n+\n+int nt_vfio_get_dev_fd(int vf_num)\n+{\n+\tstruct vfio_dev *vfio;\n+\n+\tvfio = vfio_get(vf_num);\n+\tif (!vfio) {\n+\t\tNT_LOG(ERR, ETHDEV,\n+\t\t       \"VFIO device remove failed. 
Illegal device id\\n\");\n+\t\treturn -1;\n+\t}\n+\treturn vfio->dev_fd;\n+}\n+\n+/* Internal init */\n+\n+RTE_INIT(nt_vfio_init);\n+\n+static void nt_vfio_init(void)\n+{\n+\tstruct nt_util_vfio_impl s = { .vfio_dma_map = nt_vfio_dma_map,\n+\t\t       .vfio_dma_unmap = nt_vfio_dma_unmap\n+\t};\n+\tnt_util_vfio_init(&s);\n+}\ndiff --git a/drivers/net/ntnic/ntnic_vfio.h b/drivers/net/ntnic/ntnic_vfio.h\nnew file mode 100644\nindex 0000000000..5d8a63d364\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_vfio.h\n@@ -0,0 +1,31 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef _NTNIC_VFIO_H_\n+#define _NTNIC_VFIO_H_\n+\n+#include <rte_dev.h>\n+#include <rte_bus_pci.h>\n+#include <ethdev_pci.h>\n+\n+int nt_vfio_setup(struct rte_pci_device *dev);\n+int nt_vfio_remove(int vf_num);\n+\n+int nt_vfio_get_container_fd(int vf_num);\n+int nt_vfio_get_group_fd(int vf_num);\n+int nt_vfio_get_dev_fd(int vf_num);\n+\n+int nt_vfio_dma_map(int vf_num, void *virt_addr, uint64_t *iova_addr,\n+\t\t    uint64_t size);\n+int nt_vfio_dma_unmap(int vf_num, void *virt_addr, uint64_t iova_addr,\n+\t\t      uint64_t size);\n+\n+int nt_vfio_dma_map_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,\n+\t\t\t uint64_t size);\n+int nt_vfio_dma_unmap_vdpa(int vf_num, uint64_t virt_addr, uint64_t iova_addr,\n+\t\t\t   uint64_t size);\n+\n+/* Find device (PF/VF) number from device address */\n+int nt_vfio_vf_num(const struct rte_pci_device *dev);\n+#endif /* _NTNIC_VFIO_H_ */\ndiff --git a/drivers/net/ntnic/ntnic_xstats.c b/drivers/net/ntnic/ntnic_xstats.c\nnew file mode 100644\nindex 0000000000..c0e67ba03d\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_xstats.c\n@@ -0,0 +1,703 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#include <rte_ethdev.h>\n+#include <rte_common.h>\n+\n+#include \"ntdrv_4ga.h\"\n+#include \"ntlog.h\"\n+#include \"nthw_drv.h\"\n+#include \"nthw_fpga.h\"\n+#include \"ntnic_xstats.h\"\n+\n+#define UNUSED __rte_unused\n+\n+struct rte_nthw_xstats_names_s {\n+\tchar name[RTE_ETH_XSTATS_NAME_SIZE];\n+\tuint8_t source;\n+\tunsigned int offset;\n+};\n+\n+/*\n+ * Extended stat for VSwitch\n+ */\n+static struct rte_nthw_xstats_names_s nthw_virt_xstats_names[] = {\n+\t{ \"rx_octets\", 1, offsetof(struct port_counters_vswitch_v1, octets) },\n+\t{ \"rx_packets\", 1, offsetof(struct port_counters_vswitch_v1, pkts) },\n+\t{\t\"rx_dropped_packets\", 1,\n+\t\toffsetof(struct port_counters_vswitch_v1, drop_events)\n+\t},\n+\t{\t\"rx_qos_dropped_bytes\", 1,\n+\t\toffsetof(struct port_counters_vswitch_v1, qos_drop_octets)\n+\t},\n+\t{\t\"rx_qos_dropped_packets\", 1,\n+\t\toffsetof(struct port_counters_vswitch_v1, qos_drop_pkts)\n+\t},\n+\t{ \"tx_octets\", 2, offsetof(struct port_counters_vswitch_v1, octets) },\n+\t{ \"tx_packets\", 2, offsetof(struct port_counters_vswitch_v1, pkts) },\n+\t{\t\"tx_dropped_packets\", 2,\n+\t\toffsetof(struct port_counters_vswitch_v1, drop_events)\n+\t},\n+\t{\t\"tx_qos_dropped_bytes\", 2,\n+\t\toffsetof(struct port_counters_vswitch_v1, qos_drop_octets)\n+\t},\n+\t{\t\"tx_qos_dropped_packets\", 2,\n+\t\toffsetof(struct port_counters_vswitch_v1, qos_drop_pkts)\n+\t},\n+};\n+\n+#define NTHW_VIRT_XSTATS_NAMES RTE_DIM(nthw_virt_xstats_names)\n+\n+/*\n+ * Extended stat for Capture/Inline - implements RMON\n+ * FLM 0.17\n+ */\n+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v1[] = {\n+\t{ \"rx_drop_events\", 1, offsetof(struct port_counters_v2, drop_events) },\n+\t{ \"rx_octets\", 1, 
offsetof(struct port_counters_v2, octets) },\n+\t{ \"rx_packets\", 1, offsetof(struct port_counters_v2, pkts) },\n+\t{\t\"rx_broadcast_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, broadcast_pkts)\n+\t},\n+\t{\t\"rx_multicast_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, multicast_pkts)\n+\t},\n+\t{\t\"rx_unicast_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, unicast_pkts)\n+\t},\n+\t{\t\"rx_align_errors\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_alignment)\n+\t},\n+\t{\t\"rx_code_violation_errors\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_code_violation)\n+\t},\n+\t{ \"rx_crc_errors\", 1, offsetof(struct port_counters_v2, pkts_crc) },\n+\t{\t\"rx_undersize_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, undersize_pkts)\n+\t},\n+\t{\t\"rx_oversize_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, oversize_pkts)\n+\t},\n+\t{ \"rx_fragments\", 1, offsetof(struct port_counters_v2, fragments) },\n+\t{\t\"rx_jabbers_not_truncated\", 1,\n+\t\toffsetof(struct port_counters_v2, jabbers_not_truncated)\n+\t},\n+\t{\t\"rx_jabbers_truncated\", 1,\n+\t\toffsetof(struct port_counters_v2, jabbers_truncated)\n+\t},\n+\t{\t\"rx_size_64_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_64_octets)\n+\t},\n+\t{\t\"rx_size_65_to_127_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_65_to_127_octets)\n+\t},\n+\t{\t\"rx_size_128_to_255_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_128_to_255_octets)\n+\t},\n+\t{\t\"rx_size_256_to_511_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_256_to_511_octets)\n+\t},\n+\t{\t\"rx_size_512_to_1023_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_512_to_1023_octets)\n+\t},\n+\t{\t\"rx_size_1024_to_1518_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_1024_to_1518_octets)\n+\t},\n+\t{\t\"rx_size_1519_to_2047_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_1519_to_2047_octets)\n+\t},\n+\t{\t\"rx_size_2048_to_4095_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_2048_to_4095_octets)\n+\t},\n+\t{\t\"rx_size_4096_to_8191_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_4096_to_8191_octets)\n+\t},\n+\t{\t\"rx_size_8192_to_max_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_8192_to_max_octets)\n+\t},\n+\t{\t\"rx_ip_checksum_error\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_ip_chksum_error)\n+\t},\n+\t{\t\"rx_udp_checksum_error\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_udp_chksum_error)\n+\t},\n+\t{\t\"rx_tcp_checksum_error\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_tcp_chksum_error)\n+\t},\n+\n+\t{ \"tx_drop_events\", 2, offsetof(struct port_counters_v2, drop_events) },\n+\t{ \"tx_octets\", 2, offsetof(struct port_counters_v2, octets) },\n+\t{ \"tx_packets\", 2, offsetof(struct port_counters_v2, pkts) },\n+\t{\t\"tx_broadcast_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, broadcast_pkts)\n+\t},\n+\t{\t\"tx_multicast_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, multicast_pkts)\n+\t},\n+\t{\t\"tx_unicast_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, unicast_pkts)\n+\t},\n+\t{\t\"tx_align_errors\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_alignment)\n+\t},\n+\t{\t\"tx_code_violation_errors\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_code_violation)\n+\t},\n+\t{ \"tx_crc_errors\", 2, offsetof(struct port_counters_v2, pkts_crc) },\n+\t{\t\"tx_undersize_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, undersize_pkts)\n+\t},\n+\t{\t\"tx_oversize_packets\", 
2,\n+\t\toffsetof(struct port_counters_v2, oversize_pkts)\n+\t},\n+\t{ \"tx_fragments\", 2, offsetof(struct port_counters_v2, fragments) },\n+\t{\t\"tx_jabbers_not_truncated\", 2,\n+\t\toffsetof(struct port_counters_v2, jabbers_not_truncated)\n+\t},\n+\t{\t\"tx_jabbers_truncated\", 2,\n+\t\toffsetof(struct port_counters_v2, jabbers_truncated)\n+\t},\n+\t{\t\"tx_size_64_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_64_octets)\n+\t},\n+\t{\t\"tx_size_65_to_127_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_65_to_127_octets)\n+\t},\n+\t{\t\"tx_size_128_to_255_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_128_to_255_octets)\n+\t},\n+\t{\t\"tx_size_256_to_511_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_256_to_511_octets)\n+\t},\n+\t{\t\"tx_size_512_to_1023_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_512_to_1023_octets)\n+\t},\n+\t{\t\"tx_size_1024_to_1518_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_1024_to_1518_octets)\n+\t},\n+\t{\t\"tx_size_1519_to_2047_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_1519_to_2047_octets)\n+\t},\n+\t{\t\"tx_size_2048_to_4095_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_2048_to_4095_octets)\n+\t},\n+\t{\t\"tx_size_4096_to_8191_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_4096_to_8191_octets)\n+\t},\n+\t{\t\"tx_size_8192_to_max_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_8192_to_max_octets)\n+\t},\n+\n+\t/* FLM 0.17 */\n+\t{ \"flm_count_current\", 3, offsetof(struct flm_counters_v1, current) },\n+\t{\t\"flm_count_learn_done\", 3,\n+\t\toffsetof(struct flm_counters_v1, learn_done)\n+\t},\n+\t{\t\"flm_count_learn_ignore\", 3,\n+\t\toffsetof(struct flm_counters_v1, learn_ignore)\n+\t},\n+\t{\t\"flm_count_learn_fail\", 3,\n+\t\toffsetof(struct flm_counters_v1, learn_fail)\n+\t},\n+\t{\t\"flm_count_unlearn_done\", 3,\n+\t\toffsetof(struct flm_counters_v1, unlearn_done)\n+\t},\n+\t{\t\"flm_count_unlearn_ignore\", 3,\n+\t\toffsetof(struct flm_counters_v1, unlearn_ignore)\n+\t},\n+\t{\t\"flm_count_auto_unlearn_done\", 3,\n+\t\toffsetof(struct flm_counters_v1, auto_unlearn_done)\n+\t},\n+\t{\t\"flm_count_auto_unlearn_ignore\", 3,\n+\t\toffsetof(struct flm_counters_v1, auto_unlearn_ignore)\n+\t},\n+\t{\t\"flm_count_auto_unlearn_fail\", 3,\n+\t\toffsetof(struct flm_counters_v1, auto_unlearn_fail)\n+\t},\n+\t{\t\"flm_count_timeout_unlearn_done\", 3,\n+\t\toffsetof(struct flm_counters_v1, timeout_unlearn_done)\n+\t},\n+\t{ \"flm_count_rel_done\", 3, offsetof(struct flm_counters_v1, rel_done) },\n+\t{\t\"flm_count_rel_ignore\", 3,\n+\t\toffsetof(struct flm_counters_v1, rel_ignore)\n+\t},\n+\t{ \"flm_count_prb_done\", 3, offsetof(struct flm_counters_v1, prb_done) },\n+\t{\t\"flm_count_prb_ignore\", 3,\n+\t\toffsetof(struct flm_counters_v1, prb_ignore)\n+\t},\n+};\n+\n+/*\n+ * Extended stat for Capture/Inline - implements RMON\n+ * FLM 0.18\n+ */\n+static struct rte_nthw_xstats_names_s nthw_cap_xstats_names_v2[] = {\n+\t{ \"rx_drop_events\", 1, offsetof(struct port_counters_v2, drop_events) },\n+\t{ \"rx_octets\", 1, offsetof(struct port_counters_v2, octets) },\n+\t{ \"rx_packets\", 1, offsetof(struct port_counters_v2, pkts) },\n+\t{\t\"rx_broadcast_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, broadcast_pkts)\n+\t},\n+\t{\t\"rx_multicast_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, multicast_pkts)\n+\t},\n+\t{\t\"rx_unicast_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, unicast_pkts)\n+\t},\n+\t{\t\"rx_align_errors\", 
1,\n+\t\toffsetof(struct port_counters_v2, pkts_alignment)\n+\t},\n+\t{\t\"rx_code_violation_errors\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_code_violation)\n+\t},\n+\t{ \"rx_crc_errors\", 1, offsetof(struct port_counters_v2, pkts_crc) },\n+\t{\t\"rx_undersize_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, undersize_pkts)\n+\t},\n+\t{\t\"rx_oversize_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, oversize_pkts)\n+\t},\n+\t{ \"rx_fragments\", 1, offsetof(struct port_counters_v2, fragments) },\n+\t{\t\"rx_jabbers_not_truncated\", 1,\n+\t\toffsetof(struct port_counters_v2, jabbers_not_truncated)\n+\t},\n+\t{\t\"rx_jabbers_truncated\", 1,\n+\t\toffsetof(struct port_counters_v2, jabbers_truncated)\n+\t},\n+\t{\t\"rx_size_64_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_64_octets)\n+\t},\n+\t{\t\"rx_size_65_to_127_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_65_to_127_octets)\n+\t},\n+\t{\t\"rx_size_128_to_255_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_128_to_255_octets)\n+\t},\n+\t{\t\"rx_size_256_to_511_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_256_to_511_octets)\n+\t},\n+\t{\t\"rx_size_512_to_1023_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_512_to_1023_octets)\n+\t},\n+\t{\t\"rx_size_1024_to_1518_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_1024_to_1518_octets)\n+\t},\n+\t{\t\"rx_size_1519_to_2047_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_1519_to_2047_octets)\n+\t},\n+\t{\t\"rx_size_2048_to_4095_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_2048_to_4095_octets)\n+\t},\n+\t{\t\"rx_size_4096_to_8191_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_4096_to_8191_octets)\n+\t},\n+\t{\t\"rx_size_8192_to_max_packets\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_8192_to_max_octets)\n+\t},\n+\t{\t\"rx_ip_checksum_error\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_ip_chksum_error)\n+\t},\n+\t{\t\"rx_udp_checksum_error\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_udp_chksum_error)\n+\t},\n+\t{\t\"rx_tcp_checksum_error\", 1,\n+\t\toffsetof(struct port_counters_v2, pkts_tcp_chksum_error)\n+\t},\n+\n+\t{ \"tx_drop_events\", 2, offsetof(struct port_counters_v2, drop_events) },\n+\t{ \"tx_octets\", 2, offsetof(struct port_counters_v2, octets) },\n+\t{ \"tx_packets\", 2, offsetof(struct port_counters_v2, pkts) },\n+\t{\t\"tx_broadcast_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, broadcast_pkts)\n+\t},\n+\t{\t\"tx_multicast_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, multicast_pkts)\n+\t},\n+\t{\t\"tx_unicast_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, unicast_pkts)\n+\t},\n+\t{\t\"tx_align_errors\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_alignment)\n+\t},\n+\t{\t\"tx_code_violation_errors\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_code_violation)\n+\t},\n+\t{ \"tx_crc_errors\", 2, offsetof(struct port_counters_v2, pkts_crc) },\n+\t{\t\"tx_undersize_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, undersize_pkts)\n+\t},\n+\t{\t\"tx_oversize_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, oversize_pkts)\n+\t},\n+\t{ \"tx_fragments\", 2, offsetof(struct port_counters_v2, fragments) },\n+\t{\t\"tx_jabbers_not_truncated\", 2,\n+\t\toffsetof(struct port_counters_v2, jabbers_not_truncated)\n+\t},\n+\t{\t\"tx_jabbers_truncated\", 2,\n+\t\toffsetof(struct port_counters_v2, jabbers_truncated)\n+\t},\n+\t{\t\"tx_size_64_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, 
pkts_64_octets)\n+\t},\n+\t{\t\"tx_size_65_to_127_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_65_to_127_octets)\n+\t},\n+\t{\t\"tx_size_128_to_255_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_128_to_255_octets)\n+\t},\n+\t{\t\"tx_size_256_to_511_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_256_to_511_octets)\n+\t},\n+\t{\t\"tx_size_512_to_1023_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_512_to_1023_octets)\n+\t},\n+\t{\t\"tx_size_1024_to_1518_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_1024_to_1518_octets)\n+\t},\n+\t{\t\"tx_size_1519_to_2047_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_1519_to_2047_octets)\n+\t},\n+\t{\t\"tx_size_2048_to_4095_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_2048_to_4095_octets)\n+\t},\n+\t{\t\"tx_size_4096_to_8191_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_4096_to_8191_octets)\n+\t},\n+\t{\t\"tx_size_8192_to_max_packets\", 2,\n+\t\toffsetof(struct port_counters_v2, pkts_8192_to_max_octets)\n+\t},\n+\n+\t/* FLM 0.17 */\n+\t{ \"flm_count_current\", 3, offsetof(struct flm_counters_v1, current) },\n+\t{\t\"flm_count_learn_done\", 3,\n+\t\toffsetof(struct flm_counters_v1, learn_done)\n+\t},\n+\t{\t\"flm_count_learn_ignore\", 3,\n+\t\toffsetof(struct flm_counters_v1, learn_ignore)\n+\t},\n+\t{\t\"flm_count_learn_fail\", 3,\n+\t\toffsetof(struct flm_counters_v1, learn_fail)\n+\t},\n+\t{\t\"flm_count_unlearn_done\", 3,\n+\t\toffsetof(struct flm_counters_v1, unlearn_done)\n+\t},\n+\t{\t\"flm_count_unlearn_ignore\", 3,\n+\t\toffsetof(struct flm_counters_v1, unlearn_ignore)\n+\t},\n+\t{\t\"flm_count_auto_unlearn_done\", 3,\n+\t\toffsetof(struct flm_counters_v1, auto_unlearn_done)\n+\t},\n+\t{\t\"flm_count_auto_unlearn_ignore\", 3,\n+\t\toffsetof(struct flm_counters_v1, auto_unlearn_ignore)\n+\t},\n+\t{\t\"flm_count_auto_unlearn_fail\", 3,\n+\t\toffsetof(struct flm_counters_v1, auto_unlearn_fail)\n+\t},\n+\t{\t\"flm_count_timeout_unlearn_done\", 3,\n+\t\toffsetof(struct flm_counters_v1, timeout_unlearn_done)\n+\t},\n+\t{ \"flm_count_rel_done\", 3, offsetof(struct flm_counters_v1, rel_done) },\n+\t{\t\"flm_count_rel_ignore\", 3,\n+\t\toffsetof(struct flm_counters_v1, rel_ignore)\n+\t},\n+\t{ \"flm_count_prb_done\", 3, offsetof(struct flm_counters_v1, prb_done) },\n+\t{\t\"flm_count_prb_ignore\", 3,\n+\t\toffsetof(struct flm_counters_v1, prb_ignore)\n+\t},\n+\n+\t/* FLM 0.20 */\n+\t{ \"flm_count_sta_done\", 3, offsetof(struct flm_counters_v1, sta_done) },\n+\t{ \"flm_count_inf_done\", 3, offsetof(struct flm_counters_v1, inf_done) },\n+\t{ \"flm_count_inf_skip\", 3, offsetof(struct flm_counters_v1, inf_skip) },\n+\t{ \"flm_count_pck_hit\", 3, offsetof(struct flm_counters_v1, pck_hit) },\n+\t{ \"flm_count_pck_miss\", 3, offsetof(struct flm_counters_v1, pck_miss) },\n+\t{ \"flm_count_pck_unh\", 3, offsetof(struct flm_counters_v1, pck_unh) },\n+\t{ \"flm_count_pck_dis\", 3, offsetof(struct flm_counters_v1, pck_dis) },\n+\t{ \"flm_count_csh_hit\", 3, offsetof(struct flm_counters_v1, csh_hit) },\n+\t{ \"flm_count_csh_miss\", 3, offsetof(struct flm_counters_v1, csh_miss) },\n+\t{ \"flm_count_csh_unh\", 3, offsetof(struct flm_counters_v1, csh_unh) },\n+\t{\t\"flm_count_cuc_start\", 3,\n+\t\toffsetof(struct flm_counters_v1, cuc_start)\n+\t},\n+\t{ \"flm_count_cuc_move\", 3, offsetof(struct flm_counters_v1, cuc_move) },\n+};\n+\n+#define NTHW_CAP_XSTATS_NAMES_V1 RTE_DIM(nthw_cap_xstats_names_v1)\n+#define NTHW_CAP_XSTATS_NAMES_V2 
RTE_DIM(nthw_cap_xstats_names_v2)\n+\n+/*\n+ * Container for the reset values\n+ */\n+#define NTHW_XSTATS_SIZE ((NTHW_VIRT_XSTATS_NAMES < NTHW_CAP_XSTATS_NAMES_V2) ? \\\n+\tNTHW_CAP_XSTATS_NAMES_V2 : NTHW_VIRT_XSTATS_NAMES)\n+\n+uint64_t nthw_xstats_reset_val[NUM_ADAPTER_PORTS_MAX][NTHW_XSTATS_SIZE] = {{ 0 }};\n+\n+\n+/*\n+ * These functions must only be called with stat mutex locked\n+ */\n+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,\n+\t\t    unsigned int n, bool is_vswitch, uint8_t port)\n+{\n+\tunsigned int i;\n+\tuint8_t *flm_ptr;\n+\tuint8_t *rx_ptr;\n+\tuint8_t *tx_ptr;\n+\tuint32_t nb_names;\n+\tstruct rte_nthw_xstats_names_s *names;\n+\n+\tif (is_vswitch) {\n+\t\tflm_ptr = NULL;\n+\t\trx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];\n+\t\ttx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];\n+\t\tnames = nthw_virt_xstats_names;\n+\t\tnb_names = NTHW_VIRT_XSTATS_NAMES;\n+\t} else {\n+\t\tflm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;\n+\t\trx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];\n+\t\ttx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];\n+\t\tif (p_nt4ga_stat->flm_stat_ver < 18) {\n+\t\t\tnames = nthw_cap_xstats_names_v1;\n+\t\t\tnb_names = NTHW_CAP_XSTATS_NAMES_V1;\n+\t\t} else {\n+\t\t\tnames = nthw_cap_xstats_names_v2;\n+\t\t\tnb_names = NTHW_CAP_XSTATS_NAMES_V2;\n+\t\t}\n+\t}\n+\n+\tfor (i = 0; i < n && i < nb_names; i++) {\n+\t\tstats[i].id = i;\n+\t\tswitch (names[i].source) {\n+\t\tcase 1:\n+\t\t\t/* RX stat */\n+\t\t\tstats[i].value =\n+\t\t\t\t*((uint64_t *)&rx_ptr[names[i].offset]) -\n+\t\t\t\tnthw_xstats_reset_val[port][i];\n+\t\t\tbreak;\n+\t\tcase 2:\n+\t\t\t/* TX stat */\n+\t\t\tstats[i].value =\n+\t\t\t\t*((uint64_t *)&tx_ptr[names[i].offset]) -\n+\t\t\t\tnthw_xstats_reset_val[port][i];\n+\t\t\tbreak;\n+\t\tcase 3:\n+\t\t\t/* FLM stat */\n+\t\t\tif (flm_ptr) {\n+\t\t\t\tstats[i].value =\n+\t\t\t\t\t*((uint64_t *)&flm_ptr[names[i].offset]) -\n+\t\t\t\t\tnthw_xstats_reset_val[0][i];\n+\t\t\t} else {\n+\t\t\t\tstats[i].value = 0;\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tstats[i].value = 0;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\treturn i;\n+}\n+\n+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,\n+\t\t\t  uint64_t *values, unsigned int n, bool is_vswitch,\n+\t\t\t  uint8_t port)\n+{\n+\tunsigned int i;\n+\tuint8_t *flm_ptr;\n+\tuint8_t *rx_ptr;\n+\tuint8_t *tx_ptr;\n+\tuint32_t nb_names;\n+\tstruct rte_nthw_xstats_names_s *names;\n+\tint count = 0;\n+\n+\tif (is_vswitch) {\n+\t\tflm_ptr = NULL;\n+\t\trx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];\n+\t\ttx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];\n+\t\tnames = nthw_virt_xstats_names;\n+\t\tnb_names = NTHW_VIRT_XSTATS_NAMES;\n+\t} else {\n+\t\tflm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;\n+\t\trx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];\n+\t\ttx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];\n+\t\tif (p_nt4ga_stat->flm_stat_ver < 18) {\n+\t\t\tnames = nthw_cap_xstats_names_v1;\n+\t\t\tnb_names = NTHW_CAP_XSTATS_NAMES_V1;\n+\t\t} else {\n+\t\t\tnames = nthw_cap_xstats_names_v2;\n+\t\t\tnb_names = NTHW_CAP_XSTATS_NAMES_V2;\n+\t\t}\n+\t}\n+\n+\tfor (i = 0; i < n; i++) {\n+\t\tif (ids[i] < nb_names) {\n+\t\t\tswitch (names[ids[i]].source) {\n+\t\t\tcase 1:\n+\t\t\t\t/* RX stat */\n+\t\t\t\tvalues[i] =\n+\t\t\t\t\t*((uint64_t *)&rx_ptr[names[ids[i]]\n+\t\t\t\t\t\t\t 
     .offset]) -\n+\t\t\t\t\tnthw_xstats_reset_val[port][ids[i]];\n+\t\t\t\tbreak;\n+\t\t\tcase 2:\n+\t\t\t\t/* TX stat */\n+\t\t\t\tvalues[i] =\n+\t\t\t\t\t*((uint64_t *)&tx_ptr[names[ids[i]]\n+\t\t\t\t\t\t\t      .offset]) -\n+\t\t\t\t\tnthw_xstats_reset_val[port][ids[i]];\n+\t\t\t\tbreak;\n+\t\t\tcase 3:\n+\t\t\t\t/* FLM stat */\n+\t\t\t\tif (flm_ptr) {\n+\t\t\t\t\tvalues[i] =\n+\t\t\t\t\t\t*((uint64_t *)&flm_ptr\n+\t\t\t\t\t\t  [names[ids[i]].offset]) -\n+\t\t\t\t\t\tnthw_xstats_reset_val[0][ids[i]];\n+\t\t\t\t} else {\n+\t\t\t\t\tvalues[i] = 0;\n+\t\t\t\t}\n+\t\t\t\tbreak;\n+\t\t\tdefault:\n+\t\t\t\tvalues[i] = 0;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tcount++;\n+\t\t}\n+\t}\n+\n+\treturn count;\n+}\n+\n+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port)\n+{\n+\tunsigned int i;\n+\tuint8_t *flm_ptr;\n+\tuint8_t *rx_ptr;\n+\tuint8_t *tx_ptr;\n+\tuint32_t nb_names;\n+\tstruct rte_nthw_xstats_names_s *names;\n+\n+\tif (is_vswitch) {\n+\t\tflm_ptr = NULL;\n+\t\trx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_rx[port];\n+\t\ttx_ptr = (uint8_t *)&p_nt4ga_stat->virt.mp_stat_structs_port_tx[port];\n+\t\tnames = nthw_virt_xstats_names;\n+\t\tnb_names = NTHW_VIRT_XSTATS_NAMES;\n+\t} else {\n+\t\tflm_ptr = (uint8_t *)p_nt4ga_stat->mp_stat_structs_flm;\n+\t\trx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_rx[port];\n+\t\ttx_ptr = (uint8_t *)&p_nt4ga_stat->cap.mp_stat_structs_port_tx[port];\n+\t\tif (p_nt4ga_stat->flm_stat_ver < 18) {\n+\t\t\tnames = nthw_cap_xstats_names_v1;\n+\t\t\tnb_names = NTHW_CAP_XSTATS_NAMES_V1;\n+\t\t} else {\n+\t\t\tnames = nthw_cap_xstats_names_v2;\n+\t\t\tnb_names = NTHW_CAP_XSTATS_NAMES_V2;\n+\t\t}\n+\t}\n+\n+\tfor (i = 0; i < nb_names; i++) {\n+\t\tswitch (names[i].source) {\n+\t\tcase 1:\n+\t\t\t/* RX stat */\n+\t\t\tnthw_xstats_reset_val[port][i] =\n+\t\t\t\t*((uint64_t *)&rx_ptr[names[i].offset]);\n+\t\t\tbreak;\n+\t\tcase 2:\n+\t\t\t/* TX stat */\n+\t\t\tnthw_xstats_reset_val[port][i] =\n+\t\t\t\t*((uint64_t *)&tx_ptr[names[i].offset]);\n+\t\t\tbreak;\n+\t\tcase 3:\n+\t\t\t/*\n+\t\t\t * FLM stat\n+\t\t\t * Reset makes no sense for flm_count_current\n+\t\t\t */\n+\t\t\tif (flm_ptr && strcmp(names[i].name, \"flm_count_current\") != 0) {\n+\t\t\t\tnthw_xstats_reset_val[0][i] =\n+\t\t\t\t\t*((uint64_t *)&flm_ptr[names[i].offset]);\n+\t\t\t}\n+\t\t\tbreak;\n+\t\tdefault:\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+}\n+\n+/*\n+ * These functions does not require stat mutex locked\n+ */\n+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,\n+\t\t\t  struct rte_eth_xstat_name *xstats_names,\n+\t\t\t  unsigned int size, bool is_vswitch)\n+{\n+\tint count = 0;\n+\tunsigned int i;\n+\tuint32_t nb_names;\n+\tstruct rte_nthw_xstats_names_s *names;\n+\n+\tif (is_vswitch) {\n+\t\tnames = nthw_virt_xstats_names;\n+\t\tnb_names = NTHW_VIRT_XSTATS_NAMES;\n+\t} else {\n+\t\tif (p_nt4ga_stat->flm_stat_ver < 18) {\n+\t\t\tnames = nthw_cap_xstats_names_v1;\n+\t\t\tnb_names = NTHW_CAP_XSTATS_NAMES_V1;\n+\t\t} else {\n+\t\t\tnames = nthw_cap_xstats_names_v2;\n+\t\t\tnb_names = NTHW_CAP_XSTATS_NAMES_V2;\n+\t\t}\n+\t}\n+\n+\tif (!xstats_names)\n+\t\treturn nb_names;\n+\n+\tfor (i = 0; i < size && i < nb_names; i++) {\n+\t\tstrlcpy(xstats_names[i].name, names[i].name,\n+\t\t\tsizeof(xstats_names[i].name));\n+\t\tcount++;\n+\t}\n+\n+\treturn count;\n+}\n+\n+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,\n+\t\t\t\tstruct rte_eth_xstat_name *xstats_names,\n+\t\t\t\tconst uint64_t *ids, unsigned int size,\n+\t\t\t\tbool 
is_vswitch)\n+{\n+\tint count = 0;\n+\tunsigned int i;\n+\n+\tuint32_t nb_names;\n+\tstruct rte_nthw_xstats_names_s *names;\n+\n+\tif (is_vswitch) {\n+\t\tnames = nthw_virt_xstats_names;\n+\t\tnb_names = NTHW_VIRT_XSTATS_NAMES;\n+\t} else {\n+\t\tif (p_nt4ga_stat->flm_stat_ver < 18) {\n+\t\t\tnames = nthw_cap_xstats_names_v1;\n+\t\t\tnb_names = NTHW_CAP_XSTATS_NAMES_V1;\n+\t\t} else {\n+\t\t\tnames = nthw_cap_xstats_names_v2;\n+\t\t\tnb_names = NTHW_CAP_XSTATS_NAMES_V2;\n+\t\t}\n+\t}\n+\n+\tif (!xstats_names)\n+\t\treturn nb_names;\n+\n+\tfor (i = 0; i < size; i++) {\n+\t\tif (ids[i] < nb_names) {\n+\t\t\tstrlcpy(xstats_names[i].name, names[ids[i]].name,\n+\t\t\t\tRTE_ETH_XSTATS_NAME_SIZE);\n+\t\t}\n+\t\tcount++;\n+\t}\n+\n+\treturn count;\n+}\ndiff --git a/drivers/net/ntnic/ntnic_xstats.h b/drivers/net/ntnic/ntnic_xstats.h\nnew file mode 100644\nindex 0000000000..0a82a1a677\n--- /dev/null\n+++ b/drivers/net/ntnic/ntnic_xstats.h\n@@ -0,0 +1,22 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Napatech A/S\n+ */\n+\n+#ifndef NTNIC_XSTATS_H_\n+#define NTNIC_XSTATS_H_\n+\n+int nthw_xstats_get_names(nt4ga_stat_t *p_nt4ga_stat,\n+\t\t\t  struct rte_eth_xstat_name *xstats_names,\n+\t\t\t  unsigned int size, bool is_vswitch);\n+int nthw_xstats_get(nt4ga_stat_t *p_nt4ga_stat, struct rte_eth_xstat *stats,\n+\t\t    unsigned int n, bool is_vswitch, uint8_t port);\n+void nthw_xstats_reset(nt4ga_stat_t *p_nt4ga_stat, bool is_vswitch, uint8_t port);\n+int nthw_xstats_get_names_by_id(nt4ga_stat_t *p_nt4ga_stat,\n+\t\t\t\tstruct rte_eth_xstat_name *xstats_names,\n+\t\t\t\tconst uint64_t *ids, unsigned int size,\n+\t\t\t\tbool is_vswitch);\n+int nthw_xstats_get_by_id(nt4ga_stat_t *p_nt4ga_stat, const uint64_t *ids,\n+\t\t\t  uint64_t *values, unsigned int n, bool is_vswitch,\n+\t\t\t  uint8_t port);\n+\n+#endif /* NTNIC_XSTATS_H_ */\n",
    "prefixes": [
        "v16",
        "7/8"
    ]
}