get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
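
For illustration, below is a minimal sketch of how a client might read this resource over HTTP, using Python's requests library (the library choice and the PW_TOKEN placeholder are assumptions, not part of this page; only GET is shown, and PUT/PATCH additionally require an API token with sufficient rights on the Patchwork instance).

    import requests

    # Read-only request against the endpoint shown below; no authentication
    # is needed for GET on a public Patchwork instance.
    url = "http://patchwork.dpdk.org/api/patches/131813/"
    resp = requests.get(url, headers={"Accept": "application/json"}, timeout=30)
    resp.raise_for_status()
    patch = resp.json()

    # A few of the fields present in the response body reproduced below.
    print(patch["name"])    # "[v2,6/7] baseband/acc: introduce the new VRB2 variant"
    print(patch["state"])   # "superseded"
    print(patch["mbox"])    # URL of the patch in mbox form

    # A partial update (PATCH) would need an authenticated request; PW_TOKEN
    # is a hypothetical placeholder for a Patchwork API token.
    # requests.patch(url, headers={"Authorization": "Token " + PW_TOKEN},
    #                json={"state": "accepted"})

The response that follows is the page's example exchange for this endpoint.
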

GET /api/patches/131813/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 131813,
    "url": "http://patchwork.dpdk.org/api/patches/131813/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20230921204349.3285318-7-nicolas.chautru@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230921204349.3285318-7-nicolas.chautru@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230921204349.3285318-7-nicolas.chautru@intel.com",
    "date": "2023-09-21T20:43:48",
    "name": "[v2,6/7] baseband/acc: introduce the new VRB2 variant",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "1535083872f59684f084e1b3dd09532be971f619",
    "submitter": {
        "id": 1314,
        "url": "http://patchwork.dpdk.org/api/people/1314/?format=api",
        "name": "Chautru, Nicolas",
        "email": "nicolas.chautru@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patchwork.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20230921204349.3285318-7-nicolas.chautru@intel.com/mbox/",
    "series": [
        {
            "id": 29596,
            "url": "http://patchwork.dpdk.org/api/series/29596/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=29596",
            "date": "2023-09-21T20:43:42",
            "name": "VRB2 bbdev PMD introduction",
            "version": 2,
            "mbox": "http://patchwork.dpdk.org/series/29596/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/131813/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/131813/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E30E74260E;\n\tThu, 21 Sep 2023 22:47:40 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 1227F40A8A;\n\tThu, 21 Sep 2023 22:47:07 +0200 (CEST)",
            "from mgamail.intel.com (mgamail.intel.com [134.134.136.100])\n by mails.dpdk.org (Postfix) with ESMTP id EE311402D3\n for <dev@dpdk.org>; Thu, 21 Sep 2023 22:47:01 +0200 (CEST)",
            "from orsmga003.jf.intel.com ([10.7.209.27])\n by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 21 Sep 2023 13:47:01 -0700",
            "from spr-npg-bds1-eec2.sn.intel.com (HELO spr-npg-bds1-eec2..)\n ([10.233.181.123])\n by orsmga003.jf.intel.com with ESMTP; 21 Sep 2023 13:47:00 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1695329222; x=1726865222;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=r3Eo4Sht0VBjhdsOI8QHVaRqa24kfpazz5JK39C8Glw=;\n b=luj+mYB3hWrh6Vg0n2C9E/c4hB1F0E1cdsNaFczULZwo3lhAhxXmHnyZ\n HJV9r3hY4vW4MDU4ZuJlmz2BSXpa59NQJceFk5ZRI8n4Kfv4Zws4iIGn5\n XXi++0QngK/X1DQuJCO76gTJdiABLDi6HiVtk+iGB7n8fbGc+T999KfvQ\n OSwWoElMrN37Brx+pWgd1dTXOe6FIszykhaPwl7gmpQByGy6lUAjdi8wT\n cquQWgwHqI2U5oUiqgJY+SRsE5zYIGJJyt5ro3Fg6/CdSsJhEvrV6AWZ/\n 2PYIwX6A81ekm10RQ/DuIE1CJUM2t5vl2NDK9tU7I5ni7CwDLXooOZmeD g==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6600,9927,10840\"; a=\"447138125\"",
            "E=Sophos;i=\"6.03,166,1694761200\"; d=\"scan'208\";a=\"447138125\"",
            "E=McAfee;i=\"6600,9927,10840\"; a=\"696907260\"",
            "E=Sophos;i=\"6.03,166,1694761200\"; d=\"scan'208\";a=\"696907260\""
        ],
        "X-ExtLoop1": "1",
        "From": "Nicolas Chautru <nicolas.chautru@intel.com>",
        "To": "dev@dpdk.org,\n\tmaxime.coquelin@redhat.com",
        "Cc": "hemant.agrawal@nxp.com, david.marchand@redhat.com,\n hernan.vargas@intel.com,\n Nicolas Chautru <nicolas.chautru@intel.com>",
        "Subject": "[PATCH v2 6/7] baseband/acc: introduce the new VRB2 variant",
        "Date": "Thu, 21 Sep 2023 20:43:48 +0000",
        "Message-Id": "<20230921204349.3285318-7-nicolas.chautru@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20230921204349.3285318-1-nicolas.chautru@intel.com>",
        "References": "<20230921204349.3285318-1-nicolas.chautru@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "This extends the unified driver to support both the\nVRB1 and VRB2 implementations of Intel vRAN Boost.\n\nSigned-off-by: Nicolas Chautru <nicolas.chautru@intel.com>\n---\n doc/guides/bbdevs/index.rst            |    1 +\n doc/guides/bbdevs/vrb2.rst             |  269 +++++\n doc/guides/rel_notes/release_23_11.rst |    3 +\n drivers/baseband/acc/acc_common.h      |   84 +-\n drivers/baseband/acc/rte_acc100_pmd.c  |    4 +-\n drivers/baseband/acc/rte_vrb_pmd.c     | 1442 +++++++++++++++++++++---\n drivers/baseband/acc/vrb1_pf_enum.h    |   17 +-\n drivers/baseband/acc/vrb2_pf_enum.h    |  124 ++\n drivers/baseband/acc/vrb2_vf_enum.h    |  121 ++\n drivers/baseband/acc/vrb_pmd.h         |  161 ++-\n 10 files changed, 2062 insertions(+), 164 deletions(-)\n create mode 100644 doc/guides/bbdevs/vrb2.rst\n create mode 100644 drivers/baseband/acc/vrb2_pf_enum.h\n create mode 100644 drivers/baseband/acc/vrb2_vf_enum.h",
    "diff": "diff --git a/doc/guides/bbdevs/index.rst b/doc/guides/bbdevs/index.rst\nindex 77d4c54664..269157d77f 100644\n--- a/doc/guides/bbdevs/index.rst\n+++ b/doc/guides/bbdevs/index.rst\n@@ -15,4 +15,5 @@ Baseband Device Drivers\n     fpga_5gnr_fec\n     acc100\n     vrb1\n+    vrb2\n     la12xx\ndiff --git a/doc/guides/bbdevs/vrb2.rst b/doc/guides/bbdevs/vrb2.rst\nnew file mode 100644\nindex 0000000000..8d8e094660\n--- /dev/null\n+++ b/doc/guides/bbdevs/vrb2.rst\n@@ -0,0 +1,269 @@\n+..  SPDX-License-Identifier: BSD-3-Clause\n+    Copyright(c) 2023 Intel Corporation\n+\n+.. include:: <isonum.txt>\n+\n+Intel\\ |reg| vRAN Boost v2 Poll Mode Driver (PMD)\n+=================================================\n+\n+The Intel\\ |reg| vRAN Boost integrated accelerator enables\n+cost-effective 4G and 5G next-generation virtualized Radio Access Network (vRAN)\n+solutions.\n+The Intel vRAN Boost v2.0 (VRB2 in the code) is specifically integrated on the\n+Intel\\ |reg| Xeon\\ |reg| Granite Rapids-D Process (GNR-D).\n+\n+Features\n+--------\n+\n+Intel vRAN Boost v2.0 includes a 5G Low Density Parity Check (LDPC) encoder/decoder,\n+rate match/dematch, Hybrid Automatic Repeat Request (HARQ) with access to DDR\n+memory for buffer management, a 4G Turbo encoder/decoder,\n+a Fast Fourier Transform (FFT) block providing DFT/iDFT processing offload\n+for the 5G Sounding Reference Signal (SRS), a MLD-TS accelerator, a Queue Manager (QMGR),\n+and a DMA subsystem.\n+There is no dedicated on-card memory for HARQ, the coherent memory on the CPU side is being used.\n+\n+These hardware blocks provide the following features exposed by the PMD:\n+\n+- LDPC Encode in the Downlink (5GNR)\n+- LDPC Decode in the Uplink (5GNR)\n+- Turbo Encode in the Downlink (4G)\n+- Turbo Decode in the Uplink (4G)\n+- FFT processing\n+- MLD-TS processing\n+- Single Root I/O Virtualization (SR-IOV) with 16 Virtual Functions (VFs) per Physical Function (PF)\n+- Maximum of 2048 queues per VF\n+- Message Signaled Interrupts (MSIs)\n+\n+The Intel vRAN Boost v2.0 PMD supports the following bbdev capabilities:\n+\n+* For the LDPC encode operation:\n+   - ``RTE_BBDEV_LDPC_CRC_24B_ATTACH``: set to attach CRC24B to CB(s).\n+   - ``RTE_BBDEV_LDPC_RATE_MATCH``: if set then do not do Rate Match bypass.\n+   - ``RTE_BBDEV_LDPC_INTERLEAVER_BYPASS``: if set then bypass interleaver.\n+   - ``RTE_BBDEV_LDPC_ENC_SCATTER_GATHER``: supports scatter-gather for input/output data.\n+   - ``RTE_BBDEV_LDPC_ENC_CONCATENATION``: concatenate code blocks with bit granularity.\n+\n+* For the LDPC decode operation:\n+   - ``RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK``: check CRC24B from CB(s).\n+   - ``RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP``: drops CRC24B bits appended while decoding.\n+   - ``RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK``: check CRC24A from CB(s).\n+   - ``RTE_BBDEV_LDPC_CRC_TYPE_16_CHECK``: check CRC16 from CB(s).\n+   - ``RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE``: provides an input for HARQ combining.\n+   - ``RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE``: provides an input for HARQ combining.\n+   - ``RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE``: disable early termination.\n+   - ``RTE_BBDEV_LDPC_DEC_SCATTER_GATHER``: supports scatter-gather for input/output data.\n+   - ``RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION``: supports compression of the HARQ input/output.\n+   - ``RTE_BBDEV_LDPC_LLR_COMPRESSION``: supports LLR input compression.\n+   - ``RTE_BBDEV_LDPC_HARQ_4BIT_COMPRESSION``: supports compression of the HARQ input/output.\n+   - ``RTE_BBDEV_LDPC_SOFT_OUT_ENABLE``: set the APP LLR 
soft output.\n+   - ``RTE_BBDEV_LDPC_SOFT_OUT_RM_BYPASS``: set the APP LLR soft output after rate-matching.\n+   - ``RTE_BBDEV_LDPC_SOFT_OUT_DEINTERLEAVER_BYPASS``: disables the de-interleaver.\n+\n+* For the turbo encode operation:\n+   - ``RTE_BBDEV_TURBO_CRC_24B_ATTACH``: set to attach CRC24B to CB(s).\n+   - ``RTE_BBDEV_TURBO_RATE_MATCH``: if set then do not do Rate Match bypass.\n+   - ``RTE_BBDEV_TURBO_ENC_INTERRUPTS``: set for encoder dequeue interrupts.\n+   - ``RTE_BBDEV_TURBO_RV_INDEX_BYPASS``: set to bypass RV index.\n+   - ``RTE_BBDEV_TURBO_ENC_SCATTER_GATHER``: supports scatter-gather for input/output data.\n+\n+* For the turbo decode operation:\n+   - ``RTE_BBDEV_TURBO_CRC_TYPE_24B``: check CRC24B from CB(s).\n+   - ``RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE``: perform subblock de-interleave.\n+   - ``RTE_BBDEV_TURBO_DEC_INTERRUPTS``: set for decoder dequeue interrupts.\n+   - ``RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN``: set if negative LLR input is supported.\n+   - ``RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP``: keep CRC24B bits appended while decoding.\n+   - ``RTE_BBDEV_TURBO_DEC_CRC_24B_DROP``: option to drop the code block CRC after decoding.\n+   - ``RTE_BBDEV_TURBO_EARLY_TERMINATION``: set early termination feature.\n+   - ``RTE_BBDEV_TURBO_DEC_SCATTER_GATHER``: supports scatter-gather for input/output data.\n+   - ``RTE_BBDEV_TURBO_HALF_ITERATION_EVEN``: set half iteration granularity.\n+   - ``RTE_BBDEV_TURBO_SOFT_OUTPUT``: set the APP LLR soft output.\n+   - ``RTE_BBDEV_TURBO_EQUALIZER``: set the turbo equalizer feature.\n+   - ``RTE_BBDEV_TURBO_SOFT_OUT_SATURATE``: set the soft output saturation.\n+   - ``RTE_BBDEV_TURBO_CONTINUE_CRC_MATCH``: set to run an extra odd iteration after CRC match.\n+   - ``RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT``: set if negative APP LLR output supported.\n+   - ``RTE_BBDEV_TURBO_MAP_DEC``: supports flexible parallel MAP engine decoding.\n+\n+* For the FFT operation:\n+   - ``RTE_BBDEV_FFT_WINDOWING``: flexible windowing capability.\n+   - ``RTE_BBDEV_FFT_CS_ADJUSTMENT``: flexible adjustment of Cyclic Shift time offset.\n+   - ``RTE_BBDEV_FFT_DFT_BYPASS``: set for bypass the DFT and get directly into iDFT input.\n+   - ``RTE_BBDEV_FFT_IDFT_BYPASS``: set for bypass the IDFT and get directly the DFT output.\n+   - ``RTE_BBDEV_FFT_WINDOWING_BYPASS``: set for bypass time domain windowing.\n+\n+* For the MLD-TS operation:\n+   - ``RTE_BBDEV_MLDTS_REP``: set to repeat and reuse channel across operations.\n+\n+Installation\n+------------\n+\n+Section 3 of the DPDK manual provides instructions on installing and compiling DPDK.\n+\n+DPDK requires hugepages to be configured as detailed in section 2 of the DPDK manual.\n+The bbdev test application has been tested with a configuration 40 x 1GB hugepages.\n+The hugepage configuration of a server may be examined using:\n+\n+.. code-block:: console\n+\n+   grep Huge* /proc/meminfo\n+\n+\n+Initialization\n+--------------\n+\n+When the device first powers up, its PCI Physical Functions (PF)\n+can be listed through these commands for Intel vRAN Boost v2:\n+\n+.. 
code-block:: console\n+\n+   sudo lspci -vd8086:57c2\n+\n+The physical and virtual functions are compatible with Linux UIO drivers:\n+``vfio`` and ``igb_uio``.\n+However, in order to work the 5G/4G FEC device first needs to be bound\n+to one of these Linux drivers through DPDK.\n+\n+\n+Bind PF UIO driver(s)\n+~~~~~~~~~~~~~~~~~~~~~\n+\n+Install the DPDK igb_uio driver, bind it with the PF PCI device ID and use\n+``lspci`` to confirm the PF device is under use by ``igb_uio`` DPDK UIO driver.\n+\n+The igb_uio driver may be bound to the PF PCI device using one of two methods\n+for Intel vRAN Boost v2:\n+\n+#. PCI functions (physical or virtual, depending on the use case) can be bound\n+to the UIO driver by repeating this command for every function.\n+\n+.. code-block:: console\n+\n+   cd <dpdk-top-level-directory>\n+   insmod build/kmod/igb_uio.ko\n+   echo \"8086 57c2\" > /sys/bus/pci/drivers/igb_uio/new_id\n+   lspci -vd8086:57c2\n+\n+#. Another way to bind PF with DPDK UIO driver is by using the ``dpdk-devbind.py`` tool\n+\n+.. code-block:: console\n+\n+   cd <dpdk-top-level-directory>\n+   usertools/dpdk-devbind.py -b igb_uio 0000:f7:00.0\n+\n+where the PCI device ID (example: 0000:f7:00.0) is obtained using ``lspci -vd8086:57c2``.\n+\n+In a similar way the PF may be bound with vfio-pci as any PCIe device.\n+\n+\n+Enable Virtual Functions\n+~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+Now, it should be visible in the printouts that PCI PF is under igb_uio control\n+\"``Kernel driver in use: igb_uio``\"\n+\n+To show the number of available VFs on the device, read ``sriov_totalvfs`` file.\n+\n+.. code-block:: console\n+\n+   cat /sys/bus/pci/devices/0000\\:<b>\\:<d>.<f>/sriov_totalvfs\n+\n+where ``0000\\:<b>\\:<d>.<f>`` is the PCI device ID\n+\n+To enable VFs via igb_uio, echo the number of virtual functions intended\n+to enable to ``max_vfs`` file.\n+\n+.. code-block:: console\n+\n+   echo <num-of-vfs> > /sys/bus/pci/devices/0000\\:<b>\\:<d>.<f>/max_vfs\n+\n+Afterwards, all VFs must be bound to appropriate UIO drivers as required,\n+same way it was done with the physical function previously.\n+\n+Enabling SR-IOV via VFIO driver is pretty much the same,\n+except that the file name is different:\n+\n+.. code-block:: console\n+\n+   echo <num-of-vfs> > /sys/bus/pci/devices/0000\\:<b>\\:<d>.<f>/sriov_numvfs\n+\n+\n+Configure the VFs through PF\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+The PCI virtual functions must be configured before working or getting assigned\n+to VMs/Containers.\n+The configuration involves allocating the number of hardware queues, priorities,\n+load balance, bandwidth and other settings necessary for the device\n+to perform FEC functions.\n+\n+This configuration needs to be executed at least once after reboot or PCI FLR\n+and can be achieved by using the functions ``rte_acc_configure()``,\n+which sets up the parameters defined in the compatible ``rte_acc_conf`` structure.\n+\n+\n+Test Application\n+----------------\n+\n+The bbdev class is provided with a test application, ``test-bbdev.py``\n+and range of test data for testing the functionality of the device,\n+depending on the device's capabilities.\n+The test application is located under app/test-bbdev folder\n+and has the following options:\n+\n+.. code-block:: console\n+\n+   \"-p\", \"--testapp-path\": specifies path to the bbdev test app.\n+   \"-e\", \"--eal-params\": EAL arguments which are passed to the test app.\n+   \"-t\", \"--timeout\": Timeout in seconds (default=300).\n+   \"-c\", \"--test-cases\": Defines test cases to run. 
Run all if not specified.\n+   \"-v\", \"--test-vector\": Test vector path.\n+   \"-n\", \"--num-ops\": Number of operations to process on device (default=32).\n+   \"-b\", \"--burst-size\": Operations enqueue/dequeue burst size (default=32).\n+   \"-s\", \"--snr\": SNR in dB used when generating LLRs for bler tests.\n+   \"-s\", \"--iter_max\": Number of iterations for LDPC decoder.\n+   \"-l\", \"--num-lcores\": Number of lcores to run (default=16).\n+   \"-i\", \"--init-device\": Initialise PF device with default values.\n+\n+\n+To execute the test application tool using simple decode or encode data,\n+type one of the following:\n+\n+.. code-block:: console\n+\n+  ./test-bbdev.py -c validation -n 64 -b 1 -v ./ldpc_dec_default.data\n+  ./test-bbdev.py -c validation -n 64 -b 1 -v ./ldpc_enc_default.data\n+\n+\n+The test application ``test-bbdev.py``, supports the ability to configure the\n+PF device with a default set of values, if the \"-i\" or \"- -init-device\" option\n+is included. The default values are defined in test_bbdev_perf.c.\n+\n+\n+Test Vectors\n+~~~~~~~~~~~~\n+\n+In addition to the simple LDPC decoder and LDPC encoder tests,\n+bbdev also provides a range of additional tests under the test_vectors folder,\n+which may be useful.\n+The results of these tests will depend on the device capabilities which may\n+cause some test cases to be skipped, but no failure should be reported.\n+\n+\n+Alternate Baseband Device configuration tool\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+On top of the embedded configuration feature supported in test-bbdev using\n+\"- -init-device\" option mentioned above, there is also a tool available\n+to perform that device configuration using a companion application.\n+The ``pf_bb_config`` application notably enables then to run bbdev-test\n+from the VF and not only limited to the PF as captured above.\n+\n+See for more details: https://github.com/intel/pf-bb-config\n+\n+Specifically for the bbdev Intel vRAN Boost v2 PMD, the command below can be used\n+(note that ACC200 was used previously to refer to VRB2):\n+\n+.. 
code-block:: console\n+\n+   pf_bb_config VRB2 -c ./vrb2/vrb2_config_vf_5g.cfg\n+   test-bbdev.py -e=\"-c 0xff0 -a${VF_PCI_ADDR}\" -c validation -n 64 -b 64 -l 1 -v ./ldpc_dec_default.data\ndiff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst\nindex 333e1d95a2..668dd58ee3 100644\n--- a/doc/guides/rel_notes/release_23_11.rst\n+++ b/doc/guides/rel_notes/release_23_11.rst\n@@ -78,6 +78,9 @@ New Features\n * build: Optional libraries can now be selected with the new ``enable_libs``\n   build option similarly to the existing ``enable_drivers`` build option.\n \n+* **Updated Intel vRAN Boost bbdev PMD.**\n+\n+  Added support for the new Intel vRAN Boost v2 device variant (GNR-D) within the unified driver.\n \n Removed Items\n -------------\ndiff --git a/drivers/baseband/acc/acc_common.h b/drivers/baseband/acc/acc_common.h\nindex 5de58dbe36..b71292af94 100644\n--- a/drivers/baseband/acc/acc_common.h\n+++ b/drivers/baseband/acc/acc_common.h\n@@ -18,6 +18,7 @@\n #define ACC_DMA_BLKID_OUT_HARQ      3\n #define ACC_DMA_BLKID_IN_HARQ       3\n #define ACC_DMA_BLKID_IN_MLD_R      3\n+#define ACC_DMA_BLKID_DEWIN_IN      3\n \n /* Values used in filling in decode FCWs */\n #define ACC_FCW_TD_VER              1\n@@ -103,6 +104,9 @@\n #define ACC_MAX_NUM_QGRPS              32\n #define ACC_RING_SIZE_GRANULARITY      64\n #define ACC_MAX_FCW_SIZE              128\n+#define ACC_IQ_SIZE                    4\n+\n+#define ACC_FCW_FFT_BLEN_3             28\n \n /* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */\n #define ACC_N_ZC_1 66 /* N = 66 Zc for BG 1 */\n@@ -132,6 +136,11 @@\n #define ACC_LIM_21 14 /* 0.21 */\n #define ACC_LIM_31 20 /* 0.31 */\n #define ACC_MAX_E (128 * 1024 - 2)\n+#define ACC_MAX_CS 12\n+\n+#define ACC100_VARIANT          0\n+#define VRB1_VARIANT\t\t2\n+#define VRB2_VARIANT\t\t3\n \n /* Helper macro for logging */\n #define rte_acc_log(level, fmt, ...) \\\n@@ -332,6 +341,37 @@ struct __rte_packed acc_fcw_fft {\n \t\tres:19;\n };\n \n+/* FFT Frame Control Word. 
*/\n+struct __rte_packed acc_fcw_fft_3 {\n+\tuint32_t in_frame_size:16,\n+\t\tleading_pad_size:16;\n+\tuint32_t out_frame_size:16,\n+\t\tleading_depad_size:16;\n+\tuint32_t cs_window_sel;\n+\tuint32_t cs_window_sel2:16,\n+\t\tcs_enable_bmap:16;\n+\tuint32_t num_antennas:8,\n+\t\tidft_size:8,\n+\t\tdft_size:8,\n+\t\tcs_offset:8;\n+\tuint32_t idft_shift:8,\n+\t\tdft_shift:8,\n+\t\tcs_multiplier:16;\n+\tuint32_t bypass:2,\n+\t\tfp16_in:1,\n+\t\tfp16_out:1,\n+\t\texp_adj:4,\n+\t\tpower_shift:4,\n+\t\tpower_en:1,\n+\t\tenable_dewin:1,\n+\t\tfreq_resample_mode:2,\n+\t\tdepad_ouput_size:16;\n+\tuint16_t cs_theta_0[ACC_MAX_CS];\n+\tuint32_t cs_theta_d[ACC_MAX_CS];\n+\tint8_t cs_time_offset[ACC_MAX_CS];\n+};\n+\n+\n /* MLD-TS Frame Control Word */\n struct __rte_packed acc_fcw_mldts {\n \tuint32_t fcw_version:4,\n@@ -473,14 +513,14 @@ union acc_info_ring_data {\n \t\tuint16_t valid: 1;\n \t};\n \tstruct {\n-\t\tuint32_t aq_id_3: 6;\n-\t\tuint32_t qg_id_3: 5;\n-\t\tuint32_t vf_id_3: 6;\n-\t\tuint32_t int_nb_3: 6;\n-\t\tuint32_t msi_0_3: 1;\n-\t\tuint32_t vf2pf_3: 6;\n-\t\tuint32_t loop_3: 1;\n-\t\tuint32_t valid_3: 1;\n+\t\tuint32_t aq_id_vrb2: 6;\n+\t\tuint32_t qg_id_vrb2: 5;\n+\t\tuint32_t vf_id_vrb2: 6;\n+\t\tuint32_t int_nb_vrb2: 6;\n+\t\tuint32_t msi_0_vrb2: 1;\n+\t\tuint32_t vf2pf_vrb2: 6;\n+\t\tuint32_t loop_vrb2: 1;\n+\t\tuint32_t valid_vrb2: 1;\n \t};\n } __rte_packed;\n \n@@ -765,16 +805,20 @@ alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc_device *d,\n  */\n static inline uint16_t\n get_queue_id_from_ring_info(struct rte_bbdev_data *data,\n-\t\tconst union acc_info_ring_data ring_data)\n+\t\tconst union acc_info_ring_data ring_data, uint16_t device_variant)\n {\n \tuint16_t queue_id;\n+\tstruct acc_queue *acc_q;\n+\tuint16_t vf_id = (device_variant == VRB2_VARIANT) ? ring_data.vf_id_vrb2 : ring_data.vf_id;\n+\tuint16_t aq_id = (device_variant == VRB2_VARIANT) ? ring_data.aq_id_vrb2 : ring_data.aq_id;\n+\tuint16_t qg_id = (device_variant == VRB2_VARIANT) ? 
ring_data.qg_id_vrb2 : ring_data.qg_id;\n \n \tfor (queue_id = 0; queue_id < data->num_queues; ++queue_id) {\n-\t\tstruct acc_queue *acc_q =\n-\t\t\t\tdata->queues[queue_id].queue_private;\n-\t\tif (acc_q != NULL && acc_q->aq_id == ring_data.aq_id &&\n-\t\t\t\tacc_q->qgrp_id == ring_data.qg_id &&\n-\t\t\t\tacc_q->vf_id == ring_data.vf_id)\n+\t\tacc_q = data->queues[queue_id].queue_private;\n+\n+\t\tif (acc_q != NULL && acc_q->aq_id == aq_id &&\n+\t\t\t\tacc_q->qgrp_id == qg_id &&\n+\t\t\t\tacc_q->vf_id == vf_id)\n \t\t\treturn queue_id;\n \t}\n \n@@ -1436,4 +1480,16 @@ get_num_cbs_in_tb_ldpc_enc(struct rte_bbdev_op_ldpc_enc *ldpc_enc)\n \treturn cbs_in_tb;\n }\n \n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+static inline void\n+acc_memdump(const char *string, void *buf, uint16_t bytes)\n+{\n+\tprintf(\"%s\\n\", string);\n+\tuint32_t *data = buf;\n+\tuint16_t i;\n+\tfor (i = 0; i < bytes / 4; i++)\n+\t\tprintf(\"0x%08X\\n\", data[i]);\n+}\n+#endif\n+\n #endif /* _ACC_COMMON_H_ */\ndiff --git a/drivers/baseband/acc/rte_acc100_pmd.c b/drivers/baseband/acc/rte_acc100_pmd.c\nindex 5362d39c30..7f8d05b5a9 100644\n--- a/drivers/baseband/acc/rte_acc100_pmd.c\n+++ b/drivers/baseband/acc/rte_acc100_pmd.c\n@@ -294,7 +294,7 @@ acc100_pf_interrupt_handler(struct rte_bbdev *dev)\n \t\tcase ACC100_PF_INT_DMA_UL5G_DESC_IRQ:\n \t\tcase ACC100_PF_INT_DMA_DL5G_DESC_IRQ:\n \t\t\tdeq_intr_det.queue_id = get_queue_id_from_ring_info(\n-\t\t\t\t\tdev->data, *ring_data);\n+\t\t\t\t\tdev->data, *ring_data, acc100_dev->device_variant);\n \t\t\tif (deq_intr_det.queue_id == UINT16_MAX) {\n \t\t\t\trte_bbdev_log(ERR,\n \t\t\t\t\t\t\"Couldn't find queue: aq_id: %u, qg_id: %u, vf_id: %u\",\n@@ -348,7 +348,7 @@ acc100_vf_interrupt_handler(struct rte_bbdev *dev)\n \t\t\t */\n \t\t\tring_data->vf_id = 0;\n \t\t\tdeq_intr_det.queue_id = get_queue_id_from_ring_info(\n-\t\t\t\t\tdev->data, *ring_data);\n+\t\t\t\t\tdev->data, *ring_data, acc100_dev->device_variant);\n \t\t\tif (deq_intr_det.queue_id == UINT16_MAX) {\n \t\t\t\trte_bbdev_log(ERR,\n \t\t\t\t\t\t\"Couldn't find queue: aq_id: %u, qg_id: %u\",\ndiff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c\nindex e82ed55ca7..a787592ec9 100644\n--- a/drivers/baseband/acc/rte_vrb_pmd.c\n+++ b/drivers/baseband/acc/rte_vrb_pmd.c\n@@ -37,6 +37,15 @@ vrb1_queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id\n \t\treturn ((qgrp_id << 7) + (aq_id << 3) + VRB1_VfQmgrIngressAq);\n }\n \n+static inline uint32_t\n+vrb2_queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id)\n+{\n+\tif (pf_device)\n+\t\treturn ((vf_id << 14) + (qgrp_id << 9) + (aq_id << 3) + VRB2_PfQmgrIngressAq);\n+\telse\n+\t\treturn ((qgrp_id << 9) + (aq_id << 3) + VRB2_VfQmgrIngressAq);\n+}\n+\n enum {UL_4G = 0, UL_5G, DL_4G, DL_5G, FFT, MLD, NUM_ACC};\n \n /* Return the accelerator enum for a Queue Group Index. 
*/\n@@ -197,7 +206,7 @@ fetch_acc_config(struct rte_bbdev *dev)\n \tstruct acc_device *d = dev->data->dev_private;\n \tstruct rte_acc_conf *acc_conf = &d->acc_conf;\n \tuint8_t acc, qg;\n-\tuint32_t reg_aq, reg_len0, reg_len1, reg0, reg1;\n+\tuint32_t reg_aq, reg_len0, reg_len1, reg_len2, reg_len3, reg0, reg1, reg2, reg3;\n \tuint32_t reg_mode, idx;\n \tstruct rte_acc_queue_topology *q_top = NULL;\n \tint qman_func_id[VRB_NUM_ACCS] = {ACC_ACCMAP_0, ACC_ACCMAP_1,\n@@ -219,32 +228,81 @@ fetch_acc_config(struct rte_bbdev *dev)\n \tacc_conf->num_vf_bundles = 1;\n \tinitQTop(acc_conf);\n \n-\treg0 = acc_reg_read(d, d->reg_addr->qman_group_func);\n-\treg1 = acc_reg_read(d, d->reg_addr->qman_group_func + 4);\n-\tfor (qg = 0; qg < d->num_qgroups; qg++) {\n-\t\treg_aq = acc_reg_read(d, d->queue_offset(d->pf_device, 0, qg, 0));\n-\t\tif (reg_aq & ACC_QUEUE_ENABLE) {\n-\t\t\tif (qg < ACC_NUM_QGRPS_PER_WORD)\n-\t\t\t\tidx = (reg0 >> (qg * 4)) & 0x7;\n+\tif (d->device_variant == VRB1_VARIANT) {\n+\t\treg0 = acc_reg_read(d, d->reg_addr->qman_group_func);\n+\t\treg1 = acc_reg_read(d, d->reg_addr->qman_group_func + 4);\n+\t\tfor (qg = 0; qg < d->num_qgroups; qg++) {\n+\t\t\treg_aq = acc_reg_read(d, d->queue_offset(d->pf_device, 0, qg, 0));\n+\t\t\tif (reg_aq & ACC_QUEUE_ENABLE) {\n+\t\t\t\tif (qg < ACC_NUM_QGRPS_PER_WORD)\n+\t\t\t\t\tidx = (reg0 >> (qg * 4)) & 0x7;\n+\t\t\t\telse\n+\t\t\t\t\tidx = (reg1 >> ((qg - ACC_NUM_QGRPS_PER_WORD) * 4)) & 0x7;\n+\t\t\t\tif (idx < VRB1_NUM_ACCS) {\n+\t\t\t\t\tacc = qman_func_id[idx];\n+\t\t\t\t\tupdateQtop(acc, qg, acc_conf, d);\n+\t\t\t\t}\n+\t\t\t}\n+\t\t}\n+\n+\t\t/* Check the depth of the AQs. */\n+\t\treg_len0 = acc_reg_read(d, d->reg_addr->depth_log0_offset);\n+\t\treg_len1 = acc_reg_read(d, d->reg_addr->depth_log1_offset);\n+\t\tfor (acc = 0; acc < NUM_ACC; acc++) {\n+\t\t\tqtopFromAcc(&q_top, acc, acc_conf);\n+\t\t\tif (q_top->first_qgroup_index < ACC_NUM_QGRPS_PER_WORD)\n+\t\t\t\tq_top->aq_depth_log2 =\n+\t\t\t\t\t\t(reg_len0 >> (q_top->first_qgroup_index * 4)) & 0xF;\n \t\t\telse\n-\t\t\t\tidx = (reg1 >> ((qg - ACC_NUM_QGRPS_PER_WORD) * 4)) & 0x7;\n-\t\t\tif (idx < VRB1_NUM_ACCS) {\n-\t\t\t\tacc = qman_func_id[idx];\n-\t\t\t\tupdateQtop(acc, qg, acc_conf, d);\n+\t\t\t\tq_top->aq_depth_log2 = (reg_len1 >> ((q_top->first_qgroup_index -\n+\t\t\t\t\t\tACC_NUM_QGRPS_PER_WORD) * 4)) & 0xF;\n+\t\t}\n+\t} else {\n+\t\treg0 = acc_reg_read(d, d->reg_addr->qman_group_func);\n+\t\treg1 = acc_reg_read(d, d->reg_addr->qman_group_func + 4);\n+\t\treg2 = acc_reg_read(d, d->reg_addr->qman_group_func + 8);\n+\t\treg3 = acc_reg_read(d, d->reg_addr->qman_group_func + 12);\n+\t\t/* printf(\"Debug Function %08x %08x %08x %08x\\n\", reg0, reg1, reg2, reg3);*/\n+\t\tfor (qg = 0; qg < VRB2_NUM_QGRPS; qg++) {\n+\t\t\treg_aq = acc_reg_read(d, vrb2_queue_offset(d->pf_device, 0, qg, 0));\n+\t\t\tif (reg_aq & ACC_QUEUE_ENABLE) {\n+\t\t\t\t/* printf(\"Qg enabled %d %x\\n\", qg, reg_aq);*/\n+\t\t\t\tif (qg / ACC_NUM_QGRPS_PER_WORD == 0)\n+\t\t\t\t\tidx = (reg0 >> ((qg % ACC_NUM_QGRPS_PER_WORD) * 4)) & 0x7;\n+\t\t\t\telse if (qg / ACC_NUM_QGRPS_PER_WORD == 1)\n+\t\t\t\t\tidx = (reg1 >> ((qg % ACC_NUM_QGRPS_PER_WORD) * 4)) & 0x7;\n+\t\t\t\telse if (qg / ACC_NUM_QGRPS_PER_WORD == 2)\n+\t\t\t\t\tidx = (reg2 >> ((qg % ACC_NUM_QGRPS_PER_WORD) * 4)) & 0x7;\n+\t\t\t\telse\n+\t\t\t\t\tidx = (reg3 >> ((qg % ACC_NUM_QGRPS_PER_WORD) * 4)) & 0x7;\n+\t\t\t\tif (idx < VRB_NUM_ACCS) {\n+\t\t\t\t\tacc = qman_func_id[idx];\n+\t\t\t\t\tupdateQtop(acc, qg, acc_conf, d);\n+\t\t\t\t}\n \t\t\t}\n \t\t}\n-\t}\n 
\n-\t/* Check the depth of the AQs. */\n-\treg_len0 = acc_reg_read(d, d->reg_addr->depth_log0_offset);\n-\treg_len1 = acc_reg_read(d, d->reg_addr->depth_log1_offset);\n-\tfor (acc = 0; acc < NUM_ACC; acc++) {\n-\t\tqtopFromAcc(&q_top, acc, acc_conf);\n-\t\tif (q_top->first_qgroup_index < ACC_NUM_QGRPS_PER_WORD)\n-\t\t\tq_top->aq_depth_log2 = (reg_len0 >> (q_top->first_qgroup_index * 4)) & 0xF;\n-\t\telse\n-\t\t\tq_top->aq_depth_log2 = (reg_len1 >> ((q_top->first_qgroup_index -\n-\t\t\t\t\tACC_NUM_QGRPS_PER_WORD) * 4)) & 0xF;\n+\t\t/* Check the depth of the AQs. */\n+\t\treg_len0 = acc_reg_read(d, d->reg_addr->depth_log0_offset);\n+\t\treg_len1 = acc_reg_read(d, d->reg_addr->depth_log0_offset + 4);\n+\t\treg_len2 = acc_reg_read(d, d->reg_addr->depth_log0_offset + 8);\n+\t\treg_len3 = acc_reg_read(d, d->reg_addr->depth_log0_offset + 12);\n+\n+\t\tfor (acc = 0; acc < NUM_ACC; acc++) {\n+\t\t\tqtopFromAcc(&q_top, acc, acc_conf);\n+\t\t\tif (q_top->first_qgroup_index / ACC_NUM_QGRPS_PER_WORD == 0)\n+\t\t\t\tq_top->aq_depth_log2 = (reg_len0 >> ((q_top->first_qgroup_index %\n+\t\t\t\t\t\tACC_NUM_QGRPS_PER_WORD) * 4)) & 0xF;\n+\t\t\telse if (q_top->first_qgroup_index / ACC_NUM_QGRPS_PER_WORD == 1)\n+\t\t\t\tq_top->aq_depth_log2 = (reg_len1 >> ((q_top->first_qgroup_index %\n+\t\t\t\t\t\tACC_NUM_QGRPS_PER_WORD) * 4)) & 0xF;\n+\t\t\telse if (q_top->first_qgroup_index / ACC_NUM_QGRPS_PER_WORD == 2)\n+\t\t\t\tq_top->aq_depth_log2 = (reg_len2 >> ((q_top->first_qgroup_index %\n+\t\t\t\t\t\tACC_NUM_QGRPS_PER_WORD) * 4)) & 0xF;\n+\t\t\telse\n+\t\t\t\tq_top->aq_depth_log2 = (reg_len3 >> ((q_top->first_qgroup_index %\n+\t\t\t\t\t\tACC_NUM_QGRPS_PER_WORD) * 4)) & 0xF;\n+\t\t}\n \t}\n \n \t/* Read PF mode. */\n@@ -341,18 +399,29 @@ vrb_check_ir(struct acc_device *acc_dev)\n \tring_data = acc_dev->info_ring + (acc_dev->info_ring_head & ACC_INFO_RING_MASK);\n \n \twhile (ring_data->valid) {\n-\t\tif ((ring_data->int_nb < ACC_PF_INT_DMA_DL_DESC_IRQ) || (\n-\t\t\t\tring_data->int_nb > ACC_PF_INT_DMA_MLD_DESC_IRQ)) {\n-\t\t\trte_bbdev_log(WARNING, \"InfoRing: ITR:%d Info:0x%x\",\n-\t\t\t\t\tring_data->int_nb, ring_data->detailed_info);\n-\t\t\t/* Initialize Info Ring entry and move forward. */\n-\t\t\tring_data->val = 0;\n+\t\tif (acc_dev->device_variant == VRB1_VARIANT) {\n+\t\t\tif ((ring_data->int_nb < ACC_PF_INT_DMA_DL_DESC_IRQ) || (\n+\t\t\t\t\tring_data->int_nb > ACC_PF_INT_DMA_MLD_DESC_IRQ)) {\n+\t\t\t\trte_bbdev_log(WARNING, \"InfoRing: ITR:%d Info:0x%x\",\n+\t\t\t\t\t\tring_data->int_nb, ring_data->detailed_info);\n+\t\t\t\t/* Initialize Info Ring entry and move forward. */\n+\t\t\t\tring_data->val = 0;\n+\t\t\t}\n+\t\t} else { /* VRB2_VARIANT */\n+\t\t\tif ((ring_data->int_nb_vrb2 < ACC_PF_INT_DMA_DL_DESC_IRQ) || (\n+\t\t\t\t\tring_data->int_nb_vrb2 > ACC_PF_INT_DMA_MLD_DESC_IRQ)) {\n+\t\t\t\trte_bbdev_log(WARNING, \"InfoRing: ITR:%d Info:0x%x\",\n+\t\t\t\t\t\tring_data->int_nb_vrb2, ring_data->val);\n+\t\t\t\t/* Initialize Info Ring entry and move forward. */\n+\t\t\t\tring_data->val = 0;\n+\t\t\t}\n \t\t}\n \t\tinfo_ring_head++;\n \t\tring_data = acc_dev->info_ring + (info_ring_head & ACC_INFO_RING_MASK);\n \t}\n }\n \n+\n /* Interrupt handler triggered by dev for handling specific interrupt. 
*/\n static void\n vrb_dev_interrupt_handler(void *cb_arg)\n@@ -361,16 +430,22 @@ vrb_dev_interrupt_handler(void *cb_arg)\n \tstruct acc_device *acc_dev = dev->data->dev_private;\n \tvolatile union acc_info_ring_data *ring_data;\n \tstruct acc_deq_intr_details deq_intr_det;\n+\tuint16_t vf_id, aq_id, qg_id, int_nb;\n+\tbool isVrb1 = (acc_dev->device_variant == VRB1_VARIANT);\n \n \tring_data = acc_dev->info_ring + (acc_dev->info_ring_head & ACC_INFO_RING_MASK);\n \n \twhile (ring_data->valid) {\n+\t\tvf_id = isVrb1 ? ring_data->vf_id : ring_data->vf_id_vrb2;\n+\t\taq_id = isVrb1 ? ring_data->aq_id : ring_data->aq_id_vrb2;\n+\t\tqg_id = isVrb1 ? ring_data->qg_id : ring_data->qg_id_vrb2;\n+\t\tint_nb = isVrb1 ? ring_data->int_nb : ring_data->int_nb_vrb2;\n \t\tif (acc_dev->pf_device) {\n \t\t\trte_bbdev_log_debug(\n-\t\t\t\t\t\"VRB1 PF Interrupt received, Info Ring data: 0x%x -> %d\",\n-\t\t\t\t\tring_data->val, ring_data->int_nb);\n+\t\t\t\t\t\"PF Interrupt received, Info Ring data: 0x%x -> %d\",\n+\t\t\t\t\tring_data->val, int_nb);\n \n-\t\t\tswitch (ring_data->int_nb) {\n+\t\t\tswitch (int_nb) {\n \t\t\tcase ACC_PF_INT_DMA_DL_DESC_IRQ:\n \t\t\tcase ACC_PF_INT_DMA_UL_DESC_IRQ:\n \t\t\tcase ACC_PF_INT_DMA_FFT_DESC_IRQ:\n@@ -378,13 +453,11 @@ vrb_dev_interrupt_handler(void *cb_arg)\n \t\t\tcase ACC_PF_INT_DMA_DL5G_DESC_IRQ:\n \t\t\tcase ACC_PF_INT_DMA_MLD_DESC_IRQ:\n \t\t\t\tdeq_intr_det.queue_id = get_queue_id_from_ring_info(\n-\t\t\t\t\t\tdev->data, *ring_data);\n+\t\t\t\t\t\tdev->data, *ring_data, acc_dev->device_variant);\n \t\t\t\tif (deq_intr_det.queue_id == UINT16_MAX) {\n \t\t\t\t\trte_bbdev_log(ERR,\n \t\t\t\t\t\t\t\"Couldn't find queue: aq_id: %u, qg_id: %u, vf_id: %u\",\n-\t\t\t\t\t\t\tring_data->aq_id,\n-\t\t\t\t\t\t\tring_data->qg_id,\n-\t\t\t\t\t\t\tring_data->vf_id);\n+\t\t\t\t\t\t\taq_id, qg_id, vf_id);\n \t\t\t\t\treturn;\n \t\t\t\t}\n \t\t\t\trte_bbdev_pmd_callback_process(dev,\n@@ -396,9 +469,9 @@ vrb_dev_interrupt_handler(void *cb_arg)\n \t\t\t}\n \t\t} else {\n \t\t\trte_bbdev_log_debug(\n-\t\t\t\t\t\"VRB1 VF Interrupt received, Info Ring data: 0x%x\\n\",\n+\t\t\t\t\t\"VRB VF Interrupt received, Info Ring data: 0x%x\\n\",\n \t\t\t\t\tring_data->val);\n-\t\t\tswitch (ring_data->int_nb) {\n+\t\t\tswitch (int_nb) {\n \t\t\tcase ACC_VF_INT_DMA_DL_DESC_IRQ:\n \t\t\tcase ACC_VF_INT_DMA_UL_DESC_IRQ:\n \t\t\tcase ACC_VF_INT_DMA_FFT_DESC_IRQ:\n@@ -406,14 +479,16 @@ vrb_dev_interrupt_handler(void *cb_arg)\n \t\t\tcase ACC_VF_INT_DMA_DL5G_DESC_IRQ:\n \t\t\tcase ACC_VF_INT_DMA_MLD_DESC_IRQ:\n \t\t\t\t/* VFs are not aware of their vf_id - it's set to 0.  */\n-\t\t\t\tring_data->vf_id = 0;\n+\t\t\t\tif (acc_dev->device_variant == VRB1_VARIANT)\n+\t\t\t\t\tring_data->vf_id = 0;\n+\t\t\t\telse\n+\t\t\t\t\tring_data->vf_id_vrb2 = 0;\n \t\t\t\tdeq_intr_det.queue_id = get_queue_id_from_ring_info(\n-\t\t\t\t\t\tdev->data, *ring_data);\n+\t\t\t\t\t\tdev->data, *ring_data, acc_dev->device_variant);\n \t\t\t\tif (deq_intr_det.queue_id == UINT16_MAX) {\n \t\t\t\t\trte_bbdev_log(ERR,\n \t\t\t\t\t\t\t\"Couldn't find queue: aq_id: %u, qg_id: %u\",\n-\t\t\t\t\t\t\tring_data->aq_id,\n-\t\t\t\t\t\t\tring_data->qg_id);\n+\t\t\t\t\t\t\taq_id, qg_id);\n \t\t\t\t\treturn;\n \t\t\t\t}\n \t\t\t\trte_bbdev_pmd_callback_process(dev,\n@@ -428,8 +503,7 @@ vrb_dev_interrupt_handler(void *cb_arg)\n \t\t/* Initialize Info Ring entry and move forward. 
*/\n \t\tring_data->val = 0;\n \t\t++acc_dev->info_ring_head;\n-\t\tring_data = acc_dev->info_ring +\n-\t\t\t\t(acc_dev->info_ring_head & ACC_INFO_RING_MASK);\n+\t\tring_data = acc_dev->info_ring + (acc_dev->info_ring_head & ACC_INFO_RING_MASK);\n \t}\n }\n \n@@ -461,7 +535,10 @@ allocate_info_ring(struct rte_bbdev *dev)\n \tphys_low  = (uint32_t)(info_ring_iova);\n \tacc_reg_write(d, d->reg_addr->info_ring_hi, phys_high);\n \tacc_reg_write(d, d->reg_addr->info_ring_lo, phys_low);\n-\tacc_reg_write(d, d->reg_addr->info_ring_en, VRB1_REG_IRQ_EN_ALL);\n+\tif (d->device_variant == VRB1_VARIANT)\n+\t\tacc_reg_write(d, d->reg_addr->info_ring_en, VRB1_REG_IRQ_EN_ALL);\n+\telse\n+\t\tacc_reg_write(d, d->reg_addr->info_ring_en, VRB2_REG_IRQ_EN_ALL);\n \td->info_ring_head = (acc_reg_read(d, d->reg_addr->info_ring_ptr) &\n \t\t\t0xFFF) / sizeof(union acc_info_ring_data);\n \treturn 0;\n@@ -516,6 +593,7 @@ vrb_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)\n \tphys_high = (uint32_t)(d->sw_rings_iova >> 32);\n \tphys_low  = (uint32_t)(d->sw_rings_iova & ~(ACC_SIZE_64MBYTE-1));\n \n+\n \t/* Read the populated cfg from device registers. */\n \tfetch_acc_config(dev);\n \n@@ -540,6 +618,10 @@ vrb_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)\n \tacc_reg_write(d, d->reg_addr->dma_ring_dl4g_lo, phys_low);\n \tacc_reg_write(d, d->reg_addr->dma_ring_fft_hi, phys_high);\n \tacc_reg_write(d, d->reg_addr->dma_ring_fft_lo, phys_low);\n+\tif (d->device_variant == VRB2_VARIANT) {\n+\t\tacc_reg_write(d, d->reg_addr->dma_ring_mld_hi, phys_high);\n+\t\tacc_reg_write(d, d->reg_addr->dma_ring_mld_lo, phys_low);\n+\t}\n \t/*\n \t * Configure Ring Size to the max queue ring size\n \t * (used for wrapping purpose).\n@@ -549,8 +631,7 @@ vrb_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)\n \n \t/* Configure tail pointer for use when SDONE enabled. */\n \tif (d->tail_ptrs == NULL)\n-\t\td->tail_ptrs = rte_zmalloc_socket(\n-\t\t\t\tdev->device->driver->name,\n+\t\td->tail_ptrs = rte_zmalloc_socket(dev->device->driver->name,\n \t\t\t\tVRB_MAX_QGRPS * VRB_MAX_AQS * sizeof(uint32_t),\n \t\t\t\tRTE_CACHE_LINE_SIZE, socket_id);\n \tif (d->tail_ptrs == NULL) {\n@@ -574,6 +655,10 @@ vrb_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)\n \tacc_reg_write(d, d->reg_addr->tail_ptrs_dl4g_lo, phys_low);\n \tacc_reg_write(d, d->reg_addr->tail_ptrs_fft_hi, phys_high);\n \tacc_reg_write(d, d->reg_addr->tail_ptrs_fft_lo, phys_low);\n+\tif (d->device_variant == VRB2_VARIANT) {\n+\t\tacc_reg_write(d, d->reg_addr->tail_ptrs_mld_hi, phys_high);\n+\t\tacc_reg_write(d, d->reg_addr->tail_ptrs_mld_lo, phys_low);\n+\t}\n \n \tret = allocate_info_ring(dev);\n \tif (ret < 0) {\n@@ -671,10 +756,17 @@ vrb_intr_enable(struct rte_bbdev *dev)\n \t\t\treturn ret;\n \t\t}\n \n-\t\tif (acc_dev->pf_device)\n-\t\t\tmax_queues = VRB1_MAX_PF_MSIX;\n-\t\telse\n-\t\t\tmax_queues = VRB1_MAX_VF_MSIX;\n+\t\tif (d->device_variant == VRB1_VARIANT) {\n+\t\t\tif (acc_dev->pf_device)\n+\t\t\t\tmax_queues = VRB1_MAX_PF_MSIX;\n+\t\t\telse\n+\t\t\t\tmax_queues = VRB1_MAX_VF_MSIX;\n+\t\t} else {\n+\t\t\tif (acc_dev->pf_device)\n+\t\t\t\tmax_queues = VRB2_MAX_PF_MSIX;\n+\t\t\telse\n+\t\t\t\tmax_queues = VRB2_MAX_VF_MSIX;\n+\t\t}\n \n \t\tif (rte_intr_efd_enable(dev->intr_handle, max_queues)) {\n \t\t\trte_bbdev_log(ERR, \"Failed to create fds for %u queues\",\n@@ -776,7 +868,10 @@ vrb_find_free_queue_idx(struct rte_bbdev *dev,\n \t\t\t/* Mark the Queue as assigned. 
*/\n \t\t\td->q_assigned_bit_map[group_idx] |= (1ULL << aq_idx);\n \t\t\t/* Report the AQ Index. */\n-\t\t\treturn (group_idx << VRB1_GRP_ID_SHIFT) + aq_idx;\n+\t\t\tif (d->device_variant == VRB1_VARIANT)\n+\t\t\t\treturn (group_idx << VRB1_GRP_ID_SHIFT) + aq_idx;\n+\t\t\telse\n+\t\t\t\treturn (group_idx << VRB2_GRP_ID_SHIFT) + aq_idx;\n \t\t}\n \t}\n \trte_bbdev_log(INFO, \"Failed to find free queue on %s, priority %u\",\n@@ -819,6 +914,9 @@ vrb_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,\n \t\t\tACC_FCW_LD_BLEN : (conf->op_type == RTE_BBDEV_OP_FFT ?\n \t\t\tACC_FCW_FFT_BLEN : ACC_FCW_MLDTS_BLEN))));\n \n+\tif ((q->d->device_variant == VRB2_VARIANT) && (conf->op_type == RTE_BBDEV_OP_FFT))\n+\t\tfcw_len = ACC_FCW_FFT_BLEN_3;\n+\n \tfor (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) {\n \t\tdesc = q->ring_addr + desc_idx;\n \t\tdesc->req.word0 = ACC_DMA_DESC_TYPE;\n@@ -915,9 +1013,16 @@ vrb_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,\n \t\t}\n \t}\n \n-\tq->qgrp_id = (q_idx >> VRB1_GRP_ID_SHIFT) & 0xF;\n-\tq->vf_id = (q_idx >> VRB1_VF_ID_SHIFT)  & 0x3F;\n-\tq->aq_id = q_idx & 0xF;\n+\tif (d->device_variant == VRB1_VARIANT) {\n+\t\tq->qgrp_id = (q_idx >> VRB1_GRP_ID_SHIFT) & 0xF;\n+\t\tq->vf_id = (q_idx >> VRB1_VF_ID_SHIFT)  & 0x3F;\n+\t\tq->aq_id = q_idx & 0xF;\n+\t} else {\n+\t\tq->qgrp_id = (q_idx >> VRB2_GRP_ID_SHIFT) & 0x1F;\n+\t\tq->vf_id = (q_idx >> VRB2_VF_ID_SHIFT)  & 0x3F;\n+\t\tq->aq_id = q_idx & 0x3F;\n+\t}\n+\n \tq->aq_depth = 0;\n \tif (conf->op_type ==  RTE_BBDEV_OP_TURBO_DEC)\n \t\tq->aq_depth = (1 << d->acc_conf.q_ul_4g.aq_depth_log2);\n@@ -1149,6 +1254,127 @@ vrb_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)\n \t\tRTE_BBDEV_END_OF_CAPABILITIES_LIST()\n \t};\n \n+\tstatic const struct rte_bbdev_op_cap vrb2_bbdev_capabilities[] = {\n+\t\t{\n+\t\t\t.type = RTE_BBDEV_OP_TURBO_DEC,\n+\t\t\t.cap.turbo_dec = {\n+\t\t\t\t.capability_flags =\n+\t\t\t\t\tRTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |\n+\t\t\t\t\tRTE_BBDEV_TURBO_CRC_TYPE_24B |\n+\t\t\t\t\tRTE_BBDEV_TURBO_DEC_CRC_24B_DROP |\n+\t\t\t\t\tRTE_BBDEV_TURBO_EQUALIZER |\n+\t\t\t\t\tRTE_BBDEV_TURBO_SOFT_OUT_SATURATE |\n+\t\t\t\t\tRTE_BBDEV_TURBO_HALF_ITERATION_EVEN |\n+\t\t\t\t\tRTE_BBDEV_TURBO_CONTINUE_CRC_MATCH |\n+\t\t\t\t\tRTE_BBDEV_TURBO_SOFT_OUTPUT |\n+\t\t\t\t\tRTE_BBDEV_TURBO_EARLY_TERMINATION |\n+\t\t\t\t\tRTE_BBDEV_TURBO_DEC_INTERRUPTS |\n+\t\t\t\t\tRTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |\n+\t\t\t\t\tRTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT |\n+\t\t\t\t\tRTE_BBDEV_TURBO_MAP_DEC |\n+\t\t\t\t\tRTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP |\n+\t\t\t\t\tRTE_BBDEV_TURBO_DEC_SCATTER_GATHER,\n+\t\t\t\t.max_llr_modulus = INT8_MAX,\n+\t\t\t\t.num_buffers_src =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_hard_out =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_soft_out =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t}\n+\t\t},\n+\t\t{\n+\t\t\t.type = RTE_BBDEV_OP_TURBO_ENC,\n+\t\t\t.cap.turbo_enc = {\n+\t\t\t\t.capability_flags =\n+\t\t\t\t\tRTE_BBDEV_TURBO_CRC_24B_ATTACH |\n+\t\t\t\t\tRTE_BBDEV_TURBO_RV_INDEX_BYPASS |\n+\t\t\t\t\tRTE_BBDEV_TURBO_RATE_MATCH |\n+\t\t\t\t\tRTE_BBDEV_TURBO_ENC_INTERRUPTS |\n+\t\t\t\t\tRTE_BBDEV_TURBO_ENC_SCATTER_GATHER,\n+\t\t\t\t.num_buffers_src =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_dst =\n+\t\t\t\t\t\tRTE_BBDEV_TURBO_MAX_CODE_BLOCKS,\n+\t\t\t}\n+\t\t},\n+\t\t{\n+\t\t\t.type   = RTE_BBDEV_OP_LDPC_ENC,\n+\t\t\t.cap.ldpc_enc = {\n+\t\t\t\t.capability_flags 
=\n+\t\t\t\t\tRTE_BBDEV_LDPC_RATE_MATCH |\n+\t\t\t\t\tRTE_BBDEV_LDPC_CRC_24B_ATTACH |\n+\t\t\t\t\tRTE_BBDEV_LDPC_INTERLEAVER_BYPASS |\n+\t\t\t\t\tRTE_BBDEV_LDPC_ENC_INTERRUPTS |\n+\t\t\t\t\tRTE_BBDEV_LDPC_ENC_SCATTER_GATHER |\n+\t\t\t\t\tRTE_BBDEV_LDPC_ENC_CONCATENATION,\n+\t\t\t\t.num_buffers_src =\n+\t\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_dst =\n+\t\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t}\n+\t\t},\n+\t\t{\n+\t\t\t.type   = RTE_BBDEV_OP_LDPC_DEC,\n+\t\t\t.cap.ldpc_dec = {\n+\t\t\t.capability_flags =\n+\t\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |\n+\t\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |\n+\t\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK |\n+\t\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_16_CHECK |\n+\t\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |\n+\t\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |\n+\t\t\t\tRTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |\n+\t\t\t\tRTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS |\n+\t\t\t\tRTE_BBDEV_LDPC_DEC_SCATTER_GATHER |\n+\t\t\t\tRTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION |\n+\t\t\t\tRTE_BBDEV_LDPC_HARQ_4BIT_COMPRESSION |\n+\t\t\t\tRTE_BBDEV_LDPC_LLR_COMPRESSION |\n+\t\t\t\tRTE_BBDEV_LDPC_SOFT_OUT_ENABLE |\n+\t\t\t\tRTE_BBDEV_LDPC_SOFT_OUT_RM_BYPASS |\n+\t\t\t\tRTE_BBDEV_LDPC_SOFT_OUT_DEINTERLEAVER_BYPASS |\n+\t\t\t\tRTE_BBDEV_LDPC_DEC_INTERRUPTS,\n+\t\t\t.llr_size = 8,\n+\t\t\t.llr_decimals = 2,\n+\t\t\t.num_buffers_src =\n+\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t.num_buffers_hard_out =\n+\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t.num_buffers_soft_out = 0,\n+\t\t\t}\n+\t\t},\n+\t\t{\n+\t\t\t.type\t= RTE_BBDEV_OP_FFT,\n+\t\t\t.cap.fft = {\n+\t\t\t\t.capability_flags =\n+\t\t\t\t\t\tRTE_BBDEV_FFT_WINDOWING |\n+\t\t\t\t\t\tRTE_BBDEV_FFT_CS_ADJUSTMENT |\n+\t\t\t\t\t\tRTE_BBDEV_FFT_DFT_BYPASS |\n+\t\t\t\t\t\tRTE_BBDEV_FFT_IDFT_BYPASS |\n+\t\t\t\t\t\tRTE_BBDEV_FFT_FP16_INPUT |\n+\t\t\t\t\t\tRTE_BBDEV_FFT_FP16_OUTPUT |\n+\t\t\t\t\t\tRTE_BBDEV_FFT_POWER_MEAS |\n+\t\t\t\t\t\tRTE_BBDEV_FFT_WINDOWING_BYPASS,\n+\t\t\t\t.num_buffers_src =\n+\t\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_dst =\n+\t\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t}\n+\t\t},\n+\t\t{\n+\t\t\t.type\t= RTE_BBDEV_OP_MLDTS,\n+\t\t\t.cap.fft = {\n+\t\t\t\t.capability_flags =\n+\t\t\t\t\t\tRTE_BBDEV_MLDTS_REP,\n+\t\t\t\t.num_buffers_src =\n+\t\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t\t.num_buffers_dst =\n+\t\t\t\t\t\tRTE_BBDEV_LDPC_MAX_CODE_BLOCKS,\n+\t\t\t}\n+\t\t},\n+\t\tRTE_BBDEV_END_OF_CAPABILITIES_LIST()\n+\t};\n+\n \tstatic struct rte_bbdev_queue_conf default_queue_conf;\n \tdefault_queue_conf.socket = dev->data->socket_id;\n \tdefault_queue_conf.queue_size = ACC_MAX_QUEUE_DEPTH;\n@@ -1193,7 +1419,10 @@ vrb_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)\n \tdev_info->default_queue_conf = default_queue_conf;\n \tdev_info->cpu_flag_reqs = NULL;\n \tdev_info->min_alignment = 1;\n-\tdev_info->capabilities = vrb1_bbdev_capabilities;\n+\tif (d->device_variant == VRB1_VARIANT)\n+\t\tdev_info->capabilities = vrb1_bbdev_capabilities;\n+\telse\n+\t\tdev_info->capabilities = vrb2_bbdev_capabilities;\n \tdev_info->harq_buffer_size = 0;\n \n \tvrb_check_ir(d);\n@@ -1242,6 +1471,9 @@ static struct rte_pci_id pci_id_vrb_pf_map[] = {\n \t{\n \t\tRTE_PCI_DEVICE(RTE_VRB1_VENDOR_ID, RTE_VRB1_PF_DEVICE_ID)\n \t},\n+\t{\n+\t\tRTE_PCI_DEVICE(RTE_VRB2_VENDOR_ID, RTE_VRB2_PF_DEVICE_ID)\n+\t},\n \t{.device_id = 0},\n };\n \n@@ -1250,6 +1482,9 @@ static struct rte_pci_id pci_id_vrb_vf_map[] = {\n \t{\n \t\tRTE_PCI_DEVICE(RTE_VRB1_VENDOR_ID, 
RTE_VRB1_VF_DEVICE_ID)\n \t},\n+\t{\n+\t\tRTE_PCI_DEVICE(RTE_VRB2_VENDOR_ID, RTE_VRB2_VF_DEVICE_ID)\n+\t},\n \t{.device_id = 0},\n };\n \n@@ -1286,6 +1521,7 @@ vrb_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc_fcw_td *fcw)\n \t\t\t\tfcw->ea = op->turbo_dec.cb_params.e;\n \t\t\t\tfcw->eb = op->turbo_dec.cb_params.e;\n \t\t\t}\n+\n \t\t\tif (op->turbo_dec.rv_index == 0)\n \t\t\t\tfcw->k0_start_col = ACC_FCW_TD_RVIDX_0;\n \t\t\telse if (op->turbo_dec.rv_index == 1)\n@@ -1304,7 +1540,7 @@ vrb_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc_fcw_td *fcw)\n \t\tfcw->bypass_teq = 0;\n \t}\n \n-\tfcw->code_block_mode = 1; /* FIXME */\n+\tfcw->code_block_mode = 1;\n \tfcw->turbo_crc_type = check_bit(op->turbo_dec.op_flags,\n \t\t\tRTE_BBDEV_TURBO_CRC_TYPE_24B);\n \n@@ -1464,8 +1700,8 @@ vrb_dma_desc_td_fill(struct rte_bbdev_dec_op *op,\n \tif (op->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {\n \t\tk = op->turbo_dec.tb_params.k_pos;\n \t\te = (r < op->turbo_dec.tb_params.cab)\n-\t\t\t? op->turbo_dec.tb_params.ea\n-\t\t\t: op->turbo_dec.tb_params.eb;\n+\t\t\t\t? op->turbo_dec.tb_params.ea\n+\t\t\t\t: op->turbo_dec.tb_params.eb;\n \t} else {\n \t\tk = op->turbo_dec.cb_params.k;\n \t\te = op->turbo_dec.cb_params.e;\n@@ -1676,61 +1912,329 @@ vrb1_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,\n \treturn 0;\n }\n \n+/* Fill in a frame control word for LDPC decoding. */\n static inline void\n-vrb_dma_desc_ld_update(struct rte_bbdev_dec_op *op,\n-\t\tstruct acc_dma_req_desc *desc,\n-\t\tstruct rte_mbuf *input, struct rte_mbuf *h_output,\n-\t\tuint32_t *in_offset, uint32_t *h_out_offset,\n-\t\tuint32_t *h_out_length,\n+vrb2_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw,\n \t\tunion acc_harq_layout_data *harq_layout)\n {\n-\tint next_triplet = 1; /* FCW already done. */\n-\tdesc->data_ptrs[next_triplet].address = rte_pktmbuf_iova_offset(input, *in_offset);\n-\tnext_triplet++;\n+\tuint16_t harq_out_length, harq_in_length, ncb_p, k0_p, parity_offset;\n+\tuint32_t harq_index;\n+\tuint32_t l;\n \n-\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {\n-\t\tstruct rte_bbdev_op_data hi = op->ldpc_dec.harq_combined_input;\n-\t\tdesc->data_ptrs[next_triplet].address =\n-\t\t\t\trte_pktmbuf_iova_offset(hi.data, hi.offset);\n-\t\tnext_triplet++;\n-\t}\n+\tfcw->qm = op->ldpc_dec.q_m;\n+\tfcw->nfiller = op->ldpc_dec.n_filler;\n+\tfcw->BG = (op->ldpc_dec.basegraph - 1);\n+\tfcw->Zc = op->ldpc_dec.z_c;\n+\tfcw->ncb = op->ldpc_dec.n_cb;\n+\tfcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_dec.basegraph,\n+\t\t\top->ldpc_dec.rv_index);\n+\tif (op->ldpc_dec.code_block_mode == RTE_BBDEV_CODE_BLOCK)\n+\t\tfcw->rm_e = op->ldpc_dec.cb_params.e;\n+\telse\n+\t\tfcw->rm_e = (op->ldpc_dec.tb_params.r <\n+\t\t\t\top->ldpc_dec.tb_params.cab) ?\n+\t\t\t\t\t\top->ldpc_dec.tb_params.ea :\n+\t\t\t\t\t\top->ldpc_dec.tb_params.eb;\n \n-\tdesc->data_ptrs[next_triplet].address =\n-\t\t\trte_pktmbuf_iova_offset(h_output, *h_out_offset);\n-\t*h_out_length = desc->data_ptrs[next_triplet].blen;\n-\tnext_triplet++;\n+\tif (unlikely(check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE) &&\n+\t\t\t(op->ldpc_dec.harq_combined_input.length == 0))) {\n+\t\trte_bbdev_log(WARNING, \"Null HARQ input size provided\");\n+\t\t/* Disable HARQ input in that case to carry forward. 
*/\n+\t\top->ldpc_dec.op_flags ^= RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE;\n+\t}\n+\tif (unlikely(fcw->rm_e == 0)) {\n+\t\trte_bbdev_log(WARNING, \"Null E input provided\");\n+\t\tfcw->rm_e = 2;\n+\t}\n \n-\tif (check_bit(op->ldpc_dec.op_flags,\n-\t\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {\n-\t\t/* Adjust based on previous operation. */\n-\t\tstruct rte_bbdev_dec_op *prev_op = desc->op_addr;\n-\t\top->ldpc_dec.harq_combined_output.length =\n-\t\t\t\tprev_op->ldpc_dec.harq_combined_output.length;\n-\t\tuint32_t harq_idx = hq_index(op->ldpc_dec.harq_combined_output.offset);\n-\t\tuint32_t prev_harq_idx = hq_index(prev_op->ldpc_dec.harq_combined_output.offset);\n-\t\tharq_layout[harq_idx].val = harq_layout[prev_harq_idx].val;\n-\t\tstruct rte_bbdev_op_data ho = op->ldpc_dec.harq_combined_output;\n-\t\tdesc->data_ptrs[next_triplet].address =\n-\t\t\t\trte_pktmbuf_iova_offset(ho.data, ho.offset);\n-\t\tnext_triplet++;\n+\tfcw->hcin_en = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);\n+\tfcw->hcout_en = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);\n+\tfcw->crc_select = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);\n+\tfcw->so_en = check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_SOFT_OUT_ENABLE);\n+\tfcw->so_bypass_intlv = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_SOFT_OUT_DEINTERLEAVER_BYPASS);\n+\tfcw->so_bypass_rm = check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_SOFT_OUT_RM_BYPASS);\n+\tfcw->bypass_dec = 0;\n+\tfcw->bypass_intlv = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_DEINTERLEAVER_BYPASS);\n+\tif (op->ldpc_dec.q_m == 1) {\n+\t\tfcw->bypass_intlv = 1;\n+\t\tfcw->qm = 2;\n+\t}\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION)) {\n+\t\tfcw->hcin_decomp_mode = 1;\n+\t\tfcw->hcout_comp_mode = 1;\n+\t} else if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HARQ_4BIT_COMPRESSION)) {\n+\t\tfcw->hcin_decomp_mode = 4;\n+\t\tfcw->hcout_comp_mode = 4;\n+\t} else {\n+\t\tfcw->hcin_decomp_mode = 0;\n+\t\tfcw->hcout_comp_mode = 0;\n \t}\n \n-\top->ldpc_dec.hard_output.length += *h_out_length;\n-\tdesc->op_addr = op;\n-}\n+\tfcw->llr_pack_mode = check_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_LLR_COMPRESSION);\n+\tharq_index = hq_index(op->ldpc_dec.harq_combined_output.offset);\n+\tif (fcw->hcin_en > 0) {\n+\t\tharq_in_length = op->ldpc_dec.harq_combined_input.length;\n+\t\tif (fcw->hcin_decomp_mode == 1)\n+\t\t\tharq_in_length = harq_in_length * 8 / 6;\n+\t\telse if (fcw->hcin_decomp_mode == 4)\n+\t\t\tharq_in_length = harq_in_length * 2;\n+\t\tharq_in_length = RTE_MIN(harq_in_length, op->ldpc_dec.n_cb\n+\t\t\t\t- op->ldpc_dec.n_filler);\n+\t\tharq_in_length = RTE_ALIGN_CEIL(harq_in_length, 64);\n+\t\tfcw->hcin_size0 = harq_in_length;\n+\t\tfcw->hcin_offset = 0;\n+\t\tfcw->hcin_size1 = 0;\n+\t} else {\n+\t\tfcw->hcin_size0 = 0;\n+\t\tfcw->hcin_offset = 0;\n+\t\tfcw->hcin_size1 = 0;\n+\t}\n \n-/* Enqueue one encode operations for device in CB mode */\n-static inline int\n-enqueue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op *op,\n-\t\tuint16_t total_enqueued_cbs)\n-{\n-\tunion acc_dma_desc *desc = NULL;\n-\tint ret;\n-\tuint32_t in_offset, out_offset, out_length, mbuf_total_left, seg_total_left;\n-\tstruct rte_mbuf *input, *output_head, *output;\n+\tfcw->itmax = op->ldpc_dec.iter_max;\n+\tfcw->so_it = op->ldpc_dec.iter_max;\n+\tfcw->itstop = check_bit(op->ldpc_dec.op_flags, 
RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);\n+\tfcw->cnu_algo = ACC_ALGO_MSA;\n+\tfcw->synd_precoder = fcw->itstop;\n \n-\tdesc = acc_desc(q, total_enqueued_cbs);\n-\tacc_fcw_te_fill(op, &desc->req.fcw_te);\n+\tfcw->minsum_offset = 1;\n+\tfcw->dec_llrclip   = 2;\n+\n+\t/*\n+\t * These are all implicitly set\n+\t * fcw->synd_post = 0;\n+\t * fcw->dec_convllr = 0;\n+\t * fcw->hcout_convllr = 0;\n+\t * fcw->hcout_size1 = 0;\n+\t * fcw->hcout_offset = 0;\n+\t * fcw->negstop_th = 0;\n+\t * fcw->negstop_it = 0;\n+\t * fcw->negstop_en = 0;\n+\t * fcw->gain_i = 1;\n+\t * fcw->gain_h = 1;\n+\t */\n+\tif (fcw->hcout_en > 0) {\n+\t\tparity_offset = (op->ldpc_dec.basegraph == 1 ? 20 : 8)\n+\t\t\t* op->ldpc_dec.z_c - op->ldpc_dec.n_filler;\n+\t\tk0_p = (fcw->k0 > parity_offset) ?\n+\t\t\t\tfcw->k0 - op->ldpc_dec.n_filler : fcw->k0;\n+\t\tncb_p = fcw->ncb - op->ldpc_dec.n_filler;\n+\t\tl = k0_p + fcw->rm_e;\n+\t\tharq_out_length = (uint16_t) fcw->hcin_size0;\n+\t\tharq_out_length = RTE_MIN(RTE_MAX(harq_out_length, l), ncb_p);\n+\t\tharq_out_length = RTE_ALIGN_CEIL(harq_out_length, 64);\n+\t\tfcw->hcout_size0 = harq_out_length;\n+\t\tfcw->hcout_size1 = 0;\n+\t\tfcw->hcout_offset = 0;\n+\t\tharq_layout[harq_index].offset = fcw->hcout_offset;\n+\t\tharq_layout[harq_index].size0 = fcw->hcout_size0;\n+\t} else {\n+\t\tfcw->hcout_size0 = 0;\n+\t\tfcw->hcout_size1 = 0;\n+\t\tfcw->hcout_offset = 0;\n+\t}\n+\n+\tfcw->tb_crc_select = 0;\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK))\n+\t\tfcw->tb_crc_select = 2;\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_16_CHECK))\n+\t\tfcw->tb_crc_select = 1;\n+}\n+\n+static inline void\n+vrb_dma_desc_ld_update(struct rte_bbdev_dec_op *op,\n+\t\tstruct acc_dma_req_desc *desc,\n+\t\tstruct rte_mbuf *input, struct rte_mbuf *h_output,\n+\t\tuint32_t *in_offset, uint32_t *h_out_offset,\n+\t\tuint32_t *h_out_length,\n+\t\tunion acc_harq_layout_data *harq_layout)\n+{\n+\tint next_triplet = 1; /* FCW already done. */\n+\tdesc->data_ptrs[next_triplet].address = rte_pktmbuf_iova_offset(input, *in_offset);\n+\tnext_triplet++;\n+\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {\n+\t\tstruct rte_bbdev_op_data hi = op->ldpc_dec.harq_combined_input;\n+\t\tdesc->data_ptrs[next_triplet].address =\n+\t\t\t\trte_pktmbuf_iova_offset(hi.data, hi.offset);\n+\t\tnext_triplet++;\n+\t}\n+\n+\tdesc->data_ptrs[next_triplet].address =\n+\t\t\trte_pktmbuf_iova_offset(h_output, *h_out_offset);\n+\t*h_out_length = desc->data_ptrs[next_triplet].blen;\n+\tnext_triplet++;\n+\n+\tif (check_bit(op->ldpc_dec.op_flags,\n+\t\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {\n+\t\t/* Adjust based on previous operation. 
*/\n+\t\tstruct rte_bbdev_dec_op *prev_op = desc->op_addr;\n+\t\top->ldpc_dec.harq_combined_output.length =\n+\t\t\t\tprev_op->ldpc_dec.harq_combined_output.length;\n+\t\tuint32_t harq_idx = hq_index(op->ldpc_dec.harq_combined_output.offset);\n+\t\tuint32_t prev_harq_idx = hq_index(prev_op->ldpc_dec.harq_combined_output.offset);\n+\t\tharq_layout[harq_idx].val = harq_layout[prev_harq_idx].val;\n+\t\tstruct rte_bbdev_op_data ho = op->ldpc_dec.harq_combined_output;\n+\t\tdesc->data_ptrs[next_triplet].address =\n+\t\t\t\trte_pktmbuf_iova_offset(ho.data, ho.offset);\n+\t\tnext_triplet++;\n+\t}\n+\n+\top->ldpc_dec.hard_output.length += *h_out_length;\n+\tdesc->op_addr = op;\n+}\n+\n+static inline int\n+vrb2_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,\n+\t\tstruct acc_dma_req_desc *desc,\n+\t\tstruct rte_mbuf **input, struct rte_mbuf *h_output,\n+\t\tuint32_t *in_offset, uint32_t *h_out_offset,\n+\t\tuint32_t *h_out_length, uint32_t *mbuf_total_left,\n+\t\tuint32_t *seg_total_left, struct acc_fcw_ld *fcw)\n+{\n+\tstruct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;\n+\tint next_triplet = 1; /* FCW already done. */\n+\tuint32_t input_length;\n+\tuint16_t output_length, crc24_overlap = 0;\n+\tuint16_t sys_cols, K, h_p_size, h_np_size;\n+\n+\tacc_header_init(desc);\n+\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))\n+\t\tcrc24_overlap = 24;\n+\n+\t/* Compute some LDPC BG lengths. */\n+\tinput_length = fcw->rm_e;\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_LLR_COMPRESSION))\n+\t\tinput_length = (input_length * 3 + 3) / 4;\n+\tsys_cols = (dec->basegraph == 1) ? 22 : 10;\n+\tK = sys_cols * dec->z_c;\n+\toutput_length = K - dec->n_filler - crc24_overlap;\n+\n+\tif (unlikely((*mbuf_total_left == 0) || (*mbuf_total_left < input_length))) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u\",\n+\t\t\t\t*mbuf_total_left, input_length);\n+\t\treturn -1;\n+\t}\n+\n+\tnext_triplet = acc_dma_fill_blk_type_in(desc, input,\n+\t\t\tin_offset, input_length,\n+\t\t\tseg_total_left, next_triplet,\n+\t\t\tcheck_bit(op->ldpc_dec.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_DEC_SCATTER_GATHER));\n+\n+\tif (unlikely(next_triplet < 0)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\top);\n+\t\treturn -1;\n+\t}\n+\n+\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {\n+\t\tif (op->ldpc_dec.harq_combined_input.data == 0) {\n+\t\t\trte_bbdev_log(ERR, \"HARQ input is not defined\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\th_p_size = fcw->hcin_size0 + fcw->hcin_size1;\n+\t\tif (fcw->hcin_decomp_mode == 1)\n+\t\t\th_p_size = (h_p_size * 3 + 3) / 4;\n+\t\telse if (fcw->hcin_decomp_mode == 4)\n+\t\t\th_p_size = h_p_size / 2;\n+\t\tif (op->ldpc_dec.harq_combined_input.data == 0) {\n+\t\t\trte_bbdev_log(ERR, \"HARQ input is not defined\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\tacc_dma_fill_blk_type(\n+\t\t\t\tdesc,\n+\t\t\t\top->ldpc_dec.harq_combined_input.data,\n+\t\t\t\top->ldpc_dec.harq_combined_input.offset,\n+\t\t\t\th_p_size,\n+\t\t\t\tnext_triplet,\n+\t\t\t\tACC_DMA_BLKID_IN_HARQ);\n+\t\tnext_triplet++;\n+\t}\n+\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->m2dlen = next_triplet;\n+\t*mbuf_total_left -= input_length;\n+\n+\tnext_triplet = acc_dma_fill_blk_type(desc, h_output,\n+\t\t\t*h_out_offset, output_length >> 3, next_triplet,\n+\t\t\tACC_DMA_BLKID_OUT_HARD);\n+\n+\tif (check_bit(op->ldpc_dec.op_flags, 
RTE_BBDEV_LDPC_SOFT_OUT_ENABLE)) {\n+\t\tif (op->ldpc_dec.soft_output.data == 0) {\n+\t\t\trte_bbdev_log(ERR, \"Soft output is not defined\");\n+\t\t\treturn -1;\n+\t\t}\n+\t\tdec->soft_output.length = fcw->rm_e;\n+\t\tacc_dma_fill_blk_type(desc, dec->soft_output.data, dec->soft_output.offset,\n+\t\t\t\tfcw->rm_e, next_triplet, ACC_DMA_BLKID_OUT_SOFT);\n+\t\tnext_triplet++;\n+\t}\n+\n+\tif (check_bit(op->ldpc_dec.op_flags,\n+\t\t\t\tRTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {\n+\t\tif (op->ldpc_dec.harq_combined_output.data == 0) {\n+\t\t\trte_bbdev_log(ERR, \"HARQ output is not defined\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\t/* Pruned size of the HARQ */\n+\t\th_p_size = fcw->hcout_size0 + fcw->hcout_size1;\n+\t\t/* Non-Pruned size of the HARQ */\n+\t\th_np_size = fcw->hcout_offset > 0 ?\n+\t\t\t\tfcw->hcout_offset + fcw->hcout_size1 :\n+\t\t\t\th_p_size;\n+\t\tif (fcw->hcin_decomp_mode == 1) {\n+\t\t\th_np_size = (h_np_size * 3 + 3) / 4;\n+\t\t\th_p_size = (h_p_size * 3 + 3) / 4;\n+\t\t} else if (fcw->hcin_decomp_mode == 4) {\n+\t\t\th_np_size = h_np_size / 2;\n+\t\t\th_p_size = h_p_size / 2;\n+\t\t}\n+\t\tdec->harq_combined_output.length = h_np_size;\n+\t\tacc_dma_fill_blk_type(\n+\t\t\t\tdesc,\n+\t\t\t\tdec->harq_combined_output.data,\n+\t\t\t\tdec->harq_combined_output.offset,\n+\t\t\t\th_p_size,\n+\t\t\t\tnext_triplet,\n+\t\t\t\tACC_DMA_BLKID_OUT_HARQ);\n+\n+\t\tnext_triplet++;\n+\t}\n+\n+\t*h_out_length = output_length >> 3;\n+\tdec->hard_output.length += *h_out_length;\n+\t*h_out_offset += *h_out_length;\n+\tdesc->data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->d2mlen = next_triplet - desc->m2dlen;\n+\n+\tdesc->op_addr = op;\n+\n+\treturn 0;\n+}\n+\n+/* Enqueue one encode operations for device in CB mode */\n+static inline int\n+enqueue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op *op,\n+\t\tuint16_t total_enqueued_cbs)\n+{\n+\tunion acc_dma_desc *desc = NULL;\n+\tint ret;\n+\tuint32_t in_offset, out_offset, out_length, mbuf_total_left, seg_total_left;\n+\tstruct rte_mbuf *input, *output_head, *output;\n+\n+\tdesc = acc_desc(q, total_enqueued_cbs);\n+\tacc_fcw_te_fill(op, &desc->req.fcw_te);\n \n \tinput = op->turbo_enc.input.data;\n \toutput_head = output = op->turbo_enc.output.data;\n@@ -1779,6 +2283,7 @@ enqueue_ldpc_enc_n_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ops,\n \t/** This could be done at polling. */\n \tacc_header_init(&desc->req);\n \tdesc->req.numCBs = num;\n+\tdesc->req.dltb = 0;\n \n \tin_length_in_bytes = ops[0]->ldpc_enc.input.data->data_len;\n \tout_length = (enc->cb_params.e + 7) >> 3;\n@@ -1817,7 +2322,7 @@ enqueue_ldpc_enc_n_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ops,\n \treturn num;\n }\n \n-/* Enqueue one encode operations for device for a partial TB\n+/* Enqueue one encode operations for VRB1 device for a partial TB\n  * all codes blocks have same configuration multiplexed on the same descriptor.\n  */\n static inline void\n@@ -2004,6 +2509,105 @@ vrb1_enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op\n \treturn return_descs;\n }\n \n+/* Fill in a frame control word for LDPC encoding. 
*/\n+static inline void\n+vrb2_fcw_letb_fill(const struct rte_bbdev_enc_op *op, struct acc_fcw_le *fcw)\n+{\n+\tfcw->qm = op->ldpc_enc.q_m;\n+\tfcw->nfiller = op->ldpc_enc.n_filler;\n+\tfcw->BG = (op->ldpc_enc.basegraph - 1);\n+\tfcw->Zc = op->ldpc_enc.z_c;\n+\tfcw->ncb = op->ldpc_enc.n_cb;\n+\tfcw->k0 = get_k0(fcw->ncb, fcw->Zc, op->ldpc_enc.basegraph,\n+\t\t\top->ldpc_enc.rv_index);\n+\tfcw->rm_e = op->ldpc_enc.tb_params.ea;\n+\tfcw->rm_e_b = op->ldpc_enc.tb_params.eb;\n+\tfcw->crc_select = check_bit(op->ldpc_enc.op_flags,\n+\t\t\tRTE_BBDEV_LDPC_CRC_24B_ATTACH);\n+\tfcw->bypass_intlv = 0;\n+\tif (op->ldpc_enc.tb_params.c > 1) {\n+\t\tfcw->mcb_count = 0;\n+\t\tfcw->C = op->ldpc_enc.tb_params.c;\n+\t\tfcw->Cab = op->ldpc_enc.tb_params.cab;\n+\t} else {\n+\t\tfcw->mcb_count = 1;\n+\t\tfcw->C = 0;\n+\t}\n+}\n+\n+/* Enqueue one encode operations for device in TB mode.\n+ * returns the number of descs used.\n+ */\n+static inline int\n+vrb2_enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,\n+\t\tuint16_t enq_descs)\n+{\n+\tunion acc_dma_desc *desc = NULL;\n+\tuint32_t in_offset, out_offset, out_length, seg_total_left;\n+\tstruct rte_mbuf *input, *output_head, *output;\n+\n+\tuint16_t desc_idx = ((q->sw_ring_head + enq_descs) & q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tvrb2_fcw_letb_fill(op, &desc->req.fcw_le);\n+\tstruct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;\n+\tint next_triplet = 1; /* FCW already done */\n+\tuint32_t in_length_in_bytes;\n+\tuint16_t K, in_length_in_bits;\n+\n+\tinput = enc->input.data;\n+\toutput_head = output = enc->output.data;\n+\tin_offset = enc->input.offset;\n+\tout_offset = enc->output.offset;\n+\tseg_total_left = rte_pktmbuf_data_len(enc->input.data) - in_offset;\n+\n+\tacc_header_init(&desc->req);\n+\tK = (enc->basegraph == 1 ? 
22 : 10) * enc->z_c;\n+\tin_length_in_bits = K - enc->n_filler;\n+\tif ((enc->op_flags & RTE_BBDEV_LDPC_CRC_24A_ATTACH) ||\n+\t\t\t(enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH))\n+\t\tin_length_in_bits -= 24;\n+\tin_length_in_bytes = (in_length_in_bits >> 3) * enc->tb_params.c;\n+\n+\tnext_triplet = acc_dma_fill_blk_type_in(&desc->req, &input, &in_offset,\n+\t\t\tin_length_in_bytes, &seg_total_left, next_triplet,\n+\t\t\tcheck_bit(enc->op_flags, RTE_BBDEV_LDPC_ENC_SCATTER_GATHER));\n+\tif (unlikely(next_triplet < 0)) {\n+\t\trte_bbdev_log(ERR,\n+\t\t\t\t\"Mismatch between data to process and mbuf data length in bbdev_op: %p\",\n+\t\t\t\top);\n+\t\treturn -1;\n+\t}\n+\tdesc->req.data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->req.m2dlen = next_triplet;\n+\n+\t/* Set output length */\n+\t/* Integer round up division by 8 */\n+\tout_length = (enc->tb_params.ea * enc->tb_params.cab +\n+\t\t\tenc->tb_params.eb * (enc->tb_params.c - enc->tb_params.cab)  + 7) >> 3;\n+\n+\tnext_triplet = acc_dma_fill_blk_type(&desc->req, output, out_offset,\n+\t\t\tout_length, next_triplet, ACC_DMA_BLKID_OUT_ENC);\n+\tenc->output.length = out_length;\n+\tout_offset += out_length;\n+\tdesc->req.data_ptrs[next_triplet - 1].last = 1;\n+\tdesc->req.data_ptrs[next_triplet - 1].dma_ext = 0;\n+\tdesc->req.d2mlen = next_triplet - desc->req.m2dlen;\n+\tdesc->req.numCBs = enc->tb_params.c;\n+\tif (desc->req.numCBs > 1)\n+\t\tdesc->req.dltb = 1;\n+\tdesc->req.op_addr = op;\n+\n+\tif (out_length < ACC_MAX_E_MBUF)\n+\t\tmbuf_append(output_head, output, out_length);\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tacc_memdump(\"FCW\", &desc->req.fcw_le, sizeof(desc->req.fcw_le));\n+\tacc_memdump(\"Req Desc.\", desc, sizeof(*desc));\n+#endif\n+\t/* One CB (one op) was successfully prepared to enqueue */\n+\treturn 1;\n+}\n+\n /** Enqueue one decode operations for device in CB mode. */\n static inline int\n enqueue_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n@@ -2016,8 +2620,10 @@ enqueue_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \tstruct rte_mbuf *input, *h_output_head, *h_output,\n \t\t*s_output_head, *s_output;\n \n-\t/* Disable explicitly SO for VRB 1. */\n-\top->turbo_dec.op_flags &= ~RTE_BBDEV_TURBO_SOFT_OUTPUT;\n+\tif (q->d->device_variant == VRB1_VARIANT) {\n+\t\t/* Disable explicitly SO for VRB 1. */\n+\t\top->turbo_dec.op_flags &= ~RTE_BBDEV_TURBO_SOFT_OUTPUT;\n+\t}\n \n \tdesc = acc_desc(q, total_enqueued_cbs);\n \tvrb_fcw_td_fill(op, &desc->req.fcw_td);\n@@ -2060,7 +2666,7 @@ enqueue_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \treturn 1;\n }\n \n-/** Enqueue one decode operations for device in CB mode */\n+/** Enqueue one decode operations for device in CB mode. 
*/\n static inline int\n vrb_enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \t\tuint16_t total_enqueued_cbs, bool same_op)\n@@ -2113,11 +2719,16 @@ vrb_enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \t\t\tseg_total_left = rte_pktmbuf_data_len(input) - in_offset;\n \t\telse\n \t\t\tseg_total_left = fcw->rm_e;\n-\n-\t\tret = vrb1_dma_desc_ld_fill(op, &desc->req, &input, h_output,\n-\t\t\t\t&in_offset, &h_out_offset,\n-\t\t\t\t&h_out_length, &mbuf_total_left,\n-\t\t\t\t&seg_total_left, fcw);\n+\t\tif (q->d->device_variant == VRB1_VARIANT)\n+\t\t\tret = vrb1_dma_desc_ld_fill(op, &desc->req, &input, h_output,\n+\t\t\t\t\t&in_offset, &h_out_offset,\n+\t\t\t\t\t&h_out_length, &mbuf_total_left,\n+\t\t\t\t\t&seg_total_left, fcw);\n+\t\telse\n+\t\t\tret = vrb2_dma_desc_ld_fill(op, &desc->req, &input, h_output,\n+\t\t\t\t\t&in_offset, &h_out_offset,\n+\t\t\t\t\t&h_out_length, &mbuf_total_left,\n+\t\t\t\t\t&seg_total_left, fcw);\n \t\tif (unlikely(ret < 0))\n \t\t\treturn ret;\n \t}\n@@ -2206,12 +2817,18 @@ vrb_enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \t\tdesc->req.data_ptrs[0].blen = ACC_FCW_LD_BLEN;\n \t\trte_memcpy(&desc->req.fcw_ld, &desc_first->req.fcw_ld, ACC_FCW_LD_BLEN);\n \t\tdesc->req.fcw_ld.tb_trailer_size = (c - r - 1) * trail_len;\n-\n-\t\tret = vrb1_dma_desc_ld_fill(op, &desc->req, &input,\n-\t\t\t\th_output, &in_offset, &h_out_offset,\n-\t\t\t\t&h_out_length,\n-\t\t\t\t&mbuf_total_left, &seg_total_left,\n-\t\t\t\t&desc->req.fcw_ld);\n+\t\tif (q->d->device_variant == VRB1_VARIANT)\n+\t\t\tret = vrb1_dma_desc_ld_fill(op, &desc->req, &input,\n+\t\t\t\t\th_output, &in_offset, &h_out_offset,\n+\t\t\t\t\t&h_out_length,\n+\t\t\t\t\t&mbuf_total_left, &seg_total_left,\n+\t\t\t\t\t&desc->req.fcw_ld);\n+\t\telse\n+\t\t\tret = vrb2_dma_desc_ld_fill(op, &desc->req, &input,\n+\t\t\t\t\th_output, &in_offset, &h_out_offset,\n+\t\t\t\t\t&h_out_length,\n+\t\t\t\t\t&mbuf_total_left, &seg_total_left,\n+\t\t\t\t\t&desc->req.fcw_ld);\n \n \t\tif (unlikely(ret < 0))\n \t\t\treturn ret;\n@@ -2253,7 +2870,7 @@ vrb_enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \treturn current_enqueued_cbs;\n }\n \n-/* Enqueue one decode operations for device in TB mode */\n+/* Enqueue one decode operations for device in TB mode. */\n static inline int\n enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,\n \t\tuint16_t total_enqueued_cbs, uint8_t cbs_in_tb)\n@@ -2475,14 +3092,23 @@ vrb_enqueue_ldpc_enc_tb(struct rte_bbdev_queue_data *q_data,\n \tint descs_used;\n \n \tfor (i = 0; i < num; ++i) {\n-\t\tcbs_in_tb = get_num_cbs_in_tb_ldpc_enc(&ops[i]->ldpc_enc);\n-\t\t/* Check if there are available space for further processing. */\n-\t\tif (unlikely((avail - cbs_in_tb < 0) || (cbs_in_tb == 0))) {\n-\t\t\tacc_enqueue_ring_full(q_data);\n-\t\t\tbreak;\n+\t\tif (q->d->device_variant == VRB1_VARIANT) {\n+\t\t\tcbs_in_tb = get_num_cbs_in_tb_ldpc_enc(&ops[i]->ldpc_enc);\n+\t\t\t/* Check if there are available space for further processing. 
*/\n+\t\t\tif (unlikely((avail - cbs_in_tb < 0) || (cbs_in_tb == 0))) {\n+\t\t\t\tacc_enqueue_ring_full(q_data);\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tdescs_used = vrb1_enqueue_ldpc_enc_one_op_tb(q, ops[i],\n+\t\t\t\t\tenqueued_descs, cbs_in_tb);\n+\t\t} else {\n+\t\t\tif (unlikely(avail < 1)) {\n+\t\t\t\tacc_enqueue_ring_full(q_data);\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\tdescs_used = vrb2_enqueue_ldpc_enc_one_op_tb(q, ops[i], enqueued_descs);\n \t\t}\n \n-\t\tdescs_used = vrb1_enqueue_ldpc_enc_one_op_tb(q, ops[i], enqueued_descs, cbs_in_tb);\n \t\tif (descs_used < 0) {\n \t\t\tacc_enqueue_invalid(q_data);\n \t\t\tbreak;\n@@ -2616,7 +3242,6 @@ vrb_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,\n \t\t\tbreak;\n \t\t}\n \t\tavail -= 1;\n-\n \t\trte_bbdev_log(INFO, \"Op %d %d %d %d %d %d %d %d %d %d %d %d\\n\",\n \t\t\ti, ops[i]->ldpc_dec.op_flags, ops[i]->ldpc_dec.rv_index,\n \t\t\tops[i]->ldpc_dec.iter_max, ops[i]->ldpc_dec.iter_count,\n@@ -2744,6 +3369,7 @@ vrb_dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,\n \top->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0);\n \top->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n \top->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n+\top->status |= ((rsp.engine_hung) ? (1 << RTE_BBDEV_ENGINE_ERROR) : 0);\n \n \tif (desc->req.last_desc_in_batch) {\n \t\t(*aq_dequeued)++;\n@@ -2764,7 +3390,56 @@ vrb_dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,\n \treturn desc->req.numCBs;\n }\n \n-/* Dequeue one LDPC encode operations from device in TB mode.\n+\n+/* Dequeue one LDPC encode operations from VRB2 device in TB mode\n+ */\n+static inline int\n+vrb2_dequeue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,\n+\t\tuint16_t *dequeued_ops, uint32_t *aq_dequeued,\n+\t\tuint16_t *dequeued_descs)\n+{\n+\tunion acc_dma_desc *desc, atom_desc;\n+\tunion acc_dma_rsp_desc rsp;\n+\tstruct rte_bbdev_enc_op *op;\n+\tint desc_idx = ((q->sw_ring_tail + *dequeued_descs) & q->sw_ring_wrap_mask);\n+\n+\tdesc = q->ring_addr + desc_idx;\n+\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);\n+\n+\t/* Check fdone bit */\n+\tif (!(atom_desc.rsp.val & ACC_FDONE))\n+\t\treturn -1;\n+\n+\trsp.val = atom_desc.rsp.val;\n+\trte_bbdev_log_debug(\"Resp. desc %p: %x\", desc, rsp.val);\n+\n+\t/* Dequeue */\n+\top = desc->req.op_addr;\n+\n+\t/* Clearing status, it will be set based on response */\n+\top->status = 0;\n+\top->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR;\n+\top->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR;\n+\top->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR;\n+\top->status |= rsp.engine_hung << RTE_BBDEV_ENGINE_ERROR;\n+\n+\tif (desc->req.last_desc_in_batch) {\n+\t\t(*aq_dequeued)++;\n+\t\tdesc->req.last_desc_in_batch = 0;\n+\t}\n+\tdesc->rsp.val = ACC_DMA_DESC_TYPE;\n+\tdesc->rsp.add_info_0 = 0; /*Reserved bits */\n+\tdesc->rsp.add_info_1 = 0; /*Reserved bits */\n+\n+\t/* One op was successfully dequeued */\n+\tref_op[0] = op;\n+\t(*dequeued_descs)++;\n+\t(*dequeued_ops)++;\n+\treturn 1;\n+}\n+\n+\n+/* Dequeue one encode operations from device in TB mode.\n  * That operation may cover multiple descriptors.\n  */\n static inline int\n@@ -2814,6 +3489,7 @@ vrb_dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op,\n \t\top->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0);\n \t\top->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n \t\top->status |= ((rsp.fcw_err) ? 
(1 << RTE_BBDEV_DRV_ERROR) : 0);\n+\t\top->status |= ((rsp.engine_hung) ? (1 << RTE_BBDEV_ENGINE_ERROR) : 0);\n \n \t\tif (desc->req.last_desc_in_batch) {\n \t\t\t(*aq_dequeued)++;\n@@ -2860,6 +3536,8 @@ vrb_dequeue_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,\n \top->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0);\n \top->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n \top->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n+\top->status |= rsp.engine_hung << RTE_BBDEV_ENGINE_ERROR;\n+\n \tif (op->status != 0) {\n \t\t/* These errors are not expected. */\n \t\tq_data->queue_stats.dequeue_err_count++;\n@@ -2913,6 +3591,7 @@ vrb_dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data,\n \top->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR;\n \top->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR;\n \top->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR;\n+\top->status |= rsp.engine_hung << RTE_BBDEV_ENGINE_ERROR;\n \tif (op->status != 0)\n \t\tq_data->queue_stats.dequeue_err_count++;\n \n@@ -2994,6 +3673,7 @@ vrb_dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op,\n \t\top->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0);\n \t\top->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n \t\top->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0);\n+\t\top->status |= ((rsp.engine_hung) ? (1 << RTE_BBDEV_ENGINE_ERROR) : 0);\n \n \t\tif (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK))\n \t\t\ttb_crc_check ^= desc->rsp.add_info_1;\n@@ -3045,7 +3725,6 @@ vrb_dequeue_enc(struct rte_bbdev_queue_data *q_data,\n \tif (avail == 0)\n \t\treturn 0;\n \top = acc_op_tail(q, 0);\n-\n \tcbm = op->turbo_enc.code_block_mode;\n \n \tfor (i = 0; i < avail; i++) {\n@@ -3088,9 +3767,14 @@ vrb_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,\n \n \tfor (i = 0; i < avail; i++) {\n \t\tif (cbm == RTE_BBDEV_TRANSPORT_BLOCK)\n-\t\t\tret = vrb_dequeue_enc_one_op_tb(q, &ops[dequeued_ops],\n-\t\t\t\t\t&dequeued_ops, &aq_dequeued,\n-\t\t\t\t\t&dequeued_descs, num);\n+\t\t\tif (q->d->device_variant == VRB1_VARIANT)\n+\t\t\t\tret = vrb_dequeue_enc_one_op_tb(q, &ops[dequeued_ops],\n+\t\t\t\t\t\t&dequeued_ops, &aq_dequeued,\n+\t\t\t\t\t\t&dequeued_descs, num);\n+\t\t\telse\n+\t\t\t\tret = vrb2_dequeue_ldpc_enc_one_op_tb(q, &ops[dequeued_ops],\n+\t\t\t\t\t\t&dequeued_ops, &aq_dequeued,\n+\t\t\t\t\t\t&dequeued_descs);\n \t\telse\n \t\t\tret = vrb_dequeue_enc_one_op_cb(q, &ops[dequeued_ops],\n \t\t\t\t\t&dequeued_ops, &aq_dequeued,\n@@ -3220,6 +3904,47 @@ vrb1_fcw_fft_fill(struct rte_bbdev_fft_op *op, struct acc_fcw_fft *fcw)\n \t\tfcw->bypass = 0;\n }\n \n+/* Fill in a frame control word for FFT processing. 
*/\n+static inline void\n+vrb2_fcw_fft_fill(struct rte_bbdev_fft_op *op, struct acc_fcw_fft_3 *fcw)\n+{\n+\tfcw->in_frame_size = op->fft.input_sequence_size;\n+\tfcw->leading_pad_size = op->fft.input_leading_padding;\n+\tfcw->out_frame_size = op->fft.output_sequence_size;\n+\tfcw->leading_depad_size = op->fft.output_leading_depadding;\n+\tfcw->cs_window_sel = op->fft.window_index[0] +\n+\t\t\t(op->fft.window_index[1] << 8) +\n+\t\t\t(op->fft.window_index[2] << 16) +\n+\t\t\t(op->fft.window_index[3] << 24);\n+\tfcw->cs_window_sel2 = op->fft.window_index[4] +\n+\t\t\t(op->fft.window_index[5] << 8);\n+\tfcw->cs_enable_bmap = op->fft.cs_bitmap;\n+\tfcw->num_antennas = op->fft.num_antennas_log2;\n+\tfcw->idft_size = op->fft.idft_log2;\n+\tfcw->dft_size = op->fft.dft_log2;\n+\tfcw->cs_offset = op->fft.cs_time_adjustment;\n+\tfcw->idft_shift = op->fft.idft_shift;\n+\tfcw->dft_shift = op->fft.dft_shift;\n+\tfcw->cs_multiplier = op->fft.ncs_reciprocal;\n+\tfcw->power_shift = op->fft.power_shift;\n+\tfcw->exp_adj = op->fft.fp16_exp_adjust;\n+\tfcw->fp16_in = check_bit(op->fft.op_flags, RTE_BBDEV_FFT_FP16_INPUT);\n+\tfcw->fp16_out = check_bit(op->fft.op_flags, RTE_BBDEV_FFT_FP16_OUTPUT);\n+\tfcw->power_en = check_bit(op->fft.op_flags, RTE_BBDEV_FFT_POWER_MEAS);\n+\tif (check_bit(op->fft.op_flags,\n+\t\t\tRTE_BBDEV_FFT_IDFT_BYPASS)) {\n+\t\tif (check_bit(op->fft.op_flags,\n+\t\t\t\tRTE_BBDEV_FFT_WINDOWING_BYPASS))\n+\t\t\tfcw->bypass = 2;\n+\t\telse\n+\t\t\tfcw->bypass = 1;\n+\t} else if (check_bit(op->fft.op_flags,\n+\t\t\tRTE_BBDEV_FFT_DFT_BYPASS))\n+\t\tfcw->bypass = 3;\n+\telse\n+\t\tfcw->bypass = 0;\n+}\n+\n static inline int\n vrb1_dma_desc_fft_fill(struct rte_bbdev_fft_op *op,\n \t\tstruct acc_dma_req_desc *desc,\n@@ -3253,6 +3978,58 @@ vrb1_dma_desc_fft_fill(struct rte_bbdev_fft_op *op,\n \treturn 0;\n }\n \n+static inline int\n+vrb2_dma_desc_fft_fill(struct rte_bbdev_fft_op *op,\n+\t\tstruct acc_dma_req_desc *desc,\n+\t\tstruct rte_mbuf *input, struct rte_mbuf *output, struct rte_mbuf *win_input,\n+\t\tstruct rte_mbuf *pwr, uint32_t *in_offset, uint32_t *out_offset,\n+\t\tuint32_t *win_offset, uint32_t *pwr_offset)\n+{\n+\tbool pwr_en = check_bit(op->fft.op_flags, RTE_BBDEV_FFT_POWER_MEAS);\n+\tbool win_en = check_bit(op->fft.op_flags, RTE_BBDEV_FFT_DEWINDOWING);\n+\tint num_cs = 0, i, bd_idx = 1;\n+\n+\t/* FCW already done */\n+\tacc_header_init(desc);\n+\n+\tRTE_SET_USED(win_input);\n+\tRTE_SET_USED(win_offset);\n+\n+\tdesc->data_ptrs[bd_idx].address = rte_pktmbuf_iova_offset(input, *in_offset);\n+\tdesc->data_ptrs[bd_idx].blen = op->fft.input_sequence_size * ACC_IQ_SIZE;\n+\tdesc->data_ptrs[bd_idx].blkid = ACC_DMA_BLKID_IN;\n+\tdesc->data_ptrs[bd_idx].last = 1;\n+\tdesc->data_ptrs[bd_idx].dma_ext = 0;\n+\tbd_idx++;\n+\n+\tdesc->data_ptrs[bd_idx].address = rte_pktmbuf_iova_offset(output, *out_offset);\n+\tdesc->data_ptrs[bd_idx].blen = op->fft.output_sequence_size * ACC_IQ_SIZE;\n+\tdesc->data_ptrs[bd_idx].blkid = ACC_DMA_BLKID_OUT_HARD;\n+\tdesc->data_ptrs[bd_idx].last = pwr_en ? 0 : 1;\n+\tdesc->data_ptrs[bd_idx].dma_ext = 0;\n+\tdesc->m2dlen = win_en ? 3 : 2;\n+\tdesc->d2mlen = pwr_en ? 
2 : 1;\n+\tdesc->ib_ant_offset = op->fft.input_sequence_size;\n+\tdesc->num_ant = op->fft.num_antennas_log2 - 3;\n+\n+\tfor (i = 0; i < RTE_BBDEV_MAX_CS; i++)\n+\t\tif (check_bit(op->fft.cs_bitmap, 1 << i))\n+\t\t\tnum_cs++;\n+\tdesc->num_cs = num_cs;\n+\n+\tif (pwr_en && pwr) {\n+\t\tbd_idx++;\n+\t\tdesc->data_ptrs[bd_idx].address = rte_pktmbuf_iova_offset(pwr, *pwr_offset);\n+\t\tdesc->data_ptrs[bd_idx].blen = num_cs * (1 << op->fft.num_antennas_log2) * 4;\n+\t\tdesc->data_ptrs[bd_idx].blkid = ACC_DMA_BLKID_OUT_SOFT;\n+\t\tdesc->data_ptrs[bd_idx].last = 1;\n+\t\tdesc->data_ptrs[bd_idx].dma_ext = 0;\n+\t}\n+\tdesc->ob_cyc_offset = op->fft.output_sequence_size;\n+\tdesc->ob_ant_offset = op->fft.output_sequence_size * num_cs;\n+\tdesc->op_addr = op;\n+\treturn 0;\n+}\n \n /** Enqueue one FFT operation for device. */\n static inline int\n@@ -3260,26 +4037,35 @@ vrb_enqueue_fft_one_op(struct acc_queue *q, struct rte_bbdev_fft_op *op,\n \t\tuint16_t total_enqueued_cbs)\n {\n \tunion acc_dma_desc *desc;\n-\tstruct rte_mbuf *input, *output;\n-\tuint32_t in_offset, out_offset;\n+\tstruct rte_mbuf *input, *output, *pwr, *win;\n+\tuint32_t in_offset, out_offset, pwr_offset, win_offset;\n \tstruct acc_fcw_fft *fcw;\n \n \tdesc = acc_desc(q, total_enqueued_cbs);\n \tinput = op->fft.base_input.data;\n \toutput = op->fft.base_output.data;\n+\tpwr = op->fft.power_meas_output.data;\n+\twin = op->fft.dewindowing_input.data;\n \tin_offset = op->fft.base_input.offset;\n \tout_offset = op->fft.base_output.offset;\n+\tpwr_offset = op->fft.power_meas_output.offset;\n+\twin_offset = op->fft.dewindowing_input.offset;\n \n \tfcw = (struct acc_fcw_fft *) (q->fcw_ring +\n \t\t\t((q->sw_ring_head + total_enqueued_cbs) & q->sw_ring_wrap_mask)\n \t\t\t* ACC_MAX_FCW_SIZE);\n \n-\tvrb1_fcw_fft_fill(op, fcw);\n-\tvrb1_dma_desc_fft_fill(op, &desc->req, input, output, &in_offset, &out_offset);\n+\tif (q->d->device_variant == VRB1_VARIANT) {\n+\t\tvrb1_fcw_fft_fill(op, fcw);\n+\t\tvrb1_dma_desc_fft_fill(op, &desc->req, input, output, &in_offset, &out_offset);\n+\t} else {\n+\t\tvrb2_fcw_fft_fill(op, (struct acc_fcw_fft_3 *) fcw);\n+\t\tvrb2_dma_desc_fft_fill(op, &desc->req, input, output, win, pwr,\n+\t\t\t\t&in_offset, &out_offset, &win_offset, &pwr_offset);\n+\t}\n #ifdef RTE_LIBRTE_BBDEV_DEBUG\n-\trte_memdump(stderr, \"FCW\", &desc->req.fcw_fft,\n-\t\t\tsizeof(desc->req.fcw_fft));\n-\trte_memdump(stderr, \"Req Desc.\", desc, sizeof(*desc));\n+\tacc_memdump(\"FCW\", fcw, 128);\n+\tacc_memdump(\"Req Desc.\", desc, 128);\n #endif\n \treturn 1;\n }\n@@ -3352,6 +4138,7 @@ vrb_dequeue_fft_one_op(struct rte_bbdev_queue_data *q_data,\n \top->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR;\n \top->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR;\n \top->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR;\n+\top->status |= rsp.engine_hung << RTE_BBDEV_ENGINE_ERROR;\n \tif (op->status != 0)\n \t\tq_data->queue_stats.dequeue_err_count++;\n \n@@ -3398,6 +4185,371 @@ vrb_dequeue_fft(struct rte_bbdev_queue_data *q_data,\n \treturn i;\n }\n \n+/* Fill in a frame control word for MLD-TS processing. 
*/\n+static inline void\n+vrb2_fcw_mldts_fill(struct rte_bbdev_mldts_op *op, struct acc_fcw_mldts *fcw)\n+{\n+\tfcw->nrb = op->mldts.num_rbs;\n+\tfcw->NLayers = op->mldts.num_layers - 1;\n+\tfcw->Qmod0 = (op->mldts.q_m[0] >> 1) - 1;\n+\tfcw->Qmod1 = (op->mldts.q_m[1] >> 1) - 1;\n+\tfcw->Qmod2 = (op->mldts.q_m[2] >> 1) - 1;\n+\tfcw->Qmod3 = (op->mldts.q_m[3] >> 1) - 1;\n+\t/* Mark some layers as disabled */\n+\tif (op->mldts.num_layers == 2) {\n+\t\tfcw->Qmod2 = 3;\n+\t\tfcw->Qmod3 = 3;\n+\t}\n+\tif (op->mldts.num_layers == 3)\n+\t\tfcw->Qmod3 = 3;\n+\tfcw->Rrep = op->mldts.r_rep;\n+\tfcw->Crep = op->mldts.c_rep;\n+}\n+\n+/* Fill in descriptor for one MLD-TS processing operation. */\n+static inline int\n+vrb2_dma_desc_mldts_fill(struct rte_bbdev_mldts_op *op,\n+\t\tstruct acc_dma_req_desc *desc,\n+\t\tstruct rte_mbuf *input_q, struct rte_mbuf *input_r,\n+\t\tstruct rte_mbuf *output,\n+\t\tuint32_t *in_offset, uint32_t *out_offset)\n+{\n+\tuint16_t qsize_per_re[VRB2_MLD_LAY_SIZE] = {8, 12, 16}; /* Layer 2 to 4. */\n+\tuint16_t rsize_per_re[VRB2_MLD_LAY_SIZE] = {14, 26, 42};\n+\tuint16_t sc_factor_per_rrep[VRB2_MLD_RREP_SIZE] = {12, 6, 4, 3, 0, 2};\n+\tuint16_t i, outsize_per_re = 0;\n+\tuint32_t sc_num, r_num, q_size, r_size, out_size;\n+\n+\t/* Prevent out of range access. */\n+\tif (op->mldts.r_rep > 5)\n+\t\top->mldts.r_rep = 5;\n+\tif (op->mldts.num_layers < 2)\n+\t\top->mldts.num_layers = 2;\n+\tif (op->mldts.num_layers > 4)\n+\t\top->mldts.num_layers = 4;\n+\tfor (i = 0; i < op->mldts.num_layers; i++)\n+\t\toutsize_per_re += op->mldts.q_m[i];\n+\tsc_num = op->mldts.num_rbs * RTE_BBDEV_SCPERRB * (op->mldts.c_rep + 1);\n+\tr_num = op->mldts.num_rbs * sc_factor_per_rrep[op->mldts.r_rep];\n+\tq_size = qsize_per_re[op->mldts.num_layers - 2] * sc_num;\n+\tr_size = rsize_per_re[op->mldts.num_layers - 2] * r_num;\n+\tout_size =  sc_num * outsize_per_re;\n+\t/* printf(\"Sc %d R num %d Size %d %d %d\\n\", sc_num, r_num, q_size, r_size, out_size); */\n+\n+\t/* FCW already done. */\n+\tacc_header_init(desc);\n+\tdesc->data_ptrs[1].address = rte_pktmbuf_iova_offset(input_q, *in_offset);\n+\tdesc->data_ptrs[1].blen = q_size;\n+\tdesc->data_ptrs[1].blkid = ACC_DMA_BLKID_IN;\n+\tdesc->data_ptrs[1].last = 0;\n+\tdesc->data_ptrs[1].dma_ext = 0;\n+\tdesc->data_ptrs[2].address = rte_pktmbuf_iova_offset(input_r, *in_offset);\n+\tdesc->data_ptrs[2].blen = r_size;\n+\tdesc->data_ptrs[2].blkid = ACC_DMA_BLKID_IN_MLD_R;\n+\tdesc->data_ptrs[2].last = 1;\n+\tdesc->data_ptrs[2].dma_ext = 0;\n+\tdesc->data_ptrs[3].address = rte_pktmbuf_iova_offset(output, *out_offset);\n+\tdesc->data_ptrs[3].blen = out_size;\n+\tdesc->data_ptrs[3].blkid = ACC_DMA_BLKID_OUT_HARD;\n+\tdesc->data_ptrs[3].last = 1;\n+\tdesc->data_ptrs[3].dma_ext = 0;\n+\tdesc->m2dlen = 3;\n+\tdesc->d2mlen = 1;\n+\tdesc->op_addr = op;\n+\tdesc->cbs_in_tb = 1;\n+\n+\treturn 0;\n+}\n+\n+/* Check whether the MLD operation can be processed as a single operation. 
*/\n+static inline bool\n+vrb2_check_mld_r_constraint(struct rte_bbdev_mldts_op *op) {\n+\tuint8_t layer_idx, rrep_idx;\n+\tuint16_t max_rb[VRB2_MLD_LAY_SIZE][VRB2_MLD_RREP_SIZE] = {\n+\t\t\t{188, 275, 275, 275, 0, 275},\n+\t\t\t{101, 202, 275, 275, 0, 275},\n+\t\t\t{62, 124, 186, 248, 0, 275} };\n+\n+\tif (op->mldts.c_rep == 0)\n+\t\treturn true;\n+\n+\tlayer_idx = RTE_MIN(op->mldts.num_layers - VRB2_MLD_MIN_LAYER,\n+\t\t\tVRB2_MLD_MAX_LAYER - VRB2_MLD_MIN_LAYER);\n+\trrep_idx = RTE_MIN(op->mldts.r_rep, VRB2_MLD_MAX_RREP);\n+\trte_bbdev_log_debug(\"RB %d index %d %d max %d\\n\", op->mldts.num_rbs, layer_idx, rrep_idx,\n+\t\t\tmax_rb[layer_idx][rrep_idx]);\n+\n+\treturn (op->mldts.num_rbs <= max_rb[layer_idx][rrep_idx]);\n+}\n+\n+/** Enqueue MLDTS operation split across symbols. */\n+static inline int\n+enqueue_mldts_split_op(struct acc_queue *q, struct rte_bbdev_mldts_op *op,\n+\t\tuint16_t total_enqueued_descs)\n+{\n+\tuint16_t qsize_per_re[VRB2_MLD_LAY_SIZE] = {8, 12, 16}; /* Layer 2 to 4. */\n+\tuint16_t rsize_per_re[VRB2_MLD_LAY_SIZE] = {14, 26, 42};\n+\tuint16_t sc_factor_per_rrep[VRB2_MLD_RREP_SIZE] = {12, 6, 4, 3, 0, 2};\n+\tuint32_t i, outsize_per_re = 0, sc_num, r_num, q_size, r_size, out_size, num_syms;\n+\tunion acc_dma_desc *desc, *first_desc;\n+\tuint16_t desc_idx, symb;\n+\tstruct rte_mbuf *input_q, *input_r, *output;\n+\tuint32_t in_offset, out_offset;\n+\tstruct acc_fcw_mldts *fcw;\n+\n+\tdesc_idx = ((q->sw_ring_head + total_enqueued_descs) & q->sw_ring_wrap_mask);\n+\tfirst_desc = q->ring_addr + desc_idx;\n+\tinput_q = op->mldts.qhy_input.data;\n+\tinput_r = op->mldts.r_input.data;\n+\toutput = op->mldts.output.data;\n+\tin_offset = op->mldts.qhy_input.offset;\n+\tout_offset = op->mldts.output.offset;\n+\tnum_syms = op->mldts.c_rep + 1;\n+\tfcw = &first_desc->req.fcw_mldts;\n+\tvrb2_fcw_mldts_fill(op, fcw);\n+\tfcw->Crep = 0; /* C rep forced to zero. */\n+\n+\t/* Prevent out of range access. */\n+\tif (op->mldts.r_rep > 5)\n+\t\top->mldts.r_rep = 5;\n+\tif (op->mldts.num_layers < 2)\n+\t\top->mldts.num_layers = 2;\n+\tif (op->mldts.num_layers > 4)\n+\t\top->mldts.num_layers = 4;\n+\n+\tfor (i = 0; i < op->mldts.num_layers; i++)\n+\t\toutsize_per_re += op->mldts.q_m[i];\n+\tsc_num = op->mldts.num_rbs * RTE_BBDEV_SCPERRB; /* C rep forced to zero. 
*/\n+\tr_num = op->mldts.num_rbs * sc_factor_per_rrep[op->mldts.r_rep];\n+\tq_size = qsize_per_re[op->mldts.num_layers - 2] * sc_num;\n+\tr_size = rsize_per_re[op->mldts.num_layers - 2] * r_num;\n+\tout_size =  sc_num * outsize_per_re;\n+\n+\tfor (symb = 0; symb < num_syms; symb++) {\n+\t\tdesc_idx = ((q->sw_ring_head + total_enqueued_descs + symb) & q->sw_ring_wrap_mask);\n+\t\tdesc = q->ring_addr + desc_idx;\n+\t\tacc_header_init(&desc->req);\n+\t\tif (symb == 0)\n+\t\t\tdesc->req.cbs_in_tb = num_syms;\n+\t\telse\n+\t\t\trte_memcpy(&desc->req.fcw_mldts, fcw, ACC_FCW_MLDTS_BLEN);\n+\t\tdesc->req.data_ptrs[1].address = rte_pktmbuf_iova_offset(input_q, in_offset);\n+\t\tdesc->req.data_ptrs[1].blen = q_size;\n+\t\tin_offset += q_size;\n+\t\tdesc->req.data_ptrs[1].blkid = ACC_DMA_BLKID_IN;\n+\t\tdesc->req.data_ptrs[1].last = 0;\n+\t\tdesc->req.data_ptrs[1].dma_ext = 0;\n+\t\tdesc->req.data_ptrs[2].address = rte_pktmbuf_iova_offset(input_r, 0);\n+\t\tdesc->req.data_ptrs[2].blen = r_size;\n+\t\tdesc->req.data_ptrs[2].blkid = ACC_DMA_BLKID_IN_MLD_R;\n+\t\tdesc->req.data_ptrs[2].last = 1;\n+\t\tdesc->req.data_ptrs[2].dma_ext = 0;\n+\t\tdesc->req.data_ptrs[3].address = rte_pktmbuf_iova_offset(output, out_offset);\n+\t\tdesc->req.data_ptrs[3].blen = out_size;\n+\t\tout_offset += out_size;\n+\t\tdesc->req.data_ptrs[3].blkid = ACC_DMA_BLKID_OUT_HARD;\n+\t\tdesc->req.data_ptrs[3].last = 1;\n+\t\tdesc->req.data_ptrs[3].dma_ext = 0;\n+\t\tdesc->req.m2dlen = VRB2_MLD_M2DLEN;\n+\t\tdesc->req.d2mlen = 1;\n+\t\tdesc->req.op_addr = op;\n+\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t\tacc_memdump(\"FCW\", &desc->req.fcw_mldts, sizeof(desc->req.fcw_mldts));\n+\t\tacc_memdump(\"Req Desc.\", desc, sizeof(*desc));\n+#endif\n+\t}\n+\tdesc->req.sdone_enable = 0;\n+\n+\treturn num_syms;\n+}\n+\n+/** Enqueue one MLDTS operation. */\n+static inline int\n+enqueue_mldts_one_op(struct acc_queue *q, struct rte_bbdev_mldts_op *op,\n+\t\tuint16_t total_enqueued_descs)\n+{\n+\tunion acc_dma_desc *desc;\n+\tuint16_t desc_idx;\n+\tstruct rte_mbuf *input_q, *input_r, *output;\n+\tuint32_t in_offset, out_offset;\n+\tstruct acc_fcw_mldts *fcw;\n+\n+\tdesc_idx = ((q->sw_ring_head + total_enqueued_descs) & q->sw_ring_wrap_mask);\n+\tdesc = q->ring_addr + desc_idx;\n+\tinput_q = op->mldts.qhy_input.data;\n+\tinput_r = op->mldts.r_input.data;\n+\toutput = op->mldts.output.data;\n+\tin_offset = op->mldts.qhy_input.offset;\n+\tout_offset = op->mldts.output.offset;\n+\tfcw = &desc->req.fcw_mldts;\n+\tvrb2_fcw_mldts_fill(op, fcw);\n+\tvrb2_dma_desc_mldts_fill(op, &desc->req, input_q, input_r, output,\n+\t\t\t&in_offset, &out_offset);\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tacc_memdump(\"FCW\", &desc->req.fcw_mldts, sizeof(desc->req.fcw_mldts));\n+\tacc_memdump(\"Req Desc.\", desc, sizeof(*desc));\n+#endif\n+\treturn 1;\n+}\n+\n+/* Enqueue MLDTS operations. */\n+static uint16_t\n+vrb2_enqueue_mldts(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_mldts_op **ops, uint16_t num)\n+{\n+\tint32_t aq_avail, avail;\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tuint16_t i, enqueued_descs = 0, descs_in_op;\n+\tint ret;\n+\tbool as_one_op;\n+\n+\taq_avail = acc_aq_avail(q_data, num);\n+\tif (unlikely((aq_avail <= 0) || (num == 0)))\n+\t\treturn 0;\n+\tavail = acc_ring_avail_enq(q);\n+\n+\tfor (i = 0; i < num; ++i) {\n+\t\tas_one_op = vrb2_check_mld_r_constraint(ops[i]);\n+\t\tdescs_in_op = as_one_op ? 1 : ops[i]->mldts.c_rep + 1;\n+\n+\t\t/* Check if there are available space for further processing. 
*/\n+\t\tif (unlikely(avail < descs_in_op)) {\n+\t\t\tacc_enqueue_ring_full(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\t\tavail -= descs_in_op;\n+\n+\t\tif (as_one_op)\n+\t\t\tret = enqueue_mldts_one_op(q, ops[i], enqueued_descs);\n+\t\telse\n+\t\t\tret = enqueue_mldts_split_op(q, ops[i], enqueued_descs);\n+\n+\t\tif (ret < 0) {\n+\t\t\tacc_enqueue_invalid(q_data);\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tenqueued_descs += ret;\n+\t}\n+\n+\tif (unlikely(i == 0))\n+\t\treturn 0; /* Nothing to enqueue. */\n+\n+\tacc_dma_enqueue(q, enqueued_descs, &q_data->queue_stats);\n+\n+\t/* Update stats. */\n+\tq_data->queue_stats.enqueued_count += i;\n+\tq_data->queue_stats.enqueue_err_count += num - i;\n+\treturn i;\n+}\n+\n+/*\n+ * Dequeue one MLDTS operation.\n+ * This may have been split over multiple descriptors.\n+ */\n+static inline int\n+dequeue_mldts_one_op(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct acc_queue *q, struct rte_bbdev_mldts_op **ref_op,\n+\t\tuint16_t dequeued_ops, uint32_t *aq_dequeued)\n+{\n+\tunion acc_dma_desc *desc, atom_desc, *last_desc;\n+\tunion acc_dma_rsp_desc rsp;\n+\tstruct rte_bbdev_mldts_op *op;\n+\tuint8_t descs_in_op, i;\n+\n+\tdesc = acc_desc_tail(q, dequeued_ops);\n+\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);\n+\n+\t/* Check fdone bit. */\n+\tif (!(atom_desc.rsp.val & ACC_FDONE))\n+\t\treturn -1;\n+\n+\tdescs_in_op = desc->req.cbs_in_tb;\n+\tif (descs_in_op > 1) {\n+\t\t/* Get last CB. */\n+\t\tlast_desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + descs_in_op - 1)\n+\t\t\t\t& q->sw_ring_wrap_mask);\n+\t\t/* Check if last op is ready to dequeue by checking fdone bit. If not exit. */\n+\t\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc, __ATOMIC_RELAXED);\n+\t\tif (!(atom_desc.rsp.val & ACC_FDONE))\n+\t\t\treturn -1;\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\t\tacc_memdump(\"Last Resp\", &last_desc->rsp.val, sizeof(desc->rsp.val));\n+#endif\n+\t\t/* Check each operation iteratively using fdone. */\n+\t\tfor (i = 1; i < descs_in_op - 1; i++) {\n+\t\t\tlast_desc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i)\n+\t\t\t\t\t& q->sw_ring_wrap_mask);\n+\t\t\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)last_desc,\n+\t\t\t\t\t__ATOMIC_RELAXED);\n+\t\t\tif (!(atom_desc.rsp.val & ACC_FDONE))\n+\t\t\t\treturn -1;\n+\t\t}\n+\t}\n+#ifdef RTE_LIBRTE_BBDEV_DEBUG\n+\tacc_memdump(\"Resp\", &desc->rsp.val, sizeof(desc->rsp.val));\n+#endif\n+\t/* Dequeue. */\n+\top = desc->req.op_addr;\n+\n+\t/* Clearing status, it will be set based on response. */\n+\top->status = 0;\n+\n+\tfor (i = 0; i < descs_in_op; i++) {\n+\t\tdesc = q->ring_addr + ((q->sw_ring_tail + dequeued_ops + i) & q->sw_ring_wrap_mask);\n+\t\tatom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED);\n+\t\trsp.val = atom_desc.rsp.val;\n+\t\top->status |= rsp.input_err << RTE_BBDEV_DATA_ERROR;\n+\t\top->status |= rsp.dma_err << RTE_BBDEV_DRV_ERROR;\n+\t\top->status |= rsp.fcw_err << RTE_BBDEV_DRV_ERROR;\n+\t\top->status |= rsp.engine_hung << RTE_BBDEV_ENGINE_ERROR;\n+\t}\n+\n+\tif (op->status != 0)\n+\t\tq_data->queue_stats.dequeue_err_count++;\n+\tif (op->status & (1 << RTE_BBDEV_DRV_ERROR))\n+\t\tvrb_check_ir(q->d);\n+\n+\t/* Check if this is the last desc in batch (Atomic Queue). */\n+\tif (desc->req.last_desc_in_batch) {\n+\t\t(*aq_dequeued)++;\n+\t\tdesc->req.last_desc_in_batch = 0;\n+\t}\n+\tdesc->rsp.val = ACC_DMA_DESC_TYPE;\n+\tdesc->rsp.add_info_0 = 0;\n+\t*ref_op = op;\n+\n+\treturn descs_in_op;\n+}\n+\n+/* Dequeue MLDTS operations from VRB2 device. 
*/\n+static uint16_t\n+vrb2_dequeue_mldts(struct rte_bbdev_queue_data *q_data,\n+\t\tstruct rte_bbdev_mldts_op **ops, uint16_t num)\n+{\n+\tstruct acc_queue *q = q_data->queue_private;\n+\tuint16_t dequeue_num, i, dequeued_cbs = 0;\n+\tuint32_t avail = acc_ring_avail_deq(q);\n+\tuint32_t aq_dequeued = 0;\n+\tint ret;\n+\n+\tdequeue_num = RTE_MIN(avail, num);\n+\n+\tfor (i = 0; i < dequeue_num; ++i) {\n+\t\tret = dequeue_mldts_one_op(q_data, q, &ops[i], dequeued_cbs, &aq_dequeued);\n+\t\tif (ret <= 0)\n+\t\t\tbreak;\n+\t\tdequeued_cbs += ret;\n+\t}\n+\n+\tq->aq_dequeued += aq_dequeued;\n+\tq->sw_ring_tail += dequeued_cbs;\n+\t/* Update enqueue stats. */\n+\tq_data->queue_stats.dequeued_count += i;\n+\treturn i;\n+}\n+\n /* Initialization Function */\n static void\n vrb_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)\n@@ -3416,6 +4568,8 @@ vrb_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)\n \tdev->dequeue_ldpc_dec_ops = vrb_dequeue_ldpc_dec;\n \tdev->enqueue_fft_ops = vrb_enqueue_fft;\n \tdev->dequeue_fft_ops = vrb_dequeue_fft;\n+\tdev->enqueue_mldts_ops = vrb2_enqueue_mldts;\n+\tdev->dequeue_mldts_ops = vrb2_dequeue_mldts;\n \n \td->pf_device = !strcmp(drv->driver.name, RTE_STR(VRB_PF_DRIVER_NAME));\n \td->mmio_base = pci_dev->mem_resource[0].addr;\n@@ -3432,6 +4586,16 @@ vrb_bbdev_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)\n \t\t\td->reg_addr = &vrb1_pf_reg_addr;\n \t\telse\n \t\t\td->reg_addr = &vrb1_vf_reg_addr;\n+\t} else {\n+\t\td->device_variant = VRB2_VARIANT;\n+\t\td->queue_offset = vrb2_queue_offset;\n+\t\td->fcw_ld_fill = vrb2_fcw_ld_fill;\n+\t\td->num_qgroups = VRB2_NUM_QGRPS;\n+\t\td->num_aqs = VRB2_NUM_AQS;\n+\t\tif (d->pf_device)\n+\t\t\td->reg_addr = &vrb2_pf_reg_addr;\n+\t\telse\n+\t\t\td->reg_addr = &vrb2_vf_reg_addr;\n \t}\n \n \trte_bbdev_log_debug(\"Init device %s [%s] @ vaddr %p paddr %#\"PRIx64\"\",\ndiff --git a/drivers/baseband/acc/vrb1_pf_enum.h b/drivers/baseband/acc/vrb1_pf_enum.h\nindex 82a36685e9..6dc359800f 100644\n--- a/drivers/baseband/acc/vrb1_pf_enum.h\n+++ b/drivers/baseband/acc/vrb1_pf_enum.h\n@@ -98,11 +98,18 @@ enum {\n \tACC_PF_INT_DMA_UL5G_DESC_IRQ = 8,\n \tACC_PF_INT_DMA_DL5G_DESC_IRQ = 9,\n \tACC_PF_INT_DMA_MLD_DESC_IRQ = 10,\n-\tACC_PF_INT_ARAM_ECC_1BIT_ERR = 11,\n-\tACC_PF_INT_PARITY_ERR = 12,\n-\tACC_PF_INT_QMGR_ERR = 13,\n-\tACC_PF_INT_INT_REQ_OVERFLOW = 14,\n-\tACC_PF_INT_APB_TIMEOUT = 15,\n+\tACC_PF_INT_ARAM_ACCESS_ERR = 11,\n+\tACC_PF_INT_ARAM_ECC_1BIT_ERR = 12,\n+\tACC_PF_INT_PARITY_ERR = 13,\n+\tACC_PF_INT_QMGR_OVERFLOW = 14,\n+\tACC_PF_INT_QMGR_ERR = 15,\n+\tACC_PF_INT_ATS_ERR = 22,\n+\tACC_PF_INT_ARAM_FUUL = 23,\n+\tACC_PF_INT_EXTRA_READ = 24,\n+\tACC_PF_INT_COMPLETION_TIMEOUT = 25,\n+\tACC_PF_INT_CORE_HANG = 26,\n+\tACC_PF_INT_DMA_HANG = 28,\n+\tACC_PF_INT_DS_HANG = 27,\n };\n \n #endif /* VRB1_PF_ENUM_H */\ndiff --git a/drivers/baseband/acc/vrb2_pf_enum.h b/drivers/baseband/acc/vrb2_pf_enum.h\nnew file mode 100644\nindex 0000000000..a5ef826b86\n--- /dev/null\n+++ b/drivers/baseband/acc/vrb2_pf_enum.h\n@@ -0,0 +1,124 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2021 Intel Corporation\n+ */\n+\n+#ifndef VRB2_PF_ENUM_H\n+#define VRB2_PF_ENUM_H\n+\n+/*\n+ * VRB2 Register mapping on PF BAR0\n+ * This is automatically generated from RDL, format may change with new RDL\n+ * Release.\n+ * Variable names are as is\n+ */\n+enum {\n+\tVRB2_PfQmgrEgressQueuesTemplate             = 0x0007FC00,\n+\tVRB2_PfQmgrIngressAq                        = 0x00100000,\n+\tVRB2_PfQmgrSoftReset           
             = 0x00A00034,\n+\tVRB2_PfQmgrAramAllocEn\t                    = 0x00A000a0,\n+\tVRB2_PfQmgrAramAllocSetupN0                 = 0x00A000b0,\n+\tVRB2_PfQmgrAramAllocSetupN1                 = 0x00A000b4,\n+\tVRB2_PfQmgrAramAllocSetupN2                 = 0x00A000b8,\n+\tVRB2_PfQmgrAramAllocSetupN3                 = 0x00A000bc,\n+\tVRB2_PfQmgrDepthLog2Grp                     = 0x00A00200,\n+\tVRB2_PfQmgrTholdGrp                         = 0x00A00300,\n+\tVRB2_PfQmgrGrpTmplateReg0Indx               = 0x00A00600,\n+\tVRB2_PfQmgrGrpTmplateReg1Indx               = 0x00A00700,\n+\tVRB2_PfQmgrGrpTmplateReg2Indx               = 0x00A00800,\n+\tVRB2_PfQmgrGrpTmplateReg3Indx               = 0x00A00900,\n+\tVRB2_PfQmgrGrpTmplateReg4Indx               = 0x00A00A00,\n+\tVRB2_PfQmgrGrpTmplateReg5Indx               = 0x00A00B00,\n+\tVRB2_PfQmgrGrpTmplateReg6Indx               = 0x00A00C00,\n+\tVRB2_PfQmgrGrpTmplateReg7Indx               = 0x00A00D00,\n+\tVRB2_PfQmgrGrpTmplateEnRegIndx              = 0x00A00E00,\n+\tVRB2_PfQmgrArbQDepthGrp                     = 0x00A02F00,\n+\tVRB2_PfQmgrGrpFunction0                     = 0x00A02F80,\n+\tVRB2_PfQmgrGrpPriority                      = 0x00A02FC0,\n+\tVRB2_PfQmgrVfBaseAddr                       = 0x00A08000,\n+\tVRB2_PfQmgrAqEnableVf                       = 0x00A10000,\n+\tVRB2_PfQmgrRingSizeVf                       = 0x00A20010,\n+\tVRB2_PfQmgrGrpDepthLog20Vf                  = 0x00A20020,\n+\tVRB2_PfQmgrGrpDepthLog21Vf                  = 0x00A20024,\n+\tVRB2_PfFabricM2iBufferReg                   = 0x00B30000,\n+\tVRB2_PfFecUl5gIbDebug0Reg                   = 0x00B401FC,\n+\tVRB2_PfFftConfig0                           = 0x00B58004,\n+\tVRB2_PfFftParityMask8                       = 0x00B5803C,\n+\tVRB2_PfDmaConfig0Reg                        = 0x00B80000,\n+\tVRB2_PfDmaConfig1Reg                        = 0x00B80004,\n+\tVRB2_PfDmaQmgrAddrReg                       = 0x00B80008,\n+\tVRB2_PfDmaAxcacheReg                        = 0x00B80010,\n+\tVRB2_PfDmaAxiControl                        = 0x00B8002C,\n+\tVRB2_PfDmaQmanen                            = 0x00B80040,\n+\tVRB2_PfDmaQmanenSelect                      = 0x00B80044,\n+\tVRB2_PfDmaCfgRrespBresp                     = 0x00B80814,\n+\tVRB2_PfDmaDescriptorSignature               = 0x00B80868,\n+\tVRB2_PfDmaErrorDetectionEn                  = 0x00B80870,\n+\tVRB2_PfDmaFec5GulDescBaseLoRegVf            = 0x00B88020,\n+\tVRB2_PfDmaFec5GulDescBaseHiRegVf            = 0x00B88024,\n+\tVRB2_PfDmaFec5GulRespPtrLoRegVf             = 0x00B88028,\n+\tVRB2_PfDmaFec5GulRespPtrHiRegVf             = 0x00B8802C,\n+\tVRB2_PfDmaFec5GdlDescBaseLoRegVf            = 0x00B88040,\n+\tVRB2_PfDmaFec5GdlDescBaseHiRegVf            = 0x00B88044,\n+\tVRB2_PfDmaFec5GdlRespPtrLoRegVf             = 0x00B88048,\n+\tVRB2_PfDmaFec5GdlRespPtrHiRegVf             = 0x00B8804C,\n+\tVRB2_PfDmaFec4GulDescBaseLoRegVf            = 0x00B88060,\n+\tVRB2_PfDmaFec4GulDescBaseHiRegVf            = 0x00B88064,\n+\tVRB2_PfDmaFec4GulRespPtrLoRegVf             = 0x00B88068,\n+\tVRB2_PfDmaFec4GulRespPtrHiRegVf             = 0x00B8806C,\n+\tVRB2_PfDmaFec4GdlDescBaseLoRegVf            = 0x00B88080,\n+\tVRB2_PfDmaFec4GdlDescBaseHiRegVf            = 0x00B88084,\n+\tVRB2_PfDmaFec4GdlRespPtrLoRegVf             = 0x00B88088,\n+\tVRB2_PfDmaFec4GdlRespPtrHiRegVf             = 0x00B8808C,\n+\tVRB2_PfDmaFftDescBaseLoRegVf                = 0x00B880A0,\n+\tVRB2_PfDmaFftDescBaseHiRegVf                = 0x00B880A4,\n+\tVRB2_PfDmaFftRespPtrLoRegVf                 = 
0x00B880A8,\n+\tVRB2_PfDmaFftRespPtrHiRegVf                 = 0x00B880AC,\n+\tVRB2_PfDmaMldDescBaseLoRegVf                = 0x00B880C0,\n+\tVRB2_PfDmaMldDescBaseHiRegVf                = 0x00B880C4,\n+\tVRB2_PfQosmonAEvalOverflow0                 = 0x00B90008,\n+\tVRB2_PfPermonACntrlRegVf                    = 0x00B98000,\n+\tVRB2_PfQosmonBEvalOverflow0                 = 0x00BA0008,\n+\tVRB2_PfPermonBCntrlRegVf                    = 0x00BA8000,\n+\tVRB2_PfPermonCCntrlRegVf                    = 0x00BB8000,\n+\tVRB2_PfHiInfoRingBaseLoRegPf                = 0x00C84014,\n+\tVRB2_PfHiInfoRingBaseHiRegPf                = 0x00C84018,\n+\tVRB2_PfHiInfoRingPointerRegPf               = 0x00C8401C,\n+\tVRB2_PfHiInfoRingIntWrEnRegPf               = 0x00C84020,\n+\tVRB2_PfHiBlockTransmitOnErrorEn             = 0x00C84038,\n+\tVRB2_PfHiCfgMsiIntWrEnRegPf                 = 0x00C84040,\n+\tVRB2_PfHiMsixVectorMapperPf                 = 0x00C84060,\n+\tVRB2_PfHiPfMode                             = 0x00C84108,\n+\tVRB2_PfHiClkGateHystReg                     = 0x00C8410C,\n+\tVRB2_PfHiMsiDropEnableReg                   = 0x00C84114,\n+\tVRB2_PfHiSectionPowerGatingReq              = 0x00C84128,\n+\tVRB2_PfHiSectionPowerGatingAck              = 0x00C8412C,\n+};\n+\n+/* TIP PF Interrupt numbers */\n+enum {\n+\tVRB2_PF_INT_QMGR_AQ_OVERFLOW = 0,\n+\tVRB2_PF_INT_DOORBELL_VF_2_PF = 1,\n+\tVRB2_PF_INT_ILLEGAL_FORMAT = 2,\n+\tVRB2_PF_INT_QMGR_DISABLED_ACCESS = 3,\n+\tVRB2_PF_INT_QMGR_AQ_OVERTHRESHOLD = 4,\n+\tVRB2_PF_INT_DMA_DL_DESC_IRQ = 5,\n+\tVRB2_PF_INT_DMA_UL_DESC_IRQ = 6,\n+\tVRB2_PF_INT_DMA_FFT_DESC_IRQ = 7,\n+\tVRB2_PF_INT_DMA_UL5G_DESC_IRQ = 8,\n+\tVRB2_PF_INT_DMA_DL5G_DESC_IRQ = 9,\n+\tVRB2_PF_INT_DMA_MLD_DESC_IRQ = 10,\n+\tVRB2_PF_INT_ARAM_ACCESS_ERR = 11,\n+\tVRB2_PF_INT_ARAM_ECC_1BIT_ERR = 12,\n+\tVRB2_PF_INT_PARITY_ERR = 13,\n+\tVRB2_PF_INT_QMGR_OVERFLOW = 14,\n+\tVRB2_PF_INT_QMGR_ERR = 15,\n+\tVRB2_PF_INT_ATS_ERR = 22,\n+\tVRB2_PF_INT_ARAM_FUUL = 23,\n+\tVRB2_PF_INT_EXTRA_READ = 24,\n+\tVRB2_PF_INT_COMPLETION_TIMEOUT = 25,\n+\tVRB2_PF_INT_CORE_HANG = 26,\n+\tVRB2_PF_INT_DMA_HANG = 28,\n+\tVRB2_PF_INT_DS_HANG = 27,\n+};\n+\n+#endif /* VRB2_PF_ENUM_H */\ndiff --git a/drivers/baseband/acc/vrb2_vf_enum.h b/drivers/baseband/acc/vrb2_vf_enum.h\nnew file mode 100644\nindex 0000000000..69debc9116\n--- /dev/null\n+++ b/drivers/baseband/acc/vrb2_vf_enum.h\n@@ -0,0 +1,121 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2021 Intel Corporation\n+ */\n+\n+#ifndef VRB2_VF_ENUM_H\n+#define VRB2_VF_ENUM_H\n+\n+/*\n+ * VRB2 Register mapping on VF BAR0\n+ * This is automatically generated from RDL, format may change with new RDL\n+ */\n+enum {\n+\tVRB2_VfHiVfToPfDbellVf           = 0x00000000,\n+\tVRB2_VfHiPfToVfDbellVf           = 0x00000008,\n+\tVRB2_VfHiInfoRingBaseLoVf        = 0x00000010,\n+\tVRB2_VfHiInfoRingBaseHiVf        = 0x00000014,\n+\tVRB2_VfHiInfoRingPointerVf       = 0x00000018,\n+\tVRB2_VfHiInfoRingIntWrEnVf       = 0x00000020,\n+\tVRB2_VfHiInfoRingPf2VfWrEnVf     = 0x00000024,\n+\tVRB2_VfHiMsixVectorMapperVf      = 0x00000060,\n+\tVRB2_VfHiDeviceStatus            = 0x00000068,\n+\tVRB2_VfHiInterruptSrc            = 0x00000070,\n+\tVRB2_VfDmaFec5GulDescBaseLoRegVf = 0x00000120,\n+\tVRB2_VfDmaFec5GulDescBaseHiRegVf = 0x00000124,\n+\tVRB2_VfDmaFec5GulRespPtrLoRegVf  = 0x00000128,\n+\tVRB2_VfDmaFec5GulRespPtrHiRegVf  = 0x0000012C,\n+\tVRB2_VfDmaFec5GdlDescBaseLoRegVf = 0x00000140,\n+\tVRB2_VfDmaFec5GdlDescBaseHiRegVf = 0x00000144,\n+\tVRB2_VfDmaFec5GdlRespPtrLoRegVf  = 
0x00000148,\n+\tVRB2_VfDmaFec5GdlRespPtrHiRegVf  = 0x0000014C,\n+\tVRB2_VfDmaFec4GulDescBaseLoRegVf = 0x00000160,\n+\tVRB2_VfDmaFec4GulDescBaseHiRegVf = 0x00000164,\n+\tVRB2_VfDmaFec4GulRespPtrLoRegVf  = 0x00000168,\n+\tVRB2_VfDmaFec4GulRespPtrHiRegVf  = 0x0000016C,\n+\tVRB2_VfDmaFec4GdlDescBaseLoRegVf = 0x00000180,\n+\tVRB2_VfDmaFec4GdlDescBaseHiRegVf = 0x00000184,\n+\tVRB2_VfDmaFec4GdlRespPtrLoRegVf  = 0x00000188,\n+\tVRB2_VfDmaFec4GdlRespPtrHiRegVf  = 0x0000018C,\n+\tVRB2_VfDmaFftDescBaseLoRegVf     = 0x000001A0,\n+\tVRB2_VfDmaFftDescBaseHiRegVf     = 0x000001A4,\n+\tVRB2_VfDmaFftRespPtrLoRegVf      = 0x000001A8,\n+\tVRB2_VfDmaFftRespPtrHiRegVf      = 0x000001AC,\n+\tVRB2_VfDmaMldDescBaseLoRegVf     = 0x000001C0,\n+\tVRB2_VfDmaMldDescBaseHiRegVf     = 0x000001C4,\n+\tVRB2_VfDmaMldRespPtrLoRegVf      = 0x000001C8,\n+\tVRB2_VfDmaMldRespPtrHiRegVf      = 0x000001CC,\n+\tVRB2_VfPmACntrlRegVf             = 0x00000200,\n+\tVRB2_VfPmACountVf                = 0x00000208,\n+\tVRB2_VfPmAKCntLoVf               = 0x00000210,\n+\tVRB2_VfPmAKCntHiVf               = 0x00000214,\n+\tVRB2_VfPmADeltaCntLoVf           = 0x00000220,\n+\tVRB2_VfPmADeltaCntHiVf           = 0x00000224,\n+\tVRB2_VfPmBCntrlRegVf             = 0x00000240,\n+\tVRB2_VfPmBCountVf                = 0x00000248,\n+\tVRB2_VfPmBKCntLoVf               = 0x00000250,\n+\tVRB2_VfPmBKCntHiVf               = 0x00000254,\n+\tVRB2_VfPmBDeltaCntLoVf           = 0x00000260,\n+\tVRB2_VfPmBDeltaCntHiVf           = 0x00000264,\n+\tVRB2_VfPmCCntrlRegVf             = 0x00000280,\n+\tVRB2_VfPmCCountVf                = 0x00000288,\n+\tVRB2_VfPmCKCntLoVf               = 0x00000290,\n+\tVRB2_VfPmCKCntHiVf               = 0x00000294,\n+\tVRB2_VfPmCDeltaCntLoVf           = 0x000002A0,\n+\tVRB2_VfPmCDeltaCntHiVf           = 0x000002A4,\n+\tVRB2_VfPmDCntrlRegVf             = 0x000002C0,\n+\tVRB2_VfPmDCountVf                = 0x000002C8,\n+\tVRB2_VfPmDKCntLoVf               = 0x000002D0,\n+\tVRB2_VfPmDKCntHiVf               = 0x000002D4,\n+\tVRB2_VfPmDDeltaCntLoVf           = 0x000002E0,\n+\tVRB2_VfPmDDeltaCntHiVf           = 0x000002E4,\n+\tVRB2_VfPmECntrlRegVf             = 0x00000300,\n+\tVRB2_VfPmECountVf                = 0x00000308,\n+\tVRB2_VfPmEKCntLoVf               = 0x00000310,\n+\tVRB2_VfPmEKCntHiVf               = 0x00000314,\n+\tVRB2_VfPmEDeltaCntLoVf           = 0x00000320,\n+\tVRB2_VfPmEDeltaCntHiVf           = 0x00000324,\n+\tVRB2_VfPmFCntrlRegVf             = 0x00000340,\n+\tVRB2_VfPmFCountVf                = 0x00000348,\n+\tVRB2_VfPmFKCntLoVf               = 0x00000350,\n+\tVRB2_VfPmFKCntHiVf               = 0x00000354,\n+\tVRB2_VfPmFDeltaCntLoVf           = 0x00000360,\n+\tVRB2_VfPmFDeltaCntHiVf           = 0x00000364,\n+\tVRB2_VfQmgrAqReset0              = 0x00000600,\n+\tVRB2_VfQmgrAqReset1              = 0x00000604,\n+\tVRB2_VfQmgrAqReset2              = 0x00000608,\n+\tVRB2_VfQmgrAqReset3              = 0x0000060C,\n+\tVRB2_VfQmgrRingSizeVf            = 0x00000610,\n+\tVRB2_VfQmgrGrpDepthLog20Vf       = 0x00000620,\n+\tVRB2_VfQmgrGrpDepthLog21Vf       = 0x00000624,\n+\tVRB2_VfQmgrGrpDepthLog22Vf       = 0x00000628,\n+\tVRB2_VfQmgrGrpDepthLog23Vf       = 0x0000062C,\n+\tVRB2_VfQmgrGrpFunction0Vf        = 0x00000630,\n+\tVRB2_VfQmgrGrpFunction1Vf        = 0x00000634,\n+\tVRB2_VfQmgrAramUsageN0           = 0x00000640,\n+\tVRB2_VfQmgrAramUsageN1           = 0x00000644,\n+\tVRB2_VfQmgrAramUsageN2           = 0x00000648,\n+\tVRB2_VfQmgrAramUsageN3           = 0x0000064C,\n+\tVRB2_VfHiMSIXBaseLoRegVf         = 
0x00001000,\n+\tVRB2_VfHiMSIXBaseHiRegVf         = 0x00001004,\n+\tVRB2_VfHiMSIXBaseDataRegVf       = 0x00001008,\n+\tVRB2_VfHiMSIXBaseMaskRegVf       = 0x0000100C,\n+\tVRB2_VfHiMSIXPBABaseLoRegVf      = 0x00003000,\n+\tVRB2_VfQmgrIngressAq             = 0x00004000,\n+};\n+\n+/* TIP VF Interrupt numbers */\n+enum {\n+\tVRB2_VF_INT_QMGR_AQ_OVERFLOW = 0,\n+\tVRB2_VF_INT_DOORBELL_PF_2_VF = 1,\n+\tVRB2_VF_INT_ILLEGAL_FORMAT = 2,\n+\tVRB2_VF_INT_QMGR_DISABLED_ACCESS = 3,\n+\tVRB2_VF_INT_QMGR_AQ_OVERTHRESHOLD = 4,\n+\tVRB2_VF_INT_DMA_DL_DESC_IRQ = 5,\n+\tVRB2_VF_INT_DMA_UL_DESC_IRQ = 6,\n+\tVRB2_VF_INT_DMA_FFT_DESC_IRQ = 7,\n+\tVRB2_VF_INT_DMA_UL5G_DESC_IRQ = 8,\n+\tVRB2_VF_INT_DMA_DL5G_DESC_IRQ = 9,\n+\tVRB2_VF_INT_DMA_MLD_DESC_IRQ = 10,\n+};\n+\n+#endif /* VRB2_VF_ENUM_H */\ndiff --git a/drivers/baseband/acc/vrb_pmd.h b/drivers/baseband/acc/vrb_pmd.h\nindex 1cabc0b7f4..def8ceaf93 100644\n--- a/drivers/baseband/acc/vrb_pmd.h\n+++ b/drivers/baseband/acc/vrb_pmd.h\n@@ -8,6 +8,8 @@\n #include \"acc_common.h\"\n #include \"vrb1_pf_enum.h\"\n #include \"vrb1_vf_enum.h\"\n+#include \"vrb2_pf_enum.h\"\n+#include \"vrb2_vf_enum.h\"\n #include \"vrb_cfg.h\"\n \n /* Helper macro for logging */\n@@ -31,12 +33,13 @@\n #define RTE_VRB1_VENDOR_ID           (0x8086)\n #define RTE_VRB1_PF_DEVICE_ID        (0x57C0)\n #define RTE_VRB1_VF_DEVICE_ID        (0x57C1)\n-\n-#define VRB1_VARIANT               2\n+#define RTE_VRB2_VENDOR_ID           (0x8086)\n+#define RTE_VRB2_PF_DEVICE_ID        (0x57C2)\n+#define RTE_VRB2_VF_DEVICE_ID        (0x57C3)\n \n #define VRB_NUM_ACCS                 6\n #define VRB_MAX_QGRPS                32\n-#define VRB_MAX_AQS                  32\n+#define VRB_MAX_AQS                  64\n \n #define ACC_STATUS_WAIT      10\n #define ACC_STATUS_TO        100\n@@ -61,7 +64,6 @@\n #define VRB1_SIG_DL_4G_LAST 23\n #define VRB1_SIG_FFT        24\n #define VRB1_SIG_FFT_LAST   24\n-\n #define VRB1_NUM_ACCS       5\n \n /* VRB1 Configuration */\n@@ -90,6 +92,69 @@\n #define VRB1_MAX_PF_MSIX            (256+32)\n #define VRB1_MAX_VF_MSIX            (256+7)\n \n+/* VRB2 specific flags */\n+\n+#define VRB2_NUM_VFS        64\n+#define VRB2_NUM_QGRPS      32\n+#define VRB2_NUM_AQS        64\n+#define VRB2_GRP_ID_SHIFT    12 /* Queue Index Hierarchy */\n+#define VRB2_VF_ID_SHIFT     6  /* Queue Index Hierarchy */\n+#define VRB2_WORDS_IN_ARAM_SIZE (512 * 1024 / 4)\n+#define VRB2_NUM_ACCS        6\n+#define VRB2_AQ_REG_NUM      4\n+\n+/* VRB2 Mapping of signals for the available engines */\n+#define VRB2_SIG_UL_5G       0\n+#define VRB2_SIG_UL_5G_LAST  5\n+#define VRB2_SIG_DL_5G       9\n+#define VRB2_SIG_DL_5G_LAST 11\n+#define VRB2_SIG_UL_4G      12\n+#define VRB2_SIG_UL_4G_LAST 16\n+#define VRB2_SIG_DL_4G      21\n+#define VRB2_SIG_DL_4G_LAST 23\n+#define VRB2_SIG_FFT        24\n+#define VRB2_SIG_FFT_LAST   26\n+#define VRB2_SIG_MLD        30\n+#define VRB2_SIG_MLD_LAST   31\n+#define VRB2_FFT_NUM        3\n+\n+#define VRB2_FCW_MLDTS_BLEN 32\n+#define VRB2_MLD_MIN_LAYER   2\n+#define VRB2_MLD_MAX_LAYER   4\n+#define VRB2_MLD_MAX_RREP    5\n+#define VRB2_MLD_LAY_SIZE    3\n+#define VRB2_MLD_RREP_SIZE   6\n+#define VRB2_MLD_M2DLEN      3\n+\n+#define VRB2_MAX_PF_MSIX      (256+32)\n+#define VRB2_MAX_VF_MSIX      (64+7)\n+#define VRB2_REG_IRQ_EN_ALL   0xFFFFFFFF  /* Enable all interrupts */\n+#define VRB2_FABRIC_MODE      0x8000103\n+#define VRB2_CFG_DMA_ERROR    0x7DF\n+#define VRB2_CFG_AXI_CACHE    0x11\n+#define VRB2_CFG_QMGR_HI_P    0x0F0F\n+#define VRB2_RESET_HARD       0x1FF\n+#define 
VRB2_ENGINES_MAX      9\n+#define VRB2_GPEX_AXIMAP_NUM  17\n+#define VRB2_CLOCK_GATING_EN  0x30000\n+#define VRB2_FFT_CFG_0        0x2001\n+#define VRB2_FFT_ECC          0x60\n+#define VRB2_FFT_RAM_EN       0x80008000\n+#define VRB2_FFT_RAM_DIS      0x0\n+#define VRB2_FFT_RAM_SIZE     512\n+#define VRB2_CLK_EN           0x00010A01\n+#define VRB2_CLK_DIS          0x01F10A01\n+#define VRB2_PG_MASK_0        0x1F\n+#define VRB2_PG_MASK_1        0xF\n+#define VRB2_PG_MASK_2        0x1\n+#define VRB2_PG_MASK_3        0x0\n+#define VRB2_PG_MASK_FFT      1\n+#define VRB2_PG_MASK_4GUL     4\n+#define VRB2_PG_MASK_5GUL     8\n+#define VRB2_PF_PM_REG_OFFSET 0x10000\n+#define VRB2_VF_PM_REG_OFFSET 0x40\n+#define VRB2_PM_START         0x2\n+\n struct acc_registry_addr {\n \tunsigned int dma_ring_dl5g_hi;\n \tunsigned int dma_ring_dl5g_lo;\n@@ -218,4 +283,92 @@ static const struct acc_registry_addr vrb1_vf_reg_addr = {\n \t.pf2vf_doorbell = VRB1_VfHiPfToVfDbellVf,\n };\n \n+\n+/* Structure holding registry addresses for PF */\n+static const struct acc_registry_addr vrb2_pf_reg_addr = {\n+\t.dma_ring_dl5g_hi =  VRB2_PfDmaFec5GdlDescBaseHiRegVf,\n+\t.dma_ring_dl5g_lo =  VRB2_PfDmaFec5GdlDescBaseLoRegVf,\n+\t.dma_ring_ul5g_hi =  VRB2_PfDmaFec5GulDescBaseHiRegVf,\n+\t.dma_ring_ul5g_lo =  VRB2_PfDmaFec5GulDescBaseLoRegVf,\n+\t.dma_ring_dl4g_hi =  VRB2_PfDmaFec4GdlDescBaseHiRegVf,\n+\t.dma_ring_dl4g_lo =  VRB2_PfDmaFec4GdlDescBaseLoRegVf,\n+\t.dma_ring_ul4g_hi =  VRB2_PfDmaFec4GulDescBaseHiRegVf,\n+\t.dma_ring_ul4g_lo =  VRB2_PfDmaFec4GulDescBaseLoRegVf,\n+\t.dma_ring_fft_hi =   VRB2_PfDmaFftDescBaseHiRegVf,\n+\t.dma_ring_fft_lo =   VRB2_PfDmaFftDescBaseLoRegVf,\n+\t.dma_ring_mld_hi =   VRB2_PfDmaMldDescBaseHiRegVf,\n+\t.dma_ring_mld_lo =   VRB2_PfDmaMldDescBaseLoRegVf,\n+\t.ring_size =         VRB2_PfQmgrRingSizeVf,\n+\t.info_ring_hi =      VRB2_PfHiInfoRingBaseHiRegPf,\n+\t.info_ring_lo =      VRB2_PfHiInfoRingBaseLoRegPf,\n+\t.info_ring_en =      VRB2_PfHiInfoRingIntWrEnRegPf,\n+\t.info_ring_ptr =     VRB2_PfHiInfoRingPointerRegPf,\n+\t.tail_ptrs_dl5g_hi = VRB2_PfDmaFec5GdlRespPtrHiRegVf,\n+\t.tail_ptrs_dl5g_lo = VRB2_PfDmaFec5GdlRespPtrLoRegVf,\n+\t.tail_ptrs_ul5g_hi = VRB2_PfDmaFec5GulRespPtrHiRegVf,\n+\t.tail_ptrs_ul5g_lo = VRB2_PfDmaFec5GulRespPtrLoRegVf,\n+\t.tail_ptrs_dl4g_hi = VRB2_PfDmaFec4GdlRespPtrHiRegVf,\n+\t.tail_ptrs_dl4g_lo = VRB2_PfDmaFec4GdlRespPtrLoRegVf,\n+\t.tail_ptrs_ul4g_hi = VRB2_PfDmaFec4GulRespPtrHiRegVf,\n+\t.tail_ptrs_ul4g_lo = VRB2_PfDmaFec4GulRespPtrLoRegVf,\n+\t.tail_ptrs_fft_hi =  VRB2_PfDmaFftRespPtrHiRegVf,\n+\t.tail_ptrs_fft_lo =  VRB2_PfDmaFftRespPtrLoRegVf,\n+\t.tail_ptrs_mld_hi =  VRB2_PfDmaFftRespPtrHiRegVf,\n+\t.tail_ptrs_mld_lo =  VRB2_PfDmaFftRespPtrLoRegVf,\n+\t.depth_log0_offset = VRB2_PfQmgrGrpDepthLog20Vf,\n+\t.depth_log1_offset = VRB2_PfQmgrGrpDepthLog21Vf,\n+\t.qman_group_func =   VRB2_PfQmgrGrpFunction0,\n+\t.hi_mode =           VRB2_PfHiMsixVectorMapperPf,\n+\t.pf_mode =           VRB2_PfHiPfMode,\n+\t.pmon_ctrl_a =       VRB2_PfPermonACntrlRegVf,\n+\t.pmon_ctrl_b =       VRB2_PfPermonBCntrlRegVf,\n+\t.pmon_ctrl_c =       VRB2_PfPermonCCntrlRegVf,\n+\t.vf2pf_doorbell =    0,\n+\t.pf2vf_doorbell =    0,\n+};\n+\n+/* Structure holding registry addresses for VF */\n+static const struct acc_registry_addr vrb2_vf_reg_addr = {\n+\t.dma_ring_dl5g_hi =  VRB2_VfDmaFec5GdlDescBaseHiRegVf,\n+\t.dma_ring_dl5g_lo =  VRB2_VfDmaFec5GdlDescBaseLoRegVf,\n+\t.dma_ring_ul5g_hi =  VRB2_VfDmaFec5GulDescBaseHiRegVf,\n+\t.dma_ring_ul5g_lo =  
VRB2_VfDmaFec5GulDescBaseLoRegVf,\n+\t.dma_ring_dl4g_hi =  VRB2_VfDmaFec4GdlDescBaseHiRegVf,\n+\t.dma_ring_dl4g_lo =  VRB2_VfDmaFec4GdlDescBaseLoRegVf,\n+\t.dma_ring_ul4g_hi =  VRB2_VfDmaFec4GulDescBaseHiRegVf,\n+\t.dma_ring_ul4g_lo =  VRB2_VfDmaFec4GulDescBaseLoRegVf,\n+\t.dma_ring_fft_hi =   VRB2_VfDmaFftDescBaseHiRegVf,\n+\t.dma_ring_fft_lo =   VRB2_VfDmaFftDescBaseLoRegVf,\n+\t.dma_ring_mld_hi =   VRB2_VfDmaMldDescBaseHiRegVf,\n+\t.dma_ring_mld_lo =   VRB2_VfDmaMldDescBaseLoRegVf,\n+\t.ring_size =         VRB2_VfQmgrRingSizeVf,\n+\t.info_ring_hi =      VRB2_VfHiInfoRingBaseHiVf,\n+\t.info_ring_lo =      VRB2_VfHiInfoRingBaseLoVf,\n+\t.info_ring_en =      VRB2_VfHiInfoRingIntWrEnVf,\n+\t.info_ring_ptr =     VRB2_VfHiInfoRingPointerVf,\n+\t.tail_ptrs_dl5g_hi = VRB2_VfDmaFec5GdlRespPtrHiRegVf,\n+\t.tail_ptrs_dl5g_lo = VRB2_VfDmaFec5GdlRespPtrLoRegVf,\n+\t.tail_ptrs_ul5g_hi = VRB2_VfDmaFec5GulRespPtrHiRegVf,\n+\t.tail_ptrs_ul5g_lo = VRB2_VfDmaFec5GulRespPtrLoRegVf,\n+\t.tail_ptrs_dl4g_hi = VRB2_VfDmaFec4GdlRespPtrHiRegVf,\n+\t.tail_ptrs_dl4g_lo = VRB2_VfDmaFec4GdlRespPtrLoRegVf,\n+\t.tail_ptrs_ul4g_hi = VRB2_VfDmaFec4GulRespPtrHiRegVf,\n+\t.tail_ptrs_ul4g_lo = VRB2_VfDmaFec4GulRespPtrLoRegVf,\n+\t.tail_ptrs_fft_hi =  VRB2_VfDmaFftRespPtrHiRegVf,\n+\t.tail_ptrs_fft_lo =  VRB2_VfDmaFftRespPtrLoRegVf,\n+\t.tail_ptrs_mld_hi =  VRB2_VfDmaMldRespPtrHiRegVf,\n+\t.tail_ptrs_mld_lo =  VRB2_VfDmaMldRespPtrLoRegVf,\n+\t.depth_log0_offset = VRB2_VfQmgrGrpDepthLog20Vf,\n+\t.depth_log1_offset = VRB2_VfQmgrGrpDepthLog21Vf,\n+\t.qman_group_func =   VRB2_VfQmgrGrpFunction0Vf,\n+\t.hi_mode =           VRB2_VfHiMsixVectorMapperVf,\n+\t.pf_mode =           0,\n+\t.pmon_ctrl_a =       VRB2_VfPmACntrlRegVf,\n+\t.pmon_ctrl_b =       VRB2_VfPmBCntrlRegVf,\n+\t.pmon_ctrl_c =       VRB2_VfPmCCntrlRegVf,\n+\t.vf2pf_doorbell =    VRB2_VfHiVfToPfDbellVf,\n+\t.pf2vf_doorbell =    VRB2_VfHiPfToVfDbellVf,\n+};\n+\n+\n #endif /* _VRB_PMD_H_ */\n",
    "prefixes": [
        "v2",
        "6/7"
    ]
}