get:
Show a patch.

patch:
Partially update a patch (send only the fields to change).

put:
Update a patch (replace its writable fields).
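
As a quick illustration, the GET operation needs no authentication and can be driven from any HTTP client. A minimal sketch in Python using the requests library (the printed values simply echo fields of the response shown below):

import requests

# Fetch the patch detail resource; GET is anonymous on Patchwork.
resp = requests.get("http://patchwork.dpdk.org/api/patches/135394/")
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # "[1/4] common/qat: add files specific to GEN5"
print(patch["state"])  # "superseded"
print(patch["mbox"])   # raw mbox URL, suitable for piping into `git am`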

GET /api/patches/135394/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 135394,
    "url": "http://patchwork.dpdk.org/api/patches/135394/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20231220132616.318983-1-nishikanta.nayak@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20231220132616.318983-1-nishikanta.nayak@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20231220132616.318983-1-nishikanta.nayak@intel.com",
    "date": "2023-12-20T13:26:13",
    "name": "[1/4] common/qat: add files specific to GEN5",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "dbdc7ff24dc98fa56d4ac932de89c6413f44c9b7",
    "submitter": {
        "id": 3253,
        "url": "http://patchwork.dpdk.org/api/people/3253/?format=api",
        "name": "Nayak, Nishikanta",
        "email": "nishikanta.nayak@intel.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patchwork.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20231220132616.318983-1-nishikanta.nayak@intel.com/mbox/",
    "series": [
        {
            "id": 30630,
            "url": "http://patchwork.dpdk.org/api/series/30630/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=30630",
            "date": "2023-12-20T13:26:13",
            "name": "[1/4] common/qat: add files specific to GEN5",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/30630/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/135394/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/135394/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 012614374A;\n\tWed, 20 Dec 2023 14:26:50 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E1F5740685;\n\tWed, 20 Dec 2023 14:26:49 +0100 (CET)",
            "from mgamail.intel.com (mgamail.intel.com [134.134.136.126])\n by mails.dpdk.org (Postfix) with ESMTP id 0FA8B4021F\n for <dev@dpdk.org>; Wed, 20 Dec 2023 14:26:47 +0100 (CET)",
            "from fmsmga008.fm.intel.com ([10.253.24.58])\n by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 20 Dec 2023 05:26:46 -0800",
            "from silpixa00400355.ir.intel.com (HELO\n silpixa00400355.ger.corp.intel.com) ([10.237.222.80])\n by fmsmga008.fm.intel.com with ESMTP; 20 Dec 2023 05:26:43 -0800"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1703078808; x=1734614808;\n h=from:to:cc:subject:date:message-id:mime-version:\n content-transfer-encoding;\n bh=QzLTLRsexr7Dnm+VZ2J+azEQlNbGhVXo/9zf0/VCz2s=;\n b=dVh/hcZpy0fl4owGTIJDxm3Ww3/HEwdrDTlYhoKAg3NNd+LdIXV1gmbm\n 5iTcGthaVbDkoH+eBD3hXKRjPVRTHV6tFysZUfYooFHgsrD2SWRmCLlA1\n 2TVyQIFbHdSsrhqUkyZhdMHct8GhLrsfr/loofELlXKxi6ZkWuLtiaI1o\n xdQKlGjFHyVZ3SO1pfzmI24LBGLHZCjE7XoYTEuM+GCpDTTEAs6njiHEB\n 8upryNNIygcuptOyLn5N07BJyPQH4wyWQ1YHSkKIqHFfrNJ0ui2ywybFf\n cKXdTRQTFAtkJS8PYPYbMxX7kDEz2Kj+NGwliGG598Gep54oS60sIPnRs A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6600,9927,10929\"; a=\"380801520\"",
            "E=Sophos;i=\"6.04,291,1695711600\"; d=\"scan'208\";a=\"380801520\"",
            "E=McAfee;i=\"6600,9927,10929\"; a=\"842276242\"",
            "E=Sophos;i=\"6.04,291,1695711600\"; d=\"scan'208\";a=\"842276242\""
        ],
        "X-ExtLoop1": "1",
        "From": "Nishikant Nayak <nishikanta.nayak@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "kai.ji@intel.com, ciara.power@intel.com, arkadiuszx.kusztal@intel.com,\n Nishikant Nayak <nishikanta.nayak@intel.com>,\n Thomas Monjalon <thomas@monjalon.net>,\n Anatoly Burakov <anatoly.burakov@intel.com>",
        "Subject": "[PATCH 1/4] common/qat: add files specific to GEN5",
        "Date": "Wed, 20 Dec 2023 13:26:13 +0000",
        "Message-Id": "<20231220132616.318983-1-nishikanta.nayak@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Adding GEN5 files for handling GEN5 specific operaions.\nThese files are inherited from the existing files/APIs\nwhich has some changes specific GEN5 requirements\nAlso updated the mailmap file.\n\nSigned-off-by: Nishikant Nayak <nishikanta.nayak@intel.com>\n---\n .mailmap                                      |   1 +\n drivers/common/qat/dev/qat_dev_gen5.c         | 336 ++++++++++++++++++\n .../adf_transport_access_macros_gen5.h        |  51 +++\n .../adf_transport_access_macros_gen5vf.h      |  48 +++\n drivers/crypto/qat/dev/qat_crypto_pmd_gen5.c  | 336 ++++++++++++++++++\n 5 files changed, 772 insertions(+)\n create mode 100644 drivers/common/qat/dev/qat_dev_gen5.c\n create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen5.h\n create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros_gen5vf.h\n create mode 100644 drivers/crypto/qat/dev/qat_crypto_pmd_gen5.c",
    "diff": "diff --git a/.mailmap b/.mailmap\nindex ab0742a382..ef8e0b79e5 100644\n--- a/.mailmap\n+++ b/.mailmap\n@@ -1027,6 +1027,7 @@ Ning Li <muziding001@163.com> <lining18@jd.com>\n Nipun Gupta <nipun.gupta@amd.com> <nipun.gupta@nxp.com>\n Nir Efrati <nir.efrati@intel.com>\n Nirmoy Das <ndas@suse.de>\n+Nishikant Nayak <nishikanta.nayak@intel.com>\n Nithin Dabilpuram <ndabilpuram@marvell.com> <nithin.dabilpuram@caviumnetworks.com>\n Nitin Saxena <nitin.saxena@caviumnetworks.com>\n Nitzan Weller <nitzanwe@mellanox.com>\ndiff --git a/drivers/common/qat/dev/qat_dev_gen5.c b/drivers/common/qat/dev/qat_dev_gen5.c\nnew file mode 100644\nindex 0000000000..dc2bcd5650\n--- /dev/null\n+++ b/drivers/common/qat/dev/qat_dev_gen5.c\n@@ -0,0 +1,336 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Intel Corporation\n+ */\n+\n+#include <rte_pci.h>\n+#include <rte_vfio.h>\n+\n+#include \"qat_device.h\"\n+#include \"qat_qp.h\"\n+#include \"adf_transport_access_macros_gen5vf.h\"\n+#include \"adf_pf2vf_msg.h\"\n+#include \"qat_pf2vf.h\"\n+\n+#include <stdint.h>\n+#include <linux/kernel.h>\n+#include <sys/ioctl.h>\n+#include <unistd.h>\n+\n+#define BITS_PER_LONG\t\t(sizeof(unsigned long) * 8)\n+#define BITS_PER_ULONG\t\t(sizeof(unsigned long) * 8)\n+\n+#define VFIO_PCI_LCE_DEVICE_CFG_REGION_INDEX\tVFIO_PCI_NUM_REGIONS\n+#define VFIO_PCI_LCE_CY_CFG_REGION_INDEX\t(VFIO_PCI_NUM_REGIONS + 2)\n+#define VFIO_PCI_LCE_RING_CFG_REGION_INDEX\t(VFIO_PCI_NUM_REGIONS + 4)\n+\n+#define min_t(type, x, y) ({                \\\n+\t\t\t\ttype __min1 = (x);                      \\\n+\t\t\t\ttype __min2 = (y);                      \\\n+\t\t\t\t__min1 < __min2 ? __min1 : __min2; })\n+\n+/**\n+ * struct lce_vfio_dev_cap - LCE device capabilities\n+ *\n+ * Device level capabilities and service level capabilities\n+ */\n+struct lce_vfio_dev_cap {\n+\tuint16_t device_num;\n+\tuint16_t device_type;\n+\n+#define LCE_DEVICE_CAP_DYNAMIC_BANK     BIT(31)\n+\tuint32_t capability_mask;\n+\tuint32_t extended_capabilities;\n+\tuint16_t max_banks;\n+\tuint16_t max_rings_per_bank;\n+\tuint16_t arb_mask;\n+\n+#define SERV_TYPE_DC    BIT(0)\n+#define SERV_TYPE_SYM   BIT(1)\n+#define SERV_TYPE_ASYM  BIT(2)\n+#define SERV_TYPE_DMA   BIT(3)\n+\tuint16_t services;\n+\tuint16_t pkg_id;\n+\tuint16_t node_id;\n+\n+#define LCE_DEVICE_NAME_SIZE        64\n+\t__u8 device_name[LCE_DEVICE_NAME_SIZE];\n+};\n+\n+#define LCE_DEVICE_MAX_BANKS    2080\n+#define LCE_DEVICE_BITMAP_SIZE  \\\n+\t__KERNEL_DIV_ROUND_UP(LCE_DEVICE_MAX_BANKS, BITS_PER_LONG)\n+\n+/* struct lce_vfio_dev_cy_cap - CY capabilities of LCE device */\n+struct lce_vfio_dev_cy_cap {\n+\tuint32_t nr_banks;\n+\tunsigned long bitmap[LCE_DEVICE_BITMAP_SIZE];\n+};\n+\n+#define LCE_QAT_NID_LOCAL\t0x7\n+#define LCE_QAT_FUNC_LOCAL\t0x3ff\n+#define LCE_QAT_RID_LOCAL\t0xf\n+#define LCE_QAT_PASID_LOCAL\t0xfffff\n+\n+struct lce_qat_domain {\n+\tuint32_t nid        :3;\n+\tuint32_t fid        :7;\n+\tuint32_t ftype      :2;\n+\tuint32_t vfid       :13;\n+\tuint32_t rid        :4;\n+\tuint32_t vld        :1;\n+\tuint32_t desc_over  :1;\n+\tuint32_t pasid_vld  :1;\n+\tuint32_t pasid      :20;\n+};\n+\n+struct lce_qat_buf_domain {\n+\tuint32_t bank_id:   20;\n+#define LCE_REQ_BUFFER_DOMAIN   1UL\n+#define LCE_RES_BUFFER_DOMAIN   2UL\n+#define LCE_SRC_BUFFER_DOMAIN   4UL\n+#define LCE_DST_BUFFER_DOMAIN   8UL\n+\tuint32_t type:      4;\n+\tuint32_t resv:      8;\n+\tstruct lce_qat_domain dom;\n+};\n+\n+/* QAT GEN 5 specific macros */\n+#define 
QAT_GEN5_BUNDLE_NUM\t\tLCE_DEVICE_MAX_BANKS\n+#define QAT_GEN5_QPS_PER_BUNDLE_NUM\t1\n+\n+struct qat_dev_gen5_extra {\n+\tstruct qat_qp_hw_data\n+\t    qp_gen5_data[QAT_GEN5_BUNDLE_NUM][QAT_GEN5_QPS_PER_BUNDLE_NUM];\n+};\n+\n+static struct qat_pf2vf_dev qat_pf2vf_gen5 = {\n+\t.pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,\n+\t.vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,\n+\t.pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,\n+\t.pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,\n+\t.pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,\n+\t.pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,\n+};\n+\n+static int\n+qat_select_valid_queue_gen5(struct qat_pci_device *qat_dev, int qp_id,\n+\t\t\t    enum qat_service_type service_type)\n+{\n+\tint i = 0, valid_qps = 0;\n+\tstruct qat_dev_gen5_extra *dev_extra = qat_dev->dev_private;\n+\n+\tfor (; i < QAT_GEN5_BUNDLE_NUM; i++) {\n+\t\tif (dev_extra->qp_gen5_data[i][0].service_type ==\n+\t\t\t\tservice_type) {\n+\t\t\tif (valid_qps == qp_id)\n+\t\t\t\treturn i;\n+\t\t\t++valid_qps;\n+\t\t}\n+\t}\n+\treturn -1;\n+}\n+\n+static const struct qat_qp_hw_data *\n+qat_qp_get_hw_data_gen5(struct qat_pci_device *qat_dev,\n+\t\t\tenum qat_service_type service_type, uint16_t qp_id)\n+{\n+\tstruct qat_dev_gen5_extra *dev_extra = qat_dev->dev_private;\n+\tint ring_pair = qat_select_valid_queue_gen5(qat_dev, qp_id,\n+\t\t\t\t\t\t    service_type);\n+\n+\tif (ring_pair < 0)\n+\t\treturn NULL;\n+\n+\treturn &dev_extra->qp_gen5_data[ring_pair][0];\n+}\n+\n+static int\n+qat_qp_rings_per_service_gen5(struct qat_pci_device *qat_dev,\n+\t\t\t      enum qat_service_type service)\n+{\n+\tint i = 0, count = 0, max_ops_per_srv = 0;\n+\tstruct qat_dev_gen5_extra *dev_extra = qat_dev->dev_private;\n+\n+\tmax_ops_per_srv = QAT_GEN5_BUNDLE_NUM;\n+\tfor (i = 0, count = 0; i < max_ops_per_srv; i++)\n+\t\tif (dev_extra->qp_gen5_data[i][0].service_type == service)\n+\t\t\tcount++;\n+\treturn count;\n+}\n+\n+static int qat_dev_read_config(struct qat_pci_device *qat_dev)\n+{\n+\tstruct qat_dev_gen5_extra *dev_extra = qat_dev->dev_private;\n+\tstruct qat_qp_hw_data *hw_data;\n+\n+\t/** Enable only crypto ring: RP-0 */\n+\thw_data = &dev_extra->qp_gen5_data[0][0];\n+\tmemset(hw_data, 0, sizeof(*hw_data));\n+\n+\thw_data->service_type = QAT_SERVICE_SYMMETRIC;\n+\thw_data->tx_msg_size = 128;\n+\thw_data->rx_msg_size = 32;\n+\n+\thw_data->tx_ring_num = 0;\n+\thw_data->rx_ring_num = 1;\n+\n+\thw_data->hw_bundle_num = 0;\n+\n+\treturn 0;\n+}\n+\n+\n+static int qat_dev_read_config_gen5(struct qat_pci_device *qat_dev)\n+{\n+\treturn qat_dev_read_config(qat_dev);\n+}\n+\n+static void qat_qp_build_ring_base_gen5(void *io_addr, struct qat_queue *queue)\n+{\n+\tuint64_t queue_base;\n+\n+\tqueue_base = BUILD_RING_BASE_ADDR_GEN5(queue->base_phys_addr,\n+\t\t\t\t\t       queue->queue_size);\n+\tWRITE_CSR_RING_BASE_GEN5VF(io_addr, queue->hw_bundle_number,\n+\t\t\t\t   queue->hw_queue_number, queue_base);\n+}\n+\n+static void\n+qat_qp_adf_arb_enable_gen5(const struct qat_queue *txq,\n+\t\t\t   void *base_addr, rte_spinlock_t *lock)\n+{\n+\tuint32_t arb_csr_offset = 0, value;\n+\n+\trte_spinlock_lock(lock);\n+\tarb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +\n+\t    (ADF_RING_BUNDLE_SIZE_GEN5 *\n+\t     txq->hw_bundle_number);\n+\tvalue = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN5VF,\n+\t\t\t   arb_csr_offset);\n+\tvalue |= 0x01;\n+\tADF_CSR_WR(base_addr, arb_csr_offset, value);\n+\trte_spinlock_unlock(lock);\n+}\n+\n+static void\n+qat_qp_adf_arb_disable_gen5(const struct qat_queue *txq,\n+\t\t\t    void *base_addr, 
rte_spinlock_t *lock)\n+{\n+\tuint32_t arb_csr_offset = 0, value;\n+\n+\trte_spinlock_lock(lock);\n+\tarb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_RING_BUNDLE_SIZE_GEN5 *\n+\t\t\t\t\t\t\ttxq->hw_bundle_number);\n+\tvalue = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN5VF,\n+\t\t\t   arb_csr_offset);\n+\tvalue &= ~(0x01);\n+\tADF_CSR_WR(base_addr, arb_csr_offset, value);\n+\trte_spinlock_unlock(lock);\n+}\n+\n+static void\n+qat_qp_adf_configure_queues_gen5(struct qat_qp *qp)\n+{\n+\tuint32_t q_tx_config, q_resp_config;\n+\tstruct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;\n+\n+\t/* q_tx/rx->queue_size is initialized as per bundle config register */\n+\tq_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);\n+\n+\tq_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,\n+\t\t\t\t\t       ADF_RING_NEAR_WATERMARK_512,\n+\t\t\t\t\t       ADF_RING_NEAR_WATERMARK_0);\n+\n+\tWRITE_CSR_RING_CONFIG_GEN5VF(qp->mmap_bar_addr, q_tx->hw_bundle_number,\n+\t\t\t\t     q_tx->hw_queue_number, q_tx_config);\n+\tWRITE_CSR_RING_CONFIG_GEN5VF(qp->mmap_bar_addr, q_rx->hw_bundle_number,\n+\t\t\t\t     q_rx->hw_queue_number, q_resp_config);\n+}\n+\n+static void\n+qat_qp_csr_write_tail_gen5(struct qat_qp *qp, struct qat_queue *q)\n+{\n+\tWRITE_CSR_RING_TAIL_GEN5VF(qp->mmap_bar_addr, q->hw_bundle_number,\n+\t\t\t\t   q->hw_queue_number, q->tail);\n+}\n+\n+static void\n+qat_qp_csr_write_head_gen5(struct qat_qp *qp, struct qat_queue *q,\n+\t\t\t   uint32_t new_head)\n+{\n+\tWRITE_CSR_RING_HEAD_GEN5VF(qp->mmap_bar_addr, q->hw_bundle_number,\n+\t\t\t\t   q->hw_queue_number, new_head);\n+}\n+\n+static void\n+qat_qp_csr_setup_gen5(struct qat_pci_device *qat_dev, void *io_addr,\n+\t\t      struct qat_qp *qp)\n+{\n+\tqat_qp_build_ring_base_gen5(io_addr, &qp->tx_q);\n+\tqat_qp_build_ring_base_gen5(io_addr, &qp->rx_q);\n+\tqat_qp_adf_configure_queues_gen5(qp);\n+\tqat_qp_adf_arb_enable_gen5(&qp->tx_q, qp->mmap_bar_addr,\n+\t\t\t\t   &qat_dev->arb_csr_lock);\n+}\n+\n+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen5 = {\n+\t.qat_qp_rings_per_service = qat_qp_rings_per_service_gen5,\n+\t.qat_qp_build_ring_base = qat_qp_build_ring_base_gen5,\n+\t.qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen5,\n+\t.qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen5,\n+\t.qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen5,\n+\t.qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen5,\n+\t.qat_qp_csr_write_head = qat_qp_csr_write_head_gen5,\n+\t.qat_qp_csr_setup = qat_qp_csr_setup_gen5,\n+\t.qat_qp_get_hw_data = qat_qp_get_hw_data_gen5,\n+};\n+\n+static int\n+qat_reset_ring_pairs_gen5(struct qat_pci_device *qat_pci_dev __rte_unused)\n+{\n+\treturn 0;\n+}\n+\n+static const struct rte_mem_resource*\n+qat_dev_get_transport_bar_gen5(struct rte_pci_device *pci_dev)\n+{\n+\treturn &pci_dev->mem_resource[0];\n+}\n+\n+static int\n+qat_dev_get_misc_bar_gen5(struct rte_mem_resource **mem_resource,\n+\t\t\t  struct rte_pci_device *pci_dev)\n+{\n+\t*mem_resource = &pci_dev->mem_resource[2];\n+\treturn 0;\n+}\n+\n+static int\n+qat_dev_get_extra_size_gen5(void)\n+{\n+\treturn sizeof(struct qat_dev_gen5_extra);\n+}\n+\n+static int\n+qat_dev_get_slice_map_gen5(uint32_t *map __rte_unused,\n+\tconst struct rte_pci_device *pci_dev __rte_unused)\n+{\n+\treturn 0;\n+}\n+\n+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen5 = {\n+\t.qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen5,\n+\t.qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen5,\n+\t.qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen5,\n+\t.qat_dev_read_config = 
qat_dev_read_config_gen5,\n+\t.qat_dev_get_extra_size = qat_dev_get_extra_size_gen5,\n+\t.qat_dev_get_slice_map = qat_dev_get_slice_map_gen5,\n+};\n+\n+RTE_INIT(qat_dev_gen_5_init)\n+{\n+\tqat_qp_hw_spec[QAT_GEN5] = &qat_qp_hw_spec_gen5;\n+\tqat_dev_hw_spec[QAT_GEN5] = &qat_dev_hw_spec_gen5;\n+\tqat_gen_config[QAT_GEN5].dev_gen = QAT_GEN5;\n+\tqat_gen_config[QAT_GEN5].pf2vf_dev = &qat_pf2vf_gen5;\n+}\ndiff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen5.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen5.h\nnew file mode 100644\nindex 0000000000..29ce6b8e60\n--- /dev/null\n+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen5.h\n@@ -0,0 +1,51 @@\n+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)\n+ * Copyright(c) 2023 Intel Corporation\n+ */\n+\n+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN5_H\n+#define ADF_TRANSPORT_ACCESS_MACROS_GEN5_H\n+\n+#include \"adf_transport_access_macros.h\"\n+\n+#define ADF_RINGS_PER_INT_SRCSEL_GEN4 2\n+#define ADF_BANK_INT_SRC_SEL_MASK_GEN4 0x44UL\n+#define ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4 0x3\n+#define ADF_RING_BUNDLE_SIZE_GEN5 0x2000\n+#define ADF_RING_CSR_RING_CONFIG_GEN5 0x1000\n+#define ADF_RING_CSR_RING_LBASE_GEN5 0x1040\n+#define ADF_RING_CSR_RING_UBASE_GEN5 0x1080\n+\n+#define BUILD_RING_BASE_ADDR_GEN5(addr, size) \\\n+\t((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)\n+\n+#define WRITE_CSR_RING_BASE_GEN5(csr_base_addr, bank, ring, value) \\\n+do { \\\n+\tuint32_t l_base = 0, u_base = 0; \\\n+\tl_base = (uint32_t)(value & 0xFFFFFFFF); \\\n+\tu_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);\t\\\n+\tADF_CSR_WR(csr_base_addr,\t\\\n+\t\t(ADF_RING_BUNDLE_SIZE_GEN5 * bank) +\t\t\t\\\n+\t\tADF_RING_CSR_RING_LBASE_GEN5 + (ring << 2),\t\t\\\n+\t\tl_base);\t\t\t\t\t\t\\\n+\tADF_CSR_WR(csr_base_addr,\t\\\n+\t\t (ADF_RING_BUNDLE_SIZE_GEN5 * bank) +\t\t\t\\\n+\t\tADF_RING_CSR_RING_UBASE_GEN5 + (ring << 2),\t\t\\\n+\t\tu_base);\t\\\n+} while (0)\n+\n+#define WRITE_CSR_RING_CONFIG_GEN5(csr_base_addr, bank, ring, value) \\\n+\tADF_CSR_WR(csr_base_addr,\t\\\n+\t\t (ADF_RING_BUNDLE_SIZE_GEN5 * bank) + \\\n+\t\tADF_RING_CSR_RING_CONFIG_GEN5 + (ring << 2), value)\n+\n+#define WRITE_CSR_RING_TAIL_GEN5(csr_base_addr, bank, ring, value) \\\n+\tADF_CSR_WR((u8 *)(csr_base_addr), \\\n+\t\t(ADF_RING_BUNDLE_SIZE_GEN5 * (bank)) + \\\n+\t\tADF_RING_CSR_RING_TAIL + ((ring) << 2), value)\n+\n+#define WRITE_CSR_RING_HEAD_GEN5(csr_base_addr, bank, ring, value) \\\n+\tADF_CSR_WR((u8 *)(csr_base_addr), \\\n+\t\t(ADF_RING_BUNDLE_SIZE_GEN5 * (bank)) + \\\n+\t\tADF_RING_CSR_RING_HEAD + ((ring) << 2), value)\n+\n+#endif\ndiff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros_gen5vf.h b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen5vf.h\nnew file mode 100644\nindex 0000000000..5d2c6706a6\n--- /dev/null\n+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros_gen5vf.h\n@@ -0,0 +1,48 @@\n+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)\n+ * Copyright(c) 2023 Intel Corporation\n+ */\n+\n+#ifndef ADF_TRANSPORT_ACCESS_MACROS_GEN5VF_H\n+#define ADF_TRANSPORT_ACCESS_MACROS_GEN5VF_H\n+\n+#include \"adf_transport_access_macros.h\"\n+#include \"adf_transport_access_macros_gen5.h\"\n+\n+#define ADF_RING_CSR_ADDR_OFFSET_GEN5VF 0x0\n+\n+#define WRITE_CSR_RING_BASE_GEN5VF(csr_base_addr, bank, ring, value) \\\n+do { \\\n+\tuint32_t l_base = 0, u_base = 0; \\\n+\tl_base = (uint32_t)(value & 0xFFFFFFFF); \\\n+\tu_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \\\n+\tADF_CSR_WR(csr_base_addr + 
ADF_RING_CSR_ADDR_OFFSET_GEN5VF, \\\n+\t\t(ADF_RING_BUNDLE_SIZE_GEN5 * bank) + \\\n+\t\tADF_RING_CSR_RING_LBASE_GEN5 + (ring << 2),\t\\\n+\t\tl_base);\t\\\n+\tADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN5VF,\t\\\n+\t\t (ADF_RING_BUNDLE_SIZE_GEN5 * bank) + \\\n+\t\tADF_RING_CSR_RING_UBASE_GEN5 + (ring << 2),\t\t\\\n+\t\tu_base);\t\\\n+} while (0)\n+\n+#define WRITE_CSR_RING_CONFIG_GEN5VF(csr_base_addr, bank, ring, value) \\\n+\tADF_CSR_WR(csr_base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN5VF,\t\\\n+\t\t (ADF_RING_BUNDLE_SIZE_GEN5 * bank) + \\\n+\t\tADF_RING_CSR_RING_CONFIG_GEN5 + (ring << 2), value)\n+\n+#define WRITE_CSR_RING_TAIL_GEN5VF(csr_base_addr, bank, ring, value) \\\n+\tADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN5VF, \\\n+\t\t(ADF_RING_BUNDLE_SIZE_GEN5 * (bank)) + \\\n+\t\tADF_RING_CSR_RING_TAIL + ((ring) << 2), (value))\n+\n+#define WRITE_CSR_RING_HEAD_GEN5VF(csr_base_addr, bank, ring, value) \\\n+\tADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN5VF, \\\n+\t\t(ADF_RING_BUNDLE_SIZE_GEN5 * (bank)) + \\\n+\t\tADF_RING_CSR_RING_HEAD + ((ring) << 2), (value))\n+\n+#define WRITE_CSR_RING_SRV_ARB_EN_GEN5VF(csr_base_addr, bank, value) \\\n+\tADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET_GEN5VF, \\\n+\t\t(ADF_RING_BUNDLE_SIZE_GEN5 * (bank)) + \\\n+\t\tADF_RING_CSR_RING_SRV_ARB_EN, (value))\n+\n+#endif\ndiff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen5.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen5.c\nnew file mode 100644\nindex 0000000000..1f1242c5c0\n--- /dev/null\n+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen5.c\n@@ -0,0 +1,336 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2023 Intel Corporation\n+ */\n+\n+#include <rte_cryptodev.h>\n+#include <cryptodev_pmd.h>\n+#include \"qat_sym_session.h\"\n+#include \"qat_sym.h\"\n+#include \"qat_asym.h\"\n+#include \"qat_crypto.h\"\n+#include \"qat_crypto_pmd_gens.h\"\n+\n+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen5[] = {\n+\tQAT_SYM_AEAD_CAP(AES_GCM,\n+\t\tCAP_SET(block_size, 16),\n+\t\tCAP_RNG(key_size, 32, 32, 1), CAP_RNG(digest_size, 16, 16, 1),\n+\t\tCAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 1)),\n+\tRTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()\n+};\n+\n+static int\n+qat_sgl_add_buffer_gen5(void *list_in, uint64_t addr, uint32_t len)\n+{\n+\tstruct qat_sgl *list = (struct qat_sgl *)list_in;\n+\tuint32_t nr;\n+\n+\tnr = list->num_bufs;\n+\n+\tif (nr >= QAT_SYM_SGL_MAX_NUMBER) {\n+\t\tQAT_DP_LOG(ERR, \"Adding %d entry failed, no empty SGL buffer\", nr);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tlist->buffers[nr].len = len;\n+\tlist->buffers[nr].resrvd = 0;\n+\tlist->buffers[nr].addr = addr;\n+\n+\tlist->num_bufs++;\n+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG\n+\tQAT_DP_LOG(INFO, \"SGL with %d buffers:\", list->num_bufs);\n+\tQAT_DP_LOG(INFO, \"QAT SGL buf %d, len = %d, iova = 0x%012\"PRIx64,\n+\t\tnr, list->buffers[nr].len, list->buffers[nr].addr);\n+#endif\n+\treturn 0;\n+}\n+\n+static int\n+qat_sgl_fill_array_with_mbuf(struct rte_mbuf *buf, int64_t offset,\n+\tvoid *list_in, uint32_t data_len)\n+{\n+\tstruct qat_sgl *list = (struct qat_sgl *)list_in;\n+\tuint32_t nr, buf_len;\n+\tint res = -EINVAL;\n+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG\n+\tuint32_t start_idx;\n+\tstart_idx = list->num_bufs;\n+#endif\n+\n+\t/* Append to the existing list */\n+\tnr = list->num_bufs;\n+\n+\tfor (buf_len = 0; buf && nr < QAT_SYM_SGL_MAX_NUMBER; buf = buf->next) {\n+\t\tif (offset >= rte_pktmbuf_data_len(buf)) {\n+\t\t\toffset -= rte_pktmbuf_data_len(buf);\n+\t\t\t/* Jump to next 
mbuf */\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tlist->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;\n+\t\tlist->buffers[nr].resrvd = 0;\n+\t\tlist->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);\n+\n+\t\toffset = 0;\n+\t\tbuf_len += list->buffers[nr].len;\n+\n+\t\tif (buf_len >= data_len) {\n+\t\t\tlist->buffers[nr].len -= buf_len - data_len;\n+\t\t\tres = 0;\n+\t\t\tbreak;\n+\t\t}\n+\t\t++nr;\n+\t}\n+\n+\tif (unlikely(res != 0)) {\n+\t\tif (nr == QAT_SYM_SGL_MAX_NUMBER)\n+\t\t\tQAT_DP_LOG(ERR, \"Exceeded max segments in QAT SGL (%u)\",\n+\t\t\t\tQAT_SYM_SGL_MAX_NUMBER);\n+\t\telse\n+\t\t\tQAT_DP_LOG(ERR, \"Mbuf chain is too short\");\n+\t} else {\n+\n+\t\tlist->num_bufs = ++nr;\n+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG\n+\t\tQAT_DP_LOG(INFO, \"SGL with %d buffers:\", list->num_bufs);\n+\t\tfor (nr = start_idx; nr < list->num_bufs; nr++) {\n+\t\t\tQAT_DP_LOG(INFO, \"QAT SGL buf %d, len = %d, iova = 0x%012\"PRIx64,\n+\t\t\t\tnr, list->buffers[nr].len,\n+\t\t\t\tlist->buffers[nr].addr);\n+\t\t}\n+#endif\n+\t}\n+\n+\treturn res;\n+}\n+\n+static int\n+qat_sym_build_op_aead_gen5(void *in_op, struct qat_sym_session *ctx,\n+\tuint8_t *out_msg, void *op_cookie)\n+{\n+\tstruct qat_sym_op_cookie *cookie = op_cookie;\n+\tstruct rte_crypto_op *op = in_op;\n+\tuint64_t digest_phys_addr, aad_phys_addr;\n+\tuint16_t iv_len, aad_len, digest_len, key_len;\n+\tuint32_t cipher_ofs, iv_offset, cipher_len;\n+\tregister struct icp_qat_fw_la_bulk_req *qat_req;\n+\tstruct icp_qat_fw_la_cipher_30_req_params *cipher_param;\n+\tenum icp_qat_hw_cipher_dir dir;\n+\tbool is_digest_adjacent = false;\n+\n+\tif (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER ||\n+\t\tctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_AES256 ||\n+\t\tctx->qat_mode != ICP_QAT_HW_CIPHER_AEAD_MODE) {\n+\n+\t\tQAT_DP_LOG(ERR, \"Not supported (cmd: %d, alg: %d, mode: %d). \"\n+\t\t\t\"GEN5 PMD only supports AES-256 AEAD mode\",\n+\t\t\tctx->qat_cmd, ctx->qat_cipher_alg, ctx->qat_mode);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tqat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;\n+\trte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));\n+\tqat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;\n+\tcipher_param = (void *)&qat_req->serv_specif_rqpars;\n+\n+\tdir = ctx->qat_dir;\n+\n+\taad_phys_addr = op->sym->aead.aad.phys_addr;\n+\taad_len = ctx->aad_len;\n+\n+\tiv_offset = ctx->cipher_iv.offset;\n+\tiv_len = ctx->cipher_iv.length;\n+\n+\tcipher_ofs = op->sym->aead.data.offset;\n+\tcipher_len = op->sym->aead.data.length;\n+\n+\tdigest_phys_addr = op->sym->aead.digest.phys_addr;\n+\tdigest_len = ctx->digest_length;\n+\n+\t/* Upto 16B IV can be directly embedded in descriptor.\n+\t * But GCM supports only 12B IV\n+\t */\n+\tif (iv_len != GCM_IV_LENGTH) {\n+\t\tQAT_DP_LOG(ERR, \"iv_len: %d not supported. 
Must be 12B.\",\n+\t\t\tiv_len);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\trte_memcpy(cipher_param->u.cipher_IV_array,\n+\t\trte_crypto_op_ctod_offset(op, uint8_t*, iv_offset),\n+\t\tiv_len);\n+\n+\t/* Always SGL */\n+\tRTE_ASSERT((qat_req->comn_hdr.comn_req_flags &\n+\t\tICP_QAT_FW_SYM_COMM_ADDR_SGL) == 1);\n+\t/* Always inplace */\n+\tRTE_ASSERT(op->sym->m_dst == NULL);\n+\n+\t/* Key buffer address is already programmed by reusing the\n+\t * content-descriptor buffer\n+\t */\n+\tkey_len = ctx->auth_key_length;\n+\n+\tcipher_param->spc_aad_sz = aad_len;\n+\tcipher_param->cipher_length = key_len;\n+\tcipher_param->spc_auth_res_sz = digest_len;\n+\n+\t/* Knowing digest is contiguous to cipher-text helps optimizing SGL */\n+\tif (rte_pktmbuf_iova_offset(op->sym->m_src, cipher_ofs + cipher_len)\n+\t\t== digest_phys_addr)\n+\t\tis_digest_adjacent = true;\n+\n+\t/* SRC-SGL: 3 entries:\n+\t * a) AAD\n+\t * b) cipher\n+\t * c) digest (only for decrypt and buffer is_NOT_adjacent)\n+\t *\n+\t */\n+\tcookie->qat_sgl_src.num_bufs = 0;\n+\tif (aad_len)\n+\t\tqat_sgl_add_buffer_gen5(&cookie->qat_sgl_src, aad_phys_addr,\n+\t\t\taad_len);\n+\n+\tif (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_DECRYPT) {\n+\t\tqat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,\n+\t\t\t&cookie->qat_sgl_src,\n+\t\t\tcipher_len + digest_len);\n+\t} else {\n+\t\tqat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,\n+\t\t\t&cookie->qat_sgl_src,\n+\t\t\tcipher_len);\n+\n+\t\t/* Digest buffer in decrypt job */\n+\t\tif (dir == ICP_QAT_HW_CIPHER_DECRYPT)\n+\t\t\tqat_sgl_add_buffer_gen5(&cookie->qat_sgl_src,\n+\t\t\t\tdigest_phys_addr, digest_len);\n+\t}\n+\n+\t/* (in-place) DST-SGL: 2 entries:\n+\t * a) cipher\n+\t * b) digest (only for encrypt and buffer is_NOT_adjacent)\n+\t */\n+\tcookie->qat_sgl_dst.num_bufs = 0;\n+\n+\tif (is_digest_adjacent && dir == ICP_QAT_HW_CIPHER_ENCRYPT) {\n+\t\tqat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,\n+\t\t\t&cookie->qat_sgl_dst,\n+\t\t\tcipher_len + digest_len);\n+\t} else {\n+\t\tqat_sgl_fill_array_with_mbuf(op->sym->m_src, cipher_ofs,\n+\t\t\t&cookie->qat_sgl_dst,\n+\t\t\tcipher_len);\n+\n+\t\t/* Digest buffer in Encrypt job */\n+\t\tif (dir == ICP_QAT_HW_CIPHER_ENCRYPT)\n+\t\t\tqat_sgl_add_buffer_gen5(&cookie->qat_sgl_dst,\n+\t\t\t\tdigest_phys_addr, digest_len);\n+\t}\n+\n+\t/* Length values in 128B descriptor */\n+\tqat_req->comn_mid.src_length = cipher_len;\n+\tqat_req->comn_mid.dst_length = cipher_len;\n+\n+\tif (dir == ICP_QAT_HW_CIPHER_ENCRYPT) /* Digest buffer in Encrypt job */\n+\t\tqat_req->comn_mid.dst_length += GCM_256_DIGEST_LEN;\n+\n+\t/* src & dst SGL addresses in 128B descriptor */\n+\tqat_req->comn_mid.src_data_addr = cookie->qat_sgl_src_phys_addr;\n+\tqat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr;\n+\n+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG\n+\tQAT_DP_HEXDUMP_LOG(DEBUG, \"qat_req:\", qat_req,\n+\t\tsizeof(struct icp_qat_fw_la_bulk_req));\n+\tQAT_DP_HEXDUMP_LOG(DEBUG, \"src_data:\",\n+\t\trte_pktmbuf_mtod(op->sym->m_src, uint8_t*),\n+\t\trte_pktmbuf_data_len(op->sym->m_src));\n+\tQAT_DP_HEXDUMP_LOG(DEBUG, \"digest:\", op->sym->aead.digest.data,\n+\t\tdigest_len);\n+\tQAT_DP_HEXDUMP_LOG(DEBUG, \"aad:\", op->sym->aead.aad.data, aad_len);\n+#endif\n+\treturn 0;\n+}\n+\n+static int\n+qat_sym_crypto_set_session_gen5(void *cdev __rte_unused, void *session)\n+{\n+\tstruct qat_sym_session *ctx = session;\n+\tqat_sym_build_request_t build_request = NULL;\n+\tenum rte_proc_type_t proc_type = rte_eal_process_type();\n+\n+\tif (proc_type == 
RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)\n+\t\treturn -EINVAL;\n+\n+\t/* build request for aead */\n+\tif (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES256 &&\n+\t\tctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) {\n+\t\tbuild_request = qat_sym_build_op_aead_gen5;\n+\t\tif (build_request)\n+\t\t\tctx->build_request[proc_type] = build_request;\n+\t\telse\n+\t\t\treturn -EINVAL;\n+\t}\n+\treturn 0;\n+}\n+\n+\n+static int\n+qat_sym_crypto_cap_get_gen5(struct qat_cryptodev_private *internals,\n+\tconst char *capa_memz_name,\n+\tconst uint16_t __rte_unused slice_map)\n+{\n+\tconst uint32_t size = sizeof(qat_sym_crypto_caps_gen5);\n+\tuint32_t i;\n+\n+\tinternals->capa_mz = rte_memzone_lookup(capa_memz_name);\n+\tif (internals->capa_mz == NULL) {\n+\t\tinternals->capa_mz = rte_memzone_reserve(capa_memz_name,\n+\t\t\tsize, rte_socket_id(), 0);\n+\t\tif (internals->capa_mz == NULL) {\n+\t\t\tQAT_LOG(DEBUG,\n+\t\t\t\t\"Error allocating memzone for capabilities\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tstruct rte_cryptodev_capabilities *addr =\n+\t\t(struct rte_cryptodev_capabilities *)\n+\t\tinternals->capa_mz->addr;\n+\tconst struct rte_cryptodev_capabilities *capabilities =\n+\t\tqat_sym_crypto_caps_gen5;\n+\tconst uint32_t capa_num =\n+\t\tsize / sizeof(struct rte_cryptodev_capabilities);\n+\tuint32_t curr_capa = 0;\n+\n+\tfor (i = 0; i < capa_num; i++) {\n+\t\tmemcpy(addr + curr_capa, capabilities + i,\n+\t\t\tsizeof(struct rte_cryptodev_capabilities));\n+\t\tcurr_capa++;\n+\t}\n+\tinternals->qat_dev_capabilities = internals->capa_mz->addr;\n+\n+\treturn 0;\n+}\n+\n+RTE_INIT(qat_sym_crypto_gen5_init)\n+{\n+\tqat_sym_gen_dev_ops[QAT_GEN5].cryptodev_ops = &qat_sym_crypto_ops_gen1;\n+\tqat_sym_gen_dev_ops[QAT_GEN5].get_capabilities =\n+\t\t\tqat_sym_crypto_cap_get_gen5;\n+\tqat_sym_gen_dev_ops[QAT_GEN5].set_session =\n+\t\t\tqat_sym_crypto_set_session_gen5;\n+\tqat_sym_gen_dev_ops[QAT_GEN5].set_raw_dp_ctx = NULL;\n+\tqat_sym_gen_dev_ops[QAT_GEN5].get_feature_flags =\n+\t\t\tqat_sym_crypto_feature_flags_get_gen1;\n+#ifdef RTE_LIB_SECURITY\n+\tqat_sym_gen_dev_ops[QAT_GEN5].create_security_ctx =\n+\t\t\tqat_sym_create_security_gen1;\n+#endif\n+}\n+\n+RTE_INIT(qat_asym_crypto_gen5_init)\n+{\n+\tqat_asym_gen_dev_ops[QAT_GEN5].cryptodev_ops = NULL;\n+\tqat_asym_gen_dev_ops[QAT_GEN5].get_capabilities = NULL;\n+\tqat_asym_gen_dev_ops[QAT_GEN5].get_feature_flags = NULL;\n+\tqat_asym_gen_dev_ops[QAT_GEN5].set_session = NULL;\n+}\n",
    "prefixes": [
        "1/4"
    ]
}
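
The PUT and PATCH operations advertised in the Allow header require an authenticated user with maintainer rights on the project. A hedged sketch of a partial update with Python's requests library; the token value is a placeholder (Patchwork issues per-user API tokens, sent as an Authorization: Token header), and the target state must be one of the states configured on the server:

import requests

API_TOKEN = "0123456789abcdef"  # placeholder: use a token from your Patchwork profile

resp = requests.patch(
    "http://patchwork.dpdk.org/api/patches/135394/",
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted"},  # PATCH sends only the fields being changed
)
resp.raise_for_status()
print(resp.json()["state"])

A PUT request works the same way but, per HTTP semantics, is meant to carry the full writable representation of the patch rather than a delta.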