get:
Show a patch.

patch:
Update a patch (partial update; only the fields present in the request body are changed).

put:
Update a patch (full update; the request should supply the complete set of writable fields).
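
These endpoints can be driven from a script. A minimal sketch in Python, assuming the third-party requests library and token authentication; the token value ("REPLACE_ME") and the state slug are placeholders, and writes require an account with maintainer rights on the project:

import requests

BASE = "https://patchwork.dpdk.org/api"

# GET needs no credentials; ask for JSON explicitly.
resp = requests.get(f"{BASE}/patches/128861/",
                    headers={"Accept": "application/json"})
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH updates only the supplied fields. "REPLACE_ME" stands in
# for a real Patchwork API token; "under-review" is illustrative.
resp = requests.patch(f"{BASE}/patches/128861/",
                      headers={"Authorization": "Token REPLACE_ME"},
                      json={"state": "under-review"})
print(resp.status_code)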

GET /api/patches/128861/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 128861,
    "url": "http://patchwork.dpdk.org/api/patches/128861/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20230620141115.841226-3-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230620141115.841226-3-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230620141115.841226-3-suanmingm@nvidia.com",
    "date": "2023-06-20T14:11:08",
    "name": "[v4,2/9] crypto/mlx5: split AES-XTS",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "40947ccf64d7d6125dc82ec3f34b6e790654f906",
    "submitter": {
        "id": 1887,
        "url": "http://patchwork.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patchwork.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20230620141115.841226-3-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 28586,
            "url": "http://patchwork.dpdk.org/api/series/28586/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=28586",
            "date": "2023-06-20T14:11:06",
            "name": "crypto/mlx5: support AES-GCM",
            "version": 4,
            "mbox": "http://patchwork.dpdk.org/series/28586/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/128861/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/128861/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CCC9442D09;\n\tTue, 20 Jun 2023 16:12:13 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 59FB142D4F;\n\tTue, 20 Jun 2023 16:12:03 +0200 (CEST)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2089.outbound.protection.outlook.com [40.107.243.89])\n by mails.dpdk.org (Postfix) with ESMTP id F081C42D3E\n for <dev@dpdk.org>; Tue, 20 Jun 2023 16:11:59 +0200 (CEST)",
            "from BYAPR04CA0035.namprd04.prod.outlook.com (2603:10b6:a03:40::48)\n by SN7PR12MB8026.namprd12.prod.outlook.com (2603:10b6:806:34b::11)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6500.37; Tue, 20 Jun\n 2023 14:11:55 +0000",
            "from DM6NAM11FT032.eop-nam11.prod.protection.outlook.com\n (2603:10b6:a03:40:cafe::79) by BYAPR04CA0035.outlook.office365.com\n (2603:10b6:a03:40::48) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6500.37 via Frontend\n Transport; Tue, 20 Jun 2023 14:11:53 +0000",
            "from mail.nvidia.com (216.228.117.160) by\n DM6NAM11FT032.mail.protection.outlook.com (10.13.173.93) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.6500.37 via Frontend Transport; Tue, 20 Jun 2023 14:11:54 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.66) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.5; Tue, 20 Jun 2023\n 07:11:37 -0700",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.37; Tue, 20 Jun\n 2023 07:11:34 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=N7wPk/mcfn0H/QbqH8eyp98TtT+XWINRbsunTxVqMK4W9C8x6ZwsybdqvSbcpFvuyPC3yxM7QnxA/Sfhi8eqfN0fUWGm8oKIcA0igGsJrxF93VeDsWuRKH8BaH5qrKbZHpzIhDDXSBHx5SN0N6wAekColpsbRns59tjrj3JVPoUY8jTjqLleiruowCHAZHmSMAdnNWaM1NnnweNj8YJXFRkFtWc0YINQ13IB1WDWfh710yBzEsC9cOXCdevO9CXm+mHEwImCIEJ5XmxXJcr6m6DXv4h/vFbe0W11NUTKK9ES53wfLaCsrO2r0Uc0t6TlHHo+GwDkgJxKWHlxQv4g7w==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=l0zZvcCYaKwO+F65ocWG1bvidNmpTgUe5jl1nzQn2zM=;\n b=DZL1VNa5t0ytTbr0ErzTXE3fYGQx7L/CHt0Eci1n4fkZ9chhF1RpxrK1DKHYGy0O5qfv4tRSHmBh4RVyjh1VNEFLq3jH53r29K+/Ar+9WX4o8bFJBnpJf9Eiuoye3w/Cx6S3f820jDD/mfRQIr3Ry+Q0Ty80ADVB4WwN7XNYl4PPPAvH2QxDL0LNLsP4DDhPZLmjVm5HuxgqYVNKLWZ5Jh17VHXVfU3jyYwBlPdGr8FOtwZ2Jrvc6IuJr4Yhv60Z5ZyI5Ftl/gdre0XzUDqqUCgndplj34O/SBg7jW7jjwrFae/Xutuyd1Wz5JApnRrDTuD011yswGVqIHgCtqAGsA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.160) smtp.rcpttodomain=marvell.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=l0zZvcCYaKwO+F65ocWG1bvidNmpTgUe5jl1nzQn2zM=;\n b=de1jj86eCobhNe7biFjjWuvXNOsd1QT7oS7FOnRHHmfJypkbT3L5lQgcBlk1wTcydGww9H9DrVb1FRpouDvwBij74+KBBqghM63p4Bl79fXxkxc9634scDzqUvaN/vUZiWUhjvpPG0hBBlkCgwZF/WoEBfYgtfCqvQbik6wxqFvZO2lhP3d2LY0MirRpQAznykMFQ1dWdopXOOxOBWQtbSHepfDU7p4r74FjpWy2bhJ5dGbru/NVhIGWbMkTmM7TXw8anwXnYEJwkL1qNpOQdwgmpqMDLX5V6xi2PhbChHD+DrAn/5LonyMletkH76Vzl5df0jOCxHqvM4/CRatSnQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.160)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.160 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.160; helo=mail.nvidia.com; pr=C",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<gakhil@marvell.com>, Matan Azrad <matan@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <dev@dpdk.org>",
        "Subject": "[PATCH v4 2/9] crypto/mlx5: split AES-XTS",
        "Date": "Tue, 20 Jun 2023 17:11:08 +0300",
        "Message-ID": "<20230620141115.841226-3-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230620141115.841226-1-suanmingm@nvidia.com>",
        "References": "<20230418092325.2578712-1-suanmingm@nvidia.com>\n <20230620141115.841226-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "DM6NAM11FT032:EE_|SN7PR12MB8026:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "7660aaa7-b152-430f-3505-08db71984d6a",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n eL637tOyuKe4OhmZTz842hQ5hk8loWBgqASGiHwlNJGMq5ruElVsNBsiB5koPOq5WDXbM59eGiwUQoQ1hO+qUUkAvy/CqF/ec2s0ySgYtiNt7LEM132es1MgxfV8pQrjL716gp6oCdULqT8R3XMSxv2qYTWLUuMq6Iqep8V7sf7C6d3jNe5y7AM5aC2le6Kchy/LMownDKvO9GhBbWbhDCpj4KCE9oNZi+3Re8dxw+cKf0IF4uvWVEEPjZJAUWAVeaD45TGn/OWcrtCNF7FYoQvOG7tihczRyDji3HnhvbaQyw6+Jz4E2raCsMkx4aRDDMGJdoUfdVMOnzt/y40UZgyVC3T5zJYuKsJVa2LH1Mr91zcqHFOhAvAMEsMJHMLhSUIANVks+RND3IhPrKTflpJUHoDrgQ0WFUZlGmJJP2Z7lJFalEW5/UiwPmxB9ymlbvUvvdwW4rqzsuJas9YrleCA8GKNe7tK85B5mcdG6m4r/zeWi0ma7Cl5wcX8ziFhCi9jI/cP2KN0630DYCm6elqhYwp3pfQNHeaEtu28CxLZlSjVOT6OAqg6JvUNdrNZVXgIyFqQPKWN3xnsh7GKIZ7yaPGYigQiee7OUBQhqfcy/ijm6aJqlmggigBmenYgZWmZeP9pAZQVS7RVoZVdKpGAUdz2m2PWw3JwdolgpxSIlfUj91R6eh+ra1s2QGceSzBfSA2VPBcGAUuRNFH2bSVi7W6amkA2KmPECXuD09yLKAVCEsaDhfpay16qOn2W",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.160; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge1.nvidia.com; CAT:NONE;\n SFS:(13230028)(4636009)(376002)(396003)(39860400002)(346002)(136003)(451199021)(46966006)(40470700004)(36840700001)(478600001)(6666004)(70586007)(70206006)(36756003)(8676002)(4326008)(6636002)(41300700001)(7636003)(356005)(2616005)(47076005)(40480700001)(83380400001)(426003)(336012)(82310400005)(86362001)(7696005)(2906002)(5660300002)(110136005)(54906003)(8936002)(316002)(30864003)(40460700003)(1076003)(26005)(186003)(6286002)(55016003)(16526019)(82740400003)(36860700001)(579004);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "20 Jun 2023 14:11:54.9424 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 7660aaa7-b152-430f-3505-08db71984d6a",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.160];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT032.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "SN7PR12MB8026",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "As there will be other crypto algo be supported. This commit splits\nAES-XTS code to another *_xts.c file. The mlx5_crypto.c file will\njust contain the common code.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/crypto/mlx5/meson.build       |   1 +\n drivers/crypto/mlx5/mlx5_crypto.c     | 642 ++------------------------\n drivers/crypto/mlx5/mlx5_crypto.h     |  33 ++\n drivers/crypto/mlx5/mlx5_crypto_xts.c | 594 ++++++++++++++++++++++++\n 4 files changed, 667 insertions(+), 603 deletions(-)\n create mode 100644 drivers/crypto/mlx5/mlx5_crypto_xts.c",
    "diff": "diff --git a/drivers/crypto/mlx5/meson.build b/drivers/crypto/mlx5/meson.build\nindex a2691ec0f0..045e8ce81d 100644\n--- a/drivers/crypto/mlx5/meson.build\n+++ b/drivers/crypto/mlx5/meson.build\n@@ -15,6 +15,7 @@ endif\n \n sources = files(\n         'mlx5_crypto.c',\n+\t'mlx5_crypto_xts.c',\n         'mlx5_crypto_dek.c',\n )\n \ndiff --git a/drivers/crypto/mlx5/mlx5_crypto.c b/drivers/crypto/mlx5/mlx5_crypto.c\nindex 5267f48c1e..2e6bcc6ddc 100644\n--- a/drivers/crypto/mlx5/mlx5_crypto.c\n+++ b/drivers/crypto/mlx5/mlx5_crypto.c\n@@ -40,33 +40,6 @@ int mlx5_crypto_logtype;\n \n uint8_t mlx5_crypto_driver_id;\n \n-const struct rte_cryptodev_capabilities mlx5_crypto_caps[] = {\n-\t{\t\t/* AES XTS */\n-\t\t.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,\n-\t\t{.sym = {\n-\t\t\t.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,\n-\t\t\t{.cipher = {\n-\t\t\t\t.algo = RTE_CRYPTO_CIPHER_AES_XTS,\n-\t\t\t\t.block_size = 16,\n-\t\t\t\t.key_size = {\n-\t\t\t\t\t.min = 32,\n-\t\t\t\t\t.max = 64,\n-\t\t\t\t\t.increment = 32\n-\t\t\t\t},\n-\t\t\t\t.iv_size = {\n-\t\t\t\t\t.min = 16,\n-\t\t\t\t\t.max = 16,\n-\t\t\t\t\t.increment = 0\n-\t\t\t\t},\n-\t\t\t\t.dataunit_set =\n-\t\t\t\tRTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES |\n-\t\t\t\tRTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES |\n-\t\t\t\tRTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES,\n-\t\t\t}, }\n-\t\t}, }\n-\t},\n-};\n-\n static const char mlx5_crypto_drv_name[] = RTE_STR(MLX5_CRYPTO_DRIVER_NAME);\n \n static const struct rte_driver mlx5_drv = {\n@@ -76,21 +49,6 @@ static const struct rte_driver mlx5_drv = {\n \n static struct cryptodev_driver mlx5_cryptodev_driver;\n \n-struct mlx5_crypto_session {\n-\tuint32_t bs_bpt_eo_es;\n-\t/**< bsf_size, bsf_p_type, encryption_order and encryption standard,\n-\t * saved in big endian format.\n-\t */\n-\tuint32_t bsp_res;\n-\t/**< crypto_block_size_pointer and reserved 24 bits saved in big\n-\t * endian format.\n-\t */\n-\tuint32_t iv_offset:16;\n-\t/**< Starting point for Initialisation Vector. */\n-\tstruct mlx5_crypto_dek *dek; /**< Pointer to dek struct. 
*/\n-\tuint32_t dek_id; /**< DEK ID */\n-} __rte_packed;\n-\n static void\n mlx5_crypto_dev_infos_get(struct rte_cryptodev *dev,\n \t\t\t  struct rte_cryptodev_info *dev_info)\n@@ -102,7 +60,7 @@ mlx5_crypto_dev_infos_get(struct rte_cryptodev *dev,\n \t\tdev_info->driver_id = mlx5_crypto_driver_id;\n \t\tdev_info->feature_flags =\n \t\t\tMLX5_CRYPTO_FEATURE_FLAGS(priv->is_wrapped_mode);\n-\t\tdev_info->capabilities = mlx5_crypto_caps;\n+\t\tdev_info->capabilities = priv->caps;\n \t\tdev_info->max_nb_queue_pairs = MLX5_CRYPTO_MAX_QPS;\n \t\tdev_info->min_mbuf_headroom_req = 0;\n \t\tdev_info->min_mbuf_tailroom_req = 0;\n@@ -114,6 +72,38 @@ mlx5_crypto_dev_infos_get(struct rte_cryptodev *dev,\n \t}\n }\n \n+void\n+mlx5_crypto_indirect_mkeys_release(struct mlx5_crypto_qp *qp,\n+\t\t\t\t   uint16_t n)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < n; i++)\n+\t\tif (qp->mkey[i])\n+\t\t\tclaim_zero(mlx5_devx_cmd_destroy(qp->mkey[i]));\n+}\n+\n+int\n+mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv,\n+\t\t\t\t   struct mlx5_crypto_qp *qp,\n+\t\t\t\t   struct mlx5_devx_mkey_attr *attr,\n+\t\t\t\t   mlx5_crypto_mkey_update_t update_cb)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < qp->entries_n; i++) {\n+\t\tattr->klm_array = update_cb(priv, qp, i);\n+\t\tqp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->cdev->ctx, attr);\n+\t\tif (!qp->mkey[i])\n+\t\t\tgoto error;\n+\t}\n+\treturn 0;\n+error:\n+\tDRV_LOG(ERR, \"Failed to allocate indirect mkey.\");\n+\tmlx5_crypto_indirect_mkeys_release(qp, i);\n+\treturn -1;\n+}\n+\n static int\n mlx5_crypto_dev_configure(struct rte_cryptodev *dev,\n \t\t\t  struct rte_cryptodev_config *config)\n@@ -168,72 +158,6 @@ mlx5_crypto_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)\n \treturn sizeof(struct mlx5_crypto_session);\n }\n \n-static int\n-mlx5_crypto_sym_session_configure(struct rte_cryptodev *dev,\n-\t\t\t\t  struct rte_crypto_sym_xform *xform,\n-\t\t\t\t  struct rte_cryptodev_sym_session *session)\n-{\n-\tstruct mlx5_crypto_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_crypto_session *sess_private_data =\n-\t\tCRYPTODEV_GET_SYM_SESS_PRIV(session);\n-\tstruct rte_crypto_cipher_xform *cipher;\n-\tuint8_t encryption_order;\n-\n-\tif (unlikely(xform->next != NULL)) {\n-\t\tDRV_LOG(ERR, \"Xform next is not supported.\");\n-\t\treturn -ENOTSUP;\n-\t}\n-\tif (unlikely((xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) ||\n-\t\t     (xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_XTS))) {\n-\t\tDRV_LOG(ERR, \"Only AES-XTS algorithm is supported.\");\n-\t\treturn -ENOTSUP;\n-\t}\n-\tcipher = &xform->cipher;\n-\tsess_private_data->dek = mlx5_crypto_dek_prepare(priv, cipher);\n-\tif (sess_private_data->dek == NULL) {\n-\t\tDRV_LOG(ERR, \"Failed to prepare dek.\");\n-\t\treturn -ENOMEM;\n-\t}\n-\tif (cipher->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)\n-\t\tencryption_order = MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_MEMORY;\n-\telse\n-\t\tencryption_order = MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_WIRE;\n-\tsess_private_data->bs_bpt_eo_es = rte_cpu_to_be_32\n-\t\t\t(MLX5_BSF_SIZE_64B << MLX5_BSF_SIZE_OFFSET |\n-\t\t\t MLX5_BSF_P_TYPE_CRYPTO << MLX5_BSF_P_TYPE_OFFSET |\n-\t\t\t encryption_order << MLX5_ENCRYPTION_ORDER_OFFSET |\n-\t\t\t MLX5_ENCRYPTION_STANDARD_AES_XTS);\n-\tswitch (xform->cipher.dataunit_len) {\n-\tcase 0:\n-\t\tsess_private_data->bsp_res = 0;\n-\t\tbreak;\n-\tcase 512:\n-\t\tsess_private_data->bsp_res = rte_cpu_to_be_32\n-\t\t\t\t\t     ((uint32_t)MLX5_BLOCK_SIZE_512B <<\n-\t\t\t\t\t     MLX5_BLOCK_SIZE_OFFSET);\n-\t\tbreak;\n-\tcase 
4096:\n-\t\tsess_private_data->bsp_res = rte_cpu_to_be_32\n-\t\t\t\t\t     ((uint32_t)MLX5_BLOCK_SIZE_4096B <<\n-\t\t\t\t\t     MLX5_BLOCK_SIZE_OFFSET);\n-\t\tbreak;\n-\tcase 1048576:\n-\t\tsess_private_data->bsp_res = rte_cpu_to_be_32\n-\t\t\t\t\t     ((uint32_t)MLX5_BLOCK_SIZE_1MB <<\n-\t\t\t\t\t     MLX5_BLOCK_SIZE_OFFSET);\n-\t\tbreak;\n-\tdefault:\n-\t\tDRV_LOG(ERR, \"Cipher data unit length is not supported.\");\n-\t\treturn -ENOTSUP;\n-\t}\n-\tsess_private_data->iv_offset = cipher->iv.offset;\n-\tsess_private_data->dek_id =\n-\t\t\trte_cpu_to_be_32(sess_private_data->dek->obj->id &\n-\t\t\t\t\t 0xffffff);\n-\tDRV_LOG(DEBUG, \"Session %p was configured.\", sess_private_data);\n-\treturn 0;\n-}\n-\n static void\n mlx5_crypto_sym_session_clear(struct rte_cryptodev *dev,\n \t\t\t      struct rte_cryptodev_sym_session *sess)\n@@ -249,412 +173,6 @@ mlx5_crypto_sym_session_clear(struct rte_cryptodev *dev,\n \tDRV_LOG(DEBUG, \"Session %p was cleared.\", spriv);\n }\n \n-static void\n-mlx5_crypto_indirect_mkeys_release(struct mlx5_crypto_qp *qp, uint16_t n)\n-{\n-\tuint16_t i;\n-\n-\tfor (i = 0; i < n; i++)\n-\t\tif (qp->mkey[i])\n-\t\t\tclaim_zero(mlx5_devx_cmd_destroy(qp->mkey[i]));\n-}\n-\n-static void\n-mlx5_crypto_qp_release(struct mlx5_crypto_qp *qp)\n-{\n-\tif (qp == NULL)\n-\t\treturn;\n-\tmlx5_devx_qp_destroy(&qp->qp_obj);\n-\tmlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);\n-\tmlx5_devx_cq_destroy(&qp->cq_obj);\n-\trte_free(qp);\n-}\n-\n-static int\n-mlx5_crypto_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)\n-{\n-\tstruct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];\n-\n-\tmlx5_crypto_indirect_mkeys_release(qp, qp->entries_n);\n-\tmlx5_crypto_qp_release(qp);\n-\tdev->data->queue_pairs[qp_id] = NULL;\n-\treturn 0;\n-}\n-\n-static __rte_noinline uint32_t\n-mlx5_crypto_get_block_size(struct rte_crypto_op *op)\n-{\n-\tuint32_t bl = op->sym->cipher.data.length;\n-\n-\tswitch (bl) {\n-\tcase (1 << 20):\n-\t\treturn RTE_BE32(MLX5_BLOCK_SIZE_1MB << MLX5_BLOCK_SIZE_OFFSET);\n-\tcase (1 << 12):\n-\t\treturn RTE_BE32(MLX5_BLOCK_SIZE_4096B <<\n-\t\t\t\tMLX5_BLOCK_SIZE_OFFSET);\n-\tcase (1 << 9):\n-\t\treturn RTE_BE32(MLX5_BLOCK_SIZE_512B << MLX5_BLOCK_SIZE_OFFSET);\n-\tdefault:\n-\t\tDRV_LOG(ERR, \"Unknown block size: %u.\", bl);\n-\t\treturn UINT32_MAX;\n-\t}\n-}\n-\n-static __rte_always_inline uint32_t\n-mlx5_crypto_klm_set(struct mlx5_crypto_qp *qp, struct rte_mbuf *mbuf,\n-\t\t    struct mlx5_wqe_dseg *klm, uint32_t offset,\n-\t\t    uint32_t *remain)\n-{\n-\tuint32_t data_len = (rte_pktmbuf_data_len(mbuf) - offset);\n-\tuintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);\n-\n-\tif (data_len > *remain)\n-\t\tdata_len = *remain;\n-\t*remain -= data_len;\n-\tklm->bcount = rte_cpu_to_be_32(data_len);\n-\tklm->pbuf = rte_cpu_to_be_64(addr);\n-\tklm->lkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf);\n-\treturn klm->lkey;\n-\n-}\n-\n-static __rte_always_inline uint32_t\n-mlx5_crypto_klms_set(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op,\n-\t\t     struct rte_mbuf *mbuf, struct mlx5_wqe_dseg *klm)\n-{\n-\tuint32_t remain_len = op->sym->cipher.data.length;\n-\tuint32_t nb_segs = mbuf->nb_segs;\n-\tuint32_t klm_n = 1u;\n-\n-\t/* First mbuf needs to take the cipher offset. 
*/\n-\tif (unlikely(mlx5_crypto_klm_set(qp, mbuf, klm,\n-\t\t     op->sym->cipher.data.offset, &remain_len) == UINT32_MAX)) {\n-\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n-\t\treturn 0;\n-\t}\n-\twhile (remain_len) {\n-\t\tnb_segs--;\n-\t\tmbuf = mbuf->next;\n-\t\tif (unlikely(mbuf == NULL || nb_segs == 0)) {\n-\t\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n-\t\t\treturn 0;\n-\t\t}\n-\t\tif (unlikely(mlx5_crypto_klm_set(qp, mbuf, ++klm, 0,\n-\t\t\t\t\t\t &remain_len) == UINT32_MAX)) {\n-\t\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n-\t\t\treturn 0;\n-\t\t}\n-\t\tklm_n++;\n-\t}\n-\treturn klm_n;\n-}\n-\n-static __rte_always_inline int\n-mlx5_crypto_wqe_set(struct mlx5_crypto_priv *priv,\n-\t\t\t struct mlx5_crypto_qp *qp,\n-\t\t\t struct rte_crypto_op *op,\n-\t\t\t struct mlx5_umr_wqe *umr)\n-{\n-\tstruct mlx5_crypto_session *sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);\n-\tstruct mlx5_wqe_cseg *cseg = &umr->ctr;\n-\tstruct mlx5_wqe_mkey_cseg *mkc = &umr->mkc;\n-\tstruct mlx5_wqe_dseg *klms = &umr->kseg[0];\n-\tstruct mlx5_wqe_umr_bsf_seg *bsf = ((struct mlx5_wqe_umr_bsf_seg *)\n-\t\t\t\t      RTE_PTR_ADD(umr, priv->umr_wqe_size)) - 1;\n-\tuint32_t ds;\n-\tbool ipl = op->sym->m_dst == NULL || op->sym->m_dst == op->sym->m_src;\n-\t/* Set UMR WQE. */\n-\tuint32_t klm_n = mlx5_crypto_klms_set(qp, op,\n-\t\t\t\t   ipl ? op->sym->m_src : op->sym->m_dst, klms);\n-\n-\tif (unlikely(klm_n == 0))\n-\t\treturn 0;\n-\tbsf->bs_bpt_eo_es = sess->bs_bpt_eo_es;\n-\tif (unlikely(!sess->bsp_res)) {\n-\t\tbsf->bsp_res = mlx5_crypto_get_block_size(op);\n-\t\tif (unlikely(bsf->bsp_res == UINT32_MAX)) {\n-\t\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n-\t\t\treturn 0;\n-\t\t}\n-\t} else {\n-\t\tbsf->bsp_res = sess->bsp_res;\n-\t}\n-\tbsf->raw_data_size = rte_cpu_to_be_32(op->sym->cipher.data.length);\n-\tmemcpy(bsf->xts_initial_tweak,\n-\t       rte_crypto_op_ctod_offset(op, uint8_t *, sess->iv_offset), 16);\n-\tbsf->res_dp = sess->dek_id;\n-\tmkc->len = rte_cpu_to_be_64(op->sym->cipher.data.length);\n-\tcseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) | MLX5_OPCODE_UMR);\n-\tqp->db_pi += priv->umr_wqe_stride;\n-\t/* Set RDMA_WRITE WQE. */\n-\tcseg = RTE_PTR_ADD(cseg, priv->umr_wqe_size);\n-\tklms = RTE_PTR_ADD(cseg, sizeof(struct mlx5_rdma_write_wqe));\n-\tif (!ipl) {\n-\t\tklm_n = mlx5_crypto_klms_set(qp, op, op->sym->m_src, klms);\n-\t\tif (unlikely(klm_n == 0))\n-\t\t\treturn 0;\n-\t} else {\n-\t\tmemcpy(klms, &umr->kseg[0], sizeof(*klms) * klm_n);\n-\t}\n-\tds = 2 + klm_n;\n-\tcseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);\n-\tcseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |\n-\t\t\t\t\t\t\tMLX5_OPCODE_RDMA_WRITE);\n-\tds = RTE_ALIGN(ds, 4);\n-\tqp->db_pi += ds >> 2;\n-\t/* Set NOP WQE if needed. */\n-\tif (priv->max_rdmar_ds > ds) {\n-\t\tcseg += ds;\n-\t\tds = priv->max_rdmar_ds - ds;\n-\t\tcseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);\n-\t\tcseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |\n-\t\t\t\t\t\t\t       MLX5_OPCODE_NOP);\n-\t\tqp->db_pi += ds >> 2; /* Here, DS is 4 aligned for sure. 
*/\n-\t}\n-\tqp->wqe = (uint8_t *)cseg;\n-\treturn 1;\n-}\n-\n-static uint16_t\n-mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,\n-\t\t\t  uint16_t nb_ops)\n-{\n-\tstruct mlx5_crypto_qp *qp = queue_pair;\n-\tstruct mlx5_crypto_priv *priv = qp->priv;\n-\tstruct mlx5_umr_wqe *umr;\n-\tstruct rte_crypto_op *op;\n-\tuint16_t mask = qp->entries_n - 1;\n-\tuint16_t remain = qp->entries_n - (qp->pi - qp->ci);\n-\tuint32_t idx;\n-\n-\tif (remain < nb_ops)\n-\t\tnb_ops = remain;\n-\telse\n-\t\tremain = nb_ops;\n-\tif (unlikely(remain == 0))\n-\t\treturn 0;\n-\tdo {\n-\t\tidx = qp->pi & mask;\n-\t\top = *ops++;\n-\t\tumr = RTE_PTR_ADD(qp->qp_obj.umem_buf,\n-\t\t\tpriv->wqe_set_size * idx);\n-\t\tif (unlikely(mlx5_crypto_wqe_set(priv, qp, op, umr) == 0)) {\n-\t\t\tqp->stats.enqueue_err_count++;\n-\t\t\tif (remain != nb_ops) {\n-\t\t\t\tqp->stats.enqueued_count -= remain;\n-\t\t\t\tbreak;\n-\t\t\t}\n-\t\t\treturn 0;\n-\t\t}\n-\t\tqp->ops[idx] = op;\n-\t\tqp->pi++;\n-\t} while (--remain);\n-\tqp->stats.enqueued_count += nb_ops;\n-\tmlx5_doorbell_ring(&priv->uar.bf_db, *(volatile uint64_t *)qp->wqe,\n-\t\t\t   qp->db_pi, &qp->qp_obj.db_rec[MLX5_SND_DBR],\n-\t\t\t   !priv->uar.dbnc);\n-\treturn nb_ops;\n-}\n-\n-static __rte_noinline void\n-mlx5_crypto_cqe_err_handle(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op)\n-{\n-\tconst uint32_t idx = qp->ci & (qp->entries_n - 1);\n-\tvolatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)\n-\t\t\t\t\t\t\t&qp->cq_obj.cqes[idx];\n-\n-\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n-\tqp->stats.dequeue_err_count++;\n-\tDRV_LOG(ERR, \"CQE ERR:%x.\\n\", rte_be_to_cpu_32(cqe->syndrome));\n-}\n-\n-static uint16_t\n-mlx5_crypto_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,\n-\t\t\t  uint16_t nb_ops)\n-{\n-\tstruct mlx5_crypto_qp *qp = queue_pair;\n-\tvolatile struct mlx5_cqe *restrict cqe;\n-\tstruct rte_crypto_op *restrict op;\n-\tconst unsigned int cq_size = qp->entries_n;\n-\tconst unsigned int mask = cq_size - 1;\n-\tuint32_t idx;\n-\tuint32_t next_idx = qp->ci & mask;\n-\tconst uint16_t max = RTE_MIN((uint16_t)(qp->pi - qp->ci), nb_ops);\n-\tuint16_t i = 0;\n-\tint ret;\n-\n-\tif (unlikely(max == 0))\n-\t\treturn 0;\n-\tdo {\n-\t\tidx = next_idx;\n-\t\tnext_idx = (qp->ci + 1) & mask;\n-\t\top = qp->ops[idx];\n-\t\tcqe = &qp->cq_obj.cqes[idx];\n-\t\tret = check_cqe(cqe, cq_size, qp->ci);\n-\t\trte_io_rmb();\n-\t\tif (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {\n-\t\t\tif (unlikely(ret != MLX5_CQE_STATUS_HW_OWN))\n-\t\t\t\tmlx5_crypto_cqe_err_handle(qp, op);\n-\t\t\tbreak;\n-\t\t}\n-\t\top->status = RTE_CRYPTO_OP_STATUS_SUCCESS;\n-\t\tops[i++] = op;\n-\t\tqp->ci++;\n-\t} while (i < max);\n-\tif (likely(i != 0)) {\n-\t\trte_io_wmb();\n-\t\tqp->cq_obj.db_rec[0] = rte_cpu_to_be_32(qp->ci);\n-\t\tqp->stats.dequeued_count += i;\n-\t}\n-\treturn i;\n-}\n-\n-static void\n-mlx5_crypto_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp)\n-{\n-\tuint32_t i;\n-\n-\tfor (i = 0 ; i < qp->entries_n; i++) {\n-\t\tstruct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->qp_obj.umem_buf,\n-\t\t\ti * priv->wqe_set_size);\n-\t\tstruct mlx5_wqe_umr_cseg *ucseg = (struct mlx5_wqe_umr_cseg *)\n-\t\t\t\t\t\t\t\t     (cseg + 1);\n-\t\tstruct mlx5_wqe_umr_bsf_seg *bsf =\n-\t\t\t(struct mlx5_wqe_umr_bsf_seg *)(RTE_PTR_ADD(cseg,\n-\t\t\t\t\t\t       priv->umr_wqe_size)) - 1;\n-\t\tstruct mlx5_wqe_rseg *rseg;\n-\n-\t\t/* Init UMR WQE. 
*/\n-\t\tcseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) |\n-\t\t\t\t\t (priv->umr_wqe_size / MLX5_WSEG_SIZE));\n-\t\tcseg->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<\n-\t\t\t\t       MLX5_COMP_MODE_OFFSET);\n-\t\tcseg->misc = rte_cpu_to_be_32(qp->mkey[i]->id);\n-\t\tucseg->if_cf_toe_cq_res = RTE_BE32(1u << MLX5_UMRC_IF_OFFSET);\n-\t\tucseg->mkey_mask = RTE_BE64(1u << 0); /* Mkey length bit. */\n-\t\tucseg->ko_to_bs = rte_cpu_to_be_32\n-\t\t\t((MLX5_CRYPTO_KLM_SEGS_NUM(priv->umr_wqe_size) <<\n-\t\t\t MLX5_UMRC_KO_OFFSET) | (4 << MLX5_UMRC_TO_BS_OFFSET));\n-\t\tbsf->keytag = priv->keytag;\n-\t\t/* Init RDMA WRITE WQE. */\n-\t\tcseg = RTE_PTR_ADD(cseg, priv->umr_wqe_size);\n-\t\tcseg->flags = RTE_BE32((MLX5_COMP_ALWAYS <<\n-\t\t\t\t      MLX5_COMP_MODE_OFFSET) |\n-\t\t\t\t      MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE);\n-\t\trseg = (struct mlx5_wqe_rseg *)(cseg + 1);\n-\t\trseg->rkey = rte_cpu_to_be_32(qp->mkey[i]->id);\n-\t}\n-}\n-\n-static int\n-mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv,\n-\t\t\t\t  struct mlx5_crypto_qp *qp)\n-{\n-\tstruct mlx5_umr_wqe *umr;\n-\tuint32_t i;\n-\tstruct mlx5_devx_mkey_attr attr = {\n-\t\t.pd = priv->cdev->pdn,\n-\t\t.umr_en = 1,\n-\t\t.crypto_en = 1,\n-\t\t.set_remote_rw = 1,\n-\t\t.klm_num = MLX5_CRYPTO_KLM_SEGS_NUM(priv->umr_wqe_size),\n-\t};\n-\n-\tfor (umr = (struct mlx5_umr_wqe *)qp->qp_obj.umem_buf, i = 0;\n-\t   i < qp->entries_n; i++, umr = RTE_PTR_ADD(umr, priv->wqe_set_size)) {\n-\t\tattr.klm_array = (struct mlx5_klm *)&umr->kseg[0];\n-\t\tqp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->cdev->ctx, &attr);\n-\t\tif (!qp->mkey[i])\n-\t\t\tgoto error;\n-\t}\n-\treturn 0;\n-error:\n-\tDRV_LOG(ERR, \"Failed to allocate indirect mkey.\");\n-\tmlx5_crypto_indirect_mkeys_release(qp, i);\n-\treturn -1;\n-}\n-\n-static int\n-mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,\n-\t\t\t     const struct rte_cryptodev_qp_conf *qp_conf,\n-\t\t\t     int socket_id)\n-{\n-\tstruct mlx5_crypto_priv *priv = dev->data->dev_private;\n-\tstruct mlx5_devx_qp_attr attr = {0};\n-\tstruct mlx5_crypto_qp *qp;\n-\tuint16_t log_nb_desc = rte_log2_u32(qp_conf->nb_descriptors);\n-\tuint32_t ret;\n-\tuint32_t alloc_size = sizeof(*qp);\n-\tuint32_t log_wqbb_n;\n-\tstruct mlx5_devx_cq_attr cq_attr = {\n-\t\t.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),\n-\t};\n-\n-\tif (dev->data->queue_pairs[qp_id] != NULL)\n-\t\tmlx5_crypto_queue_pair_release(dev, qp_id);\n-\talloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);\n-\talloc_size += (sizeof(struct rte_crypto_op *) +\n-\t\t       sizeof(struct mlx5_devx_obj *)) *\n-\t\t       RTE_BIT32(log_nb_desc);\n-\tqp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,\n-\t\t\t\tsocket_id);\n-\tif (qp == NULL) {\n-\t\tDRV_LOG(ERR, \"Failed to allocate QP memory.\");\n-\t\trte_errno = ENOMEM;\n-\t\treturn -rte_errno;\n-\t}\n-\tif (mlx5_devx_cq_create(priv->cdev->ctx, &qp->cq_obj, log_nb_desc,\n-\t\t\t\t&cq_attr, socket_id) != 0) {\n-\t\tDRV_LOG(ERR, \"Failed to create CQ.\");\n-\t\tgoto error;\n-\t}\n-\tlog_wqbb_n = rte_log2_u32(RTE_BIT32(log_nb_desc) *\n-\t\t\t\t(priv->wqe_set_size / MLX5_SEND_WQE_BB));\n-\tattr.pd = priv->cdev->pdn;\n-\tattr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj);\n-\tattr.cqn = qp->cq_obj.cq->id;\n-\tattr.num_of_receive_wqes = 0;\n-\tattr.num_of_send_wqbbs = RTE_BIT32(log_wqbb_n);\n-\tattr.ts_format =\n-\t\tmlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);\n-\tret = mlx5_devx_qp_create(priv->cdev->ctx, 
&qp->qp_obj,\n-\t\t\t\t\tattr.num_of_send_wqbbs * MLX5_WQE_SIZE,\n-\t\t\t\t\t&attr, socket_id);\n-\tif (ret) {\n-\t\tDRV_LOG(ERR, \"Failed to create QP.\");\n-\t\tgoto error;\n-\t}\n-\tif (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,\n-\t\t\t      priv->dev_config.socket_id) != 0) {\n-\t\tDRV_LOG(ERR, \"Cannot allocate MR Btree for qp %u.\",\n-\t\t\t(uint32_t)qp_id);\n-\t\trte_errno = ENOMEM;\n-\t\tgoto error;\n-\t}\n-\t/*\n-\t * In Order to configure self loopback, when calling devx qp2rts the\n-\t * remote QP id that is used is the id of the same QP.\n-\t */\n-\tif (mlx5_devx_qp2rts(&qp->qp_obj, qp->qp_obj.qp->id))\n-\t\tgoto error;\n-\tqp->mkey = (struct mlx5_devx_obj **)RTE_ALIGN((uintptr_t)(qp + 1),\n-\t\t\t\t\t\t\t   RTE_CACHE_LINE_SIZE);\n-\tqp->ops = (struct rte_crypto_op **)(qp->mkey + RTE_BIT32(log_nb_desc));\n-\tqp->entries_n = 1 << log_nb_desc;\n-\tif (mlx5_crypto_indirect_mkeys_prepare(priv, qp)) {\n-\t\tDRV_LOG(ERR, \"Cannot allocate indirect memory regions.\");\n-\t\trte_errno = ENOMEM;\n-\t\tgoto error;\n-\t}\n-\tmlx5_crypto_qp_init(priv, qp);\n-\tqp->priv = priv;\n-\tdev->data->queue_pairs[qp_id] = qp;\n-\treturn 0;\n-error:\n-\tmlx5_crypto_qp_release(qp);\n-\treturn -1;\n-}\n-\n static void\n mlx5_crypto_stats_get(struct rte_cryptodev *dev,\n \t\t      struct rte_cryptodev_stats *stats)\n@@ -691,10 +209,7 @@ static struct rte_cryptodev_ops mlx5_crypto_ops = {\n \t.dev_infos_get\t\t\t= mlx5_crypto_dev_infos_get,\n \t.stats_get\t\t\t= mlx5_crypto_stats_get,\n \t.stats_reset\t\t\t= mlx5_crypto_stats_reset,\n-\t.queue_pair_setup\t\t= mlx5_crypto_queue_pair_setup,\n-\t.queue_pair_release\t\t= mlx5_crypto_queue_pair_release,\n \t.sym_session_get_size\t\t= mlx5_crypto_sym_session_get_size,\n-\t.sym_session_configure\t\t= mlx5_crypto_sym_session_configure,\n \t.sym_session_clear\t\t= mlx5_crypto_sym_session_clear,\n \t.sym_get_raw_dp_ctx_size\t= NULL,\n \t.sym_configure_raw_dp_ctx\t= NULL,\n@@ -796,81 +311,6 @@ mlx5_crypto_parse_devargs(struct mlx5_kvargs_ctrl *mkvlist,\n \treturn 0;\n }\n \n-/*\n- * Calculate UMR WQE size and RDMA Write WQE size with the\n- * following limitations:\n- *\t- Each WQE size is multiple of 64.\n- *\t- The summarize of both UMR WQE and RDMA_W WQE is a power of 2.\n- *\t- The number of entries in the UMR WQE's KLM list is multiple of 4.\n- */\n-static void\n-mlx5_crypto_get_wqe_sizes(uint32_t segs_num, uint32_t *umr_size,\n-\t\t\tuint32_t *rdmaw_size)\n-{\n-\tuint32_t diff, wqe_set_size;\n-\n-\t*umr_size = MLX5_CRYPTO_UMR_WQE_STATIC_SIZE +\n-\t\t\tRTE_ALIGN(segs_num, 4) *\n-\t\t\tsizeof(struct mlx5_wqe_dseg);\n-\t/* Make sure UMR WQE size is multiple of WQBB. */\n-\t*umr_size = RTE_ALIGN(*umr_size, MLX5_SEND_WQE_BB);\n-\t*rdmaw_size = sizeof(struct mlx5_rdma_write_wqe) +\n-\t\t\tsizeof(struct mlx5_wqe_dseg) *\n-\t\t\t(segs_num <= 2 ? 2 : 2 +\n-\t\t\tRTE_ALIGN(segs_num - 2, 4));\n-\t/* Make sure RDMA_WRITE WQE size is multiple of WQBB. */\n-\t*rdmaw_size = RTE_ALIGN(*rdmaw_size, MLX5_SEND_WQE_BB);\n-\twqe_set_size = *rdmaw_size + *umr_size;\n-\tdiff = rte_align32pow2(wqe_set_size) - wqe_set_size;\n-\t/* Make sure wqe_set size is power of 2. 
*/\n-\tif (diff)\n-\t\t*umr_size += diff;\n-}\n-\n-static uint8_t\n-mlx5_crypto_max_segs_num(uint16_t max_wqe_size)\n-{\n-\tint klms_sizes = max_wqe_size - MLX5_CRYPTO_UMR_WQE_STATIC_SIZE;\n-\tuint32_t max_segs_cap = RTE_ALIGN_FLOOR(klms_sizes, MLX5_SEND_WQE_BB) /\n-\t\t\tsizeof(struct mlx5_wqe_dseg);\n-\n-\tMLX5_ASSERT(klms_sizes >= MLX5_SEND_WQE_BB);\n-\twhile (max_segs_cap) {\n-\t\tuint32_t umr_wqe_size, rdmw_wqe_size;\n-\n-\t\tmlx5_crypto_get_wqe_sizes(max_segs_cap, &umr_wqe_size,\n-\t\t\t\t\t\t&rdmw_wqe_size);\n-\t\tif (umr_wqe_size <= max_wqe_size &&\n-\t\t\t\trdmw_wqe_size <= max_wqe_size)\n-\t\t\tbreak;\n-\t\tmax_segs_cap -= 4;\n-\t}\n-\treturn max_segs_cap;\n-}\n-\n-static int\n-mlx5_crypto_configure_wqe_size(struct mlx5_crypto_priv *priv,\n-\t\t\t\tuint16_t max_wqe_size, uint32_t max_segs_num)\n-{\n-\tuint32_t rdmw_wqe_size, umr_wqe_size;\n-\n-\tmlx5_crypto_get_wqe_sizes(max_segs_num, &umr_wqe_size,\n-\t\t\t\t\t&rdmw_wqe_size);\n-\tpriv->wqe_set_size = rdmw_wqe_size + umr_wqe_size;\n-\tif (umr_wqe_size > max_wqe_size ||\n-\t\t\t\trdmw_wqe_size > max_wqe_size) {\n-\t\tDRV_LOG(ERR, \"Invalid max_segs_num: %u. should be %u or lower.\",\n-\t\t\tmax_segs_num,\n-\t\t\tmlx5_crypto_max_segs_num(max_wqe_size));\n-\t\trte_errno = EINVAL;\n-\t\treturn -EINVAL;\n-\t}\n-\tpriv->umr_wqe_size = (uint16_t)umr_wqe_size;\n-\tpriv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB;\n-\tpriv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg);\n-\treturn 0;\n-}\n-\n static int\n mlx5_crypto_dev_probe(struct mlx5_common_device *cdev,\n \t\t      struct mlx5_kvargs_ctrl *mkvlist)\n@@ -916,14 +356,18 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev,\n \tDRV_LOG(INFO,\n \t\t\"Crypto device %s was created successfully.\", ibdev_name);\n \tcrypto_dev->dev_ops = &mlx5_crypto_ops;\n-\tcrypto_dev->dequeue_burst = mlx5_crypto_dequeue_burst;\n-\tcrypto_dev->enqueue_burst = mlx5_crypto_enqueue_burst;\n \tcrypto_dev->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS(wrapped_mode);\n \tcrypto_dev->driver_id = mlx5_crypto_driver_id;\n \tpriv = crypto_dev->data->dev_private;\n \tpriv->cdev = cdev;\n \tpriv->crypto_dev = crypto_dev;\n \tpriv->is_wrapped_mode = wrapped_mode;\n+\tpriv->max_segs_num = devarg_prms.max_segs_num;\n+\tret = mlx5_crypto_xts_init(priv);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"Failed to init AES-XTS crypto.\");\n+\t\treturn -ENOTSUP;\n+\t}\n \tif (mlx5_devx_uar_prepare(cdev, &priv->uar) != 0) {\n \t\trte_cryptodev_pmd_destroy(priv->crypto_dev);\n \t\treturn -1;\n@@ -939,14 +383,6 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev,\n \t\t}\n \t\tpriv->login_obj = login;\n \t}\n-\tret = mlx5_crypto_configure_wqe_size(priv,\n-\t\tcdev->config.hca_attr.max_wqe_sz_sq, devarg_prms.max_segs_num);\n-\tif (ret) {\n-\t\tclaim_zero(mlx5_devx_cmd_destroy(priv->login_obj));\n-\t\tmlx5_devx_uar_release(&priv->uar);\n-\t\trte_cryptodev_pmd_destroy(priv->crypto_dev);\n-\t\treturn -1;\n-\t}\n \tpriv->keytag = rte_cpu_to_be_64(devarg_prms.keytag);\n \tDRV_LOG(INFO, \"Max number of segments: %u.\",\n \t\t(unsigned int)RTE_MIN(\ndiff --git a/drivers/crypto/mlx5/mlx5_crypto.h b/drivers/crypto/mlx5/mlx5_crypto.h\nindex a2771b3dab..05d8fe97fe 100644\n--- a/drivers/crypto/mlx5/mlx5_crypto.h\n+++ b/drivers/crypto/mlx5/mlx5_crypto.h\n@@ -31,6 +31,7 @@ struct mlx5_crypto_priv {\n \tstruct mlx5_uar uar; /* User Access Region. */\n \tuint32_t max_segs_num; /* Maximum supported data segs. */\n \tstruct mlx5_hlist *dek_hlist; /* Dek hash list. 
*/\n+\tconst struct rte_cryptodev_capabilities *caps;\n \tstruct rte_cryptodev_config dev_config;\n \tstruct mlx5_devx_obj *login_obj;\n \tuint64_t keytag;\n@@ -70,6 +71,35 @@ struct mlx5_crypto_devarg_params {\n \tuint32_t max_segs_num;\n };\n \n+struct mlx5_crypto_session {\n+\tuint32_t bs_bpt_eo_es;\n+\t/**< bsf_size, bsf_p_type, encryption_order and encryption standard,\n+\t * saved in big endian format.\n+\t */\n+\tuint32_t bsp_res;\n+\t/**< crypto_block_size_pointer and reserved 24 bits saved in big\n+\t * endian format.\n+\t */\n+\tuint32_t iv_offset:16;\n+\t/**< Starting point for Initialisation Vector. */\n+\tstruct mlx5_crypto_dek *dek; /**< Pointer to dek struct. */\n+\tuint32_t dek_id; /**< DEK ID */\n+} __rte_packed;\n+\n+typedef void *(*mlx5_crypto_mkey_update_t)(struct mlx5_crypto_priv *priv,\n+\t\t\t\t\t   struct mlx5_crypto_qp *qp,\n+\t\t\t\t\t   uint32_t idx);\n+\n+void\n+mlx5_crypto_indirect_mkeys_release(struct mlx5_crypto_qp *qp,\n+\t\t\t\t   uint16_t n);\n+\n+int\n+mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv,\n+\t\t\t\t   struct mlx5_crypto_qp *qp,\n+\t\t\t\t   struct mlx5_devx_mkey_attr *attr,\n+\t\t\t\t   mlx5_crypto_mkey_update_t update_cb);\n+\n int\n mlx5_crypto_dek_destroy(struct mlx5_crypto_priv *priv,\n \t\t\tstruct mlx5_crypto_dek *dek);\n@@ -84,4 +114,7 @@ mlx5_crypto_dek_setup(struct mlx5_crypto_priv *priv);\n void\n mlx5_crypto_dek_unset(struct mlx5_crypto_priv *priv);\n \n+int\n+mlx5_crypto_xts_init(struct mlx5_crypto_priv *priv);\n+\n #endif /* MLX5_CRYPTO_H_ */\ndiff --git a/drivers/crypto/mlx5/mlx5_crypto_xts.c b/drivers/crypto/mlx5/mlx5_crypto_xts.c\nnew file mode 100644\nindex 0000000000..964d02e6ed\n--- /dev/null\n+++ b/drivers/crypto/mlx5/mlx5_crypto_xts.c\n@@ -0,0 +1,594 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2023 NVIDIA Corporation & Affiliates\n+ */\n+\n+#include <rte_malloc.h>\n+#include <rte_mempool.h>\n+#include <rte_eal_paging.h>\n+#include <rte_errno.h>\n+#include <rte_log.h>\n+#include <bus_pci_driver.h>\n+#include <rte_memory.h>\n+\n+#include <mlx5_glue.h>\n+#include <mlx5_common.h>\n+#include <mlx5_devx_cmds.h>\n+#include <mlx5_common_os.h>\n+\n+#include \"mlx5_crypto_utils.h\"\n+#include \"mlx5_crypto.h\"\n+\n+const struct rte_cryptodev_capabilities mlx5_crypto_caps[] = {\n+\t{\t\t/* AES XTS */\n+\t\t.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,\n+\t\t{.sym = {\n+\t\t\t.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,\n+\t\t\t{.cipher = {\n+\t\t\t\t.algo = RTE_CRYPTO_CIPHER_AES_XTS,\n+\t\t\t\t.block_size = 16,\n+\t\t\t\t.key_size = {\n+\t\t\t\t\t.min = 32,\n+\t\t\t\t\t.max = 64,\n+\t\t\t\t\t.increment = 32\n+\t\t\t\t},\n+\t\t\t\t.iv_size = {\n+\t\t\t\t\t.min = 16,\n+\t\t\t\t\t.max = 16,\n+\t\t\t\t\t.increment = 0\n+\t\t\t\t},\n+\t\t\t\t.dataunit_set =\n+\t\t\t\tRTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES |\n+\t\t\t\tRTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES |\n+\t\t\t\tRTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES,\n+\t\t\t}, }\n+\t\t}, }\n+\t},\n+};\n+\n+static int\n+mlx5_crypto_xts_sym_session_configure(struct rte_cryptodev *dev,\n+\t\t\t\t      struct rte_crypto_sym_xform *xform,\n+\t\t\t\t      struct rte_cryptodev_sym_session *session)\n+{\n+\tstruct mlx5_crypto_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_crypto_session *sess_private_data =\n+\t\tCRYPTODEV_GET_SYM_SESS_PRIV(session);\n+\tstruct rte_crypto_cipher_xform *cipher;\n+\tuint8_t encryption_order;\n+\n+\tif (unlikely(xform->next != NULL)) {\n+\t\tDRV_LOG(ERR, \"Xform next is not supported.\");\n+\t\treturn -ENOTSUP;\n+\t}\n+\tif 
(unlikely((xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) ||\n+\t\t     (xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_XTS))) {\n+\t\tDRV_LOG(ERR, \"Only AES-XTS algorithm is supported.\");\n+\t\treturn -ENOTSUP;\n+\t}\n+\tcipher = &xform->cipher;\n+\tsess_private_data->dek = mlx5_crypto_dek_prepare(priv, cipher);\n+\tif (sess_private_data->dek == NULL) {\n+\t\tDRV_LOG(ERR, \"Failed to prepare dek.\");\n+\t\treturn -ENOMEM;\n+\t}\n+\tif (cipher->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)\n+\t\tencryption_order = MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_MEMORY;\n+\telse\n+\t\tencryption_order = MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_WIRE;\n+\tsess_private_data->bs_bpt_eo_es = rte_cpu_to_be_32\n+\t\t\t(MLX5_BSF_SIZE_64B << MLX5_BSF_SIZE_OFFSET |\n+\t\t\t MLX5_BSF_P_TYPE_CRYPTO << MLX5_BSF_P_TYPE_OFFSET |\n+\t\t\t encryption_order << MLX5_ENCRYPTION_ORDER_OFFSET |\n+\t\t\t MLX5_ENCRYPTION_STANDARD_AES_XTS);\n+\tswitch (xform->cipher.dataunit_len) {\n+\tcase 0:\n+\t\tsess_private_data->bsp_res = 0;\n+\t\tbreak;\n+\tcase 512:\n+\t\tsess_private_data->bsp_res = rte_cpu_to_be_32\n+\t\t\t\t\t     ((uint32_t)MLX5_BLOCK_SIZE_512B <<\n+\t\t\t\t\t     MLX5_BLOCK_SIZE_OFFSET);\n+\t\tbreak;\n+\tcase 4096:\n+\t\tsess_private_data->bsp_res = rte_cpu_to_be_32\n+\t\t\t\t\t     ((uint32_t)MLX5_BLOCK_SIZE_4096B <<\n+\t\t\t\t\t     MLX5_BLOCK_SIZE_OFFSET);\n+\t\tbreak;\n+\tcase 1048576:\n+\t\tsess_private_data->bsp_res = rte_cpu_to_be_32\n+\t\t\t\t\t     ((uint32_t)MLX5_BLOCK_SIZE_1MB <<\n+\t\t\t\t\t     MLX5_BLOCK_SIZE_OFFSET);\n+\t\tbreak;\n+\tdefault:\n+\t\tDRV_LOG(ERR, \"Cipher data unit length is not supported.\");\n+\t\treturn -ENOTSUP;\n+\t}\n+\tsess_private_data->iv_offset = cipher->iv.offset;\n+\tsess_private_data->dek_id =\n+\t\t\trte_cpu_to_be_32(sess_private_data->dek->obj->id &\n+\t\t\t\t\t 0xffffff);\n+\tDRV_LOG(DEBUG, \"Session %p was configured.\", sess_private_data);\n+\treturn 0;\n+}\n+\n+static void\n+mlx5_crypto_xts_qp_release(struct mlx5_crypto_qp *qp)\n+{\n+\tif (qp == NULL)\n+\t\treturn;\n+\tmlx5_devx_qp_destroy(&qp->qp_obj);\n+\tmlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);\n+\tmlx5_devx_cq_destroy(&qp->cq_obj);\n+\trte_free(qp);\n+}\n+\n+static int\n+mlx5_crypto_xts_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)\n+{\n+\tstruct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];\n+\n+\tmlx5_crypto_indirect_mkeys_release(qp, qp->entries_n);\n+\tmlx5_crypto_xts_qp_release(qp);\n+\tdev->data->queue_pairs[qp_id] = NULL;\n+\treturn 0;\n+}\n+\n+static __rte_noinline uint32_t\n+mlx5_crypto_xts_get_block_size(struct rte_crypto_op *op)\n+{\n+\tuint32_t bl = op->sym->cipher.data.length;\n+\n+\tswitch (bl) {\n+\tcase (1 << 20):\n+\t\treturn RTE_BE32(MLX5_BLOCK_SIZE_1MB << MLX5_BLOCK_SIZE_OFFSET);\n+\tcase (1 << 12):\n+\t\treturn RTE_BE32(MLX5_BLOCK_SIZE_4096B <<\n+\t\t\t\tMLX5_BLOCK_SIZE_OFFSET);\n+\tcase (1 << 9):\n+\t\treturn RTE_BE32(MLX5_BLOCK_SIZE_512B << MLX5_BLOCK_SIZE_OFFSET);\n+\tdefault:\n+\t\tDRV_LOG(ERR, \"Unknown block size: %u.\", bl);\n+\t\treturn UINT32_MAX;\n+\t}\n+}\n+\n+static __rte_always_inline uint32_t\n+mlx5_crypto_xts_klm_set(struct mlx5_crypto_qp *qp, struct rte_mbuf *mbuf,\n+\t\t\tstruct mlx5_wqe_dseg *klm, uint32_t offset,\n+\t\t\tuint32_t *remain)\n+{\n+\tuint32_t data_len = (rte_pktmbuf_data_len(mbuf) - offset);\n+\tuintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);\n+\n+\tif (data_len > *remain)\n+\t\tdata_len = *remain;\n+\t*remain -= data_len;\n+\tklm->bcount = rte_cpu_to_be_32(data_len);\n+\tklm->pbuf = rte_cpu_to_be_64(addr);\n+\tklm->lkey = 
mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf);\n+\treturn klm->lkey;\n+\n+}\n+\n+static __rte_always_inline uint32_t\n+mlx5_crypto_xts_klms_set(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op,\n+\t\t\t struct rte_mbuf *mbuf, struct mlx5_wqe_dseg *klm)\n+{\n+\tuint32_t remain_len = op->sym->cipher.data.length;\n+\tuint32_t nb_segs = mbuf->nb_segs;\n+\tuint32_t klm_n = 1u;\n+\n+\t/* First mbuf needs to take the cipher offset. */\n+\tif (unlikely(mlx5_crypto_xts_klm_set(qp, mbuf, klm,\n+\t\t     op->sym->cipher.data.offset, &remain_len) == UINT32_MAX)) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\treturn 0;\n+\t}\n+\twhile (remain_len) {\n+\t\tnb_segs--;\n+\t\tmbuf = mbuf->next;\n+\t\tif (unlikely(mbuf == NULL || nb_segs == 0)) {\n+\t\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\t\treturn 0;\n+\t\t}\n+\t\tif (unlikely(mlx5_crypto_xts_klm_set(qp, mbuf, ++klm, 0,\n+\t\t\t\t\t\t&remain_len) == UINT32_MAX)) {\n+\t\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\t\treturn 0;\n+\t\t}\n+\t\tklm_n++;\n+\t}\n+\treturn klm_n;\n+}\n+\n+static __rte_always_inline int\n+mlx5_crypto_xts_wqe_set(struct mlx5_crypto_priv *priv,\n+\t\t\t struct mlx5_crypto_qp *qp,\n+\t\t\t struct rte_crypto_op *op,\n+\t\t\t struct mlx5_umr_wqe *umr)\n+{\n+\tstruct mlx5_crypto_session *sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);\n+\tstruct mlx5_wqe_cseg *cseg = &umr->ctr;\n+\tstruct mlx5_wqe_mkey_cseg *mkc = &umr->mkc;\n+\tstruct mlx5_wqe_dseg *klms = &umr->kseg[0];\n+\tstruct mlx5_wqe_umr_bsf_seg *bsf = ((struct mlx5_wqe_umr_bsf_seg *)\n+\t\t\t\t      RTE_PTR_ADD(umr, priv->umr_wqe_size)) - 1;\n+\tuint32_t ds;\n+\tbool ipl = op->sym->m_dst == NULL || op->sym->m_dst == op->sym->m_src;\n+\t/* Set UMR WQE. */\n+\tuint32_t klm_n = mlx5_crypto_xts_klms_set(qp, op,\n+\t\t\t\t   ipl ? op->sym->m_src : op->sym->m_dst, klms);\n+\n+\tif (unlikely(klm_n == 0))\n+\t\treturn 0;\n+\tbsf->bs_bpt_eo_es = sess->bs_bpt_eo_es;\n+\tif (unlikely(!sess->bsp_res)) {\n+\t\tbsf->bsp_res = mlx5_crypto_xts_get_block_size(op);\n+\t\tif (unlikely(bsf->bsp_res == UINT32_MAX)) {\n+\t\t\top->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;\n+\t\t\treturn 0;\n+\t\t}\n+\t} else {\n+\t\tbsf->bsp_res = sess->bsp_res;\n+\t}\n+\tbsf->raw_data_size = rte_cpu_to_be_32(op->sym->cipher.data.length);\n+\tmemcpy(bsf->xts_initial_tweak,\n+\t       rte_crypto_op_ctod_offset(op, uint8_t *, sess->iv_offset), 16);\n+\tbsf->res_dp = sess->dek_id;\n+\tmkc->len = rte_cpu_to_be_64(op->sym->cipher.data.length);\n+\tcseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) | MLX5_OPCODE_UMR);\n+\tqp->db_pi += priv->umr_wqe_stride;\n+\t/* Set RDMA_WRITE WQE. */\n+\tcseg = RTE_PTR_ADD(cseg, priv->umr_wqe_size);\n+\tklms = RTE_PTR_ADD(cseg, sizeof(struct mlx5_rdma_write_wqe));\n+\tif (!ipl) {\n+\t\tklm_n = mlx5_crypto_xts_klms_set(qp, op, op->sym->m_src, klms);\n+\t\tif (unlikely(klm_n == 0))\n+\t\t\treturn 0;\n+\t} else {\n+\t\tmemcpy(klms, &umr->kseg[0], sizeof(*klms) * klm_n);\n+\t}\n+\tds = 2 + klm_n;\n+\tcseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);\n+\tcseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |\n+\t\t\t\t\t\t\tMLX5_OPCODE_RDMA_WRITE);\n+\tds = RTE_ALIGN(ds, 4);\n+\tqp->db_pi += ds >> 2;\n+\t/* Set NOP WQE if needed. */\n+\tif (priv->max_rdmar_ds > ds) {\n+\t\tcseg += ds;\n+\t\tds = priv->max_rdmar_ds - ds;\n+\t\tcseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);\n+\t\tcseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |\n+\t\t\t\t\t\t\t       MLX5_OPCODE_NOP);\n+\t\tqp->db_pi += ds >> 2; /* Here, DS is 4 aligned for sure. 
*/\n+\t}\n+\tqp->wqe = (uint8_t *)cseg;\n+\treturn 1;\n+}\n+\n+static uint16_t\n+mlx5_crypto_xts_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,\n+\t\t\t      uint16_t nb_ops)\n+{\n+\tstruct mlx5_crypto_qp *qp = queue_pair;\n+\tstruct mlx5_crypto_priv *priv = qp->priv;\n+\tstruct mlx5_umr_wqe *umr;\n+\tstruct rte_crypto_op *op;\n+\tuint16_t mask = qp->entries_n - 1;\n+\tuint16_t remain = qp->entries_n - (qp->pi - qp->ci);\n+\tuint32_t idx;\n+\n+\tif (remain < nb_ops)\n+\t\tnb_ops = remain;\n+\telse\n+\t\tremain = nb_ops;\n+\tif (unlikely(remain == 0))\n+\t\treturn 0;\n+\tdo {\n+\t\tidx = qp->pi & mask;\n+\t\top = *ops++;\n+\t\tumr = RTE_PTR_ADD(qp->qp_obj.umem_buf,\n+\t\t\tpriv->wqe_set_size * idx);\n+\t\tif (unlikely(mlx5_crypto_xts_wqe_set(priv, qp, op, umr) == 0)) {\n+\t\t\tqp->stats.enqueue_err_count++;\n+\t\t\tif (remain != nb_ops) {\n+\t\t\t\tqp->stats.enqueued_count -= remain;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t\treturn 0;\n+\t\t}\n+\t\tqp->ops[idx] = op;\n+\t\tqp->pi++;\n+\t} while (--remain);\n+\tqp->stats.enqueued_count += nb_ops;\n+\tmlx5_doorbell_ring(&priv->uar.bf_db, *(volatile uint64_t *)qp->wqe,\n+\t\t\t   qp->db_pi, &qp->qp_obj.db_rec[MLX5_SND_DBR],\n+\t\t\t   !priv->uar.dbnc);\n+\treturn nb_ops;\n+}\n+\n+static __rte_noinline void\n+mlx5_crypto_xts_cqe_err_handle(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op)\n+{\n+\tconst uint32_t idx = qp->ci & (qp->entries_n - 1);\n+\tvolatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)\n+\t\t\t\t\t\t\t&qp->cq_obj.cqes[idx];\n+\n+\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\tqp->stats.dequeue_err_count++;\n+\tDRV_LOG(ERR, \"CQE ERR:%x.\\n\", rte_be_to_cpu_32(cqe->syndrome));\n+}\n+\n+static uint16_t\n+mlx5_crypto_xts_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,\n+\t\t\t  uint16_t nb_ops)\n+{\n+\tstruct mlx5_crypto_qp *qp = queue_pair;\n+\tvolatile struct mlx5_cqe *restrict cqe;\n+\tstruct rte_crypto_op *restrict op;\n+\tconst unsigned int cq_size = qp->entries_n;\n+\tconst unsigned int mask = cq_size - 1;\n+\tuint32_t idx;\n+\tuint32_t next_idx = qp->ci & mask;\n+\tconst uint16_t max = RTE_MIN((uint16_t)(qp->pi - qp->ci), nb_ops);\n+\tuint16_t i = 0;\n+\tint ret;\n+\n+\tif (unlikely(max == 0))\n+\t\treturn 0;\n+\tdo {\n+\t\tidx = next_idx;\n+\t\tnext_idx = (qp->ci + 1) & mask;\n+\t\top = qp->ops[idx];\n+\t\tcqe = &qp->cq_obj.cqes[idx];\n+\t\tret = check_cqe(cqe, cq_size, qp->ci);\n+\t\trte_io_rmb();\n+\t\tif (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {\n+\t\t\tif (unlikely(ret != MLX5_CQE_STATUS_HW_OWN))\n+\t\t\t\tmlx5_crypto_xts_cqe_err_handle(qp, op);\n+\t\t\tbreak;\n+\t\t}\n+\t\top->status = RTE_CRYPTO_OP_STATUS_SUCCESS;\n+\t\tops[i++] = op;\n+\t\tqp->ci++;\n+\t} while (i < max);\n+\tif (likely(i != 0)) {\n+\t\trte_io_wmb();\n+\t\tqp->cq_obj.db_rec[0] = rte_cpu_to_be_32(qp->ci);\n+\t\tqp->stats.dequeued_count += i;\n+\t}\n+\treturn i;\n+}\n+\n+static void\n+mlx5_crypto_xts_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp)\n+{\n+\tuint32_t i;\n+\n+\tfor (i = 0 ; i < qp->entries_n; i++) {\n+\t\tstruct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->qp_obj.umem_buf,\n+\t\t\ti * priv->wqe_set_size);\n+\t\tstruct mlx5_wqe_umr_cseg *ucseg = (struct mlx5_wqe_umr_cseg *)\n+\t\t\t\t\t\t\t\t     (cseg + 1);\n+\t\tstruct mlx5_wqe_umr_bsf_seg *bsf =\n+\t\t\t(struct mlx5_wqe_umr_bsf_seg *)(RTE_PTR_ADD(cseg,\n+\t\t\t\t\t\t       priv->umr_wqe_size)) - 1;\n+\t\tstruct mlx5_wqe_rseg *rseg;\n+\n+\t\t/* Init UMR WQE. 
*/\n+\t\tcseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) |\n+\t\t\t\t\t (priv->umr_wqe_size / MLX5_WSEG_SIZE));\n+\t\tcseg->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<\n+\t\t\t\t       MLX5_COMP_MODE_OFFSET);\n+\t\tcseg->misc = rte_cpu_to_be_32(qp->mkey[i]->id);\n+\t\tucseg->if_cf_toe_cq_res = RTE_BE32(1u << MLX5_UMRC_IF_OFFSET);\n+\t\tucseg->mkey_mask = RTE_BE64(1u << 0); /* Mkey length bit. */\n+\t\tucseg->ko_to_bs = rte_cpu_to_be_32\n+\t\t\t((MLX5_CRYPTO_KLM_SEGS_NUM(priv->umr_wqe_size) <<\n+\t\t\t MLX5_UMRC_KO_OFFSET) | (4 << MLX5_UMRC_TO_BS_OFFSET));\n+\t\tbsf->keytag = priv->keytag;\n+\t\t/* Init RDMA WRITE WQE. */\n+\t\tcseg = RTE_PTR_ADD(cseg, priv->umr_wqe_size);\n+\t\tcseg->flags = RTE_BE32((MLX5_COMP_ALWAYS <<\n+\t\t\t\t      MLX5_COMP_MODE_OFFSET) |\n+\t\t\t\t      MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE);\n+\t\trseg = (struct mlx5_wqe_rseg *)(cseg + 1);\n+\t\trseg->rkey = rte_cpu_to_be_32(qp->mkey[i]->id);\n+\t}\n+}\n+\n+static void *\n+mlx5_crypto_gcm_mkey_klm_update(struct mlx5_crypto_priv *priv,\n+\t\t\t\tstruct mlx5_crypto_qp *qp,\n+\t\t\t\tuint32_t idx)\n+{\n+\treturn RTE_PTR_ADD(qp->qp_obj.umem_buf, priv->wqe_set_size * idx);\n+}\n+\n+static int\n+mlx5_crypto_xts_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,\n+\t\t\t\t const struct rte_cryptodev_qp_conf *qp_conf,\n+\t\t\t\t int socket_id)\n+{\n+\tstruct mlx5_crypto_priv *priv = dev->data->dev_private;\n+\tstruct mlx5_devx_qp_attr attr = {0};\n+\tstruct mlx5_crypto_qp *qp;\n+\tuint16_t log_nb_desc = rte_log2_u32(qp_conf->nb_descriptors);\n+\tuint32_t ret;\n+\tuint32_t alloc_size = sizeof(*qp);\n+\tuint32_t log_wqbb_n;\n+\tstruct mlx5_devx_cq_attr cq_attr = {\n+\t\t.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),\n+\t};\n+\tstruct mlx5_devx_mkey_attr mkey_attr = {\n+\t\t.pd = priv->cdev->pdn,\n+\t\t.umr_en = 1,\n+\t\t.crypto_en = 1,\n+\t\t.set_remote_rw = 1,\n+\t\t.klm_num = MLX5_CRYPTO_KLM_SEGS_NUM(priv->umr_wqe_size),\n+\t};\n+\n+\tif (dev->data->queue_pairs[qp_id] != NULL)\n+\t\tmlx5_crypto_xts_queue_pair_release(dev, qp_id);\n+\talloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);\n+\talloc_size += (sizeof(struct rte_crypto_op *) +\n+\t\t       sizeof(struct mlx5_devx_obj *)) *\n+\t\t       RTE_BIT32(log_nb_desc);\n+\tqp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,\n+\t\t\t\tsocket_id);\n+\tif (qp == NULL) {\n+\t\tDRV_LOG(ERR, \"Failed to allocate QP memory.\");\n+\t\trte_errno = ENOMEM;\n+\t\treturn -rte_errno;\n+\t}\n+\tif (mlx5_devx_cq_create(priv->cdev->ctx, &qp->cq_obj, log_nb_desc,\n+\t\t\t\t&cq_attr, socket_id) != 0) {\n+\t\tDRV_LOG(ERR, \"Failed to create CQ.\");\n+\t\tgoto error;\n+\t}\n+\tlog_wqbb_n = rte_log2_u32(RTE_BIT32(log_nb_desc) *\n+\t\t\t\t(priv->wqe_set_size / MLX5_SEND_WQE_BB));\n+\tattr.pd = priv->cdev->pdn;\n+\tattr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj);\n+\tattr.cqn = qp->cq_obj.cq->id;\n+\tattr.num_of_receive_wqes = 0;\n+\tattr.num_of_send_wqbbs = RTE_BIT32(log_wqbb_n);\n+\tattr.ts_format =\n+\t\tmlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);\n+\tret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp_obj,\n+\t\t\t\t\tattr.num_of_send_wqbbs * MLX5_WQE_SIZE,\n+\t\t\t\t\t&attr, socket_id);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"Failed to create QP.\");\n+\t\tgoto error;\n+\t}\n+\tif (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,\n+\t\t\t      priv->dev_config.socket_id) != 0) {\n+\t\tDRV_LOG(ERR, \"Cannot allocate MR Btree for qp %u.\",\n+\t\t\t(uint32_t)qp_id);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto 
error;\n+\t}\n+\t/*\n+\t * In Order to configure self loopback, when calling devx qp2rts the\n+\t * remote QP id that is used is the id of the same QP.\n+\t */\n+\tif (mlx5_devx_qp2rts(&qp->qp_obj, qp->qp_obj.qp->id))\n+\t\tgoto error;\n+\tqp->mkey = (struct mlx5_devx_obj **)RTE_ALIGN((uintptr_t)(qp + 1),\n+\t\t\t\t\t\t\t   RTE_CACHE_LINE_SIZE);\n+\tqp->ops = (struct rte_crypto_op **)(qp->mkey + RTE_BIT32(log_nb_desc));\n+\tqp->entries_n = 1 << log_nb_desc;\n+\tif (mlx5_crypto_indirect_mkeys_prepare(priv, qp, &mkey_attr,\n+\t\t\t\t\t       mlx5_crypto_gcm_mkey_klm_update)) {\n+\t\tDRV_LOG(ERR, \"Cannot allocate indirect memory regions.\");\n+\t\trte_errno = ENOMEM;\n+\t\tgoto error;\n+\t}\n+\tmlx5_crypto_xts_qp_init(priv, qp);\n+\tqp->priv = priv;\n+\tdev->data->queue_pairs[qp_id] = qp;\n+\treturn 0;\n+error:\n+\tmlx5_crypto_xts_qp_release(qp);\n+\treturn -1;\n+}\n+\n+/*\n+ * Calculate UMR WQE size and RDMA Write WQE size with the\n+ * following limitations:\n+ *\t- Each WQE size is multiple of 64.\n+ *\t- The summarize of both UMR WQE and RDMA_W WQE is a power of 2.\n+ *\t- The number of entries in the UMR WQE's KLM list is multiple of 4.\n+ */\n+static void\n+mlx5_crypto_xts_get_wqe_sizes(uint32_t segs_num, uint32_t *umr_size,\n+\t\t\t      uint32_t *rdmaw_size)\n+{\n+\tuint32_t diff, wqe_set_size;\n+\n+\t*umr_size = MLX5_CRYPTO_UMR_WQE_STATIC_SIZE +\n+\t\t\tRTE_ALIGN(segs_num, 4) *\n+\t\t\tsizeof(struct mlx5_wqe_dseg);\n+\t/* Make sure UMR WQE size is multiple of WQBB. */\n+\t*umr_size = RTE_ALIGN(*umr_size, MLX5_SEND_WQE_BB);\n+\t*rdmaw_size = sizeof(struct mlx5_rdma_write_wqe) +\n+\t\t\tsizeof(struct mlx5_wqe_dseg) *\n+\t\t\t(segs_num <= 2 ? 2 : 2 +\n+\t\t\tRTE_ALIGN(segs_num - 2, 4));\n+\t/* Make sure RDMA_WRITE WQE size is multiple of WQBB. */\n+\t*rdmaw_size = RTE_ALIGN(*rdmaw_size, MLX5_SEND_WQE_BB);\n+\twqe_set_size = *rdmaw_size + *umr_size;\n+\tdiff = rte_align32pow2(wqe_set_size) - wqe_set_size;\n+\t/* Make sure wqe_set size is power of 2. */\n+\tif (diff)\n+\t\t*umr_size += diff;\n+}\n+\n+static uint8_t\n+mlx5_crypto_xts_max_segs_num(uint16_t max_wqe_size)\n+{\n+\tint klms_sizes = max_wqe_size - MLX5_CRYPTO_UMR_WQE_STATIC_SIZE;\n+\tuint32_t max_segs_cap = RTE_ALIGN_FLOOR(klms_sizes, MLX5_SEND_WQE_BB) /\n+\t\t\tsizeof(struct mlx5_wqe_dseg);\n+\n+\tMLX5_ASSERT(klms_sizes >= MLX5_SEND_WQE_BB);\n+\twhile (max_segs_cap) {\n+\t\tuint32_t umr_wqe_size, rdmw_wqe_size;\n+\n+\t\tmlx5_crypto_xts_get_wqe_sizes(max_segs_cap, &umr_wqe_size,\n+\t\t\t\t\t\t&rdmw_wqe_size);\n+\t\tif (umr_wqe_size <= max_wqe_size &&\n+\t\t\t\trdmw_wqe_size <= max_wqe_size)\n+\t\t\tbreak;\n+\t\tmax_segs_cap -= 4;\n+\t}\n+\treturn max_segs_cap;\n+}\n+\n+static int\n+mlx5_crypto_xts_configure_wqe_size(struct mlx5_crypto_priv *priv,\n+\t\t\t\t   uint16_t max_wqe_size, uint32_t max_segs_num)\n+{\n+\tuint32_t rdmw_wqe_size, umr_wqe_size;\n+\n+\tmlx5_crypto_xts_get_wqe_sizes(max_segs_num, &umr_wqe_size,\n+\t\t\t&rdmw_wqe_size);\n+\tpriv->wqe_set_size = rdmw_wqe_size + umr_wqe_size;\n+\tif (umr_wqe_size > max_wqe_size ||\n+\t\t\t\trdmw_wqe_size > max_wqe_size) {\n+\t\tDRV_LOG(ERR, \"Invalid max_segs_num: %u. 
should be %u or lower.\",\n+\t\t\tmax_segs_num,\n+\t\t\tmlx5_crypto_xts_max_segs_num(max_wqe_size));\n+\t\trte_errno = EINVAL;\n+\t\treturn -EINVAL;\n+\t}\n+\tpriv->umr_wqe_size = (uint16_t)umr_wqe_size;\n+\tpriv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB;\n+\tpriv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg);\n+\treturn 0;\n+}\n+\n+int\n+mlx5_crypto_xts_init(struct mlx5_crypto_priv *priv)\n+{\n+\tstruct mlx5_common_device *cdev = priv->cdev;\n+\tstruct rte_cryptodev *crypto_dev = priv->crypto_dev;\n+\tstruct rte_cryptodev_ops *dev_ops = crypto_dev->dev_ops;\n+\tint ret;\n+\n+\tret = mlx5_crypto_xts_configure_wqe_size(priv,\n+\t\tcdev->config.hca_attr.max_wqe_sz_sq, priv->max_segs_num);\n+\tif (ret)\n+\t\treturn -EINVAL;\n+\t/* Override AES-XST specified ops. */\n+\tdev_ops->sym_session_configure = mlx5_crypto_xts_sym_session_configure;\n+\tdev_ops->queue_pair_setup = mlx5_crypto_xts_queue_pair_setup;\n+\tdev_ops->queue_pair_release = mlx5_crypto_xts_queue_pair_release;\n+\tcrypto_dev->dequeue_burst = mlx5_crypto_xts_dequeue_burst;\n+\tcrypto_dev->enqueue_burst = mlx5_crypto_xts_enqueue_burst;\n+\tpriv->caps = mlx5_crypto_caps;\n+\treturn 0;\n+}\n+\n",
    "prefixes": [
        "v4",
        "2/9"
    ]
}
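
The "mbox" field in the response points at the raw patch in mbox format, which is the easiest form to apply locally. A minimal sketch, again assuming the requests library, with git on PATH and a DPDK checkout as the working directory:

import subprocess

import requests

url = "https://patchwork.dpdk.org/api/patches/128861/"
patch = requests.get(url, headers={"Accept": "application/json"}).json()

# Fetch the mbox referenced by the response...
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("128861.mbox", "wb") as f:
    f.write(mbox.content)

# ...and apply it to the current branch.
subprocess.run(["git", "am", "128861.mbox"], check=True)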