get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (replaces all writable fields).

GET /api/patches/128866/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 128866,
    "url": "http://patchwork.dpdk.org/api/patches/128866/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20230620141115.841226-9-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230620141115.841226-9-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230620141115.841226-9-suanmingm@nvidia.com",
    "date": "2023-06-20T14:11:14",
    "name": "[v4,8/9] crypto/mlx5: add enqueue and dequeue operations",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "f6961413e4446a0270183860b5806acfefd37746",
    "submitter": {
        "id": 1887,
        "url": "http://patchwork.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patchwork.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20230620141115.841226-9-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 28586,
            "url": "http://patchwork.dpdk.org/api/series/28586/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=28586",
            "date": "2023-06-20T14:11:06",
            "name": "crypto/mlx5: support AES-GCM",
            "version": 4,
            "mbox": "http://patchwork.dpdk.org/series/28586/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/128866/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/128866/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 3126742D09;\n\tTue, 20 Jun 2023 16:13:02 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id DF5EA42D7E;\n\tTue, 20 Jun 2023 16:12:12 +0200 (CEST)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2081.outbound.protection.outlook.com [40.107.243.81])\n by mails.dpdk.org (Postfix) with ESMTP id 9988142D69\n for <dev@dpdk.org>; Tue, 20 Jun 2023 16:12:11 +0200 (CEST)",
            "from DM6PR11CA0036.namprd11.prod.outlook.com (2603:10b6:5:190::49)\n by MN2PR12MB4335.namprd12.prod.outlook.com (2603:10b6:208:1d4::13) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6500.37; Tue, 20 Jun\n 2023 14:12:09 +0000",
            "from MWH0EPF000989E6.namprd02.prod.outlook.com\n (2603:10b6:5:190:cafe::56) by DM6PR11CA0036.outlook.office365.com\n (2603:10b6:5:190::49) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.6521.21 via Frontend\n Transport; Tue, 20 Jun 2023 14:12:09 +0000",
            "from mail.nvidia.com (216.228.117.161) by\n MWH0EPF000989E6.mail.protection.outlook.com (10.167.241.133) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.6521.19 via Frontend Transport; Tue, 20 Jun 2023 14:12:08 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by mail.nvidia.com\n (10.129.200.67) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.5; Tue, 20 Jun 2023\n 07:11:56 -0700",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.37; Tue, 20 Jun\n 2023 07:11:53 -0700"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=kUZlml2gWqDASEV+x4TVKaGdsTamV5ZvpECQehGpw0hzuNVO6VRY1UeV3t5LFZjoxuuPLMc2Sc/586iAc/Y/8NGmHr0Agd5pZw13s5ExTebAY4kE/7mbMo3H3NA/1hmPYEibMn2Nir4vDeMng3xz/bVbOHwfWEwVSCW7kJkeSLwNKDcDXD/MhjDkOh0u4vxrPtGxhY/95j9ll23a17oG98F+79Q1aT10SyuOAZUJ+2oY3r4Sn2PcevgGQyXZ8ukEIBegDsRJuDqqgOFIHIxJredt1OPXWq2K9PjnPO9RFTn/ysc3YJRBolGKG69qdVS0Sp5XyMoj+AeE/Pet8WBftg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=O1wuIqYLTNQwxBvQ118EfMy3HkxF79ciIRale5eZUZA=;\n b=JReGg2So2oJ2kbiwNzb/0XyVmkAVYZLUPUmPSE+GmDIoKAaAhatVgH8p0Z94zE0wzS3/Tf31/FcbyVSia9eD5HHWtfpSQFPuGQj6nFmFoRvexS5PP5kdYzOIzLPl8fL2N8V2KzPfuSoAFWC8XWZlWnj9bYekR54jkny1WcWipkfJK5ttfuV4S8uaWOKTNV5f20YYn4gPxaWu2m6JEcpJikbqP/DC1tr+z5Eq13RJnIIgSGKFawq8KWJPgY7jWNH+iDbX+lZxouIK9D/8SUZ4P+9hXZ6hIjdtVYy8DmaBKjbpIBm7Nz2atuJX6ZfEsBpOhkaby+dhym5FA6Jk100PDA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.117.161) smtp.rcpttodomain=marvell.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=reject sp=reject pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=O1wuIqYLTNQwxBvQ118EfMy3HkxF79ciIRale5eZUZA=;\n b=WWQNtyApJX9wch0oihqRu6RTKXlgt6mORXW6SbCwBymC4ab7h4pPoOGUTAdrtVhHi/8891wzofS8pIG1TYcH1EZPnzAM+uBr3yJF/NxTq5lvmB35Rt+qstdVrSthJyXhMMvj8V0rRDL+Ombd3cIA77maL18wrbpsE36xoZKYXE1FtuaXSgSc3L7AFZYDs0XqZm2cdMCCHi4Nfrzf+SUX59xHuB68J3MIIYtoqphR4ZVORAQ//nKbHBRP7NT5FXO2v4HfJTd9UIU3oflxho+mFRoN/9WXTnquoAXkZbCbPmsF4LfLr+6s6Oqa9Yf2ZWLnqZii35DI8NM+tStM9Ws16Q==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.117.161)\n smtp.mailfrom=nvidia.com;\n dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.117.161 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.117.161; helo=mail.nvidia.com; pr=C",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<gakhil@marvell.com>, Matan Azrad <matan@nvidia.com>, Viacheslav Ovsiienko\n <viacheslavo@nvidia.com>, Ori Kam <orika@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <dev@dpdk.org>",
        "Subject": "[PATCH v4 8/9] crypto/mlx5: add enqueue and dequeue operations",
        "Date": "Tue, 20 Jun 2023 17:11:14 +0300",
        "Message-ID": "<20230620141115.841226-9-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230620141115.841226-1-suanmingm@nvidia.com>",
        "References": "<20230418092325.2578712-1-suanmingm@nvidia.com>\n <20230620141115.841226-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-TrafficTypeDiagnostic": "MWH0EPF000989E6:EE_|MN2PR12MB4335:EE_",
        "X-MS-Office365-Filtering-Correlation-Id": "7f074d4c-ad5f-466c-4e97-08db719855c2",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n cI1aCwtS8ZW3W544zyJ08xW682cXGb4ZnojpCvsL8FgJr/wmrs61RINoW3cB9tHTbKc0uagz5yU0N9/ExB2vvN71rPGb/VYnDsGlzNj7WQ0dAvCf6QGB7TxlYK1QnqWsrC1ct3+gBJs9DVMnOdEVxQHjIgea73yDuL+aUZUkoELL0ty+17TI8V98aIGmKMmdOHgOTJt99ZP0j2aacab5H15O9bIQtoloCYDQTZUjZyfy5oU/BGej7FakrUTOfKVm9CoMhW0ovwo20+O+6pD+iTwUOGv5Srwi0Ayi+LFxeZvdL/EkW5ubkkPvAFwP4u+T2qdiXgL3uFy1wRNXg3gEeBI2/Nva5Z7ikTzZew3pLfdmASGRw8kAcsHJEOqGG/HJsHeBPOdfDORL2dPqqUax45CRkpIey0cLO8MyV1CI7+E+2G7LYWR8wsS/QDA/iR90uV7xM5e2Dr3HaTkkgY+QWXss+jeJQCS3tuY6tHHuP7DOSbmcD/3MVBgiuV//ip67l+m+g9bdsRB6TpGmBL4CUSoEgbh/Rh54VPy8HRqhaD8nbqs5VdAf73z6F+QbKPvT4cAEIiLDLlM9hXbRgKbcUo8A+Xme581ae1y6j4H/6pfghn33DwSGC305M1EPXVVZwIIF8w+zfnz1TLBW7RR2WDbcWUyI2eck8oS/FLlnCm4z/rpsZzWRhXsTMaX3SXLLT8mRaShW2Tpukm7oRzagzMFKkVIQDejjWvnCgCikyfNHy4RL0KoSq8Y1XrHysnHW",
        "X-Forefront-Antispam-Report": "CIP:216.228.117.161; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:dc6edge2.nvidia.com; CAT:NONE;\n SFS:(13230028)(4636009)(376002)(346002)(39860400002)(396003)(136003)(451199021)(36840700001)(46966006)(40470700004)(36860700001)(82310400005)(47076005)(478600001)(7696005)(6666004)(83380400001)(336012)(110136005)(54906003)(2616005)(1076003)(426003)(26005)(186003)(16526019)(6286002)(2906002)(30864003)(5660300002)(40460700003)(36756003)(70206006)(356005)(6636002)(70586007)(82740400003)(55016003)(40480700001)(7636003)(4326008)(316002)(8936002)(8676002)(41300700001)(86362001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "20 Jun 2023 14:12:08.9884 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 7f074d4c-ad5f-466c-4e97-08db719855c2",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.117.161];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n MWH0EPF000989E6.namprd02.prod.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MN2PR12MB4335",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "The crypto operations are performed with crypto WQE. If the input\nbuffers(AAD, mbuf, digest) are not contiguous and there is no enough\nheadroom/tailroom for copying AAD/digest, as the requirement from FW,\nan UMR WQE is needed to generate contiguous address space for crypto\nWQE. The UMR WQE and crypto WQE are handled in two different QPs.\n\nCrypto operation with non-contiguous buffers will have its own UMR\nWQE, while the operation with contiguous buffers doesn't need the\nUMR WQE. Once the all the operations WQE in the enqueue burst built\nfinishes, if any UMR WQEs are built, an additional SEND_EN WQE will\nbe as the final WQE of the burst in the UMR QP. The purpose of that\nSEND_EN WQE is to trigger the crypto QP processing with the UMR ready\ninput memory address space buffers.\n\nThe QP for crypto operations contains only the crypto WQE and the QP\nWQEs are built as fixed in QP setup. The QP processing is triggered\nby doorbell ring or the SEND_EN WQE from UMR QP.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/common/mlx5/mlx5_prm.h        |   1 +\n drivers/crypto/mlx5/mlx5_crypto.c     |   9 +-\n drivers/crypto/mlx5/mlx5_crypto.h     |   8 +\n drivers/crypto/mlx5/mlx5_crypto_gcm.c | 588 ++++++++++++++++++++++++++\n 4 files changed, 604 insertions(+), 2 deletions(-)",
    "diff": "diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h\nindex 4f6925733a..d33d05238c 100644\n--- a/drivers/common/mlx5/mlx5_prm.h\n+++ b/drivers/common/mlx5/mlx5_prm.h\n@@ -617,6 +617,7 @@ struct mlx5_wqe_send_en_wqe {\n /* MMO metadata segment */\n \n #define\tMLX5_OPCODE_MMO\t0x2fu\n+#define\tMLX5_OPC_MOD_MMO_CRYPTO 0x6u\n #define\tMLX5_OPC_MOD_MMO_REGEX 0x4u\n #define\tMLX5_OPC_MOD_MMO_COMP 0x2u\n #define\tMLX5_OPC_MOD_MMO_DECOMP 0x3u\ndiff --git a/drivers/crypto/mlx5/mlx5_crypto.c b/drivers/crypto/mlx5/mlx5_crypto.c\nindex ff632cd69a..4d7d3ef2a3 100644\n--- a/drivers/crypto/mlx5/mlx5_crypto.c\n+++ b/drivers/crypto/mlx5/mlx5_crypto.c\n@@ -62,8 +62,13 @@ mlx5_crypto_dev_infos_get(struct rte_cryptodev *dev,\n \t\t\tMLX5_CRYPTO_FEATURE_FLAGS(priv->is_wrapped_mode);\n \t\tdev_info->capabilities = priv->caps;\n \t\tdev_info->max_nb_queue_pairs = MLX5_CRYPTO_MAX_QPS;\n-\t\tdev_info->min_mbuf_headroom_req = 0;\n-\t\tdev_info->min_mbuf_tailroom_req = 0;\n+\t\tif (priv->caps->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {\n+\t\t\tdev_info->min_mbuf_headroom_req = MLX5_CRYPTO_GCM_MAX_AAD;\n+\t\t\tdev_info->min_mbuf_tailroom_req = MLX5_CRYPTO_GCM_MAX_DIGEST;\n+\t\t} else {\n+\t\t\tdev_info->min_mbuf_headroom_req = 0;\n+\t\t\tdev_info->min_mbuf_tailroom_req = 0;\n+\t\t}\n \t\tdev_info->sym.max_nb_sessions = 0;\n \t\t/*\n \t\t * If 0, the device does not have any limitation in number of\ndiff --git a/drivers/crypto/mlx5/mlx5_crypto.h b/drivers/crypto/mlx5/mlx5_crypto.h\nindex 88a09a6b1c..6dcb41b27c 100644\n--- a/drivers/crypto/mlx5/mlx5_crypto.h\n+++ b/drivers/crypto/mlx5/mlx5_crypto.h\n@@ -23,6 +23,8 @@\n #define MLX5_CRYPTO_KLM_SEGS_NUM(umr_wqe_sz) ((umr_wqe_sz -\\\n \t\t\t\t\tMLX5_CRYPTO_UMR_WQE_STATIC_SIZE) /\\\n \t\t\t\t\tMLX5_WSEG_SIZE)\n+#define MLX5_CRYPTO_GCM_MAX_AAD 64\n+#define MLX5_CRYPTO_GCM_MAX_DIGEST 16\n \n struct mlx5_crypto_priv {\n \tTAILQ_ENTRY(mlx5_crypto_priv) next;\n@@ -61,6 +63,9 @@ struct mlx5_crypto_qp {\n 
\tuint8_t *wqe;\n \tuint16_t entries_n;\n \tuint16_t cq_entries_n;\n+\tuint16_t reported_ci;\n+\tuint16_t qp_ci;\n+\tuint16_t cq_ci;\n \tuint16_t pi;\n \tuint16_t ci;\n \tuint16_t db_pi;\n@@ -70,6 +75,9 @@ struct mlx5_crypto_qp {\n \tuint16_t umr_pi;\n \tuint16_t umr_ci;\n \tuint32_t umr_errors;\n+\tuint16_t last_gga_pi;\n+\tbool has_umr;\n+\tuint16_t cpy_tag_op;\n };\n \n struct mlx5_crypto_dek {\ndiff --git a/drivers/crypto/mlx5/mlx5_crypto_gcm.c b/drivers/crypto/mlx5/mlx5_crypto_gcm.c\nindex c3859547ee..8389c03c91 100644\n--- a/drivers/crypto/mlx5/mlx5_crypto_gcm.c\n+++ b/drivers/crypto/mlx5/mlx5_crypto_gcm.c\n@@ -9,6 +9,7 @@\n #include <rte_log.h>\n #include <bus_pci_driver.h>\n #include <rte_memory.h>\n+#include <rte_io.h>\n \n #include <mlx5_glue.h>\n #include <mlx5_common.h>\n@@ -32,6 +33,40 @@\n \t RTE_ALIGN(sizeof(struct mlx5_wqe_send_en_wqe), \\\n \t MLX5_SEND_WQE_BB))\n \n+#define MLX5_UMR_GCM_WQE_STRIDE \\\n+\t(MLX5_UMR_GCM_WQE_SIZE / MLX5_SEND_WQE_BB)\n+\n+#define MLX5_MMO_CRYPTO_OPC (MLX5_OPCODE_MMO | \\\n+\t(MLX5_OPC_MOD_MMO_CRYPTO << WQE_CSEG_OPC_MOD_OFFSET))\n+\n+/*\n+ * The status default value is RTE_CRYPTO_OP_STATUS_SUCCESS.\n+ * Copy tag should fill different value to status.\n+ */\n+#define MLX5_CRYPTO_OP_STATUS_GCM_TAG_COPY (RTE_CRYPTO_OP_STATUS_SUCCESS + 1)\n+\n+struct mlx5_crypto_gcm_op_info {\n+\tbool need_umr;\n+\tbool is_oop;\n+\tbool is_enc;\n+\tvoid *digest;\n+\tvoid *src_addr;\n+};\n+\n+struct mlx5_crypto_gcm_data {\n+\tvoid *src_addr;\n+\tuint32_t src_bytes;\n+\tvoid *dst_addr;\n+\tuint32_t dst_bytes;\n+\tuint32_t src_mkey;\n+\tuint32_t dst_mkey;\n+};\n+\n+struct mlx5_crypto_gcm_tag_cpy_info {\n+\tvoid *digest;\n+\tuint8_t tag_len;\n+} __rte_packed;\n+\n static struct rte_cryptodev_capabilities mlx5_crypto_gcm_caps[] = {\n \t{\n \t\t.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,\n@@ -328,6 +363,557 @@ mlx5_crypto_gcm_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,\n \treturn -1;\n }\n \n+static __rte_always_inline 
void\n+mlx5_crypto_gcm_get_op_info(struct mlx5_crypto_qp *qp,\n+\t\t\t    struct rte_crypto_op *op,\n+\t\t\t    struct mlx5_crypto_gcm_op_info *op_info)\n+{\n+\tstruct mlx5_crypto_session *sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);\n+\tstruct rte_mbuf *m_src = op->sym->m_src;\n+\tvoid *aad_addr = op->sym->aead.aad.data;\n+\tvoid *tag_addr = op->sym->aead.digest.data;\n+\tvoid *src_addr = rte_pktmbuf_mtod_offset(m_src, void *, op->sym->aead.data.offset);\n+\tstruct rte_mbuf *m_dst = m_src;\n+\tvoid *dst_addr = src_addr;\n+\tvoid *expected_aad = NULL;\n+\tvoid *expected_tag = NULL;\n+\tbool is_enc = sess->op_type == MLX5_CRYPTO_OP_TYPE_ENCRYPTION;\n+\tbool cp_aad = false;\n+\tbool cp_tag = false;\n+\n+\top_info->is_oop = false;\n+\top_info->need_umr = false;\n+\top_info->is_enc = is_enc;\n+\top_info->digest = NULL;\n+\top_info->src_addr = aad_addr;\n+\tif (op->sym->m_dst && op->sym->m_dst != m_src) {\n+\t\top_info->is_oop = true;\n+\t\tm_dst = op->sym->m_dst;\n+\t\tdst_addr = rte_pktmbuf_mtod_offset(m_dst, void *, op->sym->aead.data.offset);\n+\t\tif (m_dst->nb_segs > 1) {\n+\t\t\top_info->need_umr = true;\n+\t\t\treturn;\n+\t\t}\n+\t\t/*\n+\t\t * If the op's mbuf has extra data offset, don't copy AAD to\n+\t\t * this area.\n+\t\t */\n+\t\tif (rte_pktmbuf_headroom(m_dst) < sess->aad_len ||\n+\t\t    op->sym->aead.data.offset) {\n+\t\t\top_info->need_umr = true;\n+\t\t\treturn;\n+\t\t}\n+\t}\n+\tif (m_src->nb_segs > 1) {\n+\t\top_info->need_umr = true;\n+\t\treturn;\n+\t}\n+\texpected_aad = RTE_PTR_SUB(src_addr, sess->aad_len);\n+\tif (expected_aad != aad_addr) {\n+\t\t/*\n+\t\t * If the op's mbuf has extra data offset, don't copy AAD to\n+\t\t * this area.\n+\t\t */\n+\t\tif (sess->aad_len > MLX5_CRYPTO_GCM_MAX_AAD ||\n+\t\t    sess->aad_len > rte_pktmbuf_headroom(m_src) ||\n+\t\t    op->sym->aead.data.offset) {\n+\t\t\top_info->need_umr = true;\n+\t\t\treturn;\n+\t\t}\n+\t\tcp_aad = true;\n+\t\top_info->src_addr = expected_aad;\n+\t}\n+\texpected_tag = 
RTE_PTR_ADD(is_enc ? dst_addr : src_addr, op->sym->aead.data.length);\n+\tif (expected_tag != tag_addr) {\n+\t\tstruct rte_mbuf *mbuf = is_enc ? m_dst : m_src;\n+\n+\t\t/*\n+\t\t * If op's mbuf is not fully set as payload, don't copy digest to\n+\t\t * the left area.\n+\t\t */\n+\t\tif (rte_pktmbuf_tailroom(mbuf) < sess->tag_len ||\n+\t\t    rte_pktmbuf_data_len(mbuf) != op->sym->aead.data.length) {\n+\t\t\top_info->need_umr = true;\n+\t\t\treturn;\n+\t\t}\n+\t\tif (is_enc) {\n+\t\t\top_info->digest = expected_tag;\n+\t\t\tqp->cpy_tag_op++;\n+\t\t} else {\n+\t\t\tcp_tag = true;\n+\t\t}\n+\t}\n+\tif (cp_aad)\n+\t\tmemcpy(expected_aad, aad_addr, sess->aad_len);\n+\tif (cp_tag)\n+\t\tmemcpy(expected_tag, tag_addr, sess->tag_len);\n+}\n+\n+static __rte_always_inline uint32_t\n+_mlx5_crypto_gcm_umr_build_mbuf_klm(struct mlx5_crypto_qp *qp,\n+\t\t\t\t    struct rte_mbuf *mbuf,\n+\t\t\t\t    struct mlx5_klm *klm,\n+\t\t\t\t    uint32_t offset,\n+\t\t\t\t    uint32_t *remain)\n+{\n+\tuint32_t data_len = (rte_pktmbuf_data_len(mbuf) - offset);\n+\tuintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);\n+\n+\tif (data_len > *remain)\n+\t\tdata_len = *remain;\n+\t*remain -= data_len;\n+\tklm->byte_count = rte_cpu_to_be_32(data_len);\n+\tklm->address = rte_cpu_to_be_64(addr);\n+\tklm->mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf);\n+\treturn klm->mkey;\n+}\n+\n+static __rte_always_inline int\n+mlx5_crypto_gcm_build_mbuf_chain_klms(struct mlx5_crypto_qp *qp,\n+\t\t\t\t      struct rte_crypto_op *op,\n+\t\t\t\t      struct rte_mbuf *mbuf,\n+\t\t\t\t      struct mlx5_klm *klm)\n+{\n+\tuint32_t remain_len = op->sym->aead.data.length;\n+\t__rte_unused uint32_t nb_segs = mbuf->nb_segs;\n+\tuint32_t klm_n = 0;\n+\n+\t/* mbuf seg num should be less than max_segs_num. */\n+\tMLX5_ASSERT(nb_segs <= qp->priv->max_segs_num);\n+\t/* First mbuf needs to take the data offset. 
*/\n+\tif (unlikely(_mlx5_crypto_gcm_umr_build_mbuf_klm(qp, mbuf, klm,\n+\t\t     op->sym->aead.data.offset, &remain_len) == UINT32_MAX)) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\treturn 0;\n+\t}\n+\tklm++;\n+\tklm_n++;\n+\twhile (remain_len) {\n+\t\tnb_segs--;\n+\t\tmbuf = mbuf->next;\n+\t\tMLX5_ASSERT(mbuf && nb_segs);\n+\t\tif (unlikely(_mlx5_crypto_gcm_umr_build_mbuf_klm(qp, mbuf, klm,\n+\t\t\t\t\t\t0, &remain_len) == UINT32_MAX)) {\n+\t\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\t\treturn 0;\n+\t\t}\n+\t\tklm++;\n+\t\tklm_n++;\n+\t}\n+\treturn klm_n;\n+}\n+\n+static __rte_always_inline int\n+mlx5_crypto_gcm_build_klm_by_addr(struct mlx5_crypto_qp *qp,\n+\t\t\t\t  struct mlx5_klm *klm,\n+\t\t\t\t  void *addr,\n+\t\t\t\t  uint32_t len)\n+{\n+\tklm->byte_count = rte_cpu_to_be_32(len);\n+\tklm->address = rte_cpu_to_be_64((uintptr_t)addr);\n+\tklm->mkey = mlx5_mr_addr2mr_bh(&qp->mr_ctrl, (uintptr_t)addr);\n+\tif (klm->mkey == UINT32_MAX)\n+\t\treturn 0;\n+\treturn 1;\n+}\n+\n+static __rte_always_inline int\n+mlx5_crypto_gcm_build_op_klm(struct mlx5_crypto_qp *qp,\n+\t\t\t     struct rte_crypto_op *op,\n+\t\t\t     struct mlx5_crypto_gcm_op_info *op_info,\n+\t\t\t     struct mlx5_klm *klm,\n+\t\t\t     uint32_t *len)\n+{\n+\tstruct mlx5_crypto_session *sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);\n+\tstruct mlx5_klm *digest = NULL, *aad = NULL;\n+\tuint32_t total_len = op->sym->aead.data.length + sess->aad_len + sess->tag_len;\n+\tuint32_t klm_n = 0, klm_src = 0, klm_dst = 0;\n+\n+\t/* Build AAD KLM. */\n+\taad = klm;\n+\tif (!mlx5_crypto_gcm_build_klm_by_addr(qp, aad, op->sym->aead.aad.data, sess->aad_len))\n+\t\treturn 0;\n+\tklm_n++;\n+\t/* Build src mubf KLM. */\n+\tklm_src = mlx5_crypto_gcm_build_mbuf_chain_klms(qp, op, op->sym->m_src, &klm[klm_n]);\n+\tif (!klm_src)\n+\t\treturn 0;\n+\tklm_n += klm_src;\n+\t/* Reserve digest KLM if needed. 
*/\n+\tif (!op_info->is_oop ||\n+\t    sess->op_type == MLX5_CRYPTO_OP_TYPE_DECRYPTION) {\n+\t\tdigest = &klm[klm_n];\n+\t\tklm_n++;\n+\t}\n+\t/* Build dst mbuf KLM. */\n+\tif (op_info->is_oop) {\n+\t\tklm[klm_n] = *aad;\n+\t\tklm_n++;\n+\t\tklm_dst = mlx5_crypto_gcm_build_mbuf_chain_klms(qp, op, op->sym->m_dst,\n+\t\t\t\t\t\t\t\t&klm[klm_n]);\n+\t\tif (!klm_dst)\n+\t\t\treturn 0;\n+\t\tklm_n += klm_dst;\n+\t\ttotal_len += (op->sym->aead.data.length + sess->aad_len);\n+\t}\n+\t/* Update digest at the end if it is not set. */\n+\tif (!digest) {\n+\t\tdigest = &klm[klm_n];\n+\t\tklm_n++;\n+\t}\n+\t/* Build digest KLM. */\n+\tif (!mlx5_crypto_gcm_build_klm_by_addr(qp, digest, op->sym->aead.digest.data,\n+\t\t\t\t\t       sess->tag_len))\n+\t\treturn 0;\n+\t*len = total_len;\n+\treturn klm_n;\n+}\n+\n+static __rte_always_inline struct mlx5_wqe_cseg *\n+mlx5_crypto_gcm_get_umr_wqe(struct mlx5_crypto_qp *qp)\n+{\n+\tuint32_t wqe_offset = qp->umr_pi & (qp->umr_wqbbs - 1);\n+\tuint32_t left_wqbbs = qp->umr_wqbbs - wqe_offset;\n+\tstruct mlx5_wqe_cseg *wqe;\n+\n+\t/* If UMR WQE is near the boundary. */\n+\tif (left_wqbbs < MLX5_UMR_GCM_WQE_STRIDE) {\n+\t\t/* Append NOP WQE as the left WQEBBS is not enough for UMR. 
*/\n+\t\twqe = RTE_PTR_ADD(qp->umr_qp_obj.umem_buf, wqe_offset * MLX5_SEND_WQE_BB);\n+\t\twqe->opcode = rte_cpu_to_be_32(MLX5_OPCODE_NOP | ((uint32_t)qp->umr_pi << 8));\n+\t\twqe->sq_ds = rte_cpu_to_be_32((qp->umr_qp_obj.qp->id << 8) | (left_wqbbs << 2));\n+\t\twqe->flags = RTE_BE32(0);\n+\t\twqe->misc = RTE_BE32(0);\n+\t\tqp->umr_pi += left_wqbbs;\n+\t\twqe_offset = qp->umr_pi & (qp->umr_wqbbs - 1);\n+\t}\n+\twqe_offset *= MLX5_SEND_WQE_BB;\n+\treturn RTE_PTR_ADD(qp->umr_qp_obj.umem_buf, wqe_offset);\n+}\n+\n+static __rte_always_inline int\n+mlx5_crypto_gcm_build_umr(struct mlx5_crypto_qp *qp,\n+\t\t\t  struct rte_crypto_op *op,\n+\t\t\t  uint32_t idx,\n+\t\t\t  struct mlx5_crypto_gcm_op_info *op_info,\n+\t\t\t  struct mlx5_crypto_gcm_data *data)\n+{\n+\tstruct mlx5_crypto_priv *priv = qp->priv;\n+\tstruct mlx5_crypto_session *sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);\n+\tstruct mlx5_wqe_cseg *wqe;\n+\tstruct mlx5_wqe_umr_cseg *ucseg;\n+\tstruct mlx5_wqe_mkey_cseg *mkc;\n+\tstruct mlx5_klm *iklm;\n+\tstruct mlx5_klm *klm = &qp->klm_array[idx * priv->max_klm_num];\n+\tuint16_t klm_size, klm_align;\n+\tuint32_t total_len;\n+\n+\t/* Build KLM base on the op. */\n+\tklm_size = mlx5_crypto_gcm_build_op_klm(qp, op, op_info, klm, &total_len);\n+\tif (!klm_size)\n+\t\treturn -EINVAL;\n+\tklm_align = RTE_ALIGN(klm_size, 4);\n+\t/* Get UMR WQE memory. */\n+\twqe = mlx5_crypto_gcm_get_umr_wqe(qp);\n+\tmemset(wqe, 0, MLX5_UMR_GCM_WQE_SIZE);\n+\t/* Set WQE control seg. Non-inline KLM UMR WQE size must be 9 WQE_DS. */\n+\twqe->opcode = rte_cpu_to_be_32(MLX5_OPCODE_UMR | ((uint32_t)qp->umr_pi << 8));\n+\twqe->sq_ds = rte_cpu_to_be_32((qp->umr_qp_obj.qp->id << 8) | 9);\n+\twqe->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR << MLX5_COMP_MODE_OFFSET);\n+\twqe->misc = rte_cpu_to_be_32(qp->mkey[idx]->id);\n+\t/* Set UMR WQE control seg. 
*/\n+\tucseg = (struct mlx5_wqe_umr_cseg *)(wqe + 1);\n+\tucseg->mkey_mask |= RTE_BE64(1u << 0);\n+\tucseg->ko_to_bs = rte_cpu_to_be_32(klm_align << MLX5_UMRC_KO_OFFSET);\n+\t/* Set mkey context seg. */\n+\tmkc = (struct mlx5_wqe_mkey_cseg *)(ucseg + 1);\n+\tmkc->len = rte_cpu_to_be_64(total_len);\n+\tmkc->qpn_mkey = rte_cpu_to_be_32(0xffffff00 | (qp->mkey[idx]->id & 0xff));\n+\t/* Set UMR pointer to data seg. */\n+\tiklm = (struct mlx5_klm *)(mkc + 1);\n+\tiklm->address = rte_cpu_to_be_64((uintptr_t)((char *)klm));\n+\tiklm->mkey = rte_cpu_to_be_32(qp->mr.lkey);\n+\tdata->src_mkey = rte_cpu_to_be_32(qp->mkey[idx]->id);\n+\tdata->dst_mkey = data->src_mkey;\n+\tdata->src_addr = 0;\n+\tdata->src_bytes = sess->aad_len + op->sym->aead.data.length;\n+\tdata->dst_bytes = data->src_bytes;\n+\tif (op_info->is_enc)\n+\t\tdata->dst_bytes += sess->tag_len;\n+\telse\n+\t\tdata->src_bytes += sess->tag_len;\n+\tif (op_info->is_oop)\n+\t\tdata->dst_addr = (void *)(uintptr_t)(data->src_bytes);\n+\telse\n+\t\tdata->dst_addr = 0;\n+\t/* Clear the padding memory. */\n+\tmemset(&klm[klm_size], 0, sizeof(struct mlx5_klm) * (klm_align - klm_size));\n+\t/* Update PI and WQE */\n+\tqp->umr_pi += MLX5_UMR_GCM_WQE_STRIDE;\n+\tqp->umr_wqe = (uint8_t *)wqe;\n+\treturn 0;\n+}\n+\n+static __rte_always_inline void\n+mlx5_crypto_gcm_build_send_en(struct mlx5_crypto_qp *qp)\n+{\n+\tuint32_t wqe_offset = (qp->umr_pi & (qp->umr_wqbbs - 1)) * MLX5_SEND_WQE_BB;\n+\tstruct mlx5_wqe_cseg *cs = RTE_PTR_ADD(qp->umr_qp_obj.wqes, wqe_offset);\n+\tstruct mlx5_wqe_qseg *qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));\n+\n+\tcs->opcode = rte_cpu_to_be_32(MLX5_OPCODE_SEND_EN | ((uint32_t)qp->umr_pi << 8));\n+\tcs->sq_ds = rte_cpu_to_be_32((qp->umr_qp_obj.qp->id << 8) | 2);\n+\t/*\n+\t * No need to generate the SEND_EN CQE as we want only GGA CQE\n+\t * in the CQ normally. 
We can compare qp->last_send_gga_pi with\n+\t * qp->pi to know if all SEND_EN be consumed.\n+\t */\n+\tcs->flags = RTE_BE32((MLX5_COMP_ONLY_FIRST_ERR << MLX5_COMP_MODE_OFFSET) |\n+\t\t\tMLX5_WQE_CTRL_INITIATOR_SMALL_FENCE);\n+\tcs->misc = RTE_BE32(0);\n+\tqs->max_index = rte_cpu_to_be_32(qp->pi);\n+\tqs->qpn_cqn = rte_cpu_to_be_32(qp->qp_obj.qp->id);\n+\tqp->umr_wqe = (uint8_t *)cs;\n+\tqp->umr_pi += 1;\n+}\n+\n+static __rte_always_inline void\n+mlx5_crypto_gcm_wqe_set(struct mlx5_crypto_qp *qp,\n+\t\t\tstruct rte_crypto_op *op,\n+\t\t\tuint32_t idx,\n+\t\t\tstruct mlx5_crypto_gcm_data *data)\n+{\n+\tstruct mlx5_crypto_session *sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);\n+\tstruct mlx5_gga_wqe *wqe = &((struct mlx5_gga_wqe *)qp->qp_obj.wqes)[idx];\n+\tunion mlx5_gga_crypto_opaque *opaq = qp->opaque_addr;\n+\n+\tmemcpy(opaq[idx].cp.iv,\n+\t\trte_crypto_op_ctod_offset(op, uint8_t *, sess->iv_offset), sess->iv_len);\n+\topaq[idx].cp.tag_size = sess->wqe_tag_len;\n+\topaq[idx].cp.aad_size = sess->wqe_aad_len;\n+\t/* Update control seg. */\n+\twqe->opcode = rte_cpu_to_be_32(MLX5_MMO_CRYPTO_OPC + (qp->pi << 8));\n+\twqe->gga_ctrl1 = sess->mmo_ctrl;\n+\twqe->gga_ctrl2 = sess->dek_id;\n+\twqe->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR << MLX5_COMP_MODE_OFFSET);\n+\t/* Update op_info seg. */\n+\twqe->gather.bcount = rte_cpu_to_be_32(data->src_bytes);\n+\twqe->gather.lkey = data->src_mkey;\n+\twqe->gather.pbuf = rte_cpu_to_be_64((uintptr_t)data->src_addr);\n+\t/* Update output seg. 
*/\n+\twqe->scatter.bcount = rte_cpu_to_be_32(data->dst_bytes);\n+\twqe->scatter.lkey = data->dst_mkey;\n+\twqe->scatter.pbuf = rte_cpu_to_be_64((uintptr_t)data->dst_addr);\n+\tqp->wqe = (uint8_t *)wqe;\n+}\n+\n+static uint16_t\n+mlx5_crypto_gcm_enqueue_burst(void *queue_pair,\n+\t\t\t      struct rte_crypto_op **ops,\n+\t\t\t      uint16_t nb_ops)\n+{\n+\tstruct mlx5_crypto_qp *qp = queue_pair;\n+\tstruct mlx5_crypto_session *sess;\n+\tstruct mlx5_crypto_priv *priv = qp->priv;\n+\tstruct mlx5_crypto_gcm_tag_cpy_info *tag;\n+\tstruct mlx5_crypto_gcm_data gcm_data;\n+\tstruct rte_crypto_op *op;\n+\tstruct mlx5_crypto_gcm_op_info op_info;\n+\tuint16_t mask = qp->entries_n - 1;\n+\tuint16_t remain = qp->entries_n - (qp->pi - qp->qp_ci);\n+\tuint32_t idx;\n+\tuint16_t umr_cnt = 0;\n+\n+\tif (remain < nb_ops)\n+\t\tnb_ops = remain;\n+\telse\n+\t\tremain = nb_ops;\n+\tif (unlikely(remain == 0))\n+\t\treturn 0;\n+\tdo {\n+\t\top = *ops++;\n+\t\tsess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);\n+\t\tidx = qp->pi & mask;\n+\t\tmlx5_crypto_gcm_get_op_info(qp, op, &op_info);\n+\t\tif (!op_info.need_umr) {\n+\t\t\tgcm_data.src_addr = op_info.src_addr;\n+\t\t\tgcm_data.src_bytes = op->sym->aead.data.length + sess->aad_len;\n+\t\t\tgcm_data.src_mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, op->sym->m_src);\n+\t\t\tif (op_info.is_oop) {\n+\t\t\t\tgcm_data.dst_addr = RTE_PTR_SUB\n+\t\t\t\t\t(rte_pktmbuf_mtod_offset(op->sym->m_dst,\n+\t\t\t\t\t void *, op->sym->aead.data.offset), sess->aad_len);\n+\t\t\t\tgcm_data.dst_mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, op->sym->m_dst);\n+\t\t\t} else {\n+\t\t\t\tgcm_data.dst_addr = gcm_data.src_addr;\n+\t\t\t\tgcm_data.dst_mkey = gcm_data.src_mkey;\n+\t\t\t}\n+\t\t\tgcm_data.dst_bytes = gcm_data.src_bytes;\n+\t\t\tif (op_info.is_enc)\n+\t\t\t\tgcm_data.dst_bytes += sess->tag_len;\n+\t\t\telse\n+\t\t\t\tgcm_data.src_bytes += sess->tag_len;\n+\t\t} else {\n+\t\t\tif (unlikely(mlx5_crypto_gcm_build_umr(qp, op, idx,\n+\t\t\t\t\t\t\t&op_info, &gcm_data))) 
{\n+\t\t\t\tqp->stats.enqueue_err_count++;\n+\t\t\t\tif (remain != nb_ops) {\n+\t\t\t\t\tqp->stats.enqueued_count -= remain;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t\treturn 0;\n+\t\t\t}\n+\t\t\tumr_cnt++;\n+\t\t}\n+\t\tmlx5_crypto_gcm_wqe_set(qp, op, idx, &gcm_data);\n+\t\tif (op_info.digest) {\n+\t\t\ttag = (struct mlx5_crypto_gcm_tag_cpy_info *)op->sym->aead.digest.data;\n+\t\t\ttag->digest = op_info.digest;\n+\t\t\ttag->tag_len = sess->tag_len;\n+\t\t\top->status = MLX5_CRYPTO_OP_STATUS_GCM_TAG_COPY;\n+\t\t} else {\n+\t\t\top->status = RTE_CRYPTO_OP_STATUS_SUCCESS;\n+\t\t}\n+\t\tqp->ops[idx] = op;\n+\t\tqp->pi++;\n+\t} while (--remain);\n+\tqp->stats.enqueued_count += nb_ops;\n+\t/* Update the last GGA cseg with COMP. */\n+\t((struct mlx5_wqe_cseg *)qp->wqe)->flags =\n+\t\tRTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);\n+\t/* Only when there are no pending SEND_EN WQEs in background. */\n+\tif (!umr_cnt && !qp->has_umr) {\n+\t\tmlx5_doorbell_ring(&priv->uar.bf_db, *(volatile uint64_t *)qp->wqe,\n+\t\t\t\t   qp->pi, &qp->qp_obj.db_rec[MLX5_SND_DBR],\n+\t\t\t\t   !priv->uar.dbnc);\n+\t} else {\n+\t\tmlx5_crypto_gcm_build_send_en(qp);\n+\t\tmlx5_doorbell_ring(&priv->uar.bf_db, *(volatile uint64_t *)qp->umr_wqe,\n+\t\t\t\t   qp->umr_pi, &qp->umr_qp_obj.db_rec[MLX5_SND_DBR],\n+\t\t\t\t   !priv->uar.dbnc);\n+\t\tqp->last_gga_pi = qp->pi;\n+\t\tqp->has_umr = true;\n+\t}\n+\treturn nb_ops;\n+}\n+\n+static __rte_noinline void\n+mlx5_crypto_gcm_cqe_err_handle(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op)\n+{\n+\tuint8_t op_code;\n+\tconst uint32_t idx = qp->cq_ci & (qp->entries_n - 1);\n+\tvolatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)\n+\t\t\t\t\t\t\t&qp->cq_obj.cqes[idx];\n+\n+\top_code = rte_be_to_cpu_32(cqe->s_wqe_opcode_qpn) >> MLX5_CQ_INDEX_WIDTH;\n+\tDRV_LOG(ERR, \"CQE ERR:0x%x, Vendor_ERR:0x%x, OP:0x%x, QPN:0x%x, WQE_CNT:0x%x\",\n+\t\tcqe->syndrome, cqe->vendor_err_synd, op_code,\n+\t\t(rte_be_to_cpu_32(cqe->s_wqe_opcode_qpn) & 
0xffffff),\n+\t\trte_be_to_cpu_16(cqe->wqe_counter));\n+\tif (op && op_code == MLX5_OPCODE_MMO) {\n+\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\tqp->stats.dequeue_err_count++;\n+\t}\n+}\n+\n+static __rte_always_inline void\n+mlx5_crypto_gcm_fill_op(struct mlx5_crypto_qp *qp,\n+\t\t\tstruct rte_crypto_op **ops,\n+\t\t\tuint16_t orci,\n+\t\t\tuint16_t rci,\n+\t\t\tuint16_t op_mask)\n+{\n+\tuint16_t n;\n+\n+\torci &= op_mask;\n+\trci &= op_mask;\n+\tif (unlikely(orci > rci)) {\n+\t\tn = op_mask - orci + 1;\n+\t\tmemcpy(ops, &qp->ops[orci], n * sizeof(*ops));\n+\t\torci = 0;\n+\t} else {\n+\t\tn = 0;\n+\t}\n+\t/* rci can be 0 here, memcpy will skip that. */\n+\tmemcpy(&ops[n], &qp->ops[orci], (rci - orci) * sizeof(*ops));\n+}\n+\n+static __rte_always_inline void\n+mlx5_crypto_gcm_cpy_tag(struct mlx5_crypto_qp *qp,\n+\t\t\tuint16_t orci,\n+\t\t\tuint16_t rci,\n+\t\t\tuint16_t op_mask)\n+{\n+\tstruct rte_crypto_op *op;\n+\tstruct mlx5_crypto_gcm_tag_cpy_info *tag;\n+\n+\twhile (qp->cpy_tag_op && orci != rci) {\n+\t\top = qp->ops[orci & op_mask];\n+\t\tif (op->status == MLX5_CRYPTO_OP_STATUS_GCM_TAG_COPY) {\n+\t\t\ttag = (struct mlx5_crypto_gcm_tag_cpy_info *)op->sym->aead.digest.data;\n+\t\t\tmemcpy(op->sym->aead.digest.data, tag->digest, tag->tag_len);\n+\t\t\top->status = RTE_CRYPTO_OP_STATUS_SUCCESS;\n+\t\t\tqp->cpy_tag_op--;\n+\t\t}\n+\t\torci++;\n+\t}\n+}\n+\n+static uint16_t\n+mlx5_crypto_gcm_dequeue_burst(void *queue_pair,\n+\t\t\t      struct rte_crypto_op **ops,\n+\t\t\t      uint16_t nb_ops)\n+{\n+\tstruct mlx5_crypto_qp *qp = queue_pair;\n+\tvolatile struct mlx5_cqe *restrict cqe;\n+\tconst unsigned int cq_size = qp->cq_entries_n;\n+\tconst unsigned int mask = cq_size - 1;\n+\tconst unsigned int op_mask = qp->entries_n - 1;\n+\tuint32_t idx;\n+\tuint32_t next_idx = qp->cq_ci & mask;\n+\tuint16_t reported_ci = qp->reported_ci;\n+\tuint16_t qp_ci = qp->qp_ci;\n+\tconst uint16_t max = RTE_MIN((uint16_t)(qp->pi - reported_ci), nb_ops);\n+\tuint16_t op_num = 
0;\n+\tint ret;\n+\n+\tif (unlikely(max == 0))\n+\t\treturn 0;\n+\twhile (qp_ci - reported_ci < max) {\n+\t\tidx = next_idx;\n+\t\tnext_idx = (qp->cq_ci + 1) & mask;\n+\t\tcqe = &qp->cq_obj.cqes[idx];\n+\t\tret = check_cqe(cqe, cq_size, qp->cq_ci);\n+\t\tif (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {\n+\t\t\tif (unlikely(ret != MLX5_CQE_STATUS_HW_OWN))\n+\t\t\t\tmlx5_crypto_gcm_cqe_err_handle(qp,\n+\t\t\t\t\t\tqp->ops[reported_ci & op_mask]);\n+\t\t\tbreak;\n+\t\t}\n+\t\tqp_ci = rte_be_to_cpu_16(cqe->wqe_counter) + 1;\n+\t\tif (qp->has_umr &&\n+\t\t    (qp->last_gga_pi + 1) == qp_ci)\n+\t\t\tqp->has_umr = false;\n+\t\tqp->cq_ci++;\n+\t}\n+\t/* If wqe_counter changed, means CQE handled. */\n+\tif (likely(qp->qp_ci != qp_ci)) {\n+\t\tqp->qp_ci = qp_ci;\n+\t\trte_io_wmb();\n+\t\tqp->cq_obj.db_rec[0] = rte_cpu_to_be_32(qp->cq_ci);\n+\t}\n+\t/* If reported_ci is not same with qp_ci, means op retrieved. */\n+\tif (qp_ci != reported_ci) {\n+\t\top_num = RTE_MIN((uint16_t)(qp_ci - reported_ci), max);\n+\t\treported_ci += op_num;\n+\t\tmlx5_crypto_gcm_cpy_tag(qp, qp->reported_ci, reported_ci, op_mask);\n+\t\tmlx5_crypto_gcm_fill_op(qp, ops, qp->reported_ci, reported_ci, op_mask);\n+\t\tqp->stats.dequeued_count += op_num;\n+\t\tqp->reported_ci = reported_ci;\n+\t}\n+\treturn op_num;\n+}\n+\n int\n mlx5_crypto_gcm_init(struct mlx5_crypto_priv *priv)\n {\n@@ -339,6 +925,8 @@ mlx5_crypto_gcm_init(struct mlx5_crypto_priv *priv)\n \tmlx5_os_set_reg_mr_cb(&priv->reg_mr_cb, &priv->dereg_mr_cb);\n \tdev_ops->queue_pair_setup = mlx5_crypto_gcm_qp_setup;\n \tdev_ops->queue_pair_release = mlx5_crypto_gcm_qp_release;\n+\tcrypto_dev->dequeue_burst = mlx5_crypto_gcm_dequeue_burst;\n+\tcrypto_dev->enqueue_burst = mlx5_crypto_gcm_enqueue_burst;\n \tpriv->max_klm_num = RTE_ALIGN((priv->max_segs_num + 1) * 2 + 1, MLX5_UMR_KLM_NUM_ALIGN);\n \tpriv->caps = mlx5_crypto_gcm_caps;\n \treturn 0;\n",
    "prefixes": [
        "v4",
        "8/9"
    ]
}