get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are modified).

put:
Update a patch (full update; all writable fields are replaced).

GET /api/patches/128818/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 128818,
    "url": "http://patchwork.dpdk.org/api/patches/128818/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20230620052735.3969606-6-ktejasree@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230620052735.3969606-6-ktejasree@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230620052735.3969606-6-ktejasree@marvell.com",
    "date": "2023-06-20T05:27:32",
    "name": "[v2,5/8] crypto/cnxk: add support for raw APIs",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "5d8ef6538d982761ff465ddd1cc226f3d3b8d62b",
    "submitter": {
        "id": 1789,
        "url": "http://patchwork.dpdk.org/api/people/1789/?format=api",
        "name": "Tejasree Kondoj",
        "email": "ktejasree@marvell.com"
    },
    "delegate": {
        "id": 6690,
        "url": "http://patchwork.dpdk.org/api/users/6690/?format=api",
        "username": "akhil",
        "first_name": "akhil",
        "last_name": "goyal",
        "email": "gakhil@marvell.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20230620052735.3969606-6-ktejasree@marvell.com/mbox/",
    "series": [
        {
            "id": 28570,
            "url": "http://patchwork.dpdk.org/api/series/28570/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=28570",
            "date": "2023-06-20T05:27:27",
            "name": "fixes and improvements to CNXK crypto PMD",
            "version": 2,
            "mbox": "http://patchwork.dpdk.org/series/28570/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/128818/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/128818/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id F0D8142D02;\n\tTue, 20 Jun 2023 07:28:15 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 3BF3142D31;\n\tTue, 20 Jun 2023 07:27:54 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n [67.231.148.174])\n by mails.dpdk.org (Postfix) with ESMTP id 58D8642D10\n for <dev@dpdk.org>; Tue, 20 Jun 2023 07:27:53 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n by mx0a-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 35K4hFYW026986 for <dev@dpdk.org>; Mon, 19 Jun 2023 22:27:52 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3rb5b303gg-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Mon, 19 Jun 2023 22:27:52 -0700",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Mon, 19 Jun 2023 22:27:50 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Mon, 19 Jun 2023 22:27:50 -0700",
            "from hyd1554.marvell.com (unknown [10.29.57.11])\n by maili.marvell.com (Postfix) with ESMTP id 9F1E73F706F;\n Mon, 19 Jun 2023 22:27:48 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-transfer-encoding : content-type; s=pfpt0220;\n bh=06B4m2jQpW2ywuOeqRcL03mOV5UQEevFkmLL8qwbElI=;\n b=X9RseQqj8lwx98aOiwrrisW+j+0edSn6aIgJ+2gX4ajjEpxCtvys9kwQcVfWAQ0RCCK1\n pjTqPx3g/MC8pUH5rCUfd/IHl5687Wt9pwvELBHq9LSDuK4IjDGqZ1KLdqfxRYPRPSxm\n G3v9wTtuaPceDg88mz+cqzB19Li+Y7yhe42mINrKscCxv7bT56A/f/jqsmBh96natcjq\n kGbotufNoKJjdKABpdCZNuGmZMbTvxtQZ/njmIE8yCXA4q417RBqmTibWlOKrKyjrYl3\n Fr3v92FuRsCw6Vq/FCMkXpZKhi+pxspYWBx7vQnyVHNQkqy99SjTwobln/6AReiTAfON ew==",
        "From": "Tejasree Kondoj <ktejasree@marvell.com>",
        "To": "Akhil Goyal <gakhil@marvell.com>",
        "CC": "Anoob Joseph <anoobj@marvell.com>, Aakash Sasidharan\n <asasidharan@marvell.com>, Gowrishankar Muthukrishnan\n <gmuthukrishn@marvell.com>,\n Vidya Sagar Velumuri <vvelumuri@marvell.com>, <dev@dpdk.org>",
        "Subject": "[PATCH v2 5/8] crypto/cnxk: add support for raw APIs",
        "Date": "Tue, 20 Jun 2023 10:57:32 +0530",
        "Message-ID": "<20230620052735.3969606-6-ktejasree@marvell.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230620052735.3969606-1-ktejasree@marvell.com>",
        "References": "<20230620052735.3969606-1-ktejasree@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-GUID": "w0_G3l8rlLpGJBCAN3UYh4puLscT5mRd",
        "X-Proofpoint-ORIG-GUID": "w0_G3l8rlLpGJBCAN3UYh4puLscT5mRd",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.254,Aquarius:18.0.957,Hydra:6.0.591,FMLib:17.11.176.26\n definitions=2023-06-20_02,2023-06-16_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "From: Anoob Joseph <anoobj@marvell.com>\n\nAdd crypto RAW API support in cnxk PMD\nEnable the flag to allow execution of raw test suite.\n\nSigned-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>\nSigned-off-by: Anoob Joseph <anoobj@marvell.com>\n---\n doc/guides/rel_notes/release_23_07.rst    |   1 +\n drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 459 ++++++++++++++++++++++\n drivers/crypto/cnxk/cnxk_cryptodev.c      |  20 +-\n drivers/crypto/cnxk/cnxk_cryptodev_ops.h  |   1 +\n drivers/crypto/cnxk/cnxk_se.h             | 293 ++++++++++++++\n 5 files changed, 761 insertions(+), 13 deletions(-)",
    "diff": "diff --git a/doc/guides/rel_notes/release_23_07.rst b/doc/guides/rel_notes/release_23_07.rst\nindex 027ae7bd2d..bd41f49458 100644\n--- a/doc/guides/rel_notes/release_23_07.rst\n+++ b/doc/guides/rel_notes/release_23_07.rst\n@@ -154,6 +154,7 @@ New Features\n   * Added support for PDCP chain in cn10k crypto driver.\n   * Added support for SM3 hash operations.\n   * Added support for AES-CCM in cn9k and cn10k drivers.\n+  * Added support for RAW cryptodev APIs in cn10k driver.\n \n * **Updated OpenSSL crypto driver.**\n \ndiff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c\nindex e405a2ad9f..47b0e3a6f3 100644\n--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c\n+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c\n@@ -1064,6 +1064,461 @@ cn10k_cpt_dev_info_get(struct rte_cryptodev *dev,\n \t}\n }\n \n+static inline int\n+cn10k_cpt_raw_fill_inst(struct cnxk_iov *iov, struct cnxk_cpt_qp *qp,\n+\t\t\tstruct cnxk_sym_dp_ctx *dp_ctx, struct cpt_inst_s inst[],\n+\t\t\tstruct cpt_inflight_req *infl_req, void *opaque, const bool is_sg_ver2)\n+{\n+\tstruct cnxk_se_sess *sess;\n+\tint ret;\n+\n+\tconst union cpt_res_s res = {\n+\t\t.cn10k.compcode = CPT_COMP_NOT_DONE,\n+\t};\n+\n+\tinst[0].w0.u64 = 0;\n+\tinst[0].w2.u64 = 0;\n+\tinst[0].w3.u64 = 0;\n+\n+\tsess = dp_ctx->sess;\n+\n+\tswitch (sess->dp_thr_type) {\n+\tcase CPT_DP_THREAD_TYPE_PT:\n+\t\tret = fill_raw_passthrough_params(iov, inst);\n+\t\tbreak;\n+\tcase CPT_DP_THREAD_TYPE_FC_CHAIN:\n+\t\tret = fill_raw_fc_params(iov, sess, &qp->meta_info, infl_req, &inst[0], false,\n+\t\t\t\t\t false, is_sg_ver2);\n+\t\tbreak;\n+\tcase CPT_DP_THREAD_TYPE_FC_AEAD:\n+\t\tret = fill_raw_fc_params(iov, sess, &qp->meta_info, infl_req, &inst[0], false, true,\n+\t\t\t\t\t is_sg_ver2);\n+\t\tbreak;\n+\tcase CPT_DP_THREAD_AUTH_ONLY:\n+\t\tret = fill_raw_digest_params(iov, sess, &qp->meta_info, infl_req, &inst[0],\n+\t\t\t\t\t     is_sg_ver2);\n+\t\tbreak;\n+\tdefault:\n+\t\tret = 
-EINVAL;\n+\t}\n+\n+\tif (unlikely(ret))\n+\t\treturn 0;\n+\n+\tinst[0].res_addr = (uint64_t)&infl_req->res;\n+\t__atomic_store_n(&infl_req->res.u64[0], res.u64[0], __ATOMIC_RELAXED);\n+\tinfl_req->opaque = opaque;\n+\n+\tinst[0].w7.u64 = sess->cpt_inst_w7;\n+\n+\treturn 1;\n+}\n+\n+static uint32_t\n+cn10k_cpt_raw_enqueue_burst(void *qpair, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,\n+\t\t\t    union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status,\n+\t\t\t    const bool is_sgv2)\n+{\n+\tuint16_t lmt_id, nb_allowed, nb_ops = vec->num;\n+\tuint64_t lmt_base, lmt_arg, io_addr, head;\n+\tstruct cpt_inflight_req *infl_req;\n+\tstruct cnxk_cpt_qp *qp = qpair;\n+\tstruct cnxk_sym_dp_ctx *dp_ctx;\n+\tstruct pending_queue *pend_q;\n+\tuint32_t count = 0, index;\n+\tunion cpt_fc_write_s fc;\n+\tstruct cpt_inst_s *inst;\n+\tuint64_t *fc_addr;\n+\tint ret, i;\n+\n+\tpend_q = &qp->pend_q;\n+\tconst uint64_t pq_mask = pend_q->pq_mask;\n+\n+\thead = pend_q->head;\n+\tnb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);\n+\tnb_ops = RTE_MIN(nb_ops, nb_allowed);\n+\n+\tif (unlikely(nb_ops == 0))\n+\t\treturn 0;\n+\n+\tlmt_base = qp->lmtline.lmt_base;\n+\tio_addr = qp->lmtline.io_addr;\n+\tfc_addr = qp->lmtline.fc_addr;\n+\n+\tconst uint32_t fc_thresh = qp->lmtline.fc_thresh;\n+\n+\tROC_LMT_BASE_ID_GET(lmt_base, lmt_id);\n+\tinst = (struct cpt_inst_s *)lmt_base;\n+\n+\tdp_ctx = (struct cnxk_sym_dp_ctx *)drv_ctx;\n+again:\n+\tfc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);\n+\tif (unlikely(fc.s.qsize > fc_thresh)) {\n+\t\ti = 0;\n+\t\tgoto pend_q_commit;\n+\t}\n+\n+\tfor (i = 0; i < RTE_MIN(PKTS_PER_LOOP, nb_ops); i++) {\n+\t\tstruct cnxk_iov iov;\n+\n+\t\tindex = count + i;\n+\t\tinfl_req = &pend_q->req_queue[head];\n+\t\tinfl_req->op_flags = 0;\n+\n+\t\tcnxk_raw_burst_to_iov(vec, &ofs, index, &iov);\n+\t\tret = cn10k_cpt_raw_fill_inst(&iov, qp, dp_ctx, &inst[2 * i], infl_req,\n+\t\t\t\t\t      user_data[index], is_sgv2);\n+\t\tif 
(unlikely(ret != 1)) {\n+\t\t\tplt_dp_err(\"Could not process vec: %d\", index);\n+\t\t\tif (i == 0 && count == 0)\n+\t\t\t\treturn -1;\n+\t\t\telse if (i == 0)\n+\t\t\t\tgoto pend_q_commit;\n+\t\t\telse\n+\t\t\t\tbreak;\n+\t\t}\n+\t\tpending_queue_advance(&head, pq_mask);\n+\t}\n+\n+\tif (i > PKTS_PER_STEORL) {\n+\t\tlmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 | (uint64_t)lmt_id;\n+\t\troc_lmt_submit_steorl(lmt_arg, io_addr);\n+\t\tlmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - PKTS_PER_STEORL - 1) << 12 |\n+\t\t\t  (uint64_t)(lmt_id + PKTS_PER_STEORL);\n+\t\troc_lmt_submit_steorl(lmt_arg, io_addr);\n+\t} else {\n+\t\tlmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 | (uint64_t)lmt_id;\n+\t\troc_lmt_submit_steorl(lmt_arg, io_addr);\n+\t}\n+\n+\trte_io_wmb();\n+\n+\tif (nb_ops - i > 0 && i == PKTS_PER_LOOP) {\n+\t\tnb_ops -= i;\n+\t\tcount += i;\n+\t\tgoto again;\n+\t}\n+\n+pend_q_commit:\n+\trte_atomic_thread_fence(__ATOMIC_RELEASE);\n+\n+\tpend_q->head = head;\n+\tpend_q->time_out = rte_get_timer_cycles() + DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();\n+\n+\t*enqueue_status = 1;\n+\treturn count + i;\n+}\n+\n+static uint32_t\n+cn10k_cpt_raw_enqueue_burst_sgv2(void *qpair, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,\n+\t\t\t\t union rte_crypto_sym_ofs ofs, void *user_data[],\n+\t\t\t\t int *enqueue_status)\n+{\n+\treturn cn10k_cpt_raw_enqueue_burst(qpair, drv_ctx, vec, ofs, user_data, enqueue_status,\n+\t\t\t\t\t   true);\n+}\n+\n+static uint32_t\n+cn10k_cpt_raw_enqueue_burst_sgv1(void *qpair, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,\n+\t\t\t\t union rte_crypto_sym_ofs ofs, void *user_data[],\n+\t\t\t\t int *enqueue_status)\n+{\n+\treturn cn10k_cpt_raw_enqueue_burst(qpair, drv_ctx, vec, ofs, user_data, enqueue_status,\n+\t\t\t\t\t   false);\n+}\n+\n+static int\n+cn10k_cpt_raw_enqueue(void *qpair, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,\n+\t\t      uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,\n+\t\t      struct 
rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest,\n+\t\t      struct rte_crypto_va_iova_ptr *aad_or_auth_iv, void *user_data,\n+\t\t      const bool is_sgv2)\n+{\n+\tuint64_t lmt_base, lmt_arg, io_addr, head;\n+\tstruct cpt_inflight_req *infl_req;\n+\tstruct cnxk_cpt_qp *qp = qpair;\n+\tstruct cnxk_sym_dp_ctx *dp_ctx;\n+\tuint16_t lmt_id, nb_allowed;\n+\tstruct cpt_inst_s *inst;\n+\tunion cpt_fc_write_s fc;\n+\tstruct cnxk_iov iov;\n+\tuint64_t *fc_addr;\n+\tint ret;\n+\n+\tstruct pending_queue *pend_q = &qp->pend_q;\n+\tconst uint64_t pq_mask = pend_q->pq_mask;\n+\tconst uint32_t fc_thresh = qp->lmtline.fc_thresh;\n+\n+\thead = pend_q->head;\n+\tnb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);\n+\n+\tif (unlikely(nb_allowed == 0))\n+\t\treturn -1;\n+\n+\tcnxk_raw_to_iov(data_vec, n_data_vecs, &ofs, iv, digest, aad_or_auth_iv, &iov);\n+\n+\tlmt_base = qp->lmtline.lmt_base;\n+\tio_addr = qp->lmtline.io_addr;\n+\tfc_addr = qp->lmtline.fc_addr;\n+\n+\tROC_LMT_BASE_ID_GET(lmt_base, lmt_id);\n+\tinst = (struct cpt_inst_s *)lmt_base;\n+\n+\tfc.u64[0] = __atomic_load_n(fc_addr, __ATOMIC_RELAXED);\n+\tif (unlikely(fc.s.qsize > fc_thresh))\n+\t\treturn -1;\n+\n+\tdp_ctx = (struct cnxk_sym_dp_ctx *)drv_ctx;\n+\tinfl_req = &pend_q->req_queue[head];\n+\tinfl_req->op_flags = 0;\n+\n+\tret = cn10k_cpt_raw_fill_inst(&iov, qp, dp_ctx, &inst[0], infl_req, user_data, is_sgv2);\n+\tif (unlikely(ret != 1)) {\n+\t\tplt_dp_err(\"Could not process vec\");\n+\t\treturn -1;\n+\t}\n+\n+\tpending_queue_advance(&head, pq_mask);\n+\n+\tlmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;\n+\troc_lmt_submit_steorl(lmt_arg, io_addr);\n+\n+\trte_io_wmb();\n+\n+\tpend_q->head = head;\n+\tpend_q->time_out = rte_get_timer_cycles() + DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();\n+\n+\treturn 1;\n+}\n+\n+static int\n+cn10k_cpt_raw_enqueue_sgv2(void *qpair, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,\n+\t\t\t   uint16_t n_data_vecs, union rte_crypto_sym_ofs 
ofs,\n+\t\t\t   struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest,\n+\t\t\t   struct rte_crypto_va_iova_ptr *aad_or_auth_iv, void *user_data)\n+{\n+\treturn cn10k_cpt_raw_enqueue(qpair, drv_ctx, data_vec, n_data_vecs, ofs, iv, digest,\n+\t\t\t\t     aad_or_auth_iv, user_data, true);\n+}\n+\n+static int\n+cn10k_cpt_raw_enqueue_sgv1(void *qpair, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,\n+\t\t\t   uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,\n+\t\t\t   struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest,\n+\t\t\t   struct rte_crypto_va_iova_ptr *aad_or_auth_iv, void *user_data)\n+{\n+\treturn cn10k_cpt_raw_enqueue(qpair, drv_ctx, data_vec, n_data_vecs, ofs, iv, digest,\n+\t\t\t\t     aad_or_auth_iv, user_data, false);\n+}\n+\n+static inline int\n+cn10k_cpt_raw_dequeue_post_process(struct cpt_cn10k_res_s *res)\n+{\n+\tconst uint8_t uc_compcode = res->uc_compcode;\n+\tconst uint8_t compcode = res->compcode;\n+\tint ret = 1;\n+\n+\tif (likely(compcode == CPT_COMP_GOOD)) {\n+\t\tif (unlikely(uc_compcode))\n+\t\t\tplt_dp_info(\"Request failed with microcode error: 0x%x\", res->uc_compcode);\n+\t\telse\n+\t\t\tret = 0;\n+\t}\n+\n+\treturn ret;\n+}\n+\n+static uint32_t\n+cn10k_cpt_sym_raw_dequeue_burst(void *qptr, uint8_t *drv_ctx,\n+\t\t\t\trte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,\n+\t\t\t\tuint32_t max_nb_to_dequeue,\n+\t\t\t\trte_cryptodev_raw_post_dequeue_t post_dequeue, void **out_user_data,\n+\t\t\t\tuint8_t is_user_data_array, uint32_t *n_success,\n+\t\t\t\tint *dequeue_status)\n+{\n+\tstruct cpt_inflight_req *infl_req;\n+\tstruct cnxk_cpt_qp *qp = qptr;\n+\tstruct pending_queue *pend_q;\n+\tuint64_t infl_cnt, pq_tail;\n+\tunion cpt_res_s res;\n+\tint is_op_success;\n+\tuint16_t nb_ops;\n+\tvoid *opaque;\n+\tint i = 0;\n+\n+\tpend_q = &qp->pend_q;\n+\n+\tconst uint64_t pq_mask = pend_q->pq_mask;\n+\n+\tRTE_SET_USED(drv_ctx);\n+\tpq_tail = pend_q->tail;\n+\tinfl_cnt = 
pending_queue_infl_cnt(pend_q->head, pq_tail, pq_mask);\n+\n+\t/* Ensure infl_cnt isn't read before data lands */\n+\trte_atomic_thread_fence(__ATOMIC_ACQUIRE);\n+\n+\tinfl_req = &pend_q->req_queue[pq_tail];\n+\n+\topaque = infl_req->opaque;\n+\tif (get_dequeue_count)\n+\t\tnb_ops = get_dequeue_count(opaque);\n+\telse\n+\t\tnb_ops = max_nb_to_dequeue;\n+\tnb_ops = RTE_MIN(nb_ops, infl_cnt);\n+\n+\tfor (i = 0; i < nb_ops; i++) {\n+\t\tis_op_success = 0;\n+\t\tinfl_req = &pend_q->req_queue[pq_tail];\n+\n+\t\tres.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);\n+\n+\t\tif (unlikely(res.cn10k.compcode == CPT_COMP_NOT_DONE)) {\n+\t\t\tif (unlikely(rte_get_timer_cycles() > pend_q->time_out)) {\n+\t\t\t\tplt_err(\"Request timed out\");\n+\t\t\t\tcnxk_cpt_dump_on_err(qp);\n+\t\t\t\tpend_q->time_out = rte_get_timer_cycles() +\n+\t\t\t\t\t\t   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();\n+\t\t\t}\n+\t\t\tbreak;\n+\t\t}\n+\n+\t\tpending_queue_advance(&pq_tail, pq_mask);\n+\n+\t\tif (!cn10k_cpt_raw_dequeue_post_process(&res.cn10k)) {\n+\t\t\tis_op_success = 1;\n+\t\t\t*n_success += 1;\n+\t\t}\n+\n+\t\tif (is_user_data_array) {\n+\t\t\tout_user_data[i] = infl_req->opaque;\n+\t\t\tpost_dequeue(out_user_data[i], i, is_op_success);\n+\t\t} else {\n+\t\t\tif (i == 0)\n+\t\t\t\tout_user_data[0] = opaque;\n+\t\t\tpost_dequeue(out_user_data[0], i, is_op_success);\n+\t\t}\n+\n+\t\tif (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))\n+\t\t\trte_mempool_put(qp->meta_info.pool, infl_req->mdata);\n+\t}\n+\n+\tpend_q->tail = pq_tail;\n+\t*dequeue_status = 1;\n+\n+\treturn i;\n+}\n+\n+static void *\n+cn10k_cpt_sym_raw_dequeue(void *qptr, uint8_t *drv_ctx, int *dequeue_status,\n+\t\t\t  enum rte_crypto_op_status *op_status)\n+{\n+\tstruct cpt_inflight_req *infl_req;\n+\tstruct cnxk_cpt_qp *qp = qptr;\n+\tstruct pending_queue *pend_q;\n+\tuint64_t pq_tail;\n+\tunion cpt_res_s res;\n+\tvoid *opaque = NULL;\n+\n+\tpend_q = &qp->pend_q;\n+\n+\tconst uint64_t pq_mask = 
pend_q->pq_mask;\n+\n+\tRTE_SET_USED(drv_ctx);\n+\n+\tpq_tail = pend_q->tail;\n+\n+\trte_atomic_thread_fence(__ATOMIC_ACQUIRE);\n+\n+\tinfl_req = &pend_q->req_queue[pq_tail];\n+\n+\tres.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);\n+\n+\tif (unlikely(res.cn10k.compcode == CPT_COMP_NOT_DONE)) {\n+\t\tif (unlikely(rte_get_timer_cycles() > pend_q->time_out)) {\n+\t\t\tplt_err(\"Request timed out\");\n+\t\t\tcnxk_cpt_dump_on_err(qp);\n+\t\t\tpend_q->time_out = rte_get_timer_cycles() +\n+\t\t\t\t\t   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();\n+\t\t}\n+\t\tgoto exit;\n+\t}\n+\n+\tpending_queue_advance(&pq_tail, pq_mask);\n+\n+\topaque = infl_req->opaque;\n+\n+\tif (!cn10k_cpt_raw_dequeue_post_process(&res.cn10k))\n+\t\t*op_status = RTE_CRYPTO_OP_STATUS_SUCCESS;\n+\telse\n+\t\t*op_status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\n+\tif (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))\n+\t\trte_mempool_put(qp->meta_info.pool, infl_req->mdata);\n+\n+\t*dequeue_status = 1;\n+exit:\n+\treturn opaque;\n+}\n+\n+static int\n+cn10k_sym_get_raw_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)\n+{\n+\treturn sizeof(struct cnxk_sym_dp_ctx);\n+}\n+\n+static int\n+cn10k_sym_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,\n+\t\t\t       struct rte_crypto_raw_dp_ctx *raw_dp_ctx,\n+\t\t\t       enum rte_crypto_op_sess_type sess_type,\n+\t\t\t       union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)\n+{\n+\tstruct cnxk_se_sess *sess = (struct cnxk_se_sess *)session_ctx.crypto_sess;\n+\tstruct cnxk_sym_dp_ctx *dp_ctx;\n+\n+\tif (sess_type != RTE_CRYPTO_OP_WITH_SESSION)\n+\t\treturn -ENOTSUP;\n+\n+\tif (sess == NULL)\n+\t\treturn -EINVAL;\n+\n+\tif ((sess->dp_thr_type == CPT_DP_THREAD_TYPE_PDCP) ||\n+\t    (sess->dp_thr_type == CPT_DP_THREAD_TYPE_PDCP_CHAIN) ||\n+\t    (sess->dp_thr_type == CPT_DP_THREAD_TYPE_KASUMI))\n+\t\treturn -ENOTSUP;\n+\n+\tif ((sess->dp_thr_type == CPT_DP_THREAD_AUTH_ONLY) &&\n+\t    ((sess->roc_se_ctx.fc_type == 
ROC_SE_KASUMI) ||\n+\t     (sess->roc_se_ctx.fc_type == ROC_SE_PDCP)))\n+\t\treturn -ENOTSUP;\n+\n+\tif ((sess->roc_se_ctx.hash_type == ROC_SE_GMAC_TYPE) ||\n+\t    (sess->roc_se_ctx.hash_type == ROC_SE_SHA1_TYPE))\n+\t\treturn -ENOTSUP;\n+\n+\tdp_ctx = (struct cnxk_sym_dp_ctx *)raw_dp_ctx->drv_ctx_data;\n+\tdp_ctx->sess = sess;\n+\n+\tif (!is_update) {\n+\t\tstruct cnxk_cpt_vf *vf;\n+\n+\t\traw_dp_ctx->qp_data = (struct cnxk_cpt_qp *)dev->data->queue_pairs[qp_id];\n+\t\traw_dp_ctx->dequeue = cn10k_cpt_sym_raw_dequeue;\n+\t\traw_dp_ctx->dequeue_burst = cn10k_cpt_sym_raw_dequeue_burst;\n+\n+\t\tvf = dev->data->dev_private;\n+\t\tif (vf->cpt.hw_caps[CPT_ENG_TYPE_SE].sg_ver2 &&\n+\t\t    vf->cpt.hw_caps[CPT_ENG_TYPE_IE].sg_ver2) {\n+\t\t\traw_dp_ctx->enqueue = cn10k_cpt_raw_enqueue_sgv2;\n+\t\t\traw_dp_ctx->enqueue_burst = cn10k_cpt_raw_enqueue_burst_sgv2;\n+\t\t} else {\n+\t\t\traw_dp_ctx->enqueue = cn10k_cpt_raw_enqueue_sgv1;\n+\t\t\traw_dp_ctx->enqueue_burst = cn10k_cpt_raw_enqueue_burst_sgv1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n struct rte_cryptodev_ops cn10k_cpt_ops = {\n \t/* Device control ops */\n \t.dev_configure = cnxk_cpt_dev_config,\n@@ -1090,4 +1545,8 @@ struct rte_cryptodev_ops cn10k_cpt_ops = {\n \t/* Event crypto ops */\n \t.session_ev_mdata_set = cn10k_cpt_crypto_adapter_ev_mdata_set,\n \t.queue_pair_event_error_query = cnxk_cpt_queue_pair_event_error_query,\n+\n+\t/* Raw data-path API related operations */\n+\t.sym_get_raw_dp_ctx_size = cn10k_sym_get_raw_dp_ctx_size,\n+\t.sym_configure_raw_dp_ctx = cn10k_sym_configure_raw_dp_ctx,\n };\ndiff --git a/drivers/crypto/cnxk/cnxk_cryptodev.c b/drivers/crypto/cnxk/cnxk_cryptodev.c\nindex 4fa1907cea..4819a14184 100644\n--- a/drivers/crypto/cnxk/cnxk_cryptodev.c\n+++ b/drivers/crypto/cnxk/cnxk_cryptodev.c\n@@ -13,22 +13,16 @@\n uint64_t\n cnxk_cpt_default_ff_get(void)\n {\n-\tuint64_t ff = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |\n-\t\t      RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |\n-\t\t      
RTE_CRYPTODEV_FF_HW_ACCELERATED |\n-\t\t      RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT |\n+\tuint64_t ff = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |\n+\t\t      RTE_CRYPTODEV_FF_HW_ACCELERATED | RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT |\n \t\t      RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP |\n-\t\t      RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |\n-\t\t      RTE_CRYPTODEV_FF_IN_PLACE_SGL |\n-\t\t      RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |\n-\t\t      RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |\n-\t\t      RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |\n-\t\t      RTE_CRYPTODEV_FF_SYM_SESSIONLESS |\n-\t\t      RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |\n-\t\t      RTE_CRYPTODEV_FF_SECURITY;\n+\t\t      RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | RTE_CRYPTODEV_FF_IN_PLACE_SGL |\n+\t\t      RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |\n+\t\t      RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | RTE_CRYPTODEV_FF_SYM_SESSIONLESS |\n+\t\t      RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED | RTE_CRYPTODEV_FF_SECURITY;\n \n \tif (roc_model_is_cn10k())\n-\t\tff |= RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM;\n+\t\tff |= RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM | RTE_CRYPTODEV_FF_SYM_RAW_DP;\n \n \treturn ff;\n }\ndiff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h\nindex 6ee4cbda70..4a8eb0890b 100644\n--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h\n+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h\n@@ -44,6 +44,7 @@ struct cpt_qp_meta_info {\n struct cpt_inflight_req {\n \tunion cpt_res_s res;\n \tunion {\n+\t\tvoid *opaque;\n \t\tstruct rte_crypto_op *cop;\n \t\tstruct rte_event_vector *vec;\n \t};\ndiff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h\nindex ceb50fa3b6..9f3bff3e68 100644\n--- a/drivers/crypto/cnxk/cnxk_se.h\n+++ b/drivers/crypto/cnxk/cnxk_se.h\n@@ -63,6 +63,23 @@ struct cnxk_se_sess {\n \tstruct roc_cpt_lf *lf;\n } __rte_aligned(ROC_ALIGN);\n \n+struct cnxk_sym_dp_ctx {\n+\tstruct cnxk_se_sess 
*sess;\n+};\n+\n+struct cnxk_iov {\n+\tchar src[SRC_IOV_SIZE];\n+\tchar dst[SRC_IOV_SIZE];\n+\tvoid *iv_buf;\n+\tvoid *aad_buf;\n+\tvoid *mac_buf;\n+\tuint16_t c_head;\n+\tuint16_t c_tail;\n+\tuint16_t a_head;\n+\tuint16_t a_tail;\n+\tint data_len;\n+};\n+\n static __rte_always_inline int fill_sess_gmac(struct rte_crypto_sym_xform *xform,\n \t\t\t\t\t      struct cnxk_se_sess *sess);\n \n@@ -3061,4 +3078,280 @@ cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op, struct cnxk_\n \treturn ret;\n }\n \n+static __rte_always_inline uint32_t\n+prepare_iov_from_raw_vec(struct rte_crypto_vec *vec, struct roc_se_iov_ptr *iovec, uint32_t num)\n+{\n+\tuint32_t i, total_len = 0;\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\tiovec->bufs[i].vaddr = vec[i].base;\n+\t\tiovec->bufs[i].size = vec[i].len;\n+\n+\t\ttotal_len += vec[i].len;\n+\t}\n+\n+\tiovec->buf_cnt = i;\n+\treturn total_len;\n+}\n+\n+static __rte_always_inline void\n+cnxk_raw_burst_to_iov(struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs *ofs, int index,\n+\t\t      struct cnxk_iov *iov)\n+{\n+\tiov->iv_buf = vec->iv[index].va;\n+\tiov->aad_buf = vec->aad[index].va;\n+\tiov->mac_buf = vec->digest[index].va;\n+\n+\tiov->data_len =\n+\t\tprepare_iov_from_raw_vec(vec->src_sgl[index].vec, (struct roc_se_iov_ptr *)iov->src,\n+\t\t\t\t\t vec->src_sgl[index].num);\n+\n+\tif (vec->dest_sgl == NULL)\n+\t\tprepare_iov_from_raw_vec(vec->src_sgl[index].vec, (struct roc_se_iov_ptr *)iov->dst,\n+\t\t\t\t\t vec->src_sgl[index].num);\n+\telse\n+\t\tprepare_iov_from_raw_vec(vec->dest_sgl[index].vec,\n+\t\t\t\t\t (struct roc_se_iov_ptr *)iov->dst,\n+\t\t\t\t\t vec->dest_sgl[index].num);\n+\n+\tiov->c_head = ofs->ofs.cipher.head;\n+\tiov->c_tail = ofs->ofs.cipher.tail;\n+\n+\tiov->a_head = ofs->ofs.auth.head;\n+\tiov->a_tail = ofs->ofs.auth.tail;\n+}\n+\n+static __rte_always_inline void\n+cnxk_raw_to_iov(struct rte_crypto_vec *data_vec, uint16_t n_vecs, union rte_crypto_sym_ofs *ofs,\n+\t\tstruct 
rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest,\n+\t\tstruct rte_crypto_va_iova_ptr *aad, struct cnxk_iov *iov)\n+{\n+\tiov->iv_buf = iv->va;\n+\tiov->aad_buf = aad->va;\n+\tiov->mac_buf = digest->va;\n+\n+\tiov->data_len =\n+\t\tprepare_iov_from_raw_vec(data_vec, (struct roc_se_iov_ptr *)iov->src, n_vecs);\n+\tprepare_iov_from_raw_vec(data_vec, (struct roc_se_iov_ptr *)iov->dst, n_vecs);\n+\n+\tiov->c_head = ofs->ofs.cipher.head;\n+\tiov->c_tail = ofs->ofs.cipher.tail;\n+\n+\tiov->a_head = ofs->ofs.auth.head;\n+\tiov->a_tail = ofs->ofs.auth.tail;\n+}\n+\n+static inline void\n+raw_memcpy(struct cnxk_iov *iov)\n+{\n+\tstruct roc_se_iov_ptr *src = (struct roc_se_iov_ptr *)iov->src;\n+\tstruct roc_se_iov_ptr *dst = (struct roc_se_iov_ptr *)iov->dst;\n+\tint num = src->buf_cnt;\n+\tint i;\n+\n+\t/* skip copy in case of inplace */\n+\tif (dst->bufs[0].vaddr == src->bufs[0].vaddr)\n+\t\treturn;\n+\n+\tfor (i = 0; i < num; i++) {\n+\t\trte_memcpy(dst->bufs[i].vaddr, src->bufs[i].vaddr, src->bufs[i].size);\n+\t\tdst->bufs[i].size = src->bufs[i].size;\n+\t}\n+}\n+\n+static inline int\n+fill_raw_passthrough_params(struct cnxk_iov *iov, struct cpt_inst_s *inst)\n+{\n+\tconst union cpt_inst_w4 w4 = {\n+\t\t.s.opcode_major = ROC_SE_MAJOR_OP_MISC,\n+\t\t.s.opcode_minor = ROC_SE_MISC_MINOR_OP_PASSTHROUGH,\n+\t\t.s.param1 = 1,\n+\t\t.s.param2 = 1,\n+\t\t.s.dlen = 0,\n+\t};\n+\n+\tinst->w0.u64 = 0;\n+\tinst->w5.u64 = 0;\n+\tinst->w4.u64 = w4.u64;\n+\n+\traw_memcpy(iov);\n+\n+\treturn 0;\n+}\n+\n+static __rte_always_inline int\n+fill_raw_fc_params(struct cnxk_iov *iov, struct cnxk_se_sess *sess, struct cpt_qp_meta_info *m_info,\n+\t\t   struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst, const bool is_kasumi,\n+\t\t   const bool is_aead, const bool is_sg_ver2)\n+{\n+\tuint32_t cipher_len, auth_len = 0;\n+\tstruct roc_se_fc_params fc_params;\n+\tuint8_t cpt_op = sess->cpt_op;\n+\tuint64_t d_offs, d_lens;\n+\tuint8_t ccm_iv_buf[16];\n+\tuint32_t flags = 
0;\n+\tvoid *mdata = NULL;\n+\tuint32_t iv_buf[4];\n+\tint ret;\n+\n+\tfc_params.cipher_iv_len = sess->iv_length;\n+\tfc_params.ctx = &sess->roc_se_ctx;\n+\tfc_params.auth_iv_buf = NULL;\n+\tfc_params.auth_iv_len = 0;\n+\tfc_params.mac_buf.size = 0;\n+\tfc_params.mac_buf.vaddr = 0;\n+\tfc_params.iv_buf = NULL;\n+\n+\tif (likely(is_kasumi || sess->iv_length)) {\n+\t\tflags |= ROC_SE_VALID_IV_BUF;\n+\t\tfc_params.iv_buf = iov->iv_buf;\n+\n+\t\tif (sess->short_iv) {\n+\t\t\tmemcpy((uint8_t *)iv_buf, iov->iv_buf, 12);\n+\t\t\tiv_buf[3] = rte_cpu_to_be_32(0x1);\n+\t\t\tfc_params.iv_buf = iv_buf;\n+\t\t}\n+\n+\t\tif (sess->aes_ccm) {\n+\t\t\tmemcpy((uint8_t *)ccm_iv_buf, iov->iv_buf, sess->iv_length + 1);\n+\t\t\tccm_iv_buf[0] = 14 - sess->iv_length;\n+\t\t\tfc_params.iv_buf = ccm_iv_buf;\n+\t\t}\n+\t}\n+\n+\tfc_params.src_iov = (void *)iov->src;\n+\tfc_params.dst_iov = (void *)iov->dst;\n+\n+\tcipher_len = iov->data_len - iov->c_head - iov->c_tail;\n+\tauth_len = iov->data_len - iov->a_head - iov->a_tail;\n+\n+\td_offs = (iov->c_head << 16) | iov->a_head;\n+\td_lens = ((uint64_t)cipher_len << 32) | auth_len;\n+\n+\tif (is_aead) {\n+\t\tuint16_t aad_len = sess->aad_length;\n+\n+\t\tif (likely(aad_len == 0)) {\n+\t\t\td_offs = (iov->c_head << 16) | iov->c_head;\n+\t\t\td_lens = ((uint64_t)cipher_len << 32) | cipher_len;\n+\t\t} else {\n+\t\t\tflags |= ROC_SE_VALID_AAD_BUF;\n+\t\t\tfc_params.aad_buf.size = sess->aad_length;\n+\t\t\t/* For AES CCM, AAD is written 18B after aad.data as per API */\n+\t\t\tif (sess->aes_ccm)\n+\t\t\t\tfc_params.aad_buf.vaddr = PLT_PTR_ADD((uint8_t *)iov->aad_buf, 18);\n+\t\t\telse\n+\t\t\t\tfc_params.aad_buf.vaddr = iov->aad_buf;\n+\n+\t\t\td_offs = (iov->c_head << 16);\n+\t\t\td_lens = ((uint64_t)cipher_len << 32);\n+\t\t}\n+\t}\n+\n+\tif (likely(sess->mac_len)) {\n+\t\tflags |= ROC_SE_VALID_MAC_BUF;\n+\t\tfc_params.mac_buf.size = sess->mac_len;\n+\t\tfc_params.mac_buf.vaddr = iov->mac_buf;\n+\t}\n+\n+\tfc_params.meta_buf.vaddr = 
NULL;\n+\tmdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen, m_info->pool, infl_req);\n+\tif (mdata == NULL) {\n+\t\tplt_dp_err(\"Error allocating meta buffer for request\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tif (is_kasumi) {\n+\t\tif (cpt_op & ROC_SE_OP_ENCODE)\n+\t\t\tret = cpt_enc_hmac_prep(flags, d_offs, d_lens, &fc_params, inst,\n+\t\t\t\t\t\tis_sg_ver2);\n+\t\telse\n+\t\t\tret = cpt_dec_hmac_prep(flags, d_offs, d_lens, &fc_params, inst,\n+\t\t\t\t\t\tis_sg_ver2);\n+\t} else {\n+\t\tif (cpt_op & ROC_SE_OP_ENCODE)\n+\t\t\tret = cpt_enc_hmac_prep(flags, d_offs, d_lens, &fc_params, inst,\n+\t\t\t\t\t\tis_sg_ver2);\n+\t\telse\n+\t\t\tret = cpt_dec_hmac_prep(flags, d_offs, d_lens, &fc_params, inst,\n+\t\t\t\t\t\tis_sg_ver2);\n+\t}\n+\n+\tif (unlikely(ret)) {\n+\t\tplt_dp_err(\"Preparing request failed due to bad input arg\");\n+\t\tgoto free_mdata_and_exit;\n+\t}\n+\n+\treturn 0;\n+\n+free_mdata_and_exit:\n+\trte_mempool_put(m_info->pool, infl_req->mdata);\n+\treturn ret;\n+}\n+\n+static __rte_always_inline int\n+fill_raw_digest_params(struct cnxk_iov *iov, struct cnxk_se_sess *sess,\n+\t\t       struct cpt_qp_meta_info *m_info, struct cpt_inflight_req *infl_req,\n+\t\t       struct cpt_inst_s *inst, const bool is_sg_ver2)\n+{\n+\tuint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;\n+\tstruct roc_se_fc_params fc_params;\n+\tuint16_t mac_len = sess->mac_len;\n+\tuint64_t d_offs, d_lens;\n+\tuint32_t auth_len = 0;\n+\tuint32_t flags = 0;\n+\tvoid *mdata = NULL;\n+\tuint32_t space = 0;\n+\tint ret;\n+\n+\tmemset(&fc_params, 0, sizeof(struct roc_se_fc_params));\n+\tfc_params.cipher_iv_len = sess->iv_length;\n+\tfc_params.ctx = &sess->roc_se_ctx;\n+\n+\tmdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen, m_info->pool, infl_req);\n+\tif (mdata == NULL) {\n+\t\tplt_dp_err(\"Error allocating meta buffer for request\");\n+\t\tret = -ENOMEM;\n+\t\tgoto err_exit;\n+\t}\n+\n+\tflags |= ROC_SE_VALID_MAC_BUF;\n+\tfc_params.src_iov = (void *)iov->src;\n+\tauth_len 
= iov->data_len - iov->a_head - iov->a_tail;\n+\td_lens = auth_len;\n+\td_offs = iov->a_head;\n+\n+\tif (auth_op == ROC_SE_OP_AUTH_GENERATE) {\n+\t\tfc_params.mac_buf.size = sess->mac_len;\n+\t\tfc_params.mac_buf.vaddr = iov->mac_buf;\n+\t} else {\n+\t\tuint64_t *op = mdata;\n+\n+\t\t/* Need space for storing generated mac */\n+\t\tspace += 2 * sizeof(uint64_t);\n+\n+\t\tfc_params.mac_buf.vaddr = (uint8_t *)mdata + space;\n+\t\tfc_params.mac_buf.size = mac_len;\n+\t\tspace += RTE_ALIGN_CEIL(mac_len, 8);\n+\t\top[0] = (uintptr_t)iov->mac_buf;\n+\t\top[1] = mac_len;\n+\t\tinfl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;\n+\t}\n+\n+\tfc_params.meta_buf.vaddr = (uint8_t *)mdata + space;\n+\tfc_params.meta_buf.size -= space;\n+\n+\tret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params, inst, is_sg_ver2);\n+\tif (ret)\n+\t\tgoto free_mdata_and_exit;\n+\n+\treturn 0;\n+\n+free_mdata_and_exit:\n+\tif (infl_req->op_flags & CPT_OP_FLAGS_METABUF)\n+\t\trte_mempool_put(m_info->pool, infl_req->mdata);\n+err_exit:\n+\treturn ret;\n+}\n+\n #endif /*_CNXK_SE_H_ */\n",
    "prefixes": [
        "v2",
        "5/8"
    ]
}