get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch (full replacement).

GET /api/patches/47900/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 47900,
    "url": "http://patchwork.dpdk.org/api/patches/47900/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20181106162248.5172-1-roy.fan.zhang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20181106162248.5172-1-roy.fan.zhang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20181106162248.5172-1-roy.fan.zhang@intel.com",
    "date": "2018-11-06T16:22:48",
    "name": "[v2] vhost/crypto: fix incorrect copy",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "6bbe1653b9bef004e2e9e2310ed95a62ef6a55f2",
    "submitter": {
        "id": 304,
        "url": "http://patchwork.dpdk.org/api/people/304/?format=api",
        "name": "Fan Zhang",
        "email": "roy.fan.zhang@intel.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patchwork.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20181106162248.5172-1-roy.fan.zhang@intel.com/mbox/",
    "series": [
        {
            "id": 2300,
            "url": "http://patchwork.dpdk.org/api/series/2300/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=2300",
            "date": "2018-11-06T16:22:48",
            "name": "[v2] vhost/crypto: fix incorrect copy",
            "version": 2,
            "mbox": "http://patchwork.dpdk.org/series/2300/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/47900/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/47900/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 751DE2862;\n\tTue,  6 Nov 2018 17:22:54 +0100 (CET)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n\tby dpdk.org (Postfix) with ESMTP id 27C302082\n\tfor <dev@dpdk.org>; Tue,  6 Nov 2018 17:22:51 +0100 (CET)",
            "from orsmga005.jf.intel.com ([10.7.209.41])\n\tby fmsmga105.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t06 Nov 2018 08:22:51 -0800",
            "from silpixa00398673.ir.intel.com (HELO\n\tsilpixa00398673.ger.corp.intel.com) ([10.237.223.54])\n\tby orsmga005.jf.intel.com with ESMTP; 06 Nov 2018 08:22:49 -0800"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.54,472,1534834800\"; d=\"scan'208\";a=\"271813892\"",
        "From": "Fan Zhang <roy.fan.zhang@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "maxime.coquelin@redhat.com",
        "Date": "Tue,  6 Nov 2018 16:22:48 +0000",
        "Message-Id": "<20181106162248.5172-1-roy.fan.zhang@intel.com>",
        "X-Mailer": "git-send-email 2.13.6",
        "In-Reply-To": "<20181024131826.6842-1-roy.fan.zhang@intel.com>",
        "References": "<20181024131826.6842-1-roy.fan.zhang@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v2] vhost/crypto: fix incorrect copy",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch fixes the incorrect packet content copy in the\nchaining mode. Originally the content before cipher offset is\noverwritten by all zeros. This patch fixes the problem by\nmaking sure the correct write back source and destination\nsettings during set up.\n\nSigned-off-by: Fan Zhang <roy.fan.zhang@intel.com>\n---\nv2:\n- fixed a write back size error bug.\n\n lib/librte_vhost/vhost_crypto.c | 460 ++++++++++++++++++++++++++++++----------\n 1 file changed, 348 insertions(+), 112 deletions(-)",
    "diff": "diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c\nindex 9811a232a..a18b97cc2 100644\n--- a/lib/librte_vhost/vhost_crypto.c\n+++ b/lib/librte_vhost/vhost_crypto.c\n@@ -198,6 +198,7 @@ struct vhost_crypto {\n \tstruct rte_hash *session_map;\n \tstruct rte_mempool *mbuf_pool;\n \tstruct rte_mempool *sess_pool;\n+\tstruct rte_mempool *wb_pool;\n \n \t/** DPDK cryptodev ID */\n \tuint8_t cid;\n@@ -215,13 +216,20 @@ struct vhost_crypto {\n \tuint8_t option;\n } __rte_cache_aligned;\n \n+struct vhost_crypto_writeback_data {\n+\tuint8_t *src;\n+\tuint8_t *dst;\n+\tuint64_t len;\n+\tstruct vhost_crypto_writeback_data *next;\n+};\n+\n struct vhost_crypto_data_req {\n \tstruct vring_desc *head;\n \tstruct virtio_net *dev;\n \tstruct virtio_crypto_inhdr *inhdr;\n \tstruct vhost_virtqueue *vq;\n-\tstruct vring_desc *wb_desc;\n-\tuint16_t wb_len;\n+\tstruct vhost_crypto_writeback_data *wb;\n+\tstruct rte_mempool *wb_pool;\n \tuint16_t desc_idx;\n \tuint16_t len;\n \tuint16_t zero_copy;\n@@ -506,15 +514,29 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,\n \t\tleft -= desc->len;\n \t}\n \n-\tif (unlikely(left > 0)) {\n-\t\tVC_LOG_ERR(\"Incorrect virtio descriptor\");\n+\tif (unlikely(left > 0))\n \t\treturn -1;\n-\t}\n \n \t*cur_desc = &head[desc->next];\n \treturn 0;\n }\n \n+static __rte_always_inline void *\n+get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,\n+\t\tuint8_t perm)\n+{\n+\tvoid *data;\n+\tuint64_t dlen = cur_desc->len;\n+\n+\tdata = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);\n+\tif (unlikely(!data || dlen != cur_desc->len)) {\n+\t\tVC_LOG_ERR(\"Failed to map object\");\n+\t\treturn NULL;\n+\t}\n+\n+\treturn data;\n+}\n+\n static int\n copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,\n \t\tstruct vring_desc **cur_desc, uint32_t size)\n@@ -531,10 +553,8 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,\n \tdlen = to_copy;\n \tsrc 
= IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,\n \t\t\tVHOST_ACCESS_RO);\n-\tif (unlikely(!src || !dlen)) {\n-\t\tVC_LOG_ERR(\"Failed to map descriptor\");\n+\tif (unlikely(!src || !dlen))\n \t\treturn -1;\n-\t}\n \n \trte_memcpy((uint8_t *)data, src, dlen);\n \tdata += dlen;\n@@ -609,73 +629,158 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,\n \treturn 0;\n }\n \n-static __rte_always_inline void *\n-get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc **cur_desc,\n-\t\tuint32_t size, uint8_t perm)\n+static void\n+write_back_data(struct vhost_crypto_data_req *vc_req)\n {\n-\tvoid *data;\n-\tuint64_t dlen = (*cur_desc)->len;\n-\n-\tdata = IOVA_TO_VVA(void *, vc_req, (*cur_desc)->addr, &dlen, perm);\n-\tif (unlikely(!data || dlen != (*cur_desc)->len)) {\n-\t\tVC_LOG_ERR(\"Failed to map object\");\n-\t\treturn NULL;\n+\tstruct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;\n+\n+\twhile (wb_data) {\n+\t\trte_prefetch0(wb_data->next);\n+\t\trte_memcpy(wb_data->dst, wb_data->src, wb_data->len);\n+\t\twb_last = wb_data;\n+\t\twb_data = wb_data->next;\n+\t\trte_mempool_put(vc_req->wb_pool, wb_last);\n \t}\n+}\n \n-\tif (unlikely(move_desc(vc_req->head, cur_desc, size) < 0))\n-\t\treturn NULL;\n+static void\n+free_wb_data(struct vhost_crypto_writeback_data *wb_data,\n+\t\tstruct rte_mempool *mp)\n+{\n+\twhile (wb_data->next != NULL)\n+\t\tfree_wb_data(wb_data->next, mp);\n \n-\treturn data;\n+\trte_mempool_put(mp, wb_data);\n }\n \n-static int\n-write_back_data(struct rte_crypto_op *op, struct vhost_crypto_data_req *vc_req)\n+/**\n+ * The function will allocate a vhost_crypto_writeback_data linked list\n+ * containing the source and destination data pointers for the write back\n+ * operation after dequeued from Cryptodev PMD queues.\n+ *\n+ * @param vc_req\n+ *   The vhost crypto data request pointer\n+ * @param cur_desc\n+ *   The pointer of the current in use descriptor pointer. 
The content of\n+ *   cur_desc is expected to be updated after the function execution.\n+ * @param end_wb_data\n+ *   The last write back data element to be returned. It is used only in cipher\n+ *   and hash chain operations.\n+ * @param src\n+ *   The source data pointer\n+ * @param offset\n+ *   The offset to both source and destination data. For source data the offset\n+ *   is the number of bytes between src and start point of cipher operation. For\n+ *   destination data the offset is the number of bytes from *cur_desc->addr\n+ *   to the point where the src will be written to.\n+ * @param write_back_len\n+ *   The size of the write back length.\n+ * @return\n+ *   The pointer to the start of the write back data linked list.\n+ */\n+static struct vhost_crypto_writeback_data *\n+prepare_write_back_data(struct vhost_crypto_data_req *vc_req,\n+\t\tstruct vring_desc **cur_desc,\n+\t\tstruct vhost_crypto_writeback_data **end_wb_data,\n+\t\tuint8_t *src,\n+\t\tuint32_t offset,\n+\t\tuint64_t write_back_len)\n {\n-\tstruct rte_mbuf *mbuf = op->sym->m_dst;\n-\tstruct vring_desc *head = vc_req->head;\n-\tstruct vring_desc *desc = vc_req->wb_desc;\n-\tint left = vc_req->wb_len;\n-\tuint32_t to_write;\n-\tuint8_t *src_data = mbuf->buf_addr, *dst;\n+\tstruct vhost_crypto_writeback_data *wb_data, *head;\n+\tstruct vring_desc *desc = *cur_desc;\n \tuint64_t dlen;\n+\tuint8_t *dst;\n+\tint ret;\n \n-\trte_prefetch0(&head[desc->next]);\n-\tto_write = RTE_MIN(desc->len, (uint32_t)left);\n-\tdlen = desc->len;\n-\tdst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,\n-\t\t\tVHOST_ACCESS_RW);\n-\tif (unlikely(!dst || dlen != desc->len)) {\n-\t\tVC_LOG_ERR(\"Failed to map descriptor\");\n-\t\treturn -1;\n+\tret = rte_mempool_get(vc_req->wb_pool, (void **)&head);\n+\tif (unlikely(ret < 0)) {\n+\t\tVC_LOG_ERR(\"no memory\");\n+\t\tgoto error_exit;\n \t}\n \n-\trte_memcpy(dst, src_data, to_write);\n-\tleft -= to_write;\n-\tsrc_data += to_write;\n+\twb_data = head;\n \n-\twhile 
((desc->flags & VRING_DESC_F_NEXT) && left > 0) {\n-\t\tdesc = &head[desc->next];\n-\t\trte_prefetch0(&head[desc->next]);\n-\t\tto_write = RTE_MIN(desc->len, (uint32_t)left);\n+\tif (likely(desc->len > offset)) {\n+\t\twb_data->src = src + offset;\n \t\tdlen = desc->len;\n-\t\tdst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,\n-\t\t\t\tVHOST_ACCESS_RW);\n+\t\tdst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,\n+\t\t\t&dlen, VHOST_ACCESS_RW) + offset;\n \t\tif (unlikely(!dst || dlen != desc->len)) {\n \t\t\tVC_LOG_ERR(\"Failed to map descriptor\");\n-\t\t\treturn -1;\n+\t\t\tgoto error_exit;\n \t\t}\n \n-\t\trte_memcpy(dst, src_data, to_write);\n-\t\tleft -= to_write;\n-\t\tsrc_data += to_write;\n-\t}\n+\t\twb_data->dst = dst;\n+\t\twb_data->len = desc->len - offset;\n+\t\twrite_back_len -= wb_data->len;\n+\t\tsrc += offset + wb_data->len;\n+\t\toffset = 0;\n+\n+\t\tif (unlikely(write_back_len)) {\n+\t\t\tret = rte_mempool_get(vc_req->wb_pool,\n+\t\t\t\t\t(void **)&(wb_data->next));\n+\t\t\tif (unlikely(ret < 0)) {\n+\t\t\t\tVC_LOG_ERR(\"no memory\");\n+\t\t\t\tgoto error_exit;\n+\t\t\t}\n \n-\tif (unlikely(left < 0)) {\n-\t\tVC_LOG_ERR(\"Incorrect virtio descriptor\");\n-\t\treturn -1;\n+\t\t\twb_data = wb_data->next;\n+\t\t} else\n+\t\t\twb_data->next = NULL;\n+\t} else\n+\t\toffset -= desc->len;\n+\n+\twhile (write_back_len) {\n+\t\tdesc = &vc_req->head[desc->next];\n+\t\tif (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {\n+\t\t\tVC_LOG_ERR(\"incorrect descriptor\");\n+\t\t\tgoto error_exit;\n+\t\t}\n+\n+\t\tif (desc->len <= offset) {\n+\t\t\toffset -= desc->len;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tdlen = desc->len;\n+\t\tdst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,\n+\t\t\t\tVHOST_ACCESS_RW) + offset;\n+\t\tif (unlikely(dst == NULL || dlen != desc->len)) {\n+\t\t\tVC_LOG_ERR(\"Failed to map descriptor\");\n+\t\t\tgoto error_exit;\n+\t\t}\n+\n+\t\twb_data->src = src;\n+\t\twb_data->dst = dst;\n+\t\twb_data->len = RTE_MIN(desc->len - offset, 
write_back_len);\n+\t\twrite_back_len -= wb_data->len;\n+\t\tsrc += wb_data->len;\n+\t\toffset = 0;\n+\n+\t\tif (write_back_len) {\n+\t\t\tret = rte_mempool_get(vc_req->wb_pool,\n+\t\t\t\t\t(void **)&(wb_data->next));\n+\t\t\tif (unlikely(ret < 0)) {\n+\t\t\t\tVC_LOG_ERR(\"no memory\");\n+\t\t\t\tgoto error_exit;\n+\t\t\t}\n+\n+\t\t\twb_data = wb_data->next;\n+\t\t} else\n+\t\t\twb_data->next = NULL;\n \t}\n \n-\treturn 0;\n+\t*cur_desc = &vc_req->head[desc->next];\n+\n+\t*end_wb_data = wb_data;\n+\n+\treturn head;\n+\n+error_exit:\n+\tif (head)\n+\t\tfree_wb_data(head, vc_req->wb_pool);\n+\n+\treturn NULL;\n }\n \n static uint8_t\n@@ -685,6 +790,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,\n \t\tstruct vring_desc *cur_desc)\n {\n \tstruct vring_desc *desc = cur_desc;\n+\tstruct vhost_crypto_writeback_data *ewb = NULL;\n \tstruct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;\n \tuint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);\n \tuint8_t ret = 0;\n@@ -703,16 +809,25 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,\n \tcase RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:\n \t\tm_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,\n \t\t\t\tcipher->para.src_data_len);\n-\t\tm_src->buf_addr = get_data_ptr(vc_req, &desc,\n-\t\t\t\tcipher->para.src_data_len, VHOST_ACCESS_RO);\n+\t\tm_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);\n \t\tif (unlikely(m_src->buf_iova == 0 ||\n \t\t\t\tm_src->buf_addr == NULL)) {\n \t\t\tVC_LOG_ERR(\"zero_copy may fail due to cross page data\");\n \t\t\tret = VIRTIO_CRYPTO_ERR;\n \t\t\tgoto error_exit;\n \t\t}\n+\n+\t\tif (unlikely(move_desc(vc_req->head, &desc,\n+\t\t\t\tcipher->para.src_data_len) < 0)) {\n+\t\t\tVC_LOG_ERR(\"Incorrect descriptor\");\n+\t\t\tret = VIRTIO_CRYPTO_ERR;\n+\t\t\tgoto error_exit;\n+\t\t}\n+\n \t\tbreak;\n \tcase RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:\n+\t\tvc_req->wb_pool = vcrypto->wb_pool;\n+\n \t\tif 
(unlikely(cipher->para.src_data_len >\n \t\t\t\tRTE_MBUF_DEFAULT_BUF_SIZE)) {\n \t\t\tVC_LOG_ERR(\"Not enough space to do data copy\");\n@@ -743,24 +858,31 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,\n \tcase RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:\n \t\tm_dst->buf_iova = gpa_to_hpa(vcrypto->dev,\n \t\t\t\tdesc->addr, cipher->para.dst_data_len);\n-\t\tm_dst->buf_addr = get_data_ptr(vc_req, &desc,\n-\t\t\t\tcipher->para.dst_data_len, VHOST_ACCESS_RW);\n+\t\tm_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);\n \t\tif (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {\n \t\t\tVC_LOG_ERR(\"zero_copy may fail due to cross page data\");\n \t\t\tret = VIRTIO_CRYPTO_ERR;\n \t\t\tgoto error_exit;\n \t\t}\n \n+\t\tif (unlikely(move_desc(vc_req->head, &desc,\n+\t\t\t\tcipher->para.dst_data_len) < 0)) {\n+\t\t\tVC_LOG_ERR(\"Incorrect descriptor\");\n+\t\t\tret = VIRTIO_CRYPTO_ERR;\n+\t\t\tgoto error_exit;\n+\t\t}\n+\n \t\tm_dst->data_len = cipher->para.dst_data_len;\n \t\tbreak;\n \tcase RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:\n-\t\tvc_req->wb_desc = desc;\n-\t\tvc_req->wb_len = cipher->para.dst_data_len;\n-\t\tif (unlikely(move_desc(vc_req->head, &desc,\n-\t\t\t\tvc_req->wb_len) < 0)) {\n+\t\tvc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,\n+\t\t\t\trte_pktmbuf_mtod(m_src, uint8_t *), 0,\n+\t\t\t\tcipher->para.dst_data_len);\n+\t\tif (unlikely(vc_req->wb == NULL)) {\n \t\t\tret = VIRTIO_CRYPTO_ERR;\n \t\t\tgoto error_exit;\n \t\t}\n+\n \t\tbreak;\n \tdefault:\n \t\tret = VIRTIO_CRYPTO_BADMSG;\n@@ -774,7 +896,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,\n \top->sym->cipher.data.offset = 0;\n \top->sym->cipher.data.length = cipher->para.src_data_len;\n \n-\tvc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO);\n+\tvc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);\n \tif (unlikely(vc_req->inhdr == NULL)) {\n \t\tret = VIRTIO_CRYPTO_BADMSG;\n \t\tgoto 
error_exit;\n@@ -786,6 +908,9 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,\n \treturn 0;\n \n error_exit:\n+\tif (vc_req->wb)\n+\t\tfree_wb_data(vc_req->wb, vc_req->wb_pool);\n+\n \tvc_req->len = INHDR_LEN;\n \treturn ret;\n }\n@@ -796,7 +921,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,\n \t\tstruct virtio_crypto_alg_chain_data_req *chain,\n \t\tstruct vring_desc *cur_desc)\n {\n-\tstruct vring_desc *desc = cur_desc;\n+\tstruct vring_desc *desc = cur_desc, *digest_desc;\n+\tstruct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;\n \tstruct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;\n \tuint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);\n \tuint32_t digest_offset;\n@@ -812,21 +938,30 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,\n \t}\n \n \tm_src->data_len = chain->para.src_data_len;\n-\tm_dst->data_len = chain->para.dst_data_len;\n \n \tswitch (vcrypto->option) {\n \tcase RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:\n+\t\tm_dst->data_len = chain->para.dst_data_len;\n+\n \t\tm_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,\n \t\t\t\tchain->para.src_data_len);\n-\t\tm_src->buf_addr = get_data_ptr(vc_req, &desc,\n-\t\t\t\tchain->para.src_data_len, VHOST_ACCESS_RO);\n+\t\tm_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);\n \t\tif (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {\n \t\t\tVC_LOG_ERR(\"zero_copy may fail due to cross page data\");\n \t\t\tret = VIRTIO_CRYPTO_ERR;\n \t\t\tgoto error_exit;\n \t\t}\n+\n+\t\tif (unlikely(move_desc(vc_req->head, &desc,\n+\t\t\t\tchain->para.src_data_len) < 0)) {\n+\t\t\tVC_LOG_ERR(\"Incorrect descriptor\");\n+\t\t\tret = VIRTIO_CRYPTO_ERR;\n+\t\t\tgoto error_exit;\n+\t\t}\n \t\tbreak;\n \tcase RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:\n+\t\tvc_req->wb_pool = vcrypto->wb_pool;\n+\n \t\tif (unlikely(chain->para.src_data_len >\n \t\t\t\tRTE_MBUF_DEFAULT_BUF_SIZE)) 
{\n \t\t\tVC_LOG_ERR(\"Not enough space to do data copy\");\n@@ -838,6 +973,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,\n \t\t\tret = VIRTIO_CRYPTO_BADMSG;\n \t\t\tgoto error_exit;\n \t\t}\n+\n \t\tbreak;\n \tdefault:\n \t\tret = VIRTIO_CRYPTO_BADMSG;\n@@ -856,46 +992,70 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,\n \tcase RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:\n \t\tm_dst->buf_iova = gpa_to_hpa(vcrypto->dev,\n \t\t\t\tdesc->addr, chain->para.dst_data_len);\n-\t\tm_dst->buf_addr = get_data_ptr(vc_req, &desc,\n-\t\t\t\tchain->para.dst_data_len, VHOST_ACCESS_RW);\n+\t\tm_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);\n \t\tif (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {\n \t\t\tVC_LOG_ERR(\"zero_copy may fail due to cross page data\");\n \t\t\tret = VIRTIO_CRYPTO_ERR;\n \t\t\tgoto error_exit;\n \t\t}\n \n+\t\tif (unlikely(move_desc(vc_req->head, &desc,\n+\t\t\t\tchain->para.dst_data_len) < 0)) {\n+\t\t\tVC_LOG_ERR(\"Incorrect descriptor\");\n+\t\t\tret = VIRTIO_CRYPTO_ERR;\n+\t\t\tgoto error_exit;\n+\t\t}\n+\n \t\top->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,\n \t\t\t\tdesc->addr, chain->para.hash_result_len);\n-\t\top->sym->auth.digest.data = get_data_ptr(vc_req, &desc,\n-\t\t\t\tchain->para.hash_result_len, VHOST_ACCESS_RW);\n+\t\top->sym->auth.digest.data = get_data_ptr(vc_req, desc,\n+\t\t\t\tVHOST_ACCESS_RW);\n \t\tif (unlikely(op->sym->auth.digest.phys_addr == 0)) {\n \t\t\tVC_LOG_ERR(\"zero_copy may fail due to cross page data\");\n \t\t\tret = VIRTIO_CRYPTO_ERR;\n \t\t\tgoto error_exit;\n \t\t}\n+\n+\t\tif (unlikely(move_desc(vc_req->head, &desc,\n+\t\t\t\tchain->para.hash_result_len) < 0)) {\n+\t\t\tVC_LOG_ERR(\"Incorrect descriptor\");\n+\t\t\tret = VIRTIO_CRYPTO_ERR;\n+\t\t\tgoto error_exit;\n+\t\t}\n+\n \t\tbreak;\n \tcase RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:\n-\t\tdigest_offset = m_dst->data_len;\n-\t\tdigest_addr = 
rte_pktmbuf_mtod_offset(m_dst, void *,\n-\t\t\t\tdigest_offset);\n+\t\tvc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,\n+\t\t\t\trte_pktmbuf_mtod(m_src, uint8_t *),\n+\t\t\t\tchain->para.cipher_start_src_offset,\n+\t\t\t\tchain->para.dst_data_len -\n+\t\t\t\tchain->para.cipher_start_src_offset);\n+\t\tif (unlikely(vc_req->wb == NULL)) {\n+\t\t\tret = VIRTIO_CRYPTO_ERR;\n+\t\t\tgoto error_exit;\n+\t\t}\n \n-\t\tvc_req->wb_desc = desc;\n-\t\tvc_req->wb_len = m_dst->data_len + chain->para.hash_result_len;\n+\t\tdigest_offset = m_src->data_len;\n+\t\tdigest_addr = rte_pktmbuf_mtod_offset(m_src, void *,\n+\t\t\t\tdigest_offset);\n+\t\tdigest_desc = desc;\n \n-\t\tif (unlikely(move_desc(vc_req->head, &desc,\n-\t\t\t\tchain->para.dst_data_len) < 0)) {\n-\t\t\tret = VIRTIO_CRYPTO_BADMSG;\n+\t\t/** create a wb_data for digest */\n+\t\tewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,\n+\t\t\t\tdigest_addr, 0, chain->para.hash_result_len);\n+\t\tif (unlikely(ewb->next == NULL)) {\n+\t\t\tret = VIRTIO_CRYPTO_ERR;\n \t\t\tgoto error_exit;\n \t\t}\n \n-\t\tif (unlikely(copy_data(digest_addr, vc_req, &desc,\n+\t\tif (unlikely(copy_data(digest_addr, vc_req, &digest_desc,\n \t\t\t\tchain->para.hash_result_len)) < 0) {\n \t\t\tret = VIRTIO_CRYPTO_BADMSG;\n \t\t\tgoto error_exit;\n \t\t}\n \n \t\top->sym->auth.digest.data = digest_addr;\n-\t\top->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_dst,\n+\t\top->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,\n \t\t\t\tdigest_offset);\n \t\tbreak;\n \tdefault:\n@@ -904,7 +1064,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,\n \t}\n \n \t/* record inhdr */\n-\tvc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO);\n+\tvc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);\n \tif (unlikely(vc_req->inhdr == NULL)) {\n \t\tret = VIRTIO_CRYPTO_BADMSG;\n \t\tgoto error_exit;\n@@ -927,6 +1087,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, 
struct rte_crypto_op *op,\n \treturn 0;\n \n error_exit:\n+\tif (vc_req->wb)\n+\t\tfree_wb_data(vc_req->wb, vc_req->wb_pool);\n \tvc_req->len = INHDR_LEN;\n \treturn ret;\n }\n@@ -967,7 +1129,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,\n \tvc_req->head = head;\n \tvc_req->zero_copy = vcrypto->option;\n \n-\treq = get_data_ptr(vc_req, &desc, sizeof(*req), VHOST_ACCESS_RO);\n+\treq = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);\n \tif (unlikely(req == NULL)) {\n \t\tswitch (vcrypto->option) {\n \t\tcase RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:\n@@ -988,6 +1150,12 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,\n \t\t\tVC_LOG_ERR(\"Invalid option\");\n \t\t\tgoto error_exit;\n \t\t}\n+\t} else {\n+\t\tif (unlikely(move_desc(vc_req->head, &desc,\n+\t\t\t\tsizeof(*req)) < 0)) {\n+\t\t\tVC_LOG_ERR(\"Incorrect descriptor\");\n+\t\t\tgoto error_exit;\n+\t\t}\n \t}\n \n \tswitch (req->header.opcode) {\n@@ -1062,7 +1230,6 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,\n \tstruct rte_mbuf *m_dst = op->sym->m_dst;\n \tstruct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);\n \tuint16_t desc_idx;\n-\tint ret = 0;\n \n \tif (unlikely(!vc_req)) {\n \t\tVC_LOG_ERR(\"Failed to retrieve vc_req\");\n@@ -1077,19 +1244,18 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,\n \tif (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))\n \t\tvc_req->inhdr->status = VIRTIO_CRYPTO_ERR;\n \telse {\n-\t\tif (vc_req->zero_copy == 0) {\n-\t\t\tret = write_back_data(op, vc_req);\n-\t\t\tif (unlikely(ret != 0))\n-\t\t\t\tvc_req->inhdr->status = VIRTIO_CRYPTO_ERR;\n-\t\t}\n+\t\tif (vc_req->zero_copy == 0)\n+\t\t\twrite_back_data(vc_req);\n \t}\n \n \tvc_req->vq->used->ring[desc_idx].id = desc_idx;\n \tvc_req->vq->used->ring[desc_idx].len = vc_req->len;\n \n-\trte_mempool_put(m_dst->pool, (void *)m_dst);\n \trte_mempool_put(m_src->pool, (void *)m_src);\n \n+\tif (m_dst)\n+\t\trte_mempool_put(m_dst->pool, (void *)m_dst);\n+\n \treturn 
vc_req->vq;\n }\n \n@@ -1186,6 +1352,18 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,\n \t\tgoto error_exit;\n \t}\n \n+\tsnprintf(name, 127, \"WB_POOL_VM_%u\", (uint32_t)vid);\n+\tvcrypto->wb_pool = rte_mempool_create(name,\n+\t\t\tVHOST_CRYPTO_MBUF_POOL_SIZE,\n+\t\t\tsizeof(struct vhost_crypto_writeback_data),\n+\t\t\t128, 0, NULL, NULL, NULL, NULL,\n+\t\t\trte_socket_id(), 0);\n+\tif (!vcrypto->wb_pool) {\n+\t\tVC_LOG_ERR(\"Failed to creath mempool\");\n+\t\tret = -ENOMEM;\n+\t\tgoto error_exit;\n+\t}\n+\n \tdev->extern_data = vcrypto;\n \tdev->extern_ops.pre_msg_handle = NULL;\n \tdev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;\n@@ -1222,6 +1400,7 @@ rte_vhost_crypto_free(int vid)\n \n \trte_hash_free(vcrypto->session_map);\n \trte_mempool_free(vcrypto->mbuf_pool);\n+\trte_mempool_free(vcrypto->wb_pool);\n \trte_free(vcrypto);\n \n \tdev->extern_data = NULL;\n@@ -1257,11 +1436,30 @@ rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)\n \tif (vcrypto->option == (uint8_t)option)\n \t\treturn 0;\n \n-\tif (!(rte_mempool_full(vcrypto->mbuf_pool))) {\n+\tif (!(rte_mempool_full(vcrypto->mbuf_pool)) ||\n+\t\t\t!(rte_mempool_full(vcrypto->wb_pool))) {\n \t\tVC_LOG_ERR(\"Cannot update zero copy as mempool is not full\");\n \t\treturn -EINVAL;\n \t}\n \n+\tif (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {\n+\t\tchar name[128];\n+\n+\t\tsnprintf(name, 127, \"WB_POOL_VM_%u\", (uint32_t)vid);\n+\t\tvcrypto->wb_pool = rte_mempool_create(name,\n+\t\t\t\tVHOST_CRYPTO_MBUF_POOL_SIZE,\n+\t\t\t\tsizeof(struct vhost_crypto_writeback_data),\n+\t\t\t\t128, 0, NULL, NULL, NULL, NULL,\n+\t\t\t\trte_socket_id(), 0);\n+\t\tif (!vcrypto->wb_pool) {\n+\t\t\tVC_LOG_ERR(\"Failed to creath mbuf pool\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t} else {\n+\t\trte_mempool_free(vcrypto->wb_pool);\n+\t\tvcrypto->wb_pool = NULL;\n+\t}\n+\n \tvcrypto->option = (uint8_t)option;\n \n \treturn 0;\n@@ -1277,9 +1475,8 @@ 
rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,\n \tstruct vhost_virtqueue *vq;\n \tuint16_t avail_idx;\n \tuint16_t start_idx;\n-\tuint16_t required;\n \tuint16_t count;\n-\tuint16_t i;\n+\tuint16_t i = 0;\n \n \tif (unlikely(dev == NULL)) {\n \t\tVC_LOG_ERR(\"Invalid vid %i\", vid);\n@@ -1311,27 +1508,66 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,\n \t/* for zero copy, we need 2 empty mbufs for src and dst, otherwise\n \t * we need only 1 mbuf as src and dst\n \t */\n-\trequired = count * 2;\n-\tif (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, (void **)mbufs,\n-\t\t\trequired) < 0)) {\n-\t\tVC_LOG_ERR(\"Insufficient memory\");\n-\t\treturn -ENOMEM;\n-\t}\n+\tswitch (vcrypto->option) {\n+\tcase RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:\n+\t\tif (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,\n+\t\t\t\t(void **)mbufs, count * 2) < 0)) {\n+\t\t\tVC_LOG_ERR(\"Insufficient memory\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n \n-\tfor (i = 0; i < count; i++) {\n-\t\tuint16_t used_idx = (start_idx + i) & (vq->size - 1);\n-\t\tuint16_t desc_idx = vq->avail->ring[used_idx];\n-\t\tstruct vring_desc *head = &vq->desc[desc_idx];\n-\t\tstruct rte_crypto_op *op = ops[i];\n+\t\tfor (i = 0; i < count; i++) {\n+\t\t\tuint16_t used_idx = (start_idx + i) & (vq->size - 1);\n+\t\t\tuint16_t desc_idx = vq->avail->ring[used_idx];\n+\t\t\tstruct vring_desc *head = &vq->desc[desc_idx];\n+\t\t\tstruct rte_crypto_op *op = ops[i];\n \n-\t\top->sym->m_src = mbufs[i * 2];\n-\t\top->sym->m_dst = mbufs[i * 2 + 1];\n-\t\top->sym->m_src->data_off = 0;\n-\t\top->sym->m_dst->data_off = 0;\n+\t\t\top->sym->m_src = mbufs[i * 2];\n+\t\t\top->sym->m_dst = mbufs[i * 2 + 1];\n+\t\t\top->sym->m_src->data_off = 0;\n+\t\t\top->sym->m_dst->data_off = 0;\n+\n+\t\t\tif (unlikely(vhost_crypto_process_one_req(vcrypto, vq,\n+\t\t\t\t\top, head, desc_idx)) < 0)\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (unlikely(i < count))\n+\t\t\trte_mempool_put_bulk(vcrypto->mbuf_pool,\n+\t\t\t\t\t(void **)&mbufs[i * 
2],\n+\t\t\t\t\t(count - i) * 2);\n+\n+\t\tbreak;\n+\n+\tcase RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:\n+\t\tif (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,\n+\t\t\t\t(void **)mbufs, count) < 0)) {\n+\t\t\tVC_LOG_ERR(\"Insufficient memory\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\tfor (i = 0; i < count; i++) {\n+\t\t\tuint16_t used_idx = (start_idx + i) & (vq->size - 1);\n+\t\t\tuint16_t desc_idx = vq->avail->ring[used_idx];\n+\t\t\tstruct vring_desc *head = &vq->desc[desc_idx];\n+\t\t\tstruct rte_crypto_op *op = ops[i];\n+\n+\t\t\top->sym->m_src = mbufs[i];\n+\t\t\top->sym->m_dst = NULL;\n+\t\t\top->sym->m_src->data_off = 0;\n+\n+\t\t\tif (unlikely(vhost_crypto_process_one_req(vcrypto, vq,\n+\t\t\t\t\top, head, desc_idx)) < 0)\n+\t\t\t\tbreak;\n+\t\t}\n+\n+\t\tif (unlikely(i < count))\n+\t\t\trte_mempool_put_bulk(vcrypto->mbuf_pool,\n+\t\t\t\t\t(void **)&mbufs[i],\n+\t\t\t\t\tcount - i);\n+\n+\t\tbreak;\n \n-\t\tif (unlikely(vhost_crypto_process_one_req(vcrypto, vq, op, head,\n-\t\t\t\tdesc_idx)) < 0)\n-\t\t\tbreak;\n \t}\n \n \tvq->last_used_idx += i;\n",
    "prefixes": [
        "v2"
    ]
}