get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (full update).

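A minimal sketch of how a client might read this resource with Python's
requests library follows; the token header and the writable "state" field are
assumptions based on a typical Patchwork / Django REST framework setup and are
not confirmed by this page.

import requests

BASE = "http://patchwork.dpdk.org/api/1.0"

# Fetch the patch shown in the sample exchange below; a plain JSON client
# (no "?format=api", which selects the browsable HTML renderer) normally
# receives JSON via content negotiation.
resp = requests.get(BASE + "/patches/10683/")
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # "[dpdk-dev,v2,2/2] cryptodev: change burst API ..."
print(patch["state"])  # "superseded"
print(patch["mbox"])   # raw mbox URL, suitable for "git am"

# Updating a patch (PUT/PATCH) requires write access. A hypothetical partial
# update of the state field, assuming token authentication is enabled:
#
#   requests.patch(BASE + "/patches/10683/",
#                  json={"state": "accepted"},
#                  headers={"Authorization": "Token <your-token>"})
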
GET /api/1.0/patches/10683/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 10683,
    "url": "http://patchwork.dpdk.org/api/1.0/patches/10683/?format=api",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/1.0/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk"
    },
    "msgid": "<1455879670-31446-3-git-send-email-declan.doherty@intel.com>",
    "date": "2016-02-19T11:01:10",
    "name": "[dpdk-dev,v2,2/2] cryptodev: change burst API to be crypto op oriented",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "11c513b9d473341d6ca4151958b0246d1c2ddec3",
    "submitter": {
        "id": 11,
        "url": "http://patchwork.dpdk.org/api/1.0/people/11/?format=api",
        "name": "Doherty, Declan",
        "email": "declan.doherty@intel.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/1455879670-31446-3-git-send-email-declan.doherty@intel.com/mbox/",
    "series": [],
    "check": "pending",
    "checks": "http://patchwork.dpdk.org/api/patches/10683/checks/",
    "tags": {},
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id EE729C524;\n\tFri, 19 Feb 2016 12:03:55 +0100 (CET)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n\tby dpdk.org (Postfix) with ESMTP id 047C3C524\n\tfor <dev@dpdk.org>; Fri, 19 Feb 2016 12:03:53 +0100 (CET)",
            "from orsmga003.jf.intel.com ([10.7.209.27])\n\tby orsmga101.jf.intel.com with ESMTP; 19 Feb 2016 03:03:54 -0800",
            "from dwdohert-dpdk.ir.intel.com ([163.33.210.69])\n\tby orsmga003.jf.intel.com with ESMTP; 19 Feb 2016 03:03:51 -0800"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.22,470,1449561600\"; d=\"scan'208\";a=\"749309533\"",
        "From": "Declan Doherty <declan.doherty@intel.com>",
        "To": "dev@dpdk.org",
        "Date": "Fri, 19 Feb 2016 11:01:10 +0000",
        "Message-Id": "<1455879670-31446-3-git-send-email-declan.doherty@intel.com>",
        "X-Mailer": "git-send-email 2.5.0",
        "In-Reply-To": "<1455879670-31446-1-git-send-email-declan.doherty@intel.com>",
        "References": "<1454159235-5175-1-git-send-email-declan.doherty@intel.com>\n\t<1455879670-31446-1-git-send-email-declan.doherty@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v2 2/2] cryptodev: change burst API to be crypto\n\top oriented",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch modifies the crypto burst enqueue/dequeue APIs to operate on bursts\nrte_crypto_op's rather than the current implementation which operates on\nrte_mbuf bursts, this simplifies the burst processing in the crypto PMDs and the\nuse of crypto operations in general.\n\nThis change set also continues the separation of the symmetric operation parameters\nfrom the more general operation parameters, this will simplify the integration of\nasymmetric crypto operations in the future.\n\nAs well as the changes to the crypto APIs this patch adds functions for managing\nrte_crypto_op pools to the cryptodev API. It modifies the existing PMDs, unit\ntests and sample application to work with the modified APIs.\n\nFinally this change set removes the now unused rte_mbuf_offload library.\n\nSigned-off-by: Declan Doherty <declan.doherty@intel.com>\n---\n MAINTAINERS                                        |   4 -\n app/test/test_cryptodev.c                          | 800 +++++++++++----------\n app/test/test_cryptodev.h                          |   9 +-\n app/test/test_cryptodev_perf.c                     | 253 +++----\n config/common_bsdapp                               |   7 -\n config/common_linuxapp                             |  11 +-\n doc/api/doxy-api-index.md                          |   1 -\n drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c         | 171 +++--\n drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c     |  12 +-\n drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h |   2 +-\n drivers/crypto/qat/qat_crypto.c                    | 125 ++--\n drivers/crypto/qat/qat_crypto.h                    |  12 +-\n drivers/crypto/qat/rte_qat_cryptodev.c             |   4 +-\n examples/l2fwd-crypto/main.c                       | 264 ++++---\n lib/Makefile                                       |   1 -\n lib/librte_cryptodev/rte_crypto.h                  | 345 +++++++++\n lib/librte_cryptodev/rte_crypto_sym.h              | 377 +++++-----\n lib/librte_cryptodev/rte_cryptodev.c               |  74 ++\n lib/librte_cryptodev/rte_cryptodev.h               | 107 +--\n lib/librte_cryptodev/rte_cryptodev_version.map     |   1 +\n lib/librte_mbuf/rte_mbuf.h                         |   6 -\n lib/librte_mbuf_offload/Makefile                   |  52 --\n lib/librte_mbuf_offload/rte_mbuf_offload.c         | 100 ---\n lib/librte_mbuf_offload/rte_mbuf_offload.h         | 307 --------\n .../rte_mbuf_offload_version.map                   |   7 -\n 25 files changed, 1543 insertions(+), 1509 deletions(-)\n delete mode 100644 lib/librte_mbuf_offload/Makefile\n delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.c\n delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload.h\n delete mode 100644 lib/librte_mbuf_offload/rte_mbuf_offload_version.map",
    "diff": "diff --git a/MAINTAINERS b/MAINTAINERS\nindex 628bc05..8d84dda 100644\n--- a/MAINTAINERS\n+++ b/MAINTAINERS\n@@ -222,10 +222,6 @@ F: lib/librte_mbuf/\n F: doc/guides/prog_guide/mbuf_lib.rst\n F: app/test/test_mbuf.c\n \n-Packet buffer offload - EXPERIMENTAL\n-M: Declan Doherty <declan.doherty@intel.com>\n-F: lib/librte_mbuf_offload/\n-\n Ethernet API\n M: Thomas Monjalon <thomas.monjalon@6wind.com>\n F: lib/librte_ether/\ndiff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c\nindex 951b443..29e4b29 100644\n--- a/app/test/test_cryptodev.c\n+++ b/app/test/test_cryptodev.c\n@@ -35,7 +35,6 @@\n #include <rte_mbuf.h>\n #include <rte_malloc.h>\n #include <rte_memcpy.h>\n-#include <rte_mbuf_offload.h>\n \n #include <rte_crypto.h>\n #include <rte_cryptodev.h>\n@@ -48,7 +47,7 @@ static enum rte_cryptodev_type gbl_cryptodev_type;\n \n struct crypto_testsuite_params {\n \tstruct rte_mempool *mbuf_pool;\n-\tstruct rte_mempool *mbuf_ol_pool;\n+\tstruct rte_mempool *op_mpool;\n \tstruct rte_cryptodev_config conf;\n \tstruct rte_cryptodev_qp_conf qp_conf;\n \n@@ -62,8 +61,7 @@ struct crypto_unittest_params {\n \n \tstruct rte_cryptodev_sym_session *sess;\n \n-\tstruct rte_mbuf_offload *ol;\n-\tstruct rte_crypto_sym_op *op;\n+\tstruct rte_crypto_op *op;\n \n \tstruct rte_mbuf *obuf, *ibuf;\n \n@@ -104,7 +102,7 @@ setup_test_string(struct rte_mempool *mpool,\n \treturn m;\n }\n \n-#if HEX_DUMP\n+#ifdef HEX_DUMP\n static void\n hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)\n {\n@@ -112,27 +110,29 @@ hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)\n }\n #endif\n \n-static struct rte_mbuf *\n-process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)\n+static struct rte_crypto_op *\n+process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)\n {\n-\tstruct rte_mbuf *obuf = NULL;\n-#if HEX_DUMP\n+#ifdef HEX_DUMP\n \thexdump_mbuf_data(stdout, \"Enqueued Packet\", ibuf);\n #endif\n \n-\tif (rte_cryptodev_enqueue_burst(dev_id, 0, &ibuf, 1) != 1) {\n+\tif (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {\n \t\tprintf(\"Error sending packet for encryption\");\n \t\treturn NULL;\n \t}\n-\twhile (rte_cryptodev_dequeue_burst(dev_id, 0, &obuf, 1) == 0)\n+\n+\top = NULL;\n+\n+\twhile (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)\n \t\trte_pause();\n \n-#if HEX_DUMP\n+#ifdef HEX_DUMP\n \tif (obuf)\n \t\thexdump_mbuf_data(stdout, \"Dequeued Packet\", obuf);\n #endif\n \n-\treturn obuf;\n+\treturn op;\n }\n \n static struct crypto_testsuite_params testsuite_params = { NULL };\n@@ -162,13 +162,14 @@ testsuite_setup(void)\n \t\t}\n \t}\n \n-\tts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(\n-\t\t\t\"MBUF_OFFLOAD_POOL\",\n+\tts_params->op_mpool = rte_crypto_op_pool_create(\n+\t\t\t\"MBUF_CRYPTO_SYM_OP_POOL\",\n+\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC,\n \t\t\tNUM_MBUFS, MBUF_CACHE_SIZE,\n \t\t\tDEFAULT_NUM_XFORMS *\n-\t\t\t\t\tsizeof(struct rte_crypto_sym_xform),\n+\t\t\tsizeof(struct rte_crypto_sym_xform),\n \t\t\trte_socket_id());\n-\tif (ts_params->mbuf_ol_pool == NULL) {\n+\tif (ts_params->op_mpool == NULL) {\n \t\tRTE_LOG(ERR, USER1, \"Can't create CRYPTO_OP_POOL\\n\");\n \t\treturn TEST_FAILED;\n \t}\n@@ -253,10 +254,9 @@ testsuite_teardown(void)\n \t\trte_mempool_count(ts_params->mbuf_pool));\n \t}\n \n-\n-\tif (ts_params->mbuf_ol_pool != NULL) {\n+\tif (ts_params->op_mpool != NULL) {\n \t\tRTE_LOG(DEBUG, USER1, \"CRYPTO_OP_POOL count 
%u\\n\",\n-\t\trte_mempool_count(ts_params->mbuf_ol_pool));\n+\t\trte_mempool_count(ts_params->op_mpool));\n \t}\n \n }\n@@ -326,8 +326,8 @@ ut_teardown(void)\n \t}\n \n \t/* free crypto operation structure */\n-\tif (ut_params->ol)\n-\t\trte_pktmbuf_offload_free(ut_params->ol);\n+\tif (ut_params->op)\n+\t\trte_crypto_op_free(ut_params->op);\n \n \t/*\n \t * free mbuf - both obuf and ibuf are usually the same,\n@@ -680,76 +680,77 @@ static uint8_t aes_cbc_iv[] = {\n /* ***** AES-CBC / HMAC-SHA1 Hash Tests ***** */\n \n static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_ciphertext[] = {\n-\t0x8B, 0X4D, 0XDA, 0X1B, 0XCF, 0X04, 0XA0, 0X31,\n-\t0XB4, 0XBF, 0XBD, 0X68, 0X43, 0X20, 0X7E, 0X76,\n-\t0XB1, 0X96, 0X8B, 0XA2, 0X7C, 0XA2, 0X83, 0X9E,\n-\t0X39, 0X5A, 0X2F, 0X7E, 0X92, 0XB4, 0X48, 0X1A,\n-\t0X3F, 0X6B, 0X5D, 0XDF, 0X52, 0X85, 0X5F, 0X8E,\n-\t0X42, 0X3C, 0XFB, 0XE9, 0X1A, 0X24, 0XD6, 0X08,\n-\t0XDD, 0XFD, 0X16, 0XFB, 0XE9, 0X55, 0XEF, 0XF0,\n-\t0XA0, 0X8D, 0X13, 0XAB, 0X81, 0XC6, 0X90, 0X01,\n-\t0XB5, 0X18, 0X84, 0XB3, 0XF6, 0XE6, 0X11, 0X57,\n-\t0XD6, 0X71, 0XC6, 0X3C, 0X3F, 0X2F, 0X33, 0XEE,\n-\t0X24, 0X42, 0X6E, 0XAC, 0X0B, 0XCA, 0XEC, 0XF9,\n-\t0X84, 0XF8, 0X22, 0XAA, 0X60, 0XF0, 0X32, 0XA9,\n-\t0X75, 0X75, 0X3B, 0XCB, 0X70, 0X21, 0X0A, 0X8D,\n-\t0X0F, 0XE0, 0XC4, 0X78, 0X2B, 0XF8, 0X97, 0XE3,\n-\t0XE4, 0X26, 0X4B, 0X29, 0XDA, 0X88, 0XCD, 0X46,\n-\t0XEC, 0XAA, 0XF9, 0X7F, 0XF1, 0X15, 0XEA, 0XC3,\n-\t0X87, 0XE6, 0X31, 0XF2, 0XCF, 0XDE, 0X4D, 0X80,\n-\t0X70, 0X91, 0X7E, 0X0C, 0XF7, 0X26, 0X3A, 0X92,\n-\t0X4F, 0X18, 0X83, 0XC0, 0X8F, 0X59, 0X01, 0XA5,\n-\t0X88, 0XD1, 0XDB, 0X26, 0X71, 0X27, 0X16, 0XF5,\n-\t0XEE, 0X10, 0X82, 0XAC, 0X68, 0X26, 0X9B, 0XE2,\n-\t0X6D, 0XD8, 0X9A, 0X80, 0XDF, 0X04, 0X31, 0XD5,\n-\t0XF1, 0X35, 0X5C, 0X3B, 0XDD, 0X9A, 0X65, 0XBA,\n-\t0X58, 0X34, 0X85, 0X61, 0X1C, 0X42, 0X10, 0X76,\n-\t0X73, 0X02, 0X42, 0XC9, 0X23, 0X18, 0X8E, 0XB4,\n-\t0X6F, 0XB4, 0XA3, 0X54, 0X6E, 0X88, 0X3B, 0X62,\n-\t0X7C, 0X02, 0X8D, 0X4C, 0X9F, 0XC8, 0X45, 0XF4,\n-\t0XC9, 0XDE, 0X4F, 0XEB, 0X22, 0X83, 0X1B, 0XE4,\n-\t0X49, 0X37, 0XE4, 0XAD, 0XE7, 0XCD, 0X21, 0X54,\n-\t0XBC, 0X1C, 0XC2, 0X04, 0X97, 0XB4, 0X10, 0X61,\n-\t0XF0, 0XE4, 0XEF, 0X27, 0X63, 0X3A, 0XDA, 0X91,\n-\t0X41, 0X25, 0X62, 0X1C, 0X5C, 0XB6, 0X38, 0X4A,\n-\t0X88, 0X71, 0X59, 0X5A, 0X8D, 0XA0, 0X09, 0XAF,\n-\t0X72, 0X94, 0XD7, 0X79, 0X5C, 0X60, 0X7C, 0X8F,\n-\t0X4C, 0XF5, 0XD9, 0XA1, 0X39, 0X6D, 0X81, 0X28,\n-\t0XEF, 0X13, 0X28, 0XDF, 0XF5, 0X3E, 0XF7, 0X8E,\n-\t0X09, 0X9C, 0X78, 0X18, 0X79, 0XB8, 0X68, 0XD7,\n-\t0XA8, 0X29, 0X62, 0XAD, 0XDE, 0XE1, 0X61, 0X76,\n-\t0X1B, 0X05, 0X16, 0XCD, 0XBF, 0X02, 0X8E, 0XA6,\n-\t0X43, 0X6E, 0X92, 0X55, 0X4F, 0X60, 0X9C, 0X03,\n-\t0XB8, 0X4F, 0XA3, 0X02, 0XAC, 0XA8, 0XA7, 0X0C,\n-\t0X1E, 0XB5, 0X6B, 0XF8, 0XC8, 0X4D, 0XDE, 0XD2,\n-\t0XB0, 0X29, 0X6E, 0X40, 0XE6, 0XD6, 0XC9, 0XE6,\n-\t0XB9, 0X0F, 0XB6, 0X63, 0XF5, 0XAA, 0X2B, 0X96,\n-\t0XA7, 0X16, 0XAC, 0X4E, 0X0A, 0X33, 0X1C, 0XA6,\n-\t0XE6, 0XBD, 0X8A, 0XCF, 0X40, 0XA9, 0XB2, 0XFA,\n-\t0X63, 0X27, 0XFD, 0X9B, 0XD9, 0XFC, 0XD5, 0X87,\n-\t0X8D, 0X4C, 0XB6, 0XA4, 0XCB, 0XE7, 0X74, 0X55,\n-\t0XF4, 0XFB, 0X41, 0X25, 0XB5, 0X4B, 0X0A, 0X1B,\n-\t0XB1, 0XD6, 0XB7, 0XD9, 0X47, 0X2A, 0XC3, 0X98,\n-\t0X6A, 0XC4, 0X03, 0X73, 0X1F, 0X93, 0X6E, 0X53,\n-\t0X19, 0X25, 0X64, 0X15, 0X83, 0XF9, 0X73, 0X2A,\n-\t0X74, 0XB4, 0X93, 0X69, 0XC4, 0X72, 0XFC, 0X26,\n-\t0XA2, 0X9F, 0X43, 0X45, 0XDD, 0XB9, 0XEF, 0X36,\n-\t0XC8, 0X3A, 0XCD, 0X99, 0X9B, 0X54, 0X1A, 0X36,\n-\t0XC1, 0X59, 0XF8, 0X98, 0XA8, 0XCC, 0X28, 0X0D,\n-\t0X73, 0X4C, 0XEE, 0X98, 0XCB, 0X7C, 
0X58, 0X7E,\n-\t0X20, 0X75, 0X1E, 0XB7, 0XC9, 0XF8, 0XF2, 0X0E,\n-\t0X63, 0X9E, 0X05, 0X78, 0X1A, 0XB6, 0XA8, 0X7A,\n-\t0XF9, 0X98, 0X6A, 0XA6, 0X46, 0X84, 0X2E, 0XF6,\n-\t0X4B, 0XDC, 0X9B, 0X8F, 0X9B, 0X8F, 0XEE, 0XB4,\n-\t0XAA, 0X3F, 0XEE, 0XC0, 0X37, 0X27, 0X76, 0XC7,\n-\t0X95, 0XBB, 0X26, 0X74, 0X69, 0X12, 0X7F, 0XF1,\n-\t0XBB, 0XFF, 0XAE, 0XB5, 0X99, 0X6E, 0XCB, 0X0C\n+\t0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,\n+\t0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,\n+\t0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,\n+\t0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,\n+\t0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,\n+\t0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,\n+\t0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,\n+\t0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,\n+\t0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,\n+\t0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,\n+\t0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,\n+\t0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,\n+\t0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,\n+\t0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,\n+\t0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,\n+\t0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,\n+\t0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,\n+\t0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,\n+\t0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,\n+\t0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,\n+\t0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,\n+\t0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,\n+\t0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,\n+\t0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,\n+\t0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,\n+\t0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,\n+\t0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,\n+\t0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,\n+\t0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,\n+\t0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,\n+\t0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,\n+\t0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,\n+\t0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,\n+\t0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,\n+\t0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,\n+\t0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,\n+\t0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,\n+\t0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,\n+\t0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,\n+\t0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,\n+\t0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,\n+\t0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,\n+\t0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,\n+\t0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,\n+\t0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,\n+\t0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,\n+\t0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,\n+\t0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,\n+\t0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,\n+\t0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,\n+\t0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,\n+\t0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,\n+\t0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,\n+\t0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,\n+\t0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,\n+\t0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,\n+\t0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,\n+\t0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,\n+\t0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,\n+\t0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,\n+\t0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 
0xB4,\n+\t0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,\n+\t0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,\n+\t0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C\n };\n \n static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest[] = {\n-\t0x9a, 0X4f, 0X88, 0X1b, 0Xb6, 0X8f, 0Xd8, 0X60,\n-\t0X42, 0X1a, 0X7d, 0X3d, 0Xf5, 0X82, 0X80, 0Xf1,\n-\t0X18, 0X8c, 0X1d, 0X32 };\n+\t0x9a, 0x4f, 0x88, 0x1b, 0xb6, 0x8f, 0xd8, 0x60,\n+\t0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1,\n+\t0x18, 0x8c, 0x1d, 0x32\n+};\n \n \n static int\n@@ -776,7 +777,6 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)\n \tut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;\n \n \t/* Setup HMAC Parameters */\n-\n \tut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;\n \tut_params->auth_xform.next = NULL;\n \n@@ -786,59 +786,65 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)\n \tut_params->auth_xform.auth.key.data = hmac_sha1_key;\n \tut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;\n \n-\t/* Create Crypto session*/\n-\tut_params->sess =\n-\t\trte_cryptodev_sym_session_create(ts_params->valid_devs[0],\n-\t\t\t\t\t\t&ut_params->cipher_xform);\n+\t/* Create crypto session*/\n+\tut_params->sess = rte_cryptodev_sym_session_create(\n+\t\t\tts_params->valid_devs[0],\n+\t\t\t&ut_params->cipher_xform);\n \tTEST_ASSERT_NOT_NULL(ut_params->sess, \"Session creation failed\");\n \n-\t/* Generate Crypto op data structure */\n-\tut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,\n-\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n-\tTEST_ASSERT_NOT_NULL(ut_params->ol,\n-\t\t\t\"Failed to allocate pktmbuf offload\");\n+\t/* Generate crypto op data structure */\n+\tut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,\n+\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC);\n+\tTEST_ASSERT_NOT_NULL(ut_params->op,\n+\t\t\t\"Failed to allocate symmetric crypto operation struct\");\n \n-\tut_params->op = &ut_params->ol->op.crypto;\n+\trte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);\n \n-\t/* Set crypto operation data parameters */\n-\trte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);\n+\tstruct rte_crypto_sym_op *sym_op = ut_params->op->sym;\n \n-\tut_params->op->digest.data = ut_params->digest;\n-\tut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(\n+\t/* set crypto operationsource mbuf */\n+\tsym_op->m_src = ut_params->ibuf;\n+\n+\t/* Set crypto operation authentication parameters */\n+\tsym_op->auth.digest.data = ut_params->digest;\n+\tsym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(\n \t\t\tut_params->ibuf, QUOTE_512_BYTES);\n-\tut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;\n+\tsym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;\n \n-\tut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,\n-\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n-\tut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n-\tut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.length = QUOTE_512_BYTES;\n \n-\trte_memcpy(ut_params->op->iv.data, aes_cbc_iv,\n+\t/* Set crypto operation cipher parameters */\n+\tsym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,\n \t\t\tCIPHER_IV_LENGTH_AES_CBC);\n+\tsym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n+\tsym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;\n \n-\tut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_cipher.length = 
QUOTE_512_BYTES;\n-\tut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_hash.length = QUOTE_512_BYTES;\n+\trte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,\n+\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n \n-\trte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);\n+\tsym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->cipher.data.length = QUOTE_512_BYTES;\n \n \t/* Process crypto operation */\n-\tut_params->obuf = process_crypto_request(ts_params->valid_devs[0],\n-\t\t\tut_params->ibuf);\n-\tTEST_ASSERT_NOT_NULL(ut_params->obuf, \"failed to retrieve obuf\");\n+\tTEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],\n+\t\t\tut_params->op), \"failed to process sym crypto op\");\n+\n+\tTEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,\n+\t\t\t\"crypto op processing failed\");\n \n \t/* Validate obuf */\n-\tTEST_ASSERT_BUFFERS_ARE_EQUAL(\n-\t\t\trte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +\n-\t\t\tCIPHER_IV_LENGTH_AES_CBC,\n+\tuint8_t *ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,\n+\t\t\tuint8_t *, CIPHER_IV_LENGTH_AES_CBC);\n+\n+\tTEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,\n \t\t\tcatch_22_quote_2_512_bytes_AES_CBC_ciphertext,\n \t\t\tQUOTE_512_BYTES,\n-\t\t\t\"Ciphertext data not as expected\");\n+\t\t\t\"ciphertext data not as expected\");\n \n-\tTEST_ASSERT_BUFFERS_ARE_EQUAL(\n-\t\t\trte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +\n-\t\t\tCIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,\n+\tuint8_t *digest = ciphertext + QUOTE_512_BYTES;\n+\n+\tTEST_ASSERT_BUFFERS_ARE_EQUAL(digest,\n \t\t\tcatch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,\n \t\t\tgbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?\n \t\t\t\t\tTRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :\n@@ -863,60 +869,66 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)\n \tTEST_ASSERT_NOT_NULL(ut_params->digest, \"no room to append digest\");\n \n \t/* Generate Crypto op data structure */\n-\tut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,\n-\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n-\tTEST_ASSERT_NOT_NULL(ut_params->ol,\n-\t\t\t\"Failed to allocate pktmbuf offload\");\n-\n-\tut_params->op = &ut_params->ol->op.crypto;\n+\tut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,\n+\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC);\n+\tTEST_ASSERT_NOT_NULL(ut_params->op,\n+\t\t\t\"Failed to allocate symmetric crypto operation struct\");\n \n-\tTEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_sym_xforms(\n-\t\t\tut_params->ol, 2),\n+\tTEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(ut_params->op, 2),\n \t\t\t\"failed to allocate space for crypto transforms\");\n \n+\tstruct rte_crypto_sym_op *sym_op = ut_params->op->sym;\n+\n+\t/* set crypto operation source mbuf */\n+\tsym_op->m_src = ut_params->ibuf;\n+\n \t/* Set crypto operation data parameters */\n-\tut_params->op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;\n+\tsym_op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;\n \n \t/* cipher parameters */\n-\tut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;\n-\tut_params->op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;\n-\tut_params->op->xform->cipher.key.data = aes_cbc_key;\n-\tut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;\n+\tsym_op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;\n+\tsym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;\n+\tsym_op->xform->cipher.key.data = aes_cbc_key;\n+\tsym_op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;\n \n \t/* hash parameters 
*/\n-\tut_params->op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;\n+\tsym_op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;\n \n-\tut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;\n-\tut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;\n-\tut_params->op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;\n-\tut_params->op->xform->next->auth.key.data = hmac_sha1_key;\n-\tut_params->op->xform->next->auth.digest_length =\n+\tsym_op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;\n+\tsym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;\n+\tsym_op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;\n+\tsym_op->xform->next->auth.key.data = hmac_sha1_key;\n+\tsym_op->xform->next->auth.digest_length =\n \t\t\tDIGEST_BYTE_LENGTH_SHA1;\n \n-\tut_params->op->digest.data = ut_params->digest;\n-\tut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(\n+\tsym_op->auth.digest.data = ut_params->digest;\n+\tsym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(\n \t\t\tut_params->ibuf, QUOTE_512_BYTES);\n-\tut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;\n+\tsym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;\n \n-\tut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,\n-\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n-\tut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n-\tut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;\n \n-\trte_memcpy(ut_params->op->iv.data, aes_cbc_iv,\n+\tsym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.length = QUOTE_512_BYTES;\n+\n+\tsym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,\n \t\t\tCIPHER_IV_LENGTH_AES_CBC);\n+\tsym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n+\tsym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;\n \n-\tut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_cipher.length = QUOTE_512_BYTES;\n-\tut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_hash.length = QUOTE_512_BYTES;\n+\trte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,\n+\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n \n-\trte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);\n+\tsym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->cipher.data.length = QUOTE_512_BYTES;\n \n \t/* Process crypto operation */\n-\tut_params->obuf = process_crypto_request(ts_params->valid_devs[0],\n-\t\t\tut_params->ibuf);\n-\tTEST_ASSERT_NOT_NULL(ut_params->obuf, \"failed to retrieve obuf\");\n+\tTEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],\n+\t\t\tut_params->op), \"failed to process sym crypto op\");\n+\n+\tTEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,\n+\t\t\t\"crypto op processing failed\");\n+\n+\tut_params->obuf = ut_params->op->sym->m_src;\n \n \t/* Validate obuf */\n \tTEST_ASSERT_BUFFERS_ARE_EQUAL(\n@@ -985,42 +997,48 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)\n \tTEST_ASSERT_NOT_NULL(ut_params->sess, \"Session creation failed\");\n \n \t/* Generate Crypto op data structure */\n-\tut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,\n-\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n-\tTEST_ASSERT_NOT_NULL(ut_params->ol,\n-\t\t\t\"Failed to allocate pktmbuf offload\");\n+\tut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,\n+\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC);\n+\tTEST_ASSERT_NOT_NULL(ut_params->op,\n+\t\t\t\"Failed to allocate symmetric crypto operation struct\");\n \n-\tut_params->op = 
&ut_params->ol->op.crypto;\n+\t/* attach symmetric crypto session to crypto operations */\n+\trte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);\n \n+\tstruct rte_crypto_sym_op *sym_op = ut_params->op->sym;\n \n-\t/* Set crypto operation data parameters */\n-\trte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);\n+\t/* set crypto operation source mbuf */\n+\tsym_op->m_src = ut_params->ibuf;\n \n-\tut_params->op->digest.data = ut_params->digest;\n-\tut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(\n+\tsym_op->auth.digest.data = ut_params->digest;\n+\tsym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(\n \t\t\tut_params->ibuf, QUOTE_512_BYTES);\n-\tut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;\n+\tsym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;\n \n-\tut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,\n-\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n-\tut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n-\tut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.length = QUOTE_512_BYTES;\n \n-\trte_memcpy(ut_params->op->iv.data, aes_cbc_iv,\n+\tsym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,\n \t\t\tCIPHER_IV_LENGTH_AES_CBC);\n+\tsym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n+\tsym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;\n \n-\tut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_cipher.length = QUOTE_512_BYTES;\n+\trte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,\n+\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n \n-\tut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_hash.length = QUOTE_512_BYTES;\n+\tsym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->cipher.data.length = QUOTE_512_BYTES;\n \n-\trte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);\n \n \t/* Process crypto operation */\n-\tut_params->obuf = process_crypto_request(ts_params->valid_devs[0],\n-\t\t\tut_params->ibuf);\n-\tTEST_ASSERT_NOT_NULL(ut_params->obuf, \"failed to retrieve obuf\");\n+\tTEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],\n+\t\t\tut_params->op), \"failed to process sym crypto op\");\n+\n+\tTEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,\n+\t\t\t\"crypto op processing failed\");\n+\n+\tut_params->obuf = ut_params->op->sym->m_src;\n+\n \n \t/* Validate obuf */\n \tTEST_ASSERT_BUFFERS_ARE_EQUAL(\n@@ -1088,47 +1106,51 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)\n \tut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;\n \n \t/* Create Crypto session*/\n-\tut_params->sess =\n-\t\trte_cryptodev_sym_session_create(ts_params->valid_devs[0],\n-\t\t\t\t\t\t&ut_params->cipher_xform);\n+\tut_params->sess = rte_cryptodev_sym_session_create(\n+\t\t\tts_params->valid_devs[0],\n+\t\t\t&ut_params->cipher_xform);\n \tTEST_ASSERT_NOT_NULL(ut_params->sess, \"Session creation failed\");\n \n \t/* Generate Crypto op data structure */\n-\tut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,\n-\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n-\tTEST_ASSERT_NOT_NULL(ut_params->ol,\n-\t\t\t\"Failed to allocate pktmbuf offload\");\n+\tut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,\n+\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC);\n+\tTEST_ASSERT_NOT_NULL(ut_params->op,\n+\t\t\t\"Failed to allocate symmetric crypto operation struct\");\n \n-\tut_params->op = 
&ut_params->ol->op.crypto;\n+\trte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);\n \n+\tstruct rte_crypto_sym_op *sym_op = ut_params->op->sym;\n \n-\t/* Set crypto operation data parameters */\n-\trte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);\n+\t/* set crypto operation source mbuf */\n+\tsym_op->m_src = ut_params->ibuf;\n \n-\tut_params->op->digest.data = ut_params->digest;\n-\tut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(\n+\tsym_op->auth.digest.data = ut_params->digest;\n+\tsym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(\n \t\t\tut_params->ibuf, QUOTE_512_BYTES);\n-\tut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;\n+\tsym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;\n \n-\tut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,\n-\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n-\tut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n-\tut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.length = QUOTE_512_BYTES;\n \n-\trte_memcpy(ut_params->op->iv.data, aes_cbc_iv,\n+\tsym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,\n \t\t\tCIPHER_IV_LENGTH_AES_CBC);\n+\tsym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n+\tsym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;\n \n-\tut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_cipher.length = QUOTE_512_BYTES;\n-\tut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_hash.length = QUOTE_512_BYTES;\n+\trte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,\n+\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n \n-\trte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);\n+\tsym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->cipher.data.length = QUOTE_512_BYTES;\n \n \t/* Process crypto operation */\n-\tut_params->obuf = process_crypto_request(ts_params->valid_devs[0],\n-\t\t\tut_params->ibuf);\n-\tTEST_ASSERT_NOT_NULL(ut_params->obuf, \"failed to retrieve obuf\");\n+\tTEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],\n+\t\t\tut_params->op), \"failed to process sym crypto op\");\n+\n+\tTEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,\n+\t\t\t\"crypto op processing failed\");\n+\n+\tut_params->obuf = ut_params->op->sym->m_src;\n \n \t/* Validate obuf */\n \tTEST_ASSERT_BUFFERS_ARE_EQUAL(\n@@ -1197,42 +1219,47 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)\n \tTEST_ASSERT_NOT_NULL(ut_params->sess, \"Session creation failed\");\n \n \t/* Generate Crypto op data structure */\n-\tut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,\n-\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n-\tTEST_ASSERT_NOT_NULL(ut_params->ol,\n-\t\t\t\"Failed to allocate pktmbuf offload\");\n-\n-\tut_params->op = &ut_params->ol->op.crypto;\n+\tut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,\n+\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC);\n+\tTEST_ASSERT_NOT_NULL(ut_params->op,\n+\t\t\t\"Failed to allocate symmetric crypto operation struct\");\n \n \n \t/* Set crypto operation data parameters */\n-\trte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);\n+\trte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);\n \n-\tut_params->op->digest.data = ut_params->digest;\n-\tut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(\n+\tstruct rte_crypto_sym_op *sym_op = ut_params->op->sym;\n+\n+\t/* set crypto operation 
source mbuf */\n+\tsym_op->m_src = ut_params->ibuf;\n+\n+\tsym_op->auth.digest.data = ut_params->digest;\n+\tsym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(\n \t\t\tut_params->ibuf, QUOTE_512_BYTES);\n-\tut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;\n+\tsym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;\n \n-\tut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(\n+\tsym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.length = QUOTE_512_BYTES;\n+\n+\tsym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(\n \t\t\tut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);\n-\tut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n-\tut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n+\tsym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;\n \n-\trte_memcpy(ut_params->op->iv.data, aes_cbc_iv,\n+\trte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,\n \t\t\tCIPHER_IV_LENGTH_AES_CBC);\n \n-\tut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_cipher.length = QUOTE_512_BYTES;\n+\tsym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->cipher.data.length = QUOTE_512_BYTES;\n \n-\tut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_hash.length = QUOTE_512_BYTES;\n+\t/* Process crypto operation */\n+\tTEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],\n+\t\t\tut_params->op), \"failed to process sym crypto op\");\n \n-\trte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);\n+\tTEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,\n+\t\t\t\"crypto op processing failed\");\n \n-\t/* Process crypto operation */\n-\tut_params->obuf = process_crypto_request(ts_params->valid_devs[0],\n-\t\t\tut_params->ibuf);\n-\tTEST_ASSERT_NOT_NULL(ut_params->obuf, \"failed to retrieve obuf\");\n+\tut_params->obuf = ut_params->op->sym->m_src;\n \n \t/* Validate obuf */\n \tTEST_ASSERT_BUFFERS_ARE_EQUAL(\n@@ -1244,7 +1271,6 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)\n \tTEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,\n \t\t\t\"Digest verification failed\");\n \n-\n \treturn TEST_SUCCESS;\n }\n \n@@ -1312,43 +1338,46 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)\n \n \tTEST_ASSERT_NOT_NULL(ut_params->sess, \"Session creation failed\");\n \n-\n \t/* Generate Crypto op data structure */\n-\tut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,\n-\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n-\tTEST_ASSERT_NOT_NULL(ut_params->ol,\n-\t\t\t\"Failed to allocate pktmbuf offload\");\n+\tut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,\n+\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC);\n+\tTEST_ASSERT_NOT_NULL(ut_params->op,\n+\t\t\t\"Failed to allocate symmetric crypto operation struct\");\n \n-\tut_params->op = &ut_params->ol->op.crypto;\n+\trte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);\n \n+\tstruct rte_crypto_sym_op *sym_op = ut_params->op->sym;\n \n-\t/* Set crypto operation data parameters */\n-\trte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);\n+\t/* set crypto operation source mbuf */\n+\tsym_op->m_src = ut_params->ibuf;\n \n-\tut_params->op->digest.data = ut_params->digest;\n-\tut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(\n+\tsym_op->auth.digest.data = ut_params->digest;\n+\tsym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(\n \t\t\tut_params->ibuf, 
QUOTE_512_BYTES);\n-\tut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;\n+\tsym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;\n \n-\tut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,\n-\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n-\tut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n-\tut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.length = QUOTE_512_BYTES;\n \n-\trte_memcpy(ut_params->op->iv.data, aes_cbc_iv,\n+\tsym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,\n \t\t\tCIPHER_IV_LENGTH_AES_CBC);\n+\tsym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n+\tsym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;\n \n-\tut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_cipher.length = QUOTE_512_BYTES;\n-\tut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_hash.length = QUOTE_512_BYTES;\n+\trte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,\n+\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n \n-\trte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);\n+\tsym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->cipher.data.length = QUOTE_512_BYTES;\n \n \t/* Process crypto operation */\n-\tut_params->obuf = process_crypto_request(ts_params->valid_devs[0],\n-\t\t\tut_params->ibuf);\n-\tTEST_ASSERT_NOT_NULL(ut_params->obuf, \"failed to retrieve obuf\");\n+\tTEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],\n+\t\t\tut_params->op), \"failed to process sym crypto op\");\n+\n+\tTEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,\n+\t\t\t\"crypto op processing failed\");\n+\n+\tut_params->obuf = ut_params->op->sym->m_src;\n \n \t/* Validate obuf */\n \tTEST_ASSERT_BUFFERS_ARE_EQUAL(\n@@ -1367,7 +1396,6 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)\n \t\t\t\t\tDIGEST_BYTE_LENGTH_SHA512,\n \t\t\t\"Generated digest data not as expected\");\n \n-\n \treturn TEST_SUCCESS;\n }\n \n@@ -1424,6 +1452,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(\n \tut_params->auth_xform.auth.key.data = hmac_sha512_key;\n \tut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA512;\n \tut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;\n+\n \treturn TEST_SUCCESS;\n }\n \n@@ -1448,43 +1477,46 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,\n \t\t\tDIGEST_BYTE_LENGTH_SHA512);\n \n \t/* Generate Crypto op data structure */\n-\tut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,\n-\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n-\tTEST_ASSERT_NOT_NULL(ut_params->ol,\n-\t\t\t\"Failed to allocate pktmbuf offload\");\n+\tut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,\n+\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC);\n+\tTEST_ASSERT_NOT_NULL(ut_params->op,\n+\t\t\t\"Failed to allocate symmetric crypto operation struct\");\n \n-\tut_params->op = &ut_params->ol->op.crypto;\n+\trte_crypto_op_attach_sym_session(ut_params->op, sess);\n \n+\tstruct rte_crypto_sym_op *sym_op = ut_params->op->sym;\n \n-\t/* Set crypto operation data parameters */\n-\trte_crypto_sym_op_attach_session(ut_params->op, sess);\n+\t/* set crypto operation source mbuf */\n+\tsym_op->m_src = ut_params->ibuf;\n \n-\tut_params->op->digest.data = ut_params->digest;\n-\tut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(\n+\tsym_op->auth.digest.data = ut_params->digest;\n+\tsym_op->auth.digest.phys_addr = 
rte_pktmbuf_mtophys_offset(\n \t\t\tut_params->ibuf, QUOTE_512_BYTES);\n-\tut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;\n+\tsym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;\n \n-\tut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(\n+\tsym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.length = QUOTE_512_BYTES;\n+\n+\tsym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(\n \t\t\tut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);\n-\tut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(\n+\tsym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(\n \t\t\tut_params->ibuf, 0);\n-\tut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;\n \n-\trte_memcpy(ut_params->op->iv.data, aes_cbc_iv,\n+\trte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,\n \t\t\tCIPHER_IV_LENGTH_AES_CBC);\n \n-\tut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_cipher.length = QUOTE_512_BYTES;\n+\tsym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->cipher.data.length = QUOTE_512_BYTES;\n \n-\tut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_hash.length = QUOTE_512_BYTES;\n+\t/* Process crypto operation */\n+\tTEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],\n+\t\t\tut_params->op), \"failed to process sym crypto op\");\n \n-\trte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);\n+\tTEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,\n+\t\t\t\"crypto op processing failed\");\n \n-\t/* Process crypto operation */\n-\tut_params->obuf = process_crypto_request(ts_params->valid_devs[0],\n-\t\t\tut_params->ibuf);\n-\tTEST_ASSERT_NOT_NULL(ut_params->obuf, \"failed to retrieve obuf\");\n+\tut_params->obuf = ut_params->op->sym->m_src;\n \n \t/* Validate obuf */\n \tTEST_ASSERT_BUFFERS_ARE_EQUAL(\n@@ -1497,8 +1529,6 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,\n \tTEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,\n \t\t\t\"Digest verification failed\");\n \n-\n-\n \treturn TEST_SUCCESS;\n }\n \n@@ -1524,10 +1554,6 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)\n \tut_params->ibuf = setup_test_string(ts_params->mbuf_pool,\n \t\t\tcatch_22_quote, QUOTE_512_BYTES, 0);\n \n-\tut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,\n-\t\t\tDIGEST_BYTE_LENGTH_AES_XCBC);\n-\tTEST_ASSERT_NOT_NULL(ut_params->digest, \"no room to append digest\");\n-\n \t/* Setup Cipher Parameters */\n \tut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;\n \tut_params->cipher_xform.next = &ut_params->auth_xform;\n@@ -1548,56 +1574,75 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)\n \tut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;\n \n \t/* Create Crypto session*/\n-\tut_params->sess =\n-\t\trte_cryptodev_sym_session_create(ts_params->valid_devs[0],\n-\t\t\t\t\t\t&ut_params->cipher_xform);\n+\tut_params->sess = rte_cryptodev_sym_session_create(\n+\t\t\tts_params->valid_devs[0],\n+\t\t\t&ut_params->cipher_xform);\n \tTEST_ASSERT_NOT_NULL(ut_params->sess, \"Session creation failed\");\n \n \t/* Generate Crypto op data structure */\n-\tut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,\n-\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n-\tTEST_ASSERT_NOT_NULL(ut_params->ol,\n-\t\t\t\"Failed to allocate pktmbuf offload\");\n+\tut_params->op = 
rte_crypto_op_alloc(ts_params->op_mpool,\n+\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC);\n+\tTEST_ASSERT_NOT_NULL(ut_params->op,\n+\t\t\t\"Failed to allocate symmetric crypto operation struct\");\n \n-\tut_params->op = &ut_params->ol->op.crypto;\n+\trte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);\n \n+\tstruct rte_crypto_sym_op *sym_op = ut_params->op->sym;\n \n-\t/* Set crypto operation data parameters */\n-\trte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);\n+\t/* set crypto operation source mbuf */\n+\tsym_op->m_src = ut_params->ibuf;\n \n-\tut_params->op->iv.data = (uint8_t *)\n-\t\trte_pktmbuf_prepend(ut_params->ibuf,\n-\t\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n-\tut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n-\tut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;\n+\t/* Set operation cipher parameters */\n+\tsym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(\n+\t\t\tsym_op->m_src, CIPHER_IV_LENGTH_AES_CBC);\n+\tsym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(sym_op->m_src);\n+\tsym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;\n \n-\trte_memcpy(ut_params->op->iv.data, aes_cbc_iv,\n+\trte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,\n \t\t\tCIPHER_IV_LENGTH_AES_CBC);\n \n-\tut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_cipher.length = QUOTE_512_BYTES;\n-\tut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_hash.length = QUOTE_512_BYTES;\n+\tsym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->cipher.data.length = QUOTE_512_BYTES;\n+\n+\t/* Set operation authentication parameters */\n+\tsym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(\n+\t\t\tsym_op->m_src, DIGEST_BYTE_LENGTH_AES_XCBC);\n+\tsym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(\n+\t\t\tsym_op->m_src,\n+\t\t\tCIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES);\n+\tsym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;\n+\n+\tmemset(sym_op->auth.digest.data, 0, DIGEST_BYTE_LENGTH_AES_XCBC);\n+\n+\tsym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.length = QUOTE_512_BYTES;\n \n-\trte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);\n \n \t/* Process crypto operation */\n-\tut_params->obuf = process_crypto_request(ts_params->valid_devs[0],\n-\t\t\tut_params->ibuf);\n-\tTEST_ASSERT_NOT_NULL(ut_params->obuf, \"failed to retrieve obuf\");\n+\tut_params->op = process_crypto_request(ts_params->valid_devs[0],\n+\t\t\tut_params->op);\n+\tTEST_ASSERT_NOT_NULL(ut_params->op, \"failed to process sym crypto op\");\n+\n+\tTEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,\n+\t\t\t\"crypto op processing failed\");\n \n \t/* Validate obuf */\n \tTEST_ASSERT_BUFFERS_ARE_EQUAL(\n-\t\t\trte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +\n-\t\t\tCIPHER_IV_LENGTH_AES_CBC,\n+\t\t\trte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,\n+\t\t\t\t\tuint8_t *, CIPHER_IV_LENGTH_AES_CBC),\n \t\t\tcatch_22_quote_2_512_bytes_AES_CBC_ciphertext,\n \t\t\tQUOTE_512_BYTES,\n \t\t\t\"Ciphertext data not as expected\");\n+\n \tTEST_ASSERT_BUFFERS_ARE_EQUAL(\n-\t\t\trte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +\n-\t\t\tCIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,\n+\t\t\trte_pktmbuf_mtod_offset(\n+\t\t\t\t\tut_params->op->sym->m_src, uint8_t *,\n+\t\t\t\t\tCIPHER_IV_LENGTH_AES_CBC +\n+\t\t\t\t\tQUOTE_512_BYTES),\n \t\t\tcatch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,\n-\t\t\tDIGEST_BYTE_LENGTH_AES_XCBC,\n+\t\t\tgbl_cryptodev_type == 
RTE_CRYPTODEV_AESNI_MB_PMD ?\n+\t\t\t\t\tTRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC :\n+\t\t\t\t\tDIGEST_BYTE_LENGTH_AES_XCBC,\n \t\t\t\"Generated digest data not as expected\");\n \n \treturn TEST_SUCCESS;\n@@ -1614,14 +1659,6 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)\n \t\t(const char *)catch_22_quote_2_512_bytes_AES_CBC_ciphertext,\n \t\tQUOTE_512_BYTES, 0);\n \n-\tut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,\n-\t\t\tDIGEST_BYTE_LENGTH_AES_XCBC);\n-\tTEST_ASSERT_NOT_NULL(ut_params->digest, \"no room to append digest\");\n-\n-\trte_memcpy(ut_params->digest,\n-\t\t\tcatch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,\n-\t\t\tDIGEST_BYTE_LENGTH_AES_XCBC);\n-\n \t/* Setup Cipher Parameters */\n \tut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;\n \tut_params->cipher_xform.next = NULL;\n@@ -1648,35 +1685,55 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)\n \tTEST_ASSERT_NOT_NULL(ut_params->sess, \"Session creation failed\");\n \n \t/* Generate Crypto op data structure */\n-\tut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,\n-\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n-\tTEST_ASSERT_NOT_NULL(ut_params->ol,\n-\t\t\t\"Failed to allocate pktmbuf offload\");\n+\tut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,\n+\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC);\n+\tTEST_ASSERT_NOT_NULL(ut_params->op,\n+\t\t\t\"Failed to allocate symmetric crypto operation struct\");\n \n-\tut_params->op = &ut_params->ol->op.crypto;\n+\t/* Set crypto operation data parameters */\n+\trte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);\n \n+\tstruct rte_crypto_sym_op *sym_op = ut_params->op->sym;\n \n-\t/* Set crypto operation data parameters */\n-\trte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);\n+\t/* set crypto operation source mbuf */\n+\tsym_op->m_src = ut_params->ibuf;\n \n-\tut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,\n-\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n-\tut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n-\tut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;\n \n-\trte_memcpy(ut_params->op->iv.data, aes_cbc_iv,\n+\tsym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(\n+\t\t\t\tut_params->ibuf, DIGEST_BYTE_LENGTH_AES_XCBC);\n+\tTEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,\n+\t\t\t\"no room to append digest\");\n+\n+\tsym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(\n+\t\t\tut_params->ibuf, QUOTE_512_BYTES);\n+\tsym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;\n+\n+\trte_memcpy(sym_op->auth.digest.data,\n+\t\t\tcatch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,\n+\t\t\tDIGEST_BYTE_LENGTH_AES_XCBC);\n+\n+\tsym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.length = QUOTE_512_BYTES;\n+\n+\tsym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(\n+\t\t\tut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);\n+\tsym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);\n+\tsym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;\n+\n+\trte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,\n \t\t\tCIPHER_IV_LENGTH_AES_CBC);\n \n-\tut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_cipher.length = QUOTE_512_BYTES;\n-\tut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_hash.length = QUOTE_512_BYTES;\n-\trte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);\n+\tsym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->cipher.data.length = 
QUOTE_512_BYTES;\n \n \t/* Process crypto operation */\n-\tut_params->obuf = process_crypto_request(ts_params->valid_devs[0],\n-\t\t\tut_params->ibuf);\n-\tTEST_ASSERT_NOT_NULL(ut_params->obuf, \"failed to retrieve obuf\");\n+\tTEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],\n+\t\t\tut_params->op), \"failed to process sym crypto op\");\n+\n+\tTEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,\n+\t\t\t\"crypto op processing failed\");\n+\n+\tut_params->obuf = ut_params->op->sym->m_src;\n \n \t/* Validate obuf */\n \tTEST_ASSERT_BUFFERS_ARE_EQUAL(\n@@ -1837,50 +1894,49 @@ test_not_in_place_crypto(void)\n \t\t\tDIGEST_BYTE_LENGTH_SHA512);\n \n \t/* Generate Crypto op data structure */\n-\tut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,\n-\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n-\tTEST_ASSERT_NOT_NULL(ut_params->ol,\n-\t\t\t\"Failed to allocate pktmbuf offload\");\n-\n-\tut_params->op = &ut_params->ol->op.crypto;\n+\tut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,\n+\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC);\n+\tTEST_ASSERT_NOT_NULL(ut_params->op,\n+\t\t\t\"Failed to allocate symmetric crypto operation struct\");\n \n \n \t/* Set crypto operation data parameters */\n-\trte_crypto_sym_op_attach_session(ut_params->op, ut_params->sess);\n+\trte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);\n+\n+\tstruct rte_crypto_sym_op *sym_op = ut_params->op->sym;\n \n-\tut_params->op->digest.data = ut_params->digest;\n-\tut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(\n+\t/* set crypto operation source mbuf */\n+\tsym_op->m_src = ut_params->ibuf;\n+\tsym_op->m_dst = dst_m;\n+\n+\tsym_op->auth.digest.data = ut_params->digest;\n+\tsym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(\n \t\t\tut_params->ibuf, QUOTE_512_BYTES);\n-\tut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;\n+\tsym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;\n+\n+\tsym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->auth.data.length = QUOTE_512_BYTES;\n \n-\tut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(\n+\n+\tsym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(\n \t\t\tut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);\n-\tut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(\n+\tsym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(\n \t\t\tut_params->ibuf, 0);\n-\tut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;\n \n-\trte_memcpy(ut_params->op->iv.data, aes_cbc_iv,\n+\trte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,\n \t\t\tCIPHER_IV_LENGTH_AES_CBC);\n \n-\tut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_cipher.length = QUOTE_512_BYTES;\n-\n-\tut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\tut_params->op->data.to_hash.length = QUOTE_512_BYTES;\n-\n-\tut_params->op->dst.m = dst_m;\n-\tut_params->op->dst.offset = 0;\n-\n-\trte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);\n+\tsym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\tsym_op->cipher.data.length = QUOTE_512_BYTES;\n \n \t/* Process crypto operation */\n-\tut_params->obuf = process_crypto_request(ts_params->valid_devs[0],\n-\t\t\tut_params->ibuf);\n-\tTEST_ASSERT_NOT_NULL(ut_params->obuf, \"failed to retrieve obuf\");\n+\tTEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],\n+\t\t\tut_params->op), \"failed to process op obuf\");\n \n \t/* Validate obuf */\n 
\tTEST_ASSERT_BUFFERS_ARE_EQUAL(\n-\t\t\trte_pktmbuf_mtod(ut_params->op->dst.m, char *),\n+\t\t\trte_pktmbuf_mtod(ut_params->op->sym->m_dst, char *),\n \t\t\tcatch_22_quote,\n \t\t\tQUOTE_512_BYTES,\n \t\t\t\"Plaintext data not as expected\");\ndiff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h\nindex 034393e..c84ba42 100644\n--- a/app/test/test_cryptodev.h\n+++ b/app/test/test_cryptodev.h\n@@ -32,8 +32,6 @@\n #ifndef TEST_CRYPTODEV_H_\n #define TEST_CRYPTODEV_H_\n \n-#define HEX_DUMP 0\n-\n #define FALSE                           0\n #define TRUE                            1\n \n@@ -47,8 +45,9 @@\n #define DEFAULT_NUM_XFORMS              (2)\n #define NUM_MBUFS                       (8191)\n #define MBUF_CACHE_SIZE                 (250)\n-#define MBUF_SIZE   (2048 + DIGEST_BYTE_LENGTH_SHA512 + \\\n-\t\t\t\tsizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)\n+#define MBUF_DATAPAYLOAD_SIZE\t\t(2048 + DIGEST_BYTE_LENGTH_SHA512)\n+#define MBUF_SIZE\t\t\t(sizeof(struct rte_mbuf) + \\\n+\t\tRTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)\n \n #define BYTE_LENGTH(x)\t\t\t\t(x/8)\n /* HASH DIGEST LENGTHS */\n@@ -62,7 +61,9 @@\n #define AES_XCBC_MAC_KEY_SZ\t\t\t(16)\n \n #define TRUNCATED_DIGEST_BYTE_LENGTH_SHA1\t\t(12)\n+#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA224\t\t(14)\n #define TRUNCATED_DIGEST_BYTE_LENGTH_SHA256\t\t(16)\n #define TRUNCATED_DIGEST_BYTE_LENGTH_SHA512\t\t(32)\n+#define TRUNCATED_DIGEST_BYTE_LENGTH_AES_XCBC\t\t(12)\n \n #endif /* TEST_CRYPTODEV_H_ */\ndiff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c\nindex 42dd9bc..b3f4fd9 100644\n--- a/app/test/test_cryptodev_perf.c\n+++ b/app/test/test_cryptodev_perf.c\n@@ -32,7 +32,6 @@\n \n #include <rte_common.h>\n #include <rte_mbuf.h>\n-#include <rte_mbuf_offload.h>\n #include <rte_malloc.h>\n #include <rte_memcpy.h>\n \n@@ -50,7 +49,7 @@\n \n struct crypto_testsuite_params {\n \tstruct rte_mempool *mbuf_mp;\n-\tstruct rte_mempool *mbuf_ol_pool;\n+\tstruct rte_mempool *op_mpool;\n \n \tuint16_t nb_queue_pairs;\n \n@@ -68,8 +67,7 @@ struct crypto_unittest_params {\n \n \tstruct rte_cryptodev_sym_session *sess;\n \n-\tstruct rte_crypto_sym_op *op;\n-\tstruct rte_mbuf_offload *ol;\n+\tstruct rte_crypto_op *op;\n \n \tstruct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];\n \tstruct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];\n@@ -79,7 +77,7 @@ struct crypto_unittest_params {\n \n static struct rte_mbuf *\n setup_test_string(struct rte_mempool *mpool,\n-\t\tconst char *string, size_t len, uint8_t blocksize)\n+\t\tconst uint8_t *data, size_t len, uint8_t blocksize)\n {\n \tstruct rte_mbuf *m = rte_pktmbuf_alloc(mpool);\n \tsize_t t_len = len - (blocksize ? 
(len % blocksize) : 0);\n@@ -92,7 +90,7 @@ setup_test_string(struct rte_mempool *mpool,\n \t\t\treturn NULL;\n \t\t}\n \n-\t\trte_memcpy(dst, string, t_len);\n+\t\trte_memcpy(dst, (const void *)data, t_len);\n \t}\n \treturn m;\n }\n@@ -113,23 +111,24 @@ testsuite_setup(void)\n \tts_params->mbuf_mp = rte_mempool_lookup(\"CRYPTO_PERF_MBUFPOOL\");\n \tif (ts_params->mbuf_mp == NULL) {\n \t\t/* Not already created so create */\n-\t\tts_params->mbuf_mp = rte_mempool_create(\"CRYPTO_PERF_MBUFPOOL\", NUM_MBUFS,\n-\t\t\tMBUF_SIZE, MBUF_CACHE_SIZE,\n-\t\t\tsizeof(struct rte_pktmbuf_pool_private),\n-\t\t\trte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,\n-\t\t\trte_socket_id(), 0);\n+\t\tts_params->mbuf_mp = rte_pktmbuf_pool_create(\n+\t\t\t\t\"CRYPTO_PERF_MBUFPOOL\",\n+\t\t\t\tNUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,\n+\t\t\t\trte_socket_id());\n \t\tif (ts_params->mbuf_mp == NULL) {\n \t\t\tRTE_LOG(ERR, USER1, \"Can't create CRYPTO_PERF_MBUFPOOL\\n\");\n \t\t\treturn TEST_FAILED;\n \t\t}\n \t}\n \n-\tts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(\"CRYPTO_OP_POOL\",\n-\t\t\t\tNUM_MBUFS, MBUF_CACHE_SIZE,\n-\t\t\t\tDEFAULT_NUM_XFORMS *\n-\t\t\t\tsizeof(struct rte_crypto_sym_xform),\n-\t\t\t\trte_socket_id());\n-\t\tif (ts_params->mbuf_ol_pool == NULL) {\n+\n+\tts_params->op_mpool = rte_crypto_op_pool_create(\"CRYPTO_OP_POOL\",\n+\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC,\n+\t\t\tNUM_MBUFS, MBUF_CACHE_SIZE,\n+\t\t\tDEFAULT_NUM_XFORMS *\n+\t\t\tsizeof(struct rte_crypto_sym_xform),\n+\t\t\trte_socket_id());\n+\t\tif (ts_params->op_mpool == NULL) {\n \t\t\tRTE_LOG(ERR, USER1, \"Can't create CRYPTO_OP_POOL\\n\");\n \t\t\treturn TEST_FAILED;\n \t\t}\n@@ -256,8 +255,8 @@ ut_teardown(void)\n \t\t\t\tut_params->sess);\n \n \t/* free crypto operation structure */\n-\tif (ut_params->ol)\n-\t\trte_pktmbuf_offload_free(ut_params->ol);\n+\tif (ut_params->op)\n+\t\trte_crypto_op_free(ut_params->op);\n \n \tfor (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {\n \t\tif (ut_params->obuf[i])\n@@ -1698,11 +1697,12 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {\n static int\n test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)\n {\n-\tuint32_t num_to_submit = 2048, max_outstanding_reqs = 512;\n-\tstruct rte_mbuf *rx_mbufs[num_to_submit], *tx_mbufs[num_to_submit];\n+\tuint32_t num_to_submit = 4096;\n+\tstruct rte_crypto_op *c_ops[num_to_submit];\n+\tstruct rte_crypto_op *proc_ops[num_to_submit];\n \tuint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;\n \tuint32_t burst_sent, burst_received;\n-\tuint32_t b, burst_size, num_sent, num_received;\n+\tuint32_t i, burst_size, num_sent, num_received;\n \tstruct crypto_testsuite_params *ts_params = &testsuite_params;\n \tstruct crypto_unittest_params *ut_params = &unittest_params;\n \tstruct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;\n@@ -1739,46 +1739,50 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)\n \tTEST_ASSERT_NOT_NULL(ut_params->sess, \"Session creation failed\");\n \n \t/* Generate Crypto op data structure(s) */\n-\tfor (b = 0; b < num_to_submit ; b++) {\n-\t\ttx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,\n-\t\t\t(const char *)data_params[0].expected.ciphertext,\n+\tfor (i = 0; i < num_to_submit ; i++) {\n+\t\tstruct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,\n+\t\t\t\tdata_params[0].expected.ciphertext,\n \t\t\t\tdata_params[0].length, 0);\n-\t\tTEST_ASSERT_NOT_NULL(tx_mbufs[b], \"Failed to allocate tx_buf\");\n+\t\tTEST_ASSERT_NOT_NULL(m, \"Failed to allocate 
tx_buf\");\n \n-\t\tut_params->digest = (uint8_t *)rte_pktmbuf_append(tx_mbufs[b],\n+\t\tut_params->digest = (uint8_t *)rte_pktmbuf_append(m,\n \t\t\t\tDIGEST_BYTE_LENGTH_SHA256);\n-\t\tTEST_ASSERT_NOT_NULL(ut_params->digest, \"no room to append digest\");\n+\t\tTEST_ASSERT_NOT_NULL(ut_params->digest,\n+\t\t\t\t\"no room to append digest\");\n \n \t\trte_memcpy(ut_params->digest, data_params[0].expected.digest,\n \t\t\tDIGEST_BYTE_LENGTH_SHA256);\n \n-\t\tstruct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(\n-\t\t\tts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);\n-\t\tTEST_ASSERT_NOT_NULL(ol, \"Failed to allocate pktmbuf offload\");\n \n-\t\tstruct rte_crypto_sym_op *cop = &ol->op.crypto;\n+\t\tstruct rte_crypto_op *op =\n+\t\t\t\trte_crypto_op_alloc(ts_params->op_mpool,\n+\t\t\t\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC);\n \n-\t\trte_crypto_sym_op_attach_session(cop, ut_params->sess);\n+\t\trte_crypto_op_attach_sym_session(op, ut_params->sess);\n \n-\t\tcop->digest.data = ut_params->digest;\n-\t\tcop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],\n+\t\top->sym->auth.digest.data = ut_params->digest;\n+\t\top->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,\n \t\t\t\tdata_params[0].length);\n-\t\tcop->digest.length = DIGEST_BYTE_LENGTH_SHA256;\n+\t\top->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;\n+\n+\t\top->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\t\top->sym->auth.data.length = data_params[0].length;\n+\n \n-\t\tcop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],\n+\t\top->sym->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,\n \t\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n-\t\tcop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);\n-\t\tcop->iv.length = CIPHER_IV_LENGTH_AES_CBC;\n+\t\top->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);\n+\t\top->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;\n \n-\t\trte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);\n+\t\trte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,\n+\t\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n \n-\t\tcop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\t\tcop->data.to_cipher.length = data_params[0].length;\n+\t\top->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\t\top->sym->cipher.data.length = data_params[0].length;\n \n-\t\tcop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\t\tcop->data.to_hash.length = data_params[0].length;\n+\t\top->sym->m_src = m;\n \n-\t\trte_pktmbuf_offload_attach(tx_mbufs[b], ol);\n+\t\tc_ops[i] = op;\n \t}\n \n \tprintf(\"\\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC \"\n@@ -1789,17 +1793,17 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)\n \tprintf(\"\\nDev No\\tQP No\\tNum Sent\\tNum Received\\tTx/Rx burst\");\n \tprintf(\"\\tRetries (Device Busy)\\tAverage IA cycle cost \"\n \t\t\t\"(assuming 0 retries)\");\n-\tfor (b = 2; b <= 128 ; b *= 2) {\n+\tfor (i = 2; i <= 128 ; i *= 2) {\n \t\tnum_sent = 0;\n \t\tnum_received = 0;\n \t\tretries = 0;\n \t\tfailed_polls = 0;\n-\t\tburst_size = b;\n+\t\tburst_size = i;\n \t\ttotal_cycles = 0;\n \t\twhile (num_sent < num_to_submit) {\n \t\t\tstart_cycles = rte_rdtsc_precise();\n-\t\t\tburst_sent = rte_cryptodev_enqueue_burst(dev_num, 0,\n-\t\t\t\t\t&tx_mbufs[num_sent],\n+\t\t\tburst_sent = rte_cryptodev_enqueue_burst(dev_num,\n+\t\t\t\t\t0, &c_ops[num_sent],\n \t\t\t\t\t((num_to_submit-num_sent) < burst_size) ?\n \t\t\t\t\tnum_to_submit-num_sent : burst_size);\n \t\t\tif (burst_sent == 0)\n@@ -1814,9 +1818,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t 
dev_num)\n \t\t\trte_delay_ms(1);\n \n \t\t\tstart_cycles = rte_rdtsc_precise();\n-\t\t\tburst_received =\n-\t\t\t\trte_cryptodev_dequeue_burst(dev_num,\n-\t\t\t\t\t\t0, rx_mbufs, burst_size);\n+\t\t\tburst_received = rte_cryptodev_dequeue_burst(\n+\t\t\t\t\tdev_num, 0, proc_ops, burst_size);\n \t\t\tif (burst_received == 0)\n \t\t\t\tfailed_polls++;\n \t\t\telse\n@@ -1824,15 +1827,15 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)\n \t\t\tend_cycles = rte_rdtsc_precise();\n \t\t\ttotal_cycles += end_cycles - start_cycles;\n \t\t}\n+\n \t\twhile (num_received != num_to_submit) {\n \t\t\tif (gbl_cryptodev_preftest_devtype ==\n-\t\t\t\t\t\tRTE_CRYPTODEV_AESNI_MB_PMD)\n-\t\t\t\trte_cryptodev_enqueue_burst(dev_num,\n-\t\t\t\t\t\t\t\t0, NULL, 0);\n+\t\t\t\t\tRTE_CRYPTODEV_AESNI_MB_PMD)\n+\t\t\t\trte_cryptodev_enqueue_burst(dev_num, 0,\n+\t\t\t\t\t\tNULL, 0);\n \n-\t\t\tburst_received =\n-\t\t\t\trte_cryptodev_dequeue_burst(dev_num,\n-\t\t\t\t\t\t0, rx_mbufs, burst_size);\n+\t\t\tburst_received = rte_cryptodev_dequeue_burst(\n+\t\t\t\t\tdev_num, 0, proc_ops, burst_size);\n \t\t\tif (burst_received == 0)\n \t\t\t\tfailed_polls++;\n \t\t\telse\n@@ -1846,16 +1849,9 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)\n \t}\n \tprintf(\"\\n\");\n \n-\tfor (b = 0; b < max_outstanding_reqs ; b++) {\n-\t\tstruct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;\n-\n-\t\tif (ol) {\n-\t\t\tdo {\n-\t\t\t\trte_pktmbuf_offload_free(ol);\n-\t\t\t\tol = ol->next;\n-\t\t\t} while (ol != NULL);\n-\t\t}\n-\t\trte_pktmbuf_free(tx_mbufs[b]);\n+\tfor (i = 0; i < num_to_submit ; i++) {\n+\t\trte_pktmbuf_free(c_ops[i]->sym->m_src);\n+\t\trte_crypto_op_free(c_ops[i]);\n \t}\n \treturn TEST_SUCCESS;\n }\n@@ -1865,11 +1861,14 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)\n {\n \tuint16_t index;\n \tuint32_t burst_sent, burst_received;\n-\tuint32_t b, num_sent, num_received, throughput;\n+\tuint32_t b, num_sent, num_received;\n \tuint64_t failed_polls, retries, start_cycles, end_cycles;\n \tconst uint64_t mhz = rte_get_tsc_hz()/1000000;\n-\tdouble mmps;\n-\tstruct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];\n+\tdouble throughput, mmps;\n+\n+\tstruct rte_crypto_op *c_ops[DEFAULT_BURST_SIZE];\n+\tstruct rte_crypto_op *proc_ops[DEFAULT_BURST_SIZE];\n+\n \tstruct crypto_testsuite_params *ts_params = &testsuite_params;\n \tstruct crypto_unittest_params *ut_params = &unittest_params;\n \tstruct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;\n@@ -1908,7 +1907,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)\n \t\t\t\"AES128_CBC_SHA256_HMAC requests with a constant burst \"\n \t\t\t\"size of %u while varying payload sizes\", DEFAULT_BURST_SIZE);\n \tprintf(\"\\nDev No\\tQP No\\tReq Size(B)\\tNum Sent\\tNum Received\\t\"\n-\t\t\t\"Mrps\\tThoughput(Mbps)\");\n+\t\t\t\"Mrps\\tThoughput(Gbps)\");\n \tprintf(\"\\tRetries (Attempted a burst, but the device was busy)\");\n \tfor (index = 0; index < MAX_PACKET_SIZE_INDEX; index++) {\n \t\tnum_sent = 0;\n@@ -1918,63 +1917,70 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)\n \n \t\t/* Generate Crypto op data structure(s) */\n \t\tfor (b = 0; b < DEFAULT_BURST_SIZE ; b++) {\n-\t\t\ttx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,\n+\t\t\tstruct rte_mbuf *m = setup_test_string(\n+\t\t\t\t\tts_params->mbuf_mp,\n+\t\t\t\t\t(const uint8_t *)\n \t\t\t\t\tdata_params[index].plaintext,\n \t\t\t\t\tdata_params[index].length,\n \t\t\t\t\t0);\n 
\n-\t\t\tut_params->digest = (uint8_t *)rte_pktmbuf_append(\n-\t\t\t\ttx_mbufs[b], DIGEST_BYTE_LENGTH_SHA256);\n-\t\t\tTEST_ASSERT_NOT_NULL(ut_params->digest,\t\"no room to append digest\");\n+\t\t\tut_params->digest = (uint8_t *)rte_pktmbuf_append(m,\n+\t\t\t\t\tDIGEST_BYTE_LENGTH_SHA256);\n+\t\t\tTEST_ASSERT_NOT_NULL(ut_params->digest\n+\t\t\t\t\t, \"no room to append digest\");\n \n-\t\t\trte_memcpy(ut_params->digest, data_params[index].expected.digest,\n-\t\t\tDIGEST_BYTE_LENGTH_SHA256);\n+\t\t\trte_memcpy(ut_params->digest,\n+\t\t\t\t\tdata_params[index].expected.digest,\n+\t\t\t\t\tDIGEST_BYTE_LENGTH_SHA256);\n \n-\t\t\tstruct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(\n-\t\t\t\t\t\tts_params->mbuf_ol_pool,\n-\t\t\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n-\t\t\tTEST_ASSERT_NOT_NULL(ol, \"Failed to allocate pktmbuf offload\");\n+\t\t\tstruct rte_crypto_op *op = rte_crypto_op_alloc(\n+\t\t\t\t\tts_params->op_mpool,\n+\t\t\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC);\n \n-\t\t\tstruct rte_crypto_sym_op *cop = &ol->op.crypto;\n+\t\t\trte_crypto_op_attach_sym_session(op, ut_params->sess);\n \n-\t\t\trte_crypto_sym_op_attach_session(cop, ut_params->sess);\n+\t\t\top->sym->auth.digest.data = ut_params->digest;\n+\t\t\top->sym->auth.digest.phys_addr =\n+\t\t\t\t\trte_pktmbuf_mtophys_offset(m,\n+\t\t\t\t\t\tdata_params[index].length);\n+\t\t\top->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;\n \n-\t\t\tcop->digest.data = ut_params->digest;\n-\t\t\tcop->digest.phys_addr = rte_pktmbuf_mtophys_offset(\n-\t\t\t\ttx_mbufs[b], data_params[index].length);\n-\t\t\tcop->digest.length = DIGEST_BYTE_LENGTH_SHA256;\n+\t\t\top->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\t\t\top->sym->auth.data.length = data_params[index].length;\n \n-\t\t\tcop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],\n+\t\t\top->sym->cipher.iv.data = (uint8_t *)\n+\t\t\t\t\trte_pktmbuf_prepend(m,\n+\t\t\t\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n+\t\t\top->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);\n+\t\t\top->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;\n+\n+\t\t\trte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,\n \t\t\t\t\tCIPHER_IV_LENGTH_AES_CBC);\n-\t\t\tcop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);\n-\t\t\tcop->iv.length = CIPHER_IV_LENGTH_AES_CBC;\n \n-\t\t\trte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);\n+\t\t\top->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;\n+\t\t\top->sym->cipher.data.length = data_params[index].length;\n \n-\t\t\tcop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\t\t\tcop->data.to_cipher.length = data_params[index].length;\n \n-\t\t\tcop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;\n-\t\t\tcop->data.to_hash.length = data_params[index].length;\n+\t\t\top->sym->m_src = m;\n \n-\t\t\trte_pktmbuf_offload_attach(tx_mbufs[b], ol);\n+\t\t\tc_ops[b] = op;\n \t\t}\n \t\tstart_cycles = rte_rdtsc_precise();\n \t\twhile (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {\n-\t\t\tburst_sent = rte_cryptodev_enqueue_burst(dev_num,\n-\t\t\t\t\t0, tx_mbufs,\n-\t\t\t\t\t((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent)\n-\t\t\t\t\t\t\t< DEFAULT_BURST_SIZE) ?\n-\t\t\t\t\tDEFAULT_NUM_REQS_TO_SUBMIT-num_sent :\n-\t\t\t\t\t\t\tDEFAULT_BURST_SIZE);\n+\t\t\tuint16_t burst_size = (DEFAULT_NUM_REQS_TO_SUBMIT -\n+\t\t\t\t\tnum_sent) < DEFAULT_BURST_SIZE ?\n+\t\t\t\t\t\tDEFAULT_NUM_REQS_TO_SUBMIT -\n+\t\t\t\t\t\tnum_sent : DEFAULT_BURST_SIZE;\n+\n+\t\t\tburst_sent = rte_cryptodev_enqueue_burst(\n+\t\t\t\t\tdev_num, 0, c_ops, burst_size);\n \t\t\tif (burst_sent == 0)\n \t\t\t\tretries++;\n 
\t\t\telse\n \t\t\t\tnum_sent += burst_sent;\n \n-\t\t\tburst_received =\n-\t\t\t\trte_cryptodev_dequeue_burst(dev_num,\n-\t\t\t\t\t0, rx_mbufs, DEFAULT_BURST_SIZE);\n+\t\t\tburst_received = rte_cryptodev_dequeue_burst(dev_num,\n+\t\t\t\t\t0, proc_ops, DEFAULT_BURST_SIZE);\n \t\t\tif (burst_received == 0)\n \t\t\t\tfailed_polls++;\n \t\t\telse\n@@ -1982,37 +1988,34 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)\n \t\t}\n \t\twhile (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {\n \t\t\tif (gbl_cryptodev_preftest_devtype ==\n-\t\t\t\t\t\tRTE_CRYPTODEV_AESNI_MB_PMD)\n-\t\t\t\trte_cryptodev_enqueue_burst(dev_num,\n-\t\t\t\t\t\t\t\t0, NULL, 0);\n+\t\t\t\t\tRTE_CRYPTODEV_AESNI_MB_PMD)\n+\t\t\t\trte_cryptodev_enqueue_burst(dev_num, 0,\n+\t\t\t\t\t\tNULL, 0);\n \n-\t\t\tburst_received =\n-\t\t\t\trte_cryptodev_dequeue_burst(dev_num, 0,\n-\t\t\t\t\t\trx_mbufs, DEFAULT_BURST_SIZE);\n+\t\t\tburst_received = rte_cryptodev_dequeue_burst(\n+\t\t\t\t\tdev_num, 0, proc_ops,\n+\t\t\t\t\tDEFAULT_BURST_SIZE);\n \t\t\tif (burst_received == 0)\n \t\t\t\tfailed_polls++;\n \t\t\telse\n \t\t\t\tnum_received += burst_received;\n \t\t}\n \t\tend_cycles = rte_rdtsc_precise();\n-\t\tmmps = (double)num_received*mhz/(end_cycles - start_cycles);\n-\t\tthroughput = mmps*data_params[index].length*8;\n+\t\tmmps = ((double)num_received * mhz) /\n+\t\t\t\t(end_cycles - start_cycles);\n+\t\tthroughput = (mmps * data_params[index].length * 8) / 1000;\n+\n \t\tprintf(\"\\n%u\\t%u\\t%u\\t\\t%u\\t%u\", dev_num, 0,\n-\t\t\tdata_params[index].length, num_sent, num_received);\n-\t\tprintf(\"\\t%.2f\\t%u\", mmps, throughput);\n+\t\t\t\tdata_params[index].length,\n+\t\t\t\tnum_sent, num_received);\n+\t\tprintf(\"\\t%.2f\\t%.2f\", mmps, throughput);\n \t\tprintf(\"\\t\\t%\"PRIu64, retries);\n \t\tfor (b = 0; b < DEFAULT_BURST_SIZE ; b++) {\n-\t\t\tstruct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;\n-\n-\t\t\tif (ol) {\n-\t\t\t\tdo {\n-\t\t\t\t\trte_pktmbuf_offload_free(ol);\n-\t\t\t\t\tol = ol->next;\n-\t\t\t\t} while (ol != NULL);\n-\t\t\t}\n-\t\t\trte_pktmbuf_free(tx_mbufs[b]);\n+\t\t\trte_pktmbuf_free(c_ops[b]->sym->m_src);\n+\t\t\trte_crypto_op_free(c_ops[b]);\n \t\t}\n \t}\n+\n \tprintf(\"\\n\");\n \treturn TEST_SUCCESS;\n }\ndiff --git a/config/common_bsdapp b/config/common_bsdapp\nindex 696382c..157f9aa 100644\n--- a/config/common_bsdapp\n+++ b/config/common_bsdapp\n@@ -357,13 +357,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y\n CONFIG_RTE_PKTMBUF_HEADROOM=128\n \n #\n-# Compile librte_mbuf_offload\n-# EXPERIMENTAL: API may change without prior notice\n-#\n-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y\n-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n\n-\n-#\n # Compile librte_timer\n #\n CONFIG_RTE_LIBRTE_TIMER=y\ndiff --git a/config/common_linuxapp b/config/common_linuxapp\nindex f1638db..e2dc636 100644\n--- a/config/common_linuxapp\n+++ b/config/common_linuxapp\n@@ -330,7 +330,7 @@ CONFIG_RTE_CRYPTODEV_NAME_LEN=64\n #\n # Compile PMD for QuickAssist based devices\n #\n-CONFIG_RTE_LIBRTE_PMD_QAT=n\n+CONFIG_RTE_LIBRTE_PMD_QAT=y\n CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_INIT=n\n CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_TX=n\n CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_RX=n\n@@ -344,7 +344,7 @@ CONFIG_RTE_QAT_PMD_MAX_NB_SESSIONS=2048\n #\n # Compile PMD for AESNI backed device\n #\n-CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n\n+CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y\n CONFIG_RTE_LIBRTE_PMD_AESNI_MB_DEBUG=n\n CONFIG_RTE_AESNI_MB_PMD_MAX_NB_QUEUE_PAIRS=8\n CONFIG_RTE_AESNI_MB_PMD_MAX_NB_SESSIONS=2048\n@@ -373,13 +373,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y\n 
CONFIG_RTE_PKTMBUF_HEADROOM=128\n \n #\n-# Compile librte_mbuf_offload\n-# EXPERIMENTAL: API may change without prior notice\n-#\n-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y\n-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n\n-\n-#\n # Compile librte_timer\n #\n CONFIG_RTE_LIBRTE_TIMER=y\ndiff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md\nindex 7a91001..f626386 100644\n--- a/doc/api/doxy-api-index.md\n+++ b/doc/api/doxy-api-index.md\n@@ -104,7 +104,6 @@ There are many libraries, so their headers may be grouped by topics:\n \n - **containers**:\n   [mbuf]               (@ref rte_mbuf.h),\n-  [mbuf_offload]       (@ref rte_mbuf_offload.h),\n   [ring]               (@ref rte_ring.h),\n   [distributor]        (@ref rte_distributor.h),\n   [reorder]            (@ref rte_reorder.h),\ndiff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c\nindex 61d93cd..5b26444 100644\n--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c\n+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c\n@@ -37,7 +37,6 @@\n #include <rte_dev.h>\n #include <rte_malloc.h>\n #include <rte_cpuflags.h>\n-#include <rte_mbuf_offload.h>\n \n #include \"rte_aesni_mb_pmd_private.h\"\n \n@@ -296,16 +295,16 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,\n \n /** Get multi buffer session */\n static struct aesni_mb_session *\n-get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)\n+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)\n {\n \tstruct aesni_mb_session *sess = NULL;\n \n-\tif (crypto_op->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {\n-\t\tif (unlikely(crypto_op->session->type !=\n+\tif (op->sym->type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {\n+\t\tif (unlikely(op->sym->session->type !=\n \t\t\t\tRTE_CRYPTODEV_AESNI_MB_PMD))\n \t\t\treturn NULL;\n \n-\t\tsess = (struct aesni_mb_session *)crypto_op->session->_private;\n+\t\tsess = (struct aesni_mb_session *)op->sym->session->_private;\n \t} else  {\n \t\tvoid *_sess = NULL;\n \n@@ -316,7 +315,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)\n \t\t\t((struct rte_cryptodev_sym_session *)_sess)->_private;\n \n \t\tif (unlikely(aesni_mb_set_session_parameters(qp->ops,\n-\t\t\t\tsess, crypto_op->xform) != 0)) {\n+\t\t\t\tsess, op->sym->xform) != 0)) {\n \t\t\trte_mempool_put(qp->sess_mp, _sess);\n \t\t\tsess = NULL;\n \t\t}\n@@ -338,12 +337,14 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_sym_op *crypto_op)\n  * - NULL pointer if completion of JOB_AES_HMAC structure isn't possible\n  */\n static JOB_AES_HMAC *\n-process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,\n-\t\tstruct rte_crypto_sym_op *c_op,\n+process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,\n \t\tstruct aesni_mb_session *session)\n {\n \tJOB_AES_HMAC *job;\n \n+\tstruct rte_mbuf *m_src = op->sym->m_src, *m_dst;\n+\tuint16_t m_offset = 0;\n+\n \tjob = (*qp->ops->job.get_next)(&qp->mb_mgr);\n \tif (unlikely(job == NULL))\n \t\treturn job;\n@@ -372,49 +373,65 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,\n \t}\n \n \t/* Mutable crypto operation parameters */\n+\tif (op->sym->m_dst) {\n+\t\tm_src = m_dst = op->sym->m_dst;\n+\n+\t\t/* append space for output data to mbuf */\n+\t\tchar *odata = rte_pktmbuf_append(m_dst,\n+\t\t\t\trte_pktmbuf_data_len(op->sym->m_src));\n+\t\tif (odata == NULL)\n+\t\t\tMB_LOG_ERR(\"failed to allocate space in destination \"\n+\t\t\t\t\t\"mbuf for source data\");\n+\n+\t\tmemcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, 
void*),\n+\t\t\t\trte_pktmbuf_data_len(op->sym->m_src));\n+\t} else {\n+\t\tm_dst = m_src;\n+\t\tm_offset = op->sym->cipher.data.offset;\n+\t}\n \n \t/* Set digest output location */\n \tif (job->cipher_direction == DECRYPT) {\n-\t\tjob->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,\n+\t\tjob->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,\n \t\t\t\tget_digest_byte_length(job->hash_alg));\n \n-\t\tif (job->auth_tag_output)\n-\t\t\tmemset(job->auth_tag_output, 0,\n-\t\t\t\tsizeof(get_digest_byte_length(job->hash_alg)));\n-\t\telse\n+\t\tif (job->auth_tag_output == NULL) {\n+\t\t\tMB_LOG_ERR(\"failed to allocate space in output mbuf \"\n+\t\t\t\t\t\"for temp digest\");\n \t\t\treturn NULL;\n+\t\t}\n+\n+\t\tmemset(job->auth_tag_output, 0,\n+\t\t\t\tsizeof(get_digest_byte_length(job->hash_alg)));\n+\n \t} else {\n-\t\tjob->auth_tag_output = c_op->digest.data;\n+\t\tjob->auth_tag_output = op->sym->auth.digest.data;\n \t}\n \n \t/*\n-\t * Multiple buffer library current only support returning a truncated\n+\t * Multi-buffer library current only support returning a truncated\n \t * digest length as specified in the relevant IPsec RFCs\n \t */\n \tjob->auth_tag_output_len_in_bytes =\n \t\t\tget_truncated_digest_byte_length(job->hash_alg);\n \n \t/* Set IV parameters */\n-\tjob->iv = c_op->iv.data;\n-\tjob->iv_len_in_bytes = c_op->iv.length;\n+\tjob->iv = op->sym->cipher.iv.data;\n+\tjob->iv_len_in_bytes = op->sym->cipher.iv.length;\n \n \t/* Data  Parameter */\n-\tjob->src = rte_pktmbuf_mtod(m, uint8_t *);\n-\tjob->dst = c_op->dst.m ?\n-\t\t\trte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +\n-\t\t\tc_op->dst.offset :\n-\t\t\trte_pktmbuf_mtod(m, uint8_t *) +\n-\t\t\tc_op->data.to_cipher.offset;\n+\tjob->src = rte_pktmbuf_mtod(m_src, uint8_t *);\n+\tjob->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);\n \n-\tjob->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;\n-\tjob->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;\n+\tjob->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;\n+\tjob->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;\n \n-\tjob->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;\n-\tjob->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;\n+\tjob->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;\n+\tjob->msg_len_to_hash_in_bytes = op->sym->auth.data.length;\n \n \t/* Set user data to be crypto operation data struct */\n-\tjob->user_data = m;\n-\tjob->user_data2 = c_op;\n+\tjob->user_data = op;\n+\tjob->user_data2 = m_dst;\n \n \treturn job;\n }\n@@ -429,43 +446,41 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,\n  * verification of supplied digest in the case of a HASH_CIPHER operation\n  * - Returns NULL on invalid job\n  */\n-static struct rte_mbuf *\n+static struct rte_crypto_op *\n post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)\n {\n-\tstruct rte_mbuf *m;\n-\tstruct rte_crypto_sym_op *c_op;\n+\tstruct rte_crypto_op *op =\n+\t\t\t(struct rte_crypto_op *)job->user_data;\n+\tstruct rte_mbuf *m_dst =\n+\t\t\t(struct rte_mbuf *)job->user_data2;\n \n-\tif (job->user_data == NULL)\n+\tif (op == NULL || m_dst == NULL)\n \t\treturn NULL;\n \n-\t/* handled retrieved job */\n-\tm = (struct rte_mbuf *)job->user_data;\n-\tc_op = (struct rte_crypto_sym_op *)job->user_data2;\n-\n \t/* set status as successful by default */\n-\tc_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;\n+\top->status = RTE_CRYPTO_OP_STATUS_SUCCESS;\n \n \t/* check if job has been processed  */\n \tif 
(unlikely(job->status != STS_COMPLETED)) {\n-\t\tc_op->status = RTE_CRYPTO_OP_STATUS_ERROR;\n-\t\treturn m;\n+\t\top->status = RTE_CRYPTO_OP_STATUS_ERROR;\n+\t\treturn op;\n \t} else if (job->chain_order == HASH_CIPHER) {\n \t\t/* Verify digest if required */\n-\t\tif (memcmp(job->auth_tag_output, c_op->digest.data,\n+\t\tif (memcmp(job->auth_tag_output, op->sym->auth.digest.data,\n \t\t\t\tjob->auth_tag_output_len_in_bytes) != 0)\n-\t\t\tc_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;\n+\t\t\top->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;\n \n \t\t/* trim area used for digest from mbuf */\n-\t\trte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));\n+\t\trte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));\n \t}\n \n \t/* Free session if a session-less crypto op */\n-\tif (c_op->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {\n-\t\trte_mempool_put(qp->sess_mp, c_op->session);\n-\t\tc_op->session = NULL;\n+\tif (op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {\n+\t\trte_mempool_put(qp->sess_mp, op->sym->session);\n+\t\top->sym->session = NULL;\n \t}\n \n-\treturn m;\n+\treturn op;\n }\n \n /**\n@@ -481,16 +496,16 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)\n static unsigned\n handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)\n {\n-\tstruct rte_mbuf *m = NULL;\n+\tstruct rte_crypto_op *op = NULL;\n \tunsigned processed_jobs = 0;\n \n \twhile (job) {\n \t\tprocessed_jobs++;\n-\t\tm = post_process_mb_job(qp, job);\n-\t\tif (m)\n-\t\t\trte_ring_enqueue(qp->processed_pkts, (void *)m);\n+\t\top = post_process_mb_job(qp, job);\n+\t\tif (op)\n+\t\t\trte_ring_enqueue(qp->processed_pkts, (void *)op);\n \t\telse\n-\t\t\tqp->qp_stats.dequeue_err_count++;\n+\t\t\tqp->stats.dequeue_err_count++;\n \n \t\tjob = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);\n \t}\n@@ -499,53 +514,53 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)\n }\n \n static uint16_t\n-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,\n-\t\tuint16_t nb_bufs)\n+aesni_mb_pmd_enqueue_burst(void *qp, struct rte_crypto_op **ops,\n+\t\tuint16_t nb_ops)\n {\n-\tstruct rte_mbuf_offload *ol;\n-\n \tstruct aesni_mb_session *sess;\n-\tstruct aesni_mb_qp *qp = queue_pair;\n+\tstruct aesni_mb_qp *_qp = qp;\n \n \tJOB_AES_HMAC *job = NULL;\n \n \tint i, processed_jobs = 0;\n \n-\tfor (i = 0; i < nb_bufs; i++) {\n-\t\tol = rte_pktmbuf_offload_get(bufs[i],\n-\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n-\t\tif (unlikely(ol == NULL)) {\n-\t\t\tqp->qp_stats.enqueue_err_count++;\n+\tfor (i = 0; i < nb_ops; i++) {\n+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG\n+\t\tif (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {\n+\t\t\tMB_LOG_ERR(\"PMD only supports symmetric crypto \"\n+\t\t\t\t\"operation requests, op (%p) is not a \"\n+\t\t\t\t\"symmetric operation.\", op);\n+\t\t\t_qp->stats.enqueue_err_count++;\n \t\t\tgoto flush_jobs;\n \t\t}\n-\n-\t\tsess = get_session(qp, &ol->op.crypto);\n+#endif\n+\t\tsess = get_session(_qp, ops[i]);\n \t\tif (unlikely(sess == NULL)) {\n-\t\t\tqp->qp_stats.enqueue_err_count++;\n+\t\t\t_qp->stats.enqueue_err_count++;\n \t\t\tgoto flush_jobs;\n \t\t}\n \n-\t\tjob = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);\n+\t\tjob = process_crypto_op(_qp, ops[i], sess);\n \t\tif (unlikely(job == NULL)) {\n-\t\t\tqp->qp_stats.enqueue_err_count++;\n+\t\t\t_qp->stats.enqueue_err_count++;\n \t\t\tgoto flush_jobs;\n \t\t}\n \n \t\t/* Submit Job */\n-\t\tjob = (*qp->ops->job.submit)(&qp->mb_mgr);\n+\t\tjob = (*_qp->ops->job.submit)(&_qp->mb_mgr);\n \n \t\t/*\n 
\t\t * If submit returns a processed job then handle it,\n \t\t * before submitting subsequent jobs\n \t\t */\n \t\tif (job)\n-\t\t\tprocessed_jobs += handle_completed_jobs(qp, job);\n+\t\t\tprocessed_jobs += handle_completed_jobs(_qp, job);\n \t}\n \n \tif (processed_jobs == 0)\n \t\tgoto flush_jobs;\n \telse\n-\t\tqp->qp_stats.enqueued_count += processed_jobs;\n+\t\t_qp->stats.enqueued_count += processed_jobs;\n \t\treturn i;\n \n flush_jobs:\n@@ -553,24 +568,24 @@ flush_jobs:\n \t * If we haven't processed any jobs in submit loop, then flush jobs\n \t * queue to stop the output stalling\n \t */\n-\tjob = (*qp->ops->job.flush_job)(&qp->mb_mgr);\n+\tjob = (*_qp->ops->job.flush_job)(&_qp->mb_mgr);\n \tif (job)\n-\t\tqp->qp_stats.enqueued_count += handle_completed_jobs(qp, job);\n+\t\t_qp->stats.enqueued_count += handle_completed_jobs(_qp, job);\n \n \treturn i;\n }\n \n static uint16_t\n-aesni_mb_pmd_dequeue_burst(void *queue_pair,\n-\t\tstruct rte_mbuf **bufs,\tuint16_t nb_bufs)\n+aesni_mb_pmd_dequeue_burst(void *qp, struct rte_crypto_op **ops,\n+\t\tuint16_t nb_ops)\n {\n-\tstruct aesni_mb_qp *qp = queue_pair;\n+\tstruct aesni_mb_qp *_qp = qp;\n \n \tunsigned nb_dequeued;\n \n-\tnb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,\n-\t\t\t(void **)bufs, nb_bufs);\n-\tqp->qp_stats.dequeued_count += nb_dequeued;\n+\tnb_dequeued = rte_ring_dequeue_burst(_qp->processed_pkts,\n+\t\t\t\t\t(void **)ops, nb_ops);\n+\t_qp->stats.dequeued_count += nb_dequeued;\n \n \treturn nb_dequeued;\n }\ndiff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c\nindex 3cd9990..d56de12 100644\n--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c\n+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c\n@@ -76,11 +76,11 @@ aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,\n \tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {\n \t\tstruct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];\n \n-\t\tstats->enqueued_count += qp->qp_stats.enqueued_count;\n-\t\tstats->dequeued_count += qp->qp_stats.dequeued_count;\n+\t\tstats->enqueued_count += qp->stats.enqueued_count;\n+\t\tstats->dequeued_count += qp->stats.dequeued_count;\n \n-\t\tstats->enqueue_err_count += qp->qp_stats.enqueue_err_count;\n-\t\tstats->dequeue_err_count += qp->qp_stats.dequeue_err_count;\n+\t\tstats->enqueue_err_count += qp->stats.enqueue_err_count;\n+\t\tstats->dequeue_err_count += qp->stats.dequeue_err_count;\n \t}\n }\n \n@@ -93,7 +93,7 @@ aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)\n \tfor (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {\n \t\tstruct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];\n \n-\t\tmemset(&qp->qp_stats, 0, sizeof(qp->qp_stats));\n+\t\tmemset(&qp->stats, 0, sizeof(qp->stats));\n \t}\n }\n \n@@ -196,7 +196,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,\n \n \tqp->sess_mp = dev->data->session_pool;\n \n-\tmemset(&qp->qp_stats, 0, sizeof(qp->qp_stats));\n+\tmemset(&qp->stats, 0, sizeof(qp->stats));\n \n \t/* Initialise multi-buffer manager */\n \t(*qp->ops->job.init_mgr)(&qp->mb_mgr);\ndiff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h\nindex ab70c15..0aed177 100644\n--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h\n+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h\n@@ -150,7 +150,7 @@ struct aesni_mb_qp {\n \t/**< Ring for placing process packets */\n \tstruct rte_mempool *sess_mp;\n \t/**< Session Mempool */\n-\tstruct rte_cryptodev_stats 
qp_stats;\n+\tstruct rte_cryptodev_stats stats;\n \t/**< Queue pair statistics */\n } __rte_cache_aligned;\n \ndiff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c\nindex e7b9027..69162b1 100644\n--- a/drivers/crypto/qat/qat_crypto.c\n+++ b/drivers/crypto/qat/qat_crypto.c\n@@ -59,7 +59,6 @@\n #include <rte_mbuf.h>\n #include <rte_string_fns.h>\n #include <rte_spinlock.h>\n-#include <rte_mbuf_offload.h>\n #include <rte_hexdump.h>\n \n #include \"qat_logs.h\"\n@@ -72,7 +71,7 @@ static inline uint32_t\n adf_modulo(uint32_t data, uint32_t shift);\n \n static inline int\n-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg);\n+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);\n \n void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,\n \t\tvoid *session)\n@@ -275,15 +274,16 @@ unsigned qat_crypto_sym_get_session_private_size(\n }\n \n \n-uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,\n-\t\tuint16_t nb_pkts)\n+uint16_t\n+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,\n+\t\tuint16_t nb_ops)\n {\n \tregister struct qat_queue *queue;\n \tstruct qat_qp *tmp_qp = (struct qat_qp *)qp;\n \tregister uint32_t nb_pkts_sent = 0;\n-\tregister struct rte_mbuf **cur_tx_pkt = tx_pkts;\n+\tregister struct rte_crypto_op **cur_op = ops;\n \tregister int ret;\n-\tuint16_t nb_pkts_possible = nb_pkts;\n+\tuint16_t nb_ops_possible = nb_ops;\n \tregister uint8_t *base_addr;\n \tregister uint32_t tail;\n \tint overflow;\n@@ -294,19 +294,17 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,\n \ttail = queue->tail;\n \n \t/* Find how many can actually fit on the ring */\n-\toverflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)\n+\toverflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)\n \t\t\t\t- queue->max_inflights;\n \tif (overflow > 0) {\n \t\trte_atomic16_sub(&tmp_qp->inflights16, overflow);\n-\t\tnb_pkts_possible = nb_pkts - overflow;\n-\t\tif (nb_pkts_possible == 0)\n+\t\tnb_ops_possible = nb_ops - overflow;\n+\t\tif (nb_ops_possible == 0)\n \t\t\treturn 0;\n \t}\n \n-\twhile (nb_pkts_sent != nb_pkts_possible) {\n-\n-\t\tret = qat_alg_write_mbuf_entry(*cur_tx_pkt,\n-\t\t\tbase_addr + tail);\n+\twhile (nb_pkts_sent != nb_ops_possible) {\n+\t\tret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);\n \t\tif (ret != 0) {\n \t\t\ttmp_qp->stats.enqueue_err_count++;\n \t\t\tif (nb_pkts_sent == 0)\n@@ -316,7 +314,7 @@ uint16_t qat_sym_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,\n \n \t\ttail = adf_modulo(tail + queue->msg_size, queue->modulo);\n \t\tnb_pkts_sent++;\n-\t\tcur_tx_pkt++;\n+\t\tcur_op++;\n \t}\n kick_tail:\n \tWRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,\n@@ -327,14 +325,13 @@ kick_tail:\n }\n \n uint16_t\n-qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,\n-\t\t\t\tuint16_t nb_pkts)\n+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,\n+\t\tuint16_t nb_ops)\n {\n-\tstruct rte_mbuf_offload *ol;\n \tstruct qat_queue *queue;\n \tstruct qat_qp *tmp_qp = (struct qat_qp *)qp;\n \tuint32_t msg_counter = 0;\n-\tstruct rte_mbuf *rx_mbuf;\n+\tstruct rte_crypto_op *rx_op;\n \tstruct icp_qat_fw_comn_resp *resp_msg;\n \n \tqueue = &(tmp_qp->rx_q);\n@@ -342,17 +339,20 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,\n \t\t\t((uint8_t *)queue->base_addr + queue->head);\n \n \twhile (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&\n-\t\t\tmsg_counter != nb_pkts) {\n-\t\trx_mbuf = (struct 
rte_mbuf *)(uintptr_t)(resp_msg->opaque_data);\n-\t\tol = rte_pktmbuf_offload_get(rx_mbuf,\n-\t\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n+\t\t\tmsg_counter != nb_ops) {\n+\t\trx_op = (struct rte_crypto_op *)(uintptr_t)\n+\t\t\t\t(resp_msg->opaque_data);\n+ \n+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX\n+\t\trte_hexdump(stdout, \"qat_response:\", (uint8_t *)resp_msg,\n+\t\t\t\tsizeof(struct icp_qat_fw_comn_resp));\n+#endif\n \t\tif (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=\n \t\t\t\tICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(\n \t\t\t\t\tresp_msg->comn_hdr.comn_status)) {\n-\t\t\tol->op.crypto.status =\n-\t\t\t\t\tRTE_CRYPTO_OP_STATUS_AUTH_FAILED;\n+\t\t\trx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;\n \t\t} else {\n-\t\t\tol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;\n+\t\t\trx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;\n \t\t}\n \t\t*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;\n \t\tqueue->head = adf_modulo(queue->head +\n@@ -361,9 +361,8 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,\n \t\tresp_msg = (struct icp_qat_fw_comn_resp *)\n \t\t\t\t\t((uint8_t *)queue->base_addr +\n \t\t\t\t\t\t\tqueue->head);\n-\n-\t\t*rx_pkts = rx_mbuf;\n-\t\trx_pkts++;\n+\t\t*ops = rx_op;\n+\t\tops++;\n \t\tmsg_counter++;\n \t}\n \tif (msg_counter > 0) {\n@@ -377,38 +376,36 @@ qat_sym_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts,\n }\n \n static inline int\n-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)\n-{\n-\tstruct rte_mbuf_offload *ol;\n-\n+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)\n+ {\n \tstruct qat_session *ctx;\n \tstruct icp_qat_fw_la_cipher_req_params *cipher_param;\n \tstruct icp_qat_fw_la_auth_req_params *auth_param;\n \tregister struct icp_qat_fw_la_bulk_req *qat_req;\n \n-\tol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO_SYM);\n-\tif (unlikely(ol == NULL)) {\n-\t\tPMD_DRV_LOG(ERR, \"No valid crypto off-load operation attached \"\n-\t\t\t\t\"to (%p) mbuf.\", mbuf);\n+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX\n+\tif (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {\n+\t\tPMD_DRV_LOG(ERR, \"QAT PMD only supports symmetric crypto \"\n+\t\t\t\t\"operation requests, op (%p) is not a \"\n+\t\t\t\t\"symmetric operation.\", op);\n \t\treturn -EINVAL;\n \t}\n-\n-\tif (unlikely(ol->op.crypto.type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {\n+#endif\n+\tif (unlikely(op->sym->type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {\n \t\tPMD_DRV_LOG(ERR, \"QAT PMD only supports session oriented\"\n-\t\t\t\t\" requests mbuf (%p) is sessionless.\", mbuf);\n+\t\t\t\t\" requests, op (%p) is sessionless.\", op);\n \t\treturn -EINVAL;\n \t}\n \n-\tif (unlikely(ol->op.crypto.session->type\n-\t\t\t\t\t!= RTE_CRYPTODEV_QAT_SYM_PMD)) {\n+\tif (unlikely(op->sym->session->type != RTE_CRYPTODEV_QAT_SYM_PMD)) {\n \t\tPMD_DRV_LOG(ERR, \"Session was not created for this device\");\n \t\treturn -EINVAL;\n \t}\n \n-\tctx = (struct qat_session *)ol->op.crypto.session->_private;\n+\tctx = (struct qat_session *)op->sym->session->_private;\n \tqat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;\n \t*qat_req = ctx->fw_req;\n-\tqat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)mbuf;\n+\tqat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;\n \n \t/*\n \t * The following code assumes:\n@@ -416,37 +413,37 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)\n \t * - always in place.\n \t */\n \tqat_req->comn_mid.dst_length =\n-\t\t\tqat_req->comn_mid.src_length = mbuf->data_len;\n+\t\t\tqat_req->comn_mid.src_length 
=\n+\t\t\t\t\trte_pktmbuf_data_len(op->sym->m_src);\n \tqat_req->comn_mid.dest_data_addr =\n \t\t\tqat_req->comn_mid.src_data_addr =\n-\t\t\t\t\trte_pktmbuf_mtophys(mbuf);\n-\n+\t\t\t\t\trte_pktmbuf_mtophys(op->sym->m_src);\n \tcipher_param = (void *)&qat_req->serv_specif_rqpars;\n \tauth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));\n \n-\tcipher_param->cipher_length = ol->op.crypto.data.to_cipher.length;\n-\tcipher_param->cipher_offset = ol->op.crypto.data.to_cipher.offset;\n-\tif (ol->op.crypto.iv.length &&\n-\t\t(ol->op.crypto.iv.length <=\n-\t\t\t\tsizeof(cipher_param->u.cipher_IV_array))) {\n+\tcipher_param->cipher_length = op->sym->cipher.data.length;\n+\tcipher_param->cipher_offset = op->sym->cipher.data.offset;\n+\tif (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=\n+\t\t\tsizeof(cipher_param->u.cipher_IV_array))) {\n \t\trte_memcpy(cipher_param->u.cipher_IV_array,\n-\t\t\t\tol->op.crypto.iv.data, ol->op.crypto.iv.length);\n+\t\t\t\top->sym->cipher.iv.data,\n+\t\t\t\top->sym->cipher.iv.length);\n \t} else {\n \t\tICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(\n \t\t\t\tqat_req->comn_hdr.serv_specif_flags,\n \t\t\t\tICP_QAT_FW_CIPH_IV_64BIT_PTR);\n-\t\tcipher_param->u.s.cipher_IV_ptr = ol->op.crypto.iv.phys_addr;\n+\t\tcipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;\n \t}\n-\tif (ol->op.crypto.digest.phys_addr) {\n+\tif (op->sym->auth.digest.phys_addr) {\n \t\tICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(\n \t\t\t\tqat_req->comn_hdr.serv_specif_flags,\n \t\t\t\tICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);\n-\t\tauth_param->auth_res_addr = ol->op.crypto.digest.phys_addr;\n+\t\tauth_param->auth_res_addr = op->sym->auth.digest.phys_addr;\n \t}\n-\tauth_param->auth_off = ol->op.crypto.data.to_hash.offset;\n-\tauth_param->auth_len = ol->op.crypto.data.to_hash.length;\n-\tauth_param->u1.aad_adr = ol->op.crypto.additional_auth.phys_addr;\n+\tauth_param->auth_off = op->sym->auth.data.offset;\n+\tauth_param->auth_len = op->sym->auth.data.length;\n \n+\tauth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;\n \t/* (GCM) aad length(240 max) will be at this location after precompute */\n \tif (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||\n \t\tctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {\n@@ -457,9 +454,19 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)\n \t}\n \tauth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;\n \n-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER\n+\n+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX\n \trte_hexdump(stdout, \"qat_req:\", qat_req,\n \t\t\tsizeof(struct icp_qat_fw_la_bulk_req));\n+\trte_hexdump(stdout, \"src_data:\",\n+\t\t\trte_pktmbuf_mtod(op->sym->m_src, uint8_t*),\n+\t\t\trte_pktmbuf_data_len(op->sym->m_src));\n+\trte_hexdump(stdout, \"iv:\", op->sym->cipher.iv.data,\n+\t\t\top->sym->cipher.iv.length);\n+\trte_hexdump(stdout, \"digest:\", op->sym->auth.digest.data,\n+\t\t\top->sym->auth.digest.length);\n+\trte_hexdump(stdout, \"aad:\", op->sym->auth.aad.data,\n+\t\t\top->sym->auth.aad.length);\n #endif\n \treturn 0;\n }\ndiff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h\nindex e9f71fe..9323383 100644\n--- a/drivers/crypto/qat/qat_crypto.h\n+++ b/drivers/crypto/qat/qat_crypto.h\n@@ -115,12 +115,12 @@ extern void\n qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);\n \n \n-uint16_t\n-qat_sym_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts,\n-\t\tuint16_t nb_pkts);\n+extern uint16_t\n+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op 
**ops,\n+\t\tuint16_t nb_ops);\n \n-uint16_t\n-qat_sym_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts,\n-\t\tuint16_t nb_pkts);\n+extern uint16_t\n+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,\n+\t\tuint16_t nb_ops);\n \n #endif /* _QAT_CRYPTO_H_ */\ndiff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c\nindex 85700fc..5e51aca 100644\n--- a/drivers/crypto/qat/rte_qat_cryptodev.c\n+++ b/drivers/crypto/qat/rte_qat_cryptodev.c\n@@ -92,8 +92,8 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_\n \tcryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;\n \tcryptodev->dev_ops = &crypto_qat_ops;\n \n-\tcryptodev->enqueue_burst = qat_sym_crypto_pkt_tx_burst;\n-\tcryptodev->dequeue_burst = qat_sym_crypto_pkt_rx_burst;\n+\tcryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;\n+\tcryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;\n \n \n \tinternals = cryptodev->data->dev_private;\ndiff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c\nindex 93de786..9a9dd55 100644\n--- a/examples/l2fwd-crypto/main.c\n+++ b/examples/l2fwd-crypto/main.c\n@@ -62,7 +62,6 @@\n #include <rte_log.h>\n #include <rte_malloc.h>\n #include <rte_mbuf.h>\n-#include <rte_mbuf_offload.h>\n #include <rte_memcpy.h>\n #include <rte_memory.h>\n #include <rte_mempool.h>\n@@ -85,6 +84,7 @@\n  */\n #define RTE_TEST_RX_DESC_DEFAULT 128\n #define RTE_TEST_TX_DESC_DEFAULT 512\n+\n static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;\n static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;\n \n@@ -104,6 +104,11 @@ struct pkt_buffer {\n \tstruct rte_mbuf *buffer[MAX_PKT_BURST];\n };\n \n+struct op_buffer {\n+\tunsigned len;\n+\tstruct rte_crypto_op *buffer[MAX_PKT_BURST];\n+};\n+\n #define MAX_RX_QUEUE_PER_LCORE 16\n #define MAX_TX_QUEUE_PER_PORT 16\n \n@@ -112,6 +117,12 @@ enum l2fwd_crypto_xform_chain {\n \tL2FWD_CRYPTO_HASH_CIPHER\n };\n \n+struct l2fwd_key {\n+\tuint8_t *data;\n+\tuint32_t length;\n+\tphys_addr_t phys_addr;\n+};\n+\n /** l2fwd crypto application command line options */\n struct l2fwd_crypto_options {\n \tunsigned portmask;\n@@ -127,7 +138,7 @@ struct l2fwd_crypto_options {\n \tstruct rte_crypto_sym_xform cipher_xform;\n \tuint8_t ckey_data[32];\n \n-\tstruct rte_crypto_sym_key iv_key;\n+\tstruct l2fwd_key iv_key;\n \tuint8_t ivkey_data[16];\n \n \tstruct rte_crypto_sym_xform auth_xform;\n@@ -142,10 +153,7 @@ struct l2fwd_crypto_params {\n \tunsigned digest_length;\n \tunsigned block_size;\n \n-\tstruct {\n-\t\tuint8_t *data;\n-\t\tuint16_t length;\n-\t} iv;\n+\tstruct l2fwd_key iv_key;\n \tstruct rte_cryptodev_sym_session *session;\n };\n \n@@ -157,14 +165,16 @@ struct lcore_queue_conf {\n \tunsigned nb_crypto_devs;\n \tunsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];\n \n-\tstruct pkt_buffer crypto_pkt_buf[RTE_MAX_ETHPORTS];\n-\tstruct pkt_buffer tx_pkt_buf[RTE_MAX_ETHPORTS];\n+\tstruct op_buffer op_buf[RTE_MAX_ETHPORTS];\n+\tstruct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];\n } __rte_cache_aligned;\n \n struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];\n \n static const struct rte_eth_conf port_conf = {\n \t.rxmode = {\n+\t\t.mq_mode = ETH_MQ_RX_NONE,\n+\t\t.max_rx_pkt_len = ETHER_MAX_LEN,\n \t\t.split_hdr_size = 0,\n \t\t.header_split   = 0, /**< Header Split disabled */\n \t\t.hw_ip_checksum = 0, /**< IP checksum offload disabled */\n@@ -178,7 +188,7 @@ static const struct rte_eth_conf port_conf = {\n };\n \n struct rte_mempool *l2fwd_pktmbuf_pool;\n-struct rte_mempool *l2fwd_mbuf_ol_pool;\n+struct 
rte_mempool *l2fwd_crypto_op_pool;\n \n /* Per-port statistics struct */\n struct l2fwd_port_statistics {\n@@ -203,7 +213,7 @@ struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS];\n \n /* A tsc-based timer responsible for triggering statistics printout */\n #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */\n-#define MAX_TIMER_PERIOD 86400 /* 1 day max */\n+#define MAX_TIMER_PERIOD 86400UL /* 1 day max */\n \n /* default period is 10 seconds */\n static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;\n@@ -292,20 +302,21 @@ static int\n l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,\n \t\tstruct l2fwd_crypto_params *cparams)\n {\n-\tstruct rte_mbuf **pkt_buffer;\n+\tstruct rte_crypto_op **op_buffer;\n \tunsigned ret;\n \n-\tpkt_buffer = (struct rte_mbuf **)\n-\t\t\tqconf->crypto_pkt_buf[cparams->dev_id].buffer;\n+\top_buffer = (struct rte_crypto_op **)\n+\t\t\tqconf->op_buf[cparams->dev_id].buffer;\n+\n+\tret = rte_cryptodev_enqueue_burst(cparams->dev_id,\n+\t\t\tcparams->qp_id,\top_buffer, (uint16_t) n);\n \n-\tret = rte_cryptodev_enqueue_burst(cparams->dev_id, cparams->qp_id,\n-\t\t\tpkt_buffer, (uint16_t) n);\n \tcrypto_statistics[cparams->dev_id].enqueued += ret;\n \tif (unlikely(ret < n)) {\n \t\tcrypto_statistics[cparams->dev_id].errors += (n - ret);\n \t\tdo {\n-\t\t\trte_pktmbuf_offload_free(pkt_buffer[ret]->offload_ops);\n-\t\t\trte_pktmbuf_free(pkt_buffer[ret]);\n+\t\t\trte_pktmbuf_free(op_buffer[ret]->sym->m_src);\n+\t\t\trte_crypto_op_free(op_buffer[ret]);\n \t\t} while (++ret < n);\n \t}\n \n@@ -313,7 +324,8 @@ l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,\n }\n \n static int\n-l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)\n+l2fwd_crypto_enqueue(struct rte_crypto_op *op,\n+\t\tstruct l2fwd_crypto_params *cparams)\n {\n \tunsigned lcore_id, len;\n \tstruct lcore_queue_conf *qconf;\n@@ -321,23 +333,23 @@ l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)\n \tlcore_id = rte_lcore_id();\n \n \tqconf = &lcore_queue_conf[lcore_id];\n-\tlen = qconf->crypto_pkt_buf[cparams->dev_id].len;\n-\tqconf->crypto_pkt_buf[cparams->dev_id].buffer[len] = m;\n+\tlen = qconf->op_buf[cparams->dev_id].len;\n+\tqconf->op_buf[cparams->dev_id].buffer[len] = op;\n \tlen++;\n \n-\t/* enough pkts to be sent */\n+\t/* enough ops to be sent */\n \tif (len == MAX_PKT_BURST) {\n \t\tl2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);\n \t\tlen = 0;\n \t}\n \n-\tqconf->crypto_pkt_buf[cparams->dev_id].len = len;\n+\tqconf->op_buf[cparams->dev_id].len = len;\n \treturn 0;\n }\n \n static int\n l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,\n-\t\tstruct rte_mbuf_offload *ol,\n+\t\tstruct rte_crypto_op *op,\n \t\tstruct l2fwd_crypto_params *cparams)\n {\n \tstruct ether_hdr *eth_hdr;\n@@ -375,43 +387,43 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,\n \t}\n \n \t/* Set crypto operation data parameters */\n-\trte_crypto_sym_op_attach_session(&ol->op.crypto, cparams->session);\n+\trte_crypto_op_attach_sym_session(op, cparams->session);\n \n \t/* Append space for digest to end of packet */\n-\tol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,\n+\top->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,\n \t\t\tcparams->digest_length);\n-\tol->op.crypto.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,\n+\top->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,\n \t\t\trte_pktmbuf_pkt_len(m) - cparams->digest_length);\n-\tol->op.crypto.digest.length = 
cparams->digest_length;\n+\top->sym->auth.digest.length = cparams->digest_length;\n+\n+\top->sym->auth.data.offset = ipdata_offset;\n+\top->sym->auth.data.length = data_len;\n \n-\tol->op.crypto.iv.data = cparams->iv_key.data;\n-\tol->op.crypto.iv.phys_addr = cparams->iv_key.phys_addr;\n-\tol->op.crypto.iv.length = cparams->iv_key.length;\n \n-\tol->op.crypto.data.to_cipher.offset = ipdata_offset;\n-\tol->op.crypto.data.to_cipher.length = data_len;\n+\top->sym->cipher.iv.data = cparams->iv_key.data;\n+\top->sym->cipher.iv.phys_addr = cparams->iv_key.phys_addr;\n+\top->sym->cipher.iv.length = cparams->iv_key.length;\n \n-\tol->op.crypto.data.to_hash.offset = ipdata_offset;\n-\tol->op.crypto.data.to_hash.length = data_len;\n+\top->sym->cipher.data.offset = ipdata_offset;\n+\top->sym->cipher.data.length = data_len;\n \n-\trte_pktmbuf_offload_attach(m, ol);\n+\top->sym->m_src = m;\n \n-\treturn l2fwd_crypto_enqueue(m, cparams);\n+\treturn l2fwd_crypto_enqueue(op, cparams);\n }\n \n \n /* Send the burst of packets on an output interface */\n static int\n-l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)\n+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,\n+\t\tuint8_t port)\n {\n \tstruct rte_mbuf **pkt_buffer;\n \tunsigned ret;\n-\tunsigned queueid = 0;\n \n-\tpkt_buffer = (struct rte_mbuf **)qconf->tx_pkt_buf[port].buffer;\n+\tpkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;\n \n-\tret = rte_eth_tx_burst(port, (uint16_t) queueid, pkt_buffer,\n-\t\t\t(uint16_t)n);\n+\tret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);\n \tport_statistics[port].tx += ret;\n \tif (unlikely(ret < n)) {\n \t\tport_statistics[port].dropped += (n - ret);\n@@ -433,8 +445,8 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)\n \tlcore_id = rte_lcore_id();\n \n \tqconf = &lcore_queue_conf[lcore_id];\n-\tlen = qconf->tx_pkt_buf[port].len;\n-\tqconf->tx_pkt_buf[port].buffer[len] = m;\n+\tlen = qconf->pkt_buf[port].len;\n+\tqconf->pkt_buf[port].buffer[len] = m;\n \tlen++;\n \n \t/* enough pkts to be sent */\n@@ -443,7 +455,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)\n \t\tlen = 0;\n \t}\n \n-\tqconf->tx_pkt_buf[port].len = len;\n+\tqconf->pkt_buf[port].len = len;\n \treturn 0;\n }\n \n@@ -503,6 +515,8 @@ static void\n l2fwd_main_loop(struct l2fwd_crypto_options *options)\n {\n \tstruct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];\n+\tstruct rte_crypto_op *ops_burst[MAX_PKT_BURST];\n+\n \tunsigned lcore_id = rte_lcore_id();\n \tuint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;\n \tunsigned i, j, portid, nb_rx;\n@@ -563,12 +577,12 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)\n \t\tif (unlikely(diff_tsc > drain_tsc)) {\n \n \t\t\tfor (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {\n-\t\t\t\tif (qconf->tx_pkt_buf[portid].len == 0)\n+\t\t\t\tif (qconf->pkt_buf[portid].len == 0)\n \t\t\t\t\tcontinue;\n \t\t\t\tl2fwd_send_burst(&lcore_queue_conf[lcore_id],\n-\t\t\t\t\t\t qconf->tx_pkt_buf[portid].len,\n+\t\t\t\t\t\t qconf->pkt_buf[portid].len,\n \t\t\t\t\t\t (uint8_t) portid);\n-\t\t\t\tqconf->tx_pkt_buf[portid].len = 0;\n+\t\t\t\tqconf->pkt_buf[portid].len = 0;\n \t\t\t}\n \n \t\t\t/* if timer is enabled */\n@@ -597,7 +611,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)\n \t\t * Read packet from RX queues\n \t\t */\n \t\tfor (i = 0; i < qconf->nb_rx_ports; i++) {\n-\t\t\tstruct rte_mbuf_offload *ol;\n+\t\t\tstruct rte_crypto_op *op;\n \n \t\t\tportid = qconf->rx_port_list[i];\n \n@@ -611,15 +625,14 @@ l2fwd_main_loop(struct 
l2fwd_crypto_options *options)\n \t\t\t/* Enqueue packets from Crypto device*/\n \t\t\tfor (j = 0; j < nb_rx; j++) {\n \t\t\t\tm = pkts_burst[j];\n-\t\t\t\tol = rte_pktmbuf_offload_alloc(\n-\t\t\t\t\t\tl2fwd_mbuf_ol_pool,\n-\t\t\t\t\t\tRTE_PKTMBUF_OL_CRYPTO_SYM);\n+\t\t\t\top = rte_crypto_op_alloc(l2fwd_crypto_op_pool,\n+\t\t\t\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC);\n \t\t\t\t/*\n-\t\t\t\t * If we can't allocate a offload, then drop\n+\t\t\t\t * If we can't allocate a crypto_op, then drop\n \t\t\t\t * the rest of the burst and dequeue and\n \t\t\t\t * process the packets to free offload structs\n \t\t\t\t */\n-\t\t\t\tif (unlikely(ol == NULL)) {\n+\t\t\t\tif (unlikely(op == NULL)) {\n \t\t\t\t\tfor (; j < nb_rx; j++) {\n \t\t\t\t\t\trte_pktmbuf_free(pkts_burst[j]);\n \t\t\t\t\t\tport_statistics[portid].dropped++;\n@@ -628,24 +641,31 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)\n \t\t\t\t}\n \n \t\t\t\trte_prefetch0(rte_pktmbuf_mtod(m, void *));\n-\t\t\t\trte_prefetch0((void *)ol);\n+\t\t\t\trte_prefetch0((void *)op);\n \n-\t\t\t\tl2fwd_simple_crypto_enqueue(m, ol, cparams);\n+\t\t\t\tl2fwd_simple_crypto_enqueue(m, op, cparams);\n \t\t\t}\n \n \t\t\t/* Dequeue packets from Crypto device */\n-\t\t\tnb_rx = rte_cryptodev_dequeue_burst(\n-\t\t\t\t\tcparams->dev_id, cparams->qp_id,\n-\t\t\t\t\tpkts_burst, MAX_PKT_BURST);\n-\t\t\tcrypto_statistics[cparams->dev_id].dequeued += nb_rx;\n+\t\t\tdo {\n+\t\t\t\tnb_rx = rte_cryptodev_dequeue_burst(\n+\t\t\t\t\t\tcparams->dev_id, cparams->qp_id,\n+\t\t\t\t\t\tops_burst, MAX_PKT_BURST);\n \n-\t\t\t/* Forward crypto'd packets */\n-\t\t\tfor (j = 0; j < nb_rx; j++) {\n-\t\t\t\tm = pkts_burst[j];\n-\t\t\t\trte_pktmbuf_offload_free(m->offload_ops);\n-\t\t\t\trte_prefetch0(rte_pktmbuf_mtod(m, void *));\n-\t\t\t\tl2fwd_simple_forward(m, portid);\n-\t\t\t}\n+\t\t\t\tcrypto_statistics[cparams->dev_id].dequeued +=\n+\t\t\t\t\t\tnb_rx;\n+\n+\t\t\t\t/* Forward crypto'd packets */\n+\t\t\t\tfor (j = 0; j < nb_rx; j++) {\n+\t\t\t\t\tm = ops_burst[j]->sym->m_src;\n+\n+\t\t\t\t\trte_crypto_op_free(ops_burst[j]);\n+\t\t\t\t\trte_prefetch0(rte_pktmbuf_mtod(m,\n+\t\t\t\t\t\t\tvoid *));\n+\n+\t\t\t\t\tl2fwd_simple_forward(m, portid);\n+\t\t\t\t}\n+\t\t\t} while (nb_rx == MAX_PKT_BURST);\n \t\t}\n \t}\n }\n@@ -748,8 +768,8 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)\n \n /** Parse crypto key command line argument */\n static int\n-parse_key(struct rte_crypto_sym_key *key __rte_unused,\n-\t\tunsigned length __rte_unused, char *arg __rte_unused)\n+parse_key(struct l2fwd_key *key __rte_unused, unsigned length __rte_unused,\n+\t\tchar *arg __rte_unused)\n {\n \tprintf(\"Currently an unsupported argument!\\n\");\n \treturn -1;\n@@ -759,26 +779,20 @@ parse_key(struct rte_crypto_sym_key *key __rte_unused,\n static int\n parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)\n {\n-\tif (strcmp(\"SHA1\", optarg) == 0) {\n-\t\t*algo = RTE_CRYPTO_AUTH_SHA1;\n+\tif (strcmp(\"MD5_HMAC\", optarg) == 0) {\n+\t\t*algo = RTE_CRYPTO_AUTH_MD5_HMAC;\n \t\treturn 0;\n \t} else if (strcmp(\"SHA1_HMAC\", optarg) == 0) {\n \t\t*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;\n \t\treturn 0;\n-\t} else if (strcmp(\"SHA224\", optarg) == 0) {\n-\t\t*algo = RTE_CRYPTO_AUTH_SHA224;\n-\t\treturn 0;\n \t} else if (strcmp(\"SHA224_HMAC\", optarg) == 0) {\n \t\t*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;\n \t\treturn 0;\n-\t} else if (strcmp(\"SHA256\", optarg) == 0) {\n-\t\t*algo = RTE_CRYPTO_AUTH_SHA256;\n-\t\treturn 0;\n \t} else if (strcmp(\"SHA256_HMAC\", optarg) == 0) {\n 
\t\t*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;\n \t\treturn 0;\n-\t} else if (strcmp(\"SHA512\", optarg) == 0) {\n-\t\t*algo = RTE_CRYPTO_AUTH_SHA256;\n+\t}  else if (strcmp(\"SHA384_HMAC\", optarg) == 0) {\n+\t\t*algo = RTE_CRYPTO_AUTH_SHA384_HMAC;\n \t\treturn 0;\n \t} else if (strcmp(\"SHA512_HMAC\", optarg) == 0) {\n \t\t*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;\n@@ -809,7 +823,7 @@ static int\n l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,\n \t\tstruct option *lgopts, int option_index)\n {\n-\tif (strcmp(lgopts[option_index].name, \"cdev_type\") == 0)\n+\tif (strcmp(lgopts[option_index].name, \"cdev\") == 0)\n \t\treturn parse_cryptodev_type(&options->cdev_type, optarg);\n \n \telse if (strcmp(lgopts[option_index].name, \"chain\") == 0)\n@@ -824,11 +838,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,\n \t\treturn parse_cipher_op(&options->cipher_xform.cipher.op,\n \t\t\t\toptarg);\n \n-\telse if (strcmp(lgopts[option_index].name, \"cipher_key\") == 0)\n-\t\treturn parse_key(&options->cipher_xform.cipher.key,\n-\t\t\t\tsizeof(options->ckey_data), optarg);\n+\telse if (strcmp(lgopts[option_index].name, \"cipher_key\") == 0) {\n+\t\tstruct l2fwd_key key = { 0 };\n+\t\tint retval = 0;\n+\n+\t\tretval = parse_key(&key, sizeof(options->ckey_data), optarg);\n \n-\telse if (strcmp(lgopts[option_index].name, \"iv\") == 0)\n+\t\toptions->cipher_xform.cipher.key.data = key.data;\n+\t\toptions->cipher_xform.cipher.key.length = key.length;\n+\n+\t\treturn retval;\n+\t} else if (strcmp(lgopts[option_index].name, \"iv\") == 0)\n \t\treturn parse_key(&options->iv_key, sizeof(options->ivkey_data),\n \t\t\t\toptarg);\n \n@@ -841,11 +861,17 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,\n \t\treturn parse_auth_op(&options->auth_xform.auth.op,\n \t\t\t\toptarg);\n \n-\telse if (strcmp(lgopts[option_index].name, \"auth_key\") == 0)\n-\t\treturn parse_key(&options->auth_xform.auth.key,\n-\t\t\t\tsizeof(options->akey_data), optarg);\n+\telse if (strcmp(lgopts[option_index].name, \"auth_key\") == 0) {\n+\t\tstruct l2fwd_key key = { 0 };\n+\t\tint retval = 0;\n+\n+\t\tretval = parse_key(&key, sizeof(options->akey_data), optarg);\n \n-\telse if (strcmp(lgopts[option_index].name, \"sessionless\") == 0) {\n+\t\toptions->auth_xform.auth.key.data = key.data;\n+\t\toptions->auth_xform.auth.key.length = key.length;\n+\n+\t\treturn retval;\n+\t} else if (strcmp(lgopts[option_index].name, \"sessionless\") == 0) {\n \t\toptions->sessionless = 1;\n \t\treturn 0;\n \t}\n@@ -905,16 +931,16 @@ l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,\n \t\tconst char *q_arg)\n {\n \tchar *end = NULL;\n-\tlong int n;\n+\tunsigned long n;\n \n \t/* parse number string */\n-\tn = strtol(q_arg, &end, 10);\n+\tn = (unsigned)strtol(q_arg, &end, 10);\n \tif ((q_arg[0] == '\\0') || (end == NULL) || (*end != '\\0'))\n \t\tn = 0;\n \n \tif (n >= MAX_TIMER_PERIOD) {\n-\t\tprintf(\"Warning refresh period specified %ld is greater than \"\n-\t\t\t\t\"max value %d! using max value\",\n+\t\tprintf(\"Warning refresh period specified %lu is greater than \"\n+\t\t\t\t\"max value %lu! 
using max value\",\n \t\t\t\tn, MAX_TIMER_PERIOD);\n \t\tn = MAX_TIMER_PERIOD;\n \t}\n@@ -934,13 +960,13 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)\n \toptions->nb_ports_per_lcore = 1;\n \toptions->refresh_period = 10000;\n \toptions->single_lcore = 0;\n+\toptions->sessionless = 0;\n \n \toptions->cdev_type = RTE_CRYPTODEV_AESNI_MB_PMD;\n-\toptions->sessionless = 0;\n \toptions->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;\n \n \t/* Cipher Data */\n-\toptions->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;\n+\toptions->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;\n \toptions->cipher_xform.next = NULL;\n \n \toptions->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;\n@@ -949,12 +975,11 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)\n \tgenerate_random_key(options->ckey_data, sizeof(options->ckey_data));\n \n \toptions->cipher_xform.cipher.key.data = options->ckey_data;\n-\toptions->cipher_xform.cipher.key.phys_addr = 0;\n \toptions->cipher_xform.cipher.key.length = 16;\n \n \n \t/* Authentication Data */\n-\toptions->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;\n+\toptions->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;\n \toptions->auth_xform.next = NULL;\n \n \toptions->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;\n@@ -966,7 +991,6 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)\n \tgenerate_random_key(options->akey_data, sizeof(options->akey_data));\n \n \toptions->auth_xform.auth.key.data = options->akey_data;\n-\toptions->auth_xform.auth.key.phys_addr = 0;\n \toptions->auth_xform.auth.key.length = 20;\n }\n \n@@ -993,39 +1017,6 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)\n \n \tprintf(\"sessionless crypto: %s\\n\",\n \t\t\toptions->sessionless ? \"enabled\" : \"disabled\");\n-#if 0\n-\toptions->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;\n-\n-\t/* Cipher Data */\n-\toptions->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;\n-\toptions->cipher_xform.next = NULL;\n-\n-\toptions->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;\n-\toptions->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;\n-\n-\tgenerate_random_key(options->ckey_data, sizeof(options->ckey_data));\n-\n-\toptions->cipher_xform.cipher.key.data = options->ckey_data;\n-\toptions->cipher_xform.cipher.key.phys_addr = 0;\n-\toptions->cipher_xform.cipher.key.length = 16;\n-\n-\n-\t/* Authentication Data */\n-\toptions->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;\n-\toptions->auth_xform.next = NULL;\n-\n-\toptions->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;\n-\toptions->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;\n-\n-\toptions->auth_xform.auth.add_auth_data_length = 0;\n-\toptions->auth_xform.auth.digest_length = 20;\n-\n-\tgenerate_random_key(options->akey_data, sizeof(options->akey_data));\n-\n-\toptions->auth_xform.auth.key.data = options->akey_data;\n-\toptions->auth_xform.auth.key.phys_addr = 0;\n-\toptions->auth_xform.auth.key.length = 20;\n-#endif\n }\n \n /* Parse the argument given in the command line of the application */\n@@ -1039,7 +1030,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,\n \tstatic struct option lgopts[] = {\n \t\t\t{ \"sessionless\", no_argument, 0, 0 },\n \n-\t\t\t{ \"cdev_type\", required_argument, 0, 0 },\n+\t\t\t{ \"cdev\", required_argument, 0, 0 },\n \t\t\t{ \"chain\", required_argument, 0, 0 },\n \n \t\t\t{ \"cipher_algo\", required_argument, 0, 0 },\n@@ -1053,6 +1044,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,\n \t\t\t{ \"iv\", required_argument, 0, 
0 },\n \n \t\t\t{ \"sessionless\", no_argument, 0, 0 },\n+\n \t\t\t{ NULL, 0, 0, 0 }\n \t};\n \n@@ -1370,15 +1362,17 @@ main(int argc, char **argv)\n \t\trte_exit(EXIT_FAILURE, \"Invalid L2FWD-CRYPTO arguments\\n\");\n \n \t/* create the mbuf pool */\n-\tl2fwd_pktmbuf_pool = rte_pktmbuf_pool_create(\"mbuf_pool\", NB_MBUF, 128,\n-\t\t0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());\n+\tl2fwd_pktmbuf_pool = rte_pktmbuf_pool_create(\"mbuf_pool\", NB_MBUF, 512,\n+\t\t\tsizeof(struct rte_crypto_op),\n+\t\t\tRTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());\n \tif (l2fwd_pktmbuf_pool == NULL)\n \t\trte_exit(EXIT_FAILURE, \"Cannot create mbuf pool\\n\");\n \n \t/* create crypto op pool */\n-\tl2fwd_mbuf_ol_pool = rte_pktmbuf_offload_pool_create(\n-\t\t\t\"mbuf_offload_pool\", NB_MBUF, 128, 0, rte_socket_id());\n-\tif (l2fwd_mbuf_ol_pool == NULL)\n+\tl2fwd_crypto_op_pool = rte_crypto_op_pool_create(\"crypto_op_pool\",\n+\t\t\tRTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,\n+\t\t\trte_socket_id());\n+\tif (l2fwd_crypto_op_pool == NULL)\n \t\trte_exit(EXIT_FAILURE, \"Cannot create crypto op pool\\n\");\n \n \t/* Enable Ethernet ports */\ndiff --git a/lib/Makefile b/lib/Makefile\nindex ef172ea..4c5c1b4 100644\n--- a/lib/Makefile\n+++ b/lib/Makefile\n@@ -36,7 +36,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal\n DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring\n DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool\n DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf\n-DIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += librte_mbuf_offload\n DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer\n DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile\n DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline\ndiff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h\nindex df0c0b8..489314b 100644\n--- a/lib/librte_cryptodev/rte_crypto.h\n+++ b/lib/librte_cryptodev/rte_crypto.h\n@@ -44,8 +44,353 @@\n extern \"C\" {\n #endif\n \n+\n+#include <rte_mbuf.h>\n+#include <rte_memory.h>\n+#include <rte_mempool.h>\n+\n #include <rte_crypto_sym.h>\n \n+/** Crypto operation types */\n+enum rte_crypto_op_type {\n+\tRTE_CRYPTO_OP_TYPE_UNDEFINED,\n+\t/**< Undefined operation type */\n+\tRTE_CRYPTO_OP_TYPE_SYMMETRIC,\n+\t/**< Symmetric operation */\n+};\n+\n+/** Status of crypto operation */\n+enum rte_crypto_op_status {\n+\tRTE_CRYPTO_OP_STATUS_SUCCESS,\n+\t/**< Operation completed successfully */\n+\tRTE_CRYPTO_OP_STATUS_NOT_PROCESSED,\n+\t/**< Operation has not yet been processed by a crypto device */\n+\tRTE_CRYPTO_OP_STATUS_ENQUEUED,\n+\t/**< Operation is enqueued on device */\n+\tRTE_CRYPTO_OP_STATUS_AUTH_FAILED,\n+\t/**< Authentication verification failed */\n+\tRTE_CRYPTO_OP_STATUS_INVALID_SESSION,\n+\t/**<\n+\t * Symmetric operation failed due to invalid session arguments, or if\n+\t * in session-less mode, failed to allocate private operation material.\n+\t */\n+\tRTE_CRYPTO_OP_STATUS_INVALID_ARGS,\n+\t/**< Operation failed due to invalid arguments in request */\n+\tRTE_CRYPTO_OP_STATUS_ERROR,\n+\t/**< Error handling operation */\n+};\n+\n+/**\n+ * Cryptographic Operation.\n+ *\n+ * This structure contains data relating to performing cryptographic\n+ * operations. This operation structure is used to contain any operation which\n+ * is supported by the cryptodev API, PMDs should check the type parameter to\n+ * verify that the operation is a support function of the device. 
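[Illustrative sketch, not part of the patch: the basic crypto-op-oriented flow the reworked burst API expects, using only the declarations added in this diff. It assumes an already configured and started cryptodev (dev_id, qp_id) and an mbuf m to process; pool sizes are placeholders and the pool would normally be created once at init rather than per call.]

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

static int
crypto_op_enqueue_sketch(uint8_t dev_id, uint16_t qp_id, struct rte_mbuf *m)
{
        /* Pool of symmetric crypto operations (placeholder sizes, no extra
         * private data per operation). In a real application this is done
         * once at initialisation. */
        struct rte_mempool *op_pool = rte_crypto_op_pool_create(
                        "crypto_op_pool_sketch", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                        8192, 128, 0, rte_socket_id());
        struct rte_crypto_op *op;

        if (op_pool == NULL)
                return -1;

        /* Allocate an operation and point it at the packet to process; the
         * allocation resets status to RTE_CRYPTO_OP_STATUS_NOT_PROCESSED. */
        op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
        if (op == NULL)
                return -1;
        op->sym->m_src = m;

        /* ... attach a session or xforms and fill in the cipher/auth
         * parameters here ... */

        /* The burst API now carries rte_crypto_op pointers, not mbufs;
         * completed operations are retrieved later with
         * rte_cryptodev_dequeue_burst() and returned to their pool with
         * rte_crypto_op_free(). */
        if (rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1) != 1) {
                rte_crypto_op_free(op);
                return -1;
        }

        return 0;
}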
Crypto\n+ * operations are enqueued and dequeued in crypto PMDs using the\n+ * rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() .\n+ */\n+struct rte_crypto_op {\n+\tenum rte_crypto_op_type type;\n+\t/**< operation type */\n+\n+\tenum rte_crypto_op_status status;\n+\t/**<\n+\t * operation status - this is reset to\n+\t * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and\n+\t * will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation\n+\t * is successfully processed by a crypto PMD\n+\t */\n+\n+\tstruct rte_mempool *mempool;\n+\t/**< crypto operation mempool which operation is allocated from */\n+\n+\tvoid *opaque_data;\n+\t/**< Opaque pointer for user data */\n+\n+\tunion {\n+\t\tstruct rte_crypto_sym_op *sym;\n+\t\t/**< Symmetric operation parameters */\n+\t}; /**< operation specific parameters */\n+};\n+\n+/**\n+ * Reset the fields of a crypto operation to their default values.\n+ *\n+ * @param\top\tThe crypto operation to be reset.\n+ * @param\ttype\tThe crypto operation type.\n+ */\n+static inline void\n+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)\n+{\n+\top->type = type;\n+\top->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;\n+\n+\tswitch (type) {\n+\tcase RTE_CRYPTO_OP_TYPE_SYMMETRIC:\n+\t\t/** Symmetric operation structure starts after the end of the\n+\t\t * rte_crypto_op structure.\n+\t\t */\n+\t\top->sym = (struct rte_crypto_sym_op *)(op + 1);\n+\t\top->type = type;\n+\n+\t\t__rte_crypto_sym_op_reset(op->sym);\n+\t\tbreak;\n+\tdefault:\n+\t\tbreak;\n+\t}\n+\n+\top->opaque_data = NULL;\n+}\n+\n+\n+/**\n+ * Private data structure belonging to a crypto symmetric operation pool.\n+ */\n+struct rte_crypto_op_pool_private {\n+\tenum rte_crypto_op_type type;\n+\t/**< Crypto op pool type operation. */\n+\tuint16_t priv_size;\n+\t/**< Size of private area in each crypto operation. 
*/\n+};\n+\n+\n+/**\n+ * Returns the size of private data allocated with each rte_crypto_op object by\n+ * the mempool\n+ *\n+ * @param\tmempool\trte_crypto_op mempool\n+ *\n+ * @return\tprivate data size\n+ */\n+static inline uint16_t\n+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)\n+{\n+\tstruct rte_crypto_op_pool_private *priv =\n+\t\t\trte_mempool_get_priv(mempool);\n+\n+\treturn priv->priv_size;\n+}\n+\n+\n+/**\n+ * Creates a crypto operation pool\n+ *\n+ * @param\tname\t\tpool name\n+ * @param\ttype\t\tcrypto operation type, use\n+ *\t\t\t\tRTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which\n+ *\t\t\t\tsupports all operation types\n+ * @param\tnb_elts\t\tnumber of elements in pool\n+ * @param\tcache_size\tNumber of elements to cache on lcore, see\n+ *\t\t\t\t*rte_mempool_create* for further details about\n+ *\t\t\t\tcache size\n+ * @param\tpriv_size\tSize of private data to allocate with each\n+ *\t\t\t\toperation\n+ * @param\tsocket_id\tSocket to allocate memory on\n+ *\n+ * @return\n+ *  - On success pointer to mempool\n+ *  - On failure NULL\n+ */\n+extern struct rte_mempool *\n+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,\n+\t\tunsigned nb_elts, unsigned cache_size, uint16_t priv_size,\n+\t\tint socket_id);\n+\n+/**\n+ * Bulk allocate raw element from mempool and return as crypto operations\n+ *\n+ * @param\tmempool\t\tcrypto operation mempool.\n+ * @param\ttype\t\tcrypto operation type.\n+ * @param\tops\t\tArray to place allocated crypto operations\n+ * @param\tnb_ops\t\tNumber of crypto operations to allocate\n+ *\n+ * @returns\n+ * - On success returns  0\n+ * - On failure returns <0\n+ */\n+static inline int\n+__rte_crypto_op_raw_bulk_alloc(struct rte_mempool *mempool,\n+\t\tenum rte_crypto_op_type type,\n+\t\tstruct rte_crypto_op **ops, uint16_t nb_ops)\n+{\n+\tstruct rte_crypto_op_pool_private *priv =\n+\t\trte_mempool_get_priv(mempool);\n+\n+\tif (unlikely(priv->type != type &&\n+\t\t\tpriv->type != RTE_CRYPTO_OP_TYPE_UNDEFINED))\n+\t\treturn -EINVAL;\n+\n+\treturn rte_mempool_get_bulk(mempool, (void **)ops, nb_ops);\n+}\n+\n+/**\n+ * Allocate a crypto operation from a mempool with default parameters set\n+ *\n+ * @param\tmempool\tcrypto operation mempool\n+ * @param\ttype\toperation type to allocate\n+ *\n+ * @returns\n+ * - On success returns a valid rte_crypto_op structure\n+ * - On failure returns NULL\n+ */\n+static inline struct rte_crypto_op *\n+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)\n+{\n+\tstruct rte_crypto_op *op = NULL;\n+\tint retval;\n+\n+\tretval = __rte_crypto_op_raw_bulk_alloc(mempool, type, &op, 1);\n+\tif (unlikely(retval < 0))\n+\t\treturn NULL;\n+\n+\t__rte_crypto_op_reset(op, type);\n+\n+\treturn op;\n+}\n+\n+\n+static inline int\n+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,\n+\t\tenum rte_crypto_op_type type,\n+\t\tstruct rte_crypto_op **ops, uint16_t nb_ops)\n+{\n+\tint retval, i;\n+\n+\tretval = __rte_crypto_op_raw_bulk_alloc(mempool, type, ops, nb_ops);\n+\tif (unlikely(retval < 0))\n+\t\treturn retval;\n+\n+\tfor (i = 0; i < nb_ops; i++)\n+\t\t__rte_crypto_op_reset(ops[i], type);\n+\n+\treturn 0;\n+}\n+\n+\n+\n+/**\n+ * Returns a pointer to the private data of a crypto operation if\n+ * that operation has enough capacity for requested size.\n+ *\n+ * @param\top\tcrypto operation.\n+ * @param\tsize\tsize of space requested in private data.\n+ *\n+ * @returns\n+ * - if sufficient space available returns pointer to start of private data\n+ * - if insufficient 
space returns NULL\n+ */\n+static inline void *\n+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)\n+{\n+\tuint32_t priv_size;\n+\n+\tif (likely(op->mempool != NULL)) {\n+\t\tpriv_size = __rte_crypto_op_get_priv_data_size(op->mempool);\n+\n+\t\tif (likely(priv_size >= size))\n+\t\t\treturn (void *)((op + 1) +\n+\t\t\t\t\tsizeof(struct rte_crypto_sym_op));\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+/**\n+ * free crypto operation structure\n+ * If operation has been allocate from a rte_mempool, then the operation will\n+ * be returned to the mempool.\n+ *\n+ * @param\top\tsymmetric crypto operation\n+ */\n+static inline void\n+rte_crypto_op_free(struct rte_crypto_op *op)\n+{\n+\tif (op != NULL && op->mempool != NULL)\n+\t\trte_mempool_put(op->mempool, op);\n+}\n+\n+/**\n+ * Allocate a symmetric crypto operation in the private data of an mbuf.\n+ *\n+ * @param\tm\tmbuf which is associated with the crypto operation, the\n+ *\t\t\toperation will be allocated in the private data of that\n+ *\t\t\tmbuf.\n+ *\n+ * @returns\n+ * - On success returns a pointer to the crypto operation.\n+ * - On failure returns NULL.\n+ */\n+static inline struct rte_crypto_op *\n+rte_crypto_sym_op_alloc_from_mbuf_priv_data(struct rte_mbuf *m)\n+{\n+\tif (unlikely(m == NULL))\n+\t\treturn NULL;\n+\n+\t/*\n+\t * check that the mbuf's private data size is sufficient to contain a\n+\t * crypto operation\n+\t */\n+\tif (unlikely(m->priv_size < (sizeof(struct rte_crypto_op) +\n+\t\t\tsizeof(struct rte_crypto_sym_op))))\n+\t\treturn NULL;\n+\n+\t/* private data starts immediately after the mbuf header in the mbuf. */\n+\tstruct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);\n+\n+\t__rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);\n+\n+\top->mempool = NULL;\n+\top->sym->m_src = m;\n+\n+\treturn op;\n+}\n+\n+/**\n+ * Allocate space for symmetric crypto xforms in the private data space of the\n+ * crypto operation. 
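[Illustrative sketch: allocating the operation inside an mbuf's private data area with the helper defined just above, instead of from a crypto-op mempool. The priv_size requirement below is the one checked by that helper.]

#include <rte_crypto.h>
#include <rte_mbuf.h>

static struct rte_crypto_op *
op_in_mbuf_priv_sketch(struct rte_mbuf *m)
{
        /* The operation (and its rte_crypto_sym_op) is carved out of the
         * mbuf private area, so the mbuf pool must have been created with
         * priv_size >= sizeof(struct rte_crypto_op) +
         * sizeof(struct rte_crypto_sym_op). */
        struct rte_crypto_op *op =
                        rte_crypto_sym_op_alloc_from_mbuf_priv_data(m);

        if (op == NULL)
                return NULL;    /* NULL mbuf or private area too small */

        /* op->sym->m_src is already set to m and op->mempool is NULL, so a
         * later rte_crypto_op_free() on this operation is a no-op. */
        return op;
}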
This also defaults the crypto xform type and configures\n+ * the chaining of the xforms in the crypto operation\n+ *\n+ * @return\n+ * - On success returns pointer to first crypto xform in crypto operations chain\n+ * - On failure returns NULL\n+ */\n+static inline struct rte_crypto_sym_xform *\n+rte_crypto_op_sym_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)\n+{\n+\tvoid *priv_data;\n+\tuint32_t size;\n+\n+\tif (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))\n+\t\treturn NULL;\n+\n+\tsize = sizeof(struct rte_crypto_sym_xform) * nb_xforms;\n+\n+\tpriv_data = __rte_crypto_op_get_priv_data(op, size);\n+\tif (priv_data == NULL)\n+\t\treturn NULL;\n+\n+\treturn __rte_crypto_sym_op_sym_xforms_alloc(op->sym, priv_data,\n+\t\t\tnb_xforms);\n+}\n+\n+\n+/**\n+ * Attach a session to a crypto operation\n+ *\n+ * @param\top\tcrypto operation, must be of type symmetric\n+ * @param\tsess\tcryptodev session\n+ */\n+static inline int\n+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,\n+\t\tstruct rte_cryptodev_sym_session *sess)\n+{\n+\tif (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))\n+\t\treturn -1;\n+\n+\treturn __rte_crypto_sym_op_attach_sym_session(op->sym, sess);\n+}\n+\n #ifdef __cplusplus\n }\n #endif\ndiff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h\nindex cb2b8f6..88f2727 100644\n--- a/lib/librte_cryptodev/rte_crypto_sym.h\n+++ b/lib/librte_cryptodev/rte_crypto_sym.h\n@@ -46,6 +46,8 @@\n extern \"C\" {\n #endif\n \n+#include <string.h>\n+\n #include <rte_mbuf.h>\n #include <rte_memory.h>\n #include <rte_mempool.h>\n@@ -346,37 +348,24 @@ enum rte_crypto_sym_op_sess_type {\n };\n \n \n-/** Status of crypto operation */\n-enum rte_crypto_op_status {\n-\tRTE_CRYPTO_OP_STATUS_SUCCESS,\n-\t/**< Operation completed successfully */\n-\tRTE_CRYPTO_OP_STATUS_NO_SUBMITTED,\n-\t/**< Operation not yet submitted to a cryptodev */\n-\tRTE_CRYPTO_OP_STATUS_ENQUEUED,\n-\t/**< Operation is enqueued on device */\n-\tRTE_CRYPTO_OP_STATUS_AUTH_FAILED,\n-\t/**< Authentication verification failed */\n-\tRTE_CRYPTO_OP_STATUS_INVALID_ARGS,\n-\t/**< Operation failed due to invalid arguments in request */\n-\tRTE_CRYPTO_OP_STATUS_ERROR,\n-\t/**< Error handling operation */\n-};\n+struct rte_cryptodev_sym_session;\n \n /**\n- * Cryptographic Operation Data.\n+ * Symmetric Cryptographic Operation.\n+ *\n+ * This structure contains data relating to performing symmetric cryptographic\n+ * processing on a referenced mbuf data buffer.\n  *\n- * This structure contains data relating to performing cryptographic processing\n- * on a data buffer. 
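[Illustrative sketch of the two ways to bind transforms to an operation with the helpers above: attaching a pre-created session, or, for the session-less case, chaining xforms in the op's private data. The session-less path assumes the op pool was created with priv_size of at least 2 * sizeof(struct rte_crypto_sym_xform); cipher_xform and auth_xform stand for application-defined transform setups, with cipher_xform assumed to be the head of the full chain when a session is used.]

#include <rte_crypto.h>
#include <rte_cryptodev.h>

static int
op_bind_xforms_sketch(uint8_t dev_id, struct rte_crypto_op *op,
                struct rte_crypto_sym_xform *cipher_xform,
                struct rte_crypto_sym_xform *auth_xform, int sessionless)
{
        if (!sessionless) {
                /* Session created once up front and reused across many
                 * operations; cipher_xform heads the full xform chain. */
                struct rte_cryptodev_sym_session *sess =
                                rte_cryptodev_sym_session_create(dev_id,
                                                cipher_xform);
                if (sess == NULL)
                        return -1;
                return rte_crypto_op_attach_sym_session(op, sess);
        }

        /* Session-less: place a cipher + auth xform chain in the op's
         * private data area. */
        struct rte_crypto_sym_xform *first =
                        rte_crypto_op_sym_xforms_alloc(op, 2);
        if (first == NULL)
                return -1;      /* wrong op type or priv area too small */

        first->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
        first->cipher = cipher_xform->cipher;
        first->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
        first->next->auth = auth_xform->auth;
        return 0;
}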
This request is used with rte_crypto_sym_enqueue_burst()\n- * call for performing cipher, hash, or a combined hash and cipher operations.\n+ * When a symmetric crypto operation is enqueued with the device for processing\n+ * it must have a valid *rte_mbuf* structure attached, via m_src parameter,\n+ * which contains the source data which the crypto operation is to be performed\n+ * on.\n  */\n struct rte_crypto_sym_op {\n-\tenum rte_crypto_sym_op_sess_type type;\n-\tenum rte_crypto_op_status status;\n+\tstruct rte_mbuf *m_src;\t/**< source mbuf */\n+\tstruct rte_mbuf *m_dst;\t/**< destination mbuf */\n \n-\tstruct {\n-\t\tstruct rte_mbuf *m;\t/**< Destination mbuf */\n-\t\tuint8_t offset;\t\t/**< Data offset */\n-\t} dst;\n+\tenum rte_crypto_sym_op_sess_type type;\n \n \tunion {\n \t\tstruct rte_cryptodev_sym_session *session;\n@@ -387,7 +376,7 @@ struct rte_crypto_sym_op {\n \n \tstruct {\n \t\tstruct {\n-\t\t\t uint32_t offset;\n+\t\t\tuint32_t offset;\n \t\t\t /**< Starting point for cipher processing, specified\n \t\t\t  * as number of bytes from start of data in the source\n \t\t\t  * buffer. The result of the cipher operation will be\n@@ -395,7 +384,7 @@ struct rte_crypto_sym_op {\n \t\t\t  * this location.\n \t\t\t  */\n \n-\t\t\t uint32_t length;\n+\t\t\tuint32_t length;\n \t\t\t /**< The message length, in bytes, of the source buffer\n \t\t\t  * on which the cryptographic operation will be\n \t\t\t  * computed. This must be a multiple of the block size\n@@ -414,17 +403,68 @@ struct rte_crypto_sym_op {\n \t\t\t  * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this\n \t\t\t  * field should be set to 0.\n \t\t\t  */\n-\t\t} to_cipher; /**< Data offsets and length for ciphering */\n+\t\t} data; /**< Data offsets and length for ciphering */\n+\n+\t\tstruct {\n+\t\t\tuint8_t *data;\n+\t\t\t/**< Initialisation Vector or Counter.\n+\t\t\t *\n+\t\t\t * - For block ciphers in CBC or F8 mode, or for Kasumi\n+\t\t\t * in F8 mode, or for SNOW3G in UEA2 mode, this is the\n+\t\t\t * Initialisation Vector (IV) value.\n+\t\t\t *\n+\t\t\t * - For block ciphers in CTR mode, this is the counter.\n+\t\t\t *\n+\t\t\t * - For GCM mode, this is either the IV (if the length\n+\t\t\t * is 96 bits) or J0 (for other sizes), where J0 is as\n+\t\t\t * defined by NIST SP800-38D. Regardless of the IV\n+\t\t\t * length, a full 16 bytes needs to be allocated.\n+\t\t\t *\n+\t\t\t * - For CCM mode, the first byte is reserved, and the\n+\t\t\t * nonce should be written starting at &iv[1] (to allow\n+\t\t\t * space for the implementation to write in the flags\n+\t\t\t * in the first byte). 
Note that a full 16 bytes should\n+\t\t\t * be allocated, even though the length field will\n+\t\t\t * have a value less than this.\n+\t\t\t *\n+\t\t\t * - For AES-XTS, this is the 128bit tweak, i, from\n+\t\t\t * IEEE Std 1619-2007.\n+\t\t\t *\n+\t\t\t * For optimum performance, the data pointed to SHOULD\n+\t\t\t * be 8-byte aligned.\n+\t\t\t */\n+\t\t\tphys_addr_t phys_addr;\n+\t\t\tuint16_t length;\n+\t\t\t/**< Length of valid IV data.\n+\t\t\t *\n+\t\t\t * - For block ciphers in CBC or F8 mode, or for Kasumi\n+\t\t\t * in F8 mode, or for SNOW3G in UEA2 mode, this is the\n+\t\t\t * length of the IV (which must be the same as the\n+\t\t\t * block length of the cipher).\n+\t\t\t *\n+\t\t\t * - For block ciphers in CTR mode, this is the length\n+\t\t\t * of the counter (which must be the same as the block\n+\t\t\t * length of the cipher).\n+\t\t\t *\n+\t\t\t * - For GCM mode, this is either 12 (for 96-bit IVs)\n+\t\t\t * or 16, in which case data points to J0.\n+\t\t\t *\n+\t\t\t * - For CCM mode, this is the length of the nonce,\n+\t\t\t * which can be in the range 7 to 13 inclusive.\n+\t\t\t */\n+\t\t} iv;\t/**< Initialisation vector parameters */\n+\t} cipher;\n \n+\tstruct {\n \t\tstruct {\n-\t\t\t uint32_t offset;\n+\t\t\tuint32_t offset;\n \t\t\t /**< Starting point for hash processing, specified as\n \t\t\t  * number of bytes from start of packet in source\n \t\t\t  * buffer.\n \t\t\t  *\n \t\t\t  * @note\n \t\t\t  * For CCM and GCM modes of operation, this field is\n-\t\t\t  * ignored. The field @ref additional_auth field\n+\t\t\t  * ignored. The field @ref aad field\n \t\t\t  * should be set instead.\n \t\t\t  *\n \t\t\t  * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)\n@@ -432,180 +472,169 @@ struct rte_crypto_sym_op {\n \t\t\t  * of the AAD data in the source buffer.\n \t\t\t  */\n \n-\t\t\t uint32_t length;\n+\t\t\tuint32_t length;\n \t\t\t /**< The message length, in bytes, of the source\n \t\t\t  * buffer that the hash will be computed on.\n \t\t\t  *\n \t\t\t  * @note\n \t\t\t  * For CCM and GCM modes of operation, this field is\n-\t\t\t  * ignored. The field @ref additional_auth field\n-\t\t\t  * should be set instead.\n+\t\t\t  * ignored. The field @ref aad field should be set\n+\t\t\t  * instead.\n \t\t\t  *\n \t\t\t  * @note\n \t\t\t  * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode\n \t\t\t  * of operation, this field specifies the length of\n \t\t\t  * the AAD data in the source buffer.\n \t\t\t  */\n-\t\t} to_hash; /**< Data offsets and length for authentication */\n-\t} data;\t/**< Details of data to be operated on */\n-\n-\tstruct {\n-\t\tuint8_t *data;\n-\t\t/**< Initialisation Vector or Counter.\n-\t\t *\n-\t\t * - For block ciphers in CBC or F8 mode, or for Kasumi in F8\n-\t\t * mode, or for SNOW3G in UEA2 mode, this is the Initialisation\n-\t\t * Vector (IV) value.\n-\t\t *\n-\t\t * - For block ciphers in CTR mode, this is the counter.\n-\t\t *\n-\t\t * - For GCM mode, this is either the IV (if the length is 96\n-\t\t * bits) or J0 (for other sizes), where J0 is as defined by\n-\t\t * NIST SP800-38D. 
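[Illustrative sketch of filling in the cipher half of a symmetric operation as laid out above. The IV buffer, its physical address and the offsets are application-supplied values; processing here is in place, so m_dst is left unset.]

#include <rte_crypto.h>
#include <rte_mbuf.h>
#include <rte_memory.h>

static void
op_set_cipher_sketch(struct rte_crypto_op *op, struct rte_mbuf *m,
                uint8_t *iv, phys_addr_t iv_phys, uint16_t iv_len,
                uint32_t data_off, uint32_t data_len)
{
        struct rte_crypto_sym_op *sym = op->sym;

        sym->m_src = m;                 /* in-place: m_dst left NULL */

        /* Region of the source mbuf to be ciphered, as offset/length from
         * the start of the packet data. */
        sym->cipher.data.offset = data_off;
        sym->cipher.data.length = data_len;

        /* The IV/counter lives outside the packet data; physical crypto
         * devices such as QAT use the physical address. */
        sym->cipher.iv.data = iv;
        sym->cipher.iv.phys_addr = iv_phys;
        sym->cipher.iv.length = iv_len;
}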
Regardless of the IV length, a full 16 bytes\n-\t\t * needs to be allocated.\n-\t\t *\n-\t\t * - For CCM mode, the first byte is reserved, and the nonce\n-\t\t * should be written starting at &iv[1] (to allow space for the\n-\t\t * implementation to write in the flags in the first byte).\n-\t\t * Note that a full 16 bytes should be allocated, even though\n-\t\t * the length field will have a value less than this.\n-\t\t *\n-\t\t * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std\n-\t\t * 1619-2007.\n-\t\t *\n-\t\t * For optimum performance, the data pointed to SHOULD be\n-\t\t * 8-byte aligned.\n-\t\t */\n-\t\tphys_addr_t phys_addr;\n-\t\tsize_t length;\n-\t\t/**< Length of valid IV data.\n-\t\t *\n-\t\t * - For block ciphers in CBC or F8 mode, or for Kasumi in F8\n-\t\t * mode, or for SNOW3G in UEA2 mode, this is the length of the\n-\t\t * IV (which must be the same as the block length of the\n-\t\t * cipher).\n-\t\t *\n-\t\t * - For block ciphers in CTR mode, this is the length of the\n-\t\t * counter (which must be the same as the block length of the\n-\t\t * cipher).\n-\t\t *\n-\t\t * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in\n-\t\t * which case data points to J0.\n-\t\t *\n-\t\t * - For CCM mode, this is the length of the nonce, which can\n-\t\t * be in the range 7 to 13 inclusive.\n-\t\t */\n-\t} iv;\t/**< Initialisation vector parameters */\n+\t\t} data; /**< Data offsets and length for authentication */\n \n-\tstruct {\n-\t\tuint8_t *data;\n-\t\t/**< If this member of this structure is set this is a\n-\t\t * pointer to the location where the digest result should be\n-\t\t * inserted (in the case of digest generation) or where the\n-\t\t * purported digest exists (in the case of digest\n-\t\t * verification).\n-\t\t *\n-\t\t * At session creation time, the client specified the digest\n-\t\t * result length with the digest_length member of the @ref\n-\t\t * rte_crypto_auth_xform structure. For physical crypto\n-\t\t * devices the caller must allocate at least digest_length of\n-\t\t * physically contiguous memory at this location.\n-\t\t *\n-\t\t * For digest generation, the digest result will overwrite\n-\t\t * any data at this location.\n-\t\t *\n-\t\t * @note\n-\t\t * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for\n-\t\t * \"digest result\" read \"authentication tag T\".\n-\t\t *\n-\t\t * If this member is not set the digest result is understood\n-\t\t * to be in the destination buffer for digest generation, and\n-\t\t * in the source buffer for digest verification. The location\n-\t\t * of the digest result in this case is immediately following\n-\t\t * the region over which the digest is computed.\n-\t\t */\n-\t\tphys_addr_t phys_addr;\t/**< Physical address of digest */\n-\t\tuint32_t length;\t/**< Length of digest */\n-\t} digest; /**< Digest parameters */\n+\t\tstruct {\n+\t\t\tuint8_t *data;\n+\t\t\t/**< If this member of this structure is set this is a\n+\t\t\t * pointer to the location where the digest result\n+\t\t\t * should be inserted (in the case of digest generation)\n+\t\t\t * or where the purported digest exists (in the case of\n+\t\t\t * digest verification).\n+\t\t\t *\n+\t\t\t * At session creation time, the client specified the\n+\t\t\t * digest result length with the digest_length member\n+\t\t\t * of the @ref rte_crypto_auth_xform structure. 
For\n+\t\t\t * physical crypto devices the caller must allocate at\n+\t\t\t * least digest_length of physically contiguous memory\n+\t\t\t * at this location.\n+\t\t\t *\n+\t\t\t * For digest generation, the digest result will\n+\t\t\t * overwrite any data at this location.\n+\t\t\t *\n+\t\t\t * @note\n+\t\t\t * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for\n+\t\t\t * \"digest result\" read \"authentication tag T\".\n+\t\t\t *\n+\t\t\t * If this member is not set the digest result is\n+\t\t\t * understood to be in the destination buffer for\n+\t\t\t * digest generation, and in the source buffer for\n+\t\t\t * digest verification. The location of the digest\n+\t\t\t * result in this case is immediately following the\n+\t\t\t * region over which the digest is computed.\n+\t\t\t */\n+\t\t\tphys_addr_t phys_addr;\n+\t\t\t/**< Physical address of digest */\n+\t\t\tuint16_t length;\n+\t\t\t/**< Length of digest */\n+\t\t} digest; /**< Digest parameters */\n \n-\tstruct {\n-\t\tuint8_t *data;\n-\t\t/**< Pointer to Additional Authenticated Data (AAD) needed for\n-\t\t * authenticated cipher mechanisms (CCM and GCM), and to the IV\n-\t\t * for SNOW3G authentication\n-\t\t * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other\n-\t\t * authentication mechanisms this pointer is ignored.\n-\t\t *\n-\t\t * The length of the data pointed to by this field is set up\n-\t\t * for the session in the @ref rte_crypto_auth_xform structure\n-\t\t * as part of the @ref rte_cryptodev_sym_session_create function\n-\t\t * call.  This length must not exceed 240 bytes.\n-\t\t *\n-\t\t * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the\n-\t\t * caller should setup this field as follows:\n-\t\t *\n-\t\t * - the nonce should be written starting at an offset of one\n-\t\t *   byte into the array, leaving room for the implementation\n-\t\t *   to write in the flags to the first byte.\n-\t\t *\n-\t\t * - the additional  authentication data itself should be\n-\t\t *   written starting at an offset of 18 bytes into the array,\n-\t\t *   leaving room for the length encoding in the first two\n-\t\t *   bytes of the second block.\n-\t\t *\n-\t\t * - the array should be big enough to hold the above fields,\n-\t\t *   plus any padding to round this up to the nearest multiple\n-\t\t *   of the block size (16 bytes).  Padding will be added by\n-\t\t *   the implementation.\n-\t\t *\n-\t\t * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the\n-\t\t * caller should setup this field as follows:\n-\t\t *\n-\t\t * - the AAD is written in starting at byte 0\n-\t\t * - the array must be big enough to hold the AAD, plus any\n-\t\t *   space to round this up to the nearest multiple of the\n-\t\t *   block size (16 bytes).\n-\t\t *\n-\t\t * @note\n-\t\t * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of\n-\t\t * operation, this field is not used and should be set to 0.\n-\t\t * Instead the AAD data should be placed in the source buffer.\n-\t\t */\n-\t\tphys_addr_t phys_addr;\t/**< physical address */\n-\t\tuint32_t length;\t/**< Length of digest */\n-\t} additional_auth;\n-\t/**< Additional authentication parameters */\n-\n-\tstruct rte_mempool *pool;\n-\t/**< mempool used to allocate crypto op */\n-\n-\tvoid *user_data;\n-\t/**< opaque pointer for user data */\n+\t\tstruct {\n+\t\t\tuint8_t *data;\n+\t\t\t/**< Pointer to Additional Authenticated Data (AAD)\n+\t\t\t * needed for authenticated cipher mechanisms (CCM and\n+\t\t\t * GCM), and to the IV for SNOW3G authentication\n+\t\t\t * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). 
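[Illustrative sketch of the authentication side: hash region plus digest placement. Here the digest is appended to the packet data; the aad field is left zeroed since it is only used for CCM/GCM (and the SNOW3G IV).]

#include <rte_crypto.h>
#include <rte_mbuf.h>

static int
op_set_auth_sketch(struct rte_crypto_op *op, struct rte_mbuf *m,
                uint32_t auth_off, uint32_t auth_len, uint16_t digest_len)
{
        struct rte_crypto_sym_op *sym = op->sym;

        /* Region of the source mbuf covered by the hash. */
        sym->auth.data.offset = auth_off;
        sym->auth.data.length = auth_len;

        /* Reserve room for the digest at the end of the packet data and
         * point the operation at it. */
        sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m, digest_len);
        if (sym->auth.digest.data == NULL)
                return -1;      /* no tailroom left in the mbuf */
        sym->auth.digest.length = digest_len;
        /* Physical crypto devices also need auth.digest.phys_addr filled in
         * with the physical address of this region. */

        return 0;
}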
For other\n+\t\t\t * authentication mechanisms this pointer is ignored.\n+\t\t\t *\n+\t\t\t * The length of the data pointed to by this field is\n+\t\t\t * set up for the session in the @ref\n+\t\t\t * rte_crypto_auth_xform structure as part of the @ref\n+\t\t\t * rte_cryptodev_session_create function call.  This\n+\t\t\t * length must not exceed 240 bytes.\n+\t\t\t *\n+\t\t\t * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),\n+\t\t\t * the caller should setup this field as follows:\n+\t\t\t *\n+\t\t\t * - the nonce should be written starting at an offset\n+\t\t\t * of one byte into the array, leaving room for the\n+\t\t\t * implementation to write in the flags to the first\n+\t\t\t *  byte.\n+\t\t\t *\n+\t\t\t * - the additional  authentication data itself should\n+\t\t\t * be written starting at an offset of 18 bytes into\n+\t\t\t * the array, leaving room for the length encoding in\n+\t\t\t * the first two bytes of the second block.\n+\t\t\t *\n+\t\t\t * - the array should be big enough to hold the above\n+\t\t\t *  fields, plus any padding to round this up to the\n+\t\t\t *  nearest multiple of the block size (16 bytes).\n+\t\t\t *  Padding will be added by the implementation.\n+\t\t\t *\n+\t\t\t * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the\n+\t\t\t * caller should setup this field as follows:\n+\t\t\t *\n+\t\t\t * - the AAD is written in starting at byte 0\n+\t\t\t * - the array must be big enough to hold the AAD, plus\n+\t\t\t * any space to round this up to the nearest multiple\n+\t\t\t * of the block size (16 bytes).\n+\t\t\t *\n+\t\t\t * @note\n+\t\t\t * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of\n+\t\t\t * operation, this field is not used and should be set\n+\t\t\t * to 0. Instead the AAD data should be placed in the\n+\t\t\t * source buffer.\n+\t\t\t */\n+\t\t\tphys_addr_t phys_addr;\t/**< physical address */\n+\t\t\tuint16_t length;\t/**< Length of digest */\n+\t\t} aad;\n+\t\t/**< Additional authentication parameters */\n+\t} auth;\n };\n \n \n /**\n- * Reset the fields of a crypto operation to their default values.\n+ * Reset the fields of a symmetric operation to their default values.\n  *\n  * @param\top\tThe crypto operation to be reset.\n  */\n static inline void\n __rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)\n {\n+\tmemset(op, 0, sizeof(*op));\n+\n \top->type = RTE_CRYPTO_SYM_OP_SESSIONLESS;\n-\top->dst.m = NULL;\n-\top->dst.offset = 0;\n }\n \n-/** Attach a session to a crypto operation */\n-static inline void\n-rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,\n+\n+/**\n+ * Allocate space for symmetric crypto xforms in the private data space of the\n+ * crypto operation. This also defaults the crypto xform type to\n+ * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms\n+ * in the crypto operation\n+ *\n+ * @return\n+ * - On success returns pointer to first crypto xform in crypto operations chain\n+ * - On failure returns NULL\n+ */\n+static inline struct rte_crypto_sym_xform *\n+__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,\n+\t\tvoid *priv_data, uint8_t nb_xforms)\n+{\n+\tstruct rte_crypto_sym_xform *xform;\n+\n+\tsym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;\n+\n+\tdo {\n+\t\txform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;\n+\t\txform = xform->next = --nb_xforms > 0 ? 
xform + 1 : NULL;\n+\t} while (xform);\n+\n+\treturn sym_op->xform;\n+}\n+\n+\n+/**\n+ * Attach a session to a symmetric crypto operation\n+ *\n+ * @param\top\tcrypto operation\n+ * @param\tsess\tcryptodev session\n+ */\n+static inline int\n+__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,\n \t\tstruct rte_cryptodev_sym_session *sess)\n {\n-\top->session = sess;\n-\top->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;\n+\tsym_op->session = sess;\n+\tsym_op->type = RTE_CRYPTO_SYM_OP_WITH_SESSION;\n+\n+\treturn 0;\n }\n \n+\n #ifdef __cplusplus\n }\n #endif\ndiff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c\nindex f32ddd4..634d248 100644\n--- a/lib/librte_cryptodev/rte_cryptodev.c\n+++ b/lib/librte_cryptodev/rte_cryptodev.c\n@@ -1098,3 +1098,77 @@ rte_cryptodev_sym_session_free(uint8_t dev_id,\n \n \treturn NULL;\n }\n+\n+/** Initialise rte_crypto_op mempool element */\n+static void\n+rte_crypto_op_init(struct rte_mempool *mempool,\n+\t\tvoid *opaque_arg,\n+\t\tvoid *_op_data,\n+\t\t__rte_unused unsigned i)\n+{\n+\tstruct rte_crypto_op *op = _op_data;\n+\tenum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;\n+\n+\tmemset(_op_data, 0, mempool->elt_size);\n+\n+\t__rte_crypto_op_reset(op, type);\n+\n+\top->mempool = mempool;\n+}\n+\n+\n+struct rte_mempool *\n+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,\n+\t\tunsigned nb_elts, unsigned cache_size, uint16_t priv_size,\n+\t\tint socket_id)\n+{\n+\tstruct rte_crypto_op_pool_private *priv;\n+\n+\tunsigned elt_size = sizeof(struct rte_crypto_op) +\n+\t\t\tsizeof(struct rte_crypto_sym_op) +\n+\t\t\tpriv_size;\n+\n+\t/* lookup mempool in case already allocated */\n+\tstruct rte_mempool *mp = rte_mempool_lookup(name);\n+\tif (mp != NULL) {\n+\t\tpriv = (struct rte_crypto_op_pool_private *)\n+\t\t\t\trte_mempool_get_priv(mp);\n+\n+\t\tif (mp->elt_size != elt_size ||\n+\t\t\t\tmp->cache_size < cache_size ||\n+\t\t\t\tmp->size < nb_elts ||\n+\t\t\t\tpriv->priv_size <  priv_size) {\n+\t\t\tmp = NULL;\n+\t\t\tCDEV_LOG_ERR(\"Mempool %s already exists but with \"\n+\t\t\t\t\t\"incompatible parameters\", name);\n+\t\t\treturn NULL;\n+\t\t}\n+\t\treturn mp;\n+\t}\n+\n+\tmp = rte_mempool_create(\n+\t\t\tname,\n+\t\t\tnb_elts,\n+\t\t\telt_size,\n+\t\t\tcache_size,\n+\t\t\tsizeof(struct rte_crypto_op_pool_private),\n+\t\t\tNULL,\n+\t\t\tNULL,\n+\t\t\trte_crypto_op_init,\n+\t\t\t&type,\n+\t\t\tsocket_id,\n+\t\t\t0);\n+\n+\tif (mp == NULL) {\n+\t\tCDEV_LOG_ERR(\"Failed to create mempool %s\", name);\n+\t\treturn NULL;\n+\t}\n+\n+\tpriv = (struct rte_crypto_op_pool_private *)\n+\t\t\trte_mempool_get_priv(mp);\n+\n+\tpriv->priv_size = priv_size;\n+\tpriv->type = type;\n+\n+\treturn mp;\n+}\ndiff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h\nindex 732e2b9..5aaa00c 100644\n--- a/lib/librte_cryptodev/rte_cryptodev.h\n+++ b/lib/librte_cryptodev/rte_cryptodev.h\n@@ -58,15 +58,18 @@ extern \"C\" {\n #define CRYPTODEV_NAME_AESNI_MB_PMD\t(\"cryptodev_aesni_mb_pmd\")\n /**< AES-NI Multi buffer PMD device name */\n #define CRYPTODEV_NAME_QAT_SYM_PMD\t(\"cryptodev_qat_sym_pmd\")\n-/**< Intel QAT SYM PMD device name */\n+/**< Intel QAT Symmetric Crypto PMD device name */\n \n /** Crypto device type */\n enum rte_cryptodev_type {\n \tRTE_CRYPTODEV_NULL_PMD = 1,\t/**< Null crypto PMD */\n \tRTE_CRYPTODEV_AESNI_MB_PMD,\t/**< AES-NI multi buffer PMD */\n-\tRTE_CRYPTODEV_QAT_SYM_PMD,\t/**< QAT SYM PMD */\n+\tRTE_CRYPTODEV_QAT_SYM_PMD,\t/**< QAT PMD 
Symmetric Crypto */\n };\n \n+\n+extern const char **rte_cyptodev_names;\n+\n /* Logging Macros */\n \n #define CDEV_LOG_ERR(fmt, args...)\t\t\t\t\t\\\n@@ -411,12 +414,12 @@ rte_cryptodev_callback_unregister(uint8_t dev_id,\n \t\trte_cryptodev_cb_fn cb_fn, void *cb_arg);\n \n \n-typedef uint16_t (*dequeue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,\n-\t\tuint16_t nb_pkts);\n+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,\n+\t\tstruct rte_crypto_op **ops,\tuint16_t nb_ops);\n /**< Dequeue processed packets from queue pair of a device. */\n \n-typedef uint16_t (*enqueue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,\n-\t\tuint16_t nb_pkts);\n+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,\n+\t\tstruct rte_crypto_op **ops,\tuint16_t nb_ops);\n /**< Enqueue packets for processing on queue pair of a device. */\n \n \n@@ -489,66 +492,65 @@ struct rte_cryptodev_data {\n extern struct rte_cryptodev *rte_cryptodevs;\n /**\n  *\n- * Dequeue a burst of processed packets from a queue of the crypto device.\n- * The dequeued packets are stored in *rte_mbuf* structures whose pointers are\n- * supplied in the *pkts* array.\n+ * Dequeue a burst of processed crypto operations from a queue on the crypto\n+ * device. The dequeued operation are stored in *rte_crypto_op* structures\n+ * whose pointers are supplied in the *ops* array.\n  *\n- * The rte_crypto_dequeue_burst() function returns the number of packets\n- * actually dequeued, which is the number of *rte_mbuf* data structures\n- * effectively supplied into the *pkts* array.\n+ * The rte_cryptodev_dequeue_burst() function returns the number of ops\n+ * actually dequeued, which is the number of *rte_crypto_op* data structures\n+ * effectively supplied into the *ops* array.\n  *\n- * A return value equal to *nb_pkts* indicates that the queue contained\n- * at least *rx_pkts* packets, and this is likely to signify that other\n- * received packets remain in the input queue. Applications implementing\n- * a \"retrieve as much received packets as possible\" policy can check this\n- * specific case and keep invoking the rte_crypto_dequeue_burst() function\n- * until a value less than *nb_pkts* is returned.\n+ * A return value equal to *nb_ops* indicates that the queue contained\n+ * at least *nb_ops* operations, and this is likely to signify that other\n+ * processed operations remain in the devices output queue. Applications\n+ * implementing a \"retrieve as many processed operations as possible\" policy\n+ * can check this specific case and keep invoking the\n+ * rte_cryptodev_dequeue_burst() function until a value less than\n+ * *nb_ops* is returned.\n  *\n- * The rte_crypto_dequeue_burst() function does not provide any error\n+ * The rte_cryptodev_dequeue_burst() function does not provide any error\n  * notification to avoid the corresponding overhead.\n  *\n- * @param\tdev_id\t\tThe identifier of the device.\n+ * @param\tdev_id\t\tThe symmetric crypto device identifier\n  * @param\tqp_id\t\tThe index of the queue pair from which to\n  *\t\t\t\tretrieve processed packets. 
The value must be\n  *\t\t\t\tin the range [0, nb_queue_pair - 1] previously\n  *\t\t\t\tsupplied to rte_cryptodev_configure().\n- * @param\tpkts\t\tThe address of an array of pointers to\n- *\t\t\t\t*rte_mbuf* structures that must be large enough\n- *\t\t\t\tto store *nb_pkts* pointers in it.\n- * @param\tnb_pkts\t\tThe maximum number of packets to dequeue.\n+ * @param\tops\t\tThe address of an array of pointers to\n+ *\t\t\t\t*rte_crypto_op* structures that must be\n+ *\t\t\t\tlarge enough to store *nb_ops* pointers in it.\n+ * @param\tnb_ops\t\tThe maximum number of operations to dequeue.\n  *\n  * @return\n- *   - The number of packets actually dequeued, which is the number\n- *   of pointers to *rte_mbuf* structures effectively supplied to the\n- *   *pkts* array.\n+ *   - The number of operations actually dequeued, which is the number\n+ *   of pointers to *rte_crypto_op* structures effectively supplied to the\n+ *   *ops* array.\n  */\n static inline uint16_t\n rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,\n-\t\tstruct rte_mbuf **pkts, uint16_t nb_pkts)\n+\t\tstruct rte_crypto_op **ops, uint16_t nb_ops)\n {\n \tstruct rte_cryptodev *dev = &rte_cryptodevs[dev_id];\n \n-\tnb_pkts = (*dev->dequeue_burst)\n-\t\t\t(dev->data->queue_pairs[qp_id], pkts, nb_pkts);\n+\tnb_ops = (*dev->dequeue_burst)\n+\t\t\t(dev->data->queue_pairs[qp_id], ops, nb_ops);\n \n-\treturn nb_pkts;\n+\treturn nb_ops;\n }\n \n /**\n- * Enqueue a burst of packets for processing on a crypto device.\n+ * Enqueue a burst of operations for processing on a crypto device.\n  *\n- * The rte_crypto_enqueue_burst() function is invoked to place packets\n- * on the queue *queue_id* of the device designated by its *dev_id*.\n+ * The rte_cryptodev_enqueue_burst() function is invoked to place\n+ * crypto operations on the queue *qp_id* of the device designated by\n+ * its *dev_id*.\n  *\n- * The *nb_pkts* parameter is the number of packets to process which are\n- * supplied in the *pkts* array of *rte_mbuf* structures.\n+ * The *nb_ops* parameter is the number of operations to process which are\n+ * supplied in the *ops* array of *rte_crypto_op* structures.\n  *\n- * The rte_crypto_enqueue_burst() function returns the number of packets it\n- * actually sent. A return value equal to *nb_pkts* means that all packets\n- * have been sent.\n- *\n- * Each mbuf in the *pkts* array must have a valid *rte_mbuf_offload* structure\n- * attached which contains a valid crypto operation.\n+ * The rte_cryptodev_enqueue_burst() function returns the number of\n+ * operations it actually enqueued for processing. 
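[Illustrative sketch of the "retrieve as many processed operations as possible" policy described above, including per-operation status checking. BURST is a placeholder size; what happens to each packet afterwards is application-specific.]

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mbuf.h>

#define BURST 32        /* placeholder dequeue burst size */

static void
drain_completions_sketch(uint8_t dev_id, uint16_t qp_id)
{
        struct rte_crypto_op *ops[BURST];
        uint16_t nb, i;

        /* Keep dequeuing while full bursts keep coming back. */
        do {
                nb = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, BURST);

                for (i = 0; i < nb; i++) {
                        struct rte_mbuf *m = ops[i]->sym->m_src;

                        if (ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
                                /* e.g. RTE_CRYPTO_OP_STATUS_AUTH_FAILED for
                                 * a bad digest on a verify operation */
                                rte_pktmbuf_free(m);
                        } else {
                                /* forward or further process m here */
                        }

                        /* Return the operation to its mempool. */
                        rte_crypto_op_free(ops[i]);
                }
        } while (nb == BURST);
}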
A return value equal to\n+ * *nb_ops* means that all packets have been enqueued.\n  *\n  * @param\tdev_id\t\tThe identifier of the device.\n  * @param\tqp_id\t\tThe index of the queue pair which packets are\n@@ -556,25 +558,25 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,\n  *\t\t\t\tmust be in the range [0, nb_queue_pairs - 1]\n  *\t\t\t\tpreviously supplied to\n  *\t\t\t\t *rte_cryptodev_configure*.\n- * @param\tpkts\t\tThe address of an array of *nb_pkts* pointers\n- *\t\t\t\tto *rte_mbuf* structures which contain the\n- *\t\t\t\toutput packets.\n- * @param\tnb_pkts\t\tThe number of packets to transmit.\n+ * @param\tops\t\tThe address of an array of *nb_ops* pointers\n+ *\t\t\t\tto *rte_crypto_op* structures which contain\n+ *\t\t\t\tthe crypto operations to be processed.\n+ * @param\tnb_ops\t\tThe number of operations to process.\n  *\n  * @return\n- * The number of packets actually enqueued on the crypto device. The return\n- * value can be less than the value of the *nb_pkts* parameter when the\n- * crypto devices queue is full or has been filled up.\n- * The number of packets is 0 if the device hasn't been started.\n+ * The number of operations actually enqueued on the crypto device. The return\n+ * value can be less than the value of the *nb_ops* parameter when the\n+ * crypto devices queue is full or if invalid parameters are specified in\n+ * a *rte_crypto_op*.\n  */\n static inline uint16_t\n rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,\n-\t\tstruct rte_mbuf **pkts, uint16_t nb_pkts)\n+\t\tstruct rte_crypto_op **ops, uint16_t nb_ops)\n {\n \tstruct rte_cryptodev *dev = &rte_cryptodevs[dev_id];\n \n \treturn (*dev->enqueue_burst)(\n-\t\t\tdev->data->queue_pairs[qp_id], pkts, nb_pkts);\n+\t\t\tdev->data->queue_pairs[qp_id], ops, nb_ops);\n }\n \n \n@@ -620,7 +622,6 @@ extern struct rte_cryptodev_sym_session *\n rte_cryptodev_sym_session_create(uint8_t dev_id,\n \t\tstruct rte_crypto_sym_xform *xform);\n \n-\n /**\n  * Free the memory associated with a previously allocated session.\n  *\ndiff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map\nindex ff8e93d..24e00bb 100644\n--- a/lib/librte_cryptodev/rte_cryptodev_version.map\n+++ b/lib/librte_cryptodev/rte_cryptodev_version.map\n@@ -27,6 +27,7 @@ DPDK_2.2 {\n \trte_cryptodev_queue_pair_setup;\n \trte_cryptodev_queue_pair_start;\n \trte_cryptodev_queue_pair_stop;\n+\trte_crypto_op_pool_create;\n \n \tlocal: *;\n };\n\\ No newline at end of file\ndiff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h\nindex c973e9b..400a849 100644\n--- a/lib/librte_mbuf/rte_mbuf.h\n+++ b/lib/librte_mbuf/rte_mbuf.h\n@@ -728,9 +728,6 @@ typedef uint8_t  MARKER8[0];  /**< generic marker with 1B alignment */\n typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes\n                                * with a single assignment */\n \n-/** Opaque rte_mbuf_offload  structure declarations */\n-struct rte_mbuf_offload;\n-\n /**\n  * The generic rte_mbuf, containing a packet mbuf.\n  */\n@@ -847,9 +844,6 @@ struct rte_mbuf {\n \n \t/** Timesync flags for use with IEEE1588. 
*/\n \tuint16_t timesync;\n-\n-\t/* Chain of off-load operations to perform on mbuf */\n-\tstruct rte_mbuf_offload *offload_ops;\n } __rte_cache_aligned;\n \n static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);\ndiff --git a/lib/librte_mbuf_offload/Makefile b/lib/librte_mbuf_offload/Makefile\ndeleted file mode 100644\nindex acdb449..0000000\n--- a/lib/librte_mbuf_offload/Makefile\n+++ /dev/null\n@@ -1,52 +0,0 @@\n-#   BSD LICENSE\n-#\n-#   Copyright(c) 2015 Intel Corporation. All rights reserved.\n-#\n-#   Redistribution and use in source and binary forms, with or without\n-#   modification, are permitted provided that the following conditions\n-#   are met:\n-#\n-#     * Redistributions of source code must retain the above copyright\n-#       notice, this list of conditions and the following disclaimer.\n-#     * Redistributions in binary form must reproduce the above copyright\n-#       notice, this list of conditions and the following disclaimer in\n-#       the documentation and/or other materials provided with the\n-#       distribution.\n-#     * Neither the name of Intel Corporation nor the names of its\n-#       contributors may be used to endorse or promote products derived\n-#       from this software without specific prior written permission.\n-#\n-#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n-#   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n-#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n-#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n-#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n-#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n-#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n-#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n-#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n-#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n-#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n-\n-include $(RTE_SDK)/mk/rte.vars.mk\n-\n-# library name\n-LIB = librte_mbuf_offload.a\n-\n-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3\n-\n-EXPORT_MAP := rte_mbuf_offload_version.map\n-\n-LIBABIVER := 1\n-\n-# all source are stored in SRCS-y\n-SRCS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) := rte_mbuf_offload.c\n-\n-# install includes\n-SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD)-include := rte_mbuf_offload.h\n-\n-# this lib needs eal\n-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_mbuf\n-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_cryptodev\n-\n-include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.c b/lib/librte_mbuf_offload/rte_mbuf_offload.c\ndeleted file mode 100644\nindex 5c0c9dd..0000000\n--- a/lib/librte_mbuf_offload/rte_mbuf_offload.c\n+++ /dev/null\n@@ -1,100 +0,0 @@\n-/*-\n- *   BSD LICENSE\n- *\n- *   Copyright(c) 2015 Intel Corporation. 
All rights reserved.\n- *\n- *   Redistribution and use in source and binary forms, with or without\n- *   modification, are permitted provided that the following conditions\n- *   are met:\n- *\n- *     * Redistributions of source code must retain the above copyright\n- *       notice, this list of conditions and the following disclaimer.\n- *     * Redistributions in binary form must reproduce the above copyright\n- *       notice, this list of conditions and the following disclaimer in\n- *       the documentation and/or other materials provided with the\n- *       distribution.\n- *     * Neither the name of Intel Corporation nor the names of its\n- *       contributors may be used to endorse or promote products derived\n- *       from this software without specific prior written permission.\n- *\n- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n- *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n- */\n-\n-#include <string.h>\n-#include <rte_common.h>\n-\n-#include \"rte_mbuf_offload.h\"\n-\n-/** Initialize rte_mbuf_offload structure */\n-static void\n-rte_pktmbuf_offload_init(struct rte_mempool *mp,\n-\t\t__rte_unused void *opaque_arg,\n-\t\tvoid *_op_data,\n-\t\t__rte_unused unsigned i)\n-{\n-\tstruct rte_mbuf_offload *ol = _op_data;\n-\n-\tmemset(_op_data, 0, mp->elt_size);\n-\n-\tol->type = RTE_PKTMBUF_OL_NOT_SPECIFIED;\n-\tol->mp = mp;\n-}\n-\n-\n-struct rte_mempool *\n-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,\n-\t\tunsigned cache_size, uint16_t priv_size, int socket_id)\n-{\n-\tstruct rte_pktmbuf_offload_pool_private *priv;\n-\tunsigned elt_size = sizeof(struct rte_mbuf_offload) + priv_size;\n-\n-\n-\t/* lookup mempool in case already allocated */\n-\tstruct rte_mempool *mp = rte_mempool_lookup(name);\n-\n-\tif (mp != NULL) {\n-\t\tpriv = (struct rte_pktmbuf_offload_pool_private *)\n-\t\t\t\trte_mempool_get_priv(mp);\n-\n-\t\tif (priv->offload_priv_size <  priv_size ||\n-\t\t\t\tmp->elt_size != elt_size ||\n-\t\t\t\tmp->cache_size < cache_size ||\n-\t\t\t\tmp->size < size) {\n-\t\t\tmp = NULL;\n-\t\t\treturn NULL;\n-\t\t}\n-\t\treturn mp;\n-\t}\n-\n-\tmp = rte_mempool_create(\n-\t\t\tname,\n-\t\t\tsize,\n-\t\t\telt_size,\n-\t\t\tcache_size,\n-\t\t\tsizeof(struct rte_pktmbuf_offload_pool_private),\n-\t\t\tNULL,\n-\t\t\tNULL,\n-\t\t\trte_pktmbuf_offload_init,\n-\t\t\tNULL,\n-\t\t\tsocket_id,\n-\t\t\t0);\n-\n-\tif (mp == NULL)\n-\t\treturn NULL;\n-\n-\tpriv = (struct rte_pktmbuf_offload_pool_private *)\n-\t\t\trte_mempool_get_priv(mp);\n-\n-\tpriv->offload_priv_size = priv_size;\n-\treturn mp;\n-}\ndiff --git a/lib/librte_mbuf_offload/rte_mbuf_offload.h b/lib/librte_mbuf_offload/rte_mbuf_offload.h\ndeleted file mode 100644\nindex 926ab64..0000000\n--- a/lib/librte_mbuf_offload/rte_mbuf_offload.h\n+++ /dev/null\n@@ -1,307 +0,0 @@\n-/*-\n- *  
 BSD LICENSE\n- *\n- *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.\n- *\n- *   Redistribution and use in source and binary forms, with or without\n- *   modification, are permitted provided that the following conditions\n- *   are met:\n- *\n- *     * Redistributions of source code must retain the above copyright\n- *       notice, this list of conditions and the following disclaimer.\n- *     * Redistributions in binary form must reproduce the above copyright\n- *       notice, this list of conditions and the following disclaimer in\n- *       the documentation and/or other materials provided with the\n- *       distribution.\n- *     * Neither the name of Intel Corporation nor the names of its\n- *       contributors may be used to endorse or promote products derived\n- *       from this software without specific prior written permission.\n- *\n- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n- *   \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n- */\n-\n-#ifndef _RTE_MBUF_OFFLOAD_H_\n-#define _RTE_MBUF_OFFLOAD_H_\n-\n-/**\n- * @file\n- * RTE mbuf offload\n- *\n- * The rte_mbuf_offload library provides the ability to specify a device generic\n- * off-load operation independent of the current Rx/Tx Ethernet offloads\n- * supported within the rte_mbuf structure, and add supports for multiple\n- * off-load operations and offload device types.\n- *\n- * The rte_mbuf_offload specifies the particular off-load operation type, such\n- * as a crypto operation, and provides a container for the operations\n- * parameter's inside the op union. These parameters are then used by the\n- * device which supports that operation to perform the specified offload.\n- *\n- * This library provides an API to create pre-allocated mempool of offload\n- * operations, with supporting allocate and free functions. 
It also provides\n- * APIs for attaching an offload to a mbuf, as well as an API to retrieve a\n- * specified offload type from an mbuf offload chain.\n- *\n- * @warning\n- * @b EXPERIMENTAL: this API may change without prior notice\n- */\n-\n-#include <rte_mbuf.h>\n-#include <rte_crypto.h>\n-\n-\n-/** packet mbuf offload operation types */\n-enum rte_mbuf_ol_op_type {\n-\tRTE_PKTMBUF_OL_NOT_SPECIFIED = 0,\n-\t/**< Off-load not specified */\n-\tRTE_PKTMBUF_OL_CRYPTO_SYM\n-\t/**< Crypto offload operation */\n-};\n-\n-/**\n- * Generic packet mbuf offload\n- * This is used to specify a offload operation to be performed on a rte_mbuf.\n- * Multiple offload operations can be chained to the same mbuf, but only a\n- * single offload operation of a particular type can be in the chain\n- */\n-struct rte_mbuf_offload {\n-\tstruct rte_mbuf_offload *next;\t/**< next offload in chain */\n-\tstruct rte_mbuf *m;\t\t/**< mbuf offload is attached to */\n-\tstruct rte_mempool *mp;\t\t/**< mempool offload allocated from */\n-\n-\tenum rte_mbuf_ol_op_type type;\t/**< offload type */\n-\tunion {\n-\t\tstruct rte_crypto_sym_op crypto;\t/**< Crypto operation */\n-\t} op;\n-};\n-\n-/**< private data structure belonging to packet mbug offload mempool */\n-struct rte_pktmbuf_offload_pool_private {\n-\tuint16_t offload_priv_size;\n-\t/**< Size of private area in each mbuf_offload. */\n-};\n-\n-\n-/**\n- * Creates a mempool of rte_mbuf_offload objects\n- *\n- * EXPERIMENTAL: this API file may change without prior notice\n- *\n- * @param\tname\t\tmempool name\n- * @param\tsize\t\tnumber of objects in mempool\n- * @param\tcache_size\tcache size of objects for each core\n- * @param\tpriv_size\tsize of private data to be allocated with each\n- *\t\t\t\trte_mbuf_offload object\n- * @param\tsocket_id\tSocket on which to allocate mempool objects\n- *\n- * @return\n- * - On success returns a valid mempool of rte_mbuf_offload objects\n- * - On failure return NULL\n- */\n-extern struct rte_mempool *\n-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,\n-\t\tunsigned cache_size, uint16_t priv_size, int socket_id);\n-\n-\n-/**\n- * Returns private data size allocated with each rte_mbuf_offload object by\n- * the mempool\n- *\n- * @param\tmpool\trte_mbuf_offload mempool\n- *\n- * @return\tprivate data size\n- */\n-static inline uint16_t\n-__rte_pktmbuf_offload_priv_size(struct rte_mempool *mpool)\n-{\n-\tstruct rte_pktmbuf_offload_pool_private *priv =\n-\t\t\trte_mempool_get_priv(mpool);\n-\n-\treturn priv->offload_priv_size;\n-}\n-\n-/**\n- * Get specified off-load operation type from mbuf.\n- *\n- * @param\tm\t\tpacket mbuf.\n- * @param\ttype\t\toffload operation type requested.\n- *\n- * @return\n- * - On success retruns rte_mbuf_offload pointer\n- * - On failure returns NULL\n- *\n- */\n-static inline struct rte_mbuf_offload *\n-rte_pktmbuf_offload_get(struct rte_mbuf *m, enum rte_mbuf_ol_op_type type)\n-{\n-\tstruct rte_mbuf_offload *ol;\n-\n-\tfor (ol = m->offload_ops; ol != NULL; ol = ol->next)\n-\t\tif (ol->type == type)\n-\t\t\treturn ol;\n-\n-\treturn ol;\n-}\n-\n-/**\n- * Attach a rte_mbuf_offload to a mbuf. 
We only support a single offload of any\n- * one type in our chain of offloads.\n- *\n- * @param\tm\tpacket mbuf.\n- * @param\tol\trte_mbuf_offload strucutre to be attached\n- *\n- * @returns\n- * - On success returns the pointer to the offload we just added\n- * - On failure returns NULL\n- */\n-static inline struct rte_mbuf_offload *\n-rte_pktmbuf_offload_attach(struct rte_mbuf *m, struct rte_mbuf_offload *ol)\n-{\n-\tstruct rte_mbuf_offload **ol_last;\n-\n-\tfor (ol_last = &m->offload_ops;\tol_last[0] != NULL;\n-\t\t\tol_last = &ol_last[0]->next)\n-\t\tif (ol_last[0]->type == ol->type)\n-\t\t\treturn NULL;\n-\n-\tol_last[0] = ol;\n-\tol_last[0]->m = m;\n-\tol_last[0]->next = NULL;\n-\n-\treturn ol_last[0];\n-}\n-\n-\n-/** Rearms rte_mbuf_offload default parameters */\n-static inline void\n-__rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,\n-\t\tenum rte_mbuf_ol_op_type type)\n-{\n-\tol->m = NULL;\n-\tol->type = type;\n-\n-\tswitch (type) {\n-\tcase RTE_PKTMBUF_OL_CRYPTO_SYM:\n-\t\t__rte_crypto_sym_op_reset(&ol->op.crypto); break;\n-\tdefault:\n-\t\tbreak;\n-\t}\n-}\n-\n-/** Allocate rte_mbuf_offload from mempool */\n-static inline struct rte_mbuf_offload *\n-__rte_pktmbuf_offload_raw_alloc(struct rte_mempool *mp)\n-{\n-\tvoid *buf = NULL;\n-\n-\tif (rte_mempool_get(mp, &buf) < 0)\n-\t\treturn NULL;\n-\n-\treturn (struct rte_mbuf_offload *)buf;\n-}\n-\n-/**\n- * Allocate a rte_mbuf_offload with a specified operation type from\n- * rte_mbuf_offload mempool\n- *\n- * @param\tmpool\t\trte_mbuf_offload mempool\n- * @param\ttype\t\toffload operation type\n- *\n- * @returns\n- * - On success returns a valid rte_mbuf_offload structure\n- * - On failure returns NULL\n- */\n-static inline struct rte_mbuf_offload *\n-rte_pktmbuf_offload_alloc(struct rte_mempool *mpool,\n-\t\tenum rte_mbuf_ol_op_type type)\n-{\n-\tstruct rte_mbuf_offload *ol = __rte_pktmbuf_offload_raw_alloc(mpool);\n-\n-\tif (ol != NULL)\n-\t\t__rte_pktmbuf_offload_reset(ol, type);\n-\n-\treturn ol;\n-}\n-\n-/**\n- * free rte_mbuf_offload structure\n- */\n-static inline void\n-rte_pktmbuf_offload_free(struct rte_mbuf_offload *ol)\n-{\n-\tif (ol != NULL && ol->mp != NULL)\n-\t\trte_mempool_put(ol->mp, ol);\n-}\n-\n-/**\n- * Checks if the private data of a rte_mbuf_offload has enough capacity for\n- * requested size\n- *\n- * @returns\n- * - if sufficient space available returns pointer to start of private data\n- * - if insufficient space returns NULL\n- */\n-static inline void *\n-__rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,\n-\t\tuint16_t size)\n-{\n-\tuint16_t priv_size;\n-\n-\tif (likely(ol->mp != NULL)) {\n-\t\tpriv_size = __rte_pktmbuf_offload_priv_size(ol->mp);\n-\n-\t\tif (likely(priv_size >= size))\n-\t\t\treturn (void *)(ol + 1);\n-\t}\n-\treturn NULL;\n-}\n-\n-/**\n- * Allocate space for crypto xforms in the private data space of the\n- * rte_mbuf_offload. 
This also defaults the crypto xform type and configures\n- * the chaining of the xform in the crypto operation\n- *\n- * @return\n- * - On success returns pointer to first crypto xform in crypto operations chain\n- * - On failure returns NULL\n- */\n-static inline struct rte_crypto_sym_xform *\n-rte_pktmbuf_offload_alloc_crypto_sym_xforms(struct rte_mbuf_offload *ol,\n-\t\tunsigned nb_xforms)\n-{\n-\tstruct rte_crypto_sym_xform *xform;\n-\tvoid *priv_data;\n-\tuint16_t size;\n-\n-\tsize = sizeof(struct rte_crypto_sym_xform) * nb_xforms;\n-\tpriv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);\n-\n-\tif (priv_data == NULL)\n-\t\treturn NULL;\n-\n-\tol->op.crypto.xform = xform = (struct rte_crypto_sym_xform *)priv_data;\n-\n-\tdo {\n-\t\txform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;\n-\t\txform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;\n-\t} while (xform);\n-\n-\treturn ol->op.crypto.xform;\n-}\n-\n-\n-#ifdef __cplusplus\n-}\n-#endif\n-\n-#endif /* _RTE_MBUF_OFFLOAD_H_ */\ndiff --git a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map b/lib/librte_mbuf_offload/rte_mbuf_offload_version.map\ndeleted file mode 100644\nindex 3d3b06a..0000000\n--- a/lib/librte_mbuf_offload/rte_mbuf_offload_version.map\n+++ /dev/null\n@@ -1,7 +0,0 @@\n-DPDK_2.2 {\n-\tglobal:\n-\n-\trte_pktmbuf_offload_pool_create;\n-\n-\tlocal: *;\n-};\n",
    "prefixes": [
        "dpdk-dev",
        "v2",
        "2/2"
    ]
}
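
Editor's note, for context on what the embedded diff removes: the deleted rte_mbuf_offload.h above defines a small pool-create/alloc/attach/get/free API for chaining offload operations onto an mbuf, plus a helper that carves crypto xforms out of each object's private area. The sketch below is purely illustrative and is not part of the patch: it reconstructs, only from the declarations visible in the removed header, how an application typically drove that (now deleted) API before this change. The pool name, pool sizing shown in the comment, and the helper name attach_sym_crypto_offload() are assumptions for the example.

/*
 * Illustrative sketch of the pre-patch rte_mbuf_offload usage; the helper
 * name and pool parameters are assumptions, not taken from the patch.
 */
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_mbuf_offload.h>	/* header removed by this patch */

static struct rte_mbuf_offload *
attach_sym_crypto_offload(struct rte_mempool *ol_pool, struct rte_mbuf *m,
		unsigned nb_xforms)
{
	struct rte_mbuf_offload *ol;
	struct rte_crypto_sym_xform *xform;

	/*
	 * ol_pool is created once at init time with a private area large
	 * enough for the xforms, e.g. (illustrative values):
	 * rte_pktmbuf_offload_pool_create("ol_pool", 8192, 128,
	 *		2 * sizeof(struct rte_crypto_sym_xform), socket_id);
	 */

	/* Take an offload op of the symmetric-crypto type from the pool. */
	ol = rte_pktmbuf_offload_alloc(ol_pool, RTE_PKTMBUF_OL_CRYPTO_SYM);
	if (ol == NULL)
		return NULL;

	/*
	 * Carve nb_xforms transforms out of the offload's private area; the
	 * helper chains them and hooks the first into ol->op.crypto.xform.
	 */
	xform = rte_pktmbuf_offload_alloc_crypto_sym_xforms(ol, nb_xforms);
	if (xform == NULL) {
		rte_pktmbuf_offload_free(ol);
		return NULL;
	}
	/* ... application fills in cipher/auth parameters of each xform ... */

	/* Only one offload of a given type may sit on an mbuf's chain. */
	if (rte_pktmbuf_offload_attach(m, ol) == NULL) {
		rte_pktmbuf_offload_free(ol);
		return NULL;
	}
	return ol;
}

On completion, the offload attached to a processed mbuf was looked up with rte_pktmbuf_offload_get(m, RTE_PKTMBUF_OL_CRYPTO_SYM) and returned to its pool with rte_pktmbuf_offload_free(). The crypto-op-oriented burst API introduced by this patch removes the need for that per-mbuf bookkeeping, which is why the whole librte_mbuf_offload library, including its version.map, is dropped here.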