[v5] crypto/qat: default to IPsec MB for computations

Message ID 20230606102820.356924-1-brian.dooley@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Akhil Goyal
Series [v5] crypto/qat: default to IPsec MB for computations

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/intel-Functional success Functional PASS
ci/iol-testing warning apply patch failure

Commit Message

Brian Dooley June 6, 2023, 10:28 a.m. UTC
  Pre and post computations currently use the OpenSSL library by default.
This patch changes the default to the Intel IPsec MB library, version 1.4
or later, for the required computations. If this version of IPsec MB is
not available, the driver falls back to OpenSSL.

Added version checks for libipsecmb and libcrypto to the meson build,
and added directives for detecting IPsec MB or OpenSSL.

Signed-off-by: Brian Dooley <brian.dooley@intel.com>
Acked-by: Kai Ji <kai.ji@intel.com>
Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---
v5:
Remove ipsec mb define and add previous ack
v4:
Rebase and fix conflicts
v3:
Fix checkpatch warnings by replacing return with goto
v2:
OpenSSL code reintroduced as a fallback feature if Intel IPsec MB 1.4
not available
---
 doc/guides/cryptodevs/qat.rst                |  14 +-
 drivers/common/qat/meson.build               |  30 +-
 drivers/common/qat/qat_device.c              |   1 -
 drivers/common/qat/qat_device.h              |   3 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  10 +
 drivers/crypto/qat/qat_sym.c                 |   9 +-
 drivers/crypto/qat/qat_sym.h                 |  27 +
 drivers/crypto/qat/qat_sym_session.c         | 875 ++++++++++---------
 drivers/crypto/qat/qat_sym_session.h         |  14 +
 9 files changed, 542 insertions(+), 441 deletions(-)
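
For readers skimming the diff: below is a minimal, self-contained sketch (not
part of the patch) of the DOCSIS runt-block path once it moves onto IPsec MB.
The IMB calls and the split between session setup and per-packet work mirror
ipsec_mb_ctx_init() and bpi_cipher_ipsec() from the patch; the standalone
main(), the zeroed key/IV and the buffer names are illustrative only. It
assumes intel-ipsec-mb >= 1.4 and linking with -lIPSec_MB.

#include <stdint.h>
#include <intel-ipsec-mb.h>

int main(void)
{
	IMB_MGR *mb_mgr = alloc_mb_mgr(0);
	DECLARE_ALIGNED(uint32_t expkey[4 * 15], 16);
	DECLARE_ALIGNED(uint32_t dust[4 * 15], 16);
	uint8_t key[16] = {0};               /* session AES-128 DOCSIS key */
	uint8_t iv[16] = {0};                /* last full ciphertext block */
	uint8_t runt_in[16] = {0}, runt_out[16];

	if (mb_mgr == NULL)
		return 1;
	init_mb_mgr_auto(mb_mgr, NULL);

	/* Done once at session setup in the patch (ipsec_mb_ctx_init):
	 * expand the key and keep the schedule with the session. */
	IMB_AES_KEYEXP_128(mb_mgr, key, expkey, dust);

	/* Done per packet (bpi_cipher_ipsec): handle the partial "runt"
	 * block with a single CFB operation; length may be < 16 bytes. */
	IMB_AES128_CFB_ONE(mb_mgr, runt_out, runt_in, (uint64_t *)iv,
			expkey, sizeof(runt_in));

	free_mb_mgr(mb_mgr);
	return 0;
}

The point of the change is that the key schedule is expanded once and stored in
the session (expkey/dust/mb_mgr fields added in qat_sym_session.h), so the hot
path is a single CFB-one call instead of going through the OpenSSL BPI context.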
  

Comments

Power, Ciara June 8, 2023, 12:25 p.m. UTC | #1
> -----Original Message-----
> From: Brian Dooley <brian.dooley@intel.com>
> Sent: Tuesday 6 June 2023 11:28
> To: Ji, Kai <kai.ji@intel.com>
> Cc: dev@dpdk.org; gakhil@marvell.com; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Dooley, Brian <brian.dooley@intel.com>
> Subject: [PATCH v5] crypto/qat: default to IPsec MB for computations
> 
> Pre and post computations currently use the OpenSSL library by default.
> This patch changes the default option to Intel IPsec MB library version
> 1.4 for the required computations. If this version of IPsec is not met
> it will fallback to use OpenSSL.
> 
> Added version checks for libipsecmb and libcrypto into meson build.
> Added directives for detecting IPsec MB or OpenSSL.
> 
> Signed-off-by: Brian Dooley <brian.dooley@intel.com>
> Acked-by: Kai Ji <kai.ji@intel.com>
> Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
> ---
> v5:
> Remove ipsec mb define and add previous ack
> v4:
> Rebase and fix conflicts
> v3:
> Fix checkpatch warnings by replacing return with goto
> v2:
> OpenSSL code reintroduced as a fallback feature if Intel IPsec MB 1.4
> not available
<snip>

Acked-by: Ciara Power <ciara.power@intel.com>
  
Akhil Goyal June 14, 2023, 6:23 p.m. UTC | #2
> -IMB_required_ver = '1.2.0'
> +IMB_required_ver = '1.4.0'
>  IMB_header = '#include<intel-ipsec-mb.h>'
>  if arch_subdir == 'arm'
>      IMB_header = '#include<ipsec-mb.h>'
>  endif

I believe there are compilation issues with the Arm repo for the 1.4 version.
It would be better to fix that first before making this the default.

>  libipsecmb = cc.find_library('IPSec_MB', required: false)
> -libcrypto_3 = dependency('libcrypto', required: false,
> -    method: 'pkg-config', version : '>=3.0.0')
> -if libipsecmb.found() and libcrypto_3.found()
> +if libipsecmb.found()
>      # version comes with quotes, so we split based on " and take the middle
>      imb_ver = cc.get_define('IMB_VERSION_STR',
>          prefix : IMB_header).split('"')[1]
> 
>      if (imb_ver.version_compare('>=' + IMB_required_ver))
>          ext_deps += libipsecmb
> -        dpdk_conf.set('RTE_QAT_LIBIPSECMB', true)
> +    elif libcrypto.found()
> +        ext_deps += libcrypto
> +        dpdk_conf.set('RTE_QAT_OPENSSL', true)
> +    else
> +        qat_crypto = false
> +        dpdk_drvs_disabled += qat_crypto_path
> +        set_variable(qat_crypto_path.underscorify() + '_disable_reason',
> +            'missing dependency, libipsecmb or libcrypto')
>      endif
> +elif libcrypto.found()
> +    ext_deps += libcrypto
> +    dpdk_conf.set('RTE_QAT_OPENSSL', true)
> +else
> +    qat_crypto = false
> +    dpdk_drvs_disabled += qat_crypto_path
> +    set_variable(qat_crypto_path.underscorify() + '_disable_reason',
> +        'missing dependency, libipsecmb or libcrypto')
>  endif
> 
>  # The driver should not build if both compression and crypto are disabled
> @@ -103,6 +110,5 @@ if qat_crypto
>          sources += files(join_paths(qat_crypto_relpath, f))
>      endforeach
>      deps += ['security']
> -    ext_deps += libcrypto
>      cflags += ['-DBUILD_QAT_SYM', '-DBUILD_QAT_ASYM']
>  endif
> diff --git a/drivers/common/qat/qat_device.c
> b/drivers/common/qat/qat_device.c
> index 0479175b65..20e56b5cf2 100644
> --- a/drivers/common/qat/qat_device.c
> +++ b/drivers/common/qat/qat_device.c
> @@ -371,7 +371,6 @@ static int qat_pci_probe(struct rte_pci_driver *pci_drv
> __rte_unused,
>  	struct qat_pci_device *qat_pci_dev;
>  	struct qat_dev_hw_spec_funcs *ops_hw;
>  	struct qat_dev_cmd_param qat_dev_cmd_param[] = {
> -			{ QAT_IPSEC_MB_LIB, 0 },
>  			{ SYM_ENQ_THRESHOLD_NAME, 0 },
>  			{ ASYM_ENQ_THRESHOLD_NAME, 0 },
>  			{ COMP_ENQ_THRESHOLD_NAME, 0 },
> diff --git a/drivers/common/qat/qat_device.h
> b/drivers/common/qat/qat_device.h
> index 4188474dde..0cfe8654b1 100644
> --- a/drivers/common/qat/qat_device.h
> +++ b/drivers/common/qat/qat_device.h
> @@ -17,13 +17,12 @@
> 
>  #define QAT_DEV_NAME_MAX_LEN	64
> 
> -#define QAT_IPSEC_MB_LIB "qat_ipsec_mb_lib"
>  #define SYM_ENQ_THRESHOLD_NAME "qat_sym_enq_threshold"
>  #define ASYM_ENQ_THRESHOLD_NAME "qat_asym_enq_threshold"
>  #define COMP_ENQ_THRESHOLD_NAME "qat_comp_enq_threshold"
>  #define SYM_CIPHER_CRC_ENABLE_NAME "qat_sym_cipher_crc_enable"
>  #define QAT_CMD_SLICE_MAP "qat_cmd_slice_disable"
> -#define QAT_CMD_SLICE_MAP_POS	5
> +#define QAT_CMD_SLICE_MAP_POS	4
>  #define MAX_QP_THRESHOLD_SIZE	32
> 
>  /**
> diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
> b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
> index e8e92e22d4..7776763356 100644
> --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
> +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
> @@ -82,8 +82,13 @@ qat_bpicipher_preprocess(struct qat_sym_session *ctx,
>  			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-
> process:",
>  			dst, last_block_len);
>  #endif
> +#ifdef RTE_QAT_OPENSSL
>  		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
>  				last_block_len, ctx->bpi_ctx);
> +#else
> +		bpi_cipher_ipsec(last_block, dst, iv, last_block_len, ctx-
> >expkey,
> +			ctx->mb_mgr, ctx->docsis_key_len);
> +#endif
>  #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
>  		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
>  			last_block, last_block_len);
> @@ -231,7 +236,12 @@ qat_sym_convert_op_to_vec_cipher(struct
> rte_crypto_op *op,
>  		cipher_ofs = op->sym->cipher.data.offset >> 3;
>  		break;
>  	case 0:
> +
> +#ifdef RTE_QAT_OPENSSL
>  		if (ctx->bpi_ctx) {
> +#else
> +		if (ctx->mb_mgr) {
> +#endif
>  			/* DOCSIS - only send complete blocks to device.
>  			 * Process any partial block using CFB mode.
>  			 * Even if 0 complete blocks, still send this to device
> diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
> index 345c845325..b93908f007 100644
> --- a/drivers/crypto/qat/qat_sym.c
> +++ b/drivers/crypto/qat/qat_sym.c
> @@ -1,5 +1,5 @@
>  /* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2015-2022 Intel Corporation
> + * Copyright(c) 2015-2023 Intel Corporation
>   */
> 
>  #include <openssl/evp.h>
> @@ -16,7 +16,6 @@
>  #include "qat_qp.h"
> 
>  uint8_t qat_sym_driver_id;
> -int qat_ipsec_mb_lib;
> 
>  struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
> 
> @@ -110,7 +109,11 @@ qat_sym_build_request(void *in_op, uint8_t
> *out_msg,
>  			struct rte_cryptodev *cdev;
>  			struct qat_cryptodev_private *internals;
> 
> +#ifdef RTE_QAT_OPENSSL
>  			if (unlikely(ctx->bpi_ctx == NULL)) {
> +#else
> +			if (unlikely(ctx->mb_mgr == NULL)) {
> +#endif
>  				QAT_DP_LOG(ERR, "QAT PMD only supports
> security"
>  						" operation requests for"
>  						" DOCSIS, op (%p) is not for"
> @@ -283,8 +286,6 @@ qat_sym_dev_create(struct qat_pci_device
> *qat_pci_dev,
>  				SYM_CIPHER_CRC_ENABLE_NAME))
>  			internals->cipher_crc_offload_enable =
>  					qat_dev_cmd_param[i].val;
> -		if (!strcmp(qat_dev_cmd_param[i].name, QAT_IPSEC_MB_LIB))
> -			qat_ipsec_mb_lib = qat_dev_cmd_param[i].val;
>  		if (!strcmp(qat_dev_cmd_param[i].name,
> QAT_CMD_SLICE_MAP))
>  			slice_map = qat_dev_cmd_param[i].val;
>  		i++;
> diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
> index 3d841d0eba..341016dcf3 100644
> --- a/drivers/crypto/qat/qat_sym.h
> +++ b/drivers/crypto/qat/qat_sym.h
> @@ -164,6 +164,20 @@ bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
>  	return -EINVAL;
>  }
> 
> +#ifndef RTE_QAT_OPENSSL
> +static __rte_always_inline void
> +bpi_cipher_ipsec(uint8_t *src, uint8_t *dst, uint8_t *iv, int srclen,
> +		uint64_t *expkey, IMB_MGR *m, uint8_t docsis_key_len)
> +{
> +	if (docsis_key_len == ICP_QAT_HW_AES_128_KEY_SZ)
> +		IMB_AES128_CFB_ONE(m, dst, src, (uint64_t *)iv, expkey,
> srclen);
> +	else if (docsis_key_len == ICP_QAT_HW_AES_256_KEY_SZ)
> +		IMB_AES256_CFB_ONE(m, dst, src, (uint64_t *)iv, expkey,
> srclen);
> +	else if (docsis_key_len == ICP_QAT_HW_DES_KEY_SZ)
> +		des_cfb_one(dst, src, (uint64_t *)iv, expkey, srclen);
> +}
> +#endif
> +
>  static inline uint32_t
>  qat_bpicipher_postprocess(struct qat_sym_session *ctx,
>  				struct rte_crypto_op *op)
> @@ -208,8 +222,13 @@ qat_bpicipher_postprocess(struct qat_sym_session
> *ctx,
>  				"BPI: dst before post-process:",
>  				dst, last_block_len);
>  #endif
> +#ifdef RTE_QAT_OPENSSL
>  		bpi_cipher_encrypt(last_block, dst, iv, block_len,
>  				last_block_len, ctx->bpi_ctx);
> +#else
> +		bpi_cipher_ipsec(last_block, dst, iv, last_block_len, ctx-
> >expkey,
> +			ctx->mb_mgr, ctx->docsis_key_len);
> +#endif
>  #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
>  		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
>  				last_block, last_block_len);
> @@ -280,7 +299,11 @@ qat_sym_preprocess_requests(void **ops, uint16_t
> nb_ops)
>  		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
>  			ctx = SECURITY_GET_SESS_PRIV(op->sym->session);
> 
> +#ifdef RTE_QAT_OPENSSL
>  			if (ctx == NULL || ctx->bpi_ctx == NULL)
> +#else
> +			if (ctx == NULL || ctx->mb_mgr == NULL)
> +#endif
>  				continue;
> 
>  			if (ctx->qat_cmd !=
> ICP_QAT_FW_LA_CMD_CIPHER_CRC)
> @@ -329,7 +352,11 @@ qat_sym_process_response(void **op, uint8_t *resp,
> void *op_cookie,
>  	} else {
>  		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
> 
> +#ifdef RTE_QAT_OPENSSL
>  		if (sess->bpi_ctx) {
> +#else
> +		if (sess->mb_mgr) {
> +#endif
>  			qat_bpicipher_postprocess(sess, rx_op);
>  #ifdef RTE_LIB_SECURITY
>  			if (is_docsis_sec && sess->qat_cmd !=
> diff --git a/drivers/crypto/qat/qat_sym_session.c
> b/drivers/crypto/qat/qat_sym_session.c
> index 9babf13b66..da9a50dd49 100644
> --- a/drivers/crypto/qat/qat_sym_session.c
> +++ b/drivers/crypto/qat/qat_sym_session.c
> @@ -9,8 +9,7 @@
>  #include <openssl/md5.h>	/* Needed to calculate pre-compute values */
>  #include <openssl/evp.h>	/* Needed for bpi runt block processing */
> 
> -#ifdef RTE_QAT_LIBIPSECMB
> -#define NO_COMPAT_IMB_API_053
> +#ifndef RTE_QAT_OPENSSL
>  #if defined(RTE_ARCH_ARM)
>  #include <ipsec-mb.h>
>  #else
> @@ -34,6 +33,7 @@
>  #include "qat_sym_session.h"
>  #include "qat_sym.h"
> 
> +#ifdef RTE_QAT_OPENSSL
>  #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
>  #include <openssl/provider.h>
> 
> @@ -66,8 +66,7 @@ static void ossl_legacy_provider_unload(void)
>  	OSSL_PROVIDER_unload(default_lib);
>  }
>  #endif
> -
> -extern int qat_ipsec_mb_lib;
> +#endif
> 
>  #define ETH_CRC32_POLYNOMIAL    0x04c11db7
>  #define ETH_CRC32_INIT_VAL      0xffffffff
> @@ -146,6 +145,7 @@ qat_sym_session_finalize(struct qat_sym_session
> *session)
>  	qat_sym_session_init_common_hdr(session);
>  }
> 
> +#ifdef RTE_QAT_OPENSSL
>  /** Frees a context previously created
>   *  Depends on openssl libcrypto
>   */
> @@ -196,6 +196,51 @@ bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm
> cryptodev_algo,
>  	}
>  	return ret;
>  }
> +#endif
> +
> +#ifndef RTE_QAT_OPENSSL
> +/** Creates a context in either AES or DES in ECB mode
> + */
> +static int
> +ipsec_mb_ctx_init(const uint8_t *key, uint16_t key_length,
> +		enum rte_crypto_cipher_algorithm cryptodev_algo,
> +		uint64_t *expkey, uint32_t *dust, IMB_MGR **m)
> +{
> +	int ret;
> +
> +	*m = alloc_mb_mgr(0);
> +	if (*m == NULL)
> +		return -ENOMEM;
> +
> +	init_mb_mgr_auto(*m, NULL);
> +
> +	if (cryptodev_algo == RTE_CRYPTO_CIPHER_AES_DOCSISBPI) {
> +		if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
> +			IMB_AES_KEYEXP_128(*m, key, expkey, dust);
> +		else if (key_length == ICP_QAT_HW_AES_256_KEY_SZ)
> +			IMB_AES_KEYEXP_256(*m, key, expkey, dust);
> +		else {
> +			ret = -EFAULT;
> +			goto error_out;
> +		}
> +	} else if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI) {
> +		if (key_length == ICP_QAT_HW_DES_KEY_SZ)
> +			IMB_DES_KEYSCHED(*m, (uint64_t *)expkey, key);
> +		else {
> +			ret = -EFAULT;
> +			goto error_out;
> +		}
> +	}
> +	return 0;
> +
> +error_out:
> +	if (*m) {
> +		free_mb_mgr(*m);
> +		*m = NULL;
> +	}
> +	return ret;
> +}
> +#endif
> 
>  static int
>  qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
> @@ -245,8 +290,13 @@ qat_sym_session_clear(struct rte_cryptodev *dev
> __rte_unused,
>  {
>  	struct qat_sym_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
> 
> +#ifdef RTE_QAT_OPENSSL
>  	if (s->bpi_ctx)
>  		bpi_cipher_ctx_free(s->bpi_ctx);
> +#else
> +	if (s->mb_mgr)
> +		free_mb_mgr(s->mb_mgr);
> +#endif
>  }
> 
>  static int
> @@ -409,12 +459,23 @@ qat_sym_session_configure_cipher(struct
> rte_cryptodev *dev,
>  		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
>  		break;
>  	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
> +#ifdef RTE_QAT_OPENSSL
>  		ret = bpi_cipher_ctx_init(
>  					cipher_xform->algo,
>  					cipher_xform->op,
>  					cipher_xform->key.data,
>  					cipher_xform->key.length,
>  					&session->bpi_ctx);
> +#else
> +		session->docsis_key_len = cipher_xform->key.length;
> +		ret = ipsec_mb_ctx_init(
> +					cipher_xform->key.data,
> +					cipher_xform->key.length,
> +					cipher_xform->algo,
> +					session->expkey,
> +					session->dust,
> +					&session->mb_mgr);
> +#endif
>  		if (ret != 0) {
>  			QAT_LOG(ERR, "failed to create DES BPI ctx");
>  			goto error_out;
> @@ -428,12 +489,23 @@ qat_sym_session_configure_cipher(struct
> rte_cryptodev *dev,
>  		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
>  		break;
>  	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
> +#ifdef RTE_QAT_OPENSSL
>  		ret = bpi_cipher_ctx_init(
>  					cipher_xform->algo,
>  					cipher_xform->op,
>  					cipher_xform->key.data,
>  					cipher_xform->key.length,
>  					&session->bpi_ctx);
> +#else
> +		session->docsis_key_len = cipher_xform->key.length;
> +		ret = ipsec_mb_ctx_init(
> +					cipher_xform->key.data,
> +					cipher_xform->key.length,
> +					cipher_xform->algo,
> +					session->expkey,
> +					session->dust,
> +					&session->mb_mgr);
> +#endif
>  		if (ret != 0) {
>  			QAT_LOG(ERR, "failed to create AES BPI ctx");
>  			goto error_out;
> @@ -519,10 +591,18 @@ qat_sym_session_configure_cipher(struct
> rte_cryptodev *dev,
>  	return 0;
> 
>  error_out:
> +#ifdef RTE_QAT_OPENSSL
>  	if (session->bpi_ctx) {
>  		bpi_cipher_ctx_free(session->bpi_ctx);
>  		session->bpi_ctx = NULL;
>  	}
> +#else
> +	if (session->mb_mgr) {
> +		free_mb_mgr(session->mb_mgr);
> +		session->mb_mgr = NULL;
> +	}
> +
> +#endif
>  	return ret;
>  }
> 
> @@ -533,8 +613,10 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
>  {
>  	int ret;
> 
> +#ifdef RTE_QAT_OPENSSL
>  #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
>  	ossl_legacy_provider_load();
> +#endif
>  #endif
>  	ret = qat_sym_session_set_parameters(dev, xform,
>  			CRYPTODEV_GET_SYM_SESS_PRIV(sess),
> @@ -546,8 +628,10 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
>  		return ret;
>  	}
> 
> +#ifdef RTE_QAT_OPENSSL
>  # if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
>  	ossl_legacy_provider_unload();
> +# endif
>  # endif
>  	return 0;
>  }
> @@ -1209,57 +1293,91 @@ static int qat_hash_get_block_size(enum
> icp_qat_hw_auth_algo qat_hash_alg)
>  #define HMAC_OPAD_VALUE	0x5c
>  #define HASH_XCBC_PRECOMP_KEY_NUM 3
> 
> -static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
> +#ifdef RTE_QAT_OPENSSL
> +static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
> +{
> +	SHA_CTX ctx;
> 
> -#ifdef RTE_QAT_LIBIPSECMB
> -static int aes_ipsecmb_job(uint8_t *in, uint8_t *out, IMB_MGR *m,
> -		const uint8_t *key, uint16_t auth_keylen)
> +	if (!SHA1_Init(&ctx))
> +		return -EFAULT;
> +	SHA1_Transform(&ctx, data_in);
> +	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
> +	return 0;
> +}
> +
> +static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
>  {
> -	int err;
> -	struct IMB_JOB *job;
> -	DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
> -	DECLARE_ALIGNED(uint32_t dust[4*15], 16);
> +	SHA256_CTX ctx;
> 
> -	if (auth_keylen == ICP_QAT_HW_AES_128_KEY_SZ)
> -		IMB_AES_KEYEXP_128(m, key, expkey, dust);
> -	else if (auth_keylen == ICP_QAT_HW_AES_192_KEY_SZ)
> -		IMB_AES_KEYEXP_192(m, key, expkey, dust);
> -	else if (auth_keylen == ICP_QAT_HW_AES_256_KEY_SZ)
> -		IMB_AES_KEYEXP_256(m, key, expkey, dust);
> -	else
> +	if (!SHA224_Init(&ctx))
>  		return -EFAULT;
> +	SHA256_Transform(&ctx, data_in);
> +	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
> +	return 0;
> +}
> 
> -	job = IMB_GET_NEXT_JOB(m);
> +static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
> +{
> +	SHA256_CTX ctx;
> 
> -	job->src = in;
> -	job->dst = out;
> -	job->enc_keys = expkey;
> -	job->key_len_in_bytes = auth_keylen;
> -	job->msg_len_to_cipher_in_bytes = 16;
> -	job->iv_len_in_bytes = 0;
> -	job->cipher_direction = IMB_DIR_ENCRYPT;
> -	job->cipher_mode = IMB_CIPHER_ECB;
> -	job->hash_alg = IMB_AUTH_NULL;
> +	if (!SHA256_Init(&ctx))
> +		return -EFAULT;
> +	SHA256_Transform(&ctx, data_in);
> +	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
> +	return 0;
> +}
> 
> -	while (IMB_FLUSH_JOB(m) != NULL)
> -		;
> +static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
> +{
> +	SHA512_CTX ctx;
> 
> -	job = IMB_SUBMIT_JOB(m);
> -	if (job) {
> -		if (job->status == IMB_STATUS_COMPLETED)
> -			return 0;
> -	}
> +	if (!SHA384_Init(&ctx))
> +		return -EFAULT;
> +	SHA512_Transform(&ctx, data_in);
> +	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
> +	return 0;
> +}
> 
> -	err = imb_get_errno(m);
> -	if (err)
> -		QAT_LOG(ERR, "Error: %s!\n", imb_get_strerror(err));
> +static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
> +{
> +	SHA512_CTX ctx;
> 
> -	return -EFAULT;
> +	if (!SHA512_Init(&ctx))
> +		return -EFAULT;
> +	SHA512_Transform(&ctx, data_in);
> +	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
> +	return 0;
> +}
> +
> +static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
> +{
> +	MD5_CTX ctx;
> +
> +	if (!MD5_Init(&ctx))
> +		return -EFAULT;
> +	MD5_Transform(&ctx, data_in);
> +	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
> +
> +	return 0;
> +}
> +
> +static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
> +{
> +	int i;
> +
> +	derived[0] = base[0] << 1;
> +	for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
> +		derived[i] = base[i] << 1;
> +		derived[i - 1] |= base[i] >> 7;
> +	}
> +
> +	if (base[0] & 0x80)
> +		derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^=
> QAT_AES_CMAC_CONST_RB;
>  }
> 
>  static int
> -partial_hash_compute_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
> -		uint8_t *data_in, uint8_t *data_out, IMB_MGR *m)
> +partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
> +		uint8_t *data_in, uint8_t *data_out)
>  {
>  	int digest_size;
>  	uint8_t digest[qat_hash_get_digest_size(
> @@ -1280,37 +1398,43 @@ partial_hash_compute_ipsec_mb(enum
> icp_qat_hw_auth_algo hash_alg,
> 
>  	switch (hash_alg) {
>  	case ICP_QAT_HW_AUTH_ALGO_SHA1:
> -		IMB_SHA1_ONE_BLOCK(m, data_in, digest);
> +		if (partial_hash_sha1(data_in, digest))
> +			return -EFAULT;
>  		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
>  			*hash_state_out_be32 =
>  				rte_bswap32(*(((uint32_t *)digest)+i));
>  		break;
>  	case ICP_QAT_HW_AUTH_ALGO_SHA224:
> -		IMB_SHA224_ONE_BLOCK(m, data_in, digest);
> +		if (partial_hash_sha224(data_in, digest))
> +			return -EFAULT;
>  		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
>  			*hash_state_out_be32 =
>  				rte_bswap32(*(((uint32_t *)digest)+i));
>  		break;
>  	case ICP_QAT_HW_AUTH_ALGO_SHA256:
> -		IMB_SHA256_ONE_BLOCK(m, data_in, digest);
> +		if (partial_hash_sha256(data_in, digest))
> +			return -EFAULT;
>  		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
>  			*hash_state_out_be32 =
>  				rte_bswap32(*(((uint32_t *)digest)+i));
>  		break;
>  	case ICP_QAT_HW_AUTH_ALGO_SHA384:
> -		IMB_SHA384_ONE_BLOCK(m, data_in, digest);
> +		if (partial_hash_sha384(data_in, digest))
> +			return -EFAULT;
>  		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
>  			*hash_state_out_be64 =
>  				rte_bswap64(*(((uint64_t *)digest)+i));
>  		break;
>  	case ICP_QAT_HW_AUTH_ALGO_SHA512:
> -		IMB_SHA512_ONE_BLOCK(m, data_in, digest);
> +		if (partial_hash_sha512(data_in, digest))
> +			return -EFAULT;
>  		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
>  			*hash_state_out_be64 =
>  				rte_bswap64(*(((uint64_t *)digest)+i));
>  		break;
>  	case ICP_QAT_HW_AUTH_ALGO_MD5:
> -		IMB_MD5_ONE_BLOCK(m, data_in, data_out);
> +		if (partial_hash_md5(data_in, data_out))
> +			return -EFAULT;
>  		break;
>  	default:
>  		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
> @@ -1320,108 +1444,150 @@ partial_hash_compute_ipsec_mb(enum
> icp_qat_hw_auth_algo hash_alg,
>  	return 0;
>  }
> 
> -static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo
> hash_alg,
> +static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
> +
> +static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
>  				const uint8_t *auth_key,
>  				uint16_t auth_keylen,
>  				uint8_t *p_state_buf,
>  				uint16_t *p_state_len,
>  				uint8_t aes_cmac)
>  {
> -	int block_size = 0;
> +	int block_size;
>  	uint8_t
> ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
>  	uint8_t
> opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
> -	int i, ret = 0;
> -	uint8_t in[ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ];
> -
> -	IMB_MGR *m;
> -	m = alloc_mb_mgr(0);
> -	if (m == NULL)
> -		return -ENOMEM;
> +	int i;
> 
> -	init_mb_mgr_auto(m, NULL);
> -	memset(in, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
>  	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
> 
>  		/* CMAC */
>  		if (aes_cmac) {
> +			AES_KEY enc_key;
> +			uint8_t *in = NULL;
> +			uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
>  			uint8_t *k1, *k2;
> +
>  			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
> -			rte_memcpy(p_state_buf, auth_key, auth_keylen);
> 
> -			DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
> -			DECLARE_ALIGNED(uint32_t dust[4*15], 16);
> -			IMB_AES_KEYEXP_128(m, p_state_buf, expkey, dust);
> -			k1 = p_state_buf +
> ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
> -			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
> +			in = rte_zmalloc("AES CMAC K1",
> +					 ICP_QAT_HW_AES_128_KEY_SZ, 16);
> 
> -			IMB_AES_CMAC_SUBKEY_GEN_128(m, expkey, k1, k2);
> -			*p_state_len =
> ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
> -			goto out;
> -		}
> +			if (in == NULL) {
> +				QAT_LOG(ERR, "Failed to alloc memory");
> +				return -ENOMEM;
> +			}
> 
> -		static uint8_t qat_aes_xcbc_key_seed[
> -				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
> -			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
> -			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
> -			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
> -			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
> -			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
> -			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
> -		};
> +			rte_memcpy(in, AES_CMAC_SEED,
> +				   ICP_QAT_HW_AES_128_KEY_SZ);
> +			rte_memcpy(p_state_buf, auth_key, auth_keylen);
> 
> -		uint8_t *input = in;
> -		uint8_t *out = p_state_buf;
> -		rte_memcpy(input, qat_aes_xcbc_key_seed,
> -				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
> -		for (i = 0; i < HASH_XCBC_PRECOMP_KEY_NUM; i++) {
> -			if (aes_ipsecmb_job(input, out, m, auth_key,
> auth_keylen)) {
> -				memset(input -
> -				   (i * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
> -				  0,
> ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
> -				ret = -EFAULT;
> -				goto out;
> +			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
> +				&enc_key) != 0) {
> +				rte_free(in);
> +				return -EFAULT;
>  			}
> 
> -			input += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
> -			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
> -		}
> -		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
> -		goto out;
> +			AES_encrypt(in, k0, &enc_key);
> 
> -	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
> -		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
> -		uint8_t *out = p_state_buf;
> +			k1 = p_state_buf +
> ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
> +			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
> 
> -		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
> -				ICP_QAT_HW_GALOIS_LEN_A_SZ +
> -				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
> -		if (aes_ipsecmb_job(in, out, m, auth_key, auth_keylen)) {
> -			ret = -EFAULT;
> -			goto out;
> -		}
> +			aes_cmac_key_derive(k0, k1);
> +			aes_cmac_key_derive(k1, k2);
> 
> -		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
> +			memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
> +			*p_state_len =
> ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
> +			rte_free(in);
> +			goto out;
> +		} else {
> +			static uint8_t qat_aes_xcbc_key_seed[
> +
> 	ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
> +				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
> +				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
> +				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
> +				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
> +				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
> +				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
> +			};
> +
> +			uint8_t *in = NULL;
> +			uint8_t *out = p_state_buf;
> +			int x;
> +			AES_KEY enc_key;
> +
> +			in = rte_zmalloc("working mem for key",
> +
> 	ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
> +			if (in == NULL) {
> +				QAT_LOG(ERR, "Failed to alloc memory");
> +				return -ENOMEM;
> +			}
> +
> +			rte_memcpy(in, qat_aes_xcbc_key_seed,
> +
> 	ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
> +			for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
> +				if (AES_set_encrypt_key(auth_key,
> +							auth_keylen << 3,
> +							&enc_key) != 0) {
> +					rte_free(in -
> +					  (x *
> ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
> +					memset(out -
> +					   (x *
> ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
> +					  0,
> ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
> +					return -EFAULT;
> +				}
> +				AES_encrypt(in, out, &enc_key);
> +				in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
> +				out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
> +			}
> +			*p_state_len =
> ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
> +			rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
> +			goto out;
> +		}
> +
> +	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
> +		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
> +		uint8_t *in = NULL;
> +		uint8_t *out = p_state_buf;
> +		AES_KEY enc_key;
> +
> +		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
> +				ICP_QAT_HW_GALOIS_LEN_A_SZ +
> +				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
> +		in = rte_zmalloc("working mem for key",
> +				ICP_QAT_HW_GALOIS_H_SZ, 16);
> +		if (in == NULL) {
> +			QAT_LOG(ERR, "Failed to alloc memory");
> +			return -ENOMEM;
> +		}
> +
> +		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
> +		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
> +			&enc_key) != 0) {
> +			return -EFAULT;
> +		}
> +		AES_encrypt(in, out, &enc_key);
> +		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
>  				ICP_QAT_HW_GALOIS_LEN_A_SZ +
>  				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
> -		goto out;
> +		rte_free(in);
> +		return 0;
>  	}
> 
>  	block_size = qat_hash_get_block_size(hash_alg);
> -	if (block_size < 0) {
> -		free_mb_mgr(m);
> +	if (block_size < 0)
>  		return block_size;
> -	}
> +	/* init ipad and opad from key and xor with fixed values */
> +	memset(ipad, 0, block_size);
> +	memset(opad, 0, block_size);
> 
>  	if (auth_keylen > (unsigned int)block_size) {
>  		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
> -		ret = -EFAULT;
> -		goto out;
> +		return -EFAULT;
>  	}
> -	/* init ipad and opad from key and xor with fixed values */
> -	memset(ipad, 0, block_size);
> -	memset(opad, 0, block_size);
> +
>  	RTE_VERIFY(auth_keylen <= sizeof(ipad));
>  	RTE_VERIFY(auth_keylen <= sizeof(opad));
> +
>  	rte_memcpy(ipad, auth_key, auth_keylen);
>  	rte_memcpy(opad, auth_key, auth_keylen);
> 
> @@ -1433,10 +1599,11 @@ static int
> qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
>  	}
> 
>  	/* do partial hash of ipad and copy to state1 */
> -	if (partial_hash_compute_ipsec_mb(hash_alg, ipad, p_state_buf, m)) {
> +	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
> +		memset(ipad, 0, block_size);
> +		memset(opad, 0, block_size);
>  		QAT_LOG(ERR, "ipad precompute failed");
> -		ret = -EFAULT;
> -		goto out;
> +		return -EFAULT;
>  	}
> 
>  	/*
> @@ -1444,105 +1611,70 @@ static int
> qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
>  	 * Put the partial hash of opad state_len bytes after state1
>  	 */
>  	*p_state_len = qat_hash_get_state1_size(hash_alg);
> -	if (partial_hash_compute_ipsec_mb(hash_alg, opad,
> -				p_state_buf + *p_state_len, m)) {
> +	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
> +		memset(ipad, 0, block_size);
> +		memset(opad, 0, block_size);
>  		QAT_LOG(ERR, "opad precompute failed");
> -		ret = -EFAULT;
> -		goto out;
> +		return -EFAULT;
>  	}
> 
> -out:
>  	/*  don't leave data lying around */
>  	memset(ipad, 0, block_size);
>  	memset(opad, 0, block_size);
> -	free_mb_mgr(m);
> -	return ret;
> -}
> -#endif
> -static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
> -{
> -	SHA_CTX ctx;
> -
> -	if (!SHA1_Init(&ctx))
> -		return -EFAULT;
> -	SHA1_Transform(&ctx, data_in);
> -	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
> -	return 0;
> -}
> -
> -static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
> -{
> -	SHA256_CTX ctx;
> -
> -	if (!SHA224_Init(&ctx))
> -		return -EFAULT;
> -	SHA256_Transform(&ctx, data_in);
> -	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
> -	return 0;
> -}
> -
> -static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
> -{
> -	SHA256_CTX ctx;
> -
> -	if (!SHA256_Init(&ctx))
> -		return -EFAULT;
> -	SHA256_Transform(&ctx, data_in);
> -	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
> +out:
>  	return 0;
>  }
> 
> -static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
> -{
> -	SHA512_CTX ctx;
> -
> -	if (!SHA384_Init(&ctx))
> -		return -EFAULT;
> -	SHA512_Transform(&ctx, data_in);
> -	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
> -	return 0;
> -}
> +#else
> 
> -static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
> +static int aes_ipsecmb_job(uint8_t *in, uint8_t *out, IMB_MGR *m,
> +		const uint8_t *key, uint16_t auth_keylen)
>  {
> -	SHA512_CTX ctx;
> +	int err;
> +	struct IMB_JOB *job;
> +	DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
> +	DECLARE_ALIGNED(uint32_t dust[4*15], 16);
> 
> -	if (!SHA512_Init(&ctx))
> +	if (auth_keylen == ICP_QAT_HW_AES_128_KEY_SZ)
> +		IMB_AES_KEYEXP_128(m, key, expkey, dust);
> +	else if (auth_keylen == ICP_QAT_HW_AES_192_KEY_SZ)
> +		IMB_AES_KEYEXP_192(m, key, expkey, dust);
> +	else if (auth_keylen == ICP_QAT_HW_AES_256_KEY_SZ)
> +		IMB_AES_KEYEXP_256(m, key, expkey, dust);
> +	else
>  		return -EFAULT;
> -	SHA512_Transform(&ctx, data_in);
> -	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
> -	return 0;
> -}
> 
> -static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
> -{
> -	MD5_CTX ctx;
> -
> -	if (!MD5_Init(&ctx))
> -		return -EFAULT;
> -	MD5_Transform(&ctx, data_in);
> -	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
> +	job = IMB_GET_NEXT_JOB(m);
> 
> -	return 0;
> -}
> +	job->src = in;
> +	job->dst = out;
> +	job->enc_keys = expkey;
> +	job->key_len_in_bytes = auth_keylen;
> +	job->msg_len_to_cipher_in_bytes = 16;
> +	job->iv_len_in_bytes = 0;
> +	job->cipher_direction = IMB_DIR_ENCRYPT;
> +	job->cipher_mode = IMB_CIPHER_ECB;
> +	job->hash_alg = IMB_AUTH_NULL;
> 
> -static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
> -{
> -	int i;
> +	while (IMB_FLUSH_JOB(m) != NULL)
> +		;
> 
> -	derived[0] = base[0] << 1;
> -	for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
> -		derived[i] = base[i] << 1;
> -		derived[i - 1] |= base[i] >> 7;
> +	job = IMB_SUBMIT_JOB(m);
> +	if (job) {
> +		if (job->status == IMB_STATUS_COMPLETED)
> +			return 0;
>  	}
> 
> -	if (base[0] & 0x80)
> -		derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^=
> QAT_AES_CMAC_CONST_RB;
> +	err = imb_get_errno(m);
> +	if (err)
> +		QAT_LOG(ERR, "Error: %s!\n", imb_get_strerror(err));
> +
> +	return -EFAULT;
>  }
> 
>  static int
> -partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
> -		uint8_t *data_in, uint8_t *data_out)
> +partial_hash_compute_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
> +		uint8_t *data_in, uint8_t *data_out, IMB_MGR *m)
>  {
>  	int digest_size;
>  	uint8_t digest[qat_hash_get_digest_size(
> @@ -1563,43 +1695,37 @@ partial_hash_compute(enum
> icp_qat_hw_auth_algo hash_alg,
> 
>  	switch (hash_alg) {
>  	case ICP_QAT_HW_AUTH_ALGO_SHA1:
> -		if (partial_hash_sha1(data_in, digest))
> -			return -EFAULT;
> +		IMB_SHA1_ONE_BLOCK(m, data_in, digest);
>  		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
>  			*hash_state_out_be32 =
>  				rte_bswap32(*(((uint32_t *)digest)+i));
>  		break;
>  	case ICP_QAT_HW_AUTH_ALGO_SHA224:
> -		if (partial_hash_sha224(data_in, digest))
> -			return -EFAULT;
> +		IMB_SHA224_ONE_BLOCK(m, data_in, digest);
>  		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
>  			*hash_state_out_be32 =
>  				rte_bswap32(*(((uint32_t *)digest)+i));
>  		break;
>  	case ICP_QAT_HW_AUTH_ALGO_SHA256:
> -		if (partial_hash_sha256(data_in, digest))
> -			return -EFAULT;
> +		IMB_SHA256_ONE_BLOCK(m, data_in, digest);
>  		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
>  			*hash_state_out_be32 =
>  				rte_bswap32(*(((uint32_t *)digest)+i));
>  		break;
>  	case ICP_QAT_HW_AUTH_ALGO_SHA384:
> -		if (partial_hash_sha384(data_in, digest))
> -			return -EFAULT;
> +		IMB_SHA384_ONE_BLOCK(m, data_in, digest);
>  		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
>  			*hash_state_out_be64 =
>  				rte_bswap64(*(((uint64_t *)digest)+i));
>  		break;
>  	case ICP_QAT_HW_AUTH_ALGO_SHA512:
> -		if (partial_hash_sha512(data_in, digest))
> -			return -EFAULT;
> +		IMB_SHA512_ONE_BLOCK(m, data_in, digest);
>  		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
>  			*hash_state_out_be64 =
>  				rte_bswap64(*(((uint64_t *)digest)+i));
>  		break;
>  	case ICP_QAT_HW_AUTH_ALGO_MD5:
> -		if (partial_hash_md5(data_in, data_out))
> -			return -EFAULT;
> +		IMB_MD5_ONE_BLOCK(m, data_in, data_out);
>  		break;
>  	default:
>  		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
> @@ -1609,148 +1735,108 @@ partial_hash_compute(enum
> icp_qat_hw_auth_algo hash_alg,
>  	return 0;
>  }
> 
> -static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
> +static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo
> hash_alg,
>  				const uint8_t *auth_key,
>  				uint16_t auth_keylen,
>  				uint8_t *p_state_buf,
>  				uint16_t *p_state_len,
>  				uint8_t aes_cmac)
>  {
> -	int block_size;
> +	int block_size = 0;
>  	uint8_t
> ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
>  	uint8_t
> opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
> -	int i;
> +	int i, ret = 0;
> +	uint8_t in[ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ];
> +
> +	IMB_MGR *m;
> +	m = alloc_mb_mgr(0);
> +	if (m == NULL)
> +		return -ENOMEM;
> 
> +	init_mb_mgr_auto(m, NULL);
> +	memset(in, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
>  	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
> 
>  		/* CMAC */
>  		if (aes_cmac) {
> -			AES_KEY enc_key;
> -			uint8_t *in = NULL;
> -			uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
>  			uint8_t *k1, *k2;
> -
>  			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
> -
> -			in = rte_zmalloc("AES CMAC K1",
> -					 ICP_QAT_HW_AES_128_KEY_SZ, 16);
> -
> -			if (in == NULL) {
> -				QAT_LOG(ERR, "Failed to alloc memory");
> -				return -ENOMEM;
> -			}
> -
> -			rte_memcpy(in, AES_CMAC_SEED,
> -				   ICP_QAT_HW_AES_128_KEY_SZ);
>  			rte_memcpy(p_state_buf, auth_key, auth_keylen);
> 
> -			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
> -				&enc_key) != 0) {
> -				rte_free(in);
> -				return -EFAULT;
> -			}
> -
> -			AES_encrypt(in, k0, &enc_key);
> -
> +			DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
> +			DECLARE_ALIGNED(uint32_t dust[4*15], 16);
> +			IMB_AES_KEYEXP_128(m, p_state_buf, expkey, dust);
>  			k1 = p_state_buf +
> ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
>  			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
> 
> -			aes_cmac_key_derive(k0, k1);
> -			aes_cmac_key_derive(k1, k2);
> -
> -			memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
> +			IMB_AES_CMAC_SUBKEY_GEN_128(m, expkey, k1, k2);
>  			*p_state_len =
> ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
> -			rte_free(in);
> -			return 0;
> -		} else {
> -			static uint8_t qat_aes_xcbc_key_seed[
> -
> 	ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
> -				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
> -				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
> -				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
> -				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
> -				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
> -				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
> -			};
> +			goto out;
> +		}
> 
> -			uint8_t *in = NULL;
> -			uint8_t *out = p_state_buf;
> -			int x;
> -			AES_KEY enc_key;
> +		static uint8_t qat_aes_xcbc_key_seed[
> +				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
> +			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
> +			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
> +			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
> +			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
> +			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
> +			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
> +		};
> 
> -			in = rte_zmalloc("working mem for key",
> -
> 	ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
> -			if (in == NULL) {
> -				QAT_LOG(ERR, "Failed to alloc memory");
> -				return -ENOMEM;
> +		uint8_t *input = in;
> +		uint8_t *out = p_state_buf;
> +		rte_memcpy(input, qat_aes_xcbc_key_seed,
> +				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
> +		for (i = 0; i < HASH_XCBC_PRECOMP_KEY_NUM; i++) {
> +			if (aes_ipsecmb_job(input, out, m, auth_key,
> auth_keylen)) {
> +				memset(input -
> +				   (i * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
> +				  0,
> ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
> +				ret = -EFAULT;
> +				goto out;
>  			}
> 
> -			rte_memcpy(in, qat_aes_xcbc_key_seed,
> -
> 	ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
> -			for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
> -				if (AES_set_encrypt_key(auth_key,
> -							auth_keylen << 3,
> -							&enc_key) != 0) {
> -					rte_free(in -
> -					  (x *
> ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
> -					memset(out -
> -					   (x *
> ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
> -					  0,
> ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
> -					return -EFAULT;
> -				}
> -				AES_encrypt(in, out, &enc_key);
> -				in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
> -				out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
> -			}
> -			*p_state_len =
> ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
> -			rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
> -			return 0;
> +			input += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
> +			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
>  		}
> +		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
> +		goto out;
> 
>  	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
>  		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
> -		uint8_t *in = NULL;
>  		uint8_t *out = p_state_buf;
> -		AES_KEY enc_key;
> 
>  		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
>  				ICP_QAT_HW_GALOIS_LEN_A_SZ +
>  				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
> -		in = rte_zmalloc("working mem for key",
> -				ICP_QAT_HW_GALOIS_H_SZ, 16);
> -		if (in == NULL) {
> -			QAT_LOG(ERR, "Failed to alloc memory");
> -			return -ENOMEM;
> +		if (aes_ipsecmb_job(in, out, m, auth_key, auth_keylen)) {
> +			ret = -EFAULT;
> +			goto out;
>  		}
> 
> -		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
> -		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
> -			&enc_key) != 0) {
> -			return -EFAULT;
> -		}
> -		AES_encrypt(in, out, &enc_key);
>  		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
>  				ICP_QAT_HW_GALOIS_LEN_A_SZ +
>  				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
> -		rte_free(in);
> -		return 0;
> +		goto out;
>  	}
> 
>  	block_size = qat_hash_get_block_size(hash_alg);
> -	if (block_size < 0)
> +	if (block_size < 0) {
> +		free_mb_mgr(m);
>  		return block_size;
> -	/* init ipad and opad from key and xor with fixed values */
> -	memset(ipad, 0, block_size);
> -	memset(opad, 0, block_size);
> +	}
> 
>  	if (auth_keylen > (unsigned int)block_size) {
>  		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
> -		return -EFAULT;
> +		ret = -EFAULT;
> +		goto out;
>  	}
> -
> +	/* init ipad and opad from key and xor with fixed values */
> +	memset(ipad, 0, block_size);
> +	memset(opad, 0, block_size);
>  	RTE_VERIFY(auth_keylen <= sizeof(ipad));
>  	RTE_VERIFY(auth_keylen <= sizeof(opad));
> -
>  	rte_memcpy(ipad, auth_key, auth_keylen);
>  	rte_memcpy(opad, auth_key, auth_keylen);
> 
> @@ -1762,11 +1848,10 @@ static int qat_sym_do_precomputes(enum
> icp_qat_hw_auth_algo hash_alg,
>  	}
> 
>  	/* do partial hash of ipad and copy to state1 */
> -	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
> -		memset(ipad, 0, block_size);
> -		memset(opad, 0, block_size);
> +	if (partial_hash_compute_ipsec_mb(hash_alg, ipad, p_state_buf, m)) {
>  		QAT_LOG(ERR, "ipad precompute failed");
> -		return -EFAULT;
> +		ret = -EFAULT;
> +		goto out;
>  	}
> 
>  	/*
> @@ -1774,18 +1859,21 @@ static int qat_sym_do_precomputes(enum
> icp_qat_hw_auth_algo hash_alg,
>  	 * Put the partial hash of opad state_len bytes after state1
>  	 */
>  	*p_state_len = qat_hash_get_state1_size(hash_alg);
> -	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
> -		memset(ipad, 0, block_size);
> -		memset(opad, 0, block_size);
> +	if (partial_hash_compute_ipsec_mb(hash_alg, opad,
> +				p_state_buf + *p_state_len, m)) {
>  		QAT_LOG(ERR, "opad precompute failed");
> -		return -EFAULT;
> +		ret = -EFAULT;
> +		goto out;
>  	}
> 
> +out:
>  	/*  don't leave data lying around */
>  	memset(ipad, 0, block_size);
>  	memset(opad, 0, block_size);
> -	return 0;
> +	free_mb_mgr(m);
> +	return ret;
>  }
> +#endif
> 
>  static void
>  qat_sym_session_init_common_hdr(struct qat_sym_session *session)
> @@ -2180,20 +2268,16 @@ int qat_sym_cd_auth_set(struct qat_sym_session
> *cdesc,
>  			break;
>  		}
>  		/* SHA-1 HMAC */
> -		if (qat_ipsec_mb_lib) {
> -#ifdef RTE_QAT_LIBIPSECMB
> -			ret =
> qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA1,
> -				authkey, authkeylen, cdesc->cd_cur_ptr,
> &state1_size,
> -				cdesc->aes_cmac);
> +#ifdef RTE_QAT_OPENSSL
> +		ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
> +			authkeylen, cdesc->cd_cur_ptr, &state1_size,
> +			cdesc->aes_cmac);
> +
>  #else
> -			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
> -			return -EFAULT;
> +		ret =
> qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA1,
> +			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
> +			cdesc->aes_cmac);
>  #endif
> -		} else {
> -			ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
> -				authkeylen, cdesc->cd_cur_ptr, &state1_size,
> -				cdesc->aes_cmac);
> -		}
> 
>  		if (ret) {
>  			QAT_LOG(ERR, "(SHA)precompute failed");
> @@ -2211,21 +2295,15 @@ int qat_sym_cd_auth_set(struct qat_sym_session
> *cdesc,
>  			break;
>  		}
>  		/* SHA-224 HMAC */
> -		if (qat_ipsec_mb_lib) {
> -#ifdef RTE_QAT_LIBIPSECMB
> -			ret =
> qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA224,
> -				authkey, authkeylen, cdesc->cd_cur_ptr,
> &state1_size,
> -				cdesc->aes_cmac);
> +#ifdef RTE_QAT_OPENSSL
> +		ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
> +			authkeylen, cdesc->cd_cur_ptr, &state1_size,
> +			cdesc->aes_cmac);
>  #else
> -			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
> -			return -EFAULT;
> +		ret =
> qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA224,
> +			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
> +			cdesc->aes_cmac);
>  #endif
> -		} else {
> -			ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
> -				authkeylen, cdesc->cd_cur_ptr, &state1_size,
> -				cdesc->aes_cmac);
> -		}
> -
>  		if (ret) {
>  			QAT_LOG(ERR, "(SHA)precompute failed");
>  			return -EFAULT;
> @@ -2242,21 +2320,15 @@ int qat_sym_cd_auth_set(struct qat_sym_session
> *cdesc,
>  			break;
>  		}
>  		/* SHA-256 HMAC */
> -		if (qat_ipsec_mb_lib) {
> -#ifdef RTE_QAT_LIBIPSECMB
> -			ret =
> qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA256,
> -				authkey, authkeylen, cdesc->cd_cur_ptr,
> &state1_size,
> -				cdesc->aes_cmac);
> +#ifdef RTE_QAT_OPENSSL
> +		ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
> +			authkeylen, cdesc->cd_cur_ptr, &state1_size,
> +			cdesc->aes_cmac);
>  #else
> -			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
> -			return -EFAULT;
> +		ret =
> qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA256,
> +			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
> +			cdesc->aes_cmac);
>  #endif
> -		} else {
> -			ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
> -				authkeylen, cdesc->cd_cur_ptr, &state1_size,
> -				cdesc->aes_cmac);
> -		}
> -
>  		if (ret) {
>  			QAT_LOG(ERR, "(SHA)precompute failed");
>  			return -EFAULT;
> @@ -2273,21 +2345,15 @@ int qat_sym_cd_auth_set(struct qat_sym_session
> *cdesc,
>  			break;
>  		}
>  		/* SHA-384 HMAC */
> -		if (qat_ipsec_mb_lib) {
> -#ifdef RTE_QAT_LIBIPSECMB
> -			ret =
> qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA384,
> -				authkey, authkeylen, cdesc->cd_cur_ptr,
> &state1_size,
> -				cdesc->aes_cmac);
> +#ifdef RTE_QAT_OPENSSL
> +		ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
> +			authkeylen, cdesc->cd_cur_ptr, &state1_size,
> +			cdesc->aes_cmac);
>  #else
> -			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
> -			return -EFAULT;
> +		ret =
> qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA384,
> +			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
> +			cdesc->aes_cmac);
>  #endif
> -		} else {
> -			ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
> -				authkeylen, cdesc->cd_cur_ptr, &state1_size,
> -				cdesc->aes_cmac);
> -		}
> -
>  		if (ret) {
>  			QAT_LOG(ERR, "(SHA)precompute failed");
>  			return -EFAULT;
> @@ -2304,21 +2370,15 @@ int qat_sym_cd_auth_set(struct qat_sym_session
> *cdesc,
>  			break;
>  		}
>  		/* SHA-512 HMAC */
> -		if (qat_ipsec_mb_lib) {
> -#ifdef RTE_QAT_LIBIPSECMB
> -			ret =
> qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA512,
> -				authkey, authkeylen, cdesc->cd_cur_ptr,
> &state1_size,
> -				cdesc->aes_cmac);
> +#ifdef RTE_QAT_OPENSSL
> +		ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
> +			authkeylen, cdesc->cd_cur_ptr, &state1_size,
> +			cdesc->aes_cmac);
>  #else
> -			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
> -			return -EFAULT;
> +		ret =
> qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA512,
> +			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
> +			cdesc->aes_cmac);
>  #endif
> -		} else {
> -			ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
> -				authkeylen, cdesc->cd_cur_ptr, &state1_size,
> -				cdesc->aes_cmac);
> -		}
> -
>  		if (ret) {
>  			QAT_LOG(ERR, "(SHA)precompute failed");
>  			return -EFAULT;
> @@ -2354,22 +2414,16 @@ int qat_sym_cd_auth_set(struct qat_sym_session
> *cdesc,
> 
>  		if (cdesc->aes_cmac)
>  			memset(cdesc->cd_cur_ptr, 0, state1_size);
> -		if (qat_ipsec_mb_lib) {
> -#ifdef RTE_QAT_LIBIPSECMB
> -			ret = qat_sym_do_precomputes_ipsec_mb(
> -				ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
> -				authkey, authkeylen, cdesc->cd_cur_ptr +
> state1_size,
> -				&state2_size, cdesc->aes_cmac);
> +#ifdef RTE_QAT_OPENSSL
> +		ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
> +			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
> +			&state2_size, cdesc->aes_cmac);
>  #else
> -			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
> -			return -EFAULT;
> +		ret = qat_sym_do_precomputes_ipsec_mb(
> +			ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
> +			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
> +			&state2_size, cdesc->aes_cmac);
>  #endif
> -		} else {
> -			ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
> -				authkey, authkeylen, cdesc->cd_cur_ptr +
> state1_size,
> -				&state2_size, cdesc->aes_cmac);
> -		}
> -
>  		if (ret) {
>  			cdesc->aes_cmac ? QAT_LOG(ERR,
>  						  "(CMAC)precompute failed")
> @@ -2382,21 +2436,15 @@ int qat_sym_cd_auth_set(struct qat_sym_session
> *cdesc,
>  	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
>  		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
>  		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
> -		if (qat_ipsec_mb_lib) {
> -#ifdef RTE_QAT_LIBIPSECMB
> -			ret = qat_sym_do_precomputes_ipsec_mb(cdesc-
> >qat_hash_alg, authkey,
> -				authkeylen, cdesc->cd_cur_ptr + state1_size,
> -				&state2_size, cdesc->aes_cmac);
> +#ifdef RTE_QAT_OPENSSL
> +		ret = qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
> +			authkeylen, cdesc->cd_cur_ptr + state1_size,
> +			&state2_size, cdesc->aes_cmac);
>  #else
> -			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
> -			return -EFAULT;
> +		ret = qat_sym_do_precomputes_ipsec_mb(cdesc-
> >qat_hash_alg, authkey,
> +			authkeylen, cdesc->cd_cur_ptr + state1_size,
> +			&state2_size, cdesc->aes_cmac);
>  #endif
> -		} else {
> -			ret = qat_sym_do_precomputes(cdesc->qat_hash_alg,
> authkey,
> -				authkeylen, cdesc->cd_cur_ptr + state1_size,
> -				&state2_size, cdesc->aes_cmac);
> -		}
> -
>  		if (ret) {
>  			QAT_LOG(ERR, "(GCM)precompute failed");
>  			return -EFAULT;
> @@ -2453,21 +2501,15 @@ int qat_sym_cd_auth_set(struct qat_sym_session
> *cdesc,
> 
>  		break;
>  	case ICP_QAT_HW_AUTH_ALGO_MD5:
> -		if (qat_ipsec_mb_lib) {
> -#ifdef RTE_QAT_LIBIPSECMB
> -			ret =
> qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_MD5,
> -				authkey, authkeylen, cdesc->cd_cur_ptr,
> &state1_size,
> -				cdesc->aes_cmac);
> +#ifdef RTE_QAT_OPENSSL
> +		ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
> +			authkeylen, cdesc->cd_cur_ptr, &state1_size,
> +			cdesc->aes_cmac);
>  #else
> -			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing");
> -			return -EFAULT;
> +		ret =
> qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_MD5,
> +			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
> +			cdesc->aes_cmac);
>  #endif
> -		} else {
> -			ret =
> qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
> -				authkeylen, cdesc->cd_cur_ptr, &state1_size,
> -				cdesc->aes_cmac);
> -		}
> -
>  		if (ret) {
>  			QAT_LOG(ERR, "(MD5)precompute failed");
>  			return -EFAULT;
> @@ -2902,9 +2944,11 @@ qat_security_session_create(void *dev,
>  		return -EINVAL;
>  	}
> 
> +#ifdef RTE_QAT_OPENSSL
>  #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
>  	if (ossl_legacy_provider_load())
>  		return -EINVAL;
> +#endif
>  #endif
>  	ret = qat_sec_session_set_docsis_parameters(cdev, conf,
>  			sess_private_data,
> SECURITY_GET_SESS_PRIV_IOVA(sess));
> @@ -2913,8 +2957,10 @@ qat_security_session_create(void *dev,
>  		return ret;
>  	}
> 
> +#ifdef RTE_QAT_OPENSSL
>  #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
>  	ossl_legacy_provider_unload();
> +#endif
>  #endif
>  	return 0;
>  }
> @@ -2927,8 +2973,13 @@ qat_security_session_destroy(void *dev
> __rte_unused,
>  	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
> 
>  	if (sess_priv) {
> +#ifdef RTE_QAT_OPENSSL
>  		if (s->bpi_ctx)
>  			bpi_cipher_ctx_free(s->bpi_ctx);
> +#else
> +		if (s->mb_mgr)
> +			free_mb_mgr(s->mb_mgr);
> +#endif
>  		memset(s, 0, qat_sym_session_get_private_size(dev));
>  	}
> 
> diff --git a/drivers/crypto/qat/qat_sym_session.h
> b/drivers/crypto/qat/qat_sym_session.h
> index 9b5d11ac88..ee916b2814 100644
> --- a/drivers/crypto/qat/qat_sym_session.h
> +++ b/drivers/crypto/qat/qat_sym_session.h
> @@ -15,6 +15,14 @@
>  #include "icp_qat_fw.h"
>  #include "icp_qat_fw_la.h"
> 
> +#ifndef RTE_QAT_OPENSSL
> +#if defined(RTE_ARCH_ARM)
> +#include <ipsec-mb.h>
> +#else
> +#include <intel-ipsec-mb.h>
> +#endif
> +#endif
> +
>  /*
>   * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
>   * Integrity Key (IK)
> @@ -129,6 +137,12 @@ struct qat_sym_session {
>  	uint32_t slice_types;
>  	enum qat_sym_proto_flag qat_proto_flag;
>  	qat_sym_build_request_t build_request[2];
> +#ifndef RTE_QAT_OPENSSL
> +	IMB_MGR *mb_mgr;
> +#endif
> +	uint64_t expkey[4*15];
> +	uint32_t dust[4*15];
> +	uint8_t docsis_key_len;
>  };
> 
>  int
> --
> 2.25.1
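
To summarise the qat_sym_session.c churn quoted above: the runtime
qat_ipsec_mb_lib devarg is removed and the precompute backend is now fixed at
build time (#ifdef RTE_QAT_OPENSSL selects qat_sym_do_precomputes(), otherwise
qat_sym_do_precomputes_ipsec_mb() is compiled in). Below is a minimal,
self-contained sketch of the SHA-1 HMAC ipad/opad precompute as done on the
IPsec MB path; the constants and IMB calls are from the patch, while the
standalone main(), the zeroed key and the buffer names are illustrative only.

#include <stdint.h>
#include <string.h>
#include <intel-ipsec-mb.h>

#define SHA1_BLOCK_SIZE   64
#define HMAC_IPAD_VALUE   0x36
#define HMAC_OPAD_VALUE   0x5c

int main(void)
{
	IMB_MGR *mb_mgr = alloc_mb_mgr(0);
	uint8_t auth_key[20] = {0};          /* HMAC-SHA1 key, <= block size */
	uint8_t ipad[SHA1_BLOCK_SIZE], opad[SHA1_BLOCK_SIZE];
	uint8_t state1[20], state2[20];      /* partial digests for the CD */
	int i;

	if (mb_mgr == NULL)
		return 1;
	init_mb_mgr_auto(mb_mgr, NULL);

	/* Pad the key to the block size and XOR with the HMAC constants. */
	memset(ipad, 0, sizeof(ipad));
	memset(opad, 0, sizeof(opad));
	memcpy(ipad, auth_key, sizeof(auth_key));
	memcpy(opad, auth_key, sizeof(auth_key));
	for (i = 0; i < SHA1_BLOCK_SIZE; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	/* Single-block partial hashes of ipad and opad. */
	IMB_SHA1_ONE_BLOCK(mb_mgr, ipad, state1);
	IMB_SHA1_ONE_BLOCK(mb_mgr, opad, state2);

	free_mb_mgr(mb_mgr);
	return 0;
}

In the driver the resulting partial digests are byte-swapped into big-endian
words and written at state1 and state1 + state_len inside the content
descriptor, as partial_hash_compute_ipsec_mb() in the patch does.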
  

Patch

diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index a4a25711ed..8666ac8b88 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -317,18 +317,12 @@  To use this feature the user must set the devarg on process start as a device ad
  -a 03:01.1,qat_sym_cipher_crc_enable=1
 
 
-Running QAT PMD with Intel IPSEC MB library for symmetric precomputes function
+Running QAT PMD with Intel IPsec MB library for symmetric precomputes function
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The QAT PMD use Openssl library for partial hash calculation in symmetirc precomputes function by
-default, the following parameter is allow QAT PMD switch over to multi-buffer job API if Intel
-IPSEC MB library installed on system.
-
-- qat_ipsec_mb_lib
-
-To use this feature the user must set the parameter on process start as a device additional parameter::
-
-  -a 03:01.1,qat_ipsec_mb_lib=1
+The QAT PMD uses the Intel IPsec MB library for partial hash calculation in the symmetric
+precomputes function by default; the minimum required version of the IPsec MB library is v1.4.
+If this version is not available, the PMD falls back to OpenSSL.
 
 
 Device and driver naming
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index b84e5b3c6c..9c5eb99acd 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -28,30 +28,37 @@  if disable_drivers.contains(qat_compress_path)
 endif
 
 libcrypto = dependency('libcrypto', required: false, method: 'pkg-config')
-if qat_crypto and not libcrypto.found()
-    qat_crypto = false
-    dpdk_drvs_disabled += qat_crypto_path
-    set_variable(qat_crypto_path.underscorify() + '_disable_reason',
-            'missing dependency, libcrypto')
-endif
 
-IMB_required_ver = '1.2.0'
+IMB_required_ver = '1.4.0'
 IMB_header = '#include<intel-ipsec-mb.h>'
 if arch_subdir == 'arm'
     IMB_header = '#include<ipsec-mb.h>'
 endif
 libipsecmb = cc.find_library('IPSec_MB', required: false)
-libcrypto_3 = dependency('libcrypto', required: false,
-    method: 'pkg-config', version : '>=3.0.0')
-if libipsecmb.found() and libcrypto_3.found()
+if libipsecmb.found()
     # version comes with quotes, so we split based on " and take the middle
     imb_ver = cc.get_define('IMB_VERSION_STR',
         prefix : IMB_header).split('"')[1]
 
     if (imb_ver.version_compare('>=' + IMB_required_ver))
         ext_deps += libipsecmb
-        dpdk_conf.set('RTE_QAT_LIBIPSECMB', true)
+    elif libcrypto.found()
+        ext_deps += libcrypto
+        dpdk_conf.set('RTE_QAT_OPENSSL', true)
+    else
+        qat_crypto = false
+        dpdk_drvs_disabled += qat_crypto_path
+        set_variable(qat_crypto_path.underscorify() + '_disable_reason',
+            'missing dependency, libipsecmb or libcrypto')
     endif
+elif libcrypto.found()
+    ext_deps += libcrypto
+    dpdk_conf.set('RTE_QAT_OPENSSL', true)
+else
+    qat_crypto = false
+    dpdk_drvs_disabled += qat_crypto_path
+    set_variable(qat_crypto_path.underscorify() + '_disable_reason',
+        'missing dependency, libipsecmb or libcrypto')
 endif
 
 # The driver should not build if both compression and crypto are disabled
@@ -103,6 +110,5 @@  if qat_crypto
         sources += files(join_paths(qat_crypto_relpath, f))
     endforeach
     deps += ['security']
-    ext_deps += libcrypto
     cflags += ['-DBUILD_QAT_SYM', '-DBUILD_QAT_ASYM']
 endif
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 0479175b65..20e56b5cf2 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -371,7 +371,6 @@  static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	struct qat_pci_device *qat_pci_dev;
 	struct qat_dev_hw_spec_funcs *ops_hw;
 	struct qat_dev_cmd_param qat_dev_cmd_param[] = {
-			{ QAT_IPSEC_MB_LIB, 0 },
 			{ SYM_ENQ_THRESHOLD_NAME, 0 },
 			{ ASYM_ENQ_THRESHOLD_NAME, 0 },
 			{ COMP_ENQ_THRESHOLD_NAME, 0 },
diff --git a/drivers/common/qat/qat_device.h b/drivers/common/qat/qat_device.h
index 4188474dde..0cfe8654b1 100644
--- a/drivers/common/qat/qat_device.h
+++ b/drivers/common/qat/qat_device.h
@@ -17,13 +17,12 @@ 
 
 #define QAT_DEV_NAME_MAX_LEN	64
 
-#define QAT_IPSEC_MB_LIB "qat_ipsec_mb_lib"
 #define SYM_ENQ_THRESHOLD_NAME "qat_sym_enq_threshold"
 #define ASYM_ENQ_THRESHOLD_NAME "qat_asym_enq_threshold"
 #define COMP_ENQ_THRESHOLD_NAME "qat_comp_enq_threshold"
 #define SYM_CIPHER_CRC_ENABLE_NAME "qat_sym_cipher_crc_enable"
 #define QAT_CMD_SLICE_MAP "qat_cmd_slice_disable"
-#define QAT_CMD_SLICE_MAP_POS	5
+#define QAT_CMD_SLICE_MAP_POS	4
 #define MAX_QP_THRESHOLD_SIZE	32
 
 /**
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index e8e92e22d4..7776763356 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -82,8 +82,13 @@  qat_bpicipher_preprocess(struct qat_sym_session *ctx,
 			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
 			dst, last_block_len);
 #endif
+#ifdef RTE_QAT_OPENSSL
 		op_bpi_cipher_decrypt(last_block, dst, iv, block_len,
 				last_block_len, ctx->bpi_ctx);
+#else
+		bpi_cipher_ipsec(last_block, dst, iv, last_block_len, ctx->expkey,
+			ctx->mb_mgr, ctx->docsis_key_len);
+#endif
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
 			last_block, last_block_len);
@@ -231,7 +236,12 @@  qat_sym_convert_op_to_vec_cipher(struct rte_crypto_op *op,
 		cipher_ofs = op->sym->cipher.data.offset >> 3;
 		break;
 	case 0:
+
+#ifdef RTE_QAT_OPENSSL
 		if (ctx->bpi_ctx) {
+#else
+		if (ctx->mb_mgr) {
+#endif
 			/* DOCSIS - only send complete blocks to device.
 			 * Process any partial block using CFB mode.
 			 * Even if 0 complete blocks, still send this to device
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 345c845325..b93908f007 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@ 
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2022 Intel Corporation
+ * Copyright(c) 2015-2023 Intel Corporation
  */
 
 #include <openssl/evp.h>
@@ -16,7 +16,6 @@ 
 #include "qat_qp.h"
 
 uint8_t qat_sym_driver_id;
-int qat_ipsec_mb_lib;
 
 struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
 
@@ -110,7 +109,11 @@  qat_sym_build_request(void *in_op, uint8_t *out_msg,
 			struct rte_cryptodev *cdev;
 			struct qat_cryptodev_private *internals;
 
+#ifdef RTE_QAT_OPENSSL
 			if (unlikely(ctx->bpi_ctx == NULL)) {
+#else
+			if (unlikely(ctx->mb_mgr == NULL)) {
+#endif
 				QAT_DP_LOG(ERR, "QAT PMD only supports security"
 						" operation requests for"
 						" DOCSIS, op (%p) is not for"
@@ -283,8 +286,6 @@  qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 				SYM_CIPHER_CRC_ENABLE_NAME))
 			internals->cipher_crc_offload_enable =
 					qat_dev_cmd_param[i].val;
-		if (!strcmp(qat_dev_cmd_param[i].name, QAT_IPSEC_MB_LIB))
-			qat_ipsec_mb_lib = qat_dev_cmd_param[i].val;
 		if (!strcmp(qat_dev_cmd_param[i].name, QAT_CMD_SLICE_MAP))
 			slice_map = qat_dev_cmd_param[i].val;
 		i++;
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index 3d841d0eba..341016dcf3 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -164,6 +164,20 @@  bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
 	return -EINVAL;
 }
 
+#ifndef RTE_QAT_OPENSSL
+static __rte_always_inline void
+bpi_cipher_ipsec(uint8_t *src, uint8_t *dst, uint8_t *iv, int srclen,
+		uint64_t *expkey, IMB_MGR *m, uint8_t docsis_key_len)
+{
+	if (docsis_key_len == ICP_QAT_HW_AES_128_KEY_SZ)
+		IMB_AES128_CFB_ONE(m, dst, src, (uint64_t *)iv, expkey, srclen);
+	else if (docsis_key_len == ICP_QAT_HW_AES_256_KEY_SZ)
+		IMB_AES256_CFB_ONE(m, dst, src, (uint64_t *)iv, expkey, srclen);
+	else if (docsis_key_len == ICP_QAT_HW_DES_KEY_SZ)
+		des_cfb_one(dst, src, (uint64_t *)iv, expkey, srclen);
+}
+#endif
+
 static inline uint32_t
 qat_bpicipher_postprocess(struct qat_sym_session *ctx,
 				struct rte_crypto_op *op)
@@ -208,8 +222,13 @@  qat_bpicipher_postprocess(struct qat_sym_session *ctx,
 				"BPI: dst before post-process:",
 				dst, last_block_len);
 #endif
+#ifdef RTE_QAT_OPENSSL
 		bpi_cipher_encrypt(last_block, dst, iv, block_len,
 				last_block_len, ctx->bpi_ctx);
+#else
+		bpi_cipher_ipsec(last_block, dst, iv, last_block_len, ctx->expkey,
+			ctx->mb_mgr, ctx->docsis_key_len);
+#endif
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
 				last_block, last_block_len);
@@ -280,7 +299,11 @@  qat_sym_preprocess_requests(void **ops, uint16_t nb_ops)
 		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
 			ctx = SECURITY_GET_SESS_PRIV(op->sym->session);
 
+#ifdef RTE_QAT_OPENSSL
 			if (ctx == NULL || ctx->bpi_ctx == NULL)
+#else
+			if (ctx == NULL || ctx->mb_mgr == NULL)
+#endif
 				continue;
 
 			if (ctx->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_CRC)
@@ -329,7 +352,11 @@  qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
 	} else {
 		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 
+#ifdef RTE_QAT_OPENSSL
 		if (sess->bpi_ctx) {
+#else
+		if (sess->mb_mgr) {
+#endif
 			qat_bpicipher_postprocess(sess, rx_op);
 #ifdef RTE_LIB_SECURITY
 			if (is_docsis_sec && sess->qat_cmd !=
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 9babf13b66..da9a50dd49 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -9,8 +9,7 @@ 
 #include <openssl/md5.h>	/* Needed to calculate pre-compute values */
 #include <openssl/evp.h>	/* Needed for bpi runt block processing */
 
-#ifdef RTE_QAT_LIBIPSECMB
-#define NO_COMPAT_IMB_API_053
+#ifndef RTE_QAT_OPENSSL
 #if defined(RTE_ARCH_ARM)
 #include <ipsec-mb.h>
 #else
@@ -34,6 +33,7 @@ 
 #include "qat_sym_session.h"
 #include "qat_sym.h"
 
+#ifdef RTE_QAT_OPENSSL
 #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
 #include <openssl/provider.h>
 
@@ -66,8 +66,7 @@  static void ossl_legacy_provider_unload(void)
 	OSSL_PROVIDER_unload(default_lib);
 }
 #endif
-
-extern int qat_ipsec_mb_lib;
+#endif
 
 #define ETH_CRC32_POLYNOMIAL    0x04c11db7
 #define ETH_CRC32_INIT_VAL      0xffffffff
@@ -146,6 +145,7 @@  qat_sym_session_finalize(struct qat_sym_session *session)
 	qat_sym_session_init_common_hdr(session);
 }
 
+#ifdef RTE_QAT_OPENSSL
 /** Frees a context previously created
  *  Depends on openssl libcrypto
  */
@@ -196,6 +196,51 @@  bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
 	}
 	return ret;
 }
+#endif
+
+#ifndef RTE_QAT_OPENSSL
+/** Creates a context in either AES or DES in ECB mode
+ */
+static int
+ipsec_mb_ctx_init(const uint8_t *key, uint16_t key_length,
+		enum rte_crypto_cipher_algorithm cryptodev_algo,
+		uint64_t *expkey, uint32_t *dust, IMB_MGR **m)
+{
+	int ret;
+
+	*m = alloc_mb_mgr(0);
+	if (*m == NULL)
+		return -ENOMEM;
+
+	init_mb_mgr_auto(*m, NULL);
+
+	if (cryptodev_algo == RTE_CRYPTO_CIPHER_AES_DOCSISBPI) {
+		if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
+			IMB_AES_KEYEXP_128(*m, key, expkey, dust);
+		else if (key_length == ICP_QAT_HW_AES_256_KEY_SZ)
+			IMB_AES_KEYEXP_256(*m, key, expkey, dust);
+		else {
+			ret = -EFAULT;
+			goto error_out;
+		}
+	} else if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI) {
+		if (key_length == ICP_QAT_HW_DES_KEY_SZ)
+			IMB_DES_KEYSCHED(*m, (uint64_t *)expkey, key);
+		else {
+			ret = -EFAULT;
+			goto error_out;
+		}
+	}
+	return 0;
+
+error_out:
+	if (*m) {
+		free_mb_mgr(*m);
+		*m = NULL;
+	}
+	return ret;
+}
+#endif
 
 static int
 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
@@ -245,8 +290,13 @@  qat_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
 {
 	struct qat_sym_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
 
+#ifdef RTE_QAT_OPENSSL
 	if (s->bpi_ctx)
 		bpi_cipher_ctx_free(s->bpi_ctx);
+#else
+	if (s->mb_mgr)
+		free_mb_mgr(s->mb_mgr);
+#endif
 }
 
 static int
@@ -409,12 +459,23 @@  qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 		break;
 	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+#ifdef RTE_QAT_OPENSSL
 		ret = bpi_cipher_ctx_init(
 					cipher_xform->algo,
 					cipher_xform->op,
 					cipher_xform->key.data,
 					cipher_xform->key.length,
 					&session->bpi_ctx);
+#else
+		session->docsis_key_len = cipher_xform->key.length;
+		ret = ipsec_mb_ctx_init(
+					cipher_xform->key.data,
+					cipher_xform->key.length,
+					cipher_xform->algo,
+					session->expkey,
+					session->dust,
+					&session->mb_mgr);
+#endif
 		if (ret != 0) {
 			QAT_LOG(ERR, "failed to create DES BPI ctx");
 			goto error_out;
@@ -428,12 +489,23 @@  qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
 		break;
 	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+#ifdef RTE_QAT_OPENSSL
 		ret = bpi_cipher_ctx_init(
 					cipher_xform->algo,
 					cipher_xform->op,
 					cipher_xform->key.data,
 					cipher_xform->key.length,
 					&session->bpi_ctx);
+#else
+		session->docsis_key_len = cipher_xform->key.length;
+		ret = ipsec_mb_ctx_init(
+					cipher_xform->key.data,
+					cipher_xform->key.length,
+					cipher_xform->algo,
+					session->expkey,
+					session->dust,
+					&session->mb_mgr);
+#endif
 		if (ret != 0) {
 			QAT_LOG(ERR, "failed to create AES BPI ctx");
 			goto error_out;
@@ -519,10 +591,18 @@  qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
 	return 0;
 
 error_out:
+#ifdef RTE_QAT_OPENSSL
 	if (session->bpi_ctx) {
 		bpi_cipher_ctx_free(session->bpi_ctx);
 		session->bpi_ctx = NULL;
 	}
+#else
+	if (session->mb_mgr) {
+		free_mb_mgr(session->mb_mgr);
+		session->mb_mgr = NULL;
+	}
+
+#endif
 	return ret;
 }
 
@@ -533,8 +613,10 @@  qat_sym_session_configure(struct rte_cryptodev *dev,
 {
 	int ret;
 
+#ifdef RTE_QAT_OPENSSL
 #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
 	ossl_legacy_provider_load();
+#endif
 #endif
 	ret = qat_sym_session_set_parameters(dev, xform,
 			CRYPTODEV_GET_SYM_SESS_PRIV(sess),
@@ -546,8 +628,10 @@  qat_sym_session_configure(struct rte_cryptodev *dev,
 		return ret;
 	}
 
+#ifdef RTE_QAT_OPENSSL
 # if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
 	ossl_legacy_provider_unload();
+# endif
 # endif
 	return 0;
 }
@@ -1209,57 +1293,91 @@  static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 #define HMAC_OPAD_VALUE	0x5c
 #define HASH_XCBC_PRECOMP_KEY_NUM 3
 
-static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
+#ifdef RTE_QAT_OPENSSL
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA_CTX ctx;
 
-#ifdef RTE_QAT_LIBIPSECMB
-static int aes_ipsecmb_job(uint8_t *in, uint8_t *out, IMB_MGR *m,
-		const uint8_t *key, uint16_t auth_keylen)
+	if (!SHA1_Init(&ctx))
+		return -EFAULT;
+	SHA1_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
 {
-	int err;
-	struct IMB_JOB *job;
-	DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
-	DECLARE_ALIGNED(uint32_t dust[4*15], 16);
+	SHA256_CTX ctx;
 
-	if (auth_keylen == ICP_QAT_HW_AES_128_KEY_SZ)
-		IMB_AES_KEYEXP_128(m, key, expkey, dust);
-	else if (auth_keylen == ICP_QAT_HW_AES_192_KEY_SZ)
-		IMB_AES_KEYEXP_192(m, key, expkey, dust);
-	else if (auth_keylen == ICP_QAT_HW_AES_256_KEY_SZ)
-		IMB_AES_KEYEXP_256(m, key, expkey, dust);
-	else
+	if (!SHA224_Init(&ctx))
 		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+	return 0;
+}
 
-	job = IMB_GET_NEXT_JOB(m);
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
 
-	job->src = in;
-	job->dst = out;
-	job->enc_keys = expkey;
-	job->key_len_in_bytes = auth_keylen;
-	job->msg_len_to_cipher_in_bytes = 16;
-	job->iv_len_in_bytes = 0;
-	job->cipher_direction = IMB_DIR_ENCRYPT;
-	job->cipher_mode = IMB_CIPHER_ECB;
-	job->hash_alg = IMB_AUTH_NULL;
+	if (!SHA256_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+	return 0;
+}
 
-	while (IMB_FLUSH_JOB(m) != NULL)
-		;
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
 
-	job = IMB_SUBMIT_JOB(m);
-	if (job) {
-		if (job->status == IMB_STATUS_COMPLETED)
-			return 0;
-	}
+	if (!SHA384_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+	return 0;
+}
 
-	err = imb_get_errno(m);
-	if (err)
-		QAT_LOG(ERR, "Error: %s!\n", imb_get_strerror(err));
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
 
-	return -EFAULT;
+	if (!SHA512_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
+{
+	MD5_CTX ctx;
+
+	if (!MD5_Init(&ctx))
+		return -EFAULT;
+	MD5_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
+
+	return 0;
+}
+
+static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
+{
+	int i;
+
+	derived[0] = base[0] << 1;
+	for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
+		derived[i] = base[i] << 1;
+		derived[i - 1] |= base[i] >> 7;
+	}
+
+	if (base[0] & 0x80)
+		derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
 }
 
 static int
-partial_hash_compute_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
-		uint8_t *data_in, uint8_t *data_out, IMB_MGR *m)
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in, uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1280,37 +1398,43 @@  partial_hash_compute_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
 
 	switch (hash_alg) {
 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
-		IMB_SHA1_ONE_BLOCK(m, data_in, digest);
+		if (partial_hash_sha1(data_in, digest))
+			return -EFAULT;
 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
 			*hash_state_out_be32 =
 				rte_bswap32(*(((uint32_t *)digest)+i));
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA224:
-		IMB_SHA224_ONE_BLOCK(m, data_in, digest);
+		if (partial_hash_sha224(data_in, digest))
+			return -EFAULT;
 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
 			*hash_state_out_be32 =
 				rte_bswap32(*(((uint32_t *)digest)+i));
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
-		IMB_SHA256_ONE_BLOCK(m, data_in, digest);
+		if (partial_hash_sha256(data_in, digest))
+			return -EFAULT;
 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
 			*hash_state_out_be32 =
 				rte_bswap32(*(((uint32_t *)digest)+i));
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA384:
-		IMB_SHA384_ONE_BLOCK(m, data_in, digest);
+		if (partial_hash_sha384(data_in, digest))
+			return -EFAULT;
 		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
 			*hash_state_out_be64 =
 				rte_bswap64(*(((uint64_t *)digest)+i));
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
-		IMB_SHA512_ONE_BLOCK(m, data_in, digest);
+		if (partial_hash_sha512(data_in, digest))
+			return -EFAULT;
 		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
 			*hash_state_out_be64 =
 				rte_bswap64(*(((uint64_t *)digest)+i));
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
-		IMB_MD5_ONE_BLOCK(m, data_in, data_out);
+		if (partial_hash_md5(data_in, data_out))
+			return -EFAULT;
 		break;
 	default:
 		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
@@ -1320,108 +1444,150 @@  partial_hash_compute_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
 	return 0;
 }
 
-static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
+static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
+
+static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 				const uint8_t *auth_key,
 				uint16_t auth_keylen,
 				uint8_t *p_state_buf,
 				uint16_t *p_state_len,
 				uint8_t aes_cmac)
 {
-	int block_size = 0;
+	int block_size;
 	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
 	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
-	int i, ret = 0;
-	uint8_t in[ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ];
-
-	IMB_MGR *m;
-	m = alloc_mb_mgr(0);
-	if (m == NULL)
-		return -ENOMEM;
+	int i;
 
-	init_mb_mgr_auto(m, NULL);
-	memset(in, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
 	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
 
 		/* CMAC */
 		if (aes_cmac) {
+			AES_KEY enc_key;
+			uint8_t *in = NULL;
+			uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
 			uint8_t *k1, *k2;
+
 			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
-			rte_memcpy(p_state_buf, auth_key, auth_keylen);
 
-			DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
-			DECLARE_ALIGNED(uint32_t dust[4*15], 16);
-			IMB_AES_KEYEXP_128(m, p_state_buf, expkey, dust);
-			k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
-			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+			in = rte_zmalloc("AES CMAC K1",
+					 ICP_QAT_HW_AES_128_KEY_SZ, 16);
 
-			IMB_AES_CMAC_SUBKEY_GEN_128(m, expkey, k1, k2);
-			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
-			goto out;
-		}
+			if (in == NULL) {
+				QAT_LOG(ERR, "Failed to alloc memory");
+				return -ENOMEM;
+			}
 
-		static uint8_t qat_aes_xcbc_key_seed[
-				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
-			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
-			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
-			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
-			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
-			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
-			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
-		};
+			rte_memcpy(in, AES_CMAC_SEED,
+				   ICP_QAT_HW_AES_128_KEY_SZ);
+			rte_memcpy(p_state_buf, auth_key, auth_keylen);
 
-		uint8_t *input = in;
-		uint8_t *out = p_state_buf;
-		rte_memcpy(input, qat_aes_xcbc_key_seed,
-				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
-		for (i = 0; i < HASH_XCBC_PRECOMP_KEY_NUM; i++) {
-			if (aes_ipsecmb_job(input, out, m, auth_key, auth_keylen)) {
-				memset(input -
-				   (i * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
-				  0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
-				ret = -EFAULT;
-				goto out;
+			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
+				&enc_key) != 0) {
+				rte_free(in);
+				return -EFAULT;
 			}
 
-			input += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
-			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
-		}
-		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
-		goto out;
+			AES_encrypt(in, k0, &enc_key);
 
-	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
-		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
-		uint8_t *out = p_state_buf;
+			k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
 
-		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
-				ICP_QAT_HW_GALOIS_LEN_A_SZ +
-				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
-		if (aes_ipsecmb_job(in, out, m, auth_key, auth_keylen)) {
-			ret = -EFAULT;
-			goto out;
-		}
+			aes_cmac_key_derive(k0, k1);
+			aes_cmac_key_derive(k1, k2);
 
-		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
+			memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
+			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+			rte_free(in);
+			goto out;
+		} else {
+			static uint8_t qat_aes_xcbc_key_seed[
+					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
+				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+			};
+
+			uint8_t *in = NULL;
+			uint8_t *out = p_state_buf;
+			int x;
+			AES_KEY enc_key;
+
+			in = rte_zmalloc("working mem for key",
+					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+			if (in == NULL) {
+				QAT_LOG(ERR, "Failed to alloc memory");
+				return -ENOMEM;
+			}
+
+			rte_memcpy(in, qat_aes_xcbc_key_seed,
+					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+			for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
+				if (AES_set_encrypt_key(auth_key,
+							auth_keylen << 3,
+							&enc_key) != 0) {
+					rte_free(in -
+					  (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
+					memset(out -
+					   (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
+					  0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+					return -EFAULT;
+				}
+				AES_encrypt(in, out, &enc_key);
+				in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+				out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+			}
+			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+			rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
+			goto out;
+		}
+
+	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
+		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+		uint8_t *in = NULL;
+		uint8_t *out = p_state_buf;
+		AES_KEY enc_key;
+
+		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
+				ICP_QAT_HW_GALOIS_LEN_A_SZ +
+				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
+		in = rte_zmalloc("working mem for key",
+				ICP_QAT_HW_GALOIS_H_SZ, 16);
+		if (in == NULL) {
+			QAT_LOG(ERR, "Failed to alloc memory");
+			return -ENOMEM;
+		}
+
+		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
+		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
+			&enc_key) != 0) {
+			return -EFAULT;
+		}
+		AES_encrypt(in, out, &enc_key);
+		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
 				ICP_QAT_HW_GALOIS_LEN_A_SZ +
 				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
-		goto out;
+		rte_free(in);
+		return 0;
 	}
 
 	block_size = qat_hash_get_block_size(hash_alg);
-	if (block_size < 0) {
-		free_mb_mgr(m);
+	if (block_size < 0)
 		return block_size;
-	}
+	/* init ipad and opad from key and xor with fixed values */
+	memset(ipad, 0, block_size);
+	memset(opad, 0, block_size);
 
 	if (auth_keylen > (unsigned int)block_size) {
 		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
-		ret = -EFAULT;
-		goto out;
+		return -EFAULT;
 	}
-	/* init ipad and opad from key and xor with fixed values */
-	memset(ipad, 0, block_size);
-	memset(opad, 0, block_size);
+
 	RTE_VERIFY(auth_keylen <= sizeof(ipad));
 	RTE_VERIFY(auth_keylen <= sizeof(opad));
+
 	rte_memcpy(ipad, auth_key, auth_keylen);
 	rte_memcpy(opad, auth_key, auth_keylen);
 
@@ -1433,10 +1599,11 @@  static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
 	}
 
 	/* do partial hash of ipad and copy to state1 */
-	if (partial_hash_compute_ipsec_mb(hash_alg, ipad, p_state_buf, m)) {
+	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
+		memset(ipad, 0, block_size);
+		memset(opad, 0, block_size);
 		QAT_LOG(ERR, "ipad precompute failed");
-		ret = -EFAULT;
-		goto out;
+		return -EFAULT;
 	}
 
 	/*
@@ -1444,105 +1611,70 @@  static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
 	 * Put the partial hash of opad state_len bytes after state1
 	 */
 	*p_state_len = qat_hash_get_state1_size(hash_alg);
-	if (partial_hash_compute_ipsec_mb(hash_alg, opad,
-				p_state_buf + *p_state_len, m)) {
+	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
+		memset(ipad, 0, block_size);
+		memset(opad, 0, block_size);
 		QAT_LOG(ERR, "opad precompute failed");
-		ret = -EFAULT;
-		goto out;
+		return -EFAULT;
 	}
 
-out:
 	/*  don't leave data lying around */
 	memset(ipad, 0, block_size);
 	memset(opad, 0, block_size);
-	free_mb_mgr(m);
-	return ret;
-}
-#endif
-static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
-{
-	SHA_CTX ctx;
-
-	if (!SHA1_Init(&ctx))
-		return -EFAULT;
-	SHA1_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
-	return 0;
-}
-
-static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
-{
-	SHA256_CTX ctx;
-
-	if (!SHA224_Init(&ctx))
-		return -EFAULT;
-	SHA256_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
-	return 0;
-}
-
-static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
-{
-	SHA256_CTX ctx;
-
-	if (!SHA256_Init(&ctx))
-		return -EFAULT;
-	SHA256_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+out:
 	return 0;
 }
 
-static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
-{
-	SHA512_CTX ctx;
-
-	if (!SHA384_Init(&ctx))
-		return -EFAULT;
-	SHA512_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
-	return 0;
-}
+#else
 
-static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+static int aes_ipsecmb_job(uint8_t *in, uint8_t *out, IMB_MGR *m,
+		const uint8_t *key, uint16_t auth_keylen)
 {
-	SHA512_CTX ctx;
+	int err;
+	struct IMB_JOB *job;
+	DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
+	DECLARE_ALIGNED(uint32_t dust[4*15], 16);
 
-	if (!SHA512_Init(&ctx))
+	if (auth_keylen == ICP_QAT_HW_AES_128_KEY_SZ)
+		IMB_AES_KEYEXP_128(m, key, expkey, dust);
+	else if (auth_keylen == ICP_QAT_HW_AES_192_KEY_SZ)
+		IMB_AES_KEYEXP_192(m, key, expkey, dust);
+	else if (auth_keylen == ICP_QAT_HW_AES_256_KEY_SZ)
+		IMB_AES_KEYEXP_256(m, key, expkey, dust);
+	else
 		return -EFAULT;
-	SHA512_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
-	return 0;
-}
 
-static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
-{
-	MD5_CTX ctx;
-
-	if (!MD5_Init(&ctx))
-		return -EFAULT;
-	MD5_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
+	job = IMB_GET_NEXT_JOB(m);
 
-	return 0;
-}
+	job->src = in;
+	job->dst = out;
+	job->enc_keys = expkey;
+	job->key_len_in_bytes = auth_keylen;
+	job->msg_len_to_cipher_in_bytes = 16;
+	job->iv_len_in_bytes = 0;
+	job->cipher_direction = IMB_DIR_ENCRYPT;
+	job->cipher_mode = IMB_CIPHER_ECB;
+	job->hash_alg = IMB_AUTH_NULL;
 
-static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
-{
-	int i;
+	while (IMB_FLUSH_JOB(m) != NULL)
+		;
 
-	derived[0] = base[0] << 1;
-	for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
-		derived[i] = base[i] << 1;
-		derived[i - 1] |= base[i] >> 7;
+	job = IMB_SUBMIT_JOB(m);
+	if (job) {
+		if (job->status == IMB_STATUS_COMPLETED)
+			return 0;
 	}
 
-	if (base[0] & 0x80)
-		derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
+	err = imb_get_errno(m);
+	if (err)
+		QAT_LOG(ERR, "Error: %s!\n", imb_get_strerror(err));
+
+	return -EFAULT;
 }
 
 static int
-partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-		uint8_t *data_in, uint8_t *data_out)
+partial_hash_compute_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in, uint8_t *data_out, IMB_MGR *m)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1563,43 +1695,37 @@  partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
 
 	switch (hash_alg) {
 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
-		if (partial_hash_sha1(data_in, digest))
-			return -EFAULT;
+		IMB_SHA1_ONE_BLOCK(m, data_in, digest);
 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
 			*hash_state_out_be32 =
 				rte_bswap32(*(((uint32_t *)digest)+i));
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA224:
-		if (partial_hash_sha224(data_in, digest))
-			return -EFAULT;
+		IMB_SHA224_ONE_BLOCK(m, data_in, digest);
 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
 			*hash_state_out_be32 =
 				rte_bswap32(*(((uint32_t *)digest)+i));
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
-		if (partial_hash_sha256(data_in, digest))
-			return -EFAULT;
+		IMB_SHA256_ONE_BLOCK(m, data_in, digest);
 		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
 			*hash_state_out_be32 =
 				rte_bswap32(*(((uint32_t *)digest)+i));
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA384:
-		if (partial_hash_sha384(data_in, digest))
-			return -EFAULT;
+		IMB_SHA384_ONE_BLOCK(m, data_in, digest);
 		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
 			*hash_state_out_be64 =
 				rte_bswap64(*(((uint64_t *)digest)+i));
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
-		if (partial_hash_sha512(data_in, digest))
-			return -EFAULT;
+		IMB_SHA512_ONE_BLOCK(m, data_in, digest);
 		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
 			*hash_state_out_be64 =
 				rte_bswap64(*(((uint64_t *)digest)+i));
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
-		if (partial_hash_md5(data_in, data_out))
-			return -EFAULT;
+		IMB_MD5_ONE_BLOCK(m, data_in, data_out);
 		break;
 	default:
 		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
@@ -1609,148 +1735,108 @@  partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
 	return 0;
 }
 
-static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
+static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
 				const uint8_t *auth_key,
 				uint16_t auth_keylen,
 				uint8_t *p_state_buf,
 				uint16_t *p_state_len,
 				uint8_t aes_cmac)
 {
-	int block_size;
+	int block_size = 0;
 	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
 	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
-	int i;
+	int i, ret = 0;
+	uint8_t in[ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ];
+
+	IMB_MGR *m;
+	m = alloc_mb_mgr(0);
+	if (m == NULL)
+		return -ENOMEM;
 
+	init_mb_mgr_auto(m, NULL);
+	memset(in, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
 	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
 
 		/* CMAC */
 		if (aes_cmac) {
-			AES_KEY enc_key;
-			uint8_t *in = NULL;
-			uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
 			uint8_t *k1, *k2;
-
 			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
-
-			in = rte_zmalloc("AES CMAC K1",
-					 ICP_QAT_HW_AES_128_KEY_SZ, 16);
-
-			if (in == NULL) {
-				QAT_LOG(ERR, "Failed to alloc memory");
-				return -ENOMEM;
-			}
-
-			rte_memcpy(in, AES_CMAC_SEED,
-				   ICP_QAT_HW_AES_128_KEY_SZ);
 			rte_memcpy(p_state_buf, auth_key, auth_keylen);
 
-			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
-				&enc_key) != 0) {
-				rte_free(in);
-				return -EFAULT;
-			}
-
-			AES_encrypt(in, k0, &enc_key);
-
+			DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
+			DECLARE_ALIGNED(uint32_t dust[4*15], 16);
+			IMB_AES_KEYEXP_128(m, p_state_buf, expkey, dust);
 			k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
 			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
 
-			aes_cmac_key_derive(k0, k1);
-			aes_cmac_key_derive(k1, k2);
-
-			memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
+			IMB_AES_CMAC_SUBKEY_GEN_128(m, expkey, k1, k2);
 			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
-			rte_free(in);
-			return 0;
-		} else {
-			static uint8_t qat_aes_xcbc_key_seed[
-					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
-				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
-				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
-				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
-				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
-				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
-				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
-			};
+			goto out;
+		}
 
-			uint8_t *in = NULL;
-			uint8_t *out = p_state_buf;
-			int x;
-			AES_KEY enc_key;
+		static uint8_t qat_aes_xcbc_key_seed[
+				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
+			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+		};
 
-			in = rte_zmalloc("working mem for key",
-					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
-			if (in == NULL) {
-				QAT_LOG(ERR, "Failed to alloc memory");
-				return -ENOMEM;
+		uint8_t *input = in;
+		uint8_t *out = p_state_buf;
+		rte_memcpy(input, qat_aes_xcbc_key_seed,
+				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+		for (i = 0; i < HASH_XCBC_PRECOMP_KEY_NUM; i++) {
+			if (aes_ipsecmb_job(input, out, m, auth_key, auth_keylen)) {
+				memset(input -
+				   (i * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
+				  0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+				ret = -EFAULT;
+				goto out;
 			}
 
-			rte_memcpy(in, qat_aes_xcbc_key_seed,
-					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
-			for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
-				if (AES_set_encrypt_key(auth_key,
-							auth_keylen << 3,
-							&enc_key) != 0) {
-					rte_free(in -
-					  (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
-					memset(out -
-					   (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
-					  0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
-					return -EFAULT;
-				}
-				AES_encrypt(in, out, &enc_key);
-				in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
-				out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
-			}
-			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
-			rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
-			return 0;
+			input += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
 		}
+		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+		goto out;
 
 	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
 		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
-		uint8_t *in = NULL;
 		uint8_t *out = p_state_buf;
-		AES_KEY enc_key;
 
 		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
 				ICP_QAT_HW_GALOIS_LEN_A_SZ +
 				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
-		in = rte_zmalloc("working mem for key",
-				ICP_QAT_HW_GALOIS_H_SZ, 16);
-		if (in == NULL) {
-			QAT_LOG(ERR, "Failed to alloc memory");
-			return -ENOMEM;
+		if (aes_ipsecmb_job(in, out, m, auth_key, auth_keylen)) {
+			ret = -EFAULT;
+			goto out;
 		}
 
-		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
-		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
-			&enc_key) != 0) {
-			return -EFAULT;
-		}
-		AES_encrypt(in, out, &enc_key);
 		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
 				ICP_QAT_HW_GALOIS_LEN_A_SZ +
 				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
-		rte_free(in);
-		return 0;
+		goto out;
 	}
 
 	block_size = qat_hash_get_block_size(hash_alg);
-	if (block_size < 0)
+	if (block_size < 0) {
+		free_mb_mgr(m);
 		return block_size;
-	/* init ipad and opad from key and xor with fixed values */
-	memset(ipad, 0, block_size);
-	memset(opad, 0, block_size);
+	}
 
 	if (auth_keylen > (unsigned int)block_size) {
 		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto out;
 	}
-
+	/* init ipad and opad from key and xor with fixed values */
+	memset(ipad, 0, block_size);
+	memset(opad, 0, block_size);
 	RTE_VERIFY(auth_keylen <= sizeof(ipad));
 	RTE_VERIFY(auth_keylen <= sizeof(opad));
-
 	rte_memcpy(ipad, auth_key, auth_keylen);
 	rte_memcpy(opad, auth_key, auth_keylen);
 
@@ -1762,11 +1848,10 @@  static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 	}
 
 	/* do partial hash of ipad and copy to state1 */
-	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
-		memset(ipad, 0, block_size);
-		memset(opad, 0, block_size);
+	if (partial_hash_compute_ipsec_mb(hash_alg, ipad, p_state_buf, m)) {
 		QAT_LOG(ERR, "ipad precompute failed");
-		return -EFAULT;
+		ret = -EFAULT;
+		goto out;
 	}
 
 	/*
@@ -1774,18 +1859,21 @@  static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 	 * Put the partial hash of opad state_len bytes after state1
 	 */
 	*p_state_len = qat_hash_get_state1_size(hash_alg);
-	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
-		memset(ipad, 0, block_size);
-		memset(opad, 0, block_size);
+	if (partial_hash_compute_ipsec_mb(hash_alg, opad,
+				p_state_buf + *p_state_len, m)) {
 		QAT_LOG(ERR, "opad precompute failed");
-		return -EFAULT;
+		ret = -EFAULT;
+		goto out;
 	}
 
+out:
 	/*  don't leave data lying around */
 	memset(ipad, 0, block_size);
 	memset(opad, 0, block_size);
-	return 0;
+	free_mb_mgr(m);
+	return ret;
 }
+#endif
 
 static void
 qat_sym_session_init_common_hdr(struct qat_sym_session *session)
@@ -2180,20 +2268,16 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 			break;
 		}
 		/* SHA-1 HMAC */
-		if (qat_ipsec_mb_lib) {
-#ifdef RTE_QAT_LIBIPSECMB
-			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA1,
-				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
-				cdesc->aes_cmac);
+#ifdef RTE_QAT_OPENSSL
+		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
+			authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac);
+
 #else
-			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
-			return -EFAULT;
+		ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA1,
+			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac);
 #endif
-		} else {
-			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
-				authkeylen, cdesc->cd_cur_ptr, &state1_size,
-				cdesc->aes_cmac);
-		}
 
 		if (ret) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
@@ -2211,21 +2295,15 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 			break;
 		}
 		/* SHA-224 HMAC */
-		if (qat_ipsec_mb_lib) {
-#ifdef RTE_QAT_LIBIPSECMB
-			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA224,
-				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
-				cdesc->aes_cmac);
+#ifdef RTE_QAT_OPENSSL
+		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
+			authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac);
 #else
-			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
-			return -EFAULT;
+		ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA224,
+			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac);
 #endif
-		} else {
-			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
-				authkeylen, cdesc->cd_cur_ptr, &state1_size,
-				cdesc->aes_cmac);
-		}
-
 		if (ret) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
@@ -2242,21 +2320,15 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 			break;
 		}
 		/* SHA-256 HMAC */
-		if (qat_ipsec_mb_lib) {
-#ifdef RTE_QAT_LIBIPSECMB
-			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA256,
-				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
-				cdesc->aes_cmac);
+#ifdef RTE_QAT_OPENSSL
+		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
+			authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac);
 #else
-			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
-			return -EFAULT;
+		ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA256,
+			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac);
 #endif
-		} else {
-			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
-				authkeylen, cdesc->cd_cur_ptr, &state1_size,
-				cdesc->aes_cmac);
-		}
-
 		if (ret) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
@@ -2273,21 +2345,15 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 			break;
 		}
 		/* SHA-384 HMAC */
-		if (qat_ipsec_mb_lib) {
-#ifdef RTE_QAT_LIBIPSECMB
-			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA384,
-				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
-				cdesc->aes_cmac);
+#ifdef RTE_QAT_OPENSSL
+		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
+			authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac);
 #else
-			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
-			return -EFAULT;
+		ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA384,
+			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac);
 #endif
-		} else {
-			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
-				authkeylen, cdesc->cd_cur_ptr, &state1_size,
-				cdesc->aes_cmac);
-		}
-
 		if (ret) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
@@ -2304,21 +2370,15 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 			break;
 		}
 		/* SHA-512 HMAC */
-		if (qat_ipsec_mb_lib) {
-#ifdef RTE_QAT_LIBIPSECMB
-			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA512,
-				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
-				cdesc->aes_cmac);
+#ifdef RTE_QAT_OPENSSL
+		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
+			authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac);
 #else
-			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
-			return -EFAULT;
+		ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_SHA512,
+			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac);
 #endif
-		} else {
-			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
-				authkeylen, cdesc->cd_cur_ptr, &state1_size,
-				cdesc->aes_cmac);
-		}
-
 		if (ret) {
 			QAT_LOG(ERR, "(SHA)precompute failed");
 			return -EFAULT;
@@ -2354,22 +2414,16 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 
 		if (cdesc->aes_cmac)
 			memset(cdesc->cd_cur_ptr, 0, state1_size);
-		if (qat_ipsec_mb_lib) {
-#ifdef RTE_QAT_LIBIPSECMB
-			ret = qat_sym_do_precomputes_ipsec_mb(
-				ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
-				authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
-				&state2_size, cdesc->aes_cmac);
+#ifdef RTE_QAT_OPENSSL
+		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
+			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+			&state2_size, cdesc->aes_cmac);
 #else
-			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
-			return -EFAULT;
+		ret = qat_sym_do_precomputes_ipsec_mb(
+			ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
+			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+			&state2_size, cdesc->aes_cmac);
 #endif
-		} else {
-			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
-				authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
-				&state2_size, cdesc->aes_cmac);
-		}
-
 		if (ret) {
 			cdesc->aes_cmac ? QAT_LOG(ERR,
 						  "(CMAC)precompute failed")
@@ -2382,21 +2436,15 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
 		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
-		if (qat_ipsec_mb_lib) {
-#ifdef RTE_QAT_LIBIPSECMB
-			ret = qat_sym_do_precomputes_ipsec_mb(cdesc->qat_hash_alg, authkey,
-				authkeylen, cdesc->cd_cur_ptr + state1_size,
-				&state2_size, cdesc->aes_cmac);
+#ifdef RTE_QAT_OPENSSL
+		ret = qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
+			authkeylen, cdesc->cd_cur_ptr + state1_size,
+			&state2_size, cdesc->aes_cmac);
 #else
-			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing ?");
-			return -EFAULT;
+		ret = qat_sym_do_precomputes_ipsec_mb(cdesc->qat_hash_alg, authkey,
+			authkeylen, cdesc->cd_cur_ptr + state1_size,
+			&state2_size, cdesc->aes_cmac);
 #endif
-		} else {
-			ret = qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
-				authkeylen, cdesc->cd_cur_ptr + state1_size,
-				&state2_size, cdesc->aes_cmac);
-		}
-
 		if (ret) {
 			QAT_LOG(ERR, "(GCM)precompute failed");
 			return -EFAULT;
@@ -2453,21 +2501,15 @@  int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
-		if (qat_ipsec_mb_lib) {
-#ifdef RTE_QAT_LIBIPSECMB
-			ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_MD5,
-				authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
-				cdesc->aes_cmac);
+#ifdef RTE_QAT_OPENSSL
+		ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
+			authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac);
 #else
-			QAT_LOG(ERR, "Intel IPSEC-MB LIB missing");
-			return -EFAULT;
+		ret = qat_sym_do_precomputes_ipsec_mb(ICP_QAT_HW_AUTH_ALGO_MD5,
+			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size,
+			cdesc->aes_cmac);
 #endif
-		} else {
-			ret = qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
-				authkeylen, cdesc->cd_cur_ptr, &state1_size,
-				cdesc->aes_cmac);
-		}
-
 		if (ret) {
 			QAT_LOG(ERR, "(MD5)precompute failed");
 			return -EFAULT;
@@ -2902,9 +2944,11 @@  qat_security_session_create(void *dev,
 		return -EINVAL;
 	}
 
+#ifdef RTE_QAT_OPENSSL
 #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
 	if (ossl_legacy_provider_load())
 		return -EINVAL;
+#endif
 #endif
 	ret = qat_sec_session_set_docsis_parameters(cdev, conf,
 			sess_private_data, SECURITY_GET_SESS_PRIV_IOVA(sess));
@@ -2913,8 +2957,10 @@  qat_security_session_create(void *dev,
 		return ret;
 	}
 
+#ifdef RTE_QAT_OPENSSL
 #if (OPENSSL_VERSION_NUMBER >= 0x30000000L)
 	ossl_legacy_provider_unload();
+#endif
 #endif
 	return 0;
 }
@@ -2927,8 +2973,13 @@  qat_security_session_destroy(void *dev __rte_unused,
 	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
 
 	if (sess_priv) {
+#ifdef RTE_QAT_OPENSSL
 		if (s->bpi_ctx)
 			bpi_cipher_ctx_free(s->bpi_ctx);
+#else
+		if (s->mb_mgr)
+			free_mb_mgr(s->mb_mgr);
+#endif
 		memset(s, 0, qat_sym_session_get_private_size(dev));
 	}
 
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 9b5d11ac88..ee916b2814 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -15,6 +15,14 @@ 
 #include "icp_qat_fw.h"
 #include "icp_qat_fw_la.h"
 
+#ifndef RTE_QAT_OPENSSL
+#if defined(RTE_ARCH_ARM)
+#include <ipsec-mb.h>
+#else
+#include <intel-ipsec-mb.h>
+#endif
+#endif
+
 /*
  * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
  * Integrity Key (IK)
@@ -129,6 +137,12 @@  struct qat_sym_session {
 	uint32_t slice_types;
 	enum qat_sym_proto_flag qat_proto_flag;
 	qat_sym_build_request_t build_request[2];
+#ifndef RTE_QAT_OPENSSL
+	IMB_MGR *mb_mgr;
+#endif
+	uint64_t expkey[4*15];
+	uint32_t dust[4*15];
+	uint8_t docsis_key_len;
 };
 
 int
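For reference, a minimal standalone sketch (not part of the patch) of the IPsec MB calls that the new
ipsec_mb_ctx_init()/bpi_cipher_ipsec() helpers are built on, assuming intel-ipsec-mb >= 1.4 headers and
library are installed; the key, IV and data values below are placeholders for illustration only:

#include <stdint.h>
#include <string.h>
#include <intel-ipsec-mb.h>

int main(void)
{
	uint8_t key[16], iv[16], runt_in[11], runt_out[11];
	/* expanded-key buffers sized as in struct qat_sym_session */
	DECLARE_ALIGNED(uint64_t expkey[4 * 15], 16);
	DECLARE_ALIGNED(uint32_t dust[4 * 15], 16);
	IMB_MGR *m = alloc_mb_mgr(0);

	if (m == NULL)
		return 1;
	init_mb_mgr_auto(m, NULL);

	memset(key, 0x11, sizeof(key));
	memset(iv, 0x22, sizeof(iv));
	memset(runt_in, 0x33, sizeof(runt_in));

	/* session setup: expand the AES-128 DOCSIS BPI key once */
	IMB_AES_KEYEXP_128(m, key, expkey, dust);

	/* per op: CFB-process the trailing partial (runt) block */
	IMB_AES128_CFB_ONE(m, runt_out, runt_in, (uint64_t *)iv, expkey,
			sizeof(runt_in));

	free_mb_mgr(m);
	return 0;
}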