@@ -453,11 +453,14 @@ test_cryptodev_asym_op(struct crypto_testsuite_params_asym *ts_params,
ret = rte_cryptodev_asym_session_create(dev_id, &xform_tc,
ts_params->session_mpool, &sess);
if (ret < 0) {
- snprintf(test_msg, ASYM_TEST_MSG_LEN,
- "line %u "
- "FAILED: %s", __LINE__,
- "Session creation failed");
status = (ret == -ENOTSUP) ? TEST_SKIPPED : TEST_FAILED;
+ if (status == TEST_SKIPPED)
+ snprintf(test_msg, ASYM_TEST_MSG_LEN, "SKIPPED");
+ else
+ snprintf(test_msg, ASYM_TEST_MSG_LEN,
+ "line %u "
+ "FAILED: %s", __LINE__,
+ "Session creation failed");
goto error_exit;
}
@@ -489,6 +492,11 @@ test_cryptodev_asym_op(struct crypto_testsuite_params_asym *ts_params,
}
if (test_cryptodev_asym_ver(op, &xform_tc, data_tc, result_op) != TEST_SUCCESS) {
+ if (result_op->status == RTE_CRYPTO_OP_STATUS_INVALID_ARGS) {
+ snprintf(test_msg, ASYM_TEST_MSG_LEN, "SESSIONLESS SKIPPED");
+ status = TEST_SKIPPED;
+ goto error_exit;
+ }
snprintf(test_msg, ASYM_TEST_MSG_LEN,
"line %u FAILED: %s",
__LINE__, "Verification failed ");
@@ -619,13 +627,19 @@ test_one_by_one(void)
/* Go through all test cases */
test_index = 0;
for (i = 0; i < test_vector.size; i++) {
- if (test_one_case(test_vector.address[i], 0) != TEST_SUCCESS)
+ status = test_one_case(test_vector.address[i], 0);
+ if (status == TEST_SUCCESS || status == TEST_SKIPPED)
+ status = TEST_SUCCESS;
+ else
status = TEST_FAILED;
}
+
if (sessionless) {
for (i = 0; i < test_vector.size; i++) {
- if (test_one_case(test_vector.address[i], 1)
- != TEST_SUCCESS)
+ status = test_one_case(test_vector.address[i], 1);
+ if (status == TEST_SUCCESS || status == TEST_SKIPPED)
+ status = TEST_SUCCESS;
+ else
status = TEST_FAILED;
}
}
@@ -272,6 +272,20 @@ allocated while for GEN1 devices, 12 buffers are allocated, plus 1472 bytes over
larger than the input size).
+Running QAT PMD with insecure crypto algorithms
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A few insecure crypto algorithms have been deprecated from the QAT drivers. This is
+reflected in the DPDK QAT PMD, which by default disables all of the insecure crypto
+algorithms on Gen 1, 2, 3 and 4 devices. A PMD parameter is used to enable them again.
+
+- qat_legacy_capa
+
+To use this feature the user must set the parameter on process start as a device additional parameter::
+
+ -a b1:01.2,qat_legacy_capa=1
+
+
Running QAT PMD with minimum threshold for burst size
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -371,6 +371,7 @@ static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct qat_pci_device *qat_pci_dev;
struct qat_dev_hw_spec_funcs *ops_hw;
struct qat_dev_cmd_param qat_dev_cmd_param[] = {
+ { QAT_LEGACY_CAPA, 0 },
{ QAT_IPSEC_MB_LIB, 0 },
{ SYM_ENQ_THRESHOLD_NAME, 0 },
{ ASYM_ENQ_THRESHOLD_NAME, 0 },
@@ -17,13 +17,14 @@
#define QAT_DEV_NAME_MAX_LEN 64
+#define QAT_LEGACY_CAPA "qat_legacy_capa"
#define QAT_IPSEC_MB_LIB "qat_ipsec_mb_lib"
#define SYM_ENQ_THRESHOLD_NAME "qat_sym_enq_threshold"
#define ASYM_ENQ_THRESHOLD_NAME "qat_asym_enq_threshold"
#define COMP_ENQ_THRESHOLD_NAME "qat_comp_enq_threshold"
#define SYM_CIPHER_CRC_ENABLE_NAME "qat_sym_cipher_crc_enable"
#define QAT_CMD_SLICE_MAP "qat_cmd_slice_disable"
-#define QAT_CMD_SLICE_MAP_POS 5
+#define QAT_CMD_SLICE_MAP_POS 6
#define MAX_QP_THRESHOLD_SIZE 32
/**
@@ -12,10 +12,41 @@
#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
-static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen2[] = {
+static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen2[] = {
+ QAT_SYM_CIPHER_CAP(DES_CBC,
+ CAP_SET(block_size, 8),
+ CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
+ QAT_SYM_CIPHER_CAP(3DES_CBC,
+ CAP_SET(block_size, 8),
+ CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
+ QAT_SYM_CIPHER_CAP(3DES_CTR,
+ CAP_SET(block_size, 8),
+ CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
QAT_SYM_PLAIN_AUTH_CAP(SHA1,
CAP_SET(block_size, 64),
CAP_RNG(digest_size, 1, 20, 1)),
+ QAT_SYM_AUTH_CAP(SHA224,
+ CAP_SET(block_size, 64),
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA224_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA1_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(MD5_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
+ CAP_SET(block_size, 8),
+ CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
+};
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen2[] = {
QAT_SYM_AEAD_CAP(AES_GCM,
CAP_SET(block_size, 16),
CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
@@ -32,10 +63,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen2[] = {
CAP_SET(block_size, 16),
CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_AUTH_CAP(SHA224,
- CAP_SET(block_size, 64),
- CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
QAT_SYM_AUTH_CAP(SHA256,
CAP_SET(block_size, 64),
CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
@@ -51,14 +78,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen2[] = {
QAT_SYM_PLAIN_AUTH_CAP(SHA3_256,
CAP_SET(block_size, 136),
CAP_RNG(digest_size, 32, 32, 0)),
- QAT_SYM_AUTH_CAP(SHA1_HMAC,
- CAP_SET(block_size, 64),
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_AUTH_CAP(SHA224_HMAC,
- CAP_SET(block_size, 64),
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
QAT_SYM_AUTH_CAP(SHA256_HMAC,
CAP_SET(block_size, 64),
CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
@@ -71,10 +90,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen2[] = {
CAP_SET(block_size, 128),
CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_AUTH_CAP(MD5_HMAC,
- CAP_SET(block_size, 64),
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
CAP_SET(block_size, 16),
CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
@@ -112,18 +127,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen2[] = {
QAT_SYM_CIPHER_CAP(NULL,
CAP_SET(block_size, 1),
CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_CIPHER_CAP(3DES_CBC,
- CAP_SET(block_size, 8),
- CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
- QAT_SYM_CIPHER_CAP(3DES_CTR,
- CAP_SET(block_size, 8),
- CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
- QAT_SYM_CIPHER_CAP(DES_CBC,
- CAP_SET(block_size, 8),
- CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
- QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
- CAP_SET(block_size, 8),
- CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
QAT_SYM_CIPHER_CAP(ZUC_EEA3,
CAP_SET(block_size, 16),
CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
@@ -283,8 +286,13 @@ qat_sym_crypto_cap_get_gen2(struct qat_cryptodev_private *internals,
const char *capa_memz_name,
const uint16_t __rte_unused slice_map)
{
- const uint32_t size = sizeof(qat_sym_crypto_caps_gen2);
- uint32_t i;
+ uint32_t legacy_capa_num;
+ uint32_t size = sizeof(qat_sym_crypto_caps_gen2);
+ uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen2);
+	legacy_capa_num = legacy_size / sizeof(struct rte_cryptodev_capabilities);
+
+ if (unlikely(qat_legacy_capa))
+ size = size + legacy_size;
internals->capa_mz = rte_memzone_lookup(capa_memz_name);
if (internals->capa_mz == NULL) {
@@ -300,17 +308,15 @@ qat_sym_crypto_cap_get_gen2(struct qat_cryptodev_private *internals,
struct rte_cryptodev_capabilities *addr =
(struct rte_cryptodev_capabilities *)
internals->capa_mz->addr;
- const struct rte_cryptodev_capabilities *capabilities =
- qat_sym_crypto_caps_gen2;
- const uint32_t capa_num =
- size / sizeof(struct rte_cryptodev_capabilities);
- uint32_t curr_capa = 0;
-
- for (i = 0; i < capa_num; i++) {
- memcpy(addr + curr_capa, capabilities + i,
- sizeof(struct rte_cryptodev_capabilities));
- curr_capa++;
+	const struct rte_cryptodev_capabilities *capabilities;
+
+ if (unlikely(qat_legacy_capa)) {
+ capabilities = qat_sym_crypto_legacy_caps_gen2;
+ memcpy(addr, capabilities, legacy_size);
+ addr += legacy_capa_num;
}
+ capabilities = qat_sym_crypto_caps_gen2;
+ memcpy(addr, capabilities, sizeof(qat_sym_crypto_caps_gen2));
internals->qat_dev_capabilities = internals->capa_mz->addr;
return 0;
@@ -10,10 +10,45 @@
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"
-static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen3[] = {
+ QAT_SYM_CIPHER_CAP(3DES_CBC,
+ CAP_SET(block_size, 8),
+ CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
+ QAT_SYM_CIPHER_CAP(DES_CBC,
+ CAP_SET(block_size, 8),
+ CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
+ QAT_SYM_CIPHER_CAP(3DES_CTR,
+ CAP_SET(block_size, 8),
+ CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
QAT_SYM_PLAIN_AUTH_CAP(SHA1,
CAP_SET(block_size, 64),
CAP_RNG(digest_size, 1, 20, 1)),
+ QAT_SYM_AUTH_CAP(SHA224,
+ CAP_SET(block_size, 64),
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA224_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA1_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(MD5_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
+ CAP_SET(block_size, 8),
+ CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
+ QAT_SYM_PLAIN_AUTH_CAP(SHA3_224,
+ CAP_SET(block_size, 144),
+ CAP_RNG(digest_size, 28, 28, 0)),
+};
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
QAT_SYM_AEAD_CAP(AES_GCM,
CAP_SET(block_size, 16),
CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
@@ -30,10 +65,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
CAP_SET(block_size, 16),
CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_AUTH_CAP(SHA224,
- CAP_SET(block_size, 64),
- CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
QAT_SYM_AUTH_CAP(SHA256,
CAP_SET(block_size, 64),
CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
@@ -46,9 +77,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
CAP_SET(block_size, 128),
CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_PLAIN_AUTH_CAP(SHA3_224,
- CAP_SET(block_size, 144),
- CAP_RNG(digest_size, 28, 28, 0)),
QAT_SYM_PLAIN_AUTH_CAP(SHA3_256,
CAP_SET(block_size, 136),
CAP_RNG(digest_size, 32, 32, 0)),
@@ -58,14 +86,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
QAT_SYM_PLAIN_AUTH_CAP(SHA3_512,
CAP_SET(block_size, 72),
CAP_RNG(digest_size, 64, 64, 0)),
- QAT_SYM_AUTH_CAP(SHA1_HMAC,
- CAP_SET(block_size, 64),
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_AUTH_CAP(SHA224_HMAC,
- CAP_SET(block_size, 64),
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
QAT_SYM_AUTH_CAP(SHA256_HMAC,
CAP_SET(block_size, 64),
CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
@@ -78,10 +98,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
CAP_SET(block_size, 128),
CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_AUTH_CAP(MD5_HMAC,
- CAP_SET(block_size, 64),
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
CAP_SET(block_size, 16),
CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
@@ -119,18 +135,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
QAT_SYM_CIPHER_CAP(NULL,
CAP_SET(block_size, 1),
CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_CIPHER_CAP(3DES_CBC,
- CAP_SET(block_size, 8),
- CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
- QAT_SYM_CIPHER_CAP(3DES_CTR,
- CAP_SET(block_size, 8),
- CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
- QAT_SYM_CIPHER_CAP(DES_CBC,
- CAP_SET(block_size, 8),
- CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
- QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
- CAP_SET(block_size, 8),
- CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
QAT_SYM_CIPHER_CAP(ZUC_EEA3,
CAP_SET(block_size, 16),
CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
@@ -188,8 +192,17 @@ static int
qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
const char *capa_memz_name, const uint16_t slice_map)
{
- const uint32_t size = sizeof(qat_sym_crypto_caps_gen3);
- uint32_t i;
+
+ uint32_t i, iter = 0;
+ uint32_t curr_capa = 0;
+ uint32_t capa_num, legacy_capa_num;
+ uint32_t size = sizeof(qat_sym_crypto_caps_gen3);
+ uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen3);
+	capa_num = size / sizeof(struct rte_cryptodev_capabilities);
+	legacy_capa_num = legacy_size / sizeof(struct rte_cryptodev_capabilities);
+
+ if (unlikely(qat_legacy_capa))
+ size = size + legacy_size;
internals->capa_mz = rte_memzone_lookup(capa_memz_name);
if (internals->capa_mz == NULL) {
@@ -205,30 +218,40 @@ qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
struct rte_cryptodev_capabilities *addr =
(struct rte_cryptodev_capabilities *)
internals->capa_mz->addr;
- const struct rte_cryptodev_capabilities *capabilities =
- qat_sym_crypto_caps_gen3;
- const uint32_t capa_num =
- size / sizeof(struct rte_cryptodev_capabilities);
- uint32_t curr_capa = 0;
+	const struct rte_cryptodev_capabilities *capabilities;
- for (i = 0; i < capa_num; i++) {
+ if (unlikely(qat_legacy_capa)) {
+ capabilities = qat_sym_crypto_legacy_caps_gen3;
+ capa_num += legacy_capa_num;
+ } else {
+ capabilities = qat_sym_crypto_caps_gen3;
+ }
+
+ for (i = 0; i < capa_num; i++, iter++) {
if (slice_map & ICP_ACCEL_MASK_SM4_SLICE && (
- check_cipher_capa(&capabilities[i],
+ check_cipher_capa(&capabilities[iter],
RTE_CRYPTO_CIPHER_SM4_ECB) ||
- check_cipher_capa(&capabilities[i],
+ check_cipher_capa(&capabilities[iter],
RTE_CRYPTO_CIPHER_SM4_CBC) ||
- check_cipher_capa(&capabilities[i],
+ check_cipher_capa(&capabilities[iter],
RTE_CRYPTO_CIPHER_SM4_CTR))) {
continue;
}
if (slice_map & ICP_ACCEL_MASK_SM3_SLICE && (
- check_auth_capa(&capabilities[i],
+ check_auth_capa(&capabilities[iter],
RTE_CRYPTO_AUTH_SM3))) {
continue;
}
- memcpy(addr + curr_capa, capabilities + i,
+ memcpy(addr + curr_capa, capabilities + iter,
sizeof(struct rte_cryptodev_capabilities));
curr_capa++;
+
+		if (unlikely(qat_legacy_capa) && (i == legacy_capa_num - 1)) {
+ capabilities = qat_sym_crypto_caps_gen3;
+ addr += curr_capa;
+ curr_capa = 0;
+ iter = -1;
+ }
}
internals->qat_dev_capabilities = internals->capa_mz->addr;
@@ -10,18 +10,32 @@
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"
-static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen4[] = {
- QAT_SYM_CIPHER_CAP(AES_CBC,
- CAP_SET(block_size, 16),
- CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
- QAT_SYM_AUTH_CAP(SHA1_HMAC,
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen4[] = {
+ QAT_SYM_PLAIN_AUTH_CAP(SHA1,
CAP_SET(block_size, 64),
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
+ CAP_RNG(digest_size, 1, 20, 1)),
+ QAT_SYM_AUTH_CAP(SHA224,
+ CAP_SET(block_size, 64),
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
QAT_SYM_AUTH_CAP(SHA224_HMAC,
CAP_SET(block_size, 64),
CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA1_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_CIPHER_CAP(SM4_ECB,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 0, 0, 0)),
+};
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen4[] = {
+ QAT_SYM_CIPHER_CAP(AES_CBC,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
QAT_SYM_AUTH_CAP(SHA256_HMAC,
CAP_SET(block_size, 64),
CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
@@ -52,13 +66,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen4[] = {
QAT_SYM_CIPHER_CAP(NULL,
CAP_SET(block_size, 1),
CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_PLAIN_AUTH_CAP(SHA1,
- CAP_SET(block_size, 64),
- CAP_RNG(digest_size, 1, 20, 1)),
- QAT_SYM_AUTH_CAP(SHA224,
- CAP_SET(block_size, 64),
- CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
QAT_SYM_AUTH_CAP(SHA256,
CAP_SET(block_size, 64),
CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
@@ -91,9 +98,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen4[] = {
CAP_RNG(key_size, 32, 32, 0),
CAP_RNG(digest_size, 16, 16, 0),
CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
- QAT_SYM_CIPHER_CAP(SM4_ECB,
- CAP_SET(block_size, 16),
- CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 0, 0, 0)),
QAT_SYM_CIPHER_CAP(SM4_CBC,
CAP_SET(block_size, 16),
CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
@@ -111,8 +115,13 @@ qat_sym_crypto_cap_get_gen4(struct qat_cryptodev_private *internals,
const char *capa_memz_name,
const uint16_t __rte_unused slice_map)
{
- const uint32_t size = sizeof(qat_sym_crypto_caps_gen4);
- uint32_t i;
+ uint32_t legacy_capa_num;
+ uint32_t size = sizeof(qat_sym_crypto_caps_gen4);
+ uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen4);
+	legacy_capa_num = legacy_size / sizeof(struct rte_cryptodev_capabilities);
+
+ if (unlikely(qat_legacy_capa))
+ size = size + legacy_size;
internals->capa_mz = rte_memzone_lookup(capa_memz_name);
if (internals->capa_mz == NULL) {
@@ -128,17 +137,16 @@ qat_sym_crypto_cap_get_gen4(struct qat_cryptodev_private *internals,
struct rte_cryptodev_capabilities *addr =
(struct rte_cryptodev_capabilities *)
internals->capa_mz->addr;
- const struct rte_cryptodev_capabilities *capabilities =
- qat_sym_crypto_caps_gen4;
- const uint32_t capa_num =
- size / sizeof(struct rte_cryptodev_capabilities);
- uint32_t curr_capa = 0;
-
- for (i = 0; i < capa_num; i++) {
- memcpy(addr + curr_capa, capabilities + i,
- sizeof(struct rte_cryptodev_capabilities));
- curr_capa++;
+
+	const struct rte_cryptodev_capabilities *capabilities;
+
+ if (unlikely(qat_legacy_capa)) {
+ capabilities = qat_sym_crypto_legacy_caps_gen4;
+ memcpy(addr, capabilities, legacy_size);
+ addr += legacy_capa_num;
}
+ capabilities = qat_sym_crypto_caps_gen4;
+ memcpy(addr, capabilities, sizeof(qat_sym_crypto_caps_gen4));
internals->qat_dev_capabilities = internals->capa_mz->addr;
return 0;
@@ -16,10 +16,41 @@
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"
-static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen1[] = {
+static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen1[] = {
+ QAT_SYM_CIPHER_CAP(DES_CBC,
+ CAP_SET(block_size, 8),
+ CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
+ QAT_SYM_CIPHER_CAP(3DES_CBC,
+ CAP_SET(block_size, 8),
+ CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
+ QAT_SYM_CIPHER_CAP(3DES_CTR,
+ CAP_SET(block_size, 8),
+ CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
QAT_SYM_PLAIN_AUTH_CAP(SHA1,
CAP_SET(block_size, 64),
CAP_RNG(digest_size, 1, 20, 1)),
+ QAT_SYM_AUTH_CAP(SHA224,
+ CAP_SET(block_size, 64),
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA1_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA224_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(MD5_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
+ CAP_SET(block_size, 8),
+ CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
+};
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen1[] = {
QAT_SYM_AEAD_CAP(AES_GCM,
CAP_SET(block_size, 16),
CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
@@ -36,10 +67,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen1[] = {
CAP_SET(block_size, 16),
CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_AUTH_CAP(SHA224,
- CAP_SET(block_size, 64),
- CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
QAT_SYM_AUTH_CAP(SHA256,
CAP_SET(block_size, 64),
CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
@@ -52,14 +79,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen1[] = {
CAP_SET(block_size, 128),
CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_AUTH_CAP(SHA1_HMAC,
- CAP_SET(block_size, 64),
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_AUTH_CAP(SHA224_HMAC,
- CAP_SET(block_size, 64),
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
QAT_SYM_AUTH_CAP(SHA256_HMAC,
CAP_SET(block_size, 64),
CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
@@ -72,10 +91,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen1[] = {
CAP_SET(block_size, 128),
CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_AUTH_CAP(MD5_HMAC,
- CAP_SET(block_size, 64),
- CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
- CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
CAP_SET(block_size, 16),
CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
@@ -113,18 +128,6 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen1[] = {
QAT_SYM_CIPHER_CAP(NULL,
CAP_SET(block_size, 1),
CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
- QAT_SYM_CIPHER_CAP(3DES_CBC,
- CAP_SET(block_size, 8),
- CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
- QAT_SYM_CIPHER_CAP(3DES_CTR,
- CAP_SET(block_size, 8),
- CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
- QAT_SYM_CIPHER_CAP(DES_CBC,
- CAP_SET(block_size, 8),
- CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
- QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
- CAP_SET(block_size, 8),
- CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
@@ -157,8 +160,14 @@ qat_sym_crypto_cap_get_gen1(struct qat_cryptodev_private *internals,
const char *capa_memz_name,
const uint16_t __rte_unused slice_map)
{
- const uint32_t size = sizeof(qat_sym_crypto_caps_gen1);
- uint32_t i;
+
+ uint32_t legacy_capa_num;
+ uint32_t size = sizeof(qat_sym_crypto_caps_gen1);
+ uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen1);
+	legacy_capa_num = legacy_size / sizeof(struct rte_cryptodev_capabilities);
+
+ if (unlikely(qat_legacy_capa))
+ size = size + legacy_size;
internals->capa_mz = rte_memzone_lookup(capa_memz_name);
if (internals->capa_mz == NULL) {
@@ -174,17 +183,16 @@ qat_sym_crypto_cap_get_gen1(struct qat_cryptodev_private *internals,
struct rte_cryptodev_capabilities *addr =
(struct rte_cryptodev_capabilities *)
internals->capa_mz->addr;
- const struct rte_cryptodev_capabilities *capabilities =
- qat_sym_crypto_caps_gen1;
- const uint32_t capa_num =
- size / sizeof(struct rte_cryptodev_capabilities);
- uint32_t curr_capa = 0;
-
- for (i = 0; i < capa_num; i++) {
- memcpy(addr + curr_capa, capabilities + i,
- sizeof(struct rte_cryptodev_capabilities));
- curr_capa++;
+
+	const struct rte_cryptodev_capabilities *capabilities;
+
+ if (unlikely(qat_legacy_capa)) {
+ capabilities = qat_sym_crypto_legacy_caps_gen1;
+ memcpy(addr, capabilities, legacy_size);
+ addr += legacy_capa_num;
}
+ capabilities = qat_sym_crypto_caps_gen1;
+ memcpy(addr, capabilities, sizeof(qat_sym_crypto_caps_gen1));
internals->qat_dev_capabilities = internals->capa_mz->addr;
return 0;
@@ -15,6 +15,8 @@
#include "qat_pke.h"
#include "qat_ec.h"
+#define RSA_MODULUS_2048_BITS 2048
+
uint8_t qat_asym_driver_id;
struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
@@ -913,8 +915,12 @@ asym_set_input(struct icp_qat_fw_pke_request *qat_req,
return modexp_set_input(qat_req, cookie, asym_op, xform);
case RTE_CRYPTO_ASYM_XFORM_MODINV:
return modinv_set_input(qat_req, cookie, asym_op, xform);
- case RTE_CRYPTO_ASYM_XFORM_RSA:
+ case RTE_CRYPTO_ASYM_XFORM_RSA:{
+		if (unlikely((xform->rsa.n.length * 8 < RSA_MODULUS_2048_BITS)
+ && (qat_legacy_capa == 0)))
+ return RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
return rsa_set_input(qat_req, cookie, asym_op, xform);
+ }
case RTE_CRYPTO_ASYM_XFORM_ECDSA:
return ecdsa_set_input(qat_req, cookie, asym_op, xform);
case RTE_CRYPTO_ASYM_XFORM_ECPM:
@@ -1273,8 +1279,14 @@ qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
case RTE_CRYPTO_ASYM_XFORM_MODINV:
ret = session_set_modinv(qat_session, xform);
break;
- case RTE_CRYPTO_ASYM_XFORM_RSA:
+ case RTE_CRYPTO_ASYM_XFORM_RSA: {
+		if (unlikely((xform->rsa.n.length * 8 < RSA_MODULUS_2048_BITS)
+ && (qat_legacy_capa == 0))) {
+ ret = -ENOTSUP;
+ return ret;
+ }
ret = session_set_rsa(qat_session, xform);
+ }
break;
case RTE_CRYPTO_ASYM_XFORM_ECDSA:
case RTE_CRYPTO_ASYM_XFORM_ECPM:
@@ -11,6 +11,7 @@
extern uint8_t qat_sym_driver_id;
extern uint8_t qat_asym_driver_id;
+extern int qat_legacy_capa;
/**
* helper macro to set cryptodev capability range
@@ -17,6 +17,7 @@
uint8_t qat_sym_driver_id;
int qat_ipsec_mb_lib;
+int qat_legacy_capa;
struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
@@ -285,6 +286,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
qat_dev_cmd_param[i].val;
if (!strcmp(qat_dev_cmd_param[i].name, QAT_IPSEC_MB_LIB))
qat_ipsec_mb_lib = qat_dev_cmd_param[i].val;
+ if (!strcmp(qat_dev_cmd_param[i].name, QAT_LEGACY_CAPA))
+ qat_legacy_capa = qat_dev_cmd_param[i].val;
if (!strcmp(qat_dev_cmd_param[i].name, QAT_CMD_SLICE_MAP))
slice_map = qat_dev_cmd_param[i].val;
i++;