@@ -134,6 +134,7 @@ New Features
* **Updated Intel QuickAssist Technology driver.**
* Enabled support for new QAT GEN3 (578a) devices in QAT crypto driver.
+ * Enabled ZUC-256 cipher and auth algorithms for wireless-slice-enabled GEN3 devices.
* **Updated Marvell cnxk crypto driver.**
@@ -75,7 +75,8 @@ struct icp_qat_fw_comn_req_hdr {
uint8_t service_type;
uint8_t hdr_flags;
uint16_t serv_specif_flags;
- uint16_t comn_req_flags;
+ uint8_t comn_req_flags;
+ uint8_t ext_flags;
};
struct icp_qat_fw_comn_req_rqpars {
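The split in LW1 is size-neutral. As a quick standalone check (using only the fields visible in this hunk, with hypothetical struct names; the real header has additional leading members):

```c
#include <stdint.h>

struct hdr_old {
	uint8_t service_type;
	uint8_t hdr_flags;
	uint16_t serv_specif_flags;
	uint16_t comn_req_flags;
};

struct hdr_new {
	uint8_t service_type;
	uint8_t hdr_flags;
	uint16_t serv_specif_flags;
	uint8_t comn_req_flags;	/* low byte of the old 16-bit field */
	uint8_t ext_flags;	/* new: carries the extended-protocol bits */
};

_Static_assert(sizeof(struct hdr_old) == sizeof(struct hdr_new),
	"splitting comn_req_flags must not grow the request header");
```

On a little-endian host, ext_flags occupies what used to be the upper byte of the 16-bit field, which is why the QAT_COMN_EXT_FLAGS_* definitions at bit position 8 can simply be deleted in the next hunk.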
@@ -176,9 +177,6 @@ struct icp_qat_fw_comn_resp {
#define QAT_COMN_PTR_TYPE_SGL 0x1
#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
-#define QAT_COMN_EXT_FLAGS_BITPOS 8
-#define QAT_COMN_EXT_FLAGS_MASK 0x1
-#define QAT_COMN_EXT_FLAGS_USED 0x1
#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
@@ -81,6 +81,15 @@ struct icp_qat_fw_la_bulk_req {
#define ICP_QAT_FW_LA_PARTIAL_END 2
#define QAT_LA_PARTIAL_BITPOS 0
#define QAT_LA_PARTIAL_MASK 0x3
+#define QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_BITPOS 0
+#define QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS 1
+#define QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_MASK 0x1
+#define QAT_LA_USE_WCP_SLICE 1
+#define QAT_LA_USE_WCP_SLICE_BITPOS 2
+#define QAT_LA_USE_WCP_SLICE_MASK 0x1
+#define QAT_LA_USE_WAT_SLICE_BITPOS 3
+#define QAT_LA_USE_WAT_SLICE 1
+#define QAT_LA_USE_WAT_SLICE_MASK 0x1
#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
cmp_auth, ret_auth, update_state, \
ciph_iv, ciphcfg, partial) \
@@ -188,6 +197,21 @@ struct icp_qat_fw_la_bulk_req {
QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
QAT_LA_PARTIAL_MASK)
+#define ICP_QAT_FW_USE_EXTENDED_PROTOCOL_FLAGS_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, \
+ QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_BITPOS, \
+ QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_MASK)
+
+#define ICP_QAT_FW_USE_WCP_SLICE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, \
+ QAT_LA_USE_WCP_SLICE_BITPOS, \
+ QAT_LA_USE_WCP_SLICE_MASK)
+
+#define ICP_QAT_FW_USE_WAT_SLICE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, \
+ QAT_LA_USE_WAT_SLICE_BITPOS, \
+ QAT_LA_USE_WAT_SLICE_MASK)
+
#define QAT_FW_LA_MODE2 1
#define QAT_FW_LA_NO_MODE2 0
#define QAT_FW_LA_MODE2_MASK 0x1
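All three setters go through QAT_FIELD_SET, which is defined outside this patch in icp_qat_fw.h. A close approximation of that macro, plus a small demo of the resulting ext_flags byte (bit positions and masks copied from the definitions above):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* approximation of QAT_FIELD_SET (assumed, not quoted from the header):
 * clear the field at bitpos, then OR in the masked value */
#define QAT_FIELD_SET(flags, val, bitpos, mask) \
	((flags) = (((flags) & ~((mask) << (bitpos))) | \
		(((val) & (mask)) << (bitpos))))

#define QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_BITPOS 0
#define QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_MASK 0x1
#define QAT_LA_USE_WCP_SLICE_BITPOS 2
#define QAT_LA_USE_WCP_SLICE_MASK 0x1
#define QAT_LA_USE_WAT_SLICE_BITPOS 3
#define QAT_LA_USE_WAT_SLICE_MASK 0x1

int main(void)
{
	uint8_t ext_flags = 0;

	QAT_FIELD_SET(ext_flags, 1, QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_BITPOS,
		QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_MASK);
	QAT_FIELD_SET(ext_flags, 1, QAT_LA_USE_WCP_SLICE_BITPOS,
		QAT_LA_USE_WCP_SLICE_MASK);
	QAT_FIELD_SET(ext_flags, 1, QAT_LA_USE_WAT_SLICE_BITPOS,
		QAT_LA_USE_WAT_SLICE_MASK);

	assert(ext_flags == 0x0d); /* bits 0, 2 and 3 set */
	printf("ext_flags = 0x%02x\n", ext_flags);
	return 0;
}
```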
@@ -21,7 +21,8 @@ enum icp_qat_slice_mask {
ICP_ACCEL_MASK_CRYPTO1_SLICE = 0x100,
ICP_ACCEL_MASK_CRYPTO2_SLICE = 0x200,
ICP_ACCEL_MASK_SM3_SLICE = 0x400,
- ICP_ACCEL_MASK_SM4_SLICE = 0x800
+ ICP_ACCEL_MASK_SM4_SLICE = 0x800,
+ ICP_ACCEL_MASK_ZUC_256_SLICE = 0x2000,
};
enum icp_qat_hw_ae_id {
@@ -71,7 +72,16 @@ enum icp_qat_hw_auth_algo {
ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
ICP_QAT_HW_AUTH_ALGO_SHA3_384 = 18,
ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
- ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+ ICP_QAT_HW_AUTH_ALGO_RESERVED = 20,
+ ICP_QAT_HW_AUTH_ALGO_RESERVED1 = 21,
+ ICP_QAT_HW_AUTH_ALGO_RESERVED2 = 22,
+ ICP_QAT_HW_AUTH_ALGO_RESERVED3 = 22, /* NB: shares the value 22 with RESERVED2 */
+ ICP_QAT_HW_AUTH_ALGO_RESERVED4 = 23,
+ ICP_QAT_HW_AUTH_ALGO_RESERVED5 = 24,
+ ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32 = 25,
+ ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64 = 26,
+ ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128 = 27,
+ ICP_QAT_HW_AUTH_ALGO_DELIMITER = 28
};
enum icp_qat_hw_auth_mode {
@@ -167,6 +177,9 @@ struct icp_qat_hw_auth_setup {
#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_256_MAC_32_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_256_MAC_64_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_256_MAC_128_STATE1_SZ 16
#define ICP_QAT_HW_NULL_STATE2_SZ 32
#define ICP_QAT_HW_MD5_STATE2_SZ 16
@@ -191,6 +204,7 @@ struct icp_qat_hw_auth_setup {
#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_ZUC_256_STATE2_SZ 56
#define ICP_QAT_HW_GALOIS_H_SZ 16
#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
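The three new state1 sizes are just the MAC length rounded up to the PMD's 8-byte alignment unit. A sketch, with QAT_ROUND_UP standing in for the driver's QAT_HW_ROUND_UP (an assumption about its definition, not a quote):

```c
/* assumed equivalent of QAT_HW_ROUND_UP, for this check only */
#define QAT_ROUND_UP(v, a) (((v) + (a) - 1) & ~((a) - 1))

/* 4- and 8-byte MACs share an 8-byte state1; the 16-byte MAC needs 16 */
_Static_assert(QAT_ROUND_UP(4, 8) == 8, "MAC-32 state1");
_Static_assert(QAT_ROUND_UP(8, 8) == 8, "MAC-64 state1");
_Static_assert(QAT_ROUND_UP(16, 8) == 16, "MAC-128 state1");
```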
@@ -228,7 +242,8 @@ enum icp_qat_hw_cipher_algo {
ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
ICP_QAT_HW_CIPHER_ALGO_SM4 = 10,
ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 = 11,
- ICP_QAT_HW_CIPHER_DELIMITER = 12
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_256 = 12,
+ ICP_QAT_HW_CIPHER_DELIMITER = 13
};
enum icp_qat_hw_cipher_mode {
@@ -308,6 +323,7 @@ enum icp_qat_hw_cipher_convert {
#define ICP_QAT_HW_KASUMI_BLK_SZ 8
#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_256_BLK_SZ 8
#define ICP_QAT_HW_NULL_KEY_SZ 256
#define ICP_QAT_HW_DES_KEY_SZ 8
#define ICP_QAT_HW_3DES_KEY_SZ 24
@@ -343,6 +359,8 @@ enum icp_qat_hw_cipher_convert {
#define ICP_QAT_HW_SPC_CTR_SZ 16
#define ICP_QAT_HW_CHACHAPOLY_ICV_SZ 16
#define ICP_QAT_HW_CHACHAPOLY_AAD_MAX_LOG 14
+#define ICP_QAT_HW_ZUC_256_KEY_SZ 32
+#define ICP_QAT_HW_ZUC_256_IV_SZ 24
#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
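Putting the two new sizes in the context of session setup later in this patch (32-byte keys, 23- or 25-byte user-facing IVs), a tiny hypothetical validator:

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define ICP_QAT_HW_ZUC_256_KEY_SZ 32

/* hypothetical helper mirroring the session-setup checks later in this
 * patch: ZUC-256 takes a 32-byte key with a 23- or 25-byte user IV
 * (a 25-byte IV is packed down before reaching the 24-byte HW state) */
static bool
zuc256_params_ok(uint16_t key_len, uint16_t iv_len)
{
	return key_len == ICP_QAT_HW_ZUC_256_KEY_SZ &&
		(iv_len == 23 || iv_len == 25);
}

int main(void)
{
	assert(zuc256_params_ok(32, 25));
	assert(zuc256_params_ok(32, 23));
	assert(!zuc256_params_ok(16, 25)); /* 16-byte keys stay ZUC-128 */
	return 0;
}
```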
@@ -182,10 +182,8 @@ qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
session->fw_req.cd_ctrl.content_desc_ctrl_lw;
/* Set the Use Extended Protocol Flags bit in LW 1 */
- QAT_FIELD_SET(header->comn_req_flags,
- QAT_COMN_EXT_FLAGS_USED,
- QAT_COMN_EXT_FLAGS_BITPOS,
- QAT_COMN_EXT_FLAGS_MASK);
+ ICP_QAT_FW_USE_EXTENDED_PROTOCOL_FLAGS_SET(
+ header->ext_flags, QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS);
/* Set Hash Flags in LW 28 */
cd_ctrl->hash_flags |= hash_flag;
@@ -199,6 +197,7 @@ qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
header->serv_specif_flags, 0);
break;
case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+ case ICP_QAT_HW_CIPHER_ALGO_ZUC_256:
ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_PROTO);
ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
@@ -204,6 +204,7 @@ qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen3);
capa_num = size/sizeof(struct rte_cryptodev_capabilities);
legacy_capa_num = legacy_size/sizeof(struct rte_cryptodev_capabilities);
+ struct rte_cryptodev_capabilities *cap;
if (unlikely(qat_legacy_capa))
size = size + legacy_size;
@@ -255,6 +256,15 @@ qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
RTE_CRYPTO_AUTH_SM3_HMAC))) {
continue;
}
+
+ if ((slice_map & ICP_ACCEL_MASK_ZUC_256_SLICE) && (
+ check_auth_capa(&capabilities[iter],
+ RTE_CRYPTO_AUTH_ZUC_EIA3) ||
+ check_cipher_capa(&capabilities[iter],
+ RTE_CRYPTO_CIPHER_ZUC_EEA3))) {
+ continue;
+ }
+
if (internals->qat_dev->has_wireless_slice && (
check_auth_capa(&capabilities[iter],
RTE_CRYPTO_AUTH_KASUMI_F9) ||
@@ -268,6 +278,27 @@ qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
memcpy(addr + curr_capa, capabilities + iter,
sizeof(struct rte_cryptodev_capabilities));
+
+ if (internals->qat_dev->has_wireless_slice && (
+ check_auth_capa(&capabilities[iter],
+ RTE_CRYPTO_AUTH_ZUC_EIA3))) {
+ cap = addr + curr_capa;
+ cap->sym.auth.key_size.max = 32;
+ cap->sym.auth.key_size.increment = 16;
+ cap->sym.auth.iv_size.max = 25;
+ cap->sym.auth.iv_size.increment = 1;
+ cap->sym.auth.digest_size.max = 16;
+ cap->sym.auth.digest_size.increment = 4;
+ }
+ if (internals->qat_dev->has_wireless_slice && (
+ check_cipher_capa(&capabilities[iter],
+ RTE_CRYPTO_CIPHER_ZUC_EEA3))) {
+ cap = addr + curr_capa;
+ cap->sym.cipher.key_size.max = 32;
+ cap->sym.cipher.key_size.increment = 16;
+ cap->sym.cipher.iv_size.max = 25;
+ cap->sym.cipher.iv_size.increment = 1;
+ }
curr_capa++;
}
internals->qat_dev_capabilities = internals->capa_mz->addr;
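The in-place widening above makes the single ZUC capability entry cover both key sizes. The range semantics are those of rte_crypto_param_range; a minimal stand-in with the widened cipher ranges (assuming the base capability's minimum of 16):

```c
#include <assert.h>
#include <stdint.h>

/* minimal stand-in for rte_crypto_param_range: a value is supported if
 * it equals min (increment 0), or lies in [min, max] in steps of
 * increment */
struct param_range {
	uint16_t min;
	uint16_t max;
	uint16_t increment;
};

static int
param_supported(const struct param_range *r, uint16_t v)
{
	if (r->increment == 0)
		return v == r->min;
	return v >= r->min && v <= r->max &&
		(uint16_t)((v - r->min) % r->increment) == 0;
}

int main(void)
{
	/* widened ZUC cipher ranges from the hunk above */
	struct param_range key = { .min = 16, .max = 32, .increment = 16 };
	struct param_range iv = { .min = 16, .max = 25, .increment = 1 };

	assert(param_supported(&key, 16));	/* ZUC-128 */
	assert(param_supported(&key, 32));	/* ZUC-256 */
	assert(!param_supported(&key, 24));	/* no such key size */
	assert(param_supported(&iv, 25));	/* full ZUC-256 IV */
	return 0;
}
```

Note the widened auth digest range (max 16, increment 4) nominally also admits 12; the session code later accepts only 4, 8 and 16, mapping them to the MAC-32/-64/-128 variants.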
@@ -480,11 +511,14 @@ qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
}
static int
-qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+qat_sym_crypto_set_session_gen3(void *cdev, void *session)
{
struct qat_sym_session *ctx = session;
enum rte_proc_type_t proc_type = rte_eal_process_type();
int ret;
+ struct qat_cryptodev_private *internals;
+
+ internals = ((struct rte_cryptodev *)cdev)->data->dev_private;
if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
return -EINVAL;
@@ -517,6 +551,22 @@ qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
ctx->qat_cipher_alg ==
ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+ } else if ((internals->qat_dev->has_wireless_slice) &&
+ ((ctx->aes_cmac ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+ (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_256))) {
+ qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+ } else if ((internals->qat_dev->has_wireless_slice) &&
+ (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128) &&
+ ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
+ qat_sym_session_set_ext_hash_flags_gen2(ctx,
+ 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
}
ret = 0;
@@ -117,7 +117,10 @@ qat_auth_is_len_in_bits(struct qat_sym_session *ctx,
{
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128) {
if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
(op->sym->auth.data.length % BYTE_LENGTH != 0)))
return -EINVAL;
@@ -132,7 +135,8 @@ qat_cipher_is_len_in_bits(struct qat_sym_session *ctx,
{
if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
- ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
if (unlikely((op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
((op->sym->cipher.data.offset %
BYTE_LENGTH) != 0)))
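BYTE_LENGTH here is the PMD's bits-per-byte constant (8, defined elsewhere in the driver); the point of both checks is that the wireless algorithms, now including the ZUC-256 variants, express offsets and lengths in bits while only byte-aligned values are supported:

```c
#include <assert.h>
#include <stdint.h>

#define BYTE_LENGTH 8 /* as defined elsewhere in the PMD */

/* auth/cipher data offset and length are given in bits and must fall on
 * byte boundaries, i.e. be multiples of 8 */
static int
bits_byte_aligned(uint32_t offset_bits, uint32_t length_bits)
{
	return (offset_bits % BYTE_LENGTH == 0) &&
		(length_bits % BYTE_LENGTH == 0);
}

int main(void)
{
	assert(bits_byte_aligned(0, 1024));	/* 128 whole bytes */
	assert(!bits_byte_aligned(4, 1024));	/* offset not on a byte */
	return 0;
}
```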
@@ -589,6 +593,26 @@ qat_sym_convert_op_to_vec_aead(struct rte_crypto_op *op,
return 0;
}
+static inline void
+zuc256_modify_iv(uint8_t *iv)
+{
+ uint8_t iv_tmp[8] = { 0 }; /* zero-init: iv_tmp[7] is never written below but memcpy copies all 8 bytes */
+
+ iv_tmp[0] = iv[16];
+ /* Pack the last 8 bytes of the IV into 6 bytes,
+ * discarding the 2 most significant bits of each byte.
+ */
+ iv_tmp[1] = (((iv[17] & 0x3f) << 2) | ((iv[18] >> 4) & 0x3));
+ iv_tmp[2] = (((iv[18] & 0xf) << 4) | ((iv[19] >> 2) & 0xf));
+ iv_tmp[3] = (((iv[19] & 0x3) << 6) | (iv[20] & 0x3f));
+
+ iv_tmp[4] = (((iv[21] & 0x3f) << 2) | ((iv[22] >> 4) & 0x3));
+ iv_tmp[5] = (((iv[22] & 0xf) << 4) | ((iv[23] >> 2) & 0xf));
+ iv_tmp[6] = (((iv[23] & 0x3) << 6) | (iv[24] & 0x3f));
+
+ memcpy(iv + 16, iv_tmp, 8);
+}
+
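To see the packing concretely, here is the function in a standalone harness (using the zero-initialized temporary as fixed above); a 25-byte API-level IV collapses into the 24-byte state the hardware consumes:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* copy of zuc256_modify_iv from the hunk above */
static void
zuc256_modify_iv(uint8_t *iv)
{
	uint8_t iv_tmp[8] = { 0 };

	iv_tmp[0] = iv[16];
	iv_tmp[1] = (((iv[17] & 0x3f) << 2) | ((iv[18] >> 4) & 0x3));
	iv_tmp[2] = (((iv[18] & 0xf) << 4) | ((iv[19] >> 2) & 0xf));
	iv_tmp[3] = (((iv[19] & 0x3) << 6) | (iv[20] & 0x3f));
	iv_tmp[4] = (((iv[21] & 0x3f) << 2) | ((iv[22] >> 4) & 0x3));
	iv_tmp[5] = (((iv[22] & 0xf) << 4) | ((iv[23] >> 2) & 0xf));
	iv_tmp[6] = (((iv[23] & 0x3) << 6) | (iv[24] & 0x3f));
	memcpy(iv + 16, iv_tmp, 8);
}

int main(void)
{
	/* 25-byte IV; bytes 17..24 each hold a 6-bit value (<= 0x3f) */
	uint8_t iv[25] = { 0 };
	int i;

	for (i = 17; i < 25; i++)
		iv[i] = 0x3f; /* all 6-bit lanes set */
	zuc256_modify_iv(iv);

	/* bytes 17..22 are now fully packed: expect 0xff in each */
	for (i = 17; i < 23; i++)
		printf("iv[%d] = 0x%02x\n", i, iv[i]);
	return 0;
}
```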
static __rte_always_inline void
qat_set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
struct rte_crypto_va_iova_ptr *iv_ptr, uint32_t iv_len,
@@ -665,6 +689,9 @@ enqueue_one_auth_job_gen1(struct qat_sym_session *ctx,
case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
auth_param->u1.aad_adr = auth_iv->iova;
break;
case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
@@ -747,6 +774,9 @@ enqueue_one_chain_job_gen1(struct qat_sym_session *ctx,
case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
auth_param->u1.aad_adr = auth_iv->iova;
break;
case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
@@ -248,6 +248,9 @@ qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
return -EINVAL;
}
+ if (ctx->is_zuc256)
+ zuc256_modify_iv(cipher_iv.va);
+
enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len, op_cookie);
qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
@@ -270,6 +273,8 @@ qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
struct rte_crypto_va_iova_ptr digest;
union rte_crypto_sym_ofs ofs;
int32_t total_len;
+ struct rte_cryptodev *cdev;
+ struct qat_cryptodev_private *internals;
in_sgl.vec = in_vec;
out_sgl.vec = out_vec;
@@ -284,6 +289,13 @@ qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
return -EINVAL;
}
+ cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
+ internals = cdev->data->dev_private;
+
+ if (internals->qat_dev->has_wireless_slice && !ctx->is_gmac)
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ req->comn_hdr.serv_specif_flags, 0);
+
total_len = qat_sym_build_req_set_data(req, in_op, cookie,
in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
if (unlikely(total_len < 0)) {
@@ -291,6 +303,9 @@ qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
return -EINVAL;
}
+ if (ctx->is_zuc256)
+ zuc256_modify_iv(auth_iv.va);
+
enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
total_len);
@@ -381,6 +396,11 @@ qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
return -EINVAL;
}
+ if (ctx->is_zuc256) {
+ zuc256_modify_iv(cipher_iv.va);
+ zuc256_modify_iv(auth_iv.va);
+ }
+
enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
ofs, total_len, cookie);
@@ -507,6 +527,9 @@ qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
if (unlikely(data_len < 0))
return -1;
+ if (ctx->is_zuc256)
+ zuc256_modify_iv(iv->va);
+
enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len, cookie);
qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
@@ -563,6 +586,10 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
if (unlikely(data_len < 0))
break;
+
+ if (ctx->is_zuc256)
+ zuc256_modify_iv(vec->iv[i].va);
+
enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
(uint32_t)data_len, cookie);
tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
@@ -613,6 +640,9 @@ qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
if (unlikely(data_len < 0))
return -1;
+ if (ctx->is_zuc256)
+ zuc256_modify_iv(auth_iv->va);
+
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
null_digest.iova = cookie->digest_null_phys_addr;
job_digest = &null_digest;
@@ -678,6 +708,9 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
if (unlikely(data_len < 0))
break;
+ if (ctx->is_zuc256)
+ zuc256_modify_iv(vec->auth_iv[i].va);
+
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
null_digest.iova = cookie->digest_null_phys_addr;
job_digest = &null_digest;
@@ -733,6 +766,11 @@ qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
if (unlikely(data_len < 0))
return -1;
+ if (ctx->is_zuc256) {
+ zuc256_modify_iv(cipher_iv->va);
+ zuc256_modify_iv(auth_iv->va);
+ }
+
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
null_digest.iova = cookie->digest_null_phys_addr;
job_digest = &null_digest;
@@ -801,6 +839,11 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
if (unlikely(data_len < 0))
break;
+ if (ctx->is_zuc256) {
+ zuc256_modify_iv(vec->iv[i].va);
+ zuc256_modify_iv(vec->auth_iv[i].va);
+ }
+
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
null_digest.iova = cookie->digest_null_phys_addr;
job_digest = &null_digest;
@@ -379,7 +379,9 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
struct rte_crypto_cipher_xform *cipher_xform = NULL;
enum qat_device_gen qat_dev_gen =
internals->qat_dev->qat_dev_gen;
- int ret;
+ int ret, is_wireless = 0;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
/* Get cipher xform from crypto xform chain */
cipher_xform = qat_get_cipher_xform(xform);
@@ -416,6 +418,8 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ if (internals->qat_dev->has_wireless_slice)
+ is_wireless = 1;
break;
case RTE_CRYPTO_CIPHER_NULL:
session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
@@ -533,6 +537,10 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ if (cipher_xform->key.length == ICP_QAT_HW_ZUC_256_KEY_SZ)
+ session->is_zuc256 = 1;
+ if (internals->qat_dev->has_wireless_slice)
+ is_wireless = 1;
break;
case RTE_CRYPTO_CIPHER_AES_XTS:
if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
@@ -587,6 +595,17 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
goto error_out;
}
+ if (is_wireless) {
+ /* Set the Use Extended Protocol Flags bit in LW 1 */
+ ICP_QAT_FW_USE_EXTENDED_PROTOCOL_FLAGS_SET(
+ header->ext_flags,
+ QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS);
+ /* Force usage of Wireless Cipher slice */
+ ICP_QAT_FW_USE_WCP_SLICE_SET(header->ext_flags,
+ QAT_LA_USE_WCP_SLICE);
+ session->is_wireless = 1;
+ }
+
return 0;
error_out:
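From the application side, the new path is selected purely by key length. A hedged sketch of a matching cipher xform (the helper name is ours; field names are from rte_crypto_sym.h):

```c
#include <rte_crypto_sym.h>
#include <string.h>

/* hypothetical helper: fill a cipher xform that this patch maps to
 * ZUC-256 (EEA3 with a 32-byte key; the IV sits at iv_offset in the op) */
static void
fill_zuc256_cipher_xform(struct rte_crypto_sym_xform *xform,
	const uint8_t *key /* 32 bytes */, uint16_t iv_offset)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xform->cipher.algo = RTE_CRYPTO_CIPHER_ZUC_EEA3;
	xform->cipher.key.data = key;
	xform->cipher.key.length = 32;	/* selects ZUC-256 */
	xform->cipher.iv.offset = iv_offset;
	xform->cipher.iv.length = 25;	/* 23 is also accepted */
}
```

There is no separate ZUC-256 algorithm enum at the rte_cryptodev level: RTE_CRYPTO_CIPHER_ZUC_EEA3 with key.length == 32 is the trigger.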
@@ -820,9 +839,16 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
struct qat_cryptodev_private *internals = dev->data->dev_private;
const uint8_t *key_data = auth_xform->key.data;
- uint8_t key_length = auth_xform->key.length;
+ uint16_t key_length = auth_xform->key.length;
enum qat_device_gen qat_dev_gen =
internals->qat_dev->qat_dev_gen;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+ (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+ session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+ uint8_t hash_flag = 0;
+ int is_wireless = 0;
session->aes_cmac = 0;
session->auth_key_length = auth_xform->key.length;
@@ -898,6 +924,10 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_CMAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
session->aes_cmac = 1;
+ if (internals->qat_dev->has_wireless_slice) {
+ is_wireless = 1;
+ session->is_wireless = 1;
+ }
break;
case RTE_CRYPTO_AUTH_AES_GMAC:
if (qat_sym_validate_aes_key(auth_xform->key.length,
@@ -918,6 +948,11 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
break;
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
+ if (internals->qat_dev->has_wireless_slice) {
+ is_wireless = 1;
+ session->is_wireless = 1;
+ hash_flag = 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS;
+ }
break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
@@ -934,7 +969,35 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
rte_cryptodev_get_auth_algo_string(auth_xform->algo));
return -ENOTSUP;
}
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+ if (key_length == ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ)
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+ else if (key_length == ICP_QAT_HW_ZUC_256_KEY_SZ) {
+ switch (auth_xform->digest_length) {
+ case 4:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32;
+ break;
+ case 8:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64;
+ break;
+ case 16:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128;
+ break;
+ default:
+ QAT_LOG(ERR, "Invalid digest length: %d",
+ auth_xform->digest_length);
+ return -ENOTSUP;
+ }
+ session->is_zuc256 = 1;
+ } else {
+ QAT_LOG(ERR, "Invalid key length: %d", key_length);
+ return -ENOTSUP;
+ }
+ if (internals->qat_dev->has_wireless_slice) {
+ is_wireless = 1;
+ session->is_wireless = 1;
+ hash_flag = 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS;
+ } else
+ session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
break;
case RTE_CRYPTO_AUTH_MD5:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
@@ -1002,6 +1065,21 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
return -EINVAL;
}
+ if (is_wireless) {
+ if (!session->aes_cmac) {
+ /* Set the Use Extended Protocol Flags bit in LW 1 */
+ ICP_QAT_FW_USE_EXTENDED_PROTOCOL_FLAGS_SET(
+ header->ext_flags,
+ QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS);
+
+ /* Set Hash Flags in LW 28 */
+ cd_ctrl->hash_flags |= hash_flag;
+ }
+ /* Force usage of Wireless Auth slice */
+ ICP_QAT_FW_USE_WAT_SLICE_SET(header->ext_flags,
+ QAT_LA_USE_WAT_SLICE);
+ }
+
return 0;
}
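The digest-length switch above is the only place the three MAC variants diverge. A hedged application-side sketch (the helper name is ours; field names are from rte_crypto_sym.h):

```c
#include <rte_crypto_sym.h>
#include <string.h>

/* hypothetical helper mirroring the switch above: with a 32-byte key,
 * RTE_CRYPTO_AUTH_ZUC_EIA3 maps to ZUC-256 MAC-32/-64/-128 depending on
 * digest_length */
static int
fill_zuc256_auth_xform(struct rte_crypto_sym_xform *xform,
	const uint8_t *key /* 32 bytes */, uint16_t iv_offset,
	uint16_t digest_len /* 4, 8 or 16 */)
{
	if (digest_len != 4 && digest_len != 8 && digest_len != 16)
		return -1;
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xform->auth.algo = RTE_CRYPTO_AUTH_ZUC_EIA3;
	xform->auth.key.data = key;
	xform->auth.key.length = 32;	/* selects the ZUC-256 variants */
	xform->auth.iv.offset = iv_offset;
	xform->auth.iv.length = 25;
	xform->auth.digest_length = digest_len;
	return 0;
}
```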
@@ -1204,6 +1282,15 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_256_MAC_32_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_256_MAC_64_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_256_MAC_128_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
@@ -1286,6 +1373,10 @@ static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
return ICP_QAT_HW_AES_BLK_SZ;
case ICP_QAT_HW_AUTH_ALGO_MD5:
return QAT_MD5_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
+ return ICP_QAT_HW_ZUC_256_BLK_SZ;
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum block size in this case */
return QAT_SHA512_CBLOCK;
@@ -2040,7 +2131,8 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
|| cdesc->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3
+ || cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
} else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
@@ -2075,6 +2167,17 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
cipher_cd_ctrl->cipher_state_sz =
ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ } else if (cdesc->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
+ if (cdesc->cipher_iv.length != 23 && cdesc->cipher_iv.length != 25) {
+ QAT_LOG(ERR, "Invalid IV length for ZUC256, must be 23 or 25.");
+ return -EINVAL;
+ }
+ total_key_size = ICP_QAT_HW_ZUC_256_KEY_SZ +
+ ICP_QAT_HW_ZUC_256_IV_SZ;
+ cipher_cd_ctrl->cipher_state_sz =
+ ICP_QAT_HW_ZUC_256_IV_SZ >> 3;
+ cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
} else {
total_key_size = cipherkeylen;
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
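A back-of-envelope check on the ZUC-256 branch (constants copied from this patch; the >> 3 converts byte sizes to the 8-byte units the firmware fields appear to use):

```c
#define ICP_QAT_HW_ZUC_256_KEY_SZ 32
#define ICP_QAT_HW_ZUC_256_IV_SZ 24

_Static_assert((ICP_QAT_HW_ZUC_256_IV_SZ >> 3) == 3,
	"cipher_state_sz for ZUC-256 is 3 eight-byte words");
_Static_assert(ICP_QAT_HW_ZUC_256_KEY_SZ + ICP_QAT_HW_ZUC_256_IV_SZ == 56,
	"key + IV exactly fill the 56-byte ZUC-256 state2 area");
```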
@@ -2246,6 +2349,9 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128
|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
@@ -2519,7 +2625,8 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
cdesc->aad_len = aad_length;
break;
case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
- cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+ if (!cdesc->is_wireless)
+ cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
state1_size = qat_hash_get_state1_size(
ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
@@ -2540,10 +2647,12 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
break;
case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
- hash->auth_config.config =
- ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
- cdesc->qat_hash_alg, digestsize);
- cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ if (!cdesc->is_wireless) {
+ hash->auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
+ cdesc->qat_hash_alg, digestsize);
+ cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ }
state1_size = qat_hash_get_state1_size(
ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
@@ -2554,6 +2663,18 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128:
+ state1_size = qat_hash_get_state1_size(cdesc->qat_hash_alg);
+ state2_size = ICP_QAT_HW_ZUC_256_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
+ + ICP_QAT_HW_ZUC_256_IV_SZ);
+
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ cd_extra_size += ICP_QAT_HW_ZUC_256_IV_SZ;
+ auth_param->hash_state_sz = ICP_QAT_HW_ZUC_256_IV_SZ >> 3;
break;
case ICP_QAT_HW_AUTH_ALGO_MD5:
#ifdef RTE_QAT_OPENSSL
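The content-descriptor setup in the new ZUC-256 case reads as follows; a standalone restatement (an illustrative helper under the sizes given in this patch, not driver code):

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* layout of the auth content descriptor for ZUC-256, as built above */
static size_t
zuc256_auth_cd_fill(uint8_t *cd, const uint8_t *authkey, size_t authkeylen,
	size_t state1_size /* 8 for MAC-32/64, 16 for MAC-128 */)
{
	const size_t state2_size = 56;	/* ICP_QAT_HW_ZUC_256_STATE2_SZ */
	const size_t iv_sz = 24;	/* ICP_QAT_HW_ZUC_256_IV_SZ */

	/* zero state1, state2 and the trailing per-request IV area */
	memset(cd, 0, state1_size + state2_size + iv_sz);
	/* the 32-byte key lands right after state1; the remainder of
	 * state2 stays zero, and the IV arrives with each request
	 * (hash_state_sz advertises it as 24 >> 3 = 3 eight-byte words) */
	memcpy(cd + state1_size, authkey, authkeylen);
	return state1_size + state2_size + iv_sz;
}
```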
@@ -2740,6 +2861,9 @@ int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
*alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
break;
+ case ICP_QAT_HW_ZUC_256_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_256;
+ break;
default:
return -EINVAL;
}
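A minimal restatement of the dispatch, with 16 being ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:

```c
#include <assert.h>

/* 16-byte keys keep selecting ZUC-128 (EEA3); 32-byte keys now select
 * ZUC-256 (enum names ours, for illustration) */
enum zuc_alg { ZUC_128, ZUC_256 };

static int
pick_zuc_alg(int key_len, enum zuc_alg *alg)
{
	switch (key_len) {
	case 16: *alg = ZUC_128; return 0;
	case 32: *alg = ZUC_256; return 0;
	default: return -1;
	}
}

int main(void)
{
	enum zuc_alg a;

	assert(pick_zuc_alg(16, &a) == 0 && a == ZUC_128);
	assert(pick_zuc_alg(32, &a) == 0 && a == ZUC_256);
	assert(pick_zuc_alg(24, &a) < 0);
	return 0;
}
```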
@@ -140,6 +140,8 @@ struct qat_sym_session {
uint8_t is_auth;
uint8_t is_cnt_zero;
/* Some generations need different setup of counter */
+ uint8_t is_zuc256;
+ uint8_t is_wireless;
uint32_t slice_types;
enum qat_sym_proto_flag qat_proto_flag;
qat_sym_build_request_t build_request[2];