> -----Original Message-----
> From: Kusztal, ArkadiuszX <arkadiuszx.kusztal@intel.com>
> Sent: Monday, June 28, 2021 5:34 PM
> To: dev@dpdk.org
> Cc: gakhil@marvell.com; Trahe, Fiona <fiona.trahe@intel.com>; Zhang, Roy
> Fan <roy.fan.zhang@intel.com>; Kusztal, ArkadiuszX
> <arkadiuszx.kusztal@intel.com>
> Subject: [PATCH v2 04/16] crypto/qat: add gen4 ucs slice type, add ctr mode
>
> This commit adds a unified cipher slice to the Intel QuickAssist
> Technology PMD and enables the AES-CTR algorithm.
>
> Signed-off-by: Arek Kusztal <arkadiuszx.kusztal@intel.com>
> ---
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
@@ -371,4 +371,32 @@ struct icp_qat_fw_la_resp {
& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+#define ICP_QAT_FW_LA_USE_WIRELESS_SLICE_TYPE 2
+#define ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE 1
+#define ICP_QAT_FW_LA_USE_LEGACY_SLICE_TYPE 0
+#define QAT_LA_SLICE_TYPE_BITPOS 14
+#define QAT_LA_SLICE_TYPE_MASK 0x3
+#define ICP_QAT_FW_LA_SLICE_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_SLICE_TYPE_BITPOS, \
+ QAT_LA_SLICE_TYPE_MASK)
+
+struct icp_qat_fw_la_cipher_20_req_params {
+ uint32_t cipher_offset;
+ uint32_t cipher_length;
+ union {
+ uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ struct {
+ uint64_t cipher_IV_ptr;
+ uint64_t resrvd1;
+ } s;
+
+ } u;
+ uint32_t spc_aad_offset;
+ uint32_t spc_aad_sz;
+ uint64_t spc_aad_addr;
+ uint64_t spc_auth_res_addr;
+ uint8_t reserved[3];
+ uint8_t spc_auth_res_sz;
+};
+
#endif
@@ -342,6 +342,16 @@ struct icp_qat_hw_cipher_algo_blk {
uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
} __rte_cache_aligned;
+struct icp_qat_hw_ucs_cipher_config {
+ uint32_t val;
+ uint32_t reserved[3];
+};
+
+struct icp_qat_hw_cipher_algo_blk20 {
+ struct icp_qat_hw_ucs_cipher_config cipher_config;
+ uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
+} __rte_cache_aligned;
+
/* ========================================================================= */
/* COMPRESSION SLICE */
/* ========================================================================= */
@@ -1064,6 +1064,26 @@
.iv_size = { 0 } \
}, } \
}, } \
+ }, \
+ { /* AES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
} \
@@ -246,6 +246,8 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
{
struct qat_sym_dev_private *internals = dev->data->dev_private;
struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ enum qat_device_gen qat_dev_gen =
+ internals->qat_dev->qat_dev_gen;
int ret;
/* Get cipher xform from crypto xform chain */
@@ -272,6 +274,13 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ if (qat_dev_gen == QAT_GEN4) {
+ /* TODO: Filter WCP */
+ ICP_QAT_FW_LA_SLICE_TYPE_SET(
+ session->fw_req.comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+ session->is_ucs = 1;
+ }
break;
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
@@ -556,6 +565,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
offsetof(struct qat_sym_session, cd);
session->min_qat_dev_gen = QAT_GEN1;
+ session->is_ucs = 0;
/* Get requested QAT command id */
qat_cmd_id = qat_get_cmd_id(xform);
@@ -1518,6 +1528,7 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
uint32_t cipherkeylen)
{
struct icp_qat_hw_cipher_algo_blk *cipher;
+ struct icp_qat_hw_cipher_algo_blk20 *cipher20;
struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
@@ -1611,7 +1622,6 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
qat_proto_flag =
qat_get_crypto_proto_flag(header->serv_specif_flags);
}
- cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
@@ -1619,6 +1629,7 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
qat_sym_session_init_common_hdr(header, qat_proto_flag);
cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
+ cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
cipher->cipher_config.val =
ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
cdesc->qat_cipher_alg, key_convert,
@@ -1638,6 +1649,19 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
cipherkeylen + cipherkeylen;
+ } else if (cdesc->is_ucs) {
+ const uint8_t *final_key = cipherkey;
+
+ total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
+ ICP_QAT_HW_AES_128_KEY_SZ);
+ cipher20->cipher_config.reserved[0] = 0;
+ cipher20->cipher_config.reserved[1] = 0;
+ cipher20->cipher_config.reserved[2] = 0;
+
+ rte_memcpy(cipher20->key, final_key, cipherkeylen);
+ cdesc->cd_cur_ptr +=
+ sizeof(struct icp_qat_hw_ucs_cipher_config) +
+ cipherkeylen;
} else {
memcpy(cipher->key, cipherkey, cipherkeylen);
cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
@@ -1664,6 +1688,7 @@ int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
}
cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
+ cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
return 0;
}
@@ -92,6 +92,7 @@ struct qat_sym_session {
uint8_t aes_cmac;
uint8_t is_single_pass;
uint8_t is_single_pass_gmac;
+ uint8_t is_ucs;
};
int