@@ -179,8 +179,14 @@ uint16_t
qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- return qat_dequeue_op_burst(qp, (void **)ops,
- qat_sym_process_response, nb_ops);
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+
+ if (tmp_qp->qat_dev_gen == QAT_GEN5)
+ return qat_dequeue_op_burst(qp, (void **)ops,
+ qat_sym_process_response_gen5, nb_ops);
+ else
+ return qat_dequeue_op_burst(qp, (void **)ops,
+ qat_sym_process_response, nb_ops);
}
int
@@ -95,6 +95,12 @@
/* Maximum data length for single pass GMAC: 2^14-1 */
#define QAT_AES_GMAC_SPC_MAX_SIZE 16383
+/* Digest length for AES-GCM is 16 bytes */
+#define GCM_256_DIGEST_LEN 16
+
+/* IV length for AES-GCM is 12 bytes */
+#define GCM_IV_LENGTH 12
+
struct qat_sym_session;
struct qat_sym_sgl {
@@ -383,6 +389,52 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
return 1;
}
+static __rte_always_inline int
+qat_sym_process_response_gen5(void **op, uint8_t *resp,
+ void *op_cookie __rte_unused,
+ uint64_t *dequeue_err_count __rte_unused)
+{
+ struct icp_qat_fw_comn_resp *resp_msg =
+ (struct icp_qat_fw_comn_resp *)resp;
+ struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+ struct qat_sym_session *sess;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+ sess = CRYPTODEV_GET_SYM_SESS_PRIV(rx_op->sym->session);
+
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+ ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
+ resp_msg->comn_hdr.comn_status))
+ rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ else if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+ ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
+ resp_msg->comn_hdr.comn_status))
+ rx_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+ if (sess->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+ if (ICP_QAT_FW_LA_VER_STATUS_FAIL ==
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status))
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+
+ *op = (void *)rx_op;
+
+ /*
+ * Return 1 so the dequeue loop only moves on to the next op
+ * once this one is ready to be returned to the API.
+ */
+ return 1;
+}
+
int
qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
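For reference, both response handlers follow the per-op dequeue callback shape that qat_dequeue_op_burst() consumes, and returning 1 is what advances the dequeue loop by one op. A minimal sketch of selecting the generation-specific handler through that callback type; this assumes the typedef matches qat_op_dequeue_t in qat_qp.h and that enum qat_device_gen is visible, and qat_sym_select_resp_handler() is purely illustrative (the patch performs the same selection inline in qat_sym_dequeue_burst()):

	typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp,
			void *op_cookie, uint64_t *dequeue_err_count);

	static inline qat_op_dequeue_t
	qat_sym_select_resp_handler(enum qat_device_gen gen)
	{
		/* GEN5 interprets the common status flags differently,
		 * hence the dedicated handler; earlier generations keep
		 * the legacy one. */
		return (gen == QAT_GEN5) ? qat_sym_process_response_gen5 :
				qat_sym_process_response;
	}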
@@ -448,7 +500,13 @@ qat_sym_preprocess_requests(void **ops __rte_unused,
static inline void
qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
- void *op_cookie __rte_unused)
+ void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
+{
+}
+
+static inline void
+qat_sym_process_response_gen5(void **op __rte_unused, uint8_t *resp __rte_unused,
+ void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
{
}
@@ -136,6 +136,9 @@ qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
static void
qat_sym_session_init_common_hdr(struct qat_sym_session *session);
+static void
+qat_sym_session_init_gen5_hdr(struct qat_sym_session *session);
+
/* Req/cd init functions */
static void
@@ -738,6 +741,12 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
session->qat_cmd);
return -ENOTSUP;
}
+
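+ /* GEN5 builds its own request header below; skip finalize/set_session */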
+ if (qat_dev_gen == QAT_GEN5) {
+ qat_sym_session_init_gen5_hdr(session);
+ return 0;
+ }
+
qat_sym_session_finalize(session);
return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
@@ -1082,6 +1091,12 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
if (qat_sym_cd_cipher_set(session,
aead_xform->key.data, aead_xform->key.length))
return -EINVAL;
+
+ if (qat_dev_gen == QAT_GEN5) {
+ session->auth_key_length = aead_xform->key.length;
+ memcpy(session->key_array, aead_xform->key.data,
+ aead_xform->key.length);
+ }
} else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -1970,6 +1985,43 @@ qat_sym_session_init_common_hdr(struct qat_sym_session *session)
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
}
+static void
+qat_sym_session_init_gen5_hdr(struct qat_sym_session *session)
+{
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+ /*
+ * GEN5 provides a dedicated AEAD command id, but the Cryptodev API
+ * handles AEAD as a single-pass cipher operation, so the session
+ * algo command id remains CIPHER (session algo mode is AEAD).
+ * The GEN5 request header below, however, uses the AEAD command id.
+ */
+ header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN5(ICP_QAT_FW_COMN_REQ_FLAG_SET,
+ ICP_QAT_FW_COMN_GEN5_DESC_LAYOUT);
+ header->comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD_GEN5(QAT_COMN_PTR_TYPE_SGL,
+ QAT_COMN_KEY_BUFFER_USED);
+
+ ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags,
+ RTE_CRYPTO_AEAD_AES_GCM_GEN5);
+ ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
+
+ if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+ ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_HW_CIPHER_DECRYPT);
+ } else {
+ ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_HW_CIPHER_ENCRYPT);
+ }
+}
+
int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
const uint8_t *cipherkey,
uint32_t cipherkeylen)
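To tie the session-side changes back to the public API: on a GEN5 device, qat_sym_session_init_gen5_hdr() is reached through an ordinary AEAD transform, since AES-GCM is still requested with RTE_CRYPTO_AEAD_AES_GCM. A sketch of an application-side setup that would exercise this path, assuming a 256-bit key, the driver's 12-byte IV and 16-byte digest; create_aes256_gcm_session(), dev_id, sess_mp and IV_OFFSET are placeholders, not part of the patch:

	#include <rte_cryptodev.h>
	#include <rte_crypto_sym.h>
	#include <rte_mempool.h>

	/* IV is conventionally carried right after the sym op in the crypto op */
	#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
			sizeof(struct rte_crypto_sym_op))

	static void *
	create_aes256_gcm_session(uint8_t dev_id, struct rte_mempool *sess_mp,
			const uint8_t key[32])
	{
		struct rte_crypto_sym_xform xform = {
			.type = RTE_CRYPTO_SYM_XFORM_AEAD,
			.aead = {
				.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.key = { .data = key, .length = 32 },
				.iv = { .offset = IV_OFFSET, .length = 12 }, /* GCM_IV_LENGTH */
				.digest_length = 16,  /* GCM_256_DIGEST_LEN */
				.aad_length = 0,      /* no AAD in this sketch */
			},
		};

		return rte_cryptodev_sym_session_create(dev_id, &xform, sess_mp);
	}

With such a transform, qat_sym_session_configure_aead() keeps the 32-byte key in session->key_array and qat_sym_session_init_gen5_hdr() builds the GEN5 request header shown above.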
@@ -111,7 +111,10 @@ struct qat_sym_session {
enum icp_qat_hw_auth_op auth_op;
enum icp_qat_hw_auth_mode auth_mode;
void *bpi_ctx;
- struct qat_sym_cd cd;
+ union {
+ struct qat_sym_cd cd;
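+ /* GEN5: raw copy of the AEAD key (see qat_sym_session_configure_aead) */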
+ uint8_t key_array[32];
+ };
uint8_t prefix_state[QAT_PREFIX_TBL_SIZE] __rte_cache_aligned;
uint8_t *cd_cur_ptr;
phys_addr_t cd_paddr;
@@ -492,6 +492,9 @@ enum rte_crypto_aead_operation {
/**< Verify digest and decrypt */
};
+/* In GEN5, the AEAD AES-GCM algorithm has ID 0 */
+#define RTE_CRYPTO_AEAD_AES_GCM_GEN5 0
+
/** Authentication operation name strings */
extern const char *
rte_crypto_aead_operation_strings[];