@@ -27,6 +27,7 @@ poll mode crypto driver support for the following hardware accelerator devices:
* ``Intel QuickAssist Technology C4xxx``
* ``Intel QuickAssist Technology 4xxx``
* ``Intel QuickAssist Technology 300xx``
+* ``Intel QuickAssist Technology 420xx``
Features
@@ -179,6 +180,7 @@ poll mode crypto driver support for the following hardware accelerator devices:
* ``Intel QuickAssist Technology 4xxx``
* ``Intel QuickAssist Technology 401xxx``
* ``Intel QuickAssist Technology 300xx``
+* ``Intel QuickAssist Technology 420xx``
The QAT ASYM PMD has support for:
@@ -472,6 +474,8 @@ to see the full table)
+-----+-----+-----+-----+----------+---------------+---------------+------------+--------+------+--------+--------+
| Yes | No | No | 4 | 402xx | IDZ/ N/A | qat_4xxx | 4xxx | 4944 | 2 | 4945 | 16 |
+-----+-----+-----+-----+----------+---------------+---------------+------------+--------+------+--------+--------+
+ | Yes | Yes | Yes | 5 | 420xx | linux/6.8+ | qat_420xx | 420xx | 4946 | 2 | 4947 | 16 |
+ +-----+-----+-----+-----+----------+---------------+---------------+------------+--------+------+--------+--------+
* Note: Symmetric mixed crypto algorithms feature on Gen 2 works only with IDZ driver version 4.9.0+
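
The VF device IDs in the table above are what the driver keys its generation
selection on; the sketch below mirrors the pick_gen() switch extended later in
this patch for the IDs visible here (function and enum names in the sketch are
illustrative, not the driver's).

    /* Illustrative only: VF device id -> QAT generation, per the table above
     * and the pick_gen() switch extended later in this patch. */
    #include <stdint.h>

    enum qat_gen_sketch { GEN_UNKNOWN = 0, GEN4_SKETCH = 4, GEN5_SKETCH = 5 };

    static enum qat_gen_sketch
    qat_gen_from_vf_devid(uint16_t devid)
    {
        switch (devid) {
        case 0x4943:                    /* existing GEN4 VF ids */
        case 0x4945:
            return GEN4_SKETCH;
        case 0x4947:                    /* 420xx VF, added by this patch */
            return GEN5_SKETCH;
        default:
            return GEN_UNKNOWN;
        }
    }
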
@@ -133,8 +133,10 @@ New Features
* **Updated Intel QuickAssist Technology driver.**
- * Enabled support for new QAT GEN3 (578a) devices in QAT crypto driver.
- * Enabled ZUC256 cipher and auth algorithm for wireless slice enabled GEN3 device.
+ * Enabled support for new QAT GEN3 (578a) and QAT GEN5 (4946)
+ devices in QAT crypto driver.
+ * Enabled ZUC256 cipher and auth algorithms for wireless-slice-enabled
+ GEN3 and GEN5 devices.
* **Updated Marvell cnxk crypto driver.**
@@ -10,6 +10,7 @@
#include "adf_transport_access_macros_gen4vf.h"
#include "adf_pf2vf_msg.h"
#include "qat_pf2vf.h"
+#include "qat_dev_gens.h"
#include <stdint.h>
@@ -60,7 +61,7 @@ qat_select_valid_queue_gen4(struct qat_pci_device *qat_dev, int qp_id,
return -1;
}
-static const struct qat_qp_hw_data *
+const struct qat_qp_hw_data *
qat_qp_get_hw_data_gen4(struct qat_pci_device *qat_dev,
enum qat_service_type service_type, uint16_t qp_id)
{
@@ -74,7 +75,7 @@ qat_qp_get_hw_data_gen4(struct qat_pci_device *qat_dev,
return &dev_extra->qp_gen4_data[ring_pair][0];
}
-static int
+int
qat_qp_rings_per_service_gen4(struct qat_pci_device *qat_dev,
enum qat_service_type service)
{
@@ -103,7 +104,7 @@ gen4_pick_service(uint8_t hw_service)
}
}
-static int
+int
qat_dev_read_config_gen4(struct qat_pci_device *qat_dev)
{
int i = 0;
@@ -143,7 +144,7 @@ qat_dev_read_config_gen4(struct qat_pci_device *qat_dev)
return 0;
}
-static void
+void
qat_qp_build_ring_base_gen4(void *io_addr,
struct qat_queue *queue)
{
@@ -155,7 +156,7 @@ qat_qp_build_ring_base_gen4(void *io_addr,
queue->hw_queue_number, queue_base);
}
-static void
+void
qat_qp_adf_arb_enable_gen4(const struct qat_queue *txq,
void *base_addr, rte_spinlock_t *lock)
{
@@ -172,7 +173,7 @@ qat_qp_adf_arb_enable_gen4(const struct qat_queue *txq,
rte_spinlock_unlock(lock);
}
-static void
+void
qat_qp_adf_arb_disable_gen4(const struct qat_queue *txq,
void *base_addr, rte_spinlock_t *lock)
{
@@ -189,7 +190,7 @@ qat_qp_adf_arb_disable_gen4(const struct qat_queue *txq,
rte_spinlock_unlock(lock);
}
-static void
+void
qat_qp_adf_configure_queues_gen4(struct qat_qp *qp)
{
uint32_t q_tx_config, q_resp_config;
@@ -208,14 +209,14 @@ qat_qp_adf_configure_queues_gen4(struct qat_qp *qp)
q_resp_config);
}
-static void
+void
qat_qp_csr_write_tail_gen4(struct qat_qp *qp, struct qat_queue *q)
{
WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr,
q->hw_bundle_number, q->hw_queue_number, q->tail);
}
-static void
+void
qat_qp_csr_write_head_gen4(struct qat_qp *qp, struct qat_queue *q,
uint32_t new_head)
{
@@ -223,7 +224,7 @@ qat_qp_csr_write_head_gen4(struct qat_qp *qp, struct qat_queue *q,
q->hw_bundle_number, q->hw_queue_number, new_head);
}
-static void
+void
qat_qp_csr_setup_gen4(struct qat_pci_device *qat_dev,
void *io_addr, struct qat_qp *qp)
{
@@ -246,7 +247,7 @@ static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen4 = {
.qat_qp_get_hw_data = qat_qp_get_hw_data_gen4,
};
-static int
+int
qat_reset_ring_pairs_gen4(struct qat_pci_device *qat_pci_dev)
{
int ret = 0, i;
@@ -268,13 +269,13 @@ qat_reset_ring_pairs_gen4(struct qat_pci_device *qat_pci_dev)
return 0;
}
-static const struct rte_mem_resource *
+const struct rte_mem_resource *
qat_dev_get_transport_bar_gen4(struct rte_pci_device *pci_dev)
{
return &pci_dev->mem_resource[0];
}
-static int
+int
qat_dev_get_misc_bar_gen4(struct rte_mem_resource **mem_resource,
struct rte_pci_device *pci_dev)
{
@@ -282,14 +283,14 @@ qat_dev_get_misc_bar_gen4(struct rte_mem_resource **mem_resource,
return 0;
}
-static int
+int
qat_dev_get_slice_map_gen4(uint32_t *map __rte_unused,
const struct rte_pci_device *pci_dev __rte_unused)
{
return 0;
}
-static int
+int
qat_dev_get_extra_size_gen4(void)
{
return sizeof(struct qat_dev_gen4_extra);
new file mode 100644
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <dev_driver.h>
+#include <rte_pci.h>
+
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "adf_pf2vf_msg.h"
+#include "qat_dev_gens.h"
+
+#include <stdint.h>
+
+static struct qat_pf2vf_dev qat_pf2vf_gen5 = {
+ .pf2vf_offset = ADF_4XXXIOV_PF2VM_OFFSET,
+ .vf2pf_offset = ADF_4XXXIOV_VM2PF_OFFSET,
+ .pf2vf_type_shift = ADF_PFVF_2X_MSGTYPE_SHIFT,
+ .pf2vf_type_mask = ADF_PFVF_2X_MSGTYPE_MASK,
+ .pf2vf_data_shift = ADF_PFVF_2X_MSGDATA_SHIFT,
+ .pf2vf_data_mask = ADF_PFVF_2X_MSGDATA_MASK,
+};
+
+static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen5 = {
+ .qat_qp_rings_per_service = qat_qp_rings_per_service_gen4,
+ .qat_qp_build_ring_base = qat_qp_build_ring_base_gen4,
+ .qat_qp_adf_arb_enable = qat_qp_adf_arb_enable_gen4,
+ .qat_qp_adf_arb_disable = qat_qp_adf_arb_disable_gen4,
+ .qat_qp_adf_configure_queues = qat_qp_adf_configure_queues_gen4,
+ .qat_qp_csr_write_tail = qat_qp_csr_write_tail_gen4,
+ .qat_qp_csr_write_head = qat_qp_csr_write_head_gen4,
+ .qat_qp_csr_setup = qat_qp_csr_setup_gen4,
+ .qat_qp_get_hw_data = qat_qp_get_hw_data_gen4,
+};
+
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen5 = {
+ .qat_dev_reset_ring_pairs = qat_reset_ring_pairs_gen4,
+ .qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen4,
+ .qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen4,
+ .qat_dev_read_config = qat_dev_read_config_gen4,
+ .qat_dev_get_extra_size = qat_dev_get_extra_size_gen4,
+ .qat_dev_get_slice_map = qat_dev_get_slice_map_gen4,
+};
+
+RTE_INIT(qat_dev_gen_5_init)
+{
+ qat_qp_hw_spec[QAT_GEN5] = &qat_qp_hw_spec_gen5;
+ qat_dev_hw_spec[QAT_GEN5] = &qat_dev_hw_spec_gen5;
+ qat_gen_config[QAT_GEN5].dev_gen = QAT_GEN5;
+ qat_gen_config[QAT_GEN5].pf2vf_dev = &qat_pf2vf_gen5;
+}
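
The RTE_INIT constructor above plugs GEN5 into the same per-generation dispatch
tables used by the other generations, pointing every callback at the GEN4
implementations exported earlier in this series. A minimal sketch of how
generic driver code consumes those tables follows (the wrapper is illustrative
and assumes qat_pci_device carries a qat_dev_gen field, as in the existing
driver):

    /* Sketch only: dispatch through the table populated by
     * RTE_INIT(qat_dev_gen_5_init) above. */
    static int
    qat_dev_read_config_dispatch(struct qat_pci_device *qat_dev)
    {
        struct qat_dev_hw_spec_funcs *ops =
                qat_dev_hw_spec[qat_dev->qat_dev_gen];

        if (ops == NULL || ops->qat_dev_read_config == NULL)
            return -ENOTSUP;    /* generation not registered */

        /* For QAT_GEN5 this resolves to qat_dev_read_config_gen4() */
        return ops->qat_dev_read_config(qat_dev);
    }
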
@@ -62,4 +62,58 @@ qat_dev_get_misc_bar_gen1(struct rte_mem_resource **mem_resource,
int
qat_dev_read_config_gen1(struct qat_pci_device *qat_dev);
+int
+qat_reset_ring_pairs_gen4(struct qat_pci_device *qat_pci_dev);
+
+const struct rte_mem_resource *
+qat_dev_get_transport_bar_gen4(struct rte_pci_device *pci_dev);
+
+int
+qat_dev_get_misc_bar_gen4(struct rte_mem_resource **mem_resource,
+ struct rte_pci_device *pci_dev);
+
+int
+qat_dev_read_config_gen4(struct qat_pci_device *qat_dev);
+
+int
+qat_dev_get_extra_size_gen4(void);
+
+int
+qat_dev_get_slice_map_gen4(uint32_t *map __rte_unused,
+ const struct rte_pci_device *pci_dev __rte_unused);
+
+int
+qat_qp_rings_per_service_gen4(struct qat_pci_device *qat_dev,
+ enum qat_service_type service);
+
+void
+qat_qp_build_ring_base_gen4(void *io_addr,
+ struct qat_queue *queue);
+
+void
+qat_qp_adf_arb_enable_gen4(const struct qat_queue *txq,
+ void *base_addr, rte_spinlock_t *lock);
+
+void
+qat_qp_adf_arb_disable_gen4(const struct qat_queue *txq,
+ void *base_addr, rte_spinlock_t *lock);
+
+void
+qat_qp_adf_configure_queues_gen4(struct qat_qp *qp);
+
+void
+qat_qp_csr_write_tail_gen4(struct qat_qp *qp, struct qat_queue *q);
+
+void
+qat_qp_csr_write_head_gen4(struct qat_qp *qp, struct qat_queue *q,
+ uint32_t new_head);
+
+void
+qat_qp_csr_setup_gen4(struct qat_pci_device *qat_dev,
+ void *io_addr, struct qat_qp *qp);
+
+const struct qat_qp_hw_data *
+qat_qp_get_hw_data_gen4(struct qat_pci_device *qat_dev,
+ enum qat_service_type service_type, uint16_t qp_id);
+
#endif
@@ -82,6 +82,7 @@ sources += files(
'dev/qat_dev_gen2.c',
'dev/qat_dev_gen3.c',
'dev/qat_dev_gen4.c',
+ 'dev/qat_dev_gen5.c',
)
includes += include_directories(
'qat_adf',
@@ -95,6 +96,7 @@ if qat_compress
'dev/qat_comp_pmd_gen2.c',
'dev/qat_comp_pmd_gen3.c',
'dev/qat_comp_pmd_gen4.c',
+ 'dev/qat_comp_pmd_gen5.c',
]
sources += files(join_paths(qat_compress_relpath, f))
endforeach
@@ -108,6 +110,7 @@ if qat_crypto
'dev/qat_crypto_pmd_gen2.c',
'dev/qat_crypto_pmd_gen3.c',
'dev/qat_crypto_pmd_gen4.c',
+ 'dev/qat_crypto_pmd_gen5.c',
]
sources += files(join_paths(qat_crypto_relpath, f))
endforeach
@@ -21,6 +21,7 @@ enum qat_device_gen {
QAT_GEN2,
QAT_GEN3,
QAT_GEN4,
+ QAT_GEN5,
QAT_N_GENS
};
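
Because QAT_N_GENS stays last, every table sized with it and indexed by
generation in the RTE_INIT constructors picks up the new QAT_GEN5 slot without
further changes; roughly, following the existing driver headers (declarations
shown here for illustration):

    /* Sizing pattern: adding QAT_GEN5 before QAT_N_GENS grows these tables. */
    extern struct qat_dev_hw_spec_funcs *qat_dev_hw_spec[QAT_N_GENS];
    extern struct qat_qp_hw_spec_funcs *qat_qp_hw_spec[QAT_N_GENS];
    extern struct qat_gen_hw_data qat_gen_config[QAT_N_GENS];
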
@@ -65,6 +65,9 @@ static const struct rte_pci_id pci_id_qat_map[] = {
{
RTE_PCI_DEVICE(0x8086, 0x4945),
},
+ {
+ RTE_PCI_DEVICE(0x8086, 0x4947),
+ },
{.device_id = 0},
};
@@ -203,6 +206,8 @@ pick_gen(const struct rte_pci_device *pci_dev)
case 0x4943:
case 0x4945:
return QAT_GEN4;
+ case 0x4947:
+ return QAT_GEN5;
default:
QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
return QAT_N_GENS;
@@ -212,7 +217,8 @@ pick_gen(const struct rte_pci_device *pci_dev)
static int
wireless_slice_support(uint16_t pci_dev_id)
{
- return pci_dev_id == 0x578b;
+ return pci_dev_id == 0x578b ||
+ pci_dev_id == 0x4947;
}
struct qat_pci_device *
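
A hypothetical sketch of how these two helpers combine at probe time:
pick_gen() turns the PCI device id into a generation, and
wireless_slice_support() (now also matching 0x4947) marks devices whose
wireless slice backs the ZUC-256 capabilities added for GEN5. The flow and
field names below are assumptions for illustration, not the driver's exact
probe code.

    /* Hypothetical probe-time flow (illustrative only). */
    static int
    qat_probe_sketch(struct rte_pci_device *pci_dev,
            struct qat_pci_device *qat_dev)
    {
        enum qat_device_gen gen = pick_gen(pci_dev);

        if (gen == QAT_N_GENS)
            return -ENODEV;     /* unknown device id */

        qat_dev->qat_dev_gen = gen;
        /* 0x578b (GEN3) and 0x4947 (GEN5) expose the wireless slice */
        qat_dev->has_wireless_slice =
            wireless_slice_support(pci_dev->id.device_id);
        return 0;
    }
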
@@ -27,7 +27,7 @@ qat_gen4_comp_capabilities[] = {
.window_size = {.min = 15, .max = 15, .increment = 0} },
RTE_COMP_END_OF_CAPABILITIES_LIST() };
-static int
+int
qat_comp_dev_config_gen4(struct rte_compressdev *dev,
struct rte_compressdev_config *config)
{
@@ -67,13 +67,13 @@ qat_comp_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
return capa_info;
}
-static uint16_t
+uint16_t
qat_comp_get_ram_bank_flags_gen4(void)
{
return 0;
}
-static int
+int
qat_comp_set_slice_cfg_word_gen4(struct qat_comp_xform *qat_xform,
const struct rte_comp_xform *xform,
enum rte_comp_op_type op_type, uint32_t *comp_slice_cfg_word)
@@ -189,7 +189,7 @@ qat_comp_set_slice_cfg_word_gen4(struct qat_comp_xform *qat_xform,
return 0;
}
-static unsigned int
+unsigned int
qat_comp_get_num_im_bufs_required_gen4(void)
{
return QAT_NUM_INTERM_BUFS_GEN4;
new file mode 100644
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include "qat_comp.h"
+#include "qat_comp_pmd.h"
+#include "qat_comp_pmd_gens.h"
+#include "icp_qat_hw_gen4_comp.h"
+#include "icp_qat_hw_gen4_comp_defs.h"
+
+static const struct rte_compressdev_capabilities
+qat_gen5_comp_capabilities[] = {
+ {/* COMPRESSION - deflate */
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM |
+ RTE_COMP_FF_CRC32_CHECKSUM |
+ RTE_COMP_FF_ADLER32_CHECKSUM |
+ RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
+ RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC |
+ RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
+ .window_size = {.min = 15, .max = 15, .increment = 0} },
+ RTE_COMP_END_OF_CAPABILITIES_LIST() };
+
+static struct rte_compressdev_ops qat_comp_ops_gen5 = {
+
+ /* Device related operations */
+ .dev_configure = qat_comp_dev_config_gen4,
+ .dev_start = qat_comp_dev_start,
+ .dev_stop = qat_comp_dev_stop,
+ .dev_close = qat_comp_dev_close,
+ .dev_infos_get = qat_comp_dev_info_get,
+
+ .stats_get = qat_comp_stats_get,
+ .stats_reset = qat_comp_stats_reset,
+ .queue_pair_setup = qat_comp_qp_setup,
+ .queue_pair_release = qat_comp_qp_release,
+
+ /* Compression related operations */
+ .private_xform_create = qat_comp_private_xform_create,
+ .private_xform_free = qat_comp_private_xform_free,
+ .stream_create = qat_comp_stream_create,
+ .stream_free = qat_comp_stream_free
+};
+
+static struct qat_comp_capabilities_info
+qat_comp_cap_get_gen5(struct qat_pci_device *qat_dev __rte_unused)
+{
+ struct qat_comp_capabilities_info capa_info = {
+ .data = qat_gen5_comp_capabilities,
+ .size = sizeof(qat_gen5_comp_capabilities)
+ };
+ return capa_info;
+}
+
+RTE_INIT(qat_comp_pmd_gen5_init)
+{
+ qat_comp_gen_dev_ops[QAT_GEN5].compressdev_ops =
+ &qat_comp_ops_gen5;
+ qat_comp_gen_dev_ops[QAT_GEN5].qat_comp_get_capabilities =
+ qat_comp_cap_get_gen5;
+ qat_comp_gen_dev_ops[QAT_GEN5].qat_comp_get_num_im_bufs_required =
+ qat_comp_get_num_im_bufs_required_gen4;
+ qat_comp_gen_dev_ops[QAT_GEN5].qat_comp_get_ram_bank_flags =
+ qat_comp_get_ram_bank_flags_gen4;
+ qat_comp_gen_dev_ops[QAT_GEN5].qat_comp_set_slice_cfg_word =
+ qat_comp_set_slice_cfg_word_gen4;
+ qat_comp_gen_dev_ops[QAT_GEN5].qat_comp_get_feature_flags =
+ qat_comp_get_features_gen1;
+}
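
The compression side follows the same reuse pattern: only the capability list
is GEN5-specific, while the remaining callbacks registered above come from the
GEN4 and GEN1 code. A sketch of how the generic comp PMD path would pull the
GEN5 capabilities through this table (the wrapper function itself is
illustrative):

    /* Sketch: capability lookup through the table filled in by
     * RTE_INIT(qat_comp_pmd_gen5_init) above. */
    static struct qat_comp_capabilities_info
    qat_comp_caps_for_dev(struct qat_pci_device *qat_dev)
    {
        /* For a 420xx device qat_dev_gen is QAT_GEN5, so this resolves to
         * qat_comp_cap_get_gen5() and returns qat_gen5_comp_capabilities. */
        return qat_comp_gen_dev_ops[qat_dev->qat_dev_gen]
                .qat_comp_get_capabilities(qat_dev);
    }
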
@@ -25,6 +25,20 @@ int qat_comp_set_slice_cfg_word_gen1(struct qat_comp_xform *qat_xform,
uint64_t qat_comp_get_features_gen1(void);
+unsigned int
+qat_comp_get_num_im_bufs_required_gen4(void);
+
+int
+qat_comp_set_slice_cfg_word_gen4(struct qat_comp_xform *qat_xform,
+ const struct rte_comp_xform *xform,
+ enum rte_comp_op_type op_type, uint32_t *comp_slice_cfg_word);
+
+uint16_t qat_comp_get_ram_bank_flags_gen4(void);
+
+int
+qat_comp_dev_config_gen4(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config);
+
extern struct rte_compressdev_ops qat_comp_ops_gen1;
#endif /* _QAT_COMP_PMD_GENS_H_ */
@@ -233,7 +233,7 @@ qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
return 0;
}
-static int
+int
qat_sym_crypto_set_session_gen4(void *cdev, void *session)
{
struct qat_sym_session *ctx = session;
@@ -385,7 +385,7 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
return i;
}
-static int
+int
qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
{
struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
new file mode 100644
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2022 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <cryptodev_pmd.h>
+#include "qat_sym_session.h"
+#include "qat_sym.h"
+#include "qat_asym.h"
+#include "qat_crypto.h"
+#include "qat_crypto_pmd_gens.h"
+
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen5[] = {
+ QAT_SYM_PLAIN_AUTH_CAP(SHA1,
+ CAP_SET(block_size, 64),
+ CAP_RNG(digest_size, 1, 20, 1)),
+ QAT_SYM_AUTH_CAP(SHA224,
+ CAP_SET(block_size, 64),
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA224_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA1_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_CIPHER_CAP(SM4_ECB,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 0, 0, 0)),
+};
+
+static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen5[] = {
+ QAT_SYM_CIPHER_CAP(AES_CBC,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
+ QAT_SYM_AUTH_CAP(SHA256_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA384_HMAC,
+ CAP_SET(block_size, 128),
+ CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA512_HMAC,
+ CAP_SET(block_size, 128),
+ CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(AES_CMAC,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
+ QAT_SYM_AUTH_CAP(NULL,
+ CAP_SET(block_size, 1),
+ CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_CIPHER_CAP(NULL,
+ CAP_SET(block_size, 1),
+ CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA256,
+ CAP_SET(block_size, 64),
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA384,
+ CAP_SET(block_size, 128),
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_AUTH_CAP(SHA512,
+ CAP_SET(block_size, 128),
+ CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_CIPHER_CAP(AES_CTR,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
+ QAT_SYM_AEAD_CAP(AES_GCM,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
+ CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
+ QAT_SYM_AEAD_CAP(AES_CCM,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
+ CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
+ QAT_SYM_AUTH_CAP(AES_GMAC,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
+ CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
+ QAT_SYM_AEAD_CAP(CHACHA20_POLY1305,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 32, 32, 0),
+ CAP_RNG(digest_size, 16, 16, 0),
+ CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
+ QAT_SYM_CIPHER_CAP(SM4_CBC,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
+ QAT_SYM_CIPHER_CAP(SM4_CTR,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
+ QAT_SYM_PLAIN_AUTH_CAP(SM3,
+ CAP_SET(block_size, 64),
+ CAP_RNG(digest_size, 32, 32, 0)),
+ QAT_SYM_AUTH_CAP(SM3_HMAC,
+ CAP_SET(block_size, 64),
+ CAP_RNG(key_size, 16, 64, 4), CAP_RNG(digest_size, 32, 32, 0),
+ CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
+ QAT_SYM_CIPHER_CAP(ZUC_EEA3,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 25, 1)),
+ QAT_SYM_AUTH_CAP(ZUC_EIA3,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 32, 16), CAP_RNG(digest_size, 4, 16, 4),
+ CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 25, 1)),
+ QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
+ QAT_SYM_AUTH_CAP(SNOW3G_UIA2,
+ CAP_SET(block_size, 16),
+ CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
+ CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+check_cipher_capa(const struct rte_cryptodev_capabilities *cap,
+ enum rte_crypto_cipher_algorithm algo)
+{
+ if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ return 0;
+ if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return 0;
+ if (cap->sym.cipher.algo != algo)
+ return 0;
+ return 1;
+}
+
+static int
+check_auth_capa(const struct rte_cryptodev_capabilities *cap,
+ enum rte_crypto_auth_algorithm algo)
+{
+ if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ return 0;
+ if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ return 0;
+ if (cap->sym.auth.algo != algo)
+ return 0;
+ return 1;
+}
+
+static int
+qat_sym_crypto_cap_get_gen5(struct qat_cryptodev_private *internals,
+ const char *capa_memz_name,
+ const uint16_t slice_map)
+{
+ uint32_t legacy_capa_num, capa_num;
+ uint32_t size = sizeof(qat_sym_crypto_caps_gen5);
+ uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen5);
+ uint32_t i, iter = 0;
+ uint32_t curr_capa = 0;
+ legacy_capa_num = legacy_size/sizeof(struct rte_cryptodev_capabilities);
+ capa_num = RTE_DIM(qat_sym_crypto_caps_gen5);
+
+ if (unlikely(qat_legacy_capa))
+ size = size + legacy_size;
+
+ internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+ if (internals->capa_mz == NULL) {
+ internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+ size, rte_socket_id(), 0);
+ if (internals->capa_mz == NULL) {
+ QAT_LOG(DEBUG,
+ "Error allocating memzone for capabilities");
+ return -1;
+ }
+ }
+
+ struct rte_cryptodev_capabilities *addr =
+ (struct rte_cryptodev_capabilities *)
+ internals->capa_mz->addr;
+
+ struct rte_cryptodev_capabilities *capabilities;
+
+ if (unlikely(qat_legacy_capa)) {
+ capabilities = qat_sym_crypto_legacy_caps_gen5;
+ memcpy(addr, capabilities, legacy_size);
+ addr += legacy_capa_num;
+ }
+ capabilities = qat_sym_crypto_caps_gen5;
+
+ for (i = 0; i < capa_num; i++, iter++) {
+ if (slice_map & ICP_ACCEL_MASK_ZUC_256_SLICE && (
+ check_auth_capa(&capabilities[iter],
+ RTE_CRYPTO_AUTH_ZUC_EIA3) ||
+ check_cipher_capa(&capabilities[iter],
+ RTE_CRYPTO_CIPHER_ZUC_EEA3))) {
+ continue;
+ }
+
+ memcpy(addr + curr_capa, capabilities + iter,
+ sizeof(struct rte_cryptodev_capabilities));
+ curr_capa++;
+ }
+ internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+ return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen5(void *cdev, void *session)
+{
+ struct qat_sym_session *ctx = session;
+ enum rte_proc_type_t proc_type = rte_eal_process_type();
+ int ret;
+
+ if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
+ return -EINVAL;
+
+ ret = qat_sym_crypto_set_session_gen4(cdev, session);
+
+ if (ret == -ENOTSUP) {
+ /* GEN4 returns -ENOTSUP because it cannot handle some mixed algorithm
+ * combinations; GEN5 addresses these by setting extended hash flags below.
+ */
+ if ((ctx->aes_cmac ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+ (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_ZUC_256)) {
+ qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+ } else if ((ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_32 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_64 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_256_MAC_128) &&
+ ctx->qat_cipher_alg != ICP_QAT_HW_CIPHER_ALGO_ZUC_256) {
+ qat_sym_session_set_ext_hash_flags_gen2(ctx,
+ 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+ }
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
+RTE_INIT(qat_sym_crypto_gen5_init)
+{
+ qat_sym_gen_dev_ops[QAT_GEN5].cryptodev_ops = &qat_sym_crypto_ops_gen1;
+ qat_sym_gen_dev_ops[QAT_GEN5].get_capabilities =
+ qat_sym_crypto_cap_get_gen5;
+ qat_sym_gen_dev_ops[QAT_GEN5].set_session =
+ qat_sym_crypto_set_session_gen5;
+ qat_sym_gen_dev_ops[QAT_GEN5].set_raw_dp_ctx =
+ qat_sym_configure_raw_dp_ctx_gen4;
+ qat_sym_gen_dev_ops[QAT_GEN5].get_feature_flags =
+ qat_sym_crypto_feature_flags_get_gen1;
+ qat_sym_gen_dev_ops[QAT_GEN5].create_security_ctx =
+ qat_sym_create_security_gen1;
+}
+
+RTE_INIT(qat_asym_crypto_gen5_init)
+{
+ qat_asym_gen_dev_ops[QAT_GEN5].cryptodev_ops =
+ &qat_asym_crypto_ops_gen1;
+ qat_asym_gen_dev_ops[QAT_GEN5].get_capabilities =
+ qat_asym_crypto_cap_get_gen1;
+ qat_asym_gen_dev_ops[QAT_GEN5].get_feature_flags =
+ qat_asym_crypto_feature_flags_get_gen1;
+ qat_asym_gen_dev_ops[QAT_GEN5].set_session =
+ qat_asym_crypto_set_session_gen1;
+}
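
For context on the set_session fallback above, a hypothetical application-side
example that exercises it: a SNOW3G-UEA2 cipher chained with AES-CMAC auth is
one of the mixed combinations GEN4 rejects with -ENOTSUP and GEN5 accepts by
setting the extended hash flags. Device id, keys, IV offset and mempool below
are placeholders; key, IV and digest sizes follow the capability table in this
file.

    #include <rte_cryptodev.h>
    #include <rte_crypto_sym.h>

    #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
            sizeof(struct rte_crypto_sym_op))

    static void *
    create_mixed_snow3g_cmac_session(uint8_t dev_id,
            struct rte_mempool *sess_mp,
            uint8_t cipher_key[16], uint8_t auth_key[16])
    {
        struct rte_crypto_sym_xform auth_xform = {
            .type = RTE_CRYPTO_SYM_XFORM_AUTH,
            .auth = {
                .op = RTE_CRYPTO_AUTH_OP_GENERATE,
                .algo = RTE_CRYPTO_AUTH_AES_CMAC,
                .key = { .data = auth_key, .length = 16 },
                .digest_length = 4,
            },
        };
        struct rte_crypto_sym_xform cipher_xform = {
            .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
            .next = &auth_xform,
            .cipher = {
                .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
                .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
                .key = { .data = cipher_key, .length = 16 },
                .iv = { .offset = IV_OFFSET, .length = 16 },
            },
        };

        /* Lands in qat_sym_crypto_set_session_gen5(): GEN4 reports -ENOTSUP
         * for this mix, so the extended hash flags are applied instead. */
        return rte_cryptodev_sym_session_create(dev_id, &cipher_xform,
                sess_mp);
    }
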
@@ -1048,10 +1048,16 @@ qat_sym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
int
qat_sym_crypto_set_session_gen1(void *cryptodev, void *session);
+int
+qat_sym_crypto_set_session_gen4(void *cryptodev, void *session);
+
void
qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
uint8_t hash_flag);
+int
+qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx);
+
int
qat_asym_crypto_cap_get_gen1(struct qat_cryptodev_private *internals,
const char *capa_memz_name, const uint16_t slice_map);
@@ -407,7 +407,7 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- if (qat_dev_gen == QAT_GEN4)
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_GEN5)
session->is_ucs = 1;
break;
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
@@ -950,7 +950,7 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
session->auth_iv.length = AES_GCM_J0_LEN;
else
session->is_iv12B = 1;
- if (qat_dev_gen == QAT_GEN4) {
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_GEN5) {
session->is_cnt_zero = 1;
session->is_ucs = 1;
}
@@ -1126,7 +1126,7 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
- if (qat_dev_gen == QAT_GEN4)
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_GEN5)
session->is_ucs = 1;
if (session->cipher_iv.length == 0) {
session->cipher_iv.length = AES_GCM_J0_LEN;
@@ -1146,13 +1146,13 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
- if (qat_dev_gen == QAT_GEN4)
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_GEN5)
session->is_ucs = 1;
break;
case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
return -EINVAL;
- if (qat_dev_gen == QAT_GEN4)
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_GEN5)
session->is_ucs = 1;
session->qat_cipher_alg =
ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
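
The hunks above and below repeat the same generation test in several places;
as a reading aid, the check means "this generation uses the UCS cipher-slice
descriptor path introduced with GEN4". A possible helper expressing that once
(an illustrative sketch, not part of this patch):

    /* Illustrative only (not in this patch): both GEN4 and GEN5 use the UCS
     * cipher-slice descriptor format, which is what these checks encode. */
    static inline int
    qat_dev_gen_uses_ucs(enum qat_device_gen gen)
    {
        return gen == QAT_GEN4 || gen == QAT_GEN5;
    }

    /* e.g.  session->is_ucs = qat_dev_gen_uses_ucs(qat_dev_gen); */
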
@@ -2418,7 +2418,7 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
auth_param->u2.inner_prefix_sz =
qat_hash_get_block_size(cdesc->qat_hash_alg);
auth_param->hash_state_sz = digestsize;
- if (qat_dev_gen == QAT_GEN4) {
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_GEN5) {
ICP_QAT_FW_HASH_FLAG_MODE2_SET(
hash_cd_ctrl->hash_flags,
QAT_FW_LA_MODE2);
@@ -2984,6 +2984,7 @@ qat_sym_cd_crc_set(struct qat_sym_session *cdesc,
cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_gen3_crc_cd);
break;
case QAT_GEN4:
+ case QAT_GEN5:
crc_cfg.mode = ICP_QAT_HW_CIPHER_ECB_MODE;
crc_cfg.algo = ICP_QAT_HW_CIPHER_ALGO_NULL;
crc_cfg.hash_cmp_val = 0;