@@ -55,6 +55,9 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Updated Intel QuickAssist Technology driver.**
+
+ * Added support for virtual QAT (vQAT, device ID 0da5) devices in the QAT symmetric crypto driver.
Removed Items
-------------
@@ -143,6 +143,26 @@ qat_dev_read_config_gen4(struct qat_pci_device *qat_dev)
return 0;
}
+static int
+qat_dev_read_config_vqat(struct qat_pci_device *qat_dev)
+{
+ int i;
+ struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;
+ struct qat_qp_hw_data *hw_data;
+
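+ /*
+  * vQAT queue pairs are configured statically: every bundle gets a
+  * single symmetric-service queue pair with 128-byte request and
+  * 32-byte response descriptors.
+  */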
+ for (i = 0; i < QAT_GEN4_BUNDLE_NUM; i++) {
+ hw_data = &dev_extra->qp_gen4_data[i][0];
+ memset(hw_data, 0, sizeof(*hw_data));
+ hw_data->service_type = QAT_SERVICE_SYMMETRIC;
+ hw_data->tx_msg_size = 128;
+ hw_data->rx_msg_size = 32;
+ hw_data->tx_ring_num = 0;
+ hw_data->rx_ring_num = 1;
+ hw_data->hw_bundle_num = i;
+ }
+ return 0;
+}
+
static void
qat_qp_build_ring_base_gen4(void *io_addr,
struct qat_queue *queue)
@@ -268,6 +288,12 @@ qat_reset_ring_pairs_gen4(struct qat_pci_device *qat_pci_dev)
return 0;
}
+static int
+qat_reset_ring_pairs_vqat(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
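+ /* Ring-pair reset is a no-op for vQAT devices. */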
+ return 0;
+}
+
static const struct rte_mem_resource *
qat_dev_get_transport_bar_gen4(struct rte_pci_device *pci_dev)
{
@@ -304,10 +330,21 @@ static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen4 = {
.qat_dev_get_slice_map = qat_dev_get_slice_map_gen4,
};
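+/*
+ * vQAT reuses the Gen4 device ops except for config read and ring-pair
+ * reset, which use the vQAT-specific variants defined above.
+ */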
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_vqat = {
+ .qat_dev_reset_ring_pairs = qat_reset_ring_pairs_vqat,
+ .qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen4,
+ .qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen4,
+ .qat_dev_read_config = qat_dev_read_config_vqat,
+ .qat_dev_get_extra_size = qat_dev_get_extra_size_gen4,
+ .qat_dev_get_slice_map = qat_dev_get_slice_map_gen4,
+};
+
RTE_INIT(qat_dev_gen_4_init)
{
- qat_qp_hw_spec[QAT_GEN4] = &qat_qp_hw_spec_gen4;
+ qat_qp_hw_spec[QAT_VQAT] = qat_qp_hw_spec[QAT_GEN4] = &qat_qp_hw_spec_gen4;
qat_dev_hw_spec[QAT_GEN4] = &qat_dev_hw_spec_gen4;
+ qat_dev_hw_spec[QAT_VQAT] = &qat_dev_hw_spec_vqat;
qat_gen_config[QAT_GEN4].dev_gen = QAT_GEN4;
+ qat_gen_config[QAT_VQAT].dev_gen = QAT_VQAT;
qat_gen_config[QAT_GEN4].pf2vf_dev = &qat_pf2vf_gen4;
}
@@ -21,6 +21,7 @@ enum qat_device_gen {
QAT_GEN2,
QAT_GEN3,
QAT_GEN4,
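+ /* Virtual QAT (vQAT) device, device ID 0x0da5 */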
+ QAT_VQAT,
QAT_N_GENS
};
@@ -62,6 +62,9 @@ static const struct rte_pci_id pci_id_qat_map[] = {
{
RTE_PCI_DEVICE(0x8086, 0x4945),
},
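+ /* virtual QAT (vQAT) */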
+ {
+ RTE_PCI_DEVICE(0x8086, 0x0da5),
+ },
{.device_id = 0},
};
@@ -199,6 +202,8 @@ pick_gen(const struct rte_pci_device *pci_dev)
case 0x4943:
case 0x4945:
return QAT_GEN4;
+ case 0x0da5:
+ return QAT_VQAT;
default:
QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
return QAT_N_GENS;
@@ -406,14 +406,18 @@ qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
RTE_INIT(qat_sym_crypto_gen4_init)
{
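+ /*
+  * vQAT shares the Gen4 symmetric crypto ops; the raw data-path and
+  * security-context ops remain Gen4-only.
+  */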
- qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
- qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
+ qat_sym_gen_dev_ops[QAT_VQAT].cryptodev_ops =
+ qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
+ qat_sym_gen_dev_ops[QAT_VQAT].get_capabilities =
+ qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
qat_sym_crypto_cap_get_gen4;
- qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+ qat_sym_gen_dev_ops[QAT_VQAT].set_session =
+ qat_sym_gen_dev_ops[QAT_GEN4].set_session =
qat_sym_crypto_set_session_gen4;
qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
qat_sym_configure_raw_dp_ctx_gen4;
- qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
+ qat_sym_gen_dev_ops[QAT_VQAT].get_feature_flags =
+ qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
qat_sym_crypto_feature_flags_get_gen1;
qat_sym_gen_dev_ops[QAT_GEN4].create_security_ctx =
qat_sym_create_security_gen1;
@@ -405,7 +405,7 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- if (qat_dev_gen == QAT_GEN4)
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_VQAT)
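+ /* Gen4 and vQAT devices use the unified crypto slice (UCS) */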
session->is_ucs = 1;
break;
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
@@ -911,7 +911,7 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
session->auth_iv.length = AES_GCM_J0_LEN;
else
session->is_iv12B = 1;
- if (qat_dev_gen == QAT_GEN4) {
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_VQAT) {
session->is_cnt_zero = 1;
session->is_ucs = 1;
}
@@ -1039,7 +1039,7 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
- if (qat_dev_gen == QAT_GEN4)
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_VQAT)
session->is_ucs = 1;
if (session->cipher_iv.length == 0) {
session->cipher_iv.length = AES_GCM_J0_LEN;
@@ -1059,13 +1059,13 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
- if (qat_dev_gen == QAT_GEN4)
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_VQAT)
session->is_ucs = 1;
break;
case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
return -EINVAL;
- if (qat_dev_gen == QAT_GEN4)
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_VQAT)
session->is_ucs = 1;
session->qat_cipher_alg =
ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
@@ -2298,7 +2298,7 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
auth_param->u2.inner_prefix_sz =
qat_hash_get_block_size(cdesc->qat_hash_alg);
auth_param->hash_state_sz = digestsize;
- if (qat_dev_gen == QAT_GEN4) {
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_VQAT) {
ICP_QAT_FW_HASH_FLAG_MODE2_SET(
hash_cd_ctrl->hash_flags,
QAT_FW_LA_MODE2);
@@ -2840,6 +2840,7 @@ qat_sym_cd_crc_set(struct qat_sym_session *cdesc,
cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_gen3_crc_cd);
break;
case QAT_GEN4:
+ case QAT_VQAT:
crc_cfg.mode = ICP_QAT_HW_CIPHER_ECB_MODE;
crc_cfg.algo = ICP_QAT_HW_CIPHER_ALGO_NULL;
crc_cfg.hash_cmp_val = 0;