@@ -55,6 +55,9 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Updated Intel QuickAssist Technology driver.**
+
+ * Enabled support for virtual QAT (vQAT, device ID 0x0da5) devices in QAT PMD.
Removed Items
-------------
@@ -143,6 +143,42 @@ qat_dev_read_config_gen4(struct qat_pci_device *qat_dev)
return 0;
}
+static int
+qat_dev_read_config_vqat(struct qat_pci_device *qat_dev)
+{
+ int i = 0;
+ struct qat_dev_gen4_extra *dev_extra = qat_dev->dev_private;
+ struct qat_qp_hw_data *hw_data;
+ struct qat_device_info *qat_dev_instance =
+ &qat_pci_devs[qat_dev->qat_dev_id];
+ uint16_t sub_id = qat_dev_instance->pci_dev->id.subsystem_device_id;
+
+ for (; i < QAT_GEN4_BUNDLE_NUM; i++) {
+ hw_data = &dev_extra->qp_gen4_data[i][0];
+ memset(hw_data, 0, sizeof(*hw_data));
+ if (sub_id == ADF_VQAT_SYM_PCI_SUBSYSTEM_ID) {
+ hw_data->service_type = QAT_SERVICE_SYMMETRIC;
+ hw_data->tx_msg_size = 128;
+ hw_data->rx_msg_size = 32;
+ } else if (sub_id == ADF_VQAT_ASYM_PCI_SUBSYSTEM_ID) {
+ hw_data->service_type = QAT_SERVICE_ASYMMETRIC;
+ hw_data->tx_msg_size = 64;
+ hw_data->rx_msg_size = 32;
+ } else if (sub_id == ADF_VQAT_DC_PCI_SUBSYSTEM_ID) {
+ hw_data->service_type = QAT_SERVICE_COMPRESSION;
+ hw_data->tx_msg_size = 128;
+ hw_data->rx_msg_size = 32;
+ } else {
+ QAT_LOG(ERR, "Unrecognized subsystem id %hu", sub_id);
+ return -EINVAL;
+ }
+ hw_data->tx_ring_num = 0;
+ hw_data->rx_ring_num = 1;
+ hw_data->hw_bundle_num = i;
+ }
+ return 0;
+}
+
static void
qat_qp_build_ring_base_gen4(void *io_addr,
struct qat_queue *queue)
@@ -268,6 +304,12 @@ qat_reset_ring_pairs_gen4(struct qat_pci_device *qat_pci_dev)
return 0;
}
+static int
+qat_reset_ring_pairs_vqat(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
static const struct rte_mem_resource *
qat_dev_get_transport_bar_gen4(struct rte_pci_device *pci_dev)
{
@@ -304,10 +346,21 @@ static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_gen4 = {
.qat_dev_get_slice_map = qat_dev_get_slice_map_gen4,
};
+static struct qat_dev_hw_spec_funcs qat_dev_hw_spec_vqat = {
+ .qat_dev_reset_ring_pairs = qat_reset_ring_pairs_vqat,
+ .qat_dev_get_transport_bar = qat_dev_get_transport_bar_gen4,
+ .qat_dev_get_misc_bar = qat_dev_get_misc_bar_gen4,
+ .qat_dev_read_config = qat_dev_read_config_vqat,
+ .qat_dev_get_extra_size = qat_dev_get_extra_size_gen4,
+ .qat_dev_get_slice_map = qat_dev_get_slice_map_gen4,
+};
+
RTE_INIT(qat_dev_gen_4_init)
{
- qat_qp_hw_spec[QAT_GEN4] = &qat_qp_hw_spec_gen4;
+ qat_qp_hw_spec[QAT_VQAT] = qat_qp_hw_spec[QAT_GEN4] = &qat_qp_hw_spec_gen4;
qat_dev_hw_spec[QAT_GEN4] = &qat_dev_hw_spec_gen4;
+ qat_dev_hw_spec[QAT_VQAT] = &qat_dev_hw_spec_vqat;
qat_gen_config[QAT_GEN4].dev_gen = QAT_GEN4;
+ qat_gen_config[QAT_VQAT].dev_gen = QAT_VQAT;
qat_gen_config[QAT_GEN4].pf2vf_dev = &qat_pf2vf_gen4;
}
@@ -9,6 +9,11 @@
#define ADF_C4XXXIOV_VFLEGFUSES_OFFSET 0x4C
#define ADF1_C4XXXIOV_VFLEGFUSES_LEN 4
+/* Definitions of virtual QAT (vQAT) PCI subsystem IDs */
+#define ADF_VQAT_SYM_PCI_SUBSYSTEM_ID 0x00
+#define ADF_VQAT_ASYM_PCI_SUBSYSTEM_ID 0x01
+#define ADF_VQAT_DC_PCI_SUBSYSTEM_ID 0x02
+
enum icp_qat_slice_mask {
ICP_ACCEL_MASK_CIPHER_SLICE = 0x01,
ICP_ACCEL_MASK_AUTH_SLICE = 0x02,
@@ -21,6 +21,7 @@ enum qat_device_gen {
QAT_GEN2,
QAT_GEN3,
QAT_GEN4,
+ QAT_VQAT,
QAT_N_GENS
};
@@ -62,6 +62,9 @@ static const struct rte_pci_id pci_id_qat_map[] = {
{
RTE_PCI_DEVICE(0x8086, 0x4945),
},
+ {
+ RTE_PCI_DEVICE(0x8086, 0x0da5),
+ },
{.device_id = 0},
};
@@ -199,6 +202,8 @@ pick_gen(const struct rte_pci_device *pci_dev)
case 0x4943:
case 0x4945:
return QAT_GEN4;
+ case 0x0da5:
+ return QAT_VQAT;
default:
QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
return QAT_N_GENS;
@@ -281,6 +286,7 @@ qat_pci_device_allocate(struct rte_pci_device *pci_dev,
strlcpy(qat_dev->name, name, QAT_DEV_NAME_MAX_LEN);
qat_dev->qat_dev_id = qat_dev_id;
qat_dev->qat_dev_gen = qat_dev_gen;
+ qat_pci_devs[qat_dev_id].pci_dev = pci_dev;
ops_hw = qat_dev_hw_spec[qat_dev->qat_dev_gen];
NOT_NULL(ops_hw->qat_dev_get_misc_bar, goto error,
@@ -326,7 +332,6 @@ qat_pci_device_allocate(struct rte_pci_device *pci_dev,
* qat_dev to list of devices
*/
qat_pci_devs[qat_dev_id].mz = qat_dev_mz;
- qat_pci_devs[qat_dev_id].pci_dev = pci_dev;
qat_nb_pci_devices++;
QAT_LOG(DEBUG, "QAT device %d found, name %s, total QATs %d",
@@ -198,16 +198,22 @@ qat_comp_get_num_im_bufs_required_gen4(void)
RTE_INIT(qat_comp_pmd_gen4_init)
{
- qat_comp_gen_dev_ops[QAT_GEN4].compressdev_ops =
+ qat_comp_gen_dev_ops[QAT_VQAT].compressdev_ops =
+ qat_comp_gen_dev_ops[QAT_GEN4].compressdev_ops =
&qat_comp_ops_gen4;
- qat_comp_gen_dev_ops[QAT_GEN4].qat_comp_get_capabilities =
+ qat_comp_gen_dev_ops[QAT_VQAT].qat_comp_get_capabilities =
+ qat_comp_gen_dev_ops[QAT_GEN4].qat_comp_get_capabilities =
qat_comp_cap_get_gen4;
- qat_comp_gen_dev_ops[QAT_GEN4].qat_comp_get_num_im_bufs_required =
+ qat_comp_gen_dev_ops[QAT_VQAT].qat_comp_get_num_im_bufs_required =
+ qat_comp_gen_dev_ops[QAT_GEN4].qat_comp_get_num_im_bufs_required =
qat_comp_get_num_im_bufs_required_gen4;
- qat_comp_gen_dev_ops[QAT_GEN4].qat_comp_get_ram_bank_flags =
+ qat_comp_gen_dev_ops[QAT_VQAT].qat_comp_get_ram_bank_flags =
+ qat_comp_gen_dev_ops[QAT_GEN4].qat_comp_get_ram_bank_flags =
qat_comp_get_ram_bank_flags_gen4;
- qat_comp_gen_dev_ops[QAT_GEN4].qat_comp_set_slice_cfg_word =
+ qat_comp_gen_dev_ops[QAT_VQAT].qat_comp_set_slice_cfg_word =
+ qat_comp_gen_dev_ops[QAT_GEN4].qat_comp_set_slice_cfg_word =
qat_comp_set_slice_cfg_word_gen4;
- qat_comp_gen_dev_ops[QAT_GEN4].qat_comp_get_feature_flags =
+ qat_comp_gen_dev_ops[QAT_VQAT].qat_comp_get_feature_flags =
+ qat_comp_gen_dev_ops[QAT_GEN4].qat_comp_get_feature_flags =
qat_comp_get_features_gen1;
}
@@ -682,11 +682,18 @@ qat_comp_dev_create(struct qat_pci_device *qat_pci_dev,
const struct qat_comp_gen_dev_ops *qat_comp_gen_ops =
&qat_comp_gen_dev_ops[qat_pci_dev->qat_dev_gen];
uint64_t capa_size;
+ uint16_t sub_id = qat_dev_instance->pci_dev->id.subsystem_device_id;
snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
qat_pci_dev->name, "comp");
QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);
+ if (qat_pci_dev->qat_dev_gen == QAT_VQAT &&
+ sub_id != ADF_VQAT_DC_PCI_SUBSYSTEM_ID) {
+ QAT_LOG(ERR, "Device (vqat instance) %s does not support compression",
+ name);
+ return -EFAULT;
+ }
if (qat_comp_gen_ops->compressdev_ops == NULL) {
QAT_LOG(DEBUG, "Device %s does not support compression", name);
return -ENOTSUP;
@@ -406,14 +406,18 @@ qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
RTE_INIT(qat_sym_crypto_gen4_init)
{
- qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
- qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
+ qat_sym_gen_dev_ops[QAT_VQAT].cryptodev_ops =
+ qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
+ qat_sym_gen_dev_ops[QAT_VQAT].get_capabilities =
+ qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
qat_sym_crypto_cap_get_gen4;
- qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+ qat_sym_gen_dev_ops[QAT_VQAT].set_session =
+ qat_sym_gen_dev_ops[QAT_GEN4].set_session =
qat_sym_crypto_set_session_gen4;
qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
qat_sym_configure_raw_dp_ctx_gen4;
- qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
+ qat_sym_gen_dev_ops[QAT_VQAT].get_feature_flags =
+ qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
qat_sym_crypto_feature_flags_get_gen1;
qat_sym_gen_dev_ops[QAT_GEN4].create_security_ctx =
qat_sym_create_security_gen1;
@@ -421,12 +425,16 @@ RTE_INIT(qat_sym_crypto_gen4_init)
RTE_INIT(qat_asym_crypto_gen4_init)
{
- qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops =
+ qat_asym_gen_dev_ops[QAT_VQAT].cryptodev_ops =
+ qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops =
&qat_asym_crypto_ops_gen1;
- qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities =
+ qat_asym_gen_dev_ops[QAT_VQAT].get_capabilities =
+ qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities =
qat_asym_crypto_cap_get_gen1;
- qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags =
+ qat_asym_gen_dev_ops[QAT_VQAT].get_feature_flags =
+ qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags =
qat_asym_crypto_feature_flags_get_gen1;
- qat_asym_gen_dev_ops[QAT_GEN4].set_session =
+ qat_asym_gen_dev_ops[QAT_VQAT].set_session =
+ qat_asym_gen_dev_ops[QAT_GEN4].set_session =
qat_asym_crypto_set_session_gen1;
}
@@ -1517,11 +1517,18 @@ qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
int i = 0;
uint16_t slice_map = 0;
+ uint16_t sub_id = qat_dev_instance->pci_dev->id.subsystem_device_id;
snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
qat_pci_dev->name, "asym");
QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name);
+ if (qat_pci_dev->qat_dev_gen == QAT_VQAT &&
+ sub_id != ADF_VQAT_ASYM_PCI_SUBSYSTEM_ID) {
+ QAT_LOG(ERR, "Device (vqat instance) %s does not support asymmetric crypto",
+ name);
+ return -EFAULT;
+ }
if (gen_dev_ops->cryptodev_ops == NULL) {
QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
name);
@@ -202,11 +202,18 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
struct qat_cryptodev_private *internals;
const struct qat_crypto_gen_dev_ops *gen_dev_ops =
&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+ uint16_t sub_id = qat_dev_instance->pci_dev->id.subsystem_device_id;
snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
qat_pci_dev->name, "sym");
QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+ if (qat_pci_dev->qat_dev_gen == QAT_VQAT &&
+ sub_id != ADF_VQAT_SYM_PCI_SUBSYSTEM_ID) {
+ QAT_LOG(ERR, "Device (vqat instance) %s does not support symmetric crypto",
+ name);
+ return -EFAULT;
+ }
if (gen_dev_ops->cryptodev_ops == NULL) {
QAT_LOG(ERR, "Device %s does not support symmetric crypto",
name);
@@ -405,7 +405,7 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- if (qat_dev_gen == QAT_GEN4)
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_VQAT)
session->is_ucs = 1;
break;
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
@@ -911,7 +911,7 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
session->auth_iv.length = AES_GCM_J0_LEN;
else
session->is_iv12B = 1;
- if (qat_dev_gen == QAT_GEN4) {
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_VQAT) {
session->is_cnt_zero = 1;
session->is_ucs = 1;
}
@@ -1039,7 +1039,7 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
- if (qat_dev_gen == QAT_GEN4)
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_VQAT)
session->is_ucs = 1;
if (session->cipher_iv.length == 0) {
session->cipher_iv.length = AES_GCM_J0_LEN;
@@ -1059,13 +1059,13 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
- if (qat_dev_gen == QAT_GEN4)
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_VQAT)
session->is_ucs = 1;
break;
case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
if (aead_xform->key.length != ICP_QAT_HW_CHACHAPOLY_KEY_SZ)
return -EINVAL;
- if (qat_dev_gen == QAT_GEN4)
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_VQAT)
session->is_ucs = 1;
session->qat_cipher_alg =
ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
@@ -2298,7 +2298,7 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
auth_param->u2.inner_prefix_sz =
qat_hash_get_block_size(cdesc->qat_hash_alg);
auth_param->hash_state_sz = digestsize;
- if (qat_dev_gen == QAT_GEN4) {
+ if (qat_dev_gen == QAT_GEN4 || qat_dev_gen == QAT_VQAT) {
ICP_QAT_FW_HASH_FLAG_MODE2_SET(
hash_cd_ctrl->hash_flags,
QAT_FW_LA_MODE2);
@@ -2840,6 +2840,7 @@ qat_sym_cd_crc_set(struct qat_sym_session *cdesc,
cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_gen3_crc_cd);
break;
case QAT_GEN4:
+ case QAT_VQAT:
crc_cfg.mode = ICP_QAT_HW_CIPHER_ECB_MODE;
crc_cfg.algo = ICP_QAT_HW_CIPHER_ALGO_NULL;
crc_cfg.hash_cmp_val = 0;