[v11,4/7] drivers/baseband: update PMDs to expose queue per operation
Checks
Commit Message
Add support in the existing bbdev PMDs for exposing the explicit number
of queues and the priority for each operation type configured on the device.
Signed-off-by: Nicolas Chautru <nicolas.chautru@intel.com>
Acked-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
drivers/baseband/acc100/rte_acc100_pmd.c | 29 +++++++++++--------
.../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 8 +++++
drivers/baseband/fpga_lte_fec/fpga_lte_fec.c | 8 +++++
drivers/baseband/la12xx/bbdev_la12xx.c | 7 +++++
.../baseband/turbo_sw/bbdev_turbo_software.c | 12 ++++++++
5 files changed, 52 insertions(+), 12 deletions(-)
@@ -967,6 +967,7 @@ acc100_dev_info_get(struct rte_bbdev *dev,
struct rte_bbdev_driver_info *dev_info)
{
struct acc100_device *d = dev->data->dev_private;
+ int i;
static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
{
@@ -1063,19 +1064,23 @@ acc100_dev_info_get(struct rte_bbdev *dev,
fetch_acc100_config(dev);
dev_info->device_status = RTE_BBDEV_DEV_NOT_SUPPORTED;
- /* This isn't ideal because it reports the maximum number of queues but
- * does not provide info on how many can be uplink/downlink or different
- * priorities
- */
- dev_info->max_num_queues =
- d->acc100_conf.q_dl_5g.num_aqs_per_groups *
- d->acc100_conf.q_dl_5g.num_qgroups +
- d->acc100_conf.q_ul_5g.num_aqs_per_groups *
- d->acc100_conf.q_ul_5g.num_qgroups +
- d->acc100_conf.q_dl_4g.num_aqs_per_groups *
- d->acc100_conf.q_dl_4g.num_qgroups +
- d->acc100_conf.q_ul_4g.num_aqs_per_groups *
+ /* Expose number of queues */
+ dev_info->num_queues[RTE_BBDEV_OP_NONE] = 0;
+ dev_info->num_queues[RTE_BBDEV_OP_TURBO_DEC] = d->acc100_conf.q_ul_4g.num_aqs_per_groups *
d->acc100_conf.q_ul_4g.num_qgroups;
+ dev_info->num_queues[RTE_BBDEV_OP_TURBO_ENC] = d->acc100_conf.q_dl_4g.num_aqs_per_groups *
+ d->acc100_conf.q_dl_4g.num_qgroups;
+ dev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = d->acc100_conf.q_ul_5g.num_aqs_per_groups *
+ d->acc100_conf.q_ul_5g.num_qgroups;
+ dev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = d->acc100_conf.q_dl_5g.num_aqs_per_groups *
+ d->acc100_conf.q_dl_5g.num_qgroups;
+ dev_info->queue_priority[RTE_BBDEV_OP_TURBO_DEC] = d->acc100_conf.q_ul_4g.num_qgroups;
+ dev_info->queue_priority[RTE_BBDEV_OP_TURBO_ENC] = d->acc100_conf.q_dl_4g.num_qgroups;
+ dev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = d->acc100_conf.q_ul_5g.num_qgroups;
+ dev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = d->acc100_conf.q_dl_5g.num_qgroups;
+ dev_info->max_num_queues = 0;
+ for (i = RTE_BBDEV_OP_TURBO_DEC; i <= RTE_BBDEV_OP_LDPC_ENC; i++)
+ dev_info->max_num_queues += dev_info->num_queues[i];
dev_info->queue_size_lim = ACC100_MAX_QUEUE_DEPTH;
dev_info->hardware_accelerated = true;
dev_info->max_dl_queue_priority =
@@ -379,6 +379,14 @@ fpga_dev_info_get(struct rte_bbdev *dev,
if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID)
dev_info->max_num_queues++;
}
+ /* Expose number of queues per operation type */
+ dev_info->num_queues[RTE_BBDEV_OP_NONE] = 0;
+ dev_info->num_queues[RTE_BBDEV_OP_TURBO_DEC] = 0;
+ dev_info->num_queues[RTE_BBDEV_OP_TURBO_ENC] = 0;
+ dev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = dev_info->max_num_queues / 2;
+ dev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = dev_info->max_num_queues / 2;
+ dev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = 1;
+ dev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = 1;
}
/**
@@ -655,6 +655,14 @@ fpga_dev_info_get(struct rte_bbdev *dev,
if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID)
dev_info->max_num_queues++;
}
+ /* Expose number of queues per operation type */
+ dev_info->num_queues[RTE_BBDEV_OP_NONE] = 0;
+ dev_info->num_queues[RTE_BBDEV_OP_TURBO_DEC] = dev_info->max_num_queues / 2;
+ dev_info->num_queues[RTE_BBDEV_OP_TURBO_ENC] = dev_info->max_num_queues / 2;
+ dev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = 0;
+ dev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = 0;
+ dev_info->queue_priority[RTE_BBDEV_OP_TURBO_DEC] = 1;
+ dev_info->queue_priority[RTE_BBDEV_OP_TURBO_ENC] = 1;
}
/**
@@ -103,6 +103,13 @@ la12xx_info_get(struct rte_bbdev *dev __rte_unused,
dev_info->min_alignment = 64;
dev_info->device_status = RTE_BBDEV_DEV_NOT_SUPPORTED;
+ dev_info->num_queues[RTE_BBDEV_OP_NONE] = 0;
+ dev_info->num_queues[RTE_BBDEV_OP_TURBO_DEC] = 0;
+ dev_info->num_queues[RTE_BBDEV_OP_TURBO_ENC] = 0;
+ dev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = LA12XX_MAX_QUEUES / 2;
+ dev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = LA12XX_MAX_QUEUES / 2;
+ dev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = 1;
+ dev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = 1;
rte_bbdev_log_debug("got device info from %u", dev->data->dev_id);
}
@@ -158,6 +158,8 @@ static void
info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
{
struct bbdev_private *internals = dev->data->dev_private;
+ const struct rte_bbdev_op_cap *op_cap;
+ int num_op_type = 0;
static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
#ifdef RTE_BBDEV_SDK_AVX2
@@ -257,6 +259,16 @@ info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
dev_info->data_endianness = RTE_LITTLE_ENDIAN;
dev_info->device_status = RTE_BBDEV_DEV_NOT_SUPPORTED;
+ op_cap = bbdev_capabilities;
+ for (; op_cap->type != RTE_BBDEV_OP_NONE; ++op_cap)
+ num_op_type++;
+ op_cap = bbdev_capabilities;
+ if (num_op_type > 0) {
+ int num_queue_per_type = dev_info->max_num_queues / num_op_type;
+ for (; op_cap->type != RTE_BBDEV_OP_NONE; ++op_cap)
+ dev_info->num_queues[op_cap->type] = num_queue_per_type;
+ }
+
rte_bbdev_log_debug("got device info from %u\n", dev->data->dev_id);
}