[v5,06/12] baseband/acc: refactor to allow unified driver extension

Message ID 20231005194907.557517-7-nicolas.chautru@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Series: VRB2 bbdev PMD introduction

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Chautru, Nicolas Oct. 5, 2023, 7:49 p.m. UTC
  Adding a few functions and common code prior to
extending the VRB driver.

Signed-off-by: Nicolas Chautru <nicolas.chautru@intel.com>
---
 drivers/baseband/acc/acc_common.h  | 175 ++++++++++++++++++++++++++---
 drivers/baseband/acc/rte_vrb_pmd.c |  58 +++++-----
 2 files changed, 190 insertions(+), 43 deletions(-)
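
For context on the refactor: rather than hard-coding the VRB1 bitfield layout, the common code now dispatches on the device variant when reading Info Ring entries and queue indices. The standalone sketch below illustrates that dispatch pattern only; the union layout and field widths are simplified placeholders, not the driver's actual union acc_info_ring_data.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's variant identifiers. */
#define VRB1_VARIANT 2
#define VRB2_VARIANT 3

/* Illustrative Info Ring entry: VRB1 and VRB2 pack the same logical
 * fields with different widths, so both layouts share one union.
 * Field widths here are placeholders, not the real register layout. */
union info_ring_entry {
	uint32_t val;
	struct {	/* VRB1-style layout (illustrative widths) */
		uint32_t aq_id:4, qg_id:4, vf_id:6, rest:18;
	};
	struct {	/* VRB2-style layout (illustrative widths) */
		uint32_t aq_id_vrb2:6, qg_id_vrb2:5, vf_id_vrb2:6, rest_vrb2:15;
	};
};

/* Variant-aware accessor: callers never touch the raw bitfields. */
static inline uint16_t
vf_from_entry(union info_ring_entry e, uint16_t device_variant)
{
	return (device_variant == VRB2_VARIANT) ? e.vf_id_vrb2 : e.vf_id;
}

int main(void)
{
	union info_ring_entry e = { .val = 0 };

	e.vf_id_vrb2 = 5;	/* pretend a VRB2 device wrote this entry */
	printf("vf = %u\n", (unsigned)vf_from_entry(e, VRB2_VARIANT));
	return 0;
}

The interrupt handler and queue lookup in rte_vrb_pmd.c follow the same idea: they call the wrapper with acc_dev->device_variant instead of reading a layout-specific field directly.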
  

Comments

Maxime Coquelin Oct. 6, 2023, 9:32 a.m. UTC | #1
On 10/5/23 21:49, Nicolas Chautru wrote:
> Adding a few functions and common code prior to
> extending the VRB driver.
> 
> Signed-off-by: Nicolas Chautru <nicolas.chautru@intel.com>
> ---
>   drivers/baseband/acc/acc_common.h  | 175 ++++++++++++++++++++++++++---
>   drivers/baseband/acc/rte_vrb_pmd.c |  58 +++++-----
>   2 files changed, 190 insertions(+), 43 deletions(-)
> 
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Maxime
  
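As background for the queue-index helpers added in acc_common.h: a queue index is a bit-packed value with the AQ ID in the low bits, the VF ID above it, and the group ID above that, with wider fields on VRB2. The round-trip sketch below uses the shift constants from the patch; the helper names, the packing without a VF field, and the mask widths (taken from the decode wrappers) are illustrative assumptions rather than driver code.

#include <assert.h>
#include <stdint.h>

/* Queue index hierarchy shifts, as defined in the patch. The VF ID
 * occupies the bits between the AQ ID and the group ID. */
#define VRB1_GRP_ID_SHIFT 10
#define VRB1_VF_ID_SHIFT   4
#define VRB2_GRP_ID_SHIFT 12
#define VRB2_VF_ID_SHIFT   6

#define VRB2_VARIANT 3

/* Pack a queue index from group and AQ (VF bits left at zero here,
 * as in the PF-side allocation path). */
static inline uint32_t
pack_q_idx(uint16_t group, uint16_t aq, uint16_t variant)
{
	return (variant == VRB2_VARIANT) ?
		((uint32_t)group << VRB2_GRP_ID_SHIFT) + aq :
		((uint32_t)group << VRB1_GRP_ID_SHIFT) + aq;
}

/* Decode helpers mirroring qg_from_q()/aq_from_q() in the patch. */
static inline uint32_t
unpack_group(uint32_t q_idx, uint16_t variant)
{
	return (variant == VRB2_VARIANT) ?
		(q_idx >> VRB2_GRP_ID_SHIFT) & 0x1F :
		(q_idx >> VRB1_GRP_ID_SHIFT) & 0xF;
}

static inline uint32_t
unpack_aq(uint32_t q_idx, uint16_t variant)
{
	return (variant == VRB2_VARIANT) ? (q_idx & 0x3F) : (q_idx & 0xF);
}

int main(void)
{
	uint32_t q = pack_q_idx(3, 7, VRB2_VARIANT);

	assert(unpack_group(q, VRB2_VARIANT) == 3);
	assert(unpack_aq(q, VRB2_VARIANT) == 7);
	return 0;
}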

Patch

diff --git a/drivers/baseband/acc/acc_common.h b/drivers/baseband/acc/acc_common.h
index a8b53acd94..6f73d44959 100644
--- a/drivers/baseband/acc/acc_common.h
+++ b/drivers/baseband/acc/acc_common.h
@@ -18,6 +18,7 @@ 
 #define ACC_DMA_BLKID_OUT_HARQ      3
 #define ACC_DMA_BLKID_IN_HARQ       3
 #define ACC_DMA_BLKID_IN_MLD_R      3
+#define ACC_DMA_BLKID_DEWIN_IN      3
 
 /* Values used in filling in decode FCWs */
 #define ACC_FCW_TD_VER              1
@@ -103,6 +104,9 @@ 
 #define ACC_MAX_NUM_QGRPS              32
 #define ACC_RING_SIZE_GRANULARITY      64
 #define ACC_MAX_FCW_SIZE              128
+#define ACC_IQ_SIZE                    4
+
+#define ACC_FCW_FFT_BLEN_3             28
 
 /* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */
 #define ACC_N_ZC_1 66 /* N = 66 Zc for BG 1 */
@@ -132,6 +136,17 @@ 
 #define ACC_LIM_21 14 /* 0.21 */
 #define ACC_LIM_31 20 /* 0.31 */
 #define ACC_MAX_E (128 * 1024 - 2)
+#define ACC_MAX_CS 12
+
+#define ACC100_VARIANT          0
+#define VRB1_VARIANT		2
+#define VRB2_VARIANT		3
+
+/* Queue Index Hierarchy */
+#define VRB1_GRP_ID_SHIFT    10
+#define VRB1_VF_ID_SHIFT     4
+#define VRB2_GRP_ID_SHIFT    12
+#define VRB2_VF_ID_SHIFT     6
 
 #define ACC_MAX_FFT_WIN      16
 
@@ -334,6 +349,37 @@  struct __rte_packed acc_fcw_fft {
 		res:19;
 };
 
+/* FFT Frame Control Word. */
+struct __rte_packed acc_fcw_fft_3 {
+	uint32_t in_frame_size:16,
+		leading_pad_size:16;
+	uint32_t out_frame_size:16,
+		leading_depad_size:16;
+	uint32_t cs_window_sel;
+	uint32_t cs_window_sel2:16,
+		cs_enable_bmap:16;
+	uint32_t num_antennas:8,
+		idft_size:8,
+		dft_size:8,
+		cs_offset:8;
+	uint32_t idft_shift:8,
+		dft_shift:8,
+		cs_multiplier:16;
+	uint32_t bypass:2,
+		fp16_in:1,
+		fp16_out:1,
+		exp_adj:4,
+		power_shift:4,
+		power_en:1,
+		enable_dewin:1,
+		freq_resample_mode:2,
+		depad_output_size:16;
+	uint16_t cs_theta_0[ACC_MAX_CS];
+	uint32_t cs_theta_d[ACC_MAX_CS];
+	int8_t cs_time_offset[ACC_MAX_CS];
+};
+
+
 /* MLD-TS Frame Control Word */
 struct __rte_packed acc_fcw_mldts {
 	uint32_t fcw_version:4,
@@ -475,14 +521,14 @@  union acc_info_ring_data {
 		uint16_t valid: 1;
 	};
 	struct {
-		uint32_t aq_id_3: 6;
-		uint32_t qg_id_3: 5;
-		uint32_t vf_id_3: 6;
-		uint32_t int_nb_3: 6;
-		uint32_t msi_0_3: 1;
-		uint32_t vf2pf_3: 6;
-		uint32_t loop_3: 1;
-		uint32_t valid_3: 1;
+		uint32_t aq_id_vrb2: 6;
+		uint32_t qg_id_vrb2: 5;
+		uint32_t vf_id_vrb2: 6;
+		uint32_t int_nb_vrb2: 6;
+		uint32_t msi_0_vrb2: 1;
+		uint32_t vf2pf_vrb2: 6;
+		uint32_t loop_vrb2: 1;
+		uint32_t valid_vrb2: 1;
 	};
 } __rte_packed;
 
@@ -764,22 +810,114 @@  alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc_device *d,
 	free_base_addresses(base_addrs, i);
 }
 
+/* Wrapper to provide VF index from ring data. */
+static inline uint16_t
+vf_from_ring(const union acc_info_ring_data ring_data, uint16_t device_variant)
+{
+	if (device_variant == VRB2_VARIANT)
+		return ring_data.vf_id_vrb2;
+	else
+		return ring_data.vf_id;
+}
+
+/* Wrapper to provide QG index from ring data. */
+static inline uint16_t
+qg_from_ring(const union acc_info_ring_data ring_data, uint16_t device_variant)
+{
+	if (device_variant == VRB2_VARIANT)
+		return ring_data.qg_id_vrb2;
+	else
+		return ring_data.qg_id;
+}
+
+/* Wrapper to provide AQ index from ring data. */
+static inline uint16_t
+aq_from_ring(const union acc_info_ring_data ring_data, uint16_t device_variant)
+{
+	if (device_variant == VRB2_VARIANT)
+		return ring_data.aq_id_vrb2;
+	else
+		return ring_data.aq_id;
+}
+
+/* Wrapper to provide int index from ring data. */
+static inline uint16_t
+int_from_ring(const union acc_info_ring_data ring_data, uint16_t device_variant)
+{
+	if (device_variant == VRB2_VARIANT)
+		return ring_data.int_nb_vrb2;
+	else
+		return ring_data.int_nb;
+}
+
+/* Wrapper to provide queue index from group and aq index. */
+static inline int
+queue_index(uint16_t group_idx, uint16_t aq_idx, uint16_t device_variant)
+{
+	if (device_variant == VRB2_VARIANT)
+		return (group_idx << VRB2_GRP_ID_SHIFT) + aq_idx;
+	else
+		return (group_idx << VRB1_GRP_ID_SHIFT) + aq_idx;
+}
+
+/* Wrapper to provide queue group from queue index. */
+static inline int
+qg_from_q(uint32_t q_idx, uint16_t device_variant)
+{
+	if (device_variant == VRB2_VARIANT)
+		return (q_idx >> VRB2_GRP_ID_SHIFT) & 0x1F;
+	else
+		return (q_idx >> VRB1_GRP_ID_SHIFT) & 0xF;
+}
+
+/* Wrapper to provide vf from queue index. */
+static inline int32_t
+vf_from_q(uint32_t q_idx, uint16_t device_variant)
+{
+	if (device_variant == VRB2_VARIANT)
+		return (q_idx >> VRB2_VF_ID_SHIFT)  & 0x3F;
+	else
+		return (q_idx >> VRB1_VF_ID_SHIFT)  & 0x3F;
+}
+
+/* Wrapper to provide aq index from queue index. */
+static inline int32_t
+aq_from_q(uint32_t q_idx, uint16_t device_variant)
+{
+	if (device_variant == VRB2_VARIANT)
+		return q_idx & 0x3F;
+	else
+		return q_idx & 0xF;
+}
+
+/* Wrapper to set VF index in ring data. */
+static inline int32_t
+set_vf_in_ring(volatile union acc_info_ring_data *ring_data,
+		uint16_t device_variant, uint16_t value)
+{
+	if (device_variant == VRB2_VARIANT)
+		return ring_data->vf_id_vrb2 = value;
+	else
+		return ring_data->vf_id = value;
+}
+
 /*
  * Find queue_id of a device queue based on details from the Info Ring.
  * If a queue isn't found UINT16_MAX is returned.
  */
 static inline uint16_t
-get_queue_id_from_ring_info(struct rte_bbdev_data *data,
-		const union acc_info_ring_data ring_data)
+get_queue_id_from_ring_info(struct rte_bbdev_data *data, const union acc_info_ring_data ring_data)
 {
 	uint16_t queue_id;
+	struct acc_queue *acc_q;
+	struct acc_device *d = data->dev_private;
 
 	for (queue_id = 0; queue_id < data->num_queues; ++queue_id) {
-		struct acc_queue *acc_q =
-				data->queues[queue_id].queue_private;
-		if (acc_q != NULL && acc_q->aq_id == ring_data.aq_id &&
-				acc_q->qgrp_id == ring_data.qg_id &&
-				acc_q->vf_id == ring_data.vf_id)
+		acc_q = data->queues[queue_id].queue_private;
+
+		if (acc_q != NULL && acc_q->aq_id == aq_from_ring(ring_data, d->device_variant) &&
+				acc_q->qgrp_id == qg_from_ring(ring_data, d->device_variant) &&
+				acc_q->vf_id == vf_from_ring(ring_data, d->device_variant))
 			return queue_id;
 	}
 
@@ -1441,4 +1579,11 @@  get_num_cbs_in_tb_ldpc_enc(struct rte_bbdev_op_ldpc_enc *ldpc_enc)
 	return cbs_in_tb;
 }
 
+static inline void
+acc_reg_fast_write(struct acc_device *d, uint32_t offset, uint32_t value)
+{
+	void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
+	mmio_write(reg_addr, value);
+}
+
 #endif /* _ACC_COMMON_H_ */
diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c
index 0979de0354..3907102934 100644
--- a/drivers/baseband/acc/rte_vrb_pmd.c
+++ b/drivers/baseband/acc/rte_vrb_pmd.c
@@ -340,17 +340,18 @@  static inline void
 vrb_check_ir(struct acc_device *acc_dev)
 {
 	volatile union acc_info_ring_data *ring_data;
-	uint16_t info_ring_head = acc_dev->info_ring_head;
+	uint16_t info_ring_head = acc_dev->info_ring_head, int_nb;
 	if (unlikely(acc_dev->info_ring == NULL))
 		return;
 
 	ring_data = acc_dev->info_ring + (acc_dev->info_ring_head & ACC_INFO_RING_MASK);
 
 	while (ring_data->valid) {
-		if ((ring_data->int_nb < ACC_PF_INT_DMA_DL_DESC_IRQ) || (
-				ring_data->int_nb > ACC_PF_INT_DMA_MLD_DESC_IRQ)) {
+		int_nb = int_from_ring(*ring_data, acc_dev->device_variant);
+		if ((int_nb < ACC_PF_INT_DMA_DL_DESC_IRQ) || (
+				int_nb > ACC_PF_INT_DMA_MLD_DESC_IRQ)) {
 			rte_bbdev_log(WARNING, "InfoRing: ITR:%d Info:0x%x",
-					ring_data->int_nb, ring_data->detailed_info);
+					int_nb, ring_data->detailed_info);
 			/* Initialize Info Ring entry and move forward. */
 			ring_data->val = 0;
 		}
@@ -367,16 +368,21 @@  vrb_dev_interrupt_handler(void *cb_arg)
 	struct acc_device *acc_dev = dev->data->dev_private;
 	volatile union acc_info_ring_data *ring_data;
 	struct acc_deq_intr_details deq_intr_det;
+	uint16_t vf_id, aq_id, qg_id, int_nb;
 
 	ring_data = acc_dev->info_ring + (acc_dev->info_ring_head & ACC_INFO_RING_MASK);
 
 	while (ring_data->valid) {
+		vf_id = vf_from_ring(*ring_data, acc_dev->device_variant);
+		aq_id = aq_from_ring(*ring_data, acc_dev->device_variant);
+		qg_id = qg_from_ring(*ring_data, acc_dev->device_variant);
+		int_nb = int_from_ring(*ring_data, acc_dev->device_variant);
 		if (acc_dev->pf_device) {
 			rte_bbdev_log_debug(
-					"VRB1 PF Interrupt received, Info Ring data: 0x%x -> %d",
-					ring_data->val, ring_data->int_nb);
+					"PF Interrupt received, Info Ring data: 0x%x -> %d",
+					ring_data->val, int_nb);
 
-			switch (ring_data->int_nb) {
+			switch (int_nb) {
 			case ACC_PF_INT_DMA_DL_DESC_IRQ:
 			case ACC_PF_INT_DMA_UL_DESC_IRQ:
 			case ACC_PF_INT_DMA_FFT_DESC_IRQ:
@@ -388,9 +394,7 @@  vrb_dev_interrupt_handler(void *cb_arg)
 				if (deq_intr_det.queue_id == UINT16_MAX) {
 					rte_bbdev_log(ERR,
 							"Couldn't find queue: aq_id: %u, qg_id: %u, vf_id: %u",
-							ring_data->aq_id,
-							ring_data->qg_id,
-							ring_data->vf_id);
+							aq_id, qg_id, vf_id);
 					return;
 				}
 				rte_bbdev_pmd_callback_process(dev,
@@ -402,9 +406,9 @@  vrb_dev_interrupt_handler(void *cb_arg)
 			}
 		} else {
 			rte_bbdev_log_debug(
-					"VRB1 VF Interrupt received, Info Ring data: 0x%x\n",
+					"VRB VF Interrupt received, Info Ring data: 0x%x\n",
 					ring_data->val);
-			switch (ring_data->int_nb) {
+			switch (int_nb) {
 			case ACC_VF_INT_DMA_DL_DESC_IRQ:
 			case ACC_VF_INT_DMA_UL_DESC_IRQ:
 			case ACC_VF_INT_DMA_FFT_DESC_IRQ:
@@ -412,14 +416,13 @@  vrb_dev_interrupt_handler(void *cb_arg)
 			case ACC_VF_INT_DMA_DL5G_DESC_IRQ:
 			case ACC_VF_INT_DMA_MLD_DESC_IRQ:
 				/* VFs are not aware of their vf_id - it's set to 0.  */
-				ring_data->vf_id = 0;
+				set_vf_in_ring(ring_data, acc_dev->device_variant, 0);
 				deq_intr_det.queue_id = get_queue_id_from_ring_info(
 						dev->data, *ring_data);
 				if (deq_intr_det.queue_id == UINT16_MAX) {
 					rte_bbdev_log(ERR,
 							"Couldn't find queue: aq_id: %u, qg_id: %u",
-							ring_data->aq_id,
-							ring_data->qg_id);
+							aq_id, qg_id);
 					return;
 				}
 				rte_bbdev_pmd_callback_process(dev,
@@ -434,8 +437,7 @@  vrb_dev_interrupt_handler(void *cb_arg)
 		/* Initialize Info Ring entry and move forward. */
 		ring_data->val = 0;
 		++acc_dev->info_ring_head;
-		ring_data = acc_dev->info_ring +
-				(acc_dev->info_ring_head & ACC_INFO_RING_MASK);
+		ring_data = acc_dev->info_ring + (acc_dev->info_ring_head & ACC_INFO_RING_MASK);
 	}
 }
 
@@ -555,8 +557,7 @@  vrb_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
 
 	/* Configure tail pointer for use when SDONE enabled. */
 	if (d->tail_ptrs == NULL)
-		d->tail_ptrs = rte_zmalloc_socket(
-				dev->device->driver->name,
+		d->tail_ptrs = rte_zmalloc_socket(dev->device->driver->name,
 				VRB_MAX_QGRPS * VRB_MAX_AQS * sizeof(uint32_t),
 				RTE_CACHE_LINE_SIZE, socket_id);
 	if (d->tail_ptrs == NULL) {
@@ -782,7 +783,7 @@  vrb_find_free_queue_idx(struct rte_bbdev *dev,
 			/* Mark the Queue as assigned. */
 			d->q_assigned_bit_map[group_idx] |= (1ULL << aq_idx);
 			/* Report the AQ Index. */
-			return (group_idx << VRB1_GRP_ID_SHIFT) + aq_idx;
+			return queue_index(group_idx, aq_idx, d->device_variant);
 		}
 	}
 	rte_bbdev_log(INFO, "Failed to find free queue on %s, priority %u",
@@ -921,9 +922,10 @@  vrb_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
 		}
 	}
 
-	q->qgrp_id = (q_idx >> VRB1_GRP_ID_SHIFT) & 0xF;
-	q->vf_id = (q_idx >> VRB1_VF_ID_SHIFT)  & 0x3F;
-	q->aq_id = q_idx & 0xF;
+	q->qgrp_id = qg_from_q(q_idx, d->device_variant);
+	q->vf_id = vf_from_q(q_idx, d->device_variant);
+	q->aq_id = aq_from_q(q_idx, d->device_variant);
+
 	q->aq_depth = 0;
 	if (conf->op_type ==  RTE_BBDEV_OP_TURBO_DEC)
 		q->aq_depth = (1 << d->acc_conf.q_ul_4g.aq_depth_log2);
@@ -1311,7 +1313,7 @@  vrb_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc_fcw_td *fcw)
 		fcw->bypass_teq = 0;
 	}
 
-	fcw->code_block_mode = 1; /* FIXME */
+	fcw->code_block_mode = 1;
 	fcw->turbo_crc_type = check_bit(op->turbo_dec.op_flags,
 			RTE_BBDEV_TURBO_CRC_TYPE_24B);
 
@@ -1471,8 +1473,8 @@  vrb_dma_desc_td_fill(struct rte_bbdev_dec_op *op,
 	if (op->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
 		k = op->turbo_dec.tb_params.k_pos;
 		e = (r < op->turbo_dec.tb_params.cab)
-			? op->turbo_dec.tb_params.ea
-			: op->turbo_dec.tb_params.eb;
+				? op->turbo_dec.tb_params.ea
+				: op->turbo_dec.tb_params.eb;
 	} else {
 		k = op->turbo_dec.cb_params.k;
 		e = op->turbo_dec.cb_params.e;
@@ -1726,7 +1728,7 @@  vrb_dma_desc_ld_update(struct rte_bbdev_dec_op *op,
 	desc->op_addr = op;
 }
 
-/* Enqueue one encode operations for device in CB mode */
+/* Enqueue one encode operations for device in CB mode. */
 static inline int
 enqueue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op *op,
 		uint16_t total_enqueued_cbs)
@@ -2263,7 +2265,7 @@  vrb_enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
 	return current_enqueued_cbs;
 }
 
-/* Enqueue one decode operations for device in TB mode */
+/* Enqueue one decode operations for device in TB mode. */
 static inline int
 enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
 		uint16_t total_enqueued_cbs, uint8_t cbs_in_tb)