diff mbox series

[05/10] baseband/ark: add ark baseband device

Message ID 20220421151900.703467-5-john.miller@atomicrules.com (mailing list archive)
State Changes Requested
Delegated to: akhil goyal
Headers show
Series [01/10] doc/guides/bbdevs: add ark baseband device documentation | expand

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

John Miller April 21, 2022, 3:18 p.m. UTC
Add new ark baseband device.

Signed-off-by: John Miller <john.miller@atomicrules.com>
---
 drivers/baseband/ark/ark_bbdev.c        | 1064 +++++++++++++++++++++++
 drivers/baseband/ark/ark_bbdev_common.c |  125 +++
 drivers/baseband/ark/ark_bbdev_common.h |   92 ++
 drivers/baseband/ark/ark_bbdev_custom.c |  201 +++++
 drivers/baseband/ark/ark_bbdev_custom.h |   30 +
 drivers/baseband/ark/meson.build        |   11 +
 drivers/baseband/ark/version.map        |    3 +
 7 files changed, 1526 insertions(+)
 create mode 100644 drivers/baseband/ark/ark_bbdev.c
 create mode 100644 drivers/baseband/ark/ark_bbdev_common.c
 create mode 100644 drivers/baseband/ark/ark_bbdev_common.h
 create mode 100644 drivers/baseband/ark/ark_bbdev_custom.c
 create mode 100644 drivers/baseband/ark/ark_bbdev_custom.h
 create mode 100644 drivers/baseband/ark/meson.build
 create mode 100644 drivers/baseband/ark/version.map

Comments

Nicolas Chautru April 27, 2022, 6:38 p.m. UTC | #1
Hi John, 

Do you think this one can be split into a few incremental commits?

There are a few TODOs, is that v1 ready for review? Also you are targeting 22.11 right?

Thanks
Nic

> -----Original Message-----
> From: John Miller <john.miller@atomicrules.com>
> Sent: Thursday, April 21, 2022 8:19 AM
> To: dev@dpdk.org
> Cc: ferruh.yigit@xilinx.com; ed.czeck@atomicrules.com; John Miller
> <john.miller@atomicrules.com>
> Subject: [PATCH 05/10] baseband/ark: add ark baseband device
> 
> Add new ark baseband device.
> 
> Signed-off-by: John Miller <john.miller@atomicrules.com>
> ---
>  drivers/baseband/ark/ark_bbdev.c        | 1064 +++++++++++++++++++++++
>  drivers/baseband/ark/ark_bbdev_common.c |  125 +++
>  drivers/baseband/ark/ark_bbdev_common.h |   92 ++
>  drivers/baseband/ark/ark_bbdev_custom.c |  201 +++++
>  drivers/baseband/ark/ark_bbdev_custom.h |   30 +
>  drivers/baseband/ark/meson.build        |   11 +
>  drivers/baseband/ark/version.map        |    3 +
>  7 files changed, 1526 insertions(+)
>  create mode 100644 drivers/baseband/ark/ark_bbdev.c  create mode
> 100644 drivers/baseband/ark/ark_bbdev_common.c
>  create mode 100644 drivers/baseband/ark/ark_bbdev_common.h
>  create mode 100644 drivers/baseband/ark/ark_bbdev_custom.c
>  create mode 100644 drivers/baseband/ark/ark_bbdev_custom.h
>  create mode 100644 drivers/baseband/ark/meson.build  create mode
> 100644 drivers/baseband/ark/version.map
> 
> diff --git a/drivers/baseband/ark/ark_bbdev.c
> b/drivers/baseband/ark/ark_bbdev.c
> new file mode 100644
> index 0000000000..b23bbd44d1
> --- /dev/null
> +++ b/drivers/baseband/ark/ark_bbdev.c
> @@ -0,0 +1,1064 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2021 Atomic Rules LLC  */
> +
> +#include "ark_common.h"
> +#include "ark_bbdev_common.h"
> +#include "ark_bbdev_custom.h"
> +#include "ark_ddm.h"
> +#include "ark_mpu.h"
> +#include "ark_rqp.h"
> +#include "ark_udm.h"
> +
> +#include <rte_bbdev.h>
> +#include <rte_bbdev_pmd.h>
> +#include <rte_bus_pci.h>
> +#include <rte_common.h>
> +#include <rte_devargs.h>
> +#include <rte_malloc.h>
> +#include <rte_ring.h>
> +
> +#include <unistd.h>
> +
> +#define DRIVER_NAME baseband_ark
> +
> +RTE_LOG_REGISTER_DEFAULT(ark_bbdev_logtype, DEBUG);
> +
> +#define ARK_SYSCTRL_BASE  0x0
> +#define ARK_PKTGEN_BASE   0x10000
> +#define ARK_MPU_RX_BASE   0x20000
> +#define ARK_UDM_BASE      0x30000
> +#define ARK_MPU_TX_BASE   0x40000
> +#define ARK_DDM_BASE      0x60000
> +#define ARK_PKTDIR_BASE   0xa0000
> +#define ARK_PKTCHKR_BASE  0x90000
> +#define ARK_RCPACING_BASE 0xb0000
> +#define ARK_MPU_QOFFSET   0x00100
> +
> +#define BB_ARK_TX_Q_FACTOR 4
> +
> +/* TODO move to UDM, verify configuration */ #define ARK_RX_META_SIZE
> +32 #define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM -
> ARK_RX_META_SIZE)
> +#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
> +
> +static_assert(sizeof(struct ark_rx_meta) == ARK_RX_META_SIZE,
> +"Unexpected struct size ark_rx_meta"); static_assert(sizeof(union
> +ark_tx_meta) == 8, "Unexpected struct size ark_tx_meta");
> +
> +static struct rte_pci_id pci_id_ark[] = {
> +	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1015)},
> +	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1016)},
> +	{.device_id = 0},
> +};
> +
> +static const struct ark_dev_caps
> +ark_device_caps[] = {
> +		     SET_DEV_CAPS(0x1015, true),
> +		     SET_DEV_CAPS(0x1016, true),
> +		     {.device_id = 0,}
> +};
> +
> +
> +/* Forward declarations */
> +static const struct rte_bbdev_ops ark_bbdev_pmd_ops;
> +
> +
> +/* queue */
> +struct ark_bbdev_queue {
> +	struct rte_ring *active_ops;  /* Ring for processed packets */
> +
> +	/* RX components */
> +	/* array of physical addresses of the mbuf data pointer */
> +	rte_iova_t *rx_paddress_q;
> +	struct ark_udm_t *udm;
> +	struct ark_mpu_t *rx_mpu;
> +
> +	/* TX components */
> +	union ark_tx_meta *tx_meta_q;
> +	struct ark_mpu_t *tx_mpu;
> +	struct ark_ddm_t *ddm;
> +
> +	/*  */
> +	uint32_t tx_queue_mask;
> +	uint32_t rx_queue_mask;
> +
> +	int32_t rx_seed_index;		/* step 1 set with empty mbuf */
> +	int32_t rx_cons_index;		/* step 3 consumed by driver */
> +
> +	/* 3 indexes to the paired data rings. */
> +	int32_t tx_prod_index;		/* where to put the next one */
> +	int32_t tx_free_index;		/* local copy of tx_cons_index */
> +
> +	/* separate cache line -- written by FPGA -- RX announce */
> +	RTE_MARKER cacheline1 __rte_cache_min_aligned;
> +	volatile int32_t rx_prod_index; /* step 2 filled by FPGA */
> +
> +	/* Separate cache line -- written by FPGA -- RX completion */
> +	RTE_MARKER cacheline2 __rte_cache_min_aligned;
> +	volatile int32_t tx_cons_index; /* hw is done, can be freed */ }
> +__rte_cache_aligned;
> +
> +static int
> +ark_bb_hw_q_setup(struct rte_bbdev *bbdev, uint16_t q_id, uint16_t
> +queue_size) {
> +	struct ark_bbdev_queue *q = bbdev->data-
> >queues[q_id].queue_private;
> +
> +	rte_iova_t queue_base;
> +	rte_iova_t phys_addr_q_base;
> +	rte_iova_t phys_addr_prod_index;
> +	rte_iova_t phys_addr_cons_index;
> +
> +	uint32_t write_interval_ns = 500; /* TODO this seems big */
> +
> +	if (ark_mpu_verify(q->rx_mpu, sizeof(rte_iova_t))) {
> +		ARK_BBDEV_LOG(ERR, "Illegal hw/sw configuration RX
> queue");
> +		return -1;
> +	}
> +	ARK_BBDEV_LOG(DEBUG, "ark_bb_q setup %u:%u",
> +		      bbdev->data->dev_id, q_id);
> +
> +	/* RX MPU */
> +	phys_addr_q_base = rte_malloc_virt2iova(q->rx_paddress_q);
> +	/* Force TX mode on MPU to match bbdev behavior */
> +	ark_mpu_configure(q->rx_mpu, phys_addr_q_base, queue_size, 1);
> +	ark_mpu_reset_stats(q->rx_mpu);
> +	ark_mpu_start(q->rx_mpu);
> +
> +	/* UDM */
> +	queue_base = rte_malloc_virt2iova(q);
> +	phys_addr_prod_index = queue_base +
> +		offsetof(struct ark_bbdev_queue, rx_prod_index);
> +	ark_udm_write_addr(q->udm, phys_addr_prod_index);
> +	ark_udm_queue_enable(q->udm, 1);
> +
> +	/* TX MPU */
> +	phys_addr_q_base = rte_malloc_virt2iova(q->tx_meta_q);
> +	ark_mpu_configure(q->tx_mpu, phys_addr_q_base,
> +			  BB_ARK_TX_Q_FACTOR * queue_size, 1);
> +	ark_mpu_start(q->tx_mpu);
> +
> +	/* DDM */
> +	phys_addr_cons_index = queue_base +
> +		offsetof(struct ark_bbdev_queue, tx_cons_index);
> +	ark_ddm_setup(q->ddm, phys_addr_cons_index, write_interval_ns);
> +
> +	return 0;
> +}
> +
> +/* Setup a queue */
> +static int
> +ark_bb_q_setup(struct rte_bbdev *bbdev, uint16_t q_id,
> +	       const struct rte_bbdev_queue_conf *queue_conf) {
> +	struct ark_bbdev_queue *q;
> +	struct ark_bbdevice *ark_bb =  bbdev->data->dev_private;
> +
> +	const uint32_t queue_size = queue_conf->queue_size;
> +	const int socket_id = queue_conf->socket;
> +	const uint64_t pg_sz = sysconf(_SC_PAGESIZE);
> +	char ring_name[RTE_RING_NAMESIZE];
> +
> +	/* Configuration checks */
> +	if (!rte_is_power_of_2(queue_size)) {
> +		ARK_BBDEV_LOG(ERR,
> +			      "Configuration queue size"
> +			      " must be power of two %u",
> +			      queue_size);
> +		return -EINVAL;
> +	}
> +
> +	if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
> +		ARK_BBDEV_LOG(ERR,
> +			      "Error: Ark bbdev requires head room > %d bytes
> (%s)",
> +			      ARK_RX_META_SIZE, __func__);
> +		return -EINVAL;
> +	}
> +
> +	/* Allocate the queue data structure. */
> +	q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
> +			RTE_CACHE_LINE_SIZE, queue_conf->socket);
> +	if (q == NULL) {
> +		ARK_BBDEV_LOG(ERR, "Failed to allocate queue memory");
> +		return -ENOMEM;
> +	}
> +	bbdev->data->queues[q_id].queue_private = q;
> +
> +	/* RING */
> +	snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)
> "%u:%u",
> +		 bbdev->data->dev_id, q_id);
> +	q->active_ops = rte_ring_create(ring_name,
> +					queue_size,
> +					queue_conf->socket,
> +					RING_F_SP_ENQ | RING_F_SC_DEQ);
> +	if (q->active_ops == NULL) {
> +		ARK_BBDEV_LOG(ERR, "Failed to create ring");
> +		goto free_all;
> +	}
> +
> +	q->rx_queue_mask = queue_size - 1;
> +	q->tx_queue_mask = (BB_ARK_TX_Q_FACTOR * queue_size) - 1;
> +
> +	/* Each mbuf requires 2 to 4 objects, factor by
> BB_ARK_TX_Q_FACTOR */
> +	q->tx_meta_q =
> +		rte_zmalloc_socket("Ark_bb_txqueue meta",
> +				   queue_size * BB_ARK_TX_Q_FACTOR *
> +				   sizeof(union ark_tx_meta),
> +				   pg_sz,
> +				   socket_id);
> +
> +	if (q->tx_meta_q == 0) {
> +		ARK_BBDEV_LOG(ERR, "Failed to allocate "
> +			      "queue memory in %s", __func__);
> +		goto free_all;
> +	}
> +
> +	q->ddm = RTE_PTR_ADD(ark_bb->ddm.v, q_id *
> ARK_DDM_QOFFSET);
> +	q->tx_mpu = RTE_PTR_ADD(ark_bb->mputx.v, q_id *
> ARK_MPU_QOFFSET);
> +
> +	q->rx_paddress_q =
> +		rte_zmalloc_socket("ark_bb_rx_paddress_q",
> +				   queue_size * sizeof(rte_iova_t),
> +				   pg_sz,
> +				   socket_id);
> +
> +	if (q->rx_paddress_q == 0) {
> +		ARK_BBDEV_LOG(ERR,
> +			      "Failed to allocate queue memory in %s",
> +			      __func__);
> +		goto free_all;
> +	}
> +	q->udm = RTE_PTR_ADD(ark_bb->udm.v, q_id *
> ARK_UDM_QOFFSET);
> +	q->rx_mpu = RTE_PTR_ADD(ark_bb->mpurx.v, q_id *
> ARK_MPU_QOFFSET);
> +
> +	/* Structure have been configured, set the hardware */
> +	return ark_bb_hw_q_setup(bbdev, q_id, queue_size);
> +
> +free_all:
> +	rte_free(q->tx_meta_q);
> +	rte_free(q->rx_paddress_q);
> +	rte_free(q);
> +	return -EFAULT;
> +}
> +
> +/* Release queue */
> +static int
> +ark_bb_q_release(struct rte_bbdev *bbdev, uint16_t q_id) {
> +	struct ark_bbdev_queue *q = bbdev->data-
> >queues[q_id].queue_private;
> +
> +	/* TODO Wait for ddm to send out all packets in flight,
> +	 * Is this only called after q stop?
> +	 */
> +
> +	ark_mpu_dump(q->rx_mpu, "rx_MPU release", q_id);
> +	ark_mpu_dump(q->tx_mpu, "tx_MPU release", q_id);
> +
> +	rte_ring_free(q->active_ops);
> +	rte_free(q->tx_meta_q);
> +	rte_free(q->rx_paddress_q);
> +	rte_free(q);
> +	bbdev->data->queues[q_id].queue_private = NULL;
> +
> +	ARK_BBDEV_LOG(DEBUG, "released device queue %u:%u",
> +		      bbdev->data->dev_id, q_id);
> +	return 0;
> +}
> +
> +static int
> +ark_bbdev_start(struct rte_bbdev *bbdev) {
> +	struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> +
> +	ARK_BBDEV_LOG(DEBUG, "Starting device %u", bbdev->data-
> >dev_id);
> +	if (ark_bb->started)
> +		return 0;
> +
> +	/* start UDM */
> +	ark_udm_start(ark_bb->udm.v);
> +
> +	/* start DDM */
> +	ark_ddm_start(ark_bb->ddm.v);
> +
> +	ark_bb->started = 1;
> +
> +	if (ark_bb->start_pg)
> +		ark_pktchkr_run(ark_bb->pc);
> +
> +	if (ark_bb->start_pg) {
> +		pthread_t thread;
> +
> +		/* Delay packet generator start allow the hardware to be
> ready
> +		 * This is only used for sanity checking with internal generator
> +		 */
> +		if (pthread_create(&thread, NULL,
> +				   ark_pktgen_delay_start, ark_bb->pg)) {
> +			ARK_BBDEV_LOG(ERR, "Could not create pktgen "
> +				    "starter thread");
> +			return -1;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static void
> +ark_bbdev_stop(struct rte_bbdev *bbdev) {
> +	struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> +	struct ark_mpu_t *mpu;
> +	unsigned int i;
> +	int status;
> +
> +	ARK_BBDEV_LOG(DEBUG, "Stopping device %u", bbdev->data-
> >dev_id);
> +
> +	if (!ark_bb->started)
> +		return;
> +
> +	/* Stop the packet generator */
> +	if (ark_bb->start_pg)
> +		ark_pktgen_pause(ark_bb->pg);
> +
> +	/* Stop DDM */
> +	/* Wait up to 0.1 second.  each stop is up to 1000 * 10 useconds */
> +	for (i = 0; i < 10; i++) {
> +		status = ark_ddm_stop(ark_bb->ddm.v, 1);
> +		if (status == 0)
> +			break;
> +	}
> +	if (status || i != 0) {
> +		ARK_BBDEV_LOG(ERR, "DDM stop anomaly. status:"
> +			      " %d iter: %u. (%s)",
> +			      status,
> +			      i,
> +			      __func__);
> +		ark_ddm_dump(ark_bb->ddm.v, "Stop anomaly");
> +
> +		mpu = ark_bb->mputx.v;
> +		for (i = 0; i < ark_bb->max_nb_queues; i++) {
> +			ark_mpu_dump(mpu, "DDM failure dump", i);
> +			mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> +		}
> +	}
> +	ark_ddm_dump_stats(ark_bb->ddm.v, "bbdev stop");
> +
> +	/* STOP RX Side */
> +	/* Stop UDM  multiple tries attempted */
> +	for (i = 0; i < 10; i++) {
> +		status = ark_udm_stop(ark_bb->udm.v, 1);
> +		if (status == 0)
> +			break;
> +	}
> +	if (status || i != 0) {
> +		ARK_BBDEV_LOG(WARNING, "UDM stop anomaly. status %d
> iter: %u. (%s)",
> +			      status, i, __func__);
> +		ark_udm_dump(ark_bb->udm.v, "Stop anomaly");
> +
> +		mpu = ark_bb->mpurx.v;
> +		for (i = 0; i < ark_bb->max_nb_queues; i++) {
> +			ark_mpu_dump(mpu, "UDM Stop anomaly", i);
> +			mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> +		}
> +	}
> +
> +	ark_udm_dump_stats(ark_bb->udm.v, "Post stop");
> +	ark_udm_dump_perf(ark_bb->udm.v, "Post stop");
> +
> +	/* Stop the packet checker if it is running */
> +	if (ark_bb->start_pg) {
> +		ark_pktchkr_dump_stats(ark_bb->pc);
> +		ark_pktchkr_stop(ark_bb->pc);
> +	}
> +}
> +
> +static int
> +ark_bb_q_start(struct rte_bbdev *bbdev, uint16_t q_id) {
> +	struct ark_bbdev_queue *q = bbdev->data-
> >queues[q_id].queue_private;
> +	ARK_BBDEV_LOG(DEBUG, "ark_bb_q start %u:%u", bbdev->data-
> >dev_id, q_id);
> +	ark_mpu_start(q->tx_mpu);
> +	ark_mpu_start(q->rx_mpu);
> +	return 0;
> +}
> +static int
> +ark_bb_q_stop(struct rte_bbdev *bbdev, uint16_t q_id) {
> +	struct ark_bbdev_queue *q = bbdev->data-
> >queues[q_id].queue_private;
> +	ARK_BBDEV_LOG(DEBUG, "ark_bb_q stop %u:%u", bbdev->data-
> >dev_id, q_id);
> +	ark_mpu_stop(q->tx_mpu);
> +	ark_mpu_stop(q->rx_mpu);
> +	return 0;
> +}
> +
> +/*
> +****************************************************************
> *******
> +** */
> +/* Common function for all enqueue and dequeue ops */ static inline
> +void ark_bb_enqueue_desc_fill(struct ark_bbdev_queue *q,
> +			 struct rte_mbuf *mbuf,
> +			 uint16_t offset, /* Extra offset */
> +			 uint8_t  flags,
> +			 uint32_t *meta,
> +			 uint8_t  meta_cnt /* 0, 1 or 2 */
> +			 )
> +{
> +	union ark_tx_meta *tx_meta;
> +	int32_t tx_idx;
> +	uint8_t m;
> +
> +	/* Header */
> +	tx_idx = q->tx_prod_index & q->tx_queue_mask;
> +	tx_meta = &q->tx_meta_q[tx_idx];
> +	tx_meta->data_len = rte_pktmbuf_data_len(mbuf) - offset;
> +	tx_meta->flags = flags;
> +	tx_meta->meta_cnt = meta_cnt;
> +	tx_meta->user1 = *meta++;
> +	q->tx_prod_index++;
> +
> +	for (m = 0; m < meta_cnt; m++) {
> +		tx_idx = q->tx_prod_index & q->tx_queue_mask;
> +		tx_meta = &q->tx_meta_q[tx_idx];
> +		tx_meta->usermeta0 = *meta++;
> +		tx_meta->usermeta1 = *meta++;
> +		q->tx_prod_index++;
> +	}
> +
> +	tx_idx = q->tx_prod_index & q->tx_queue_mask;
> +	tx_meta = &q->tx_meta_q[tx_idx];
> +	tx_meta->physaddr = rte_mbuf_data_iova(mbuf) + offset;
> +	q->tx_prod_index++;
> +}
> +
> +static inline void
> +ark_bb_enqueue_segmented_pkt(struct ark_bbdev_queue *q,
> +			     struct rte_mbuf *mbuf,
> +			     uint16_t offset,
> +			     uint32_t *meta, uint8_t meta_cnt) {
> +	struct rte_mbuf *next;
> +	uint8_t flags = ARK_DDM_SOP;
> +
> +	while (mbuf != NULL) {
> +		next = mbuf->next;
> +		flags |= (next == NULL) ? ARK_DDM_EOP : 0;
> +
> +		ark_bb_enqueue_desc_fill(q, mbuf, offset, flags,
> +					 meta, meta_cnt);
> +
> +		flags &= ~ARK_DDM_SOP;	/* drop SOP flags */
> +		meta_cnt = 0;
> +		offset = 0;
> +
> +		mbuf = next;
> +	}
> +}
> +
> +static inline int
> +ark_bb_enqueue_common(struct ark_bbdev_queue *q,
> +		      struct rte_mbuf *m_in, struct rte_mbuf *m_out,
> +		      uint16_t offset,
> +		      uint32_t *meta, uint8_t meta_cnt) {
> +	int32_t free_queue_space;
> +	int32_t rx_idx;
> +
> +	/* TX side limit */
> +	free_queue_space = q->tx_queue_mask -
> +		(q->tx_prod_index - q->tx_free_index);
> +	if (unlikely(free_queue_space < (2 + (2 * m_in->nb_segs))))
> +		return 1;
> +
> +	/* RX side limit */
> +	free_queue_space = q->rx_queue_mask -
> +		(q->rx_seed_index - q->rx_cons_index);
> +	if (unlikely(free_queue_space < m_out->nb_segs))
> +		return 1;
> +
> +	if (unlikely(m_in->nb_segs > 1))
> +		ark_bb_enqueue_segmented_pkt(q, m_in, offset, meta,
> meta_cnt);
> +	else
> +		ark_bb_enqueue_desc_fill(q, m_in, offset,
> +					 ARK_DDM_SOP | ARK_DDM_EOP,
> +					 meta, meta_cnt);
> +
> +	/* We assume that the return mbuf has exactly enough segments for
> +	 * return data, which is 2048 bytes per segment.
> +	 */
> +	do {
> +		rx_idx = q->rx_seed_index & q->rx_queue_mask;
> +		q->rx_paddress_q[rx_idx] = m_out->buf_iova;
> +		q->rx_seed_index++;
> +		m_out = m_out->next;
> +	} while (m_out);
> +
> +	return 0;
> +}
> +
> +static inline void
> +ark_bb_enqueue_finalize(struct rte_bbdev_queue_data *q_data,
> +			struct ark_bbdev_queue *q,
> +			void **ops,
> +			uint16_t nb_ops, uint16_t nb)
> +{
> +	/* BBDEV global stats */
> +	/* These are not really errors, not sure why bbdev counts these. */
> +	q_data->queue_stats.enqueue_err_count += nb_ops - nb;
> +	q_data->queue_stats.enqueued_count += nb;
> +
> +	/* Notify HW that  */
> +	if (unlikely(nb == 0))
> +		return;
> +
> +	ark_mpu_set_producer(q->tx_mpu, q->tx_prod_index);
> +	ark_mpu_set_producer(q->rx_mpu, q->rx_seed_index);
> +
> +	/* Queue info for dequeue-side processing */
> +	rte_ring_enqueue_burst(q->active_ops,
> +			       (void **)ops, nb, NULL);
> +}
> +
> +static int
> +ark_bb_dequeue_segmented(struct rte_mbuf *mbuf0,
> +			 int32_t *prx_cons_index,
> +			 uint16_t pkt_len
> +			 )
> +{
> +	struct rte_mbuf *mbuf;
> +	uint16_t data_len;
> +	uint16_t remaining;
> +	uint16_t segments = 1;
> +
> +	data_len = RTE_MIN(pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
> +	remaining = pkt_len - data_len;
> +
> +	mbuf = mbuf0;
> +	mbuf0->data_len = data_len;
> +	while (remaining) {
> +		segments += 1;
> +		mbuf = mbuf->next;
> +		if (unlikely(mbuf == 0)) {
> +			ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with
> "
> +				      "at least %d segments for dequeue "
> +				      "of packet length %d",
> +				      segments, pkt_len);
> +			return 1;
> +		}
> +
> +		data_len = RTE_MIN(remaining,
> +				   RTE_MBUF_DEFAULT_DATAROOM);
> +		remaining -= data_len;
> +
> +		mbuf->data_len = data_len;
> +		*prx_cons_index += 1;
> +	}
> +
> +	if (mbuf->next != 0) {
> +		ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with "
> +			      "at exactly %d segments for dequeue "
> +			      "of packet length %d. Found %d "
> +			      "segments",
> +			      segments, pkt_len, mbuf0->nb_segs);
> +		return 1;
> +	}
> +	return 0;
> +}
> +
> +/*
> +****************************************************************
> *******
> +** */
> +/* LDPC Decode ops */
> +static int16_t
> +ark_bb_enqueue_ldpc_dec_one_op(struct ark_bbdev_queue *q,
> +			       struct rte_bbdev_dec_op *this_op) {
> +	struct rte_bbdev_op_ldpc_dec *ldpc_dec_op = &this_op->ldpc_dec;
> +	struct rte_mbuf *m_in = ldpc_dec_op->input.data;
> +	struct rte_mbuf *m_out = ldpc_dec_op->hard_output.data;
> +	uint16_t offset = ldpc_dec_op->input.offset;
> +	uint32_t meta[5] = {0};
> +	uint8_t meta_cnt = 0;
> +
> +	/* User's meta move from bbdev op to Arkville HW */
> +	if (ark_bb_user_enqueue_ldpc_dec(this_op, meta, &meta_cnt)) {
> +		ARK_BBDEV_LOG(ERR, "%s failed", __func__);
> +		return 1;
> +	}
> +
> +	return ark_bb_enqueue_common(q, m_in, m_out, offset, meta,
> meta_cnt);
> +}
> +
> +/* Enqueue LDPC Decode -- burst */
> +static uint16_t
> +ark_bb_enqueue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
> +			    struct rte_bbdev_dec_op **ops, uint16_t nb_ops) {
> +	struct ark_bbdev_queue *q = q_data->queue_private;
> +	unsigned int max_enq;
> +	uint16_t nb;
> +
> +	max_enq = rte_ring_free_count(q->active_ops);
> +	max_enq = RTE_MIN(max_enq, nb_ops);
> +	for (nb = 0; nb < max_enq; nb++) {
> +		if (ark_bb_enqueue_ldpc_dec_one_op(q, ops[nb]))
> +			break;
> +	}
> +
> +	ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);
> +	return nb;
> +}
> +
> +/*
> +****************************************************************
> *******
> +** */
> +/* Dequeue LDPC Decode -- burst */
> +static uint16_t
> +ark_bb_dequeue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
> +			    struct rte_bbdev_dec_op **ops, uint16_t nb_ops) {
> +	struct ark_bbdev_queue *q = q_data->queue_private;
> +	struct rte_mbuf *mbuf;
> +	struct rte_bbdev_dec_op *this_op;
> +	struct ark_rx_meta *meta;
> +	uint32_t *usermeta;
> +
> +	uint16_t nb = 0;
> +	int32_t prod_index = q->rx_prod_index;
> +	int32_t cons_index = q->rx_cons_index;
> +
> +	q->tx_free_index = q->tx_cons_index;
> +
> +	while ((prod_index - cons_index) > 0) {
> +		if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {
> +			ARK_BBDEV_LOG(ERR, "%s data ready but no op!",
> +				      __func__);
> +			q_data->queue_stats.dequeue_err_count += 1;
> +			break;
> +		}
> +		ops[nb] = this_op;
> +
> +		mbuf = this_op->ldpc_dec.hard_output.data;
> +
> +		/* META DATA embedded in headroom */
> +		meta = RTE_PTR_ADD(mbuf->buf_addr,
> ARK_RX_META_OFFSET);
> +
> +		mbuf->pkt_len = meta->pkt_len;
> +		mbuf->data_len = meta->pkt_len;
> +
> +		if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {
> +			if (ark_bb_dequeue_segmented(mbuf, &cons_index,
> +						     meta->pkt_len))
> +				q_data->queue_stats.dequeue_err_count +=
> 1;
> +		} else if (mbuf->next != 0) {
> +			ARK_BBDEV_LOG(CRIT, "Expected mbuf with "
> +				      "at exactly 1 segments for dequeue "
> +				      "of packet length %d. Found %d "
> +				      "segments",
> +				      meta->pkt_len, mbuf->nb_segs);
> +			q_data->queue_stats.dequeue_err_count += 1;
> +		}
> +
> +		usermeta = meta->user_meta;
> +		/* User's meta move from Arkville HW to bbdev OP */
> +		ark_bb_user_dequeue_ldpc_dec(this_op, usermeta);
> +		nb++;
> +		cons_index++;
> +		if (nb >= nb_ops)
> +			break;
> +	}
> +
> +	q->rx_cons_index = cons_index;
> +
> +	/* BBdev stats */
> +	q_data->queue_stats.dequeued_count += nb;
> +
> +	return nb;
> +}
> +
> +/***************************************************************
> *******
> +****/
> +/* Enqueue LDPC Encode */
> +static int16_t
> +ark_bb_enqueue_ldpc_enc_one_op(struct ark_bbdev_queue *q,
> +			       struct rte_bbdev_enc_op *this_op) {
> +	struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &this_op->ldpc_enc;
> +	struct rte_mbuf *m_in = ldpc_enc_op->input.data;
> +	struct rte_mbuf *m_out = ldpc_enc_op->output.data;
> +	uint16_t offset = ldpc_enc_op->input.offset;
> +	uint32_t meta[5] = {0};
> +	uint8_t meta_cnt = 0;
> +
> +	/* User's meta move from bbdev op to Arkville HW */
> +	if (ark_bb_user_enqueue_ldpc_enc(this_op, meta, &meta_cnt)) {
> +		ARK_BBDEV_LOG(ERR, "%s failed", __func__);
> +		return 1;
> +	}
> +
> +	return ark_bb_enqueue_common(q, m_in, m_out, offset, meta,
> meta_cnt);
> +}
> +
> +/* Enqueue LDPC Encode -- burst */
> +static uint16_t
> +ark_bb_enqueue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
> +			    struct rte_bbdev_enc_op **ops, uint16_t nb_ops) {
> +	struct ark_bbdev_queue *q = q_data->queue_private;
> +	unsigned int max_enq;
> +	uint16_t nb;
> +
> +	max_enq = rte_ring_free_count(q->active_ops);
> +	max_enq = RTE_MIN(max_enq, nb_ops);
> +	for (nb = 0; nb < max_enq; nb++) {
> +		if (ark_bb_enqueue_ldpc_enc_one_op(q, ops[nb]))
> +			break;
> +	}
> +
> +	ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);
> +	return nb;
> +}
> +
> +/* Dequeue LDPC Encode -- burst */
> +static uint16_t
> +ark_bb_dequeue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
> +			    struct rte_bbdev_enc_op **ops, uint16_t nb_ops) {
> +	struct ark_bbdev_queue *q = q_data->queue_private;
> +	struct rte_mbuf *mbuf;
> +	struct rte_bbdev_enc_op *this_op;
> +	struct ark_rx_meta *meta;
> +	uint32_t *usermeta;
> +
> +	uint16_t nb = 0;
> +	int32_t prod_index = q->rx_prod_index;
> +	int32_t cons_index = q->rx_cons_index;
> +
> +	q->tx_free_index = q->tx_cons_index;
> +
> +	while ((prod_index - cons_index) > 0) {
> +		if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {
> +			ARK_BBDEV_LOG(ERR, "%s data ready but no op!",
> +				      __func__);
> +			q_data->queue_stats.dequeue_err_count += 1;
> +			break;
> +		}
> +		ops[nb] = this_op;
> +
> +		mbuf = this_op->ldpc_enc.output.data;
> +
> +		/* META DATA embedded in headroom */
> +		meta = RTE_PTR_ADD(mbuf->buf_addr,
> ARK_RX_META_OFFSET);
> +
> +		mbuf->pkt_len = meta->pkt_len;
> +		mbuf->data_len = meta->pkt_len;
> +		usermeta = meta->user_meta;
> +
> +		if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {
> +			if (ark_bb_dequeue_segmented(mbuf, &cons_index,
> +						     meta->pkt_len))
> +				q_data->queue_stats.dequeue_err_count +=
> 1;
> +		} else if (mbuf->next != 0) {
> +			ARK_BBDEV_LOG(CRIT, "Expected mbuf with "
> +				      "at exactly 1 segments for dequeue "
> +				      "of packet length %d. Found %d "
> +				      "segments",
> +				      meta->pkt_len, mbuf->nb_segs);
> +			q_data->queue_stats.dequeue_err_count += 1;
> +		}
> +
> +		/* User's meta move from Arkville HW to bbdev OP */
> +		ark_bb_user_dequeue_ldpc_enc(this_op, usermeta);
> +		nb++;
> +		cons_index++;
> +		if (nb >= nb_ops)
> +			break;
> +	}
> +
> +	q->rx_cons_index = cons_index;
> +
> +	/* BBdev stats */
> +	q_data->queue_stats.dequeued_count += nb;
> +
> +	return nb;
> +}
> +
> +/***************************************************************
> *******
> +****/
> +/*
> + *Initial device hardware configuration when device is opened
> + * setup the DDM, and UDM; called once per PCIE device  */ static int
> +ark_bb_config_device(struct ark_bbdevice *ark_bb) {
> +	uint16_t num_q, i;
> +	struct ark_mpu_t *mpu;
> +
> +	/*
> +	 * Make sure that the packet director, generator and checker are in a
> +	 * known state
> +	 */
> +	ark_bb->start_pg = 0;
> +	ark_bb->pg = ark_pktgen_init(ark_bb->pktgen.v, 0, 1);
> +	if (ark_bb->pg == NULL)
> +		return -1;
> +	ark_pktgen_reset(ark_bb->pg);
> +	ark_bb->pc = ark_pktchkr_init(ark_bb->pktchkr.v, 0, 1);
> +	if (ark_bb->pc == NULL)
> +		return -1;
> +	ark_pktchkr_stop(ark_bb->pc);
> +	ark_bb->pd = ark_pktdir_init(ark_bb->pktdir.v);
> +	if (ark_bb->pd == NULL)
> +		return -1;
> +
> +	/* Verify HW */
> +	if (ark_udm_verify(ark_bb->udm.v))
> +		return -1;
> +	if (ark_ddm_verify(ark_bb->ddm.v))
> +		return -1;
> +
> +	/* UDM */
> +	if (ark_udm_reset(ark_bb->udm.v)) {
> +		ARK_BBDEV_LOG(ERR, "Unable to stop and reset UDM");
> +		return -1;
> +	}
> +	/* Keep in reset until the MPU are cleared */
> +
> +	/* MPU reset */
> +	mpu = ark_bb->mpurx.v;
> +	num_q = ark_api_num_queues(mpu);
> +	ark_bb->max_nb_queues = num_q;
> +
> +	for (i = 0; i < num_q; i++) {
> +		ark_mpu_reset(mpu);
> +		mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> +	}
> +
> +	/* Only 1 queue supported in the udm */
> +	ark_udm_stop(ark_bb->udm.v, 0);
> +	ark_udm_configure(ark_bb->udm.v,
> +			  RTE_PKTMBUF_HEADROOM,
> +			  bbdev->data->queues[q_id]->dataroom,
> +			  ARK_RX_WRITE_TIME_NS);
> +
> +
> +	ark_udm_stats_reset(ark_bb->udm.v);
> +	ark_udm_stop(ark_bb->udm.v, 0);
> +
> +	/* TX -- DDM */
> +	if (ark_ddm_stop(ark_bb->ddm.v, 1))
> +		ARK_BBDEV_LOG(ERR, "Unable to stop DDM");
> +
> +	mpu = ark_bb->mputx.v;
> +	num_q = ark_api_num_queues(mpu);
> +	for (i = 0; i < num_q; i++) {
> +		ark_mpu_reset(mpu);
> +		mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> +	}
> +
> +	ark_ddm_reset(ark_bb->ddm.v);
> +	ark_ddm_stats_reset(ark_bb->ddm.v);
> +
> +	ark_ddm_stop(ark_bb->ddm.v, 0);
> +	if (ark_bb->rqpacing)
> +		ark_rqp_stats_reset(ark_bb->rqpacing);
> +
> +	ARK_BBDEV_LOG(INFO, "packet director set to 0x%x", ark_bb-
> >pkt_dir_v);
> +	ark_pktdir_setup(ark_bb->pd, ark_bb->pkt_dir_v);
> +
> +	if (ark_bb->pkt_gen_args[0]) {
> +		ARK_BBDEV_LOG(INFO, "Setting up the packet generator");
> +		ark_pktgen_parse(ark_bb->pkt_gen_args);
> +		ark_pktgen_reset(ark_bb->pg);
> +		ark_pktgen_setup(ark_bb->pg);
> +		ark_bb->start_pg = 1;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +ark_bbdev_init(struct rte_bbdev *bbdev, struct rte_pci_driver *pci_drv)
> +{
> +	struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> +	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(bbdev->device);
> +	bool rqpacing = false;
> +	int p;
> +
> +	RTE_SET_USED(pci_drv);
> +
> +	ark_bb->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
> +	ark_bb->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;
> +
> +	ark_bb->sysctrl.v  = (void *)&ark_bb->bar0[ARK_SYSCTRL_BASE];
> +	ark_bb->mpurx.v  = (void *)&ark_bb->bar0[ARK_MPU_RX_BASE];
> +	ark_bb->udm.v  = (void *)&ark_bb->bar0[ARK_UDM_BASE];
> +	ark_bb->mputx.v  = (void *)&ark_bb->bar0[ARK_MPU_TX_BASE];
> +	ark_bb->ddm.v  = (void *)&ark_bb->bar0[ARK_DDM_BASE];
> +	ark_bb->pktdir.v  = (void *)&ark_bb->bar0[ARK_PKTDIR_BASE];
> +	ark_bb->pktgen.v  = (void *)&ark_bb->bar0[ARK_PKTGEN_BASE];
> +	ark_bb->pktchkr.v  = (void *)&ark_bb->bar0[ARK_PKTCHKR_BASE];
> +
> +	p = 0;
> +	while (ark_device_caps[p].device_id != 0) {
> +		if (pci_dev->id.device_id == ark_device_caps[p].device_id) {
> +			rqpacing = ark_device_caps[p].caps.rqpacing;
> +			break;
> +		}
> +		p++;
> +	}
> +
> +	if (rqpacing)
> +		ark_bb->rqpacing =
> +			(struct ark_rqpace_t *)(ark_bb->bar0 +
> ARK_RCPACING_BASE);
> +	else
> +		ark_bb->rqpacing = NULL;
> +
> +	ark_bb->started = 0;
> +
> +	ARK_BBDEV_LOG(INFO, "Sys Ctrl Const = 0x%x  HW Commit_ID:
> %08x",
> +		      ark_bb->sysctrl.t32[4],
> +		      rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));
> +	ARK_BBDEV_LOG(INFO, "Arkville HW Commit_ID: %08x",
> +		    rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));
> +
> +	/* If HW sanity test fails, return an error */
> +	if (ark_bb->sysctrl.t32[4] != 0xcafef00d) {
> +		ARK_BBDEV_LOG(ERR,
> +			      "HW Sanity test has failed, expected constant"
> +			      " 0x%x, read 0x%x (%s)",
> +			      0xcafef00d,
> +			      ark_bb->sysctrl.t32[4], __func__);
> +		return -1;
> +	}
> +
> +	return ark_bb_config_device(ark_bb);
> +}
> +
> +static int
> +ark_bbdev_uninit(struct rte_bbdev *bbdev) {
> +	struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> +
> +	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> +		return 0;
> +
> +	ark_pktgen_uninit(ark_bb->pg);
> +	ark_pktchkr_uninit(ark_bb->pc);
> +
> +	return 0;
> +}
> +
> +static int
> +ark_bbdev_probe(struct rte_pci_driver *pci_drv,
> +		struct rte_pci_device *pci_dev)
> +{
> +	struct rte_bbdev *bbdev = NULL;
> +	char dev_name[RTE_BBDEV_NAME_MAX_LEN];
> +	struct ark_bbdevice *ark_bb;
> +
> +	if (pci_dev == NULL)
> +		return -EINVAL;
> +
> +	rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
> +
> +	/* Allocate memory to be used privately by drivers */
> +	bbdev = rte_bbdev_allocate(pci_dev->device.name);
> +	if (bbdev == NULL)
> +		return -ENODEV;
> +
> +	/* allocate device private memory */
> +	bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
> +			sizeof(struct ark_bbdevice),
> +			RTE_CACHE_LINE_SIZE,
> +			pci_dev->device.numa_node);
> +
> +	if (bbdev->data->dev_private == NULL) {
> +		ARK_BBDEV_LOG(CRIT,
> +				"Allocate of %zu bytes for device \"%s\"
> failed",
> +				sizeof(struct ark_bbdevice), dev_name);
> +				rte_bbdev_release(bbdev);
> +			return -ENOMEM;
> +	}
> +	ark_bb = bbdev->data->dev_private;
> +	/* Initialize ark_bb */
> +	ark_bb->pkt_dir_v = 0x00110110;
> +
> +	/* Fill HW specific part of device structure */
> +	bbdev->device = &pci_dev->device;
> +	bbdev->intr_handle = NULL;
> +	bbdev->data->socket_id = pci_dev->device.numa_node;
> +	bbdev->dev_ops = &ark_bbdev_pmd_ops;
> +	if (pci_dev->device.devargs)
> +		parse_ark_bbdev_params(pci_dev->device.devargs->args,
> ark_bb);
> +
> +
> +	/* Device specific initialization */
> +	if (ark_bbdev_init(bbdev, pci_drv))
> +		return -EIO;
> +	if (ark_bbdev_start(bbdev))
> +		return -EIO;
> +
> +	/* Core operations LDPC encode amd decode */
> +	bbdev->enqueue_ldpc_enc_ops = ark_bb_enqueue_ldpc_enc_ops;
> +	bbdev->dequeue_ldpc_enc_ops = ark_bb_dequeue_ldpc_enc_ops;
> +	bbdev->enqueue_ldpc_dec_ops = ark_bb_enqueue_ldpc_dec_ops;
> +	bbdev->dequeue_ldpc_dec_ops = ark_bb_dequeue_ldpc_dec_ops;
> +
> +	ARK_BBDEV_LOG(DEBUG, "bbdev id = %u [%s]",
> +		      bbdev->data->dev_id, dev_name);
> +
> +	return 0;
> +}
> +
> +/* Uninitialize device */
> +static int
> +ark_bbdev_remove(struct rte_pci_device *pci_dev) {
> +	struct rte_bbdev *bbdev;
> +	int ret;
> +
> +	if (pci_dev == NULL)
> +		return -EINVAL;
> +
> +	/* Find device */
> +	bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
> +	if (bbdev == NULL) {
> +		ARK_BBDEV_LOG(CRIT,
> +				"Couldn't find HW dev \"%s\" to Uninitialize
> it",
> +				pci_dev->device.name);
> +		return -ENODEV;
> +	}
> +
> +	/* Arkville device close */
> +	ark_bbdev_uninit(bbdev);
> +	rte_free(bbdev->data->dev_private);
> +
> +	/* Close device */
> +	ret = rte_bbdev_close(bbdev->data->dev_id);
> +	if (ret < 0)
> +		ARK_BBDEV_LOG(ERR,
> +				"Device %i failed to close during remove: %i",
> +				bbdev->data->dev_id, ret);
> +
> +	return rte_bbdev_release(bbdev);
> +}
> +
> +/* Operation for the PMD */
> +static const struct rte_bbdev_ops ark_bbdev_pmd_ops = {
> +	.info_get = ark_bbdev_info_get,
> +	.start = ark_bbdev_start,
> +	.stop = ark_bbdev_stop,
> +	.queue_setup = ark_bb_q_setup,
> +	.queue_release = ark_bb_q_release,
> +	.queue_start = ark_bb_q_start,
> +	.queue_stop = ark_bb_q_stop,
> +};
> +
> +
> +
> +static struct rte_pci_driver ark_bbdev_pmd_drv = {
> +	.probe = ark_bbdev_probe,
> +	.remove = ark_bbdev_remove,
> +	.id_table = pci_id_ark,
> +	.drv_flags = RTE_PCI_DRV_NEED_MAPPING
> +};
> +
> +RTE_PMD_REGISTER_PCI(DRIVER_NAME, ark_bbdev_pmd_drv);
> +RTE_PMD_REGISTER_PCI_TABLE(DRIVER_NAME, pci_id_ark);
> +RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
> +			      ARK_BBDEV_PKTGEN_ARG "=<filename> "
> +			      ARK_BBDEV_PKTCHKR_ARG "=<filename> "
> +			      ARK_BBDEV_PKTDIR_ARG "=<bitmap>"
> +			      );
> diff --git a/drivers/baseband/ark/ark_bbdev_common.c
> b/drivers/baseband/ark/ark_bbdev_common.c
> new file mode 100644
> index 0000000000..6ef0f43654
> --- /dev/null
> +++ b/drivers/baseband/ark/ark_bbdev_common.c
> @@ -0,0 +1,125 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2021 Atomic Rules LLC  */
> +
> +#include <string.h>
> +
> +#include <rte_kvargs.h>
> +#include <rte_log.h>
> +
> +#include "ark_bbdev_common.h"
> +
> +static const char * const ark_bbdev_valid_params[] = {
> +	ARK_BBDEV_PKTDIR_ARG,
> +	ARK_BBDEV_PKTGEN_ARG,
> +	ARK_BBDEV_PKTCHKR_ARG,
> +	NULL
> +};
> +
> +/* Parse 16-bit integer from string argument */ static inline int
> +parse_u16_arg(const char *key, const char *value, void *extra_args) {
> +	uint16_t *u16 = extra_args;
> +	unsigned int long result;
> +
> +	if ((value == NULL) || (extra_args == NULL))
> +		return -EINVAL;
> +	errno = 0;
> +	result = strtoul(value, NULL, 0);
> +	if ((result >= (1 << 16)) || (errno != 0)) {
> +		ARK_BBDEV_LOG(ERR, "Invalid value %" PRIu64 " for %s",
> result, key);
> +		return -ERANGE;
> +	}
> +	*u16 = (uint16_t)result;
> +	return 0;
> +}
> +
> +static inline int
> +process_pktdir_arg(const char *key, const char *value,
> +		   void *extra_args)
> +{
> +	uint32_t *u32 = extra_args;
> +	ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);
> +
> +	*u32 = strtol(value, NULL, 0);
> +	ARK_BBDEV_LOG(DEBUG, "pkt_dir_v = 0x%x", *u32);
> +	return 0;
> +}
> +
> +static inline int
> +process_file_args(const char *key, const char *value, void *extra_args)
> +{
> +	char *args = (char *)extra_args;
> +	ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);
> +
> +	/* Open the configuration file */
> +	FILE *file = fopen(value, "r");
> +	char line[ARK_MAX_ARG_LEN];
> +	int  size = 0;
> +	int first = 1;
> +
> +	if (file == NULL) {
> +		ARK_BBDEV_LOG(ERR, "Unable to open config file %s",
> +			      value);
> +		return -1;
> +	}
> +
> +	while (fgets(line, sizeof(line), file)) {
> +		size += strlen(line);
> +		if (size >= ARK_MAX_ARG_LEN) {
> +			ARK_BBDEV_LOG(ERR, "Unable to parse file %s args,
> "
> +				      "parameter list is too long", value);
> +			fclose(file);
> +			return -1;
> +		}
> +		if (first) {
> +			strncpy(args, line, ARK_MAX_ARG_LEN);
> +			first = 0;
> +		} else {
> +			strncat(args, line, ARK_MAX_ARG_LEN);
> +		}
> +	}
> +	ARK_BBDEV_LOG(DEBUG, "file = %s", args);
> +	fclose(file);
> +	return 0;
> +}
> +
> +
> +/* Parse parameters used to create device */ int
> +parse_ark_bbdev_params(const char *input_args,
> +		       struct ark_bbdevice *ark_bb)
> +{
> +	struct rte_kvargs *kvlist = NULL;
> +	int ret = 0;
> +
> +	if (ark_bb == NULL)
> +		return -EINVAL;
> +	if (input_args == NULL)
> +		return ret;
> +
> +	kvlist = rte_kvargs_parse(input_args, ark_bbdev_valid_params);
> +	if (kvlist == NULL)
> +		return -EFAULT;
> +
> +	ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTDIR_ARG,
> +				  &process_pktdir_arg, &ark_bb->pkt_dir_v);
> +	if (ret < 0)
> +		goto exit;
> +
> +	ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTGEN_ARG,
> +				 &process_file_args, &ark_bb-
> >pkt_gen_args);
> +	if (ret < 0)
> +		goto exit;
> +
> +	ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTCHKR_ARG,
> +				 &process_file_args, &ark_bb-
> >pkt_chkr_args);
> +	if (ret < 0)
> +		goto exit;
> +
> + exit:
> +	if (kvlist)
> +		rte_kvargs_free(kvlist);
> +	return ret;
> +}
> diff --git a/drivers/baseband/ark/ark_bbdev_common.h
> b/drivers/baseband/ark/ark_bbdev_common.h
> new file mode 100644
> index 0000000000..670e7e86d6
> --- /dev/null
> +++ b/drivers/baseband/ark/ark_bbdev_common.h
> @@ -0,0 +1,92 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2021 Atomic Rules LLC  */
> +
> +#ifndef _ARK_BBDEV_COMMON_H_
> +#define _ARK_BBDEV_COMMON_H_
> +
> +#include "ark_pktchkr.h"
> +#include "ark_pktdir.h"
> +#include "ark_pktgen.h"
> +
> +#define ARK_MAX_ARG_LEN 256
> +
> +/* Acceptable params for ark BBDEV devices */
> +/*
> + * The packet generator is a functional block used to generate packet
> + * patterns for testing.  It is not intended for nominal use.
> + */
> +#define ARK_BBDEV_PKTGEN_ARG "Pkt_gen"
> +
> +/*
> + * The packet checker is a functional block used to verify packet
> + * patterns for testing.  It is not intended for nominal use.
> + */
> +#define ARK_BBDEV_PKTCHKR_ARG "Pkt_chkr"
> +
> +/*
> + * The packet director is used to select the internal ingress and
> + * egress packets paths during testing.  It is not intended for
> + * nominal use.
> + */
> +#define ARK_BBDEV_PKTDIR_ARG "Pkt_dir"
> +
> +
> +#define def_ptr(type, name) \
> +	union type {		   \
> +		uint64_t *t64;	   \
> +		uint32_t *t32;	   \
> +		uint16_t *t16;	   \
> +		uint8_t  *t8;	   \
> +		void     *v;	   \
> +	} name
> +
> +/*
> + * Structure to store private data for each PF/VF instance.
> + */
> +struct ark_bbdevice {
> +	/* Our Bar 0 */
> +	uint8_t *bar0;
> +
> +	/* Application Bar needed for extensions */
> +	uint8_t *a_bar;
> +
> +	/* Arkville hardware block offsets */
> +	def_ptr(sys_ctrl, sysctrl);
> +	def_ptr(pkt_gen, pktgen);
> +	def_ptr(mpu_rx, mpurx);
> +	def_ptr(UDM, udm);
> +	def_ptr(mpu_tx, mputx);
> +	def_ptr(DDM, ddm);
> +	def_ptr(pkt_dir, pktdir);
> +	def_ptr(pkt_chkr, pktchkr);
> +	struct ark_rqpace_t *rqpacing;
> +
> +	/* Pointers to packet generator and checker */
> +	int start_pg;
> +	ark_pkt_gen_t pg;
> +	ark_pkt_chkr_t pc;
> +	ark_pkt_dir_t pd;
> +
> +	/* Packet generator/checker args */
> +	char pkt_gen_args[ARK_MAX_ARG_LEN];
> +	char pkt_chkr_args[ARK_MAX_ARG_LEN];
> +	uint32_t pkt_dir_v;
> +
> +	int started;
> +	unsigned int max_nb_queues;  /**< Max number of queues */
> +
> +};
> +
> +
> +/* Log message for PMD */
> +extern int ark_bbdev_logtype;
> +
> +/* Helper macro for logging */
> +#define ARK_BBDEV_LOG(level, fmt, ...) \
> +	rte_log(RTE_LOG_ ## level, ark_bbdev_logtype, \
> +		"ARK_BBD: " fmt "\n", ##__VA_ARGS__)
> +
> +int parse_ark_bbdev_params(const char *argv, struct ark_bbdevice *dev);
> +
> +#endif
> diff --git a/drivers/baseband/ark/ark_bbdev_custom.c
> b/drivers/baseband/ark/ark_bbdev_custom.c
> new file mode 100644
> index 0000000000..6b1553abe1
> --- /dev/null
> +++ b/drivers/baseband/ark/ark_bbdev_custom.c
> @@ -0,0 +1,201 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2021 Atomic Rules LLC  */
> +
> +#include <rte_bbdev.h>
> +#include <rte_bbdev_pmd.h>
> +
> +#include <rte_mbuf.h>
> +#include <rte_hexdump.h>	/* For debug */
> +
> +
> +#include "ark_bbdev_common.h"
> +#include "ark_bbdev_custom.h"
> +
> +/* It is expected that functions in this file will be modified based on
> + * specifics of the FPGA hardware beyond the core Arkville
> + * components.
> + */
> +
> +/* bytyes must be range of 0 to 20 */
> +static inline
> +uint8_t ark_bb_cvt_bytes_meta_cnt(size_t bytes) {
> +	return (bytes + 3) / 8;
> +}
> +
> +void
> +ark_bbdev_info_get(struct rte_bbdev *dev,
> +		   struct rte_bbdev_driver_info *dev_info) {
> +	struct ark_bbdevice *ark_bb =  dev->data->dev_private;
> +
> +	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
> +		{
> +			.type = RTE_BBDEV_OP_LDPC_DEC,
> +			.cap.ldpc_dec = {
> +				.capability_flags =
> +					RTE_BBDEV_LDPC_CRC_24B_ATTACH
> |
> +					RTE_BBDEV_LDPC_RATE_MATCH,
> +				.num_buffers_src =
> +
> 	RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
> +				.num_buffers_hard_out =
> +
> 	RTE_BBDEV_LDPC_MAX_CODE_BLOCKS
> +			}
> +		},
> +		{
> +			.type = RTE_BBDEV_OP_LDPC_ENC,
> +			.cap.ldpc_enc = {
> +				.capability_flags =
> +					RTE_BBDEV_LDPC_CRC_24B_ATTACH
> |
> +					RTE_BBDEV_LDPC_RATE_MATCH,
> +				.num_buffers_src =
> +
> 	RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
> +				.num_buffers_dst =
> +
> 	RTE_BBDEV_LDPC_MAX_CODE_BLOCKS
> +			}
> +		},
> +		RTE_BBDEV_END_OF_CAPABILITIES_LIST(),
> +	};
> +
> +	static struct rte_bbdev_queue_conf default_queue_conf = {
> +		.queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
> +	};
> +
> +	default_queue_conf.socket = dev->data->socket_id;
> +
> +	dev_info->driver_name = RTE_STR(DRIVER_NAME);
> +	dev_info->max_num_queues = ark_bb->max_nb_queues;
> +	dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
> +	dev_info->hardware_accelerated = true;
> +	dev_info->max_dl_queue_priority = 0;
> +	dev_info->max_ul_queue_priority = 0;
> +	dev_info->default_queue_conf = default_queue_conf;
> +	dev_info->capabilities = bbdev_capabilities;
> +	dev_info->cpu_flag_reqs = NULL;
> +	dev_info->min_alignment = 4;
> +
> +}
> +
> +/* Structure defining layout of the ldpc command struct */ struct
> +ark_bb_ldpc_enc_meta {
> +	uint16_t header;
> +	uint8_t rv_index:2,
> +		basegraph:1,
> +		code_block_mode:1,
> +		rfu_71_68:4;
> +
> +	uint8_t q_m;
> +	uint32_t e_ea;
> +	uint32_t eb;
> +	uint8_t c;
> +	uint8_t cab;
> +	uint16_t n_cb;
> +	uint16_t pad;
> +	uint16_t trailer;
> +} __rte_packed;
> +
> +/* The size must be less then 20 Bytes */ static_assert(sizeof(struct
> +ark_bb_ldpc_enc_meta) <= 20, "struct size");
> +
> +/* Custom operation on equeue ldpc operation  */
> +/* Do these function need queue number? */
> +/* Maximum of 20 bytes */
> +int
> +ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> +			  uint32_t *meta, uint8_t *meta_cnt) {
> +	struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &enc_op->ldpc_enc;
> +	struct ark_bb_ldpc_enc_meta *src = (struct ark_bb_ldpc_enc_meta
> +*)meta;
> +
> +	src->header = 0x4321;	/* For testings */
> +	src->trailer = 0xFEDC;
> +
> +	src->rv_index = ldpc_enc_op->rv_index;
> +	src->basegraph = ldpc_enc_op->basegraph;
> +	src->code_block_mode = ldpc_enc_op->code_block_mode;
> +
> +	src->q_m = ldpc_enc_op->q_m;
> +	src->e_ea = 0xABCD;
> +	src->eb = ldpc_enc_op->tb_params.eb;
> +	src->c = ldpc_enc_op->tb_params.c;
> +	src->cab = ldpc_enc_op->tb_params.cab;
> +
> +	src->n_cb = 0;
> +
> +	meta[0] = 0x11111110;
> +	meta[1] = 0x22222220;
> +	meta[2] = 0x33333330;
> +	meta[3] = 0x44444440;
> +	meta[4] = 0x55555550;
> +
> +	*meta_cnt = ark_bb_cvt_bytes_meta_cnt(
> +			sizeof(struct ark_bb_ldpc_enc_meta));
> +	return 0;
> +}
> +
> +/* Custom operation on dequeue ldpc operation  */ int
> +ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> +			     const uint32_t *usermeta)
> +{
> +	static int dump;	/* = 0 */
> +	/* Just compare with what was sent? */
> +	uint32_t meta_in[5] = {0};
> +	uint8_t  meta_cnt;
> +
> +	ark_bb_user_enqueue_ldpc_enc(enc_op, meta_in, &meta_cnt);
> +	if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {
> +		fprintf(stderr,
> +			"------------------------------------------\n");
> +		rte_hexdump(stdout, "meta difference for lpdc_enc IN",
> +			    meta_in, 20);
> +		rte_hexdump(stdout, "meta difference for lpdc_enc OUT",
> +			    usermeta, 20);
> +	} else if (dump) {
> +		rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);
> +		dump--;
> +	}
> +
> +	return 0;
> +}
> +
> +
> +/* Turbo op call backs for user meta data */ int
> +ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> +				 uint32_t *meta, uint8_t *meta_cnt) {
> +	RTE_SET_USED(enc_op);
> +	meta[0] = 0xF1111110;
> +	meta[1] = 0xF2222220;
> +	meta[2] = 0xF3333330;
> +	meta[3] = 0xF4444440;
> +	meta[4] = 0xF5555550;
> +
> +	*meta_cnt = ark_bb_cvt_bytes_meta_cnt(20);
> +	return 0;
> +}
> +
> +int ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> +				 const uint32_t *usermeta)
> +{
> +	RTE_SET_USED(enc_op);
> +	static int dump;	/* = 0 */
> +	/* Just compare with what was sent? */
> +	uint32_t meta_in[5] = {0};
> +	uint8_t  meta_cnt;
> +
> +	ark_bb_user_enqueue_ldpc_dec(enc_op, meta_in, &meta_cnt);
> +	if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {
> +		fprintf(stderr,
> +			"------------------------------------------\n");
> +		rte_hexdump(stdout, "meta difference for lpdc_enc IN",
> +			    meta_in, 20);
> +		rte_hexdump(stdout, "meta difference for lpdc_enc OUT",
> +			    usermeta, 20);
> +	} else if (dump) {
> +		rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);
> +		dump--;
> +	}
> +	return 0;
> +}
> diff --git a/drivers/baseband/ark/ark_bbdev_custom.h
> b/drivers/baseband/ark/ark_bbdev_custom.h
> new file mode 100644
> index 0000000000..32a2ef6bb6
> --- /dev/null
> +++ b/drivers/baseband/ark/ark_bbdev_custom.h
> @@ -0,0 +1,30 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2021 Atomic Rules LLC  */
> +
> +#ifndef _ARK_BBDEV_CUSTOM_H_
> +#define _ARK_BBDEV_CUSTOM_H_
> +
> +#include <stdint.h>
> +
> +/* Forward declarations */
> +struct rte_bbdev;
> +struct rte_bbdev_driver_info;
> +struct rte_bbdev_enc_op;
> +struct rte_bbdev_dec_op;
> +struct rte_mbuf;
> +
> +void ark_bbdev_info_get(struct rte_bbdev *dev,
> +			struct rte_bbdev_driver_info *dev_info);
> +
> +int ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> +				 uint32_t *meta, uint8_t *meta_cnt); int
> +ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> +				 const uint32_t *usermeta);
> +
> +int ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> +				 uint32_t *meta, uint8_t *meta_cnt); int
> +ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> +				 const uint32_t *usermeta);
> +
> +#endif
> diff --git a/drivers/baseband/ark/meson.build
> b/drivers/baseband/ark/meson.build
> new file mode 100644
> index 0000000000..b876f05c6e
> --- /dev/null
> +++ b/drivers/baseband/ark/meson.build
> @@ -0,0 +1,11 @@
> +# SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2018 Luca
> +Boccassi <bluca@debian.org>
> +
> +deps += ['common_ark', 'bbdev', 'bus_pci', 'pci', 'ring'] sources =
> +files(
> +	'ark_bbdev.c',
> +	'ark_bbdev_common.c',
> +	'ark_bbdev_custom.c'
> +	)
> +
> +includes += include_directories('../../common/ark')
> diff --git a/drivers/baseband/ark/version.map
> b/drivers/baseband/ark/version.map
> new file mode 100644
> index 0000000000..4a76d1d52d
> --- /dev/null
> +++ b/drivers/baseband/ark/version.map
> @@ -0,0 +1,3 @@
> +DPDK_21 {
> +	local: *;
> +};
> --
> 2.25.1
John Miller April 28, 2022, 10:01 a.m. UTC | #2
Hi Nic,

Yes, I will split this into smaller patches and take care of the TODOs.  I
will have a v1 patch set ready with these changes within a couple of days.
Given the 4/10 deadline for 22.07 it would seem that 22.11 is our target.

Thank you,
-John


On Wed, Apr 27, 2022 at 2:38 PM Chautru, Nicolas <nicolas.chautru@intel.com>
wrote:

> Hi John,
>
> Do you think this one can be split into a few incremental commits?
>
> There are a few TODOs, is that v1 ready for review? Also you are targeting
> 22.11 right?
>
> Thanks
> Nic
>
> > -----Original Message-----
> > From: John Miller <john.miller@atomicrules.com>
> > Sent: Thursday, April 21, 2022 8:19 AM
> > To: dev@dpdk.org
> > Cc: ferruh.yigit@xilinx.com; ed.czeck@atomicrules.com; John Miller
> > <john.miller@atomicrules.com>
> > Subject: [PATCH 05/10] baseband/ark: add ark baseband device
> >
> > Add new ark baseband device.
> >
> > Signed-off-by: John Miller <john.miller@atomicrules.com>
> > ---
> >  drivers/baseband/ark/ark_bbdev.c        | 1064 +++++++++++++++++++++++
> >  drivers/baseband/ark/ark_bbdev_common.c |  125 +++
> >  drivers/baseband/ark/ark_bbdev_common.h |   92 ++
> >  drivers/baseband/ark/ark_bbdev_custom.c |  201 +++++
> >  drivers/baseband/ark/ark_bbdev_custom.h |   30 +
> >  drivers/baseband/ark/meson.build        |   11 +
> >  drivers/baseband/ark/version.map        |    3 +
> >  7 files changed, 1526 insertions(+)
> >  create mode 100644 drivers/baseband/ark/ark_bbdev.c  create mode
> > 100644 drivers/baseband/ark/ark_bbdev_common.c
> >  create mode 100644 drivers/baseband/ark/ark_bbdev_common.h
> >  create mode 100644 drivers/baseband/ark/ark_bbdev_custom.c
> >  create mode 100644 drivers/baseband/ark/ark_bbdev_custom.h
> >  create mode 100644 drivers/baseband/ark/meson.build  create mode
> > 100644 drivers/baseband/ark/version.map
> >
> > diff --git a/drivers/baseband/ark/ark_bbdev.c
> > b/drivers/baseband/ark/ark_bbdev.c
> > new file mode 100644
> > index 0000000000..b23bbd44d1
> > --- /dev/null
> > +++ b/drivers/baseband/ark/ark_bbdev.c
> > @@ -0,0 +1,1064 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2016-2021 Atomic Rules LLC  */
> > +
> > +#include "ark_common.h"
> > +#include "ark_bbdev_common.h"
> > +#include "ark_bbdev_custom.h"
> > +#include "ark_ddm.h"
> > +#include "ark_mpu.h"
> > +#include "ark_rqp.h"
> > +#include "ark_udm.h"
> > +
> > +#include <rte_bbdev.h>
> > +#include <rte_bbdev_pmd.h>
> > +#include <rte_bus_pci.h>
> > +#include <rte_common.h>
> > +#include <rte_devargs.h>
> > +#include <rte_malloc.h>
> > +#include <rte_ring.h>
> > +
> > +#include <unistd.h>
> > +
> > +#define DRIVER_NAME baseband_ark
> > +
> > +RTE_LOG_REGISTER_DEFAULT(ark_bbdev_logtype, DEBUG);
> > +
> > +#define ARK_SYSCTRL_BASE  0x0
> > +#define ARK_PKTGEN_BASE   0x10000
> > +#define ARK_MPU_RX_BASE   0x20000
> > +#define ARK_UDM_BASE      0x30000
> > +#define ARK_MPU_TX_BASE   0x40000
> > +#define ARK_DDM_BASE      0x60000
> > +#define ARK_PKTDIR_BASE   0xa0000
> > +#define ARK_PKTCHKR_BASE  0x90000
> > +#define ARK_RCPACING_BASE 0xb0000
> > +#define ARK_MPU_QOFFSET   0x00100
> > +
> > +#define BB_ARK_TX_Q_FACTOR 4
> > +
> > +/* TODO move to UDM, verify configuration */ #define ARK_RX_META_SIZE
> > +32 #define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM -
> > ARK_RX_META_SIZE)
> > +#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
> > +
> > +static_assert(sizeof(struct ark_rx_meta) == ARK_RX_META_SIZE,
> > +"Unexpected struct size ark_rx_meta"); static_assert(sizeof(union
> > +ark_tx_meta) == 8, "Unexpected struct size ark_tx_meta");
> > +
> > +static struct rte_pci_id pci_id_ark[] = {
> > +     {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1015)},
> > +     {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1016)},
> > +     {.device_id = 0},
> > +};
> > +
> > +static const struct ark_dev_caps
> > +ark_device_caps[] = {
> > +                  SET_DEV_CAPS(0x1015, true),
> > +                  SET_DEV_CAPS(0x1016, true),
> > +                  {.device_id = 0,}
> > +};
> > +
> > +
> > +/* Forward declarations */
> > +static const struct rte_bbdev_ops ark_bbdev_pmd_ops;
> > +
> > +
> > +/* queue */
> > +struct ark_bbdev_queue {
> > +     struct rte_ring *active_ops;  /* Ring for processed packets */
> > +
> > +     /* RX components */
> > +     /* array of physical addresses of the mbuf data pointer */
> > +     rte_iova_t *rx_paddress_q;
> > +     struct ark_udm_t *udm;
> > +     struct ark_mpu_t *rx_mpu;
> > +
> > +     /* TX components */
> > +     union ark_tx_meta *tx_meta_q;
> > +     struct ark_mpu_t *tx_mpu;
> > +     struct ark_ddm_t *ddm;
> > +
> > +     /*  */
> > +     uint32_t tx_queue_mask;
> > +     uint32_t rx_queue_mask;
> > +
> > +     int32_t rx_seed_index;          /* step 1 set with empty mbuf */
> > +     int32_t rx_cons_index;          /* step 3 consumed by driver */
> > +
> > +     /* 3 indexes to the paired data rings. */
> > +     int32_t tx_prod_index;          /* where to put the next one */
> > +     int32_t tx_free_index;          /* local copy of tx_cons_index */
> > +
> > +     /* separate cache line -- written by FPGA -- RX announce */
> > +     RTE_MARKER cacheline1 __rte_cache_min_aligned;
> > +     volatile int32_t rx_prod_index; /* step 2 filled by FPGA */
> > +
> > +     /* Separate cache line -- written by FPGA -- RX completion */
> > +     RTE_MARKER cacheline2 __rte_cache_min_aligned;
> > +     volatile int32_t tx_cons_index; /* hw is done, can be freed */ }
> > +__rte_cache_aligned;
> > +
> > +static int
> > +ark_bb_hw_q_setup(struct rte_bbdev *bbdev, uint16_t q_id, uint16_t
> > +queue_size) {
> > +     struct ark_bbdev_queue *q = bbdev->data-
> > >queues[q_id].queue_private;
> > +
> > +     rte_iova_t queue_base;
> > +     rte_iova_t phys_addr_q_base;
> > +     rte_iova_t phys_addr_prod_index;
> > +     rte_iova_t phys_addr_cons_index;
> > +
> > +     uint32_t write_interval_ns = 500; /* TODO this seems big */
> > +
> > +     if (ark_mpu_verify(q->rx_mpu, sizeof(rte_iova_t))) {
> > +             ARK_BBDEV_LOG(ERR, "Illegal hw/sw configuration RX
> > queue");
> > +             return -1;
> > +     }
> > +     ARK_BBDEV_LOG(DEBUG, "ark_bb_q setup %u:%u",
> > +                   bbdev->data->dev_id, q_id);
> > +
> > +     /* RX MPU */
> > +     phys_addr_q_base = rte_malloc_virt2iova(q->rx_paddress_q);
> > +     /* Force TX mode on MPU to match bbdev behavior */
> > +     ark_mpu_configure(q->rx_mpu, phys_addr_q_base, queue_size, 1);
> > +     ark_mpu_reset_stats(q->rx_mpu);
> > +     ark_mpu_start(q->rx_mpu);
> > +
> > +     /* UDM */
> > +     queue_base = rte_malloc_virt2iova(q);
> > +     phys_addr_prod_index = queue_base +
> > +             offsetof(struct ark_bbdev_queue, rx_prod_index);
> > +     ark_udm_write_addr(q->udm, phys_addr_prod_index);
> > +     ark_udm_queue_enable(q->udm, 1);
> > +
> > +     /* TX MPU */
> > +     phys_addr_q_base = rte_malloc_virt2iova(q->tx_meta_q);
> > +     ark_mpu_configure(q->tx_mpu, phys_addr_q_base,
> > +                       BB_ARK_TX_Q_FACTOR * queue_size, 1);
> > +     ark_mpu_start(q->tx_mpu);
> > +
> > +     /* DDM */
> > +     phys_addr_cons_index = queue_base +
> > +             offsetof(struct ark_bbdev_queue, tx_cons_index);
> > +     ark_ddm_setup(q->ddm, phys_addr_cons_index, write_interval_ns);
> > +
> > +     return 0;
> > +}
> > +
> > +/* Setup a queue */
> > +static int
> > +ark_bb_q_setup(struct rte_bbdev *bbdev, uint16_t q_id,
> > +            const struct rte_bbdev_queue_conf *queue_conf) {
> > +     struct ark_bbdev_queue *q;
> > +     struct ark_bbdevice *ark_bb =  bbdev->data->dev_private;
> > +
> > +     const uint32_t queue_size = queue_conf->queue_size;
> > +     const int socket_id = queue_conf->socket;
> > +     const uint64_t pg_sz = sysconf(_SC_PAGESIZE);
> > +     char ring_name[RTE_RING_NAMESIZE];
> > +
> > +     /* Configuration checks */
> > +     if (!rte_is_power_of_2(queue_size)) {
> > +             ARK_BBDEV_LOG(ERR,
> > +                           "Configuration queue size"
> > +                           " must be power of two %u",
> > +                           queue_size);
> > +             return -EINVAL;
> > +     }
> > +
> > +     if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
> > +             ARK_BBDEV_LOG(ERR,
> > +                           "Error: Ark bbdev requires head room > %d
> bytes
> > (%s)",
> > +                           ARK_RX_META_SIZE, __func__);
> > +             return -EINVAL;
> > +     }
> > +
> > +     /* Allocate the queue data structure. */
> > +     q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
> > +                     RTE_CACHE_LINE_SIZE, queue_conf->socket);
> > +     if (q == NULL) {
> > +             ARK_BBDEV_LOG(ERR, "Failed to allocate queue memory");
> > +             return -ENOMEM;
> > +     }
> > +     bbdev->data->queues[q_id].queue_private = q;
> > +
> > +     /* RING */
> > +     snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)
> > "%u:%u",
> > +              bbdev->data->dev_id, q_id);
> > +     q->active_ops = rte_ring_create(ring_name,
> > +                                     queue_size,
> > +                                     queue_conf->socket,
> > +                                     RING_F_SP_ENQ | RING_F_SC_DEQ);
> > +     if (q->active_ops == NULL) {
> > +             ARK_BBDEV_LOG(ERR, "Failed to create ring");
> > +             goto free_all;
> > +     }
> > +
> > +     q->rx_queue_mask = queue_size - 1;
> > +     q->tx_queue_mask = (BB_ARK_TX_Q_FACTOR * queue_size) - 1;
> > +
> > +     /* Each mbuf requires 2 to 4 objects, factor by
> > BB_ARK_TX_Q_FACTOR */
> > +     q->tx_meta_q =
> > +             rte_zmalloc_socket("Ark_bb_txqueue meta",
> > +                                queue_size * BB_ARK_TX_Q_FACTOR *
> > +                                sizeof(union ark_tx_meta),
> > +                                pg_sz,
> > +                                socket_id);
> > +
> > +     if (q->tx_meta_q == 0) {
> > +             ARK_BBDEV_LOG(ERR, "Failed to allocate "
> > +                           "queue memory in %s", __func__);
> > +             goto free_all;
> > +     }
> > +
> > +     q->ddm = RTE_PTR_ADD(ark_bb->ddm.v, q_id *
> > ARK_DDM_QOFFSET);
> > +     q->tx_mpu = RTE_PTR_ADD(ark_bb->mputx.v, q_id *
> > ARK_MPU_QOFFSET);
> > +
> > +     q->rx_paddress_q =
> > +             rte_zmalloc_socket("ark_bb_rx_paddress_q",
> > +                                queue_size * sizeof(rte_iova_t),
> > +                                pg_sz,
> > +                                socket_id);
> > +
> > +     if (q->rx_paddress_q == 0) {
> > +             ARK_BBDEV_LOG(ERR,
> > +                           "Failed to allocate queue memory in %s",
> > +                           __func__);
> > +             goto free_all;
> > +     }
> > +     q->udm = RTE_PTR_ADD(ark_bb->udm.v, q_id *
> > ARK_UDM_QOFFSET);
> > +     q->rx_mpu = RTE_PTR_ADD(ark_bb->mpurx.v, q_id *
> > ARK_MPU_QOFFSET);
> > +
> > +     /* Structure have been configured, set the hardware */
> > +     return ark_bb_hw_q_setup(bbdev, q_id, queue_size);
> > +
> > +free_all:
> > +     rte_free(q->tx_meta_q);
> > +     rte_free(q->rx_paddress_q);
> > +     rte_free(q);
> > +     return -EFAULT;
> > +}
> > +
> > +/* Release queue */
> > +static int
> > +ark_bb_q_release(struct rte_bbdev *bbdev, uint16_t q_id) {
> > +     struct ark_bbdev_queue *q = bbdev->data-
> > >queues[q_id].queue_private;
> > +
> > +     /* TODO Wait for ddm to send out all packets in flight,
> > +      * Is this only called after q stop?
> > +      */
> > +
> > +     ark_mpu_dump(q->rx_mpu, "rx_MPU release", q_id);
> > +     ark_mpu_dump(q->tx_mpu, "tx_MPU release", q_id);
> > +
> > +     rte_ring_free(q->active_ops);
> > +     rte_free(q->tx_meta_q);
> > +     rte_free(q->rx_paddress_q);
> > +     rte_free(q);
> > +     bbdev->data->queues[q_id].queue_private = NULL;
> > +
> > +     ARK_BBDEV_LOG(DEBUG, "released device queue %u:%u",
> > +                   bbdev->data->dev_id, q_id);
> > +     return 0;
> > +}
> > +
> > +static int
> > +ark_bbdev_start(struct rte_bbdev *bbdev) {
> > +     struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> > +
> > +     ARK_BBDEV_LOG(DEBUG, "Starting device %u", bbdev->data-
> > >dev_id);
> > +     if (ark_bb->started)
> > +             return 0;
> > +
> > +     /* start UDM */
> > +     ark_udm_start(ark_bb->udm.v);
> > +
> > +     /* start DDM */
> > +     ark_ddm_start(ark_bb->ddm.v);
> > +
> > +     ark_bb->started = 1;
> > +
> > +     if (ark_bb->start_pg)
> > +             ark_pktchkr_run(ark_bb->pc);
> > +
> > +     if (ark_bb->start_pg) {
> > +             pthread_t thread;
> > +
> > +             /* Delay packet generator start allow the hardware to be
> > ready
> > +              * This is only used for sanity checking with internal
> generator
> > +              */
> > +             if (pthread_create(&thread, NULL,
> > +                                ark_pktgen_delay_start, ark_bb->pg)) {
> > +                     ARK_BBDEV_LOG(ERR, "Could not create pktgen "
> > +                                 "starter thread");
> > +                     return -1;
> > +             }
> > +     }
> > +
> > +     return 0;
> > +}
> > +
> > +static void
> > +ark_bbdev_stop(struct rte_bbdev *bbdev) {
> > +     struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> > +     struct ark_mpu_t *mpu;
> > +     unsigned int i;
> > +     int status;
> > +
> > +     ARK_BBDEV_LOG(DEBUG, "Stopping device %u", bbdev->data-
> > >dev_id);
> > +
> > +     if (!ark_bb->started)
> > +             return;
> > +
> > +     /* Stop the packet generator */
> > +     if (ark_bb->start_pg)
> > +             ark_pktgen_pause(ark_bb->pg);
> > +
> > +     /* Stop DDM */
> > +     /* Wait up to 0.1 second.  each stop is up to 1000 * 10 useconds */
> > +     for (i = 0; i < 10; i++) {
> > +             status = ark_ddm_stop(ark_bb->ddm.v, 1);
> > +             if (status == 0)
> > +                     break;
> > +     }
> > +     if (status || i != 0) {
> > +             ARK_BBDEV_LOG(ERR, "DDM stop anomaly. status:"
> > +                           " %d iter: %u. (%s)",
> > +                           status,
> > +                           i,
> > +                           __func__);
> > +             ark_ddm_dump(ark_bb->ddm.v, "Stop anomaly");
> > +
> > +             mpu = ark_bb->mputx.v;
> > +             for (i = 0; i < ark_bb->max_nb_queues; i++) {
> > +                     ark_mpu_dump(mpu, "DDM failure dump", i);
> > +                     mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> > +             }
> > +     }
> > +     ark_ddm_dump_stats(ark_bb->ddm.v, "bbdev stop");
> > +
> > +     /* STOP RX Side */
> > +     /* Stop UDM  multiple tries attempted */
> > +     for (i = 0; i < 10; i++) {
> > +             status = ark_udm_stop(ark_bb->udm.v, 1);
> > +             if (status == 0)
> > +                     break;
> > +     }
> > +     if (status || i != 0) {
> > +             ARK_BBDEV_LOG(WARNING, "UDM stop anomaly. status %d
> > iter: %u. (%s)",
> > +                           status, i, __func__);
> > +             ark_udm_dump(ark_bb->udm.v, "Stop anomaly");
> > +
> > +             mpu = ark_bb->mpurx.v;
> > +             for (i = 0; i < ark_bb->max_nb_queues; i++) {
> > +                     ark_mpu_dump(mpu, "UDM Stop anomaly", i);
> > +                     mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> > +             }
> > +     }
> > +
> > +     ark_udm_dump_stats(ark_bb->udm.v, "Post stop");
> > +     ark_udm_dump_perf(ark_bb->udm.v, "Post stop");
> > +
> > +     /* Stop the packet checker if it is running */
> > +     if (ark_bb->start_pg) {
> > +             ark_pktchkr_dump_stats(ark_bb->pc);
> > +             ark_pktchkr_stop(ark_bb->pc);
> > +     }
> > +}
> > +
> > +static int
> > +ark_bb_q_start(struct rte_bbdev *bbdev, uint16_t q_id) {
> > +     struct ark_bbdev_queue *q = bbdev->data-
> > >queues[q_id].queue_private;
> > +     ARK_BBDEV_LOG(DEBUG, "ark_bb_q start %u:%u", bbdev->data-
> > >dev_id, q_id);
> > +     ark_mpu_start(q->tx_mpu);
> > +     ark_mpu_start(q->rx_mpu);
> > +     return 0;
> > +}
> > +static int
> > +ark_bb_q_stop(struct rte_bbdev *bbdev, uint16_t q_id) {
> > +     struct ark_bbdev_queue *q = bbdev->data-
> > >queues[q_id].queue_private;
> > +     ARK_BBDEV_LOG(DEBUG, "ark_bb_q stop %u:%u", bbdev->data-
> > >dev_id, q_id);
> > +     ark_mpu_stop(q->tx_mpu);
> > +     ark_mpu_stop(q->rx_mpu);
> > +     return 0;
> > +}
> > +
> > +/*
> > +****************************************************************
> > *******
> > +** */
> > +/* Common function for all enqueue and dequeue ops */ static inline
> > +void ark_bb_enqueue_desc_fill(struct ark_bbdev_queue *q,
> > +                      struct rte_mbuf *mbuf,
> > +                      uint16_t offset, /* Extra offset */
> > +                      uint8_t  flags,
> > +                      uint32_t *meta,
> > +                      uint8_t  meta_cnt /* 0, 1 or 2 */
> > +                      )
> > +{
> > +     union ark_tx_meta *tx_meta;
> > +     int32_t tx_idx;
> > +     uint8_t m;
> > +
> > +     /* Header */
> > +     tx_idx = q->tx_prod_index & q->tx_queue_mask;
> > +     tx_meta = &q->tx_meta_q[tx_idx];
> > +     tx_meta->data_len = rte_pktmbuf_data_len(mbuf) - offset;
> > +     tx_meta->flags = flags;
> > +     tx_meta->meta_cnt = meta_cnt;
> > +     tx_meta->user1 = *meta++;
> > +     q->tx_prod_index++;
> > +
> > +     for (m = 0; m < meta_cnt; m++) {
> > +             tx_idx = q->tx_prod_index & q->tx_queue_mask;
> > +             tx_meta = &q->tx_meta_q[tx_idx];
> > +             tx_meta->usermeta0 = *meta++;
> > +             tx_meta->usermeta1 = *meta++;
> > +             q->tx_prod_index++;
> > +     }
> > +
> > +     tx_idx = q->tx_prod_index & q->tx_queue_mask;
> > +     tx_meta = &q->tx_meta_q[tx_idx];
> > +     tx_meta->physaddr = rte_mbuf_data_iova(mbuf) + offset;
> > +     q->tx_prod_index++;
> > +}
> > +
> > +static inline void
> > +ark_bb_enqueue_segmented_pkt(struct ark_bbdev_queue *q,
> > +                          struct rte_mbuf *mbuf,
> > +                          uint16_t offset,
> > +                          uint32_t *meta, uint8_t meta_cnt) {
> > +     struct rte_mbuf *next;
> > +     uint8_t flags = ARK_DDM_SOP;
> > +
> > +     while (mbuf != NULL) {
> > +             next = mbuf->next;
> > +             flags |= (next == NULL) ? ARK_DDM_EOP : 0;
> > +
> > +             ark_bb_enqueue_desc_fill(q, mbuf, offset, flags,
> > +                                      meta, meta_cnt);
> > +
> > +             flags &= ~ARK_DDM_SOP;  /* drop SOP flags */
> > +             meta_cnt = 0;
> > +             offset = 0;
> > +
> > +             mbuf = next;
> > +     }
> > +}
> > +
> > +static inline int
> > +ark_bb_enqueue_common(struct ark_bbdev_queue *q,
> > +                   struct rte_mbuf *m_in, struct rte_mbuf *m_out,
> > +                   uint16_t offset,
> > +                   uint32_t *meta, uint8_t meta_cnt) {
> > +     int32_t free_queue_space;
> > +     int32_t rx_idx;
> > +
> > +     /* TX side limit */
> > +     free_queue_space = q->tx_queue_mask -
> > +             (q->tx_prod_index - q->tx_free_index);
> > +     if (unlikely(free_queue_space < (2 + (2 * m_in->nb_segs))))
> > +             return 1;
> > +
> > +     /* RX side limit */
> > +     free_queue_space = q->rx_queue_mask -
> > +             (q->rx_seed_index - q->rx_cons_index);
> > +     if (unlikely(free_queue_space < m_out->nb_segs))
> > +             return 1;
> > +
> > +     if (unlikely(m_in->nb_segs > 1))
> > +             ark_bb_enqueue_segmented_pkt(q, m_in, offset, meta,
> > meta_cnt);
> > +     else
> > +             ark_bb_enqueue_desc_fill(q, m_in, offset,
> > +                                      ARK_DDM_SOP | ARK_DDM_EOP,
> > +                                      meta, meta_cnt);
> > +
> > +     /* We assume that the return mubf has exactly enough segments for
> > +      * return data, which is 2048 bytes per segment.
> > +      */
> > +     do {
> > +             rx_idx = q->rx_seed_index & q->rx_queue_mask;
> > +             q->rx_paddress_q[rx_idx] = m_out->buf_iova;
> > +             q->rx_seed_index++;
> > +             m_out = m_out->next;
> > +     } while (m_out);
> > +
> > +     return 0;
> > +}
> > +
> > +static inline void
> > +ark_bb_enqueue_finalize(struct rte_bbdev_queue_data *q_data,
> > +                     struct ark_bbdev_queue *q,
> > +                     void **ops,
> > +                     uint16_t nb_ops, uint16_t nb)
> > +{
> > +     /* BBDEV global stats */
> > +     /* These are not really errors, not sure why bbdev counts these. */
> > +     q_data->queue_stats.enqueue_err_count += nb_ops - nb;
> > +     q_data->queue_stats.enqueued_count += nb;
> > +
> > +     /* Notify HW that  */
> > +     if (unlikely(nb == 0))
> > +             return;
> > +
> > +     ark_mpu_set_producer(q->tx_mpu, q->tx_prod_index);
> > +     ark_mpu_set_producer(q->rx_mpu, q->rx_seed_index);
> > +
> > +     /* Queue info for dequeue-side processing */
> > +     rte_ring_enqueue_burst(q->active_ops,
> > +                            (void **)ops, nb, NULL);
> > +}
> > +
> > +static int
> > +ark_bb_dequeue_segmented(struct rte_mbuf *mbuf0,
> > +                      int32_t *prx_cons_index,
> > +                      uint16_t pkt_len
> > +                      )
> > +{
> > +     struct rte_mbuf *mbuf;
> > +     uint16_t data_len;
> > +     uint16_t remaining;
> > +     uint16_t segments = 1;
> > +
> > +     data_len = RTE_MIN(pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
> > +     remaining = pkt_len - data_len;
> > +
> > +     mbuf = mbuf0;
> > +     mbuf0->data_len = data_len;
> > +     while (remaining) {
> > +             segments += 1;
> > +             mbuf = mbuf->next;
> > +             if (unlikely(mbuf == 0)) {
> > +                     ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with
> > "
> > +                                   "at least %d segments for dequeue "
> > +                                   "of packet length %d",
> > +                                   segments, pkt_len);
> > +                     return 1;
> > +             }
> > +
> > +             data_len = RTE_MIN(remaining,
> > +                                RTE_MBUF_DEFAULT_DATAROOM);
> > +             remaining -= data_len;
> > +
> > +             mbuf->data_len = data_len;
> > +             *prx_cons_index += 1;
> > +     }
> > +
> > +     if (mbuf->next != 0) {
> > +             ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with "
> > +                           "at exactly %d segments for dequeue "
> > +                           "of packet length %d. Found %d "
> > +                           "segments",
> > +                           segments, pkt_len, mbuf0->nb_segs);
> > +             return 1;
> > +     }
> > +     return 0;
> > +}
> > +
> > +/*
> > +****************************************************************
> > *******
> > +** */
> > +/* LDPC Decode ops */
> > +static int16_t
> > +ark_bb_enqueue_ldpc_dec_one_op(struct ark_bbdev_queue *q,
> > +                            struct rte_bbdev_dec_op *this_op) {
> > +     struct rte_bbdev_op_ldpc_dec *ldpc_dec_op = &this_op->ldpc_dec;
> > +     struct rte_mbuf *m_in = ldpc_dec_op->input.data;
> > +     struct rte_mbuf *m_out = ldpc_dec_op->hard_output.data;
> > +     uint16_t offset = ldpc_dec_op->input.offset;
> > +     uint32_t meta[5] = {0};
> > +     uint8_t meta_cnt = 0;
> > +
> > +     /* User's meta move from bbdev op to Arkville HW */
> > +     if (ark_bb_user_enqueue_ldpc_dec(this_op, meta, &meta_cnt)) {
> > +             ARK_BBDEV_LOG(ERR, "%s failed", __func__);
> > +             return 1;
> > +     }
> > +
> > +     return ark_bb_enqueue_common(q, m_in, m_out, offset, meta,
> > meta_cnt);
> > +}
> > +
> > +/* Enqueue LDPC Decode -- burst */
> > +static uint16_t
> > +ark_bb_enqueue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
> > +                         struct rte_bbdev_dec_op **ops, uint16_t
> nb_ops) {
> > +     struct ark_bbdev_queue *q = q_data->queue_private;
> > +     unsigned int max_enq;
> > +     uint16_t nb;
> > +
> > +     max_enq = rte_ring_free_count(q->active_ops);
> > +     max_enq = RTE_MIN(max_enq, nb_ops);
> > +     for (nb = 0; nb < max_enq; nb++) {
> > +             if (ark_bb_enqueue_ldpc_dec_one_op(q, ops[nb]))
> > +                     break;
> > +     }
> > +
> > +     ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);
> > +     return nb;
> > +}
> > +
> > +/*
> > +****************************************************************
> > *******
> > +** */
> > +/* Dequeue LDPC Decode -- burst */
> > +static uint16_t
> > +ark_bb_dequeue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
> > +                         struct rte_bbdev_dec_op **ops, uint16_t
> nb_ops) {
> > +     struct ark_bbdev_queue *q = q_data->queue_private;
> > +     struct rte_mbuf *mbuf;
> > +     struct rte_bbdev_dec_op *this_op;
> > +     struct ark_rx_meta *meta;
> > +     uint32_t *usermeta;
> > +
> > +     uint16_t nb = 0;
> > +     int32_t prod_index = q->rx_prod_index;
> > +     int32_t cons_index = q->rx_cons_index;
> > +
> > +     q->tx_free_index = q->tx_cons_index;
> > +
> > +     while ((prod_index - cons_index) > 0) {
> > +             if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {
> > +                     ARK_BBDEV_LOG(ERR, "%s data ready but no op!",
> > +                                   __func__);
> > +                     q_data->queue_stats.dequeue_err_count += 1;
> > +                     break;
> > +             }
> > +             ops[nb] = this_op;
> > +
> > +             mbuf = this_op->ldpc_dec.hard_output.data;
> > +
> > +             /* META DATA embedded in headroom */
> > +             meta = RTE_PTR_ADD(mbuf->buf_addr,
> > ARK_RX_META_OFFSET);
> > +
> > +             mbuf->pkt_len = meta->pkt_len;
> > +             mbuf->data_len = meta->pkt_len;
> > +
> > +             if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {
> > +                     if (ark_bb_dequeue_segmented(mbuf, &cons_index,
> > +                                                  meta->pkt_len))
> > +                             q_data->queue_stats.dequeue_err_count +=
> > 1;
> > +             } else if (mbuf->next != 0) {
> > +                     ARK_BBDEV_LOG(CRIT, "Expected mbuf with "
> > +                                   "at exactly 1 segments for dequeue "
> > +                                   "of packet length %d. Found %d "
> > +                                   "segments",
> > +                                   meta->pkt_len, mbuf->nb_segs);
> > +                     q_data->queue_stats.dequeue_err_count += 1;
> > +             }
> > +
> > +             usermeta = meta->user_meta;
> > +             /* User's meta move from Arkville HW to bbdev OP */
> > +             ark_bb_user_dequeue_ldpc_dec(this_op, usermeta);
> > +             nb++;
> > +             cons_index++;
> > +             if (nb >= nb_ops)
> > +                     break;
> > +     }
> > +
> > +     q->rx_cons_index = cons_index;
> > +
> > +     /* BBdev stats */
> > +     q_data->queue_stats.dequeued_count += nb;
> > +
> > +     return nb;
> > +}
> > +
> > +/***************************************************************
> > *******
> > +****/
> > +/* Enqueue LDPC Encode */
> > +static int16_t
> > +ark_bb_enqueue_ldpc_enc_one_op(struct ark_bbdev_queue *q,
> > +                            struct rte_bbdev_enc_op *this_op) {
> > +     struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &this_op->ldpc_enc;
> > +     struct rte_mbuf *m_in = ldpc_enc_op->input.data;
> > +     struct rte_mbuf *m_out = ldpc_enc_op->output.data;
> > +     uint16_t offset = ldpc_enc_op->input.offset;
> > +     uint32_t meta[5] = {0};
> > +     uint8_t meta_cnt = 0;
> > +
> > +     /* User's meta move from bbdev op to Arkville HW */
> > +     if (ark_bb_user_enqueue_ldpc_enc(this_op, meta, &meta_cnt)) {
> > +             ARK_BBDEV_LOG(ERR, "%s failed", __func__);
> > +             return 1;
> > +     }
> > +
> > +     return ark_bb_enqueue_common(q, m_in, m_out, offset, meta,
> > meta_cnt);
> > +}
> > +
> > +/* Enqueue LDPC Encode -- burst */
> > +static uint16_t
> > +ark_bb_enqueue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
> > +                         struct rte_bbdev_enc_op **ops, uint16_t
> nb_ops) {
> > +     struct ark_bbdev_queue *q = q_data->queue_private;
> > +     unsigned int max_enq;
> > +     uint16_t nb;
> > +
> > +     max_enq = rte_ring_free_count(q->active_ops);
> > +     max_enq = RTE_MIN(max_enq, nb_ops);
> > +     for (nb = 0; nb < max_enq; nb++) {
> > +             if (ark_bb_enqueue_ldpc_enc_one_op(q, ops[nb]))
> > +                     break;
> > +     }
> > +
> > +     ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);
> > +     return nb;
> > +}
> > +
> > +/* Dequeue LDPC Encode -- burst */
> > +static uint16_t
> > +ark_bb_dequeue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
> > +                         struct rte_bbdev_enc_op **ops, uint16_t
> nb_ops) {
> > +     struct ark_bbdev_queue *q = q_data->queue_private;
> > +     struct rte_mbuf *mbuf;
> > +     struct rte_bbdev_enc_op *this_op;
> > +     struct ark_rx_meta *meta;
> > +     uint32_t *usermeta;
> > +
> > +     uint16_t nb = 0;
> > +     int32_t prod_index = q->rx_prod_index;
> > +     int32_t cons_index = q->rx_cons_index;
> > +
> > +     q->tx_free_index = q->tx_cons_index;
> > +
> > +     while ((prod_index - cons_index) > 0) {
> > +             if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {
> > +                     ARK_BBDEV_LOG(ERR, "%s data ready but no op!",
> > +                                   __func__);
> > +                     q_data->queue_stats.dequeue_err_count += 1;
> > +                     break;
> > +             }
> > +             ops[nb] = this_op;
> > +
> > +             mbuf = this_op->ldpc_enc.output.data;
> > +
> > +             /* META DATA embedded in headroom */
> > +             meta = RTE_PTR_ADD(mbuf->buf_addr,
> > ARK_RX_META_OFFSET);
> > +
> > +             mbuf->pkt_len = meta->pkt_len;
> > +             mbuf->data_len = meta->pkt_len;
> > +             usermeta = meta->user_meta;
> > +
> > +             if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {
> > +                     if (ark_bb_dequeue_segmented(mbuf, &cons_index,
> > +                                                  meta->pkt_len))
> > +                             q_data->queue_stats.dequeue_err_count +=
> > 1;
> > +             } else if (mbuf->next != 0) {
> > +                     ARK_BBDEV_LOG(CRIT, "Expected mbuf with "
> > +                                   "at exactly 1 segments for dequeue "
> > +                                   "of packet length %d. Found %d "
> > +                                   "segments",
> > +                                   meta->pkt_len, mbuf->nb_segs);
> > +                     q_data->queue_stats.dequeue_err_count += 1;
> > +             }
> > +
> > +             /* User's meta move from Arkville HW to bbdev OP */
> > +             ark_bb_user_dequeue_ldpc_enc(this_op, usermeta);
> > +             nb++;
> > +             cons_index++;
> > +             if (nb >= nb_ops)
> > +                     break;
> > +     }
> > +
> > +     q->rx_cons_index = cons_index;
> > +
> > +     /* BBdev stats */
> > +     q_data->queue_stats.dequeued_count += nb;
> > +
> > +     return nb;
> > +}
> > +
> > +/***************************************************************
> > *******
> > +****/
> > +/*
> > + *Initial device hardware configuration when device is opened
> > + * setup the DDM, and UDM; called once per PCIE device  */ static int
> > +ark_bb_config_device(struct ark_bbdevice *ark_bb) {
> > +     uint16_t num_q, i;
> > +     struct ark_mpu_t *mpu;
> > +
> > +     /*
> > +      * Make sure that the packet director, generator and checker are
> in a
> > +      * known state
> > +      */
> > +     ark_bb->start_pg = 0;
> > +     ark_bb->pg = ark_pktgen_init(ark_bb->pktgen.v, 0, 1);
> > +     if (ark_bb->pg == NULL)
> > +             return -1;
> > +     ark_pktgen_reset(ark_bb->pg);
> > +     ark_bb->pc = ark_pktchkr_init(ark_bb->pktchkr.v, 0, 1);
> > +     if (ark_bb->pc == NULL)
> > +             return -1;
> > +     ark_pktchkr_stop(ark_bb->pc);
> > +     ark_bb->pd = ark_pktdir_init(ark_bb->pktdir.v);
> > +     if (ark_bb->pd == NULL)
> > +             return -1;
> > +
> > +     /* Verify HW */
> > +     if (ark_udm_verify(ark_bb->udm.v))
> > +             return -1;
> > +     if (ark_ddm_verify(ark_bb->ddm.v))
> > +             return -1;
> > +
> > +     /* UDM */
> > +     if (ark_udm_reset(ark_bb->udm.v)) {
> > +             ARK_BBDEV_LOG(ERR, "Unable to stop and reset UDM");
> > +             return -1;
> > +     }
> > +     /* Keep in reset until the MPU are cleared */
> > +
> > +     /* MPU reset */
> > +     mpu = ark_bb->mpurx.v;
> > +     num_q = ark_api_num_queues(mpu);
> > +     ark_bb->max_nb_queues = num_q;
> > +
> > +     for (i = 0; i < num_q; i++) {
> > +             ark_mpu_reset(mpu);
> > +             mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> > +     }
> > +
> > +     /* Only 1 queue supported in the udm */
> > +     ark_udm_stop(ark_bb->udm.v, 0);
> > +     ark_udm_configure(ark_bb->udm.v,
> > +                       RTE_PKTMBUF_HEADROOM,
> > +                       bbdev->data->queues[q_id]->dataroom,
> > +                       ARK_RX_WRITE_TIME_NS);
> > +
> > +
> > +     ark_udm_stats_reset(ark_bb->udm.v);
> > +     ark_udm_stop(ark_bb->udm.v, 0);
> > +
> > +     /* TX -- DDM */
> > +     if (ark_ddm_stop(ark_bb->ddm.v, 1))
> > +             ARK_BBDEV_LOG(ERR, "Unable to stop DDM");
> > +
> > +     mpu = ark_bb->mputx.v;
> > +     num_q = ark_api_num_queues(mpu);
> > +     for (i = 0; i < num_q; i++) {
> > +             ark_mpu_reset(mpu);
> > +             mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> > +     }
> > +
> > +     ark_ddm_reset(ark_bb->ddm.v);
> > +     ark_ddm_stats_reset(ark_bb->ddm.v);
> > +
> > +     ark_ddm_stop(ark_bb->ddm.v, 0);
> > +     if (ark_bb->rqpacing)
> > +             ark_rqp_stats_reset(ark_bb->rqpacing);
> > +
> > +     ARK_BBDEV_LOG(INFO, "packet director set to 0x%x", ark_bb-
> > >pkt_dir_v);
> > +     ark_pktdir_setup(ark_bb->pd, ark_bb->pkt_dir_v);
> > +
> > +     if (ark_bb->pkt_gen_args[0]) {
> > +             ARK_BBDEV_LOG(INFO, "Setting up the packet generator");
> > +             ark_pktgen_parse(ark_bb->pkt_gen_args);
> > +             ark_pktgen_reset(ark_bb->pg);
> > +             ark_pktgen_setup(ark_bb->pg);
> > +             ark_bb->start_pg = 1;
> > +     }
> > +
> > +     return 0;
> > +}
> > +
> > +static int
> > +ark_bbdev_init(struct rte_bbdev *bbdev, struct rte_pci_driver *pci_drv)
> > +{
> > +     struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> > +     struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(bbdev->device);
> > +     bool rqpacing = false;
> > +     int p;
> > +
> > +     RTE_SET_USED(pci_drv);
> > +
> > +     ark_bb->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
> > +     ark_bb->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;
> > +
> > +     ark_bb->sysctrl.v  = (void *)&ark_bb->bar0[ARK_SYSCTRL_BASE];
> > +     ark_bb->mpurx.v  = (void *)&ark_bb->bar0[ARK_MPU_RX_BASE];
> > +     ark_bb->udm.v  = (void *)&ark_bb->bar0[ARK_UDM_BASE];
> > +     ark_bb->mputx.v  = (void *)&ark_bb->bar0[ARK_MPU_TX_BASE];
> > +     ark_bb->ddm.v  = (void *)&ark_bb->bar0[ARK_DDM_BASE];
> > +     ark_bb->pktdir.v  = (void *)&ark_bb->bar0[ARK_PKTDIR_BASE];
> > +     ark_bb->pktgen.v  = (void *)&ark_bb->bar0[ARK_PKTGEN_BASE];
> > +     ark_bb->pktchkr.v  = (void *)&ark_bb->bar0[ARK_PKTCHKR_BASE];
> > +
> > +     p = 0;
> > +     while (ark_device_caps[p].device_id != 0) {
> > +             if (pci_dev->id.device_id == ark_device_caps[p].device_id)
> {
> > +                     rqpacing = ark_device_caps[p].caps.rqpacing;
> > +                     break;
> > +             }
> > +             p++;
> > +     }
> > +
> > +     if (rqpacing)
> > +             ark_bb->rqpacing =
> > +                     (struct ark_rqpace_t *)(ark_bb->bar0 +
> > ARK_RCPACING_BASE);
> > +     else
> > +             ark_bb->rqpacing = NULL;
> > +
> > +     ark_bb->started = 0;
> > +
> > +     ARK_BBDEV_LOG(INFO, "Sys Ctrl Const = 0x%x  HW Commit_ID:
> > %08x",
> > +                   ark_bb->sysctrl.t32[4],
> > +                   rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));
> > +     ARK_BBDEV_LOG(INFO, "Arkville HW Commit_ID: %08x",
> > +                 rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));
> > +
> > +     /* If HW sanity test fails, return an error */
> > +     if (ark_bb->sysctrl.t32[4] != 0xcafef00d) {
> > +             ARK_BBDEV_LOG(ERR,
> > +                           "HW Sanity test has failed, expected
> constant"
> > +                           " 0x%x, read 0x%x (%s)",
> > +                           0xcafef00d,
> > +                           ark_bb->sysctrl.t32[4], __func__);
> > +             return -1;
> > +     }
> > +
> > +     return ark_bb_config_device(ark_bb);
> > +}
> > +
> > +static int
> > +ark_bbdev_uninit(struct rte_bbdev *bbdev) {
> > +     struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> > +
> > +     if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> > +             return 0;
> > +
> > +     ark_pktgen_uninit(ark_bb->pg);
> > +     ark_pktchkr_uninit(ark_bb->pc);
> > +
> > +     return 0;
> > +}
> > +
> > +static int
> > +ark_bbdev_probe(struct rte_pci_driver *pci_drv,
> > +             struct rte_pci_device *pci_dev)
> > +{
> > +     struct rte_bbdev *bbdev = NULL;
> > +     char dev_name[RTE_BBDEV_NAME_MAX_LEN];
> > +     struct ark_bbdevice *ark_bb;
> > +
> > +     if (pci_dev == NULL)
> > +             return -EINVAL;
> > +
> > +     rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
> > +
> > +     /* Allocate memory to be used privately by drivers */
> > +     bbdev = rte_bbdev_allocate(pci_dev->device.name);
> > +     if (bbdev == NULL)
> > +             return -ENODEV;
> > +
> > +     /* allocate device private memory */
> > +     bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
> > +                     sizeof(struct ark_bbdevice),
> > +                     RTE_CACHE_LINE_SIZE,
> > +                     pci_dev->device.numa_node);
> > +
> > +     if (bbdev->data->dev_private == NULL) {
> > +             ARK_BBDEV_LOG(CRIT,
> > +                             "Allocate of %zu bytes for device \"%s\"
> > failed",
> > +                             sizeof(struct ark_bbdevice), dev_name);
> > +                             rte_bbdev_release(bbdev);
> > +                     return -ENOMEM;
> > +     }
> > +     ark_bb = bbdev->data->dev_private;
> > +     /* Initialize ark_bb */
> > +     ark_bb->pkt_dir_v = 0x00110110;
> > +
> > +     /* Fill HW specific part of device structure */
> > +     bbdev->device = &pci_dev->device;
> > +     bbdev->intr_handle = NULL;
> > +     bbdev->data->socket_id = pci_dev->device.numa_node;
> > +     bbdev->dev_ops = &ark_bbdev_pmd_ops;
> > +     if (pci_dev->device.devargs)
> > +             parse_ark_bbdev_params(pci_dev->device.devargs->args,
> > ark_bb);
> > +
> > +
> > +     /* Device specific initialization */
> > +     if (ark_bbdev_init(bbdev, pci_drv))
> > +             return -EIO;
> > +     if (ark_bbdev_start(bbdev))
> > +             return -EIO;
> > +
> > +     /* Core operations LDPC encode amd decode */
> > +     bbdev->enqueue_ldpc_enc_ops = ark_bb_enqueue_ldpc_enc_ops;
> > +     bbdev->dequeue_ldpc_enc_ops = ark_bb_dequeue_ldpc_enc_ops;
> > +     bbdev->enqueue_ldpc_dec_ops = ark_bb_enqueue_ldpc_dec_ops;
> > +     bbdev->dequeue_ldpc_dec_ops = ark_bb_dequeue_ldpc_dec_ops;
> > +
> > +     ARK_BBDEV_LOG(DEBUG, "bbdev id = %u [%s]",
> > +                   bbdev->data->dev_id, dev_name);
> > +
> > +     return 0;
> > +}
> > +
> > +/* Uninitialize device */
> > +static int
> > +ark_bbdev_remove(struct rte_pci_device *pci_dev) {
> > +     struct rte_bbdev *bbdev;
> > +     int ret;
> > +
> > +     if (pci_dev == NULL)
> > +             return -EINVAL;
> > +
> > +     /* Find device */
> > +     bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
> > +     if (bbdev == NULL) {
> > +             ARK_BBDEV_LOG(CRIT,
> > +                             "Couldn't find HW dev \"%s\" to
> Uninitialize
> > it",
> > +                             pci_dev->device.name);
> > +             return -ENODEV;
> > +     }
> > +
> > +     /* Arkville device close */
> > +     ark_bbdev_uninit(bbdev);
> > +     rte_free(bbdev->data->dev_private);
> > +
> > +     /* Close device */
> > +     ret = rte_bbdev_close(bbdev->data->dev_id);
> > +     if (ret < 0)
> > +             ARK_BBDEV_LOG(ERR,
> > +                             "Device %i failed to close during remove:
> %i",
> > +                             bbdev->data->dev_id, ret);
> > +
> > +     return rte_bbdev_release(bbdev);
> > +}
> > +
> > +/* Operation for the PMD */
> > +static const struct rte_bbdev_ops ark_bbdev_pmd_ops = {
> > +     .info_get = ark_bbdev_info_get,
> > +     .start = ark_bbdev_start,
> > +     .stop = ark_bbdev_stop,
> > +     .queue_setup = ark_bb_q_setup,
> > +     .queue_release = ark_bb_q_release,
> > +     .queue_start = ark_bb_q_start,
> > +     .queue_stop = ark_bb_q_stop,
> > +};
> > +
> > +
> > +
> > +static struct rte_pci_driver ark_bbdev_pmd_drv = {
> > +     .probe = ark_bbdev_probe,
> > +     .remove = ark_bbdev_remove,
> > +     .id_table = pci_id_ark,
> > +     .drv_flags = RTE_PCI_DRV_NEED_MAPPING
> > +};
> > +
> > +RTE_PMD_REGISTER_PCI(DRIVER_NAME, ark_bbdev_pmd_drv);
> > +RTE_PMD_REGISTER_PCI_TABLE(DRIVER_NAME, pci_id_ark);
> > +RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
> > +                           ARK_BBDEV_PKTGEN_ARG "=<filename> "
> > +                           ARK_BBDEV_PKTCHKR_ARG "=<filename> "
> > +                           ARK_BBDEV_PKTDIR_ARG "=<bitmap>"
> > +                           );
> > diff --git a/drivers/baseband/ark/ark_bbdev_common.c
> > b/drivers/baseband/ark/ark_bbdev_common.c
> > new file mode 100644
> > index 0000000000..6ef0f43654
> > --- /dev/null
> > +++ b/drivers/baseband/ark/ark_bbdev_common.c
> > @@ -0,0 +1,125 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2016-2021 Atomic Rules LLC  */
> > +
> > +#include <string.h>
> > +
> > +#include <rte_kvargs.h>
> > +#include <rte_log.h>
> > +
> > +#include "ark_bbdev_common.h"
> > +
> > +static const char * const ark_bbdev_valid_params[] = {
> > +     ARK_BBDEV_PKTDIR_ARG,
> > +     ARK_BBDEV_PKTGEN_ARG,
> > +     ARK_BBDEV_PKTCHKR_ARG,
> > +     NULL
> > +};
> > +
> > +/* Parse 16-bit integer from string argument */ static inline int
> > +parse_u16_arg(const char *key, const char *value, void *extra_args) {
> > +     uint16_t *u16 = extra_args;
> > +     unsigned int long result;
> > +
> > +     if ((value == NULL) || (extra_args == NULL))
> > +             return -EINVAL;
> > +     errno = 0;
> > +     result = strtoul(value, NULL, 0);
> > +     if ((result >= (1 << 16)) || (errno != 0)) {
> > +             ARK_BBDEV_LOG(ERR, "Invalid value %" PRIu64 " for %s",
> > result, key);
> > +             return -ERANGE;
> > +     }
> > +     *u16 = (uint16_t)result;
> > +     return 0;
> > +}
> > +
> > +static inline int
> > +process_pktdir_arg(const char *key, const char *value,
> > +                void *extra_args)
> > +{
> > +     uint32_t *u32 = extra_args;
> > +     ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);
> > +
> > +     *u32 = strtol(value, NULL, 0);
> > +     ARK_BBDEV_LOG(DEBUG, "pkt_dir_v = 0x%x", *u32);
> > +     return 0;
> > +}
> > +
> > +static inline int
> > +process_file_args(const char *key, const char *value, void *extra_args)
> > +{
> > +     char *args = (char *)extra_args;
> > +     ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);
> > +
> > +     /* Open the configuration file */
> > +     FILE *file = fopen(value, "r");
> > +     char line[ARK_MAX_ARG_LEN];
> > +     int  size = 0;
> > +     int first = 1;
> > +
> > +     if (file == NULL) {
> > +             ARK_BBDEV_LOG(ERR, "Unable to open config file %s",
> > +                           value);
> > +             return -1;
> > +     }
> > +
> > +     while (fgets(line, sizeof(line), file)) {
> > +             size += strlen(line);
> > +             if (size >= ARK_MAX_ARG_LEN) {
> > +                     ARK_BBDEV_LOG(ERR, "Unable to parse file %s args,
> > "
> > +                                   "parameter list is too long", value);
> > +                     fclose(file);
> > +                     return -1;
> > +             }
> > +             if (first) {
> > +                     strncpy(args, line, ARK_MAX_ARG_LEN);
> > +                     first = 0;
> > +             } else {
> > +                     strncat(args, line, ARK_MAX_ARG_LEN);
> > +             }
> > +     }
> > +     ARK_BBDEV_LOG(DEBUG, "file = %s", args);
> > +     fclose(file);
> > +     return 0;
> > +}
> > +
> > +
> > +/* Parse parameters used to create device */ int
> > +parse_ark_bbdev_params(const char *input_args,
> > +                    struct ark_bbdevice *ark_bb)
> > +{
> > +     struct rte_kvargs *kvlist = NULL;
> > +     int ret = 0;
> > +
> > +     if (ark_bb == NULL)
> > +             return -EINVAL;
> > +     if (input_args == NULL)
> > +             return ret;
> > +
> > +     kvlist = rte_kvargs_parse(input_args, ark_bbdev_valid_params);
> > +     if (kvlist == NULL)
> > +             return -EFAULT;
> > +
> > +     ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTDIR_ARG,
> > +                               &process_pktdir_arg, &ark_bb->pkt_dir_v);
> > +     if (ret < 0)
> > +             goto exit;
> > +
> > +     ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTGEN_ARG,
> > +                              &process_file_args, &ark_bb-
> > >pkt_gen_args);
> > +     if (ret < 0)
> > +             goto exit;
> > +
> > +     ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTCHKR_ARG,
> > +                              &process_file_args, &ark_bb-
> > >pkt_chkr_args);
> > +     if (ret < 0)
> > +             goto exit;
> > +
> > + exit:
> > +     if (kvlist)
> > +             rte_kvargs_free(kvlist);
> > +     return ret;
> > +}
> > diff --git a/drivers/baseband/ark/ark_bbdev_common.h
> > b/drivers/baseband/ark/ark_bbdev_common.h
> > new file mode 100644
> > index 0000000000..670e7e86d6
> > --- /dev/null
> > +++ b/drivers/baseband/ark/ark_bbdev_common.h
> > @@ -0,0 +1,92 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2016-2021 Atomic Rules LLC  */
> > +
> > +#ifndef _ARK_BBDEV_COMMON_H_
> > +#define _ARK_BBDEV_COMMON_H_
> > +
> > +#include "ark_pktchkr.h"
> > +#include "ark_pktdir.h"
> > +#include "ark_pktgen.h"
> > +
> > +#define ARK_MAX_ARG_LEN 256
> > +
> > +/* Acceptable params for ark BBDEV devices */
> > +/*
> > + * The packet generator is a functional block used to generate packet
> > + * patterns for testing.  It is not intended for nominal use.
> > + */
> > +#define ARK_BBDEV_PKTGEN_ARG "Pkt_gen"
> > +
> > +/*
> > + * The packet checker is a functional block used to verify packet
> > + * patterns for testing.  It is not intended for nominal use.
> > + */
> > +#define ARK_BBDEV_PKTCHKR_ARG "Pkt_chkr"
> > +
> > +/*
> > + * The packet director is used to select the internal ingress and
> > + * egress packets paths during testing.  It is not intended for
> > + * nominal use.
> > + */
> > +#define ARK_BBDEV_PKTDIR_ARG "Pkt_dir"
> > +
> > +
> > +#define def_ptr(type, name) \
> > +     union type {               \
> > +             uint64_t *t64;     \
> > +             uint32_t *t32;     \
> > +             uint16_t *t16;     \
> > +             uint8_t  *t8;      \
> > +             void     *v;       \
> > +     } name
> > +
> > +/*
> > + * Structure to store private data for each PF/VF instance.
> > + */
> > +struct ark_bbdevice {
> > +     /* Our Bar 0 */
> > +     uint8_t *bar0;
> > +
> > +     /* Application Bar needed for extensions */
> > +     uint8_t *a_bar;
> > +
> > +     /* Arkville hardware block offsets */
> > +     def_ptr(sys_ctrl, sysctrl);
> > +     def_ptr(pkt_gen, pktgen);
> > +     def_ptr(mpu_rx, mpurx);
> > +     def_ptr(UDM, udm);
> > +     def_ptr(mpu_tx, mputx);
> > +     def_ptr(DDM, ddm);
> > +     def_ptr(pkt_dir, pktdir);
> > +     def_ptr(pkt_chkr, pktchkr);
> > +     struct ark_rqpace_t *rqpacing;
> > +
> > +     /* Pointers to packet generator and checker */
> > +     int start_pg;
> > +     ark_pkt_gen_t pg;
> > +     ark_pkt_chkr_t pc;
> > +     ark_pkt_dir_t pd;
> > +
> > +     /* Packet generator/checker args */
> > +     char pkt_gen_args[ARK_MAX_ARG_LEN];
> > +     char pkt_chkr_args[ARK_MAX_ARG_LEN];
> > +     uint32_t pkt_dir_v;
> > +
> > +     int started;
> > +     unsigned int max_nb_queues;  /**< Max number of queues */
> > +
> > +};
> > +
> > +
> > +/* Log message for PMD */
> > +extern int ark_bbdev_logtype;
> > +
> > +/* Helper macro for logging */
> > +#define ARK_BBDEV_LOG(level, fmt, ...) \
> > +     rte_log(RTE_LOG_ ## level, ark_bbdev_logtype, \
> > +             "ARK_BBD: " fmt "\n", ##__VA_ARGS__)
> > +
> > +int parse_ark_bbdev_params(const char *argv, struct ark_bbdevice *dev);
> > +
> > +#endif
> > diff --git a/drivers/baseband/ark/ark_bbdev_custom.c
> > b/drivers/baseband/ark/ark_bbdev_custom.c
> > new file mode 100644
> > index 0000000000..6b1553abe1
> > --- /dev/null
> > +++ b/drivers/baseband/ark/ark_bbdev_custom.c
> > @@ -0,0 +1,201 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2016-2021 Atomic Rules LLC  */
> > +
> > +#include <rte_bbdev.h>
> > +#include <rte_bbdev_pmd.h>
> > +
> > +#include <rte_mbuf.h>
> > +#include <rte_hexdump.h>     /* For debug */
> > +
> > +
> > +#include "ark_bbdev_common.h"
> > +#include "ark_bbdev_custom.h"
> > +
> > +/* It is expected that functions in this file will be modified based on
> > + * specifics of the FPGA hardware beyond the core Arkville
> > + * components.
> > + */
> > +
> > +/* bytes must be in the range of 0 to 20 */
> > +static inline
> > +uint8_t ark_bb_cvt_bytes_meta_cnt(size_t bytes) {
> > +     return (bytes + 3) / 8;
> > +}
> > +
> > +void
> > +ark_bbdev_info_get(struct rte_bbdev *dev,
> > +                struct rte_bbdev_driver_info *dev_info) {
> > +     struct ark_bbdevice *ark_bb =  dev->data->dev_private;
> > +
> > +     static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
> > +             {
> > +                     .type = RTE_BBDEV_OP_LDPC_DEC,
> > +                     .cap.ldpc_dec = {
> > +                             .capability_flags =
> > +                                     RTE_BBDEV_LDPC_CRC_24B_ATTACH
> > |
> > +                                     RTE_BBDEV_LDPC_RATE_MATCH,
> > +                             .num_buffers_src =
> > +
> >       RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
> > +                             .num_buffers_hard_out =
> > +
> >       RTE_BBDEV_LDPC_MAX_CODE_BLOCKS
> > +                     }
> > +             },
> > +             {
> > +                     .type = RTE_BBDEV_OP_LDPC_ENC,
> > +                     .cap.ldpc_enc = {
> > +                             .capability_flags =
> > +                                     RTE_BBDEV_LDPC_CRC_24B_ATTACH
> > |
> > +                                     RTE_BBDEV_LDPC_RATE_MATCH,
> > +                             .num_buffers_src =
> > +
> >       RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
> > +                             .num_buffers_dst =
> > +
> >       RTE_BBDEV_LDPC_MAX_CODE_BLOCKS
> > +                     }
> > +             },
> > +             RTE_BBDEV_END_OF_CAPABILITIES_LIST(),
> > +     };
> > +
> > +     static struct rte_bbdev_queue_conf default_queue_conf = {
> > +             .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
> > +     };
> > +
> > +     default_queue_conf.socket = dev->data->socket_id;
> > +
> > +     dev_info->driver_name = RTE_STR(DRIVER_NAME);
> > +     dev_info->max_num_queues = ark_bb->max_nb_queues;
> > +     dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
> > +     dev_info->hardware_accelerated = true;
> > +     dev_info->max_dl_queue_priority = 0;
> > +     dev_info->max_ul_queue_priority = 0;
> > +     dev_info->default_queue_conf = default_queue_conf;
> > +     dev_info->capabilities = bbdev_capabilities;
> > +     dev_info->cpu_flag_reqs = NULL;
> > +     dev_info->min_alignment = 4;
> > +
> > +}
> > +
> > +/* Structure defining layout of the ldpc command struct */ struct
> > +ark_bb_ldpc_enc_meta {
> > +     uint16_t header;
> > +     uint8_t rv_index:2,
> > +             basegraph:1,
> > +             code_block_mode:1,
> > +             rfu_71_68:4;
> > +
> > +     uint8_t q_m;
> > +     uint32_t e_ea;
> > +     uint32_t eb;
> > +     uint8_t c;
> > +     uint8_t cab;
> > +     uint16_t n_cb;
> > +     uint16_t pad;
> > +     uint16_t trailer;
> > +} __rte_packed;
> > +
> > +/* The size must be less than 20 bytes */ static_assert(sizeof(struct
> > +ark_bb_ldpc_enc_meta) <= 20, "struct size");
> > +
> > +/* Custom operation on enqueue ldpc operation */
> > +/* Do these functions need a queue number? */
> > +/* Maximum of 20 bytes */
> > +int
> > +ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> > +                       uint32_t *meta, uint8_t *meta_cnt) {
> > +     struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &enc_op->ldpc_enc;
> > +     struct ark_bb_ldpc_enc_meta *src = (struct ark_bb_ldpc_enc_meta
> > +*)meta;
> > +
> > +     src->header = 0x4321;   /* For testings */
> > +     src->trailer = 0xFEDC;
> > +
> > +     src->rv_index = ldpc_enc_op->rv_index;
> > +     src->basegraph = ldpc_enc_op->basegraph;
> > +     src->code_block_mode = ldpc_enc_op->code_block_mode;
> > +
> > +     src->q_m = ldpc_enc_op->q_m;
> > +     src->e_ea = 0xABCD;
> > +     src->eb = ldpc_enc_op->tb_params.eb;
> > +     src->c = ldpc_enc_op->tb_params.c;
> > +     src->cab = ldpc_enc_op->tb_params.cab;
> > +
> > +     src->n_cb = 0;
> > +
> > +     meta[0] = 0x11111110;
> > +     meta[1] = 0x22222220;
> > +     meta[2] = 0x33333330;
> > +     meta[3] = 0x44444440;
> > +     meta[4] = 0x55555550;
> > +
> > +     *meta_cnt = ark_bb_cvt_bytes_meta_cnt(
> > +                     sizeof(struct ark_bb_ldpc_enc_meta));
> > +     return 0;
> > +}
> > +
> > +/* Custom operation on dequeue ldpc operation  */ int
> > +ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> > +                          const uint32_t *usermeta)
> > +{
> > +     static int dump;        /* = 0 */
> > +     /* Just compare with what was sent? */
> > +     uint32_t meta_in[5] = {0};
> > +     uint8_t  meta_cnt;
> > +
> > +     ark_bb_user_enqueue_ldpc_enc(enc_op, meta_in, &meta_cnt);
> > +     if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {
> > +             fprintf(stderr,
> > +                     "------------------------------------------\n");
> > +             rte_hexdump(stdout, "meta difference for lpdc_enc IN",
> > +                         meta_in, 20);
> > +             rte_hexdump(stdout, "meta difference for lpdc_enc OUT",
> > +                         usermeta, 20);
> > +     } else if (dump) {
> > +             rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);
> > +             dump--;
> > +     }
> > +
> > +     return 0;
> > +}
> > +
> > +
> > +/* Turbo op call backs for user meta data */ int
> > +ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> > +                              uint32_t *meta, uint8_t *meta_cnt) {
> > +     RTE_SET_USED(enc_op);
> > +     meta[0] = 0xF1111110;
> > +     meta[1] = 0xF2222220;
> > +     meta[2] = 0xF3333330;
> > +     meta[3] = 0xF4444440;
> > +     meta[4] = 0xF5555550;
> > +
> > +     *meta_cnt = ark_bb_cvt_bytes_meta_cnt(20);
> > +     return 0;
> > +}
> > +
> > +int ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> > +                              const uint32_t *usermeta)
> > +{
> > +     RTE_SET_USED(enc_op);
> > +     static int dump;        /* = 0 */
> > +     /* Just compare with what was sent? */
> > +     uint32_t meta_in[5] = {0};
> > +     uint8_t  meta_cnt;
> > +
> > +     ark_bb_user_enqueue_ldpc_dec(enc_op, meta_in, &meta_cnt);
> > +     if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {
> > +             fprintf(stderr,
> > +                     "------------------------------------------\n");
> > +             rte_hexdump(stdout, "meta difference for lpdc_enc IN",
> > +                         meta_in, 20);
> > +             rte_hexdump(stdout, "meta difference for lpdc_enc OUT",
> > +                         usermeta, 20);
> > +     } else if (dump) {
> > +             rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);
> > +             dump--;
> > +     }
> > +     return 0;
> > +}
> > diff --git a/drivers/baseband/ark/ark_bbdev_custom.h
> > b/drivers/baseband/ark/ark_bbdev_custom.h
> > new file mode 100644
> > index 0000000000..32a2ef6bb6
> > --- /dev/null
> > +++ b/drivers/baseband/ark/ark_bbdev_custom.h
> > @@ -0,0 +1,30 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2016-2021 Atomic Rules LLC  */
> > +
> > +#ifndef _ARK_BBDEV_CUSTOM_H_
> > +#define _ARK_BBDEV_CUSTOM_H_
> > +
> > +#include <stdint.h>
> > +
> > +/* Forward declarations */
> > +struct rte_bbdev;
> > +struct rte_bbdev_driver_info;
> > +struct rte_bbdev_enc_op;
> > +struct rte_bbdev_dec_op;
> > +struct rte_mbuf;
> > +
> > +void ark_bbdev_info_get(struct rte_bbdev *dev,
> > +                     struct rte_bbdev_driver_info *dev_info);
> > +
> > +int ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> > +                              uint32_t *meta, uint8_t *meta_cnt); int
> > +ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> > +                              const uint32_t *usermeta);
> > +
> > +int ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> > +                              uint32_t *meta, uint8_t *meta_cnt); int
> > +ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> > +                              const uint32_t *usermeta);
> > +
> > +#endif
> > diff --git a/drivers/baseband/ark/meson.build
> > b/drivers/baseband/ark/meson.build
> > new file mode 100644
> > index 0000000000..b876f05c6e
> > --- /dev/null
> > +++ b/drivers/baseband/ark/meson.build
> > @@ -0,0 +1,11 @@
> > +# SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2018 Luca
> > +Boccassi <bluca@debian.org>
> > +
> > +deps += ['common_ark', 'bbdev', 'bus_pci', 'pci', 'ring'] sources =
> > +files(
> > +     'ark_bbdev.c',
> > +     'ark_bbdev_common.c',
> > +     'ark_bbdev_custom.c'
> > +     )
> > +
> > +includes += include_directories('../../common/ark')
> > diff --git a/drivers/baseband/ark/version.map
> > b/drivers/baseband/ark/version.map
> > new file mode 100644
> > index 0000000000..4a76d1d52d
> > --- /dev/null
> > +++ b/drivers/baseband/ark/version.map
> > @@ -0,0 +1,3 @@
> > +DPDK_21 {
> > +     local: *;
> > +};
> > --
> > 2.25.1
>
>
diff mbox series

Patch

diff --git a/drivers/baseband/ark/ark_bbdev.c b/drivers/baseband/ark/ark_bbdev.c
new file mode 100644
index 0000000000..b23bbd44d1
--- /dev/null
+++ b/drivers/baseband/ark/ark_bbdev.c
@@ -0,0 +1,1064 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2021 Atomic Rules LLC
+ */
+
+#include "ark_common.h"
+#include "ark_bbdev_common.h"
+#include "ark_bbdev_custom.h"
+#include "ark_ddm.h"
+#include "ark_mpu.h"
+#include "ark_rqp.h"
+#include "ark_udm.h"
+
+#include <rte_bbdev.h>
+#include <rte_bbdev_pmd.h>
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_devargs.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+
+#include <unistd.h>
+
+#define DRIVER_NAME baseband_ark
+
+RTE_LOG_REGISTER_DEFAULT(ark_bbdev_logtype, DEBUG);
+
+#define ARK_SYSCTRL_BASE  0x0
+#define ARK_PKTGEN_BASE   0x10000
+#define ARK_MPU_RX_BASE   0x20000
+#define ARK_UDM_BASE      0x30000
+#define ARK_MPU_TX_BASE   0x40000
+#define ARK_DDM_BASE      0x60000
+#define ARK_PKTDIR_BASE   0xa0000
+#define ARK_PKTCHKR_BASE  0x90000
+#define ARK_RCPACING_BASE 0xb0000
+#define ARK_MPU_QOFFSET   0x00100
+
+#define BB_ARK_TX_Q_FACTOR 4
+
+/* TODO move to UDM, verify configuration */
+#define ARK_RX_META_SIZE 32
+#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)
+#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+
+static_assert(sizeof(struct ark_rx_meta) == ARK_RX_META_SIZE, "Unexpected struct size ark_rx_meta");
+static_assert(sizeof(union ark_tx_meta) == 8, "Unexpected struct size ark_tx_meta");
+
+static struct rte_pci_id pci_id_ark[] = {
+	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1015)},
+	{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1016)},
+	{.device_id = 0},
+};
+
+static const struct ark_dev_caps
+ark_device_caps[] = {
+		     SET_DEV_CAPS(0x1015, true),
+		     SET_DEV_CAPS(0x1016, true),
+		     {.device_id = 0,}
+};
+
+
+/* Forward declarations */
+static const struct rte_bbdev_ops ark_bbdev_pmd_ops;
+
+
+/* queue */
+struct ark_bbdev_queue {
+	struct rte_ring *active_ops;  /* Ring for processed packets */
+
+	/* RX components */
+	/* array of physical addresses of the mbuf data pointer */
+	rte_iova_t *rx_paddress_q;
+	struct ark_udm_t *udm;
+	struct ark_mpu_t *rx_mpu;
+
+	/* TX components */
+	union ark_tx_meta *tx_meta_q;
+	struct ark_mpu_t *tx_mpu;
+	struct ark_ddm_t *ddm;
+
+	/*  */
+	uint32_t tx_queue_mask;
+	uint32_t rx_queue_mask;
+
+	int32_t rx_seed_index;		/* step 1 set with empty mbuf */
+	int32_t rx_cons_index;		/* step 3 consumed by driver */
+
+	/* 3 indexes to the paired data rings. */
+	int32_t tx_prod_index;		/* where to put the next one */
+	int32_t tx_free_index;		/* local copy of tx_cons_index */
+
+	/* separate cache line -- written by FPGA -- RX announce */
+	RTE_MARKER cacheline1 __rte_cache_min_aligned;
+	volatile int32_t rx_prod_index; /* step 2 filled by FPGA */
+
+	/* Separate cache line -- written by FPGA -- RX completion */
+	RTE_MARKER cacheline2 __rte_cache_min_aligned;
+	volatile int32_t tx_cons_index; /* hw is done, can be freed */
+} __rte_cache_aligned;
+
+static int
+ark_bb_hw_q_setup(struct rte_bbdev *bbdev, uint16_t q_id, uint16_t queue_size)
+{
+	struct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;
+
+	rte_iova_t queue_base;
+	rte_iova_t phys_addr_q_base;
+	rte_iova_t phys_addr_prod_index;
+	rte_iova_t phys_addr_cons_index;
+
+	uint32_t write_interval_ns = 500; /* TODO this seems big */
+
+	if (ark_mpu_verify(q->rx_mpu, sizeof(rte_iova_t))) {
+		ARK_BBDEV_LOG(ERR, "Illegal hw/sw configuration RX queue");
+		return -1;
+	}
+	ARK_BBDEV_LOG(DEBUG, "ark_bb_q setup %u:%u",
+		      bbdev->data->dev_id, q_id);
+
+	/* RX MPU */
+	phys_addr_q_base = rte_malloc_virt2iova(q->rx_paddress_q);
+	/* Force TX mode on MPU to match bbdev behavior */
+	ark_mpu_configure(q->rx_mpu, phys_addr_q_base, queue_size, 1);
+	ark_mpu_reset_stats(q->rx_mpu);
+	ark_mpu_start(q->rx_mpu);
+
+	/* UDM */
+	queue_base = rte_malloc_virt2iova(q);
+	phys_addr_prod_index = queue_base +
+		offsetof(struct ark_bbdev_queue, rx_prod_index);
+	ark_udm_write_addr(q->udm, phys_addr_prod_index);
+	ark_udm_queue_enable(q->udm, 1);
+
+	/* TX MPU */
+	phys_addr_q_base = rte_malloc_virt2iova(q->tx_meta_q);
+	ark_mpu_configure(q->tx_mpu, phys_addr_q_base,
+			  BB_ARK_TX_Q_FACTOR * queue_size, 1);
+	ark_mpu_start(q->tx_mpu);
+
+	/* DDM */
+	phys_addr_cons_index = queue_base +
+		offsetof(struct ark_bbdev_queue, tx_cons_index);
+	ark_ddm_setup(q->ddm, phys_addr_cons_index, write_interval_ns);
+
+	return 0;
+}
+
+/* Setup a queue */
+static int
+ark_bb_q_setup(struct rte_bbdev *bbdev, uint16_t q_id,
+	       const struct rte_bbdev_queue_conf *queue_conf)
+{
+	struct ark_bbdev_queue *q;
+	struct ark_bbdevice *ark_bb =  bbdev->data->dev_private;
+
+	const uint32_t queue_size = queue_conf->queue_size;
+	const int socket_id = queue_conf->socket;
+	const uint64_t pg_sz = sysconf(_SC_PAGESIZE);
+	char ring_name[RTE_RING_NAMESIZE];
+
+	/* Configuration checks */
+	if (!rte_is_power_of_2(queue_size)) {
+		ARK_BBDEV_LOG(ERR,
+			      "Configuration queue size"
+			      " must be power of two %u",
+			      queue_size);
+		return -EINVAL;
+	}
+
+	if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
+		ARK_BBDEV_LOG(ERR,
+			      "Error: Ark bbdev requires head room > %d bytes (%s)",
+			      ARK_RX_META_SIZE, __func__);
+		return -EINVAL;
+	}
+
+	/* Allocate the queue data structure. */
+	q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
+			RTE_CACHE_LINE_SIZE, queue_conf->socket);
+	if (q == NULL) {
+		ARK_BBDEV_LOG(ERR, "Failed to allocate queue memory");
+		return -ENOMEM;
+	}
+	bbdev->data->queues[q_id].queue_private = q;
+
+	/* RING */
+	snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME) "%u:%u",
+		 bbdev->data->dev_id, q_id);
+	q->active_ops = rte_ring_create(ring_name,
+					queue_size,
+					queue_conf->socket,
+					RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (q->active_ops == NULL) {
+		ARK_BBDEV_LOG(ERR, "Failed to create ring");
+		goto free_all;
+	}
+
+	q->rx_queue_mask = queue_size - 1;
+	q->tx_queue_mask = (BB_ARK_TX_Q_FACTOR * queue_size) - 1;
+
+	/* Each mbuf requires 2 to 4 objects, factor by BB_ARK_TX_Q_FACTOR */
+	q->tx_meta_q =
+		rte_zmalloc_socket("Ark_bb_txqueue meta",
+				   queue_size * BB_ARK_TX_Q_FACTOR *
+				   sizeof(union ark_tx_meta),
+				   pg_sz,
+				   socket_id);
+
+	if (q->tx_meta_q == 0) {
+		ARK_BBDEV_LOG(ERR, "Failed to allocate "
+			      "queue memory in %s", __func__);
+		goto free_all;
+	}
+
+	q->ddm = RTE_PTR_ADD(ark_bb->ddm.v, q_id * ARK_DDM_QOFFSET);
+	q->tx_mpu = RTE_PTR_ADD(ark_bb->mputx.v, q_id * ARK_MPU_QOFFSET);
+
+	q->rx_paddress_q =
+		rte_zmalloc_socket("ark_bb_rx_paddress_q",
+				   queue_size * sizeof(rte_iova_t),
+				   pg_sz,
+				   socket_id);
+
+	if (q->rx_paddress_q == 0) {
+		ARK_BBDEV_LOG(ERR,
+			      "Failed to allocate queue memory in %s",
+			      __func__);
+		goto free_all;
+	}
+	q->udm = RTE_PTR_ADD(ark_bb->udm.v, q_id * ARK_UDM_QOFFSET);
+	q->rx_mpu = RTE_PTR_ADD(ark_bb->mpurx.v, q_id * ARK_MPU_QOFFSET);
+
+	/* Structure have been configured, set the hardware */
+	return ark_bb_hw_q_setup(bbdev, q_id, queue_size);
+
+free_all:
+	rte_free(q->tx_meta_q);
+	rte_free(q->rx_paddress_q);
+	rte_free(q);
+	return -EFAULT;
+}
+
+/* Release queue */
+static int
+ark_bb_q_release(struct rte_bbdev *bbdev, uint16_t q_id)
+{
+	struct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;
+
+	/* TODO Wait for ddm to send out all packets in flight,
+	 * Is this only called after q stop?
+	 */
+
+	ark_mpu_dump(q->rx_mpu, "rx_MPU release", q_id);
+	ark_mpu_dump(q->tx_mpu, "tx_MPU release", q_id);
+
+	rte_ring_free(q->active_ops);
+	rte_free(q->tx_meta_q);
+	rte_free(q->rx_paddress_q);
+	rte_free(q);
+	bbdev->data->queues[q_id].queue_private = NULL;
+
+	ARK_BBDEV_LOG(DEBUG, "released device queue %u:%u",
+		      bbdev->data->dev_id, q_id);
+	return 0;
+}
+
+static int
+ark_bbdev_start(struct rte_bbdev *bbdev)
+{
+	struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
+
+	ARK_BBDEV_LOG(DEBUG, "Starting device %u", bbdev->data->dev_id);
+	if (ark_bb->started)
+		return 0;
+
+	/* start UDM */
+	ark_udm_start(ark_bb->udm.v);
+
+	/* start DDM */
+	ark_ddm_start(ark_bb->ddm.v);
+
+	ark_bb->started = 1;
+
+	if (ark_bb->start_pg)
+		ark_pktchkr_run(ark_bb->pc);
+
+	if (ark_bb->start_pg) {
+		pthread_t thread;
+
+		/* Delay packet generator start allow the hardware to be ready
+		 * This is only used for sanity checking with internal generator
+		 */
+		if (pthread_create(&thread, NULL,
+				   ark_pktgen_delay_start, ark_bb->pg)) {
+			ARK_BBDEV_LOG(ERR, "Could not create pktgen "
+				    "starter thread");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static void
+ark_bbdev_stop(struct rte_bbdev *bbdev)
+{
+	struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
+	struct ark_mpu_t *mpu;
+	unsigned int i;
+	int status;
+
+	ARK_BBDEV_LOG(DEBUG, "Stopping device %u", bbdev->data->dev_id);
+
+	if (!ark_bb->started)
+		return;
+
+	/* Stop the packet generator */
+	if (ark_bb->start_pg)
+		ark_pktgen_pause(ark_bb->pg);
+
+	/* Stop DDM */
+	/* Wait up to 0.1 second.  each stop is up to 1000 * 10 useconds */
+	for (i = 0; i < 10; i++) {
+		status = ark_ddm_stop(ark_bb->ddm.v, 1);
+		if (status == 0)
+			break;
+	}
+	if (status || i != 0) {
+		ARK_BBDEV_LOG(ERR, "DDM stop anomaly. status:"
+			      " %d iter: %u. (%s)",
+			      status,
+			      i,
+			      __func__);
+		ark_ddm_dump(ark_bb->ddm.v, "Stop anomaly");
+
+		mpu = ark_bb->mputx.v;
+		for (i = 0; i < ark_bb->max_nb_queues; i++) {
+			ark_mpu_dump(mpu, "DDM failure dump", i);
+			mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+		}
+	}
+	ark_ddm_dump_stats(ark_bb->ddm.v, "bbdev stop");
+
+	/* STOP RX Side */
+	/* Stop UDM  multiple tries attempted */
+	for (i = 0; i < 10; i++) {
+		status = ark_udm_stop(ark_bb->udm.v, 1);
+		if (status == 0)
+			break;
+	}
+	if (status || i != 0) {
+		ARK_BBDEV_LOG(WARNING, "UDM stop anomaly. status %d iter: %u. (%s)",
+			      status, i, __func__);
+		ark_udm_dump(ark_bb->udm.v, "Stop anomaly");
+
+		mpu = ark_bb->mpurx.v;
+		for (i = 0; i < ark_bb->max_nb_queues; i++) {
+			ark_mpu_dump(mpu, "UDM Stop anomaly", i);
+			mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+		}
+	}
+
+	ark_udm_dump_stats(ark_bb->udm.v, "Post stop");
+	ark_udm_dump_perf(ark_bb->udm.v, "Post stop");
+
+	/* Stop the packet checker if it is running */
+	if (ark_bb->start_pg) {
+		ark_pktchkr_dump_stats(ark_bb->pc);
+		ark_pktchkr_stop(ark_bb->pc);
+	}
+}
+
+static int
+ark_bb_q_start(struct rte_bbdev *bbdev, uint16_t q_id)
+{
+	struct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;
+	ARK_BBDEV_LOG(DEBUG, "ark_bb_q start %u:%u", bbdev->data->dev_id, q_id);
+	ark_mpu_start(q->tx_mpu);
+	ark_mpu_start(q->rx_mpu);
+	return 0;
+}
+static int
+ark_bb_q_stop(struct rte_bbdev *bbdev, uint16_t q_id)
+{
+	struct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;
+	ARK_BBDEV_LOG(DEBUG, "ark_bb_q stop %u:%u", bbdev->data->dev_id, q_id);
+	ark_mpu_stop(q->tx_mpu);
+	ark_mpu_stop(q->rx_mpu);
+	return 0;
+}
+
+/* ************************************************************************* */
+/* Common function for all enqueue and dequeue ops */
+static inline void
+ark_bb_enqueue_desc_fill(struct ark_bbdev_queue *q,
+			 struct rte_mbuf *mbuf,
+			 uint16_t offset, /* Extra offset */
+			 uint8_t  flags,
+			 uint32_t *meta,
+			 uint8_t  meta_cnt /* 0, 1 or 2 */
+			 )
+{
+	union ark_tx_meta *tx_meta;
+	int32_t tx_idx;
+	uint8_t m;
+
+	/* Header */
+	tx_idx = q->tx_prod_index & q->tx_queue_mask;
+	tx_meta = &q->tx_meta_q[tx_idx];
+	tx_meta->data_len = rte_pktmbuf_data_len(mbuf) - offset;
+	tx_meta->flags = flags;
+	tx_meta->meta_cnt = meta_cnt;
+	tx_meta->user1 = *meta++;
+	q->tx_prod_index++;
+
+	for (m = 0; m < meta_cnt; m++) {
+		tx_idx = q->tx_prod_index & q->tx_queue_mask;
+		tx_meta = &q->tx_meta_q[tx_idx];
+		tx_meta->usermeta0 = *meta++;
+		tx_meta->usermeta1 = *meta++;
+		q->tx_prod_index++;
+	}
+
+	tx_idx = q->tx_prod_index & q->tx_queue_mask;
+	tx_meta = &q->tx_meta_q[tx_idx];
+	tx_meta->physaddr = rte_mbuf_data_iova(mbuf) + offset;
+	q->tx_prod_index++;
+}
+
+static inline void
+ark_bb_enqueue_segmented_pkt(struct ark_bbdev_queue *q,
+			     struct rte_mbuf *mbuf,
+			     uint16_t offset,
+			     uint32_t *meta, uint8_t meta_cnt)
+{
+	struct rte_mbuf *next;
+	uint8_t flags = ARK_DDM_SOP;
+
+	while (mbuf != NULL) {
+		next = mbuf->next;
+		flags |= (next == NULL) ? ARK_DDM_EOP : 0;
+
+		ark_bb_enqueue_desc_fill(q, mbuf, offset, flags,
+					 meta, meta_cnt);
+
+		flags &= ~ARK_DDM_SOP;	/* drop SOP flags */
+		meta_cnt = 0;
+		offset = 0;
+
+		mbuf = next;
+	}
+}
+
+static inline int
+ark_bb_enqueue_common(struct ark_bbdev_queue *q,
+		      struct rte_mbuf *m_in, struct rte_mbuf *m_out,
+		      uint16_t offset,
+		      uint32_t *meta, uint8_t meta_cnt)
+{
+	int32_t free_queue_space;
+	int32_t rx_idx;
+
+	/* TX side limit */
+	free_queue_space = q->tx_queue_mask -
+		(q->tx_prod_index - q->tx_free_index);
+	if (unlikely(free_queue_space < (2 + (2 * m_in->nb_segs))))
+		return 1;
+
+	/* RX side limit */
+	free_queue_space = q->rx_queue_mask -
+		(q->rx_seed_index - q->rx_cons_index);
+	if (unlikely(free_queue_space < m_out->nb_segs))
+		return 1;
+
+	if (unlikely(m_in->nb_segs > 1))
+		ark_bb_enqueue_segmented_pkt(q, m_in, offset, meta, meta_cnt);
+	else
+		ark_bb_enqueue_desc_fill(q, m_in, offset,
+					 ARK_DDM_SOP | ARK_DDM_EOP,
+					 meta, meta_cnt);
+
+	/* We assume that the return mbuf has exactly enough segments for
+	 * return data, which is 2048 bytes per segment.
+	 */
+	do {
+		rx_idx = q->rx_seed_index & q->rx_queue_mask;
+		q->rx_paddress_q[rx_idx] = m_out->buf_iova;
+		q->rx_seed_index++;
+		m_out = m_out->next;
+	} while (m_out);
+
+	return 0;
+}
+
+static inline void
+ark_bb_enqueue_finalize(struct rte_bbdev_queue_data *q_data,
+			struct ark_bbdev_queue *q,
+			void **ops,
+			uint16_t nb_ops, uint16_t nb)
+{
+	/* BBDEV global stats */
+	/* These are not really errors, not sure why bbdev counts these. */
+	q_data->queue_stats.enqueue_err_count += nb_ops - nb;
+	q_data->queue_stats.enqueued_count += nb;
+
+	/* Notify HW of the new producer indexes */
+	if (unlikely(nb == 0))
+		return;
+
+	ark_mpu_set_producer(q->tx_mpu, q->tx_prod_index);
+	ark_mpu_set_producer(q->rx_mpu, q->rx_seed_index);
+
+	/* Queue info for dequeue-side processing */
+	rte_ring_enqueue_burst(q->active_ops,
+			       (void **)ops, nb, NULL);
+}
+
+static int
+ark_bb_dequeue_segmented(struct rte_mbuf *mbuf0,
+			 int32_t *prx_cons_index,
+			 uint16_t pkt_len
+			 )
+{
+	struct rte_mbuf *mbuf;
+	uint16_t data_len;
+	uint16_t remaining;
+	uint16_t segments = 1;
+
+	data_len = RTE_MIN(pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
+	remaining = pkt_len - data_len;
+
+	mbuf = mbuf0;
+	mbuf0->data_len = data_len;
+	while (remaining) {
+		segments += 1;
+		mbuf = mbuf->next;
+		if (unlikely(mbuf == 0)) {
+			ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with "
+				      "at least %d segments for dequeue "
+				      "of packet length %d",
+				      segments, pkt_len);
+			return 1;
+		}
+
+		data_len = RTE_MIN(remaining,
+				   RTE_MBUF_DEFAULT_DATAROOM);
+		remaining -= data_len;
+
+		mbuf->data_len = data_len;
+		*prx_cons_index += 1;
+	}
+
+	if (mbuf->next != 0) {
+		ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with "
+			      "at exactly %d segments for dequeue "
+			      "of packet length %d. Found %d "
+			      "segments",
+			      segments, pkt_len, mbuf0->nb_segs);
+		return 1;
+	}
+	return 0;
+}
+
+/* ************************************************************************* */
+/* LDPC Decode ops */
+static int16_t
+ark_bb_enqueue_ldpc_dec_one_op(struct ark_bbdev_queue *q,
+			       struct rte_bbdev_dec_op *this_op)
+{
+	struct rte_bbdev_op_ldpc_dec *ldpc_dec_op = &this_op->ldpc_dec;
+	struct rte_mbuf *m_in = ldpc_dec_op->input.data;
+	struct rte_mbuf *m_out = ldpc_dec_op->hard_output.data;
+	uint16_t offset = ldpc_dec_op->input.offset;
+	uint32_t meta[5] = {0};
+	uint8_t meta_cnt = 0;
+
+	/* User's meta move from bbdev op to Arkville HW */
+	if (ark_bb_user_enqueue_ldpc_dec(this_op, meta, &meta_cnt)) {
+		ARK_BBDEV_LOG(ERR, "%s failed", __func__);
+		return 1;
+	}
+
+	return ark_bb_enqueue_common(q, m_in, m_out, offset, meta, meta_cnt);
+}
+
+/* Enqueue LDPC Decode -- burst */
+static uint16_t
+ark_bb_enqueue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
+			    struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
+{
+	struct ark_bbdev_queue *q = q_data->queue_private;
+	unsigned int max_enq;
+	uint16_t nb;
+
+	max_enq = rte_ring_free_count(q->active_ops);
+	max_enq = RTE_MIN(max_enq, nb_ops);
+	for (nb = 0; nb < max_enq; nb++) {
+		if (ark_bb_enqueue_ldpc_dec_one_op(q, ops[nb]))
+			break;
+	}
+
+	ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);
+	return nb;
+}
+
+/* ************************************************************************* */
+/* Dequeue LDPC Decode -- burst */
+static uint16_t
+ark_bb_dequeue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
+			    struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
+{
+	struct ark_bbdev_queue *q = q_data->queue_private;
+	struct rte_mbuf *mbuf;
+	struct rte_bbdev_dec_op *this_op;
+	struct ark_rx_meta *meta;
+	uint32_t *usermeta;
+
+	uint16_t nb = 0;
+	int32_t prod_index = q->rx_prod_index;
+	int32_t cons_index = q->rx_cons_index;
+
+	q->tx_free_index = q->tx_cons_index;
+
+	while ((prod_index - cons_index) > 0) {
+		if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {
+			ARK_BBDEV_LOG(ERR, "%s data ready but no op!",
+				      __func__);
+			q_data->queue_stats.dequeue_err_count += 1;
+			break;
+		}
+		ops[nb] = this_op;
+
+		mbuf = this_op->ldpc_dec.hard_output.data;
+
+		/* META DATA embedded in headroom */
+		meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);
+
+		mbuf->pkt_len = meta->pkt_len;
+		mbuf->data_len = meta->pkt_len;
+
+		if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {
+			if (ark_bb_dequeue_segmented(mbuf, &cons_index,
+						     meta->pkt_len))
+				q_data->queue_stats.dequeue_err_count += 1;
+		} else if (mbuf->next != 0) {
+			ARK_BBDEV_LOG(CRIT, "Expected mbuf with "
+				      "at exactly 1 segments for dequeue "
+				      "of packet length %d. Found %d "
+				      "segments",
+				      meta->pkt_len, mbuf->nb_segs);
+			q_data->queue_stats.dequeue_err_count += 1;
+		}
+
+		usermeta = meta->user_meta;
+		/* User's meta move from Arkville HW to bbdev OP */
+		ark_bb_user_dequeue_ldpc_dec(this_op, usermeta);
+		nb++;
+		cons_index++;
+		if (nb >= nb_ops)
+			break;
+	}
+
+	q->rx_cons_index = cons_index;
+
+	/* BBdev stats */
+	q_data->queue_stats.dequeued_count += nb;
+
+	return nb;
+}
+
+/**************************************************************************/
+/* Enqueue LDPC Encode */
+static int16_t
+ark_bb_enqueue_ldpc_enc_one_op(struct ark_bbdev_queue *q,
+			       struct rte_bbdev_enc_op *this_op)
+{
+	struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &this_op->ldpc_enc;
+	struct rte_mbuf *m_in = ldpc_enc_op->input.data;
+	struct rte_mbuf *m_out = ldpc_enc_op->output.data;
+	uint16_t offset = ldpc_enc_op->input.offset;
+	uint32_t meta[5] = {0};
+	uint8_t meta_cnt = 0;
+
+	/* User's meta move from bbdev op to Arkville HW */
+	if (ark_bb_user_enqueue_ldpc_enc(this_op, meta, &meta_cnt)) {
+		ARK_BBDEV_LOG(ERR, "%s failed", __func__);
+		return 1;
+	}
+
+	return ark_bb_enqueue_common(q, m_in, m_out, offset, meta, meta_cnt);
+}
+
+/* Enqueue LDPC Encode -- burst */
+static uint16_t
+ark_bb_enqueue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
+			    struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
+{
+	struct ark_bbdev_queue *q = q_data->queue_private;
+	unsigned int max_enq;
+	uint16_t nb;
+
+	max_enq = rte_ring_free_count(q->active_ops);
+	max_enq = RTE_MIN(max_enq, nb_ops);
+	for (nb = 0; nb < max_enq; nb++) {
+		if (ark_bb_enqueue_ldpc_enc_one_op(q, ops[nb]))
+			break;
+	}
+
+	ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);
+	return nb;
+}
+
+/* Dequeue LDPC Encode -- burst */
+static uint16_t
+ark_bb_dequeue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
+			    struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
+{
+	struct ark_bbdev_queue *q = q_data->queue_private;
+	struct rte_mbuf *mbuf;
+	struct rte_bbdev_enc_op *this_op;
+	struct ark_rx_meta *meta;
+	uint32_t *usermeta;
+
+	uint16_t nb = 0;
+	int32_t prod_index = q->rx_prod_index;
+	int32_t cons_index = q->rx_cons_index;
+
+	q->tx_free_index = q->tx_cons_index;
+
+	/* Drain hardware-completed descriptors, up to nb_ops */
+	while ((prod_index - cons_index) > 0) {
+		if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {
+			ARK_BBDEV_LOG(ERR, "%s data ready but no op!",
+				      __func__);
+			q_data->queue_stats.dequeue_err_count += 1;
+			break;
+		}
+		ops[nb] = this_op;
+
+		mbuf = this_op->ldpc_enc.output.data;
+
+		/* META DATA embedded in headroom */
+		meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);
+
+		mbuf->pkt_len = meta->pkt_len;
+		mbuf->data_len = meta->pkt_len;
+		usermeta = meta->user_meta;
+
+		if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {
+			/* Oversized packets span multiple descriptors */
+			if (ark_bb_dequeue_segmented(mbuf, &cons_index,
+						     meta->pkt_len))
+				q_data->queue_stats.dequeue_err_count += 1;
+		} else if (mbuf->next != NULL) {
+			ARK_BBDEV_LOG(CRIT, "Expected mbuf with "
+				      "exactly 1 segment for dequeue "
+				      "of packet length %d. Found %d "
+				      "segments",
+				      meta->pkt_len, mbuf->nb_segs);
+			q_data->queue_stats.dequeue_err_count += 1;
+		}
+
+		/* User's meta move from Arkville HW to bbdev OP */
+		ark_bb_user_dequeue_ldpc_enc(this_op, usermeta);
+		nb++;
+		cons_index++;
+		if (nb >= nb_ops)
+			break;
+	}
+
+	q->rx_cons_index = cons_index;
+
+	/* BBdev stats */
+	q_data->queue_stats.dequeued_count += nb;
+
+	return nb;
+}
+
+/**************************************************************************/
+/*
+ *Initial device hardware configuration when device is opened
+ * setup the DDM, and UDM; called once per PCIE device
+ */
+static int
+ark_bb_config_device(struct ark_bbdevice *ark_bb)
+{
+	uint16_t num_q, i;
+	struct ark_mpu_t *mpu;
+
+	/*
+	 * Make sure that the packet director, generator and checker are in a
+	 * known state
+	 */
+	ark_bb->start_pg = 0;
+	ark_bb->pg = ark_pktgen_init(ark_bb->pktgen.v, 0, 1);
+	if (ark_bb->pg == NULL)
+		return -1;
+	ark_pktgen_reset(ark_bb->pg);
+	ark_bb->pc = ark_pktchkr_init(ark_bb->pktchkr.v, 0, 1);
+	if (ark_bb->pc == NULL)
+		return -1;
+	ark_pktchkr_stop(ark_bb->pc);
+	ark_bb->pd = ark_pktdir_init(ark_bb->pktdir.v);
+	if (ark_bb->pd == NULL)
+		return -1;
+
+	/* Verify HW */
+	if (ark_udm_verify(ark_bb->udm.v))
+		return -1;
+	if (ark_ddm_verify(ark_bb->ddm.v))
+		return -1;
+
+	/* UDM */
+	if (ark_udm_reset(ark_bb->udm.v)) {
+		ARK_BBDEV_LOG(ERR, "Unable to stop and reset UDM");
+		return -1;
+	}
+	/* Keep in reset until the MPU are cleared */
+
+	/* MPU reset */
+	mpu = ark_bb->mpurx.v;
+	num_q = ark_api_num_queues(mpu);
+	ark_bb->max_nb_queues = num_q;
+
+	for (i = 0; i < num_q; i++) {
+		ark_mpu_reset(mpu);
+		mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+	}
+
+	/* Only 1 queue supported in the udm */
+	ark_udm_stop(ark_bb->udm.v, 0);
+	/* No queues exist yet at device-configure time, so program the
+	 * default mbuf dataroom.  The original code referenced the
+	 * undeclared identifiers 'bbdev' and 'q_id' here and could not
+	 * compile.
+	 */
+	ark_udm_configure(ark_bb->udm.v,
+			  RTE_PKTMBUF_HEADROOM,
+			  RTE_MBUF_DEFAULT_DATAROOM,
+			  ARK_RX_WRITE_TIME_NS);
+
+	ark_udm_stats_reset(ark_bb->udm.v);
+	ark_udm_stop(ark_bb->udm.v, 0);
+
+	/* TX -- DDM */
+	if (ark_ddm_stop(ark_bb->ddm.v, 1))
+		ARK_BBDEV_LOG(ERR, "Unable to stop DDM");
+
+	mpu = ark_bb->mputx.v;
+	num_q = ark_api_num_queues(mpu);
+	for (i = 0; i < num_q; i++) {
+		ark_mpu_reset(mpu);
+		mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+	}
+
+	ark_ddm_reset(ark_bb->ddm.v);
+	ark_ddm_stats_reset(ark_bb->ddm.v);
+
+	ark_ddm_stop(ark_bb->ddm.v, 0);
+	if (ark_bb->rqpacing)
+		ark_rqp_stats_reset(ark_bb->rqpacing);
+
+	ARK_BBDEV_LOG(INFO, "packet director set to 0x%x", ark_bb->pkt_dir_v);
+	ark_pktdir_setup(ark_bb->pd, ark_bb->pkt_dir_v);
+
+	if (ark_bb->pkt_gen_args[0]) {
+		ARK_BBDEV_LOG(INFO, "Setting up the packet generator");
+		ark_pktgen_parse(ark_bb->pkt_gen_args);
+		ark_pktgen_reset(ark_bb->pg);
+		ark_pktgen_setup(ark_bb->pg);
+		ark_bb->start_pg = 1;
+	}
+
+	return 0;
+}
+
+/* Map the Arkville hardware blocks from BAR0, look up per-device
+ * capabilities, sanity-check the hardware, then run the one-time
+ * device configuration.  Returns 0 on success, -1 on failure.
+ */
+static int
+ark_bbdev_init(struct rte_bbdev *bbdev, struct rte_pci_driver *pci_drv)
+{
+	struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
+	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(bbdev->device);
+	bool rqpacing = false;
+	int p;
+
+	RTE_SET_USED(pci_drv);
+
+	/* BAR0 holds the Arkville core blocks; BAR2 the app extensions */
+	ark_bb->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
+	ark_bb->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;
+
+	ark_bb->sysctrl.v  = (void *)&ark_bb->bar0[ARK_SYSCTRL_BASE];
+	ark_bb->mpurx.v  = (void *)&ark_bb->bar0[ARK_MPU_RX_BASE];
+	ark_bb->udm.v  = (void *)&ark_bb->bar0[ARK_UDM_BASE];
+	ark_bb->mputx.v  = (void *)&ark_bb->bar0[ARK_MPU_TX_BASE];
+	ark_bb->ddm.v  = (void *)&ark_bb->bar0[ARK_DDM_BASE];
+	ark_bb->pktdir.v  = (void *)&ark_bb->bar0[ARK_PKTDIR_BASE];
+	ark_bb->pktgen.v  = (void *)&ark_bb->bar0[ARK_PKTGEN_BASE];
+	ark_bb->pktchkr.v  = (void *)&ark_bb->bar0[ARK_PKTCHKR_BASE];
+
+	/* Look up request-pacing capability by PCI device id */
+	p = 0;
+	while (ark_device_caps[p].device_id != 0) {
+		if (pci_dev->id.device_id == ark_device_caps[p].device_id) {
+			rqpacing = ark_device_caps[p].caps.rqpacing;
+			break;
+		}
+		p++;
+	}
+
+	if (rqpacing)
+		ark_bb->rqpacing =
+			(struct ark_rqpace_t *)(ark_bb->bar0 + ARK_RCPACING_BASE);
+	else
+		ark_bb->rqpacing = NULL;
+
+	ark_bb->started = 0;
+
+	/* Log once; the original emitted the HW Commit_ID twice */
+	ARK_BBDEV_LOG(INFO, "Sys Ctrl Const = 0x%x  HW Commit_ID: %08x",
+		      ark_bb->sysctrl.t32[4],
+		      rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));
+
+	/* If HW sanity test fails, return an error */
+	if (ark_bb->sysctrl.t32[4] != 0xcafef00d) {
+		ARK_BBDEV_LOG(ERR,
+			      "HW Sanity test has failed, expected constant"
+			      " 0x%x, read 0x%x (%s)",
+			      0xcafef00d,
+			      ark_bb->sysctrl.t32[4], __func__);
+		return -1;
+	}
+
+	return ark_bb_config_device(ark_bb);
+}
+
+static int
+ark_bbdev_uninit(struct rte_bbdev *bbdev)
+{
+	struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
+
+	/* Hardware teardown is only performed by the primary process */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		ark_pktgen_uninit(ark_bb->pg);
+		ark_pktchkr_uninit(ark_bb->pc);
+	}
+	return 0;
+}
+
+/* PCI probe callback: allocate the bbdev, parse devargs, initialize
+ * and start the hardware, then install the LDPC fast-path handlers.
+ * All resources are released on any failure path.
+ */
+static int
+ark_bbdev_probe(struct rte_pci_driver *pci_drv,
+		struct rte_pci_device *pci_dev)
+{
+	struct rte_bbdev *bbdev = NULL;
+	char dev_name[RTE_BBDEV_NAME_MAX_LEN];
+	struct ark_bbdevice *ark_bb;
+
+	if (pci_dev == NULL)
+		return -EINVAL;
+
+	rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
+
+	/* Allocate memory to be used privately by drivers */
+	bbdev = rte_bbdev_allocate(pci_dev->device.name);
+	if (bbdev == NULL)
+		return -ENODEV;
+
+	/* allocate device private memory */
+	bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
+			sizeof(struct ark_bbdevice),
+			RTE_CACHE_LINE_SIZE,
+			pci_dev->device.numa_node);
+	if (bbdev->data->dev_private == NULL) {
+		ARK_BBDEV_LOG(CRIT,
+			      "Allocate of %zu bytes for device \"%s\" failed",
+			      sizeof(struct ark_bbdevice), dev_name);
+		rte_bbdev_release(bbdev);
+		return -ENOMEM;
+	}
+	ark_bb = bbdev->data->dev_private;
+	/* Default packet director bitmap; may be overridden by devargs */
+	ark_bb->pkt_dir_v = 0x00110110;
+
+	/* Fill HW specific part of device structure */
+	bbdev->device = &pci_dev->device;
+	bbdev->intr_handle = NULL;
+	bbdev->data->socket_id = pci_dev->device.numa_node;
+	bbdev->dev_ops = &ark_bbdev_pmd_ops;
+	if (pci_dev->device.devargs)
+		parse_ark_bbdev_params(pci_dev->device.devargs->args, ark_bb);
+
+	/* Device specific initialization and start.  On failure, free
+	 * the private data and release the bbdev instead of leaking
+	 * them (the original returned -EIO without cleanup).
+	 */
+	if (ark_bbdev_init(bbdev, pci_drv) || ark_bbdev_start(bbdev)) {
+		rte_free(bbdev->data->dev_private);
+		rte_bbdev_release(bbdev);
+		return -EIO;
+	}
+
+	/* Core operations LDPC encode and decode */
+	bbdev->enqueue_ldpc_enc_ops = ark_bb_enqueue_ldpc_enc_ops;
+	bbdev->dequeue_ldpc_enc_ops = ark_bb_dequeue_ldpc_enc_ops;
+	bbdev->enqueue_ldpc_dec_ops = ark_bb_enqueue_ldpc_dec_ops;
+	bbdev->dequeue_ldpc_dec_ops = ark_bb_dequeue_ldpc_dec_ops;
+
+	ARK_BBDEV_LOG(DEBUG, "bbdev id = %u [%s]",
+		      bbdev->data->dev_id, dev_name);
+
+	return 0;
+}
+
+/* Uninitialize device */
+static int
+ark_bbdev_remove(struct rte_pci_device *pci_dev)
+{
+	struct rte_bbdev *bbdev;
+	int ret;
+
+	if (pci_dev == NULL)
+		return -EINVAL;
+
+	/* Find device */
+	bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
+	if (bbdev == NULL) {
+		ARK_BBDEV_LOG(CRIT,
+				"Couldn't find HW dev \"%s\" to Uninitialize it",
+				pci_dev->device.name);
+		return -ENODEV;
+	}
+
+	/* Arkville device close */
+	ark_bbdev_uninit(bbdev);
+
+	/* Close the device BEFORE freeing its private data: the close
+	 * callback may still dereference dev_private (the original
+	 * freed it first).
+	 */
+	ret = rte_bbdev_close(bbdev->data->dev_id);
+	if (ret < 0)
+		ARK_BBDEV_LOG(ERR,
+				"Device %i failed to close during remove: %i",
+				bbdev->data->dev_id, ret);
+
+	rte_free(bbdev->data->dev_private);
+
+	return rte_bbdev_release(bbdev);
+}
+
+/* Device and queue control callbacks registered with the bbdev
+ * framework.  The LDPC enqueue/dequeue fast-path handlers are
+ * assigned directly on the rte_bbdev structure at probe time.
+ */
+static const struct rte_bbdev_ops ark_bbdev_pmd_ops = {
+	.info_get = ark_bbdev_info_get,
+	.start = ark_bbdev_start,
+	.stop = ark_bbdev_stop,
+	.queue_setup = ark_bb_q_setup,
+	.queue_release = ark_bb_q_release,
+	.queue_start = ark_bb_q_start,
+	.queue_stop = ark_bb_q_stop,
+};
+
+
+
+/* PCI driver descriptor; BAR mapping is required for register access */
+static struct rte_pci_driver ark_bbdev_pmd_drv = {
+	.probe = ark_bbdev_probe,
+	.remove = ark_bbdev_remove,
+	.id_table = pci_id_ark,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING
+};
+
+/* Register the driver and advertise the devargs this PMD accepts */
+RTE_PMD_REGISTER_PCI(DRIVER_NAME, ark_bbdev_pmd_drv);
+RTE_PMD_REGISTER_PCI_TABLE(DRIVER_NAME, pci_id_ark);
+RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
+			      ARK_BBDEV_PKTGEN_ARG "=<filename> "
+			      ARK_BBDEV_PKTCHKR_ARG "=<filename> "
+			      ARK_BBDEV_PKTDIR_ARG "=<bitmap>"
+			      );
diff --git a/drivers/baseband/ark/ark_bbdev_common.c b/drivers/baseband/ark/ark_bbdev_common.c
new file mode 100644
index 0000000000..6ef0f43654
--- /dev/null
+++ b/drivers/baseband/ark/ark_bbdev_common.c
@@ -0,0 +1,125 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2021 Atomic Rules LLC
+ */
+
+#include <string.h>
+
+#include <rte_kvargs.h>
+#include <rte_log.h>
+
+#include "ark_bbdev_common.h"
+
+/* NULL-terminated list of devargs keys accepted by this PMD */
+static const char * const ark_bbdev_valid_params[] = {
+	ARK_BBDEV_PKTDIR_ARG,
+	ARK_BBDEV_PKTGEN_ARG,
+	ARK_BBDEV_PKTCHKR_ARG,
+	NULL
+};
+
+/* Parse a 16-bit unsigned integer from a devargs string value.
+ *
+ * @param key        devargs key (used for logging only)
+ * @param value      string to convert (any base accepted by strtoul)
+ * @param extra_args destination, must point to a uint16_t
+ * @return 0 on success, -EINVAL on NULL arguments, -ERANGE when the
+ *         value does not fit in 16 bits or conversion fails
+ */
+static inline int
+parse_u16_arg(const char *key, const char *value, void *extra_args)
+{
+	uint16_t *u16 = extra_args;
+	unsigned long result;
+
+	if ((value == NULL) || (extra_args == NULL))
+		return -EINVAL;
+	errno = 0;
+	result = strtoul(value, NULL, 0);
+	if ((result >= (1 << 16)) || (errno != 0)) {
+		/* %lu matches unsigned long; the original PRIu64 was a
+		 * format mismatch on 32-bit targets
+		 */
+		ARK_BBDEV_LOG(ERR, "Invalid value %lu for %s", result, key);
+		return -ERANGE;
+	}
+	*u16 = (uint16_t)result;
+	return 0;
+}
+
+static inline int
+process_pktdir_arg(const char *key, const char *value,
+		   void *extra_args)
+{
+	uint32_t *pkt_dir = extra_args;
+
+	/* Convert the bitmap string (any base) and record it */
+	ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);
+	*pkt_dir = strtol(value, NULL, 0);
+	ARK_BBDEV_LOG(DEBUG, "pkt_dir_v = 0x%x", *pkt_dir);
+	return 0;
+}
+
+/* Load the whole file named by 'value' into the caller-supplied
+ * buffer of ARK_MAX_ARG_LEN bytes passed through 'extra_args'.
+ *
+ * @return 0 on success, -1 when the file cannot be opened or its
+ *         contents do not fit in the buffer
+ */
+static inline int
+process_file_args(const char *key, const char *value, void *extra_args)
+{
+	char *args = (char *)extra_args;
+	FILE *file;
+	char line[ARK_MAX_ARG_LEN];
+	int  size = 0;
+	int first = 1;
+
+	ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);
+
+	/* Open the configuration file */
+	file = fopen(value, "r");
+	if (file == NULL) {
+		ARK_BBDEV_LOG(ERR, "Unable to open config file %s",
+			      value);
+		return -1;
+	}
+
+	while (fgets(line, sizeof(line), file)) {
+		/* Running total, checked before each copy, keeps the
+		 * destination from ever overflowing
+		 */
+		size += strlen(line);
+		if (size >= ARK_MAX_ARG_LEN) {
+			ARK_BBDEV_LOG(ERR, "Unable to parse file %s args, "
+				      "parameter list is too long", value);
+			fclose(file);
+			return -1;
+		}
+		if (first) {
+			/* Bound the copy and force NUL termination */
+			strncpy(args, line, ARK_MAX_ARG_LEN - 1);
+			args[ARK_MAX_ARG_LEN - 1] = '\0';
+			first = 0;
+		} else {
+			/* strncat's bound is the REMAINING space, not
+			 * the total buffer size as originally written
+			 */
+			strncat(args, line,
+				ARK_MAX_ARG_LEN - strlen(args) - 1);
+		}
+	}
+	ARK_BBDEV_LOG(DEBUG, "file = %s", args);
+	fclose(file);
+	return 0;
+}
+
+
+/* Parse the devargs string used to create the device.
+ *
+ * Recognized keys are listed in ark_bbdev_valid_params; file-type
+ * arguments are loaded into the matching ark_bb buffers.
+ *
+ * @return 0 on success (or when there are no args), negative errno
+ *         on parse failure
+ */
+int
+parse_ark_bbdev_params(const char *input_args,
+		       struct ark_bbdevice *ark_bb)
+{
+	struct rte_kvargs *kvlist = NULL;
+	int ret = 0;
+
+	if (ark_bb == NULL)
+		return -EINVAL;
+	if (input_args == NULL)
+		return ret;
+
+	kvlist = rte_kvargs_parse(input_args, ark_bbdev_valid_params);
+	if (kvlist == NULL)
+		return -EFAULT;
+
+	ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTDIR_ARG,
+				  &process_pktdir_arg, &ark_bb->pkt_dir_v);
+	if (ret < 0)
+		goto exit;
+
+	ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTGEN_ARG,
+				 &process_file_args, &ark_bb->pkt_gen_args);
+	if (ret < 0)
+		goto exit;
+
+	ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTCHKR_ARG,
+				 &process_file_args, &ark_bb->pkt_chkr_args);
+
+ exit:
+	/* kvlist is always non-NULL here; no need for the redundant
+	 * NULL guard the original carried
+	 */
+	rte_kvargs_free(kvlist);
+	return ret;
+}
diff --git a/drivers/baseband/ark/ark_bbdev_common.h b/drivers/baseband/ark/ark_bbdev_common.h
new file mode 100644
index 0000000000..670e7e86d6
--- /dev/null
+++ b/drivers/baseband/ark/ark_bbdev_common.h
@@ -0,0 +1,92 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2021 Atomic Rules LLC
+ */
+
+#ifndef _ARK_BBDEV_COMMON_H_
+#define _ARK_BBDEV_COMMON_H_
+
+#include "ark_pktchkr.h"
+#include "ark_pktdir.h"
+#include "ark_pktgen.h"
+
+#define ARK_MAX_ARG_LEN 256
+
+/* Acceptable params for ark BBDEV devices */
+/*
+ * The packet generator is a functional block used to generate packet
+ * patterns for testing.  It is not intended for nominal use.
+ */
+#define ARK_BBDEV_PKTGEN_ARG "Pkt_gen"
+
+/*
+ * The packet checker is a functional block used to verify packet
+ * patterns for testing.  It is not intended for nominal use.
+ */
+#define ARK_BBDEV_PKTCHKR_ARG "Pkt_chkr"
+
+/*
+ * The packet director is used to select the internal ingress and
+ * egress packets paths during testing.  It is not intended for
+ * nominal use.
+ */
+#define ARK_BBDEV_PKTDIR_ARG "Pkt_dir"
+
+
+/* Generate a union giving one hardware block base address several
+ * access widths (64/32/16/8-bit) plus an opaque void pointer.
+ */
+#define def_ptr(type, name) \
+	union type {		   \
+		uint64_t *t64;	   \
+		uint32_t *t32;	   \
+		uint16_t *t16;	   \
+		uint8_t  *t8;	   \
+		void     *v;	   \
+	} name
+
+/*
+ * Structure to store private data for each PF/VF instance.
+ */
+struct ark_bbdevice {
+	/* Our Bar 0 */
+	uint8_t *bar0;
+
+	/* Application Bar needed for extensions */
+	uint8_t *a_bar;
+
+	/* Arkville hardware block offsets (multi-width views of BAR0) */
+	def_ptr(sys_ctrl, sysctrl);
+	def_ptr(pkt_gen, pktgen);
+	def_ptr(mpu_rx, mpurx);
+	def_ptr(UDM, udm);
+	def_ptr(mpu_tx, mputx);
+	def_ptr(DDM, ddm);
+	def_ptr(pkt_dir, pktdir);
+	def_ptr(pkt_chkr, pktchkr);
+	struct ark_rqpace_t *rqpacing;	/* NULL when pacing not present */
+
+	/* Pointers to packet generator and checker */
+	int start_pg;	/* nonzero: start the pktgen with the device */
+	ark_pkt_gen_t pg;
+	ark_pkt_chkr_t pc;
+	ark_pkt_dir_t pd;
+
+	/* Packet generator/checker args (file contents from devargs) */
+	char pkt_gen_args[ARK_MAX_ARG_LEN];
+	char pkt_chkr_args[ARK_MAX_ARG_LEN];
+	uint32_t pkt_dir_v;	/* packet director bitmap from devargs */
+
+	int started;	/* device start/stop state flag */
+	unsigned int max_nb_queues;  /**< Max number of queues */
+
+};
+
+
+/* Log message for PMD */
+extern int ark_bbdev_logtype;
+
+/* Helper macro for logging; appends a newline to every message */
+#define ARK_BBDEV_LOG(level, fmt, ...) \
+	rte_log(RTE_LOG_ ## level, ark_bbdev_logtype, \
+		"ARK_BBD: " fmt "\n", ##__VA_ARGS__)
+
+/* Parse devargs into 'dev'; returns 0 or a negative errno value */
+int parse_ark_bbdev_params(const char *argv, struct ark_bbdevice *dev);
+
+#endif
diff --git a/drivers/baseband/ark/ark_bbdev_custom.c b/drivers/baseband/ark/ark_bbdev_custom.c
new file mode 100644
index 0000000000..6b1553abe1
--- /dev/null
+++ b/drivers/baseband/ark/ark_bbdev_custom.c
@@ -0,0 +1,201 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2021 Atomic Rules LLC
+ */
+
+#include <rte_bbdev.h>
+#include <rte_bbdev_pmd.h>
+
+#include <rte_mbuf.h>
+#include <rte_hexdump.h>	/* For debug */
+
+
+#include "ark_bbdev_common.h"
+#include "ark_bbdev_custom.h"
+
+/* It is expected that functions in this file will be modified based on
+ * specifics of the FPGA hardware beyond the core Arkville
+ * components.
+ */
+
+/* Convert a user-meta byte count into the meta-word count passed to
+ * the hardware.  Bytes must be in the range 0 to 20.
+ * NOTE(review): (bytes + 3) / 8 is not a ceiling division -- 20
+ * bytes yields 2, not 3; confirm this matches the HW meta unit.
+ */
+static inline
+uint8_t ark_bb_cvt_bytes_meta_cnt(size_t bytes)
+{
+	return (bytes + 3) / 8;
+}
+
+/* bbdev info_get callback: report driver name, queue limits and the
+ * LDPC encode/decode capabilities of this device.
+ *
+ * @param dev       bbdev instance being queried
+ * @param dev_info  output structure populated by this function
+ */
+void
+ark_bbdev_info_get(struct rte_bbdev *dev,
+		   struct rte_bbdev_driver_info *dev_info)
+{
+	struct ark_bbdevice *ark_bb =  dev->data->dev_private;
+
+	/* Static so the table outlives this call; dev_info keeps a
+	 * pointer into it
+	 */
+	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
+		{
+			.type = RTE_BBDEV_OP_LDPC_DEC,
+			.cap.ldpc_dec = {
+				.capability_flags =
+					RTE_BBDEV_LDPC_CRC_24B_ATTACH |
+					RTE_BBDEV_LDPC_RATE_MATCH,
+				.num_buffers_src =
+					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
+				.num_buffers_hard_out =
+					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS
+			}
+		},
+		{
+			.type = RTE_BBDEV_OP_LDPC_ENC,
+			.cap.ldpc_enc = {
+				.capability_flags =
+					RTE_BBDEV_LDPC_CRC_24B_ATTACH |
+					RTE_BBDEV_LDPC_RATE_MATCH,
+				.num_buffers_src =
+					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
+				.num_buffers_dst =
+					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS
+			}
+		},
+		RTE_BBDEV_END_OF_CAPABILITIES_LIST(),
+	};
+
+	static struct rte_bbdev_queue_conf default_queue_conf = {
+		.queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
+	};
+
+	default_queue_conf.socket = dev->data->socket_id;
+
+	dev_info->driver_name = RTE_STR(DRIVER_NAME);
+	dev_info->max_num_queues = ark_bb->max_nb_queues;
+	dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
+	dev_info->hardware_accelerated = true;
+	dev_info->max_dl_queue_priority = 0;
+	dev_info->max_ul_queue_priority = 0;
+	dev_info->default_queue_conf = default_queue_conf;
+	dev_info->capabilities = bbdev_capabilities;
+	dev_info->cpu_flag_reqs = NULL;
+	dev_info->min_alignment = 4;
+
+}
+
+/* Structure defining the layout of the ldpc command meta words as
+ * consumed by the FPGA; packed to match the hardware layout.
+ */
+struct ark_bb_ldpc_enc_meta {
+	uint16_t header;
+	uint8_t rv_index:2,
+		basegraph:1,
+		code_block_mode:1,
+		rfu_71_68:4;	/* rfu: presumably reserved bits 71..68 */
+
+	uint8_t q_m;
+	uint32_t e_ea;
+	uint32_t eb;
+	uint8_t c;
+	uint8_t cab;
+	uint16_t n_cb;
+	uint16_t pad;
+	uint16_t trailer;
+} __rte_packed;
+
+/* The size must be less than 20 bytes (the per-op meta budget) */
+static_assert(sizeof(struct ark_bb_ldpc_enc_meta) <= 20, "struct size");
+
+/* Custom operation on enqueue ldpc operation: serialize the op's
+ * parameters into at most 20 bytes (5 x 32-bit words) of user meta
+ * data for the Arkville HW.
+ * Do these functions need a queue number?
+ * NOTE(review): the meta[0..4] constant writes below overwrite the
+ * ark_bb_ldpc_enc_meta fields populated through 'src' just above --
+ * this looks like bring-up/test scaffolding; confirm intent.
+ */
+int
+ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
+			  uint32_t *meta, uint8_t *meta_cnt)
+{
+	struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &enc_op->ldpc_enc;
+	struct ark_bb_ldpc_enc_meta *src = (struct ark_bb_ldpc_enc_meta *)meta;
+
+	src->header = 0x4321;	/* For testings */
+	src->trailer = 0xFEDC;
+
+	src->rv_index = ldpc_enc_op->rv_index;
+	src->basegraph = ldpc_enc_op->basegraph;
+	src->code_block_mode = ldpc_enc_op->code_block_mode;
+
+	src->q_m = ldpc_enc_op->q_m;
+	src->e_ea = 0xABCD;	/* test pattern, not taken from the op */
+	src->eb = ldpc_enc_op->tb_params.eb;
+	src->c = ldpc_enc_op->tb_params.c;
+	src->cab = ldpc_enc_op->tb_params.cab;
+
+	src->n_cb = 0;
+
+	meta[0] = 0x11111110;
+	meta[1] = 0x22222220;
+	meta[2] = 0x33333330;
+	meta[3] = 0x44444440;
+	meta[4] = 0x55555550;
+
+	*meta_cnt = ark_bb_cvt_bytes_meta_cnt(
+			sizeof(struct ark_bb_ldpc_enc_meta));
+	return 0;
+}
+
+/* Custom operation on dequeue ldpc operation: re-serialize the op
+ * and compare against the meta words returned by the HW, hexdumping
+ * any mismatch.  Debug/bring-up aid; always returns 0.
+ * NOTE(review): the compare length 3 + meta_cnt * 8 looks like it
+ * mixes byte and word units -- confirm the intended byte count.
+ */
+int
+ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
+			     const uint32_t *usermeta)
+{
+	static int dump;	/* = 0 */
+	/* Just compare with what was sent? */
+	uint32_t meta_in[5] = {0};
+	uint8_t  meta_cnt;
+
+	ark_bb_user_enqueue_ldpc_enc(enc_op, meta_in, &meta_cnt);
+	if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {
+		fprintf(stderr,
+			"------------------------------------------\n");
+		rte_hexdump(stdout, "meta difference for lpdc_enc IN",
+			    meta_in, 20);
+		rte_hexdump(stdout, "meta difference for lpdc_enc OUT",
+			    usermeta, 20);
+	} else if (dump) {
+		rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);
+		dump--;
+	}
+
+	return 0;
+}
+
+
+/* LDPC decode enqueue callback for user meta data: fills a fixed
+ * test pattern; the op itself is currently unused.  (The original
+ * comment said "Turbo op" -- these are the LDPC dec callbacks.)
+ */
+int ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
+				 uint32_t *meta, uint8_t *meta_cnt)
+{
+	RTE_SET_USED(enc_op);
+	meta[0] = 0xF1111110;
+	meta[1] = 0xF2222220;
+	meta[2] = 0xF3333330;
+	meta[3] = 0xF4444440;
+	meta[4] = 0xF5555550;
+
+	*meta_cnt = ark_bb_cvt_bytes_meta_cnt(20);
+	return 0;
+}
+
+/* LDPC decode dequeue callback: regenerate the enqueue-side pattern
+ * and compare against the meta returned by the HW, hexdumping any
+ * mismatch.  Debug/bring-up aid; always returns 0.
+ * NOTE(review): compare length 3 + meta_cnt * 8 -- same unit-mixing
+ * question as the encode-side check; hexdump titles still say
+ * "lpdc_enc" on this decode path.
+ */
+int ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
+				 const uint32_t *usermeta)
+{
+	RTE_SET_USED(enc_op);
+	static int dump;	/* = 0 */
+	/* Just compare with what was sent? */
+	uint32_t meta_in[5] = {0};
+	uint8_t  meta_cnt;
+
+	ark_bb_user_enqueue_ldpc_dec(enc_op, meta_in, &meta_cnt);
+	if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {
+		fprintf(stderr,
+			"------------------------------------------\n");
+		rte_hexdump(stdout, "meta difference for lpdc_enc IN",
+			    meta_in, 20);
+		rte_hexdump(stdout, "meta difference for lpdc_enc OUT",
+			    usermeta, 20);
+	} else if (dump) {
+		rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);
+		dump--;
+	}
+	return 0;
+}
diff --git a/drivers/baseband/ark/ark_bbdev_custom.h b/drivers/baseband/ark/ark_bbdev_custom.h
new file mode 100644
index 0000000000..32a2ef6bb6
--- /dev/null
+++ b/drivers/baseband/ark/ark_bbdev_custom.h
@@ -0,0 +1,30 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2021 Atomic Rules LLC
+ */
+
+#ifndef _ARK_BBDEV_CUSTOM_H_
+#define _ARK_BBDEV_CUSTOM_H_
+
+#include <stdint.h>
+
+/* Forward declarations */
+struct rte_bbdev;
+struct rte_bbdev_driver_info;
+struct rte_bbdev_enc_op;
+struct rte_bbdev_dec_op;
+struct rte_mbuf;
+
+void ark_bbdev_info_get(struct rte_bbdev *dev,
+			struct rte_bbdev_driver_info *dev_info);
+
+int ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
+				 uint32_t *meta, uint8_t *meta_cnt);
+int ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
+				 const uint32_t *usermeta);
+
+int ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
+				 uint32_t *meta, uint8_t *meta_cnt);
+int ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
+				 const uint32_t *usermeta);
+
+#endif
diff --git a/drivers/baseband/ark/meson.build b/drivers/baseband/ark/meson.build
new file mode 100644
index 0000000000..b876f05c6e
--- /dev/null
+++ b/drivers/baseband/ark/meson.build
@@ -0,0 +1,11 @@ 
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+# Arkville baseband PMD: reuses the low-level hardware block drivers
+# shared with the net/ark PMD via drivers/common/ark.
+deps += ['common_ark', 'bbdev', 'bus_pci', 'pci', 'ring']
+sources = files(
+	'ark_bbdev.c',
+	'ark_bbdev_common.c',
+	'ark_bbdev_custom.c'
+	)
+
+includes += include_directories('../../common/ark')
diff --git a/drivers/baseband/ark/version.map b/drivers/baseband/ark/version.map
new file mode 100644
index 0000000000..4a76d1d52d
--- /dev/null
+++ b/drivers/baseband/ark/version.map
@@ -0,0 +1,3 @@ 
+/* No public symbols are exported by this PMD.
+ * NOTE(review): the version node should track the current ABI
+ * (e.g. DPDK_22 for a 22.x release); confirm DPDK_21 is intended.
+ */
+DPDK_21 {
+	local: *;
+};