get:
Show a patch.

patch:
Partially update a patch (send only the fields to change).

put:
Update a patch (full replacement).
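
For illustration, a minimal Python sketch of the read-only GET call shown below, using the third-party requests library. The URL and field names are taken from the response body on this page; error handling is reduced to raise_for_status().

import requests

# Fetch this patch from the Patchwork REST API (no auth needed for GET).
resp = requests.get("http://patchwork.dpdk.org/api/patches/119176/")
resp.raise_for_status()
patch = resp.json()

# A few fields from the JSON body rendered below.
print(patch["name"])   # "[10/14] baseband/ark: introduce ark baseband driver"
print(patch["state"])  # "changes-requested"
print(patch["mbox"])   # raw mbox URL, suitable for git am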

GET /api/patches/119176/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 119176,
    "url": "http://patchwork.dpdk.org/api/patches/119176/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20221026194613.1008232-10-john.miller@atomicrules.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221026194613.1008232-10-john.miller@atomicrules.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221026194613.1008232-10-john.miller@atomicrules.com",
    "date": "2022-10-26T19:46:09",
    "name": "[10/14] baseband/ark: introduce ark baseband driver",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "33717e5f01531ff2b3126178705f50c9a920cb95",
    "submitter": {
        "id": 719,
        "url": "http://patchwork.dpdk.org/api/people/719/?format=api",
        "name": "John Miller",
        "email": "john.miller@atomicrules.com"
    },
    "delegate": {
        "id": 2642,
        "url": "http://patchwork.dpdk.org/api/users/2642/?format=api",
        "username": "mcoquelin",
        "first_name": "Maxime",
        "last_name": "Coquelin",
        "email": "maxime.coquelin@redhat.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20221026194613.1008232-10-john.miller@atomicrules.com/mbox/",
    "series": [
        {
            "id": 25445,
            "url": "http://patchwork.dpdk.org/api/series/25445/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=25445",
            "date": "2022-10-26T19:46:00",
            "name": "[01/14] doc/guides/bbdevs: add ark baseband device documentation",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/25445/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/119176/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/119176/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 408F9A04FD;\n\tWed, 26 Oct 2022 21:47:27 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id C156E42BB9;\n\tWed, 26 Oct 2022 21:46:48 +0200 (CEST)",
            "from mail-il1-f181.google.com (mail-il1-f181.google.com\n [209.85.166.181])\n by mails.dpdk.org (Postfix) with ESMTP id A5A814282B\n for <dev@dpdk.org>; Wed, 26 Oct 2022 21:46:41 +0200 (CEST)",
            "by mail-il1-f181.google.com with SMTP id l6so9479283ilq.3\n for <dev@dpdk.org>; Wed, 26 Oct 2022 12:46:41 -0700 (PDT)",
            "from john-System-Product-Name.tds\n (h64-35-205-155.cntcnh.broadband.dynamic.tds.net. [64.35.205.155])\n by smtp.googlemail.com with ESMTPSA id\n m1-20020a056e020de100b002faecf63c48sm2336169ilj.79.2022.10.26.12.46.39\n (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);\n Wed, 26 Oct 2022 12:46:40 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=atomicrules-com.20210112.gappssmtp.com; s=20210112;\n h=content-transfer-encoding:mime-version:references:in-reply-to\n :message-id:date:subject:cc:to:from:from:to:cc:subject:date\n :message-id:reply-to;\n bh=LDjg0Xq2Lfa6yF6VfR3IiOsN01jatXdzBtyTOUtUf38=;\n b=AAZXd+9kX0p9TtrCXwa1noR+dU6j9NZpP0lCa99y72k4aiVnBlP6GXrvqBMiwJGOC/\n wlMkrtgZeKTaQ5J/josBYUNjwVZUZYw7B8Pzsw18w9JqhcPT68EDTSnNncCVmymk6mgI\n RGgKyOxkHSbOrqSrIXlJyBcSwauLNegiGAYO5lED4mqSpL6mB3bC1JSW/HwdzcnX1mnU\n rRDNdKTgsF8DdmCAaGJ3vworaMiQW8EaPHoTOQUb8kS9Uejcu7WNfUMR590h+fNxDRjz\n TyBuT627CIXkY3SNlXoV6Mrgl9+XVBHSbLhXu2kM4GEIx5cRarWQ9a7hJHA3mhlHsl3q\n cKsg==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20210112;\n h=content-transfer-encoding:mime-version:references:in-reply-to\n :message-id:date:subject:cc:to:from:x-gm-message-state:from:to:cc\n :subject:date:message-id:reply-to;\n bh=LDjg0Xq2Lfa6yF6VfR3IiOsN01jatXdzBtyTOUtUf38=;\n b=WMtUSPom4h7zlV1OsbgaPvQHO9UhVjiqn/Q1Q2kRPpouFvgiC9UMZy2A6zv33mCRtj\n ZPGZR/ez8mSSl/BDRNYbad2DPBcnORS8EaUKaYmd88nhpVI27jMdpGOgGPDu3UDmhFxY\n pte6+vYUA0Ys58d8lDtpAseyY2fo3rysiDb7aTsE7sDeqlRdBKlXHrIkdM2M+tXG6dbs\n ojjeUNI9UUX83uoq8w1CXKhhYR0AJS19KL4YyQYQa67tCMwieZUOG0uRbmGgGtO+QF7f\n 2tTfMSCZBnhUtyV62gCYU4Q01v6LWKWNxMEQNeHqyFgoWZc/nKuSqov4G1C5aJpdfRT2\n PycA==",
        "X-Gm-Message-State": "ACrzQf1i60j3lj5waJMv9NeKSEkbWF+7p/f+ZvEopGgapqMDmjQlun+E\n A5px5Ec76E2mZnVaoKkJK6DqYM5uHGtkMA==",
        "X-Google-Smtp-Source": "\n AMsMyM7sMFRzdZQ4BWFXEH16qEv3UDbouMRzb+oyEBCRDN/nKcS6SkeqQ96InhZcjP91hY8ofCD7Lw==",
        "X-Received": "by 2002:a05:6e02:12e1:b0:2fc:2163:5077 with SMTP id\n l1-20020a056e0212e100b002fc21635077mr28709859iln.105.1666813600796;\n Wed, 26 Oct 2022 12:46:40 -0700 (PDT)",
        "From": "John Miller <john.miller@atomicrules.com>",
        "To": "nicolas.chautru@intel.com",
        "Cc": "dev@dpdk.org, ed.czeck@atomicrules.com, shepard.siegel@atomicrules.com,\n John Miller <john.miller@atomicrules.com>",
        "Subject": "[PATCH 10/14] baseband/ark: introduce ark baseband driver",
        "Date": "Wed, 26 Oct 2022 15:46:09 -0400",
        "Message-Id": "<20221026194613.1008232-10-john.miller@atomicrules.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20221026194613.1008232-1-john.miller@atomicrules.com>",
        "References": "<20221026194613.1008232-1-john.miller@atomicrules.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "This patch introduces the Arkville baseband device driver.\n\nSigned-off-by: John Miller <john.miller@atomicrules.com>\n---\n drivers/baseband/ark/ark_bbdev.c | 1127 ++++++++++++++++++++++++++++++\n drivers/baseband/ark/ark_bbext.h |  163 +++++\n 2 files changed, 1290 insertions(+)\n create mode 100644 drivers/baseband/ark/ark_bbdev.c\n create mode 100644 drivers/baseband/ark/ark_bbext.h",
    "diff": "diff --git a/drivers/baseband/ark/ark_bbdev.c b/drivers/baseband/ark/ark_bbdev.c\nnew file mode 100644\nindex 0000000000..8736d170d1\n--- /dev/null\n+++ b/drivers/baseband/ark/ark_bbdev.c\n@@ -0,0 +1,1127 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2016-2021 Atomic Rules LLC\n+ */\n+\n+#include <sys/stat.h>\n+#include <dlfcn.h>\n+#include <unistd.h>\n+\n+#include <rte_common.h>\n+#include <rte_pci.h>\n+#include <bus_pci_driver.h>\n+#include <rte_bbdev.h>\n+#include <rte_bbdev_pmd.h>\n+#include <rte_bus_pci.h>\n+#include <rte_devargs.h>\n+#include <rte_malloc.h>\n+#include <rte_ring.h>\n+\n+#include \"ark_common.h\"\n+#include \"ark_bbdev_common.h\"\n+#include \"ark_bbdev_custom.h\"\n+#include \"ark_ddm.h\"\n+#include \"ark_mpu.h\"\n+#include \"ark_rqp.h\"\n+#include \"ark_udm.h\"\n+#include \"ark_bbext.h\"\n+\n+#define DRIVER_NAME baseband_ark\n+\n+#define ARK_SYSCTRL_BASE  0x0\n+#define ARK_PKTGEN_BASE   0x10000\n+#define ARK_MPU_RX_BASE   0x20000\n+#define ARK_UDM_BASE      0x30000\n+#define ARK_MPU_TX_BASE   0x40000\n+#define ARK_DDM_BASE      0x60000\n+#define ARK_PKTDIR_BASE   0xa0000\n+#define ARK_PKTCHKR_BASE  0x90000\n+#define ARK_RCPACING_BASE 0xb0000\n+#define ARK_MPU_QOFFSET   0x00100\n+\n+#define BB_ARK_TX_Q_FACTOR 4\n+\n+#define ARK_RX_META_SIZE 32\n+#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)\n+#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)\n+\n+static_assert(sizeof(struct ark_rx_meta) == ARK_RX_META_SIZE, \"Unexpected struct size ark_rx_meta\");\n+static_assert(sizeof(union ark_tx_meta) == 8, \"Unexpected struct size ark_tx_meta\");\n+\n+static struct rte_pci_id pci_id_ark[] = {\n+\t{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1015)},\n+\t{RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1016)},\n+\t{.device_id = 0},\n+};\n+\n+static const struct ark_dev_caps\n+ark_device_caps[] = {\n+\t\t     SET_DEV_CAPS(0x1015, true, false),\n+\t\t     SET_DEV_CAPS(0x1016, true, false),\n+\t\t     {.device_id = 0,}\n+};\n+\n+\n+/* Forward declarations */\n+static const struct rte_bbdev_ops ark_bbdev_pmd_ops;\n+\n+static int\n+check_for_ext(struct ark_bbdevice *ark)\n+{\n+\t/* Get the env */\n+\tconst char *dllpath = getenv(\"ARK_BBEXT_PATH\");\n+\n+\tif (dllpath == NULL) {\n+\t\tARK_BBDEV_LOG(DEBUG, \"EXT NO dll path specified\\n\");\n+\t\treturn 0;\n+\t}\n+\tARK_BBDEV_LOG(NOTICE, \"EXT found dll path at %s\\n\", dllpath);\n+\n+\t/* Open and load the .so */\n+\tark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY);\n+\tif (ark->d_handle == NULL) {\n+\t\tARK_BBDEV_LOG(ERR, \"Could not load user extension %s\\n\",\n+\t\t\t    dllpath);\n+\t\treturn -1;\n+\t}\n+\tARK_BBDEV_LOG(DEBUG, \"SUCCESS: loaded user extension %s\\n\",\n+\t\t\t    dllpath);\n+\n+\t/* Get the entry points */\n+\tark->user_ext.dev_init =\n+\t\t(void *(*)(struct rte_bbdev *, void *))\n+\t\tdlsym(ark->d_handle, \"rte_pmd_ark_bbdev_init\");\n+\n+\tark->user_ext.dev_uninit =\n+\t\t(int (*)(struct rte_bbdev *, void *))\n+\t\tdlsym(ark->d_handle, \"rte_pmd_ark_dev_uninit\");\n+\tark->user_ext.dev_start =\n+\t\t(int (*)(struct rte_bbdev *, void *))\n+\t\tdlsym(ark->d_handle, \"rte_pmd_ark_bbdev_start\");\n+\tark->user_ext.dev_stop =\n+\t\t(int (*)(struct rte_bbdev *, void *))\n+\t\tdlsym(ark->d_handle, \"rte_pmd_ark_bbdev_stop\");\n+\tark->user_ext.dequeue_ldpc_dec  =\n+\t\t(int (*)(struct rte_bbdev *,\n+\t\t\t struct rte_bbdev_dec_op *,\n+\t\t\t uint32_t *,\n+\t\t\t void *))\n+\t\tdlsym(ark->d_handle, \"rte_pmd_ark_bbdev_dequeue_ldpc_dec\");\n+\tark->user_ext.enqueue_ldpc_dec  =\n+\t\t(int 
(*)(struct rte_bbdev *,\n+\t\t\t struct rte_bbdev_dec_op *,\n+\t\t\t uint32_t *,\n+\t\t\t uint8_t *,\n+\t\t\t void *))\n+\t\tdlsym(ark->d_handle, \"rte_pmd_ark_bbdev_enqueue_ldpc_dec\");\n+\tark->user_ext.dequeue_ldpc_enc  =\n+\t\t(int (*)(struct rte_bbdev *,\n+\t\t\t struct rte_bbdev_enc_op *,\n+\t\t\t uint32_t *,\n+\t\t\t void *))\n+\t\tdlsym(ark->d_handle, \"rte_pmd_ark_bbdev_dequeue_ldpc_enc\");\n+\tark->user_ext.enqueue_ldpc_enc  =\n+\t\t(int (*)(struct rte_bbdev *,\n+\t\t\t struct rte_bbdev_enc_op *,\n+\t\t\t uint32_t *,\n+\t\t\t uint8_t *,\n+\t\t\t void *))\n+\t\tdlsym(ark->d_handle, \"rte_pmd_ark_bbdev_enqueue_ldpc_enc\");\n+\n+\treturn 0;\n+}\n+\n+\n+/* queue */\n+struct ark_bbdev_queue {\n+\tstruct ark_bbdevice *ark_bbdev;\n+\n+\tstruct rte_ring *active_ops;  /* Ring for processed packets */\n+\n+\t/* RX components */\n+\t/* array of physical addresses of the mbuf data pointer */\n+\trte_iova_t *rx_paddress_q;\n+\tstruct ark_udm_t *udm;\n+\tstruct ark_mpu_t *rx_mpu;\n+\n+\t/* TX components */\n+\tunion ark_tx_meta *tx_meta_q;\n+\tstruct ark_mpu_t *tx_mpu;\n+\tstruct ark_ddm_t *ddm;\n+\n+\t/*  */\n+\tuint32_t tx_queue_mask;\n+\tuint32_t rx_queue_mask;\n+\n+\tint32_t rx_seed_index;\t\t/* step 1 set with empty mbuf */\n+\tint32_t rx_cons_index;\t\t/* step 3 consumed by driver */\n+\n+\t/* 3 indexes to the paired data rings. */\n+\tint32_t tx_prod_index;\t\t/* where to put the next one */\n+\tint32_t tx_free_index;\t\t/* local copy of tx_cons_index */\n+\n+\t/* separate cache line -- written by FPGA -- RX announce */\n+\tRTE_MARKER cacheline1 __rte_cache_min_aligned;\n+\tvolatile int32_t rx_prod_index; /* step 2 filled by FPGA */\n+\n+\t/* Separate cache line -- written by FPGA -- RX completion */\n+\tRTE_MARKER cacheline2 __rte_cache_min_aligned;\n+\tvolatile int32_t tx_cons_index; /* hw is done, can be freed */\n+} __rte_cache_aligned;\n+\n+\n+static int\n+ark_bb_hw_q_setup(struct rte_bbdev *bbdev, uint16_t q_id, uint16_t queue_size)\n+{\n+\tstruct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;\n+\n+\trte_iova_t queue_base;\n+\trte_iova_t phys_addr_q_base;\n+\trte_iova_t phys_addr_prod_index;\n+\trte_iova_t phys_addr_cons_index;\n+\n+\tif (ark_mpu_verify(q->rx_mpu, sizeof(rte_iova_t))) {\n+\t\tARK_BBDEV_LOG(ERR, \"Illegal hw/sw configuration RX queue\");\n+\t\treturn -1;\n+\t}\n+\tARK_BBDEV_LOG(DEBUG, \"ark_bb_q setup %u:%u\",\n+\t\t      bbdev->data->dev_id, q_id);\n+\n+\t/* RX MPU */\n+\tphys_addr_q_base = rte_malloc_virt2iova(q->rx_paddress_q);\n+\t/* Force TX mode on MPU to match bbdev behavior */\n+\tark_mpu_configure(q->rx_mpu, phys_addr_q_base, queue_size, 1);\n+\tark_mpu_start(q->rx_mpu);\n+\n+\t/* UDM */\n+\tqueue_base = rte_malloc_virt2iova(q);\n+\tphys_addr_prod_index = queue_base +\n+\t\toffsetof(struct ark_bbdev_queue, rx_prod_index);\n+\tark_udm_write_addr(q->udm, phys_addr_prod_index);\n+\tark_udm_queue_enable(q->udm, 1);\n+\n+\t/* TX MPU */\n+\tphys_addr_q_base = rte_malloc_virt2iova(q->tx_meta_q);\n+\tark_mpu_configure(q->tx_mpu, phys_addr_q_base,\n+\t\t\t  BB_ARK_TX_Q_FACTOR * queue_size, 1);\n+\tark_mpu_start(q->tx_mpu);\n+\n+\t/* DDM */\n+\tphys_addr_cons_index = queue_base +\n+\t\toffsetof(struct ark_bbdev_queue, tx_cons_index);\n+\tark_ddm_queue_setup(q->ddm, phys_addr_cons_index);\n+\tark_ddm_queue_reset_stats(q->ddm);\n+\n+\treturn 0;\n+}\n+\n+\n+\n+/* Setup a queue */\n+static int\n+ark_bb_q_setup(struct rte_bbdev *bbdev, uint16_t q_id,\n+\t       const struct rte_bbdev_queue_conf *queue_conf)\n+{\n+\tstruct ark_bbdev_queue *q;\n+\tstruct 
ark_bbdevice *ark_bb =  bbdev->data->dev_private;\n+\n+\tconst uint32_t queue_size = queue_conf->queue_size;\n+\tconst int socket_id = queue_conf->socket;\n+\tconst uint64_t pg_sz = sysconf(_SC_PAGESIZE);\n+\tchar ring_name[RTE_RING_NAMESIZE];\n+\n+\t/* Configuration checks */\n+\tif (!rte_is_power_of_2(queue_size)) {\n+\t\tARK_BBDEV_LOG(ERR,\n+\t\t\t      \"Configuration queue size\"\n+\t\t\t      \" must be power of two %u\",\n+\t\t\t      queue_size);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {\n+\t\tARK_BBDEV_LOG(ERR,\n+\t\t\t      \"Error: Ark bbdev requires head room > %d bytes (%s)\",\n+\t\t\t      ARK_RX_META_SIZE, __func__);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Allocate the queue data structure. */\n+\tq = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),\n+\t\t\tRTE_CACHE_LINE_SIZE, queue_conf->socket);\n+\tif (q == NULL) {\n+\t\tARK_BBDEV_LOG(ERR, \"Failed to allocate queue memory\");\n+\t\treturn -ENOMEM;\n+\t}\n+\tbbdev->data->queues[q_id].queue_private = q;\n+\tq->ark_bbdev = ark_bb;\n+\n+\t/* RING */\n+\tsnprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME) \"%u:%u\",\n+\t\t bbdev->data->dev_id, q_id);\n+\tq->active_ops = rte_ring_create(ring_name,\n+\t\t\t\t\tqueue_size,\n+\t\t\t\t\tqueue_conf->socket,\n+\t\t\t\t\tRING_F_SP_ENQ | RING_F_SC_DEQ);\n+\tif (q->active_ops == NULL) {\n+\t\tARK_BBDEV_LOG(ERR, \"Failed to create ring\");\n+\t\tgoto free_all;\n+\t}\n+\n+\tq->rx_queue_mask = queue_size - 1;\n+\tq->tx_queue_mask = (BB_ARK_TX_Q_FACTOR * queue_size) - 1;\n+\n+\t/* Each mbuf requires 2 to 4 objects, factor by BB_ARK_TX_Q_FACTOR */\n+\tq->tx_meta_q =\n+\t\trte_zmalloc_socket(\"Ark_bb_txqueue meta\",\n+\t\t\t\t   queue_size * BB_ARK_TX_Q_FACTOR *\n+\t\t\t\t   sizeof(union ark_tx_meta),\n+\t\t\t\t   pg_sz,\n+\t\t\t\t   socket_id);\n+\n+\tif (q->tx_meta_q == 0) {\n+\t\tARK_BBDEV_LOG(ERR, \"Failed to allocate \"\n+\t\t\t      \"queue memory in %s\", __func__);\n+\t\tgoto free_all;\n+\t}\n+\n+\tq->ddm = RTE_PTR_ADD(ark_bb->ddm.v, q_id * ARK_DDM_QOFFSET);\n+\tq->tx_mpu = RTE_PTR_ADD(ark_bb->mputx.v, q_id * ARK_MPU_QOFFSET);\n+\n+\tq->rx_paddress_q =\n+\t\trte_zmalloc_socket(\"ark_bb_rx_paddress_q\",\n+\t\t\t\t   queue_size * sizeof(rte_iova_t),\n+\t\t\t\t   pg_sz,\n+\t\t\t\t   socket_id);\n+\n+\tif (q->rx_paddress_q == 0) {\n+\t\tARK_BBDEV_LOG(ERR,\n+\t\t\t      \"Failed to allocate queue memory in %s\",\n+\t\t\t      __func__);\n+\t\tgoto free_all;\n+\t}\n+\tq->udm = RTE_PTR_ADD(ark_bb->udm.v, q_id * ARK_UDM_QOFFSET);\n+\tq->rx_mpu = RTE_PTR_ADD(ark_bb->mpurx.v, q_id * ARK_MPU_QOFFSET);\n+\n+\t/* Structure have been configured, set the hardware */\n+\treturn ark_bb_hw_q_setup(bbdev, q_id, queue_size);\n+\n+free_all:\n+\trte_free(q->tx_meta_q);\n+\trte_free(q->rx_paddress_q);\n+\trte_free(q);\n+\treturn -EFAULT;\n+}\n+\n+\n+/* Release queue */\n+static int\n+ark_bb_q_release(struct rte_bbdev *bbdev, uint16_t q_id)\n+{\n+\tstruct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;\n+\n+\tark_mpu_dump(q->rx_mpu, \"rx_MPU release\", q_id);\n+\tark_mpu_dump(q->tx_mpu, \"tx_MPU release\", q_id);\n+\n+\trte_ring_free(q->active_ops);\n+\trte_free(q->tx_meta_q);\n+\trte_free(q->rx_paddress_q);\n+\trte_free(q);\n+\tbbdev->data->queues[q_id].queue_private = NULL;\n+\n+\tARK_BBDEV_LOG(DEBUG, \"released device queue %u:%u\",\n+\t\t      bbdev->data->dev_id, q_id);\n+\treturn 0;\n+}\n+\n+static int\n+ark_bbdev_start(struct rte_bbdev *bbdev)\n+{\n+\tstruct ark_bbdevice *ark_bb = bbdev->data->dev_private;\n+\n+\tARK_BBDEV_LOG(DEBUG, 
\"Starting device %u\", bbdev->data->dev_id);\n+\tif (ark_bb->started)\n+\t\treturn 0;\n+\n+\t/* User start hook */\n+\tif (ark_bb->user_ext.dev_start)\n+\t\tark_bb->user_ext.dev_start(bbdev,\n+\t\t\t\t\t   ark_bb->user_data);\n+\n+\tark_bb->started = 1;\n+\n+\tif (ark_bb->start_pg)\n+\t\tark_pktchkr_run(ark_bb->pc);\n+\n+\tif (ark_bb->start_pg) {\n+\t\tpthread_t thread;\n+\n+\t\t/* Delay packet generator start allow the hardware to be ready\n+\t\t * This is only used for sanity checking with internal generator\n+\t\t */\n+\t\tif (pthread_create(&thread, NULL,\n+\t\t\t\t   ark_pktgen_delay_start, ark_bb->pg)) {\n+\t\t\tARK_BBDEV_LOG(ERR, \"Could not create pktgen \"\n+\t\t\t\t    \"starter thread\");\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\treturn 0;\n+}\n+\n+\n+static void\n+ark_bbdev_stop(struct rte_bbdev *bbdev)\n+{\n+\tstruct ark_bbdevice *ark_bb = bbdev->data->dev_private;\n+\n+\tARK_BBDEV_LOG(DEBUG, \"Stopping device %u\", bbdev->data->dev_id);\n+\n+\tif (!ark_bb->started)\n+\t\treturn;\n+\n+\t/* Stop the packet generator */\n+\tif (ark_bb->start_pg)\n+\t\tark_pktgen_pause(ark_bb->pg);\n+\n+\t/* STOP RX Side */\n+\tark_udm_dump_stats(ark_bb->udm.v, \"Post stop\");\n+\n+\t/* Stop the packet checker if it is running */\n+\tif (ark_bb->start_pg) {\n+\t\tark_pktchkr_dump_stats(ark_bb->pc);\n+\t\tark_pktchkr_stop(ark_bb->pc);\n+\t}\n+\n+\t/* User stop hook */\n+\tif (ark_bb->user_ext.dev_stop)\n+\t\tark_bb->user_ext.dev_stop(bbdev,\n+\t\t\t\t\t  ark_bb->user_data);\n+\n+}\n+\n+\n+static int\n+ark_bb_q_start(struct rte_bbdev *bbdev, uint16_t q_id)\n+{\n+\tstruct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;\n+\tARK_BBDEV_LOG(DEBUG, \"ark_bb_q start %u:%u\", bbdev->data->dev_id, q_id);\n+\tark_ddm_queue_enable(q->ddm, 1);\n+\tark_udm_queue_enable(q->udm, 1);\n+\tark_mpu_start(q->tx_mpu);\n+\tark_mpu_start(q->rx_mpu);\n+\treturn 0;\n+}\n+static int\n+ark_bb_q_stop(struct rte_bbdev *bbdev, uint16_t q_id)\n+{\n+\tstruct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;\n+\tint cnt = 0;\n+\n+\tARK_BBDEV_LOG(DEBUG, \"ark_bb_q stop %u:%u\", bbdev->data->dev_id, q_id);\n+\n+\twhile (q->tx_cons_index != q->tx_prod_index) {\n+\t\tusleep(100);\n+\t\tif (cnt++ > 10000) {\n+\t\t\tfprintf(stderr, \"XXXX %s(%u, %u %u) %d Failured\\n\", __func__, q_id,\n+\t\t\t\tq->tx_cons_index, q->tx_prod_index,\n+\t\t\t\t(int32_t) (q->tx_prod_index - q->tx_cons_index));\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n+\n+\tark_mpu_stop(q->tx_mpu);\n+\tark_mpu_stop(q->rx_mpu);\n+\tark_udm_queue_enable(q->udm, 0);\n+\tark_ddm_queue_enable(q->ddm, 0);\n+\treturn 0;\n+}\n+\n+\n+\n+\n+/* ************************************************************************* */\n+/* Common function for all enqueue and dequeue ops */\n+static inline void\n+ark_bb_enqueue_desc_fill(struct ark_bbdev_queue *q,\n+\t\t\t struct rte_mbuf *mbuf,\n+\t\t\t uint16_t offset, /* Extra offset */\n+\t\t\t uint8_t  flags,\n+\t\t\t uint32_t *meta,\n+\t\t\t uint8_t  meta_cnt /* 0, 1 or 2 */\n+\t\t\t )\n+{\n+\tunion ark_tx_meta *tx_meta;\n+\tint32_t tx_idx;\n+\tuint8_t m;\n+\n+\t/* Header */\n+\ttx_idx = q->tx_prod_index & q->tx_queue_mask;\n+\ttx_meta = &q->tx_meta_q[tx_idx];\n+\ttx_meta->data_len = rte_pktmbuf_data_len(mbuf) - offset;\n+\ttx_meta->flags = flags;\n+\ttx_meta->meta_cnt = meta_cnt;\n+\ttx_meta->user1 = *meta++;\n+\tq->tx_prod_index++;\n+\n+\tfor (m = 0; m < meta_cnt; m++) {\n+\t\ttx_idx = q->tx_prod_index & q->tx_queue_mask;\n+\t\ttx_meta = &q->tx_meta_q[tx_idx];\n+\t\ttx_meta->usermeta0 = *meta++;\n+\t\ttx_meta->usermeta1 = 
*meta++;\n+\t\tq->tx_prod_index++;\n+\t}\n+\n+\ttx_idx = q->tx_prod_index & q->tx_queue_mask;\n+\ttx_meta = &q->tx_meta_q[tx_idx];\n+\ttx_meta->physaddr = rte_mbuf_data_iova(mbuf) + offset;\n+\tq->tx_prod_index++;\n+}\n+\n+static inline void\n+ark_bb_enqueue_segmented_pkt(struct ark_bbdev_queue *q,\n+\t\t\t     struct rte_mbuf *mbuf,\n+\t\t\t     uint16_t offset,\n+\t\t\t     uint32_t *meta, uint8_t meta_cnt)\n+{\n+\tstruct rte_mbuf *next;\n+\tuint8_t flags = ARK_DDM_SOP;\n+\n+\twhile (mbuf != NULL) {\n+\t\tnext = mbuf->next;\n+\t\tflags |= (next == NULL) ? ARK_DDM_EOP : 0;\n+\n+\t\tark_bb_enqueue_desc_fill(q, mbuf, offset, flags,\n+\t\t\t\t\t meta, meta_cnt);\n+\n+\t\tflags &= ~ARK_DDM_SOP;\t/* drop SOP flags */\n+\t\tmeta_cnt = 0;\n+\t\toffset = 0;\n+\n+\t\tmbuf = next;\n+\t}\n+}\n+\n+static inline int\n+ark_bb_enqueue_common(struct ark_bbdev_queue *q,\n+\t\t      struct rte_mbuf *m_in, struct rte_mbuf *m_out,\n+\t\t      uint16_t offset,\n+\t\t      uint32_t *meta, uint8_t meta_cnt)\n+{\n+\tint32_t free_queue_space;\n+\tint32_t rx_idx;\n+\n+\t/* TX side limit */\n+\tfree_queue_space = q->tx_queue_mask -\n+\t\t(q->tx_prod_index - q->tx_free_index);\n+\tif (unlikely(free_queue_space < (2 + (2 * m_in->nb_segs))))\n+\t\treturn 1;\n+\n+\t/* RX side limit */\n+\tfree_queue_space = q->rx_queue_mask -\n+\t\t(q->rx_seed_index - q->rx_cons_index);\n+\tif (unlikely(free_queue_space < m_out->nb_segs))\n+\t\treturn 1;\n+\n+\tif (unlikely(m_in->nb_segs > 1))\n+\t\tark_bb_enqueue_segmented_pkt(q, m_in, offset, meta, meta_cnt);\n+\telse\n+\t\tark_bb_enqueue_desc_fill(q, m_in, offset,\n+\t\t\t\t\t ARK_DDM_SOP | ARK_DDM_EOP,\n+\t\t\t\t\t meta, meta_cnt);\n+\n+\t/* We assume that the return mubf has exactly enough segments for\n+\t * return data, which is 2048 bytes per segment.\n+\t */\n+\tdo {\n+\t\trx_idx = q->rx_seed_index & q->rx_queue_mask;\n+\t\tq->rx_paddress_q[rx_idx] = m_out->buf_iova;\n+\t\tq->rx_seed_index++;\n+\t\tm_out = m_out->next;\n+\t} while (m_out);\n+\n+\treturn 0;\n+}\n+\n+static inline void\n+ark_bb_enqueue_finalize(struct rte_bbdev_queue_data *q_data,\n+\t\t\tstruct ark_bbdev_queue *q,\n+\t\t\tvoid **ops,\n+\t\t\tuint16_t nb_ops, uint16_t nb)\n+{\n+\t/* BBDEV global stats */\n+\t/* These are not really errors, not sure why bbdev counts these. 
*/\n+\tq_data->queue_stats.enqueue_err_count += nb_ops - nb;\n+\tq_data->queue_stats.enqueued_count += nb;\n+\n+\t/* Notify HW that  */\n+\tif (unlikely(nb == 0))\n+\t\treturn;\n+\n+\tark_mpu_set_producer(q->tx_mpu, q->tx_prod_index);\n+\tark_mpu_set_producer(q->rx_mpu, q->rx_seed_index);\n+\n+\t/* Queue info for dequeue-side processing */\n+\trte_ring_enqueue_burst(q->active_ops,\n+\t\t\t       (void **)ops, nb, NULL);\n+}\n+\n+static int\n+ark_bb_dequeue_segmented(struct rte_mbuf *mbuf0,\n+\t\t\t int32_t *prx_cons_index,\n+\t\t\t uint16_t pkt_len\n+\t\t\t )\n+{\n+\tstruct rte_mbuf *mbuf;\n+\tuint16_t data_len;\n+\tuint16_t remaining;\n+\tuint16_t segments = 1;\n+\n+\tdata_len = RTE_MIN(pkt_len, RTE_MBUF_DEFAULT_DATAROOM);\n+\tremaining = pkt_len - data_len;\n+\n+\tmbuf = mbuf0;\n+\tmbuf0->data_len = data_len;\n+\twhile (remaining) {\n+\t\tsegments += 1;\n+\t\tmbuf = mbuf->next;\n+\t\tif (unlikely(mbuf == 0)) {\n+\t\t\tARK_BBDEV_LOG(CRIT, \"Expected chained mbuf with \"\n+\t\t\t\t      \"at least %d segments for dequeue \"\n+\t\t\t\t      \"of packet length %d\",\n+\t\t\t\t      segments, pkt_len);\n+\t\t\treturn 1;\n+\t\t}\n+\n+\t\tdata_len = RTE_MIN(remaining,\n+\t\t\t\t   RTE_MBUF_DEFAULT_DATAROOM);\n+\t\tremaining -= data_len;\n+\n+\t\tmbuf->data_len = data_len;\n+\t\t*prx_cons_index += 1;\n+\t}\n+\n+\tif (mbuf->next != 0) {\n+\t\tARK_BBDEV_LOG(CRIT, \"Expected chained mbuf with \"\n+\t\t\t      \"at exactly %d segments for dequeue \"\n+\t\t\t      \"of packet length %d. Found %d \"\n+\t\t\t      \"segments\",\n+\t\t\t      segments, pkt_len, mbuf0->nb_segs);\n+\t\treturn 1;\n+\t}\n+\treturn 0;\n+}\n+\n+/* ************************************************************************* */\n+/* LDPC Decode ops */\n+static int16_t\n+ark_bb_enqueue_ldpc_dec_one_op(struct ark_bbdev_queue *q,\n+\t\t\t       struct rte_bbdev_dec_op *this_op)\n+{\n+\tstruct rte_bbdev_op_ldpc_dec *ldpc_dec_op = &this_op->ldpc_dec;\n+\tstruct rte_mbuf *m_in = ldpc_dec_op->input.data;\n+\tstruct rte_mbuf *m_out = ldpc_dec_op->hard_output.data;\n+\tuint16_t offset = ldpc_dec_op->input.offset;\n+\tuint32_t meta[5] = {0};\n+\tuint8_t meta_cnt = 0;\n+\n+\tif (q->ark_bbdev->user_ext.enqueue_ldpc_dec) {\n+\t\tif (q->ark_bbdev->user_ext.enqueue_ldpc_dec(q->ark_bbdev->bbdev,\n+\t\t\t\t\t\t\t    this_op,\n+\t\t\t\t\t\t\t    meta,\n+\t\t\t\t\t\t\t    &meta_cnt,\n+\t\t\t\t\t\t\t    q->ark_bbdev->user_data)) {\n+\t\t\tARK_BBDEV_LOG(ERR, \"%s failed\", __func__);\n+\t\t\treturn 1;\n+\t\t}\n+\t}\n+\n+\treturn ark_bb_enqueue_common(q, m_in, m_out, offset, meta, meta_cnt);\n+}\n+\n+/* Enqueue LDPC Decode -- burst */\n+static uint16_t\n+ark_bb_enqueue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,\n+\t\t\t    struct rte_bbdev_dec_op **ops, uint16_t nb_ops)\n+{\n+\tstruct ark_bbdev_queue *q = q_data->queue_private;\n+\tunsigned int max_enq;\n+\tuint16_t nb;\n+\n+\tmax_enq = rte_ring_free_count(q->active_ops);\n+\tmax_enq = RTE_MIN(max_enq, nb_ops);\n+\tfor (nb = 0; nb < max_enq; nb++) {\n+\t\tif (ark_bb_enqueue_ldpc_dec_one_op(q, ops[nb]))\n+\t\t\tbreak;\n+\t}\n+\n+\tark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);\n+\treturn nb;\n+}\n+\n+\n+/* ************************************************************************* */\n+/* Dequeue LDPC Decode -- burst */\n+static uint16_t\n+ark_bb_dequeue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,\n+\t\t\t    struct rte_bbdev_dec_op **ops, uint16_t nb_ops)\n+{\n+\tstruct ark_bbdev_queue *q = q_data->queue_private;\n+\tstruct rte_mbuf *mbuf;\n+\tstruct rte_bbdev_dec_op 
*this_op;\n+\tstruct ark_rx_meta *meta;\n+\tuint32_t *usermeta;\n+\n+\tuint16_t nb = 0;\n+\tint32_t prod_index = q->rx_prod_index;\n+\tint32_t cons_index = q->rx_cons_index;\n+\n+\tq->tx_free_index = q->tx_cons_index;\n+\n+\twhile ((prod_index - cons_index) > 0) {\n+\t\tif (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {\n+\t\t\tARK_BBDEV_LOG(ERR, \"%s data ready but no op!\",\n+\t\t\t\t      __func__);\n+\t\t\tq_data->queue_stats.dequeue_err_count += 1;\n+\t\t\tbreak;\n+\t\t}\n+\t\tops[nb] = this_op;\n+\n+\t\tmbuf = this_op->ldpc_dec.hard_output.data;\n+\n+\t\t/* META DATA embedded in headroom */\n+\t\tmeta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);\n+\n+\t\tmbuf->pkt_len = meta->pkt_len;\n+\t\tmbuf->data_len = meta->pkt_len;\n+\n+\t\tif (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {\n+\t\t\tif (ark_bb_dequeue_segmented(mbuf, &cons_index,\n+\t\t\t\t\t\t     meta->pkt_len))\n+\t\t\t\tq_data->queue_stats.dequeue_err_count += 1;\n+\t\t} else if (mbuf->next != 0) {\n+\t\t\tARK_BBDEV_LOG(CRIT, \"Expected mbuf with \"\n+\t\t\t\t      \"at exactly 1 segments for dequeue \"\n+\t\t\t\t      \"of packet length %d. Found %d \"\n+\t\t\t\t      \"segments\",\n+\t\t\t\t      meta->pkt_len, mbuf->nb_segs);\n+\t\t\tq_data->queue_stats.dequeue_err_count += 1;\n+\t\t}\n+\n+\t\tusermeta = meta->user_meta;\n+\n+\t\t/* User's meta move from Arkville HW to bbdev OP */\n+\t\tif (q->ark_bbdev->user_ext.dequeue_ldpc_dec) {\n+\t\t\tif (q->ark_bbdev->user_ext.dequeue_ldpc_dec(q->ark_bbdev->bbdev,\n+\t\t\t\t\t\t\t\t    this_op,\n+\t\t\t\t\t\t\t\t    usermeta,\n+\t\t\t\t\t\t\t\t    q->ark_bbdev->user_data)) {\n+\t\t\t\tARK_BBDEV_LOG(ERR, \"%s failed\", __func__);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\t\t}\n+\n+\t\tnb++;\n+\t\tcons_index++;\n+\t\tif (nb >= nb_ops)\n+\t\t\tbreak;\n+\t}\n+\n+\tq->rx_cons_index = cons_index;\n+\n+\t/* BBdev stats */\n+\tq_data->queue_stats.dequeued_count += nb;\n+\n+\treturn nb;\n+}\n+\n+/**************************************************************************/\n+/* Enqueue LDPC Encode */\n+static int16_t\n+ark_bb_enqueue_ldpc_enc_one_op(struct ark_bbdev_queue *q,\n+\t\t\t       struct rte_bbdev_enc_op *this_op)\n+{\n+\tstruct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &this_op->ldpc_enc;\n+\tstruct rte_mbuf *m_in = ldpc_enc_op->input.data;\n+\tstruct rte_mbuf *m_out = ldpc_enc_op->output.data;\n+\tuint16_t offset = ldpc_enc_op->input.offset;\n+\tuint32_t meta[5] = {0};\n+\tuint8_t meta_cnt = 0;\n+\n+\t/* User's meta move from bbdev op to Arkville HW */\n+\tif (q->ark_bbdev->user_ext.enqueue_ldpc_enc) {\n+\t\tif (q->ark_bbdev->user_ext.enqueue_ldpc_enc(q->ark_bbdev->bbdev,\n+\t\t\t\t\t\t\t    this_op,\n+\t\t\t\t\t\t\t    meta,\n+\t\t\t\t\t\t\t    &meta_cnt,\n+\t\t\t\t\t\t\t    q->ark_bbdev->user_data)) {\n+\t\t\tARK_BBDEV_LOG(ERR, \"%s failed\", __func__);\n+\t\t\treturn 1;\n+\t\t}\n+\t}\n+\n+\treturn ark_bb_enqueue_common(q, m_in, m_out, offset, meta, meta_cnt);\n+}\n+\n+/* Enqueue LDPC Encode -- burst */\n+static uint16_t\n+ark_bb_enqueue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,\n+\t\t\t    struct rte_bbdev_enc_op **ops, uint16_t nb_ops)\n+{\n+\tstruct ark_bbdev_queue *q = q_data->queue_private;\n+\tunsigned int max_enq;\n+\tuint16_t nb;\n+\n+\tmax_enq = rte_ring_free_count(q->active_ops);\n+\tmax_enq = RTE_MIN(max_enq, nb_ops);\n+\tfor (nb = 0; nb < max_enq; nb++) {\n+\t\tif (ark_bb_enqueue_ldpc_enc_one_op(q, ops[nb]))\n+\t\t\tbreak;\n+\t}\n+\n+\tark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);\n+\treturn nb;\n+}\n+\n+/* Dequeue LDPC Encode -- burst 
*/\n+static uint16_t\n+ark_bb_dequeue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,\n+\t\t\t    struct rte_bbdev_enc_op **ops, uint16_t nb_ops)\n+{\n+\tstruct ark_bbdev_queue *q = q_data->queue_private;\n+\tstruct rte_mbuf *mbuf;\n+\tstruct rte_bbdev_enc_op *this_op;\n+\tstruct ark_rx_meta *meta;\n+\tuint32_t *usermeta;\n+\n+\tuint16_t nb = 0;\n+\tint32_t prod_index = q->rx_prod_index;\n+\tint32_t cons_index = q->rx_cons_index;\n+\n+\tq->tx_free_index = q->tx_cons_index;\n+\n+\twhile ((prod_index - cons_index) > 0) {\n+\t\tif (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {\n+\t\t\tARK_BBDEV_LOG(ERR, \"%s data ready but no op!\",\n+\t\t\t\t      __func__);\n+\t\t\tq_data->queue_stats.dequeue_err_count += 1;\n+\t\t\tbreak;\n+\t\t}\n+\t\tops[nb] = this_op;\n+\n+\t\tmbuf = this_op->ldpc_enc.output.data;\n+\n+\t\t/* META DATA embedded in headroom */\n+\t\tmeta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);\n+\n+\t\tmbuf->pkt_len = meta->pkt_len;\n+\t\tmbuf->data_len = meta->pkt_len;\n+\t\tusermeta = meta->user_meta;\n+\n+\t\tif (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {\n+\t\t\tif (ark_bb_dequeue_segmented(mbuf, &cons_index,\n+\t\t\t\t\t\t     meta->pkt_len))\n+\t\t\t\tq_data->queue_stats.dequeue_err_count += 1;\n+\t\t} else if (mbuf->next != 0) {\n+\t\t\tARK_BBDEV_LOG(CRIT, \"Expected mbuf with \"\n+\t\t\t\t      \"at exactly 1 segments for dequeue \"\n+\t\t\t\t      \"of packet length %d. Found %d \"\n+\t\t\t\t      \"segments\",\n+\t\t\t\t      meta->pkt_len, mbuf->nb_segs);\n+\t\t\tq_data->queue_stats.dequeue_err_count += 1;\n+\t\t}\n+\n+\t\t/* User's meta move from Arkville HW to bbdev OP */\n+\t\tif (q->ark_bbdev->user_ext.dequeue_ldpc_enc) {\n+\t\t\tif (q->ark_bbdev->user_ext.dequeue_ldpc_enc(q->ark_bbdev->bbdev,\n+\t\t\t\t\t\t\t\t    this_op,\n+\t\t\t\t\t\t\t\t    usermeta,\n+\t\t\t\t\t\t\t\t    q->ark_bbdev->user_data)) {\n+\t\t\t\tARK_BBDEV_LOG(ERR, \"%s failed\", __func__);\n+\t\t\t\treturn 1;\n+\t\t\t}\n+\t\t}\n+\n+\t\tnb++;\n+\t\tcons_index++;\n+\t\tif (nb >= nb_ops)\n+\t\t\tbreak;\n+\t}\n+\n+\tq->rx_cons_index = cons_index;\n+\n+\t/* BBdev stats */\n+\tq_data->queue_stats.dequeued_count += nb;\n+\n+\treturn nb;\n+}\n+\n+\n+/**************************************************************************/\n+/*\n+ *Initial device hardware configuration when device is opened\n+ * setup the DDM, and UDM; called once per PCIE device\n+ */\n+static int\n+ark_bb_config_device(struct ark_bbdevice *ark_bb)\n+{\n+\tuint16_t num_q, i;\n+\tstruct ark_mpu_t *mpu;\n+\n+\t/*\n+\t * Make sure that the packet director, generator and checker are in a\n+\t * known state\n+\t */\n+\tark_bb->start_pg = 0;\n+\tark_bb->pg = ark_pktgen_init(ark_bb->pktgen.v, 0, 1);\n+\tif (ark_bb->pg == NULL)\n+\t\treturn -1;\n+\tark_pktgen_reset(ark_bb->pg);\n+\tark_bb->pc = ark_pktchkr_init(ark_bb->pktchkr.v, 0, 1);\n+\tif (ark_bb->pc == NULL)\n+\t\treturn -1;\n+\tark_pktchkr_stop(ark_bb->pc);\n+\tark_bb->pd = ark_pktdir_init(ark_bb->pktdir.v);\n+\tif (ark_bb->pd == NULL)\n+\t\treturn -1;\n+\n+\t/* Verify HW */\n+\tif (ark_udm_verify(ark_bb->udm.v))\n+\t\treturn -1;\n+\tif (ark_ddm_verify(ark_bb->ddm.v))\n+\t\treturn -1;\n+\n+\t/* MPU reset */\n+\tmpu = ark_bb->mpurx.v;\n+\tnum_q = ark_api_num_queues(mpu);\n+\tark_bb->max_nb_queues = num_q;\n+\n+\tfor (i = 0; i < num_q; i++) {\n+\t\tark_mpu_reset(mpu);\n+\t\tmpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);\n+\t}\n+\n+\tark_udm_configure(ark_bb->udm.v,\n+\t\t\t  RTE_PKTMBUF_HEADROOM,\n+\t\t\t  RTE_MBUF_DEFAULT_DATAROOM);\n+\n+\tmpu = ark_bb->mputx.v;\n+\tnum_q = 
ark_api_num_queues(mpu);\n+\tfor (i = 0; i < num_q; i++) {\n+\t\tark_mpu_reset(mpu);\n+\t\tmpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);\n+\t}\n+\n+\tark_rqp_stats_reset(ark_bb->rqpacing);\n+\n+\tARK_BBDEV_LOG(INFO, \"packet director set to 0x%x\", ark_bb->pkt_dir_v);\n+\tark_pktdir_setup(ark_bb->pd, ark_bb->pkt_dir_v);\n+\n+\tif (ark_bb->pkt_gen_args[0]) {\n+\t\tARK_BBDEV_LOG(INFO, \"Setting up the packet generator\");\n+\t\tark_pktgen_parse(ark_bb->pkt_gen_args);\n+\t\tark_pktgen_reset(ark_bb->pg);\n+\t\tark_pktgen_setup(ark_bb->pg);\n+\t\tark_bb->start_pg = 1;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+ark_bbdev_init(struct rte_bbdev *bbdev, struct rte_pci_driver *pci_drv)\n+{\n+\tstruct ark_bbdevice *ark_bb = bbdev->data->dev_private;\n+\tstruct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(bbdev->device);\n+\tbool rqpacing = false;\n+\tint p;\n+\tark_bb->bbdev = bbdev;\n+\n+\tRTE_SET_USED(pci_drv);\n+\n+\tark_bb->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;\n+\tark_bb->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;\n+\n+\tark_bb->sysctrl.v  = (void *)&ark_bb->bar0[ARK_SYSCTRL_BASE];\n+\tark_bb->mpurx.v  = (void *)&ark_bb->bar0[ARK_MPU_RX_BASE];\n+\tark_bb->udm.v  = (void *)&ark_bb->bar0[ARK_UDM_BASE];\n+\tark_bb->mputx.v  = (void *)&ark_bb->bar0[ARK_MPU_TX_BASE];\n+\tark_bb->ddm.v  = (void *)&ark_bb->bar0[ARK_DDM_BASE];\n+\tark_bb->pktdir.v  = (void *)&ark_bb->bar0[ARK_PKTDIR_BASE];\n+\tark_bb->pktgen.v  = (void *)&ark_bb->bar0[ARK_PKTGEN_BASE];\n+\tark_bb->pktchkr.v  = (void *)&ark_bb->bar0[ARK_PKTCHKR_BASE];\n+\n+\tp = 0;\n+\twhile (ark_device_caps[p].device_id != 0) {\n+\t\tif (pci_dev->id.device_id == ark_device_caps[p].device_id) {\n+\t\t\trqpacing = ark_device_caps[p].caps.rqpacing;\n+\t\t\tbreak;\n+\t\t}\n+\t\tp++;\n+\t}\n+\n+\tif (rqpacing)\n+\t\tark_bb->rqpacing =\n+\t\t\t(struct ark_rqpace_t *)(ark_bb->bar0 + ARK_RCPACING_BASE);\n+\telse\n+\t\tark_bb->rqpacing = NULL;\n+\n+\t/* Check to see if there is an extension that we need to load */\n+\tif (check_for_ext(ark_bb))\n+\t\treturn -1;\n+\n+\tark_bb->started = 0;\n+\n+\tARK_BBDEV_LOG(INFO, \"Sys Ctrl Const = 0x%x  HW Commit_ID: %08x\",\n+\t\t      ark_bb->sysctrl.t32[4],\n+\t\t      rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));\n+\tARK_BBDEV_LOG(INFO, \"Arkville HW Commit_ID: %08x\",\n+\t\t    rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));\n+\n+\t/* If HW sanity test fails, return an error */\n+\tif (ark_bb->sysctrl.t32[4] != 0xcafef00d) {\n+\t\tARK_BBDEV_LOG(ERR,\n+\t\t\t      \"HW Sanity test has failed, expected constant\"\n+\t\t\t      \" 0x%x, read 0x%x (%s)\",\n+\t\t\t      0xcafef00d,\n+\t\t\t      ark_bb->sysctrl.t32[4], __func__);\n+\t\treturn -1;\n+\t}\n+\n+\treturn ark_bb_config_device(ark_bb);\n+}\n+\n+static int\n+ark_bbdev_uninit(struct rte_bbdev *bbdev)\n+{\n+\tstruct ark_bbdevice *ark_bb = bbdev->data->dev_private;\n+\n+\tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n+\t\treturn 0;\n+\n+\tark_pktgen_uninit(ark_bb->pg);\n+\tark_pktchkr_uninit(ark_bb->pc);\n+\n+\treturn 0;\n+}\n+\n+static int\n+ark_bbdev_probe(struct rte_pci_driver *pci_drv,\n+\t\tstruct rte_pci_device *pci_dev)\n+{\n+\tstruct rte_bbdev *bbdev = NULL;\n+\tchar dev_name[RTE_BBDEV_NAME_MAX_LEN];\n+\tstruct ark_bbdevice *ark_bb;\n+\n+\tif (pci_dev == NULL)\n+\t\treturn -EINVAL;\n+\n+\trte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));\n+\n+\t/* Allocate memory to be used privately by drivers */\n+\tbbdev = rte_bbdev_allocate(pci_dev->device.name);\n+\tif (bbdev == NULL)\n+\t\treturn -ENODEV;\n+\n+\t/* allocate device private 
memory */\n+\tbbdev->data->dev_private = rte_zmalloc_socket(dev_name,\n+\t\t\tsizeof(struct ark_bbdevice),\n+\t\t\tRTE_CACHE_LINE_SIZE,\n+\t\t\tpci_dev->device.numa_node);\n+\n+\tif (bbdev->data->dev_private == NULL) {\n+\t\tARK_BBDEV_LOG(CRIT,\n+\t\t\t\t\"Allocate of %zu bytes for device \\\"%s\\\" failed\",\n+\t\t\t\tsizeof(struct ark_bbdevice), dev_name);\n+\t\t\t\trte_bbdev_release(bbdev);\n+\t\t\treturn -ENOMEM;\n+\t}\n+\tark_bb = bbdev->data->dev_private;\n+\t/* Initialize ark_bb */\n+\tark_bb->pkt_dir_v = 0x00110110;\n+\n+\t/* Fill HW specific part of device structure */\n+\tbbdev->device = &pci_dev->device;\n+\tbbdev->intr_handle = NULL;\n+\tbbdev->data->socket_id = pci_dev->device.numa_node;\n+\tbbdev->dev_ops = &ark_bbdev_pmd_ops;\n+\tif (pci_dev->device.devargs)\n+\t\tparse_ark_bbdev_params(pci_dev->device.devargs->args, ark_bb);\n+\n+\n+\t/* Device specific initialization */\n+\tif (ark_bbdev_init(bbdev, pci_drv))\n+\t\treturn -EIO;\n+\tif (ark_bbdev_start(bbdev))\n+\t\treturn -EIO;\n+\n+\t/* Core operations LDPC encode amd decode */\n+\tbbdev->enqueue_ldpc_enc_ops = ark_bb_enqueue_ldpc_enc_ops;\n+\tbbdev->dequeue_ldpc_enc_ops = ark_bb_dequeue_ldpc_enc_ops;\n+\tbbdev->enqueue_ldpc_dec_ops = ark_bb_enqueue_ldpc_dec_ops;\n+\tbbdev->dequeue_ldpc_dec_ops = ark_bb_dequeue_ldpc_dec_ops;\n+\n+\tARK_BBDEV_LOG(DEBUG, \"bbdev id = %u [%s]\",\n+\t\t      bbdev->data->dev_id, dev_name);\n+\n+\treturn 0;\n+}\n+\n+/* Uninitialize device */\n+static int\n+ark_bbdev_remove(struct rte_pci_device *pci_dev)\n+{\n+\tstruct rte_bbdev *bbdev;\n+\tint ret;\n+\n+\tif (pci_dev == NULL)\n+\t\treturn -EINVAL;\n+\n+\t/* Find device */\n+\tbbdev = rte_bbdev_get_named_dev(pci_dev->device.name);\n+\tif (bbdev == NULL) {\n+\t\tARK_BBDEV_LOG(CRIT,\n+\t\t\t\t\"Couldn't find HW dev \\\"%s\\\" to Uninitialize it\",\n+\t\t\t\tpci_dev->device.name);\n+\t\treturn -ENODEV;\n+\t}\n+\n+\t/* Arkville device close */\n+\tark_bbdev_uninit(bbdev);\n+\trte_free(bbdev->data->dev_private);\n+\n+\t/* Close device */\n+\tret = rte_bbdev_close(bbdev->data->dev_id);\n+\tif (ret < 0)\n+\t\tARK_BBDEV_LOG(ERR,\n+\t\t\t\t\"Device %i failed to close during remove: %i\",\n+\t\t\t\tbbdev->data->dev_id, ret);\n+\n+\treturn rte_bbdev_release(bbdev);\n+}\n+\n+/* Operation for the PMD */\n+static const struct rte_bbdev_ops ark_bbdev_pmd_ops = {\n+\t.info_get = ark_bbdev_info_get,\n+\t.start = ark_bbdev_start,\n+\t.stop = ark_bbdev_stop,\n+\t.queue_setup = ark_bb_q_setup,\n+\t.queue_release = ark_bb_q_release,\n+\t.queue_start = ark_bb_q_start,\n+\t.queue_stop = ark_bb_q_stop,\n+};\n+\n+static struct rte_pci_driver ark_bbdev_pmd_drv = {\n+\t.probe = ark_bbdev_probe,\n+\t.remove = ark_bbdev_remove,\n+\t.id_table = pci_id_ark,\n+\t.drv_flags = RTE_PCI_DRV_NEED_MAPPING\n+};\n+\n+RTE_PMD_REGISTER_PCI(DRIVER_NAME, ark_bbdev_pmd_drv);\n+RTE_PMD_REGISTER_PCI_TABLE(DRIVER_NAME, pci_id_ark);\n+RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,\n+\t\t\t      ARK_BBDEV_PKTGEN_ARG \"=<filename> \"\n+\t\t\t      ARK_BBDEV_PKTCHKR_ARG \"=<filename> \"\n+\t\t\t      ARK_BBDEV_PKTDIR_ARG \"=<bitmap>\"\n+\t\t\t      );\ndiff --git a/drivers/baseband/ark/ark_bbext.h b/drivers/baseband/ark/ark_bbext.h\nnew file mode 100644\nindex 0000000000..2e9cc4ccf3\n--- /dev/null\n+++ b/drivers/baseband/ark/ark_bbext.h\n@@ -0,0 +1,163 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright (c) 2015-2018 Atomic Rules LLC\n+ */\n+\n+#ifndef _ARK_BBEXT_H_\n+#define _ARK_BBEXT_H_\n+\n+#include <rte_bbdev.h>\n+#include <rte_bbdev_pmd.h>\n+\n+/* The following section lists function 
prototypes for Arkville's\n+ * baseband dynamic PMD extension. User's who create an extension\n+ * must include this file and define the necessary and desired\n+ * functions. Only 1 function is required for an extension,\n+ * rte_pmd_ark_bbdev_init(); all other functions prototypes in this\n+ * section are optional.\n+ * See documentation for compiling and use of extensions.\n+ */\n+\n+/**\n+ * Extension prototype, required implementation if extensions are used.\n+ * Called during device probe to initialize the user structure\n+ * passed to other extension functions.  This is called once for each\n+ * port of the device.\n+ *\n+ * @param dev\n+ *   current device.\n+ * @param a_bar\n+ *   access to PCIe device bar (application bar) and hence access to\n+ *   user's portion of FPGA.\n+ * @return user_data\n+ *   which will be passed to other extension functions.\n+ */\n+void *rte_pmd_ark_bbdev_init(struct rte_bbdev *dev, void *a_bar);\n+\n+/**\n+ * Extension prototype, optional implementation.\n+ * Called during device uninit.\n+ *\n+ * @param dev\n+ *   current device.\n+ * @param user_data\n+ *   user argument from dev_init() call.\n+ */\n+int rte_pmd_ark_bbdev_uninit(struct rte_bbdev *dev, void *user_data);\n+\n+/**\n+ * Extension prototype, optional implementation.\n+ * Called during rte_bbdev_start().\n+ *\n+ * @param dev\n+ *   current device.\n+ * @param user_data\n+ *   user argument from dev_init() call.\n+ * @return (0) if successful.\n+ */\n+int rte_pmd_ark_bbdev_start(struct rte_bbdev *dev, void *user_data);\n+\n+/**\n+ * Extension prototype, optional implementation.\n+ * Called during  rte_bbdev_stop().\n+ *\n+ * @param dev\n+ *   current device.\n+ * @param user_data\n+ *   user argument from dev_init() call.\n+ * @return (0) if successful.\n+ */\n+int rte_pmd_ark_bbdev_stop(struct rte_bbdev *dev, void *user_data);\n+\n+/**\n+ * Extension prototype, optional implementation.\n+ * Called during rte_bbdev_dequeue_ldpc_dec_ops\n+ *\n+ * @param dev\n+ *   current device.\n+ * @param user_data\n+ *   user argument from dev_init() call.\n+ * @return (0) if successful.\n+ */\n+int rte_pmd_ark_bbdev_dequeue_ldpc_dec(struct rte_bbdev *dev,\n+\t\t\t\t  struct rte_bbdev_dec_op *this_op,\n+\t\t\t\t  uint32_t *usermeta,\n+\t\t\t\t  void *user_data);\n+\n+/**\n+ * Extension prototype, optional implementation.\n+ * Called during rte_bbdev_dequeue_ldpc_enc_ops\n+ *\n+ * @param dev\n+ *   current device.\n+ * @param user_data\n+ *   user argument from dev_init() call.\n+ * @return (0) if successful.\n+ */\n+int rte_pmd_ark_bbdev_dequeue_ldpc_enc(struct rte_bbdev *dev,\n+\t\t\t\t  struct rte_bbdev_enc_op *this_op,\n+\t\t\t\t  uint32_t *usermeta,\n+\t\t\t\t  void *user_data);\n+\n+/**\n+ * Extension prototype, optional implementation.\n+ * Called during rte_bbdev_enqueue_ldpc_dec_ops\n+ *\n+ * @param dev\n+ *   current device.\n+ * @param user_data\n+ *   user argument from dev_init() call.\n+ * @return (0) if successful.\n+ */\n+int rte_pmd_ark_bbdev_enqueue_ldpc_dec(struct rte_bbdev *dev,\n+\t\t\t\t\tstruct rte_bbdev_dec_op *this_op,\n+\t\t\t\t\tuint32_t *usermeta,\n+\t\t\t\t\tuint8_t *meta_cnt,\n+\t\t\t\t\tvoid *user_data);\n+\n+/**\n+ * Extension prototype, optional implementation.\n+ * Called during rte_bbdev_enqueue_ldpc_enc_ops\n+ *\n+ * @param dev\n+ *   current device.\n+ * @param user_data\n+ *   user argument from dev_init() call.\n+ * @return (0) if successful.\n+ */\n+int rte_pmd_ark_bbdev_enqueue_ldpc_enc(struct rte_bbdev *dev,\n+\t\t\t\t\tstruct rte_bbdev_enc_op 
*this_op,\n+\t\t\t\t\tuint32_t *usermeta,\n+\t\t\t\t\tuint8_t *meta_cnt,\n+\t\t\t\t\tvoid *user_data);\n+\n+\n+struct arkbb_user_ext {\n+\tvoid *(*dev_init)(struct rte_bbdev *dev, void *abar);\n+\tint (*dev_uninit)(struct rte_bbdev *dev, void *udata);\n+\tint (*dev_start)(struct rte_bbdev *dev, void *udata);\n+\tint (*dev_stop)(struct rte_bbdev *dev, void *udata);\n+\tint (*dequeue_ldpc_dec)(struct rte_bbdev *dev,\n+\t\t\t\t struct rte_bbdev_dec_op *op,\n+\t\t\t\t uint32_t *v,\n+\t\t\t\t void *udata);\n+\tint (*dequeue_ldpc_enc)(struct rte_bbdev *dev,\n+\t\t\t\t struct rte_bbdev_enc_op *op,\n+\t\t\t\t uint32_t *v,\n+\t\t\t\t void *udata);\n+\tint (*enqueue_ldpc_dec)(struct rte_bbdev *dev,\n+\t\t\t\t struct rte_bbdev_dec_op *op,\n+\t\t\t\t uint32_t *v,\n+\t\t\t\t uint8_t *v1,\n+\t\t\t\t void *udata);\n+\tint (*enqueue_ldpc_enc)(struct rte_bbdev *dev,\n+\t\t\t\t struct rte_bbdev_enc_op *op,\n+\t\t\t\t uint32_t *v,\n+\t\t\t\t uint8_t *v1,\n+\t\t\t\t void *udata);\n+};\n+\n+\n+\n+\n+\n+#endif\n",
    "prefixes": [
        "10/14"
    ]
}
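
The Allow header above also advertises PUT and PATCH. Below is a hedged sketch of a partial update via PATCH, assuming a maintainer API token (Patchwork uses Django REST Framework token auth) and assuming "state" and "archived" are writable for the authenticated user; the token value is a placeholder.

import requests

url = "http://patchwork.dpdk.org/api/patches/119176/"
# Placeholder token; whether "state"/"archived" are writable depends on
# your permissions on the dpdk project.
headers = {"Authorization": "Token YOUR_API_TOKEN"}
resp = requests.patch(url, headers=headers,
                      json={"state": "accepted", "archived": False})
resp.raise_for_status()
print(resp.json()["state"])

Unlike PUT, which expects a full representation of the patch, PATCH sends only the changed fields. Independently of write access, the "mbox" URL in the body above can be fetched and fed to git am to apply this patch locally.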