@@ -59,6 +59,7 @@ ifneq ($(PQOS_INSTALL_PATH),)
DIRS-y += l2fwd-cat
endif
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += l2fwd-crypto
+DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += bbdev_app
DIRS-$(CONFIG_RTE_LIBRTE_JOBSTATS) += l2fwd-jobstats
DIRS-y += l2fwd-keepalive
DIRS-y += l2fwd-keepalive/ka-agent
new file mode 100644
@@ -0,0 +1,50 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
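+# The SDK location can be exported before building, e.g. (path is
+# illustrative):
+#   export RTE_SDK=/path/to/dpdk
+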
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = bbdev
+
+# all sources are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+include $(RTE_SDK)/mk/rte.extapp.mk
new file mode 100644
@@ -0,0 +1,1396 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/unistd.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <math.h>
+#include <assert.h>
+#include <getopt.h>
+#include <signal.h>
+
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_cycles.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_log.h>
+#include <rte_bbdev.h>
+#include <rte_bbdev_op.h>
+
+#define MAX_PKT_BURST 32
+#define NB_MBUF 8163
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_LCORE 16
+#define MEMPOOL_CACHE_SIZE 256
+
+/* Configurable number of RX/TX ring descriptors */
+#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_TX_DESC_DEFAULT 512
+
+#define BBDEV_ASSERT(a) do { \
+ if (!(a)) { \
+ usage(prgname); \
+ return -1; \
+ } \
+} while (0)
+
+static struct rte_mempool *ethdev_mbuf_mempool;
+
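+/** Set of queues used by one lcore on a single ethdev port */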
+struct dev_qs {
+ unsigned int port_id;
+ unsigned int queues_list[RTE_MAX_QUEUES_PER_PORT];
+ unsigned int queues;
+};
+
+static struct rte_mempool *bbdev_op_pool[RTE_BBDEV_OP_TYPE_COUNT];
+
+/**
+ * All lcores used, grouped by the pipeline stage they run (see main_loop()):
+ * rxte: ethdev RX -> turbo encoder enqueue
+ * tetx: turbo encoder dequeue -> ethdev TX
+ * rxtd: ethdev RX -> turbo decoder enqueue
+ * tdtx: turbo decoder dequeue -> ethdev TX
+ */
+struct lcore_setup {
+ unsigned int rxte_lcores;
+ unsigned int rxte_lcore_list[RTE_MAX_LCORE];
+ unsigned int tetx_lcores;
+ unsigned int tetx_lcore_list[RTE_MAX_LCORE];
+ unsigned int rxtd_lcores;
+ unsigned int rxtd_lcore_list[RTE_MAX_LCORE];
+ unsigned int tdtx_lcores;
+ unsigned int tdtx_lcore_list[RTE_MAX_LCORE];
+};
+
+/** each lcore configuration */
+struct lcore_queue_conf {
+ unsigned int nb_ports;
+ struct dev_qs port_list[RTE_MAX_ETHPORTS];
+ /* ethernet addresses of ports */
+ struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
+
+ unsigned int bbdev_id;
+
+ unsigned int bbdev_qs[128];
+ unsigned int nb_bbdev_qs;
+
+ struct rte_mempool *mbuf_pool;
+} __rte_cache_aligned;
+
+static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+
+static const struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .max_rx_pkt_len = ETHER_MAX_LEN,
+ .split_hdr_size = 0,
+ .header_split = 0, /**< Header Split disabled */
+ .hw_ip_checksum = 0, /**< IP checksum offload disabled */
+ .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+ .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
+ .hw_strip_crc = 0, /**< CRC stripping by hardware disabled */
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+};
+
+struct rte_bbdev_op_turbo_enc def_op_enc = {
+ /* These values are set arbitrarily and do not map to the real
+ * values of the data received from the ethdev ports
+ */
+ .rv_index = 0,
+ .code_block_mode = 1,
+ .cb_params.e = 272,
+ .cb_params.ncb = 192,
+ .op_flags = RTE_BBDEV_TURBO_CRC_24B_ATTACH
+};
+
+struct rte_bbdev_op_turbo_dec def_op_dec = {
+ /* These values are set arbitrarily and do not map to the real
+ * values of the data received from the ethdev ports
+ */
+ .cb_params.e = 44,
+ .rv_index = 0,
+ .iter_max = 8,
+ .iter_min = 4,
+ .ext_scale = 15,
+ .num_maps = 0,
+ .op_flags = RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
+ RTE_BBDEV_TURBO_EQUALIZER | RTE_BBDEV_TURBO_SOFT_OUTPUT
+};
+
+struct bbdev_config_params {
+ uint8_t downlink_lcores;
+ uint8_t uplink_lcores;
+
+ uint8_t downlink_rx_ports;
+ uint8_t downlink_tx_ports;
+
+ uint8_t uplink_rx_ports;
+ uint8_t uplink_tx_ports;
+
+ unsigned int rx_port_list[MAX_RX_QUEUE_PER_LCORE];
+ unsigned int tx_port_list[MAX_TX_QUEUE_PER_LCORE];
+};
+
+struct lcore_statistics {
+ unsigned int enqueued;
+ unsigned int dequeued;
+ unsigned int lost_packets;
+} __rte_cache_aligned;
+
+static struct lcore_statistics lcore_stats[RTE_MAX_LCORE];
+
+static volatile int global_exit_flag;
+
+struct port_queues {
+ unsigned int nb_rx_qs;
+ unsigned int nb_tx_qs;
+};
+
+static struct port_queues ports_qs[RTE_MAX_ETHPORTS];
+
+/* display usage */
+static inline void
+usage(const char *prgname)
+{
+ printf("%s [EAL options] "
+ " --\n"
+ " --downlink_cores - downlink cores mask\n"
+ " --uplink_cores - uplink cores mask\n"
+ " --downlink_rx_ports - downlink Rx ports mask\n"
+ " --downlink_tx_ports - downlink Tx ports mask\n"
+ " --uplink_tx_ports -uplink Tx ports mask\n"
+ " --uplink_rx_ports -uplink Rx ports msk\n"
+ "\n", prgname);
+}
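+
+/*
+ * Example invocation (mask values are illustrative and depend on the
+ * available lcores and ports):
+ * ./build/bbdev [EAL options] -- --downlink_cores 0xC --uplink_cores 0x30 \
+ *	--downlink_rx_ports 0x1 --downlink_tx_ports 0x2 \
+ *	--uplink_rx_ports 0x4 --uplink_tx_ports 0x8
+ */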
+
+/* parse port or core mask */
+static inline
+uint8_t bbdev_parse_mask(const char *mask)
+{
+ char *end = NULL;
+ unsigned long pm;
+
+ /* parse hexadecimal string */
+ pm = strtoul(mask, &end, 16);
+ if ((mask[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return 0;
+
+ if (pm == 0)
+ return 0;
+
+ return pm;
+}
+
+static int
+bbdev_parse_args(int argc, char **argv,
+ struct bbdev_config_params *bbdev_params)
+{
+ int optind = 0;
+ int opt;
+ int opt_indx = 0;
+ char *prgname = argv[0];
+
+ memset(bbdev_params, 0, sizeof(*bbdev_params));
+
+ static struct option lgopts[] = {
+ { "downlink_cores", required_argument, 0, 'c' },
+ { "uplink_cores", required_argument, 0, 'C' },
+ { "downlink_rx_ports", required_argument, 0, 'r' },
+ { "downlink_tx_ports", required_argument, 0, 't' },
+ { "uplink_rx_ports", required_argument, 0, 'R' },
+ { "uplink_tx_ports", required_argument, 0, 'T' },
+ { NULL, 0, 0, 0 }
+ };
+
+ BBDEV_ASSERT(argc != 0);
+ BBDEV_ASSERT(argv != NULL);
+ BBDEV_ASSERT(bbdev_params != NULL);
+
+ while ((opt = getopt_long(argc, argv, "c:C:r:t:R:T:", lgopts,
+ &opt_indx)) != EOF) {
+
+ switch (opt) {
+ case 'c':
+ bbdev_params->downlink_lcores =
+ bbdev_parse_mask(optarg);
+ if (bbdev_params->downlink_lcores == 0) {
+ usage(prgname);
+ return -1;
+ }
+ break;
+
+ case 'C':
+ bbdev_params->uplink_lcores =
+ bbdev_parse_mask(optarg);
+ if (bbdev_params->uplink_lcores == 0) {
+ usage(prgname);
+ return -1;
+ }
+ break;
+
+ case 'r':
+ bbdev_params->downlink_rx_ports =
+ bbdev_parse_mask(optarg);
+ if (bbdev_params->downlink_rx_ports == 0) {
+ usage(prgname);
+ return -1;
+ }
+ break;
+
+ case 't':
+ bbdev_params->downlink_tx_ports =
+ bbdev_parse_mask(optarg);
+ if (bbdev_params->downlink_tx_ports == 0) {
+ usage(prgname);
+ return -1;
+ }
+ break;
+
+ case 'R':
+ bbdev_params->uplink_rx_ports =
+ bbdev_parse_mask(optarg);
+ if (bbdev_params->uplink_rx_ports == 0) {
+ usage(prgname);
+ return -1;
+ }
+ break;
+
+ case 'T':
+ bbdev_params->uplink_tx_ports =
+ bbdev_parse_mask(optarg);
+ if (bbdev_params->uplink_tx_ports == 0) {
+ usage(prgname);
+ return -1;
+ }
+ break;
+
+ default:
+ usage(prgname);
+ return -1;
+ }
+ }
+ optind = 0;
+ return optind;
+}
+
+static void
+signal_handler(int signum)
+{
+ printf("\nSignal %d received", signum);
+ global_exit_flag = 1;
+}
+
+static void
+print_mac(unsigned int portid, struct ether_addr *bbdev_ports_eth_address)
+{
+ printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
+ (unsigned int) portid,
+ bbdev_ports_eth_address[portid].addr_bytes[0],
+ bbdev_ports_eth_address[portid].addr_bytes[1],
+ bbdev_ports_eth_address[portid].addr_bytes[2],
+ bbdev_ports_eth_address[portid].addr_bytes[3],
+ bbdev_ports_eth_address[portid].addr_bytes[4],
+ bbdev_ports_eth_address[portid].addr_bytes[5]);
+}
+
+/* Check the link status of all ports in up to 9s, and print them finally */
+static void
+check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint8_t portid, count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+
+ printf("\nChecking link status");
+ fflush(stdout);
+
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ all_ports_up = 1;
+ for (portid = 0; portid < port_num; portid++) {
+ if ((port_mask & (1 << portid)) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ rte_eth_link_get_nowait(portid, &link);
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status) {
+ const char *dp = (link.link_duplex ==
+ ETH_LINK_FULL_DUPLEX) ?
+ "full-duplex" :
+ "half-duplex";
+ printf("Port %u Link Up - speed %u Mbps"
+ " - %s\n", portid,
+ link.link_speed,
+ dp);
+ } else
+ printf("Port %d Link Down\n", portid);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == 0) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ printf(".");
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ printf("done\n");
+ }
+ }
+}
+
+static void
+update_pkt_mac(uint16_t n, struct rte_mbuf *pkts_burst[],
+ struct ether_addr *addr, unsigned int dest_portid)
+{
+ struct ether_hdr *eth;
+ void *tmp;
+
+ for (uint16_t i = 0; i < n; i++) {
+ eth = rte_pktmbuf_mtod(pkts_burst[i], struct ether_hdr *);
+ /* 02:00:00:00:00:xx */
+ tmp = ð->d_addr.addr_bytes[0];
+ *((uint64_t *)tmp) = 0x000000000002 +
+ ((uint64_t)dest_portid << 40);
+ /* src addr */
+ ether_addr_copy(addr, ð->s_addr);
+ }
+}
+
+static int
+initialize_ports(uint8_t ports_mask, char link, char port_type,
+ uint32_t *bbdev_enabled_port_mask)
+{
+ uint8_t portid;
+ unsigned int enabled_portcount = 0, q;
+ int ret;
+ struct port_queues *port_qs;
+ /* ethernet addresses of ports */
+ struct ether_addr bbdev_dl_ports_eth_addr[RTE_MAX_ETHPORTS];
+ struct ether_addr bbdev_ul_ports_eth_addr[RTE_MAX_ETHPORTS];
+
+ uint8_t nb_ports = rte_eth_dev_count();
+ if (nb_ports == 0) {
+ printf("No Ethernet ports available\n");
+ return -1;
+ }
+
+ for (portid = 0; portid < nb_ports; portid++) {
+
+ /* Skip ports that are not enabled */
+ if ((ports_mask & (1 << portid)) == 0)
+ continue;
+
+ port_qs = &ports_qs[portid];
+ /* initialize ports */
+ printf("\nInitializing %cx port %u... ",
+ port_type, portid);
+ fflush(stdout);
+ ret = rte_eth_dev_configure(portid, port_qs->nb_rx_qs,
+ port_qs->nb_tx_qs, &port_conf);
+
+ if (ret < 0) {
+ printf("Cannot configure device: err=%d, port=%u\n",
+ ret, portid);
+ return -1;
+ }
+ fflush(stdout);
+
+ /* initialize the RX or TX queues on this port */
+ if (port_type == 'R')
+ for (q = 0; q < port_qs->nb_rx_qs; q++)
+ ret = rte_eth_rx_queue_setup(portid, q,
+ RTE_TEST_RX_DESC_DEFAULT,
+ rte_eth_dev_socket_id(portid),
+ NULL, ethdev_mbuf_mempool);
+ else if (port_type == 'T')
+ for (q = 0; q < port_qs->nb_tx_qs; q++)
+ ret = rte_eth_tx_queue_setup(portid, q,
+ RTE_TEST_TX_DESC_DEFAULT,
+ rte_eth_dev_socket_id(portid),
+ NULL);
+
+ if (ret < 0) {
+ printf("%cL rte_eth_%cx_queue_setup:err=%d, port=%u\n",
+ port_type, link, ret,
+ (unsigned int) portid);
+ return -1;
+ }
+
+ rte_eth_promiscuous_enable(portid);
+
+ if (link == 'D') {
+ rte_eth_macaddr_get(portid,
+ &bbdev_dl_ports_eth_addr[portid]);
+ print_mac(portid, bbdev_dl_ports_eth_addr);
+ } else {
+ rte_eth_macaddr_get(portid,
+ &bbdev_ul_ports_eth_addr[portid]);
+ print_mac(portid, bbdev_ul_ports_eth_addr);
+ }
+
+ *bbdev_enabled_port_mask |= (1 << portid);
+ enabled_portcount++;
+ }
+ return enabled_portcount;
+}
+
+static int
+start_ethdev_ports(uint8_t nb_ports, uint32_t bbdev_enabled_port_mask)
+{
+ unsigned int portid;
+ int ret, ports_started = 0;
+
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+
+ if ((bbdev_enabled_port_mask & (uint32_t)(1 << portid)) == 0)
+ continue;
+ ret = rte_eth_dev_start(portid);
+ if (ret < 0) {
+ printf("rte_eth_dev_start:err=%d, port=%u\n", ret,
+ (unsigned int) portid);
+ return -1;
+ }
+
+ ports_started++;
+ if (ports_started == nb_ports)
+ break;
+ }
+ return ports_started;
+}
+
+static int
+lcore_queue_init(uint8_t nb_ports, struct bbdev_config_params *bbdev_params,
+ struct lcore_setup *l_setup)
+{
+ uint16_t port;
+ unsigned int lcore;
+ int lcount, total_count = 0;
+ struct lcore_queue_conf *qconf;
+
+ /*
+ * for each core in the downlink mask populate the array of ports
+ * (rx or tx): every other core in the mask, starting with the first,
+ * takes the rx ports; the remaining cores take the tx ports.
+ */
+ for (lcore = 0, lcount = 0; lcore < sizeof(int) * 8; lcore++) {
+
+ /* get the lcore_id */
+ if ((bbdev_params->downlink_lcores & (1 << lcore)) == 0)
+ continue;
+
+ lcount++;
+ for (port = 0; port <= nb_ports; port++) {
+ if ((lcount & 1) == 1) {
+ if (port < nb_ports) {
+ /*get the port id*/
+ if ((bbdev_params->downlink_rx_ports &
+ (1 << port)) == 0)
+ continue;
+
+ l_setup->rxte_lcore_list[
+ l_setup->rxte_lcores] =
+ lcore;
+ printf("Lcore %d: DL RX port %d\n",
+ lcore, port);
+ } else {
+ l_setup->rxte_lcores++;
+ continue;
+ }
+
+ } else {
+ if (port < nb_ports) {
+ if ((bbdev_params->downlink_tx_ports &
+ (1 << port)) == 0)
+ continue;
+
+ l_setup->tdtx_lcore_list[
+ l_setup->tdtx_lcores] =
+ lcore;
+ printf("Lcore %d: DL TX port %d\n",
+ lcore, port);
+ } else {
+ l_setup->tdtx_lcores++;
+ continue;
+ }
+ }
+ qconf = &lcore_queue_conf[lcore];
+ qconf->port_list[qconf->nb_ports].queues++;
+ qconf->port_list[qconf->nb_ports].port_id = port;
+ rte_eth_macaddr_get(port,
+ &qconf->ports_eth_addr
+ [qconf->nb_ports]);
+ qconf->nb_ports++;
+ }
+ }
+
+ if (lcount % 2) {
+ printf("\nNumber of DL lcores is not an even number");
+ return -1;
+ }
+
+ total_count += lcount;
+
+ /*
+ * for each core in the uplink mask populate the array of ports
+ * (rx or tx): every other core in the mask, starting with the first,
+ * takes the rx ports; the remaining cores take the tx ports.
+ */
+ for (lcore = 0, lcount = 0; lcore < (sizeof(int) * 8); lcore++) {
+ if ((bbdev_params->uplink_lcores & (1 << lcore)) == 0)
+ continue;
+
+ lcount++;
+ for (port = 0; port <= nb_ports; port++) {
+
+ if ((lcount & 1) == 1) {
+ if (port < nb_ports) {
+ if ((bbdev_params->uplink_rx_ports &
+ (1 << port)) == 0)
+ continue;
+
+ l_setup->rxtd_lcore_list[
+ l_setup->rxtd_lcores] =
+ lcore;
+ printf("Lcore %u: UL RX port %u\n",
+ lcore, port);
+ } else {
+ l_setup->rxtd_lcores++;
+ continue;
+ }
+
+ } else {
+ if (port < nb_ports) {
+ if ((bbdev_params->uplink_tx_ports &
+ (1 << port)) == 0)
+ continue;
+
+ l_setup->tetx_lcore_list[
+ l_setup->tetx_lcores] =
+ lcore;
+ printf("Lcore %d: UL TX port %d\n",
+ lcore, port);
+ } else {
+ l_setup->tetx_lcores++;
+ continue;
+ }
+ }
+ qconf = &lcore_queue_conf[lcore];
+ qconf->port_list[qconf->nb_ports].queues++;
+ qconf->port_list[qconf->nb_ports].port_id = port;
+ rte_eth_macaddr_get(port,
+ &qconf->ports_eth_addr
+ [qconf->nb_ports]);
+ qconf->nb_ports++;
+ }
+ }
+ if (lcount % 2) {
+ printf("\nNumber of DL lcores is not an even number");
+ return -1;
+ }
+ total_count += lcount;
+ /*return number of lcores processed*/
+ return total_count;
+}
+
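+/*
+ * Drain decode operations from the bbdev queues owned by this lcore, free
+ * the input mbufs, rewrite the Ethernet addresses of the hard-output mbufs
+ * and transmit them on the matching ethdev queue.
+ */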
+static void
+bbdev_dequeue_dec(void)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_bbdev_dec_op *ops_burst[MAX_PKT_BURST];
+ unsigned int i, j, portid, nb_tx, lcoreid, qid = 0, q, nb_tx_sent = 0;
+ struct lcore_queue_conf *qconf;
+ struct lcore_statistics *lcore_stat;
+
+ lcoreid = rte_lcore_id();
+ lcore_stat = &lcore_stats[lcoreid];
+ qconf = &lcore_queue_conf[lcoreid];
+
+ if (qconf->nb_ports == 0)
+ return;
+
+ while (!global_exit_flag) {
+ for (i = 0; i < qconf->nb_ports; i++) {
+ portid = qconf->port_list[i].port_id;
+
+ for (q = 0; q < qconf->port_list[i].queues; q++) {
+ qid = qconf->port_list[i].queues_list[q];
+ /* Dequeue packets from bbdev device*/
+ nb_tx = rte_bbdev_dequeue_dec_ops(
+ qconf->bbdev_id,
+ qconf->bbdev_qs[q], ops_burst,
+ MAX_PKT_BURST);
+
+ if (!nb_tx)
+ continue;
+
+ lcore_stat->dequeued += nb_tx;
+
+ for (j = 0; j < nb_tx; j++) {
+ pkts_burst[j] =
+ ops_burst[j]->turbo_dec
+ .hard_output.data;
+ /* input mbufs are no longer needed,
+ * release!
+ */
+ rte_pktmbuf_free(ops_burst[j]->
+ turbo_dec.input.data);
+ }
+
+ rte_bbdev_dec_op_free_bulk(ops_burst, nb_tx);
+
+ update_pkt_mac(nb_tx, pkts_burst,
+ &(qconf->ports_eth_addr
+ [portid]),
+ portid);
+
+ /*Enqueue packets to ethdev*/
+ nb_tx_sent = rte_eth_tx_burst(
+ (uint8_t)portid, qid,
+ pkts_burst, nb_tx);
+ if (unlikely(nb_tx_sent < nb_tx)) {
+ lcore_stat->lost_packets +=
+ nb_tx - nb_tx_sent;
+ for (j = nb_tx_sent; j < nb_tx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+ }
+ }
+ }
+ }
+}
+
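+/*
+ * Drain encode operations from the bbdev queues owned by this lcore, free
+ * the input mbufs, rewrite the Ethernet addresses of the output mbufs and
+ * transmit them on the matching ethdev queue.
+ */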
+static void
+bbdev_dequeue_enc(void)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_bbdev_enc_op *ops_burst[MAX_PKT_BURST];
+ unsigned int i, j, portid, nb_tx, lcoreid, qid = 0, q, nb_tx_sent = 0;
+ struct lcore_queue_conf *qconf;
+ struct lcore_statistics *lcore_stat;
+
+ lcoreid = rte_lcore_id();
+ lcore_stat = &lcore_stats[lcoreid];
+ qconf = &lcore_queue_conf[lcoreid];
+
+ if (qconf->nb_ports == 0)
+ return;
+
+ while (!global_exit_flag) {
+ for (i = 0; i < qconf->nb_ports; i++) {
+ portid = qconf->port_list[i].port_id;
+
+ for (q = 0; q < qconf->port_list[i].queues; q++) {
+ qid = qconf->port_list[i].queues_list[q];
+ /* Dequeue packets from bbdev device*/
+ nb_tx = rte_bbdev_dequeue_enc_ops(
+ qconf->bbdev_id,
+ qconf->bbdev_qs[q], ops_burst,
+ MAX_PKT_BURST);
+
+ if (!nb_tx)
+ continue;
+
+ lcore_stat->dequeued += nb_tx;
+
+ for (j = 0; j < nb_tx; j++) {
+ pkts_burst[j] =
+ ops_burst[j]->
+ turbo_enc.output.data;
+ /* input mbufs are no longer needed,
+ * release!
+ */
+ rte_pktmbuf_free(ops_burst[j]->
+ turbo_enc.input.data);
+ }
+
+ rte_bbdev_enc_op_free_bulk(ops_burst, nb_tx);
+
+ update_pkt_mac(nb_tx, pkts_burst,
+ &(qconf->ports_eth_addr
+ [portid]), portid);
+
+ /*Enqueue packets to ethdev*/
+ nb_tx_sent = rte_eth_tx_burst(
+ (uint8_t)portid, qid,
+ pkts_burst, nb_tx);
+ if (unlikely(nb_tx_sent < nb_tx)) {
+ lcore_stat->lost_packets +=
+ nb_tx - nb_tx_sent;
+ for (j = nb_tx_sent; j < nb_tx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+ }
+ }
+ }
+ }
+}
+
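+/*
+ * Poll the ethdev RX queues owned by this lcore, build a turbo decode
+ * operation for every received mbuf (the received mbuf as input, a freshly
+ * allocated mbuf carrying a copy of the Ethernet header as hard output) and
+ * enqueue the burst on this lcore's bbdev queues.
+ */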
+static void
+bbdev_enqueue_dec(void)
+{
+ unsigned int i, portid, nb_rx = 0, j, lcoreid, q, qid = 0;
+ int ret;
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_mbuf *bbdev_pkts[MAX_PKT_BURST];
+ struct rte_bbdev_dec_op *ops_burst[MAX_PKT_BURST];
+ struct lcore_statistics *lcore_stat;
+ struct lcore_queue_conf *qconf = &lcore_queue_conf[rte_lcore_id()];
+
+ if (qconf->nb_ports == 0)
+ return;
+
+ lcoreid = rte_lcore_id();
+ lcore_stat = &lcore_stats[lcoreid];
+
+ /* Read packet from RX queues*/
+ while (!global_exit_flag) {
+
+ for (i = 0; i < qconf->nb_ports; i++) {
+ portid = qconf->port_list[i].port_id;
+
+ for (q = 0; q < qconf->port_list[i].queues; q++) {
+
+ qid = qconf->port_list[i].queues_list[q];
+ /* Dequeue packets from ethdev */
+ nb_rx = rte_eth_rx_burst((uint8_t) portid, qid,
+ pkts_burst, MAX_PKT_BURST);
+
+ if (!nb_rx)
+ continue;
+
+ if (rte_bbdev_dec_op_alloc_bulk(
+ bbdev_op_pool
+ [RTE_BBDEV_OP_TURBO_DEC],
+ ops_burst, nb_rx) != 0)
+ continue;
+
+ ret = rte_pktmbuf_alloc_bulk(qconf->mbuf_pool,
+ bbdev_pkts, nb_rx);
+ if (ret < 0)
+ continue;
+
+ for (j = 0; j < nb_rx; j++) {
+ struct rte_bbdev_op_turbo_dec *td;
+ /* append the size of the ethernet
+ * header
+ */
+ rte_pktmbuf_append(bbdev_pkts[j],
+ sizeof(struct ether_hdr));
+ /* copy the ethernet header */
+ void *in_buf = rte_pktmbuf_mtod(
+ pkts_burst[j], void *);
+ void *out_buf = rte_pktmbuf_mtod(
+ bbdev_pkts[j], void *);
+ rte_memcpy(out_buf, in_buf,
+ sizeof(struct ether_hdr));
+ /* set op */
+ ops_burst[j]->turbo_dec = def_op_dec;
+ td = &ops_burst[j]->turbo_dec;
+ td->cb_params.k =
+ rte_pktmbuf_pkt_len(
+ bbdev_pkts[j])
+ * 8;
+ td->input.offset =
+ sizeof(
+ struct
+ ether_hdr);
+ td->input.length =
+ rte_pktmbuf_pkt_len(
+ bbdev_pkts[j]);
+ td->input.data =
+ pkts_burst[j];
+ td->hard_output.offset =
+ sizeof(
+ struct
+ ether_hdr);
+ td->hard_output.data =
+ bbdev_pkts[j];
+ }
+
+ /* Enqueue packets on BBDEV device */
+ nb_rx = rte_bbdev_enqueue_dec_ops(
+ qconf->bbdev_id,
+ qconf->bbdev_qs[q], ops_burst,
+ nb_rx);
+ lcore_stat->enqueued += nb_rx;
+ }
+ }
+ }
+}
+
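+/*
+ * Poll the ethdev RX queues owned by this lcore, build a turbo encode
+ * operation for every received mbuf (the received mbuf as input, a freshly
+ * allocated mbuf carrying a copy of the Ethernet header as output) and
+ * enqueue the burst on this lcore's bbdev queues.
+ */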
+static void
+bbdev_enqueue_enc(void)
+{
+ unsigned int i, portid, nb_rx = 0, j, lcoreid, q, qid = 0;
+ int ret;
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_mbuf *bbdev_pkts[MAX_PKT_BURST];
+ struct rte_bbdev_enc_op *ops_burst[MAX_PKT_BURST];
+ struct lcore_statistics *lcore_stat;
+ struct lcore_queue_conf *qconf = &lcore_queue_conf[rte_lcore_id()];
+
+ if (qconf->nb_ports == 0)
+ return;
+
+ lcoreid = rte_lcore_id();
+ lcore_stat = &lcore_stats[lcoreid];
+
+ /* Read packet from RX queues*/
+ while (!global_exit_flag) {
+
+ for (i = 0; i < qconf->nb_ports; i++) {
+ portid = qconf->port_list[i].port_id;
+
+ for (q = 0; q < qconf->port_list[i].queues; q++) {
+
+ qid = qconf->port_list[i].queues_list[q];
+ /* Dequeue packets from ethdev */
+ nb_rx = rte_eth_rx_burst((uint8_t)portid, qid,
+ pkts_burst, MAX_PKT_BURST);
+
+ if (!nb_rx)
+ continue;
+
+ if (rte_bbdev_enc_op_alloc_bulk(
+ bbdev_op_pool
+ [RTE_BBDEV_OP_TURBO_ENC],
+ ops_burst, nb_rx) != 0)
+ continue;
+
+ ret = rte_pktmbuf_alloc_bulk(qconf->mbuf_pool,
+ bbdev_pkts, nb_rx);
+ if (ret < 0)
+ continue;
+
+ for (j = 0; j < nb_rx; j++) {
+ /* append the size of the ethernet
+ * header
+ */
+ rte_pktmbuf_append(bbdev_pkts[j],
+ sizeof(
+ struct
+ ether_hdr));
+ /* copy the ethernet header */
+ void *in_buf = rte_pktmbuf_mtod(
+ pkts_burst[j], void *);
+ void *out_buf = rte_pktmbuf_mtod(
+ bbdev_pkts[j], void *);
+ rte_memcpy(out_buf, in_buf,
+ sizeof(
+ struct
+ ether_hdr));
+ /* set op */
+ ops_burst[j]->turbo_enc = def_op_enc;
+ ops_burst[j]->turbo_enc.cb_params.k =
+ rte_pktmbuf_pkt_len(
+ bbdev_pkts[j])
+ * 8;
+ ops_burst[j]->turbo_enc.input.offset =
+ sizeof(
+ struct
+ ether_hdr);
+ ops_burst[j]->turbo_enc.input.length =
+ rte_pktmbuf_pkt_len(
+ bbdev_pkts[j]);
+ ops_burst[j]->turbo_enc.input.data =
+ pkts_burst[j];
+ ops_burst[j]->turbo_enc.output.offset =
+ sizeof(
+ struct
+ ether_hdr);
+ ops_burst[j]->turbo_enc.output.data =
+ bbdev_pkts[j];
+ }
+
+ /* Enqueue packets on BBDEV device */
+ nb_rx = rte_bbdev_enqueue_enc_ops(
+ qconf->bbdev_id,
+ qconf->bbdev_qs[q], ops_burst,
+ nb_rx);
+ lcore_stat->enqueued += nb_rx;
+ }
+ }
+ }
+}
+
+static void
+print_lcore_stats(unsigned int lcore_id)
+{
+ struct lcore_statistics *lcore_stat;
+ static const char *stats_border = "_______";
+
+ lcore_stat = &lcore_stats[lcore_id];
+ printf("\nLcore %d: %s enqueued count:\t\t%u\n",
+ lcore_id, stats_border, lcore_stat->enqueued);
+ printf("Lcore %d: %s dequeued count:\t\t%u\n",
+ lcore_id, stats_border, lcore_stat->dequeued);
+}
+
+static void
+print_stats(struct lcore_setup *lcore_setup)
+{
+ unsigned int l_id, dev_id;
+ unsigned char x, nb_ports;
+ int len, ret, i;
+
+ struct rte_eth_xstat *xstats;
+ struct rte_eth_xstat_name *xstats_names;
+ struct rte_bbdev_stats bbstats;
+ static const char *stats_border = "_______";
+
+ const char clr[] = { 27, '[', '2', 'J', '\0' };
+ const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
+
+ /* Clear screen and move to top left */
+ printf("%s%s", clr, topLeft);
+
+ nb_ports = rte_eth_dev_count();
+ printf("PORT STATISTICS:\n================\n");
+ for (x = 0; x < nb_ports; x++) {
+
+ len = rte_eth_xstats_get(x, NULL, 0);
+ if (len < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_xstats_get(%hhu) failed: %d",
+ x, len);
+
+ xstats = calloc(len, sizeof(*xstats));
+ if (xstats == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Failed to calloc memory for xstats");
+
+ ret = rte_eth_xstats_get(x, xstats, len);
+ if (ret < 0 || ret > len)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_xstats_get(%hhu) len%i failed: %d",
+ x, len, ret);
+
+ xstats_names = calloc(len, sizeof(*xstats_names));
+ if (xstats_names == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Failed to calloc memory for xstats_names");
+
+ ret = rte_eth_xstats_get_names(x, xstats_names, len);
+ if (ret < 0 || ret > len)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_xstats_get_names(%hhu) len%i failed: %d",
+ x, len, ret);
+
+ for (i = 0; i < len; i++) {
+ if (xstats[i].value > 0)
+ printf("Port %u: %s %s:\t\t%"PRIu64"\n",
+ x, stats_border,
+ xstats_names[i].name,
+ xstats[i].value);
+ }
+ }
+
+ printf("\nBBDEV STATISTICS:\n=================\n");
+ RTE_BBDEV_FOREACH(dev_id) {
+ rte_bbdev_stats_get(dev_id, &bbstats);
+ printf("BBDEV %u: %s enqueue count:\t\t%"PRIu64"\n",
+ dev_id, stats_border,
+ bbstats.enqueued_count);
+ printf("BBDEV %u: %s dequeue count:\t\t%"PRIu64"\n",
+ dev_id, stats_border,
+ bbstats.dequeued_count);
+ printf("BBDEV %u: %s enqueue error count:\t\t%"PRIu64"\n",
+ dev_id, stats_border,
+ bbstats.enqueue_err_count);
+ printf("BBDEV %u: %s dequeue error count:\t\t%"PRIu64"\n\n",
+ dev_id, stats_border,
+ bbstats.dequeue_err_count);
+ }
+
+ printf("LCORE STATISTICS:\n=================\n");
+ for (l_id = 0; l_id < lcore_setup->tdtx_lcores; l_id++)
+ print_lcore_stats(lcore_setup->tdtx_lcore_list[l_id]);
+ for (l_id = 0; l_id < lcore_setup->tetx_lcores; l_id++)
+ print_lcore_stats(lcore_setup->tetx_lcore_list[l_id]);
+ for (l_id = 0; l_id < lcore_setup->rxtd_lcores; l_id++)
+ print_lcore_stats(lcore_setup->rxtd_lcore_list[l_id]);
+ for (l_id = 0; l_id < lcore_setup->rxte_lcores; l_id++)
+ print_lcore_stats(lcore_setup->rxte_lcore_list[l_id]);
+}
+
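+/*
+ * Redistribute the ports gathered in lcore_queue_conf across the given
+ * lcores and hand out per-port RX or TX queue indexes, so every lcore ends
+ * up owning its own subset of ports and queues.
+ */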
+static void
+qs_to_lcores(unsigned int nb_lcores, unsigned int lcores[])
+{
+ unsigned int port = 0, lcore, nb_ports = 0, q_id;
+ unsigned int l = 0, y, x;
+ struct lcore_queue_conf *qconf;
+ struct dev_qs temp[RTE_MAX_LCORE];
+ int count = 0;
+ struct dev_qs *p;
+ struct port_queues *port_qs;
+
+ /*
+ * for each core copy array of ports (holding all ports available to
+ * this lcore) from qconf into temp array and empty the one in qconf
+ */
+ for (x = 0; x < nb_lcores; x++) {
+
+ lcore = lcores[x];
+ qconf = &lcore_queue_conf[lcore];
+ nb_ports = qconf->nb_ports;
+ qconf->nb_ports = 0;
+ p = qconf->port_list;
+
+ for (y = 0; y < nb_ports; y++, p++) {
+ temp[y] = *p;
+ p->port_id = 0;
+ }
+ }
+ /*
+ * re-populate array of ports and queues in qconf according to number
+ * of available lcores, ports, and queues so only the required ports
+ * and queues stay in the configuration of each particular lcore
+ */
+ while (port < nb_ports || l < nb_lcores) {
+
+ if (port < nb_ports && l == nb_lcores) {
+ l = 0;
+ count++;
+ }
+ if (port == nb_ports && l < nb_lcores)
+ port = 0;
+
+ lcore = lcores[l];
+ qconf = &lcore_queue_conf[lcore];
+ port_qs = &ports_qs[temp[port].port_id];
+
+ if ((lcore & 1) == 1) {
+ qconf->port_list[count].port_id =
+ temp[port].port_id;
+
+ for (q_id = 0; q_id < qconf->port_list[count].queues;
+ q_id++) {
+ qconf->port_list[count].queues_list[q_id] =
+ port_qs->nb_tx_qs;
+ port_qs->nb_tx_qs++;
+ }
+
+ qconf->nb_ports++;
+ } else {
+ qconf->port_list[count].port_id =
+ temp[port].port_id;
+ for (q_id = 0; q_id < qconf->port_list[count].queues;
+ q_id++) {
+ qconf->port_list[count].queues_list[q_id] =
+ port_qs->nb_rx_qs;
+ port_qs->nb_rx_qs++;
+ }
+ qconf->nb_ports++;
+ }
+ printf("Lcore: %u connected to port: %u\n",
+ lcores[l], temp[port].port_id);
+ port++;
+ l++;
+ }
+}
+
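+/*
+ * Per-lcore entry point: the master lcore periodically prints statistics,
+ * every other lcore runs the enqueue or dequeue routine matching the role
+ * it was given in lcore_setup.
+ */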
+static void
+main_loop(struct lcore_setup *lcore_setup)
+{
+ unsigned int i = 0, this_lcore;
+ this_lcore = rte_lcore_id();
+
+ /* print stats on master core */
+ if (rte_get_master_lcore() == this_lcore) {
+ while (!global_exit_flag) {
+ print_stats(lcore_setup);
+ rte_delay_ms(500);
+ }
+ }
+
+ for (i = 0; i < lcore_setup->rxte_lcores; i++) {
+ if (lcore_setup->rxte_lcore_list[i] == this_lcore)
+ bbdev_enqueue_enc();
+ }
+
+ for (i = 0; i < lcore_setup->tetx_lcores; i++) {
+ if (lcore_setup->tetx_lcore_list[i] == this_lcore)
+ bbdev_dequeue_enc();
+ }
+
+ for (i = 0; i < lcore_setup->rxtd_lcores; i++) {
+ if (lcore_setup->rxtd_lcore_list[i] == this_lcore)
+ bbdev_enqueue_dec();
+ }
+
+ for (i = 0; i < lcore_setup->tdtx_lcores; i++) {
+ if (lcore_setup->tdtx_lcore_list[i] == this_lcore)
+ bbdev_dequeue_dec();
+ }
+}
+
+static int
+launch_one_lcore(void *arg)
+{
+ main_loop((struct lcore_setup *)arg);
+ return 0;
+}
+
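+/*
+ * Configure a bbdev device with qs_nb queues, set every queue up for turbo
+ * encode (even device ids) or turbo decode (odd device ids) and start the
+ * device.
+ */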
+static int
+prepare_bbdev_device(unsigned int dev_id, uint8_t qs_nb)
+{
+ int ret;
+ unsigned int q_id;
+ struct rte_bbdev_info info;
+ struct rte_bbdev_queue_conf qconf = {0};
+
+ rte_bbdev_info_get(dev_id, &info);
+
+ ret = rte_bbdev_setup_queues(dev_id, qs_nb, info.socket_id);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "ERROR(%d): BBDEV %u not configured properly\n",
+ ret, dev_id);
+
+ /* setup device queues */
+ qconf.socket = info.socket_id;
+ qconf.queue_size = info.drv.queue_size_lim;
+ qconf.op_type = (dev_id & 0x1) ? RTE_BBDEV_OP_TURBO_DEC :
+ RTE_BBDEV_OP_TURBO_ENC;
+
+ for (q_id = 0; q_id < qs_nb; q_id++) {
+
+ ret = rte_bbdev_queue_configure(dev_id, q_id, &qconf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "ERROR(%d): BBDEV %u queue %u not configured properly\n",
+ ret, dev_id, q_id);
+ }
+
+ ret = rte_bbdev_start(dev_id);
+
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE, "ERROR(%d): BBDEV %u not started\n",
+ ret, dev_id);
+
+ printf("BBdev %u started\n", dev_id);
+
+ return 0;
+}
+
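+/*
+ * Attach the given lcores to bbdev device dev_id: assign bbdev queue
+ * indexes matching the port queues handled by each RX lcore, create a
+ * per-lcore mbuf pool, mirror the queue assignment on the paired
+ * (lcoreid + 1) TX lcore and finally configure and start the device.
+ */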
+static void
+enable_bbdev(unsigned int lcores[], unsigned int nb_lcores, unsigned int dev_id)
+{
+ unsigned int i, nb_qs, tot_nb_bbdev_qs = 1, lcore, lcoreid;
+ struct lcore_queue_conf *qconf, *qconf_next;
+ unsigned int nb_bbdev_qs = 0;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+ /* set num of port queues and bbdev queues for each lcore on the link */
+ for (lcore = 0; lcore < nb_lcores; lcore++) {
+ nb_qs = 0;
+ lcoreid = lcores[lcore];
+ qconf = &lcore_queue_conf[lcoreid];
+
+ for (i = 0; i < qconf->nb_ports; i++)
+ nb_qs += qconf->port_list[i].queues;
+
+ for (i = 0; i < nb_qs; i++) {
+ qconf->bbdev_qs[i] = nb_bbdev_qs;
+ nb_bbdev_qs++;
+ }
+
+ /* create the mbuf mempool for ethdev pkts */
+ snprintf(pool_name, sizeof(pool_name), "bbdev%d_mbuf_pool%d",
+ dev_id, lcore);
+ qconf->mbuf_pool = rte_pktmbuf_pool_create(pool_name,
+ NB_MBUF, MEMPOOL_CACHE_SIZE, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ if (qconf->mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Unable to create '%s' pool\n",
+ pool_name);
+
+ qconf->nb_bbdev_qs = nb_qs;
+ qconf->bbdev_id = dev_id;
+ printf("BBdev: %u, lcore %u\n", dev_id, lcoreid);
+
+ qconf_next = &lcore_queue_conf[lcoreid + 1];
+ qconf_next->bbdev_id = dev_id;
+
+ for (i = 0; i < nb_qs; i++)
+ qconf_next->bbdev_qs[i] = qconf->bbdev_qs[i];
+
+ qconf_next->nb_bbdev_qs = nb_qs;
+ printf("BBdev: %u, lcore %u\n", dev_id, lcoreid + 1);
+ }
+
+ /* count the number of queues needed on the bbdev device on this link */
+ for (lcore = 0; lcore < nb_lcores; lcore++) {
+ lcoreid = lcores[lcore];
+ qconf = &lcore_queue_conf[lcoreid];
+ tot_nb_bbdev_qs += qconf->nb_bbdev_qs;
+ }
+
+ prepare_bbdev_device(dev_id, tot_nb_bbdev_qs);
+}
+
+int
+main(int argc, char **argv)
+{
+ int ret, nb_bbdevs, nb_ports;
+ int enabled_portcount = 0;
+ uint8_t lcore_id;
+ char rx = 'R', tx = 'T', up_link = 'U', down_link = 'D';
+ void *sigret;
+ uint32_t bbdev_enabled_port_mask = 0;
+ struct lcore_setup lcore_setup = { 0 };
+ struct bbdev_config_params bbdev_params = { 0 };
+
+ sigret = signal(SIGTERM, signal_handler);
+ if (sigret == SIG_ERR)
+ rte_exit(EXIT_FAILURE, "signal(%d, ...) failed", SIGTERM);
+
+ sigret = signal(SIGINT, signal_handler);
+ if (sigret == SIG_ERR)
+ rte_exit(EXIT_FAILURE, "signal(%d, ...) failed", SIGINT);
+
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+
+ argc -= ret;
+ argv += ret;
+
+ /* parse application arguments (after the EAL ones) */
+ ret = bbdev_parse_args(argc, argv, &bbdev_params);
+
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid BBDEV arguments\n");
+
+ /* Get number of available bbdev devices */
+ nb_bbdevs = rte_bbdev_count();
+ if (nb_bbdevs == 0)
+ rte_exit(EXIT_FAILURE, "No bbdevs detected!\n");
+ printf("Number of bbdevs detected: %d\n", nb_bbdevs);
+
+ /* Get number of available ethdev devices */
+ nb_ports = rte_eth_dev_count();
+ /* Initialize the port/queue configuration of each logical core */
+ ret = lcore_queue_init(nb_ports, &bbdev_params, &lcore_setup);
+ if (ret < nb_bbdevs * 2)
+ rte_exit(EXIT_FAILURE, "Failed to initialize queues.\n");
+
+ /* redistribute the port-to-lcore assignment so the traffic on each link
+ * can be spread over multiple lcores
+ */
+ qs_to_lcores(lcore_setup.rxte_lcores, lcore_setup.rxte_lcore_list);
+ qs_to_lcores(lcore_setup.rxtd_lcores, lcore_setup.rxtd_lcore_list);
+ qs_to_lcores(lcore_setup.tdtx_lcores, lcore_setup.tdtx_lcore_list);
+ qs_to_lcores(lcore_setup.tetx_lcores, lcore_setup.tetx_lcore_list);
+
+ /* create the mbuf mempool for ethdev pkts */
+ ethdev_mbuf_mempool = rte_pktmbuf_pool_create("ethdev_mbuf_pool",
+ NB_MBUF, MEMPOOL_CACHE_SIZE, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ if (ethdev_mbuf_mempool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create ethdev mbuf mempool\n");
+
+ /*initialize ports*/
+ enabled_portcount = initialize_ports(bbdev_params.downlink_rx_ports,
+ down_link, rx, &bbdev_enabled_port_mask);
+ enabled_portcount += initialize_ports(bbdev_params.downlink_tx_ports,
+ down_link, tx, &bbdev_enabled_port_mask);
+ enabled_portcount += initialize_ports(bbdev_params.uplink_rx_ports,
+ up_link, rx, &bbdev_enabled_port_mask);
+ enabled_portcount += initialize_ports(bbdev_params.uplink_tx_ports,
+ up_link, tx, &bbdev_enabled_port_mask);
+
+ if (enabled_portcount < 1)
+ rte_exit(EXIT_FAILURE, "Failed to initialize Ethernet ports\n");
+
+ /*start Ethernet devices*/
+ ret = start_ethdev_ports(nb_ports, bbdev_enabled_port_mask);
+ if (ret <= 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to start Ethernet devices on ports\n");
+ check_all_ports_link_status(nb_ports, bbdev_enabled_port_mask);
+
+ /*create bbdev op pools*/
+ bbdev_op_pool[RTE_BBDEV_OP_TURBO_DEC] =
+ rte_bbdev_op_pool_create("bbdev_op_pool_dec",
+ RTE_BBDEV_OP_TURBO_DEC, NB_MBUF, 128, rte_socket_id());
+ bbdev_op_pool[RTE_BBDEV_OP_TURBO_ENC] =
+ rte_bbdev_op_pool_create("bbdev_op_pool_enc",
+ RTE_BBDEV_OP_TURBO_ENC, NB_MBUF, 128, rte_socket_id());
+
+ if ((bbdev_op_pool[RTE_BBDEV_OP_TURBO_DEC] == NULL) ||
+ (bbdev_op_pool[RTE_BBDEV_OP_TURBO_ENC] == NULL))
+ rte_exit(EXIT_FAILURE, "Cannot create bbdev op pools\n");
+
+ /*start DL bbdev device*/
+ if (lcore_setup.rxte_lcores)
+ enable_bbdev(lcore_setup.rxte_lcore_list,
+ lcore_setup.rxte_lcores, 0);
+ /*start UL bbdev device*/
+ if (lcore_setup.rxtd_lcores)
+ enable_bbdev(lcore_setup.rxtd_lcore_list,
+ lcore_setup.rxtd_lcores, 1);
+
+ /*launch per-lcore init on every lcore*/
+ rte_eal_mp_remote_launch(launch_one_lcore, (void *)&lcore_setup,
+ CALL_MASTER);
+
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (rte_eal_wait_lcore(lcore_id) < 0)
+ return -1;
+ }
+
+ return 0;
+}