@@ -64,9 +64,19 @@
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
struct rte_dpaa_bus rte_dpaa_bus;
+struct netcfg_info *dpaa_netcfg;
+/* define a variable to hold the portal_key, once created.*/
+pthread_key_t dpaa_portal_key;
+
+RTE_DEFINE_PER_LCORE(bool, _dpaa_io);
static inline void
dpaa_add_to_device_list(struct rte_dpaa_device *dev)
@@ -79,11 +89,226 @@ dpaa_remove_from_device_list(struct rte_dpaa_device *dev)
{
TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, dev, next);
}
+
+static int
+dpaa_create_device_list(void)
+{
+	int dev_id;
+	struct rte_dpaa_device *dev;
+	struct fm_eth_port_cfg *cfg;
+	struct fman_if *fman_intf;
+
+	/* One rte_dpaa_device per ethernet port found in the netcfg */
+	for (dev_id = 0; dev_id < dpaa_netcfg->num_ethports; dev_id++) {
+		dev = rte_zmalloc(NULL, sizeof(struct rte_dpaa_device),
+				  RTE_CACHE_LINE_SIZE);
+		if (!dev)
+			return -ENOMEM;
+
+		cfg = &dpaa_netcfg->port_cfg[dev_id];
+		fman_intf = cfg->fman_if;
+
+		/* Device identifiers */
+		dev->id.vendor_id = FSL_VENDOR_ID;
+		dev->id.class_id = FSL_DEVICE_ID;
+		dev->id.fman_id = fman_intf->fman_idx + 1;
+		dev->id.mac_id = fman_intf->mac_idx;
+		dev->id.dev_id = dev_id;
+
+		/* Create device name; snprintf bounds the write and always
+		 * NUL-terminates (dev is rte_zmalloc'd, so already zeroed).
+		 */
+		snprintf(dev->name, RTE_ETH_NAME_MAX_LEN, "fm%d-mac%d",
+			 (fman_intf->fman_idx + 1), fman_intf->mac_idx);
+
+		dpaa_add_to_device_list(dev);
+	}
+
+	rte_dpaa_bus.device_count = dev_id;
+
+	return 0;
+}
+
+static void
+dpaa_clean_device_list(void)
+{
+	struct rte_dpaa_device *dev;
+
+	/* Drain the bus device list: pop the head node and release it
+	 * until the list is empty.
+	 */
+	while ((dev = TAILQ_FIRST(&rte_dpaa_bus.device_list)) != NULL) {
+		TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
+		rte_free(dev);
+	}
+}
+
+/* Affine the calling thread to a core and initialise its bman/qman
+ * portals.  XXX move this function into a separate file.
+ */
+static int
+_dpaa_portal_init(void *arg)
+{
+	cpu_set_t cpuset;
+	pthread_t id;
+	uint32_t cpu = rte_lcore_id();
+	int ret;
+	struct dpaa_portal *dpaa_io_portal;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* arg == 1 requests "any core"; fall back to the master lcore */
+	if ((uintptr_t)arg == 1 || cpu == LCORE_ID_ANY)
+		cpu = rte_get_master_lcore();
+	else if (cpu >= RTE_MAX_LCORE)
+		return -1; /* the core id is not supported */
+
+	/* Set CPU affinity for this thread */
+	CPU_ZERO(&cpuset);
+	CPU_SET(cpu, &cpuset);
+	id = pthread_self();
+	ret = pthread_setaffinity_np(id, sizeof(cpu_set_t), &cpuset);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "pthread_setaffinity_np failed on "
+			"core :%d with ret: %d", cpu, ret);
+		return ret;
+	}
+
+	/* Initialise bman thread portals */
+	ret = bman_thread_init();
+	if (ret) {
+		PMD_DRV_LOG(ERR, "bman_thread_init failed on "
+			"core %d with ret: %d", cpu, ret);
+		return ret;
+	}
+
+	PMD_DRV_LOG(DEBUG, "BMAN thread initialized");
+
+	/* Initialise qman thread portals */
+	ret = qman_thread_init();
+	if (ret) {
+		PMD_DRV_LOG(ERR, "qman_thread_init failed on "
+			"core %d with ret: %d", cpu, ret);
+		bman_thread_finish();
+		return ret;
+	}
+
+	PMD_DRV_LOG(DEBUG, "QMAN thread initialized");
+
+	dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
+				    RTE_CACHE_LINE_SIZE);
+	if (!dpaa_io_portal) {
+		PMD_DRV_LOG(ERR, "Unable to allocate memory");
+		bman_thread_finish();
+		qman_thread_finish();
+		return -ENOMEM;
+	}
+
+	dpaa_io_portal->qman_idx = qman_get_portal_index();
+	dpaa_io_portal->bman_idx = bman_get_portal_index();
+	dpaa_io_portal->tid = syscall(SYS_gettid);
+
+	ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "pthread_setspecific failed on "
+			"core %d with ret: %d", cpu, ret);
+		/* tear down the portals and free the struct we just set up */
+		dpaa_portal_finish(dpaa_io_portal);
+		return ret;
+	}
+
+	RTE_PER_LCORE(_dpaa_io) = true;
+
+	PMD_DRV_LOG(DEBUG, "DPAA portal initialized");
+
+	return 0;
+}
+
+/*
+ * rte_dpaa_portal_init - thread-level wrapper over _dpaa_portal_init;
+ * performs the portal setup at most once per lcore.
+ */
+int
+rte_dpaa_portal_init(void *arg)
+{
+	if (RTE_PER_LCORE(_dpaa_io))
+		return 0; /* this lcore is already initialised */
+
+	return _dpaa_portal_init(arg);
+}
+
+void
+dpaa_portal_finish(void *arg)
+{
+	struct dpaa_portal *portal = arg;
+
+	if (portal == NULL) {
+		PMD_DRV_LOG(DEBUG, "Portal already cleaned");
+		return;
+	}
+
+	bman_thread_finish();
+	qman_thread_finish();
+
+	/* Clear the per-thread key value before releasing the portal. */
+	pthread_setspecific(dpaa_portal_key, NULL);
+
+	rte_free(portal);
+
+	RTE_PER_LCORE(_dpaa_io) = false;
+}
+
static int
rte_dpaa_bus_scan(void)
{
+	int ret;
+
PMD_INIT_FUNC_TRACE();
+	/* Load the device-tree driver */
+	ret = of_init();
+	if (ret) {
+		PMD_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
+		return -1;
+	}
+
+	/* Get the interface configurations from device-tree */
+	dpaa_netcfg = netcfg_acquire();
+	if (!dpaa_netcfg) {
+		PMD_BUS_LOG(ERR, "netcfg_acquire failed");
+		return -EINVAL;
+	}
+
+	if (!dpaa_netcfg->num_ethports) {
+		PMD_BUS_LOG(INFO, "no network interfaces available");
+		/* This is not an error */
+		return 0;
+	}
+
+	PMD_BUS_LOG(DEBUG, "Bus: Address of netcfg=%p, Ethports=%d",
+		    dpaa_netcfg, dpaa_netcfg->num_ethports);
+
+#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
+	dump_netcfg(dpaa_netcfg);
+#endif
+
+	ret = dpaa_create_device_list();
+	if (ret) {
+		PMD_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
+		/* release the netcfg and any partially built list */
+		dpaa_clean_device_list();
+		netcfg_release(dpaa_netcfg);
+		dpaa_netcfg = NULL;
+		return ret;
+	}
+
+	/* create the key, supplying a function that'll be invoked
+	 * when a portal affined thread will be deleted.
+	 */
+	ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
+	if (ret) {
+		PMD_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
+		dpaa_clean_device_list();
+		netcfg_release(dpaa_netcfg);
+		dpaa_netcfg = NULL;
+		return ret;
+	}
+
return 0;
}
@@ -120,6 +345,7 @@ rte_dpaa_device_match(struct rte_dpaa_driver *drv __rte_unused,
struct rte_dpaa_device *dev __rte_unused)
{
int ret = -1;
+	char *dev_name = NULL;
PMD_INIT_FUNC_TRACE();
@@ -128,6 +354,39 @@ rte_dpaa_device_match(struct rte_dpaa_driver *drv __rte_unused,
return ret;
}
+	/* For identifying DPAA devices,
+	 * 1. name starts with fman-
+	 * 2. FSL_VENDOR_ID and FSL_DEVICE_ID are specified
+	 */
+	PMD_DRV_LOG(DEBUG, "vendor=%d, class=%d, name=%s\n",
+		dev->id.vendor_id, dev->id.class_id, dev->name);
+	if ((dev->id.vendor_id == FSL_VENDOR_ID) &&
+	    (dev->id.class_id == FSL_DEVICE_ID)) {
+		/* Generate name (bounded, always NUL-terminated) */
+		dev_name = rte_zmalloc(NULL, RTE_ETH_NAME_MAX_LEN, 0);
+		if (!dev_name) {
+			ret = -ENOMEM;
+			goto err_out;
+		}
+
+		snprintf(dev_name, RTE_ETH_NAME_MAX_LEN, "fm%d-mac%d",
+			dev->id.fman_id, dev->id.mac_id);
+
+		/* Verify the full name; a prefix match (strncmp) would
+		 * wrongly accept e.g. "fm1-mac10" for generated "fm1-mac1" */
+		ret = strcmp(dev->name, dev_name);
+		if (ret) {
+			PMD_DRV_LOG(DEBUG, "(%s) not DPAA device", dev->name);
+			ret = -1;
+			goto err_out;
+		} else {
+			PMD_DRV_LOG(DEBUG, "DPAA Device (%s)", dev->name);
+		}
+	}
+
+err_out:
+	rte_free(dev_name); /* rte_free(NULL) is a no-op, no guard needed */
+
return ret;
}
@@ -1,7 +1,46 @@
DPDK_17.08 {
global:
+	bman_acquire;
+	bman_free_pool;
+	bman_get_params;
+	bman_new_pool;
+	bman_release;
+	dpaa_netcfg;
+	fm_mac_add_exact_match_mac_addr;
+	fm_mac_rem_exact_match_mac_addr;
+	fman_ccsr_map_fd;
+	fman_dealloc_bufs_mask_hi;
+	fman_dealloc_bufs_mask_lo;
+	fman_if_disable_rx;
+	fman_if_discard_rx_errors;
+	fman_if_enable_rx;
+	fman_if_get_fc_quanta;
+	fman_if_get_fc_threshold;
+	fman_if_promiscuous_disable;
+	fman_if_promiscuous_enable;
+	fman_if_reset_mcast_filter_table;
+	fman_if_set_bp;
+	fman_if_set_fc_quanta;
+	fman_if_set_fc_threshold;
+	fman_if_set_fdoff;
+	fman_if_set_ic_params;
+	fman_if_set_maxfrm;
+	fman_if_set_mcast_filter_table;
+	fman_if_stats_get;
+	fman_if_stats_reset;
+	netcfg_acquire;
+	netcfg_release;
+	qman_create_fq;
+	qman_dequeue;
+	qman_dqrr_consume;
+	qman_enqueue_multi;
+	qman_init_fq;
+	qman_reserve_fqid_range;
+	qman_set_vdq;
rte_dpaa_driver_register;
rte_dpaa_driver_unregister;
+	rte_dpaa_mem_ptov;
+	rte_dpaa_portal_init;
};
@@ -36,6 +36,12 @@
#include <rte_bus.h>
#include <rte_mempool.h>
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
#define FSL_CLASS_ID 0
#define FSL_VENDOR_ID 0x1957
#define FSL_DEVICE_ID 0x410 /* custom */