@@ -15,6 +15,8 @@
#include <rte_byteorder.h>
#include <rte_memzone.h>
#include <linux/pci.h>
+
+#include "qdma_resource_mgmt.h"
#include "qdma_log.h"
#define QDMA_NUM_BARS (6)
@@ -23,6 +25,8 @@
#define QDMA_FUNC_ID_INVALID 0xFFFF
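+/* Default queue base from which the QDMA resource manager allocates queues */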
+#define DEFAULT_QUEUE_BASE (0)
+
#define DEFAULT_TIMER_CNT_TRIG_MODE_TIMER (5)
enum dma_data_direction {
@@ -186,6 +190,9 @@ struct qdma_pci_dev {
*/
uint32_t dma_device_index;
+ /* Device capabilities */
+ struct qdma_dev_attributes dev_cap;
+
uint8_t cmpt_desc_len;
uint8_t c2h_bypass_mode;
uint8_t h2c_bypass_mode;
@@ -210,6 +217,9 @@ struct qdma_pci_dev {
struct queue_info *q_info;
uint8_t init_q_range;
+ /* Pointer to the QDMA access layer function table */
+ struct qdma_hw_access *hw_access;
+
struct qdma_vf_info *vfinfo;
uint8_t vf_online_count;
@@ -218,8 +228,9 @@ struct qdma_pci_dev {
};
int qdma_identify_bars(struct rte_eth_dev *dev);
+int qdma_get_hw_version(struct rte_eth_dev *dev);
int qdma_check_kvargs(struct rte_devargs *devargs,
struct qdma_pci_dev *qdma_dev);
-
+void qdma_check_errors(void *arg);
#endif /* ifndef __QDMA_H__ */
@@ -10,6 +10,7 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include "qdma.h"
+#include "qdma_access_common.h"
#include <fcntl.h>
#include <unistd.h>
@@ -199,7 +200,8 @@ int qdma_check_kvargs(struct rte_devargs *devargs,
int qdma_identify_bars(struct rte_eth_dev *dev)
{
- int bar_len, i;
+ int bar_len, i, ret;
+ uint8_t usr_bar;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct qdma_pci_dev *dma_priv;
@@ -213,6 +215,24 @@ int qdma_identify_bars(struct rte_eth_dev *dev)
return -1;
}
+ /* Find AXI Master Lite (user bar) */
+ ret = dma_priv->hw_access->qdma_get_user_bar(dev,
+ dma_priv->is_vf, dma_priv->func_id, &usr_bar);
+ if (ret != QDMA_SUCCESS ||
+ pci_dev->mem_resource[usr_bar].len == 0) {
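+ /* User bar not reported; fall back to the fixed Versal layout, else mark it absent */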
+ if (dma_priv->ip_type == QDMA_VERSAL_HARD_IP) {
+ if (pci_dev->mem_resource[1].len == 0)
+ dma_priv->user_bar_idx = 2;
+ else
+ dma_priv->user_bar_idx = 1;
+ } else {
+ dma_priv->user_bar_idx = -1;
+ PMD_DRV_LOG(INFO, "Cannot find AXI Master Lite BAR");
+ }
+ } else {
+ dma_priv->user_bar_idx = usr_bar;
+ }
+
/* Find AXI Bridge Master bar(bypass bar) */
for (i = 0; i < QDMA_NUM_BARS; i++) {
bar_len = pci_dev->mem_resource[i].len;
@@ -234,3 +254,33 @@ int qdma_identify_bars(struct rte_eth_dev *dev)
return 0;
}
+int qdma_get_hw_version(struct rte_eth_dev *dev)
+{
+ int ret;
+ struct qdma_pci_dev *dma_priv;
+ struct qdma_hw_version_info version_info;
+
+ dma_priv = (struct qdma_pci_dev *)dev->data->dev_private;
+ ret = dma_priv->hw_access->qdma_get_version(dev,
+ dma_priv->is_vf, &version_info);
+ if (ret < 0)
+ return dma_priv->hw_access->qdma_get_error_code(ret);
+
+ dma_priv->rtl_version = version_info.rtl_version;
+ dma_priv->vivado_rel = version_info.vivado_release;
+ dma_priv->device_type = version_info.device_type;
+ dma_priv->ip_type = version_info.ip_type;
+
+ PMD_DRV_LOG(INFO, "QDMA RTL VERSION : %s\n",
+ version_info.qdma_rtl_version_str);
+ PMD_DRV_LOG(INFO, "QDMA DEVICE TYPE : %s\n",
+ version_info.qdma_device_type_str);
+ PMD_DRV_LOG(INFO, "QDMA VIVADO RELEASE ID : %s\n",
+ version_info.qdma_vivado_release_id_str);
+ if (version_info.ip_type == QDMA_VERSAL_HARD_IP) {
+ PMD_DRV_LOG(INFO, "QDMA VERSAL IP TYPE : %s\n",
+ version_info.qdma_ip_type_str);
+ }
+
+ return 0;
+}
@@ -22,11 +22,27 @@
#include <rte_cycles.h>
#include "qdma.h"
+#include "qdma_version.h"
+#include "qdma_access_common.h"
+#include "qdma_access_export.h"
+/* Poll for QDMA errors every 1 second */
+#define QDMA_ERROR_POLL_FRQ (1000000)
#define PCI_CONFIG_BRIDGE_DEVICE (6)
#define PCI_CONFIG_CLASS_CODE_SHIFT (16)
#define MAX_PCIE_CAPABILITY (48)
+static void qdma_device_attributes_get(struct rte_eth_dev *dev);
+
+/* Poll for any QDMA errors */
+void qdma_check_errors(void *arg)
+{
+ struct qdma_pci_dev *qdma_dev;
+ qdma_dev = ((struct rte_eth_dev *)arg)->data->dev_private;
+ qdma_dev->hw_access->qdma_hw_error_process(arg);
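+ /* Re-arm the alarm to poll again after QDMA_ERROR_POLL_FRQ microseconds */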
+ rte_eal_alarm_set(QDMA_ERROR_POLL_FRQ, qdma_check_errors, arg);
+}
+
/*
* The set of PCI devices this driver supports
*/
@@ -43,6 +59,92 @@ static struct rte_pci_id qdma_pci_id_tbl[] = {
{ .vendor_id = 0, /* sentinel */ },
};
+static void qdma_device_attributes_get(struct rte_eth_dev *dev)
+{
+ struct qdma_pci_dev *qdma_dev;
+
+ qdma_dev = (struct qdma_pci_dev *)dev->data->dev_private;
+ qdma_dev->hw_access->qdma_get_device_attributes(dev,
+ &qdma_dev->dev_cap);
+
+ /* Clamp the configured queue count to the DPDK per-port maximum */
+ if (qdma_dev->dev_cap.num_qs > RTE_MAX_QUEUES_PER_PORT)
+ qdma_dev->dev_cap.num_qs = RTE_MAX_QUEUES_PER_PORT;
+
+ PMD_DRV_LOG(INFO, "qmax = %d, mm %d, st %d.\n",
+ qdma_dev->dev_cap.num_qs, qdma_dev->dev_cap.mm_en,
+ qdma_dev->dev_cap.st_en);
+}
+
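+/* Walk the standard PCI capability list and return the config space
+ * offset of the capability 'cap', or 0 if it is not present
+ */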
+static inline uint8_t pcie_find_cap(const struct rte_pci_device *pci_dev,
+ uint8_t cap)
+{
+ uint8_t pcie_cap_pos = 0;
+ uint8_t pcie_cap_id = 0;
+ int ttl = MAX_PCIE_CAPABILITY;
+ int ret;
+
+ ret = rte_pci_read_config(pci_dev, &pcie_cap_pos, sizeof(uint8_t),
+ PCI_CAPABILITY_LIST);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "PCIe config space read failed..\n");
+ return 0;
+ }
+
+ while (ttl-- && pcie_cap_pos >= PCI_STD_HEADER_SIZEOF) {
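+ /* Capability pointers are dword aligned; mask off the two low bits */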
+ pcie_cap_pos &= ~3;
+
+ ret = rte_pci_read_config(pci_dev,
+ &pcie_cap_id, sizeof(uint8_t),
+ (pcie_cap_pos + PCI_CAP_LIST_ID));
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "PCIe config space read failed..\n");
+ return 0;
+ }
+
+ if (pcie_cap_id == 0xff)
+ break;
+
+ if (pcie_cap_id == cap)
+ return pcie_cap_pos;
+
+ ret = rte_pci_read_config(pci_dev,
+ &pcie_cap_pos, sizeof(uint8_t),
+ (pcie_cap_pos + PCI_CAP_LIST_NEXT));
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "PCIe config space read failed..\n");
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
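+/* Enable PCIe extended tags and relaxed ordering in the device
+ * control register to improve DMA performance
+ */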
+static void pcie_perf_enable(const struct rte_pci_device *pci_dev)
+{
+ uint16_t value;
+ uint8_t pcie_cap_pos = pcie_find_cap(pci_dev, PCI_CAP_ID_EXP);
+
+ if (!pcie_cap_pos)
+ return;
+
+ if (rte_pci_read_config(pci_dev, &value, sizeof(uint16_t),
+ pcie_cap_pos + PCI_EXP_DEVCTL) < 0) {
+ PMD_DRV_LOG(ERR, "PCIe config space read failed.\n");
+ return;
+ }
+
+ value |= (PCI_EXP_DEVCTL_EXT_TAG | PCI_EXP_DEVCTL_RELAX_EN);
+
+ if (rte_pci_write_config(pci_dev, &value, sizeof(uint16_t),
+ pcie_cap_pos + PCI_EXP_DEVCTL) < 0) {
+ PMD_DRV_LOG(ERR, "PCIe config space write failed.\n");
+ return;
+ }
+}
+
/* parse a sysfs file containing one integer value */
static int parse_sysfs_value(const char *filename, uint32_t *val)
{
@@ -234,7 +336,7 @@ static int qdma_eth_dev_init(struct rte_eth_dev *dev)
{
struct qdma_pci_dev *dma_priv;
uint8_t *baseaddr;
- int i, idx, ret;
+ int i, idx, ret, qbase;
struct rte_pci_device *pci_dev;
uint16_t num_vfs;
uint8_t max_pci_bus = 0;
@@ -297,8 +399,30 @@ static int qdma_eth_dev_init(struct rte_eth_dev *dev)
pci_dev->mem_resource[dma_priv->config_bar_idx].addr;
dma_priv->bar_addr[dma_priv->config_bar_idx] = baseaddr;
+ /* Assign the QDMA access layer function pointers based on the HW design */
+ dma_priv->hw_access = rte_zmalloc("hwaccess",
+ sizeof(struct qdma_hw_access), 0);
+ if (dma_priv->hw_access == NULL) {
+ rte_free(dev->data->mac_addrs);
+ return -ENOMEM;
+ }
+ idx = qdma_hw_access_init(dev, dma_priv->is_vf, dma_priv->hw_access);
+ if (idx < 0) {
+ rte_free(dma_priv->hw_access);
+ rte_free(dev->data->mac_addrs);
+ return -EINVAL;
+ }
+
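+ /* Read and log the QDMA hardware version information */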
+ idx = qdma_get_hw_version(dev);
+ if (idx < 0) {
+ rte_free(dma_priv->hw_access);
+ rte_free(dev->data->mac_addrs);
+ return -EINVAL;
+ }
+
idx = qdma_identify_bars(dev);
if (idx < 0) {
+ rte_free(dma_priv->hw_access);
rte_free(dev->data->mac_addrs);
return -EINVAL;
}
@@ -312,14 +436,99 @@ static int qdma_eth_dev_init(struct rte_eth_dev *dev)
PMD_DRV_LOG(INFO, "QDMA device driver probe:");
+ /* Get the device attributes from the hardware */
+ qdma_device_attributes_get(dev);
+
+ /* Create the master resource node for queue management on the given
+ * bus number. The node is created only once per bus number.
+ */
+ qbase = DEFAULT_QUEUE_BASE;
+
ret = get_max_pci_bus_num(pci_dev->addr.bus, &max_pci_bus);
- if (ret != 0 && !max_pci_bus) {
+ if (ret != QDMA_SUCCESS && !max_pci_bus) {
PMD_DRV_LOG(ERR, "Failed to get max pci bus number\n");
+ rte_free(dma_priv->hw_access);
rte_free(dev->data->mac_addrs);
return -EINVAL;
}
PMD_DRV_LOG(INFO, "PCI max bus number : 0x%x", max_pci_bus);
+ ret = qdma_master_resource_create(pci_dev->addr.bus, max_pci_bus,
+ qbase, dma_priv->dev_cap.num_qs,
+ &dma_priv->dma_device_index);
+ if (ret == -QDMA_ERR_NO_MEM) {
+ rte_free(dma_priv->hw_access);
+ rte_free(dev->data->mac_addrs);
+ return -ENOMEM;
+ }
+
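+ /* Query the PCI function number of this PF from the hardware */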
+ dma_priv->hw_access->qdma_get_function_number(dev,
+ &dma_priv->func_id);
+ PMD_DRV_LOG(INFO, "PF function ID: %d", dma_priv->func_id);
+
+ /* CSR programming is done once per given board or bus number,
+ * done by the master PF
+ */
+ if (ret == QDMA_SUCCESS) {
+ RTE_LOG(INFO, PMD, "QDMA PMD VERSION: %s\n", QDMA_PMD_VERSION);
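+ /* Program the default values for the global CSRs */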
+ dma_priv->hw_access->qdma_set_default_global_csr(dev);
+ for (i = 0; i < dma_priv->dev_cap.mm_channel_max; i++) {
+ if (dma_priv->dev_cap.mm_en) {
+ /* Enable MM C2H Channel */
+ dma_priv->hw_access->qdma_mm_channel_conf(dev,
+ i, 1, 1);
+ /* Enable MM H2C Channel */
+ dma_priv->hw_access->qdma_mm_channel_conf(dev,
+ i, 0, 1);
+ } else {
+ /* Disable MM C2H Channel */
+ dma_priv->hw_access->qdma_mm_channel_conf(dev,
+ i, 1, 0);
+ /* Disable MM H2C Channel */
+ dma_priv->hw_access->qdma_mm_channel_conf(dev,
+ i, 0, 0);
+ }
+ }
+
+ ret = dma_priv->hw_access->qdma_init_ctxt_memory(dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "%s: Failed to initialize ctxt memory, err = %d\n",
+ __func__, ret);
+ rte_free(dma_priv->hw_access);
+ rte_free(dev->data->mac_addrs);
+ return -EINVAL;
+ }
+
+ ret = dma_priv->hw_access->qdma_hw_error_enable(dev,
+ dma_priv->hw_access->qdma_max_errors);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "%s: Failed to enable hw errors, err = %d\n",
+ __func__, ret);
+ rte_free(dma_priv->hw_access);
+ rte_free(dev->data->mac_addrs);
+ return -EINVAL;
+ }
+
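+ /* Start the periodic error poll; cancelled in qdma_eth_dev_uninit() */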
+ rte_eal_alarm_set(QDMA_ERROR_POLL_FRQ, qdma_check_errors,
+ (void *)dev);
+ dma_priv->is_master = 1;
+ }
+
+ /*
+ * Create an entry for the device in the board list if not
+ * already created
+ */
+ ret = qdma_dev_entry_create(dma_priv->dma_device_index,
+ dma_priv->func_id);
+ if (ret != QDMA_SUCCESS &&
+ ret != -QDMA_ERR_RM_DEV_EXISTS) {
+ PMD_DRV_LOG(ERR, "PF-%d(DEVFN) qdma_dev_entry_create failed: %d\n",
+ dma_priv->func_id, ret);
+ rte_free(dma_priv->hw_access);
+ rte_free(dev->data->mac_addrs);
+ return -ENOMEM;
+ }
+
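+ /* Tune the PCIe device control register for performance */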
+ pcie_perf_enable(pci_dev);
+
if (!dma_priv->reset_in_progress) {
num_vfs = pci_dev->max_vfs;
if (num_vfs) {
@@ -358,6 +567,14 @@ static int qdma_eth_dev_uninit(struct rte_eth_dev *dev)
/* only uninitialize in the primary process */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -EPERM;
+ /* cancel pending polls */
+ if (qdma_dev->is_master)
+ rte_eal_alarm_cancel(qdma_check_errors, (void *)dev);
+
+ /* Remove the device node from the board list */
+ qdma_dev_entry_destroy(qdma_dev->dma_device_index,
+ qdma_dev->func_id);
+ qdma_master_resource_destroy(qdma_dev->dma_device_index);
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
@@ -381,6 +598,10 @@ static int qdma_eth_dev_uninit(struct rte_eth_dev *dev)
qdma_dev->q_info = NULL;
}
+ if (qdma_dev->hw_access != NULL) {
+ rte_free(qdma_dev->hw_access);
+ qdma_dev->hw_access = NULL;
+ }
return 0;
}