@@ -303,6 +303,7 @@ struct qdma_pci_dev {
void qdma_dev_ops_init(struct rte_eth_dev *dev);
void qdma_txq_pidx_update(void *arg);
int qdma_pf_csr_read(struct rte_eth_dev *dev);
+int qdma_vf_csr_read(struct rte_eth_dev *dev);
uint8_t qmda_get_desc_sz_idx(enum rte_pmd_qdma_bypass_desc_len);
@@ -22,6 +22,8 @@
#include "qdma.h"
#include "qdma_access_common.h"
+#include "qdma_mbox_protocol.h"
+#include "qdma_mbox.h"
#include "qdma_reg_dump.h"
#include "qdma_platform.h"
#include "qdma_devops.h"
@@ -64,6 +66,39 @@ static void qdma_sort_c2h_cntr_th_values(struct qdma_pci_dev *qdma_dev)
}
#endif /* QDMA_LATENCY_OPTIMIZED */
+int qdma_vf_csr_read(struct rte_eth_dev *dev)
+{
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	struct qdma_mbox_msg *m = qdma_mbox_msg_alloc();
+	int rv, i;
+	struct qdma_csr_info csr_info;
+
+	if (!m)
+		return -ENOMEM;
+
+	qdma_mbox_compose_csr_read(qdma_dev->func_id, m->raw_data);
+	rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT); /* VF asks PF for global CSRs */
+	if (rv < 0)
+		goto free_msg;
+
+	rv = qdma_mbox_vf_csr_get(m->raw_data, &csr_info);
+	if (rv < 0)
+		goto free_msg;
+	for (i = 0; i < QDMA_NUM_RING_SIZES; i++) {
+		qdma_dev->g_ring_sz[i] = (uint32_t)csr_info.ringsz[i];
+		qdma_dev->g_c2h_buf_sz[i] = (uint32_t)csr_info.bufsz[i];
+		qdma_dev->g_c2h_timer_cnt[i] = (uint32_t)csr_info.timer_cnt[i];
+		qdma_dev->g_c2h_cnt_th[i] = (uint32_t)csr_info.cnt_thres[i];
+	}
+#ifdef QDMA_LATENCY_OPTIMIZED
+	qdma_sort_c2h_cntr_th_values(qdma_dev); /* sort once, after all thresholds are populated */
+#endif /* QDMA_LATENCY_OPTIMIZED */
+
+free_msg:
+	qdma_mbox_msg_free(m);
+	return rv;
+}
+
int qdma_pf_csr_read(struct rte_eth_dev *dev)
{
int ret = 0;
@@ -131,6 +166,44 @@ static int qdma_pf_fmap_prog(struct rte_eth_dev *dev)
return ret;
}
+int qdma_dev_notify_qadd(struct rte_eth_dev *dev, uint32_t qidx_hw,
+ enum qdma_dev_q_type q_type)
+{ /* VF: tell the PF over the mailbox that HW queue qidx_hw (q_type) is now in use */
+ struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+ struct qdma_mbox_msg *m;
+ int rv = 0;
+
+ m = qdma_mbox_msg_alloc();
+ if (!m)
+ return -ENOMEM;
+
+ qdma_mbox_compose_vf_notify_qadd(qdma_dev->func_id, qidx_hw,
+ q_type, m->raw_data);
+ rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT); /* waits for PF response (MBOX_OP_RSP_TIMEOUT) */
+
+ qdma_mbox_msg_free(m);
+ return rv; /* 0 on success, negative on mailbox failure */
+}
+
+int qdma_dev_notify_qdel(struct rte_eth_dev *dev, uint32_t qidx_hw,
+ enum qdma_dev_q_type q_type)
+{ /* VF: tell the PF over the mailbox that HW queue qidx_hw (q_type) was released */
+ struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+ struct qdma_mbox_msg *m;
+ int rv = 0;
+
+ m = qdma_mbox_msg_alloc();
+ if (!m)
+ return -ENOMEM;
+
+ qdma_mbox_compose_vf_notify_qdel(qdma_dev->func_id, qidx_hw,
+ q_type, m->raw_data);
+ rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT); /* waits for PF response (MBOX_OP_RSP_TIMEOUT) */
+
+ qdma_mbox_msg_free(m);
+ return rv; /* 0 on success, negative on mailbox failure */
+}
+
uint8_t qmda_get_desc_sz_idx(enum rte_pmd_qdma_bypass_desc_len size)
{
uint8_t ret;
@@ -243,9 +316,33 @@ int qdma_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
return -EINVAL;
}
}
+ } else { /* VF path: PF owns queue accounting, request the add via mailbox */
+ err = qdma_dev_notify_qadd(dev, rx_queue_id +
+ qdma_dev->queue_base,
+ QDMA_DEV_Q_TYPE_C2H);
+ if (err < 0)
+ return -EINVAL;
+
+ if (qdma_dev->q_info[rx_queue_id].queue_mode ==
+ RTE_PMD_QDMA_STREAMING_MODE) {
+ err = qdma_dev_notify_qadd(dev, rx_queue_id +
+ qdma_dev->queue_base,
+ QDMA_DEV_Q_TYPE_CMPT);
+ if (err < 0) { /* roll back the C2H add so PF bookkeeping stays consistent */
+ qdma_dev_notify_qdel(dev, rx_queue_id +
+ qdma_dev->queue_base,
+ QDMA_DEV_Q_TYPE_C2H);
+ return -EINVAL;
+ }
+ }
 }
+
 if (!qdma_dev->init_q_range) {
- if (!qdma_dev->is_vf) {
+ if (qdma_dev->is_vf) { /* VF cannot read CSRs directly; fetch them from the PF */
+ err = qdma_vf_csr_read(dev);
+ if (err < 0)
+ goto rx_setup_err;
+ } else {
 err = qdma_pf_csr_read(dev);
 if (err < 0)
 goto rx_setup_err;
@@ -534,18 +631,27 @@ int qdma_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
QDMA_DEV_Q_TYPE_C2H);
if (qdma_dev->q_info[rx_queue_id].queue_mode ==
- RTE_PMD_QDMA_STREAMING_MODE)
+ RTE_PMD_QDMA_STREAMING_MODE) {
qdma_dev_decrement_active_queue
(qdma_dev->dma_device_index,
qdma_dev->func_id,
QDMA_DEV_Q_TYPE_CMPT);
- }
- if (rxq) {
- if (rxq->rx_mz)
- rte_memzone_free(rxq->rx_mz);
- if (rxq->sw_ring)
- rte_free(rxq->sw_ring);
- rte_free(rxq);
+ } else {
+ qdma_dev_notify_qdel(dev, rx_queue_id +
+ qdma_dev->queue_base, QDMA_DEV_Q_TYPE_C2H);
+
+ if (qdma_dev->q_info[rx_queue_id].queue_mode ==
+ RTE_PMD_QDMA_STREAMING_MODE)
+ qdma_dev_notify_qdel(dev, rx_queue_id +
+ qdma_dev->queue_base, QDMA_DEV_Q_TYPE_CMPT);
+ }
+ if (rxq) {
+ if (rxq->rx_mz)
+ rte_memzone_free(rxq->rx_mz);
+ if (rxq->sw_ring)
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ }
}
return err;
}
@@ -591,9 +697,21 @@ int qdma_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
QDMA_DEV_Q_TYPE_H2C);
if (err != QDMA_SUCCESS)
return -EINVAL;
+ } else { /* VF path: request the H2C queue add from the PF via mailbox */
+ err = qdma_dev_notify_qadd(dev, tx_queue_id +
+ qdma_dev->queue_base,
+ QDMA_DEV_Q_TYPE_H2C);
+ if (err < 0)
+ return -EINVAL;
+ }
 if (!qdma_dev->init_q_range) {
- if (!qdma_dev->is_vf) {
+ if (qdma_dev->is_vf) { /* VF cannot read CSRs directly; fetch them from the PF */
+ err = qdma_vf_csr_read(dev);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "CSR read failed\n");
+ goto tx_setup_err;
+ }
+ } else {
 err = qdma_pf_csr_read(dev);
 if (err < 0) {
 PMD_DRV_LOG(ERR, "CSR read failed\n");
@@ -751,16 +869,28 @@ int qdma_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
tx_setup_err:
PMD_DRV_LOG(ERR, " Tx queue setup failed");
- if (!qdma_dev->is_vf)
+ if (!qdma_dev->is_vf) {
qdma_dev_decrement_active_queue(qdma_dev->dma_device_index,
qdma_dev->func_id,
QDMA_DEV_Q_TYPE_H2C);
+ } else {
+ qdma_dev_notify_qdel(dev, tx_queue_id +
+ qdma_dev->queue_base, QDMA_DEV_Q_TYPE_H2C);
+ }
if (txq) {
- if (txq->tx_mz)
- rte_memzone_free(txq->tx_mz);
- if (txq->sw_ring)
- rte_free(txq->sw_ring);
- rte_free(txq);
+ if (qdma_dev->is_vf) {
+ err = qdma_vf_csr_read(dev);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "CSR read failed\n");
+ goto tx_setup_err;
+ }
+ } else {
+ if (txq->tx_mz)
+ rte_memzone_free(txq->tx_mz);
+ if (txq->sw_ring)
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ }
}
return err;
}
@@ -802,11 +932,16 @@ void qdma_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_id)
 PMD_DRV_LOG(INFO, "Remove H2C queue: %d", txq->queue_id);
 qdma_dev = txq->dev->data->dev_private;
- if (!qdma_dev->is_vf)
+ if (!qdma_dev->is_vf) {
 qdma_dev_decrement_active_queue
 (qdma_dev->dma_device_index,
 qdma_dev->func_id,
- QDMA_DEV_Q_TYPE_H2C);
+ QDMA_DEV_Q_TYPE_H2C);
+ } else { /* VF: ask the PF to release the H2C queue */
+ qdma_dev_notify_qdel(txq->dev, txq->queue_id +
+ qdma_dev->queue_base,
+ QDMA_DEV_Q_TYPE_H2C);
+ }
 if (txq->sw_ring)
 rte_free(txq->sw_ring);
 if (txq->tx_mz)
@@ -837,6 +972,15 @@ void qdma_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_id)
 (qdma_dev->dma_device_index,
 qdma_dev->func_id,
 QDMA_DEV_Q_TYPE_CMPT);
+ } else { /* VF: ask the PF to release the queue(s) */
+ qdma_dev_notify_qdel(rxq->dev, rxq->queue_id +
+ qdma_dev->queue_base,
+ QDMA_DEV_Q_TYPE_C2H);
+
+ if (rxq->st_mode) /* streaming (ST) queue: also drop its CMPT ring */
+ qdma_dev_notify_qdel(rxq->dev, rxq->queue_id +
+ qdma_dev->queue_base,
+ QDMA_DEV_Q_TYPE_CMPT);
 }
 
 if (rxq->sw_ring)
@@ -1111,6 +1255,7 @@ int qdma_dev_reset(struct rte_eth_dev *dev)
{
struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct qdma_mbox_msg *m = NULL;
uint32_t vf_device_count = 0;
uint32_t i = 0;
int ret = 0;
@@ -1141,6 +1286,21 @@ int qdma_dev_reset(struct rte_eth_dev *dev)
 for (i = 0; i < pci_dev->max_vfs; i++) {
 if (qdma_dev->vfinfo[i].func_id == QDMA_FUNC_ID_INVALID)
 continue;
+
+ m = qdma_mbox_msg_alloc();
+ if (!m)
+ return -ENOMEM;
+ qdma_mbox_compose_pf_reset_done_message(m->raw_data, qdma_dev->func_id,
+ qdma_dev->vfinfo[i].func_id);
+ ret = qdma_mbox_msg_send(dev, m, 0); /* timeout 0 — presumably no response wait; TODO confirm mbox frees m */
+ if (ret < 0)
+ PMD_DRV_LOG(ERR, "Sending reset failed from PF:%d to VF:%d\n",
+ qdma_dev->func_id, qdma_dev->vfinfo[i].func_id);
+
+ /* Mark VFs with invalid function id mapping,
+ * and this gets updated when VF comes online again
+ */
+ qdma_dev->vfinfo[i].func_id = QDMA_FUNC_ID_INVALID;
 }
/* Start waiting for a maximum of 60 secs to get all its VFs
@@ -25,6 +25,7 @@
#include "qdma_version.h"
#include "qdma_access_common.h"
#include "qdma_access_export.h"
+#include "qdma_mbox.h"
#include "qdma_devops.h"
/* Poll for QDMA errors every 1 second */
@@ -546,6 +547,8 @@ int qdma_eth_dev_init(struct rte_eth_dev *dev)
}
pcie_perf_enable(pci_dev);
+ if (dma_priv->dev_cap.mailbox_en && pci_dev->max_vfs)
+ qdma_mbox_init(dev);
if (!dma_priv->reset_in_progress) {
num_vfs = pci_dev->max_vfs;
@@ -581,13 +584,57 @@ int qdma_eth_dev_init(struct rte_eth_dev *dev)
 int qdma_eth_dev_uninit(struct rte_eth_dev *dev)
 {
 struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct qdma_mbox_msg *m = NULL;
+ int i, rv;
 /* only uninitialize in the primary process */
 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 return -EPERM;
+ if (qdma_dev->vf_online_count) { /* notify every active VF before tearing down the PF */
+ for (i = 0; i < pci_dev->max_vfs; i++) {
+ if (qdma_dev->vfinfo[i].func_id == QDMA_FUNC_ID_INVALID)
+ continue;
+ m = qdma_mbox_msg_alloc();
+ if (!m)
+ return -ENOMEM;
+ if (!qdma_dev->reset_in_progress)
+ qdma_mbox_compose_pf_offline(m->raw_data,
+ qdma_dev->func_id,
+ qdma_dev->vfinfo[i].func_id);
+ else /* PF reset in progress: tell VFs to reset instead of plain offline */
+ qdma_mbox_compose_vf_reset_message(m->raw_data,
+ qdma_dev->func_id,
+ qdma_dev->vfinfo[i].func_id);
+ rv = qdma_mbox_msg_send(dev, m, 0); /* timeout 0 — presumably no response wait; failure only logged */
+ if (rv < 0)
+ PMD_DRV_LOG(ERR, "Send bye failed from PF:%d to VF:%d\n",
+ qdma_dev->func_id,
+ qdma_dev->vfinfo[i].func_id);
+ }
+ PMD_DRV_LOG(INFO, "%s: Wait till all VFs shutdown for PF-%d(DEVFN)\n",
+ __func__, qdma_dev->func_id);
+ i = 0;
+ while (i < SHUTDOWN_TIMEOUT) { /* poll ~1 ms per iteration, up to SHUTDOWN_TIMEOUT ms */
+ if (!qdma_dev->vf_online_count) {
+ PMD_DRV_LOG(INFO, "%s: VFs shutdown completed for PF-%d(DEVFN)\n",
+ __func__, qdma_dev->func_id);
+ break;
+ }
+ rte_delay_ms(1);
+ i++;
+ }
+
+ if (i >= SHUTDOWN_TIMEOUT) {
+ PMD_DRV_LOG(ERR, "%s: Failed VFs shutdown for PF-%d(DEVFN)\n",
+ __func__, qdma_dev->func_id);
+ }
+ }
 if (qdma_dev->dev_configured)
 qdma_dev_close(dev);
+ if (qdma_dev->dev_cap.mailbox_en && pci_dev->max_vfs)
+ qdma_mbox_uninit(dev); /* mirrors the qdma_mbox_init() done at device init when VFs are present */
 /* cancel pending polls */
 if (qdma_dev->is_master)