@@ -94,11 +94,652 @@ static int qdma_ethdev_offline(struct rte_eth_dev *dev)
return rv;
}
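+/* Program the FMAP (queue base and queue count) for this VF.
+ * The VF cannot program the FMAP registers directly, so the request is
+ * composed as a mailbox message and sent to the PF, which programs the
+ * hardware and returns a status in the response.
+ */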
+static int qdma_vf_set_qrange(struct rte_eth_dev *dev)
+{
+ struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+ struct qdma_mbox_msg *m;
+ int rv = 0;
+
+ m = qdma_mbox_msg_alloc();
+ if (!m)
+ return -ENOMEM;
+
+ qdma_mbox_compose_vf_fmap_prog(qdma_dev->func_id,
+ (uint16_t)qdma_dev->qsets_en,
+ (int)qdma_dev->queue_base,
+ m->raw_data);
+ rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT);
+ if (rv < 0) {
+ if (rv != -ENODEV)
+ PMD_DRV_LOG(ERR, "%x set q range (fmap) failed %d.\n",
+ qdma_dev->func_id, rv);
+ goto err_out;
+ }
+
+ rv = qdma_mbox_vf_response_status(m->raw_data);
+
+err_out:
+ qdma_mbox_msg_free(m);
+ return rv;
+}
+
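+/* Request a queue allocation (count and base) from the PF resource
+ * manager over the mailbox. On success, *qbase and *qmax are updated
+ * with the values granted in the PF response.
+ */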
+static int qdma_set_qmax(struct rte_eth_dev *dev, int *qmax, int *qbase)
+{
+ struct qdma_mbox_msg *m;
+ int rv = 0;
+ struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+
+ m = qdma_mbox_msg_alloc();
+ if (!m)
+ return -ENOMEM;
+
+ qdma_mbox_compose_vf_qreq(qdma_dev->func_id, (uint16_t)*qmax & 0xFFFF,
+ *qbase, m->raw_data);
+ rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT);
+ if (rv < 0) {
+ PMD_DRV_LOG(ERR, "%x set q max failed %d.\n",
+ qdma_dev->func_id, rv);
+ goto err_out;
+ }
+
+ rv = qdma_mbox_vf_qinfo_get(m->raw_data, qbase, (uint16_t *)qmax);
+err_out:
+ qdma_mbox_msg_free(m);
+ return rv;
+}
+
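+/* Set up the C2H (Rx) descriptor queue context through the PF.
+ * The descriptor ring parameters (and, for streaming mode, the
+ * completion ring parameters) are packed into a mbox_descq_conf and
+ * written via a mailbox context-write request.
+ */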
+static int qdma_rxq_context_setup(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+ uint32_t qid_hw;
+ struct qdma_mbox_msg *m = qdma_mbox_msg_alloc();
+ struct mbox_descq_conf descq_conf;
+ int rv, bypass_desc_sz_idx;
+ struct qdma_rx_queue *rxq;
+ uint8_t cmpt_desc_fmt;
+ enum mbox_cmpt_ctxt_type cmpt_ctxt_type = QDMA_MBOX_CMPT_CTXT_NONE;
+
+ if (!m)
+ return -ENOMEM;
+ memset(&descq_conf, 0, sizeof(struct mbox_descq_conf));
+ rxq = (struct qdma_rx_queue *)dev->data->rx_queues[qid];
+ qid_hw = qdma_dev->queue_base + rxq->queue_id;
+
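+ /* Map the configured completion entry length to the hardware
+ * completion-context descriptor size encoding
+ */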
+ switch (rxq->cmpt_desc_len) {
+ case RTE_PMD_QDMA_CMPT_DESC_LEN_8B:
+ cmpt_desc_fmt = CMPT_CNTXT_DESC_SIZE_8B;
+ break;
+ case RTE_PMD_QDMA_CMPT_DESC_LEN_16B:
+ cmpt_desc_fmt = CMPT_CNTXT_DESC_SIZE_16B;
+ break;
+ case RTE_PMD_QDMA_CMPT_DESC_LEN_32B:
+ cmpt_desc_fmt = CMPT_CNTXT_DESC_SIZE_32B;
+ break;
+ case RTE_PMD_QDMA_CMPT_DESC_LEN_64B:
+ if (!qdma_dev->dev_cap.cmpt_desc_64b) {
+ PMD_DRV_LOG(ERR, "PF-%d(DEVFN) 64B is not supported in this "
+ "mode:\n", qdma_dev->func_id);
+ return -1;
+ }
+ cmpt_desc_fmt = CMPT_CNTXT_DESC_SIZE_64B;
+ break;
+ default:
+ cmpt_desc_fmt = CMPT_CNTXT_DESC_SIZE_8B;
+ break;
+ }
+ descq_conf.ring_bs_addr = rxq->rx_mz->iova;
+ descq_conf.en_bypass = rxq->en_bypass;
+ descq_conf.irq_arm = 0;
+ descq_conf.at = 0;
+ descq_conf.wbk_en = 1;
+ descq_conf.irq_en = 0;
+
+ bypass_desc_sz_idx = qmda_get_desc_sz_idx(rxq->bypass_desc_sz);
+
+ if (!rxq->st_mode) { /* mm c2h */
+ descq_conf.desc_sz = SW_DESC_CNTXT_MEMORY_MAP_DMA;
+ descq_conf.wbi_intvl_en = 1;
+ descq_conf.wbi_chk = 1;
+ } else { /* st c2h */
+ descq_conf.desc_sz = SW_DESC_CNTXT_C2H_STREAM_DMA;
+ descq_conf.forced_en = 1;
+ descq_conf.cmpt_ring_bs_addr = rxq->rx_cmpt_mz->iova;
+ descq_conf.cmpt_desc_sz = cmpt_desc_fmt;
+ descq_conf.triggermode = rxq->triggermode;
+
+ descq_conf.cmpt_color = CMPT_DEFAULT_COLOR_BIT;
+ descq_conf.cmpt_full_upd = 0;
+ descq_conf.cnt_thres =
+ qdma_dev->g_c2h_cnt_th[rxq->threshidx];
+ descq_conf.timer_thres =
+ qdma_dev->g_c2h_timer_cnt[rxq->timeridx];
+ descq_conf.cmpt_ringsz =
+ qdma_dev->g_ring_sz[rxq->cmpt_ringszidx] - 1;
+ descq_conf.bufsz = qdma_dev->g_c2h_buf_sz[rxq->buffszidx];
+ descq_conf.cmpt_int_en = 0;
+ descq_conf.cmpl_stat_en = rxq->st_mode;
+ descq_conf.pfch_en = rxq->en_prefetch;
+ descq_conf.en_bypass_prefetch = rxq->en_bypass_prefetch;
+ if (qdma_dev->dev_cap.cmpt_ovf_chk_dis)
+ descq_conf.dis_overflow_check = rxq->dis_overflow_check;
+
+ cmpt_ctxt_type = QDMA_MBOX_CMPT_WITH_ST;
+ }
+
+ if (rxq->en_bypass && rxq->bypass_desc_sz != 0)
+ descq_conf.desc_sz = bypass_desc_sz_idx;
+
+ descq_conf.func_id = rxq->func_id;
+ descq_conf.ringsz = qdma_dev->g_ring_sz[rxq->ringszidx] - 1;
+
+ qdma_mbox_compose_vf_qctxt_write(rxq->func_id, qid_hw, rxq->st_mode, 1,
+ cmpt_ctxt_type,
+ &descq_conf, m->raw_data);
+
+ rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT);
+ if (rv < 0) {
+ PMD_DRV_LOG(ERR, "%x, qid_hw 0x%x, mbox failed %d.\n",
+ qdma_dev->func_id, qid_hw, rv);
+ goto err_out;
+ }
+
+ rv = qdma_mbox_vf_response_status(m->raw_data);
+
+err_out:
+ qdma_mbox_msg_free(m);
+ return rv;
+}
+
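+/* Set up the H2C (Tx) descriptor queue context through the PF.
+ * Tx queues carry no completion ring, so the completion context type
+ * is always QDMA_MBOX_CMPT_CTXT_NONE.
+ */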
+static int qdma_txq_context_setup(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+ struct qdma_mbox_msg *m = qdma_mbox_msg_alloc();
+ struct mbox_descq_conf descq_conf;
+ int rv, bypass_desc_sz_idx;
+ struct qdma_tx_queue *txq;
+ uint32_t qid_hw;
+
+ if (!m)
+ return -ENOMEM;
+ memset(&descq_conf, 0, sizeof(struct mbox_descq_conf));
+ txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid];
+ qid_hw = qdma_dev->queue_base + txq->queue_id;
+ descq_conf.ring_bs_addr = txq->tx_mz->iova;
+ descq_conf.en_bypass = txq->en_bypass;
+ descq_conf.wbi_intvl_en = 1;
+ descq_conf.wbi_chk = 1;
+ descq_conf.wbk_en = 1;
+
+ bypass_desc_sz_idx = qmda_get_desc_sz_idx(txq->bypass_desc_sz);
+
+ if (!txq->st_mode) /* mm h2c */
+ descq_conf.desc_sz = SW_DESC_CNTXT_MEMORY_MAP_DMA;
+ else /* st h2c */
+ descq_conf.desc_sz = SW_DESC_CNTXT_H2C_STREAM_DMA;
+ descq_conf.func_id = txq->func_id;
+ descq_conf.ringsz = qdma_dev->g_ring_sz[txq->ringszidx] - 1;
+
+ if (txq->en_bypass && txq->bypass_desc_sz != 0)
+ descq_conf.desc_sz = bypass_desc_sz_idx;
+
+ qdma_mbox_compose_vf_qctxt_write(txq->func_id, qid_hw, txq->st_mode, 0,
+ QDMA_MBOX_CMPT_CTXT_NONE,
+ &descq_conf, m->raw_data);
+
+ rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT);
+ if (rv < 0) {
+ PMD_DRV_LOG(ERR, "%x, qid_hw 0x%x, mbox failed %d.\n",
+ qdma_dev->func_id, qid_hw, rv);
+ goto err_out;
+ }
+
+ rv = qdma_mbox_vf_response_status(m->raw_data);
+
+err_out:
+ qdma_mbox_msg_free(m);
+ return rv;
+}
+
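+/* Invalidate the hardware context of a queue through the PF so that
+ * the queue can be reprogrammed or released.
+ */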
+static int qdma_queue_context_invalidate(struct rte_eth_dev *dev, uint32_t qid,
+ bool st, bool c2h)
+{
+ struct qdma_mbox_msg *m = qdma_mbox_msg_alloc();
+ struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+ uint32_t qid_hw;
+ int rv;
+ enum mbox_cmpt_ctxt_type cmpt_ctxt_type = QDMA_MBOX_CMPT_CTXT_NONE;
+
+ if (!m)
+ return -ENOMEM;
+
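+ /* Only ST C2H queues carry a completion-ring context */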
+ if (st && c2h)
+ cmpt_ctxt_type = QDMA_MBOX_CMPT_WITH_ST;
+ qid_hw = qdma_dev->queue_base + qid;
+ qdma_mbox_compose_vf_qctxt_invalidate(qdma_dev->func_id, qid_hw,
+ st, c2h, cmpt_ctxt_type,
+ m->raw_data);
+ rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT);
+ if (rv < 0) {
+ if (rv != -ENODEV)
+ PMD_DRV_LOG(INFO, "%x, qid_hw 0x%x mbox failed %d.\n",
+ qdma_dev->func_id, qid_hw, rv);
+ goto err_out;
+ }
+
+ rv = qdma_mbox_vf_response_status(m->raw_data);
+
+err_out:
+ qdma_mbox_msg_free(m);
+ return rv;
+}
+
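+/* There is no PHY behind the VF to query, so the link is reported as
+ * always up with fixed speed/duplex values.
+ */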
+static int qdma_vf_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
+ dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_25G;
+
+ PMD_DRV_LOG(INFO, "Link update done\n");
+
+ return 0;
+}
+
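+/* Report device limits: the queue counts come from the capabilities
+ * negotiated with the PF (dev_cap); buffer size and packet length
+ * limits are fixed constants of the design.
+ */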
+static int qdma_vf_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+
+ dev_info->max_rx_queues = qdma_dev->dev_cap.num_qs;
+ dev_info->max_tx_queues = qdma_dev->dev_cap.num_qs;
+
+ dev_info->min_rx_bufsize = QDMA_MIN_RXBUFF_SIZE;
+ dev_info->max_rx_pktlen = DMA_BRAM_SIZE;
+ dev_info->max_mac_addrs = 1;
+
+ return 0;
+}
+
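+/* Release all queue resources held by the VF: notify the PF of each
+ * queue deletion, free the software rings and memzones, and return the
+ * queue range to the PF resource manager by requesting a qmax of 0.
+ */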
+int qdma_vf_dev_close(struct rte_eth_dev *dev)
+{
+ struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+ struct qdma_tx_queue *txq;
+ struct qdma_rx_queue *rxq;
+ struct qdma_cmpt_queue *cmptq;
+ uint32_t qid;
+
+ PMD_DRV_LOG(INFO, "Closing all queues\n");
+
+ /* iterate over rx queues */
+ for (qid = 0; qid < dev->data->nb_rx_queues; ++qid) {
+ rxq = dev->data->rx_queues[qid];
+ if (rxq != NULL) {
+ PMD_DRV_LOG(INFO, "VF-%d(DEVFN) Remove C2H queue: %d",
+ qdma_dev->func_id, qid);
+
+ qdma_dev_notify_qdel(rxq->dev, rxq->queue_id +
+ qdma_dev->queue_base,
+ QDMA_DEV_Q_TYPE_C2H);
+
+ if (rxq->st_mode)
+ qdma_dev_notify_qdel(rxq->dev, rxq->queue_id +
+ qdma_dev->queue_base,
+ QDMA_DEV_Q_TYPE_CMPT);
+
+ if (rxq->sw_ring)
+ rte_free(rxq->sw_ring);
+
+ if (rxq->st_mode) { /* if ST-mode */
+ if (rxq->rx_cmpt_mz)
+ rte_memzone_free(rxq->rx_cmpt_mz);
+ }
+
+ if (rxq->rx_mz)
+ rte_memzone_free(rxq->rx_mz);
+ rte_free(rxq);
+ PMD_DRV_LOG(INFO, "VF-%d(DEVFN) C2H queue %d removed",
+ qdma_dev->func_id, qid);
+ }
+ }
+
+ /* iterate over tx queues */
+ for (qid = 0; qid < dev->data->nb_tx_queues; ++qid) {
+ txq = dev->data->tx_queues[qid];
+ if (txq != NULL) {
+ PMD_DRV_LOG(INFO, "VF-%d(DEVFN) Remove H2C queue: %d",
+ qdma_dev->func_id, qid);
+
+ qdma_dev_notify_qdel(txq->dev, txq->queue_id +
+ qdma_dev->queue_base,
+ QDMA_DEV_Q_TYPE_H2C);
+ if (txq->sw_ring)
+ rte_free(txq->sw_ring);
+ if (txq->tx_mz)
+ rte_memzone_free(txq->tx_mz);
+ rte_free(txq);
+ PMD_DRV_LOG(INFO, "VF-%d(DEVFN) H2C queue %d removed",
+ qdma_dev->func_id, qid);
+ }
+ }
+ if (qdma_dev->dev_cap.mm_cmpt_en) {
+ /* iterate over cmpt queues */
+ for (qid = 0; qid < qdma_dev->qsets_en; ++qid) {
+ cmptq = qdma_dev->cmpt_queues[qid];
+ if (cmptq != NULL) {
+ PMD_DRV_LOG(INFO, "VF-%d(DEVFN) Remove CMPT queue: %d",
+ qdma_dev->func_id, qid);
+ qdma_dev_notify_qdel(cmptq->dev,
+ cmptq->queue_id +
+ qdma_dev->queue_base,
+ QDMA_DEV_Q_TYPE_CMPT);
+ if (cmptq->cmpt_mz)
+ rte_memzone_free(cmptq->cmpt_mz);
+ rte_free(cmptq);
+ PMD_DRV_LOG(INFO, "VF-%d(DEVFN) CMPT queue %d removed",
+ qdma_dev->func_id, qid);
+ }
+ }
+
+ if (qdma_dev->cmpt_queues != NULL) {
+ rte_free(qdma_dev->cmpt_queues);
+ qdma_dev->cmpt_queues = NULL;
+ }
+ }
+
+ qdma_dev->qsets_en = 0;
+ qdma_set_qmax(dev, (int *)&qdma_dev->qsets_en,
+ (int *)&qdma_dev->queue_base);
+ qdma_dev->init_q_range = 0;
+ rte_free(qdma_dev->q_info);
+ qdma_dev->q_info = NULL;
+ qdma_dev->dev_configured = 0;
+
+ return 0;
+}
+
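+/* Reset the VF device. The ethdev is uninitialized and reinitialized;
+ * if the reset was initiated by the PF, the VF first waits for the PF
+ * to signal that its own reset has completed.
+ */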
+static int qdma_vf_dev_reset(struct rte_eth_dev *dev)
+{
+ struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+ uint32_t i = 0;
+ int ret;
+
+ PMD_DRV_LOG(INFO, "%s: Reset VF-%d(DEVFN)",
+ __func__, qdma_dev->func_id);
+
+ ret = eth_qdma_vf_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ if (qdma_dev->reset_state == RESET_STATE_IDLE) {
+ ret = eth_qdma_vf_dev_init(dev);
+ } else {
+ /* The VF does not stop the mailbox; it waits for a
+ * "PF_RESET_DONE" mailbox message from the PF
+ * for a maximum of 60 seconds
+ */
+ PMD_DRV_LOG(INFO,
+ "%s: Waiting for reset done message from PF",
+ __func__);
+ while (i < RESET_TIMEOUT) {
+ if (qdma_dev->reset_state ==
+ RESET_STATE_RECV_PF_RESET_DONE) {
+ qdma_mbox_uninit(dev);
+
+ ret = eth_qdma_vf_dev_init(dev);
+ return ret;
+ }
+
+ rte_delay_ms(1);
+ i++;
+ }
+ }
+
+ if (i >= RESET_TIMEOUT) {
+ PMD_DRV_LOG(ERR, "%s: Reset failed for VF-%d(DEVFN)\n",
+ __func__, qdma_dev->func_id);
+ return -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
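+/* Configure the VF: request a queue range from the PF resource
+ * manager, allocate per-queue bookkeeping, and program the FMAP.
+ */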
+static int qdma_vf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+ int32_t ret = 0, queue_base = -1;
+ uint32_t qid = 0;
+
+ /* FMAP configuration */
+ qdma_dev->qsets_en = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
+
+ if (qdma_dev->qsets_en > qdma_dev->dev_cap.num_qs) {
+ PMD_DRV_LOG(INFO, "VF-%d(DEVFN) Error: Number of Queues to be "
+ "configured are greater than the queues "
+ "supported by the hardware\n",
+ qdma_dev->func_id);
+ qdma_dev->qsets_en = 0;
+ return -1;
+ }
+
+ /* Request queue base from the resource manager */
+ ret = qdma_set_qmax(dev, (int *)&qdma_dev->qsets_en,
+ (int *)&queue_base);
+ if (ret != QDMA_SUCCESS) {
+ PMD_DRV_LOG(ERR, "VF-%d(DEVFN) queue allocation failed: %d\n",
+ qdma_dev->func_id, ret);
+ return -1;
+ }
+ qdma_dev->queue_base = queue_base;
+
+ qdma_dev->q_info = rte_zmalloc("qinfo", sizeof(struct queue_info) *
+ qdma_dev->qsets_en, 0);
+ if (qdma_dev->q_info == NULL) {
+ PMD_DRV_LOG(INFO, "VF-%d fail to allocate queue info memory\n",
+ qdma_dev->func_id);
+ return (-ENOMEM);
+ }
+
+ /* Reserve memory for the cmpt queue pointers.
+ * The number of completion queues can be at most the maximum of
+ * the rx and tx queue counts.
+ */
+ qdma_dev->cmpt_queues = rte_zmalloc("cmpt_queues",
+ sizeof(qdma_dev->cmpt_queues[0]) *
+ qdma_dev->qsets_en,
+ RTE_CACHE_LINE_SIZE);
+ if (qdma_dev->cmpt_queues == NULL) {
+ PMD_DRV_LOG(ERR, "VF-%d(DEVFN) cmpt ring pointers memory "
+ "allocation failed:\n", qdma_dev->func_id);
+ rte_free(qdma_dev->q_info);
+ qdma_dev->q_info = NULL;
+ return -ENOMEM;
+ }
+
+ /* Initialize queue_mode of all queues to 1 (i.e. streaming) */
+ for (qid = 0 ; qid < qdma_dev->qsets_en; qid++)
+ qdma_dev->q_info[qid].queue_mode = RTE_PMD_QDMA_STREAMING_MODE;
+
+ for (qid = 0 ; qid < dev->data->nb_rx_queues; qid++) {
+ qdma_dev->q_info[qid].cmpt_desc_sz = qdma_dev->cmpt_desc_len;
+ qdma_dev->q_info[qid].rx_bypass_mode =
+ qdma_dev->c2h_bypass_mode;
+ qdma_dev->q_info[qid].trigger_mode = qdma_dev->trigger_mode;
+ qdma_dev->q_info[qid].timer_count =
+ qdma_dev->timer_count;
+ }
+
+ for (qid = 0 ; qid < dev->data->nb_tx_queues; qid++)
+ qdma_dev->q_info[qid].tx_bypass_mode =
+ qdma_dev->h2c_bypass_mode;
+
+ ret = qdma_vf_set_qrange(dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "FMAP programming failed\n");
+ rte_free(qdma_dev->q_info);
+ qdma_dev->q_info = NULL;
+ rte_free(qdma_dev->cmpt_queues);
+ qdma_dev->cmpt_queues = NULL;
+ return ret;
+ }
+
+ qdma_dev->dev_configured = 1;
+
+ return ret;
+}
+
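+/* Start a Tx queue: reset the software state, program the hardware
+ * context via the PF, and publish an initial PIDX of 0.
+ */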
+int qdma_vf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+ struct qdma_tx_queue *txq;
+
+ txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid];
+ qdma_reset_tx_queue(txq);
+
+ if (qdma_txq_context_setup(dev, qid) < 0)
+ return -1;
+
+ txq->q_pidx_info.pidx = 0;
+ qdma_dev->hw_access->qdma_queue_pidx_update(dev, qdma_dev->is_vf,
+ qid, 0, &txq->q_pidx_info);
+
+ dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+ txq->status = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
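+/* Start an Rx queue: reset and refill the software ring, program the
+ * hardware context via the PF and, for streaming mode, prime the
+ * completion CIDX and the ring PIDX.
+ */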
+int qdma_vf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+ struct qdma_rx_queue *rxq;
+ int err;
+
+ rxq = (struct qdma_rx_queue *)dev->data->rx_queues[qid];
+ qdma_reset_rx_queue(rxq);
+
+ err = qdma_init_rx_queue(rxq);
+ if (err != 0)
+ return err;
+ if (qdma_rxq_context_setup(dev, qid) < 0) {
+ PMD_DRV_LOG(ERR, "context_setup for qid - %u failed", qid);
+
+ return -1;
+ }
+
+ if (rxq->st_mode) {
+ rxq->cmpt_cidx_info.counter_idx = rxq->threshidx;
+ rxq->cmpt_cidx_info.timer_idx = rxq->timeridx;
+ rxq->cmpt_cidx_info.trig_mode = rxq->triggermode;
+ rxq->cmpt_cidx_info.wrb_en = 1;
+ qdma_dev->hw_access->qdma_queue_cmpt_cidx_update(dev, 1,
+ qid, &rxq->cmpt_cidx_info);
+
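+ /* Hand all but the last descriptors to the hardware; the
+ * remaining headroom keeps PIDX from wrapping onto CIDX
+ * (assumption: the ring must never be driven completely full)
+ */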
+ rxq->q_pidx_info.pidx = (rxq->nb_rx_desc - 2);
+ qdma_dev->hw_access->qdma_queue_pidx_update(dev, 1,
+ qid, 1, &rxq->q_pidx_info);
+ }
+
+ dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq->status = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+}
+
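+/* Stop an Rx queue: drain outstanding completions, invalidate the
+ * hardware context via the PF, free the posted mbufs, and reset the
+ * software state.
+ */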
+int qdma_vf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct qdma_rx_queue *rxq;
+ int i = 0, cnt = 0;
+
+ rxq = (struct qdma_rx_queue *)dev->data->rx_queues[qid];
+
+ rxq->status = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ /* Wait for the queue to receive all outstanding packets. */
+ if (rxq->st_mode) { /* ST-mode */
+ while (rxq->wb_status->pidx != rxq->cmpt_cidx_info.wrb_cidx) {
+ usleep(10);
+ if (cnt++ > 10000)
+ break;
+ }
+ } else { /* MM mode */
+ while (rxq->wb_status->cidx != rxq->q_pidx_info.pidx) {
+ usleep(10);
+ if (cnt++ > 10000)
+ break;
+ }
+ }
+
+ qdma_queue_context_invalidate(dev, qid, rxq->st_mode, 1);
+
+ if (rxq->st_mode) { /* ST-mode */
+#ifdef DUMP_MEMPOOL_USAGE_STATS
+ PMD_DRV_LOG(INFO, "%s(): %d: queue id = %d, mbuf_avail_count = "
+ "%d, mbuf_in_use_count = %d",
+ __func__, __LINE__, rxq->queue_id,
+ rte_mempool_avail_count(rxq->mb_pool),
+ rte_mempool_in_use_count(rxq->mb_pool));
+#endif /* DUMP_MEMPOOL_USAGE_STATS */
+
+ for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
+ rte_pktmbuf_free(rxq->sw_ring[i]);
+ rxq->sw_ring[i] = NULL;
+ }
+#ifdef DUMP_MEMPOOL_USAGE_STATS
+ PMD_DRV_LOG(INFO, "%s(): %d: queue id = %d, mbuf_avail_count = "
+ "%d, mbuf_in_use_count = %d",
+ __func__, __LINE__, rxq->queue_id,
+ rte_mempool_avail_count(rxq->mb_pool),
+ rte_mempool_in_use_count(rxq->mb_pool));
+#endif /* DUMP_MEMPOOL_USAGE_STATS */
+ }
+
+ qdma_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
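+/* Stop a Tx queue: wait for in-flight descriptors to complete,
+ * invalidate the hardware context via the PF, free any pending mbufs,
+ * and reset the software state.
+ */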
+int qdma_vf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct qdma_tx_queue *txq;
+ int i = 0, cnt = 0;
+
+ txq = (struct qdma_tx_queue *)dev->data->tx_queues[qid];
+
+ txq->status = RTE_ETH_QUEUE_STATE_STOPPED;
+ /* Wait for TXQ to send out all packets. */
+ while (txq->wb_status->cidx != txq->q_pidx_info.pidx) {
+ usleep(10);
+ if (cnt++ > 10000)
+ break;
+ }
+
+ qdma_queue_context_invalidate(dev, qid, txq->st_mode, 0);
+
+ /* Free mbufs if any pending in the ring */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ rte_pktmbuf_free(txq->sw_ring[i]);
+ txq->sw_ring[i] = NULL;
+ }
+ qdma_reset_tx_queue(txq);
+ dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
static struct eth_dev_ops qdma_vf_eth_dev_ops = {
+ .dev_configure = qdma_vf_dev_configure,
+ .dev_infos_get = qdma_vf_dev_infos_get,
+ .dev_close = qdma_vf_dev_close,
+ .dev_reset = qdma_vf_dev_reset,
+ .link_update = qdma_vf_dev_link_update,
.rx_queue_setup = qdma_dev_rx_queue_setup,
.tx_queue_setup = qdma_dev_tx_queue_setup,
.rx_queue_release = qdma_dev_rx_queue_release,
.tx_queue_release = qdma_dev_tx_queue_release,
+ .rx_queue_start = qdma_vf_dev_rx_queue_start,
+ .rx_queue_stop = qdma_vf_dev_rx_queue_stop,
+ .tx_queue_start = qdma_vf_dev_tx_queue_start,
+ .tx_queue_stop = qdma_vf_dev_tx_queue_stop,
};
/**