From patchwork Mon Oct 31 05:15:45 2022
X-Patchwork-Submitter: "Xing, Beilei"
X-Patchwork-Id: 119323
X-Patchwork-Delegate: andrew.rybchenko@oktetlabs.ru
From: beilei.xing@intel.com
To: andrew.rybchenko@oktetlabs.ru, jingjing.wu@intel.com, beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo, Xiaoyun Li
Subject: [PATCH v17 07/18] net/idpf: add support for queue stop
Date: Mon, 31 Oct 2022 05:15:45 +0000
Message-Id: <20221031051556.98549-8-beilei.xing@intel.com>
In-Reply-To: <20221031051556.98549-1-beilei.xing@intel.com>
References: <20221031033651.98072-1-beilei.xing@intel.com>
 <20221031051556.98549-1-beilei.xing@intel.com>
X-Mailer: git-send-email 2.26.2
List-Id: DPDK patches and discussions

From: Junfeng Guo

Add support for these device ops:
 - rx_queue_stop
 - tx_queue_stop

Signed-off-by: Beilei Xing
Signed-off-by: Xiaoyun Li
Signed-off-by: Junfeng Guo
---
 drivers/net/idpf/idpf_ethdev.c |  17 ++--
 drivers/net/idpf/idpf_rxtx.c   | 148 +++++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_rxtx.h   |  13 +++
 drivers/net/idpf/idpf_vchnl.c  |  69 +++++++++++++++
 4 files changed, 242 insertions(+), 5 deletions(-)
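
Note for reviewers (not part of the diff below): a minimal usage sketch of how
the new ops are reached from an application through the generic ethdev API.
rte_eth_dev_rx_queue_stop()/rte_eth_dev_tx_queue_stop() dispatch into the
PMD's idpf_rx_queue_stop()/idpf_tx_queue_stop(), and rte_eth_dev_stop() now
releases queue resources via idpf_stop_queues(). The helper name and the
port/queue ids are illustrative assumptions, not code from this series.

#include <rte_ethdev.h>

/* Illustrative helper: stop one Rx/Tx queue pair on a started idpf port.
 * The ethdev layer routes the calls to the .rx_queue_stop/.tx_queue_stop
 * dev ops added by this patch, which disable the queue via virtchnl2,
 * free the mbufs still held in the sw ring and reset the descriptor rings.
 */
static int
quiesce_queue_pair(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;

	return rte_eth_dev_tx_queue_stop(port_id, queue_id);
}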
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 0400ed611f..9f1e1e6a18 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -324,7 +324,8 @@ idpf_dev_start(struct rte_eth_dev *dev)
 
 	if (dev->data->mtu > vport->max_mtu) {
 		PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_mtu;
 	}
 
 	vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
@@ -332,17 +333,21 @@ idpf_dev_start(struct rte_eth_dev *dev)
 	ret = idpf_start_queues(dev);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Failed to start queues");
-		return ret;
+		goto err_mtu;
 	}
 
 	ret = idpf_vc_ena_dis_vport(vport, true);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Failed to enable vport");
-		/* TODO: stop queues */
-		return ret;
+		goto err_vport;
 	}
 
 	return 0;
+
+err_vport:
+	idpf_stop_queues(dev);
+err_mtu:
+	return ret;
 }
 
 static int
@@ -352,7 +357,7 @@ idpf_dev_stop(struct rte_eth_dev *dev)
 
 	idpf_vc_ena_dis_vport(vport, false);
 
-	/* TODO: stop queues */
+	idpf_stop_queues(dev);
 
 	return 0;
 }
@@ -751,6 +756,8 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
 	.link_update			= idpf_dev_link_update,
 	.rx_queue_start			= idpf_rx_queue_start,
 	.tx_queue_start			= idpf_tx_queue_start,
+	.rx_queue_stop			= idpf_rx_queue_stop,
+	.tx_queue_stop			= idpf_tx_queue_stop,
 };
 
 static uint16_t
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 6d954afd9d..8d5ec41a1f 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -71,6 +71,55 @@ check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
 	return 0;
 }
 
+static void
+release_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+	uint16_t i;
+
+	if (rxq->sw_ring == NULL)
+		return;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		if (rxq->sw_ring[i] != NULL) {
+			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+			rxq->sw_ring[i] = NULL;
+		}
+	}
+}
+
+static void
+release_txq_mbufs(struct idpf_tx_queue *txq)
+{
+	uint16_t nb_desc, i;
+
+	if (txq == NULL || txq->sw_ring == NULL) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
+		return;
+	}
+
+	if (txq->sw_nb_desc != 0) {
+		/* For split queue model, descriptor ring */
+		nb_desc = txq->sw_nb_desc;
+	} else {
+		/* For single queue model */
+		nb_desc = txq->nb_tx_desc;
+	}
+	for (i = 0; i < nb_desc; i++) {
+		if (txq->sw_ring[i].mbuf != NULL) {
+			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+			txq->sw_ring[i].mbuf = NULL;
+		}
+	}
+}
+
+static const struct idpf_rxq_ops def_rxq_ops = {
+	.release_mbufs = release_rxq_mbufs,
+};
+
+static const struct idpf_txq_ops def_txq_ops = {
+	.release_mbufs = release_txq_mbufs,
+};
+
 static void
 reset_split_rx_descq(struct idpf_rx_queue *rxq)
 {
@@ -122,6 +171,14 @@ reset_split_rx_bufq(struct idpf_rx_queue *rxq)
 	rxq->bufq2 = NULL;
 }
 
+static inline void
+reset_split_rx_queue(struct idpf_rx_queue *rxq)
+{
+	reset_split_rx_descq(rxq);
+	reset_split_rx_bufq(rxq->bufq1);
+	reset_split_rx_bufq(rxq->bufq2);
+}
+
 static void
 reset_single_rx_queue(struct idpf_rx_queue *rxq)
 {
@@ -301,6 +358,7 @@ idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
 	bufq->q_set = true;
 	bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
 			 queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+	bufq->ops = &def_rxq_ops;
 
 	/* TODO: allow bulk or vec */
 
@@ -527,6 +585,7 @@ idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
 			queue_idx * vport->chunks_info.rx_qtail_spacing);
+	rxq->ops = &def_rxq_ops;
 
 	return 0;
 }
@@ -621,6 +680,7 @@ idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	reset_split_tx_descq(txq);
 	txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
 			queue_idx * vport->chunks_info.tx_qtail_spacing);
+	txq->ops = &def_txq_ops;
 
 	/* Allocate the TX completion queue data structure. */
 	txq->complq = rte_zmalloc_socket("idpf splitq cq",
@@ -748,6 +808,7 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
 			queue_idx * vport->chunks_info.tx_qtail_spacing);
+	txq->ops = &def_txq_ops;
 
 	return 0;
 }
@@ -979,3 +1040,90 @@ idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	return err;
 }
+
+int
+idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_rx_queue *rxq;
+	int err;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	err = idpf_switch_queue(vport, rx_queue_id, true, false);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+			    rx_queue_id);
+		return err;
+	}
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		rxq->ops->release_mbufs(rxq);
+		reset_single_rx_queue(rxq);
+	} else {
+		rxq->bufq1->ops->release_mbufs(rxq->bufq1);
+		rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+		reset_split_rx_queue(rxq);
+	}
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+int
+idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_tx_queue *txq;
+	int err;
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	err = idpf_switch_queue(vport, tx_queue_id, false, false);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+			    tx_queue_id);
+		return err;
+	}
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	txq->ops->release_mbufs(txq);
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		reset_single_tx_queue(txq);
+	} else {
+		reset_split_tx_descq(txq);
+		reset_split_tx_complq(txq->complq);
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+void
+idpf_stop_queues(struct rte_eth_dev *dev)
+{
+	struct idpf_rx_queue *rxq;
+	struct idpf_tx_queue *txq;
+	int i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (rxq == NULL)
+			continue;
+
+		if (idpf_rx_queue_stop(dev, i) != 0)
+			PMD_DRV_LOG(WARNING, "Fail to stop Rx queue %d", i);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (txq == NULL)
+			continue;
+
+		if (idpf_tx_queue_stop(dev, i) != 0)
+			PMD_DRV_LOG(WARNING, "Fail to stop Tx queue %d", i);
+	}
+}
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index ab9b3830fd..b959638489 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -51,6 +51,7 @@ struct idpf_rx_queue {
 	bool q_set;		/* if rx queue has been configured */
 	bool q_started;		/* if rx queue has been started */
 	bool rx_deferred_start;	/* don't start this queue in dev start */
+	const struct idpf_rxq_ops *ops;
 
 	/* only valid for split queue mode */
 	uint8_t expected_gen_id;
@@ -97,6 +98,7 @@ struct idpf_tx_queue {
 	bool q_set;		/* if tx queue has been configured */
 	bool q_started;		/* if tx queue has been started */
 	bool tx_deferred_start;	/* don't start this queue in dev start */
+	const struct idpf_txq_ops *ops;
 
 	/* only valid for split queue mode */
 	uint16_t sw_nb_desc;
@@ -107,16 +109,27 @@ struct idpf_tx_queue {
 	struct idpf_tx_queue *complq;
 };
 
+struct idpf_rxq_ops {
+	void (*release_mbufs)(struct idpf_rx_queue *rxq);
+};
+
+struct idpf_txq_ops {
+	void (*release_mbufs)(struct idpf_tx_queue *txq);
*txq); +}; + int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp); int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id); int idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf); int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id); int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); +int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); +void idpf_stop_queues(struct rte_eth_dev *dev); #endif /* _IDPF_RXTX_H_ */ diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c index 08fb9061c2..f92141b54f 100644 --- a/drivers/net/idpf/idpf_vchnl.c +++ b/drivers/net/idpf/idpf_vchnl.c @@ -837,6 +837,75 @@ idpf_switch_queue(struct idpf_vport *vport, uint16_t qid, return err; } +#define IDPF_RXTX_QUEUE_CHUNKS_NUM 2 +int +idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable) +{ + struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_del_ena_dis_queues *queue_select; + struct virtchnl2_queue_chunk *queue_chunk; + uint32_t type; + struct idpf_cmd_info args; + uint16_t num_chunks; + int err, len; + + num_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM; + if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) + num_chunks++; + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) + num_chunks++; + + len = sizeof(struct virtchnl2_del_ena_dis_queues) + + sizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1); + queue_select = rte_zmalloc("queue_select", len, 0); + if (queue_select == NULL) + return -ENOMEM; + + queue_chunk = queue_select->chunks.chunks; + queue_select->chunks.num_chunks = num_chunks; + queue_select->vport_id = vport->vport_id; + + type = VIRTCHNL_QUEUE_TYPE_RX; + queue_chunk[type].type = type; + queue_chunk[type].start_queue_id = vport->chunks_info.rx_start_qid; + queue_chunk[type].num_queues = vport->num_rx_q; + + type = VIRTCHNL2_QUEUE_TYPE_TX; + queue_chunk[type].type = type; + queue_chunk[type].start_queue_id = vport->chunks_info.tx_start_qid; + queue_chunk[type].num_queues = vport->num_tx_q; + + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; + queue_chunk[type].type = type; + queue_chunk[type].start_queue_id = + vport->chunks_info.rx_buf_start_qid; + queue_chunk[type].num_queues = vport->num_rx_bufq; + } + + if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; + queue_chunk[type].type = type; + queue_chunk[type].start_queue_id = + vport->chunks_info.tx_compl_start_qid; + queue_chunk[type].num_queues = vport->num_tx_complq; + } + + args.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES : + VIRTCHNL2_OP_DISABLE_QUEUES; + args.in_args = (u8 *)queue_select; + args.in_args_size = len; + args.out_buffer = adapter->mbx_resp; + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; + err = idpf_execute_vc_cmd(adapter, &args); + if (err != 0) + PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES", + enable ? "ENABLE" : "DISABLE"); + + rte_free(queue_select); + return err; +} + int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable) {