From patchwork Fri May 26 07:38:46 2023
X-Patchwork-Submitter: "Xing, Beilei"
X-Patchwork-Id: 127546
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing, Xiao Wang
Subject: [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop
Date: Fri, 26 May 2023 07:38:46 +0000
Message-Id: <20230526073850.101079-10-beilei.xing@intel.com>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20230526073850.101079-1-beilei.xing@intel.com>
References: <20230519073116.56749-1-beilei.xing@intel.com>
 <20230526073850.101079-1-beilei.xing@intel.com>

From: Beilei Xing

This patch adds support for starting and stopping Rx/Tx hairpin queues.

Signed-off-by: Xiao Wang
Signed-off-by: Mingxia Liu
Signed-off-by: Beilei Xing
---
 drivers/net/cpfl/cpfl_ethdev.c |  41 +++++++++
 drivers/net/cpfl/cpfl_rxtx.c   | 151 +++++++++++++++++++++++++++++----
 drivers/net/cpfl/cpfl_rxtx.h   |  14 +++
 3 files changed, 188 insertions(+), 18 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a06def06d0..8035878602 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -896,6 +896,47 @@ cpfl_start_queues(struct rte_eth_dev *dev)
 		}
 	}
 
+	/* For non-manual bind hairpin queues, enable the Tx and Rx queues first,
+	 * then enable the Tx completion queue and Rx buffer queue.
+	 */
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		cpfl_txq = dev->data->tx_queues[i];
+		if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+			err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+							     i - cpfl_vport->nb_data_txq,
+							     false, true);
+			if (err)
+				PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+					    i);
+			else
+				cpfl_txq->base.q_started = true;
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		cpfl_rxq = dev->data->rx_queues[i];
+		if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+			err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+							     i - cpfl_vport->nb_data_rxq,
+							     true, true);
+			if (err)
+				PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+					    i);
+			else
+				cpfl_rxq->base.q_started = true;
+		}
+	}
+
+	if (!cpfl_vport->p2p_manual_bind &&
+	    cpfl_vport->p2p_tx_complq != NULL &&
+	    cpfl_vport->p2p_rx_bufq != NULL) {
+		err = cpfl_switch_hairpin_bufq_complq(cpfl_vport, true);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq and Rx bufq");
+			return err;
+		}
+	}
+
 	return err;
 }
 
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 702054d1c5..38c48ad8c7 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -991,6 +991,81 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq
 	return idpf_vc_txq_config_by_info(vport, txq_info, 1);
 }
 
+int
+cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+	struct idpf_vport *vport = &cpfl_vport->base;
+	uint32_t type;
+	int err, queue_id;
+
+	type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+	queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+	err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+	if (err)
+		return err;
+
+	type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+	queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+	err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+	return err;
+}
+
+int
+cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
+			       bool rx, bool on)
+{
+	struct idpf_vport *vport = &cpfl_vport->base;
+	uint32_t type;
+	int err, queue_id;
+
+	type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+	if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+		queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+	else
+		queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+	err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+	if (err)
+		return err;
+
+	return err;
+}
+
+static int
+cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+	volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
+	struct rte_mbuf *mbuf = NULL;
+	uint64_t dma_addr;
+	uint16_t i;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		mbuf = rte_mbuf_raw_alloc(rxq->mp);
+		if (unlikely(!mbuf)) {
+			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+			return -ENOMEM;
+		}
+
+		rte_mbuf_refcnt_set(mbuf, 1);
+		mbuf->next = NULL;
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->nb_segs = 1;
+		mbuf->port = rxq->port_id;
+		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+		rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
+		rxd->reserve0 = 0;
+		rxd->pkt_addr = dma_addr;
+	}
+
+	rxq->nb_rx_hold = 0;
+	/* The value written in the RX buffer queue tail register must be a multiple of 8. */
+	rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
+
+	return 0;
+}
+
 int
 cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
@@ -1044,22 +1119,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 	} else {
 		/* Split queue */
-		err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
-		if (err != 0) {
-			PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
-			return err;
-		}
-		err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
-		if (err != 0) {
-			PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
-			return err;
+		if (cpfl_rxq->hairpin_info.hairpin_q) {
+			err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
+			if (err != 0) {
+				PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf");
+				return err;
+			}
+		} else {
+			err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
+			if (err != 0) {
+				PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+				return err;
+			}
+			err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
+			if (err != 0) {
+				PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+				return err;
+			}
 		}
 
 		rte_wmb();
 
 		/* Init the RX tail register. */
 		IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
-		IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
+		if (rxq->bufq2)
+			IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
 	}
 
 	return err;
@@ -1166,7 +1250,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		return -EINVAL;
 
 	cpfl_rxq = dev->data->rx_queues[rx_queue_id];
-	err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+	if (cpfl_rxq->hairpin_info.hairpin_q)
+		err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+						     rx_queue_id - cpfl_vport->nb_data_txq,
+						     true, false);
+	else
+		err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
 			    rx_queue_id);
@@ -1180,10 +1269,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		idpf_qc_single_rx_queue_reset(rxq);
 	} else {
 		rxq->bufq1->ops->release_mbufs(rxq->bufq1);
-		rxq->bufq2->ops->release_mbufs(rxq->bufq2);
-		idpf_qc_split_rx_queue_reset(rxq);
+		if (rxq->bufq2)
+			rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+		if (cpfl_rxq->hairpin_info.hairpin_q) {
+			cpfl_rx_hairpin_descq_reset(rxq);
+			cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
+		} else {
+			idpf_qc_split_rx_queue_reset(rxq);
+		}
 	}
-	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	if (!cpfl_rxq->hairpin_info.hairpin_q)
+		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
 	return 0;
 }
@@ -1202,7 +1298,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	cpfl_txq = dev->data->tx_queues[tx_queue_id];
 
-	err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+	if (cpfl_txq->hairpin_info.hairpin_q)
+		err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+						     tx_queue_id - cpfl_vport->nb_data_txq,
+						     false, false);
+	else
+		err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
 			    tx_queue_id);
@@ -1215,10 +1316,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
 		idpf_qc_single_tx_queue_reset(txq);
 	} else {
-		idpf_qc_split_tx_descq_reset(txq);
-		idpf_qc_split_tx_complq_reset(txq->complq);
+		if (cpfl_txq->hairpin_info.hairpin_q) {
+			cpfl_tx_hairpin_descq_reset(txq);
+			cpfl_tx_hairpin_complq_reset(txq->complq);
+		} else {
+			idpf_qc_split_tx_descq_reset(txq);
+			idpf_qc_split_tx_complq_reset(txq->complq);
+		}
 	}
-	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	if (!cpfl_txq->hairpin_info.hairpin_q)
+		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
 	return 0;
 }
@@ -1238,10 +1346,17 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 void
 cpfl_stop_queues(struct rte_eth_dev *dev)
 {
+	struct cpfl_vport *cpfl_vport =
+		(struct cpfl_vport *)dev->data->dev_private;
 	struct cpfl_rx_queue *cpfl_rxq;
 	struct cpfl_tx_queue *cpfl_txq;
 	int i;
 
+	if (cpfl_vport->p2p_rx_bufq != NULL) {
+		if (cpfl_switch_hairpin_bufq_complq(cpfl_vport, false) != 0)
+			PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq and Rx bufq");
+	}
+
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		cpfl_rxq = dev->data->rx_queues[i];
 		if (cpfl_rxq == NULL)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 872ebc1bfd..42dfd07155 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -41,6 +41,17 @@
 
 #define CPFL_RX_BUF_STRIDE 64
 
+/* The value written in the RX buffer queue tail register,
+ * and in the WritePTR field in the TX completion queue context,
+ * must be a multiple of 8.
+ */
+#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE	8
+
+struct virtchnl2_p2p_rx_buf_desc {
+	__le64  reserve0;
+	__le64  pkt_addr; /* Packet buffer address */
+};
+
 struct cpfl_rxq_hairpin_info {
 	bool hairpin_q;		/* if rx queue is a hairpin queue */
 	uint16_t peer_txp;
@@ -102,4 +113,7 @@ int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
 int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
 int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
 int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
+int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
+				   bool rx, bool on);
 #endif /* _CPFL_RXTX_H_ */
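
Editor's note on usage: with manual_bind == 0, an application only sets up the hairpin
queue pair and starts the port; rte_eth_dev_start() then reaches cpfl_start_queues()
above, which switches the hairpin Tx/Rx queues on and finally enables the Tx completion
queue and Rx buffer queue. The following is a minimal, illustrative sketch of that flow
against the generic ethdev hairpin API (not part of the patch); the helper name, queue
indices, and the descriptor count of 512 are hypothetical.

#include <rte_ethdev.h>

/* Hypothetical helper: set up one self-peered hairpin queue pair right after
 * the data queues, then start the port. Assumes the port was configured with
 * nb_data_rxq Rx and nb_data_txq Tx data queues plus one extra queue each way.
 */
static int
start_port_with_hairpin(uint16_t port_id, uint16_t nb_data_rxq, uint16_t nb_data_txq)
{
	struct rte_eth_hairpin_conf conf = {
		.peer_count = 1,
		.manual_bind = 0,	/* let dev_start enable the hairpin queues */
		.tx_explicit = 0,
	};
	int ret;

	/* Rx hairpin queue at index nb_data_rxq; its peer is the Tx hairpin queue. */
	conf.peers[0].port = port_id;
	conf.peers[0].queue = nb_data_txq;
	ret = rte_eth_rx_hairpin_queue_setup(port_id, nb_data_rxq, 512, &conf);
	if (ret != 0)
		return ret;

	/* Tx hairpin queue at index nb_data_txq; its peer is the Rx hairpin queue. */
	conf.peers[0].queue = nb_data_rxq;
	ret = rte_eth_tx_hairpin_queue_setup(port_id, nb_data_txq, 512, &conf);
	if (ret != 0)
		return ret;

	/* Triggers cpfl_start_queues(); rte_eth_dev_stop() later takes the
	 * mirrored teardown path through cpfl_stop_queues().
	 */
	return rte_eth_dev_start(port_id);
}

The i - cpfl_vport->nb_data_txq / i - cpfl_vport->nb_data_rxq arithmetic in the start
loops above converts such ethdev-level queue indices into the logical hairpin queue ids
that cpfl_switch_hairpin_rxtx_queue() expects.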