From patchwork Mon Jun 5 09:06:34 2023
X-Patchwork-Submitter: "Xing, Beilei"
X-Patchwork-Id: 128109
From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing, Xiao Wang
Subject: [PATCH v9 07/14] net/cpfl: support hairpin queue configuration
Date: Mon, 5 Jun 2023 09:06:34 +0000
Message-Id: <20230605090641.36525-8-beilei.xing@intel.com>
In-Reply-To: <20230605090641.36525-1-beilei.xing@intel.com>
References: <20230605061724.88130-1-beilei.xing@intel.com>
 <20230605090641.36525-1-beilei.xing@intel.com>
List-Id: DPDK patches and discussions

From: Beilei Xing <beilei.xing@intel.com>

This patch supports Rx/Tx hairpin queue configuration.
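For context, a minimal sketch of how an application could request such a
hairpin queue pair through the generic ethdev hairpin API that this driver
path serves. The port IDs, queue index, and ring size are hypothetical and
error handling is abbreviated; note that in this driver the hairpin queue
indices follow the normal data queues (see nb_data_txq/nb_data_rxq below).

    #include <rte_ethdev.h>

    /* Sketch only: bind Tx hairpin queue 'qid' of tx_port to Rx hairpin
     * queue 'qid' of rx_port. manual_bind = 0 selects the device-managed
     * bind path that cpfl_start_queues() handles in this patch.
     */
    static int
    setup_hairpin_pair(uint16_t tx_port, uint16_t rx_port, uint16_t qid)
    {
            struct rte_eth_hairpin_conf conf = {
                    .peer_count = 1,
                    .manual_bind = 0,
                    .tx_explicit = 0,
            };
            int ret;

            /* Tx side peers with the Rx port/queue. */
            conf.peers[0].port = rx_port;
            conf.peers[0].queue = qid;
            ret = rte_eth_tx_hairpin_queue_setup(tx_port, qid, 512, &conf);
            if (ret != 0)
                    return ret;

            /* Rx side peers with the Tx port/queue. */
            conf.peers[0].port = tx_port;
            conf.peers[0].queue = qid;
            return rte_eth_rx_hairpin_queue_setup(rx_port, qid, 512, &conf);
    }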
Signed-off-by: Xiao Wang
Signed-off-by: Mingxia Liu
Signed-off-by: Beilei Xing
---
 drivers/net/cpfl/cpfl_ethdev.c | 136 +++++++++++++++++++++++++++++++--
 drivers/net/cpfl/cpfl_rxtx.c   |  88 +++++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.h   |   7 ++
 3 files changed, 225 insertions(+), 6 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index d64b506038..0696c6bc68 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -742,33 +742,157 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
 	return idpf_vport_irq_map_config(vport, nb_rx_queues);
 }
 
+/* Update hairpin_info for dev's Tx hairpin queues */
+static int
+cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+	struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+	struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+	struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+	struct cpfl_txq_hairpin_info *hairpin_info;
+	struct cpfl_tx_queue *cpfl_txq;
+	int i;
+
+	for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+		cpfl_txq = dev->data->tx_queues[i];
+		hairpin_info = &cpfl_txq->hairpin_info;
+		if (hairpin_info->peer_rxp != rx_port) {
+			PMD_DRV_LOG(ERR, "port %d is not the peer port", rx_port);
+			return -EINVAL;
+		}
+		hairpin_info->peer_rxq_id =
+			cpfl_hw_qid_get(cpfl_rx_vport->p2p_q_chunks_info->rx_start_qid,
+					hairpin_info->peer_rxq_id - cpfl_rx_vport->nb_data_rxq);
+	}
+
+	return 0;
+}
+
+/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's memory zone */
+static void
+cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev *dev)
+{
+	struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_rx_vport->base;
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_hw *hw = &adapter->hw;
+	struct cpfl_rx_queue *cpfl_rxq;
+	struct cpfl_tx_queue *cpfl_txq;
+	struct rte_eth_dev *peer_dev;
+	const struct rte_memzone *mz;
+	uint16_t peer_tx_port;
+	uint16_t peer_tx_qid;
+	int i;
+
+	for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+		cpfl_rxq = dev->data->rx_queues[i];
+		peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
+		peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
+		peer_dev = &rte_eth_devices[peer_tx_port];
+		cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
+
+		/* bind rx queue */
+		mz = cpfl_txq->base.mz;
+		cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
+		cpfl_rxq->base.rx_ring = mz->addr;
+		cpfl_rxq->base.mz = mz;
+
+		/* bind rx buffer queue */
+		mz = cpfl_txq->base.complq->mz;
+		cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
+		cpfl_rxq->base.bufq1->rx_ring = mz->addr;
+		cpfl_rxq->base.bufq1->mz = mz;
+		cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
+			cpfl_hw_qtail_get(cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_start,
+					  0, cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_spacing);
+	}
+}
+
 static int
 cpfl_start_queues(struct rte_eth_dev *dev)
 {
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct cpfl_rx_queue *cpfl_rxq;
 	struct cpfl_tx_queue *cpfl_txq;
+	int update_flag = 0;
 	int err = 0;
 	int i;
 
+	/* For normal data queues, configure, init and enable Txq.
+	 * For non-manual bind hairpin queues, configure Txq.
+	 */
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		cpfl_txq = dev->data->tx_queues[i];
 		if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
 			continue;
-		err = cpfl_tx_queue_start(dev, i);
+		if (!cpfl_txq->hairpin_info.hairpin_q) {
+			err = cpfl_tx_queue_start(dev, i);
+			if (err != 0) {
+				PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+				return err;
+			}
+		} else if (!cpfl_vport->p2p_manual_bind) {
+			if (update_flag == 0) {
+				err = cpfl_txq_hairpin_info_update(dev,
+								   cpfl_txq->hairpin_info.peer_rxp);
+				if (err != 0) {
+					PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info");
+					return err;
+				}
+				update_flag = 1;
+			}
+			err = cpfl_hairpin_txq_config(vport, cpfl_txq);
+			if (err != 0) {
+				PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+				return err;
+			}
+		}
+	}
+
+	/* For non-manual bind hairpin queues, configure Tx completion queue first. */
+	if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_tx_complq != NULL) {
+		err = cpfl_hairpin_tx_complq_config(cpfl_vport);
 		if (err != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+			PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
 			return err;
 		}
 	}
 
+	/* For non-manual bind hairpin queues, configure Rx buffer queue. */
+	if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_rx_bufq != NULL) {
+		cpfl_rxq_hairpin_mz_bind(dev);
+		err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+			return err;
+		}
+	}
+
+	/* For normal data queues, configure, init and enable Rxq.
+	 * For non-manual bind hairpin queues, configure Rxq, and then init Rxq.
+	 */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		cpfl_rxq = dev->data->rx_queues[i];
 		if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
 			continue;
-		err = cpfl_rx_queue_start(dev, i);
-		if (err != 0) {
-			PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
-			return err;
+		if (!cpfl_rxq->hairpin_info.hairpin_q) {
+			err = cpfl_rx_queue_start(dev, i);
+			if (err != 0) {
+				PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+				return err;
+			}
+		} else if (!cpfl_vport->p2p_manual_bind) {
+			err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
+			if (err != 0) {
+				PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+				return err;
+			}
+			err = cpfl_rx_queue_init(dev, i);
+			if (err != 0) {
+				PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+				return err;
+			}
 		}
 	}

diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 90b408d1f4..fd24d544a1 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -922,6 +922,94 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	return ret;
 }
 
+int
+cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
+{
+	struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
+	struct virtchnl2_rxq_info rxq_info;
+
+	memset(&rxq_info, 0, sizeof(rxq_info));
+
+	rxq_info.type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+	rxq_info.queue_id = rx_bufq->queue_id;
+	rxq_info.ring_len = rx_bufq->nb_rx_desc;
+	rxq_info.dma_ring_addr = rx_bufq->rx_ring_phys_addr;
+	rxq_info.desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+	rxq_info.rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+	rxq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+	rxq_info.data_buffer_size = rx_bufq->rx_buf_len;
+	rxq_info.buffer_notif_stride = CPFL_RX_BUF_STRIDE;
+
+	return idpf_vc_rxq_config_by_info(&cpfl_vport->base, &rxq_info, 1);
+}
+
+int
+cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
+{
+	struct virtchnl2_rxq_info rxq_info;
+	struct idpf_rx_queue *rxq = &cpfl_rxq->base;
+
+	memset(&rxq_info, 0, sizeof(rxq_info));
+
+	rxq_info.type = VIRTCHNL2_QUEUE_TYPE_RX;
+	rxq_info.queue_id = rxq->queue_id;
+	rxq_info.ring_len = rxq->nb_rx_desc;
+	rxq_info.dma_ring_addr = rxq->rx_ring_phys_addr;
+	rxq_info.rx_bufq1_id = rxq->bufq1->queue_id;
+	rxq_info.max_pkt_size = vport->max_pkt_len;
+	rxq_info.desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+	rxq_info.qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
+
+	rxq_info.data_buffer_size = rxq->rx_buf_len;
+	rxq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+	rxq_info.rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+
+	PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
+		    vport->vport_id, rxq_info.queue_id);
+
+	return idpf_vc_rxq_config_by_info(vport, &rxq_info, 1);
+}
+
+int
+cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
+{
+	struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
+	struct virtchnl2_txq_info txq_info;
+
+	memset(&txq_info, 0, sizeof(txq_info));
+
+	txq_info.dma_ring_addr = tx_complq->tx_ring_phys_addr;
+	txq_info.type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+	txq_info.queue_id = tx_complq->queue_id;
+	txq_info.ring_len = tx_complq->nb_tx_desc;
+	txq_info.peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+	txq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+	txq_info.sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+	return idpf_vc_txq_config_by_info(&cpfl_vport->base, &txq_info, 1);
+}
+
+int
+cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
+{
+	struct idpf_tx_queue *txq = &cpfl_txq->base;
+	struct virtchnl2_txq_info txq_info;
+
+	memset(&txq_info, 0, sizeof(txq_info));
+
+	txq_info.dma_ring_addr = txq->tx_ring_phys_addr;
+	txq_info.type = VIRTCHNL2_QUEUE_TYPE_TX;
+	txq_info.queue_id = txq->queue_id;
+	txq_info.ring_len = txq->nb_tx_desc;
+	txq_info.tx_compl_queue_id = txq->complq->queue_id;
+	txq_info.relative_queue_id = txq->queue_id;
+	txq_info.peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
+	txq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+	txq_info.sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+	return idpf_vc_txq_config_by_info(vport, &txq_info, 1);
+}
+
 int
 cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 06198d4aad..872ebc1bfd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -32,12 +32,15 @@
 #define CPFL_RING_BASE_ALIGN	128
 
 #define CPFL_DEFAULT_RX_FREE_THRESH	32
+#define CPFL_RXBUF_LOW_WATERMARK	64
 
 #define CPFL_DEFAULT_TX_RS_THRESH	32
 #define CPFL_DEFAULT_TX_FREE_THRESH	32
 
 #define CPFL_SUPPORT_CHAIN_NUM	5
 
+#define CPFL_RX_BUF_STRIDE 64
+
 struct cpfl_rxq_hairpin_info {
 	bool hairpin_q;		/* if rx queue is a hairpin queue */
 	uint16_t peer_txp;
@@ -95,4 +98,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 				uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf);
+int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
+int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
 #endif /* _CPFL_RXTX_H_ */
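
A usage note on the manual-bind path: all the hairpin configuration done in
cpfl_start_queues() above is gated on !p2p_manual_bind. When the queues were
set up with manual_bind = 1, the application is expected to trigger the bind
itself once both ports are started, via the generic ethdev API. A minimal
sketch, with hypothetical port IDs (the PMD-side handling of this path is not
part of this patch):

    #include <rte_ethdev.h>

    /* Bind Tx hairpin queues of tx_port to Rx hairpin queues of rx_port
     * after both ports have been started with rte_eth_dev_start().
     */
    static int
    manual_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
    {
            int ret;

            ret = rte_eth_hairpin_bind(tx_port, rx_port);
            if (ret != 0)
                    return ret;
            /* Repeat with the roles swapped if the reverse direction
             * was also configured.
             */
            return rte_eth_hairpin_bind(rx_port, tx_port);
    }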