From patchwork Mon Jun 5 06:17:23 2023
X-Patchwork-Submitter: "Xing, Beilei"
X-Patchwork-Id: 128089
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing, Xiao Wang
Subject: [PATCH v8 13/14] net/cpfl: support hairpin bind/unbind
Date: Mon, 5 Jun 2023 06:17:23 +0000
Message-Id: <20230605061724.88130-14-beilei.xing@intel.com>
In-Reply-To: <20230605061724.88130-1-beilei.xing@intel.com>
References: <20230531130450.26380-1-beilei.xing@intel.com>
 <20230605061724.88130-1-beilei.xing@intel.com>

From: Beilei Xing

This patch supports hairpin_bind/unbind ops.
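As a usage note (an editorial sketch, not part of the patch): applications
reach these driver ops through the generic ethdev hairpin API once both ports
have been started with manually bound hairpin queues. The helper names and
port-id variables below are hypothetical.

    #include <rte_ethdev.h>

    /* Bind the Tx-side port to its peer Rx port; this invokes the PMD's
     * .hairpin_bind op (cpfl_hairpin_bind in this driver).
     */
    static int
    app_hairpin_bind(uint16_t tx_port_id, uint16_t rx_port_id)
    {
            return rte_eth_hairpin_bind(tx_port_id, rx_port_id);
    }

    /* Unbind on teardown; this invokes the PMD's .hairpin_unbind op
     * (cpfl_hairpin_unbind in this driver).
     */
    static int
    app_hairpin_unbind(uint16_t tx_port_id, uint16_t rx_port_id)
    {
            return rte_eth_hairpin_unbind(tx_port_id, rx_port_id);
    }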
Signed-off-by: Xiao Wang
Signed-off-by: Beilei Xing
---
 drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
 1 file changed, 137 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 1a1ca4bc77..0d127eae3e 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1120,6 +1120,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 	return j;
 }
 
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+	struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+	struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+	struct cpfl_vport *cpfl_rx_vport;
+	struct cpfl_tx_queue *cpfl_txq;
+	struct cpfl_rx_queue *cpfl_rxq;
+	struct rte_eth_dev *peer_dev;
+	struct idpf_vport *rx_vport;
+	int err = 0;
+	int i;
+
+	err = cpfl_txq_hairpin_info_update(dev, rx_port);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+		return err;
+	}
+
+	/* configure hairpin queues */
+	for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+		cpfl_txq = dev->data->tx_queues[i];
+		err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+			return err;
+		}
+	}
+
+	err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+		return err;
+	}
+
+	peer_dev = &rte_eth_devices[rx_port];
+	cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+	rx_vport = &cpfl_rx_vport->base;
+	cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+	err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+		return err;
+	}
+
+	for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+		cpfl_rxq = peer_dev->data->rx_queues[i];
+		err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+			return err;
+		}
+		err = cpfl_rx_queue_init(peer_dev, i);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+			return err;
+		}
+	}
+
+	/* enable hairpin queues */
+	for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+		cpfl_txq = dev->data->tx_queues[i];
+		err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+						     i - cpfl_tx_vport->nb_data_txq,
+						     false, true);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+				    i);
+			return err;
+		}
+		cpfl_txq->base.q_started = true;
+	}
+
+	err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+		return err;
+	}
+
+	for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+		cpfl_rxq = peer_dev->data->rx_queues[i];
+		err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+						     i - cpfl_rx_vport->nb_data_rxq,
+						     true, true);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+				    i);
+		}
+		cpfl_rxq->base.q_started = true;
+	}
+
+	err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+		return err;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+	struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+	struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+	struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+	struct cpfl_tx_queue *cpfl_txq;
+	struct cpfl_rx_queue *cpfl_rxq;
+	int i;
+
+	/* disable hairpin queues */
+	for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+		cpfl_txq = dev->data->tx_queues[i];
+		cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+					       i - cpfl_tx_vport->nb_data_txq,
+					       false, false);
+		cpfl_txq->base.q_started = false;
+	}
+
+	cpfl_switch_hairpin_complq(cpfl_tx_vport, false);
+
+	for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+		cpfl_rxq = peer_dev->data->rx_queues[i];
+		cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+					       i - cpfl_rx_vport->nb_data_rxq,
+					       true, false);
+		cpfl_rxq->base.q_started = false;
+	}
+
+	cpfl_switch_hairpin_bufq(cpfl_rx_vport, false);
+
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.dev_configure = cpfl_dev_configure,
 	.dev_close = cpfl_dev_close,
@@ -1150,6 +1285,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
 	.hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
+	.hairpin_bind = cpfl_hairpin_bind,
+	.hairpin_unbind = cpfl_hairpin_unbind,
 };
 
 static int
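For context, a minimal sketch (again editorial, not part of this patch) of how
the cross-port hairpin queues that cpfl_hairpin_bind()/cpfl_hairpin_unbind()
operate on are typically configured: manual-bind, explicit-Tx mode on two
ports, one queue per direction. The helper name, queue ids and descriptor
count are illustrative.

    #include <rte_ethdev.h>

    /* Set up one Tx->Rx hairpin queue pair between tx_port and rx_port in
     * manual-bind mode, so that rte_eth_hairpin_bind()/unbind() (and thus
     * the ops added above) are called explicitly after both ports start.
     */
    static int
    app_setup_hairpin_pair(uint16_t tx_port, uint16_t rx_port,
                           uint16_t txq_id, uint16_t rxq_id, uint16_t nb_desc)
    {
            struct rte_eth_hairpin_conf conf = {
                    .peer_count = 1,
                    .manual_bind = 1,
                    .tx_explicit = 1,
            };
            int ret;

            conf.peers[0].port = rx_port;
            conf.peers[0].queue = rxq_id;
            ret = rte_eth_tx_hairpin_queue_setup(tx_port, txq_id, nb_desc, &conf);
            if (ret != 0)
                    return ret;

            conf.peers[0].port = tx_port;
            conf.peers[0].queue = txq_id;
            return rte_eth_rx_hairpin_queue_setup(rx_port, rxq_id, nb_desc, &conf);
    }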