From patchwork Wed May 31 13:04:49 2023
X-Patchwork-Submitter: "Xing, Beilei"
X-Patchwork-Id: 127800
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing, Xiao Wang
Subject: [PATCH v7 13/14] net/cpfl: support hairpin bind/unbind
Date: Wed, 31 May 2023 13:04:49 +0000
Message-Id: <20230531130450.26380-14-beilei.xing@intel.com>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20230531130450.26380-1-beilei.xing@intel.com>
References: <20230531102551.20936-1-beilei.xing@intel.com>
 <20230531130450.26380-1-beilei.xing@intel.com>
List-Id: DPDK patches and discussions

From: Beilei Xing

This patch supports hairpin_bind/unbind ops.
Signed-off-by: Xiao Wang
Signed-off-by: Beilei Xing
---
 drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
 1 file changed, 137 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 9fc7d3401f..ff36f02b11 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1119,6 +1119,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 	return j;
 }
 
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+	struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+	struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+	struct cpfl_vport *cpfl_rx_vport;
+	struct cpfl_tx_queue *cpfl_txq;
+	struct cpfl_rx_queue *cpfl_rxq;
+	struct rte_eth_dev *peer_dev;
+	struct idpf_vport *rx_vport;
+	int err = 0;
+	int i;
+
+	err = cpfl_txq_hairpin_info_update(dev, rx_port);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+		return err;
+	}
+
+	/* configure hairpin queues */
+	for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+		cpfl_txq = dev->data->tx_queues[i];
+		err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+			return err;
+		}
+	}
+
+	err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+		return err;
+	}
+
+	peer_dev = &rte_eth_devices[rx_port];
+	cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+	rx_vport = &cpfl_rx_vport->base;
+	cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+	err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+		return err;
+	}
+
+	for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+		cpfl_rxq = peer_dev->data->rx_queues[i];
+		err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+			return err;
+		}
+		err = cpfl_rx_queue_init(peer_dev, i);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+			return err;
+		}
+	}
+
+	/* enable hairpin queues */
+	for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+		cpfl_txq = dev->data->tx_queues[i];
+		err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+						     i - cpfl_tx_vport->nb_data_txq,
+						     false, true);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+				    i);
+			return err;
+		}
+		cpfl_txq->base.q_started = true;
+	}
+
+	err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+		return err;
+	}
+
+	for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+		cpfl_rxq = peer_dev->data->rx_queues[i];
+		err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+						     i - cpfl_rx_vport->nb_data_rxq,
+						     true, true);
+		if (err != 0) {
+			PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+				    i);
+		}
+		cpfl_rxq->base.q_started = true;
+	}
+
+	err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+		return err;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+	struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+	struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+	struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+	struct cpfl_tx_queue *cpfl_txq;
+	struct cpfl_rx_queue *cpfl_rxq;
+	int i;
+
+	/* disable hairpin queues */
+	for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+		cpfl_txq = dev->data->tx_queues[i];
+		cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+					       i - cpfl_tx_vport->nb_data_txq,
+					       false, false);
+		cpfl_txq->base.q_started = false;
+	}
+
+	cpfl_switch_hairpin_complq(cpfl_tx_vport, false);
+
+	for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+		cpfl_rxq = peer_dev->data->rx_queues[i];
+		cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+					       i - cpfl_rx_vport->nb_data_rxq,
+					       true, false);
+		cpfl_rxq->base.q_started = false;
+	}
+
+	cpfl_switch_hairpin_bufq(cpfl_rx_vport, false);
+
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.dev_configure = cpfl_dev_configure,
 	.dev_close = cpfl_dev_close,
@@ -1149,6 +1284,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
 	.hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
+	.hairpin_bind = cpfl_hairpin_bind,
+	.hairpin_unbind = cpfl_hairpin_unbind,
 };
 
 static int
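
---
Note (not part of the patch): below is a minimal application-side sketch of how these new ops are reached through the generic ethdev hairpin API. Both ports set up hairpin queues with manual_bind, are started, and the Tx-side port then calls rte_eth_hairpin_bind()/rte_eth_hairpin_unbind(), which land in cpfl_hairpin_bind()/cpfl_hairpin_unbind() above. The helper name, port/queue ids, descriptor count and the conf flag choices are placeholders; it assumes both ports were already configured via rte_eth_dev_configure() with enough queues to cover the hairpin queue indexes.

/* Illustrative only -- placeholders, minimal error handling. */
#include <rte_ethdev.h>

static int
app_hairpin_bind_example(uint16_t tx_port, uint16_t rx_port,
			 uint16_t tx_qid, uint16_t rx_qid, uint16_t nb_desc)
{
	struct rte_eth_hairpin_conf conf = {
		.peer_count = 1,
		.manual_bind = 1,	/* bind explicitly after dev_start */
		.tx_explicit = 1,
	};
	int ret;

	/* Tx hairpin queue on tx_port peers with rx_qid on rx_port. */
	conf.peers[0].port = rx_port;
	conf.peers[0].queue = rx_qid;
	ret = rte_eth_tx_hairpin_queue_setup(tx_port, tx_qid, nb_desc, &conf);
	if (ret != 0)
		return ret;

	/* Rx hairpin queue on rx_port peers with tx_qid on tx_port. */
	conf.peers[0].port = tx_port;
	conf.peers[0].queue = tx_qid;
	ret = rte_eth_rx_hairpin_queue_setup(rx_port, rx_qid, nb_desc, &conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_start(tx_port);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_start(rx_port);
	if (ret != 0)
		return ret;

	/* Reaches the Tx-side driver's hairpin_bind op (cpfl_hairpin_bind). */
	ret = rte_eth_hairpin_bind(tx_port, rx_port);
	if (ret != 0)
		return ret;

	/* ... traffic runs; tear down via the unbind counterpart. */
	return rte_eth_hairpin_unbind(tx_port, rx_port);
}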