From patchwork Wed Aug  9 15:51:27 2023
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: "Xing, Beilei" <beilei.xing@intel.com>
X-Patchwork-Id: 130025
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: beilei.xing@intel.com
To: jingjing.wu@intel.com, mingxia.liu@intel.com
Cc: dev@dpdk.org, Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH 12/19] net/cpfl: support representor Rx/Tx queue setup
Date: Wed, 9 Aug 2023 15:51:27 +0000
Message-Id: <20230809155134.539287-13-beilei.xing@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20230809155134.539287-1-beilei.xing@intel.com>
References: <20230809155134.539287-1-beilei.xing@intel.com>

From: Beilei Xing <beilei.xing@intel.com>

Support Rx/Tx queue setup for port representors. The Rx queue is
backed by a per-representor rte_ring, named after the representor's
host/PF(/VF) ids; the Tx queue allocates no ring of its own and
instead borrows a HW Tx queue of the adapter's exceptional vport.
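The burst functions that actually drain the Rx ring and feed the
exceptional vport arrive later in this series; the queues added here
are reachable through the standard ethdev API. Below is a minimal,
illustrative app-side sketch, not part of the patch: the function
name, port id, and descriptor count are assumptions, only the ethdev
calls themselves are real API.

/* Hypothetical application-side setup for a probed representor port.
 * Assumed values: one Rx/Tx queue pair, 512 descriptors (a power of 2
 * within the CPFL_MIN_RING_DESC..CPFL_MAX_RING_DESC range).
 */
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

static int
repr_queue_setup_example(uint16_t repr_port_id, struct rte_mempool *mb_pool)
{
	static const struct rte_eth_conf conf; /* zeroed defaults */
	uint16_t nb_desc = 512;
	int ret;

	ret = rte_eth_dev_configure(repr_port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	/* Backed by an rte_ring inside the PMD. */
	ret = rte_eth_rx_queue_setup(repr_port_id, 0, nb_desc,
				     rte_socket_id(), NULL, mb_pool);
	if (ret < 0)
		return ret;

	/* No ring; the PMD points this at the exceptional vport's HW txq 0. */
	return rte_eth_tx_queue_setup(repr_port_id, 0, nb_desc,
				      rte_socket_id(), NULL);
}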
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h      |  11 +++
 drivers/net/cpfl/cpfl_representor.c | 128 ++++++++++++++++++++++++++++
 2 files changed, 139 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b0fb05c7b9..8a8721bbe9 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -170,6 +170,17 @@ struct cpfl_repr {
 	struct cpfl_vport_info *vport_info;
 };
 
+struct cpfl_repr_rx_queue {
+	struct cpfl_repr *repr;
+	struct rte_mempool *mb_pool;
+	struct rte_ring *rx_ring;
+};
+
+struct cpfl_repr_tx_queue {
+	struct cpfl_repr *repr;
+	struct cpfl_tx_queue *txq;
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index dcc01d0669..19c7fb4cb9 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -285,12 +285,140 @@ cpfl_repr_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_repr_rx_queue_setup(struct rte_eth_dev *dev,
+			 uint16_t queue_id,
+			 uint16_t nb_desc,
+			 unsigned int socket_id,
+			 __rte_unused const struct rte_eth_rxconf *conf,
+			 struct rte_mempool *pool)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(dev);
+	struct cpfl_repr_rx_queue *rxq;
+	char ring_name[RTE_RING_NAMESIZE];
+	struct rte_ring *rx_ring;
+
+	if (!(dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+		PMD_INIT_LOG(ERR, "This ethdev is not a representor.");
+		return -EINVAL;
+	}
+
+	if (!RTE_IS_POWER_OF_2(nb_desc) ||
+	    nb_desc > CPFL_MAX_RING_DESC ||
+	    nb_desc < CPFL_MIN_RING_DESC) {
+		PMD_INIT_LOG(ERR, "nb_desc must be a power of 2, <= %u and >= %u",
+			     CPFL_MAX_RING_DESC, CPFL_MIN_RING_DESC);
+		return -EINVAL;
+	}
+
+	/* Free the previous queue and its ring if this queue id is reused. */
+	rxq = dev->data->rx_queues[queue_id];
+	if (rxq) {
+		rte_ring_free(rxq->rx_ring);
+		rte_free(rxq);
+		dev->data->rx_queues[queue_id] = NULL;
+	}
+
+	/* Allocate the Rx queue data structure. */
+	rxq = rte_zmalloc_socket("cpfl representor rx queue",
+				 sizeof(struct cpfl_repr_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (!rxq) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for representor rx queue");
+		return -ENOMEM;
+	}
+
+	/* Use an rte_ring as the Rx queue of the representor. */
+	if (repr->repr_id.type == RTE_ETH_REPRESENTOR_VF)
+		snprintf(ring_name, sizeof(ring_name), "cpfl_repr_c%dpf%dvf%d_rx",
+			 repr->repr_id.host_id, repr->repr_id.pf_id, repr->repr_id.vf_id);
+	else
+		snprintf(ring_name, sizeof(ring_name), "cpfl_repr_c%dpf%d_rx",
+			 repr->repr_id.host_id, repr->repr_id.pf_id);
+	rx_ring = rte_ring_lookup(ring_name);
+	if (rx_ring) {
+		PMD_INIT_LOG(ERR, "rte_ring %s is already occupied.", ring_name);
+		rte_free(rxq);
+		return -EEXIST;
+	}
+
+	rx_ring = rte_ring_create(ring_name, nb_desc, socket_id,
+				  RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!rx_ring) {
+		PMD_INIT_LOG(ERR, "Failed to create ring %s.", ring_name);
+		rte_free(rxq);
+		return -EINVAL;
+	}
+
+	rxq->mb_pool = pool;
+	rxq->repr = repr;
+	rxq->rx_ring = rx_ring;
+	dev->data->rx_queues[queue_id] = rxq;
+
+	return 0;
+}
+
+static int
+cpfl_repr_tx_queue_setup(struct rte_eth_dev *dev,
+			 uint16_t queue_id,
+			 __rte_unused uint16_t nb_desc,
+			 unsigned int socket_id,
+			 __rte_unused const struct rte_eth_txconf *conf)
+{
+	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(dev);
+	struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+	struct cpfl_repr_tx_queue *txq;
+	struct cpfl_vport *vport;
+
+	if (!(dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+		PMD_INIT_LOG(ERR, "This ethdev is not a representor.");
representor."); + return -EINVAL; + } + + txq = dev->data->tx_queues[queue_id]; + if (txq) { + rte_free(txq); + dev->data->rx_queues[queue_id] = NULL; + } + txq = rte_zmalloc_socket("cpfl representor tx queue", + sizeof(struct cpfl_repr_tx_queue), + RTE_CACHE_LINE_SIZE, + socket_id); + if (!txq) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for representor tx queue"); + return -ENOMEM; + } + /* use vport HW queue to transmit, no need to allocate + * a ring for it + */ + txq->repr = repr; + dev->data->tx_queues[queue_id] = txq; + + vport = adapter->exceptional_vport; + if (!vport) { + PMD_INIT_LOG(ERR, "No default vport is created for exceptianl path"); + return -ENODEV; + } + /* TODO: need to select the hw txq when multi txqs are there. + * Now just use the default queue 0 + */ + txq->txq = ((struct rte_eth_dev_data *)vport->itf.data)->tx_queues[0]; + + return 0; +} + static const struct eth_dev_ops cpfl_repr_dev_ops = { .dev_start = cpfl_repr_dev_start, .dev_stop = cpfl_repr_dev_stop, .dev_configure = cpfl_repr_dev_configure, .dev_close = cpfl_repr_dev_close, .dev_infos_get = cpfl_repr_dev_info_get, + + .rx_queue_setup = cpfl_repr_rx_queue_setup, + .tx_queue_setup = cpfl_repr_tx_queue_setup, }; static int