From: Qi Zhang <qi.z.zhang@intel.com>
To: thomas@monjalon.net, konstantin.ananyev@intel.com
Cc: dev@dpdk.org, beilei.xing@intel.com, jingjing.wu@intel.com,
 wenzhuo.lu@intel.com, Qi Zhang <qi.z.zhang@intel.com>
Date: Mon, 26 Mar 2018 16:59:16 +0800
Message-Id: <20180326085916.264811-4-qi.z.zhang@intel.com>
In-Reply-To: <20180326085916.264811-1-qi.z.zhang@intel.com>
References: <20180212045314.171616-1-qi.z.zhang@intel.com>
 <20180326085916.264811-1-qi.z.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v4 3/3] net/i40e: enable runtime queue setup

Expose the runtime queue configuration capability and enhance
i40e_dev_[rx|tx]_queue_setup to handle the case where the device has
already been started.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
v4:
- Fix the Rx/Tx conflict check.
- Skip the conflict check for the first Rx/Tx queue set up at runtime,
  since there is nothing yet to conflict with.
v3:
- Do not start/stop the queue in setup/release.
- Return failure when the requested Rx/Tx function conflicts with the
  existing setup.
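Note for reviewers (not part of the patch): the intended application-side
flow is sketched below. It assumes the runtime_queue_setup_capa field and
the DEV_RUNTIME_[RX|TX]_QUEUE_SETUP flags introduced earlier in this
series; the helper name and error handling are illustrative only.

#include <rte_ethdev.h>

/* Sketch: set up and start one more RX queue on an already started port. */
static int
setup_rx_queue_at_runtime(uint16_t port_id, uint16_t rx_queue_id,
			  uint16_t nb_desc, struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Only attempt setup on a started port when the PMD reports
	 * the runtime RX queue setup capability.
	 */
	if (!(dev_info.runtime_queue_setup_capa &
	      DEV_RUNTIME_RX_QUEUE_SETUP))
		return -ENOTSUP;

	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_desc,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mp);
	if (ret < 0)
		return ret;

	/* Since v3, setup no longer starts the queue, so start it
	 * explicitly.
	 */
	return rte_eth_rx_queue_start(port_id, rx_queue_id);
}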
 drivers/net/i40e/i40e_ethdev.c |   4 +
 drivers/net/i40e/i40e_rxtx.c   | 195 ++++++++++++++++++++++++++++++++++++-----
 2 files changed, 176 insertions(+), 23 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 508b4171c..68960dcaa 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3197,6 +3197,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		DEV_TX_OFFLOAD_GRE_TNL_TSO |
 		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
 		DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+	dev_info->runtime_queue_setup_capa =
+		DEV_RUNTIME_RX_QUEUE_SETUP |
+		DEV_RUNTIME_TX_QUEUE_SETUP;
+
 	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
 						sizeof(uint32_t);
 	dev_info->reta_size = pf->hash_lut_size;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a61..101c20ba0 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1692,6 +1692,75 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	return NULL;
 }
 
+static int
+i40e_dev_first_rx_queue(struct rte_eth_dev *dev,
+			uint16_t queue_idx)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		if (i != queue_idx && dev->data->rx_queues[i])
+			return 0;
+	}
+
+	return 1;
+}
+
+static int
+i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
+				struct i40e_rx_queue *rxq)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int use_def_burst_func =
+		check_rx_burst_bulk_alloc_preconditions(rxq);
+	uint16_t buf_size =
+		(uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+			   RTE_PKTMBUF_HEADROOM);
+	int use_scattered_rx =
+		((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) ?
+		1 : 0;
+
+	if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to do RX queue initialization");
+		return -EINVAL;
+	}
+
+	if (i40e_dev_first_rx_queue(dev, rxq->queue_id)) {
+		/**
+		 * If it is the first queue to setup,
+		 * set all flags to default and call
+		 * i40e_set_rx_function.
+		 */
+		ad->rx_bulk_alloc_allowed = true;
+		ad->rx_vec_allowed = true;
+		dev->data->scattered_rx = use_scattered_rx;
+		if (use_def_burst_func)
+			ad->rx_bulk_alloc_allowed = false;
+		i40e_set_rx_function(dev);
+		return 0;
+	}
+
+	/* check bulk alloc conflict */
+	if (ad->rx_bulk_alloc_allowed && use_def_burst_func) {
+		PMD_DRV_LOG(ERR, "Can't use default burst.");
+		return -EINVAL;
+	}
+	/* check scattered conflict */
+	if (!dev->data->scattered_rx && use_scattered_rx) {
+		PMD_DRV_LOG(ERR, "Scattered rx is required.");
+		return -EINVAL;
+	}
+	/* check vector conflict */
+	if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) {
+		PMD_DRV_LOG(ERR, "Failed vector rx setup.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int
 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -1808,25 +1877,6 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	i40e_reset_rx_queue(rxq);
 	rxq->q_set = TRUE;
-	dev->data->rx_queues[queue_idx] = rxq;
-
-	use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
-
-	if (!use_def_burst_func) {
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-			     "satisfied. Rx Burst Bulk Alloc function will be "
-			     "used on port=%d, queue=%d.",
-			     rxq->port_id, rxq->queue_id);
-#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
-	} else {
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-			     "not satisfied, Scattered Rx is requested, "
-			     "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
-			     "not enabled on port=%d, queue=%d.",
-			     rxq->port_id, rxq->queue_id);
-		ad->rx_bulk_alloc_allowed = false;
-	}
 
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		if (!(vsi->enabled_tc & (1 << i)))
@@ -1841,6 +1891,34 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		rxq->dcb_tc = i;
 	}
 
+	if (dev->data->dev_started) {
+		if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
+			i40e_dev_rx_queue_release(rxq);
+			return -EINVAL;
+		}
+	} else {
+		use_def_burst_func =
+			check_rx_burst_bulk_alloc_preconditions(rxq);
+		if (!use_def_burst_func) {
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+			PMD_INIT_LOG(DEBUG,
+				     "Rx Burst Bulk Alloc Preconditions are "
+				     "satisfied. Rx Burst Bulk Alloc function will be "
+				     "used on port=%d, queue=%d.",
+				     rxq->port_id, rxq->queue_id);
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+		} else {
+			PMD_INIT_LOG(DEBUG,
+				     "Rx Burst Bulk Alloc Preconditions are "
+				     "not satisfied, Scattered Rx is requested, "
+				     "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+				     "not enabled on port=%d, queue=%d.",
+				     rxq->port_id, rxq->queue_id);
+			ad->rx_bulk_alloc_allowed = false;
+		}
+	}
+
+	dev->data->rx_queues[queue_idx] = rxq;
 
 	return 0;
 }
@@ -1972,6 +2050,67 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 	return RTE_ETH_TX_DESC_FULL;
 }
 
+static int
+i40e_dev_first_tx_queue(struct rte_eth_dev *dev,
+			uint16_t queue_idx)
+{
+	uint16_t i;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		if (i != queue_idx && dev->data->tx_queues[i])
+			return 0;
+	}
+
+	return 1;
+}
+
+static int
+i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
+				struct i40e_tx_queue *txq)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	if (i40e_tx_queue_init(txq) != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to do TX queue initialization");
+		return -EINVAL;
+	}
+
+	if (i40e_dev_first_tx_queue(dev, txq->queue_id)) {
+		/**
+		 * If it is the first queue to setup,
+		 * set all flags to default and call
+		 * i40e_set_tx_function.
+		 */
+		ad->tx_simple_allowed = true;
+		ad->tx_vec_allowed = true;
+		i40e_set_tx_function_flag(dev, txq);
+		i40e_set_tx_function(dev);
+		return 0;
+	}
+
+	/* check vector conflict */
+	if (ad->tx_vec_allowed) {
+		if (txq->tx_rs_thresh > RTE_I40E_TX_MAX_FREE_BUF_SZ ||
+		    i40e_txq_vec_setup(txq)) {
+			PMD_DRV_LOG(ERR, "Failed vector tx setup.");
+			return -EINVAL;
+		}
+	}
+	/* check simple tx conflict */
+	if (ad->tx_simple_allowed) {
+		if (((txq->txq_flags & I40E_SIMPLE_FLAGS) !=
+		     I40E_SIMPLE_FLAGS) ||
+		    (txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST)) {
+			PMD_DRV_LOG(ERR, "No-simple tx is required.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 int
 i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -2144,10 +2283,6 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	i40e_reset_tx_queue(txq);
 	txq->q_set = TRUE;
-	dev->data->tx_queues[queue_idx] = txq;
-
-	/* Use a simple TX queue without offloads or multi segs if possible */
-	i40e_set_tx_function_flag(dev, txq);
 
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		if (!(vsi->enabled_tc & (1 << i)))
@@ -2162,6 +2297,20 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		txq->dcb_tc = i;
 	}
 
+	if (dev->data->dev_started) {
+		if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
+			i40e_dev_tx_queue_release(txq);
+			return -EINVAL;
+		}
+	} else {
+		/**
+		 * Use a simple TX queue without offloads or
+		 * multi segs if possible
+		 */
+		i40e_set_tx_function_flag(dev, txq);
+	}
+	dev->data->tx_queues[queue_idx] = txq;
+
 	return 0;
 }
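
A possible caller-side pattern, again illustrative and not part of this
patch: when runtime setup is rejected because the new queue conflicts with
the currently selected Rx function (for example, it would need Scattered Rx
on a port running a non-scattered path), the application can fall back to
the classic stop/setup/start cycle. The queue release performed on failure
above leaves the queue slot clean for the retry. The helper name below is
hypothetical.

/* Illustrative fallback: try runtime setup first, reconfigure with the
 * port stopped if the PMD rejects it.
 */
static int
setup_rx_queue_with_fallback(uint16_t port_id, uint16_t rx_queue_id,
			     uint16_t nb_desc, struct rte_mempool *mp)
{
	int socket_id = rte_eth_dev_socket_id(port_id);
	int ret;

	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_desc,
				     socket_id, NULL, mp);
	if (ret == 0)
		return rte_eth_rx_queue_start(port_id, rx_queue_id);

	/* Runtime setup conflicted with the current RX path; redo the
	 * setup with the port stopped and restart it.
	 */
	rte_eth_dev_stop(port_id);
	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_desc,
				     socket_id, NULL, mp);
	if (ret < 0)
		return ret;
	return rte_eth_dev_start(port_id);
}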