From patchwork Sun Apr 26 14:46:08 2015
X-Patchwork-Submitter: Vladislav Zolotarov
X-Patchwork-Id: 4475
From: Vlad Zolotarov <vladz@cloudius-systems.com>
To: dev@dpdk.org
Date: Sun, 26 Apr 2015 17:46:08 +0300
Message-Id: <1430059571-20843-2-git-send-email-vladz@cloudius-systems.com>
X-Mailer: git-send-email 2.1.0
In-Reply-To: <1430059571-20843-1-git-send-email-vladz@cloudius-systems.com>
References: <1430059571-20843-1-git-send-email-vladz@cloudius-systems.com>
Subject: [dpdk-dev] [PATCH v1 1/4] ixgbe: move rx_bulk_alloc_allowed and rx_vec_allowed to ixgbe_adapter

Move the rx_bulk_alloc_allowed and rx_vec_allowed fields from struct
ixgbe_hw to struct ixgbe_adapter. These flags track DPDK PMD state, not
hardware state, so they belong in the PMD-private adapter structure
rather than in the base driver's ixgbe_hw.

Signed-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>
---
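Note: each call site below recovers the flags with an open-coded cast of
dev->data->dev_private. A convenience macro in the spirit of the existing
IXGBE_DEV_PRIVATE_TO_HW() could keep those call sites shorter; a minimal
sketch follows (the name IXGBE_DEV_PRIVATE_TO_ADAPTER is hypothetical and
is not introduced by this patch):

    /* Hypothetical helper, mirroring IXGBE_DEV_PRIVATE_TO_HW():
     * takes the dev_private pointer and casts it to the adapter type. */
    #define IXGBE_DEV_PRIVATE_TO_ADAPTER(priv) \
            ((struct ixgbe_adapter *)(priv))

    /* Usage at a call site: */
    struct ixgbe_adapter *adapter =
            IXGBE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);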
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h |  2 --
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c     |  8 +++----
 lib/librte_pmd_ixgbe/ixgbe_ethdev.h     |  3 +++
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c       | 38 +++++++++++++++++++--------------
 4 files changed, 29 insertions(+), 22 deletions(-)

diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
index 9a66370..c67d462 100644
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
+++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
@@ -3657,8 +3657,6 @@ struct ixgbe_hw {
 	bool force_full_reset;
 	bool allow_unsupported_sfp;
 	bool wol_enabled;
-	bool rx_bulk_alloc_allowed;
-	bool rx_vec_allowed;
 };
 
 #define ixgbe_call_func(hw, func, params, error) \
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 366aa45..aec1de9 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -1428,8 +1428,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 {
 	struct ixgbe_interrupt *intr =
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	struct ixgbe_hw *hw =
-		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
 
 	PMD_INIT_FUNC_TRACE();
@@ -1440,8 +1440,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
 	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
 	 * allocation or vector Rx preconditions we will reset it.
 	 */
-	hw->rx_bulk_alloc_allowed = true;
-	hw->rx_vec_allowed = true;
+	adapter->rx_bulk_alloc_allowed = true;
+	adapter->rx_vec_allowed = true;
 
 	return 0;
 }
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
index e45e727..5b90115 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
@@ -265,6 +265,9 @@ struct ixgbe_adapter {
 	struct ixgbe_bypass_info bps;
 #endif /* RTE_NIC_BYPASS */
 	struct ixgbe_filter_info filter;
+
+	bool rx_bulk_alloc_allowed;
+	bool rx_vec_allowed;
 };
 
 #define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 3c61d1c..60344a9 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -2442,7 +2442,7 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
 
 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
 static void
-ixgbe_reset_rx_queue(struct ixgbe_hw *hw, struct ixgbe_rx_queue *rxq)
+ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 {
 	static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
 	unsigned i;
@@ -2458,7 +2458,7 @@ ixgbe_reset_rx_queue(struct ixgbe_hw *hw, struct ixgbe_rx_queue *rxq)
 	 * constraints here to see if we need to zero out memory after the end
 	 * of the H/W descriptor ring.
 	 */
-	if (hw->rx_bulk_alloc_allowed)
+	if (adapter->rx_bulk_alloc_allowed)
 		/* zero out extra memory */
 		len += RTE_PMD_IXGBE_RX_MAX_BURST;
@@ -2504,6 +2504,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	struct ixgbe_rx_queue *rxq;
 	struct ixgbe_hw *hw;
 	uint16_t len;
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
 	struct rte_eth_dev_info dev_info = { 0 };
 	struct rte_eth_rxmode *dev_rx_mode = &dev->data->dev_conf.rxmode;
 	bool rsc_requested = false;
@@ -2602,7 +2604,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			    "preconditions - canceling the feature for "
 			    "the whole port[%d]",
 			    rxq->queue_id, rxq->port_id);
-		hw->rx_bulk_alloc_allowed = false;
+		adapter->rx_bulk_alloc_allowed = false;
 	}
 
 	/*
@@ -2611,7 +2613,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	 * function does not access an invalid memory region.
 	 */
 	len = nb_desc;
-	if (hw->rx_bulk_alloc_allowed)
+	if (adapter->rx_bulk_alloc_allowed)
 		len += RTE_PMD_IXGBE_RX_MAX_BURST;
 
 	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
@@ -2644,13 +2646,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			    "preconditions - canceling the feature for "
 			    "the whole port[%d]",
 			    rxq->queue_id, rxq->port_id);
-		hw->rx_vec_allowed = false;
+		adapter->rx_vec_allowed = false;
 	} else
 		ixgbe_rxq_vec_setup(rxq);
 
 	dev->data->rx_queues[queue_idx] = rxq;
 
-	ixgbe_reset_rx_queue(hw, rxq);
+	ixgbe_reset_rx_queue(adapter, rxq);
 
 	return 0;
 }
@@ -2704,7 +2706,8 @@ void
 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
 	unsigned i;
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
 
 	PMD_INIT_FUNC_TRACE();
@@ -2720,7 +2723,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 		struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
 		if (rxq != NULL) {
 			ixgbe_rx_queue_release_mbufs(rxq);
-			ixgbe_reset_rx_queue(hw, rxq);
+			ixgbe_reset_rx_queue(adapter, rxq);
 		}
 	}
 }
@@ -3969,20 +3972,21 @@ ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
 void ixgbe_set_rx_function(struct rte_eth_dev *dev)
 {
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
 
 	/*
 	 * In order to allow Vector Rx there are a few configuration
 	 * conditions to be met and Rx Bulk Allocation should be allowed.
 	 */
 	if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
-	    !hw->rx_bulk_alloc_allowed) {
+	    !adapter->rx_bulk_alloc_allowed) {
 		PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
 			    "preconditions or RTE_IXGBE_INC_VECTOR is "
 			    "not enabled",
 			    dev->data->port_id);
-		hw->rx_vec_allowed = false;
+		adapter->rx_vec_allowed = false;
 	}
 
 	/*
@@ -3993,7 +3997,7 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
 	 * Otherwise use a single allocation version.
 	 */
 	if (dev->data->lro) {
-		if (hw->rx_bulk_alloc_allowed) {
+		if (adapter->rx_bulk_alloc_allowed) {
 			PMD_INIT_LOG(INFO, "LRO is requested. Using a bulk "
 					   "allocation version");
 			dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
@@ -4007,7 +4011,7 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		 * Set the non-LRO scattered callback: there are Vector and
 		 * single allocation versions.
 		 */
-		if (hw->rx_vec_allowed) {
+		if (adapter->rx_vec_allowed) {
 			PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
 					    "callback (port=%d).",
 				     dev->data->port_id);
@@ -4029,12 +4033,12 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
 	 *    - Bulk Allocation
 	 *    - Single buffer allocation (the simplest one)
 	 */
-	} else if (hw->rx_vec_allowed) {
+	} else if (adapter->rx_vec_allowed) {
 		PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure RX "
 				   "burst size no less than 32.");
 		dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
-	} else if (hw->rx_bulk_alloc_allowed) {
+	} else if (adapter->rx_bulk_alloc_allowed) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
 				    "satisfied. Rx Burst Bulk Alloc function "
 				    "will be used on port=%d.",
@@ -4594,6 +4598,8 @@ int
 ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct ixgbe_hw *hw;
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
 	struct ixgbe_rx_queue *rxq;
 	uint32_t rxdctl;
 	int poll_ms;
@@ -4621,7 +4627,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		rte_delay_us(RTE_IXGBE_WAIT_100_US);
 
 		ixgbe_rx_queue_release_mbufs(rxq);
-		ixgbe_reset_rx_queue(hw, rxq);
+		ixgbe_reset_rx_queue(adapter, rxq);
 	} else
 		return -1;
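For reference, the Rx-callback selection order that ixgbe_set_rx_function()
implements on top of the moved flags can be summarized with a small
standalone sketch. This is a simplification, not driver code: the LRO and
scattered paths are collapsed, and the recv_pkts_* strings stand in for the
driver's ixgbe_recv_pkts_* callbacks.

    #include <stdbool.h>
    #include <stdio.h>

    /* Per-port PMD state, as in struct ixgbe_adapter after this patch. */
    struct adapter {
            bool rx_bulk_alloc_allowed; /* false if any queue failed bulk-alloc checks */
            bool rx_vec_allowed;        /* false if any queue failed vector checks */
    };

    /* Mirrors the priority used by ixgbe_set_rx_function():
     * vector Rx also requires the bulk-allocation preconditions,
     * bulk allocation beats the single-buffer fallback. */
    static const char *select_rx_burst(const struct adapter *a)
    {
            if (a->rx_vec_allowed && a->rx_bulk_alloc_allowed)
                    return "recv_pkts_vec";
            if (a->rx_bulk_alloc_allowed)
                    return "recv_pkts_bulk_alloc";
            return "recv_pkts_single";
    }

    int main(void)
    {
            struct adapter a = { true, true };

            printf("%s\n", select_rx_burst(&a)); /* recv_pkts_vec */
            a.rx_vec_allowed = false;            /* e.g. unsupported Rx configuration */
            printf("%s\n", select_rx_burst(&a)); /* recv_pkts_bulk_alloc */
            a.rx_bulk_alloc_allowed = false;     /* e.g. a queue failed preconditions */
            printf("%s\n", select_rx_burst(&a)); /* recv_pkts_single */
            return 0;
    }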