From patchwork Wed Jan 15 00:50:25 2020
X-Patchwork-Submitter: "Wang, Haiyue"
X-Patchwork-Id: 64684
From: Haiyue Wang
To: dev@dpdk.org, xiaolong.ye@intel.com, qi.z.zhang@intel.com, qiming.yang@intel.com
Cc: Haiyue Wang
Date: Wed, 15 Jan 2020 08:50:25 +0800
Message-Id: <20200115005028.21026-2-haiyue.wang@intel.com>
In-Reply-To: <20200115005028.21026-1-haiyue.wang@intel.com>
Subject: [dpdk-dev] [PATCH v3 1/4] net/iavf: unify the bool type value

Replaces the redefined TRUE and FALSE values with standard ones to
match the 'bool' type definition.

Signed-off-by: Haiyue Wang
Acked-by: Qiming Yang
---
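Not part of the submitted diff -- a minimal, stand-alone sketch of the
convention this patch adopts: rely on the C99 <stdbool.h> 'bool' type and its
standard 'true'/'false' values instead of locally redefined TRUE/FALSE
macros. The structure and helper names below (example_rxq,
example_vec_allowed) are invented for illustration and do not exist in the
driver.

#include <stdbool.h>
#include <stdio.h>

struct example_rxq {
	unsigned int nb_desc;
	unsigned int free_thresh;
	bool q_set;		/* assigned true/false, never TRUE/FALSE */
};

static bool
example_vec_allowed(const struct example_rxq *rxq)
{
	/* Return the standard boolean values directly. */
	return rxq->free_thresh != 0 && rxq->nb_desc % rxq->free_thresh == 0;
}

int
main(void)
{
	struct example_rxq rxq = { .nb_desc = 512, .free_thresh = 32 };

	rxq.q_set = true;
	printf("vector RX allowed: %s\n",
	       example_vec_allowed(&rxq) ? "yes" : "no");
	return 0;
}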
 drivers/net/iavf/iavf_ethdev.c | 31 ++++++++++++++++---------------
 drivers/net/iavf/iavf_rxtx.c   | 34 +++++++++++++++++-----------------
 2 files changed, 33 insertions(+), 32 deletions(-)

diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index f69c50df5..34913f9c4 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -454,7 +454,7 @@ iavf_dev_start(struct rte_eth_dev *dev)
 	}
 
 	/* Set all mac addrs */
-	iavf_add_del_all_mac_addr(adapter, TRUE);
+	iavf_add_del_all_mac_addr(adapter, true);
 
 	if (iavf_start_queues(dev) != 0) {
 		PMD_DRV_LOG(ERR, "enable queues failed");
@@ -464,7 +464,7 @@ iavf_dev_start(struct rte_eth_dev *dev)
 	return 0;
 
 err_mac:
-	iavf_add_del_all_mac_addr(adapter, FALSE);
+	iavf_add_del_all_mac_addr(adapter, false);
 err_queue:
 err_rss:
 	return -1;
@@ -493,7 +493,7 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	}
 
 	/* remove all mac addrs */
-	iavf_add_del_all_mac_addr(adapter, FALSE);
+	iavf_add_del_all_mac_addr(adapter, false);
 	adapter->stopped = 1;
 }
 
@@ -648,9 +648,9 @@ iavf_dev_promiscuous_enable(struct rte_eth_dev *dev)
 	if (vf->promisc_unicast_enabled)
 		return 0;
 
-	ret = iavf_config_promisc(adapter, TRUE, vf->promisc_multicast_enabled);
+	ret = iavf_config_promisc(adapter, true, vf->promisc_multicast_enabled);
 	if (!ret)
-		vf->promisc_unicast_enabled = TRUE;
+		vf->promisc_unicast_enabled = true;
 	else
 		ret = -EAGAIN;
 
@@ -668,9 +668,10 @@ iavf_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	if (!vf->promisc_unicast_enabled)
 		return 0;
 
-	ret = iavf_config_promisc(adapter, FALSE, vf->promisc_multicast_enabled);
+	ret = iavf_config_promisc(adapter, false,
+				  vf->promisc_multicast_enabled);
 	if (!ret)
-		vf->promisc_unicast_enabled = FALSE;
+		vf->promisc_unicast_enabled = false;
 	else
 		ret = -EAGAIN;
 
@@ -688,9 +689,9 @@ iavf_dev_allmulticast_enable(struct rte_eth_dev *dev)
 	if (vf->promisc_multicast_enabled)
 		return 0;
 
-	ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, TRUE);
+	ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, true);
 	if (!ret)
-		vf->promisc_multicast_enabled = TRUE;
+		vf->promisc_multicast_enabled = true;
 	else
 		ret = -EAGAIN;
 
@@ -708,9 +709,9 @@ iavf_dev_allmulticast_disable(struct rte_eth_dev *dev)
 	if (!vf->promisc_multicast_enabled)
 		return 0;
 
-	ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, FALSE);
+	ret = iavf_config_promisc(adapter, vf->promisc_unicast_enabled, false);
 	if (!ret)
-		vf->promisc_multicast_enabled = FALSE;
+		vf->promisc_multicast_enabled = false;
 	else
 		ret = -EAGAIN;
 
@@ -732,7 +733,7 @@ iavf_dev_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr,
 		return -EINVAL;
 	}
 
-	err = iavf_add_del_eth_addr(adapter, addr, TRUE);
+	err = iavf_add_del_eth_addr(adapter, addr, true);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to add MAC address");
 		return -EIO;
@@ -754,7 +755,7 @@ iavf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
 
 	addr = &dev->data->mac_addrs[index];
 
-	err = iavf_add_del_eth_addr(adapter, addr, FALSE);
+	err = iavf_add_del_eth_addr(adapter, addr, false);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to delete MAC address");
 
@@ -979,7 +980,7 @@ iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
 	if (rte_is_valid_assigned_ether_addr(perm_addr))
 		return -EPERM;
 
-	ret = iavf_add_del_eth_addr(adapter, old_addr, FALSE);
+	ret = iavf_add_del_eth_addr(adapter, old_addr, false);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
 			    " %02X:%02X:%02X:%02X:%02X:%02X",
@@ -990,7 +991,7 @@ iavf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
 			    old_addr->addr_bytes[4],
 			    old_addr->addr_bytes[5]);
 
-	ret = iavf_add_del_eth_addr(adapter, mac_addr, TRUE);
+	ret = iavf_add_del_eth_addr(adapter, mac_addr, true);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Fail to add new MAC:"
 			    " %02X:%02X:%02X:%02X:%02X:%02X",
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 067290db4..85d9a8e3b 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -95,11 +95,11 @@ check_rx_vec_allow(struct iavf_rx_queue *rxq)
 	if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
 	    rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
 		PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
-		return TRUE;
+		return true;
 	}
 
 	PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
-	return FALSE;
+	return false;
 }
 
 static inline bool
@@ -109,29 +109,29 @@ check_tx_vec_allow(struct iavf_tx_queue *txq)
 	    txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
 	    txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
 		PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
-		return TRUE;
+		return true;
 	}
 	PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
-	return FALSE;
+	return false;
 }
 
 static inline bool
 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
 {
-	int ret = TRUE;
+	int ret = true;
 
 	if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->rx_free_thresh=%d, "
			     "IAVF_RX_MAX_BURST=%d",
			     rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
-		ret = FALSE;
+		ret = false;
 	} else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->nb_rx_desc=%d, "
			     "rxq->rx_free_thresh=%d",
			     rxq->nb_rx_desc, rxq->rx_free_thresh);
-		ret = FALSE;
+		ret = false;
 	}
 	return ret;
 }
@@ -390,12 +390,12 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->mz = mz;
 	reset_rx_queue(rxq);
-	rxq->q_set = TRUE;
+	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
 	rxq->ops = &def_rxq_ops;
 
-	if (check_rx_bulk_allow(rxq) == TRUE) {
+	if (check_rx_bulk_allow(rxq) == true) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
			     "satisfied. Rx Burst Bulk Alloc function will be "
			     "used on port=%d, queue=%d.",
@@ -408,7 +408,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		ad->rx_bulk_alloc_allowed = false;
 	}
 
-	if (check_rx_vec_allow(rxq) == FALSE)
+	if (check_rx_vec_allow(rxq) == false)
 		ad->rx_vec_allowed = false;
 
 	return 0;
@@ -500,12 +500,12 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->mz = mz;
 	reset_tx_queue(txq);
-	txq->q_set = TRUE;
+	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
 	txq->ops = &def_txq_ops;
 
-	if (check_tx_vec_allow(txq) == FALSE) {
+	if (check_tx_vec_allow(txq) == false) {
 		struct iavf_adapter *ad =
 			IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 		ad->tx_vec_allowed = false;
@@ -543,7 +543,7 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	IAVF_WRITE_FLUSH(hw);
 
 	/* Ready to switch the queue on */
-	err = iavf_switch_queue(adapter, rx_queue_id, TRUE, TRUE);
+	err = iavf_switch_queue(adapter, rx_queue_id, true, true);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
 			    rx_queue_id);
@@ -575,7 +575,7 @@ iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	IAVF_WRITE_FLUSH(hw);
 
 	/* Ready to switch the queue on */
-	err = iavf_switch_queue(adapter, tx_queue_id, FALSE, TRUE);
+	err = iavf_switch_queue(adapter, tx_queue_id, false, true);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
@@ -600,7 +600,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	if (rx_queue_id >= dev->data->nb_rx_queues)
 		return -EINVAL;
 
-	err = iavf_switch_queue(adapter, rx_queue_id, TRUE, FALSE);
+	err = iavf_switch_queue(adapter, rx_queue_id, true, false);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
 			    rx_queue_id);
@@ -628,7 +628,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	if (tx_queue_id >= dev->data->nb_tx_queues)
 		return -EINVAL;
 
-	err = iavf_switch_queue(adapter, tx_queue_id, FALSE, FALSE);
+	err = iavf_switch_queue(adapter, tx_queue_id, false, false);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
 			    tx_queue_id);
@@ -1815,7 +1815,7 @@ iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->nb_desc = rxq->nb_rx_desc;
 
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
-	qinfo->conf.rx_drop_en = TRUE;
+	qinfo->conf.rx_drop_en = true;
 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
 }

From patchwork Wed Jan 15 00:50:26 2020
X-Patchwork-Submitter: "Wang, Haiyue"
X-Patchwork-Id: 64685
From: Haiyue Wang
To: dev@dpdk.org, xiaolong.ye@intel.com, qi.z.zhang@intel.com, qiming.yang@intel.com
Cc: Haiyue Wang
Date: Wed, 15 Jan 2020 08:50:26 +0800
Message-Id: <20200115005028.21026-3-haiyue.wang@intel.com>
In-Reply-To: <20200115005028.21026-1-haiyue.wang@intel.com>
Subject: [dpdk-dev] [PATCH v3 2/4] net/ice: unify the bool type value

Replaces the redefined TRUE and FALSE values with standard ones to
match the 'bool' type definition.

Signed-off-by: Haiyue Wang
Acked-by: Qiming Yang
---
 drivers/net/ice/ice_ethdev.c | 22 +++++++++++-----------
 drivers/net/ice/ice_rxtx.c   | 16 ++++++++--------
 2 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index f99eb4e1b..8e9369e0a 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1720,7 +1720,7 @@ ice_pf_setup(struct ice_pf *pf)
 	uint16_t unused;
 
 	/* Clear all stats counters */
-	pf->offset_loaded = FALSE;
+	pf->offset_loaded = false;
 	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
 	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
 	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
@@ -2234,16 +2234,16 @@ ice_dev_init(struct rte_eth_dev *dev)
 	vsi = pf->main_vsi;
 
 	/* Disable double vlan by default */
-	ice_vsi_config_double_vlan(vsi, FALSE);
+	ice_vsi_config_double_vlan(vsi, false);
 
-	ret = ice_aq_stop_lldp(hw, TRUE, FALSE, NULL);
+	ret = ice_aq_stop_lldp(hw, true, false, NULL);
 	if (ret != ICE_SUCCESS)
 		PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
-	ret = ice_init_dcb(hw, TRUE);
+	ret = ice_init_dcb(hw, true);
 	if (ret != ICE_SUCCESS)
 		PMD_INIT_LOG(DEBUG, "Failed to init DCB\n");
 	/* Forward LLDP packets to default VSI */
-	ret = ice_vsi_config_sw_lldp(vsi, TRUE);
+	ret = ice_vsi_config_sw_lldp(vsi, true);
 	if (ret != ICE_SUCCESS)
 		PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n");
 	/* register callback func to eal lib */
@@ -3449,23 +3449,23 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	rxmode = &dev->data->dev_conf.rxmode;
 	if (mask & ETH_VLAN_FILTER_MASK) {
 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-			ice_vsi_config_vlan_filter(vsi, TRUE);
+			ice_vsi_config_vlan_filter(vsi, true);
 		else
-			ice_vsi_config_vlan_filter(vsi, FALSE);
+			ice_vsi_config_vlan_filter(vsi, false);
 	}
 
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			ice_vsi_config_vlan_stripping(vsi, TRUE);
+			ice_vsi_config_vlan_stripping(vsi, true);
 		else
-			ice_vsi_config_vlan_stripping(vsi, FALSE);
+			ice_vsi_config_vlan_stripping(vsi, false);
 	}
 
 	if (mask & ETH_VLAN_EXTEND_MASK) {
 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
-			ice_vsi_config_double_vlan(vsi, TRUE);
+			ice_vsi_config_double_vlan(vsi, true);
 		else
-			ice_vsi_config_double_vlan(vsi, FALSE);
+			ice_vsi_config_double_vlan(vsi, false);
 	}
 
 	return 0;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index ce499af43..ad3cb9c46 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -424,7 +424,7 @@ ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	/* Init the RX tail register. */
 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
-	err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
 			    rx_queue_id);
@@ -450,7 +450,7 @@ ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	if (rx_queue_id < dev->data->nb_rx_queues) {
 		rxq = dev->data->rx_queues[rx_queue_id];
-		err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+		err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
 				    rx_queue_id);
@@ -630,7 +630,7 @@ ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	/* Init the RX tail register. */
 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
-	err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
 			    rx_queue_id);
@@ -816,7 +816,7 @@ ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	rxq = pf->fdir.rxq;
 
-	err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+	err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
 			    rx_queue_id);
@@ -973,7 +973,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	ice_reset_rx_queue(rxq);
-	rxq->q_set = TRUE;
+	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
 
@@ -1186,7 +1186,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	ice_reset_tx_queue(txq);
-	txq->q_set = TRUE;
+	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
 	ice_set_tx_function_flag(dev, txq);
@@ -2043,7 +2043,7 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
 	 * don't need to allocate software ring and reset for the fdir
 	 * program queue just set the queue has been configured.
 	 */
-	txq->q_set = TRUE;
+	txq->q_set = true;
 	pf->fdir.txq = txq;
 
 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
@@ -2104,7 +2104,7 @@ ice_fdir_setup_rx_resources(struct ice_pf *pf)
 	 * Don't need to allocate software ring and reset for the fdir
 	 * rx queue, just set the queue has been configured.
 	 */
-	rxq->q_set = TRUE;
+	rxq->q_set = true;
 	pf->fdir.rxq = rxq;
 
 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;

From patchwork Wed Jan 15 00:50:27 2020
X-Patchwork-Submitter: "Wang, Haiyue"
X-Patchwork-Id: 64686
From: Haiyue Wang
To: dev@dpdk.org, xiaolong.ye@intel.com, qi.z.zhang@intel.com, qiming.yang@intel.com
Cc: Haiyue Wang
Date: Wed, 15 Jan 2020 08:50:27 +0800
Message-Id: <20200115005028.21026-4-haiyue.wang@intel.com>
In-Reply-To: <20200115005028.21026-1-haiyue.wang@intel.com>
Subject: [dpdk-dev] [PATCH v3 3/4] common/iavf: osdep.h clean up

Remove the unused definitions, rewrite the IO data read/write helpers,
and put the common definitions related to RTE defines under the macro
__INTEL_NET_BASE_OSDEP__, so it works like OS(RTE) dependency.

Signed-off-by: Haiyue Wang
---
 drivers/common/iavf/iavf_osdep.h | 107 +++++++++++++++++--------------
 1 file changed, 59 insertions(+), 48 deletions(-)

diff --git a/drivers/common/iavf/iavf_osdep.h b/drivers/common/iavf/iavf_osdep.h
index 7d72863bc..0bf040d63 100644
--- a/drivers/common/iavf/iavf_osdep.h
+++ b/drivers/common/iavf/iavf_osdep.h
@@ -21,24 +21,20 @@
 #include
 #include
 
+#ifndef __INTEL_NET_BASE_OSDEP__
+#define __INTEL_NET_BASE_OSDEP__
+
 #define INLINE inline
 #define STATIC static
 
 typedef uint8_t u8;
 typedef int8_t s8;
 typedef uint16_t u16;
+typedef int16_t s16;
 typedef uint32_t u32;
 typedef int32_t s32;
 typedef uint64_t u64;
-
-#define __iomem
-#define hw_dbg(hw, S, A...) do {} while (0)
-#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
-#define lower_32_bits(n) ((u32)(n))
-
-#ifndef ETH_ADDR_LEN
-#define ETH_ADDR_LEN 6
-#endif
+typedef uint64_t s64;
 
 #ifndef __le16
 #define __le16 uint16_t
 #endif
@@ -59,16 +55,11 @@ typedef uint64_t u64;
 #define __be64 uint64_t
 #endif
 
-#define FALSE 0
-#define TRUE 1
-#define false 0
-#define true 1
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
 
-#define min(a,b) RTE_MIN(a,b)
-#define max(a,b) RTE_MAX(a,b)
-
-#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-#define ASSERT(x) if(!(x)) rte_panic("IAVF: x")
+#define FIELD_SIZEOF(t, f) RTE_SIZEOF_FIELD(t, f)
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
 
 #define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
 #define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
@@ -77,45 +68,65 @@ typedef uint64_t u64;
 #define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
 #define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
 
-#define cpu_to_le16(o) rte_cpu_to_le_16(o)
-#define cpu_to_le32(s) rte_cpu_to_le_32(s)
-#define cpu_to_le64(h) rte_cpu_to_le_64(h)
-#define le16_to_cpu(a) rte_le_to_cpu_16(a)
-#define le32_to_cpu(c) rte_le_to_cpu_32(c)
-#define le64_to_cpu(k) rte_le_to_cpu_64(k)
+#define CPU_TO_BE16(o) rte_cpu_to_be_16(o)
+#define CPU_TO_BE32(o) rte_cpu_to_be_32(o)
+#define CPU_TO_BE64(o) rte_cpu_to_be_64(o)
 
-#define iavf_memset(a, b, c, d) memset((a), (b), (c))
-#define iavf_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
+#define NTOHS(a) rte_be_to_cpu_16(a)
+#define NTOHL(a) rte_be_to_cpu_32(a)
+#define HTONS(a) rte_cpu_to_be_16(a)
+#define HTONL(a) rte_cpu_to_be_32(a)
 
-#define iavf_usec_delay(x) rte_delay_us_sleep(x)
-#define iavf_msec_delay(x) iavf_usec_delay(1000 * (x))
+static __rte_always_inline uint32_t
+readl(volatile void *addr)
+{
+	return rte_le_to_cpu_32(rte_read32(addr));
+}
 
-#define IAVF_PCI_REG(reg)		rte_read32(reg)
-#define IAVF_PCI_REG_ADDR(a, reg) \
-	((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
+static __rte_always_inline void
+writel(uint32_t value, volatile void *addr)
+{
+	rte_write32(rte_cpu_to_le_32(value), addr);
+}
 
-#define IAVF_PCI_REG_WRITE(reg, value)		\
-	rte_write32((rte_cpu_to_le_32(value)), reg)
-#define IAVF_PCI_REG_WRITE_RELAXED(reg, value)	\
-	rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
-static inline
-uint32_t iavf_read_addr(volatile void *addr)
+static __rte_always_inline void
+writel_relaxed(uint32_t value, volatile void *addr)
 {
-	return rte_le_to_cpu_32(IAVF_PCI_REG(addr));
+	rte_write32_relaxed(rte_cpu_to_le_32(value), addr);
 }
 
-#define IAVF_READ_REG(hw, reg) \
-	iavf_read_addr(IAVF_PCI_REG_ADDR((hw), (reg)))
-#define IAVF_WRITE_REG(hw, reg, value) \
-	IAVF_PCI_REG_WRITE(IAVF_PCI_REG_ADDR((hw), (reg)), (value))
-#define IAVF_WRITE_FLUSH(a) \
-	IAVF_READ_REG(a, IAVF_VFGEN_RSTAT)
+static __rte_always_inline uint64_t
+readq(volatile void *addr)
+{
+	return rte_le_to_cpu_64(rte_read64(addr));
+}
+
+static __rte_always_inline void
+writeq(uint64_t value, volatile void *addr)
+{
+	rte_write64(rte_cpu_to_le_64(value), addr);
+}
+
+#define wr32(a, reg, value) writel((value), (a)->hw_addr + (reg))
+#define rd32(a, reg)        readl((a)->hw_addr + (reg))
+#define wr64(a, reg, value) writeq((value), (a)->hw_addr + (reg))
+#define rd64(a, reg)        readq((a)->hw_addr + (reg))
+
+#endif /* __INTEL_NET_BASE_OSDEP__ */
+
+#define iavf_memset(a, b, c, d) memset((a), (b), (c))
+#define iavf_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
+
+#define iavf_usec_delay(x) rte_delay_us_sleep(x)
+#define iavf_msec_delay(x) iavf_usec_delay(1000 * (x))
+
+#define IAVF_PCI_REG_WRITE(reg, value)         writel(value, reg)
+#define IAVF_PCI_REG_WRITE_RELAXED(reg, value) writel_relaxed(value, reg)
 
-#define rd32(a, reg) iavf_read_addr(IAVF_PCI_REG_ADDR((a), (reg)))
-#define wr32(a, reg, value) \
-	IAVF_PCI_REG_WRITE(IAVF_PCI_REG_ADDR((a), (reg)), (value))
+#define IAVF_READ_REG(hw, reg)        rd32(hw, reg)
+#define IAVF_WRITE_REG(hw, reg, value) wr32(hw, reg, value)
 
-#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0]))
+#define IAVF_WRITE_FLUSH(a) IAVF_READ_REG(a, IAVF_VFGEN_RSTAT)
 
 extern int iavf_common_logger;

From patchwork Wed Jan 15 00:50:28 2020
X-Patchwork-Submitter: "Wang, Haiyue"
X-Patchwork-Id: 64687
From: Haiyue Wang
To: dev@dpdk.org, xiaolong.ye@intel.com, qi.z.zhang@intel.com, qiming.yang@intel.com
Cc: Haiyue Wang
Date: Wed, 15 Jan 2020 08:50:28 +0800
Message-Id: <20200115005028.21026-5-haiyue.wang@intel.com>
In-Reply-To: <20200115005028.21026-1-haiyue.wang@intel.com>
Subject: [dpdk-dev] [PATCH v3 4/4] net/ice/base: osdep.h clean up

Remove the unused definitions, rewrite the IO data read/write helpers,
and put the common definitions related to RTE defines under the macro
__INTEL_NET_BASE_OSDEP__, so it works like OS(RTE) dependency.

Signed-off-by: Haiyue Wang
---
 drivers/net/ice/base/ice_osdep.h | 132 ++++++++++++++++---------------
 1 file changed, 67 insertions(+), 65 deletions(-)

diff --git a/drivers/net/ice/base/ice_osdep.h b/drivers/net/ice/base/ice_osdep.h
index 27c1830c5..45b9f3617 100644
--- a/drivers/net/ice/base/ice_osdep.h
+++ b/drivers/net/ice/base/ice_osdep.h
@@ -26,6 +26,9 @@
 
 #include "../ice_logs.h"
 
+#ifndef __INTEL_NET_BASE_OSDEP__
+#define __INTEL_NET_BASE_OSDEP__
+
 #define INLINE inline
 #define STATIC static
 
@@ -38,17 +41,6 @@ typedef int32_t s32;
 typedef uint64_t u64;
 typedef uint64_t s64;
 
-#define __iomem
-#define hw_dbg(hw, S, A...) do {} while (0)
-#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
-#define lower_32_bits(n) ((u32)(n))
-#define low_16_bits(x) ((x) & 0xFFFF)
-#define high_16_bits(x) (((x) & 0xFFFF0000) >> 16)
-
-#ifndef ETH_ADDR_LEN
-#define ETH_ADDR_LEN 6
-#endif
-
 #ifndef __le16
 #define __le16 uint16_t
 #endif
@@ -68,6 +60,65 @@ typedef uint64_t s64;
 #define __be64 uint64_t
 #endif
 
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+#define FIELD_SIZEOF(t, f) RTE_SIZEOF_FIELD(t, f)
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
+#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
+#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
+#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
+#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
+#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
+
+#define CPU_TO_BE16(o) rte_cpu_to_be_16(o)
+#define CPU_TO_BE32(o) rte_cpu_to_be_32(o)
+#define CPU_TO_BE64(o) rte_cpu_to_be_64(o)
+
+#define NTOHS(a) rte_be_to_cpu_16(a)
+#define NTOHL(a) rte_be_to_cpu_32(a)
+#define HTONS(a) rte_cpu_to_be_16(a)
+#define HTONL(a) rte_cpu_to_be_32(a)
+
+static __rte_always_inline uint32_t
+readl(volatile void *addr)
+{
+	return rte_le_to_cpu_32(rte_read32(addr));
+}
+
+static __rte_always_inline void
+writel(uint32_t value, volatile void *addr)
+{
+	rte_write32(rte_cpu_to_le_32(value), addr);
+}
+
+static __rte_always_inline void
+writel_relaxed(uint32_t value, volatile void *addr)
+{
+	rte_write32_relaxed(rte_cpu_to_le_32(value), addr);
+}
+
+static __rte_always_inline uint64_t
+readq(volatile void *addr)
+{
+	return rte_le_to_cpu_64(rte_read64(addr));
+}
+
+static __rte_always_inline void
+writeq(uint64_t value, volatile void *addr)
+{
+	rte_write64(rte_cpu_to_le_64(value), addr);
+}
+
+#define wr32(a, reg, value) writel((value), (a)->hw_addr + (reg))
+#define rd32(a, reg)        readl((a)->hw_addr + (reg))
+#define wr64(a, reg, value) writeq((value), (a)->hw_addr + (reg))
+#define rd64(a, reg)        readq((a)->hw_addr + (reg))
+
+#endif /* __INTEL_NET_BASE_OSDEP__ */
+
 #ifndef __always_unused
 #define __always_unused __attribute__((unused))
 #endif
@@ -82,21 +133,8 @@ typedef uint64_t s64;
 #define BIT_ULL(a) (1ULL << (a))
 #endif
 
-#define FALSE 0
-#define TRUE 1
-#define false 0
-#define true 1
-
-#define min(a, b) RTE_MIN(a, b)
-#define max(a, b) RTE_MAX(a, b)
-
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
-#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
 #define MAKEMASK(m, s) ((m) << (s))
 
-#define DEBUGOUT(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A)
-#define DEBUGFUNC(F) PMD_DRV_LOG_RAW(DEBUG, F)
-
 #define ice_debug(h, m, s, ...) \
 do { \
 	if (((m) & (h)->debug_mask)) \
@@ -123,37 +161,16 @@ do { \
 #define SNPRINTF ice_snprintf
 #endif
 
-#define ICE_PCI_REG(reg)     rte_read32(reg)
-#define ICE_PCI_REG_ADDR(a, reg) \
-	((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
-#define ICE_PCI_REG64(reg)     rte_read64(reg)
-#define ICE_PCI_REG_ADDR64(a, reg) \
-	((volatile uint64_t *)((char *)(a)->hw_addr + (reg)))
-static inline uint32_t ice_read_addr(volatile void *addr)
-{
-	return rte_le_to_cpu_32(ICE_PCI_REG(addr));
-}
-
-static inline uint64_t ice_read_addr64(volatile void *addr)
-{
-	return rte_le_to_cpu_64(ICE_PCI_REG64(addr));
-}
+#define ICE_PCI_REG_WRITE(reg, value) writel(value, reg)
 
-#define ICE_PCI_REG_WRITE(reg, value) \
-	rte_write32((rte_cpu_to_le_32(value)), reg)
+#define ICE_READ_REG(hw, reg)         rd32(hw, reg)
+#define ICE_WRITE_REG(hw, reg, value) wr32(hw, reg, value)
 
 #define ice_flush(a)   ICE_READ_REG((a), GLGEN_STAT)
 #define icevf_flush(a) ICE_READ_REG((a), VFGEN_RSTAT)
-#define ICE_READ_REG(hw, reg) ice_read_addr(ICE_PCI_REG_ADDR((hw), (reg)))
-#define ICE_WRITE_REG(hw, reg, value) \
-	ICE_PCI_REG_WRITE(ICE_PCI_REG_ADDR((hw), (reg)), (value))
-
-#define rd32(a, reg) ice_read_addr(ICE_PCI_REG_ADDR((a), (reg)))
-#define wr32(a, reg, value) \
-	ICE_PCI_REG_WRITE(ICE_PCI_REG_ADDR((a), (reg)), (value))
-#define flush(a) ice_read_addr(ICE_PCI_REG_ADDR((a), (GLGEN_STAT)))
+
+#define flush(a) ICE_READ_REG((a), GLGEN_STAT)
 #define div64_long(n, d) ((n) / (d))
-#define rd64(a, reg) ice_read_addr64(ICE_PCI_REG_ADDR64((a), (reg)))
 
 #define BITS_PER_BYTE 8
 
@@ -178,21 +195,6 @@ struct ice_virt_mem {
 #define ice_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
 #define ice_memdup(a, b, c, d) rte_memcpy(ice_malloc(a, c), b, c)
 
-#define CPU_TO_BE16(o) rte_cpu_to_be_16(o)
-#define CPU_TO_BE32(o) rte_cpu_to_be_32(o)
-#define CPU_TO_BE64(o) rte_cpu_to_be_64(o)
-#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
-#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
-#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
-#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
-#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
-#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
-
-#define NTOHS(a) rte_be_to_cpu_16(a)
-#define NTOHL(a) rte_be_to_cpu_32(a)
-#define HTONS(a) rte_cpu_to_be_16(a)
-#define HTONL(a) rte_cpu_to_be_32(a)
-
 /* SW spinlock */
 struct ice_lock {
 	rte_spinlock_t spinlock;
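Not part of the patches above -- a self-contained sketch of the register
access pattern that the reworked osdep.h files in patches 3/4 converge on:
readl()/writel() style primitives wrapped by rd32()/wr32() macros that offset
from the device's mapped BAR (hw->hw_addr). The real helpers are built on
rte_read32()/rte_write32() with little-endian conversion; here the "BAR" is a
plain local buffer, byte-order handling is omitted, and every example_* name
is invented for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct example_hw {
	volatile uint8_t *hw_addr;	/* stands in for the mapped PCI BAR */
};

static inline uint32_t
example_readl(volatile void *addr)
{
	return *(volatile uint32_t *)addr;
}

static inline void
example_writel(uint32_t value, volatile void *addr)
{
	*(volatile uint32_t *)addr = value;
}

/* Same shape as the shared wr32()/rd32() macros: base address + offset. */
#define example_wr32(a, reg, value) example_writel((value), (a)->hw_addr + (reg))
#define example_rd32(a, reg)        example_readl((a)->hw_addr + (reg))

#define EXAMPLE_REG 0x10	/* made-up register offset */

int
main(void)
{
	static uint32_t bar[16];	/* fake register file */
	struct example_hw hw = { .hw_addr = (volatile uint8_t *)bar };

	example_wr32(&hw, EXAMPLE_REG, 0x12345678);
	printf("reg 0x%x = 0x%08" PRIx32 "\n", EXAMPLE_REG,
	       example_rd32(&hw, EXAMPLE_REG));
	return 0;
}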