From patchwork Sat Aug 5 08:36:26 2023
X-Patchwork-Submitter: Dongdong Liu
X-Patchwork-Id: 129919
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Dongdong Liu
Subject: [PATCH 4/5] net/hns3: fix TM thread safety risk
Date: Sat, 5 Aug 2023 16:36:26 +0800
Message-ID: <20230805083627.8681-5-liudongdong3@huawei.com>
X-Mailer: git-send-email 2.22.0
In-Reply-To: <20230805083627.8681-1-liudongdong3@huawei.com>
References: <20230805083627.8681-1-liudongdong3@huawei.com>

From: Chengwen Feng

The driver-related TM (traffic management) info is maintained in linked
lists. The following threads are involved in reading and writing the TM
info:
1. main thread: invokes the rte_tm_xxx() API family to modify or read
   TM info.
2. interrupt thread: reads TM info during the reset recover process.
3. telemetry/proc-info thread: invokes the rte_eth_dev_priv_dump() API
   to read TM info.

Currently, thread-safety protection of the TM info exists only in the
following places:
1. some of the rte_tm_xxx() API implementations.
2. the reset recover process.

Thread-safety risks therefore remain in the other scenarios, so fix
them by:
1. making sure every rte_tm_xxx() API implementation is protected by
   hw.lock.
2. making sure the rte_eth_dev_priv_dump() implementation is protected
   by hw.lock.
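To make the race concrete, below is a minimal illustrative sketch (not
part of this patch; the thread functions and the IDs passed to the API
are hypothetical) of the two access paths that could previously overlap
with no lock held:

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_tm.h>

    /* Thread A (main thread): mutates the TM node lists through the
     * generic rte_tm API; before this patch, the hns3 implementation
     * modified those lists without taking hw->lock. */
    static void
    thread_a(uint16_t port_id)
    {
            struct rte_tm_error err;
            struct rte_tm_node_params params = {0};

            rte_tm_node_add(port_id, 0 /* node_id */, RTE_TM_NODE_ID_NULL,
                            0 /* priority */, 1 /* weight */,
                            0 /* level_id */, &params, &err);
    }

    /* Thread B (telemetry/proc-info thread): traverses the same lists
     * via hns3_get_tm_conf_info(); before this patch it ran with no
     * synchronization against thread A. */
    static void
    thread_b(uint16_t port_id)
    {
            rte_eth_dev_priv_dump(port_id, stdout);
    }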
Fixes: c09c7847d892 ("net/hns3: support traffic management")
Fixes: e4cfe6bb9114 ("net/hns3: dump TM configuration info")
Cc: stable@dpdk.org

Signed-off-by: Chengwen Feng
Signed-off-by: Dongdong Liu
---
 drivers/net/hns3/hns3_dump.c |   8 +-
 drivers/net/hns3/hns3_tm.c   | 173 ++++++++++++++++++++++++++++++-----
 2 files changed, 157 insertions(+), 24 deletions(-)

diff --git a/drivers/net/hns3/hns3_dump.c b/drivers/net/hns3/hns3_dump.c
index c0839380ea..67b45e6dc3 100644
--- a/drivers/net/hns3/hns3_dump.c
+++ b/drivers/net/hns3/hns3_dump.c
@@ -918,6 +918,8 @@ hns3_eth_dev_priv_dump(struct rte_eth_dev *dev, FILE *file)
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 
+	rte_spinlock_lock(&hw->lock);
+
 	hns3_get_device_basic_info(file, dev);
 	hns3_get_dev_feature_capability(file, hw);
 	hns3_get_rxtx_queue_info(file, dev);
@@ -927,8 +929,10 @@ hns3_eth_dev_priv_dump(struct rte_eth_dev *dev, FILE *file)
 	 * VF only supports dumping basic info, feature capability and queue
 	 * info.
 	 */
-	if (hns->is_vf)
+	if (hns->is_vf) {
+		rte_spinlock_unlock(&hw->lock);
 		return 0;
+	}
 
 	hns3_get_dev_mac_info(file, hns);
 	hns3_get_vlan_config_info(file, hw);
@@ -936,6 +940,8 @@ hns3_eth_dev_priv_dump(struct rte_eth_dev *dev, FILE *file)
 	hns3_get_tm_conf_info(file, dev);
 	hns3_get_flow_ctrl_info(file, dev);
 
+	rte_spinlock_unlock(&hw->lock);
+
 	return 0;
 }
 
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index e1089b6bd0..67402a700f 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1081,21 +1081,6 @@ hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
 	return -EINVAL;
 }
 
-static int
-hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
-			      int clear_on_fail,
-			      struct rte_tm_error *error)
-{
-	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int ret;
-
-	rte_spinlock_lock(&hw->lock);
-	ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
-	rte_spinlock_unlock(&hw->lock);
-
-	return ret;
-}
-
 static int
 hns3_tm_node_shaper_do_update(struct hns3_hw *hw,
 			      uint32_t node_id,
@@ -1195,6 +1180,148 @@ hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+hns3_tm_capabilities_get_wrap(struct rte_eth_dev *dev,
+			      struct rte_tm_capabilities *cap,
+			      struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_tm_capabilities_get(dev, cap, error);
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int
+hns3_tm_shaper_profile_add_wrap(struct rte_eth_dev *dev,
+				uint32_t shaper_profile_id,
+				struct rte_tm_shaper_params *profile,
+				struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_tm_shaper_profile_add(dev, shaper_profile_id, profile, error);
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int
+hns3_tm_shaper_profile_del_wrap(struct rte_eth_dev *dev,
+				uint32_t shaper_profile_id,
+				struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_tm_shaper_profile_del(dev, shaper_profile_id, error);
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int
+hns3_tm_node_add_wrap(struct rte_eth_dev *dev, uint32_t node_id,
+		      uint32_t parent_node_id, uint32_t priority,
+		      uint32_t weight, uint32_t level_id,
+		      struct rte_tm_node_params *params,
+		      struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_tm_node_add(dev, node_id, parent_node_id, priority,
+			       weight, level_id, params, error);
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int
+hns3_tm_node_delete_wrap(struct rte_eth_dev *dev,
+			 uint32_t node_id,
+			 struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_tm_node_delete(dev, node_id, error);
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int
+hns3_tm_node_type_get_wrap(struct rte_eth_dev *dev,
+			   uint32_t node_id,
+			   int *is_leaf,
+			   struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_tm_node_type_get(dev, node_id, is_leaf, error);
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int
+hns3_tm_level_capabilities_get_wrap(struct rte_eth_dev *dev,
+				    uint32_t level_id,
+				    struct rte_tm_level_capabilities *cap,
+				    struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_tm_level_capabilities_get(dev, level_id, cap, error);
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int
+hns3_tm_node_capabilities_get_wrap(struct rte_eth_dev *dev,
+				   uint32_t node_id,
+				   struct rte_tm_node_capabilities *cap,
+				   struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_tm_node_capabilities_get(dev, node_id, cap, error);
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int
+hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
+			      int clear_on_fail,
+			      struct rte_tm_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&hw->lock);
+	ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
 static int
 hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
 				uint32_t node_id,
@@ -1213,14 +1340,14 @@ hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
 	return ret;
 }
 
 static const struct rte_tm_ops hns3_tm_ops = {
-	.capabilities_get = hns3_tm_capabilities_get,
-	.shaper_profile_add = hns3_tm_shaper_profile_add,
-	.shaper_profile_delete = hns3_tm_shaper_profile_del,
-	.node_add = hns3_tm_node_add,
-	.node_delete = hns3_tm_node_delete,
-	.node_type_get = hns3_tm_node_type_get,
-	.level_capabilities_get = hns3_tm_level_capabilities_get,
-	.node_capabilities_get = hns3_tm_node_capabilities_get,
+	.capabilities_get = hns3_tm_capabilities_get_wrap,
+	.shaper_profile_add = hns3_tm_shaper_profile_add_wrap,
+	.shaper_profile_delete = hns3_tm_shaper_profile_del_wrap,
+	.node_add = hns3_tm_node_add_wrap,
+	.node_delete = hns3_tm_node_delete_wrap,
+	.node_type_get = hns3_tm_node_type_get_wrap,
+	.level_capabilities_get = hns3_tm_level_capabilities_get_wrap,
+	.node_capabilities_get = hns3_tm_node_capabilities_get_wrap,
 	.hierarchy_commit = hns3_tm_hierarchy_commit_wrap,
 	.node_shaper_update = hns3_tm_node_shaper_update_wrap,
 };
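For completeness, a usage-side sketch (illustrative only; the port ID,
profile ID and rate below are hypothetical) of what the wrappers make
safe: each _wrap callback takes hw->lock, the same lock held by the
reset recover process and now by rte_eth_dev_priv_dump(), so all three
access paths listed in the commit message serialize on a single lock.
An application may therefore issue the call below while another thread
(e.g. the proc-info application) dumps the device's private info:

    #include <rte_tm.h>

    /* Add a 100 Mbit/s shaper profile; the driver serializes this
     * against a concurrent private-info dump on hw->lock. */
    static int
    add_shaper_profile(uint16_t port_id)
    {
            struct rte_tm_error err;
            struct rte_tm_shaper_params sp = {
                    /* token-bucket rate is in bytes per second */
                    .peak = { .rate = 100 * 1000 * 1000 / 8, .size = 4096 },
            };

            return rte_tm_shaper_profile_add(port_id, 1 /* profile id */,
                                             &sp, &err);
    }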