From patchwork Fri Jul 12 03:09:21 2019
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Ruifeng Wang
X-Patchwork-Id: 56363
X-Patchwork-Delegate: thomas@monjalon.net
From: Ruifeng Wang <ruifeng.wang@arm.com>
To: vladimir.medvedkin@intel.com, bruce.richardson@intel.com
Cc: dev@dpdk.org, honnappa.nagarahalli@arm.com, gavin.hu@arm.com, nd@arm.com,
 Ruifeng Wang <ruifeng.wang@arm.com>
Date: Fri, 12 Jul 2019 11:09:21 +0800
Message-Id: <20190712030923.37832-5-ruifeng.wang@arm.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190712030923.37832-1-ruifeng.wang@arm.com>
References: <20190605055451.30473-1-ruifeng.wang@arm.com>
 <20190712030923.37832-1-ruifeng.wang@arm.com>
Subject: [dpdk-dev] [PATCH v5 4/6] lib/lpm: use atomic store to avoid partial update

The compiler could generate non-atomic stores when a whole table entry
is updated. This may cause an incorrect next hop to be returned if the
byte holding the valid flag is written before the byte holding the next
hop. Changed to use an atomic store to update the whole table entry.

Suggested-by: Medvedkin Vladimir <vladimir.medvedkin@intel.com>
Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
---
 lib/librte_lpm/rte_lpm.c | 69 ++++++++++++++++++++++++++++++++--------
 1 file changed, 55 insertions(+), 14 deletions(-)
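For context (a note below the "---" marker, so it is not applied with the
patch): a minimal standalone sketch of the store-tearing hazard this patch
closes. Everything in the sketch is hypothetical; struct entry only mimics
the 4-byte rte_lpm_tbl_entry, which data-plane lookups read lock-free
while the control plane rewrites it, and neither helper exists in
rte_lpm.c.

#include <stdint.h>

/* Mimics rte_lpm_tbl_entry: a 24-bit next hop plus flag and depth
 * bits, packed into one 32-bit word.
 */
struct entry {
	uint32_t next_hop    : 24;
	uint32_t valid       :  1;
	uint32_t valid_group :  1;
	uint32_t depth       :  6;
};

/* Plain assignment: nothing forbids the compiler from emitting
 * several partial stores, so a concurrent reader may observe
 * valid == 1 combined with a stale or half-written next_hop.
 */
static void entry_update_may_tear(struct entry *slot, struct entry v)
{
	*slot = v;
}

/* Atomic store: the whole 32-bit entry is written in one shot, so a
 * reader sees either the old entry or the new one, never a mixture.
 * __ATOMIC_RELAXED suffices because only single-copy atomicity is
 * needed here, not ordering against other memory accesses.
 */
static void entry_update_atomic(struct entry *slot, struct entry v)
{
	__atomic_store(slot, &v, __ATOMIC_RELAXED);
}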
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 0a94630db..d35d64448 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -654,11 +654,19 @@ tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
 		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
 		/* If a free tbl8 group is found clean it and set as VALID. */
 		if (!tbl8_entry->valid_group) {
+			struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+				.valid = INVALID,
+				.depth = 0,
+				.valid_group = VALID,
+			};
+			new_tbl8_entry.next_hop = 0;
+
 			memset(&tbl8_entry[0], 0,
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
 					sizeof(tbl8_entry[0]));
 
-			tbl8_entry->valid_group = VALID;
+			__atomic_store(tbl8_entry, &new_tbl8_entry,
+					__ATOMIC_RELAXED);
 
 			/* Return group index for allocated tbl8 group. */
 			return group_idx;
@@ -680,11 +688,19 @@ tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
 		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
 		/* If a free tbl8 group is found clean it and set as VALID. */
 		if (!tbl8_entry->valid_group) {
+			struct rte_lpm_tbl_entry new_tbl8_entry = {
+				.next_hop = 0,
+				.valid = INVALID,
+				.depth = 0,
+				.valid_group = VALID,
+			};
+
 			memset(&tbl8_entry[0], 0,
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
 					sizeof(tbl8_entry[0]));
 
-			tbl8_entry->valid_group = VALID;
+			__atomic_store(tbl8_entry, &new_tbl8_entry,
+					__ATOMIC_RELAXED);
 
 			/* Return group index for allocated tbl8 group. */
 			return group_idx;
@@ -699,14 +715,25 @@ static void
 tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
 {
 	/* Set tbl8 group invalid*/
-	tbl8[tbl8_group_start].valid_group = INVALID;
+	struct rte_lpm_tbl_entry_v20 zero_tbl8_entry = {
+		.valid = INVALID,
+		.depth = 0,
+		.valid_group = INVALID,
+	};
+	zero_tbl8_entry.next_hop = 0;
+
+	__atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
+			__ATOMIC_RELAXED);
 }
 
 static void
 tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
 {
 	/* Set tbl8 group invalid*/
-	tbl8[tbl8_group_start].valid_group = INVALID;
+	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
+
+	__atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
+			__ATOMIC_RELAXED);
 }
 
 static __rte_noinline int32_t
@@ -767,7 +794,9 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
 					 * Setting tbl8 entry in one go to avoid
 					 * race conditions
 					 */
-					lpm->tbl8[j] = new_tbl8_entry;
+					__atomic_store(&lpm->tbl8[j],
+						&new_tbl8_entry,
+						__ATOMIC_RELAXED);
 
 					continue;
 				}
@@ -837,7 +866,9 @@ add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 					 * Setting tbl8 entry in one go to avoid
 					 * race conditions
 					 */
-					lpm->tbl8[j] = new_tbl8_entry;
+					__atomic_store(&lpm->tbl8[j],
+						&new_tbl8_entry,
+						__ATOMIC_RELAXED);
 
 					continue;
 				}
@@ -965,7 +996,8 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
 				 * Setting tbl8 entry in one go to avoid race
 				 * condition
 				 */
-				lpm->tbl8[i] = new_tbl8_entry;
+				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+						__ATOMIC_RELAXED);
 
 				continue;
 			}
@@ -1100,7 +1132,8 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 				 * Setting tbl8 entry in one go to avoid race
 				 * condition
 				 */
-				lpm->tbl8[i] = new_tbl8_entry;
+				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+						__ATOMIC_RELAXED);
 
 				continue;
 			}
@@ -1393,7 +1426,9 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
 
 				if (lpm->tbl8[j].depth <= depth)
-					lpm->tbl8[j] = new_tbl8_entry;
+					__atomic_store(&lpm->tbl8[j],
+						&new_tbl8_entry,
+						__ATOMIC_RELAXED);
 			}
 		}
 	}
@@ -1490,7 +1525,9 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
 
 				if (lpm->tbl8[j].depth <= depth)
-					lpm->tbl8[j] = new_tbl8_entry;
+					__atomic_store(&lpm->tbl8[j],
+						&new_tbl8_entry,
+						__ATOMIC_RELAXED);
 			}
 		}
 	}
@@ -1646,7 +1683,8 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
 		 */
 		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
 			if (lpm->tbl8[i].depth <= depth)
-				lpm->tbl8[i] = new_tbl8_entry;
+				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+						__ATOMIC_RELAXED);
 		}
 	}
 
@@ -1677,7 +1715,8 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
 		/* Set tbl24 before freeing tbl8 to avoid race condition.
 		 * Prevent the free of the tbl8 group from hoisting.
 		 */
-		lpm->tbl24[tbl24_index] = new_tbl24_entry;
+		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+				__ATOMIC_RELAXED);
 		__atomic_thread_fence(__ATOMIC_RELEASE);
 		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
 	}
@@ -1730,7 +1769,8 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
 		 */
 		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
 			if (lpm->tbl8[i].depth <= depth)
-				lpm->tbl8[i] = new_tbl8_entry;
+				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+						__ATOMIC_RELAXED);
 		}
 	}
 
@@ -1761,7 +1801,8 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
 		/* Set tbl24 before freeing tbl8 to avoid race condition.
 		 * Prevent the free of the tbl8 group from hoisting.
 		 */
-		lpm->tbl24[tbl24_index] = new_tbl24_entry;
+		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+				__ATOMIC_RELAXED);
 		__atomic_thread_fence(__ATOMIC_RELEASE);
 		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
 	}
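
For context (again not part of the patch, reusing the hypothetical struct
entry from the sketch above): why delete_depth_big_* pairs the relaxed
tbl24 store with a release fence before freeing the tbl8 group. Without
the fence, the store marking the group free could be hoisted above the
tbl24 update, letting a concurrent reader follow a still-valid tbl24
entry into an already recycled group. The helper below is hypothetical;
its name does not exist in rte_lpm.c.

static void tbl24_detach_then_free(struct entry *tbl24_slot,
		struct entry new_tbl24_entry,
		struct entry *tbl8, uint32_t tbl8_group_start)
{
	struct entry zero_entry = {0};

	/* 1. Unhook the group: readers now resolve via tbl24 alone. */
	__atomic_store(tbl24_slot, &new_tbl24_entry, __ATOMIC_RELAXED);

	/* 2. Release fence: orders the store above before the store
	 *    below, so neither the compiler nor the CPU can hoist the
	 *    free past the detach.
	 */
	__atomic_thread_fence(__ATOMIC_RELEASE);

	/* 3. Only now may the group be invalidated and recycled. */
	__atomic_store(&tbl8[tbl8_group_start], &zero_entry,
			__ATOMIC_RELAXED);
}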