When a tbl8 group is being attached to a tbl24 entry, a concurrent
lookup might fail even though the entry is configured in the table.
For example, consider an LPM table configured with 10.10.10.1/24.
When a new entry 10.10.10.32/28 is added, a new tbl8 group is
allocated and the tbl24 entry is changed to point to that tbl8
group. If the tbl24 entry is written before the tbl8 group entries
are updated, a lookup on 10.10.10.9 can return failure.

Correct memory orderings are required to ensure that the store to
the tbl24 entry does not become visible to readers before the stores
to the tbl8 group entries complete.
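
To make the required ordering concrete, the sketch below shows the
writer/reader pairing under simplified stand-in types. The entry
layout and the attach_group()/lookup_ip() helpers are illustrative
only, not the rte_lpm API:

    #include <stdint.h>

    /* Illustrative 4-byte entry; not the real rte_lpm layout. */
    struct entry {
            uint32_t next_hop : 24;
            uint32_t ext      : 1;  /* entry points to a tbl8 group */
            uint32_t valid    : 1;
    };

    #define GROUP_SIZE 256

    /* Writer: populate the tbl8 group entries first, then publish the
     * group with a release store to the tbl24 entry. The release
     * ordering keeps the tbl8 stores from being reordered after the
     * tbl24 store.
     */
    static void
    attach_group(struct entry *tbl24, uint32_t tbl24_index,
                 struct entry *group, struct entry new_tbl24_entry)
    {
            for (int i = 0; i < GROUP_SIZE; i++)
                    group[i] = (struct entry){ .next_hop = 100,
                                               .valid = 1 };

            __atomic_store(&tbl24[tbl24_index], &new_tbl24_entry,
                           __ATOMIC_RELEASE);
    }

    /* Reader: an acquire load of the tbl24 entry pairs with the
     * writer's release store, so a fully built group is observed
     * whenever the new entry is observed. (The in-tree lookup computes
     * the tbl8 index from the loaded tbl24 value, so the dependent
     * load is also ordered by the address dependency on architectures
     * such as arm64.)
     */
    static uint32_t
    lookup_ip(const struct entry *tbl24, const struct entry *tbl8,
              uint32_t ip)
    {
            struct entry e;

            __atomic_load(&tbl24[ip >> 8], &e, __ATOMIC_ACQUIRE);
            if (e.valid && e.ext)
                    return tbl8[e.next_hop * GROUP_SIZE +
                                (ip & 0xff)].next_hop;

            return e.valid ? e.next_hop : UINT32_MAX;
    }
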
Suggested-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
---
lib/librte_lpm/rte_lpm.c | 31 ++++++++++++++++++++++++-------
1 file changed, 24 insertions(+), 7 deletions(-)
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -737,7 +737,8 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
/* Setting tbl24 entry in one go to avoid race
* conditions
*/
- lpm->tbl24[i] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
continue;
}
@@ -892,7 +893,8 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
.depth = 0,
};
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} /* If valid entry but not extended calculate the index into Table8. */
else if (lpm->tbl24[tbl24_index].valid_group == 0) {
@@ -938,7 +940,8 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
.depth = 0,
};
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} else { /*
* If it is valid, extended entry calculate the index into tbl8.
@@ -1320,7 +1323,14 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth) {
- lpm->tbl24[i].valid = INVALID;
+ struct rte_lpm_tbl_entry_v20 zero_tbl_entry = {
+ .valid = INVALID,
+ .depth = 0,
+ .valid_group = 0,
+ };
+ zero_tbl_entry.next_hop = 0;
+ __atomic_store(&lpm->tbl24[i], &zero_tbl_entry,
+ __ATOMIC_RELEASE);
} else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
@@ -1365,7 +1375,8 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth) {
- lpm->tbl24[i] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
@@ -1647,8 +1658,11 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
if (tbl8_recycle_index == -EINVAL) {
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ /* Set tbl24 before freeing tbl8 to avoid race condition.
+ * Prevent the free of the tbl8 group from being hoisted.
+ */
lpm->tbl24[tbl24_index].valid = 0;
+ __atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v20(lpm->tbl8, tbl8_group_start);
} else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
@@ -1659,8 +1673,11 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
.depth = lpm->tbl8[tbl8_recycle_index].depth,
};
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ /* Set tbl24 before freeing tbl8 to avoid race condition.
+ * Prevent the free of the tbl8 group from being hoisted.
+ */
lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ __atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v20(lpm->tbl8, tbl8_group_start);
}
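
On the delete path in the last two hunks, the release fence plays the
same role for the reverse order: the tbl24 entry must be invalidated
(or redirected) before the tbl8 group is returned to the allocator.
A hedged sketch of that pattern follows, with free_group() standing
in for tbl8_free_v20() and the illustrative entry type repeated for
completeness:

    #include <stdint.h>

    struct entry {
            uint32_t next_hop : 24;
            uint32_t ext      : 1;
            uint32_t valid    : 1;
    };

    /* Hypothetical stand-in for tbl8_free_v20(): invalidates the
     * group entries so the allocator can hand the group out again.
     */
    static void
    free_group(struct entry *group, int group_size)
    {
            for (int i = 0; i < group_size; i++)
                    group[i] = (struct entry){ .valid = 0 };
    }

    /* Invalidate the tbl24 entry, then free the tbl8 group. The
     * release fence keeps the stores in free_group() (and any later
     * reuse of the group) from being hoisted above the tbl24 store,
     * so the invalidation becomes visible no later than the stores
     * that recycle the group.
     */
    static void
    detach_group(struct entry *tbl24, uint32_t tbl24_index,
                 struct entry *group, int group_size)
    {
            tbl24[tbl24_index].valid = 0;
            __atomic_thread_fence(__ATOMIC_RELEASE);
            free_group(group, group_size);
    }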