@@ -654,11 +654,19 @@ tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found, clean it and set it as VALID. */
if (!tbl8_entry->valid_group) {
+ struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+ .valid = INVALID,
+ .depth = 0,
+ .valid_group = VALID,
+ };
+ new_tbl8_entry.next_hop = 0;
+
memset(&tbl8_entry[0], 0,
RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
sizeof(tbl8_entry[0]));
- tbl8_entry->valid_group = VALID;
+ __atomic_store(tbl8_entry, &new_tbl8_entry,
+ __ATOMIC_RELAXED);
/* Return group index for allocated tbl8 group. */
return group_idx;
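(Note: unlike the v1604 variant below, v20 zeroes next_hop with a separate plain assignment rather than in the designated initializer. In the v20 entry, next_hop lives inside an unnamed union, which not every supported compiler accepts as a designated-initializer target; presumably that is the reason for the split. Either way, the entry is fully built before the atomic store publishes it.)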
@@ -680,11 +688,19 @@ tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found, clean it and set it as VALID. */
if (!tbl8_entry->valid_group) {
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
+ .next_hop = 0,
+ .valid = INVALID,
+ .depth = 0,
+ .valid_group = VALID,
+ };
+
memset(&tbl8_entry[0], 0,
RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
sizeof(tbl8_entry[0]));
- tbl8_entry->valid_group = VALID;
+ __atomic_store(tbl8_entry, &new_tbl8_entry,
+ __ATOMIC_RELAXED);
/* Return group index for allocated tbl8 group. */
return group_idx;
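Both allocators now compose the fully initialized head entry on the stack and publish it with a single __atomic_store, rather than flipping valid_group through the bitfield. A plain bitfield assignment is a read-modify-write of the containing word that the compiler may legally split, so a concurrent reader could observe a torn or half-updated entry. A minimal sketch of the idiom, assuming a simplified 4-byte entry that mirrors the v1604 layout (the helper names are illustrative, not the DPDK API):

#include <stdint.h>

struct tbl_entry {			/* simplified stand-in for the LPM entry */
	uint32_t next_hop : 24;
	uint32_t valid : 1, valid_group : 1, depth : 6;
};

/* Writer: build the complete entry locally, then publish it with one
 * single-copy-atomic store; readers see the whole old value or the
 * whole new value, never a mixture of the two.
 */
static void
publish(struct tbl_entry *slot, struct tbl_entry val)
{
	__atomic_store(slot, &val, __ATOMIC_RELAXED);
}

/* Reader: snapshot the entry once, then decide on the local copy. */
static struct tbl_entry
snapshot(struct tbl_entry *slot)
{
	struct tbl_entry e;

	__atomic_load(slot, &e, __ATOMIC_RELAXED);
	return e;
}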
@@ -699,14 +715,25 @@ static void
tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
{
/* Set tbl8 group invalid. */
- tbl8[tbl8_group_start].valid_group = INVALID;
+ struct rte_lpm_tbl_entry_v20 zero_tbl8_entry = {
+ .valid = INVALID,
+ .depth = 0,
+ .valid_group = INVALID,
+ };
+ zero_tbl8_entry.next_hop = 0;
+
+ __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
+ __ATOMIC_RELAXED);
}
static void
tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
/* Set tbl8 group invalid. */
- tbl8[tbl8_group_start].valid_group = INVALID;
+ struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
+
+ __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
+ __ATOMIC_RELAXED);
}
static __rte_noinline int32_t
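The free paths get the same treatment. tbl8[i].valid_group = INVALID compiles to a load of the containing word, a one-bit clear, and a store back, and nothing obliges the compiler to keep those accesses whole or in that shape. Storing a fully zeroed local entry through __atomic_store turns the invalidation into one atomic word write. A sketch, reusing struct tbl_entry from the allocation sketch above:

/* Invalidate the head entry of a group in one shot; readers can never
 * catch the bit flip mid-flight because the whole word is replaced.
 */
static void
invalidate_group(struct tbl_entry *group_start)
{
	struct tbl_entry zero = {0};

	__atomic_store(group_start, &zero, __ATOMIC_RELAXED);
}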
@@ -767,7 +794,9 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
* Setting tbl8 entry in one go to avoid
* race conditions
*/
- lpm->tbl8[j] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[j],
+ &new_tbl8_entry,
+ __ATOMIC_RELAXED);
continue;
}
@@ -837,7 +866,9 @@ add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
* Setting tbl8 entry in one go to avoid
* race conditions
*/
- lpm->tbl8[j] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[j],
+ &new_tbl8_entry,
+ __ATOMIC_RELAXED);
continue;
}
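The "in one go" comments describe the reader on the far side of these stores: a lookup reads a tbl8 entry exactly once and must see either the pre-update or the post-update route, never a hybrid such as valid == 1 paired with a stale next_hop. A hypothetical reader path (simplified; the real lookup in rte_lpm.h differs, and the miss sentinel here is invented):

#include <stdint.h>
/* struct tbl_entry as in the allocation sketch;
 * 256 == RTE_LPM_TBL8_GROUP_NUM_ENTRIES.
 */
static uint32_t
lookup_tbl8(struct tbl_entry *tbl8, uint32_t group_idx, uint8_t last_byte)
{
	struct tbl_entry e;

	/* One atomic snapshot of the entry... */
	__atomic_load(&tbl8[group_idx * 256 + last_byte], &e,
			__ATOMIC_RELAXED);
	/* ...then every decision is made on the local copy. */
	return e.valid ? e.next_hop : UINT32_MAX;
}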
@@ -965,7 +996,8 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
* Setting tbl8 entry in one go to avoid race
* conditions
*/
- lpm->tbl8[i] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
continue;
}
@@ -1100,7 +1132,8 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
* Setting tbl8 entry in one go to avoid race
* conditions
*/
- lpm->tbl8[i] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
continue;
}
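In add_depth_big the same rule covers entries inside a group that may already be reachable from tbl24. When a brand-new group is linked in, the ordering burden shifts to the tbl24 store: elsewhere in this series (not among the hunks shown) that store uses __ATOMIC_RELEASE, so the group's contents become visible no later than the link itself. An illustrative rendering of that publish order, under the same simplified types (names are not the DPDK API):

/* Fill first, link second. The RELEASE store keeps the relaxed fills
 * from sinking below the link, so a reader that observes the link
 * (via a matching acquire, or the address dependency through the
 * group index) also observes an initialized group.
 */
static void
link_new_group(struct tbl_entry *tbl24_slot, struct tbl_entry *group,
		struct tbl_entry fill, struct tbl_entry link)
{
	unsigned int i;

	for (i = 0; i < 256; i++)
		__atomic_store(&group[i], &fill, __ATOMIC_RELAXED);

	__atomic_store(tbl24_slot, &link, __ATOMIC_RELEASE);
}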
@@ -1393,7 +1426,9 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
if (lpm->tbl8[j].depth <= depth)
- lpm->tbl8[j] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[j],
+ &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
}
}
@@ -1490,7 +1525,9 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
if (lpm->tbl8[j].depth <= depth)
- lpm->tbl8[j] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[j],
+ &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
}
}
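Deletion rolls every entry covered by the removed rule back to the next-best rule, again one whole-entry store at a time. __ATOMIC_RELAXED suffices for these in-place updates because each store publishes a complete, self-consistent route, and a lookup is correct whether it observes the old entry or the new one; nothing else has to be ordered around the store. All of this quietly assumes the entry is one naturally aligned machine word, which is worth pinning down at build time (illustrative check; DPDK would express it with RTE_BUILD_BUG_ON):

#include <stdint.h>
/* struct tbl_entry as in the allocation sketch. */
_Static_assert(sizeof(struct tbl_entry) == sizeof(uint32_t),
		"entry must be one 32-bit word for single-copy-atomic stores");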
@@ -1646,7 +1683,8 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
*/
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
if (lpm->tbl8[i].depth <= depth)
- lpm->tbl8[i] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
}
@@ -1677,7 +1715,8 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
/* Set tbl24 before freeing tbl8 to avoid a race condition.
* Prevent the free of the tbl8 group from being hoisted above it.
*/
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELAXED);
__atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v20(lpm->tbl8, tbl8_group_start);
}
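The one explicit fence in the patch sits here, because on delete the order is unlink first, free second. If the group invalidation were hoisted above the tbl24 store, a reader still holding the old tbl24 entry could chase it into a group already marked free, which a concurrent add may have re-allocated and begun overwriting. __atomic_thread_fence(__ATOMIC_RELEASE) forbids moving any later store ahead of the stores before the fence. Condensed into a sketch (names illustrative, types as in the sketches above):

static void
unlink_then_free(struct tbl_entry *tbl24_slot, struct tbl_entry *group)
{
	struct tbl_entry zero = {0};

	/* 1. Unlink: tbl24 no longer points into the group. */
	__atomic_store(tbl24_slot, &zero, __ATOMIC_RELAXED);
	/* 2. No store below may be reordered above the unlink. */
	__atomic_thread_fence(__ATOMIC_RELEASE);
	/* 3. Only now is the group marked free for reuse. */
	__atomic_store(group, &zero, __ATOMIC_RELAXED);
}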
@@ -1730,7 +1769,8 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
*/
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
if (lpm->tbl8[i].depth <= depth)
- lpm->tbl8[i] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
}
@@ -1761,7 +1801,8 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
/* Set tbl24 before freeing tbl8 to avoid a race condition.
* Prevent the free of the tbl8 group from being hoisted above it.
*/
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELAXED);
__atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
}
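For completeness, the reader these orderings protect looks roughly like the sketch below (heavily simplified; the real lookup lives in rte_lpm.h and differs in detail). In the real entry layout, valid_group doubles as the tbl24 "extended entry" flag, and next_hop then carries the tbl8 group index. On most ISAs the address dependency from the tbl24 entry to the tbl8 index already orders the two loads; a portable rendering puts acquire on the first:

#include <stdint.h>
/* struct tbl_entry and the 256-entry group size as in the sketches above. */
static uint32_t
lookup(struct tbl_entry *tbl24, struct tbl_entry *tbl8, uint32_t ip)
{
	struct tbl_entry e;

	__atomic_load(&tbl24[ip >> 8], &e, __ATOMIC_ACQUIRE);
	if (e.valid_group) {
		/* Extended entry: next_hop holds the tbl8 group index. */
		__atomic_load(&tbl8[(uint32_t)e.next_hop * 256 + (ip & 0xff)],
				&e, __ATOMIC_RELAXED);
	}
	return e.valid ? e.next_hop : UINT32_MAX;
}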