@@ -126,6 +126,10 @@ eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
/* skip first iteration */
ms = rte_fbarray_get(&msl->memseg_arr, start_seg);
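+ /* a failed lookup means the range cannot be verified as contiguous */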
+ if (ms == NULL)
+ return false;
+
cur = ms->iova;
expected = cur + pgsz;
@@ -137,7 +141,8 @@ eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
cur_seg++, expected += pgsz) {
ms = rte_fbarray_get(&msl->memseg_arr, cur_seg);
- if (ms->iova != expected)
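+ /* a missing memseg must fail the check, not pass it */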
+ if ((ms == NULL) || (ms->iova != expected))
return false;
}
}
@@ -63,6 +63,9 @@ malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align)
cur_page = RTE_PTR_ALIGN_FLOOR(contig_seg_start, page_sz);
ms = rte_mem_virt2memseg(cur_page, elem->msl);
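+ /* no memseg backs this page, so there is no IOVA-contiguous space */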
+ if (ms == NULL)
+ return 0;
/* do first iteration outside the loop */
page_end = RTE_PTR_ADD(cur_page, page_sz);
@@ -91,9 +94,12 @@ malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align)
* we're not blowing past data end.
*/
ms = rte_mem_virt2memseg(contig_seg_start, elem->msl);
- cur_page = ms->addr;
- /* don't trigger another recalculation */
- expected_iova = ms->iova;
+ if (ms == NULL)
+ return 0;
+
+ cur_page = ms->addr;
+ /* don't trigger another recalculation */
+ expected_iova = ms->iova;
continue;
}
/* cur_seg_end ends on a page boundary or on data end. if we're
@@ -430,6 +436,10 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
{
struct malloc_elem *new_elem = elem_start_pt(elem, size, align, bound,
contig);
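+ /* the requested size, alignment and bound do not fit in this element */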
+ if (new_elem == NULL)
+ return NULL;
+
const size_t old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem;
const size_t trailer_size = elem->size - old_elem_size - size -
MALLOC_ELEM_OVERHEAD;
@@ -97,6 +97,9 @@ malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
malloc_elem_insert(elem);
elem = malloc_elem_join_adjacent_free(elem);
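+ /* joining failed, so report the whole add as failed */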
+ if (elem == NULL)
+ return NULL;
malloc_elem_free_list_insert(elem);
@@ -321,6 +324,9 @@ alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
map_addr = ms[0]->addr;
msl = rte_mem_virt2memseg_list(map_addr);
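+ /* freshly allocated pages must belong to a known memseg list */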
+ if (msl == NULL)
+ return NULL;
/* check if we wanted contiguous memory but didn't get it */
if (contig && !eal_memalloc_is_contig(msl, map_addr, alloc_sz)) {
@@ -897,6 +903,10 @@ malloc_heap_free(struct malloc_elem *elem)
/* anything after this is a bonus */
ret = 0;
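+ /* the free itself already succeeded; without an element, skip the rest */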
+ if (elem == NULL)
+ goto free_unlock;
+
/* ...of which we can't avail if we are in legacy mode, or if this is an
* externally allocated segment.
*/
@@ -935,7 +945,8 @@ malloc_heap_free(struct malloc_elem *elem)
const struct rte_memseg *tmp =
rte_mem_virt2memseg(aligned_start, msl);
- if (tmp->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
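+ /* if the segment cannot be resolved, leave the start page in place */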
+ if ((tmp != NULL) && (tmp->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE)) {
/* this is an unfreeable segment, so move start */
aligned_start = RTE_PTR_ADD(tmp->addr, tmp->len);
}