Diffstat (limited to 'mm/sparse.c')
 -rw-r--r--  mm/sparse.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index e06f514fe04f..a2183cb5d524 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -83,6 +83,8 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 		return -EEXIST;
 
 	section = sparse_index_alloc(nid);
+	if (!section)
+		return -ENOMEM;
 	/*
 	 * This lock keeps two different sections from
 	 * reallocating for the same index
@@ -389,9 +391,17 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 	 * no locking for this, because it does its own
 	 * plus, it does a kmalloc
 	 */
-	sparse_index_init(section_nr, pgdat->node_id);
+	ret = sparse_index_init(section_nr, pgdat->node_id);
+	if (ret < 0 && ret != -EEXIST)
+		return ret;
 	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
+	if (!memmap)
+		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
+	if (!usemap) {
+		__kfree_section_memmap(memmap, nr_pages);
+		return -ENOMEM;
+	}
 
 	pgdat_resize_lock(pgdat, &flags);
@@ -401,18 +411,16 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 		goto out;
 	}
 
-	if (!usemap) {
-		ret = -ENOMEM;
-		goto out;
-	}
 	ms->section_mem_map |= SECTION_MARKED_PRESENT;
 
 	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);
 
 out:
 	pgdat_resize_unlock(pgdat, &flags);
-	if (ret <= 0)
+	if (ret <= 0) {
+		kfree(usemap);
 		__kfree_section_memmap(memmap, nr_pages);
+	}
 	return ret;
 }
 #endif
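
For context, the error-handling shape this patch gives sparse_add_one_section() can be illustrated outside the kernel. The sketch below is a minimal, self-contained userspace approximation, not kernel code: alloc_memmap(), alloc_usemap() and register_section() are hypothetical stand-ins for kmalloc_section_memmap(), __kmalloc_section_usemap() and the locked registration step. It shows the same pattern the patch enforces: check every allocation, undo earlier allocations when a later one fails, and free both buffers when registration fails or the section already exists.

/*
 * Minimal userspace sketch (NOT kernel code) of the error-handling
 * pattern the patch establishes in sparse_add_one_section().
 * alloc_memmap(), alloc_usemap() and register_section() are
 * hypothetical stand-ins for the real kernel helpers.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_memmap(void) { return malloc(4096); }	/* ~ kmalloc_section_memmap() */
static void *alloc_usemap(void) { return malloc(64); }		/* ~ __kmalloc_section_usemap() */

/* Pretend the section is already present, as the -EEXIST path does. */
static int register_section(void *memmap, void *usemap)
{
	(void)memmap;
	(void)usemap;
	return -EEXIST;
}

static int add_one_section(void)
{
	void *memmap, *usemap;
	int ret;

	memmap = alloc_memmap();
	if (!memmap)			/* first allocation failed: nothing to undo */
		return -ENOMEM;

	usemap = alloc_usemap();
	if (!usemap) {			/* second allocation failed: roll back the first */
		free(memmap);
		return -ENOMEM;
	}

	ret = register_section(memmap, usemap);
	if (ret <= 0) {			/* error or already present: free both buffers */
		free(usemap);
		free(memmap);
	}
	return ret;
}

int main(void)
{
	printf("add_one_section() = %d\n", add_one_section());
	return 0;
}

The cleanup branch fires on ret <= 0 rather than ret < 0 because, in the kernel, a successful sparse_init_one_section() reports success as a positive return value; the sketch mirrors that convention.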