Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/mm/numa.c   12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index cf81049e1e51..213664c9cdca 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -840,8 +840,16 @@ static void __init *careful_allocation(int nid, unsigned long size,
 		      size, nid);
 
 	/*
-	 * If the memory came from a previously allocated node, we must
-	 * retry with the bootmem allocator.
+	 * We initialize the nodes in numeric order: 0, 1, 2...
+	 * and hand over control from the LMB allocator to the
+	 * bootmem allocator. If this function is called for
+	 * node 5, then we know that all nodes <5 are using the
+	 * bootmem allocator instead of the LMB allocator.
+	 *
+	 * So, check the nid from which this allocation came
+	 * and double check to see if we need to use bootmem
+	 * instead of the LMB. We don't free the LMB memory
+	 * since it would be useless.
 	 */
 	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
 	if (new_nid < nid) {
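
The new comment documents an ordering invariant rather than new logic: nodes are brought up in numeric order, and each node's memory passes from the LMB allocator to bootmem once that node is initialized, so an allocation for node N that lands on a lower-numbered node must be redone with bootmem. The user-space sketch below is only a rough illustration of that decision; toy_lmb_alloc(), careful_allocation_sim() and NR_NODES are invented for this example and are not the kernel API.

/*
 * Standalone simulation of the handover rule described in the comment
 * above; this is NOT the kernel code. toy_lmb_alloc() is an invented
 * stand-in that merely reports which node an allocation landed on.
 */
#include <stdio.h>

#define NR_NODES 8

/*
 * Pretend nodes above 2 are out of local memory, so their allocations
 * fall back to node 2 (a node that was brought up earlier).
 */
static int toy_lmb_alloc(int preferred_nid)
{
	return preferred_nid > 2 ? 2 : preferred_nid;
}

static void careful_allocation_sim(int nid)
{
	int new_nid = toy_lmb_alloc(nid);

	/*
	 * Nodes < nid have already been handed over to bootmem, so if the
	 * memory landed on one of them, redo the allocation with bootmem.
	 * This mirrors the `new_nid < nid` check in careful_allocation().
	 */
	if (new_nid < nid)
		printf("node %d: memory came from node %d -> reallocate with bootmem\n",
		       nid, new_nid);
	else
		printf("node %d: memory stayed on node %d -> keep the LMB allocation\n",
		       nid, new_nid);
}

int main(void)
{
	for (int nid = 0; nid < NR_NODES; nid++)
		careful_allocation_sim(nid);
	return 0;
}

Built with a plain `gcc -Wall` run, the simulation keeps the LMB result for nodes 0-2 and takes the bootmem path for nodes 3-7, which is the `new_nid < nid` case the patch comment is explaining.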