author     Andi Kleen <ak@suse.de>      2008-01-30 13:33:17 +0100
committer  Ingo Molnar <mingo@elte.hu>  2008-01-30 13:33:17 +0100
commit     751752789162fde69474edfa15935d0a77c0bc17 (patch)
tree       43eef77784989bc25979da1cc128e31fc46b3cea /arch/x86/mm/init_64.c
parent     edcd81199dbad5db11ae91b507cec1d46dd94a49 (diff)
x86: replace hard coded reservations in 64-bit early boot code with dynamic table
On x86-64 there are several memory allocations before bootmem. To avoid them stomping on each other they used to be all hard coded in bad_addr(). Replace this with an array that is filled as needed.

This cleans up the code considerably and allows its use to be expanded.

Cc: peterz@infradead.org
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
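For illustration only, here is a minimal userspace sketch of the kind of dynamically filled reservation table the commit message describes: a fixed-size array that early-boot callers append to, with an overlap check instead of a hard coded list of ranges. The names early_res and MAX_EARLY_RES, the table size, and the error handling are assumptions for this sketch; the real table lives in the x86 early-boot (e820) code, not in init_64.c.

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the early reservation table: a fixed-size
 * array filled as needed instead of ranges hard coded into the allocator. */
#define MAX_EARLY_RES 16

struct early_res {
	unsigned long start, end;	/* physical range [start, end) */
};

static struct early_res early_res[MAX_EARLY_RES];

/* Record a range so later pre-bootmem allocations stay clear of it. */
static void reserve_early(unsigned long start, unsigned long end)
{
	int i;

	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		struct early_res *r = &early_res[i];

		/* Overlapping reservations indicate a boot-time bug. */
		if (end > r->start && start < r->end) {
			fprintf(stderr, "overlapping early reservation\n");
			exit(1);
		}
	}
	if (i >= MAX_EARLY_RES) {
		fprintf(stderr, "too many early reservations\n");
		exit(1);
	}
	early_res[i].start = start;
	early_res[i].end = end;
}

int main(void)
{
	/* e.g. the page-table pages set up by init_memory_mapping() */
	reserve_early(0x8000, 0xc000);
	reserve_early(0x100000, 0x200000);
	printf("reserved 2 ranges\n");
	return 0;
}
```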
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--  arch/x86/mm/init_64.c  5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 05f12c527b02..8198840c3dcb 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -176,7 +176,8 @@ __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 	set_pte_phys(address, phys, prot);
 }
 
-unsigned long __meminitdata table_start, table_end;
+static unsigned long __initdata table_start;
+static unsigned long __meminitdata table_end;
 
 static __meminit void *alloc_low_page(unsigned long *phys)
 {
@@ -387,6 +388,8 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 	if (!after_bootmem)
 		mmu_cr4_features = read_cr4();
 	__flush_tlb_all();
+
+	reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
 }
 
 #ifndef CONFIG_NUMA
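The hunk above records the direct-mapping page-table pages (table_start to table_end are page frame numbers, hence the PAGE_SHIFT conversion) in the new table. Continuing the hypothetical sketch above, an early allocator along the lines of bad_addr() could then skip any reserved range generically instead of checking table_start/table_end by hand. The names and the exact skip logic are illustrative, not the kernel's actual code, and this fragment reuses early_res/MAX_EARLY_RES from the sketch above.

```c
/* Hypothetical consumer side: bump a candidate allocation past any
 * recorded reservation instead of hard coding per-range checks. */
static int bad_addr(unsigned long *addrp, unsigned long size)
{
	unsigned long addr = *addrp;
	int i;

	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		struct early_res *r = &early_res[i];

		if (addr + size > r->start && addr < r->end) {
			*addrp = r->end;	/* retry above the reservation */
			return 1;
		}
	}
	return 0;
}
```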