author		Andi Kleen <ak@suse.de>			2006-04-07 19:49:15 +0200
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-04-09 11:53:16 -0700
commit		9d99aaa31f5994d1923c3713ce9144c4c42332e1 (patch)
tree		ae608593ca196dd6493cccbdfc1b8dd098e91ee8 /arch/x86_64
parent		805e8c03c9ea9bdb402a36341e02ec24825d5417 (diff)
[PATCH] x86_64: Support memory hotadd without sparsemem
Memory hotadd doesn't need SPARSEMEM, but can be handled by just
preallocating mem_maps. This only needs some untangling of ifdefs to
enable the necessary code even without SPARSEMEM.

Originally from Keith Mannthey, hacked by AK.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
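A rough sketch of how the new path is meant to be driven (the caller below is
hypothetical and only illustrates the flow; add_memory(), __add_pages() and
online_page() are the functions this patch adds or touches in mm/init.c):

/*
 * Hypothetical caller, e.g. an ACPI memory-hotplug handler that has just
 * been told about a new physical range.  add_memory() hands the range to
 * __add_pages(), which walks the already-allocated mem_map, checks each
 * pfn against the e820 map, and onlines the backing pages into the
 * NORMAL zone.
 */
static int handle_memory_hotadd(u64 phys_start, u64 length)
{
	int ret = add_memory(phys_start, length);	/* see mm/init.c below */

	if (ret)
		printk(KERN_ERR "memory hotadd at 0x%llx (%llu bytes) failed: %d\n",
		       (unsigned long long)phys_start,
		       (unsigned long long)length, ret);
	return ret;
}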
Diffstat (limited to 'arch/x86_64')
-rw-r--r--	arch/x86_64/kernel/e820.c	 2
-rw-r--r--	arch/x86_64/mm/init.c		36
2 files changed, 35 insertions(+), 3 deletions(-)
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 293cd71a266a..db57d3ff04e3 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -80,7 +80,7 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
 	return 0;
 }
 
-int __init e820_mapped(unsigned long start, unsigned long end, unsigned type)
+int __meminit e820_mapped(unsigned long start, unsigned long end, unsigned type)
 {
 	int i;
 	for (i = 0; i < e820.nr_map; i++) {
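The only change to e820.c is the section annotation: __init code is freed after
boot, so e820_mapped() has to become __meminit to remain callable from the
hot-add path later on.  Roughly, the annotation behaves like the sketch below
(a simplified rendition of include/linux/init.h from that era, shown only for
context; it is not part of this patch and the exact definition varies by
kernel version):

/* Simplified sketch of the __meminit annotation, not part of this patch. */
#ifdef CONFIG_MEMORY_HOTPLUG
#define __meminit			/* hotplug: keep the code resident after boot */
#else
#define __meminit	__init		/* no hotplug: discard it with the rest of init text */
#endif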
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index e5f7f1c34462..492161168402 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -507,9 +507,8 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
 
 /*
  * Memory hotplug specific functions
- * These are only for non-NUMA machines right now.
  */
-#ifdef CONFIG_MEMORY_HOTPLUG
+#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
 
 void online_page(struct page *page)
 {
@@ -520,6 +519,39 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
+#ifndef CONFIG_MEMORY_HOTPLUG
+/*
+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
+ * just online the pages.
+ */
+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+{
+	int err = -EIO;
+	unsigned long pfn;
+	unsigned long total = 0, mem = 0;
+	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+		unsigned long addr = pfn << PAGE_SHIFT;
+		if (pfn_valid(pfn) && e820_mapped(addr, addr+1, E820_RAM)) {
+			online_page(pfn_to_page(pfn));
+			err = 0;
+			mem++;
+		}
+		total++;
+	}
+	if (!err) {
+		z->spanned_pages += total;
+		z->present_pages += mem;
+		z->zone_pgdat->node_spanned_pages += total;
+		z->zone_pgdat->node_present_pages += mem;
+	}
+	return err;
+}
+#endif
+
+/*
+ * Memory is added always to NORMAL zone. This means you will never get
+ * additional DMA/DMA32 memory.
+ */
 int add_memory(u64 start, u64 size)
 {
 	struct pglist_data *pgdat = NODE_DATA(0);