Diffstat (limited to 'arch/x86_64/mm/srat.c')
-rw-r--r--  arch/x86_64/mm/srat.c | 19 ++++++++++++++-----
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 15ae9fcd65a7..474df22c6ed2 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -34,7 +34,10 @@ static nodemask_t nodes_found __initdata;
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
 static int found_add_area __initdata;
-int hotadd_percent __initdata = 10;
+int hotadd_percent __initdata = 0;
+#ifndef RESERVE_HOTADD
+#define hotadd_percent 0	/* Ignore all settings */
+#endif
 static u8 pxm2node[256] = { [0 ... 255] = 0xff };

 /* Too small nodes confuse the VM badly. Usually they result
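The hunk above turns hotadd reservation off by default and, when RESERVE_HOTADD is not defined, shadows the hotadd_percent variable with a literal 0, so every later reference constant-folds and the reservation paths compile away as dead code. A minimal standalone sketch of that pattern, using the hypothetical names ENABLE_FEATURE and feature_budget rather than anything from this file:

#include <stdio.h>

int feature_budget = 0;                 /* runtime-tunable when the feature is built in */
#ifndef ENABLE_FEATURE
#define feature_budget 0                /* every use below becomes the literal 0 */
#endif

int main(void)
{
        if (feature_budget > 0)         /* folds to if (0) when disabled: dead-stripped */
                printf("reserving %d%% for hot-added memory\n", feature_budget);
        else
                printf("hot-add reservation compiled out\n");
        return 0;
}

Built without -DENABLE_FEATURE the first branch disappears entirely; built with it, feature_budget stays an ordinary variable that boot code can still set.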
@@ -103,6 +106,7 @@ static __init void bad_srat(void)
 	int i;
 	printk(KERN_ERR "SRAT: SRAT not used.\n");
 	acpi_numa = -1;
+	found_add_area = 0;
 	for (i = 0; i < MAX_LOCAL_APIC; i++)
 		apicid_to_node[i] = NUMA_NO_NODE;
 	for (i = 0; i < MAX_NUMNODES; i++)
@@ -154,7 +158,8 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 	int pxm, node;
 	if (srat_disabled())
 		return;
-	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { bad_srat();
+	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+		bad_srat();
 		return;
 	}
 	if (pa->flags.enabled == 0)
@@ -191,15 +196,17 @@ static int hotadd_enough_memory(struct bootnode *nd)
 	allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE;
 	allowed = (allowed / 100) * hotadd_percent;
 	if (allocated + mem > allowed) {
+		unsigned long range;
 		/* Give them at least part of their hotadd memory up to hotadd_percent.
 		   It would be better to spread the limit out
 		   over multiple hotplug areas, but that is too complicated
 		   right now */
 		if (allocated >= allowed)
 			return 0;
-		pages = (allowed - allocated + mem) / sizeof(struct page);
+		range = allowed - allocated;
+		pages = (range / PAGE_SIZE);
 		mem = pages * sizeof(struct page);
-		nd->end = nd->start + pages*PAGE_SIZE;
+		nd->end = nd->start + range;
 	}
 	/* Not completely foolproof, but a good sanity check */
 	addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
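The rewritten clamp works in bytes first: range is whatever is left of the budget, pages is that range expressed in pages, and mem is the struct page metadata those pages need. The removed line instead divided a byte count by sizeof(struct page), inflating pages by roughly PAGE_SIZE/sizeof(struct page) and pushing nd->end far past the budget. A standalone sketch of the corrected arithmetic, assuming a 4 KiB page and a 64-byte struct page (illustrative assumptions, not values taken from this commit):

#include <stdio.h>

#define PAGE_SIZE       4096UL          /* assumed x86-64 base page size */
#define PAGE_STRUCT_SZ  64UL            /* stand-in for sizeof(struct page) */

int main(void)
{
        unsigned long allowed   = 1024UL << 20;         /* hotadd budget: 1 GiB */
        unsigned long allocated =  768UL << 20;         /* already handed out   */

        unsigned long range = allowed - allocated;      /* bytes still allowed  */
        unsigned long pages = range / PAGE_SIZE;        /* pages in that range  */
        unsigned long mem   = pages * PAGE_STRUCT_SZ;   /* metadata to reserve  */

        printf("clamp to %lu MiB: %lu pages, %lu KiB of struct page data\n",
               range >> 20, pages, mem >> 10);          /* 256 MiB, 65536, 4096 */
        return 0;
}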
@@ -392,8 +399,10 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 	/* First clean up the node list */
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		cutoff_node(i, start, end);
-		if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE)
+		if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
 			unparse_node(i);
+			node_set_offline(i);
+		}
 	}

 	if (acpi_numa <= 0)
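Besides unparsing it, an undersized node is now also marked offline, so nothing later walks a node whose range was cut below NODE_MIN_SIZE. In kernels of this era node_set_offline() amounts to clearing the node's bit in the online-node mask; a toy illustration of that idea with hypothetical names, not the kernel's nodemask API:

#include <stdio.h>

static unsigned long online_mask;       /* one bit per NUMA node */

static void toy_node_set_online(int node)  { online_mask |=  1UL << node; }
static void toy_node_set_offline(int node) { online_mask &= ~(1UL << node); }

int main(void)
{
        toy_node_set_online(0);
        toy_node_set_online(1);
        toy_node_set_offline(1);        /* node 1 fell below NODE_MIN_SIZE */
        printf("online nodes: %#lx\n", online_mask);    /* prints 0x1 */
        return 0;
}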