From 443c6f145de813518c36ac6b6e4e08d9445337e7 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Wed, 23 Dec 2009 21:00:47 +0100
Subject: SYSCTL: Add a mutex to the page_alloc zone order sysctl

The zone list code clearly cannot tolerate concurrent writers (I couldn't
find any locks for that), so simply add a global mutex. No need for RCU
in this case.

Signed-off-by: Andi Kleen
---
 mm/page_alloc.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

(limited to 'mm/page_alloc.c')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d79b92580561..4e9f5cc5fb59 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2402,13 +2402,14 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
 {
        char saved_string[NUMA_ZONELIST_ORDER_LEN];
        int ret;
+       static DEFINE_MUTEX(zl_order_mutex);

+       mutex_lock(&zl_order_mutex);
        if (write)
-               strncpy(saved_string, (char*)table->data,
-                       NUMA_ZONELIST_ORDER_LEN);
+               strcpy(saved_string, (char*)table->data);
        ret = proc_dostring(table, write, buffer, length, ppos);
        if (ret)
-               return ret;
+               goto out;
        if (write) {
                int oldval = user_zonelist_order;
                if (__parse_numa_zonelist_order((char*)table->data)) {
@@ -2421,7 +2422,9 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
                } else if (oldval != user_zonelist_order)
                        build_all_zonelists();
        }
-       return 0;
+out:
+       mutex_unlock(&zl_order_mutex);
+       return ret;
 }
--
cgit v1.2.3
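As a side note on the locking pattern this patch relies on, below is a minimal sketch of a mutex-serialized string sysctl handler in the same shape: a function-local static DEFINE_MUTEX(), an early copy of the old string for rollback, and a single unlock point reached via goto so every exit path drops the lock. It is an illustration only, not the kernel's numa_zonelist_order_handler(); example_order_handler(), example_parse_order() and the 16-byte buffer are invented stand-ins for the real names in mm/page_alloc.c.

#include <linux/mutex.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical stand-in for __parse_numa_zonelist_order(): returns 0 on success. */
static int example_parse_order(const char *s)
{
        return (*s == 'd' || *s == 'n' || *s == 'z') ? 0 : -EINVAL;
}

static int example_order_handler(struct ctl_table *table, int write,
                                 void __user *buffer, size_t *length,
                                 loff_t *ppos)
{
        /* Function-local static: one mutex shared by all callers of this handler. */
        static DEFINE_MUTEX(order_mutex);
        char saved[16];         /* assumes table->data is a short NUL-terminated string */
        int ret;

        mutex_lock(&order_mutex);
        if (write)
                strcpy(saved, table->data);     /* remember the old value for rollback */

        ret = proc_dostring(table, write, buffer, length, ppos);
        if (ret)
                goto out;                       /* error paths must still drop the lock */

        if (write && example_parse_order(table->data)) {
                strcpy(table->data, saved);     /* bogus input: restore the old string */
                ret = -EINVAL;
        }
out:
        mutex_unlock(&order_mutex);
        return ret;
}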
From d2dbe08ddceb4ba2b274abb84326d7e69d454e5c Mon Sep 17 00:00:00 2001
From: Kazuhisa Ichikawa
Date: Fri, 15 Jan 2010 17:01:20 -0800
Subject: mm/page_alloc: fix the range check for backward merging

The current check for 'backward merging' within add_active_range() does
not seem correct. start_pfn must be compared against
early_node_map[i].start_pfn (and NOT against .end_pfn) to find out whether
the new region is backward-mergeable with the existing range.

Signed-off-by: Kazuhisa Ichikawa
Acked-by: David Rientjes
Cc: KOSAKI Motohiro
Cc: Mel Gorman
Cc: Christoph Lameter
Cc: Johannes Weiner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/page_alloc.c')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4e9f5cc5fb59..6ea4966a6334 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3998,7 +3998,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
        }

        /* Merge backward if suitable */
-       if (start_pfn < early_node_map[i].end_pfn &&
+       if (start_pfn < early_node_map[i].start_pfn &&
            end_pfn >= early_node_map[i].start_pfn) {
                early_node_map[i].start_pfn = start_pfn;
                return;
--
cgit v1.2.3

From 6ccf80eb15ccaca4d3f1ab5162b9ded5eecd9971 Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Fri, 15 Jan 2010 17:01:18 -0800
Subject: page allocator: update NR_FREE_PAGES only when necessary

Commit f2260e6b (page allocator: update NR_FREE_PAGES only as necessary)
introduced one minor regression: if __rmqueue() fails, the NR_FREE_PAGES
statistic goes wrong. This patch fixes it.

Signed-off-by: KOSAKI Motohiro
Cc: Mel Gorman
Reviewed-by: Minchan Kim
Reported-by: Huang Shijie
Reviewed-by: Christoph Lameter
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/page_alloc.c')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6ea4966a6334..d2a8889b4c58 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1222,10 +1222,10 @@ again:
                }
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
-               __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;
+               __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
        }

        __count_zone_vm_events(PGALLOC, zone, 1 << order);
--
cgit v1.2.3
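To make the corrected range check in the add_active_range() patch above concrete, here is a small userspace toy of the backward-merge test, under the assumption of a simplified one-entry range table; struct range, map[] and merges_backward() are invented names for illustration, not the kernel's early_node_map machinery.

#include <stdio.h>

/* Simplified stand-in for one early_node_map[] entry. */
struct range {
        unsigned long start_pfn;
        unsigned long end_pfn;          /* exclusive */
};

static struct range map[] = { { 100, 200 } };

/* Corrected test: can [start_pfn, end_pfn) be merged onto the front of map[i]? */
static int merges_backward(unsigned long start_pfn, unsigned long end_pfn, int i)
{
        /*
         * The new region must begin before the existing range
         * (start_pfn < .start_pfn) and reach it (end_pfn >= .start_pfn).
         * Comparing start_pfn against .end_pfn, as the old code did, also
         * accepts regions that start inside the existing range, which are
         * not extendable backward.
         */
        return start_pfn < map[i].start_pfn && end_pfn >= map[i].start_pfn;
}

int main(void)
{
        /* [50, 120) starts before [100, 200) and reaches it: prints 1. */
        printf("%d\n", merges_backward(50, 120, 0));
        /* [150, 180) lies entirely inside [100, 200): prints 0, no backward merge. */
        printf("%d\n", merges_backward(150, 180, 0));
        return 0;
}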
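Similarly, a toy model of the last patch's point, assuming a zone reduced to nothing but its free-page counter: the counter may only be decremented once the allocation is known to have succeeded. toy_zone, toy_rmqueue() and toy_alloc() are hypothetical stand-ins for the real zone, __rmqueue() and buffered_rmqueue().

#include <stdio.h>
#include <stdbool.h>

/* Toy zone: just the statistic the patch is about. */
struct toy_zone {
        long nr_free_pages;
};

/* Stand-in for __rmqueue(); pretend the free lists cannot satisfy the request. */
static bool toy_rmqueue(struct toy_zone *zone, unsigned int order)
{
        (void)zone;
        (void)order;
        return false;
}

static bool toy_alloc(struct toy_zone *zone, unsigned int order)
{
        bool got = toy_rmqueue(zone, order);

        if (!got)
                return false;           /* failed allocation: counter untouched */
        /* Only a successful allocation removes 2^order pages from the count. */
        zone->nr_free_pages -= 1L << order;
        return true;
}

int main(void)
{
        struct toy_zone zone = { .nr_free_pages = 1024 };

        toy_alloc(&zone, 3);
        /* With the counter update moved after the NULL check, this prints 1024. */
        printf("%ld\n", zone.nr_free_pages);
        return 0;
}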