From 925268a06dc2b1ff7bfcc37419a6827a0e739639 Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki
Date: Tue, 11 Jan 2011 16:44:01 +0900
Subject: memory hotplug: one more lock on memory hotplug

Currently, lock_memory_hotplug()/unlock_memory_hotplug() are called
around add/remove/offline of pages to avoid races with hibernation,
but online_pages() does not take the lock, which is asymmetric. There
are also callers that must avoid races both against the memory hotplug
notifier and their own local code, and against concurrent hotplug
operations (hotplug vs. hotplug). Taking the lock in online_pages()
gives a generic way to avoid such races.

The lock has no significant cost here: pages tend to be onlined by a
udev script or similar, one memory section at a time. So it is better
to take the lock here, too.

Cc: # 2.6.37
Reviewed-by: Christoph Lameter
Acked-by: David Rientjes
Signed-off-by: KAMEZAWA Hiroyuki
Signed-off-by: Pekka Enberg
---
 include/linux/memory_hotplug.h | 6 ++++++
 mm/memory_hotplug.c            | 4 ++++
 2 files changed, 10 insertions(+)

diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 31c237a00c48..12b9eb5a36c3 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -161,6 +161,12 @@ extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
 extern void put_page_bootmem(struct page *page);
 #endif
 
+/*
+ * Lock for memory hotplug guarantees 1) all callbacks for memory hotplug
+ * notifier will be called under this. 2) offline/online/add/remove memory
+ * will not run simultaneously.
+ */
+
 void lock_memory_hotplug(void);
 void unlock_memory_hotplug(void);
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2c6523af5473..83163c096a75 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -407,6 +407,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	int ret;
 	struct memory_notify arg;
 
+	lock_memory_hotplug();
 	arg.start_pfn = pfn;
 	arg.nr_pages = nr_pages;
 	arg.status_change_nid = -1;
@@ -419,6 +420,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	ret = notifier_to_errno(ret);
 	if (ret) {
 		memory_notify(MEM_CANCEL_ONLINE, &arg);
+		unlock_memory_hotplug();
 		return ret;
 	}
 	/*
@@ -443,6 +445,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
 		       nr_pages, pfn);
 		memory_notify(MEM_CANCEL_ONLINE, &arg);
+		unlock_memory_hotplug();
 		return ret;
 	}
 
@@ -467,6 +470,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	if (onlined_pages)
 		memory_notify(MEM_ONLINE, &arg);
 
+	unlock_memory_hotplug();
 	return 0;
 }
--
cgit v1.2.3
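The discipline the patch above enforces is worth spelling out: online_pages()
now takes the memory hotplug lock on entry and must drop it on every return
path, including both error exits. The following is a minimal userspace C
sketch of that pattern, not the kernel code itself: a pthread mutex stands in
for the kernel's memory hotplug lock, and notify_online() is a hypothetical
stand-in for the notifier chain.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the kernel's memory hotplug lock; a plain mutex
 * models the mutual exclusion, not the real kernel API. */
static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical notifier: rejects a zero-sized request so the
 * error path below is actually exercised. */
static int notify_online(unsigned long pfn, unsigned long nr_pages)
{
	(void)pfn;
	return nr_pages == 0 ? -1 : 0;
}

/* Mirrors the structure of online_pages() after the patch:
 * lock on entry, unlock on every return path. */
static int online_pages_sketch(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	pthread_mutex_lock(&hotplug_lock);	/* lock_memory_hotplug() */

	ret = notify_online(pfn, nr_pages);
	if (ret) {
		/* kernel: memory_notify(MEM_CANCEL_ONLINE, &arg); */
		pthread_mutex_unlock(&hotplug_lock);	/* unlock before the error return */
		return ret;
	}

	printf("onlined %lu pages at pfn %#lx\n", nr_pages, pfn);
	pthread_mutex_unlock(&hotplug_lock);	/* unlock_memory_hotplug() */
	return 0;
}

int main(void)
{
	online_pages_sketch(0x1000, 0);		/* error path */
	online_pages_sketch(0x1000, 32);	/* success path */
	return 0;
}

Note that the patch adds an unlock before each early return rather than
restructuring the function around a single exit label; that keeps the diff
small and the lock/unlock pairing easy to audit.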
From 04d94879c8a4973b5499dc26b9d38acee8928791 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Mon, 10 Jan 2011 10:15:15 -0600
Subject: slub: Avoid use of slub_lock in show_slab_objects()

The purpose of the locking is to prevent removal and addition of nodes
while statistics are gathered for a slab cache. So we need to avoid
racing with memory hotplug functionality. It is enough to take the
memory hotplug locks there instead of the slub_lock.

online_pages() currently does not acquire the memory hotplug lock. A
separate patch from the memory hotplug authors will make it take the
lock and document how the lock protects non-hotplug data structures
against node addition and removal.

Cc: # 2.6.37
Reported-and-tested-by: Bart Van Assche
Acked-by: David Rientjes
Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index bec0e355fbad..96e690717822 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3821,7 +3821,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		}
 	}
 
-	down_read(&slub_lock);
+	lock_memory_hotplug();
 #ifdef CONFIG_SLUB_DEBUG
 	if (flags & SO_ALL) {
 		for_each_node_state(node, N_NORMAL_MEMORY) {
@@ -3862,7 +3862,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		x += sprintf(buf + x, " N%d=%lu", node, nodes[node]);
 #endif
 
-	up_read(&slub_lock);
+	unlock_memory_hotplug();
 	kfree(nodes);
 	return x + sprintf(buf + x, "\n");
 }
--
cgit v1.2.3

From 68a1b1955957e222d890f550d2a44ae598db3de9 Mon Sep 17 00:00:00 2001
From: H Hartley Sweeten
Date: Tue, 11 Jan 2011 17:49:32 -0600
Subject: mm/slab.c: make local symbols static

Local symbols should be static.

Signed-off-by: H Hartley Sweeten
Cc: Christoph Lameter
Cc: Pekka Enberg
Cc: Matt Mackall
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 264037449f08..37961d1f584f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -284,7 +284,7 @@ struct kmem_list3 {
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
+static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define CACHE_CACHE 0
 #define SIZE_AC MAX_NUMNODES
 #define SIZE_L3 (2 * MAX_NUMNODES)
@@ -4053,7 +4053,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
  * necessary. Note that the l3 listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
-void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
			 struct array_cache *ac, int force, int node)
 {
 	int tofree;
@@ -4317,7 +4317,7 @@ static const struct seq_operations slabinfo_op = {
 * @count: data length
 * @ppos: unused
 */
-ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
 {
 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
--
cgit v1.2.3

From 2ed1c5257b5d669df2a64bd53e61dc19c7d936cd Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Sat, 15 Jan 2011 13:30:04 +0200
Subject: Update Pekka's email address in MAINTAINERS

Signed-off-by: Pekka Enberg
---
 MAINTAINERS | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 89e4d4b145bb..1af022e63668 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3684,7 +3684,7 @@ F: kernel/debug/
 
 KMEMCHECK
 M:	Vegard Nossum
-M:	Pekka Enberg
+M:	Pekka Enberg
 S:	Maintained
 F:	Documentation/kmemcheck.txt
 F:	arch/x86/include/asm/kmemcheck.h
@@ -5646,7 +5646,7 @@ F: drivers/net/sky2.*
 
 SLAB ALLOCATOR
 M:	Christoph Lameter
-M:	Pekka Enberg
+M:	Pekka Enberg
 M:	Matt Mackall
 L:	linux-mm@kvack.org
 S:	Maintained
--
cgit v1.2.3
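As background for the mm/slab.c patch above: a file-scope definition without
`static` has external linkage in C, so it lands in the kernel-wide symbol
namespace and can clash with an identically named symbol in another
translation unit; adding `static` gives it internal linkage. A minimal
two-file userspace sketch (file names and function bodies are hypothetical;
only the linkage behavior is the point):

/* util.c: `static` gives this drain_array internal linkage, so the
 * definition is invisible outside this translation unit. */
static int drain_array(int force)
{
	return force ? 1 : 0;	/* stand-in body, not the slab logic */
}

int util_run(void)
{
	return drain_array(1);	/* always resolves to the static copy above */
}

/* main.c: free to define its own drain_array; without `static` in
 * util.c, linking the two files would fail with a multiple-definition
 * error. */
#include <stdio.h>

int util_run(void);	/* provided by util.c */

int drain_array(int force)
{
	return -force;
}

int main(void)
{
	printf("util_run() = %d\n", util_run());		/* prints 1 */
	printf("drain_array(1) = %d\n", drain_array(1));	/* prints -1 */
	return 0;
}

Compiling the pair (e.g. cc util.c main.c) links cleanly only because
util.c's copy is static; this is the same class of global-namespace pollution
the patch removes from mm/slab.c.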