| author | Michael Holzheu <holzheu@linux.vnet.ibm.com> | 2014-03-06 18:47:21 +0100 |
|---|---|---|
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2015-08-04 14:06:53 +0200 |
| commit | c29a7baf091fc6b2c9e40561030f8c62e6145a19 (patch) | |
| tree | dfddc7a273858c32c9946857bfff2dc7779e64a9 /drivers/s390 | |
| parent | e8054b654bf5d4f549f4f24b708acce6d2718b1b (diff) | |
s390/numa: add emulation support
NUMA emulation (aka fake NUMA) distributes the available memory to nodes
without using real topology information about the physical memory of the
machine.
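To illustrate the idea (this is not the kernel's implementation), a fake-NUMA scheme can simply stripe physical memory across a fixed number of emulated nodes in equal chunks; `EMU_NODES`, `EMU_STRIPE_PFNS`, and `emu_pfn_to_nid` below are made-up names and parameters:

```c
#include <stdio.h>

/*
 * Illustrative sketch only, not the kernel's implementation: stripe
 * physical memory across a fixed number of emulated nodes in equal
 * chunks. EMU_NODES and EMU_STRIPE_PFNS are made-up parameters.
 */
#define EMU_NODES       4
#define EMU_STRIPE_PFNS (1UL << 18)     /* 1 GiB stripes at 4 KiB pages */

static int emu_pfn_to_nid(unsigned long pfn)
{
        return (int)((pfn / EMU_STRIPE_PFNS) % EMU_NODES);
}

int main(void)
{
        unsigned long pfn;

        /* Show the round-robin mapping for the first few stripes. */
        for (pfn = 0; pfn < 6 * EMU_STRIPE_PFNS; pfn += EMU_STRIPE_PFNS)
                printf("pfn 0x%lx -> node %d\n", pfn, emu_pfn_to_nid(pfn));
        return 0;
}
```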
Splitting the system memory into nodes replicates the memory management
structures for each node. In particular, each node has its own "mm locks"
and its own "kswapd" task.
For large systems, under certain conditions, this results in improved
system performance and/or lower latency due to reduced pressure on the
mm locks and the kswapd tasks.
NUMA emulation distributes CPUs to nodes while respecting the original
machine topology information. This is done by trying to avoid separating
CPUs which reside on the same book or even on the same MC. Because the
current Linux scheduler code requires a stable CPU-to-node mapping, cores
are pinned to nodes when the first CPU thread is set online.
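The pinning rule can be pictured with a small sketch: the first time any thread of a core comes online, the core is assigned a node, and every later online returns the same answer. The names and the toy "cores of one book stay together" policy below are assumptions, not the code from this patch:

```c
#include <stdio.h>

#define MAX_CORES       16
#define NODE_UNASSIGNED (-1)

/*
 * Hypothetical sketch of the stable-pinning rule; the names and the
 * toy "cores of one book stay together" policy are assumptions, not
 * the kernel code from this patch.
 */
static int core_to_node[MAX_CORES];

static int pick_node_for_core(int core)
{
        return core / 4;        /* toy policy: 4 cores per book/node */
}

static int core_online(int core)
{
        /* Pin on first online; every later online sees the same node. */
        if (core_to_node[core] == NODE_UNASSIGNED)
                core_to_node[core] = pick_node_for_core(core);
        return core_to_node[core];
}

int main(void)
{
        int i;

        for (i = 0; i < MAX_CORES; i++)
                core_to_node[i] = NODE_UNASSIGNED;

        printf("core 5 first online:  node %d\n", core_online(5));
        printf("core 5 later online:  node %d (stable)\n", core_online(5));
        return 0;
}
```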
This patch is based on the initial implementation from Philipp Hachtmann.
Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390')
drivers/s390/char/sclp_cmd.c | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
```diff
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index e9485fbbb373..806239c2cf2f 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -25,6 +25,7 @@
 #include <asm/setup.h>
 #include <asm/page.h>
 #include <asm/sclp.h>
+#include <asm/numa.h>
 
 #include "sclp.h"
 
@@ -388,11 +389,11 @@ static struct notifier_block sclp_mem_nb = {
 };
 
 static void __init align_to_block_size(unsigned long long *start,
-                                       unsigned long long *size)
+                                       unsigned long long *size,
+                                       unsigned long long alignment)
 {
-        unsigned long long start_align, size_align, alignment;
+        unsigned long long start_align, size_align;
 
-        alignment = memory_block_size_bytes();
         start_align = roundup(*start, alignment);
         size_align = rounddown(*start + *size, alignment) - start_align;
 
@@ -404,8 +405,8 @@ static void __init align_to_block_size(unsigned long long *start,
 
 static void __init add_memory_merged(u16 rn)
 {
+        unsigned long long start, size, addr, block_size;
         static u16 first_rn, num;
-        unsigned long long start, size;
 
         if (rn && first_rn && (first_rn + num == rn)) {
                 num++;
@@ -423,9 +424,12 @@ static void __init add_memory_merged(u16 rn)
                 goto skip_add;
         if (memory_end_set && (start + size > memory_end))
                 size = memory_end - start;
-        align_to_block_size(&start, &size);
-        if (size)
-                add_memory(0, start, size);
+        block_size = memory_block_size_bytes();
+        align_to_block_size(&start, &size, block_size);
+        if (!size)
+                goto skip_add;
+        for (addr = start; addr < start + size; addr += block_size)
+                add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size);
 skip_add:
         first_rn = rn;
         num = 1;
```
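To make the new control flow concrete, here is a minimal user-space sketch with stub stand-ins for `add_memory()` and `numa_pfn_to_nid()`; the names mirror the diff, but the bodies, the page size, and the sizes chosen in `main()` are placeholders:

```c
#include <stdio.h>

#define PFN_DOWN(addr)  ((addr) >> 12)  /* assumes 4 KiB pages */

/* Stub stand-ins for the kernel interfaces used by the patch. */
static int numa_pfn_to_nid(unsigned long long pfn)
{
        return (int)((pfn >> 18) % 4);  /* toy mapping: 4 nodes, 1 GiB each */
}

static void add_memory(int nid, unsigned long long addr,
                       unsigned long long size)
{
        printf("add_memory(nid=%d, addr=0x%llx, size=0x%llx)\n",
               nid, addr, size);
}

int main(void)
{
        unsigned long long start = 0, size = 4ULL << 30;        /* 4 GiB */
        unsigned long long block_size = 256ULL << 20;           /* 256 MiB */
        unsigned long long addr;

        /* Same shape as the loop added by the patch: one add_memory()
         * call per memory block, each block placed on its own node. */
        for (addr = start; addr < start + size; addr += block_size)
                add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size);
        return 0;
}
```

The design point the loop captures: instead of one `add_memory(0, ...)` call that puts the whole merged range on node 0, memory is now registered one block at a time so each block can land on the node the emulation assigns to it.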