author    | Tejun Heo <tj@kernel.org> | 2010-04-09 18:57:00 +0900
committer | Tejun Heo <tj@kernel.org> | 2010-05-01 08:30:49 +0200
commit    | 020ec6537aa65c18e9084c568d7b94727f2026fd (patch)
tree      | 0466d590090ed9db214846887e7ea636fcd26169 /mm/percpu.c
parent    | be1066bbcd443a65df312fdecea7e4959adedb45 (diff)
percpu: factor out pcpu_addr_in_first/reserved_chunk() and update per_cpu_ptr_to_phys()
Factor out pcpu_addr_in_first/reserved_chunk() from
pcpu_chunk_addr_search() and use pcpu_addr_in_first_chunk() to update
per_cpu_ptr_to_phys() so that it handles the first chunk differently
from the rest.

This patch doesn't cause any functional change; it only prepares for
percpu nommu support.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: David Howells <dhowells@redhat.com>
Cc: Graff Yang <graff.yang@gmail.com>
Cc: Sonic Zhang <sonic.adi@gmail.com>
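
For orientation before the raw diff below: after this patch, per_cpu_ptr_to_phys() first classifies the address with the new pcpu_addr_in_first_chunk() helper and only then chooses between __pa() and vmalloc_to_page(). The sketch below is an editorial rendering of the post-patch code taken from the diff; the comments (and the braces on the else branch) are mine and reflect the usual assumption that an embedded first chunk lives in the kernel's linear mapping while all other chunks sit in the vmalloc area.

```c
/* Editorial rendering of the post-patch path; see the diff below. */
static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	/* half-open range check: [base_addr, base_addr + pcpu_unit_size) */
	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	if (pcpu_addr_in_first_chunk(addr)) {
		/*
		 * The first chunk may be embedded in the linear mapping or
		 * mapped through the vmalloc area, so choose the translation
		 * based on where the address falls.
		 */
		if ((unsigned long)addr < VMALLOC_START ||
		    (unsigned long)addr >= VMALLOC_END)
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr));
	} else {
		/* chunks other than the first are vmalloc-backed */
		return page_to_phys(vmalloc_to_page(addr));
	}
}
```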
Diffstat (limited to 'mm/percpu.c')
-rw-r--r-- | mm/percpu.c | 32
1 file changed, 24 insertions(+), 8 deletions(-)
```diff
diff --git a/mm/percpu.c b/mm/percpu.c
index 6e09741ddc62..1aeb081f30ec 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -177,6 +177,21 @@ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 static void pcpu_reclaim(struct work_struct *work);
 static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
 
+static bool pcpu_addr_in_first_chunk(void *addr)
+{
+	void *first_start = pcpu_first_chunk->base_addr;
+
+	return addr >= first_start && addr < first_start + pcpu_unit_size;
+}
+
+static bool pcpu_addr_in_reserved_chunk(void *addr)
+{
+	void *first_start = pcpu_first_chunk->base_addr;
+
+	return addr >= first_start &&
+		addr < first_start + pcpu_reserved_chunk_limit;
+}
+
 static int __pcpu_size_to_slot(int size)
 {
 	int highbit = fls(size);	/* size is in bytes */
@@ -334,12 +349,10 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
  */
 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 {
-	void *first_start = pcpu_first_chunk->base_addr;
-
 	/* is it in the first chunk? */
-	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
+	if (pcpu_addr_in_first_chunk(addr)) {
 		/* is it in the reserved area? */
-		if (addr < first_start + pcpu_reserved_chunk_limit)
+		if (pcpu_addr_in_reserved_chunk(addr))
 			return pcpu_reserved_chunk;
 		return pcpu_first_chunk;
 	}
@@ -1343,10 +1356,13 @@ bool is_kernel_percpu_address(unsigned long addr)
  */
 phys_addr_t per_cpu_ptr_to_phys(void *addr)
 {
-	if ((unsigned long)addr < VMALLOC_START ||
-	    (unsigned long)addr >= VMALLOC_END)
-		return __pa(addr);
-	else
+	if (pcpu_addr_in_first_chunk(addr)) {
+		if ((unsigned long)addr < VMALLOC_START ||
+		    (unsigned long)addr >= VMALLOC_END)
+			return __pa(addr);
+		else
+			return page_to_phys(vmalloc_to_page(addr));
+	} else
 		return page_to_phys(vmalloc_to_page(addr));
 }
 
```
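
As a usage note that is not part of the patch: callers typically hand per_cpu_ptr_to_phys() an address obtained from per_cpu_ptr() on a dynamically allocated per-CPU object. The built-in initcall below is a hypothetical sketch of such a caller; demo_buf, demo_percpu_phys and the log message are invented for illustration.

```c
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU object, for illustration only. */
struct demo_buf {
	u64 words[32];
};

static int __init demo_percpu_phys(void)
{
	struct demo_buf __percpu *bufs;
	unsigned int cpu;

	bufs = alloc_percpu(struct demo_buf);
	if (!bufs)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		/* translate this CPU's copy into a physical address */
		phys_addr_t phys = per_cpu_ptr_to_phys(per_cpu_ptr(bufs, cpu));

		pr_info("cpu%u: per-cpu buffer at phys %llx\n",
			cpu, (unsigned long long)phys);
	}

	free_percpu(bufs);
	return 0;
}
late_initcall(demo_percpu_phys);
```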