author     Tejun Heo <tj@kernel.org>                          2010-06-18 11:44:31 +0200
committer  Paul Gortmaker <paul.gortmaker@windriver.com>      2013-01-16 16:45:03 -0500
commit     3f52a4057b24ed3a9d628a4594f80f15949e3b12 (patch)
tree       5465a3f4539289b343dbbe2d607b93165238f875
parent     a0cf4d370df2e6fdf50f73679f1d6315c13e073a (diff)
percpu: fix first chunk match in per_cpu_ptr_to_phys()
commit 9983b6f0cf8263e51bcf4c8a9dc0c1ef175b3c60 upstream.

per_cpu_ptr_to_phys() determines whether the passed in @addr belongs to the first_chunk or not by just matching the address against the address range of the base unit (unit0, used by cpu0). When an address from another cpu is passed in, it always decides that the address doesn't belong to the first chunk even when it does. This makes the function return a bogus physical address which may lead to a crash.

This problem was discovered by Cliff Wickman while investigating a crash during kdump on an SGI UV system.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Cliff Wickman <cpw@sgi.com>
Tested-by: Cliff Wickman <cpw@sgi.com>
[PG: for 2.6.34, diffstat differs slightly due to a trivial indenting difference, and 34 does not have the _maybe_unused annotation to delete]
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
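For illustration only, the following small userspace C sketch shows the classification problem described above; it is not kernel code, and the layout constants (NR_UNITS, UNIT_SIZE), the first_chunk array and both helper names are made up. It contrasts a buggy test that compares an address only against cpu0's unit with the fixed approach of walking every possible CPU's unit range, which is what the for_each_possible_cpu() loop in the diff below does.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical first-chunk layout: one unit per possible CPU, laid out
 * back to back.  NR_UNITS and UNIT_SIZE stand in for the number of
 * possible CPUs and pcpu_unit_size. */
#define NR_UNITS        4
#define UNIT_SIZE       0x1000UL

static char first_chunk[NR_UNITS * UNIT_SIZE];

/* Buggy test: only unit 0 (cpu0's area) is treated as the first chunk. */
static bool in_first_chunk_buggy(void *addr)
{
        char *unit0 = first_chunk;

        return (char *)addr >= unit0 && (char *)addr < unit0 + UNIT_SIZE;
}

/* Fixed test: check every CPU's unit, as the patch does with
 * for_each_possible_cpu(). */
static bool in_first_chunk_fixed(void *addr)
{
        unsigned int cpu;

        for (cpu = 0; cpu < NR_UNITS; cpu++) {
                char *start = first_chunk + cpu * UNIT_SIZE;

                if ((char *)addr >= start && (char *)addr < start + UNIT_SIZE)
                        return true;
        }
        return false;
}

int main(void)
{
        /* An address inside cpu2's unit: it really is in the first chunk. */
        void *addr = first_chunk + 2 * UNIT_SIZE + 0x40;

        printf("buggy check: %s\n",
               in_first_chunk_buggy(addr) ? "first chunk" : "not first chunk");
        printf("fixed check: %s\n",
               in_first_chunk_fixed(addr) ? "first chunk" : "not first chunk");
        return 0;
}

The extra first_start/first_end comparison in the actual patch is only a fast path that skips the per-CPU scan for addresses that cannot be in the first chunk at all, as the in-diff comment notes.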
-rw-r--r--    mm/percpu.c    32
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 717cc6ea31f9..558543b33b52 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1343,10 +1343,36 @@ bool is_kernel_percpu_address(unsigned long addr)
  */
 phys_addr_t per_cpu_ptr_to_phys(void *addr)
 {
-        if ((unsigned long)addr < VMALLOC_START ||
-            (unsigned long)addr >= VMALLOC_END)
+        void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
+        bool in_first_chunk = false;
+        unsigned long first_start, first_end;
+        unsigned int cpu;
+
+        /*
+         * The following test on first_start/end isn't strictly
+         * necessary but will speed up lookups of addresses which
+         * aren't in the first chunk.
+         */
+        first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
+        first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
+                                    pcpu_unit_pages);
+        if ((unsigned long)addr >= first_start &&
+            (unsigned long)addr < first_end) {
+                for_each_possible_cpu(cpu) {
+                        void *start = per_cpu_ptr(base, cpu);
+
+                        if (addr >= start && addr < start + pcpu_unit_size) {
+                                in_first_chunk = true;
+                                break;
+                        }
+                }
+        }
+
+        if (in_first_chunk) {
+                if ((unsigned long)addr < VMALLOC_START ||
+                    (unsigned long)addr >= VMALLOC_END)
                         return __pa(addr);
-        else
+        } else
                 return page_to_phys(vmalloc_to_page(addr));
 }