author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-02-22 16:19:14 +0000
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-03-11 17:10:14 +1100
commit		1cdab55d8a8313f77a95fb8ca966dc4334f8e810 (patch)
tree		09649768514553e9c25d1a5a53a5cd567d0e138d /arch/powerpc/mm
parent		16962e7ce1dce29e1e92d231ac7d6844d7385d54 (diff)
powerpc: Wire up /proc/vmallocinfo to our ioremap()
This adds the necessary bits and pieces to the powerpc implementation of
ioremap to benefit from caller tracking in /proc/vmallocinfo, at least
for ioremaps done after mem init, as the older ones aren't tracked.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
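
The mechanism is the familiar caller-tracking pattern: each exported entry point captures __builtin_return_address(0) exactly once and passes it down to a *_caller variant, which in turn hands it to get_vm_area_caller()/__get_vm_area_caller(), so /proc/vmallocinfo can attribute the mapping to the driver that requested it rather than to ioremap itself. Below is a minimal standalone sketch of that pattern; the names (tracked_alloc, tracked_alloc_caller, struct tracked) are illustrative only and not part of this patch or the kernel API.

/*
 * Standalone sketch of the caller-tracking pattern used by the patch.
 * The public wrapper records __builtin_return_address(0) once and passes
 * it to a *_caller variant, the same way ioremap()/__ioremap() now pass
 * their caller down to __ioremap_caller() and get_vm_area_caller().
 * All names here are illustrative; requires GCC or clang.
 */
#include <stdio.h>
#include <stdlib.h>

struct tracked {
	void *caller;	/* return address of the original requester */
	size_t size;	/* size of the tracked allocation */
	void *mem;	/* the allocation itself */
};

/* Core implementation: takes the caller explicitly, like __ioremap_caller(). */
static struct tracked *tracked_alloc_caller(size_t size, void *caller)
{
	struct tracked *t = malloc(sizeof(*t));

	if (!t)
		return NULL;
	t->mem = malloc(size);
	if (!t->mem) {
		free(t);
		return NULL;
	}
	t->caller = caller;
	t->size = size;
	return t;
}

/* Public wrapper: captures its own caller, like ioremap()/ioremap_flags(). */
struct tracked *tracked_alloc(size_t size)
{
	/* Capture the address we were called from, once, at the entry point. */
	return tracked_alloc_caller(size, __builtin_return_address(0));
}

/* Report who requested the allocation, analogous to /proc/vmallocinfo. */
static void tracked_report(const struct tracked *t)
{
	printf("%zu bytes requested from %p\n", t->size, t->caller);
}

int main(void)
{
	struct tracked *t = tracked_alloc(64);

	if (t) {
		tracked_report(t);
		free(t->mem);
		free(t);
	}
	return 0;
}

The design point mirrored from the patch is that the caller is captured at the outermost exported function; an intermediate wrapper (such as __ioremap() forwarding to __ioremap_caller()) would otherwise replace the useful caller with its own address.
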
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/pgtable_32.c	14
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	25
2 files changed, 28 insertions, 11 deletions
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 58bcaeba728d..0f8c4371dfab 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -129,7 +129,8 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
-	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
+	return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED,
+				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap);
 
@@ -143,13 +144,20 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
 	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
 	flags &= ~(_PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC);
 
-	return __ioremap(addr, size, flags);
+	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_flags);
 
 void __iomem *
 __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
+	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+}
+
+void __iomem *
+__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
+		 void *caller)
+{
 	unsigned long v, i;
 	phys_addr_t p;
 	int err;
@@ -212,7 +220,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 
 	if (mem_init_done) {
 		struct vm_struct *area;
-		area = get_vm_area(size, VM_IOREMAP);
+		area = get_vm_area_caller(size, VM_IOREMAP, caller);
 		if (area == 0)
 			return NULL;
 		v = (unsigned long) area->addr;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 365e61ae5dbc..bfa7db6b2fd5 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -144,8 +144,8 @@ void __iounmap_at(void *ea, unsigned long size)
 	unmap_kernel_range((unsigned long)ea, size);
 }
 
-void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
-			 unsigned long flags)
+void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
+				unsigned long flags, void *caller)
 {
 	phys_addr_t paligned;
 	void __iomem *ret;
@@ -168,8 +168,9 @@ void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
 	if (mem_init_done) {
 		struct vm_struct *area;
 
-		area = __get_vm_area(size, VM_IOREMAP,
-				     ioremap_bot, IOREMAP_END);
+		area = __get_vm_area_caller(size, VM_IOREMAP,
+					    ioremap_bot, IOREMAP_END,
+					    caller);
 		if (area == NULL)
 			return NULL;
 		ret = __ioremap_at(paligned, area->addr, size, flags);
@@ -186,19 +187,27 @@ void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
 	return ret;
 }
 
+void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
+			 unsigned long flags)
+{
+	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+}
 
 void __iomem * ioremap(phys_addr_t addr, unsigned long size)
 {
 	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
+	void *caller = __builtin_return_address(0);
 
 	if (ppc_md.ioremap)
-		return ppc_md.ioremap(addr, size, flags);
-	return __ioremap(addr, size, flags);
+		return ppc_md.ioremap(addr, size, flags, caller);
+	return __ioremap_caller(addr, size, flags, caller);
 }
 
 void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
 			     unsigned long flags)
 {
+	void *caller = __builtin_return_address(0);
+
 	/* writeable implies dirty for kernel addresses */
 	if (flags & _PAGE_RW)
 		flags |= _PAGE_DIRTY;
@@ -207,8 +216,8 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
 	flags &= ~(_PAGE_USER | _PAGE_EXEC);
 
 	if (ppc_md.ioremap)
-		return ppc_md.ioremap(addr, size, flags);
-	return __ioremap(addr, size, flags);
+		return ppc_md.ioremap(addr, size, flags, caller);
+	return __ioremap_caller(addr, size, flags, caller);
 }