Diffstat (limited to 'arch/sparc')
37 files changed, 361 insertions, 143 deletions
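A recurring change in the hunks below is the rename of sparc's atomic_add_unless() to __atomic_add_unless(), which now returns the old counter value instead of a boolean. The boolean-flavoured wrappers (atomic_add_unless() itself and atomic_inc_not_zero()) are supplied by the generic <linux/atomic.h> layer from this point on, which is also why the per-arch atomic_inc_not_zero() defines and the asm-generic/atomic-long.h includes drop out. Roughly, the generic layer composes them like this (an illustrative C sketch, not the sparc code itself):

        /* Sketch of the generic wrappers built on the arch-provided hook. */
        static inline int atomic_add_unless(atomic_t *v, int a, int u)
        {
                /* __atomic_add_unless() returns the value seen before the add;
                 * the add was only performed if that value was != u.
                 */
                return __atomic_add_unless(v, a, u) != u;
        }

        #define atomic_inc_not_zero(v)  atomic_add_unless((v), 1, 0)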
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 253986bd6bb6..1074dddcb104 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -41,6 +41,7 @@ config SPARC64 select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_KRETPROBES select HAVE_KPROBES + select HAVE_RCU_TABLE_FREE if SMP select HAVE_MEMBLOCK select HAVE_SYSCALL_WRAPPERS select HAVE_DYNAMIC_FTRACE @@ -81,10 +82,6 @@ config IOMMU_HELPER bool default y if SPARC64 -config QUICKLIST - bool - default y if SPARC64 - config STACKTRACE_SUPPORT bool default y if SPARC64 diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 7ae128b19d3f..5c3c8b69884d 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -22,7 +22,7 @@ extern int __atomic_add_return(int, atomic_t *); extern int atomic_cmpxchg(atomic_t *, int, int); #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -extern int atomic_add_unless(atomic_t *, int, int); +extern int __atomic_add_unless(atomic_t *, int, int); extern void atomic_set(atomic_t *, int); #define atomic_read(v) (*(volatile int *)&(v)->counter) @@ -52,7 +52,6 @@ extern void atomic_set(atomic_t *, int); #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) /* This is the old 24-bit implementation. It's still used internally * by some sparc-specific code, notably the semaphore implementation. @@ -161,5 +160,4 @@ static inline int __atomic24_sub(int i, atomic24_t *v) #endif /* !(__KERNEL__) */ -#include <asm-generic/atomic-long.h> #endif /* !(__ARCH_SPARC_ATOMIC__) */ diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index bdb2ff880bdd..9f421df46aec 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -70,7 +70,7 @@ extern long atomic64_sub_ret(long, atomic64_t *); #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -82,10 +82,9 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) break; c = old; } - return c != (u); + return c; } -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic64_cmpxchg(v, o, n) \ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) @@ -114,5 +113,4 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include <asm-generic/atomic-long.h> #endif /* !(__ARCH_SPARC64_ATOMIC__) */ diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h index 38e9aa1b2cea..325e295d60de 100644 --- a/arch/sparc/include/asm/bitops_64.h +++ b/arch/sparc/include/asm/bitops_64.h @@ -91,10 +91,7 @@ static inline unsigned int __arch_hweight8(unsigned int w) #include <asm-generic/bitops/le.h> -#define ext2_set_bit_atomic(lock,nr,addr) \ - test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr)) -#define ext2_clear_bit_atomic(lock,nr,addr) \ - test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr)) +#include <asm-generic/bitops/ext2-atomic-setbit.h> #endif /* __KERNEL__ */ diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h index e67880381b84..cfa9cd2e5519 100644 --- 
a/arch/sparc/include/asm/elf_64.h +++ b/arch/sparc/include/asm/elf_64.h @@ -186,7 +186,7 @@ static inline unsigned int sparc64_elf_hwcap(void) return cap; } -#define ELF_HWCAP sparc64_elf_hwcap(); +#define ELF_HWCAP sparc64_elf_hwcap() /* This yields a string that ld.so will use to load implementation specific libraries for optimization. This is more specific in diff --git a/arch/sparc/include/asm/irqflags_32.h b/arch/sparc/include/asm/irqflags_32.h index d4d0711de0f9..14848909e0de 100644 --- a/arch/sparc/include/asm/irqflags_32.h +++ b/arch/sparc/include/asm/irqflags_32.h @@ -18,7 +18,7 @@ extern void arch_local_irq_restore(unsigned long); extern unsigned long arch_local_irq_save(void); extern void arch_local_irq_enable(void); -static inline unsigned long arch_local_save_flags(void) +static inline notrace unsigned long arch_local_save_flags(void) { unsigned long flags; @@ -26,17 +26,17 @@ static inline unsigned long arch_local_save_flags(void) return flags; } -static inline void arch_local_irq_disable(void) +static inline notrace void arch_local_irq_disable(void) { arch_local_irq_save(); } -static inline bool arch_irqs_disabled_flags(unsigned long flags) +static inline notrace bool arch_irqs_disabled_flags(unsigned long flags) { return (flags & PSR_PIL) != 0; } -static inline bool arch_irqs_disabled(void) +static inline notrace bool arch_irqs_disabled(void) { return arch_irqs_disabled_flags(arch_local_save_flags()); } diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h index aab969c82c2b..23cd27f6beb4 100644 --- a/arch/sparc/include/asm/irqflags_64.h +++ b/arch/sparc/include/asm/irqflags_64.h @@ -14,7 +14,7 @@ #ifndef __ASSEMBLY__ -static inline unsigned long arch_local_save_flags(void) +static inline notrace unsigned long arch_local_save_flags(void) { unsigned long flags; @@ -26,7 +26,7 @@ static inline unsigned long arch_local_save_flags(void) return flags; } -static inline void arch_local_irq_restore(unsigned long flags) +static inline notrace void arch_local_irq_restore(unsigned long flags) { __asm__ __volatile__( "wrpr %0, %%pil" @@ -36,7 +36,7 @@ static inline void arch_local_irq_restore(unsigned long flags) ); } -static inline void arch_local_irq_disable(void) +static inline notrace void arch_local_irq_disable(void) { __asm__ __volatile__( "wrpr %0, %%pil" @@ -46,7 +46,7 @@ static inline void arch_local_irq_disable(void) ); } -static inline void arch_local_irq_enable(void) +static inline notrace void arch_local_irq_enable(void) { __asm__ __volatile__( "wrpr 0, %%pil" @@ -56,17 +56,17 @@ static inline void arch_local_irq_enable(void) ); } -static inline int arch_irqs_disabled_flags(unsigned long flags) +static inline notrace int arch_irqs_disabled_flags(unsigned long flags) { return (flags > 0); } -static inline int arch_irqs_disabled(void) +static inline notrace int arch_irqs_disabled(void) { return arch_irqs_disabled_flags(arch_local_save_flags()); } -static inline unsigned long arch_local_irq_save(void) +static inline notrace unsigned long arch_local_irq_save(void) { unsigned long flags, tmp; diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h index 862e3ce92b15..02939abd356c 100644 --- a/arch/sparc/include/asm/pci_32.h +++ b/arch/sparc/include/asm/pci_32.h @@ -42,9 +42,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, } #endif -struct device_node; -extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); - #endif /* __KERNEL__ */ #ifndef CONFIG_LEON_PCI diff --git 
a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index 948b686ec089..2614d96141c9 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h @@ -91,9 +91,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) return PCI_IRQ_NONE; } -struct device_node; -extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); - #define HAVE_ARCH_PCI_RESOURCE_TO_USER extern void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h index 4e5e0878144f..40b2d7a7023d 100644 --- a/arch/sparc/include/asm/pgalloc_64.h +++ b/arch/sparc/include/asm/pgalloc_64.h @@ -5,7 +5,6 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/slab.h> -#include <linux/quicklist.h> #include <asm/spitfire.h> #include <asm/cpudata.h> @@ -14,71 +13,114 @@ /* Page table allocation/freeing. */ +extern struct kmem_cache *pgtable_cache; + static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - return quicklist_alloc(0, GFP_KERNEL, NULL); + return kmem_cache_alloc(pgtable_cache, GFP_KERNEL); } static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - quicklist_free(0, NULL, pgd); + kmem_cache_free(pgtable_cache, pgd); } #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) { - return quicklist_alloc(0, GFP_KERNEL, NULL); + return kmem_cache_alloc(pgtable_cache, + GFP_KERNEL|__GFP_REPEAT); } static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { - quicklist_free(0, NULL, pmd); + kmem_cache_free(pgtable_cache, pmd); } static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - return quicklist_alloc(0, GFP_KERNEL, NULL); + return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); } static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) { struct page *page; - void *pg; + pte_t *pte; - pg = quicklist_alloc(0, GFP_KERNEL, NULL); - if (!pg) + pte = pte_alloc_one_kernel(mm, address); + if (!pte) return NULL; - page = virt_to_page(pg); + page = virt_to_page(pte); pgtable_page_ctor(page); return page; } static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { - quicklist_free(0, NULL, pte); + free_page((unsigned long)pte); } static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) { pgtable_page_dtor(ptepage); - quicklist_free_page(0, NULL, ptepage); + __free_page(ptepage); } - #define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE) #define pmd_populate(MM,PMD,PTE_PAGE) \ pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE)) #define pmd_pgtable(pmd) pmd_page(pmd) -static inline void check_pgt_cache(void) +#define check_pgt_cache() do { } while (0) + +static inline void pgtable_free(void *table, bool is_page) +{ + if (is_page) + free_page((unsigned long)table); + else + kmem_cache_free(pgtable_cache, table); +} + +#ifdef CONFIG_SMP + +struct mmu_gather; +extern void tlb_remove_table(struct mmu_gather *, void *); + +static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page) +{ + unsigned long pgf = (unsigned long)table; + if (is_page) + pgf |= 0x1UL; + tlb_remove_table(tlb, (void *)pgf); +} + +static inline void __tlb_remove_table(void *_table) +{ + void *table = (void *)((unsigned long)_table & ~0x1UL); + bool is_page = false; + + if ((unsigned long)_table & 0x1UL) + is_page = true; + pgtable_free(table, is_page); +} 
+#else /* CONFIG_SMP */ +static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page) { - quicklist_trim(0, NULL, 25, 16); + pgtable_free(table, is_page); +} +#endif /* !CONFIG_SMP */ + +static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage, + unsigned long address) +{ + pgtable_page_dtor(ptepage); + pgtable_free_tlb(tlb, page_address(ptepage), true); } -#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) -#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) +#define __pmd_free_tlb(tlb, pmd, addr) \ + pgtable_free_tlb(tlb, pmd, false) #endif /* _SPARC64_PGALLOC_H */ diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 1e03c5a6b4f7..adf89329af59 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -95,6 +95,10 @@ /* PTE bits which are the same in SUN4U and SUN4V format. */ #define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */ #define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/ +#define _PAGE_SPECIAL _AC(0x0200000000000000,UL) /* Special page */ + +/* Advertise support for _PAGE_SPECIAL */ +#define __HAVE_ARCH_PTE_SPECIAL /* SUN4U pte bits... */ #define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */ @@ -104,6 +108,7 @@ #define _PAGE_NFO_4U _AC(0x1000000000000000,UL) /* No Fault Only */ #define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */ #define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ +#define _PAGE_SPECIAL_4U _AC(0x0200000000000000,UL) /* Special page */ #define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */ #define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ #define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ @@ -133,6 +138,7 @@ #define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd) */ #define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */ #define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */ +#define _PAGE_SPECIAL_4V _AC(0x0200000000000000,UL) /* Special page */ #define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */ #define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */ #define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */ @@ -302,10 +308,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot) : "=r" (mask), "=r" (tmp) : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U | - _PAGE_SZBITS_4U), + _PAGE_SZBITS_4U | _PAGE_SPECIAL), "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V | - _PAGE_SZBITS_4V)); + _PAGE_SZBITS_4V | _PAGE_SPECIAL)); return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); } @@ -502,6 +508,7 @@ static inline pte_t pte_mkyoung(pte_t pte) static inline pte_t pte_mkspecial(pte_t pte) { + pte_val(pte) |= _PAGE_SPECIAL; return pte; } @@ -607,9 +614,9 @@ static inline unsigned long pte_present(pte_t pte) return val; } -static inline int pte_special(pte_t pte) +static inline unsigned long pte_special(pte_t pte) { - return 0; + return pte_val(pte) & _PAGE_SPECIAL; } #define pmd_set(pmdp, ptep) \ diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h index 56bbaadef646..edd3d3cde460 100644 --- a/arch/sparc/include/asm/prom.h +++ b/arch/sparc/include/asm/prom.h @@ -21,7 +21,7 @@ #include <linux/of_pdt.h> #include 
<linux/proc_fs.h> #include <linux/mutex.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 2 #define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h index c7ad3fe2b252..a0e1bcf843a1 100644 --- a/arch/sparc/include/asm/ptrace.h +++ b/arch/sparc/include/asm/ptrace.h @@ -205,6 +205,7 @@ do { current_thread_info()->syscall_noerror = 1; \ } while (0) #define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV)) #define instruction_pointer(regs) ((regs)->tpc) +#define instruction_pointer_set(regs, val) ((regs)->tpc = (val)) #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP]) #define regs_return_value(regs) ((regs)->u_regs[UREG_I0]) #ifdef CONFIG_SMP @@ -212,7 +213,6 @@ extern unsigned long profile_pc(struct pt_regs *); #else #define profile_pc(regs) instruction_pointer(regs) #endif -extern void show_regs(struct pt_regs *); #endif /* (__KERNEL__) */ #else /* __ASSEMBLY__ */ @@ -256,7 +256,6 @@ static inline bool pt_regs_clear_syscall(struct pt_regs *regs) #define instruction_pointer(regs) ((regs)->pc) #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP]) unsigned long profile_pc(struct pt_regs *); -extern void show_regs(struct pt_regs *); #endif /* (__KERNEL__) */ #else /* (!__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h index 093f10843ff2..01c51c704341 100644 --- a/arch/sparc/include/asm/smp_32.h +++ b/arch/sparc/include/asm/smp_32.h @@ -22,7 +22,7 @@ #include <asm/ptrace.h> #include <asm/asi.h> -#include <asm/atomic.h> +#include <linux/atomic.h> /* * Private routines/data diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h index 20bca8950710..29862a9e9065 100644 --- a/arch/sparc/include/asm/smp_64.h +++ b/arch/sparc/include/asm/smp_64.h @@ -27,7 +27,7 @@ */ #include <linux/bitops.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/percpu.h> DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index 9fe08a1ea6c6..f445e98463e6 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -293,7 +293,7 @@ maybe_smp4m_msg: WRITE_PAUSE wr %l4, PSR_ET, %psr WRITE_PAUSE - sll %o3, 28, %o2 ! shift for simpler checks below + srl %o3, 28, %o2 ! 
shift for simpler checks below maybe_smp4m_msg_check_single: andcc %o2, 0x1, %g0 beq,a maybe_smp4m_msg_check_mask diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 1c9c80a1a86a..6ffccd6e0156 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -228,7 +228,7 @@ _sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz) } pa &= PAGE_MASK; - sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1); + sparc_mapiorange(bus, pa, res->start, resource_size(res)); return (void __iomem *)(unsigned long)(res->start + offset); } @@ -240,7 +240,7 @@ static void _sparc_free_io(struct resource *res) { unsigned long plen; - plen = res->end - res->start + 1; + plen = resource_size(res); BUG_ON((plen & (PAGE_SIZE-1)) != 0); sparc_unmapiorange(res->start, plen); release_resource(res); @@ -331,9 +331,9 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p, } n = PAGE_ALIGN(n); - if ((res->end-res->start)+1 != n) { + if (resource_size(res) != n) { printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n", - (long)((res->end-res->start)+1), n); + (long)resource_size(res), n); return; } @@ -504,9 +504,9 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p, } n = PAGE_ALIGN(n); - if ((res->end-res->start)+1 != n) { + if (resource_size(res) != n) { printk("pci_free_consistent: region 0x%lx asked 0x%lx\n", - (long)((res->end-res->start)+1), (long)n); + (long)resource_size(res), (long)n); return; } diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 4e78862d12fd..0dd8422a469c 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -26,7 +26,7 @@ #include <asm/ptrace.h> #include <asm/processor.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/system.h> #include <asm/irq.h> #include <asm/io.h> diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index fe8fb44c609c..1210fde18740 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c @@ -28,7 +28,7 @@ #include <asm/tlbflush.h> #include <asm/ptrace.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/irq_regs.h> #include <asm/traps.h> diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c index 99ba5baa9497..da0c6c70ccb2 100644 --- a/arch/sparc/kernel/module.c +++ b/arch/sparc/kernel/module.c @@ -68,12 +68,6 @@ void *module_alloc(unsigned long size) return ret; } -/* Free memory returned from module_core_alloc/module_init_alloc */ -void module_free(struct module *mod, void *module_region) -{ - vfree(module_region); -} - /* Make generic code ignore STT_REGISTER dummy undefined symbols. 
*/ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, @@ -107,17 +101,6 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, return 0; } -int apply_relocate(Elf_Shdr *sechdrs, - const char *strtab, - unsigned int symindex, - unsigned int relsec, - struct module *me) -{ - printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n", - me->name); - return -ENOEXEC; -} - int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, @@ -239,15 +222,4 @@ int module_finalize(const Elf_Ehdr *hdr, return 0; } -#else -int module_finalize(const Elf_Ehdr *hdr, - const Elf_Shdr *sechdrs, - struct module *me) -{ - return 0; -} #endif /* CONFIG_SPARC64 */ - -void module_arch_cleanup(struct module *mod) -{ -} diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 713dc91020a6..1e94f946570e 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -284,7 +284,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, dev->sysdata = node; dev->dev.parent = bus->bridge; dev->dev.bus = &pci_bus_type; - dev->dev.of_node = node; + dev->dev.of_node = of_node_get(node); dev->devfn = devfn; dev->multifunction = 0; /* maybe a lie? */ set_pcie_port_type(dev); @@ -820,11 +820,9 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc unsigned long space_size, user_offset, user_size; if (mmap_state == pci_mmap_io) { - space_size = (pbm->io_space.end - - pbm->io_space.start) + 1; + space_size = resource_size(&pbm->io_space); } else { - space_size = (pbm->mem_space.end - - pbm->mem_space.start) + 1; + space_size = resource_size(&pbm->mem_space); } /* Make sure the request is in range. */ @@ -1021,12 +1019,6 @@ void arch_teardown_msi_irq(unsigned int irq) } #endif /* !(CONFIG_PCI_MSI) */ -struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) -{ - return pdev->dev.of_node; -} -EXPORT_SYMBOL(pci_device_to_OF_node); - static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) { struct pci_dev *ali_isa_bridge; diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index 948601a066ff..a19f04195478 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c @@ -885,14 +885,6 @@ int pcibios_assign_resource(struct pci_dev *pdev, int resource) return -ENXIO; } -struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) -{ - struct pcidev_cookie *pc = pdev->sysdata; - - return pc->prom_node; -} -EXPORT_SYMBOL(pci_device_to_OF_node); - /* * This probably belongs here rather than ioport.c because * we do not want this crud linked into SBus kernels. 
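The ioport.c and pci.c hunks above replace the open-coded `(res->end - res->start) + 1` arithmetic with resource_size(). The helper, from <linux/ioport.h>, performs that inclusive-range computation in one place so callers cannot forget the +1; its shape is roughly:

        /* Sketch of the <linux/ioport.h> helper; resource ranges are inclusive. */
        static inline resource_size_t resource_size(const struct resource *res)
        {
                return res->end - res->start + 1;
        }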
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 2cb0e1c001e2..171e8d84dc3f 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -22,7 +22,7 @@ #include <asm/stacktrace.h> #include <asm/cpudata.h> #include <asm/uaccess.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/nmi.h> #include <asm/pcr.h> @@ -246,6 +246,20 @@ static const cache_map_t ultra3_cache_map = { [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, }; static const struct sparc_pmu ultra3_pmu = { @@ -361,6 +375,20 @@ static const cache_map_t niagara1_cache_map = { [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, }; static const struct sparc_pmu niagara1_pmu = { @@ -473,6 +501,20 @@ static const cache_map_t niagara2_cache_map = { [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, }; static const struct sparc_pmu niagara2_pmu = { @@ -1277,7 +1319,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, if (!sparc_perf_event_set_period(event, hwc, idx)) continue; - if (perf_event_overflow(event, 1, &data, regs)) + if (perf_event_overflow(event, &data, regs)) sparc_pmu_stop(event, 0); } diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index 21b125341bf7..f671e7fd6ddc 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c @@ -22,7 +22,7 @@ #include <linux/delay.h> #include <asm/ptrace.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/irq.h> #include <asm/page.h> diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 99cb17251bb5..4a442c32e117 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -28,7 +28,7 @@ #include <asm/head.h> #include <asm/ptrace.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/cpudata.h> diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index 4491f4cb2695..7efbb2f9e77f 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ -247,7 +247,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) unsigned long addr = compute_effective_address(regs, insn); int err; - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 
1, regs, addr); switch (dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), @@ -338,7 +338,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) } addr = compute_effective_address(regs, insn); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch(dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index b2b019ea8caa..35cff1673aa4 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c @@ -317,7 +317,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch (asi) { case ASI_NL: case ASI_AIUPL: @@ -384,7 +384,7 @@ int handle_popc(u32 insn, struct pt_regs *regs) int ret, i, rd = ((insn >> 25) & 0x1f); int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (insn & 0x2000) { maybe_flush_windows(0, 0, rd, from_kernel); value = sign_extend_imm13(insn); @@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) int asi = decode_asi(insn, regs); int flag = (freg < 32) ? FPRS_DL : FPRS_DU; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); save_and_clear_fpu(); current_thread_info()->xfsr[0] &= ~0x1c000; @@ -554,7 +554,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs) int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; unsigned long *reg; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); maybe_flush_windows(0, 0, rd, from_kernel); reg = fetch_reg_addr(rd, regs); @@ -586,7 +586,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("lddfmna from kernel", regs); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { @@ -647,7 +647,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("stdfmna from kernel", regs); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c index 36357717d691..32b626c9d815 100644 --- a/arch/sparc/kernel/visemul.c +++ b/arch/sparc/kernel/visemul.c @@ -802,7 +802,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn) BUG_ON(regs->tstate & TSTATE_PRIV); - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c index d3c7a12ad879..1a371f8ae0b0 100644 --- a/arch/sparc/lib/atomic32.c +++ b/arch/sparc/lib/atomic32.c @@ -7,7 +7,7 @@ * Based on asm-parisc/atomic.h Copyright (C) 
2000 Philipp Rumpf */ -#include <asm/atomic.h> +#include <linux/atomic.h> #include <linux/spinlock.h> #include <linux/module.h> diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c index a3fccde894ec..aa4d55b0bdf0 100644 --- a/arch/sparc/math-emu/math_32.c +++ b/arch/sparc/math-emu/math_32.c @@ -164,7 +164,7 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt) int retcode = 0; /* assume all succeed */ unsigned long insn; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); #ifdef DEBUG_MATHEMU printk("In do_mathemu()... pc is %08lx\n", regs->pc); diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c index 56d2c44747b8..e575bd2fe381 100644 --- a/arch/sparc/math-emu/math_64.c +++ b/arch/sparc/math-emu/math_64.c @@ -184,7 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f) if (tstate & TSTATE_PRIV) die_if_kernel("unfinished/unimplemented FPop from kernel", regs); - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile index 79836a7dd00c..e3cda21b5ee9 100644 --- a/arch/sparc/mm/Makefile +++ b/arch/sparc/mm/Makefile @@ -4,7 +4,7 @@ asflags-y := -ansi ccflags-y := -Werror -obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o +obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o obj-y += fault_$(BITS).o obj-y += init_$(BITS).o obj-$(CONFIG_SPARC32) += loadmmu.o diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 7543ddbdadb2..aa1c1b1ce5cc 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -251,7 +251,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, if (in_atomic() || !mm) goto no_context; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); down_read(&mm->mmap_sem); @@ -301,12 +301,10 @@ good_area: } if (fault & VM_FAULT_MAJOR) { current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, - regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, - regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } up_read(&mm->mmap_sem); return; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index f92ce56a8b22..504c0622f729 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -325,7 +325,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) if (in_atomic() || !mm) goto intr_or_no_mm; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); if (!down_read_trylock(&mm->mmap_sem)) { if ((regs->tstate & TSTATE_PRIV) && @@ -433,12 +433,10 @@ good_area: } if (fault & VM_FAULT_MAJOR) { current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, - regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, - regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } up_read(&mm->mmap_sem); diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c new file mode 100644 index 000000000000..a986b5d05712 --- 
/dev/null +++ b/arch/sparc/mm/gup.c @@ -0,0 +1,181 @@ +/* + * Lockless get_user_pages_fast for sparc, cribbed from powerpc + * + * Copyright (C) 2008 Nick Piggin + * Copyright (C) 2008 Novell Inc. + */ + +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/vmstat.h> +#include <linux/pagemap.h> +#include <linux/rwsem.h> +#include <asm/pgtable.h> + +/* + * The performance critical leaf functions are made noinline otherwise gcc + * inlines everything into a single function which results in too much + * register pressure. + */ +static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, + unsigned long end, int write, struct page **pages, int *nr) +{ + unsigned long mask, result; + pte_t *ptep; + + if (tlb_type == hypervisor) { + result = _PAGE_PRESENT_4V|_PAGE_P_4V; + if (write) + result |= _PAGE_WRITE_4V; + } else { + result = _PAGE_PRESENT_4U|_PAGE_P_4U; + if (write) + result |= _PAGE_WRITE_4U; + } + mask = result | _PAGE_SPECIAL; + + ptep = pte_offset_kernel(&pmd, addr); + do { + struct page *page, *head; + pte_t pte = *ptep; + + if ((pte_val(pte) & mask) != result) + return 0; + VM_BUG_ON(!pfn_valid(pte_pfn(pte))); + + /* The hugepage case is simplified on sparc64 because + * we encode the sub-page pfn offsets into the + * hugepage PTEs. We could optimize this in the future + * use page_cache_add_speculative() for the hugepage case. + */ + page = pte_page(pte); + head = compound_head(page); + if (!page_cache_get_speculative(head)) + return 0; + if (unlikely(pte_val(pte) != pte_val(*ptep))) { + put_page(head); + return 0; + } + + pages[*nr] = page; + (*nr)++; + } while (ptep++, addr += PAGE_SIZE, addr != end); + + return 1; +} + +static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, + int write, struct page **pages, int *nr) +{ + unsigned long next; + pmd_t *pmdp; + + pmdp = pmd_offset(&pud, addr); + do { + pmd_t pmd = *pmdp; + + next = pmd_addr_end(addr, end); + if (pmd_none(pmd)) + return 0; + if (!gup_pte_range(pmd, addr, next, write, pages, nr)) + return 0; + } while (pmdp++, addr = next, addr != end); + + return 1; +} + +static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, + int write, struct page **pages, int *nr) +{ + unsigned long next; + pud_t *pudp; + + pudp = pud_offset(&pgd, addr); + do { + pud_t pud = *pudp; + + next = pud_addr_end(addr, end); + if (pud_none(pud)) + return 0; + if (!gup_pmd_range(pud, addr, next, write, pages, nr)) + return 0; + } while (pudp++, addr = next, addr != end); + + return 1; +} + +int get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages) +{ + struct mm_struct *mm = current->mm; + unsigned long addr, len, end; + unsigned long next; + pgd_t *pgdp; + int nr = 0; + + start &= PAGE_MASK; + addr = start; + len = (unsigned long) nr_pages << PAGE_SHIFT; + end = start + len; + + /* + * XXX: batch / limit 'nr', to avoid large irq off latency + * needs some instrumenting to determine the common sizes used by + * important workloads (eg. DB2), and whether limiting the batch size + * will decrease performance. + * + * It seems like we're in the clear for the moment. Direct-IO is + * the main guy that batches up lots of get_user_pages, and even + * they are limited to 64-at-a-time which is not so many. + */ + /* + * This doesn't prevent pagetable teardown, but does prevent + * the pagetables from being freed on sparc. + * + * So long as we atomically load page table pointers versus teardown, + * we can follow the address down to the the page and take a ref on it. 
+ */ + local_irq_disable(); + + pgdp = pgd_offset(mm, addr); + do { + pgd_t pgd = *pgdp; + + next = pgd_addr_end(addr, end); + if (pgd_none(pgd)) + goto slow; + if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) + goto slow; + } while (pgdp++, addr = next, addr != end); + + local_irq_enable(); + + VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT); + return nr; + + { + int ret; + +slow: + local_irq_enable(); + + /* Try to get the remaining pages with get_user_pages */ + start += nr << PAGE_SHIFT; + pages += nr; + + down_read(&mm->mmap_sem); + ret = get_user_pages(current, mm, start, + (end - start) >> PAGE_SHIFT, write, 0, pages, NULL); + up_read(&mm->mmap_sem); + + /* Have to be a bit careful with return values */ + if (nr > 0) { + if (ret < 0) + ret = nr; + else + ret += nr; + } + + return ret; + } +} diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c index c0e01297e64e..e485a6804998 100644 --- a/arch/sparc/mm/leon_mm.c +++ b/arch/sparc/mm/leon_mm.c @@ -226,7 +226,7 @@ void leon3_getCacheRegs(struct leon3_cacheregs *regs) * Leon2 and Leon3 differ in their way of telling cache information * */ -int leon_flush_needed(void) +int __init leon_flush_needed(void) { int flush_needed = -1; unsigned int ssize, sets; diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index a5f51b22fcbe..536412d8f416 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -236,6 +236,8 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign } } +struct kmem_cache *pgtable_cache __read_mostly; + static struct kmem_cache *tsb_caches[8] __read_mostly; static const char *tsb_cache_names[8] = { @@ -253,6 +255,15 @@ void __init pgtable_cache_init(void) { unsigned long i; + pgtable_cache = kmem_cache_create("pgtable_cache", + PAGE_SIZE, PAGE_SIZE, + 0, + _clear_page); + if (!pgtable_cache) { + prom_printf("pgtable_cache_init(): Could not create!\n"); + prom_halt(); + } + for (i = 0; i < 8; i++) { unsigned long size = 8192 << i; const char *name = tsb_cache_names[i]; |
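Two of the larger changes above are worth a closing note. Selecting HAVE_RCU_TABLE_FREE and rewriting pgalloc_64.h takes sparc64 off quicklists: PGDs and PMDs now come from the dedicated pgtable_cache kmem_cache, PTE pages from the page allocator, and on SMP a freed table is handed to tlb_remove_table() so it is only reclaimed once concurrent lockless walkers can no longer see it. Because both kinds of table are at least page- or cache-aligned, bit 0 of the pointer is free, and the patch uses it to record which allocator the table came from. A condensed restatement of that encoding, using the names from the hunk above:

        static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
        {
                unsigned long pgf = (unsigned long)table;

                if (is_page)
                        pgf |= 0x1UL;                   /* low bit tags "this was a full page" */
                tlb_remove_table(tlb, (void *)pgf);     /* deferred free once walkers are done */
        }

        static inline void __tlb_remove_table(void *_table)
        {
                bool is_page = (unsigned long)_table & 0x1UL;
                void *table  = (void *)((unsigned long)_table & ~0x1UL);

                pgtable_free(table, is_page);           /* free_page() or kmem_cache_free() */
        }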
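The other is the new arch/sparc/mm/gup.c, which gives sparc64 a lockless get_user_pages_fast(): the page tables are walked with interrupts disabled, relying on the deferred table freeing above to keep them from disappearing mid-walk, and falling back to the mmap_sem-protected get_user_pages() when a level is missing. A minimal, hypothetical caller of the interface as it exists in this kernel generation (pin_user_buffer and its error handling are illustrative, not part of the patch):

        #include <linux/mm.h>

        /* Hypothetical example: pin the pages backing a user buffer. */
        static int pin_user_buffer(unsigned long uaddr, int nr_pages, struct page **pages)
        {
                int got;

                /* write = 1: we intend to dirty the pages; the fast path may
                 * fall back to the slow get_user_pages() path internally.
                 */
                got = get_user_pages_fast(uaddr, nr_pages, 1, pages);
                if (got < 0)
                        return got;                     /* error before any page was pinned */
                if (got < nr_pages) {
                        while (got--)
                                put_page(pages[got]);   /* drop the partial pin */
                        return -EFAULT;
                }
                return 0;                               /* caller put_page()s each page when done */
        }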