86 files changed, 743 insertions, 545 deletions
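The bulk of this series converts ARM board code from the private struct meminfo bank array (removed from asm/setup.h below) to the generic memblock allocator: each machine fixup() callback loses its struct meminfo * argument and registers RAM directly with memblock_add(). As a rough, hedged sketch of that pattern only — the board name, addresses and sizes here are illustrative, not from any file in this diff — a converted fixup now looks like:

	/*
	 * Illustrative board fixup following the new two-argument
	 * prototype used throughout the mach-* changes below.
	 * "example_fixup" and the RAM layout are made up for this sketch.
	 */
	#include <linux/init.h>
	#include <linux/memblock.h>
	#include <linux/sizes.h>
	#include <asm/setup.h>

	static void __init example_fixup(struct tag *tags, char **cmdline)
	{
		/*
		 * Previously this would have filled mi->nr_banks and
		 * mi->bank[]; now the banks go straight into memblock.
		 */
		memblock_add(0xc0000000, SZ_8M);
		memblock_add(0xc1000000, SZ_8M);
	}

The function is wired up exactly as before, via the .fixup member of MACHINE_START(); only the callback signature and the memory registration mechanism change.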
diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt index 4bfb9ffbdbc1..38dc06d0a791 100644 --- a/Documentation/arm/memory.txt +++ b/Documentation/arm/memory.txt @@ -41,16 +41,9 @@ fffe8000 fffeffff DTCM mapping area for platforms with fffe0000 fffe7fff ITCM mapping area for platforms with ITCM mounted inside the CPU. -fff00000 fffdffff Fixmap mapping region. Addresses provided +ffc00000 ffdfffff Fixmap mapping region. Addresses provided by fix_to_virt() will be located here. -ffc00000 ffefffff DMA memory mapping region. Memory returned - by the dma_alloc_xxx functions will be - dynamically mapped here. - -ff000000 ffbfffff Reserved for future expansion of DMA - mapping region. - fee00000 feffffff Mapping of PCI I/O space. This is a static mapping within the vmalloc space. diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt index fe5cef8976cb..75ef91d08f3b 100644 --- a/Documentation/devicetree/bindings/arm/pmu.txt +++ b/Documentation/devicetree/bindings/arm/pmu.txt @@ -8,6 +8,7 @@ Required properties: - compatible : should be one of "arm,armv8-pmuv3" + "arm,cortex-a17-pmu" "arm,cortex-a15-pmu" "arm,cortex-a12-pmu" "arm,cortex-a9-pmu" diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index c9d7196fd0bd..8615dfa604c4 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -165,12 +165,9 @@ config TRACE_IRQFLAGS_SUPPORT bool default y -config RWSEM_GENERIC_SPINLOCK - bool - default y - config RWSEM_XCHGADD_ALGORITHM bool + default y config ARCH_HAS_ILOG2_U32 bool @@ -1105,11 +1102,6 @@ source "arch/arm/firmware/Kconfig" source arch/arm/mm/Kconfig -config ARM_NR_BANKS - int - default 16 if ARCH_EP93XX - default 8 - config IWMMXT bool "Enable iWMMXt support" depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B @@ -2244,6 +2236,11 @@ config ARCH_SUSPEND_POSSIBLE config ARM_CPU_SUSPEND def_bool PM_SLEEP +config ARCH_HIBERNATION_POSSIBLE + bool + depends on MMU + default y if ARCH_SUSPEND_POSSIBLE + endmenu source "net/Kconfig" diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c index d1153c8a765a..9448aa0c6686 100644 --- a/arch/arm/boot/compressed/atags_to_fdt.c +++ b/arch/arm/boot/compressed/atags_to_fdt.c @@ -7,6 +7,8 @@ #define do_extend_cmdline 0 #endif +#define NR_BANKS 16 + static int node_offset(void *fdt, const char *node_path) { int offset = fdt_path_offset(fdt, node_path); diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c index f01c0ee0c87e..490f3dced749 100644 --- a/arch/arm/common/bL_switcher.c +++ b/arch/arm/common/bL_switcher.c @@ -433,8 +433,12 @@ static void bL_switcher_restore_cpus(void) { int i; - for_each_cpu(i, &bL_switcher_removed_logical_cpus) - cpu_up(i); + for_each_cpu(i, &bL_switcher_removed_logical_cpus) { + struct device *cpu_dev = get_cpu_device(i); + int ret = device_online(cpu_dev); + if (ret) + dev_err(cpu_dev, "switcher: unable to restore CPU\n"); + } } static int bL_switcher_halve_cpus(void) @@ -521,7 +525,7 @@ static int bL_switcher_halve_cpus(void) continue; } - ret = cpu_down(i); + ret = device_offline(get_cpu_device(i)); if (ret) { bL_switcher_restore_cpus(); return ret; diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c index 86fd60fefbc9..f91136ab447e 100644 --- a/arch/arm/common/mcpm_entry.c +++ b/arch/arm/common/mcpm_entry.c @@ -106,14 +106,14 @@ void mcpm_cpu_power_down(void) BUG(); } -int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster) +int 
mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster) { int ret; - if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish)) + if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown)) return -EUNATCH; - ret = platform_ops->power_down_finish(cpu, cluster); + ret = platform_ops->wait_for_powerdown(cpu, cluster); if (ret) pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n", __func__, cpu, cluster, ret); diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c index 177251a4dd9a..92e54d7c6f46 100644 --- a/arch/arm/common/mcpm_platsmp.c +++ b/arch/arm/common/mcpm_platsmp.c @@ -62,7 +62,7 @@ static int mcpm_cpu_kill(unsigned int cpu) cpu_to_pcpu(cpu, &pcpu, &pcluster); - return !mcpm_cpu_power_down_finish(pcpu, pcluster); + return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster); } static int mcpm_cpu_disable(unsigned int cpu) diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 23e728ecf8ab..f5a357601983 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -21,6 +21,7 @@ generic-y += parport.h generic-y += poll.h generic-y += preempt.h generic-y += resource.h +generic-y += rwsem.h generic-y += sections.h generic-y += segment.h generic-y += sembuf.h diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index b974184f9941..57f0584e8d97 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -312,7 +312,7 @@ * you cannot return to the original mode. */ .macro safe_svcmode_maskall reg:req -#if __LINUX_ARM_ARCH__ >= 6 +#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M) mrs \reg , cpsr eor \reg, \reg, #HYP_MODE tst \reg, #MODE_MASK diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 8b8b61685a34..fd43f7f55b70 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -212,7 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *, static inline void __flush_icache_all(void) { __flush_icache_preferred(); - dsb(); + dsb(ishst); } /* @@ -487,4 +487,6 @@ int set_memory_rw(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); +void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, + void *kaddr, unsigned long len); #endif diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index 6493802f880a..c3f11524f10c 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h @@ -42,24 +42,23 @@ #ifndef __ASSEMBLY__ #if __LINUX_ARM_ARCH__ >= 4 -#define vectors_high() (cr_alignment & CR_V) +#define vectors_high() (get_cr() & CR_V) #else #define vectors_high() (0) #endif #ifdef CONFIG_CPU_CP15 -extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ extern unsigned long cr_alignment; /* defined in entry-armv.S */ -static inline unsigned int get_cr(void) +static inline unsigned long get_cr(void) { - unsigned int val; + unsigned long val; asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc"); return val; } -static inline void set_cr(unsigned int val) +static inline void set_cr(unsigned long val) { asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" : : "r" (val) : "cc"); @@ -80,10 +79,6 @@ static inline void set_auxcr(unsigned int val) isb(); } -#ifndef CONFIG_SMP -extern void adjust_cr(unsigned long mask, unsigned long set); -#endif - #define CPACC_FULL(n) (3 << (n * 2)) #define CPACC_SVC(n) (1 << (n * 2)) 
#define CPACC_DISABLE(n) (0 << (n * 2)) @@ -106,13 +101,17 @@ static inline void set_copro_access(unsigned int val) #else /* ifdef CONFIG_CPU_CP15 */ /* - * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the - * minds of the developers). Yielding 0 for machines without a cp15 (and making - * it read-only) is fine for most cases and saves quite some #ifdeffery. + * cr_alignment is tightly coupled to cp15 (at least in the minds of the + * developers). Yielding 0 for machines without a cp15 (and making it + * read-only) is fine for most cases and saves quite some #ifdeffery. */ -#define cr_no_alignment UL(0) #define cr_alignment UL(0) +static inline unsigned long get_cr(void) +{ + return 0; +} + #endif /* ifdef CONFIG_CPU_CP15 / else */ #endif /* ifndef __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 4764344367d4..8c2b7321a478 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -72,6 +72,7 @@ #define ARM_CPU_PART_CORTEX_A15 0xC0F0 #define ARM_CPU_PART_CORTEX_A7 0xC070 #define ARM_CPU_PART_CORTEX_A12 0xC0D0 +#define ARM_CPU_PART_CORTEX_A17 0xC0E0 #define ARM_CPU_XSCALE_ARCH_MASK 0xe000 #define ARM_CPU_XSCALE_ARCH_V1 0x2000 diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h index bbae919bceb4..74124b0d0d79 100644 --- a/arch/arm/include/asm/fixmap.h +++ b/arch/arm/include/asm/fixmap.h @@ -1,24 +1,11 @@ #ifndef _ASM_FIXMAP_H #define _ASM_FIXMAP_H -/* - * Nothing too fancy for now. - * - * On ARM we already have well known fixed virtual addresses imposed by - * the architecture such as the vector page which is located at 0xffff0000, - * therefore a second level page table is already allocated covering - * 0xfff00000 upwards. - * - * The cache flushing code in proc-xscale.S uses the virtual area between - * 0xfffe0000 and 0xfffeffff. 
- */ - -#define FIXADDR_START 0xfff00000UL -#define FIXADDR_TOP 0xfffe0000UL +#define FIXADDR_START 0xffc00000UL +#define FIXADDR_TOP 0xffe00000UL #define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START) -#define FIX_KMAP_BEGIN 0 -#define FIX_KMAP_END (FIXADDR_SIZE >> PAGE_SHIFT) +#define FIX_KMAP_NR_PTES (FIXADDR_SIZE >> PAGE_SHIFT) #define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT)) #define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT) @@ -27,7 +14,7 @@ extern void __this_fixmap_does_not_exist(void); static inline unsigned long fix_to_virt(const unsigned int idx) { - if (idx >= FIX_KMAP_END) + if (idx >= FIX_KMAP_NR_PTES) __this_fixmap_does_not_exist(); return __fix_to_virt(idx); } diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h index 6b70f1b46a6e..04e18b656659 100644 --- a/arch/arm/include/asm/glue-df.h +++ b/arch/arm/include/asm/glue-df.h @@ -31,14 +31,6 @@ #undef CPU_DABORT_HANDLER #undef MULTI_DABORT -#if defined(CONFIG_CPU_ARM710) -# ifdef CPU_DABORT_HANDLER -# define MULTI_DABORT 1 -# else -# define CPU_DABORT_HANDLER cpu_arm7_data_abort -# endif -#endif - #ifdef CONFIG_CPU_ABRT_EV4 # ifdef CPU_DABORT_HANDLER # define MULTI_DABORT 1 diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 91b99abe7a95..535579511ed0 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -18,6 +18,7 @@ } while (0) extern pte_t *pkmap_page_table; +extern pte_t *fixmap_page_table; extern void *kmap_high(struct page *page); extern void kunmap_high(struct page *page); diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 8aa4cca74501..3d23418cbddd 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -179,6 +179,12 @@ static inline void __iomem *__typesafe_io(unsigned long addr) /* PCI fixed i/o mapping */ #define PCI_IO_VIRT_BASE 0xfee00000 +#if defined(CONFIG_PCI) +void pci_ioremap_set_mem_type(int mem_type); +#else +static inline void pci_ioremap_set_mem_type(int mem_type) {} +#endif + extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr); /* diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 5249cc3c52f4..060a75e99263 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -14,7 +14,6 @@ #include <linux/reboot.h> struct tag; -struct meminfo; struct pt_regs; struct smp_operations; #ifdef CONFIG_SMP @@ -50,8 +49,7 @@ struct machine_desc { void (*l2c_write_sec)(unsigned long, unsigned); struct smp_operations *smp; /* SMP operations */ bool (*smp_init)(void); - void (*fixup)(struct tag *, char **, - struct meminfo *); + void (*fixup)(struct tag *, char **); void (*init_meminfo)(void); void (*reserve)(void);/* reserve mem blocks */ void (*map_io)(void);/* IO mapping function */ diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h index a5ff410dcdb6..d9702eb0b02b 100644 --- a/arch/arm/include/asm/mcpm.h +++ b/arch/arm/include/asm/mcpm.h @@ -98,14 +98,14 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster); * previously in which case the caller should take appropriate action. * * On success, the CPU is not guaranteed to be truly halted until - * mcpm_cpu_power_down_finish() subsequently returns non-zero for the + * mcpm_wait_for_cpu_powerdown() subsequently returns non-zero for the * specified cpu. Until then, other CPUs should make sure they do not * trash memory the target CPU might be executing/accessing. 
*/ void mcpm_cpu_power_down(void); /** - * mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and + * mcpm_wait_for_cpu_powerdown - wait for a specified CPU to halt, and * make sure it is powered off * * @cpu: CPU number within given cluster @@ -127,7 +127,7 @@ void mcpm_cpu_power_down(void); * - zero if the CPU is in a safely parked state * - nonzero otherwise (e.g., timeout) */ -int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster); +int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster); /** * mcpm_cpu_suspend - bring the calling CPU in a suspended state @@ -171,7 +171,7 @@ int mcpm_cpu_powered_up(void); struct mcpm_platform_ops { int (*power_up)(unsigned int cpu, unsigned int cluster); void (*power_down)(void); - int (*power_down_finish)(unsigned int cpu, unsigned int cluster); + int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster); void (*suspend)(u64); void (*powered_up)(void); }; diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h index c2f5102ae659..bf47a6c110a2 100644 --- a/arch/arm/include/asm/memblock.h +++ b/arch/arm/include/asm/memblock.h @@ -1,10 +1,9 @@ #ifndef _ASM_ARM_MEMBLOCK_H #define _ASM_ARM_MEMBLOCK_H -struct meminfo; struct machine_desc; -void arm_memblock_init(struct meminfo *, const struct machine_desc *); +void arm_memblock_init(const struct machine_desc *); phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align); #endif diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 02fa2558f662..2b751464d6ff 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -83,8 +83,6 @@ */ #define IOREMAP_MAX_ORDER 24 -#define CONSISTENT_END (0xffe00000UL) - #else /* CONFIG_MMU */ /* diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index 8d6a089dfb76..e0adb9f1bf94 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -21,34 +21,6 @@ #define __tagtable(tag, fn) \ static const struct tagtable __tagtable_##fn __tag = { tag, fn } -/* - * Memory map description - */ -#define NR_BANKS CONFIG_ARM_NR_BANKS - -struct membank { - phys_addr_t start; - phys_addr_t size; - unsigned int highmem; -}; - -struct meminfo { - int nr_banks; - struct membank bank[NR_BANKS]; -}; - -extern struct meminfo meminfo; - -#define for_each_bank(iter,mi) \ - for (iter = 0; iter < (mi)->nr_banks; iter++) - -#define bank_pfn_start(bank) __phys_to_pfn((bank)->start) -#define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size) -#define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT) -#define bank_phys_start(bank) (bank)->start -#define bank_phys_end(bank) ((bank)->start + (bank)->size) -#define bank_phys_size(bank) (bank)->size - extern int arm_add_memory(u64 start, u64 size); extern void early_print(const char *str, ...); extern void dump_machine_table(void); diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 12c3a5decc60..75d95799b6e6 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -171,8 +171,9 @@ extern int __put_user_8(void *, unsigned long long); #define __put_user_check(x,p) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ + const typeof(*(p)) __user *__tmp_p = (p); \ register const typeof(*(p)) __r2 asm("r2") = (x); \ - register const typeof(*(p)) __user *__p asm("r0") = (p);\ + register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \ register unsigned long __l asm("r1") = __limit; \ register int __e 
asm("r0"); \ switch (sizeof(*(__p))) { \ diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 040619c32d68..38ddd9f83d0e 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_ARTHUR) += arthur.o obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_PCI) += bios32.o isa.o obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o +obj-$(CONFIG_HIBERNATION) += hibernate.o obj-$(CONFIG_SMP) += smp.o ifdef CONFIG_MMU obj-$(CONFIG_SMP) += smp_tlb.o diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c index 8c14de8180c0..7807ef58a2ab 100644 --- a/arch/arm/kernel/atags_parse.c +++ b/arch/arm/kernel/atags_parse.c @@ -22,6 +22,7 @@ #include <linux/fs.h> #include <linux/root_dev.h> #include <linux/screen_info.h> +#include <linux/memblock.h> #include <asm/setup.h> #include <asm/system_info.h> @@ -222,10 +223,10 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr) } if (mdesc->fixup) - mdesc->fixup(tags, &from, &meminfo); + mdesc->fixup(tags, &from); if (tags->hdr.tag == ATAG_CORE) { - if (meminfo.nr_banks != 0) + if (memblock_phys_mem_size()) squash_mem_tags(tags); save_atags(tags); parse_tags(tags); diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index c7419a585ddc..679a83d470cc 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -27,11 +27,6 @@ #include <asm/mach/arch.h> #include <asm/mach-types.h> -void __init early_init_dt_add_memory_arch(u64 base, u64 size) -{ - arm_add_memory(base, size); -} - void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) { return memblock_virt_alloc(size, align); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 1879e8dd2acc..52a949a8077d 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -344,7 +344,7 @@ ENDPROC(__pabt_svc) @ @ Enable the alignment trap while in kernel mode @ - alignment_trap r0 + alignment_trap r0, .LCcralign @ @ Clear FP to mark the first stack frame @@ -413,6 +413,11 @@ __und_usr: @ adr r9, BSYM(ret_from_exception) + @ IRQs must be enabled before attempting to read the instruction from + @ user space since that could cause a page/translation fault if the + @ page table was modified by another CPU. + enable_irq + tst r3, #PSR_T_BIT @ Thumb mode? bne __und_usr_thumb sub r4, r2, #4 @ ARM instr at LR - 4 @@ -484,7 +489,8 @@ ENDPROC(__und_usr) */ .pushsection .fixup, "ax" .align 2 -4: mov pc, r9 +4: str r4, [sp, #S_PC] @ retry current instruction + mov pc, r9 .popsection .pushsection __ex_table,"a" .long 1b, 4b @@ -517,7 +523,7 @@ ENDPROC(__und_usr) * r9 = normal "successful" return address * r10 = this threads thread_info structure * lr = unrecognised instruction return address - * IRQs disabled, FIQs enabled. + * IRQs enabled, FIQs enabled. 
*/ @ @ Fall-through from Thumb-2 __und_usr @@ -624,7 +630,6 @@ call_fpe: #endif do_fpe: - enable_irq ldr r4, .LCfp add r10, r10, #TI_FPSTATE @ r10 = workspace ldr pc, [r4] @ Call FP module USR entry point @@ -652,8 +657,7 @@ __und_usr_fault_32: b 1f __und_usr_fault_16: mov r1, #2 -1: enable_irq - mov r0, sp +1: mov r0, sp adr lr, BSYM(ret_from_exception) b __und_fault ENDPROC(__und_usr_fault_32) @@ -1143,11 +1147,8 @@ __vectors_start: .data .globl cr_alignment - .globl cr_no_alignment cr_alignment: .space 4 -cr_no_alignment: - .space 4 #ifdef CONFIG_MULTI_IRQ_HANDLER .globl handle_arch_irq diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index a2dcafdf1bc8..7139d4a7dea7 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -365,13 +365,7 @@ ENTRY(vector_swi) str r0, [sp, #S_OLD_R0] @ Save OLD_R0 #endif zero_fp - -#ifdef CONFIG_ALIGNMENT_TRAP - ldr ip, __cr_alignment - ldr ip, [ip] - mcr p15, 0, ip, c1, c0 @ update control register -#endif - + alignment_trap ip, __cr_alignment enable_irq ct_user_exit get_thread_info tsk diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 1420725142ca..5d702f8900b1 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -37,9 +37,9 @@ #endif .endm - .macro alignment_trap, rtemp + .macro alignment_trap, rtemp, label #ifdef CONFIG_ALIGNMENT_TRAP - ldr \rtemp, .LCcralign + ldr \rtemp, \label ldr \rtemp, [\rtemp] mcr p15, 0, \rtemp, c1, c0 #endif @@ -132,6 +132,10 @@ orrne r5, V7M_xPSR_FRAMEPTRALIGN biceq r5, V7M_xPSR_FRAMEPTRALIGN + @ ensure bit 0 is cleared in the PC, otherwise behaviour is + @ unpredictable + bic r4, #1 + @ write basic exception frame stmdb r2!, {r1, r3-r5} ldmia sp, {r1, r3-r5} diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index c108ddcb9ba4..af9a8a927a4e 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -14,6 +14,7 @@ #include <linux/ftrace.h> #include <linux/uaccess.h> +#include <linux/module.h> #include <asm/cacheflush.h> #include <asm/opcodes.h> @@ -63,6 +64,18 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) } #endif +int ftrace_arch_code_modify_prepare(void) +{ + set_all_modules_text_rw(); + return 0; +} + +int ftrace_arch_code_modify_post_process(void) +{ + set_all_modules_text_ro(); + return 0; +} + static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) { return arm_gen_branch_link(pc, addr); diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index c96ecacb2021..572a38335c96 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -99,8 +99,7 @@ __mmap_switched: str r1, [r5] @ Save machine type str r2, [r6] @ Save atags pointer cmp r7, #0 - bicne r4, r0, #CR_A @ Clear 'A' bit - stmneia r7, {r0, r4} @ Save control register values + strne r0, [r7] @ Save control register values b start_kernel ENDPROC(__mmap_switched) diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 591d6e4a6492..2c35f0ff2fdc 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -475,7 +475,7 @@ ENDPROC(__turn_mmu_on) #ifdef CONFIG_SMP_ON_UP - __INIT + __HEAD __fixup_smp: and r3, r9, #0x000f0000 @ architecture version teq r3, #0x000f0000 @ CPU ID supported? 
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c new file mode 100644 index 000000000000..bb8b79648643 --- /dev/null +++ b/arch/arm/kernel/hibernate.c @@ -0,0 +1,107 @@ +/* + * Hibernation support specific for ARM + * + * Derived from work on ARM hibernation support by: + * + * Ubuntu project, hibernation support for mach-dove + * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu) + * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.) + * https://lkml.org/lkml/2010/6/18/4 + * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html + * https://patchwork.kernel.org/patch/96442/ + * + * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> + * + * License terms: GNU General Public License (GPL) version 2 + */ + +#include <linux/mm.h> +#include <linux/suspend.h> +#include <asm/system_misc.h> +#include <asm/idmap.h> +#include <asm/suspend.h> +#include <asm/memory.h> + +extern const void __nosave_begin, __nosave_end; + +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin); + unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1); + + return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn); +} + +void notrace save_processor_state(void) +{ + WARN_ON(num_online_cpus() != 1); + local_fiq_disable(); +} + +void notrace restore_processor_state(void) +{ + local_fiq_enable(); +} + +/* + * Snapshot kernel memory and reset the system. + * + * swsusp_save() is executed in the suspend finisher so that the CPU + * context pointer and memory are part of the saved image, which is + * required by the resume kernel image to restart execution from + * swsusp_arch_suspend(). + * + * soft_restart is not technically needed, but is used to get success + * returned from cpu_suspend. + * + * When soft reboot completes, the hibernation snapshot is written out. + */ +static int notrace arch_save_image(unsigned long unused) +{ + int ret; + + ret = swsusp_save(); + if (ret == 0) + soft_restart(virt_to_phys(cpu_resume)); + return ret; +} + +/* + * Save the current CPU state before suspend / poweroff. + */ +int notrace swsusp_arch_suspend(void) +{ + return cpu_suspend(0, arch_save_image); +} + +/* + * Restore page contents for physical pages that were in use during loading + * hibernation image. Switch to idmap_pgd so the physical page tables + * are overwritten with the same contents. + */ +static void notrace arch_restore_image(void *unused) +{ + struct pbe *pbe; + + cpu_switch_mm(idmap_pgd, &init_mm); + for (pbe = restore_pblist; pbe; pbe = pbe->next) + copy_page(pbe->orig_address, pbe->address); + + soft_restart(virt_to_phys(cpu_resume)); +} + +static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata; + +/* + * Resume from the hibernation image. + * Due to the kernel heap / data restore, stack contents change underneath + * and that would make function calls impossible; switch to a temporary + * stack within the nosave region to avoid that problem. 
+ */ +int swsusp_arch_resume(void) +{ + extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); + call_with_stack(arch_restore_image, 0, + resume_stack + ARRAY_SIZE(resume_stack)); + return 0; +} diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S index 2452dd1bef53..a5599cfc43cb 100644 --- a/arch/arm/kernel/iwmmxt.S +++ b/arch/arm/kernel/iwmmxt.S @@ -18,6 +18,7 @@ #include <asm/ptrace.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> +#include <asm/assembler.h> #if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B) #define PJ4(code...) code @@ -65,17 +66,18 @@ * r9 = ret_from_exception * lr = undefined instr exit * - * called from prefetch exception handler with interrupts disabled + * called from prefetch exception handler with interrupts enabled */ ENTRY(iwmmxt_task_enable) + inc_preempt_count r10, r3 XSC(mrc p15, 0, r2, c15, c1, 0) PJ4(mrc p15, 0, r2, c1, c0, 2) @ CP0 and CP1 accessible? XSC(tst r2, #0x3) PJ4(tst r2, #0xf) - movne pc, lr @ if so no business here + bne 4f @ if so no business here @ enable access to CP0 and CP1 XSC(orr r2, r2, #0x3) XSC(mcr p15, 0, r2, c15, c1, 0) @@ -136,7 +138,7 @@ concan_dump: wstrd wR15, [r1, #MMX_WR15] 2: teq r0, #0 @ anything to load? - moveq pc, lr + beq 3f concan_load: @@ -169,8 +171,14 @@ concan_load: @ clear CUP/MUP (only if r1 != 0) teq r1, #0 mov r2, #0 - moveq pc, lr + beq 3f tmcr wCon, r2 + +3: +#ifdef CONFIG_PREEMPT_COUNT + get_thread_info r10 +#endif +4: dec_preempt_count r10, r3 mov pc, lr /* diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 51798d7854ac..a71ae1523620 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -221,6 +221,7 @@ static struct notifier_block cpu_pmu_hotplug_notifier = { * PMU platform driver and devicetree bindings. 
*/ static struct of_device_id cpu_pmu_of_device_ids[] = { + {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init}, {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init}, {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init}, {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init}, diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index f4ef3981ed02..2037f7205987 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -1599,6 +1599,13 @@ static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu) return 0; } +static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu) +{ + armv7_a12_pmu_init(cpu_pmu); + cpu_pmu->name = "ARMv7 Cortex-A17"; + return 0; +} + /* * Krait Performance Monitor Region Event Selection Register (PMRESRn) * @@ -2021,6 +2028,11 @@ static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu) return -ENODEV; } +static inline int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu) +{ + return -ENODEV; +} + static inline int krait_pmu_init(struct arm_pmu *cpu_pmu) { return -ENODEV; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 50e198c1e9c8..8a16ee5d8a95 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -72,6 +72,7 @@ static int __init fpe_setup(char *line) __setup("fpe=", fpe_setup); #endif +extern void init_default_cache_policy(unsigned long); extern void paging_init(const struct machine_desc *desc); extern void early_paging_init(const struct machine_desc *, struct proc_info_list *); @@ -590,7 +591,7 @@ static void __init setup_processor(void) pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", cpu_name, read_cpuid_id(), read_cpuid_id() & 15, - proc_arch[cpu_architecture()], cr_alignment); + proc_arch[cpu_architecture()], get_cr()); snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c", list->arch_name, ENDIANNESS); @@ -603,7 +604,9 @@ static void __init setup_processor(void) #ifndef CONFIG_ARM_THUMB elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT); #endif - +#ifdef CONFIG_MMU + init_default_cache_policy(list->__cpu_mm_mmu_flags); +#endif erratum_a15_798181_init(); feat_v6_fixup(); @@ -628,15 +631,8 @@ void __init dump_machine_table(void) int __init arm_add_memory(u64 start, u64 size) { - struct membank *bank = &meminfo.bank[meminfo.nr_banks]; u64 aligned_start; - if (meminfo.nr_banks >= NR_BANKS) { - pr_crit("NR_BANKS too low, ignoring memory at 0x%08llx\n", - (long long)start); - return -EINVAL; - } - /* * Ensure that start/size are aligned to a page boundary. * Size is appropriately rounded down, start is rounded up. @@ -677,17 +673,17 @@ int __init arm_add_memory(u64 start, u64 size) aligned_start = PHYS_OFFSET; } - bank->start = aligned_start; - bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1); + start = aligned_start; + size = size & ~(phys_addr_t)(PAGE_SIZE - 1); /* * Check whether this memory region has non-zero size or * invalid node number. */ - if (bank->size == 0) + if (size == 0) return -EINVAL; - meminfo.nr_banks++; + memblock_add(start, size); return 0; } @@ -695,6 +691,7 @@ int __init arm_add_memory(u64 start, u64 size) * Pick out the memory size. 
We look for mem=size@start, * where start and size are "size[KkMm]" */ + static int __init early_mem(char *p) { static int usermem __initdata = 0; @@ -709,7 +706,8 @@ static int __init early_mem(char *p) */ if (usermem == 0) { usermem = 1; - meminfo.nr_banks = 0; + memblock_remove(memblock_start_of_DRAM(), + memblock_end_of_DRAM() - memblock_start_of_DRAM()); } start = PHYS_OFFSET; @@ -854,13 +852,6 @@ static void __init reserve_crashkernel(void) static inline void reserve_crashkernel(void) {} #endif /* CONFIG_KEXEC */ -static int __init meminfo_cmp(const void *_a, const void *_b) -{ - const struct membank *a = _a, *b = _b; - long cmp = bank_pfn_start(a) - bank_pfn_start(b); - return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; -} - void __init hyp_mode_check(void) { #ifdef CONFIG_ARM_VIRT_EXT @@ -903,12 +894,10 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); - sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); - early_paging_init(mdesc, lookup_processor_type(read_cpuid_id())); setup_dma_zone(mdesc); sanity_check_meminfo(); - arm_memblock_init(&meminfo, mdesc); + arm_memblock_init(mdesc); paging_init(mdesc); request_standard_resources(mdesc); diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index b907d9b790ab..1b880db2a033 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -127,6 +127,10 @@ ENDPROC(cpu_resume_after_mmu) .align ENTRY(cpu_resume) ARM_BE8(setend be) @ ensure we are in BE mode +#ifdef CONFIG_ARM_VIRT_EXT + bl __hyp_stub_install_secondary +#endif + safe_svcmode_maskall r1 mov r1, #0 ALT_SMP(mrc p15, 0, r0, c0, c0, 5) ALT_UP_B(1f) @@ -144,7 +148,6 @@ ARM_BE8(setend be) @ ensure we are in BE mode ldr r0, [r0, #SLEEP_SAVE_SP_PHYS] ldr r0, [r0, r1, lsl #2] - setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off @ load phys pgd, stack, resume fn ARM( ldmia r0!, {r1, sp, pc} ) THUMB( ldmia r0!, {r1, r2, r3} ) diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index af4e8c8a5422..f065eb05d254 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -3,6 +3,7 @@ #include <linux/stacktrace.h> #include <asm/stacktrace.h> +#include <asm/traps.h> #if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) /* @@ -61,6 +62,7 @@ EXPORT_SYMBOL(walk_stackframe); #ifdef CONFIG_STACKTRACE struct stack_trace_data { struct stack_trace *trace; + unsigned long last_pc; unsigned int no_sched_functions; unsigned int skip; }; @@ -69,6 +71,7 @@ static int save_trace(struct stackframe *frame, void *d) { struct stack_trace_data *data = d; struct stack_trace *trace = data->trace; + struct pt_regs *regs; unsigned long addr = frame->pc; if (data->no_sched_functions && in_sched_functions(addr)) @@ -80,16 +83,39 @@ static int save_trace(struct stackframe *frame, void *d) trace->entries[trace->nr_entries++] = addr; + if (trace->nr_entries >= trace->max_entries) + return 1; + + /* + * in_exception_text() is designed to test if the PC is one of + * the functions which has an exception stack above it, but + * unfortunately what is in frame->pc is the return LR value, + * not the saved PC value. So, we need to track the previous + * frame PC value when doing this. 
+ */ + addr = data->last_pc; + data->last_pc = frame->pc; + if (!in_exception_text(addr)) + return 0; + + regs = (struct pt_regs *)frame->sp; + + trace->entries[trace->nr_entries++] = regs->ARM_pc; + return trace->nr_entries >= trace->max_entries; } -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +/* This must be noinline to so that our skip calculation works correctly */ +static noinline void __save_stack_trace(struct task_struct *tsk, + struct stack_trace *trace, unsigned int nosched) { struct stack_trace_data data; struct stackframe frame; data.trace = trace; + data.last_pc = ULONG_MAX; data.skip = trace->skip; + data.no_sched_functions = nosched; if (tsk != current) { #ifdef CONFIG_SMP @@ -102,7 +128,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace->entries[trace->nr_entries++] = ULONG_MAX; return; #else - data.no_sched_functions = 1; frame.fp = thread_saved_fp(tsk); frame.sp = thread_saved_sp(tsk); frame.lr = 0; /* recovered from the stack */ @@ -111,11 +136,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) } else { register unsigned long current_sp asm ("sp"); - data.no_sched_functions = 0; + /* We don't want this function nor the caller */ + data.skip += 2; frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_sp; frame.lr = (unsigned long)__builtin_return_address(0); - frame.pc = (unsigned long)save_stack_trace_tsk; + frame.pc = (unsigned long)__save_stack_trace; } walk_stackframe(&frame, save_trace, &data); @@ -123,9 +149,33 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace->entries[trace->nr_entries++] = ULONG_MAX; } +void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) +{ + struct stack_trace_data data; + struct stackframe frame; + + data.trace = trace; + data.skip = trace->skip; + data.no_sched_functions = 0; + + frame.fp = regs->ARM_fp; + frame.sp = regs->ARM_sp; + frame.lr = regs->ARM_lr; + frame.pc = regs->ARM_pc; + + walk_stackframe(&frame, save_trace, &data); + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + __save_stack_trace(tsk, trace, 1); +} + void save_stack_trace(struct stack_trace *trace) { - save_stack_trace_tsk(current, trace); + __save_stack_trace(current, trace, 0); } EXPORT_SYMBOL_GPL(save_stack_trace); #endif diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 0bc94b1fd1ae..0fa8825cea04 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -91,13 +91,13 @@ static void __init parse_dt_topology(void) { const struct cpu_efficiency *cpu_eff; struct device_node *cn = NULL; - unsigned long min_capacity = (unsigned long)(-1); + unsigned long min_capacity = ULONG_MAX; unsigned long max_capacity = 0; unsigned long capacity = 0; - int alloc_size, cpu = 0; + int cpu = 0; - alloc_size = nr_cpu_ids * sizeof(*__cpu_capacity); - __cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT); + __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity), + GFP_NOWAIT); for_each_possible_cpu(cpu) { const u32 *rate; diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index 3c217694ebec..cb791ac6a003 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -285,7 +285,7 @@ static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl, if (unwind_pop_register(ctrl, &vsp, reg)) return -URC_FAILURE; - if 
(insn & 0x80) + if (insn & 0x8) if (unwind_pop_register(ctrl, &vsp, 14)) return -URC_FAILURE; diff --git a/arch/arm/kernel/uprobes.c b/arch/arm/kernel/uprobes.c index f9bacee973bf..56adf9c1fde0 100644 --- a/arch/arm/kernel/uprobes.c +++ b/arch/arm/kernel/uprobes.c @@ -113,6 +113,26 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, return 0; } +void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len) +{ + void *xol_page_kaddr = kmap_atomic(page); + void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK); + + preempt_disable(); + + /* Initialize the slot */ + memcpy(dst, src, len); + + /* flush caches (dcache/icache) */ + flush_uprobe_xol_access(page, vaddr, dst, len); + + preempt_enable(); + + kunmap_atomic(xol_page_kaddr); +} + + int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; diff --git a/arch/arm/mach-clps711x/board-clep7312.c b/arch/arm/mach-clps711x/board-clep7312.c index 221b9de32dd6..94a7add88a3f 100644 --- a/arch/arm/mach-clps711x/board-clep7312.c +++ b/arch/arm/mach-clps711x/board-clep7312.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/string.h> +#include <linux/memblock.h> #include <asm/setup.h> #include <asm/mach-types.h> @@ -26,11 +27,9 @@ #include "common.h" static void __init -fixup_clep7312(struct tag *tags, char **cmdline, struct meminfo *mi) +fixup_clep7312(struct tag *tags, char **cmdline) { - mi->nr_banks=1; - mi->bank[0].start = 0xc0000000; - mi->bank[0].size = 0x01000000; + memblock_add(0xc0000000, 0x01000000); } MACHINE_START(CLEP7212, "Cirrus Logic 7212/7312") diff --git a/arch/arm/mach-clps711x/board-edb7211.c b/arch/arm/mach-clps711x/board-edb7211.c index 077609841f14..f9828f89972a 100644 --- a/arch/arm/mach-clps711x/board-edb7211.c +++ b/arch/arm/mach-clps711x/board-edb7211.c @@ -16,6 +16,7 @@ #include <linux/interrupt.h> #include <linux/backlight.h> #include <linux/platform_device.h> +#include <linux/memblock.h> #include <linux/mtd/physmap.h> #include <linux/mtd/partitions.h> @@ -133,7 +134,7 @@ static void __init edb7211_reserve(void) } static void __init -fixup_edb7211(struct tag *tags, char **cmdline, struct meminfo *mi) +fixup_edb7211(struct tag *tags, char **cmdline) { /* * Bank start addresses are not present in the information @@ -143,11 +144,8 @@ fixup_edb7211(struct tag *tags, char **cmdline, struct meminfo *mi) * Banks sizes _are_ present in the param block, but we're * not using that information yet. */ - mi->bank[0].start = 0xc0000000; - mi->bank[0].size = SZ_8M; - mi->bank[1].start = 0xc1000000; - mi->bank[1].size = SZ_8M; - mi->nr_banks = 2; + memblock_add(0xc0000000, SZ_8M); + memblock_add(0xc1000000, SZ_8M); } static void __init edb7211_init(void) diff --git a/arch/arm/mach-clps711x/board-p720t.c b/arch/arm/mach-clps711x/board-p720t.c index 67b733744ed7..0cf0e51e6546 100644 --- a/arch/arm/mach-clps711x/board-p720t.c +++ b/arch/arm/mach-clps711x/board-p720t.c @@ -295,7 +295,7 @@ static struct generic_bl_info p720t_lcd_backlight_pdata = { }; static void __init -fixup_p720t(struct tag *tag, char **cmdline, struct meminfo *mi) +fixup_p720t(struct tag *tag, char **cmdline) { /* * Our bootloader doesn't setup any tags (yet). 
diff --git a/arch/arm/mach-ep93xx/crunch-bits.S b/arch/arm/mach-ep93xx/crunch-bits.S index 0ec9bb48fab9..e96923a3017b 100644 --- a/arch/arm/mach-ep93xx/crunch-bits.S +++ b/arch/arm/mach-ep93xx/crunch-bits.S @@ -16,6 +16,7 @@ #include <asm/ptrace.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> +#include <asm/assembler.h> #include <mach/ep93xx-regs.h> /* @@ -62,14 +63,16 @@ * r9 = ret_from_exception * lr = undefined instr exit * - * called from prefetch exception handler with interrupts disabled + * called from prefetch exception handler with interrupts enabled */ ENTRY(crunch_task_enable) + inc_preempt_count r10, r3 + ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr ldr r1, [r8, #0x80] tst r1, #0x00800000 @ access to crunch enabled? - movne pc, lr @ if so no business here + bne 2f @ if so no business here mov r3, #0xaa @ unlock syscon swlock str r3, [r8, #0xc0] orr r1, r1, #0x00800000 @ enable access to crunch @@ -142,7 +145,7 @@ crunch_save: teq r0, #0 @ anything to load? cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered - moveq pc, lr + beq 1f crunch_load: cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word @@ -190,6 +193,11 @@ crunch_load: cfldr64 mvdx14, [r0, #CRUNCH_MVDX14] cfldr64 mvdx15, [r0, #CRUNCH_MVDX15] +1: +#ifdef CONFIG_PREEMPT_COUNT + get_thread_info r10 +#endif +2: dec_preempt_count r10, r3 mov pc, lr /* diff --git a/arch/arm/mach-footbridge/cats-hw.c b/arch/arm/mach-footbridge/cats-hw.c index da0415094856..8f05489671b7 100644 --- a/arch/arm/mach-footbridge/cats-hw.c +++ b/arch/arm/mach-footbridge/cats-hw.c @@ -76,7 +76,7 @@ __initcall(cats_hw_init); * hard reboots fail on early boards. */ static void __init -fixup_cats(struct tag *tags, char **cmdline, struct meminfo *mi) +fixup_cats(struct tag *tags, char **cmdline) { #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) screen_info.orig_video_lines = 25; diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c index eb1fa5c84723..cdee08c6d239 100644 --- a/arch/arm/mach-footbridge/netwinder-hw.c +++ b/arch/arm/mach-footbridge/netwinder-hw.c @@ -620,7 +620,7 @@ __initcall(nw_hw_init); * the parameter page. 
*/ static void __init -fixup_netwinder(struct tag *tags, char **cmdline, struct meminfo *mi) +fixup_netwinder(struct tag *tags, char **cmdline) { #ifdef CONFIG_ISAPNP extern int isapnp_disable; diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c index a77529887cbc..61bfe584a9d7 100644 --- a/arch/arm/mach-msm/board-halibut.c +++ b/arch/arm/mach-msm/board-halibut.c @@ -83,11 +83,6 @@ static void __init halibut_init(void) platform_add_devices(devices, ARRAY_SIZE(devices)); } -static void __init halibut_fixup(struct tag *tags, char **cmdline, - struct meminfo *mi) -{ -} - static void __init halibut_map_io(void) { msm_map_common_io(); @@ -100,7 +95,6 @@ static void __init halibut_init_late(void) MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)") .atag_offset = 0x100, - .fixup = halibut_fixup, .map_io = halibut_map_io, .init_early = halibut_init_early, .init_irq = halibut_init_irq, diff --git a/arch/arm/mach-msm/board-mahimahi.c b/arch/arm/mach-msm/board-mahimahi.c index 7d9981cb400e..873c3ca3cd7e 100644 --- a/arch/arm/mach-msm/board-mahimahi.c +++ b/arch/arm/mach-msm/board-mahimahi.c @@ -22,6 +22,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/platform_device.h> +#include <linux/memblock.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -52,16 +53,10 @@ static void __init mahimahi_init(void) platform_add_devices(devices, ARRAY_SIZE(devices)); } -static void __init mahimahi_fixup(struct tag *tags, char **cmdline, - struct meminfo *mi) +static void __init mahimahi_fixup(struct tag *tags, char **cmdline) { - mi->nr_banks = 2; - mi->bank[0].start = PHYS_OFFSET; - mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET); - mi->bank[0].size = (219*1024*1024); - mi->bank[1].start = MSM_HIGHMEM_BASE; - mi->bank[1].node = PHYS_TO_NID(MSM_HIGHMEM_BASE); - mi->bank[1].size = MSM_HIGHMEM_SIZE; + memblock_add(PHYS_OFFSET, 219*SZ_1M); + memblock_add(MSM_HIGHMEM_BASE, MSM_HIGHMEM_SIZE); } static void __init mahimahi_map_io(void) diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c index 46de789ad3ae..b621b23a5ecc 100644 --- a/arch/arm/mach-msm/board-msm7x30.c +++ b/arch/arm/mach-msm/board-msm7x30.c @@ -40,8 +40,7 @@ #include "proc_comm.h" #include "common.h" -static void __init msm7x30_fixup(struct tag *tag, char **cmdline, - struct meminfo *mi) +static void __init msm7x30_fixup(struct tag *tag, char **cmdline) { for (; tag->hdr.size; tag = tag_next(tag)) if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) { diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c index 327605174d63..e50967926dcd 100644 --- a/arch/arm/mach-msm/board-sapphire.c +++ b/arch/arm/mach-msm/board-sapphire.c @@ -35,6 +35,7 @@ #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> +#include <linux/memblock.h> #include "gpio_chip.h" #include "board-sapphire.h" @@ -74,22 +75,18 @@ static struct map_desc sapphire_io_desc[] __initdata = { } }; -static void __init sapphire_fixup(struct tag *tags, char **cmdline, - struct meminfo *mi) +static void __init sapphire_fixup(struct tag *tags, char **cmdline) { int smi_sz = parse_tag_smi((const struct tag *)tags); - mi->nr_banks = 1; - mi->bank[0].start = PHYS_OFFSET; - mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET); if (smi_sz == 32) { - mi->bank[0].size = (84*1024*1024); + memblock_add(PHYS_OFFSET, 84*SZ_1M); } else if (smi_sz == 64) { - mi->bank[0].size = (101*1024*1024); + memblock_add(PHYS_OFFSET, 101*SZ_1M); } else { + memblock_add(PHYS_OFFSET, 101*SZ_1M); /* Give a 
default value when not get smi size */ smi_sz = 64; - mi->bank[0].size = (101*1024*1024); } } diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c index 015d544aa017..58826cfab6b0 100644 --- a/arch/arm/mach-msm/board-trout.c +++ b/arch/arm/mach-msm/board-trout.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/clkdev.h> +#include <linux/memblock.h> #include <asm/system_info.h> #include <asm/mach-types.h> @@ -55,12 +56,9 @@ static void __init trout_init_irq(void) msm_init_irq(); } -static void __init trout_fixup(struct tag *tags, char **cmdline, - struct meminfo *mi) +static void __init trout_fixup(struct tag *tags, char **cmdline) { - mi->nr_banks = 1; - mi->bank[0].start = PHYS_OFFSET; - mi->bank[0].size = (101*1024*1024); + memblock_add(PHYS_OFFSET, 101*SZ_1M); } static void __init trout_init(void) diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index 3f1de1111e0f..6bbb7b55c6d1 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -365,8 +365,7 @@ void orion5x_restart(enum reboot_mode mode, const char *cmd) * Many orion-based systems have buggy bootloader implementations. * This is a common fixup for bogus memory tags. */ -void __init tag_fixup_mem32(struct tag *t, char **from, - struct meminfo *meminfo) +void __init tag_fixup_mem32(struct tag *t, char **from) { for (; t->hdr.size; t = tag_next(t)) if (t->hdr.tag == ATAG_MEM && diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h index 7548db2bfb8a..ca3803017c59 100644 --- a/arch/arm/mach-orion5x/common.h +++ b/arch/arm/mach-orion5x/common.h @@ -71,9 +71,8 @@ void edmini_v2_init(void); static inline void edmini_v2_init(void) {}; #endif -struct meminfo; struct tag; -extern void __init tag_fixup_mem32(struct tag *, char **, struct meminfo *); +extern void __init tag_fixup_mem32(struct tag *, char **); /***************************************************************************** * Helpers to access Orion registers diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c index 584439bfa59f..4d3588d26c2a 100644 --- a/arch/arm/mach-pxa/cm-x300.c +++ b/arch/arm/mach-pxa/cm-x300.c @@ -837,8 +837,7 @@ static void __init cm_x300_init(void) cm_x300_init_bl(); } -static void __init cm_x300_fixup(struct tag *tags, char **cmdline, - struct meminfo *mi) +static void __init cm_x300_fixup(struct tag *tags, char **cmdline) { /* Make sure that mi->bank[0].start = PHYS_ADDR */ for (; tags->hdr.size; tags = tag_next(tags)) diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index 57d60542f982..91dd1c7cdbcd 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c @@ -34,6 +34,7 @@ #include <linux/input/matrix_keypad.h> #include <linux/gpio_keys.h> #include <linux/module.h> +#include <linux/memblock.h> #include <video/w100fb.h> #include <asm/setup.h> @@ -753,16 +754,13 @@ static void __init corgi_init(void) platform_add_devices(devices, ARRAY_SIZE(devices)); } -static void __init fixup_corgi(struct tag *tags, char **cmdline, - struct meminfo *mi) +static void __init fixup_corgi(struct tag *tags, char **cmdline) { sharpsl_save_param(); - mi->nr_banks=1; - mi->bank[0].start = 0xa0000000; if (machine_is_corgi()) - mi->bank[0].size = (32*1024*1024); + memblock_add(0xa0000000, SZ_32M); else - mi->bank[0].size = (64*1024*1024); + memblock_add(0xa0000000, SZ_64M); } #ifdef CONFIG_MACH_CORGI diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c index 
8280ebcaab9f..cfb864173ce3 100644 --- a/arch/arm/mach-pxa/eseries.c +++ b/arch/arm/mach-pxa/eseries.c @@ -21,6 +21,7 @@ #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/usb/gpio_vbus.h> +#include <linux/memblock.h> #include <video/w100fb.h> @@ -41,14 +42,12 @@ #include "clock.h" /* Only e800 has 128MB RAM */ -void __init eseries_fixup(struct tag *tags, char **cmdline, struct meminfo *mi) +void __init eseries_fixup(struct tag *tags, char **cmdline) { - mi->nr_banks=1; - mi->bank[0].start = 0xa0000000; if (machine_is_e800()) - mi->bank[0].size = (128*1024*1024); + memblock_add(0xa0000000, SZ_128M); else - mi->bank[0].size = (64*1024*1024); + memblock_add(0xa0000000, SZ_64M); } struct gpio_vbus_mach_info e7xx_udc_info = { diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c index aedf053a1de5..131991629116 100644 --- a/arch/arm/mach-pxa/poodle.c +++ b/arch/arm/mach-pxa/poodle.c @@ -29,6 +29,7 @@ #include <linux/spi/ads7846.h> #include <linux/spi/pxa2xx_spi.h> #include <linux/mtd/sharpsl.h> +#include <linux/memblock.h> #include <mach/hardware.h> #include <asm/mach-types.h> @@ -456,13 +457,10 @@ static void __init poodle_init(void) poodle_init_spi(); } -static void __init fixup_poodle(struct tag *tags, char **cmdline, - struct meminfo *mi) +static void __init fixup_poodle(struct tag *tags, char **cmdline) { sharpsl_save_param(); - mi->nr_banks=1; - mi->bank[0].start = 0xa0000000; - mi->bank[0].size = (32*1024*1024); + memblock_add(0xa0000000, SZ_32M); } MACHINE_START(POODLE, "SHARP Poodle") diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 0b11c1af51c4..840c3a48e720 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -32,6 +32,7 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/reboot.h> +#include <linux/memblock.h> #include <asm/setup.h> #include <asm/mach-types.h> @@ -971,13 +972,10 @@ static void __init spitz_init(void) spitz_i2c_init(); } -static void __init spitz_fixup(struct tag *tags, char **cmdline, - struct meminfo *mi) +static void __init spitz_fixup(struct tag *tags, char **cmdline) { sharpsl_save_param(); - mi->nr_banks = 1; - mi->bank[0].start = 0xa0000000; - mi->bank[0].size = (64*1024*1024); + memblock_add(0xa0000000, SZ_64M); } #ifdef CONFIG_MACH_SPITZ diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index ef5557b807ed..c158a6e3e0aa 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -37,6 +37,7 @@ #include <linux/i2c/pxa-i2c.h> #include <linux/usb/gpio_vbus.h> #include <linux/reboot.h> +#include <linux/memblock.h> #include <asm/setup.h> #include <asm/mach-types.h> @@ -960,13 +961,10 @@ static void __init tosa_init(void) platform_add_devices(devices, ARRAY_SIZE(devices)); } -static void __init fixup_tosa(struct tag *tags, char **cmdline, - struct meminfo *mi) +static void __init fixup_tosa(struct tag *tags, char **cmdline) { sharpsl_save_param(); - mi->nr_banks=1; - mi->bank[0].start = 0xa0000000; - mi->bank[0].size = (64*1024*1024); + memblock_add(0xa0000000, SZ_64M); } MACHINE_START(TOSA, "SHARP Tosa") diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 1d5ee5c9a1dc..c2fae3a5aad8 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -31,6 +31,7 @@ #include <linux/amba/mmci.h> #include <linux/gfp.h> #include <linux/mtd/physmap.h> +#include <linux/memblock.h> #include <mach/hardware.h> #include <asm/irq.h> @@ -370,19 +371,15 @@ void __init realview_timer_init(unsigned int timer_irq) 
/* * Setup the memory banks. */ -void realview_fixup(struct tag *tags, char **from, struct meminfo *meminfo) +void realview_fixup(struct tag *tags, char **from) { /* * Most RealView platforms have 512MB contiguous RAM at 0x70000000. * Half of this is mirrored at 0. */ #ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET - meminfo->bank[0].start = 0x70000000; - meminfo->bank[0].size = SZ_512M; - meminfo->nr_banks = 1; + memblock_add(0x70000000, SZ_512M); #else - meminfo->bank[0].start = 0; - meminfo->bank[0].size = SZ_256M; - meminfo->nr_banks = 1; + memblock_add(0, SZ_256M); #endif } diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h index 602ca5ec52c5..844946da3c66 100644 --- a/arch/arm/mach-realview/core.h +++ b/arch/arm/mach-realview/core.h @@ -51,8 +51,7 @@ extern int realview_flash_register(struct resource *res, u32 num); extern int realview_eth_register(const char *name, struct resource *res); extern int realview_usb_register(struct resource *res); extern void realview_init_early(void); -extern void realview_fixup(struct tag *tags, char **from, - struct meminfo *meminfo); +extern void realview_fixup(struct tag *tags, char **from); extern struct smp_operations realview_smp_ops; extern void realview_cpu_die(unsigned int cpu); diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c index e3bddb5ab10f..aad9c5a40d47 100644 --- a/arch/arm/mach-realview/realview_pb1176.c +++ b/arch/arm/mach-realview/realview_pb1176.c @@ -32,6 +32,7 @@ #include <linux/irqchip/arm-gic.h> #include <linux/platform_data/clk-realview.h> #include <linux/reboot.h> +#include <linux/memblock.h> #include <mach/hardware.h> #include <asm/irq.h> @@ -339,15 +340,12 @@ static void realview_pb1176_restart(enum reboot_mode mode, const char *cmd) dsb(); } -static void realview_pb1176_fixup(struct tag *tags, char **from, - struct meminfo *meminfo) +static void realview_pb1176_fixup(struct tag *tags, char **from) { /* * RealView PB1176 only has 128MB of RAM mapped at 0. */ - meminfo->bank[0].start = 0; - meminfo->bank[0].size = SZ_128M; - meminfo->nr_banks = 1; + memblock_add(0, SZ_128M); } static void __init realview_pb1176_init(void) diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c index f0cfd7e7e569..535697abfd91 100644 --- a/arch/arm/mach-realview/realview_pbx.c +++ b/arch/arm/mach-realview/realview_pbx.c @@ -29,6 +29,7 @@ #include <linux/irqchip/arm-gic.h> #include <linux/platform_data/clk-realview.h> #include <linux/reboot.h> +#include <linux/memblock.h> #include <asm/irq.h> #include <asm/mach-types.h> @@ -325,23 +326,19 @@ static void __init realview_pbx_timer_init(void) realview_pbx_twd_init(); } -static void realview_pbx_fixup(struct tag *tags, char **from, - struct meminfo *meminfo) +static void realview_pbx_fixup(struct tag *tags, char **from) { #ifdef CONFIG_SPARSEMEM /* * Memory configuration with SPARSEMEM enabled on RealView PBX (see * asm/mach/memory.h for more information). 
*/ - meminfo->bank[0].start = 0; - meminfo->bank[0].size = SZ_256M; - meminfo->bank[1].start = 0x20000000; - meminfo->bank[1].size = SZ_512M; - meminfo->bank[2].start = 0x80000000; - meminfo->bank[2].size = SZ_256M; - meminfo->nr_banks = 3; + + memblock_add(0, SZ_256M); + memblock_add(0x20000000, SZ_512M); + memblock_add(0x80000000, SZ_256M); #else - realview_fixup(tags, from, meminfo); + realview_fixup(tags, from); #endif } diff --git a/arch/arm/mach-s3c24xx/mach-smdk2413.c b/arch/arm/mach-s3c24xx/mach-smdk2413.c index 233fe52d2015..a03c855ee854 100644 --- a/arch/arm/mach-s3c24xx/mach-smdk2413.c +++ b/arch/arm/mach-s3c24xx/mach-smdk2413.c @@ -22,6 +22,7 @@ #include <linux/serial_s3c.h> #include <linux/platform_device.h> #include <linux/io.h> +#include <linux/memblock.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -93,13 +94,10 @@ static struct platform_device *smdk2413_devices[] __initdata = { &s3c2412_device_dma, }; -static void __init smdk2413_fixup(struct tag *tags, char **cmdline, - struct meminfo *mi) +static void __init smdk2413_fixup(struct tag *tags, char **cmdline) { if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) { - mi->nr_banks=1; - mi->bank[0].start = 0x30000000; - mi->bank[0].size = SZ_64M; + memblock_add(0x30000000, SZ_64M); } } diff --git a/arch/arm/mach-s3c24xx/mach-vstms.c b/arch/arm/mach-s3c24xx/mach-vstms.c index 40868c0e0a68..a79af7843aed 100644 --- a/arch/arm/mach-s3c24xx/mach-vstms.c +++ b/arch/arm/mach-s3c24xx/mach-vstms.c @@ -23,6 +23,7 @@ #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> +#include <linux/memblock.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -129,13 +130,10 @@ static struct platform_device *vstms_devices[] __initdata = { &s3c2412_device_dma, }; -static void __init vstms_fixup(struct tag *tags, char **cmdline, - struct meminfo *mi) +static void __init vstms_fixup(struct tag *tags, char **cmdline) { if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) { - mi->nr_banks=1; - mi->bank[0].start = 0x30000000; - mi->bank[0].size = SZ_64M; + memblock_add(0x30000000, SZ_64M); } } diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index 8443a27bca2f..7dd894ece9ae 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c @@ -531,7 +531,7 @@ static void __init get_assabet_scr(void) } static void __init -fixup_assabet(struct tag *tags, char **cmdline, struct meminfo *mi) +fixup_assabet(struct tag *tags, char **cmdline) { /* This must be done before any call to machine_has_neponset() */ map_sa1100_gpio_regs(); diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c index 29e7785a54bc..b743a0ae02ce 100644 --- a/arch/arm/mach-vexpress/tc2_pm.c +++ b/arch/arm/mach-vexpress/tc2_pm.c @@ -209,7 +209,7 @@ static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster) #define POLL_MSEC 10 #define TIMEOUT_MSEC 1000 -static int tc2_pm_power_down_finish(unsigned int cpu, unsigned int cluster) +static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster) { unsigned tries; @@ -290,7 +290,7 @@ static void tc2_pm_powered_up(void) static const struct mcpm_platform_ops tc2_pm_power_ops = { .power_up = tc2_pm_power_up, .power_down = tc2_pm_power_down, - .power_down_finish = tc2_pm_power_down_finish, + .wait_for_powerdown = tc2_pm_wait_for_powerdown, .suspend = tc2_pm_suspend, .powered_up = tc2_pm_powered_up, }; diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 924036473b16..b8cb1a2688a0 100644 --- 
a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -28,6 +28,7 @@ #include <asm/opcodes.h> #include "fault.h" +#include "mm.h" /* * 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998 @@ -81,6 +82,7 @@ static unsigned long ai_word; static unsigned long ai_dword; static unsigned long ai_multi; static int ai_usermode; +static unsigned long cr_no_alignment; core_param(alignment, ai_usermode, int, 0600); @@ -91,7 +93,7 @@ core_param(alignment, ai_usermode, int, 0600); /* Return true if and only if the ARMv6 unaligned access model is in use. */ static bool cpu_is_v6_unaligned(void) { - return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U); + return cpu_architecture() >= CPU_ARCH_ARMv6 && get_cr() & CR_U; } static int safe_usermode(int new_usermode, bool warn) @@ -949,6 +951,13 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) return 0; } +static int __init noalign_setup(char *__unused) +{ + set_cr(__clear_cr(CR_A)); + return 1; +} +__setup("noalign", noalign_setup); + /* * This needs to be done after sysctl_init, otherwise sys/ will be * overwritten. Actually, this shouldn't be in sys/ at all since @@ -966,14 +975,12 @@ static int __init alignment_init(void) return -ENOMEM; #endif -#ifdef CONFIG_CPU_CP15 if (cpu_is_v6_unaligned()) { - cr_alignment &= ~CR_A; - cr_no_alignment &= ~CR_A; - set_cr(cr_alignment); + set_cr(__clear_cr(CR_A)); ai_usermode = safe_usermode(ai_usermode, false); } -#endif + + cr_no_alignment = get_cr() & ~CR_A; hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN, "alignment exception"); diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 778bcf88ee79..615c99e38ba1 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -59,7 +59,7 @@ ENTRY(v7_invalidate_l1) bgt 2b cmp r2, #0 bgt 1b - dsb + dsb st isb mov pc, lr ENDPROC(v7_invalidate_l1) @@ -166,7 +166,7 @@ skip: finished: mov r10, #0 @ swith back to cache level 0 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr - dsb + dsb st isb mov pc, lr ENDPROC(v7_flush_dcache_all) @@ -335,7 +335,7 @@ ENTRY(v7_flush_kern_dcache_area) add r0, r0, r2 cmp r0, r1 blo 1b - dsb + dsb st mov pc, lr ENDPROC(v7_flush_kern_dcache_area) @@ -368,7 +368,7 @@ v7_dma_inv_range: add r0, r0, r2 cmp r0, r1 blo 1b - dsb + dsb st mov pc, lr ENDPROC(v7_dma_inv_range) @@ -390,7 +390,7 @@ v7_dma_clean_range: add r0, r0, r2 cmp r0, r1 blo 1b - dsb + dsb st mov pc, lr ENDPROC(v7_dma_clean_range) @@ -412,7 +412,7 @@ ENTRY(v7_dma_flush_range) add r0, r0, r2 cmp r0, r1 blo 1b - dsb + dsb st mov pc, lr ENDPROC(v7_dma_flush_range) diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 6b00be1f971e..b05e08c4734c 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -904,11 +904,12 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, unsigned long paddr = page_to_phys(page) + off; /* FIXME: non-speculating: not required */ - /* don't bother invalidating if DMA to device */ - if (dir != DMA_TO_DEVICE) + /* in any case, don't bother invalidating if DMA to device */ + if (dir != DMA_TO_DEVICE) { outer_inv_range(paddr, paddr + size); - dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); + dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); + } /* * Mark the D-cache clean for these pages to avoid extra flushing. 
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 3387e60e4ea3..43d54f5b26b9 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -104,17 +104,20 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig #define flush_icache_alias(pfn,vaddr,len) do { } while (0) #endif +#define FLAG_PA_IS_EXEC 1 +#define FLAG_PA_CORE_IN_MM 2 + static void flush_ptrace_access_other(void *args) { __flush_icache_all(); } -static -void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, unsigned long len) +static inline +void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr, + unsigned long len, unsigned int flags) { if (cache_is_vivt()) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { + if (flags & FLAG_PA_CORE_IN_MM) { unsigned long addr = (unsigned long)kaddr; __cpuc_coherent_kern_range(addr, addr + len); } @@ -128,7 +131,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, } /* VIPT non-aliasing D-cache */ - if (vma->vm_flags & VM_EXEC) { + if (flags & FLAG_PA_IS_EXEC) { unsigned long addr = (unsigned long)kaddr; if (icache_is_vipt_aliasing()) flush_icache_alias(page_to_pfn(page), uaddr, len); @@ -140,6 +143,26 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, } } +static +void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, + unsigned long uaddr, void *kaddr, unsigned long len) +{ + unsigned int flags = 0; + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) + flags |= FLAG_PA_CORE_IN_MM; + if (vma->vm_flags & VM_EXEC) + flags |= FLAG_PA_IS_EXEC; + __flush_ptrace_access(page, uaddr, kaddr, len, flags); +} + +void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, + void *kaddr, unsigned long len) +{ + unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC; + + __flush_ptrace_access(page, uaddr, kaddr, len, flags); +} + /* * Copy user data from/to a page which is mapped into a different * processes address space. Really, we want to allow our "user diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 21b9e1bf9b77..45aeaaca9052 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -18,6 +18,21 @@ #include <asm/tlbflush.h> #include "mm.h" +pte_t *fixmap_page_table; + +static inline void set_fixmap_pte(int idx, pte_t pte) +{ + unsigned long vaddr = __fix_to_virt(idx); + set_pte_ext(fixmap_page_table + idx, pte, 0); + local_flush_tlb_kernel_page(vaddr); +} + +static inline pte_t get_fixmap_pte(unsigned long vaddr) +{ + unsigned long idx = __virt_to_fix(vaddr); + return *(fixmap_page_table + idx); +} + void *kmap(struct page *page) { might_sleep(); @@ -63,20 +78,20 @@ void *kmap_atomic(struct page *page) type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR * smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + vaddr = __fix_to_virt(idx); #ifdef CONFIG_DEBUG_HIGHMEM /* * With debugging enabled, kunmap_atomic forces that entry to 0. * Make sure it was indeed properly unmapped. */ - BUG_ON(!pte_none(get_top_pte(vaddr))); + BUG_ON(!pte_none(*(fixmap_page_table + idx))); #endif /* * When debugging is off, kunmap_atomic leaves the previous mapping * in place, so the contained TLB flush ensures the TLB is updated * with the new mapping. 
*/ - set_top_pte(vaddr, mk_pte(page, kmap_prot)); + set_fixmap_pte(idx, mk_pte(page, kmap_prot)); return (void *)vaddr; } @@ -94,8 +109,8 @@ void __kunmap_atomic(void *kvaddr) if (cache_is_vivt()) __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); - set_top_pte(vaddr, __pte(0)); + BUG_ON(vaddr != __fix_to_virt(idx)); + set_fixmap_pte(idx, __pte(0)); #else (void) idx; /* to kill a warning */ #endif @@ -117,11 +132,11 @@ void *kmap_atomic_pfn(unsigned long pfn) type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR * smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + vaddr = __fix_to_virt(idx); #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(!pte_none(get_top_pte(vaddr))); + BUG_ON(!pte_none(*(fixmap_page_table + idx))); #endif - set_top_pte(vaddr, pfn_pte(pfn, kmap_prot)); + set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); return (void *)vaddr; } @@ -133,5 +148,5 @@ struct page *kmap_atomic_to_page(const void *ptr) if (vaddr < FIXADDR_START) return virt_to_page(ptr); - return pte_page(get_top_pte(vaddr)); + return pte_page(get_fixmap_pte(vaddr)); } diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 2a77ba8796ae..5958ac05181e 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -23,6 +23,7 @@ #include <linux/dma-contiguous.h> #include <linux/sizes.h> +#include <asm/cp15.h> #include <asm/mach-types.h> #include <asm/memblock.h> #include <asm/prom.h> @@ -36,6 +37,14 @@ #include "mm.h" +#ifdef CONFIG_CPU_CP15_MMU +unsigned long __init __clear_cr(unsigned long mask) +{ + cr_alignment = cr_alignment & ~mask; + return cr_alignment; +} +#endif + static phys_addr_t phys_initrd_start __initdata = 0; static unsigned long phys_initrd_size __initdata = 0; @@ -81,24 +90,21 @@ __tagtable(ATAG_INITRD2, parse_tag_initrd2); * initialization functions, as well as show_mem() for the skipping * of holes in the memory map. It is populated by arm_add_memory(). 
*/ -struct meminfo meminfo; - void show_mem(unsigned int filter) { int free = 0, total = 0, reserved = 0; - int shared = 0, cached = 0, slab = 0, i; - struct meminfo * mi = &meminfo; + int shared = 0, cached = 0, slab = 0; + struct memblock_region *reg; printk("Mem-info:\n"); show_free_areas(filter); - for_each_bank (i, mi) { - struct membank *bank = &mi->bank[i]; + for_each_memblock (memory, reg) { unsigned int pfn1, pfn2; struct page *page, *end; - pfn1 = bank_pfn_start(bank); - pfn2 = bank_pfn_end(bank); + pfn1 = memblock_region_memory_base_pfn(reg); + pfn2 = memblock_region_memory_end_pfn(reg); page = pfn_to_page(pfn1); end = pfn_to_page(pfn2 - 1) + 1; @@ -115,8 +121,9 @@ void show_mem(unsigned int filter) free++; else shared += page_count(page) - 1; - page++; - } while (page < end); + pfn1++; + page = pfn_to_page(pfn1); + } while (pfn1 < pfn2); } printk("%d pages of RAM\n", total); @@ -130,16 +137,9 @@ void show_mem(unsigned int filter) static void __init find_limits(unsigned long *min, unsigned long *max_low, unsigned long *max_high) { - struct meminfo *mi = &meminfo; - int i; - - /* This assumes the meminfo array is properly sorted */ - *min = bank_pfn_start(&mi->bank[0]); - for_each_bank (i, mi) - if (mi->bank[i].highmem) - break; - *max_low = bank_pfn_end(&mi->bank[i - 1]); - *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]); + *max_low = PFN_DOWN(memblock_get_current_limit()); + *min = PFN_UP(memblock_start_of_DRAM()); + *max_high = PFN_DOWN(memblock_end_of_DRAM()); } #ifdef CONFIG_ZONE_DMA @@ -274,14 +274,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) return phys; } -void __init arm_memblock_init(struct meminfo *mi, - const struct machine_desc *mdesc) +void __init arm_memblock_init(const struct machine_desc *mdesc) { - int i; - - for (i = 0; i < mi->nr_banks; i++) - memblock_add(mi->bank[i].start, mi->bank[i].size); - /* Register the kernel text, kernel data and initrd with memblock. */ #ifdef CONFIG_XIP_KERNEL memblock_reserve(__pa(_sdata), _end - _sdata); @@ -413,54 +407,53 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) /* * The mem_map array can get very big. Free the unused area of the memory map. */ -static void __init free_unused_memmap(struct meminfo *mi) +static void __init free_unused_memmap(void) { - unsigned long bank_start, prev_bank_end = 0; - unsigned int i; + unsigned long start, prev_end = 0; + struct memblock_region *reg; /* * This relies on each bank being in address order. * The banks are sorted previously in bootmem_init(). */ - for_each_bank(i, mi) { - struct membank *bank = &mi->bank[i]; - - bank_start = bank_pfn_start(bank); + for_each_memblock(memory, reg) { + start = memblock_region_memory_base_pfn(reg); #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist * due to SPARSEMEM sections which aren't present. */ - bank_start = min(bank_start, - ALIGN(prev_bank_end, PAGES_PER_SECTION)); + start = min(start, + ALIGN(prev_end, PAGES_PER_SECTION)); #else /* * Align down here since the VM subsystem insists that the * memmap entries are valid from the bank start aligned to * MAX_ORDER_NR_PAGES. */ - bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES); + start = round_down(start, MAX_ORDER_NR_PAGES); #endif /* * If we had a previous bank, and there is a space * between the current bank and the previous, free it. 
*/ - if (prev_bank_end && prev_bank_end < bank_start) - free_memmap(prev_bank_end, bank_start); + if (prev_end && prev_end < start) + free_memmap(prev_end, start); /* * Align up here since the VM subsystem insists that the * memmap entries are valid from the bank end aligned to * MAX_ORDER_NR_PAGES. */ - prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); + prev_end = ALIGN(memblock_region_memory_end_pfn(reg), + MAX_ORDER_NR_PAGES); } #ifdef CONFIG_SPARSEMEM - if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) - free_memmap(prev_bank_end, - ALIGN(prev_bank_end, PAGES_PER_SECTION)); + if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) + free_memmap(prev_end, + ALIGN(prev_end, PAGES_PER_SECTION)); #endif } @@ -536,7 +529,7 @@ void __init mem_init(void) set_max_mapnr(pfn_to_page(max_pfn) - mem_map); /* this will put all unused low memory onto the freelists */ - free_unused_memmap(&meminfo); + free_unused_memmap(); free_all_bootmem(); #ifdef CONFIG_SA1111 diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index f9c32ba73544..d1e5ad7ab3bc 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -438,6 +438,13 @@ void __arm_iounmap(volatile void __iomem *io_addr) EXPORT_SYMBOL(__arm_iounmap); #ifdef CONFIG_PCI +static int pci_ioremap_mem_type = MT_DEVICE; + +void pci_ioremap_set_mem_type(int mem_type) +{ + pci_ioremap_mem_type = mem_type; +} + int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr) { BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT); @@ -445,7 +452,7 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr) return ioremap_page_range(PCI_IO_VIRT_BASE + offset, PCI_IO_VIRT_BASE + offset + SZ_64K, phys_addr, - __pgprot(get_mem_type(MT_DEVICE)->prot_pte)); + __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte)); } EXPORT_SYMBOL_GPL(pci_ioremap_io); #endif diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 7ea641b7aa7d..ce727d47275c 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -2,6 +2,8 @@ #include <linux/list.h> #include <linux/vmalloc.h> +#include <asm/pgtable.h> + /* the upper-most page table pointer */ extern pmd_t *top_pmd; @@ -93,3 +95,5 @@ extern phys_addr_t arm_lowmem_limit; void __init bootmem_init(void); void arm_mm_memblock_reserve(void); void dma_contiguous_remap(void); + +unsigned long __clear_cr(unsigned long mask); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index b68c6b22e1c8..ab14b79b03f0 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -35,6 +35,7 @@ #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/pci.h> +#include <asm/fixmap.h> #include "mm.h" #include "tcm.h" @@ -117,28 +118,54 @@ static struct cachepolicy cache_policies[] __initdata = { }; #ifdef CONFIG_CPU_CP15 +static unsigned long initial_pmd_value __initdata = 0; + /* - * These are useful for identifying cache coherency - * problems by allowing the cache or the cache and - * writebuffer to be turned off. (Note: the write - * buffer should not be on and the cache off). + * Initialise the cache_policy variable with the initial state specified + * via the "pmd" value. This is used to ensure that on ARMv6 and later, + * the C code sets the page tables up with the same policy as the head + * assembly code, which avoids an illegal state where the TLBs can get + * confused. See comments in early_cachepolicy() for more information. 
*/ -static int __init early_cachepolicy(char *p) +void __init init_default_cache_policy(unsigned long pmd) { int i; + initial_pmd_value = pmd; + + pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE; + + for (i = 0; i < ARRAY_SIZE(cache_policies); i++) + if (cache_policies[i].pmd == pmd) { + cachepolicy = i; + break; + } + + if (i == ARRAY_SIZE(cache_policies)) + pr_err("ERROR: could not find cache policy\n"); +} + +/* + * These are useful for identifying cache coherency problems by allowing + * the cache or the cache and writebuffer to be turned off. (Note: the + * write buffer should not be on and the cache off). + */ +static int __init early_cachepolicy(char *p) +{ + int i, selected = -1; + for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { int len = strlen(cache_policies[i].policy); if (memcmp(p, cache_policies[i].policy, len) == 0) { - cachepolicy = i; - cr_alignment &= ~cache_policies[i].cr_mask; - cr_no_alignment &= ~cache_policies[i].cr_mask; + selected = i; break; } } - if (i == ARRAY_SIZE(cache_policies)) - printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n"); + + if (selected == -1) + pr_err("ERROR: unknown or unsupported cache policy\n"); + /* * This restriction is partly to do with the way we boot; it is * unpredictable to have memory mapped using two different sets of @@ -146,12 +173,18 @@ static int __init early_cachepolicy(char *p) * change these attributes once the initial assembly has setup the * page tables. */ - if (cpu_architecture() >= CPU_ARCH_ARMv6) { - printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n"); - cachepolicy = CPOLICY_WRITEBACK; + if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) { + pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n", + cache_policies[cachepolicy].policy); + return 0; + } + + if (selected != cachepolicy) { + unsigned long cr = __clear_cr(cache_policies[selected].cr_mask); + cachepolicy = selected; + flush_cache_all(); + set_cr(cr); } - flush_cache_all(); - set_cr(cr_alignment); return 0; } early_param("cachepolicy", early_cachepolicy); @@ -186,35 +219,6 @@ static int __init early_ecc(char *p) early_param("ecc", early_ecc); #endif -static int __init noalign_setup(char *__unused) -{ - cr_alignment &= ~CR_A; - cr_no_alignment &= ~CR_A; - set_cr(cr_alignment); - return 1; -} -__setup("noalign", noalign_setup); - -#ifndef CONFIG_SMP -void adjust_cr(unsigned long mask, unsigned long set) -{ - unsigned long flags; - - mask &= ~CR_A; - - set &= mask; - - local_irq_save(flags); - - cr_no_alignment = (cr_no_alignment & ~mask) | set; - cr_alignment = (cr_alignment & ~mask) | set; - - set_cr((get_cr() & ~mask) | set); - - local_irq_restore(flags); -} -#endif - #else /* ifdef CONFIG_CPU_CP15 */ static int __init early_cachepolicy(char *p) @@ -414,8 +418,17 @@ static void __init build_mem_type_table(void) cachepolicy = CPOLICY_WRITEBACK; ecc_mask = 0; } - if (is_smp()) - cachepolicy = CPOLICY_WRITEALLOC; + + if (is_smp()) { + if (cachepolicy != CPOLICY_WRITEALLOC) { + pr_warn("Forcing write-allocate cache policy for SMP\n"); + cachepolicy = CPOLICY_WRITEALLOC; + } + if (!(initial_pmd_value & PMD_SECT_S)) { + pr_warn("Forcing shared mappings for SMP\n"); + initial_pmd_value |= PMD_SECT_S; + } + } /* * Strip out features not present on earlier architectures. 
@@ -539,11 +552,12 @@ static void __init build_mem_type_table(void) mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; #endif - if (is_smp()) { - /* - * Mark memory with the "shared" attribute - * for SMP systems - */ + /* + * If the initial page tables were created with the S bit + * set, then we need to do the same here for the same + * reasons given in early_cachepolicy(). + */ + if (initial_pmd_value & PMD_SECT_S) { user_pgprot |= L_PTE_SHARED; kern_pgprot |= L_PTE_SHARED; vecs_pgprot |= L_PTE_SHARED; @@ -1061,74 +1075,47 @@ phys_addr_t arm_lowmem_limit __initdata = 0; void __init sanity_check_meminfo(void) { phys_addr_t memblock_limit = 0; - int i, j, highmem = 0; + int highmem = 0; phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; + struct memblock_region *reg; - for (i = 0, j = 0; i < meminfo.nr_banks; i++) { - struct membank *bank = &meminfo.bank[j]; - phys_addr_t size_limit; - - *bank = meminfo.bank[i]; - size_limit = bank->size; + for_each_memblock(memory, reg) { + phys_addr_t block_start = reg->base; + phys_addr_t block_end = reg->base + reg->size; + phys_addr_t size_limit = reg->size; - if (bank->start >= vmalloc_limit) + if (reg->base >= vmalloc_limit) highmem = 1; else - size_limit = vmalloc_limit - bank->start; + size_limit = vmalloc_limit - reg->base; - bank->highmem = highmem; -#ifdef CONFIG_HIGHMEM - /* - * Split those memory banks which are partially overlapping - * the vmalloc area greatly simplifying things later. - */ - if (!highmem && bank->size > size_limit) { - if (meminfo.nr_banks >= NR_BANKS) { - printk(KERN_CRIT "NR_BANKS too low, " - "ignoring high memory\n"); - } else { - memmove(bank + 1, bank, - (meminfo.nr_banks - i) * sizeof(*bank)); - meminfo.nr_banks++; - i++; - bank[1].size -= size_limit; - bank[1].start = vmalloc_limit; - bank[1].highmem = highmem = 1; - j++; + if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { + + if (highmem) { + pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", + &block_start, &block_end); + memblock_remove(reg->base, reg->size); + continue; } - bank->size = size_limit; - } -#else - /* - * Highmem banks not allowed with !CONFIG_HIGHMEM. - */ - if (highmem) { - printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx " - "(!CONFIG_HIGHMEM).\n", - (unsigned long long)bank->start, - (unsigned long long)bank->start + bank->size - 1); - continue; - } - /* - * Check whether this memory bank would partially overlap - * the vmalloc area. - */ - if (bank->size > size_limit) { - printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " - "to -%.8llx (vmalloc region overlap).\n", - (unsigned long long)bank->start, - (unsigned long long)bank->start + bank->size - 1, - (unsigned long long)bank->start + size_limit - 1); - bank->size = size_limit; + if (reg->size > size_limit) { + phys_addr_t overlap_size = reg->size - size_limit; + + pr_notice("Truncating RAM at %pa-%pa to -%pa", + &block_start, &block_end, &vmalloc_limit); + memblock_remove(vmalloc_limit, overlap_size); + block_end = vmalloc_limit; + } } -#endif - if (!bank->highmem) { - phys_addr_t bank_end = bank->start + bank->size; - if (bank_end > arm_lowmem_limit) - arm_lowmem_limit = bank_end; + if (!highmem) { + if (block_end > arm_lowmem_limit) { + if (reg->size > size_limit) + arm_lowmem_limit = vmalloc_limit; + else + arm_lowmem_limit = block_end; + } /* * Find the first non-section-aligned page, and point @@ -1144,35 +1131,15 @@ void __init sanity_check_meminfo(void) * occurs before any free memory is mapped. 
*/ if (!memblock_limit) { - if (!IS_ALIGNED(bank->start, SECTION_SIZE)) - memblock_limit = bank->start; - else if (!IS_ALIGNED(bank_end, SECTION_SIZE)) - memblock_limit = bank_end; + if (!IS_ALIGNED(block_start, SECTION_SIZE)) + memblock_limit = block_start; + else if (!IS_ALIGNED(block_end, SECTION_SIZE)) + memblock_limit = arm_lowmem_limit; } - } - j++; - } -#ifdef CONFIG_HIGHMEM - if (highmem) { - const char *reason = NULL; - if (cache_is_vipt_aliasing()) { - /* - * Interactions between kmap and other mappings - * make highmem support with aliasing VIPT caches - * rather difficult. - */ - reason = "with VIPT aliasing cache"; - } - if (reason) { - printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", - reason); - while (j > 0 && meminfo.bank[j - 1].highmem) - j--; } } -#endif - meminfo.nr_banks = j; + high_memory = __va(arm_lowmem_limit - 1) + 1; /* @@ -1359,6 +1326,9 @@ static void __init kmap_init(void) #ifdef CONFIG_HIGHMEM pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), PKMAP_BASE, _PAGE_KERNEL_TABLE); + + fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START), + FIXADDR_START, _PAGE_KERNEL_TABLE); #endif } @@ -1461,7 +1431,7 @@ void __init early_paging_init(const struct machine_desc *mdesc, * just complicate the code. */ flush_cache_louis(); - dsb(); + dsb(ishst); isb(); /* remap level 1 table */ diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 55764a7ef1f0..da1874f9f8cf 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -88,30 +88,35 @@ static unsigned long irbar_read(void) void __init sanity_check_meminfo_mpu(void) { int i; - struct membank *bank = meminfo.bank; phys_addr_t phys_offset = PHYS_OFFSET; phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; - - /* Initially only use memory continuous from PHYS_OFFSET */ - if (bank_phys_start(&bank[0]) != phys_offset) - panic("First memory bank must be contiguous from PHYS_OFFSET"); - - /* Banks have already been sorted by start address */ - for (i = 1; i < meminfo.nr_banks; i++) { - if (bank[i].start <= bank_phys_end(&bank[0]) && - bank_phys_end(&bank[i]) > bank_phys_end(&bank[0])) { - bank[0].size = bank_phys_end(&bank[i]) - bank[0].start; + struct memblock_region *reg; + bool first = true; + phys_addr_t mem_start; + phys_addr_t mem_end; + + for_each_memblock(memory, reg) { + if (first) { + /* + * Initially only use memory continuous from + * PHYS_OFFSET */ + if (reg->base != phys_offset) + panic("First memory bank must be contiguous from PHYS_OFFSET"); + + mem_start = reg->base; + mem_end = reg->base + reg->size; + specified_mem_size = reg->size; + first = false; } else { - pr_notice("Ignoring RAM after 0x%.8lx. 
" - "First non-contiguous (ignored) bank start: 0x%.8lx\n", - (unsigned long)bank_phys_end(&bank[0]), - (unsigned long)bank_phys_start(&bank[i])); - break; + /* + * memblock auto merges contiguous blocks, remove + * all blocks afterwards + */ + pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n", + &mem_start, &reg->base); + memblock_remove(reg->base, reg->size); } } - /* All contiguous banks are now merged in to the first bank */ - meminfo.nr_banks = 1; - specified_mem_size = bank[0].size; /* * MPU has curious alignment requirements: Size must be power of 2, and @@ -128,23 +133,24 @@ void __init sanity_check_meminfo_mpu(void) */ aligned_region_size = (phys_offset - 1) ^ (phys_offset); /* Find the max power-of-two sized region that fits inside our bank */ - rounded_mem_size = (1 << __fls(bank[0].size)) - 1; + rounded_mem_size = (1 << __fls(specified_mem_size)) - 1; /* The actual region size is the smaller of the two */ aligned_region_size = aligned_region_size < rounded_mem_size ? aligned_region_size + 1 : rounded_mem_size + 1; - if (aligned_region_size != specified_mem_size) - pr_warn("Truncating memory from 0x%.8lx to 0x%.8lx (MPU region constraints)", - (unsigned long)specified_mem_size, - (unsigned long)aligned_region_size); + if (aligned_region_size != specified_mem_size) { + pr_warn("Truncating memory from %pa to %pa (MPU region constraints)", - &specified_mem_size, &aligned_region_size); + &specified_mem_size, &aligned_region_size); + memblock_remove(mem_start + aligned_region_size, + specified_mem_size - aligned_region_size); + + mem_end = mem_start + aligned_region_size; + } - meminfo.bank[0].size = aligned_region_size; - pr_debug("MPU Region from 0x%.8lx size 0x%.8lx (end 0x%.8lx))\n", - (unsigned long)phys_offset, - (unsigned long)aligned_region_size, - (unsigned long)bank_phys_end(&bank[0])); + pr_debug("MPU Region from %pa size %pa (end %pa))\n", + &phys_offset, &aligned_region_size, &mem_end); } @@ -292,7 +298,7 @@ void __init sanity_check_meminfo(void) { phys_addr_t end; sanity_check_meminfo_mpu(); - end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); + end = memblock_end_of_DRAM(); high_memory = __va(end - 1) + 1; } diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 01a719e18bb0..22e3ad63500c 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S @@ -64,6 +64,14 @@ ENTRY(cpu_v7_switch_mm) mov pc, lr ENDPROC(cpu_v7_switch_mm) +#ifdef __ARMEB__ +#define rl r3 +#define rh r2 +#else +#define rl r2 +#define rh r3 +#endif + /* * cpu_v7_set_pte_ext(ptep, pte) * @@ -73,13 +81,13 @@ ENDPROC(cpu_v7_switch_mm) */ ENTRY(cpu_v7_set_pte_ext) #ifdef CONFIG_MMU - tst r2, #L_PTE_VALID + tst rl, #L_PTE_VALID beq 1f - tst r3, #1 << (57 - 32) @ L_PTE_NONE - bicne r2, #L_PTE_VALID + tst rh, #1 << (57 - 32) @ L_PTE_NONE + bicne rl, #L_PTE_VALID bne 1f - tst r3, #1 << (55 - 32) @ L_PTE_DIRTY - orreq r2, #L_PTE_RDONLY + tst rh, #1 << (55 - 32) @ L_PTE_DIRTY + orreq rl, #L_PTE_RDONLY 1: strd r2, r3, [r0] ALT_SMP(W(nop)) ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 195731d3813b..3db2c2f04a30 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -169,9 +169,31 @@ ENDPROC(cpu_pj4b_do_idle) globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle #endif globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area - globl_equ cpu_pj4b_do_suspend, cpu_v7_do_suspend - globl_equ cpu_pj4b_do_resume, cpu_v7_do_resume - globl_equ cpu_pj4b_suspend_size, cpu_v7_suspend_size +#ifdef CONFIG_ARM_CPU_SUSPEND
+ENTRY(cpu_pj4b_do_suspend) + stmfd sp!, {r6 - r10} + mrc p15, 1, r6, c15, c1, 0 @ save CP15 - extra features + mrc p15, 1, r7, c15, c2, 0 @ save CP15 - Aux Func Modes Ctrl 0 + mrc p15, 1, r8, c15, c1, 2 @ save CP15 - Aux Debug Modes Ctrl 2 + mrc p15, 1, r9, c15, c1, 1 @ save CP15 - Aux Debug Modes Ctrl 1 + mrc p15, 0, r10, c9, c14, 0 @ save CP15 - PMC + stmia r0!, {r6 - r10} + ldmfd sp!, {r6 - r10} + b cpu_v7_do_suspend +ENDPROC(cpu_pj4b_do_suspend) + +ENTRY(cpu_pj4b_do_resume) + ldmia r0!, {r6 - r10} + mcr p15, 1, r6, c15, c1, 0 @ restore CP15 - extra features + mcr p15, 1, r7, c15, c2, 0 @ restore CP15 - Aux Func Modes Ctrl 0 + mcr p15, 1, r8, c15, c1, 2 @ restore CP15 - Aux Debug Modes Ctrl 2 + mcr p15, 1, r9, c15, c1, 1 @ restore CP15 - Aux Debug Modes Ctrl 1 + mcr p15, 0, r10, c9, c14, 0 @ restore CP15 - PMC + b cpu_v7_do_resume +ENDPROC(cpu_pj4b_do_resume) +#endif +.globl cpu_pj4b_suspend_size +.equ cpu_pj4b_suspend_size, 4 * 14 #endif @@ -194,6 +216,7 @@ __v7_cr7mp_setup: __v7_ca7mp_setup: __v7_ca12mp_setup: __v7_ca15mp_setup: +__v7_ca17mp_setup: mov r10, #0 1: #ifdef CONFIG_SMP @@ -505,6 +528,16 @@ __v7_ca15mp_proc_info: .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info /* + * ARM Ltd. Cortex A17 processor. + */ + .type __v7_ca17mp_proc_info, #object +__v7_ca17mp_proc_info: + .long 0x410fc0e0 + .long 0xff0ffff0 + __v7_proc __v7_ca17mp_setup + .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info + + /* * Qualcomm Inc. Krait processors. */ .type __krait_proc_info, #object diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S index 0c93588fcb91..1ca37c72f12f 100644 --- a/arch/arm/mm/proc-v7m.S +++ b/arch/arm/mm/proc-v7m.S @@ -123,6 +123,11 @@ __v7m_setup: mov pc, lr ENDPROC(__v7m_setup) + .align 2 +__v7m_setup_stack: + .space 4 * 8 @ 8 registers +__v7m_setup_stack_top: + define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" @@ -152,6 +157,3 @@ __v7m_proc_info: .long nop_cache_fns @ proc_info_list.cache .size __v7m_proc_info, . - __v7m_proc_info -__v7m_setup_stack: - .space 4 * 8 @ 8 registers -__v7m_setup_stack_top: diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S index f0759e70fb86..fe6ca574d093 100644 --- a/arch/arm/vfp/entry.S +++ b/arch/arm/vfp/entry.S @@ -22,11 +22,10 @@ @ r9 = normal "successful" return address @ r10 = this threads thread_info structure @ lr = unrecognised instruction return address -@ IRQs disabled. +@ IRQs enabled.
@ ENTRY(do_vfp) inc_preempt_count r10, r4 - enable_irq ldr r4, .LCvfp ldr r11, [r10, #TI_CPU] @ CPU number add r10, r10, #TI_VFPSTATE @ r10 = workspace diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 63b5eff0a80f..fdd7e1b61f60 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -47,6 +47,7 @@ struct amba_driver { enum amba_vendor { AMBA_VENDOR_ARM = 0x41, AMBA_VENDOR_ST = 0x80, + AMBA_VENDOR_QCOM = 0x51, }; extern struct bus_type amba_bustype; diff --git a/include/linux/suspend.h b/include/linux/suspend.h index f73cabf59012..38bbf95109da 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -320,6 +320,8 @@ extern unsigned long get_safe_page(gfp_t gfp_mask); extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); extern int hibernate(void); extern bool system_entering_hibernation(void); +asmlinkage int swsusp_save(void); +extern struct pbe *restore_pblist; #else /* CONFIG_HIBERNATION */ static inline void register_nosave_region(unsigned long b, unsigned long e) {} static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index edff2b97b864..c52f827ba6ce 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -32,6 +32,7 @@ struct vm_area_struct; struct mm_struct; struct inode; struct notifier_block; +struct page; #define UPROBE_HANDLER_REMOVE 1 #define UPROBE_HANDLER_MASK 1 @@ -127,6 +128,8 @@ extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned l extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs); +extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len); #else /* !CONFIG_UPROBES */ struct uprobes_state { }; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 04709b66369d..4968213c63fa 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1296,14 +1296,8 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe) if (unlikely(!xol_vaddr)) return 0; - /* Initialize the slot */ - copy_to_page(area->page, xol_vaddr, - &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); - /* - * We probably need flush_icache_user_range() but it needs vma. - * This should work on supported architectures too. - */ - flush_dcache_page(area->page); + arch_uprobe_copy_ixol(area->page, xol_vaddr, + &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); return xol_vaddr; } @@ -1346,6 +1340,21 @@ static void xol_free_insn_slot(struct task_struct *tsk) } } +void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len) +{ + /* Initialize the slot */ + copy_to_page(page, vaddr, src, len); + + /* + * We probably need flush_icache_user_range() but it needs vma. + * This should work on most of architectures by default. If + * architecture needs to do something different it can define + * its own version of the function. + */ + flush_dcache_page(page); +} + /** * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs * @regs: Reflects the saved state of the task after it has hit a breakpoint |
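A note on the board fixup conversions in the hunks above: each machine's .fixup callback loses its struct meminfo * argument and registers RAM directly with memblock instead of filling in meminfo banks. A minimal sketch of a converted fixup for a hypothetical board follows; the function name, base address and size are illustrative only and not taken from this series.

    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/sizes.h>
    #include <asm/setup.h>

    /* Hypothetical board: one 64MB bank at 0x30000000 when the bootloader
     * did not pass usable memory ATAGs. */
    static void __init myboard_fixup(struct tag *tags, char **cmdline)
    {
        /* Replaces setting mi->bank[0].start/.size and mi->nr_banks = 1 */
        memblock_add(0x30000000, SZ_64M);
    }

The callback is still wired up through the machine descriptor's .fixup field; only the prototype and the registration mechanism change.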
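In the same spirit, code that used to walk meminfo with for_each_bank() now iterates memblock regions directly, as show_mem() and free_unused_memmap() do in the arch/arm/mm/init.c hunks. A small hypothetical helper in that style (not part of the patch) that counts the page frames registered below a lowmem pfn limit:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/memblock.h>

    static unsigned long __init count_lowmem_pages(unsigned long max_low_pfn)
    {
        struct memblock_region *reg;
        unsigned long pages = 0;

        for_each_memblock(memory, reg) {
            unsigned long start = memblock_region_memory_base_pfn(reg);
            unsigned long end = memblock_region_memory_end_pfn(reg);

            /* Regions come back in address order; skip highmem-only ones */
            if (start >= max_low_pfn)
                continue;
            pages += min(end, max_low_pfn) - start;
        }

        return pages;
    }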
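The new __weak arch_uprobe_copy_ixol() hook in kernel/events/uprobes.c pairs with the flush_uprobe_xol_access() helper added to arch/arm/mm/flush.c. An architecture whose caches need more than flush_dcache_page() can override the weak default roughly as below; this is a sketch of the idea, and it assumes the flush_uprobe_xol_access() prototype is made available via asm/cacheflush.h, which is not shown in this diff.

    #include <linux/highmem.h>
    #include <linux/string.h>
    #include <linux/uprobes.h>
    #include <asm/cacheflush.h>

    void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
                               void *src, unsigned long len)
    {
        void *xol_page_kaddr = kmap_atomic(page);
        void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);

        /* Initialize the XOL slot */
        memcpy(dst, src, len);

        /*
         * Make the copied instructions visible to the I-side before the
         * probed task single-steps them; kmap_atomic() has already
         * disabled preemption, so the flush runs on this CPU.
         */
        flush_uprobe_xol_access(page, vaddr, dst, len);

        kunmap_atomic(xol_page_kaddr);
    }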
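Finally, pci_ioremap_set_mem_type() in arch/arm/mm/ioremap.c lets a platform change the memory type pci_ioremap_io() uses before PCI I/O space is mapped; the default remains MT_DEVICE. A hedged usage sketch, where the MT_UNCACHED choice, the init-hook name and the asm/io.h prototype location are assumptions rather than something this patch dictates:

    #include <linux/init.h>
    #include <asm/io.h>
    #include <asm/mach/map.h>

    static void __init myplatform_map_io(void)
    {
        /*
         * Example: a platform whose I/O coherency scheme needs a
         * different attribute than MT_DEVICE for the PCI I/O window.
         */
        pci_ioremap_set_mem_type(MT_UNCACHED);
    }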