Diffstat (limited to 'arch')
1320 files changed, 18844 insertions, 9484 deletions
diff --git a/arch/alpha/boot/bootp.c b/arch/alpha/boot/bootp.c index be61670d4096..2a542a506557 100644 --- a/arch/alpha/boot/bootp.c +++ b/arch/alpha/boot/bootp.c @@ -13,7 +13,6 @@ #include <generated/utsrelease.h> #include <linux/mm.h> -#include <asm/system.h> #include <asm/console.h> #include <asm/hwrpb.h> #include <asm/pgtable.h> diff --git a/arch/alpha/boot/bootpz.c b/arch/alpha/boot/bootpz.c index c98865f21423..d6ad191698da 100644 --- a/arch/alpha/boot/bootpz.c +++ b/arch/alpha/boot/bootpz.c @@ -15,7 +15,6 @@ #include <generated/utsrelease.h> #include <linux/mm.h> -#include <asm/system.h> #include <asm/console.h> #include <asm/hwrpb.h> #include <asm/pgtable.h> diff --git a/arch/alpha/boot/head.S b/arch/alpha/boot/head.S index f3d98089b3dc..b06812bcac83 100644 --- a/arch/alpha/boot/head.S +++ b/arch/alpha/boot/head.S @@ -4,7 +4,6 @@ * initial bootloader stuff.. */ -#include <asm/system.h> .set noreorder .globl __start diff --git a/arch/alpha/boot/main.c b/arch/alpha/boot/main.c index ded57d9a80e1..3baf2d1e908d 100644 --- a/arch/alpha/boot/main.c +++ b/arch/alpha/boot/main.c @@ -11,7 +11,6 @@ #include <generated/utsrelease.h> #include <linux/mm.h> -#include <asm/system.h> #include <asm/console.h> #include <asm/hwrpb.h> #include <asm/pgtable.h> diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 640f909ddd41..f62251e82ffa 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -3,7 +3,6 @@ #include <linux/types.h> #include <asm/barrier.h> -#include <asm/system.h> /* * Atomic operations that C can't guarantee us. Useful for @@ -169,6 +168,73 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) return result; } +/* + * Atomic exchange routines. + */ + +#define __ASM__MB +#define ____xchg(type, args...) __xchg ## type ## _local(args) +#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args) +#include <asm/xchg.h> + +#define xchg_local(ptr,x) \ + ({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ + }) + +#define cmpxchg_local(ptr, o, n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, \ + sizeof(*(ptr))); \ + }) + +#define cmpxchg64_local(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg_local((ptr), (o), (n)); \ + }) + +#ifdef CONFIG_SMP +#undef __ASM__MB +#define __ASM__MB "\tmb\n" +#endif +#undef ____xchg +#undef ____cmpxchg +#define ____xchg(type, args...) __xchg ##type(args) +#define ____cmpxchg(type, args...) 
__cmpxchg ##type(args) +#include <asm/xchg.h> + +#define xchg(ptr,x) \ + ({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ + }) + +#define cmpxchg(ptr, o, n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr)));\ + }) + +#define cmpxchg64(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg((ptr), (o), (n)); \ + }) + +#undef __ASM__MB +#undef ____cmpxchg + +#define __HAVE_ARCH_CMPXCHG 1 + #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) diff --git a/arch/alpha/include/asm/auxvec.h b/arch/alpha/include/asm/auxvec.h index e96fe880e310..a3a579dfdb4d 100644 --- a/arch/alpha/include/asm/auxvec.h +++ b/arch/alpha/include/asm/auxvec.h @@ -21,4 +21,6 @@ #define AT_L2_CACHESHAPE 36 #define AT_L3_CACHESHAPE 37 +#define AT_VECTOR_SIZE_ARCH 4 /* entries in ARCH_DLINFO */ + #endif /* __ASM_ALPHA_AUXVEC_H */ diff --git a/arch/alpha/include/asm/core_lca.h b/arch/alpha/include/asm/core_lca.h index f7cb4b460954..8ee6c516279c 100644 --- a/arch/alpha/include/asm/core_lca.h +++ b/arch/alpha/include/asm/core_lca.h @@ -1,8 +1,8 @@ #ifndef __ALPHA_LCA__H__ #define __ALPHA_LCA__H__ -#include <asm/system.h> #include <asm/compiler.h> +#include <asm/mce.h> /* * Low Cost Alpha (LCA) definitions (these apply to 21066 and 21068, diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h index 9f67a056b461..ad44bef29fba 100644 --- a/arch/alpha/include/asm/core_mcpcia.h +++ b/arch/alpha/include/asm/core_mcpcia.h @@ -7,6 +7,7 @@ #include <linux/types.h> #include <asm/compiler.h> +#include <asm/mce.h> /* * MCPCIA is the internal name for a core logic chipset which provides diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h index 91b46801b290..ade9d92e68b4 100644 --- a/arch/alpha/include/asm/core_t2.h +++ b/arch/alpha/include/asm/core_t2.h @@ -7,7 +7,6 @@ #include <linux/types.h> #include <linux/spinlock.h> #include <asm/compiler.h> -#include <asm/system.h> /* * T2 is the internal name for the core logic chipset which provides diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h index da5449e22175..968d9991f5ee 100644 --- a/arch/alpha/include/asm/elf.h +++ b/arch/alpha/include/asm/elf.h @@ -2,6 +2,7 @@ #define __ASM_ALPHA_ELF_H #include <asm/auxvec.h> +#include <asm/special_insns.h> /* Special values for the st_other field in the symbol table. 
*/ diff --git a/arch/alpha/include/asm/exec.h b/arch/alpha/include/asm/exec.h new file mode 100644 index 000000000000..4a5a41f30779 --- /dev/null +++ b/arch/alpha/include/asm/exec.h @@ -0,0 +1,6 @@ +#ifndef __ALPHA_EXEC_H +#define __ALPHA_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* __ALPHA_EXEC_H */ diff --git a/arch/alpha/include/asm/fpu.h b/arch/alpha/include/asm/fpu.h index ecb17a72acc3..db00f7885faa 100644 --- a/arch/alpha/include/asm/fpu.h +++ b/arch/alpha/include/asm/fpu.h @@ -1,6 +1,8 @@ #ifndef __ASM_ALPHA_FPU_H #define __ASM_ALPHA_FPU_H +#include <asm/special_insns.h> + /* * Alpha floating-point control register defines: */ diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index 56ff96501350..7a3d38d5ed6b 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -6,7 +6,6 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <asm/compiler.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/machvec.h> #include <asm/hwrpb.h> diff --git a/arch/alpha/include/asm/irqflags.h b/arch/alpha/include/asm/irqflags.h index 299bbc7e9d71..ffb1726484af 100644 --- a/arch/alpha/include/asm/irqflags.h +++ b/arch/alpha/include/asm/irqflags.h @@ -1,7 +1,7 @@ #ifndef __ALPHA_IRQFLAGS_H #define __ALPHA_IRQFLAGS_H -#include <asm/system.h> +#include <asm/pal.h> #define IPL_MIN 0 #define IPL_SW0 1 diff --git a/arch/alpha/include/asm/mce.h b/arch/alpha/include/asm/mce.h new file mode 100644 index 000000000000..660285b9aca8 --- /dev/null +++ b/arch/alpha/include/asm/mce.h @@ -0,0 +1,83 @@ +#ifndef __ALPHA_MCE_H +#define __ALPHA_MCE_H + +/* + * This is the logout header that should be common to all platforms + * (assuming they are running OSF/1 PALcode, I guess). + */ +struct el_common { + unsigned int size; /* size in bytes of logout area */ + unsigned int sbz1 : 30; /* should be zero */ + unsigned int err2 : 1; /* second error */ + unsigned int retry : 1; /* retry flag */ + unsigned int proc_offset; /* processor-specific offset */ + unsigned int sys_offset; /* system-specific offset */ + unsigned int code; /* machine check code */ + unsigned int frame_rev; /* frame revision */ +}; + +/* Machine Check Frame for uncorrectable errors (Large format) + * --- This is used to log uncorrectable errors such as + * double bit ECC errors. + * --- These errors are detected by both processor and systems. + */ +struct el_common_EV5_uncorrectable_mcheck { + unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */ + unsigned long paltemp[24]; /* PAL TEMP REGS. */ + unsigned long exc_addr; /* Address of excepting instruction*/ + unsigned long exc_sum; /* Summary of arithmetic traps. */ + unsigned long exc_mask; /* Exception mask (from exc_sum). */ + unsigned long pal_base; /* Base address for PALcode. */ + unsigned long isr; /* Interrupt Status Reg. */ + unsigned long icsr; /* CURRENT SETUP OF EV5 IBOX */ + unsigned long ic_perr_stat; /* I-CACHE Reg. <11> set Data parity + <12> set TAG parity*/ + unsigned long dc_perr_stat; /* D-CACHE error Reg. Bits set to 1: + <2> Data error in bank 0 + <3> Data error in bank 1 + <4> Tag error in bank 0 + <5> Tag error in bank 1 */ + unsigned long va; /* Effective VA of fault or miss. */ + unsigned long mm_stat; /* Holds the reason for D-stream + fault or D-cache parity errors */ + unsigned long sc_addr; /* Address that was being accessed + when EV5 detected Secondary cache + failure. 
*/ + unsigned long sc_stat; /* Helps determine if the error was + TAG/Data parity(Secondary Cache)*/ + unsigned long bc_tag_addr; /* Contents of EV5 BC_TAG_ADDR */ + unsigned long ei_addr; /* Physical address of any transfer + that is logged in EV5 EI_STAT */ + unsigned long fill_syndrome; /* For correcting ECC errors. */ + unsigned long ei_stat; /* Helps identify reason of any + processor uncorrectable error + at its external interface. */ + unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/ +}; + +struct el_common_EV6_mcheck { + unsigned int FrameSize; /* Bytes, including this field */ + unsigned int FrameFlags; /* <31> = Retry, <30> = Second Error */ + unsigned int CpuOffset; /* Offset to CPU-specific info */ + unsigned int SystemOffset; /* Offset to system-specific info */ + unsigned int MCHK_Code; + unsigned int MCHK_Frame_Rev; + unsigned long I_STAT; /* EV6 Internal Processor Registers */ + unsigned long DC_STAT; /* (See the 21264 Spec) */ + unsigned long C_ADDR; + unsigned long DC1_SYNDROME; + unsigned long DC0_SYNDROME; + unsigned long C_STAT; + unsigned long C_STS; + unsigned long MM_STAT; + unsigned long EXC_ADDR; + unsigned long IER_CM; + unsigned long ISUM; + unsigned long RESERVED0; + unsigned long PAL_BASE; + unsigned long I_CTL; + unsigned long PCTX; +}; + + +#endif /* __ALPHA_MCE_H */ diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h index 86c08a02d239..4c51c05333c6 100644 --- a/arch/alpha/include/asm/mmu_context.h +++ b/arch/alpha/include/asm/mmu_context.h @@ -7,7 +7,6 @@ * Copyright (C) 1996, Linus Torvalds */ -#include <asm/system.h> #include <asm/machvec.h> #include <asm/compiler.h> #include <asm-generic/mm_hooks.h> diff --git a/arch/alpha/include/asm/pal.h b/arch/alpha/include/asm/pal.h index 9b4ba0d6f00b..6699ee583429 100644 --- a/arch/alpha/include/asm/pal.h +++ b/arch/alpha/include/asm/pal.h @@ -48,4 +48,116 @@ #define PAL_retsys 61 #define PAL_rti 63 +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +extern void halt(void) __attribute__((noreturn)); +#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt)) + +#define imb() \ +__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory") + +#define draina() \ +__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory") + +#define __CALL_PAL_R0(NAME, TYPE) \ +extern inline TYPE NAME(void) \ +{ \ + register TYPE __r0 __asm__("$0"); \ + __asm__ __volatile__( \ + "call_pal %1 # " #NAME \ + :"=r" (__r0) \ + :"i" (PAL_ ## NAME) \ + :"$1", "$16", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_PAL_W1(NAME, TYPE0) \ +extern inline void NAME(TYPE0 arg0) \ +{ \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + __asm__ __volatile__( \ + "call_pal %1 # "#NAME \ + : "=r"(__r16) \ + : "i"(PAL_ ## NAME), "0"(__r16) \ + : "$1", "$22", "$23", "$24", "$25"); \ +} + +#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \ +extern inline void NAME(TYPE0 arg0, TYPE1 arg1) \ +{ \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + __asm__ __volatile__( \ + "call_pal %2 # "#NAME \ + : "=r"(__r16), "=r"(__r17) \ + : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ + : "$1", "$22", "$23", "$24", "$25"); \ +} + +#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \ +extern inline RTYPE NAME(TYPE0 arg0) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + __asm__ __volatile__( \ + "call_pal %2 # "#NAME \ + : "=r"(__r16), "=r"(__r0) \ + : "i"(PAL_ ## NAME), "0"(__r16) \ + 
: "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \ +extern inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + __asm__ __volatile__( \ + "call_pal %3 # "#NAME \ + : "=r"(__r16), "=r"(__r17), "=r"(__r0) \ + : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ + : "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +__CALL_PAL_W1(cflush, unsigned long); +__CALL_PAL_R0(rdmces, unsigned long); +__CALL_PAL_R0(rdps, unsigned long); +__CALL_PAL_R0(rdusp, unsigned long); +__CALL_PAL_RW1(swpipl, unsigned long, unsigned long); +__CALL_PAL_R0(whami, unsigned long); +__CALL_PAL_W2(wrent, void*, unsigned long); +__CALL_PAL_W1(wripir, unsigned long); +__CALL_PAL_W1(wrkgp, unsigned long); +__CALL_PAL_W1(wrmces, unsigned long); +__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); +__CALL_PAL_W1(wrusp, unsigned long); +__CALL_PAL_W1(wrvptptr, unsigned long); + +/* + * TB routines.. + */ +#define __tbi(nr,arg,arg1...) \ +({ \ + register unsigned long __r16 __asm__("$16") = (nr); \ + register unsigned long __r17 __asm__("$17"); arg; \ + __asm__ __volatile__( \ + "call_pal %3 #__tbi" \ + :"=r" (__r16),"=r" (__r17) \ + :"0" (__r16),"i" (PAL_tbi) ,##arg1 \ + :"$0", "$1", "$22", "$23", "$24", "$25"); \ +}) + +#define tbi(x,y) __tbi(x,__r17=(y),"1" (__r17)) +#define tbisi(x) __tbi(1,__r17=(x),"1" (__r17)) +#define tbisd(x) __tbi(2,__r17=(x),"1" (__r17)) +#define tbis(x) __tbi(3,__r17=(x),"1" (__r17)) +#define tbiap() __tbi(-1, /* no second argument */) +#define tbia() __tbi(-2, /* no second argument */) + +#endif /* !__ASSEMBLY__ */ +#endif /* __KERNEL__ */ + #endif /* __ALPHA_PAL_H */ diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index de98a732683d..81a4342d5a3f 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h @@ -15,6 +15,7 @@ #include <asm/page.h> #include <asm/processor.h> /* For TASK_SIZE */ #include <asm/machvec.h> +#include <asm/setup.h> struct mm_struct; struct vm_area_struct; diff --git a/arch/alpha/include/asm/setup.h b/arch/alpha/include/asm/setup.h index 2e023a4aa317..b50014b30909 100644 --- a/arch/alpha/include/asm/setup.h +++ b/arch/alpha/include/asm/setup.h @@ -3,4 +3,40 @@ #define COMMAND_LINE_SIZE 256 +/* + * We leave one page for the initial stack page, and one page for + * the initial process structure. Also, the console eats 3 MB for + * the initial bootloader (one of which we can reclaim later). + */ +#define BOOT_PCB 0x20000000 +#define BOOT_ADDR 0x20000000 +/* Remove when official MILO sources have ELF support: */ +#define BOOT_SIZE (16*1024) + +#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS +#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */ +#else +#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */ +#endif + +#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS) +#define SWAPPER_PGD KERNEL_START +#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000) +#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000) +#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000) +#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000) + +#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000) + +/* + * This is setup by the secondary bootstrap loader. 
Because + * the zero page is zeroed out as soon as the vm system is + * initialized, we need to copy things out into a more permanent + * place. + */ +#define PARAM ZERO_PGE +#define COMMAND_LINE ((char*)(PARAM + 0x0000)) +#define INITRD_START (*(unsigned long *) (PARAM+0x100)) +#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108)) + #endif diff --git a/arch/alpha/include/asm/special_insns.h b/arch/alpha/include/asm/special_insns.h new file mode 100644 index 000000000000..88d3452b21f0 --- /dev/null +++ b/arch/alpha/include/asm/special_insns.h @@ -0,0 +1,41 @@ +#ifndef __ALPHA_SPECIAL_INSNS_H +#define __ALPHA_SPECIAL_INSNS_H + +enum implver_enum { + IMPLVER_EV4, + IMPLVER_EV5, + IMPLVER_EV6 +}; + +#ifdef CONFIG_ALPHA_GENERIC +#define implver() \ +({ unsigned long __implver; \ + __asm__ ("implver %0" : "=r"(__implver)); \ + (enum implver_enum) __implver; }) +#else +/* Try to eliminate some dead code. */ +#ifdef CONFIG_ALPHA_EV4 +#define implver() IMPLVER_EV4 +#endif +#ifdef CONFIG_ALPHA_EV5 +#define implver() IMPLVER_EV5 +#endif +#if defined(CONFIG_ALPHA_EV6) +#define implver() IMPLVER_EV6 +#endif +#endif + +enum amask_enum { + AMASK_BWX = (1UL << 0), + AMASK_FIX = (1UL << 1), + AMASK_CIX = (1UL << 2), + AMASK_MAX = (1UL << 8), + AMASK_PRECISE_TRAP = (1UL << 9), +}; + +#define amask(mask) \ +({ unsigned long __amask, __input = (mask); \ + __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input)); \ + __amask; }) + +#endif /* __ALPHA_SPECIAL_INSNS_H */ diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h index d0faca1e992d..3bba21e41b81 100644 --- a/arch/alpha/include/asm/spinlock.h +++ b/arch/alpha/include/asm/spinlock.h @@ -1,7 +1,6 @@ #ifndef _ALPHA_SPINLOCK_H #define _ALPHA_SPINLOCK_H -#include <asm/system.h> #include <linux/kernel.h> #include <asm/current.h> diff --git a/arch/alpha/include/asm/switch_to.h b/arch/alpha/include/asm/switch_to.h new file mode 100644 index 000000000000..44c0d4f2c0b2 --- /dev/null +++ b/arch/alpha/include/asm/switch_to.h @@ -0,0 +1,14 @@ +#ifndef __ALPHA_SWITCH_TO_H +#define __ALPHA_SWITCH_TO_H + + +struct task_struct; +extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct *); + +#define switch_to(P,N,L) \ + do { \ + (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \ + check_mmu_context(); \ + } while (0) + +#endif /* __ALPHA_SWITCH_TO_H */ diff --git a/arch/alpha/include/asm/system.h b/arch/alpha/include/asm/system.h deleted file mode 100644 index 9f78e6934637..000000000000 --- a/arch/alpha/include/asm/system.h +++ /dev/null @@ -1,354 +0,0 @@ -#ifndef __ALPHA_SYSTEM_H -#define __ALPHA_SYSTEM_H - -#include <asm/pal.h> -#include <asm/page.h> -#include <asm/barrier.h> - -/* - * System defines.. Note that this is included both from .c and .S - * files, so it does only defines, not any C code. - */ - -/* - * We leave one page for the initial stack page, and one page for - * the initial process structure. Also, the console eats 3 MB for - * the initial bootloader (one of which we can reclaim later). - */ -#define BOOT_PCB 0x20000000 -#define BOOT_ADDR 0x20000000 -/* Remove when official MILO sources have ELF support: */ -#define BOOT_SIZE (16*1024) - -#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS -#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. 
*/ -#else -#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */ -#endif - -#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS) -#define SWAPPER_PGD KERNEL_START -#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000) -#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000) -#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000) -#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000) - -#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000) - -/* - * This is setup by the secondary bootstrap loader. Because - * the zero page is zeroed out as soon as the vm system is - * initialized, we need to copy things out into a more permanent - * place. - */ -#define PARAM ZERO_PGE -#define COMMAND_LINE ((char*)(PARAM + 0x0000)) -#define INITRD_START (*(unsigned long *) (PARAM+0x100)) -#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108)) - -#ifndef __ASSEMBLY__ -#include <linux/kernel.h> -#define AT_VECTOR_SIZE_ARCH 4 /* entries in ARCH_DLINFO */ - -/* - * This is the logout header that should be common to all platforms - * (assuming they are running OSF/1 PALcode, I guess). - */ -struct el_common { - unsigned int size; /* size in bytes of logout area */ - unsigned int sbz1 : 30; /* should be zero */ - unsigned int err2 : 1; /* second error */ - unsigned int retry : 1; /* retry flag */ - unsigned int proc_offset; /* processor-specific offset */ - unsigned int sys_offset; /* system-specific offset */ - unsigned int code; /* machine check code */ - unsigned int frame_rev; /* frame revision */ -}; - -/* Machine Check Frame for uncorrectable errors (Large format) - * --- This is used to log uncorrectable errors such as - * double bit ECC errors. - * --- These errors are detected by both processor and systems. - */ -struct el_common_EV5_uncorrectable_mcheck { - unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */ - unsigned long paltemp[24]; /* PAL TEMP REGS. */ - unsigned long exc_addr; /* Address of excepting instruction*/ - unsigned long exc_sum; /* Summary of arithmetic traps. */ - unsigned long exc_mask; /* Exception mask (from exc_sum). */ - unsigned long pal_base; /* Base address for PALcode. */ - unsigned long isr; /* Interrupt Status Reg. */ - unsigned long icsr; /* CURRENT SETUP OF EV5 IBOX */ - unsigned long ic_perr_stat; /* I-CACHE Reg. <11> set Data parity - <12> set TAG parity*/ - unsigned long dc_perr_stat; /* D-CACHE error Reg. Bits set to 1: - <2> Data error in bank 0 - <3> Data error in bank 1 - <4> Tag error in bank 0 - <5> Tag error in bank 1 */ - unsigned long va; /* Effective VA of fault or miss. */ - unsigned long mm_stat; /* Holds the reason for D-stream - fault or D-cache parity errors */ - unsigned long sc_addr; /* Address that was being accessed - when EV5 detected Secondary cache - failure. */ - unsigned long sc_stat; /* Helps determine if the error was - TAG/Data parity(Secondary Cache)*/ - unsigned long bc_tag_addr; /* Contents of EV5 BC_TAG_ADDR */ - unsigned long ei_addr; /* Physical address of any transfer - that is logged in EV5 EI_STAT */ - unsigned long fill_syndrome; /* For correcting ECC errors. */ - unsigned long ei_stat; /* Helps identify reason of any - processor uncorrectable error - at its external interface. 
*/ - unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/ -}; - -struct el_common_EV6_mcheck { - unsigned int FrameSize; /* Bytes, including this field */ - unsigned int FrameFlags; /* <31> = Retry, <30> = Second Error */ - unsigned int CpuOffset; /* Offset to CPU-specific info */ - unsigned int SystemOffset; /* Offset to system-specific info */ - unsigned int MCHK_Code; - unsigned int MCHK_Frame_Rev; - unsigned long I_STAT; /* EV6 Internal Processor Registers */ - unsigned long DC_STAT; /* (See the 21264 Spec) */ - unsigned long C_ADDR; - unsigned long DC1_SYNDROME; - unsigned long DC0_SYNDROME; - unsigned long C_STAT; - unsigned long C_STS; - unsigned long MM_STAT; - unsigned long EXC_ADDR; - unsigned long IER_CM; - unsigned long ISUM; - unsigned long RESERVED0; - unsigned long PAL_BASE; - unsigned long I_CTL; - unsigned long PCTX; -}; - -extern void halt(void) __attribute__((noreturn)); -#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt)) - -#define switch_to(P,N,L) \ - do { \ - (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \ - check_mmu_context(); \ - } while (0) - -struct task_struct; -extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*); - -#define imb() \ -__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory") - -#define draina() \ -__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory") - -enum implver_enum { - IMPLVER_EV4, - IMPLVER_EV5, - IMPLVER_EV6 -}; - -#ifdef CONFIG_ALPHA_GENERIC -#define implver() \ -({ unsigned long __implver; \ - __asm__ ("implver %0" : "=r"(__implver)); \ - (enum implver_enum) __implver; }) -#else -/* Try to eliminate some dead code. */ -#ifdef CONFIG_ALPHA_EV4 -#define implver() IMPLVER_EV4 -#endif -#ifdef CONFIG_ALPHA_EV5 -#define implver() IMPLVER_EV5 -#endif -#if defined(CONFIG_ALPHA_EV6) -#define implver() IMPLVER_EV6 -#endif -#endif - -enum amask_enum { - AMASK_BWX = (1UL << 0), - AMASK_FIX = (1UL << 1), - AMASK_CIX = (1UL << 2), - AMASK_MAX = (1UL << 8), - AMASK_PRECISE_TRAP = (1UL << 9), -}; - -#define amask(mask) \ -({ unsigned long __amask, __input = (mask); \ - __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input)); \ - __amask; }) - -#define __CALL_PAL_R0(NAME, TYPE) \ -extern inline TYPE NAME(void) \ -{ \ - register TYPE __r0 __asm__("$0"); \ - __asm__ __volatile__( \ - "call_pal %1 # " #NAME \ - :"=r" (__r0) \ - :"i" (PAL_ ## NAME) \ - :"$1", "$16", "$22", "$23", "$24", "$25"); \ - return __r0; \ -} - -#define __CALL_PAL_W1(NAME, TYPE0) \ -extern inline void NAME(TYPE0 arg0) \ -{ \ - register TYPE0 __r16 __asm__("$16") = arg0; \ - __asm__ __volatile__( \ - "call_pal %1 # "#NAME \ - : "=r"(__r16) \ - : "i"(PAL_ ## NAME), "0"(__r16) \ - : "$1", "$22", "$23", "$24", "$25"); \ -} - -#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \ -extern inline void NAME(TYPE0 arg0, TYPE1 arg1) \ -{ \ - register TYPE0 __r16 __asm__("$16") = arg0; \ - register TYPE1 __r17 __asm__("$17") = arg1; \ - __asm__ __volatile__( \ - "call_pal %2 # "#NAME \ - : "=r"(__r16), "=r"(__r17) \ - : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ - : "$1", "$22", "$23", "$24", "$25"); \ -} - -#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \ -extern inline RTYPE NAME(TYPE0 arg0) \ -{ \ - register RTYPE __r0 __asm__("$0"); \ - register TYPE0 __r16 __asm__("$16") = arg0; \ - __asm__ __volatile__( \ - "call_pal %2 # "#NAME \ - : "=r"(__r16), "=r"(__r0) \ - : "i"(PAL_ ## NAME), "0"(__r16) \ - : "$1", "$22", "$23", "$24", "$25"); \ - return __r0; \ -} - -#define 
__CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \ -extern inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \ -{ \ - register RTYPE __r0 __asm__("$0"); \ - register TYPE0 __r16 __asm__("$16") = arg0; \ - register TYPE1 __r17 __asm__("$17") = arg1; \ - __asm__ __volatile__( \ - "call_pal %3 # "#NAME \ - : "=r"(__r16), "=r"(__r17), "=r"(__r0) \ - : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ - : "$1", "$22", "$23", "$24", "$25"); \ - return __r0; \ -} - -__CALL_PAL_W1(cflush, unsigned long); -__CALL_PAL_R0(rdmces, unsigned long); -__CALL_PAL_R0(rdps, unsigned long); -__CALL_PAL_R0(rdusp, unsigned long); -__CALL_PAL_RW1(swpipl, unsigned long, unsigned long); -__CALL_PAL_R0(whami, unsigned long); -__CALL_PAL_W2(wrent, void*, unsigned long); -__CALL_PAL_W1(wripir, unsigned long); -__CALL_PAL_W1(wrkgp, unsigned long); -__CALL_PAL_W1(wrmces, unsigned long); -__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); -__CALL_PAL_W1(wrusp, unsigned long); -__CALL_PAL_W1(wrvptptr, unsigned long); - -/* - * TB routines.. - */ -#define __tbi(nr,arg,arg1...) \ -({ \ - register unsigned long __r16 __asm__("$16") = (nr); \ - register unsigned long __r17 __asm__("$17"); arg; \ - __asm__ __volatile__( \ - "call_pal %3 #__tbi" \ - :"=r" (__r16),"=r" (__r17) \ - :"0" (__r16),"i" (PAL_tbi) ,##arg1 \ - :"$0", "$1", "$22", "$23", "$24", "$25"); \ -}) - -#define tbi(x,y) __tbi(x,__r17=(y),"1" (__r17)) -#define tbisi(x) __tbi(1,__r17=(x),"1" (__r17)) -#define tbisd(x) __tbi(2,__r17=(x),"1" (__r17)) -#define tbis(x) __tbi(3,__r17=(x),"1" (__r17)) -#define tbiap() __tbi(-1, /* no second argument */) -#define tbia() __tbi(-2, /* no second argument */) - -/* - * Atomic exchange routines. - */ - -#define __ASM__MB -#define ____xchg(type, args...) __xchg ## type ## _local(args) -#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args) -#include <asm/xchg.h> - -#define xchg_local(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \ - sizeof(*(ptr))); \ - }) - -#define cmpxchg_local(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, \ - sizeof(*(ptr))); \ - }) - -#define cmpxchg64_local(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg_local((ptr), (o), (n)); \ - }) - -#ifdef CONFIG_SMP -#undef __ASM__MB -#define __ASM__MB "\tmb\n" -#endif -#undef ____xchg -#undef ____cmpxchg -#define ____xchg(type, args...) __xchg ##type(args) -#define ____cmpxchg(type, args...) 
__cmpxchg ##type(args) -#include <asm/xchg.h> - -#define xchg(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \ - sizeof(*(ptr))); \ - }) - -#define cmpxchg(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr)));\ - }) - -#define cmpxchg64(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg((ptr), (o), (n)); \ - }) - -#undef __ASM__MB -#undef ____cmpxchg - -#define __HAVE_ARCH_CMPXCHG 1 - -#endif /* __ASSEMBLY__ */ - -#define arch_align_stack(x) (x) - -#endif diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h index beba1b803e0d..1d1b436fbff2 100644 --- a/arch/alpha/include/asm/xchg.h +++ b/arch/alpha/include/asm/xchg.h @@ -1,4 +1,4 @@ -#ifndef __ALPHA_SYSTEM_H +#ifndef _ALPHA_ATOMIC_H #error Do not include xchg.h directly! #else /* diff --git a/arch/alpha/kernel/core_apecs.c b/arch/alpha/kernel/core_apecs.c index ca46b2c24457..708c831efa76 100644 --- a/arch/alpha/kernel/core_apecs.c +++ b/arch/alpha/kernel/core_apecs.c @@ -21,6 +21,7 @@ #include <asm/ptrace.h> #include <asm/smp.h> +#include <asm/mce.h> #include "proto.h" #include "pci_impl.h" diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c index 1d6ee6c985f9..c44339e176c1 100644 --- a/arch/alpha/kernel/core_cia.c +++ b/arch/alpha/kernel/core_cia.c @@ -23,6 +23,7 @@ #include <linux/bootmem.h> #include <asm/ptrace.h> +#include <asm/mce.h> #include "proto.h" #include "pci_impl.h" diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c index 2f770e994289..3ada4f7b085d 100644 --- a/arch/alpha/kernel/core_t2.c +++ b/arch/alpha/kernel/core_t2.c @@ -21,6 +21,7 @@ #include <asm/ptrace.h> #include <asm/delay.h> +#include <asm/mce.h> #include "proto.h" #include "pci_impl.h" diff --git a/arch/alpha/kernel/err_impl.h b/arch/alpha/kernel/err_impl.h index 0c010ca4611e..ae529c416037 100644 --- a/arch/alpha/kernel/err_impl.h +++ b/arch/alpha/kernel/err_impl.h @@ -7,6 +7,8 @@ * implementations. 
*/ +#include <asm/mce.h> + union el_timestamp; struct el_subpacket; struct ev7_lf_subpackets; diff --git a/arch/alpha/kernel/head.S b/arch/alpha/kernel/head.S index 4bdd1d2ff353..c352499ab9f8 100644 --- a/arch/alpha/kernel/head.S +++ b/arch/alpha/kernel/head.S @@ -8,14 +8,12 @@ */ #include <linux/init.h> -#include <asm/system.h> #include <asm/asm-offsets.h> +#include <asm/pal.h> +#include <asm/setup.h> __HEAD -.globl swapper_pg_dir .globl _stext -swapper_pg_dir=SWAPPER_PGD - .set noreorder .globl __start .ent __start diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 381431a2d6d9..2872accd2215 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -26,7 +26,6 @@ #include <linux/profile.h> #include <linux/bitops.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/uaccess.h> diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 51b7fbd9e4c1..772ddfdb71a8 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -11,6 +11,7 @@ #include <asm/machvec.h> #include <asm/dma.h> #include <asm/perf_event.h> +#include <asm/mce.h> #include "proto.h" #include "irq_impl.h" diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 01e8715e26d9..49ee3193477a 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -40,7 +40,6 @@ #include <asm/fpu.h> #include <asm/io.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/sysinfo.h> #include <asm/thread_info.h> #include <asm/hwrpb.h> diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 89bbe5b41145..153d3fce3e8e 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -31,7 +31,6 @@ #include <asm/reg.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/hwrpb.h> diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c index e2af5eb59bb4..54616f496aed 100644 --- a/arch/alpha/kernel/ptrace.c +++ b/arch/alpha/kernel/ptrace.c @@ -16,7 +16,6 @@ #include <asm/uaccess.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/fpu.h> #include "proto.h" diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 32de56067e63..9e3107cc5ebb 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -55,7 +55,6 @@ static struct notifier_block alpha_panic_block = { #include <asm/uaccess.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/hwrpb.h> #include <asm/dma.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c index 8606d77e5163..118dc6af1805 100644 --- a/arch/alpha/kernel/sys_alcor.c +++ b/arch/alpha/kernel/sys_alcor.c @@ -18,7 +18,6 @@ #include <linux/bitops.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c index 1029619fb6c0..4c50f8f40cbb 100644 --- a/arch/alpha/kernel/sys_cabriolet.c +++ b/arch/alpha/kernel/sys_cabriolet.c @@ -18,7 +18,6 @@ #include <linux/bitops.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c index 13f0717fc7fe..5bf401f7ea97 100644 --- a/arch/alpha/kernel/sys_dp264.c +++ b/arch/alpha/kernel/sys_dp264.c @@ -21,7 +21,6 @@ #include <linux/bitops.h> #include <asm/ptrace.h> -#include 
<asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c index 3c6c13cd8b19..ad40a425e841 100644 --- a/arch/alpha/kernel/sys_eb64p.c +++ b/arch/alpha/kernel/sys_eb64p.c @@ -17,7 +17,6 @@ #include <linux/bitops.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c index 35f480db7719..79d69d7f63f8 100644 --- a/arch/alpha/kernel/sys_eiger.c +++ b/arch/alpha/kernel/sys_eiger.c @@ -18,7 +18,6 @@ #include <linux/bitops.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c index 7f1a87f176e2..5a0af11b3a61 100644 --- a/arch/alpha/kernel/sys_jensen.c +++ b/arch/alpha/kernel/sys_jensen.c @@ -15,7 +15,6 @@ #include <linux/init.h> #include <asm/ptrace.h> -#include <asm/system.h> #define __EXTERN_INLINE inline #include <asm/io.h> diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index fc8b12508611..14a4b6a7cf59 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c @@ -13,7 +13,6 @@ #include <linux/bitops.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c index 258da684670b..d5b9776a608d 100644 --- a/arch/alpha/kernel/sys_miata.c +++ b/arch/alpha/kernel/sys_miata.c @@ -17,7 +17,6 @@ #include <linux/reboot.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c index c0fd7284dec3..5e82dc1ad6f2 100644 --- a/arch/alpha/kernel/sys_mikasa.c +++ b/arch/alpha/kernel/sys_mikasa.c @@ -17,7 +17,7 @@ #include <linux/bitops.h> #include <asm/ptrace.h> -#include <asm/system.h> +#include <asm/mce.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 4112200307c7..4d4c046f708d 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -35,7 +35,6 @@ #include <linux/bitops.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c index 21725283cdd7..063e594fd969 100644 --- a/arch/alpha/kernel/sys_noritake.c +++ b/arch/alpha/kernel/sys_noritake.c @@ -18,7 +18,7 @@ #include <linux/bitops.h> #include <asm/ptrace.h> -#include <asm/system.h> +#include <asm/mce.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c index a125d6bea7e1..dfd510ae5d8c 100644 --- a/arch/alpha/kernel/sys_rawhide.c +++ b/arch/alpha/kernel/sys_rawhide.c @@ -16,7 +16,6 @@ #include <linux/init.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c index 2581cbec6fc2..a3f485257170 100644 --- a/arch/alpha/kernel/sys_ruffian.c +++ b/arch/alpha/kernel/sys_ruffian.c @@ -18,7 +18,6 @@ #include <linux/init.h> #include 
<asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c index b172b27555a7..08ee737d4fba 100644 --- a/arch/alpha/kernel/sys_rx164.c +++ b/arch/alpha/kernel/sys_rx164.c @@ -17,7 +17,6 @@ #include <linux/bitops.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c index 98d1dbffe98f..8a0aa6d67b53 100644 --- a/arch/alpha/kernel/sys_sable.c +++ b/arch/alpha/kernel/sys_sable.c @@ -16,7 +16,6 @@ #include <linux/init.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c index 47bec1e97d1c..febd24eba7a6 100644 --- a/arch/alpha/kernel/sys_sio.c +++ b/arch/alpha/kernel/sys_sio.c @@ -20,7 +20,6 @@ #include <asm/compiler.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c index 73e1c317afcb..d063b360efed 100644 --- a/arch/alpha/kernel/sys_sx164.c +++ b/arch/alpha/kernel/sys_sx164.c @@ -17,7 +17,6 @@ #include <linux/bitops.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> @@ -26,6 +25,7 @@ #include <asm/core_cia.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> +#include <asm/special_insns.h> #include "proto.h" #include "irq_impl.h" diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c index 2ae99ad6975e..dd0f1eae3c68 100644 --- a/arch/alpha/kernel/sys_takara.c +++ b/arch/alpha/kernel/sys_takara.c @@ -16,7 +16,6 @@ #include <linux/init.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index b8eafa053539..2533db280d9b 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -21,7 +21,6 @@ #include <linux/bitops.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index 17c85a65e7b0..ee1874887776 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c @@ -15,7 +15,6 @@ #include <linux/bitops.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 0414e021a91c..80d987c0e9aa 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -24,6 +24,7 @@ #include <asm/sysinfo.h> #include <asm/hwrpb.h> #include <asm/mmu_context.h> +#include <asm/special_insns.h> #include "proto.h" diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index f937ad123852..647b84c15382 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -2,6 +2,7 @@ #include <asm/thread_info.h> #include <asm/cache.h> #include <asm/page.h> +#include <asm/setup.h> OUTPUT_FORMAT("elf64-alpha") OUTPUT_ARCH(alpha) @@ -25,6 +26,7 @@ SECTIONS *(.fixup) *(.gnu.warning) } :kernel + swapper_pg_dir = SWAPPER_PGD; _etext = .; /* End of text section */ NOTES 
:kernel :note diff --git a/arch/alpha/lib/stacktrace.c b/arch/alpha/lib/stacktrace.c index 6d432e42aedc..5e832161e6d2 100644 --- a/arch/alpha/lib/stacktrace.c +++ b/arch/alpha/lib/stacktrace.c @@ -1,5 +1,4 @@ #include <linux/kernel.h> -#include <asm/system.h> typedef unsigned int instr; diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index fadd5f882ff9..5eecab1a84ef 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -24,7 +24,6 @@ #include <linux/interrupt.h> #include <linux/module.h> -#include <asm/system.h> #include <asm/uaccess.h> extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *); diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 69d0c5761e2f..1ad6ca74bed2 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -22,7 +22,6 @@ #include <linux/vmalloc.h> #include <linux/gfp.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> @@ -31,6 +30,7 @@ #include <asm/mmu_context.h> #include <asm/console.h> #include <asm/tlb.h> +#include <asm/setup.h> extern void die_if_kernel(char *,struct pt_regs *,long); diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c index bd8ac533a504..a0a5d27aa215 100644 --- a/arch/alpha/oprofile/common.c +++ b/arch/alpha/oprofile/common.c @@ -12,7 +12,6 @@ #include <linux/smp.h> #include <linux/errno.h> #include <asm/ptrace.h> -#include <asm/system.h> #include "op_impl.h" diff --git a/arch/alpha/oprofile/op_model_ev4.c b/arch/alpha/oprofile/op_model_ev4.c index 80d764dbf22f..18aa9b4f94f1 100644 --- a/arch/alpha/oprofile/op_model_ev4.c +++ b/arch/alpha/oprofile/op_model_ev4.c @@ -11,7 +11,6 @@ #include <linux/init.h> #include <linux/smp.h> #include <asm/ptrace.h> -#include <asm/system.h> #include "op_impl.h" diff --git a/arch/alpha/oprofile/op_model_ev5.c b/arch/alpha/oprofile/op_model_ev5.c index ceea6e1ad79a..c32f8a0ad925 100644 --- a/arch/alpha/oprofile/op_model_ev5.c +++ b/arch/alpha/oprofile/op_model_ev5.c @@ -11,7 +11,6 @@ #include <linux/init.h> #include <linux/smp.h> #include <asm/ptrace.h> -#include <asm/system.h> #include "op_impl.h" diff --git a/arch/alpha/oprofile/op_model_ev6.c b/arch/alpha/oprofile/op_model_ev6.c index 0869f85f5748..1c84cc257fc7 100644 --- a/arch/alpha/oprofile/op_model_ev6.c +++ b/arch/alpha/oprofile/op_model_ev6.c @@ -11,7 +11,6 @@ #include <linux/init.h> #include <linux/smp.h> #include <asm/ptrace.h> -#include <asm/system.h> #include "op_impl.h" diff --git a/arch/alpha/oprofile/op_model_ev67.c b/arch/alpha/oprofile/op_model_ev67.c index 5b9d178e0228..34a57a126553 100644 --- a/arch/alpha/oprofile/op_model_ev67.c +++ b/arch/alpha/oprofile/op_model_ev67.c @@ -12,7 +12,6 @@ #include <linux/init.h> #include <linux/smp.h> #include <asm/ptrace.h> -#include <asm/system.h> #include "op_impl.h" diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 0106f75530c0..dcb088e868fe 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -180,6 +180,7 @@ machine-$(CONFIG_ARCH_S5P64X0) := s5p64x0 machine-$(CONFIG_ARCH_S5PC100) := s5pc100 machine-$(CONFIG_ARCH_S5PV210) := s5pv210 machine-$(CONFIG_ARCH_EXYNOS4) := exynos +machine-$(CONFIG_ARCH_EXYNOS5) := exynos machine-$(CONFIG_ARCH_SA1100) := sa1100 machine-$(CONFIG_ARCH_SHARK) := shark machine-$(CONFIG_ARCH_SHMOBILE) := shmobile diff --git a/arch/arm/boot/dts/at91sam9g20.dtsi b/arch/arm/boot/dts/at91sam9g20.dtsi index a100db03ec90..92f36627e7f8 100644 --- a/arch/arm/boot/dts/at91sam9g20.dtsi +++ b/arch/arm/boot/dts/at91sam9g20.dtsi @@ -59,6 +59,26 @@ reg = 
<0xfffff000 0x200>; }; + ramc0: ramc@ffffea00 { + compatible = "atmel,at91sam9260-sdramc"; + reg = <0xffffea00 0x200>; + }; + + pmc: pmc@fffffc00 { + compatible = "atmel,at91rm9200-pmc"; + reg = <0xfffffc00 0x100>; + }; + + rstc@fffffd00 { + compatible = "atmel,at91sam9260-rstc"; + reg = <0xfffffd00 0x10>; + }; + + shdwc@fffffd10 { + compatible = "atmel,at91sam9260-shdwc"; + reg = <0xfffffd10 0x10>; + }; + pit: timer@fffffd30 { compatible = "atmel,at91sam9260-pit"; reg = <0xfffffd30 0xf>; @@ -171,6 +191,49 @@ interrupts = <21 4>; status = "disabled"; }; + + usb1: gadget@fffa4000 { + compatible = "atmel,at91rm9200-udc"; + reg = <0xfffa4000 0x4000>; + interrupts = <10 4>; + status = "disabled"; + }; + }; + + nand0: nand@40000000 { + compatible = "atmel,at91rm9200-nand"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x40000000 0x10000000 + 0xffffe800 0x200 + >; + atmel,nand-addr-offset = <21>; + atmel,nand-cmd-offset = <22>; + gpios = <&pioC 13 0 + &pioC 14 0 + 0 + >; + status = "disabled"; }; + + usb0: ohci@00500000 { + compatible = "atmel,at91rm9200-ohci", "usb-ohci"; + reg = <0x00500000 0x100000>; + interrupts = <20 4>; + status = "disabled"; + }; + }; + + i2c@0 { + compatible = "i2c-gpio"; + gpios = <&pioA 23 0 /* sda */ + &pioA 24 0 /* scl */ + >; + i2c-gpio,sda-open-drain; + i2c-gpio,scl-open-drain; + i2c-gpio,delay-us = <2>; /* ~100 kHz */ + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/at91sam9g25ek.dts b/arch/arm/boot/dts/at91sam9g25ek.dts index e64eb932083b..ac0dc0031dda 100644 --- a/arch/arm/boot/dts/at91sam9g25ek.dts +++ b/arch/arm/boot/dts/at91sam9g25ek.dts @@ -15,7 +15,7 @@ compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9"; chosen { - bootargs = "128M console=ttyS0,115200 mtdparts=atmel_nand:8M(bootstrap/uboot/kernel)ro,-(rootfs) root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; + bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; }; ahb { @@ -33,5 +33,17 @@ status = "okay"; }; }; + + usb0: ohci@00600000 { + status = "okay"; + num-ports = <2>; + atmel,vbus-gpio = <&pioD 19 0 + &pioD 20 0 + >; + }; + + usb1: ehci@00700000 { + status = "okay"; + }; }; }; diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index f779667159b1..3d0c32fb218f 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -60,6 +60,22 @@ reg = <0xfffff000 0x200>; }; + ramc0: ramc@ffffe400 { + compatible = "atmel,at91sam9g45-ddramc"; + reg = <0xffffe400 0x200 + 0xffffe600 0x200>; + }; + + pmc: pmc@fffffc00 { + compatible = "atmel,at91rm9200-pmc"; + reg = <0xfffffc00 0x100>; + }; + + rstc@fffffd00 { + compatible = "atmel,at91sam9g45-rstc"; + reg = <0xfffffd00 0x10>; + }; + pit: timer@fffffd30 { compatible = "atmel,at91sam9260-pit"; reg = <0xfffffd30 0xf>; @@ -67,6 +83,11 @@ }; + shdwc@fffffd10 { + compatible = "atmel,at91sam9rl-shdwc"; + reg = <0xfffffd10 0x10>; + }; + tcb0: timer@fff7c000 { compatible = "atmel,at91rm9200-tcb"; reg = <0xfff7c000 0x100>; @@ -180,5 +201,48 @@ status = "disabled"; }; }; + + nand0: nand@40000000 { + compatible = "atmel,at91rm9200-nand"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x40000000 0x10000000 + 0xffffe200 0x200 + >; + atmel,nand-addr-offset = <21>; + atmel,nand-cmd-offset = <22>; + gpios = <&pioC 8 0 + &pioC 14 0 + 0 + >; + status = "disabled"; + }; + + usb0: ohci@00700000 { + compatible = "atmel,at91rm9200-ohci", 
"usb-ohci"; + reg = <0x00700000 0x100000>; + interrupts = <22 4>; + status = "disabled"; + }; + + usb1: ehci@00800000 { + compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; + reg = <0x00800000 0x100000>; + interrupts = <22 4>; + status = "disabled"; + }; + }; + + i2c@0 { + compatible = "i2c-gpio"; + gpios = <&pioA 20 0 /* sda */ + &pioA 21 0 /* scl */ + >; + i2c-gpio,sda-open-drain; + i2c-gpio,scl-open-drain; + i2c-gpio,delay-us = <5>; /* ~100 kHz */ + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts index 15e25f903cad..c4c8ae4123d5 100644 --- a/arch/arm/boot/dts/at91sam9m10g45ek.dts +++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts @@ -14,13 +14,24 @@ compatible = "atmel,at91sam9m10g45ek", "atmel,at91sam9g45", "atmel,at91sam9"; chosen { - bootargs = "mem=64M console=ttyS0,115200 mtdparts=atmel_nand:4M(bootstrap/uboot/kernel)ro,60M(rootfs),-(data) root=/dev/mtdblock1 rw rootfstype=jffs2"; + bootargs = "mem=64M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; }; memory@70000000 { reg = <0x70000000 0x4000000>; }; + clocks { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + main_clock: clock@0 { + compatible = "atmel,osc", "fixed-clock"; + clock-frequency = <12000000>; + }; + }; + ahb { apb { dbgu: serial@ffffee00 { @@ -36,6 +47,39 @@ status = "okay"; }; }; + + nand0: nand@40000000 { + nand-bus-width = <8>; + nand-ecc-mode = "soft"; + nand-on-flash-bbt; + status = "okay"; + + boot@0 { + label = "bootstrap/uboot/kernel"; + reg = <0x0 0x400000>; + }; + + rootfs@400000 { + label = "rootfs"; + reg = <0x400000 0x3C00000>; + }; + + data@4000000 { + label = "data"; + reg = <0x4000000 0xC000000>; + }; + }; + + usb0: ohci@00700000 { + status = "okay"; + num-ports = <2>; + atmel,vbus-gpio = <&pioD 1 0 + &pioD 3 0>; + }; + + usb1: ehci@00800000 { + status = "okay"; + }; }; leds { diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index a02e636d8a57..c111001f254e 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi @@ -58,6 +58,26 @@ reg = <0xfffff000 0x200>; }; + ramc0: ramc@ffffe800 { + compatible = "atmel,at91sam9g45-ddramc"; + reg = <0xffffe800 0x200>; + }; + + pmc: pmc@fffffc00 { + compatible = "atmel,at91rm9200-pmc"; + reg = <0xfffffc00 0x100>; + }; + + rstc@fffffe00 { + compatible = "atmel,at91sam9g45-rstc"; + reg = <0xfffffe00 0x10>; + }; + + shdwc@fffffe10 { + compatible = "atmel,at91sam9x5-shdwc"; + reg = <0xfffffe10 0x10>; + }; + pit: timer@fffffe30 { compatible = "atmel,at91sam9260-pit"; reg = <0xfffffe30 0xf>; @@ -172,5 +192,73 @@ status = "disabled"; }; }; + + nand0: nand@40000000 { + compatible = "atmel,at91rm9200-nand"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x40000000 0x10000000 + >; + atmel,nand-addr-offset = <21>; + atmel,nand-cmd-offset = <22>; + gpios = <&pioC 8 0 + &pioC 14 0 + 0 + >; + status = "disabled"; + }; + + usb0: ohci@00600000 { + compatible = "atmel,at91rm9200-ohci", "usb-ohci"; + reg = <0x00600000 0x100000>; + interrupts = <22 4>; + status = "disabled"; + }; + + usb1: ehci@00700000 { + compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; + reg = <0x00700000 0x100000>; + interrupts = <22 4>; + status = "disabled"; + }; + }; + + i2c@0 { + compatible = "i2c-gpio"; + gpios = <&pioA 30 0 /* sda */ + &pioA 31 0 /* scl */ + >; + i2c-gpio,sda-open-drain; + i2c-gpio,scl-open-drain; + i2c-gpio,delay-us = <2>; /* ~100 kHz */ + #address-cells = <1>; + #size-cells = <0>; + 
status = "disabled"; + }; + + i2c@1 { + compatible = "i2c-gpio"; + gpios = <&pioC 0 0 /* sda */ + &pioC 1 0 /* scl */ + >; + i2c-gpio,sda-open-drain; + i2c-gpio,scl-open-drain; + i2c-gpio,delay-us = <2>; /* ~100 kHz */ + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c@2 { + compatible = "i2c-gpio"; + gpios = <&pioB 4 0 /* sda */ + &pioB 5 0 /* scl */ + >; + i2c-gpio,sda-open-drain; + i2c-gpio,scl-open-drain; + i2c-gpio,delay-us = <2>; /* ~100 kHz */ + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/at91sam9x5cm.dtsi b/arch/arm/boot/dts/at91sam9x5cm.dtsi index 64ae3e890259..67936f83c694 100644 --- a/arch/arm/boot/dts/at91sam9x5cm.dtsi +++ b/arch/arm/boot/dts/at91sam9x5cm.dtsi @@ -12,6 +12,51 @@ reg = <0x20000000 0x8000000>; }; + clocks { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + main_clock: clock@0 { + compatible = "atmel,osc", "fixed-clock"; + clock-frequency = <12000000>; + }; + }; + + ahb { + nand0: nand@40000000 { + nand-bus-width = <8>; + nand-ecc-mode = "soft"; + nand-on-flash-bbt; + status = "okay"; + + at91bootstrap@0 { + label = "at91bootstrap"; + reg = <0x0 0x40000>; + }; + + uboot@40000 { + label = "u-boot"; + reg = <0x40000 0x80000>; + }; + + ubootenv@c0000 { + label = "U-Boot Env"; + reg = <0xc0000 0x140000>; + }; + + kernel@200000 { + label = "kernel"; + reg = <0x200000 0x600000>; + }; + + rootfs@800000 { + label = "rootfs"; + reg = <0x800000 0x1f800000>; + }; + }; + }; + leds { compatible = "gpio-leds"; diff --git a/arch/arm/boot/dts/db8500.dtsi b/arch/arm/boot/dts/db8500.dtsi new file mode 100644 index 000000000000..d73dce645667 --- /dev/null +++ b/arch/arm/boot/dts/db8500.dtsi @@ -0,0 +1,275 @@ +/* + * Copyright 2012 Linaro Ltd + * + * The code contained herein is licensed under the GNU General Public + * License. 
You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +/include/ "skeleton.dtsi" + +/ { + soc-u9500 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "stericsson,db8500"; + interrupt-parent = <&intc>; + ranges; + + intc: interrupt-controller@a0411000 { + compatible = "arm,cortex-a9-gic"; + #interrupt-cells = <3>; + #address-cells = <1>; + interrupt-controller; + interrupt-parent; + reg = <0xa0411000 0x1000>, + <0xa0410100 0x100>; + }; + + L2: l2-cache { + compatible = "arm,pl310-cache"; + reg = <0xa0412000 0x1000>; + interrupts = <0 13 4>; + cache-unified; + cache-level = <2>; + }; + + pmu { + compatible = "arm,cortex-a9-pmu"; + interrupts = <0 7 0x4>; + }; + + timer@a0410600 { + compatible = "arm,cortex-a9-twd-timer"; + reg = <0xa0410600 0x20>; + interrupts = <1 13 0x304>; + }; + + rtc@80154000 { + compatible = "stericsson,db8500-rtc"; + reg = <0x80154000 0x1000>; + interrupts = <0 18 0x4>; + }; + + gpio0: gpio@8012e000 { + compatible = "stericsson,db8500-gpio", + "stmicroelectronics,nomadik-gpio"; + reg = <0x8012e000 0x80>; + interrupts = <0 119 0x4>; + supports-sleepmode; + gpio-controller; + }; + + gpio1: gpio@8012e080 { + compatible = "stericsson,db8500-gpio", + "stmicroelectronics,nomadik-gpio"; + reg = <0x8012e080 0x80>; + interrupts = <0 120 0x4>; + supports-sleepmode; + gpio-controller; + }; + + gpio2: gpio@8000e000 { + compatible = "stericsson,db8500-gpio", + "stmicroelectronics,nomadik-gpio"; + reg = <0x8000e000 0x80>; + interrupts = <0 121 0x4>; + supports-sleepmode; + gpio-controller; + }; + + gpio3: gpio@8000e080 { + compatible = "stericsson,db8500-gpio", + "stmicroelectronics,nomadik-gpio"; + reg = <0x8000e080 0x80>; + interrupts = <0 122 0x4>; + supports-sleepmode; + gpio-controller; + }; + + gpio4: gpio@8000e100 { + compatible = "stericsson,db8500-gpio", + "stmicroelectronics,nomadik-gpio"; + reg = <0x8000e100 0x80>; + interrupts = <0 123 0x4>; + supports-sleepmode; + gpio-controller; + }; + + gpio5: gpio@8000e180 { + compatible = "stericsson,db8500-gpio", + "stmicroelectronics,nomadik-gpio"; + reg = <0x8000e180 0x80>; + interrupts = <0 124 0x4>; + supports-sleepmode; + gpio-controller; + }; + + gpio6: gpio@8011e000 { + compatible = "stericsson,db8500-gpio", + "stmicroelectronics,nomadik-gpio"; + reg = <0x8011e000 0x80>; + interrupts = <0 125 0x4>; + supports-sleepmode; + gpio-controller; + }; + + gpio7: gpio@8011e080 { + compatible = "stericsson,db8500-gpio", + "stmicroelectronics,nomadik-gpio"; + reg = <0x8011e080 0x80>; + interrupts = <0 126 0x4>; + supports-sleepmode; + gpio-controller; + }; + + gpio8: gpio@a03fe000 { + compatible = "stericsson,db8500-gpio", + "stmicroelectronics,nomadik-gpio"; + reg = <0xa03fe000 0x80>; + interrupts = <0 127 0x4>; + supports-sleepmode; + gpio-controller; + }; + + usb@a03e0000 { + compatible = "stericsson,db8500-musb", + "mentor,musb"; + reg = <0xa03e0000 0x10000>; + interrupts = <0 23 0x4>; + }; + + dma-controller@801C0000 { + compatible = "stericsson,db8500-dma40", + "stericsson,dma40"; + reg = <0x801C0000 0x1000 0x40010000 0x800>; + interrupts = <0 25 0x4>; + }; + + prcmu@80157000 { + compatible = "stericsson,db8500-prcmu"; + reg = <0x80157000 0x1000>; + interrupts = <46 47>; + #address-cells = <1>; + #size-cells = <0>; + + ab8500@5 { + compatible = "stericsson,ab8500"; + reg = <5>; /* mailbox 5 is i2c */ + interrupts = <0 40 0x4>; + }; + }; + + i2c@80004000 { + 
compatible = "stericsson,db8500-i2c", "stmicroelectronics,nomadik-i2c"; + reg = <0x80004000 0x1000>; + interrupts = <0 21 0x4>; + #address-cells = <1>; + #size-cells = <0>; + }; + + i2c@80122000 { + compatible = "stericsson,db8500-i2c", "stmicroelectronics,nomadik-i2c"; + reg = <0x80122000 0x1000>; + interrupts = <0 22 0x4>; + #address-cells = <1>; + #size-cells = <0>; + }; + + i2c@80128000 { + compatible = "stericsson,db8500-i2c", "stmicroelectronics,nomadik-i2c"; + reg = <0x80128000 0x1000>; + interrupts = <0 55 0x4>; + #address-cells = <1>; + #size-cells = <0>; + }; + + i2c@80110000 { + compatible = "stericsson,db8500-i2c", "stmicroelectronics,nomadik-i2c"; + reg = <0x80110000 0x1000>; + interrupts = <0 12 0x4>; + #address-cells = <1>; + #size-cells = <0>; + }; + + i2c@8012a000 { + compatible = "stericsson,db8500-i2c", "stmicroelectronics,nomadik-i2c"; + reg = <0x8012a000 0x1000>; + interrupts = <0 51 0x4>; + #address-cells = <1>; + #size-cells = <0>; + }; + + ssp@80002000 { + compatible = "arm,pl022", "arm,primecell"; + reg = <80002000 0x1000>; + interrupts = <0 14 0x4>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + // Add one of these for each child device + cs-gpios = <&gpio0 31 &gpio4 14 &gpio4 16 &gpio6 22 &gpio7 0>; + + }; + + uart@80120000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x80120000 0x1000>; + interrupts = <0 11 0x4>; + status = "disabled"; + }; + uart@80121000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x80121000 0x1000>; + interrupts = <0 19 0x4>; + status = "disabled"; + }; + uart@80007000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x80007000 0x1000>; + interrupts = <0 26 0x4>; + status = "disabled"; + }; + + sdi@80126000 { + compatible = "arm,pl18x", "arm,primecell"; + reg = <0x80126000 0x1000>; + interrupts = <0 60 0x4>; + status = "disabled"; + }; + sdi@80118000 { + compatible = "arm,pl18x", "arm,primecell"; + reg = <0x80118000 0x1000>; + interrupts = <0 50 0x4>; + status = "disabled"; + }; + sdi@80005000 { + compatible = "arm,pl18x", "arm,primecell"; + reg = <0x80005000 0x1000>; + interrupts = <0 41 0x4>; + status = "disabled"; + }; + sdi@80119000 { + compatible = "arm,pl18x", "arm,primecell"; + reg = <0x80119000 0x1000>; + interrupts = <0 59 0x4>; + status = "disabled"; + }; + sdi@80114000 { + compatible = "arm,pl18x", "arm,primecell"; + reg = <0x80114000 0x1000>; + interrupts = <0 99 0x4>; + status = "disabled"; + }; + sdi@80008000 { + compatible = "arm,pl18x", "arm,primecell"; + reg = <0x80114000 0x1000>; + interrupts = <0 100 0x4>; + status = "disabled"; + }; + }; +}; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts new file mode 100644 index 000000000000..399d17b231d2 --- /dev/null +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -0,0 +1,26 @@ +/* + * SAMSUNG SMDK5250 board device tree source + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +/dts-v1/; +/include/ "exynos5250.dtsi" + +/ { + model = "SAMSUNG SMDK5250 board based on EXYNOS5250"; + compatible = "samsung,smdk5250", "samsung,exynos5250"; + + memory { + reg = <0x40000000 0x80000000>; + }; + + chosen { + bootargs = "root=/dev/ram0 rw ramdisk=8192 console=ttySAC1,115200"; + }; +}; diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi new file mode 100644 index 000000000000..dfc433599436 --- /dev/null +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -0,0 +1,413 @@ +/* + * SAMSUNG EXYNOS5250 SoC device tree source + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * SAMSUNG EXYNOS5250 SoC device nodes are listed in this file. + * EXYNOS5250 based board files can include this file and provide + * values for board specfic bindings. + * + * Note: This file does not include device nodes for all the controllers in + * EXYNOS5250 SoC. As device tree coverage for EXYNOS5250 increases, + * additional nodes can be added to this file. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +/include/ "skeleton.dtsi" + +/ { + compatible = "samsung,exynos5250"; + interrupt-parent = <&gic>; + + gic:interrupt-controller@10490000 { + compatible = "arm,cortex-a9-gic"; + #interrupt-cells = <3>; + interrupt-controller; + reg = <0x10490000 0x1000>, <0x10480000 0x100>; + }; + + watchdog { + compatible = "samsung,s3c2410-wdt"; + reg = <0x101D0000 0x100>; + interrupts = <0 42 0>; + }; + + rtc { + compatible = "samsung,s3c6410-rtc"; + reg = <0x101E0000 0x100>; + interrupts = <0 43 0>, <0 44 0>; + }; + + sdhci@12200000 { + compatible = "samsung,exynos4210-sdhci"; + reg = <0x12200000 0x100>; + interrupts = <0 75 0>; + }; + + sdhci@12210000 { + compatible = "samsung,exynos4210-sdhci"; + reg = <0x12210000 0x100>; + interrupts = <0 76 0>; + }; + + sdhci@12220000 { + compatible = "samsung,exynos4210-sdhci"; + reg = <0x12220000 0x100>; + interrupts = <0 77 0>; + }; + + sdhci@12230000 { + compatible = "samsung,exynos4210-sdhci"; + reg = <0x12230000 0x100>; + interrupts = <0 78 0>; + }; + + serial@12C00000 { + compatible = "samsung,exynos4210-uart"; + reg = <0x12C00000 0x100>; + interrupts = <0 51 0>; + }; + + serial@12C10000 { + compatible = "samsung,exynos4210-uart"; + reg = <0x12C10000 0x100>; + interrupts = <0 52 0>; + }; + + serial@12C20000 { + compatible = "samsung,exynos4210-uart"; + reg = <0x12C20000 0x100>; + interrupts = <0 53 0>; + }; + + serial@12C30000 { + compatible = "samsung,exynos4210-uart"; + reg = <0x12C30000 0x100>; + interrupts = <0 54 0>; + }; + + i2c@12C60000 { + compatible = "samsung,s3c2440-i2c"; + reg = <0x12C60000 0x100>; + interrupts = <0 56 0>; + }; + + i2c@12C70000 { + compatible = "samsung,s3c2440-i2c"; + reg = <0x12C70000 0x100>; + interrupts = <0 57 0>; + }; + + i2c@12C80000 { + compatible = "samsung,s3c2440-i2c"; + reg = <0x12C80000 0x100>; + interrupts = <0 58 0>; + }; + + i2c@12C90000 { + compatible = "samsung,s3c2440-i2c"; + reg = <0x12C90000 0x100>; + interrupts = <0 59 0>; + }; + + i2c@12CA0000 { + compatible = "samsung,s3c2440-i2c"; + reg = <0x12CA0000 0x100>; + interrupts = <0 60 0>; + }; + + i2c@12CB0000 { + compatible = "samsung,s3c2440-i2c"; + reg = <0x12CB0000 0x100>; + interrupts = <0 61 0>; + }; + + i2c@12CC0000 { + compatible = "samsung,s3c2440-i2c"; + reg = <0x12CC0000 0x100>; + interrupts = <0 62 0>; + }; + + i2c@12CD0000 { + compatible = 
"samsung,s3c2440-i2c"; + reg = <0x12CD0000 0x100>; + interrupts = <0 63 0>; + }; + + amba { + #address-cells = <1>; + #size-cells = <1>; + compatible = "arm,amba-bus"; + interrupt-parent = <&gic>; + ranges; + + pdma0: pdma@121A0000 { + compatible = "arm,pl330", "arm,primecell"; + reg = <0x121A0000 0x1000>; + interrupts = <0 34 0>; + }; + + pdma1: pdma@121B0000 { + compatible = "arm,pl330", "arm,primecell"; + reg = <0x121B0000 0x1000>; + interrupts = <0 35 0>; + }; + + mdma0: pdma@10800000 { + compatible = "arm,pl330", "arm,primecell"; + reg = <0x10800000 0x1000>; + interrupts = <0 33 0>; + }; + + mdma1: pdma@11C10000 { + compatible = "arm,pl330", "arm,primecell"; + reg = <0x11C10000 0x1000>; + interrupts = <0 124 0>; + }; + }; + + gpio-controllers { + #address-cells = <1>; + #size-cells = <1>; + gpio-controller; + ranges; + + gpa0: gpio-controller@11400000 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400000 0x20>; + #gpio-cells = <4>; + }; + + gpa1: gpio-controller@11400020 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400020 0x20>; + #gpio-cells = <4>; + }; + + gpa2: gpio-controller@11400040 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400040 0x20>; + #gpio-cells = <4>; + }; + + gpb0: gpio-controller@11400060 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400060 0x20>; + #gpio-cells = <4>; + }; + + gpb1: gpio-controller@11400080 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400080 0x20>; + #gpio-cells = <4>; + }; + + gpb2: gpio-controller@114000A0 { + compatible = "samsung,exynos4-gpio"; + reg = <0x114000A0 0x20>; + #gpio-cells = <4>; + }; + + gpb3: gpio-controller@114000C0 { + compatible = "samsung,exynos4-gpio"; + reg = <0x114000C0 0x20>; + #gpio-cells = <4>; + }; + + gpc0: gpio-controller@114000E0 { + compatible = "samsung,exynos4-gpio"; + reg = <0x114000E0 0x20>; + #gpio-cells = <4>; + }; + + gpc1: gpio-controller@11400100 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400100 0x20>; + #gpio-cells = <4>; + }; + + gpc2: gpio-controller@11400120 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400120 0x20>; + #gpio-cells = <4>; + }; + + gpc3: gpio-controller@11400140 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400140 0x20>; + #gpio-cells = <4>; + }; + + gpd0: gpio-controller@11400160 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400160 0x20>; + #gpio-cells = <4>; + }; + + gpd1: gpio-controller@11400180 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400180 0x20>; + #gpio-cells = <4>; + }; + + gpy0: gpio-controller@114001A0 { + compatible = "samsung,exynos4-gpio"; + reg = <0x114001A0 0x20>; + #gpio-cells = <4>; + }; + + gpy1: gpio-controller@114001C0 { + compatible = "samsung,exynos4-gpio"; + reg = <0x114001C0 0x20>; + #gpio-cells = <4>; + }; + + gpy2: gpio-controller@114001E0 { + compatible = "samsung,exynos4-gpio"; + reg = <0x114001E0 0x20>; + #gpio-cells = <4>; + }; + + gpy3: gpio-controller@11400200 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400200 0x20>; + #gpio-cells = <4>; + }; + + gpy4: gpio-controller@11400220 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400220 0x20>; + #gpio-cells = <4>; + }; + + gpy5: gpio-controller@11400240 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400240 0x20>; + #gpio-cells = <4>; + }; + + gpy6: gpio-controller@11400260 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400260 0x20>; + #gpio-cells = <4>; + }; + + gpx0: gpio-controller@11400C00 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400C00 0x20>; + #gpio-cells = <4>; + }; + + gpx1: 
gpio-controller@11400C20 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400C20 0x20>; + #gpio-cells = <4>; + }; + + gpx2: gpio-controller@11400C40 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400C40 0x20>; + #gpio-cells = <4>; + }; + + gpx3: gpio-controller@11400C60 { + compatible = "samsung,exynos4-gpio"; + reg = <0x11400C60 0x20>; + #gpio-cells = <4>; + }; + + gpe0: gpio-controller@13400000 { + compatible = "samsung,exynos4-gpio"; + reg = <0x13400000 0x20>; + #gpio-cells = <4>; + }; + + gpe1: gpio-controller@13400020 { + compatible = "samsung,exynos4-gpio"; + reg = <0x13400020 0x20>; + #gpio-cells = <4>; + }; + + gpf0: gpio-controller@13400040 { + compatible = "samsung,exynos4-gpio"; + reg = <0x13400040 0x20>; + #gpio-cells = <4>; + }; + + gpf1: gpio-controller@13400060 { + compatible = "samsung,exynos4-gpio"; + reg = <0x13400060 0x20>; + #gpio-cells = <4>; + }; + + gpg0: gpio-controller@13400080 { + compatible = "samsung,exynos4-gpio"; + reg = <0x13400080 0x20>; + #gpio-cells = <4>; + }; + + gpg1: gpio-controller@134000A0 { + compatible = "samsung,exynos4-gpio"; + reg = <0x134000A0 0x20>; + #gpio-cells = <4>; + }; + + gpg2: gpio-controller@134000C0 { + compatible = "samsung,exynos4-gpio"; + reg = <0x134000C0 0x20>; + #gpio-cells = <4>; + }; + + gph0: gpio-controller@134000E0 { + compatible = "samsung,exynos4-gpio"; + reg = <0x134000E0 0x20>; + #gpio-cells = <4>; + }; + + gph1: gpio-controller@13400100 { + compatible = "samsung,exynos4-gpio"; + reg = <0x13400100 0x20>; + #gpio-cells = <4>; + }; + + gpv0: gpio-controller@10D10000 { + compatible = "samsung,exynos4-gpio"; + reg = <0x10D10000 0x20>; + #gpio-cells = <4>; + }; + + gpv1: gpio-controller@10D10020 { + compatible = "samsung,exynos4-gpio"; + reg = <0x10D10020 0x20>; + #gpio-cells = <4>; + }; + + gpv2: gpio-controller@10D10040 { + compatible = "samsung,exynos4-gpio"; + reg = <0x10D10040 0x20>; + #gpio-cells = <4>; + }; + + gpv3: gpio-controller@10D10060 { + compatible = "samsung,exynos4-gpio"; + reg = <0x10D10060 0x20>; + #gpio-cells = <4>; + }; + + gpv4: gpio-controller@10D10080 { + compatible = "samsung,exynos4-gpio"; + reg = <0x10D10080 0x20>; + #gpio-cells = <4>; + }; + + gpz: gpio-controller@03860000 { + compatible = "samsung,exynos4-gpio"; + reg = <0x03860000 0x20>; + #gpio-cells = <4>; + }; + }; +}; diff --git a/arch/arm/boot/dts/kirkwood-dreamplug.dts b/arch/arm/boot/dts/kirkwood-dreamplug.dts index 8a5dff807b45..a5376b84227f 100644 --- a/arch/arm/boot/dts/kirkwood-dreamplug.dts +++ b/arch/arm/boot/dts/kirkwood-dreamplug.dts @@ -4,7 +4,7 @@ / { model = "Globalscale Technologies Dreamplug"; - compatible = "globalscale,dreamplug-003-ds2001", "globalscale,dreamplug", "marvell,kirkwood-88f6281", "marvell,kirkwood"; + compatible = "globalscale,dreamplug-003-ds2001", "globalscale,dreamplug", "mrvl,kirkwood-88f6281", "mrvl,kirkwood"; memory { device_type = "memory"; @@ -15,11 +15,10 @@ bootargs = "console=ttyS0,115200n8 earlyprintk"; }; - serial@f1012000 { - compatible = "ns16550a"; - reg = <0xf1012000 0xff>; - reg-shift = <2>; - interrupts = <33>; - clock-frequency = <200000000>; + ocp@f1000000 { + serial@12000 { + clock-frequency = <200000000>; + status = "ok"; + }; }; }; diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi index 771c6bbeb29a..3474ef890945 100644 --- a/arch/arm/boot/dts/kirkwood.dtsi +++ b/arch/arm/boot/dts/kirkwood.dtsi @@ -1,6 +1,36 @@ /include/ "skeleton.dtsi" / { - compatible = "marvell,kirkwood"; -}; + compatible = "mrvl,kirkwood"; + + ocp@f1000000 { + compatible = 
"simple-bus"; + ranges = <0 0xf1000000 0x1000000>; + #address-cells = <1>; + #size-cells = <1>; + + serial@12000 { + compatible = "ns16550a"; + reg = <0x12000 0x100>; + reg-shift = <2>; + interrupts = <33>; + /* set clock-frequency in board dts */ + status = "disabled"; + }; + serial@12100 { + compatible = "ns16550a"; + reg = <0x12100 0x100>; + reg-shift = <2>; + interrupts = <34>; + /* set clock-frequency in board dts */ + status = "disabled"; + }; + + rtc@10300 { + compatible = "mrvl,kirkwood-rtc", "mrvl,orion-rtc"; + reg = <0x10300 0x20>; + interrupts = <53>; + }; + }; +}; diff --git a/arch/arm/boot/dts/snowball.dts b/arch/arm/boot/dts/snowball.dts new file mode 100644 index 000000000000..359c6d679156 --- /dev/null +++ b/arch/arm/boot/dts/snowball.dts @@ -0,0 +1,139 @@ +/* + * Copyright 2011 ST-Ericsson AB + * + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +/dts-v1/; +/include/ "db8500.dtsi" + +/ { + model = "Calao Systems Snowball platform with device tree"; + compatible = "calaosystems,snowball-a9500"; + + memory { + reg = <0x00000000 0x20000000>; + }; + + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + + button@1 { + debounce_interval = <50>; + wakeup = <1>; + linux,code = <2>; + label = "userpb"; + gpios = <&gpio1 0>; + }; + button@2 { + debounce_interval = <50>; + wakeup = <1>; + linux,code = <3>; + label = "userpb"; + gpios = <&gpio4 23>; + }; + button@3 { + debounce_interval = <50>; + wakeup = <1>; + linux,code = <4>; + label = "userpb"; + gpios = <&gpio4 23>; + }; + button@4 { + debounce_interval = <50>; + wakeup = <1>; + linux,code = <5>; + label = "userpb"; + gpios = <&gpio5 1>; + }; + button@5 { + debounce_interval = <50>; + wakeup = <1>; + linux,code = <6>; + label = "userpb"; + gpios = <&gpio5 2>; + }; + }; + + leds { + compatible = "gpio-leds"; + used-led { + label = "user_led"; + gpios = <&gpio4 14>; + }; + }; + + soc-u9500 { + + external-bus@50000000 { + compatible = "simple-bus"; + reg = <0x50000000 0x10000000>; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + ethernet@50000000 { + compatible = "smsc,9111"; + reg = <0x50000000 0x10000>; + interrupts = <12>; + interrupt-parent = <&gpio4>; + }; + }; + + sdi@80126000 { + status = "enabled"; + cd-gpios = <&gpio6 26>; + }; + + sdi@80114000 { + status = "enabled"; + }; + + uart@80120000 { + status = "okay"; + }; + + uart@80121000 { + status = "okay"; + }; + + uart@80007000 { + status = "okay"; + }; + + i2c@80004000 { + tc3589x@42 { + //compatible = "tc3589x"; + reg = <0x42>; + interrupts = <25>; + interrupt-parent = <&gpio6>; + }; + tps61052@33 { + //compatible = "tps61052"; + reg = <0x33>; + }; + }; + + i2c@80128000 { + lp5521@0x33 { + // compatible = "lp5521"; + reg = <0x33>; + }; + lp5521@0x34 { + // compatible = "lp5521"; + reg = <0x34>; + }; + bh1780@0x29 { + // compatible = "rohm,bh1780gli"; + reg = <0x33>; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/spear600-evb.dts b/arch/arm/boot/dts/spear600-evb.dts new file mode 100644 index 000000000000..636292e18c90 --- /dev/null +++ b/arch/arm/boot/dts/spear600-evb.dts @@ -0,0 +1,47 @@ +/* + * Copyright 2012 Stefan Roese <sr@denx.de> + * + * The code contained herein is licensed under the GNU General Public + * License. 
You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +/dts-v1/; +/include/ "spear600.dtsi" + +/ { + model = "ST SPEAr600 Evaluation Board"; + compatible = "st,spear600-evb", "st,spear600"; + #address-cells = <1>; + #size-cells = <1>; + + memory { + device_type = "memory"; + reg = <0 0x10000000>; + }; + + ahb { + gmac: ethernet@e0800000 { + phy-mode = "gmii"; + status = "okay"; + }; + + apb { + serial@d0000000 { + status = "okay"; + }; + + serial@d0080000 { + status = "okay"; + }; + + i2c@d0200000 { + clock-frequency = <400000>; + status = "okay"; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi new file mode 100644 index 000000000000..ebe0885a2b98 --- /dev/null +++ b/arch/arm/boot/dts/spear600.dtsi @@ -0,0 +1,174 @@ +/* + * Copyright 2012 Stefan Roese <sr@denx.de> + * + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +/include/ "skeleton.dtsi" + +/ { + compatible = "st,spear600"; + + cpus { + cpu@0 { + compatible = "arm,arm926ejs"; + }; + }; + + memory { + device_type = "memory"; + reg = <0 0x40000000>; + }; + + ahb { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-bus"; + ranges = <0xd0000000 0xd0000000 0x30000000>; + + vic0: interrupt-controller@f1100000 { + compatible = "arm,pl190-vic"; + interrupt-controller; + reg = <0xf1100000 0x1000>; + #interrupt-cells = <1>; + }; + + vic1: interrupt-controller@f1000000 { + compatible = "arm,pl190-vic"; + interrupt-controller; + reg = <0xf1000000 0x1000>; + #interrupt-cells = <1>; + }; + + gmac: ethernet@e0800000 { + compatible = "st,spear600-gmac"; + reg = <0xe0800000 0x8000>; + interrupt-parent = <&vic1>; + interrupts = <24 23>; + interrupt-names = "macirq", "eth_wake_irq"; + status = "disabled"; + }; + + fsmc: flash@d1800000 { + compatible = "st,spear600-fsmc-nand"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0xd1800000 0x1000 /* FSMC Register */ + 0xd2000000 0x4000>; /* NAND Base */ + reg-names = "fsmc_regs", "nand_data"; + st,ale-off = <0x20000>; + st,cle-off = <0x10000>; + status = "disabled"; + }; + + smi: flash@fc000000 { + compatible = "st,spear600-smi"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0xfc000000 0x1000>; + interrupt-parent = <&vic1>; + interrupts = <12>; + status = "disabled"; + }; + + ehci@e1800000 { + compatible = "st,spear600-ehci", "usb-ehci"; + reg = <0xe1800000 0x1000>; + interrupt-parent = <&vic1>; + interrupts = <27>; + status = "disabled"; + }; + + ehci@e2000000 { + compatible = "st,spear600-ehci", "usb-ehci"; + reg = <0xe2000000 0x1000>; + interrupt-parent = <&vic1>; + interrupts = <29>; + status = "disabled"; + }; + + ohci@e1900000 { + compatible = "st,spear600-ohci", "usb-ohci"; + reg = <0xe1900000 0x1000>; + interrupt-parent = <&vic1>; + interrupts = <26>; + status = "disabled"; + }; + + ohci@e2100000 { + compatible = "st,spear600-ohci", "usb-ohci"; + reg = <0xe2100000 0x1000>; + interrupt-parent = <&vic1>; + interrupts = <28>; + status = "disabled"; + }; + + apb { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-bus"; + ranges = <0xd0000000 0xd0000000 0x30000000>; + + serial@d0000000 { + compatible = 
"arm,pl011", "arm,primecell"; + reg = <0xd0000000 0x1000>; + interrupt-parent = <&vic0>; + interrupts = <24>; + status = "disabled"; + }; + + serial@d0080000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0xd0080000 0x1000>; + interrupt-parent = <&vic0>; + interrupts = <25>; + status = "disabled"; + }; + + /* local/cpu GPIO */ + gpio0: gpio@f0100000 { + #gpio-cells = <2>; + compatible = "arm,pl061", "arm,primecell"; + gpio-controller; + reg = <0xf0100000 0x1000>; + interrupt-parent = <&vic0>; + interrupts = <18>; + }; + + /* basic GPIO */ + gpio1: gpio@fc980000 { + #gpio-cells = <2>; + compatible = "arm,pl061", "arm,primecell"; + gpio-controller; + reg = <0xfc980000 0x1000>; + interrupt-parent = <&vic1>; + interrupts = <19>; + }; + + /* appl GPIO */ + gpio2: gpio@d8100000 { + #gpio-cells = <2>; + compatible = "arm,pl061", "arm,primecell"; + gpio-controller; + reg = <0xd8100000 0x1000>; + interrupt-parent = <&vic1>; + interrupts = <4>; + }; + + i2c@d0200000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xd0200000 0x1000>; + interrupt-parent = <&vic0>; + interrupts = <28>; + status = "disabled"; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/tegra-cardhu.dts b/arch/arm/boot/dts/tegra-cardhu.dts index 73263501f581..ac3fb7558459 100644 --- a/arch/arm/boot/dts/tegra-cardhu.dts +++ b/arch/arm/boot/dts/tegra-cardhu.dts @@ -14,6 +14,22 @@ clock-frequency = < 408000000 >; }; + serial@70006040 { + status = "disable"; + }; + + serial@70006200 { + status = "disable"; + }; + + serial@70006300 { + status = "disable"; + }; + + serial@70006400 { + status = "disable"; + }; + i2c@7000c000 { clock-frequency = <100000>; }; diff --git a/arch/arm/boot/dts/tegra-seaboard.dts b/arch/arm/boot/dts/tegra-seaboard.dts index 876d5c92ce36..dbf1c5a171c2 100644 --- a/arch/arm/boot/dts/tegra-seaboard.dts +++ b/arch/arm/boot/dts/tegra-seaboard.dts @@ -112,6 +112,7 @@ usb@c5000000 { nvidia,vbus-gpio = <&gpio 24 0>; /* PD0 */ + dr_mode = "otg"; }; gpio-keys { diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index aff8a175aa40..108e894a8926 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi @@ -190,6 +190,7 @@ reg = <0xc5000000 0x4000>; interrupts = < 0 20 0x04 >; phy_type = "utmi"; + nvidia,has-legacy-mode; }; usb@c5004000 { diff --git a/arch/arm/boot/dts/usb_a9g20-dab-mmx.dtsi b/arch/arm/boot/dts/usb_a9g20-dab-mmx.dtsi new file mode 100644 index 000000000000..ad3eca17c436 --- /dev/null +++ b/arch/arm/boot/dts/usb_a9g20-dab-mmx.dtsi @@ -0,0 +1,96 @@ +/* + * calao-dab-mmx.dtsi - Device Tree Include file for Calao DAB-MMX Daughter Board + * + * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> + * + * Licensed under GPLv2. 
+ */ + +/ { + ahb { + apb { + usart1: serial@fffb4000 { + status = "okay"; + }; + + usart3: serial@fffd0000 { + status = "okay"; + }; + }; + }; + + i2c-gpio@0 { + status = "okay"; + }; + + leds { + compatible = "gpio-leds"; + + user_led1 { + label = "user_led1"; + gpios = <&pioB 20 1>; + }; + +/* +* led already used by mother board but active as high +* user_led2 { +* label = "user_led2"; +* gpios = <&pioB 21 1>; +* }; +*/ + user_led3 { + label = "user_led3"; + gpios = <&pioB 22 1>; + }; + + user_led4 { + label = "user_led4"; + gpios = <&pioB 23 1>; + }; + + red { + label = "red"; + gpios = <&pioB 24 1>; + }; + + orange { + label = "orange"; + gpios = <&pioB 30 1>; + }; + + green { + label = "green"; + gpios = <&pioB 31 1>; + }; + }; + + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + + user_pb1 { + label = "user_pb1"; + gpios = <&pioB 25 1>; + linux,code = <0x100>; + }; + + user_pb2 { + label = "user_pb2"; + gpios = <&pioB 13 1>; + linux,code = <0x101>; + }; + + user_pb3 { + label = "user_pb3"; + gpios = <&pioA 26 1>; + linux,code = <0x102>; + }; + + user_pb4 { + label = "user_pb4"; + gpios = <&pioC 9 1>; + linux,code = <0x103>; + }; + }; +}; diff --git a/arch/arm/boot/dts/usb_a9g20.dts b/arch/arm/boot/dts/usb_a9g20.dts index d74545a2a77c..3b3c4e0fa79f 100644 --- a/arch/arm/boot/dts/usb_a9g20.dts +++ b/arch/arm/boot/dts/usb_a9g20.dts @@ -13,13 +13,24 @@ compatible = "calao,usb-a9g20", "atmel,at91sam9g20", "atmel,at91sam9"; chosen { - bootargs = "mem=64M console=ttyS0,115200 mtdparts=atmel_nand:128k(at91bootstrap),256k(barebox)ro,128k(bareboxenv),128k(bareboxenv2),4M(kernel),120M(rootfs),-(data) root=/dev/mtdblock5 rw rootfstype=ubifs"; + bootargs = "mem=64M console=ttyS0,115200 root=/dev/mtdblock5 rw rootfstype=ubifs"; }; memory@20000000 { reg = <0x20000000 0x4000000>; }; + clocks { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + main_clock: clock@0 { + compatible = "atmel,osc", "fixed-clock"; + clock-frequency = <12000000>; + }; + }; + ahb { apb { dbgu: serial@fffff200 { @@ -30,6 +41,58 @@ phy-mode = "rmii"; status = "okay"; }; + + usb1: gadget@fffa4000 { + atmel,vbus-gpio = <&pioC 5 0>; + status = "okay"; + }; + }; + + nand0: nand@40000000 { + nand-bus-width = <8>; + nand-ecc-mode = "soft"; + nand-on-flash-bbt; + status = "okay"; + + at91bootstrap@0 { + label = "at91bootstrap"; + reg = <0x0 0x20000>; + }; + + barebox@20000 { + label = "barebox"; + reg = <0x20000 0x40000>; + }; + + bareboxenv@60000 { + label = "bareboxenv"; + reg = <0x60000 0x20000>; + }; + + bareboxenv2@80000 { + label = "bareboxenv2"; + reg = <0x80000 0x20000>; + }; + + kernel@a0000 { + label = "kernel"; + reg = <0xa0000 0x400000>; + }; + + rootfs@4a0000 { + label = "rootfs"; + reg = <0x4a0000 0x7800000>; + }; + + data@7ca0000 { + label = "data"; + reg = <0x7ca0000 0x8360000>; + }; + }; + + usb0: ohci@00500000 { + num-ports = <2>; + status = "okay"; }; }; @@ -55,4 +118,13 @@ gpio-key,wakeup; }; }; + + i2c@0 { + status = "okay"; + + rv3029c2@56 { + compatible = "rv3029c2"; + reg = <0x56>; + }; + }; }; diff --git a/arch/arm/common/via82c505.c b/arch/arm/common/via82c505.c index 67dd2affc57a..1171a5010aea 100644 --- a/arch/arm/common/via82c505.c +++ b/arch/arm/common/via82c505.c @@ -6,7 +6,6 @@ #include <linux/ioport.h> #include <linux/io.h> -#include <asm/system.h> #include <asm/mach/pci.h> diff --git a/arch/arm/configs/at91sam9g20_defconfig b/arch/arm/configs/at91sam9g20_defconfig index 9123568d9a8d..994d331b2319 100644 --- a/arch/arm/configs/at91sam9g20_defconfig +++ 
b/arch/arm/configs/at91sam9g20_defconfig @@ -74,6 +74,8 @@ CONFIG_LEGACY_PTY_COUNT=16 CONFIG_SERIAL_ATMEL=y CONFIG_SERIAL_ATMEL_CONSOLE=y CONFIG_HW_RANDOM=y +CONFIG_I2C=y +CONFIG_I2C_GPIO=y CONFIG_SPI=y CONFIG_SPI_ATMEL=y CONFIG_SPI_SPIDEV=y @@ -105,6 +107,7 @@ CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_RV3029C2=y CONFIG_RTC_DRV_AT91SAM9=y CONFIG_EXT2_FS=y CONFIG_MSDOS_FS=y diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index 2d7b6e7b7271..889d73ac1ae1 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -13,6 +13,7 @@ CONFIG_UX500_SOC_DB8500=y CONFIG_MACH_HREFV60=y CONFIG_MACH_SNOWBALL=y CONFIG_MACH_U5500=y +CONFIG_MACH_UX500_DT=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_SMP=y diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 86976d034382..68374ba6a943 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -13,7 +13,9 @@ #include <linux/compiler.h> #include <linux/types.h> -#include <asm/system.h> +#include <linux/irqflags.h> +#include <asm/barrier.h> +#include <asm/cmpxchg.h> #define ATOMIC_INIT(i) { (i) } diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h new file mode 100644 index 000000000000..44f4a09ff37b --- /dev/null +++ b/arch/arm/include/asm/barrier.h @@ -0,0 +1,69 @@ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +#ifndef __ASSEMBLY__ + +#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); + +#if __LINUX_ARM_ARCH__ >= 7 || \ + (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) +#define sev() __asm__ __volatile__ ("sev" : : : "memory") +#define wfe() __asm__ __volatile__ ("wfe" : : : "memory") +#define wfi() __asm__ __volatile__ ("wfi" : : : "memory") +#endif + +#if __LINUX_ARM_ARCH__ >= 7 +#define isb() __asm__ __volatile__ ("isb" : : : "memory") +#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") +#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") +#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 +#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ + : : "r" (0) : "memory") +#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ + : : "r" (0) : "memory") +#elif defined(CONFIG_CPU_FA526) +#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ + : : "r" (0) : "memory") +#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb() __asm__ __volatile__ ("" : : : "memory") +#else +#define isb() __asm__ __volatile__ ("" : : : "memory") +#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb() __asm__ __volatile__ ("" : : : "memory") +#endif + +#ifdef CONFIG_ARCH_HAS_BARRIERS +#include <mach/barriers.h> +#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) +#include <asm/outercache.h> +#define mb() do { dsb(); outer_sync(); } while (0) +#define rmb() dsb() +#define wmb() mb() +#else +#include <asm/memory.h> +#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#endif + +#ifndef CONFIG_SMP +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#else +#define smp_mb() 
dmb() +#define smp_rmb() dmb() +#define smp_wmb() dmb() +#endif + +#define read_barrier_depends() do { } while(0) +#define smp_read_barrier_depends() do { } while(0) + +#define set_mb(var, value) do { var = value; smp_mb(); } while (0) + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_BARRIER_H */ diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index f7419ef9c8f9..e691ec91e4d3 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -24,7 +24,7 @@ #endif #include <linux/compiler.h> -#include <asm/system.h> +#include <linux/irqflags.h> #define smp_mb__before_clear_bit() smp_mb() #define smp_mb__after_clear_bit() smp_mb() diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index fac79dceb736..7af5c6c3653a 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -1,6 +1,7 @@ #ifndef _ASMARM_BUG_H #define _ASMARM_BUG_H +#include <linux/linkage.h> #ifdef CONFIG_BUG @@ -57,4 +58,33 @@ do { \ #include <asm-generic/bug.h> +struct pt_regs; +void die(const char *msg, struct pt_regs *regs, int err); + +struct siginfo; +void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, + unsigned long err, unsigned long trap); + +#ifdef CONFIG_ARM_LPAE +#define FAULT_CODE_ALIGNMENT 33 +#define FAULT_CODE_DEBUG 34 +#else +#define FAULT_CODE_ALIGNMENT 1 +#define FAULT_CODE_DEBUG 2 +#endif + +void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, + struct pt_regs *), + int sig, int code, const char *name); + +void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, + struct pt_regs *), + int sig, int code, const char *name); + +extern asmlinkage void c_backtrace(unsigned long fp, int pmode); + +struct mm_struct; +extern void show_pte(struct mm_struct *mm, unsigned long addr); +extern void __show_regs(struct pt_regs *); + #endif diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h new file mode 100644 index 000000000000..d41d7cbf0ada --- /dev/null +++ b/arch/arm/include/asm/cmpxchg.h @@ -0,0 +1,295 @@ +#ifndef __ASM_ARM_CMPXCHG_H +#define __ASM_ARM_CMPXCHG_H + +#include <linux/irqflags.h> +#include <asm/barrier.h> + +#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) +/* + * On the StrongARM, "swp" is terminally broken since it bypasses the + * cache totally. This means that the cache becomes inconsistent, and, + * since we use normal loads/stores as well, this is really bad. + * Typically, this causes oopsen in filp_close, but could have other, + * more disastrous effects. There are two work-arounds: + * 1. Disable interrupts and emulate the atomic swap + * 2. Clean the cache, perform atomic swap, flush the cache + * + * We choose (1) since its the "easiest" to achieve here and is not + * dependent on the processor type. + * + * NOTE that this solution won't work on an SMP system, so explcitly + * forbid it here. 
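As a side note for readers unfamiliar with how the smp_*() barrier macros defined in the new asm/barrier.h are paired in practice, here is a minimal producer/consumer sketch. It is illustrative only and not part of this patch; the data and ready variables are made up.

    #include <asm/barrier.h>

    static int data;
    static int ready;

    static void publish(int value)
    {
            data = value;
            smp_wmb();      /* order the data store before the flag store */
            ready = 1;
    }

    static int try_consume(int *out)
    {
            if (!ready)
                    return 0;
            smp_rmb();      /* order the flag load before the data load */
            *out = data;
            return 1;
    }

On non-SMP builds both macros collapse to barrier(), matching the #ifndef CONFIG_SMP branch shown above.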
+ */ +#define swp_is_buggy +#endif + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + extern void __bad_xchg(volatile void *, int); + unsigned long ret; +#ifdef swp_is_buggy + unsigned long flags; +#endif +#if __LINUX_ARM_ARCH__ >= 6 + unsigned int tmp; +#endif + + smp_mb(); + + switch (size) { +#if __LINUX_ARM_ARCH__ >= 6 + case 1: + asm volatile("@ __xchg1\n" + "1: ldrexb %0, [%3]\n" + " strexb %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: + asm volatile("@ __xchg4\n" + "1: ldrex %0, [%3]\n" + " strex %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; +#elif defined(swp_is_buggy) +#ifdef CONFIG_SMP +#error SMP is not supported on this platform +#endif + case 1: + raw_local_irq_save(flags); + ret = *(volatile unsigned char *)ptr; + *(volatile unsigned char *)ptr = x; + raw_local_irq_restore(flags); + break; + + case 4: + raw_local_irq_save(flags); + ret = *(volatile unsigned long *)ptr; + *(volatile unsigned long *)ptr = x; + raw_local_irq_restore(flags); + break; +#else + case 1: + asm volatile("@ __xchg1\n" + " swpb %0, %1, [%2]" + : "=&r" (ret) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: + asm volatile("@ __xchg4\n" + " swp %0, %1, [%2]" + : "=&r" (ret) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; +#endif + default: + __bad_xchg(ptr, size), ret = 0; + break; + } + smp_mb(); + + return ret; +} + +#define xchg(ptr,x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +#include <asm-generic/cmpxchg-local.h> + +#if __LINUX_ARM_ARCH__ < 6 +/* min ARCH < ARMv6 */ + +#ifdef CONFIG_SMP +#error "SMP is not supported on this platform" +#endif + +/* + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. + */ +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ + (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +#ifndef CONFIG_SMP +#include <asm-generic/cmpxchg.h> +#endif + +#else /* min ARCH >= ARMv6 */ + +extern void __bad_cmpxchg(volatile void *ptr, int size); + +/* + * cmpxchg only support 32-bits operands on ARMv6. 
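As a quick illustration of how the xchg() macro defined above is typically used (this sketch is not taken from the patch; struct work_item and pending_work are hypothetical names):

    #include <linux/stddef.h>
    #include <asm/cmpxchg.h>

    struct work_item;

    static struct work_item *pending_work;

    /* Atomically take the queued item, leaving NULL behind for other CPUs. */
    static struct work_item *grab_pending(void)
    {
            return xchg(&pending_work, NULL);
    }

The size of *ptr selects the 8-bit or 32-bit exchange path, and the smp_mb() calls inside __xchg() give the operation full barrier semantics.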
+ */ + +static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + unsigned long oldval, res; + + switch (size) { +#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ + case 1: + do { + asm volatile("@ __cmpxchg1\n" + " ldrexb %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexbeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; + case 2: + do { + asm volatile("@ __cmpxchg1\n" + " ldrexh %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexheq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; +#endif + case 4: + do { + asm volatile("@ __cmpxchg4\n" + " ldrex %1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " strexeq %0, %4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (old), "r" (new) + : "memory", "cc"); + } while (res); + break; + default: + __bad_cmpxchg(ptr, size); + oldval = 0; + } + + return oldval; +} + +static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, + unsigned long new, int size) +{ + unsigned long ret; + + smp_mb(); + ret = __cmpxchg(ptr, old, new, size); + smp_mb(); + + return ret; +} + +#define cmpxchg(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) + +static inline unsigned long __cmpxchg_local(volatile void *ptr, + unsigned long old, + unsigned long new, int size) +{ + unsigned long ret; + + switch (size) { +#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */ + case 1: + case 2: + ret = __cmpxchg_local_generic(ptr, old, new, size); + break; +#endif + default: + ret = __cmpxchg(ptr, old, new, size); + } + + return ret; +} + +#define cmpxchg_local(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) + +#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ + +/* + * Note : ARMv7-M (currently unsupported by Linux) does not support + * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should + * not be allowed to use __cmpxchg64. 
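To make the intended use of the cmpxchg() macro defined above concrete, a small retry loop might look like the following. This is a hypothetical sketch, not part of the patch, and refcount is a made-up variable.

    #include <asm/cmpxchg.h>

    static unsigned int refcount;

    /* Take a reference unless the count has already dropped to zero. */
    static int get_ref_unless_zero(void)
    {
            unsigned int cur = refcount;

            while (cur != 0) {
                    unsigned int old = cmpxchg(&refcount, cur, cur + 1);

                    if (old == cur)
                            return 1;       /* our cur + 1 was installed */
                    cur = old;              /* lost a race, retry with the new value */
            }
            return 0;
    }

cmpxchg_local() would be used the same way for data that is only ever touched by the local CPU, avoiding the surrounding smp_mb() pair.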
+ */ +static inline unsigned long long __cmpxchg64(volatile void *ptr, + unsigned long long old, + unsigned long long new) +{ + register unsigned long long oldval asm("r0"); + register unsigned long long __old asm("r2") = old; + register unsigned long long __new asm("r4") = new; + unsigned long res; + + do { + asm volatile( + " @ __cmpxchg8\n" + " ldrexd %1, %H1, [%2]\n" + " mov %0, #0\n" + " teq %1, %3\n" + " teqeq %H1, %H3\n" + " strexdeq %0, %4, %H4, [%2]\n" + : "=&r" (res), "=&r" (oldval) + : "r" (ptr), "Ir" (__old), "r" (__new) + : "memory", "cc"); + } while (res); + + return oldval; +} + +static inline unsigned long long __cmpxchg64_mb(volatile void *ptr, + unsigned long long old, + unsigned long long new) +{ + unsigned long long ret; + + smp_mb(); + ret = __cmpxchg64(ptr, old, new); + smp_mb(); + + return ret; +} + +#define cmpxchg64(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \ + (unsigned long long)(o), \ + (unsigned long long)(n))) + +#define cmpxchg64_local(ptr,o,n) \ + ((__typeof__(*(ptr)))__cmpxchg64((ptr), \ + (unsigned long long)(o), \ + (unsigned long long)(n))) + +#else /* min ARCH = ARMv6 */ + +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +#endif + +#endif /* __LINUX_ARM_ARCH__ >= 6 */ + +#endif /* __ASM_ARM_CMPXCHG_H */ diff --git a/arch/arm/include/asm/compiler.h b/arch/arm/include/asm/compiler.h new file mode 100644 index 000000000000..8155db2f7fa1 --- /dev/null +++ b/arch/arm/include/asm/compiler.h @@ -0,0 +1,15 @@ +#ifndef __ASM_ARM_COMPILER_H +#define __ASM_ARM_COMPILER_H + +/* + * This is used to ensure the compiler did actually allocate the register we + * asked it for some inline assembly sequences. Apparently we can't trust + * the compiler from one version to another so a bit of paranoia won't hurt. + * This string is meant to be concatenated with the inline asm string and + * will cause compilation to stop on mismatch. 
+ * (for details, see gcc PR 15089) + */ +#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" + + +#endif /* __ASM_ARM_COMPILER_H */ diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h new file mode 100644 index 000000000000..5ef4d8015a60 --- /dev/null +++ b/arch/arm/include/asm/cp15.h @@ -0,0 +1,87 @@ +#ifndef __ASM_ARM_CP15_H +#define __ASM_ARM_CP15_H + +#include <asm/barrier.h> + +/* + * CR1 bits (CP#15 CR1) + */ +#define CR_M (1 << 0) /* MMU enable */ +#define CR_A (1 << 1) /* Alignment abort enable */ +#define CR_C (1 << 2) /* Dcache enable */ +#define CR_W (1 << 3) /* Write buffer enable */ +#define CR_P (1 << 4) /* 32-bit exception handler */ +#define CR_D (1 << 5) /* 32-bit data address range */ +#define CR_L (1 << 6) /* Implementation defined */ +#define CR_B (1 << 7) /* Big endian */ +#define CR_S (1 << 8) /* System MMU protection */ +#define CR_R (1 << 9) /* ROM MMU protection */ +#define CR_F (1 << 10) /* Implementation defined */ +#define CR_Z (1 << 11) /* Implementation defined */ +#define CR_I (1 << 12) /* Icache enable */ +#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ +#define CR_RR (1 << 14) /* Round Robin cache replacement */ +#define CR_L4 (1 << 15) /* LDR pc can set T bit */ +#define CR_DT (1 << 16) +#define CR_IT (1 << 18) +#define CR_ST (1 << 19) +#define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ +#define CR_U (1 << 22) /* Unaligned access operation */ +#define CR_XP (1 << 23) /* Extended page tables */ +#define CR_VE (1 << 24) /* Vectored interrupts */ +#define CR_EE (1 << 25) /* Exception (Big) Endian */ +#define CR_TRE (1 << 28) /* TEX remap enable */ +#define CR_AFE (1 << 29) /* Access flag enable */ +#define CR_TE (1 << 30) /* Thumb exception enable */ + +#ifndef __ASSEMBLY__ + +#if __LINUX_ARM_ARCH__ >= 4 +#define vectors_high() (cr_alignment & CR_V) +#else +#define vectors_high() (0) +#endif + +extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ +extern unsigned long cr_alignment; /* defined in entry-armv.S */ + +static inline unsigned int get_cr(void) +{ + unsigned int val; + asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc"); + return val; +} + +static inline void set_cr(unsigned int val) +{ + asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" + : : "r" (val) : "cc"); + isb(); +} + +#ifndef CONFIG_SMP +extern void adjust_cr(unsigned long mask, unsigned long set); +#endif + +#define CPACC_FULL(n) (3 << (n * 2)) +#define CPACC_SVC(n) (1 << (n * 2)) +#define CPACC_DISABLE(n) (0 << (n * 2)) + +static inline unsigned int get_copro_access(void) +{ + unsigned int val; + asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access" + : "=r" (val) : : "cc"); + return val; +} + +static inline void set_copro_access(unsigned int val) +{ + asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access" + : : "r" (val) : "cc"); + isb(); +} + +#endif + +#endif diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h index d3f0a9eee9f6..fe92ccf1d0b0 100644 --- a/arch/arm/include/asm/div64.h +++ b/arch/arm/include/asm/div64.h @@ -1,8 +1,8 @@ #ifndef __ASM_ARM_DIV64 #define __ASM_ARM_DIV64 -#include <asm/system.h> #include <linux/types.h> +#include <asm/compiler.h> /* * The semantics of do_div() are: diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 69a5b0b6455c..5694a0d6576b 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -19,7 +19,6 @@ * It should not be re-used except for that purpose. 
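As a hypothetical illustration of the accessors introduced in the new asm/cp15.h above (not part of this patch), enabling alignment fault checking could be sketched as:

    #include <asm/cp15.h>

    static void enable_alignment_checking(void)
    {
            unsigned int cr = get_cr();

            if (!(cr & CR_A))
                    set_cr(cr | CR_A);      /* set_cr() issues the isb() itself */
    }

The same get/modify/set pattern applies to the coprocessor access register via get_copro_access() and set_copro_access().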
*/ #include <linux/spinlock.h> -#include <asm/system.h> #include <asm/scatterlist.h> #include <mach/isa-dma.h> diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index b5dc173d336f..3d2220498abc 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -10,6 +10,10 @@ #ifndef __ASM_PROC_DOMAIN_H #define __ASM_PROC_DOMAIN_H +#ifndef __ASSEMBLY__ +#include <asm/barrier.h> +#endif + /* * Domain numbers * diff --git a/arch/arm/include/asm/exec.h b/arch/arm/include/asm/exec.h new file mode 100644 index 000000000000..7c4fbef72b3a --- /dev/null +++ b/arch/arm/include/asm/exec.h @@ -0,0 +1,6 @@ +#ifndef __ASM_ARM_EXEC_H +#define __ASM_ARM_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* __ASM_ARM_EXEC_H */ diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 077c32326c63..2ff2c75a4639 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -231,6 +231,9 @@ extern int iop3xx_get_init_atu(void); #ifndef __ASSEMBLY__ + +#include <linux/types.h> + void iop3xx_map_io(void); void iop_init_cp6_handler(void); void iop_init_time(unsigned long tickrate); diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 9275828feb3d..bae7eb6011d2 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -26,7 +26,6 @@ #include <linux/types.h> #include <asm/byteorder.h> #include <asm/memory.h> -#include <asm/system.h> #include <asm-generic/pci_iomap.h> /* @@ -99,6 +98,7 @@ static inline void __iomem *__typesafe_io(unsigned long addr) /* IO barriers */ #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE +#include <asm/barrier.h> #define __iormb() rmb() #define __iowmb() wmb() #else diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index 14965658a923..b8e580a297e4 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -34,4 +34,11 @@ typedef struct { #endif +/* + * switch_mm() may do a full cache flush over the context switch, + * so enable interrupts over the context switch to avoid high + * latency. + */ +#define __ARCH_WANT_INTERRUPTS_ON_CTXSW + #endif diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index cb8d638924fd..f4d7f56ee51f 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -22,7 +22,6 @@ #include <asm/hw_breakpoint.h> #include <asm/ptrace.h> #include <asm/types.h> -#include <asm/system.h> #ifdef __KERNEL__ #define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \ @@ -90,6 +89,8 @@ unsigned long get_wchan(struct task_struct *p); #define cpu_relax() barrier() #endif +void cpu_idle_wait(void); + /* * Create a new kernel thread */ diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h new file mode 100644 index 000000000000..fa09e6b49bf1 --- /dev/null +++ b/arch/arm/include/asm/switch_to.h @@ -0,0 +1,18 @@ +#ifndef __ASM_ARM_SWITCH_TO_H +#define __ASM_ARM_SWITCH_TO_H + +#include <linux/thread_info.h> + +/* + * switch_to(prev, next) should switch from task `prev' to `next' + * `prev' will never be the same as `next'. schedule() itself + * contains the memory barrier to tell GCC not to cache `current'. 
+ */ +extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *); + +#define switch_to(prev,next,last) \ +do { \ + last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ +} while (0) + +#endif /* __ASM_ARM_SWITCH_TO_H */ diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 424aa458c487..74542c52f9be 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -1,544 +1,8 @@ -#ifndef __ASM_ARM_SYSTEM_H -#define __ASM_ARM_SYSTEM_H - -#ifdef __KERNEL__ - -#define CPU_ARCH_UNKNOWN 0 -#define CPU_ARCH_ARMv3 1 -#define CPU_ARCH_ARMv4 2 -#define CPU_ARCH_ARMv4T 3 -#define CPU_ARCH_ARMv5 4 -#define CPU_ARCH_ARMv5T 5 -#define CPU_ARCH_ARMv5TE 6 -#define CPU_ARCH_ARMv5TEJ 7 -#define CPU_ARCH_ARMv6 8 -#define CPU_ARCH_ARMv7 9 - -/* - * CR1 bits (CP#15 CR1) - */ -#define CR_M (1 << 0) /* MMU enable */ -#define CR_A (1 << 1) /* Alignment abort enable */ -#define CR_C (1 << 2) /* Dcache enable */ -#define CR_W (1 << 3) /* Write buffer enable */ -#define CR_P (1 << 4) /* 32-bit exception handler */ -#define CR_D (1 << 5) /* 32-bit data address range */ -#define CR_L (1 << 6) /* Implementation defined */ -#define CR_B (1 << 7) /* Big endian */ -#define CR_S (1 << 8) /* System MMU protection */ -#define CR_R (1 << 9) /* ROM MMU protection */ -#define CR_F (1 << 10) /* Implementation defined */ -#define CR_Z (1 << 11) /* Implementation defined */ -#define CR_I (1 << 12) /* Icache enable */ -#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ -#define CR_RR (1 << 14) /* Round Robin cache replacement */ -#define CR_L4 (1 << 15) /* LDR pc can set T bit */ -#define CR_DT (1 << 16) -#define CR_IT (1 << 18) -#define CR_ST (1 << 19) -#define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ -#define CR_U (1 << 22) /* Unaligned access operation */ -#define CR_XP (1 << 23) /* Extended page tables */ -#define CR_VE (1 << 24) /* Vectored interrupts */ -#define CR_EE (1 << 25) /* Exception (Big) Endian */ -#define CR_TRE (1 << 28) /* TEX remap enable */ -#define CR_AFE (1 << 29) /* Access flag enable */ -#define CR_TE (1 << 30) /* Thumb exception enable */ - -/* - * This is used to ensure the compiler did actually allocate the register we - * asked it for some inline assembly sequences. Apparently we can't trust - * the compiler from one version to another so a bit of paranoia won't hurt. - * This string is meant to be concatenated with the inline asm string and - * will cause compilation to stop on mismatch. 
- * (for details, see gcc PR 15089) - */ -#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" - -#ifndef __ASSEMBLY__ - -#include <linux/compiler.h> -#include <linux/linkage.h> -#include <linux/irqflags.h> - -#include <asm/outercache.h> - -struct thread_info; -struct task_struct; - -/* information about the system we're running on */ -extern unsigned int system_rev; -extern unsigned int system_serial_low; -extern unsigned int system_serial_high; -extern unsigned int mem_fclk_21285; - -struct pt_regs; - -void die(const char *msg, struct pt_regs *regs, int err); - -struct siginfo; -void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, - unsigned long err, unsigned long trap); - -#ifdef CONFIG_ARM_LPAE -#define FAULT_CODE_ALIGNMENT 33 -#define FAULT_CODE_DEBUG 34 -#else -#define FAULT_CODE_ALIGNMENT 1 -#define FAULT_CODE_DEBUG 2 -#endif - -void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, - struct pt_regs *), - int sig, int code, const char *name); - -void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, - struct pt_regs *), - int sig, int code, const char *name); - -#define xchg(ptr,x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) - -extern asmlinkage void c_backtrace(unsigned long fp, int pmode); - -struct mm_struct; -extern void show_pte(struct mm_struct *mm, unsigned long addr); -extern void __show_regs(struct pt_regs *); - -extern int __pure cpu_architecture(void); -extern void cpu_init(void); - -void soft_restart(unsigned long); -extern void (*arm_pm_restart)(char str, const char *cmd); -extern void (*arm_pm_idle)(void); - -#define UDBG_UNDEFINED (1 << 0) -#define UDBG_SYSCALL (1 << 1) -#define UDBG_BADABORT (1 << 2) -#define UDBG_SEGV (1 << 3) -#define UDBG_BUS (1 << 4) - -extern unsigned int user_debug; - -#if __LINUX_ARM_ARCH__ >= 4 -#define vectors_high() (cr_alignment & CR_V) -#else -#define vectors_high() (0) -#endif - -#if __LINUX_ARM_ARCH__ >= 7 || \ - (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) -#define sev() __asm__ __volatile__ ("sev" : : : "memory") -#define wfe() __asm__ __volatile__ ("wfe" : : : "memory") -#define wfi() __asm__ __volatile__ ("wfi" : : : "memory") -#endif - -#if __LINUX_ARM_ARCH__ >= 7 -#define isb() __asm__ __volatile__ ("isb" : : : "memory") -#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") -#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") -#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 -#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ - : : "r" (0) : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ - : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ - : : "r" (0) : "memory") -#elif defined(CONFIG_CPU_FA526) -#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ - : : "r" (0) : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ - : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("" : : : "memory") -#else -#define isb() __asm__ __volatile__ ("" : : : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ - : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("" : : : "memory") -#endif - -#ifdef CONFIG_ARCH_HAS_BARRIERS -#include <mach/barriers.h> -#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) -#define mb() do { dsb(); outer_sync(); } while (0) -#define rmb() dsb() -#define wmb() mb() -#else -#include <asm/memory.h> -#define mb() do { 
if (arch_is_coherent()) dmb(); else barrier(); } while (0) -#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) -#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) -#endif - -#ifndef CONFIG_SMP -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#else -#define smp_mb() dmb() -#define smp_rmb() dmb() -#define smp_wmb() dmb() -#endif - -#define read_barrier_depends() do { } while(0) -#define smp_read_barrier_depends() do { } while(0) - -#define set_mb(var, value) do { var = value; smp_mb(); } while (0) -#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); - -extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ -extern unsigned long cr_alignment; /* defined in entry-armv.S */ - -static inline unsigned int get_cr(void) -{ - unsigned int val; - asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc"); - return val; -} - -static inline void set_cr(unsigned int val) -{ - asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" - : : "r" (val) : "cc"); - isb(); -} - -#ifndef CONFIG_SMP -extern void adjust_cr(unsigned long mask, unsigned long set); -#endif - -#define CPACC_FULL(n) (3 << (n * 2)) -#define CPACC_SVC(n) (1 << (n * 2)) -#define CPACC_DISABLE(n) (0 << (n * 2)) - -static inline unsigned int get_copro_access(void) -{ - unsigned int val; - asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access" - : "=r" (val) : : "cc"); - return val; -} - -static inline void set_copro_access(unsigned int val) -{ - asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access" - : : "r" (val) : "cc"); - isb(); -} - -/* - * switch_mm() may do a full cache flush over the context switch, - * so enable interrupts over the context switch to avoid high - * latency. - */ -#define __ARCH_WANT_INTERRUPTS_ON_CTXSW - -/* - * switch_to(prev, next) should switch from task `prev' to `next' - * `prev' will never be the same as `next'. schedule() itself - * contains the memory barrier to tell GCC not to cache `current'. - */ -extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *); - -#define switch_to(prev,next,last) \ -do { \ - last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ -} while (0) - -#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) -/* - * On the StrongARM, "swp" is terminally broken since it bypasses the - * cache totally. This means that the cache becomes inconsistent, and, - * since we use normal loads/stores as well, this is really bad. - * Typically, this causes oopsen in filp_close, but could have other, - * more disastrous effects. There are two work-arounds: - * 1. Disable interrupts and emulate the atomic swap - * 2. Clean the cache, perform atomic swap, flush the cache - * - * We choose (1) since its the "easiest" to achieve here and is not - * dependent on the processor type. - * - * NOTE that this solution won't work on an SMP system, so explcitly - * forbid it here. 
- */ -#define swp_is_buggy -#endif - -static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) -{ - extern void __bad_xchg(volatile void *, int); - unsigned long ret; -#ifdef swp_is_buggy - unsigned long flags; -#endif -#if __LINUX_ARM_ARCH__ >= 6 - unsigned int tmp; -#endif - - smp_mb(); - - switch (size) { -#if __LINUX_ARM_ARCH__ >= 6 - case 1: - asm volatile("@ __xchg1\n" - "1: ldrexb %0, [%3]\n" - " strexb %1, %2, [%3]\n" - " teq %1, #0\n" - " bne 1b" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; - case 4: - asm volatile("@ __xchg4\n" - "1: ldrex %0, [%3]\n" - " strex %1, %2, [%3]\n" - " teq %1, #0\n" - " bne 1b" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; -#elif defined(swp_is_buggy) -#ifdef CONFIG_SMP -#error SMP is not supported on this platform -#endif - case 1: - raw_local_irq_save(flags); - ret = *(volatile unsigned char *)ptr; - *(volatile unsigned char *)ptr = x; - raw_local_irq_restore(flags); - break; - - case 4: - raw_local_irq_save(flags); - ret = *(volatile unsigned long *)ptr; - *(volatile unsigned long *)ptr = x; - raw_local_irq_restore(flags); - break; -#else - case 1: - asm volatile("@ __xchg1\n" - " swpb %0, %1, [%2]" - : "=&r" (ret) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; - case 4: - asm volatile("@ __xchg4\n" - " swp %0, %1, [%2]" - : "=&r" (ret) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; -#endif - default: - __bad_xchg(ptr, size), ret = 0; - break; - } - smp_mb(); - - return ret; -} - -extern void disable_hlt(void); -extern void enable_hlt(void); - -void cpu_idle_wait(void); - -#include <asm-generic/cmpxchg-local.h> - -#if __LINUX_ARM_ARCH__ < 6 -/* min ARCH < ARMv6 */ - -#ifdef CONFIG_SMP -#error "SMP is not supported on this platform" -#endif - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. - */ -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ - (unsigned long)(n), sizeof(*(ptr)))) -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - -#ifndef CONFIG_SMP -#include <asm-generic/cmpxchg.h> -#endif - -#else /* min ARCH >= ARMv6 */ - -extern void __bad_cmpxchg(volatile void *ptr, int size); - -/* - * cmpxchg only support 32-bits operands on ARMv6. 
- */ - -static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, - unsigned long new, int size) -{ - unsigned long oldval, res; - - switch (size) { -#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ - case 1: - do { - asm volatile("@ __cmpxchg1\n" - " ldrexb %1, [%2]\n" - " mov %0, #0\n" - " teq %1, %3\n" - " strexbeq %0, %4, [%2]\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) - : "memory", "cc"); - } while (res); - break; - case 2: - do { - asm volatile("@ __cmpxchg1\n" - " ldrexh %1, [%2]\n" - " mov %0, #0\n" - " teq %1, %3\n" - " strexheq %0, %4, [%2]\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) - : "memory", "cc"); - } while (res); - break; -#endif - case 4: - do { - asm volatile("@ __cmpxchg4\n" - " ldrex %1, [%2]\n" - " mov %0, #0\n" - " teq %1, %3\n" - " strexeq %0, %4, [%2]\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) - : "memory", "cc"); - } while (res); - break; - default: - __bad_cmpxchg(ptr, size); - oldval = 0; - } - - return oldval; -} - -static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, - unsigned long new, int size) -{ - unsigned long ret; - - smp_mb(); - ret = __cmpxchg(ptr, old, new, size); - smp_mb(); - - return ret; -} - -#define cmpxchg(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ - (unsigned long)(o), \ - (unsigned long)(n), \ - sizeof(*(ptr)))) - -static inline unsigned long __cmpxchg_local(volatile void *ptr, - unsigned long old, - unsigned long new, int size) -{ - unsigned long ret; - - switch (size) { -#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */ - case 1: - case 2: - ret = __cmpxchg_local_generic(ptr, old, new, size); - break; -#endif - default: - ret = __cmpxchg(ptr, old, new, size); - } - - return ret; -} - -#define cmpxchg_local(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \ - (unsigned long)(o), \ - (unsigned long)(n), \ - sizeof(*(ptr)))) - -#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ - -/* - * Note : ARMv7-M (currently unsupported by Linux) does not support - * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should - * not be allowed to use __cmpxchg64. 
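The ldrexd/strexd-based helper that follows gives ARMv6K/ARMv7 a native 64-bit compare-and-swap. A hypothetical use (not from this patch), updating half of a 64-bit statistics word without a lock:

static u64 stats_word;		/* hypothetical 64-bit state shared between CPUs */

static void stats_set_high32(u32 hi)
{
	u64 old, new;

	do {
		old = stats_word;
		new = (old & 0xffffffffULL) | ((u64)hi << 32);
		/* cmpxchg64() maps onto the __cmpxchg64_mb() defined below */
	} while (cmpxchg64(&stats_word, old, new) != old);
}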
- */ -static inline unsigned long long __cmpxchg64(volatile void *ptr, - unsigned long long old, - unsigned long long new) -{ - register unsigned long long oldval asm("r0"); - register unsigned long long __old asm("r2") = old; - register unsigned long long __new asm("r4") = new; - unsigned long res; - - do { - asm volatile( - " @ __cmpxchg8\n" - " ldrexd %1, %H1, [%2]\n" - " mov %0, #0\n" - " teq %1, %3\n" - " teqeq %H1, %H3\n" - " strexdeq %0, %4, %H4, [%2]\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (__old), "r" (__new) - : "memory", "cc"); - } while (res); - - return oldval; -} - -static inline unsigned long long __cmpxchg64_mb(volatile void *ptr, - unsigned long long old, - unsigned long long new) -{ - unsigned long long ret; - - smp_mb(); - ret = __cmpxchg64(ptr, old, new); - smp_mb(); - - return ret; -} - -#define cmpxchg64(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \ - (unsigned long long)(o), \ - (unsigned long long)(n))) - -#define cmpxchg64_local(ptr,o,n) \ - ((__typeof__(*(ptr)))__cmpxchg64((ptr), \ - (unsigned long long)(o), \ - (unsigned long long)(n))) - -#else /* min ARCH = ARMv6 */ - -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - -#endif - -#endif /* __LINUX_ARM_ARCH__ >= 6 */ - -#endif /* __ASSEMBLY__ */ - -#define arch_align_stack(x) (x) - -#endif /* __KERNEL__ */ - -#endif +/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */ +#include <asm/barrier.h> +#include <asm/compiler.h> +#include <asm/cmpxchg.h> +#include <asm/exec.h> +#include <asm/switch_to.h> +#include <asm/system_info.h> +#include <asm/system_misc.h> diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h new file mode 100644 index 000000000000..dfd386d0c022 --- /dev/null +++ b/arch/arm/include/asm/system_info.h @@ -0,0 +1,27 @@ +#ifndef __ASM_ARM_SYSTEM_INFO_H +#define __ASM_ARM_SYSTEM_INFO_H + +#define CPU_ARCH_UNKNOWN 0 +#define CPU_ARCH_ARMv3 1 +#define CPU_ARCH_ARMv4 2 +#define CPU_ARCH_ARMv4T 3 +#define CPU_ARCH_ARMv5 4 +#define CPU_ARCH_ARMv5T 5 +#define CPU_ARCH_ARMv5TE 6 +#define CPU_ARCH_ARMv5TEJ 7 +#define CPU_ARCH_ARMv6 8 +#define CPU_ARCH_ARMv7 9 + +#ifndef __ASSEMBLY__ + +/* information about the system we're running on */ +extern unsigned int system_rev; +extern unsigned int system_serial_low; +extern unsigned int system_serial_high; +extern unsigned int mem_fclk_21285; + +extern int __pure cpu_architecture(void); + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_ARM_SYSTEM_INFO_H */ diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h new file mode 100644 index 000000000000..5a85f148b607 --- /dev/null +++ b/arch/arm/include/asm/system_misc.h @@ -0,0 +1,29 @@ +#ifndef __ASM_ARM_SYSTEM_MISC_H +#define __ASM_ARM_SYSTEM_MISC_H + +#ifndef __ASSEMBLY__ + +#include <linux/compiler.h> +#include <linux/linkage.h> +#include <linux/irqflags.h> + +extern void cpu_init(void); + +void soft_restart(unsigned long); +extern void (*arm_pm_restart)(char str, const char *cmd); +extern void (*arm_pm_idle)(void); + +#define UDBG_UNDEFINED (1 << 0) +#define UDBG_SYSCALL (1 << 1) +#define UDBG_BADABORT (1 << 2) +#define UDBG_SEGV (1 << 3) +#define UDBG_BUS (1 << 4) + +extern unsigned int user_debug; + +extern void disable_hlt(void); +extern void enable_hlt(void); + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_ARM_SYSTEM_MISC_H */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 2958976d867b..71f6536d17ac 100644 --- a/arch/arm/include/asm/uaccess.h +++ 
b/arch/arm/include/asm/uaccess.h @@ -16,8 +16,8 @@ #include <asm/errno.h> #include <asm/memory.h> #include <asm/domain.h> -#include <asm/system.h> #include <asm/unified.h> +#include <asm/compiler.h> #define VERIFY_READ 0 #define VERIFY_WRITE 1 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 5b0bce61eb69..b57c75e0b01f 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -18,7 +18,6 @@ #include <linux/io.h> #include <asm/checksum.h> -#include <asm/system.h> #include <asm/ftrace.h> /* diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c index ddba41d1fcf1..d0d1e83150c9 100644 --- a/arch/arm/kernel/elf.c +++ b/arch/arm/kernel/elf.c @@ -3,6 +3,7 @@ #include <linux/personality.h> #include <linux/binfmts.h> #include <linux/elf.h> +#include <asm/system_info.h> int elf_check_arch(const struct elf32_hdr *x) { diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 22f0ed324f37..8ec5eed55e37 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -26,7 +26,7 @@ #include <asm/unwind.h> #include <asm/unistd.h> #include <asm/tls.h> -#include <asm/system.h> +#include <asm/system_info.h> #include "entry-header.S" #include <asm/entry-macro-multi.S> diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index 4c164ece5891..c32f8456aa09 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c @@ -42,9 +42,9 @@ #include <linux/seq_file.h> #include <asm/cacheflush.h> +#include <asm/cp15.h> #include <asm/fiq.h> #include <asm/irq.h> -#include <asm/system.h> #include <asm/traps.h> static unsigned long no_fiq_insn; diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index d46f25968bec..278cfc144f44 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -17,8 +17,8 @@ #include <asm/assembler.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> +#include <asm/cp15.h> #include <asm/thread_info.h> -#include <asm/system.h> /* * Kernel startup entry point. diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 6d5791144066..a2e9694a68ee 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -15,12 +15,12 @@ #include <linux/init.h> #include <asm/assembler.h> +#include <asm/cp15.h> #include <asm/domain.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> #include <asm/memory.h> #include <asm/thread_info.h> -#include <asm/system.h> #include <asm/pgtable.h> #ifdef CONFIG_DEBUG_LL diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index d6a95ef9131d..ba386bd94107 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -34,7 +34,6 @@ #include <asm/current.h> #include <asm/hw_breakpoint.h> #include <asm/kdebug.h> -#include <asm/system.h> #include <asm/traps.h> /* Breakpoint currently in use for each BRP. 
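Most of the ARM include churn above and below follows one rule: code that used <asm/system.h> only for CPU or board identification now includes <asm/system_info.h>, while code that needs the restart/idle/debug hooks includes <asm/system_misc.h>. A hypothetical fragment (assumed driver code, not from this patch) showing the former:

#include <linux/types.h>
#include <asm/system_info.h>	/* cpu_architecture(), system_rev, CPU_ARCH_* */

static bool want_fancy_errata_workaround(void)
{
	/*
	 * Both symbols used to be reachable via <asm/system.h>; after this
	 * series they must be picked up from <asm/system_info.h> directly.
	 * The revision check itself is made up for the example.
	 */
	return cpu_architecture() >= CPU_ARCH_ARMv7 && (system_rev & 0xff) >= 0x20;
}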
*/ diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 3efd82cc95f0..6a6a097edd61 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -36,7 +36,6 @@ #include <linux/proc_fs.h> #include <asm/exception.h> -#include <asm/system.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/kernel/kprobes-common.c index a5394fb4e4e0..18a76282970e 100644 --- a/arch/arm/kernel/kprobes-common.c +++ b/arch/arm/kernel/kprobes-common.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/kprobes.h> +#include <asm/system_info.h> #include "kprobes.h" diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 764bd456d84f..56995983eed8 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -12,7 +12,7 @@ #include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/mach-types.h> -#include <asm/system.h> +#include <asm/system_misc.h> extern const unsigned char relocate_new_kernel[]; extern const unsigned int relocate_new_kernel_size; diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index d3eca4524533..7b9cddef6e53 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -35,7 +35,6 @@ #include <asm/cacheflush.h> #include <asm/leds.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/thread_notify.h> #include <asm/stacktrace.h> #include <asm/mach/time.h> diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index ede6443c34d9..45956c9d0ef0 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -26,7 +26,6 @@ #include <linux/audit.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/traps.h> #define REG_PC 15 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index a255c39612ca..9e0fdb3a1988 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -33,6 +33,7 @@ #include <linux/sort.h> #include <asm/unified.h> +#include <asm/cp15.h> #include <asm/cpu.h> #include <asm/cputype.h> #include <asm/elf.h> @@ -44,12 +45,13 @@ #include <asm/cacheflush.h> #include <asm/cachetype.h> #include <asm/tlbflush.h> -#include <asm/system.h> #include <asm/prom.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> +#include <asm/system_info.h> +#include <asm/system_misc.h> #include <asm/traps.h> #include <asm/unwind.h> #include <asm/memblock.h> diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 1f268bda4552..987dcf33415c 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -4,7 +4,6 @@ #include <asm/assembler.h> #include <asm/glue-cache.h> #include <asm/glue-proc.h> -#include <asm/system.h> .text /* diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index 01ec453bb924..30ae6bb4a310 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -16,6 +16,7 @@ #include <asm/cputype.h> #include <asm/mach/map.h> #include <asm/memory.h> +#include <asm/system_info.h> #include "tcm.h" static struct gen_pool *tcm_pool; diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c index 9cb7aaca159f..aab899764053 100644 --- a/arch/arm/kernel/thumbee.c +++ b/arch/arm/kernel/thumbee.c @@ -20,6 +20,7 @@ #include <linux/kernel.h> #include <linux/init.h> +#include <asm/system_info.h> #include <asm/thread_notify.h> /* diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index f84dfe67724f..cd77743472a2 100644 --- a/arch/arm/kernel/traps.c +++ 
b/arch/arm/kernel/traps.c @@ -29,11 +29,11 @@ #include <linux/atomic.h> #include <asm/cacheflush.h> #include <asm/exception.h> -#include <asm/system.h> #include <asm/unistd.h> #include <asm/traps.h> #include <asm/unwind.h> #include <asm/tls.h> +#include <asm/system_misc.h> #include "signal.h" diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig index e55cdcbd81fb..45db05d8d94c 100644 --- a/arch/arm/mach-at91/Kconfig +++ b/arch/arm/mach-at91/Kconfig @@ -20,9 +20,11 @@ config HAVE_AT91_USART5 config AT91_SAM9_ALT_RESET bool + default !ARCH_AT91X40 config AT91_SAM9G45_RESET bool + default !ARCH_AT91X40 menu "Atmel AT91 System-on-Chip" @@ -45,7 +47,6 @@ config ARCH_AT91SAM9260 select HAVE_AT91_USART4 select HAVE_AT91_USART5 select HAVE_NET_MACB - select AT91_SAM9_ALT_RESET config ARCH_AT91SAM9261 bool "AT91SAM9261" @@ -53,7 +54,6 @@ config ARCH_AT91SAM9261 select GENERIC_CLOCKEVENTS select HAVE_FB_ATMEL select HAVE_AT91_DBGU0 - select AT91_SAM9_ALT_RESET config ARCH_AT91SAM9G10 bool "AT91SAM9G10" @@ -61,7 +61,6 @@ config ARCH_AT91SAM9G10 select GENERIC_CLOCKEVENTS select HAVE_AT91_DBGU0 select HAVE_FB_ATMEL - select AT91_SAM9_ALT_RESET config ARCH_AT91SAM9263 bool "AT91SAM9263" @@ -70,7 +69,6 @@ config ARCH_AT91SAM9263 select HAVE_FB_ATMEL select HAVE_NET_MACB select HAVE_AT91_DBGU1 - select AT91_SAM9_ALT_RESET config ARCH_AT91SAM9RL bool "AT91SAM9RL" @@ -79,7 +77,6 @@ config ARCH_AT91SAM9RL select HAVE_AT91_USART3 select HAVE_FB_ATMEL select HAVE_AT91_DBGU0 - select AT91_SAM9_ALT_RESET config ARCH_AT91SAM9G20 bool "AT91SAM9G20" @@ -90,7 +87,6 @@ config ARCH_AT91SAM9G20 select HAVE_AT91_USART4 select HAVE_AT91_USART5 select HAVE_NET_MACB - select AT91_SAM9_ALT_RESET config ARCH_AT91SAM9G45 bool "AT91SAM9G45" @@ -100,7 +96,6 @@ config ARCH_AT91SAM9G45 select HAVE_FB_ATMEL select HAVE_NET_MACB select HAVE_AT91_DBGU1 - select AT91_SAM9G45_RESET config ARCH_AT91SAM9X5 bool "AT91SAM9x5 family" @@ -109,7 +104,6 @@ config ARCH_AT91SAM9X5 select HAVE_FB_ATMEL select HAVE_NET_MACB select HAVE_AT91_DBGU0 - select AT91_SAM9G45_RESET config ARCH_AT91X40 bool "AT91x40" diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c index 0df1045311e4..364c19357e60 100644 --- a/arch/arm/mach-at91/at91rm9200.c +++ b/arch/arm/mach-at91/at91rm9200.c @@ -15,6 +15,7 @@ #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> +#include <asm/system_misc.h> #include <mach/at91rm9200.h> #include <mach/at91_pmc.h> #include <mach/at91_st.h> diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c index 14b5a9c9a514..46f774233298 100644 --- a/arch/arm/mach-at91/at91sam9260.c +++ b/arch/arm/mach-at91/at91sam9260.c @@ -16,6 +16,7 @@ #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> +#include <asm/system_misc.h> #include <mach/cpu.h> #include <mach/at91_dbgu.h> #include <mach/at91sam9260.h> @@ -216,6 +217,7 @@ static struct clk_lookup periph_clocks_lookups[] = { CLKDEV_CON_DEV_ID("t0_clk", "fffdc000.timer", &tc3_clk), CLKDEV_CON_DEV_ID("t1_clk", "fffdc000.timer", &tc4_clk), CLKDEV_CON_DEV_ID("t2_clk", "fffdc000.timer", &tc5_clk), + CLKDEV_CON_DEV_ID("hclk", "500000.ohci", &ohci_clk), /* fake hclk clock */ CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk), CLKDEV_CON_ID("pioA", &pioA_clk), diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c index 684c5dfd92ac..7de81e6222f1 100644 --- a/arch/arm/mach-at91/at91sam9261.c +++ b/arch/arm/mach-at91/at91sam9261.c @@ -16,6 +16,7 @@ #include <asm/irq.h> 
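The AT91 SoC files below gain <asm/system_misc.h> because that is where the arm_pm_restart / arm_pm_idle hooks and soft_restart() are declared. A minimal hypothetical machine-init sketch (not from this patch) of the pattern those files rely on:

#include <linux/init.h>
#include <asm/system_misc.h>

static void my_soc_idle(void)
{
	cpu_do_idle();			/* assumed: the usual WFI helper */
}

static void my_soc_restart(char mode, const char *cmd)
{
	soft_restart(0);		/* declared in <asm/system_misc.h> */
}

static void __init my_soc_init(void)
{
	arm_pm_idle = my_soc_idle;
	arm_pm_restart = my_soc_restart;
}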
#include <asm/mach/arch.h> #include <asm/mach/map.h> +#include <asm/system_misc.h> #include <mach/cpu.h> #include <mach/at91sam9261.h> #include <mach/at91_pmc.h> diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c index 0b4fa5a7f685..ef301be66575 100644 --- a/arch/arm/mach-at91/at91sam9263.c +++ b/arch/arm/mach-at91/at91sam9263.c @@ -16,6 +16,7 @@ #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> +#include <asm/system_misc.h> #include <mach/at91sam9263.h> #include <mach/at91_pmc.h> #include <mach/at91_rstc.h> diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c index 0014573dfe17..d222f8333dab 100644 --- a/arch/arm/mach-at91/at91sam9g45.c +++ b/arch/arm/mach-at91/at91sam9g45.c @@ -16,6 +16,7 @@ #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> +#include <asm/system_misc.h> #include <mach/at91sam9g45.h> #include <mach/at91_pmc.h> #include <mach/cpu.h> @@ -232,6 +233,8 @@ static struct clk_lookup periph_clocks_lookups[] = { /* more tc lookup table for DT entries */ CLKDEV_CON_DEV_ID("t0_clk", "fff7c000.timer", &tcb0_clk), CLKDEV_CON_DEV_ID("t0_clk", "fffd4000.timer", &tcb0_clk), + CLKDEV_CON_DEV_ID("hclk", "700000.ohci", &uhphs_clk), + CLKDEV_CON_DEV_ID("ehci_clk", "800000.ehci", &uhphs_clk), /* fake hclk clock */ CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &uhphs_clk), CLKDEV_CON_ID("pioA", &pioA_clk), diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c index 63d9372eb18e..d9f2774f385e 100644 --- a/arch/arm/mach-at91/at91sam9rl.c +++ b/arch/arm/mach-at91/at91sam9rl.c @@ -15,6 +15,7 @@ #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> +#include <asm/system_misc.h> #include <mach/cpu.h> #include <mach/at91_dbgu.h> #include <mach/at91sam9rl.h> diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c index a34d96afa746..b6831eeb7b76 100644 --- a/arch/arm/mach-at91/at91sam9x5.c +++ b/arch/arm/mach-at91/at91sam9x5.c @@ -131,7 +131,7 @@ static struct clk dma1_clk = { .type = CLK_TYPE_PERIPHERAL, }; static struct clk uhphs_clk = { - .name = "uhphs_clk", + .name = "uhphs", .pmc_mask = 1 << AT91SAM9X5_ID_UHPHS, .type = CLK_TYPE_PERIPHERAL, }; @@ -230,6 +230,9 @@ static struct clk_lookup periph_clocks_lookups[] = { /* additional fake clock for macb_hclk */ CLKDEV_CON_DEV_ID("hclk", "f802c000.ethernet", &macb0_clk), CLKDEV_CON_DEV_ID("hclk", "f8030000.ethernet", &macb1_clk), + CLKDEV_CON_DEV_ID("hclk", "600000.ohci", &uhphs_clk), + CLKDEV_CON_DEV_ID("ohci_clk", "600000.ohci", &uhphs_clk), + CLKDEV_CON_DEV_ID("ehci_clk", "700000.ehci", &uhphs_clk), }; /* @@ -299,14 +302,8 @@ static void __init at91sam9x5_map_io(void) at91_init_sram(0, AT91SAM9X5_SRAM_BASE, AT91SAM9X5_SRAM_SIZE); } -static void __init at91sam9x5_ioremap_registers(void) -{ - at91_ioremap_ramc(0, AT91SAM9X5_BASE_DDRSDRC0, 512); -} - void __init at91sam9x5_initialize(void) { - arm_pm_restart = at91sam9g45_restart; at91_extern_irq = (1 << AT91SAM9X5_ID_IRQ0); /* Register GPIO subsystem (using DT) */ @@ -314,11 +311,6 @@ void __init at91sam9x5_initialize(void) } /* -------------------------------------------------------------------- - * AT91SAM9x5 devices (temporary before modification of code) - * -------------------------------------------------------------------- */ -void __init at91_add_device_nand(struct atmel_nand_data *data) {} - -/* -------------------------------------------------------------------- * Interrupt initialization * 
-------------------------------------------------------------------- */ /* @@ -362,7 +354,6 @@ static unsigned int at91sam9x5_default_irq_priority[NR_AIC_IRQS] __initdata = { struct at91_init_soc __initdata at91sam9x5_soc = { .map_io = at91sam9x5_map_io, .default_irq_priority = at91sam9x5_default_irq_priority, - .ioremap_registers = at91sam9x5_ioremap_registers, .register_clocks = at91sam9x5_register_clocks, .init = at91sam9x5_initialize, }; diff --git a/arch/arm/mach-at91/board-afeb-9260v1.c b/arch/arm/mach-at91/board-afeb-9260v1.c index 3bb40694b02d..161efbaa1029 100644 --- a/arch/arm/mach-at91/board-afeb-9260v1.c +++ b/arch/arm/mach-at91/board-afeb-9260v1.c @@ -138,6 +138,7 @@ static struct atmel_nand_data __initdata afeb9260_nand_data = { .rdy_pin = AT91_PIN_PC13, .enable_pin = AT91_PIN_PC14, .bus_width_16 = 0, + .ecc_mode = NAND_ECC_SOFT, .parts = afeb9260_nand_partition, .num_parts = ARRAY_SIZE(afeb9260_nand_partition), .det_pin = -EINVAL, diff --git a/arch/arm/mach-at91/board-cam60.c b/arch/arm/mach-at91/board-cam60.c index 8510e9e54988..c6d44ee0c77e 100644 --- a/arch/arm/mach-at91/board-cam60.c +++ b/arch/arm/mach-at91/board-cam60.c @@ -140,6 +140,7 @@ static struct atmel_nand_data __initdata cam60_nand_data = { .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PA9, .enable_pin = AT91_PIN_PA7, + .ecc_mode = NAND_ECC_SOFT, .parts = cam60_nand_partition, .num_parts = ARRAY_SIZE(cam60_nand_partition), }; diff --git a/arch/arm/mach-at91/board-cpu9krea.c b/arch/arm/mach-at91/board-cpu9krea.c index 989e1c5a9ca0..5f3680e7c883 100644 --- a/arch/arm/mach-at91/board-cpu9krea.c +++ b/arch/arm/mach-at91/board-cpu9krea.c @@ -117,6 +117,7 @@ static struct atmel_nand_data __initdata cpu9krea_nand_data = { .enable_pin = AT91_PIN_PC14, .bus_width_16 = 0, .det_pin = -EINVAL, + .ecc_mode = NAND_ECC_SOFT, }; #ifdef CONFIG_MACH_CPU9260 diff --git a/arch/arm/mach-at91/board-dt.c b/arch/arm/mach-at91/board-dt.c index 583b72472ad9..c18d4d307801 100644 --- a/arch/arm/mach-at91/board-dt.c +++ b/arch/arm/mach-at91/board-dt.c @@ -19,10 +19,7 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> -#include <mach/hardware.h> #include <mach/board.h> -#include <mach/system_rev.h> -#include <mach/at91sam9_smc.h> #include <asm/setup.h> #include <asm/irq.h> @@ -30,58 +27,9 @@ #include <asm/mach/map.h> #include <asm/mach/irq.h> -#include "sam9_smc.h" #include "generic.h" -static void __init ek_init_early(void) -{ - /* Initialize processor: 12.000 MHz crystal */ - at91_initialize(12000000); -} - -/* det_pin is not connected */ -static struct atmel_nand_data __initdata ek_nand_data = { - .ale = 21, - .cle = 22, - .det_pin = -EINVAL, - .rdy_pin = AT91_PIN_PC8, - .enable_pin = AT91_PIN_PC14, -}; - -static struct sam9_smc_config __initdata ek_nand_smc_config = { - .ncs_read_setup = 0, - .nrd_setup = 2, - .ncs_write_setup = 0, - .nwe_setup = 2, - - .ncs_read_pulse = 4, - .nrd_pulse = 4, - .ncs_write_pulse = 4, - .nwe_pulse = 4, - - .read_cycle = 7, - .write_cycle = 7, - - .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE, - .tdf_cycles = 3, -}; - -static void __init ek_add_device_nand(void) -{ - ek_nand_data.bus_width_16 = board_have_nand_16bit(); - /* setup bus-width (8 or 16) */ - if (ek_nand_data.bus_width_16) - ek_nand_smc_config.mode |= AT91_SMC_DBW_16; - else - ek_nand_smc_config.mode |= AT91_SMC_DBW_8; - - /* configure chip-select 3 (NAND) */ - sam9_smc_configure(0, 3, &ek_nand_smc_config); - - at91_add_device_nand(&ek_nand_data); -} - static const struct of_device_id irq_of_match[] __initconst = 
{ { .compatible = "atmel,at91rm9200-aic", .data = at91_aic_of_init }, @@ -98,9 +46,6 @@ static void __init at91_dt_init_irq(void) static void __init at91_dt_device_init(void) { of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); - - /* NAND */ - ek_add_device_nand(); } static const char *at91_dt_board_compat[] __initdata = { @@ -114,7 +59,7 @@ DT_MACHINE_START(at91sam_dt, "Atmel AT91SAM (Device Tree)") /* Maintainer: Atmel */ .timer = &at91sam926x_timer, .map_io = at91_map_io, - .init_early = ek_init_early, + .init_early = at91_dt_initialize, .init_irq = at91_dt_init_irq, .init_machine = at91_dt_device_init, .dt_compat = at91_dt_board_compat, diff --git a/arch/arm/mach-at91/board-kb9202.c b/arch/arm/mach-at91/board-kb9202.c index bb9914582013..59b92aab9bcf 100644 --- a/arch/arm/mach-at91/board-kb9202.c +++ b/arch/arm/mach-at91/board-kb9202.c @@ -108,6 +108,7 @@ static struct atmel_nand_data __initdata kb9202_nand_data = { .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PC29, .enable_pin = AT91_PIN_PC28, + .ecc_mode = NAND_ECC_SOFT, .parts = kb9202_nand_partition, .num_parts = ARRAY_SIZE(kb9202_nand_partition), }; diff --git a/arch/arm/mach-at91/board-neocore926.c b/arch/arm/mach-at91/board-neocore926.c index 3f8617c0e04e..57d5f6a4726a 100644 --- a/arch/arm/mach-at91/board-neocore926.c +++ b/arch/arm/mach-at91/board-neocore926.c @@ -190,6 +190,7 @@ static struct atmel_nand_data __initdata neocore926_nand_data = { .rdy_pin = AT91_PIN_PB19, .rdy_pin_active_low = 1, .enable_pin = AT91_PIN_PD15, + .ecc_mode = NAND_ECC_SOFT, .parts = neocore926_nand_partition, .num_parts = ARRAY_SIZE(neocore926_nand_partition), .det_pin = -EINVAL, diff --git a/arch/arm/mach-at91/board-qil-a9260.c b/arch/arm/mach-at91/board-qil-a9260.c index e029d220cb84..b6ed5ed7081a 100644 --- a/arch/arm/mach-at91/board-qil-a9260.c +++ b/arch/arm/mach-at91/board-qil-a9260.c @@ -138,6 +138,8 @@ static struct atmel_nand_data __initdata ek_nand_data = { .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PC13, .enable_pin = AT91_PIN_PC14, + .ecc_mode = NAND_ECC_SOFT, + .on_flash_bbt = 1, .parts = ek_nand_partition, .num_parts = ARRAY_SIZE(ek_nand_partition), }; diff --git a/arch/arm/mach-at91/board-rm9200dk.c b/arch/arm/mach-at91/board-rm9200dk.c index 9083df04e7ed..01332aa538b2 100644 --- a/arch/arm/mach-at91/board-rm9200dk.c +++ b/arch/arm/mach-at91/board-rm9200dk.c @@ -150,6 +150,8 @@ static struct atmel_nand_data __initdata dk_nand_data = { .det_pin = AT91_PIN_PB1, .rdy_pin = AT91_PIN_PC2, .enable_pin = -EINVAL, + .ecc_mode = NAND_ECC_SOFT, + .on_flash_bbt = 1, .parts = dk_nand_partition, .num_parts = ARRAY_SIZE(dk_nand_partition), }; diff --git a/arch/arm/mach-at91/board-sam9-l9260.c b/arch/arm/mach-at91/board-sam9-l9260.c index 84bce587735f..e8b116b6cba6 100644 --- a/arch/arm/mach-at91/board-sam9-l9260.c +++ b/arch/arm/mach-at91/board-sam9-l9260.c @@ -139,6 +139,7 @@ static struct atmel_nand_data __initdata ek_nand_data = { .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PC13, .enable_pin = AT91_PIN_PC14, + .ecc_mode = NAND_ECC_SOFT, .parts = ek_nand_partition, .num_parts = ARRAY_SIZE(ek_nand_partition), }; diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c index be8233bcabdc..d5aec55b0eb4 100644 --- a/arch/arm/mach-at91/board-sam9260ek.c +++ b/arch/arm/mach-at91/board-sam9260ek.c @@ -181,6 +181,8 @@ static struct atmel_nand_data __initdata ek_nand_data = { .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PC13, .enable_pin = AT91_PIN_PC14, + .ecc_mode = NAND_ECC_SOFT, + .on_flash_bbt = 1, .parts = 
ek_nand_partition, .num_parts = ARRAY_SIZE(ek_nand_partition), }; diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c index 40895072a1a7..c3f994462864 100644 --- a/arch/arm/mach-at91/board-sam9261ek.c +++ b/arch/arm/mach-at91/board-sam9261ek.c @@ -187,6 +187,8 @@ static struct atmel_nand_data __initdata ek_nand_data = { .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PC15, .enable_pin = AT91_PIN_PC14, + .ecc_mode = NAND_ECC_SOFT, + .on_flash_bbt = 1, .parts = ek_nand_partition, .num_parts = ARRAY_SIZE(ek_nand_partition), }; diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c index 29f66052fe63..66f0ddf4b2ae 100644 --- a/arch/arm/mach-at91/board-sam9263ek.c +++ b/arch/arm/mach-at91/board-sam9263ek.c @@ -187,6 +187,8 @@ static struct atmel_nand_data __initdata ek_nand_data = { .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PA22, .enable_pin = AT91_PIN_PD15, + .ecc_mode = NAND_ECC_SOFT, + .on_flash_bbt = 1, .parts = ek_nand_partition, .num_parts = ARRAY_SIZE(ek_nand_partition), }; diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c index 843d6286c6f4..8923ec9f5831 100644 --- a/arch/arm/mach-at91/board-sam9g20ek.c +++ b/arch/arm/mach-at91/board-sam9g20ek.c @@ -166,6 +166,8 @@ static struct atmel_nand_data __initdata ek_nand_data = { .rdy_pin = AT91_PIN_PC13, .enable_pin = AT91_PIN_PC14, .det_pin = -EINVAL, + .ecc_mode = NAND_ECC_SOFT, + .on_flash_bbt = 1, .parts = ek_nand_partition, .num_parts = ARRAY_SIZE(ek_nand_partition), }; diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c index 57497e2b8878..e1bea73e6b30 100644 --- a/arch/arm/mach-at91/board-sam9m10g45ek.c +++ b/arch/arm/mach-at91/board-sam9m10g45ek.c @@ -148,6 +148,8 @@ static struct atmel_nand_data __initdata ek_nand_data = { .rdy_pin = AT91_PIN_PC8, .enable_pin = AT91_PIN_PC14, .det_pin = -EINVAL, + .ecc_mode = NAND_ECC_SOFT, + .on_flash_bbt = 1, .parts = ek_nand_partition, .num_parts = ARRAY_SIZE(ek_nand_partition), }; diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c index c1366d0032bf..b109ce2ba864 100644 --- a/arch/arm/mach-at91/board-sam9rlek.c +++ b/arch/arm/mach-at91/board-sam9rlek.c @@ -94,6 +94,8 @@ static struct atmel_nand_data __initdata ek_nand_data = { .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PD17, .enable_pin = AT91_PIN_PB6, + .ecc_mode = NAND_ECC_SOFT, + .on_flash_bbt = 1, .parts = ek_nand_partition, .num_parts = ARRAY_SIZE(ek_nand_partition), }; diff --git a/arch/arm/mach-at91/board-snapper9260.c b/arch/arm/mach-at91/board-snapper9260.c index 3c2e3fcc310c..ebc9d01ce742 100644 --- a/arch/arm/mach-at91/board-snapper9260.c +++ b/arch/arm/mach-at91/board-snapper9260.c @@ -110,6 +110,7 @@ static struct atmel_nand_data __initdata snapper9260_nand_data = { .bus_width_16 = 0, .enable_pin = -EINVAL, .det_pin = -EINVAL, + .ecc_mode = NAND_ECC_SOFT, }; static struct sam9_smc_config __initdata snapper9260_nand_smc_config = { diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c index 72eb3b4d9ab6..7640049410a0 100644 --- a/arch/arm/mach-at91/board-stamp9g20.c +++ b/arch/arm/mach-at91/board-stamp9g20.c @@ -86,6 +86,7 @@ static struct atmel_nand_data __initdata nand_data = { .enable_pin = AT91_PIN_PC14, .bus_width_16 = 0, .det_pin = -EINVAL, + .ecc_mode = NAND_ECC_SOFT, }; static struct sam9_smc_config __initdata nand_smc_config = { diff --git a/arch/arm/mach-at91/board-usb-a926x.c b/arch/arm/mach-at91/board-usb-a926x.c 
index 26c36fc2d1e5..b7483a3d0980 100644 --- a/arch/arm/mach-at91/board-usb-a926x.c +++ b/arch/arm/mach-at91/board-usb-a926x.c @@ -198,6 +198,8 @@ static struct atmel_nand_data __initdata ek_nand_data = { .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PA22, .enable_pin = AT91_PIN_PD15, + .ecc_mode = NAND_ECC_SOFT, + .on_flash_bbt = 1, .parts = ek_nand_partition, .num_parts = ARRAY_SIZE(ek_nand_partition), }; diff --git a/arch/arm/mach-at91/board-yl-9200.c b/arch/arm/mach-at91/board-yl-9200.c index 52f460768f71..38dd279d30b2 100644 --- a/arch/arm/mach-at91/board-yl-9200.c +++ b/arch/arm/mach-at91/board-yl-9200.c @@ -182,6 +182,7 @@ static struct atmel_nand_data __initdata yl9200_nand_data = { .det_pin = -EINVAL, .rdy_pin = AT91_PIN_PC14, /* R/!B (Sheet10) */ .enable_pin = AT91_PIN_PC15, /* !CE (Sheet10) */ + .ecc_mode = NAND_ECC_SOFT, .parts = yl9200_nand_partition, .num_parts = ARRAY_SIZE(yl9200_nand_partition), }; diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c index be51ca7f694d..a0f4d7424cdc 100644 --- a/arch/arm/mach-at91/clock.c +++ b/arch/arm/mach-at91/clock.c @@ -23,6 +23,7 @@ #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/of_address.h> #include <mach/hardware.h> #include <mach/at91_pmc.h> @@ -671,16 +672,12 @@ static void __init at91_upll_usbfs_clock_init(unsigned long main_clock) uhpck.rate_hz /= 1 + ((at91_pmc_read(AT91_PMC_USB) & AT91_PMC_OHCIUSBDIV) >> 8); } -int __init at91_clock_init(unsigned long main_clock) +static int __init at91_pmc_init(unsigned long main_clock) { unsigned tmp, freq, mckr; int i; int pll_overclock = false; - at91_pmc_base = ioremap(AT91_PMC, 256); - if (!at91_pmc_base) - panic("Impossible to ioremap AT91_PMC 0x%x\n", AT91_PMC); - /* * When the bootloader initialized the main oscillator correctly, * there's no problem using the cycle counter. But if it didn't, @@ -802,6 +799,55 @@ int __init at91_clock_init(unsigned long main_clock) return 0; } +#if defined(CONFIG_OF) +static struct of_device_id pmc_ids[] = { + { .compatible = "atmel,at91rm9200-pmc" }, + { /*sentinel*/ } +}; + +static struct of_device_id osc_ids[] = { + { .compatible = "atmel,osc" }, + { /*sentinel*/ } +}; + +int __init at91_dt_clock_init(void) +{ + struct device_node *np; + u32 main_clock = 0; + + np = of_find_matching_node(NULL, pmc_ids); + if (!np) + panic("unable to find compatible pmc node in dtb\n"); + + at91_pmc_base = of_iomap(np, 0); + if (!at91_pmc_base) + panic("unable to map pmc cpu registers\n"); + + of_node_put(np); + + /* retrieve the freqency of fixed clocks from device tree */ + np = of_find_matching_node(NULL, osc_ids); + if (np) { + u32 rate; + if (!of_property_read_u32(np, "clock-frequency", &rate)) + main_clock = rate; + } + + of_node_put(np); + + return at91_pmc_init(main_clock); +} +#endif + +int __init at91_clock_init(unsigned long main_clock) +{ + at91_pmc_base = ioremap(AT91_PMC, 256); + if (!at91_pmc_base) + panic("Impossible to ioremap AT91_PMC 0x%x\n", AT91_PMC); + + return at91_pmc_init(main_clock); +} + /* * Several unused clocks may be active. Turn them off. 
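The CLKDEV_CON_DEV_ID("hclk", "500000.ohci", ...) style entries added to the at91sam9260/9g45/9x5 clock tables above exist so the same peripheral clock can be looked up under its device-tree name as well as the legacy platform-device name. A hypothetical consumer-side sketch using the standard clk API (not from this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_ohci_probe(struct platform_device *pdev)
{
	struct clk *hclk;

	/*
	 * Matches the "hclk" connection id regardless of whether the
	 * device is named "at91_ohci" (board file) or "500000.ohci" (DT).
	 */
	hclk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(hclk))
		return PTR_ERR(hclk);

	clk_enable(hclk);
	/* ... register the host controller, etc. ... */
	return 0;
}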
*/ diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h index 459f01a4a546..dd9b346c451d 100644 --- a/arch/arm/mach-at91/generic.h +++ b/arch/arm/mach-at91/generic.h @@ -20,6 +20,7 @@ extern void __init at91_init_sram(int bank, unsigned long base, extern void __init at91rm9200_set_type(int type); extern void __init at91_initialize(unsigned long main_clock); extern void __init at91x40_initialize(unsigned long main_clock); +extern void __init at91_dt_initialize(void); /* Interrupts */ extern void __init at91_init_irq_default(void); @@ -52,6 +53,7 @@ extern void __init at91sam9rl_set_console_clock(int id); extern void __init at91sam9g45_set_console_clock(int id); #ifdef CONFIG_AT91_PMC_UNIT extern int __init at91_clock_init(unsigned long main_clock); +extern int __init at91_dt_clock_init(void); #else static int inline at91_clock_init(unsigned long main_clock) { return 0; } #endif diff --git a/arch/arm/mach-at91/include/mach/at91_shdwc.h b/arch/arm/mach-at91/include/mach/at91_shdwc.h index 1d4fe822c77a..60478ea8bd46 100644 --- a/arch/arm/mach-at91/include/mach/at91_shdwc.h +++ b/arch/arm/mach-at91/include/mach/at91_shdwc.h @@ -36,9 +36,11 @@ extern void __iomem *at91_shdwc_base; #define AT91_SHDW_WKMODE0_HIGH 1 #define AT91_SHDW_WKMODE0_LOW 2 #define AT91_SHDW_WKMODE0_ANYLEVEL 3 -#define AT91_SHDW_CPTWK0 (0xf << 4) /* Counter On Wake Up 0 */ +#define AT91_SHDW_CPTWK0_MAX 0xf /* Maximum Counter On Wake Up 0 */ +#define AT91_SHDW_CPTWK0 (AT91_SHDW_CPTWK0_MAX << 4) /* Counter On Wake Up 0 */ #define AT91_SHDW_CPTWK0_(x) ((x) << 4) #define AT91_SHDW_RTTWKEN (1 << 16) /* Real Time Timer Wake-up Enable */ +#define AT91_SHDW_RTCWKEN (1 << 17) /* Real Time Clock Wake-up Enable */ #define AT91_SHDW_SR 0x08 /* Shut Down Status Register */ #define AT91_SHDW_WAKEUP0 (1 << 0) /* Wake-up 0 Status */ diff --git a/arch/arm/mach-at91/include/mach/at91sam9x5.h b/arch/arm/mach-at91/include/mach/at91sam9x5.h index a297a77d88e2..88e43d534cdf 100644 --- a/arch/arm/mach-at91/include/mach/at91sam9x5.h +++ b/arch/arm/mach-at91/include/mach/at91sam9x5.h @@ -55,11 +55,6 @@ #define AT91SAM9X5_BASE_USART2 0xf8024000 /* - * System Peripherals - */ -#define AT91SAM9X5_BASE_DDRSDRC0 0xffffe800 - -/* * Base addresses for early serial code (uncompress.h) */ #define AT91_DBGU AT91_BASE_DBGU0 diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h index dc8d6d4f17cf..544a5d5ce416 100644 --- a/arch/arm/mach-at91/include/mach/board.h +++ b/arch/arm/mach-at91/include/mach/board.h @@ -41,6 +41,7 @@ #include <sound/atmel-ac97c.h> #include <linux/serial.h> #include <linux/platform_data/macb.h> +#include <linux/platform_data/atmel.h> /* USB Device */ struct at91_udc_data { @@ -98,20 +99,6 @@ extern void __init at91_add_device_usbh(struct at91_usbh_data *data); extern void __init at91_add_device_usbh_ohci(struct at91_usbh_data *data); extern void __init at91_add_device_usbh_ehci(struct at91_usbh_data *data); - /* NAND / SmartMedia */ -struct atmel_nand_data { - int enable_pin; /* chip enable */ - int det_pin; /* card detect */ - int rdy_pin; /* ready/busy */ - u8 rdy_pin_active_low; /* rdy_pin value is inverted */ - u8 ale; /* address line number connected to ALE */ - u8 cle; /* address line number connected to CLE */ - u8 bus_width_16; /* buswidth is 16 bit */ - u8 correction_cap; /* PMECC correction capability */ - u16 sector_size; /* Sector size for PMECC */ - struct mtd_partition *parts; - unsigned int num_parts; -}; extern void __init at91_add_device_nand(struct 
atmel_nand_data *data); /* I2C*/ diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h index ec164a4124c9..ef79a9aafc08 100644 --- a/arch/arm/mach-at91/include/mach/system_rev.h +++ b/arch/arm/mach-at91/include/mach/system_rev.h @@ -7,6 +7,8 @@ #ifndef __ARCH_SYSTEM_REV_H__ #define __ARCH_SYSTEM_REV_H__ +#include <asm/system_info.h> + /* * board revision encoding * mach specific diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 6c9d5e69ac28..f630250c6b87 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -197,19 +197,6 @@ extern void at91_slow_clock(void __iomem *pmc, void __iomem *ramc0, extern u32 at91_slow_clock_sz; #endif -void __iomem *at91_ramc_base[2]; - -void __init at91_ioremap_ramc(int id, u32 addr, u32 size) -{ - if (id < 0 || id > 1) { - pr_emerg("Wrong RAM controller id (%d), cannot continue\n", id); - BUG(); - } - at91_ramc_base[id] = ioremap(addr, size); - if (!at91_ramc_base[id]) - panic("Impossible to ioremap ramc.%d 0x%x\n", id, addr); -} - static int at91_pm_enter(suspend_state_t state) { at91_gpio_suspend(); diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c index 372396c2ecb6..1083739e3065 100644 --- a/arch/arm/mach-at91/setup.c +++ b/arch/arm/mach-at91/setup.c @@ -9,6 +9,7 @@ #include <linux/io.h> #include <linux/mm.h> #include <linux/pm.h> +#include <linux/of_address.h> #include <asm/mach/map.h> @@ -51,6 +52,19 @@ void __init at91_init_interrupts(unsigned int *priority) at91_gpio_irq_setup(); } +void __iomem *at91_ramc_base[2]; + +void __init at91_ioremap_ramc(int id, u32 addr, u32 size) +{ + if (id < 0 || id > 1) { + pr_emerg("Wrong RAM controller id (%d), cannot continue\n", id); + BUG(); + } + at91_ramc_base[id] = ioremap(addr, size); + if (!at91_ramc_base[id]) + panic("Impossible to ioremap ramc.%d 0x%x\n", id, addr); +} + static struct map_desc sram_desc[2] __initdata; void __init at91_init_sram(int bank, unsigned long base, unsigned int length) @@ -285,6 +299,150 @@ void __init at91_ioremap_matrix(u32 base_addr) panic("Impossible to ioremap at91_matrix_base\n"); } +#if defined(CONFIG_OF) +static struct of_device_id rstc_ids[] = { + { .compatible = "atmel,at91sam9260-rstc", .data = at91sam9_alt_restart }, + { .compatible = "atmel,at91sam9g45-rstc", .data = at91sam9g45_restart }, + { /*sentinel*/ } +}; + +static void at91_dt_rstc(void) +{ + struct device_node *np; + const struct of_device_id *of_id; + + np = of_find_matching_node(NULL, rstc_ids); + if (!np) + panic("unable to find compatible rstc node in dtb\n"); + + at91_rstc_base = of_iomap(np, 0); + if (!at91_rstc_base) + panic("unable to map rstc cpu registers\n"); + + of_id = of_match_node(rstc_ids, np); + if (!of_id) + panic("AT91: rtsc no restart function availlable\n"); + + arm_pm_restart = of_id->data; + + of_node_put(np); +} + +static struct of_device_id ramc_ids[] = { + { .compatible = "atmel,at91sam9260-sdramc" }, + { .compatible = "atmel,at91sam9g45-ddramc" }, + { /*sentinel*/ } +}; + +static void at91_dt_ramc(void) +{ + struct device_node *np; + + np = of_find_matching_node(NULL, ramc_ids); + if (!np) + panic("unable to find compatible ram conroller node in dtb\n"); + + at91_ramc_base[0] = of_iomap(np, 0); + if (!at91_ramc_base[0]) + panic("unable to map ramc[0] cpu registers\n"); + /* the controller may have 2 banks */ + at91_ramc_base[1] = of_iomap(np, 1); + + of_node_put(np); +} + +static struct of_device_id shdwc_ids[] = { + { .compatible = "atmel,at91sam9260-shdwc", }, + { .compatible = 
"atmel,at91sam9rl-shdwc", }, + { .compatible = "atmel,at91sam9x5-shdwc", }, + { /*sentinel*/ } +}; + +static const char *shdwc_wakeup_modes[] = { + [AT91_SHDW_WKMODE0_NONE] = "none", + [AT91_SHDW_WKMODE0_HIGH] = "high", + [AT91_SHDW_WKMODE0_LOW] = "low", + [AT91_SHDW_WKMODE0_ANYLEVEL] = "any", +}; + +const int at91_dtget_shdwc_wakeup_mode(struct device_node *np) +{ + const char *pm; + int err, i; + + err = of_property_read_string(np, "atmel,wakeup-mode", &pm); + if (err < 0) + return AT91_SHDW_WKMODE0_ANYLEVEL; + + for (i = 0; i < ARRAY_SIZE(shdwc_wakeup_modes); i++) + if (!strcasecmp(pm, shdwc_wakeup_modes[i])) + return i; + + return -ENODEV; +} + +static void at91_dt_shdwc(void) +{ + struct device_node *np; + int wakeup_mode; + u32 reg; + u32 mode = 0; + + np = of_find_matching_node(NULL, shdwc_ids); + if (!np) { + pr_debug("AT91: unable to find compatible shutdown (shdwc) conroller node in dtb\n"); + return; + } + + at91_shdwc_base = of_iomap(np, 0); + if (!at91_shdwc_base) + panic("AT91: unable to map shdwc cpu registers\n"); + + wakeup_mode = at91_dtget_shdwc_wakeup_mode(np); + if (wakeup_mode < 0) { + pr_warn("AT91: shdwc unknown wakeup mode\n"); + goto end; + } + + if (!of_property_read_u32(np, "atmel,wakeup-counter", ®)) { + if (reg > AT91_SHDW_CPTWK0_MAX) { + pr_warn("AT91: shdwc wakeup conter 0x%x > 0x%x reduce it to 0x%x\n", + reg, AT91_SHDW_CPTWK0_MAX, AT91_SHDW_CPTWK0_MAX); + reg = AT91_SHDW_CPTWK0_MAX; + } + mode |= AT91_SHDW_CPTWK0_(reg); + } + + if (of_property_read_bool(np, "atmel,wakeup-rtc-timer")) + mode |= AT91_SHDW_RTCWKEN; + + if (of_property_read_bool(np, "atmel,wakeup-rtt-timer")) + mode |= AT91_SHDW_RTTWKEN; + + at91_shdwc_write(AT91_SHDW_MR, wakeup_mode | mode); + +end: + pm_power_off = at91sam9_poweroff; + + of_node_put(np); +} + +void __init at91_dt_initialize(void) +{ + at91_dt_rstc(); + at91_dt_ramc(); + at91_dt_shdwc(); + + /* Init clock subsystem */ + at91_dt_clock_init(); + + /* Register the processor-specific clocks */ + at91_boot_soc.register_clocks(); + + at91_boot_soc.init(); +} +#endif + void __init at91_initialize(unsigned long main_clock) { at91_boot_soc.ioremap_registers(); diff --git a/arch/arm/mach-clps711x/common.c b/arch/arm/mach-clps711x/common.c index 8736c1acc166..3c5b5bbf24e5 100644 --- a/arch/arm/mach-clps711x/common.c +++ b/arch/arm/mach-clps711x/common.c @@ -37,6 +37,7 @@ #include <asm/mach/map.h> #include <asm/mach/time.h> #include <asm/hardware/clps7111.h> +#include <asm/system_misc.h> /* * This maps the generic CLPS711x registers diff --git a/arch/arm/mach-clps711x/p720t-leds.c b/arch/arm/mach-clps711x/p720t-leds.c index 15121446efc8..dd9a6cdbeb02 100644 --- a/arch/arm/mach-clps711x/p720t-leds.c +++ b/arch/arm/mach-clps711x/p720t-leds.c @@ -25,7 +25,6 @@ #include <mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include <asm/mach-types.h> #include <asm/hardware/clps7111.h> diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index d5088900af6c..a70de24d1cbc 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -36,6 +36,7 @@ #include <asm/mach-types.h> #include <asm/mach/arch.h> +#include <asm/system_info.h> #include <mach/cp_intc.h> #include <mach/da8xx.h> diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index 864f676eccac..3683306e0245 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -613,6 +613,113 @@ static void __init 
evm_init_i2c(void) i2c_register_board_info(1, i2c_info, ARRAY_SIZE(i2c_info)); } +#define VENC_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL) + +/* venc standard timings */ +static struct vpbe_enc_mode_info dm644xevm_enc_std_timing[] = { + { + .name = "ntsc", + .timings_type = VPBE_ENC_STD, + .timings = {V4L2_STD_525_60}, + .interlaced = 1, + .xres = 720, + .yres = 480, + .aspect = {11, 10}, + .fps = {30000, 1001}, + .left_margin = 0x79, + .upper_margin = 0x10, + }, + { + .name = "pal", + .timings_type = VPBE_ENC_STD, + .timings = {V4L2_STD_625_50}, + .interlaced = 1, + .xres = 720, + .yres = 576, + .aspect = {54, 59}, + .fps = {25, 1}, + .left_margin = 0x7e, + .upper_margin = 0x16, + }, +}; + +/* venc dv preset timings */ +static struct vpbe_enc_mode_info dm644xevm_enc_preset_timing[] = { + { + .name = "480p59_94", + .timings_type = VPBE_ENC_DV_PRESET, + .timings = {V4L2_DV_480P59_94}, + .interlaced = 0, + .xres = 720, + .yres = 480, + .aspect = {1, 1}, + .fps = {5994, 100}, + .left_margin = 0x80, + .upper_margin = 0x20, + }, + { + .name = "576p50", + .timings_type = VPBE_ENC_DV_PRESET, + .timings = {V4L2_DV_576P50}, + .interlaced = 0, + .xres = 720, + .yres = 576, + .aspect = {1, 1}, + .fps = {50, 1}, + .left_margin = 0x7e, + .upper_margin = 0x30, + }, +}; + +/* + * The outputs available from VPBE + encoders. Keep the order same + * as that of encoders. First those from venc followed by that from + * encoders. Index in the output refers to index on a particular encoder. + * Driver uses this index to pass it to encoder when it supports more + * than one output. Userspace applications use index of the array to + * set an output. + */ +static struct vpbe_output dm644xevm_vpbe_outputs[] = { + { + .output = { + .index = 0, + .name = "Composite", + .type = V4L2_OUTPUT_TYPE_ANALOG, + .std = VENC_STD_ALL, + .capabilities = V4L2_OUT_CAP_STD, + }, + .subdev_name = VPBE_VENC_SUBDEV_NAME, + .default_mode = "ntsc", + .num_modes = ARRAY_SIZE(dm644xevm_enc_std_timing), + .modes = dm644xevm_enc_std_timing, + }, + { + .output = { + .index = 1, + .name = "Component", + .type = V4L2_OUTPUT_TYPE_ANALOG, + .capabilities = V4L2_OUT_CAP_PRESETS, + }, + .subdev_name = VPBE_VENC_SUBDEV_NAME, + .default_mode = "480p59_94", + .num_modes = ARRAY_SIZE(dm644xevm_enc_preset_timing), + .modes = dm644xevm_enc_preset_timing, + }, +}; + +static struct vpbe_config dm644xevm_display_cfg = { + .module_name = "dm644x-vpbe-display", + .i2c_adapter_id = 1, + .osd = { + .module_name = VPBE_OSD_SUBDEV_NAME, + }, + .venc = { + .module_name = VPBE_VENC_SUBDEV_NAME, + }, + .num_outputs = ARRAY_SIZE(dm644xevm_vpbe_outputs), + .outputs = dm644xevm_vpbe_outputs, +}; + static struct platform_device *davinci_evm_devices[] __initdata = { &davinci_fb_device, &rtc_dev, @@ -696,7 +803,7 @@ static __init void davinci_evm_init(void) evm_init_i2c(); davinci_setup_mmc(0, &dm6446evm_mmc_config); - dm644x_init_video(&dm644xevm_capture_cfg); + dm644x_init_video(&dm644xevm_capture_cfg, &dm644xevm_display_cfg); davinci_serial_init(&uart_config); dm644x_init_asp(&dm644x_evm_snd_data); diff --git a/arch/arm/mach-davinci/davinci.h b/arch/arm/mach-davinci/davinci.h index 9d708034b57f..3e519dad5bb9 100644 --- a/arch/arm/mach-davinci/davinci.h +++ b/arch/arm/mach-davinci/davinci.h @@ -29,9 +29,15 @@ #include <media/davinci/vpfe_capture.h> #include <media/davinci/vpif_types.h> +#include <media/davinci/vpss.h> +#include <media/davinci/vpbe_types.h> +#include <media/davinci/vpbe_venc.h> +#include <media/davinci/vpbe.h> +#include <media/davinci/vpbe_osd.h> #define 
DAVINCI_SYSTEM_MODULE_BASE 0x01c40000 #define SYSMOD_VIDCLKCTL 0x38 +#define SYSMOD_VPSS_CLKCTL 0x44 #define SYSMOD_VDD3P3VPWDN 0x48 #define SYSMOD_VSCLKDIS 0x6c #define SYSMOD_PUPDCTL1 0x7c @@ -83,7 +89,7 @@ void dm365_set_vpfe_config(struct vpfe_config *cfg); /* DM644x function declarations */ void __init dm644x_init(void); void __init dm644x_init_asp(struct snd_platform_data *pdata); -int __init dm644x_init_video(struct vpfe_config *); +int __init dm644x_init_video(struct vpfe_config *, struct vpbe_config *); /* DM646x function declarations */ void __init dm646x_init(void); diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index 23e81cafba8d..c8b866657fcb 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c @@ -627,7 +627,7 @@ static struct resource dm644x_vpfe_resources[] = { }, }; -static u64 vpfe_capture_dma_mask = DMA_BIT_MASK(32); +static u64 dm644x_video_dma_mask = DMA_BIT_MASK(32); static struct resource dm644x_ccdc_resource[] = { /* CCDC Base address */ { @@ -643,7 +643,7 @@ static struct platform_device dm644x_ccdc_dev = { .num_resources = ARRAY_SIZE(dm644x_ccdc_resource), .resource = dm644x_ccdc_resource, .dev = { - .dma_mask = &vpfe_capture_dma_mask, + .dma_mask = &dm644x_video_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -654,7 +654,134 @@ static struct platform_device dm644x_vpfe_dev = { .num_resources = ARRAY_SIZE(dm644x_vpfe_resources), .resource = dm644x_vpfe_resources, .dev = { - .dma_mask = &vpfe_capture_dma_mask, + .dma_mask = &dm644x_video_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +#define DM644X_OSD_BASE 0x01c72600 + +static struct resource dm644x_osd_resources[] = { + { + .start = DM644X_OSD_BASE, + .end = DM644X_OSD_BASE + 0x1ff, + .flags = IORESOURCE_MEM, + }, +}; + +static struct osd_platform_data dm644x_osd_data = { + .vpbe_type = VPBE_VERSION_1, +}; + +static struct platform_device dm644x_osd_dev = { + .name = VPBE_OSD_SUBDEV_NAME, + .id = -1, + .num_resources = ARRAY_SIZE(dm644x_osd_resources), + .resource = dm644x_osd_resources, + .dev = { + .dma_mask = &dm644x_video_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = &dm644x_osd_data, + }, +}; + +#define DM644X_VENC_BASE 0x01c72400 + +static struct resource dm644x_venc_resources[] = { + { + .start = DM644X_VENC_BASE, + .end = DM644X_VENC_BASE + 0x17f, + .flags = IORESOURCE_MEM, + }, +}; + +#define DM644X_VPSS_MUXSEL_PLL2_MODE BIT(0) +#define DM644X_VPSS_MUXSEL_VPBECLK_MODE BIT(1) +#define DM644X_VPSS_VENCLKEN BIT(3) +#define DM644X_VPSS_DACCLKEN BIT(4) + +static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type, + unsigned int mode) +{ + int ret = 0; + u32 v = DM644X_VPSS_VENCLKEN; + + switch (type) { + case VPBE_ENC_STD: + v |= DM644X_VPSS_DACCLKEN; + writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL)); + break; + case VPBE_ENC_DV_PRESET: + switch (mode) { + case V4L2_DV_480P59_94: + case V4L2_DV_576P50: + v |= DM644X_VPSS_MUXSEL_PLL2_MODE | + DM644X_VPSS_DACCLKEN; + writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL)); + break; + case V4L2_DV_720P60: + case V4L2_DV_1080I60: + case V4L2_DV_1080P30: + /* + * For HD, use external clock source since + * HD requires higher clock rate + */ + v |= DM644X_VPSS_MUXSEL_VPBECLK_MODE; + writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL)); + break; + default: + ret = -EINVAL; + break; + } + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static struct resource dm644x_v4l2_disp_resources[] = { + { + .start = IRQ_VENCINT, + .end = IRQ_VENCINT, + 
.flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device dm644x_vpbe_display = { + .name = "vpbe-v4l2", + .id = -1, + .num_resources = ARRAY_SIZE(dm644x_v4l2_disp_resources), + .resource = dm644x_v4l2_disp_resources, + .dev = { + .dma_mask = &dm644x_video_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, +}; + +static struct venc_platform_data dm644x_venc_pdata = { + .venc_type = VPBE_VERSION_1, + .setup_clock = dm644x_venc_setup_clock, +}; + +static struct platform_device dm644x_venc_dev = { + .name = VPBE_VENC_SUBDEV_NAME, + .id = -1, + .num_resources = ARRAY_SIZE(dm644x_venc_resources), + .resource = dm644x_venc_resources, + .dev = { + .dma_mask = &dm644x_video_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = &dm644x_venc_pdata, + }, +}; + +static struct platform_device dm644x_vpbe_dev = { + .name = "vpbe_controller", + .id = -1, + .dev = { + .dma_mask = &dm644x_video_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -786,17 +913,30 @@ void __init dm644x_init(void) davinci_map_sysmod(); } -int __init dm644x_init_video(struct vpfe_config *vpfe_cfg) +int __init dm644x_init_video(struct vpfe_config *vpfe_cfg, + struct vpbe_config *vpbe_cfg) { - dm644x_vpfe_dev.dev.platform_data = vpfe_cfg; - - /* Add ccdc clock aliases */ - clk_add_alias("master", dm644x_ccdc_dev.name, "vpss_master", NULL); - clk_add_alias("slave", dm644x_ccdc_dev.name, "vpss_slave", NULL); - - platform_device_register(&dm644x_vpss_device); - platform_device_register(&dm644x_ccdc_dev); - platform_device_register(&dm644x_vpfe_dev); + if (vpfe_cfg || vpbe_cfg) + platform_device_register(&dm644x_vpss_device); + + if (vpfe_cfg) { + dm644x_vpfe_dev.dev.platform_data = vpfe_cfg; + platform_device_register(&dm644x_ccdc_dev); + platform_device_register(&dm644x_vpfe_dev); + /* Add ccdc clock aliases */ + clk_add_alias("master", dm644x_ccdc_dev.name, + "vpss_master", NULL); + clk_add_alias("slave", dm644x_ccdc_dev.name, + "vpss_slave", NULL); + } + + if (vpbe_cfg) { + dm644x_vpbe_dev.dev.platform_data = vpbe_cfg; + platform_device_register(&dm644x_osd_dev); + platform_device_register(&dm644x_venc_dev); + platform_device_register(&dm644x_vpbe_dev); + platform_device_register(&dm644x_vpbe_display); + } return 0; } diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c index e400d75d11ae..8c9f56a3e8ec 100644 --- a/arch/arm/mach-ebsa110/core.c +++ b/arch/arm/mach-ebsa110/core.c @@ -22,7 +22,7 @@ #include <asm/mach-types.h> #include <asm/pgtable.h> #include <asm/page.h> -#include <asm/system.h> +#include <asm/system_misc.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> diff --git a/arch/arm/mach-ebsa110/leds.c b/arch/arm/mach-ebsa110/leds.c index d43121a30aa7..99e14e362500 100644 --- a/arch/arm/mach-ebsa110/leds.c +++ b/arch/arm/mach-ebsa110/leds.c @@ -17,7 +17,6 @@ #include <mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include <asm/mach-types.h> #include "core.h" diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index 2bf7d6e23989..0491ceef1cda 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -11,18 +11,19 @@ if ARCH_EXYNOS menu "SAMSUNG EXYNOS SoCs Support" -choice - prompt "EXYNOS System Type" - default ARCH_EXYNOS4 - config ARCH_EXYNOS4 bool "SAMSUNG EXYNOS4" + default y select HAVE_SMP select MIGHT_HAVE_CACHE_L2X0 help Samsung EXYNOS4 SoCs based systems -endchoice +config ARCH_EXYNOS5 + bool "SAMSUNG EXYNOS5" + select HAVE_SMP + help + Samsung EXYNOS5 (Cortex-A15) SoC based systems comment 
"EXYNOS SoCs" @@ -56,6 +57,13 @@ config SOC_EXYNOS4412 help Enable EXYNOS4412 SoC support +config SOC_EXYNOS5250 + bool "SAMSUNG EXYNOS5250" + default y + depends on ARCH_EXYNOS5 + help + Enable EXYNOS5250 SoC support + config EXYNOS4_MCT bool default y @@ -356,7 +364,7 @@ config MACH_SMDK4412 Machine support for Samsung SMDK4412 endif -comment "Flattened Device Tree based board for Exynos4 based SoC" +comment "Flattened Device Tree based board for EXYNOS SoCs" config MACH_EXYNOS4_DT bool "Samsung Exynos4 Machine using device tree" @@ -370,6 +378,15 @@ config MACH_EXYNOS4_DT Note: This is under development and not all peripherals can be supported with this machine file. +config MACH_EXYNOS5_DT + bool "SAMSUNG EXYNOS5 Machine using device tree" + select SOC_EXYNOS5250 + select USE_OF + select ARM_AMBA + help + Machine support for Samsung Exynos4 machine with device tree enabled. + Select this if a fdt blob is available for the EXYNOS4 SoC based board. + if ARCH_EXYNOS4 comment "Configuration for HSMMC 8-bit bus width" diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile index 9a4c09896509..8631840d1b5e 100644 --- a/arch/arm/mach-exynos/Makefile +++ b/arch/arm/mach-exynos/Makefile @@ -14,6 +14,7 @@ obj- := obj-$(CONFIG_ARCH_EXYNOS) += common.o obj-$(CONFIG_ARCH_EXYNOS4) += clock-exynos4.o +obj-$(CONFIG_ARCH_EXYNOS5) += clock-exynos5.o obj-$(CONFIG_CPU_EXYNOS4210) += clock-exynos4210.o obj-$(CONFIG_SOC_EXYNOS4212) += clock-exynos4212.o @@ -42,9 +43,11 @@ obj-$(CONFIG_MACH_SMDK4212) += mach-smdk4x12.o obj-$(CONFIG_MACH_SMDK4412) += mach-smdk4x12.o obj-$(CONFIG_MACH_EXYNOS4_DT) += mach-exynos4-dt.o +obj-$(CONFIG_MACH_EXYNOS5_DT) += mach-exynos5-dt.o # device support +obj-y += dev-uart.o obj-$(CONFIG_ARCH_EXYNOS4) += dev-audio.o obj-$(CONFIG_EXYNOS4_DEV_AHCI) += dev-ahci.o obj-$(CONFIG_EXYNOS4_DEV_SYSMMU) += dev-sysmmu.o @@ -52,7 +55,7 @@ obj-$(CONFIG_EXYNOS4_DEV_DWMCI) += dev-dwmci.o obj-$(CONFIG_EXYNOS4_DEV_DMA) += dma.o obj-$(CONFIG_EXYNOS4_DEV_USB_OHCI) += dev-ohci.o -obj-$(CONFIG_ARCH_EXYNOS4) += setup-i2c0.o +obj-$(CONFIG_ARCH_EXYNOS) += setup-i2c0.o obj-$(CONFIG_EXYNOS4_SETUP_FIMC) += setup-fimc.o obj-$(CONFIG_EXYNOS4_SETUP_FIMD0) += setup-fimd0.o obj-$(CONFIG_EXYNOS4_SETUP_I2C1) += setup-i2c1.o diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c index 200159dcb341..df54c2a92225 100644 --- a/arch/arm/mach-exynos/clock-exynos4.c +++ b/arch/arm/mach-exynos/clock-exynos4.c @@ -496,11 +496,6 @@ static struct clk exynos4_init_clocks_off[] = { .enable = exynos4_clk_ip_cam_ctrl, .ctrlbit = (1 << 3), }, { - .name = "fimd", - .devname = "exynos4-fb.0", - .enable = exynos4_clk_ip_lcd0_ctrl, - .ctrlbit = (1 << 0), - }, { .name = "hsmmc", .devname = "s3c-sdhci.0", .parent = &exynos4_clk_aclk_133.clk, @@ -796,6 +791,13 @@ static struct clk exynos4_clk_mdma1 = { .ctrlbit = ((1 << 8) | (1 << 5) | (1 << 2)), }; +static struct clk exynos4_clk_fimd0 = { + .name = "fimd", + .devname = "exynos4-fb.0", + .enable = exynos4_clk_ip_lcd0_ctrl, + .ctrlbit = (1 << 0), +}; + struct clk *exynos4_clkset_group_list[] = { [0] = &clk_ext_xtal_mux, [1] = &clk_xusbxti, @@ -1315,6 +1317,7 @@ static struct clk *exynos4_clk_cdev[] = { &exynos4_clk_pdma0, &exynos4_clk_pdma1, &exynos4_clk_mdma1, + &exynos4_clk_fimd0, }; static struct clksrc_clk *exynos4_clksrc_cdev[] = { @@ -1341,6 +1344,7 @@ static struct clk_lookup exynos4_clk_lookup[] = { CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &exynos4_clk_sclk_mmc1.clk), CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", 
&exynos4_clk_sclk_mmc2.clk), CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &exynos4_clk_sclk_mmc3.clk), + CLKDEV_INIT("exynos4-fb.0", "lcd", &exynos4_clk_fimd0), CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos4_clk_pdma0), CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos4_clk_pdma1), CLKDEV_INIT("dma-pl330.2", "apb_pclk", &exynos4_clk_mdma1), diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c new file mode 100644 index 000000000000..d013982d0f8e --- /dev/null +++ b/arch/arm/mach-exynos/clock-exynos5.c @@ -0,0 +1,1247 @@ +/* + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Clock support for EXYNOS5 SoCs + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/syscore_ops.h> + +#include <plat/cpu-freq.h> +#include <plat/clock.h> +#include <plat/cpu.h> +#include <plat/pll.h> +#include <plat/s5p-clock.h> +#include <plat/clock-clksrc.h> +#include <plat/pm.h> + +#include <mach/map.h> +#include <mach/regs-clock.h> +#include <mach/sysmmu.h> + +#include "common.h" + +#ifdef CONFIG_PM_SLEEP +static struct sleep_save exynos5_clock_save[] = { + /* will be implemented */ +}; +#endif + +static struct clk exynos5_clk_sclk_dptxphy = { + .name = "sclk_dptx", +}; + +static struct clk exynos5_clk_sclk_hdmi24m = { + .name = "sclk_hdmi24m", + .rate = 24000000, +}; + +static struct clk exynos5_clk_sclk_hdmi27m = { + .name = "sclk_hdmi27m", + .rate = 27000000, +}; + +static struct clk exynos5_clk_sclk_hdmiphy = { + .name = "sclk_hdmiphy", +}; + +static struct clk exynos5_clk_sclk_usbphy = { + .name = "sclk_usbphy", + .rate = 48000000, +}; + +static int exynos5_clksrc_mask_top_ctrl(struct clk *clk, int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_TOP, clk, enable); +} + +static int exynos5_clksrc_mask_disp1_0_ctrl(struct clk *clk, int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_DISP1_0, clk, enable); +} + +static int exynos5_clksrc_mask_fsys_ctrl(struct clk *clk, int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_FSYS, clk, enable); +} + +static int exynos5_clksrc_mask_gscl_ctrl(struct clk *clk, int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_GSCL, clk, enable); +} + +static int exynos5_clksrc_mask_peric0_ctrl(struct clk *clk, int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_PERIC0, clk, enable); +} + +static int exynos5_clk_ip_core_ctrl(struct clk *clk, int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKGATE_IP_CORE, clk, enable); +} + +static int exynos5_clk_ip_disp1_ctrl(struct clk *clk, int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKGATE_IP_DISP1, clk, enable); +} + +static int exynos5_clk_ip_fsys_ctrl(struct clk *clk, int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKGATE_IP_FSYS, clk, enable); +} + +static int exynos5_clk_block_ctrl(struct clk *clk, int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKGATE_BLOCK, clk, enable); +} + +static int exynos5_clk_ip_gen_ctrl(struct clk *clk, int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKGATE_IP_GEN, clk, enable); +} + +static int exynos5_clk_ip_gps_ctrl(struct clk *clk, int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKGATE_IP_GPS, clk, enable); +} + +static int exynos5_clk_ip_mfc_ctrl(struct clk *clk, int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKGATE_IP_MFC, clk, enable); +} + +static int exynos5_clk_ip_peric_ctrl(struct clk *clk, 
int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKGATE_IP_PERIC, clk, enable); +} + +static int exynos5_clk_ip_peris_ctrl(struct clk *clk, int enable) +{ + return s5p_gatectrl(EXYNOS5_CLKGATE_IP_PERIS, clk, enable); +} + +/* Core list of CMU_CPU side */ + +static struct clksrc_clk exynos5_clk_mout_apll = { + .clk = { + .name = "mout_apll", + }, + .sources = &clk_src_apll, + .reg_src = { .reg = EXYNOS5_CLKSRC_CPU, .shift = 0, .size = 1 }, +}; + +static struct clksrc_clk exynos5_clk_sclk_apll = { + .clk = { + .name = "sclk_apll", + .parent = &exynos5_clk_mout_apll.clk, + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_CPU0, .shift = 24, .size = 3 }, +}; + +static struct clksrc_clk exynos5_clk_mout_bpll = { + .clk = { + .name = "mout_bpll", + }, + .sources = &clk_src_bpll, + .reg_src = { .reg = EXYNOS5_CLKSRC_CDREX, .shift = 0, .size = 1 }, +}; + +static struct clk *exynos5_clk_src_bpll_user_list[] = { + [0] = &clk_fin_mpll, + [1] = &exynos5_clk_mout_bpll.clk, +}; + +static struct clksrc_sources exynos5_clk_src_bpll_user = { + .sources = exynos5_clk_src_bpll_user_list, + .nr_sources = ARRAY_SIZE(exynos5_clk_src_bpll_user_list), +}; + +static struct clksrc_clk exynos5_clk_mout_bpll_user = { + .clk = { + .name = "mout_bpll_user", + }, + .sources = &exynos5_clk_src_bpll_user, + .reg_src = { .reg = EXYNOS5_CLKSRC_TOP2, .shift = 24, .size = 1 }, +}; + +static struct clksrc_clk exynos5_clk_mout_cpll = { + .clk = { + .name = "mout_cpll", + }, + .sources = &clk_src_cpll, + .reg_src = { .reg = EXYNOS5_CLKSRC_TOP2, .shift = 8, .size = 1 }, +}; + +static struct clksrc_clk exynos5_clk_mout_epll = { + .clk = { + .name = "mout_epll", + }, + .sources = &clk_src_epll, + .reg_src = { .reg = EXYNOS5_CLKSRC_TOP2, .shift = 12, .size = 1 }, +}; + +struct clksrc_clk exynos5_clk_mout_mpll = { + .clk = { + .name = "mout_mpll", + }, + .sources = &clk_src_mpll, + .reg_src = { .reg = EXYNOS5_CLKSRC_CORE1, .shift = 8, .size = 1 }, +}; + +static struct clk *exynos_clkset_vpllsrc_list[] = { + [0] = &clk_fin_vpll, + [1] = &exynos5_clk_sclk_hdmi27m, +}; + +static struct clksrc_sources exynos5_clkset_vpllsrc = { + .sources = exynos_clkset_vpllsrc_list, + .nr_sources = ARRAY_SIZE(exynos_clkset_vpllsrc_list), +}; + +static struct clksrc_clk exynos5_clk_vpllsrc = { + .clk = { + .name = "vpll_src", + .enable = exynos5_clksrc_mask_top_ctrl, + .ctrlbit = (1 << 0), + }, + .sources = &exynos5_clkset_vpllsrc, + .reg_src = { .reg = EXYNOS5_CLKSRC_TOP2, .shift = 0, .size = 1 }, +}; + +static struct clk *exynos5_clkset_sclk_vpll_list[] = { + [0] = &exynos5_clk_vpllsrc.clk, + [1] = &clk_fout_vpll, +}; + +static struct clksrc_sources exynos5_clkset_sclk_vpll = { + .sources = exynos5_clkset_sclk_vpll_list, + .nr_sources = ARRAY_SIZE(exynos5_clkset_sclk_vpll_list), +}; + +static struct clksrc_clk exynos5_clk_sclk_vpll = { + .clk = { + .name = "sclk_vpll", + }, + .sources = &exynos5_clkset_sclk_vpll, + .reg_src = { .reg = EXYNOS5_CLKSRC_TOP2, .shift = 16, .size = 1 }, +}; + +static struct clksrc_clk exynos5_clk_sclk_pixel = { + .clk = { + .name = "sclk_pixel", + .parent = &exynos5_clk_sclk_vpll.clk, + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_DISP1_0, .shift = 28, .size = 4 }, +}; + +static struct clk *exynos5_clkset_sclk_hdmi_list[] = { + [0] = &exynos5_clk_sclk_pixel.clk, + [1] = &exynos5_clk_sclk_hdmiphy, +}; + +static struct clksrc_sources exynos5_clkset_sclk_hdmi = { + .sources = exynos5_clkset_sclk_hdmi_list, + .nr_sources = ARRAY_SIZE(exynos5_clkset_sclk_hdmi_list), +}; + +static struct clksrc_clk exynos5_clk_sclk_hdmi = { + .clk = { + .name = 
"sclk_hdmi", + .enable = exynos5_clksrc_mask_disp1_0_ctrl, + .ctrlbit = (1 << 20), + }, + .sources = &exynos5_clkset_sclk_hdmi, + .reg_src = { .reg = EXYNOS5_CLKSRC_DISP1_0, .shift = 20, .size = 1 }, +}; + +static struct clksrc_clk *exynos5_sclk_tv[] = { + &exynos5_clk_sclk_pixel, + &exynos5_clk_sclk_hdmi, +}; + +static struct clk *exynos5_clk_src_mpll_user_list[] = { + [0] = &clk_fin_mpll, + [1] = &exynos5_clk_mout_mpll.clk, +}; + +static struct clksrc_sources exynos5_clk_src_mpll_user = { + .sources = exynos5_clk_src_mpll_user_list, + .nr_sources = ARRAY_SIZE(exynos5_clk_src_mpll_user_list), +}; + +static struct clksrc_clk exynos5_clk_mout_mpll_user = { + .clk = { + .name = "mout_mpll_user", + }, + .sources = &exynos5_clk_src_mpll_user, + .reg_src = { .reg = EXYNOS5_CLKSRC_TOP2, .shift = 20, .size = 1 }, +}; + +static struct clk *exynos5_clkset_mout_cpu_list[] = { + [0] = &exynos5_clk_mout_apll.clk, + [1] = &exynos5_clk_mout_mpll.clk, +}; + +static struct clksrc_sources exynos5_clkset_mout_cpu = { + .sources = exynos5_clkset_mout_cpu_list, + .nr_sources = ARRAY_SIZE(exynos5_clkset_mout_cpu_list), +}; + +static struct clksrc_clk exynos5_clk_mout_cpu = { + .clk = { + .name = "mout_cpu", + }, + .sources = &exynos5_clkset_mout_cpu, + .reg_src = { .reg = EXYNOS5_CLKSRC_CPU, .shift = 16, .size = 1 }, +}; + +static struct clksrc_clk exynos5_clk_dout_armclk = { + .clk = { + .name = "dout_armclk", + .parent = &exynos5_clk_mout_cpu.clk, + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_CPU0, .shift = 0, .size = 3 }, +}; + +static struct clksrc_clk exynos5_clk_dout_arm2clk = { + .clk = { + .name = "dout_arm2clk", + .parent = &exynos5_clk_dout_armclk.clk, + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_CPU0, .shift = 28, .size = 3 }, +}; + +static struct clk exynos5_clk_armclk = { + .name = "armclk", + .parent = &exynos5_clk_dout_arm2clk.clk, +}; + +/* Core list of CMU_CDREX side */ + +static struct clk *exynos5_clkset_cdrex_list[] = { + [0] = &exynos5_clk_mout_mpll.clk, + [1] = &exynos5_clk_mout_bpll.clk, +}; + +static struct clksrc_sources exynos5_clkset_cdrex = { + .sources = exynos5_clkset_cdrex_list, + .nr_sources = ARRAY_SIZE(exynos5_clkset_cdrex_list), +}; + +static struct clksrc_clk exynos5_clk_cdrex = { + .clk = { + .name = "clk_cdrex", + }, + .sources = &exynos5_clkset_cdrex, + .reg_src = { .reg = EXYNOS5_CLKSRC_CDREX, .shift = 4, .size = 1 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_CDREX, .shift = 16, .size = 3 }, +}; + +static struct clksrc_clk exynos5_clk_aclk_acp = { + .clk = { + .name = "aclk_acp", + .parent = &exynos5_clk_mout_mpll.clk, + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_ACP, .shift = 0, .size = 3 }, +}; + +static struct clksrc_clk exynos5_clk_pclk_acp = { + .clk = { + .name = "pclk_acp", + .parent = &exynos5_clk_aclk_acp.clk, + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_ACP, .shift = 4, .size = 3 }, +}; + +/* Core list of CMU_TOP side */ + +struct clk *exynos5_clkset_aclk_top_list[] = { + [0] = &exynos5_clk_mout_mpll_user.clk, + [1] = &exynos5_clk_mout_bpll_user.clk, +}; + +struct clksrc_sources exynos5_clkset_aclk = { + .sources = exynos5_clkset_aclk_top_list, + .nr_sources = ARRAY_SIZE(exynos5_clkset_aclk_top_list), +}; + +static struct clksrc_clk exynos5_clk_aclk_400 = { + .clk = { + .name = "aclk_400", + }, + .sources = &exynos5_clkset_aclk, + .reg_src = { .reg = EXYNOS5_CLKSRC_TOP0, .shift = 20, .size = 1 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_TOP0, .shift = 24, .size = 3 }, +}; + +struct clk *exynos5_clkset_aclk_333_166_list[] = { + [0] = &exynos5_clk_mout_cpll.clk, + [1] = 
&exynos5_clk_mout_mpll_user.clk, +}; + +struct clksrc_sources exynos5_clkset_aclk_333_166 = { + .sources = exynos5_clkset_aclk_333_166_list, + .nr_sources = ARRAY_SIZE(exynos5_clkset_aclk_333_166_list), +}; + +static struct clksrc_clk exynos5_clk_aclk_333 = { + .clk = { + .name = "aclk_333", + }, + .sources = &exynos5_clkset_aclk_333_166, + .reg_src = { .reg = EXYNOS5_CLKSRC_TOP0, .shift = 16, .size = 1 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_TOP0, .shift = 20, .size = 3 }, +}; + +static struct clksrc_clk exynos5_clk_aclk_166 = { + .clk = { + .name = "aclk_166", + }, + .sources = &exynos5_clkset_aclk_333_166, + .reg_src = { .reg = EXYNOS5_CLKSRC_TOP0, .shift = 8, .size = 1 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_TOP0, .shift = 8, .size = 3 }, +}; + +static struct clksrc_clk exynos5_clk_aclk_266 = { + .clk = { + .name = "aclk_266", + .parent = &exynos5_clk_mout_mpll_user.clk, + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_TOP0, .shift = 16, .size = 3 }, +}; + +static struct clksrc_clk exynos5_clk_aclk_200 = { + .clk = { + .name = "aclk_200", + }, + .sources = &exynos5_clkset_aclk, + .reg_src = { .reg = EXYNOS5_CLKSRC_TOP0, .shift = 12, .size = 1 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_TOP0, .shift = 12, .size = 3 }, +}; + +static struct clksrc_clk exynos5_clk_aclk_66_pre = { + .clk = { + .name = "aclk_66_pre", + .parent = &exynos5_clk_mout_mpll_user.clk, + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_TOP1, .shift = 24, .size = 3 }, +}; + +static struct clksrc_clk exynos5_clk_aclk_66 = { + .clk = { + .name = "aclk_66", + .parent = &exynos5_clk_aclk_66_pre.clk, + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_TOP0, .shift = 0, .size = 3 }, +}; + +static struct clk exynos5_init_clocks_off[] = { + { + .name = "timers", + .parent = &exynos5_clk_aclk_66.clk, + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 24), + }, { + .name = "rtc", + .parent = &exynos5_clk_aclk_66.clk, + .enable = exynos5_clk_ip_peris_ctrl, + .ctrlbit = (1 << 20), + }, { + .name = "hsmmc", + .devname = "s3c-sdhci.0", + .parent = &exynos5_clk_aclk_200.clk, + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = (1 << 12), + }, { + .name = "hsmmc", + .devname = "s3c-sdhci.1", + .parent = &exynos5_clk_aclk_200.clk, + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = (1 << 13), + }, { + .name = "hsmmc", + .devname = "s3c-sdhci.2", + .parent = &exynos5_clk_aclk_200.clk, + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = (1 << 14), + }, { + .name = "hsmmc", + .devname = "s3c-sdhci.3", + .parent = &exynos5_clk_aclk_200.clk, + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = (1 << 15), + }, { + .name = "dwmci", + .parent = &exynos5_clk_aclk_200.clk, + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = (1 << 16), + }, { + .name = "sata", + .devname = "ahci", + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = (1 << 6), + }, { + .name = "sata_phy", + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = (1 << 24), + }, { + .name = "sata_phy_i2c", + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = (1 << 25), + }, { + .name = "mfc", + .devname = "s5p-mfc", + .enable = exynos5_clk_ip_mfc_ctrl, + .ctrlbit = (1 << 0), + }, { + .name = "hdmi", + .devname = "exynos4-hdmi", + .enable = exynos5_clk_ip_disp1_ctrl, + .ctrlbit = (1 << 6), + }, { + .name = "mixer", + .devname = "s5p-mixer", + .enable = exynos5_clk_ip_disp1_ctrl, + .ctrlbit = (1 << 5), + }, { + .name = "jpeg", + .enable = exynos5_clk_ip_gen_ctrl, + .ctrlbit = (1 << 2), + }, { + .name = "dsim0", + .enable = exynos5_clk_ip_disp1_ctrl, + .ctrlbit = (1 << 3), + }, { + .name = "iis", + .devname = 
"samsung-i2s.1", + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 20), + }, { + .name = "iis", + .devname = "samsung-i2s.2", + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 21), + }, { + .name = "pcm", + .devname = "samsung-pcm.1", + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 22), + }, { + .name = "pcm", + .devname = "samsung-pcm.2", + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 23), + }, { + .name = "spdif", + .devname = "samsung-spdif", + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 26), + }, { + .name = "ac97", + .devname = "samsung-ac97", + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 27), + }, { + .name = "usbhost", + .enable = exynos5_clk_ip_fsys_ctrl , + .ctrlbit = (1 << 18), + }, { + .name = "usbotg", + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = (1 << 7), + }, { + .name = "gps", + .enable = exynos5_clk_ip_gps_ctrl, + .ctrlbit = ((1 << 3) | (1 << 2) | (1 << 0)), + }, { + .name = "nfcon", + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = (1 << 22), + }, { + .name = "iop", + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = ((1 << 30) | (1 << 26) | (1 << 23)), + }, { + .name = "core_iop", + .enable = exynos5_clk_ip_core_ctrl, + .ctrlbit = ((1 << 21) | (1 << 3)), + }, { + .name = "mcu_iop", + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = (1 << 0), + }, { + .name = "i2c", + .devname = "s3c2440-i2c.0", + .parent = &exynos5_clk_aclk_66.clk, + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 6), + }, { + .name = "i2c", + .devname = "s3c2440-i2c.1", + .parent = &exynos5_clk_aclk_66.clk, + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 7), + }, { + .name = "i2c", + .devname = "s3c2440-i2c.2", + .parent = &exynos5_clk_aclk_66.clk, + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 8), + }, { + .name = "i2c", + .devname = "s3c2440-i2c.3", + .parent = &exynos5_clk_aclk_66.clk, + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 9), + }, { + .name = "i2c", + .devname = "s3c2440-i2c.4", + .parent = &exynos5_clk_aclk_66.clk, + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 10), + }, { + .name = "i2c", + .devname = "s3c2440-i2c.5", + .parent = &exynos5_clk_aclk_66.clk, + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 11), + }, { + .name = "i2c", + .devname = "s3c2440-i2c.6", + .parent = &exynos5_clk_aclk_66.clk, + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 12), + }, { + .name = "i2c", + .devname = "s3c2440-i2c.7", + .parent = &exynos5_clk_aclk_66.clk, + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 13), + }, { + .name = "i2c", + .devname = "s3c2440-hdmiphy-i2c", + .parent = &exynos5_clk_aclk_66.clk, + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 14), + } +}; + +static struct clk exynos5_init_clocks_on[] = { + { + .name = "uart", + .devname = "s5pv210-uart.0", + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 0), + }, { + .name = "uart", + .devname = "s5pv210-uart.1", + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 1), + }, { + .name = "uart", + .devname = "s5pv210-uart.2", + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 2), + }, { + .name = "uart", + .devname = "s5pv210-uart.3", + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 3), + }, { + .name = "uart", + .devname = "s5pv210-uart.4", + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 4), + }, { + .name = "uart", + .devname = "s5pv210-uart.5", + .enable = exynos5_clk_ip_peric_ctrl, + .ctrlbit = (1 << 5), + } +}; + +static struct clk exynos5_clk_pdma0 
= { + .name = "dma", + .devname = "dma-pl330.0", + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = (1 << 1), +}; + +static struct clk exynos5_clk_pdma1 = { + .name = "dma", + .devname = "dma-pl330.1", + .enable = exynos5_clk_ip_fsys_ctrl, + .ctrlbit = (1 << 1), +}; + +static struct clk exynos5_clk_mdma1 = { + .name = "dma", + .devname = "dma-pl330.2", + .enable = exynos5_clk_ip_gen_ctrl, + .ctrlbit = (1 << 4), +}; + +struct clk *exynos5_clkset_group_list[] = { + [0] = &clk_ext_xtal_mux, + [1] = NULL, + [2] = &exynos5_clk_sclk_hdmi24m, + [3] = &exynos5_clk_sclk_dptxphy, + [4] = &exynos5_clk_sclk_usbphy, + [5] = &exynos5_clk_sclk_hdmiphy, + [6] = &exynos5_clk_mout_mpll_user.clk, + [7] = &exynos5_clk_mout_epll.clk, + [8] = &exynos5_clk_sclk_vpll.clk, + [9] = &exynos5_clk_mout_cpll.clk, +}; + +struct clksrc_sources exynos5_clkset_group = { + .sources = exynos5_clkset_group_list, + .nr_sources = ARRAY_SIZE(exynos5_clkset_group_list), +}; + +/* Possible clock sources for aclk_266_gscl_sub Mux */ +static struct clk *clk_src_gscl_266_list[] = { + [0] = &clk_ext_xtal_mux, + [1] = &exynos5_clk_aclk_266.clk, +}; + +static struct clksrc_sources clk_src_gscl_266 = { + .sources = clk_src_gscl_266_list, + .nr_sources = ARRAY_SIZE(clk_src_gscl_266_list), +}; + +static struct clksrc_clk exynos5_clk_dout_mmc0 = { + .clk = { + .name = "dout_mmc0", + }, + .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_FSYS, .shift = 0, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_FSYS1, .shift = 0, .size = 4 }, +}; + +static struct clksrc_clk exynos5_clk_dout_mmc1 = { + .clk = { + .name = "dout_mmc1", + }, + .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_FSYS, .shift = 4, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_FSYS1, .shift = 16, .size = 4 }, +}; + +static struct clksrc_clk exynos5_clk_dout_mmc2 = { + .clk = { + .name = "dout_mmc2", + }, + .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_FSYS, .shift = 8, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_FSYS2, .shift = 0, .size = 4 }, +}; + +static struct clksrc_clk exynos5_clk_dout_mmc3 = { + .clk = { + .name = "dout_mmc3", + }, + .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_FSYS, .shift = 12, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_FSYS2, .shift = 16, .size = 4 }, +}; + +static struct clksrc_clk exynos5_clk_dout_mmc4 = { + .clk = { + .name = "dout_mmc4", + }, + .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_FSYS, .shift = 16, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_FSYS3, .shift = 0, .size = 4 }, +}; + +static struct clksrc_clk exynos5_clk_sclk_uart0 = { + .clk = { + .name = "uclk1", + .devname = "exynos4210-uart.0", + .enable = exynos5_clksrc_mask_peric0_ctrl, + .ctrlbit = (1 << 0), + }, + .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_PERIC0, .shift = 0, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_PERIC0, .shift = 0, .size = 4 }, +}; + +static struct clksrc_clk exynos5_clk_sclk_uart1 = { + .clk = { + .name = "uclk1", + .devname = "exynos4210-uart.1", + .enable = exynos5_clksrc_mask_peric0_ctrl, + .ctrlbit = (1 << 4), + }, + .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_PERIC0, .shift = 4, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_PERIC0, .shift = 4, .size = 4 }, +}; + +static struct clksrc_clk exynos5_clk_sclk_uart2 = { + .clk = { + .name = "uclk1", + .devname = "exynos4210-uart.2", + .enable = exynos5_clksrc_mask_peric0_ctrl, + .ctrlbit = (1 << 8), + }, 
+ .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_PERIC0, .shift = 8, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_PERIC0, .shift = 8, .size = 4 }, +}; + +static struct clksrc_clk exynos5_clk_sclk_uart3 = { + .clk = { + .name = "uclk1", + .devname = "exynos4210-uart.3", + .enable = exynos5_clksrc_mask_peric0_ctrl, + .ctrlbit = (1 << 12), + }, + .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_PERIC0, .shift = 12, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_PERIC0, .shift = 12, .size = 4 }, +}; + +static struct clksrc_clk exynos5_clk_sclk_mmc0 = { + .clk = { + .name = "sclk_mmc", + .devname = "s3c-sdhci.0", + .parent = &exynos5_clk_dout_mmc0.clk, + .enable = exynos5_clksrc_mask_fsys_ctrl, + .ctrlbit = (1 << 0), + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_FSYS1, .shift = 8, .size = 8 }, +}; + +static struct clksrc_clk exynos5_clk_sclk_mmc1 = { + .clk = { + .name = "sclk_mmc", + .devname = "s3c-sdhci.1", + .parent = &exynos5_clk_dout_mmc1.clk, + .enable = exynos5_clksrc_mask_fsys_ctrl, + .ctrlbit = (1 << 4), + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_FSYS1, .shift = 24, .size = 8 }, +}; + +static struct clksrc_clk exynos5_clk_sclk_mmc2 = { + .clk = { + .name = "sclk_mmc", + .devname = "s3c-sdhci.2", + .parent = &exynos5_clk_dout_mmc2.clk, + .enable = exynos5_clksrc_mask_fsys_ctrl, + .ctrlbit = (1 << 8), + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_FSYS2, .shift = 8, .size = 8 }, +}; + +static struct clksrc_clk exynos5_clk_sclk_mmc3 = { + .clk = { + .name = "sclk_mmc", + .devname = "s3c-sdhci.3", + .parent = &exynos5_clk_dout_mmc3.clk, + .enable = exynos5_clksrc_mask_fsys_ctrl, + .ctrlbit = (1 << 12), + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_FSYS2, .shift = 24, .size = 8 }, +}; + +static struct clksrc_clk exynos5_clksrcs[] = { + { + .clk = { + .name = "sclk_dwmci", + .parent = &exynos5_clk_dout_mmc4.clk, + .enable = exynos5_clksrc_mask_fsys_ctrl, + .ctrlbit = (1 << 16), + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_FSYS3, .shift = 8, .size = 8 }, + }, { + .clk = { + .name = "sclk_fimd", + .devname = "s3cfb.1", + .enable = exynos5_clksrc_mask_disp1_0_ctrl, + .ctrlbit = (1 << 0), + }, + .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_DISP1_0, .shift = 0, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_DISP1_0, .shift = 0, .size = 4 }, + }, { + .clk = { + .name = "aclk_266_gscl", + }, + .sources = &clk_src_gscl_266, + .reg_src = { .reg = EXYNOS5_CLKSRC_TOP3, .shift = 8, .size = 1 }, + }, { + .clk = { + .name = "sclk_g3d", + .devname = "mali-t604.0", + .enable = exynos5_clk_block_ctrl, + .ctrlbit = (1 << 1), + }, + .sources = &exynos5_clkset_aclk, + .reg_src = { .reg = EXYNOS5_CLKSRC_TOP0, .shift = 20, .size = 1 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_TOP0, .shift = 24, .size = 3 }, + }, { + .clk = { + .name = "sclk_gscl_wrap", + .devname = "s5p-mipi-csis.0", + .enable = exynos5_clksrc_mask_gscl_ctrl, + .ctrlbit = (1 << 24), + }, + .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_GSCL, .shift = 24, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_GSCL, .shift = 24, .size = 4 }, + }, { + .clk = { + .name = "sclk_gscl_wrap", + .devname = "s5p-mipi-csis.1", + .enable = exynos5_clksrc_mask_gscl_ctrl, + .ctrlbit = (1 << 28), + }, + .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_GSCL, .shift = 28, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_GSCL, .shift = 28, .size = 4 }, + }, { + .clk = { + .name = "sclk_cam0", + .enable = exynos5_clksrc_mask_gscl_ctrl, + .ctrlbit = (1 << 16), + 
}, + .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_GSCL, .shift = 16, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_GSCL, .shift = 16, .size = 4 }, + }, { + .clk = { + .name = "sclk_cam1", + .enable = exynos5_clksrc_mask_gscl_ctrl, + .ctrlbit = (1 << 20), + }, + .sources = &exynos5_clkset_group, + .reg_src = { .reg = EXYNOS5_CLKSRC_GSCL, .shift = 20, .size = 4 }, + .reg_div = { .reg = EXYNOS5_CLKDIV_GSCL, .shift = 20, .size = 4 }, + }, { + .clk = { + .name = "sclk_jpeg", + .parent = &exynos5_clk_mout_cpll.clk, + }, + .reg_div = { .reg = EXYNOS5_CLKDIV_GEN, .shift = 4, .size = 3 }, + }, +}; + +/* Clock initialization code */ +static struct clksrc_clk *exynos5_sysclks[] = { + &exynos5_clk_mout_apll, + &exynos5_clk_sclk_apll, + &exynos5_clk_mout_bpll, + &exynos5_clk_mout_bpll_user, + &exynos5_clk_mout_cpll, + &exynos5_clk_mout_epll, + &exynos5_clk_mout_mpll, + &exynos5_clk_mout_mpll_user, + &exynos5_clk_vpllsrc, + &exynos5_clk_sclk_vpll, + &exynos5_clk_mout_cpu, + &exynos5_clk_dout_armclk, + &exynos5_clk_dout_arm2clk, + &exynos5_clk_cdrex, + &exynos5_clk_aclk_400, + &exynos5_clk_aclk_333, + &exynos5_clk_aclk_266, + &exynos5_clk_aclk_200, + &exynos5_clk_aclk_166, + &exynos5_clk_aclk_66_pre, + &exynos5_clk_aclk_66, + &exynos5_clk_dout_mmc0, + &exynos5_clk_dout_mmc1, + &exynos5_clk_dout_mmc2, + &exynos5_clk_dout_mmc3, + &exynos5_clk_dout_mmc4, + &exynos5_clk_aclk_acp, + &exynos5_clk_pclk_acp, +}; + +static struct clk *exynos5_clk_cdev[] = { + &exynos5_clk_pdma0, + &exynos5_clk_pdma1, + &exynos5_clk_mdma1, +}; + +static struct clksrc_clk *exynos5_clksrc_cdev[] = { + &exynos5_clk_sclk_uart0, + &exynos5_clk_sclk_uart1, + &exynos5_clk_sclk_uart2, + &exynos5_clk_sclk_uart3, + &exynos5_clk_sclk_mmc0, + &exynos5_clk_sclk_mmc1, + &exynos5_clk_sclk_mmc2, + &exynos5_clk_sclk_mmc3, +}; + +static struct clk_lookup exynos5_clk_lookup[] = { + CLKDEV_INIT("exynos4210-uart.0", "clk_uart_baud0", &exynos5_clk_sclk_uart0.clk), + CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos5_clk_sclk_uart1.clk), + CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos5_clk_sclk_uart2.clk), + CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos5_clk_sclk_uart3.clk), + CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &exynos5_clk_sclk_mmc0.clk), + CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &exynos5_clk_sclk_mmc1.clk), + CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &exynos5_clk_sclk_mmc2.clk), + CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &exynos5_clk_sclk_mmc3.clk), + CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos5_clk_pdma0), + CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos5_clk_pdma1), + CLKDEV_INIT("dma-pl330.2", "apb_pclk", &exynos5_clk_mdma1), +}; + +static unsigned long exynos5_epll_get_rate(struct clk *clk) +{ + return clk->rate; +} + +static struct clk *exynos5_clks[] __initdata = { + &exynos5_clk_sclk_hdmi27m, + &exynos5_clk_sclk_hdmiphy, + &clk_fout_bpll, + &clk_fout_cpll, + &exynos5_clk_armclk, +}; + +static u32 epll_div[][6] = { + { 192000000, 0, 48, 3, 1, 0 }, + { 180000000, 0, 45, 3, 1, 0 }, + { 73728000, 1, 73, 3, 3, 47710 }, + { 67737600, 1, 90, 4, 3, 20762 }, + { 49152000, 0, 49, 3, 3, 9961 }, + { 45158400, 0, 45, 3, 3, 10381 }, + { 180633600, 0, 45, 3, 1, 10381 }, +}; + +static int exynos5_epll_set_rate(struct clk *clk, unsigned long rate) +{ + unsigned int epll_con, epll_con_k; + unsigned int i; + unsigned int tmp; + unsigned int epll_rate; + unsigned int locktime; + unsigned int lockcnt; + + /* Return if nothing changed */ + if (clk->rate == rate) + return 0; + + if 
(clk->parent) + epll_rate = clk_get_rate(clk->parent); + else + epll_rate = clk_ext_xtal_mux.rate; + + if (epll_rate != 24000000) { + pr_err("Invalid Clock : recommended clock is 24MHz.\n"); + return -EINVAL; + } + + epll_con = __raw_readl(EXYNOS5_EPLL_CON0); + epll_con &= ~(0x1 << 27 | \ + PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT | \ + PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT | \ + PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT); + + for (i = 0; i < ARRAY_SIZE(epll_div); i++) { + if (epll_div[i][0] == rate) { + epll_con_k = epll_div[i][5] << 0; + epll_con |= epll_div[i][1] << 27; + epll_con |= epll_div[i][2] << PLL46XX_MDIV_SHIFT; + epll_con |= epll_div[i][3] << PLL46XX_PDIV_SHIFT; + epll_con |= epll_div[i][4] << PLL46XX_SDIV_SHIFT; + break; + } + } + + if (i == ARRAY_SIZE(epll_div)) { + printk(KERN_ERR "%s: Invalid Clock EPLL Frequency\n", + __func__); + return -EINVAL; + } + + epll_rate /= 1000000; + + /* 3000 max_cycls : specification data */ + locktime = 3000 / epll_rate * epll_div[i][3]; + lockcnt = locktime * 10000 / (10000 / epll_rate); + + __raw_writel(lockcnt, EXYNOS5_EPLL_LOCK); + + __raw_writel(epll_con, EXYNOS5_EPLL_CON0); + __raw_writel(epll_con_k, EXYNOS5_EPLL_CON1); + + do { + tmp = __raw_readl(EXYNOS5_EPLL_CON0); + } while (!(tmp & 0x1 << EXYNOS5_EPLLCON0_LOCKED_SHIFT)); + + clk->rate = rate; + + return 0; +} + +static struct clk_ops exynos5_epll_ops = { + .get_rate = exynos5_epll_get_rate, + .set_rate = exynos5_epll_set_rate, +}; + +static int xtal_rate; + +static unsigned long exynos5_fout_apll_get_rate(struct clk *clk) +{ + return s5p_get_pll35xx(xtal_rate, __raw_readl(EXYNOS5_APLL_CON0)); +} + +static struct clk_ops exynos5_fout_apll_ops = { + .get_rate = exynos5_fout_apll_get_rate, +}; + +#ifdef CONFIG_PM +static int exynos5_clock_suspend(void) +{ + s3c_pm_do_save(exynos5_clock_save, ARRAY_SIZE(exynos5_clock_save)); + + return 0; +} + +static void exynos5_clock_resume(void) +{ + s3c_pm_do_restore_core(exynos5_clock_save, ARRAY_SIZE(exynos5_clock_save)); +} +#else +#define exynos5_clock_suspend NULL +#define exynos5_clock_resume NULL +#endif + +struct syscore_ops exynos5_clock_syscore_ops = { + .suspend = exynos5_clock_suspend, + .resume = exynos5_clock_resume, +}; + +void __init_or_cpufreq exynos5_setup_clocks(void) +{ + struct clk *xtal_clk; + unsigned long apll; + unsigned long bpll; + unsigned long cpll; + unsigned long mpll; + unsigned long epll; + unsigned long vpll; + unsigned long vpllsrc; + unsigned long xtal; + unsigned long armclk; + unsigned long mout_cdrex; + unsigned long aclk_400; + unsigned long aclk_333; + unsigned long aclk_266; + unsigned long aclk_200; + unsigned long aclk_166; + unsigned long aclk_66; + unsigned int ptr; + + printk(KERN_DEBUG "%s: registering clocks\n", __func__); + + xtal_clk = clk_get(NULL, "xtal"); + BUG_ON(IS_ERR(xtal_clk)); + + xtal = clk_get_rate(xtal_clk); + + xtal_rate = xtal; + + clk_put(xtal_clk); + + printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal); + + apll = s5p_get_pll35xx(xtal, __raw_readl(EXYNOS5_APLL_CON0)); + bpll = s5p_get_pll35xx(xtal, __raw_readl(EXYNOS5_BPLL_CON0)); + cpll = s5p_get_pll35xx(xtal, __raw_readl(EXYNOS5_CPLL_CON0)); + mpll = s5p_get_pll35xx(xtal, __raw_readl(EXYNOS5_MPLL_CON0)); + epll = s5p_get_pll36xx(xtal, __raw_readl(EXYNOS5_EPLL_CON0), + __raw_readl(EXYNOS5_EPLL_CON1)); + + vpllsrc = clk_get_rate(&exynos5_clk_vpllsrc.clk); + vpll = s5p_get_pll36xx(vpllsrc, __raw_readl(EXYNOS5_VPLL_CON0), + __raw_readl(EXYNOS5_VPLL_CON1)); + + clk_fout_apll.ops = &exynos5_fout_apll_ops; + clk_fout_bpll.rate = 
bpll; + clk_fout_cpll.rate = cpll; + clk_fout_mpll.rate = mpll; + clk_fout_epll.rate = epll; + clk_fout_vpll.rate = vpll; + + printk(KERN_INFO "EXYNOS5: PLL settings, A=%ld, B=%ld, C=%ld\n" + "M=%ld, E=%ld V=%ld", + apll, bpll, cpll, mpll, epll, vpll); + + armclk = clk_get_rate(&exynos5_clk_armclk); + mout_cdrex = clk_get_rate(&exynos5_clk_cdrex.clk); + + aclk_400 = clk_get_rate(&exynos5_clk_aclk_400.clk); + aclk_333 = clk_get_rate(&exynos5_clk_aclk_333.clk); + aclk_266 = clk_get_rate(&exynos5_clk_aclk_266.clk); + aclk_200 = clk_get_rate(&exynos5_clk_aclk_200.clk); + aclk_166 = clk_get_rate(&exynos5_clk_aclk_166.clk); + aclk_66 = clk_get_rate(&exynos5_clk_aclk_66.clk); + + printk(KERN_INFO "EXYNOS5: ARMCLK=%ld, CDREX=%ld, ACLK400=%ld\n" + "ACLK333=%ld, ACLK266=%ld, ACLK200=%ld\n" + "ACLK166=%ld, ACLK66=%ld\n", + armclk, mout_cdrex, aclk_400, + aclk_333, aclk_266, aclk_200, + aclk_166, aclk_66); + + + clk_fout_epll.ops = &exynos5_epll_ops; + + if (clk_set_parent(&exynos5_clk_mout_epll.clk, &clk_fout_epll)) + printk(KERN_ERR "Unable to set parent %s of clock %s.\n", + clk_fout_epll.name, exynos5_clk_mout_epll.clk.name); + + clk_set_rate(&exynos5_clk_sclk_apll.clk, 100000000); + clk_set_rate(&exynos5_clk_aclk_266.clk, 300000000); + + clk_set_rate(&exynos5_clk_aclk_acp.clk, 267000000); + clk_set_rate(&exynos5_clk_pclk_acp.clk, 134000000); + + for (ptr = 0; ptr < ARRAY_SIZE(exynos5_clksrcs); ptr++) + s3c_set_clksrc(&exynos5_clksrcs[ptr], true); +} + +void __init exynos5_register_clocks(void) +{ + int ptr; + + s3c24xx_register_clocks(exynos5_clks, ARRAY_SIZE(exynos5_clks)); + + for (ptr = 0; ptr < ARRAY_SIZE(exynos5_sysclks); ptr++) + s3c_register_clksrc(exynos5_sysclks[ptr], 1); + + for (ptr = 0; ptr < ARRAY_SIZE(exynos5_sclk_tv); ptr++) + s3c_register_clksrc(exynos5_sclk_tv[ptr], 1); + + for (ptr = 0; ptr < ARRAY_SIZE(exynos5_clksrc_cdev); ptr++) + s3c_register_clksrc(exynos5_clksrc_cdev[ptr], 1); + + s3c_register_clksrc(exynos5_clksrcs, ARRAY_SIZE(exynos5_clksrcs)); + s3c_register_clocks(exynos5_init_clocks_on, ARRAY_SIZE(exynos5_init_clocks_on)); + + s3c24xx_register_clocks(exynos5_clk_cdev, ARRAY_SIZE(exynos5_clk_cdev)); + for (ptr = 0; ptr < ARRAY_SIZE(exynos5_clk_cdev); ptr++) + s3c_disable_clocks(exynos5_clk_cdev[ptr], 1); + + s3c_register_clocks(exynos5_init_clocks_off, ARRAY_SIZE(exynos5_init_clocks_off)); + s3c_disable_clocks(exynos5_init_clocks_off, ARRAY_SIZE(exynos5_init_clocks_off)); + clkdev_add_table(exynos5_clk_lookup, ARRAY_SIZE(exynos5_clk_lookup)); + + register_syscore_ops(&exynos5_clock_syscore_ops); + s3c_pwmclk_init(); +} diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c index 97ca2592ce83..e6cc50e94a58 100644 --- a/arch/arm/mach-exynos/common.c +++ b/arch/arm/mach-exynos/common.c @@ -53,6 +53,14 @@ static const char name_exynos4210[] = "EXYNOS4210"; static const char name_exynos4212[] = "EXYNOS4212"; static const char name_exynos4412[] = "EXYNOS4412"; +static const char name_exynos5250[] = "EXYNOS5250"; + +static void exynos4_map_io(void); +static void exynos5_map_io(void); +static void exynos4_init_clocks(int xtal); +static void exynos5_init_clocks(int xtal); +static void exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no); +static int exynos_init(void); static struct cpu_table cpu_ids[] __initdata = { { @@ -60,7 +68,7 @@ static struct cpu_table cpu_ids[] __initdata = { .idmask = EXYNOS4_CPU_MASK, .map_io = exynos4_map_io, .init_clocks = exynos4_init_clocks, - .init_uarts = exynos4_init_uarts, + .init_uarts = exynos_init_uarts, .init = 
exynos_init, .name = name_exynos4210, }, { @@ -68,7 +76,7 @@ static struct cpu_table cpu_ids[] __initdata = { .idmask = EXYNOS4_CPU_MASK, .map_io = exynos4_map_io, .init_clocks = exynos4_init_clocks, - .init_uarts = exynos4_init_uarts, + .init_uarts = exynos_init_uarts, .init = exynos_init, .name = name_exynos4212, }, { @@ -76,9 +84,17 @@ static struct cpu_table cpu_ids[] __initdata = { .idmask = EXYNOS4_CPU_MASK, .map_io = exynos4_map_io, .init_clocks = exynos4_init_clocks, - .init_uarts = exynos4_init_uarts, + .init_uarts = exynos_init_uarts, .init = exynos_init, .name = name_exynos4412, + }, { + .idcode = EXYNOS5250_SOC_ID, + .idmask = EXYNOS5_SOC_MASK, + .map_io = exynos5_map_io, + .init_clocks = exynos5_init_clocks, + .init_uarts = exynos_init_uarts, + .init = exynos_init, + .name = name_exynos5250, }, }; @@ -87,10 +103,14 @@ static struct cpu_table cpu_ids[] __initdata = { static struct map_desc exynos_iodesc[] __initdata = { { .virtual = (unsigned long)S5P_VA_CHIPID, - .pfn = __phys_to_pfn(EXYNOS4_PA_CHIPID), + .pfn = __phys_to_pfn(EXYNOS_PA_CHIPID), .length = SZ_4K, .type = MT_DEVICE, - }, { + }, +}; + +static struct map_desc exynos4_iodesc[] __initdata = { + { .virtual = (unsigned long)S3C_VA_SYS, .pfn = __phys_to_pfn(EXYNOS4_PA_SYSCON), .length = SZ_64K, @@ -140,11 +160,7 @@ static struct map_desc exynos_iodesc[] __initdata = { .pfn = __phys_to_pfn(EXYNOS4_PA_UART), .length = SZ_512K, .type = MT_DEVICE, - }, -}; - -static struct map_desc exynos4_iodesc[] __initdata = { - { + }, { .virtual = (unsigned long)S5P_VA_CMU, .pfn = __phys_to_pfn(EXYNOS4_PA_CMU), .length = SZ_128K, @@ -160,21 +176,6 @@ static struct map_desc exynos4_iodesc[] __initdata = { .length = SZ_4K, .type = MT_DEVICE, }, { - .virtual = (unsigned long)S5P_VA_GPIO1, - .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO1), - .length = SZ_4K, - .type = MT_DEVICE, - }, { - .virtual = (unsigned long)S5P_VA_GPIO2, - .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO2), - .length = SZ_4K, - .type = MT_DEVICE, - }, { - .virtual = (unsigned long)S5P_VA_GPIO3, - .pfn = __phys_to_pfn(EXYNOS4_PA_GPIO3), - .length = SZ_256, - .type = MT_DEVICE, - }, { .virtual = (unsigned long)S5P_VA_DMC0, .pfn = __phys_to_pfn(EXYNOS4_PA_DMC0), .length = SZ_64K, @@ -210,11 +211,80 @@ static struct map_desc exynos4_iodesc1[] __initdata = { }, }; +static struct map_desc exynos5_iodesc[] __initdata = { + { + .virtual = (unsigned long)S3C_VA_SYS, + .pfn = __phys_to_pfn(EXYNOS5_PA_SYSCON), + .length = SZ_64K, + .type = MT_DEVICE, + }, { + .virtual = (unsigned long)S3C_VA_TIMER, + .pfn = __phys_to_pfn(EXYNOS5_PA_TIMER), + .length = SZ_16K, + .type = MT_DEVICE, + }, { + .virtual = (unsigned long)S3C_VA_WATCHDOG, + .pfn = __phys_to_pfn(EXYNOS5_PA_WATCHDOG), + .length = SZ_4K, + .type = MT_DEVICE, + }, { + .virtual = (unsigned long)S5P_VA_SROMC, + .pfn = __phys_to_pfn(EXYNOS5_PA_SROMC), + .length = SZ_4K, + .type = MT_DEVICE, + }, { + .virtual = (unsigned long)S5P_VA_SYSTIMER, + .pfn = __phys_to_pfn(EXYNOS5_PA_SYSTIMER), + .length = SZ_4K, + .type = MT_DEVICE, + }, { + .virtual = (unsigned long)S5P_VA_SYSRAM, + .pfn = __phys_to_pfn(EXYNOS5_PA_SYSRAM), + .length = SZ_4K, + .type = MT_DEVICE, + }, { + .virtual = (unsigned long)S5P_VA_CMU, + .pfn = __phys_to_pfn(EXYNOS5_PA_CMU), + .length = 144 * SZ_1K, + .type = MT_DEVICE, + }, { + .virtual = (unsigned long)S5P_VA_PMU, + .pfn = __phys_to_pfn(EXYNOS5_PA_PMU), + .length = SZ_64K, + .type = MT_DEVICE, + }, { + .virtual = (unsigned long)S5P_VA_COMBINER_BASE, + .pfn = __phys_to_pfn(EXYNOS5_PA_COMBINER), + .length = SZ_4K, + .type = 
MT_DEVICE, + }, { + .virtual = (unsigned long)S3C_VA_UART, + .pfn = __phys_to_pfn(EXYNOS5_PA_UART), + .length = SZ_512K, + .type = MT_DEVICE, + }, { + .virtual = (unsigned long)S5P_VA_GIC_CPU, + .pfn = __phys_to_pfn(EXYNOS5_PA_GIC_CPU), + .length = SZ_64K, + .type = MT_DEVICE, + }, { + .virtual = (unsigned long)S5P_VA_GIC_DIST, + .pfn = __phys_to_pfn(EXYNOS5_PA_GIC_DIST), + .length = SZ_64K, + .type = MT_DEVICE, + }, +}; + void exynos4_restart(char mode, const char *cmd) { __raw_writel(0x1, S5P_SWRESET); } +void exynos5_restart(char mode, const char *cmd) +{ + __raw_writel(0x1, EXYNOS_SWRESET); +} + /* * exynos_map_io * @@ -234,7 +304,7 @@ void __init exynos_init_io(struct map_desc *mach_desc, int size) s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids)); } -void __init exynos4_map_io(void) +static void __init exynos4_map_io(void) { iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc)); @@ -265,7 +335,22 @@ void __init exynos4_map_io(void) s5p_hdmi_setname("exynos4-hdmi"); } -void __init exynos4_init_clocks(int xtal) +static void __init exynos5_map_io(void) +{ + iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc)); + + s3c_device_i2c0.resource[0].start = EXYNOS5_PA_IIC(0); + s3c_device_i2c0.resource[0].end = EXYNOS5_PA_IIC(0) + SZ_4K - 1; + s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC; + s3c_device_i2c0.resource[1].end = EXYNOS5_IRQ_IIC; + + /* The I2C bus controllers are directly compatible with s3c2440 */ + s3c_i2c0_setname("s3c2440-i2c"); + s3c_i2c1_setname("s3c2440-i2c"); + s3c_i2c2_setname("s3c2440-i2c"); +} + +static void __init exynos4_init_clocks(int xtal) { printk(KERN_DEBUG "%s: initializing clocks\n", __func__); @@ -281,6 +366,17 @@ void __init exynos4_init_clocks(int xtal) exynos4_setup_clocks(); } +static void __init exynos5_init_clocks(int xtal) +{ + printk(KERN_DEBUG "%s: initializing clocks\n", __func__); + + s3c24xx_register_baseclocks(xtal); + s5p_register_clocks(xtal); + + exynos5_register_clocks(); + exynos5_setup_clocks(); +} + #define COMBINER_ENABLE_SET 0x0 #define COMBINER_ENABLE_CLEAR 0x4 #define COMBINER_INT_STATUS 0xC @@ -354,7 +450,14 @@ static struct irq_chip combiner_chip = { static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq) { - if (combiner_nr >= MAX_COMBINER_NR) + unsigned int max_nr; + + if (soc_is_exynos5250()) + max_nr = EXYNOS5_MAX_COMBINER_NR; + else + max_nr = EXYNOS4_MAX_COMBINER_NR; + + if (combiner_nr >= max_nr) BUG(); if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0) BUG(); @@ -365,8 +468,14 @@ static void __init combiner_init(unsigned int combiner_nr, void __iomem *base, unsigned int irq_start) { unsigned int i; + unsigned int max_nr; - if (combiner_nr >= MAX_COMBINER_NR) + if (soc_is_exynos5250()) + max_nr = EXYNOS5_MAX_COMBINER_NR; + else + max_nr = EXYNOS4_MAX_COMBINER_NR; + + if (combiner_nr >= max_nr) BUG(); combiner_data[combiner_nr].base = base; @@ -409,8 +518,28 @@ void __init exynos4_init_irq(void) of_irq_init(exynos4_dt_irq_match); #endif - for (irq = 0; irq < MAX_COMBINER_NR; irq++) { + for (irq = 0; irq < EXYNOS4_MAX_COMBINER_NR; irq++) { + + combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq), + COMBINER_IRQ(irq, 0)); + combiner_cascade_irq(irq, IRQ_SPI(irq)); + } + + /* + * The parameters of s5p_init_irq() are for VIC init. + * Theses parameters should be NULL and 0 because EXYNOS4 + * uses GIC instead of VIC. 
+ */ + s5p_init_irq(NULL, 0); +} + +void __init exynos5_init_irq(void) +{ + int irq; + + gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU); + for (irq = 0; irq < EXYNOS5_MAX_COMBINER_NR; irq++) { combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq), COMBINER_IRQ(irq, 0)); combiner_cascade_irq(irq, IRQ_SPI(irq)); @@ -429,19 +558,34 @@ struct bus_type exynos4_subsys = { .dev_name = "exynos4-core", }; +struct bus_type exynos5_subsys = { + .name = "exynos5-core", + .dev_name = "exynos5-core", +}; + static struct device exynos4_dev = { .bus = &exynos4_subsys, }; -static int __init exynos4_core_init(void) +static struct device exynos5_dev = { + .bus = &exynos5_subsys, +}; + +static int __init exynos_core_init(void) { - return subsys_system_register(&exynos4_subsys, NULL); + if (soc_is_exynos5250()) + return subsys_system_register(&exynos5_subsys, NULL); + else + return subsys_system_register(&exynos4_subsys, NULL); } -core_initcall(exynos4_core_init); +core_initcall(exynos_core_init); #ifdef CONFIG_CACHE_L2X0 static int __init exynos4_l2x0_cache_init(void) { + if (soc_is_exynos5250()) + return 0; + int ret; ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK); if (!ret) { @@ -486,19 +630,47 @@ static int __init exynos4_l2x0_cache_init(void) l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK); return 0; } - early_initcall(exynos4_l2x0_cache_init); #endif -int __init exynos_init(void) +static int __init exynos5_l2_cache_init(void) +{ + unsigned int val; + + if (!soc_is_exynos5250()) + return 0; + + asm volatile("mrc p15, 0, %0, c1, c0, 0\n" + "bic %0, %0, #(1 << 2)\n" /* cache disable */ + "mcr p15, 0, %0, c1, c0, 0\n" + "mrc p15, 1, %0, c9, c0, 2\n" + : "=r"(val)); + + val |= (1 << 9) | (1 << 5) | (2 << 6) | (2 << 0); + + asm volatile("mcr p15, 1, %0, c9, c0, 2\n" : : "r"(val)); + asm volatile("mrc p15, 0, %0, c1, c0, 0\n" + "orr %0, %0, #(1 << 2)\n" /* cache enable */ + "mcr p15, 0, %0, c1, c0, 0\n" + : : "r"(val)); + + return 0; +} +early_initcall(exynos5_l2_cache_init); + +static int __init exynos_init(void) { printk(KERN_INFO "EXYNOS: Initializing architecture\n"); - return device_register(&exynos4_dev); + + if (soc_is_exynos5250()) + return device_register(&exynos5_dev); + else + return device_register(&exynos4_dev); } /* uart registration process */ -void __init exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no) +static void __init exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no) { struct s3c2410_uartcfg *tcfg = cfg; u32 ucnt; @@ -506,69 +678,138 @@ void __init exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no) for (ucnt = 0; ucnt < no; ucnt++, tcfg++) tcfg->has_fracval = 1; - s3c24xx_init_uartdevs("exynos4210-uart", s5p_uart_resources, cfg, no); + if (soc_is_exynos5250()) + s3c24xx_init_uartdevs("exynos4210-uart", exynos5_uart_resources, cfg, no); + else + s3c24xx_init_uartdevs("exynos4210-uart", exynos4_uart_resources, cfg, no); } +static void __iomem *exynos_eint_base; + static DEFINE_SPINLOCK(eint_lock); static unsigned int eint0_15_data[16]; -static unsigned int exynos4_get_irq_nr(unsigned int number) +static inline int exynos4_irq_to_gpio(unsigned int irq) { - u32 ret = 0; + if (irq < IRQ_EINT(0)) + return -EINVAL; - switch (number) { - case 0 ... 3: - ret = (number + IRQ_EINT0); - break; - case 4 ... 7: - ret = (number + (IRQ_EINT4 - 4)); - break; - case 8 ... 
15: - ret = (number + (IRQ_EINT8 - 8)); - break; - default: - printk(KERN_ERR "number available : %d\n", number); - } + irq -= IRQ_EINT(0); + if (irq < 8) + return EXYNOS4_GPX0(irq); + + irq -= 8; + if (irq < 8) + return EXYNOS4_GPX1(irq); + + irq -= 8; + if (irq < 8) + return EXYNOS4_GPX2(irq); + + irq -= 8; + if (irq < 8) + return EXYNOS4_GPX3(irq); + + return -EINVAL; +} - return ret; +static inline int exynos5_irq_to_gpio(unsigned int irq) +{ + if (irq < IRQ_EINT(0)) + return -EINVAL; + + irq -= IRQ_EINT(0); + if (irq < 8) + return EXYNOS5_GPX0(irq); + + irq -= 8; + if (irq < 8) + return EXYNOS5_GPX1(irq); + + irq -= 8; + if (irq < 8) + return EXYNOS5_GPX2(irq); + + irq -= 8; + if (irq < 8) + return EXYNOS5_GPX3(irq); + + return -EINVAL; } -static inline void exynos4_irq_eint_mask(struct irq_data *data) +static unsigned int exynos4_eint0_15_src_int[16] = { + EXYNOS4_IRQ_EINT0, + EXYNOS4_IRQ_EINT1, + EXYNOS4_IRQ_EINT2, + EXYNOS4_IRQ_EINT3, + EXYNOS4_IRQ_EINT4, + EXYNOS4_IRQ_EINT5, + EXYNOS4_IRQ_EINT6, + EXYNOS4_IRQ_EINT7, + EXYNOS4_IRQ_EINT8, + EXYNOS4_IRQ_EINT9, + EXYNOS4_IRQ_EINT10, + EXYNOS4_IRQ_EINT11, + EXYNOS4_IRQ_EINT12, + EXYNOS4_IRQ_EINT13, + EXYNOS4_IRQ_EINT14, + EXYNOS4_IRQ_EINT15, +}; + +static unsigned int exynos5_eint0_15_src_int[16] = { + EXYNOS5_IRQ_EINT0, + EXYNOS5_IRQ_EINT1, + EXYNOS5_IRQ_EINT2, + EXYNOS5_IRQ_EINT3, + EXYNOS5_IRQ_EINT4, + EXYNOS5_IRQ_EINT5, + EXYNOS5_IRQ_EINT6, + EXYNOS5_IRQ_EINT7, + EXYNOS5_IRQ_EINT8, + EXYNOS5_IRQ_EINT9, + EXYNOS5_IRQ_EINT10, + EXYNOS5_IRQ_EINT11, + EXYNOS5_IRQ_EINT12, + EXYNOS5_IRQ_EINT13, + EXYNOS5_IRQ_EINT14, + EXYNOS5_IRQ_EINT15, +}; +static inline void exynos_irq_eint_mask(struct irq_data *data) { u32 mask; spin_lock(&eint_lock); - mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq))); - mask |= eint_irq_to_bit(data->irq); - __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq))); + mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq)); + mask |= EINT_OFFSET_BIT(data->irq); + __raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq)); spin_unlock(&eint_lock); } -static void exynos4_irq_eint_unmask(struct irq_data *data) +static void exynos_irq_eint_unmask(struct irq_data *data) { u32 mask; spin_lock(&eint_lock); - mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq))); - mask &= ~(eint_irq_to_bit(data->irq)); - __raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq))); + mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq)); + mask &= ~(EINT_OFFSET_BIT(data->irq)); + __raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq)); spin_unlock(&eint_lock); } -static inline void exynos4_irq_eint_ack(struct irq_data *data) +static inline void exynos_irq_eint_ack(struct irq_data *data) { - __raw_writel(eint_irq_to_bit(data->irq), - S5P_EINT_PEND(EINT_REG_NR(data->irq))); + __raw_writel(EINT_OFFSET_BIT(data->irq), + EINT_PEND(exynos_eint_base, data->irq)); } -static void exynos4_irq_eint_maskack(struct irq_data *data) +static void exynos_irq_eint_maskack(struct irq_data *data) { - exynos4_irq_eint_mask(data); - exynos4_irq_eint_ack(data); + exynos_irq_eint_mask(data); + exynos_irq_eint_ack(data); } -static int exynos4_irq_eint_set_type(struct irq_data *data, unsigned int type) +static int exynos_irq_eint_set_type(struct irq_data *data, unsigned int type) { int offs = EINT_OFFSET(data->irq); int shift; @@ -605,39 +846,27 @@ static int exynos4_irq_eint_set_type(struct irq_data *data, unsigned int type) mask = 0x7 << shift; spin_lock(&eint_lock); - ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq))); + ctrl = 
__raw_readl(EINT_CON(exynos_eint_base, data->irq)); ctrl &= ~mask; ctrl |= newvalue << shift; - __raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq))); + __raw_writel(ctrl, EINT_CON(exynos_eint_base, data->irq)); spin_unlock(&eint_lock); - switch (offs) { - case 0 ... 7: - s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE); - break; - case 8 ... 15: - s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE); - break; - case 16 ... 23: - s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE); - break; - case 24 ... 31: - s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE); - break; - default: - printk(KERN_ERR "No such irq number %d", offs); - } + if (soc_is_exynos5250()) + s3c_gpio_cfgpin(exynos5_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf)); + else + s3c_gpio_cfgpin(exynos4_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf)); return 0; } -static struct irq_chip exynos4_irq_eint = { - .name = "exynos4-eint", - .irq_mask = exynos4_irq_eint_mask, - .irq_unmask = exynos4_irq_eint_unmask, - .irq_mask_ack = exynos4_irq_eint_maskack, - .irq_ack = exynos4_irq_eint_ack, - .irq_set_type = exynos4_irq_eint_set_type, +static struct irq_chip exynos_irq_eint = { + .name = "exynos-eint", + .irq_mask = exynos_irq_eint_mask, + .irq_unmask = exynos_irq_eint_unmask, + .irq_mask_ack = exynos_irq_eint_maskack, + .irq_ack = exynos_irq_eint_ack, + .irq_set_type = exynos_irq_eint_set_type, #ifdef CONFIG_PM .irq_set_wake = s3c_irqext_wake, #endif @@ -652,12 +881,12 @@ static struct irq_chip exynos4_irq_eint = { * * Each EINT pend/mask registers handle eight of them. */ -static inline void exynos4_irq_demux_eint(unsigned int start) +static inline void exynos_irq_demux_eint(unsigned int start) { unsigned int irq; - u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start))); - u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start))); + u32 status = __raw_readl(EINT_PEND(exynos_eint_base, start)); + u32 mask = __raw_readl(EINT_MASK(exynos_eint_base, start)); status &= ~mask; status &= 0xff; @@ -669,16 +898,16 @@ static inline void exynos4_irq_demux_eint(unsigned int start) } } -static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc) +static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc) { struct irq_chip *chip = irq_get_chip(irq); chained_irq_enter(chip, desc); - exynos4_irq_demux_eint(IRQ_EINT(16)); - exynos4_irq_demux_eint(IRQ_EINT(24)); + exynos_irq_demux_eint(IRQ_EINT(16)); + exynos_irq_demux_eint(IRQ_EINT(24)); chained_irq_exit(chip, desc); } -static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc) +static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc) { u32 *irq_data = irq_get_handler_data(irq); struct irq_chip *chip = irq_get_chip(irq); @@ -695,27 +924,44 @@ static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc) chained_irq_exit(chip, desc); } -static int __init exynos4_init_irq_eint(void) +static int __init exynos_init_irq_eint(void) { int irq; + if (soc_is_exynos5250()) + exynos_eint_base = ioremap(EXYNOS5_PA_GPIO1, SZ_4K); + else + exynos_eint_base = ioremap(EXYNOS4_PA_GPIO2, SZ_4K); + + if (exynos_eint_base == NULL) { + pr_err("unable to ioremap for EINT base address\n"); + return -ENOMEM; + } + for (irq = 0 ; irq <= 31 ; irq++) { - irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint, + irq_set_chip_and_handler(IRQ_EINT(irq), &exynos_irq_eint, handle_level_irq); set_irq_flags(IRQ_EINT(irq), IRQF_VALID); } - irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31); + 
irq_set_chained_handler(EXYNOS_IRQ_EINT16_31, exynos_irq_demux_eint16_31); for (irq = 0 ; irq <= 15 ; irq++) { eint0_15_data[irq] = IRQ_EINT(irq); - irq_set_handler_data(exynos4_get_irq_nr(irq), - &eint0_15_data[irq]); - irq_set_chained_handler(exynos4_get_irq_nr(irq), - exynos4_irq_eint0_15); + if (soc_is_exynos5250()) { + irq_set_handler_data(exynos5_eint0_15_src_int[irq], + &eint0_15_data[irq]); + irq_set_chained_handler(exynos5_eint0_15_src_int[irq], + exynos_irq_eint0_15); + } else { + irq_set_handler_data(exynos4_eint0_15_src_int[irq], + &eint0_15_data[irq]); + irq_set_chained_handler(exynos4_eint0_15_src_int[irq], + exynos_irq_eint0_15); + } } return 0; } -arch_initcall(exynos4_init_irq_eint); +arch_initcall(exynos_init_irq_eint); diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h index 8c1efe692c20..677b5467df18 100644 --- a/arch/arm/mach-exynos/common.h +++ b/arch/arm/mach-exynos/common.h @@ -12,39 +12,44 @@ #ifndef __ARCH_ARM_MACH_EXYNOS_COMMON_H #define __ARCH_ARM_MACH_EXYNOS_COMMON_H +extern struct sys_timer exynos4_timer; + void exynos_init_io(struct map_desc *mach_desc, int size); void exynos4_init_irq(void); +void exynos5_init_irq(void); +void exynos4_restart(char mode, const char *cmd); +void exynos5_restart(char mode, const char *cmd); #ifdef CONFIG_ARCH_EXYNOS4 void exynos4_register_clocks(void); void exynos4_setup_clocks(void); -void exynos4210_register_clocks(void); -void exynos4212_register_clocks(void); - #else #define exynos4_register_clocks() #define exynos4_setup_clocks() +#endif -#define exynos4210_register_clocks() -#define exynos4212_register_clocks() +#ifdef CONFIG_ARCH_EXYNOS5 +void exynos5_register_clocks(void); +void exynos5_setup_clocks(void); + +#else +#define exynos5_register_clocks() +#define exynos5_setup_clocks() #endif -void exynos4_restart(char mode, const char *cmd); +#ifdef CONFIG_CPU_EXYNOS4210 +void exynos4210_register_clocks(void); -extern struct sys_timer exynos4_timer; +#else +#define exynos4210_register_clocks() +#endif -#ifdef CONFIG_ARCH_EXYNOS -extern int exynos_init(void); -extern void exynos4_map_io(void); -extern void exynos4_init_clocks(int xtal); -extern void exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no); +#ifdef CONFIG_SOC_EXYNOS4212 +void exynos4212_register_clocks(void); #else -#define exynos4_init_clocks NULL -#define exynos4_init_uarts NULL -#define exynos4_map_io NULL -#define exynos_init NULL +#define exynos4212_register_clocks() #endif #endif /* __ARCH_ARM_MACH_EXYNOS_COMMON_H */ diff --git a/arch/arm/mach-exynos/dev-ahci.c b/arch/arm/mach-exynos/dev-ahci.c index f57a3de8e1d2..50ce5b0adcf1 100644 --- a/arch/arm/mach-exynos/dev-ahci.c +++ b/arch/arm/mach-exynos/dev-ahci.c @@ -242,8 +242,8 @@ static struct resource exynos4_ahci_resource[] = { .flags = IORESOURCE_MEM, }, [1] = { - .start = IRQ_SATA, - .end = IRQ_SATA, + .start = EXYNOS4_IRQ_SATA, + .end = EXYNOS4_IRQ_SATA, .flags = IORESOURCE_IRQ, }, }; diff --git a/arch/arm/mach-exynos/dev-audio.c b/arch/arm/mach-exynos/dev-audio.c index 5a9f9c2e53bf..7199e1ae79b4 100644 --- a/arch/arm/mach-exynos/dev-audio.c +++ b/arch/arm/mach-exynos/dev-audio.c @@ -304,8 +304,8 @@ static struct resource exynos4_ac97_resource[] = { .flags = IORESOURCE_DMA, }, [4] = { - .start = IRQ_AC97, - .end = IRQ_AC97, + .start = EXYNOS4_IRQ_AC97, + .end = EXYNOS4_IRQ_AC97, .flags = IORESOURCE_IRQ, }, }; diff --git a/arch/arm/mach-exynos/dev-uart.c b/arch/arm/mach-exynos/dev-uart.c new file mode 100644 index 000000000000..2e85c022fd16 --- /dev/null +++ 
b/arch/arm/mach-exynos/dev-uart.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Base EXYNOS UART resource and device definitions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> + +#include <asm/mach/arch.h> +#include <asm/mach/irq.h> +#include <mach/hardware.h> +#include <mach/map.h> + +#include <plat/devs.h> + +#define EXYNOS_UART_RESOURCE(_series, _nr) \ +static struct resource exynos##_series##_uart##_nr##_resource[] = { \ + [0] = DEFINE_RES_MEM(EXYNOS##_series##_PA_UART##_nr, EXYNOS##_series##_SZ_UART), \ + [1] = DEFINE_RES_IRQ(EXYNOS##_series##_IRQ_UART##_nr), \ +}; + +EXYNOS_UART_RESOURCE(4, 0) +EXYNOS_UART_RESOURCE(4, 1) +EXYNOS_UART_RESOURCE(4, 2) +EXYNOS_UART_RESOURCE(4, 3) + +struct s3c24xx_uart_resources exynos4_uart_resources[] __initdata = { + [0] = { + .resources = exynos4_uart0_resource, + .nr_resources = ARRAY_SIZE(exynos4_uart0_resource), + }, + [1] = { + .resources = exynos4_uart1_resource, + .nr_resources = ARRAY_SIZE(exynos4_uart1_resource), + }, + [2] = { + .resources = exynos4_uart2_resource, + .nr_resources = ARRAY_SIZE(exynos4_uart2_resource), + }, + [3] = { + .resources = exynos4_uart3_resource, + .nr_resources = ARRAY_SIZE(exynos4_uart3_resource), + }, +}; + +EXYNOS_UART_RESOURCE(5, 0) +EXYNOS_UART_RESOURCE(5, 1) +EXYNOS_UART_RESOURCE(5, 2) +EXYNOS_UART_RESOURCE(5, 3) + +struct s3c24xx_uart_resources exynos5_uart_resources[] __initdata = { + [0] = { + .resources = exynos5_uart0_resource, + .nr_resources = ARRAY_SIZE(exynos5_uart0_resource), + }, + [1] = { + .resources = exynos5_uart1_resource, + .nr_resources = ARRAY_SIZE(exynos5_uart1_resource), + }, + [2] = { + .resources = exynos5_uart2_resource, + .nr_resources = ARRAY_SIZE(exynos5_uart2_resource), + }, + [3] = { + .resources = exynos5_uart3_resource, + .nr_resources = ARRAY_SIZE(exynos5_uart3_resource), + }, +}; diff --git a/arch/arm/mach-exynos/dma.c b/arch/arm/mach-exynos/dma.c index 13607c4328b3..3983abee4264 100644 --- a/arch/arm/mach-exynos/dma.c +++ b/arch/arm/mach-exynos/dma.c @@ -108,7 +108,7 @@ static u8 exynos4212_pdma0_peri[] = { struct dma_pl330_platdata exynos4_pdma0_pdata; static AMBA_AHB_DEVICE(exynos4_pdma0, "dma-pl330.0", 0x00041330, - EXYNOS4_PA_PDMA0, {IRQ_PDMA0}, &exynos4_pdma0_pdata); + EXYNOS4_PA_PDMA0, {EXYNOS4_IRQ_PDMA0}, &exynos4_pdma0_pdata); static u8 exynos4210_pdma1_peri[] = { DMACH_PCM0_RX, @@ -174,7 +174,7 @@ static u8 exynos4212_pdma1_peri[] = { static struct dma_pl330_platdata exynos4_pdma1_pdata; static AMBA_AHB_DEVICE(exynos4_pdma1, "dma-pl330.1", 0x00041330, - EXYNOS4_PA_PDMA1, {IRQ_PDMA1}, &exynos4_pdma1_pdata); + EXYNOS4_PA_PDMA1, {EXYNOS4_IRQ_PDMA1}, &exynos4_pdma1_pdata); static u8 mdma_peri[] = { DMACH_MTOM_0, @@ -193,7 +193,7 @@ static struct dma_pl330_platdata exynos4_mdma1_pdata = { }; static AMBA_AHB_DEVICE(exynos4_mdma1, "dma-pl330.2", 0x00041330, - EXYNOS4_PA_MDMA1, {IRQ_MDMA1}, &exynos4_mdma1_pdata); + EXYNOS4_PA_MDMA1, {EXYNOS4_IRQ_MDMA1}, &exynos4_mdma1_pdata); static int __init exynos4_dma_init(void) { diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c index dd1ad55524c9..9c17a0a43858 100644 --- a/arch/arm/mach-exynos/hotplug.c +++ 
b/arch/arm/mach-exynos/hotplug.c @@ -16,6 +16,7 @@ #include <linux/io.h> #include <asm/cacheflush.h> +#include <asm/cp15.h> #include <asm/smp_plat.h> #include <mach/regs-pmu.h> diff --git a/arch/arm/mach-exynos/include/mach/debug-macro.S b/arch/arm/mach-exynos/include/mach/debug-macro.S index 6cacf16a67a6..6c857ff0b5d8 100644 --- a/arch/arm/mach-exynos/include/mach/debug-macro.S +++ b/arch/arm/mach-exynos/include/mach/debug-macro.S @@ -21,8 +21,13 @@ */ .macro addruart, rp, rv, tmp - ldr \rp, = S3C_PA_UART - ldr \rv, = S3C_VA_UART + mov \rp, #0x10000000 + ldr \rp, [\rp, #0x0] + and \rp, \rp, #0xf00000 + teq \rp, #0x500000 @@ EXYNOS5 + ldreq \rp, =EXYNOS5_PA_UART + movne \rp, #EXYNOS4_PA_UART @@ EXYNOS4 + ldr \rv, =S3C_VA_UART #if CONFIG_DEBUG_S3C_UART != 0 add \rp, \rp, #(0x10000 * CONFIG_DEBUG_S3C_UART) add \rv, \rv, #(0x10000 * CONFIG_DEBUG_S3C_UART) diff --git a/arch/arm/mach-exynos/include/mach/gpio.h b/arch/arm/mach-exynos/include/mach/gpio.h index 80523ca9bb49..d7498afe036a 100644 --- a/arch/arm/mach-exynos/include/mach/gpio.h +++ b/arch/arm/mach-exynos/include/mach/gpio.h @@ -1,9 +1,8 @@ -/* linux/arch/arm/mach-exynos4/include/mach/gpio.h - * - * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. +/* + * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * EXYNOS4 - GPIO lib support + * EXYNOS - GPIO lib support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -13,9 +12,13 @@ #ifndef __ASM_ARCH_GPIO_H #define __ASM_ARCH_GPIO_H __FILE__ -/* Practically, GPIO banks up to GPZ are the configurable gpio banks */ +/* Macro for EXYNOS GPIO numbering */ + +#define EXYNOS_GPIO_NEXT(__gpio) \ + ((__gpio##_START) + (__gpio##_NR) + CONFIG_S3C_GPIO_SPACE + 1) + +/* EXYNOS4 GPIO bank sizes */ -/* GPIO bank sizes */ #define EXYNOS4_GPIO_A0_NR (8) #define EXYNOS4_GPIO_A1_NR (6) #define EXYNOS4_GPIO_B_NR (8) @@ -54,52 +57,50 @@ #define EXYNOS4_GPIO_Y6_NR (8) #define EXYNOS4_GPIO_Z_NR (7) -/* GPIO bank numbers */ - -#define EXYNOS4_GPIO_NEXT(__gpio) \ - ((__gpio##_START) + (__gpio##_NR) + CONFIG_S3C_GPIO_SPACE + 1) +/* EXYNOS4 GPIO bank numbers */ -enum s5p_gpio_number { +enum exynos4_gpio_number { EXYNOS4_GPIO_A0_START = 0, - EXYNOS4_GPIO_A1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_A0), - EXYNOS4_GPIO_B_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_A1), - EXYNOS4_GPIO_C0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_B), - EXYNOS4_GPIO_C1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_C0), - EXYNOS4_GPIO_D0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_C1), - EXYNOS4_GPIO_D1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_D0), - EXYNOS4_GPIO_E0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_D1), - EXYNOS4_GPIO_E1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_E0), - EXYNOS4_GPIO_E2_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_E1), - EXYNOS4_GPIO_E3_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_E2), - EXYNOS4_GPIO_E4_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_E3), - EXYNOS4_GPIO_F0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_E4), - EXYNOS4_GPIO_F1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_F0), - EXYNOS4_GPIO_F2_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_F1), - EXYNOS4_GPIO_F3_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_F2), - EXYNOS4_GPIO_J0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_F3), - EXYNOS4_GPIO_J1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_J0), - EXYNOS4_GPIO_K0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_J1), - EXYNOS4_GPIO_K1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_K0), - EXYNOS4_GPIO_K2_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_K1), - EXYNOS4_GPIO_K3_START = 
EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_K2), - EXYNOS4_GPIO_L0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_K3), - EXYNOS4_GPIO_L1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_L0), - EXYNOS4_GPIO_L2_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_L1), - EXYNOS4_GPIO_X0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_L2), - EXYNOS4_GPIO_X1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_X0), - EXYNOS4_GPIO_X2_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_X1), - EXYNOS4_GPIO_X3_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_X2), - EXYNOS4_GPIO_Y0_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_X3), - EXYNOS4_GPIO_Y1_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_Y0), - EXYNOS4_GPIO_Y2_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_Y1), - EXYNOS4_GPIO_Y3_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_Y2), - EXYNOS4_GPIO_Y4_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_Y3), - EXYNOS4_GPIO_Y5_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_Y4), - EXYNOS4_GPIO_Y6_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_Y5), - EXYNOS4_GPIO_Z_START = EXYNOS4_GPIO_NEXT(EXYNOS4_GPIO_Y6), + EXYNOS4_GPIO_A1_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_A0), + EXYNOS4_GPIO_B_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_A1), + EXYNOS4_GPIO_C0_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_B), + EXYNOS4_GPIO_C1_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_C0), + EXYNOS4_GPIO_D0_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_C1), + EXYNOS4_GPIO_D1_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_D0), + EXYNOS4_GPIO_E0_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_D1), + EXYNOS4_GPIO_E1_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_E0), + EXYNOS4_GPIO_E2_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_E1), + EXYNOS4_GPIO_E3_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_E2), + EXYNOS4_GPIO_E4_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_E3), + EXYNOS4_GPIO_F0_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_E4), + EXYNOS4_GPIO_F1_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_F0), + EXYNOS4_GPIO_F2_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_F1), + EXYNOS4_GPIO_F3_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_F2), + EXYNOS4_GPIO_J0_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_F3), + EXYNOS4_GPIO_J1_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_J0), + EXYNOS4_GPIO_K0_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_J1), + EXYNOS4_GPIO_K1_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_K0), + EXYNOS4_GPIO_K2_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_K1), + EXYNOS4_GPIO_K3_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_K2), + EXYNOS4_GPIO_L0_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_K3), + EXYNOS4_GPIO_L1_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_L0), + EXYNOS4_GPIO_L2_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_L1), + EXYNOS4_GPIO_X0_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_L2), + EXYNOS4_GPIO_X1_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_X0), + EXYNOS4_GPIO_X2_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_X1), + EXYNOS4_GPIO_X3_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_X2), + EXYNOS4_GPIO_Y0_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_X3), + EXYNOS4_GPIO_Y1_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_Y0), + EXYNOS4_GPIO_Y2_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_Y1), + EXYNOS4_GPIO_Y3_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_Y2), + EXYNOS4_GPIO_Y4_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_Y3), + EXYNOS4_GPIO_Y5_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_Y4), + EXYNOS4_GPIO_Y6_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_Y5), + EXYNOS4_GPIO_Z_START = EXYNOS_GPIO_NEXT(EXYNOS4_GPIO_Y6), }; /* EXYNOS4 GPIO number definitions */ + #define EXYNOS4_GPA0(_nr) (EXYNOS4_GPIO_A0_START + (_nr)) #define EXYNOS4_GPA1(_nr) (EXYNOS4_GPIO_A1_START + (_nr)) #define EXYNOS4_GPB(_nr) (EXYNOS4_GPIO_B_START + (_nr)) @@ -139,11 +140,147 @@ enum s5p_gpio_number { #define EXYNOS4_GPZ(_nr) (EXYNOS4_GPIO_Z_START + (_nr)) /* the end of the EXYNOS4 specific gpios */ + #define EXYNOS4_GPIO_END 
(EXYNOS4_GPZ(EXYNOS4_GPIO_Z_NR) + 1) -#define S3C_GPIO_END EXYNOS4_GPIO_END -/* define the number of gpios we need to the one after the GPZ() range */ -#define ARCH_NR_GPIOS (EXYNOS4_GPZ(EXYNOS4_GPIO_Z_NR) + \ - CONFIG_SAMSUNG_GPIO_EXTRA + 1) +/* EXYNOS5 GPIO bank sizes */ + +#define EXYNOS5_GPIO_A0_NR (8) +#define EXYNOS5_GPIO_A1_NR (6) +#define EXYNOS5_GPIO_A2_NR (8) +#define EXYNOS5_GPIO_B0_NR (5) +#define EXYNOS5_GPIO_B1_NR (5) +#define EXYNOS5_GPIO_B2_NR (4) +#define EXYNOS5_GPIO_B3_NR (4) +#define EXYNOS5_GPIO_C0_NR (7) +#define EXYNOS5_GPIO_C1_NR (7) +#define EXYNOS5_GPIO_C2_NR (7) +#define EXYNOS5_GPIO_C3_NR (7) +#define EXYNOS5_GPIO_D0_NR (8) +#define EXYNOS5_GPIO_D1_NR (8) +#define EXYNOS5_GPIO_Y0_NR (6) +#define EXYNOS5_GPIO_Y1_NR (4) +#define EXYNOS5_GPIO_Y2_NR (6) +#define EXYNOS5_GPIO_Y3_NR (8) +#define EXYNOS5_GPIO_Y4_NR (8) +#define EXYNOS5_GPIO_Y5_NR (8) +#define EXYNOS5_GPIO_Y6_NR (8) +#define EXYNOS5_GPIO_X0_NR (8) +#define EXYNOS5_GPIO_X1_NR (8) +#define EXYNOS5_GPIO_X2_NR (8) +#define EXYNOS5_GPIO_X3_NR (8) +#define EXYNOS5_GPIO_E0_NR (8) +#define EXYNOS5_GPIO_E1_NR (2) +#define EXYNOS5_GPIO_F0_NR (4) +#define EXYNOS5_GPIO_F1_NR (4) +#define EXYNOS5_GPIO_G0_NR (8) +#define EXYNOS5_GPIO_G1_NR (8) +#define EXYNOS5_GPIO_G2_NR (2) +#define EXYNOS5_GPIO_H0_NR (4) +#define EXYNOS5_GPIO_H1_NR (8) +#define EXYNOS5_GPIO_V0_NR (8) +#define EXYNOS5_GPIO_V1_NR (8) +#define EXYNOS5_GPIO_V2_NR (8) +#define EXYNOS5_GPIO_V3_NR (8) +#define EXYNOS5_GPIO_V4_NR (2) +#define EXYNOS5_GPIO_Z_NR (7) + +/* EXYNOS5 GPIO bank numbers */ + +enum exynos5_gpio_number { + EXYNOS5_GPIO_A0_START = 0, + EXYNOS5_GPIO_A1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_A0), + EXYNOS5_GPIO_A2_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_A1), + EXYNOS5_GPIO_B0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_A2), + EXYNOS5_GPIO_B1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_B0), + EXYNOS5_GPIO_B2_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_B1), + EXYNOS5_GPIO_B3_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_B2), + EXYNOS5_GPIO_C0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_B3), + EXYNOS5_GPIO_C1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C0), + EXYNOS5_GPIO_C2_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C1), + EXYNOS5_GPIO_C3_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C2), + EXYNOS5_GPIO_D0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C3), + EXYNOS5_GPIO_D1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D0), + EXYNOS5_GPIO_Y0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D1), + EXYNOS5_GPIO_Y1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_Y0), + EXYNOS5_GPIO_Y2_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_Y1), + EXYNOS5_GPIO_Y3_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_Y2), + EXYNOS5_GPIO_Y4_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_Y3), + EXYNOS5_GPIO_Y5_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_Y4), + EXYNOS5_GPIO_Y6_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_Y5), + EXYNOS5_GPIO_X0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_Y6), + EXYNOS5_GPIO_X1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_X0), + EXYNOS5_GPIO_X2_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_X1), + EXYNOS5_GPIO_X3_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_X2), + EXYNOS5_GPIO_E0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_X3), + EXYNOS5_GPIO_E1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_E0), + EXYNOS5_GPIO_F0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_E1), + EXYNOS5_GPIO_F1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_F0), + EXYNOS5_GPIO_G0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_F1), + EXYNOS5_GPIO_G1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_G0), + EXYNOS5_GPIO_G2_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_G1), + EXYNOS5_GPIO_H0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_G2), + EXYNOS5_GPIO_H1_START = 
EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_H0), + EXYNOS5_GPIO_V0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_H1), + EXYNOS5_GPIO_V1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_V0), + EXYNOS5_GPIO_V2_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_V1), + EXYNOS5_GPIO_V3_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_V2), + EXYNOS5_GPIO_V4_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_V3), + EXYNOS5_GPIO_Z_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_V4), +}; + +/* EXYNOS5 GPIO number definitions */ + +#define EXYNOS5_GPA0(_nr) (EXYNOS5_GPIO_A0_START + (_nr)) +#define EXYNOS5_GPA1(_nr) (EXYNOS5_GPIO_A1_START + (_nr)) +#define EXYNOS5_GPA2(_nr) (EXYNOS5_GPIO_A2_START + (_nr)) +#define EXYNOS5_GPB0(_nr) (EXYNOS5_GPIO_B0_START + (_nr)) +#define EXYNOS5_GPB1(_nr) (EXYNOS5_GPIO_B1_START + (_nr)) +#define EXYNOS5_GPB2(_nr) (EXYNOS5_GPIO_B2_START + (_nr)) +#define EXYNOS5_GPB3(_nr) (EXYNOS5_GPIO_B3_START + (_nr)) +#define EXYNOS5_GPC0(_nr) (EXYNOS5_GPIO_C0_START + (_nr)) +#define EXYNOS5_GPC1(_nr) (EXYNOS5_GPIO_C1_START + (_nr)) +#define EXYNOS5_GPC2(_nr) (EXYNOS5_GPIO_C2_START + (_nr)) +#define EXYNOS5_GPC3(_nr) (EXYNOS5_GPIO_C3_START + (_nr)) +#define EXYNOS5_GPD0(_nr) (EXYNOS5_GPIO_D0_START + (_nr)) +#define EXYNOS5_GPD1(_nr) (EXYNOS5_GPIO_D1_START + (_nr)) +#define EXYNOS5_GPY0(_nr) (EXYNOS5_GPIO_Y0_START + (_nr)) +#define EXYNOS5_GPY1(_nr) (EXYNOS5_GPIO_Y1_START + (_nr)) +#define EXYNOS5_GPY2(_nr) (EXYNOS5_GPIO_Y2_START + (_nr)) +#define EXYNOS5_GPY3(_nr) (EXYNOS5_GPIO_Y3_START + (_nr)) +#define EXYNOS5_GPY4(_nr) (EXYNOS5_GPIO_Y4_START + (_nr)) +#define EXYNOS5_GPY5(_nr) (EXYNOS5_GPIO_Y5_START + (_nr)) +#define EXYNOS5_GPY6(_nr) (EXYNOS5_GPIO_Y6_START + (_nr)) +#define EXYNOS5_GPX0(_nr) (EXYNOS5_GPIO_X0_START + (_nr)) +#define EXYNOS5_GPX1(_nr) (EXYNOS5_GPIO_X1_START + (_nr)) +#define EXYNOS5_GPX2(_nr) (EXYNOS5_GPIO_X2_START + (_nr)) +#define EXYNOS5_GPX3(_nr) (EXYNOS5_GPIO_X3_START + (_nr)) +#define EXYNOS5_GPE0(_nr) (EXYNOS5_GPIO_E0_START + (_nr)) +#define EXYNOS5_GPE1(_nr) (EXYNOS5_GPIO_E1_START + (_nr)) +#define EXYNOS5_GPF0(_nr) (EXYNOS5_GPIO_F0_START + (_nr)) +#define EXYNOS5_GPF1(_nr) (EXYNOS5_GPIO_F1_START + (_nr)) +#define EXYNOS5_GPG0(_nr) (EXYNOS5_GPIO_G0_START + (_nr)) +#define EXYNOS5_GPG1(_nr) (EXYNOS5_GPIO_G1_START + (_nr)) +#define EXYNOS5_GPG2(_nr) (EXYNOS5_GPIO_G2_START + (_nr)) +#define EXYNOS5_GPH0(_nr) (EXYNOS5_GPIO_H0_START + (_nr)) +#define EXYNOS5_GPH1(_nr) (EXYNOS5_GPIO_H1_START + (_nr)) +#define EXYNOS5_GPV0(_nr) (EXYNOS5_GPIO_V0_START + (_nr)) +#define EXYNOS5_GPV1(_nr) (EXYNOS5_GPIO_V1_START + (_nr)) +#define EXYNOS5_GPV2(_nr) (EXYNOS5_GPIO_V2_START + (_nr)) +#define EXYNOS5_GPV3(_nr) (EXYNOS5_GPIO_V3_START + (_nr)) +#define EXYNOS5_GPV4(_nr) (EXYNOS5_GPIO_V4_START + (_nr)) +#define EXYNOS5_GPZ(_nr) (EXYNOS5_GPIO_Z_START + (_nr)) + +/* the end of the EXYNOS5 specific gpios */ + +#define EXYNOS5_GPIO_END (EXYNOS5_GPZ(EXYNOS5_GPIO_Z_NR) + 1) + +/* actually, EXYNOS5_GPIO_END is bigger than EXYNOS4 */ + +#define S3C_GPIO_END (EXYNOS5_GPIO_END) + +/* define the number of gpios */ + +#define ARCH_NR_GPIOS (CONFIG_SAMSUNG_GPIO_EXTRA + S3C_GPIO_END) #endif /* __ASM_ARCH_GPIO_H */ diff --git a/arch/arm/mach-exynos/include/mach/irqs.h b/arch/arm/mach-exynos/include/mach/irqs.h index 1d401c957835..9bee8535d9e0 100644 --- a/arch/arm/mach-exynos/include/mach/irqs.h +++ b/arch/arm/mach-exynos/include/mach/irqs.h @@ -1,9 +1,8 @@ -/* linux/arch/arm/mach-exynos4/include/mach/irqs.h - * - * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. +/* + * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd. 
* http://www.samsung.com * - * EXYNOS4 - IRQ definitions + * EXYNOS - IRQ definitions * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -17,160 +16,450 @@ /* PPI: Private Peripheral Interrupt */ -#define IRQ_PPI(x) (x+16) - -#define IRQ_MCT_LOCALTIMER IRQ_PPI(12) +#define IRQ_PPI(x) (x + 16) /* SPI: Shared Peripheral Interrupt */ -#define IRQ_SPI(x) (x+32) - -#define IRQ_EINT0 IRQ_SPI(16) -#define IRQ_EINT1 IRQ_SPI(17) -#define IRQ_EINT2 IRQ_SPI(18) -#define IRQ_EINT3 IRQ_SPI(19) -#define IRQ_EINT4 IRQ_SPI(20) -#define IRQ_EINT5 IRQ_SPI(21) -#define IRQ_EINT6 IRQ_SPI(22) -#define IRQ_EINT7 IRQ_SPI(23) -#define IRQ_EINT8 IRQ_SPI(24) -#define IRQ_EINT9 IRQ_SPI(25) -#define IRQ_EINT10 IRQ_SPI(26) -#define IRQ_EINT11 IRQ_SPI(27) -#define IRQ_EINT12 IRQ_SPI(28) -#define IRQ_EINT13 IRQ_SPI(29) -#define IRQ_EINT14 IRQ_SPI(30) -#define IRQ_EINT15 IRQ_SPI(31) -#define IRQ_EINT16_31 IRQ_SPI(32) - -#define IRQ_MDMA0 IRQ_SPI(33) -#define IRQ_MDMA1 IRQ_SPI(34) -#define IRQ_PDMA0 IRQ_SPI(35) -#define IRQ_PDMA1 IRQ_SPI(36) -#define IRQ_TIMER0_VIC IRQ_SPI(37) -#define IRQ_TIMER1_VIC IRQ_SPI(38) -#define IRQ_TIMER2_VIC IRQ_SPI(39) -#define IRQ_TIMER3_VIC IRQ_SPI(40) -#define IRQ_TIMER4_VIC IRQ_SPI(41) -#define IRQ_MCT_L0 IRQ_SPI(42) -#define IRQ_WDT IRQ_SPI(43) -#define IRQ_RTC_ALARM IRQ_SPI(44) -#define IRQ_RTC_TIC IRQ_SPI(45) -#define IRQ_GPIO_XB IRQ_SPI(46) -#define IRQ_GPIO_XA IRQ_SPI(47) -#define IRQ_MCT_L1 IRQ_SPI(48) - -#define IRQ_UART0 IRQ_SPI(52) -#define IRQ_UART1 IRQ_SPI(53) -#define IRQ_UART2 IRQ_SPI(54) -#define IRQ_UART3 IRQ_SPI(55) -#define IRQ_UART4 IRQ_SPI(56) -#define IRQ_MCT_G0 IRQ_SPI(57) -#define IRQ_IIC IRQ_SPI(58) -#define IRQ_IIC1 IRQ_SPI(59) -#define IRQ_IIC2 IRQ_SPI(60) -#define IRQ_IIC3 IRQ_SPI(61) -#define IRQ_IIC4 IRQ_SPI(62) -#define IRQ_IIC5 IRQ_SPI(63) -#define IRQ_IIC6 IRQ_SPI(64) -#define IRQ_IIC7 IRQ_SPI(65) -#define IRQ_SPI0 IRQ_SPI(66) -#define IRQ_SPI1 IRQ_SPI(67) -#define IRQ_SPI2 IRQ_SPI(68) - -#define IRQ_USB_HOST IRQ_SPI(70) -#define IRQ_USB_HSOTG IRQ_SPI(71) -#define IRQ_MODEM_IF IRQ_SPI(72) -#define IRQ_HSMMC0 IRQ_SPI(73) -#define IRQ_HSMMC1 IRQ_SPI(74) -#define IRQ_HSMMC2 IRQ_SPI(75) -#define IRQ_HSMMC3 IRQ_SPI(76) -#define IRQ_DWMCI IRQ_SPI(77) - -#define IRQ_MIPI_CSIS0 IRQ_SPI(78) -#define IRQ_MIPI_CSIS1 IRQ_SPI(80) - -#define IRQ_ONENAND_AUDI IRQ_SPI(82) -#define IRQ_ROTATOR IRQ_SPI(83) -#define IRQ_FIMC0 IRQ_SPI(84) -#define IRQ_FIMC1 IRQ_SPI(85) -#define IRQ_FIMC2 IRQ_SPI(86) -#define IRQ_FIMC3 IRQ_SPI(87) -#define IRQ_JPEG IRQ_SPI(88) -#define IRQ_2D IRQ_SPI(89) -#define IRQ_PCIE IRQ_SPI(90) - -#define IRQ_MIXER IRQ_SPI(91) -#define IRQ_HDMI IRQ_SPI(92) -#define IRQ_IIC_HDMIPHY IRQ_SPI(93) -#define IRQ_MFC IRQ_SPI(94) -#define IRQ_SDO IRQ_SPI(95) - -#define IRQ_AUDIO_SS IRQ_SPI(96) -#define IRQ_I2S0 IRQ_SPI(97) -#define IRQ_I2S1 IRQ_SPI(98) -#define IRQ_I2S2 IRQ_SPI(99) -#define IRQ_AC97 IRQ_SPI(100) - -#define IRQ_SPDIF IRQ_SPI(104) -#define IRQ_ADC0 IRQ_SPI(105) -#define IRQ_PEN0 IRQ_SPI(106) -#define IRQ_ADC1 IRQ_SPI(107) -#define IRQ_PEN1 IRQ_SPI(108) -#define IRQ_KEYPAD IRQ_SPI(109) -#define IRQ_PMU IRQ_SPI(110) -#define IRQ_GPS IRQ_SPI(111) -#define IRQ_INTFEEDCTRL_SSS IRQ_SPI(112) -#define IRQ_SLIMBUS IRQ_SPI(113) - -#define IRQ_TSI IRQ_SPI(115) -#define IRQ_SATA IRQ_SPI(116) - -#define MAX_IRQ_IN_COMBINER 8 -#define COMBINER_GROUP(x) ((x) * MAX_IRQ_IN_COMBINER + IRQ_SPI(128)) -#define COMBINER_IRQ(x, y) (COMBINER_GROUP(x) + y) - -#define 
IRQ_SYSMMU_MDMA0_0 COMBINER_IRQ(4, 0) -#define IRQ_SYSMMU_SSS_0 COMBINER_IRQ(4, 1) -#define IRQ_SYSMMU_FIMC0_0 COMBINER_IRQ(4, 2) -#define IRQ_SYSMMU_FIMC1_0 COMBINER_IRQ(4, 3) -#define IRQ_SYSMMU_FIMC2_0 COMBINER_IRQ(4, 4) -#define IRQ_SYSMMU_FIMC3_0 COMBINER_IRQ(4, 5) -#define IRQ_SYSMMU_JPEG_0 COMBINER_IRQ(4, 6) -#define IRQ_SYSMMU_2D_0 COMBINER_IRQ(4, 7) - -#define IRQ_SYSMMU_ROTATOR_0 COMBINER_IRQ(5, 0) -#define IRQ_SYSMMU_MDMA1_0 COMBINER_IRQ(5, 1) -#define IRQ_SYSMMU_LCD0_M0_0 COMBINER_IRQ(5, 2) -#define IRQ_SYSMMU_LCD1_M1_0 COMBINER_IRQ(5, 3) -#define IRQ_SYSMMU_TV_M0_0 COMBINER_IRQ(5, 4) -#define IRQ_SYSMMU_MFC_M0_0 COMBINER_IRQ(5, 5) -#define IRQ_SYSMMU_MFC_M1_0 COMBINER_IRQ(5, 6) -#define IRQ_SYSMMU_PCIE_0 COMBINER_IRQ(5, 7) - -#define IRQ_FIMD0_FIFO COMBINER_IRQ(11, 0) -#define IRQ_FIMD0_VSYNC COMBINER_IRQ(11, 1) -#define IRQ_FIMD0_SYSTEM COMBINER_IRQ(11, 2) - -#define MAX_COMBINER_NR 16 - -#define IRQ_ADC IRQ_ADC0 -#define IRQ_TC IRQ_PEN0 - -#define S5P_IRQ_EINT_BASE COMBINER_IRQ(MAX_COMBINER_NR, 0) - -#define S5P_EINT_BASE1 (S5P_IRQ_EINT_BASE + 0) -#define S5P_EINT_BASE2 (S5P_IRQ_EINT_BASE + 16) - -/* optional GPIO interrupts */ -#define S5P_GPIOINT_BASE (S5P_IRQ_EINT_BASE + 32) -#define IRQ_GPIO1_NR_GROUPS 16 -#define IRQ_GPIO2_NR_GROUPS 9 -#define IRQ_GPIO_END (S5P_GPIOINT_BASE + S5P_GPIOINT_COUNT) - -#define IRQ_TIMER_BASE (IRQ_GPIO_END + 64) +#define IRQ_SPI(x) (x + 32) + +/* COMBINER */ + +#define MAX_IRQ_IN_COMBINER 8 +#define COMBINER_GROUP(x) ((x) * MAX_IRQ_IN_COMBINER + IRQ_SPI(128)) +#define COMBINER_IRQ(x, y) (COMBINER_GROUP(x) + y) + +/* For EXYNOS4 and EXYNOS5 */ + +#define EXYNOS_IRQ_MCT_LOCALTIMER IRQ_PPI(12) + +#define EXYNOS_IRQ_EINT16_31 IRQ_SPI(32) + +/* For EXYNOS4 SoCs */ + +#define EXYNOS4_IRQ_EINT0 IRQ_SPI(16) +#define EXYNOS4_IRQ_EINT1 IRQ_SPI(17) +#define EXYNOS4_IRQ_EINT2 IRQ_SPI(18) +#define EXYNOS4_IRQ_EINT3 IRQ_SPI(19) +#define EXYNOS4_IRQ_EINT4 IRQ_SPI(20) +#define EXYNOS4_IRQ_EINT5 IRQ_SPI(21) +#define EXYNOS4_IRQ_EINT6 IRQ_SPI(22) +#define EXYNOS4_IRQ_EINT7 IRQ_SPI(23) +#define EXYNOS4_IRQ_EINT8 IRQ_SPI(24) +#define EXYNOS4_IRQ_EINT9 IRQ_SPI(25) +#define EXYNOS4_IRQ_EINT10 IRQ_SPI(26) +#define EXYNOS4_IRQ_EINT11 IRQ_SPI(27) +#define EXYNOS4_IRQ_EINT12 IRQ_SPI(28) +#define EXYNOS4_IRQ_EINT13 IRQ_SPI(29) +#define EXYNOS4_IRQ_EINT14 IRQ_SPI(30) +#define EXYNOS4_IRQ_EINT15 IRQ_SPI(31) + +#define EXYNOS4_IRQ_MDMA0 IRQ_SPI(33) +#define EXYNOS4_IRQ_MDMA1 IRQ_SPI(34) +#define EXYNOS4_IRQ_PDMA0 IRQ_SPI(35) +#define EXYNOS4_IRQ_PDMA1 IRQ_SPI(36) +#define EXYNOS4_IRQ_TIMER0_VIC IRQ_SPI(37) +#define EXYNOS4_IRQ_TIMER1_VIC IRQ_SPI(38) +#define EXYNOS4_IRQ_TIMER2_VIC IRQ_SPI(39) +#define EXYNOS4_IRQ_TIMER3_VIC IRQ_SPI(40) +#define EXYNOS4_IRQ_TIMER4_VIC IRQ_SPI(41) +#define EXYNOS4_IRQ_MCT_L0 IRQ_SPI(42) +#define EXYNOS4_IRQ_WDT IRQ_SPI(43) +#define EXYNOS4_IRQ_RTC_ALARM IRQ_SPI(44) +#define EXYNOS4_IRQ_RTC_TIC IRQ_SPI(45) +#define EXYNOS4_IRQ_GPIO_XB IRQ_SPI(46) +#define EXYNOS4_IRQ_GPIO_XA IRQ_SPI(47) +#define EXYNOS4_IRQ_MCT_L1 IRQ_SPI(48) + +#define EXYNOS4_IRQ_UART0 IRQ_SPI(52) +#define EXYNOS4_IRQ_UART1 IRQ_SPI(53) +#define EXYNOS4_IRQ_UART2 IRQ_SPI(54) +#define EXYNOS4_IRQ_UART3 IRQ_SPI(55) +#define EXYNOS4_IRQ_UART4 IRQ_SPI(56) +#define EXYNOS4_IRQ_MCT_G0 IRQ_SPI(57) +#define EXYNOS4_IRQ_IIC IRQ_SPI(58) +#define EXYNOS4_IRQ_IIC1 IRQ_SPI(59) +#define EXYNOS4_IRQ_IIC2 IRQ_SPI(60) +#define EXYNOS4_IRQ_IIC3 IRQ_SPI(61) +#define EXYNOS4_IRQ_IIC4 IRQ_SPI(62) +#define EXYNOS4_IRQ_IIC5 IRQ_SPI(63) +#define EXYNOS4_IRQ_IIC6 IRQ_SPI(64) +#define 
EXYNOS4_IRQ_IIC7 IRQ_SPI(65) +#define EXYNOS4_IRQ_SPI0 IRQ_SPI(66) +#define EXYNOS4_IRQ_SPI1 IRQ_SPI(67) +#define EXYNOS4_IRQ_SPI2 IRQ_SPI(68) + +#define EXYNOS4_IRQ_USB_HOST IRQ_SPI(70) +#define EXYNOS4_IRQ_USB_HSOTG IRQ_SPI(71) +#define EXYNOS4_IRQ_MODEM_IF IRQ_SPI(72) +#define EXYNOS4_IRQ_HSMMC0 IRQ_SPI(73) +#define EXYNOS4_IRQ_HSMMC1 IRQ_SPI(74) +#define EXYNOS4_IRQ_HSMMC2 IRQ_SPI(75) +#define EXYNOS4_IRQ_HSMMC3 IRQ_SPI(76) +#define EXYNOS4_IRQ_DWMCI IRQ_SPI(77) + +#define EXYNOS4_IRQ_MIPI_CSIS0 IRQ_SPI(78) +#define EXYNOS4_IRQ_MIPI_CSIS1 IRQ_SPI(80) + +#define EXYNOS4_IRQ_ONENAND_AUDI IRQ_SPI(82) +#define EXYNOS4_IRQ_ROTATOR IRQ_SPI(83) +#define EXYNOS4_IRQ_FIMC0 IRQ_SPI(84) +#define EXYNOS4_IRQ_FIMC1 IRQ_SPI(85) +#define EXYNOS4_IRQ_FIMC2 IRQ_SPI(86) +#define EXYNOS4_IRQ_FIMC3 IRQ_SPI(87) +#define EXYNOS4_IRQ_JPEG IRQ_SPI(88) +#define EXYNOS4_IRQ_2D IRQ_SPI(89) +#define EXYNOS4_IRQ_PCIE IRQ_SPI(90) + +#define EXYNOS4_IRQ_MIXER IRQ_SPI(91) +#define EXYNOS4_IRQ_HDMI IRQ_SPI(92) +#define EXYNOS4_IRQ_IIC_HDMIPHY IRQ_SPI(93) +#define EXYNOS4_IRQ_MFC IRQ_SPI(94) +#define EXYNOS4_IRQ_SDO IRQ_SPI(95) + +#define EXYNOS4_IRQ_AUDIO_SS IRQ_SPI(96) +#define EXYNOS4_IRQ_I2S0 IRQ_SPI(97) +#define EXYNOS4_IRQ_I2S1 IRQ_SPI(98) +#define EXYNOS4_IRQ_I2S2 IRQ_SPI(99) +#define EXYNOS4_IRQ_AC97 IRQ_SPI(100) + +#define EXYNOS4_IRQ_SPDIF IRQ_SPI(104) +#define EXYNOS4_IRQ_ADC0 IRQ_SPI(105) +#define EXYNOS4_IRQ_PEN0 IRQ_SPI(106) +#define EXYNOS4_IRQ_ADC1 IRQ_SPI(107) +#define EXYNOS4_IRQ_PEN1 IRQ_SPI(108) +#define EXYNOS4_IRQ_KEYPAD IRQ_SPI(109) +#define EXYNOS4_IRQ_PMU IRQ_SPI(110) +#define EXYNOS4_IRQ_GPS IRQ_SPI(111) +#define EXYNOS4_IRQ_INTFEEDCTRL_SSS IRQ_SPI(112) +#define EXYNOS4_IRQ_SLIMBUS IRQ_SPI(113) + +#define EXYNOS4_IRQ_TSI IRQ_SPI(115) +#define EXYNOS4_IRQ_SATA IRQ_SPI(116) + +#define EXYNOS4_IRQ_SYSMMU_MDMA0_0 COMBINER_IRQ(4, 0) +#define EXYNOS4_IRQ_SYSMMU_SSS_0 COMBINER_IRQ(4, 1) +#define EXYNOS4_IRQ_SYSMMU_FIMC0_0 COMBINER_IRQ(4, 2) +#define EXYNOS4_IRQ_SYSMMU_FIMC1_0 COMBINER_IRQ(4, 3) +#define EXYNOS4_IRQ_SYSMMU_FIMC2_0 COMBINER_IRQ(4, 4) +#define EXYNOS4_IRQ_SYSMMU_FIMC3_0 COMBINER_IRQ(4, 5) +#define EXYNOS4_IRQ_SYSMMU_JPEG_0 COMBINER_IRQ(4, 6) +#define EXYNOS4_IRQ_SYSMMU_2D_0 COMBINER_IRQ(4, 7) + +#define EXYNOS4_IRQ_SYSMMU_ROTATOR_0 COMBINER_IRQ(5, 0) +#define EXYNOS4_IRQ_SYSMMU_MDMA1_0 COMBINER_IRQ(5, 1) +#define EXYNOS4_IRQ_SYSMMU_LCD0_M0_0 COMBINER_IRQ(5, 2) +#define EXYNOS4_IRQ_SYSMMU_LCD1_M1_0 COMBINER_IRQ(5, 3) +#define EXYNOS4_IRQ_SYSMMU_TV_M0_0 COMBINER_IRQ(5, 4) +#define EXYNOS4_IRQ_SYSMMU_MFC_M0_0 COMBINER_IRQ(5, 5) +#define EXYNOS4_IRQ_SYSMMU_MFC_M1_0 COMBINER_IRQ(5, 6) +#define EXYNOS4_IRQ_SYSMMU_PCIE_0 COMBINER_IRQ(5, 7) + +#define EXYNOS4_IRQ_FIMD0_FIFO COMBINER_IRQ(11, 0) +#define EXYNOS4_IRQ_FIMD0_VSYNC COMBINER_IRQ(11, 1) +#define EXYNOS4_IRQ_FIMD0_SYSTEM COMBINER_IRQ(11, 2) + +#define EXYNOS4_MAX_COMBINER_NR 16 + +#define EXYNOS4_IRQ_GPIO1_NR_GROUPS 16 +#define EXYNOS4_IRQ_GPIO2_NR_GROUPS 9 + +/* + * For Compatibility: + * the default is for EXYNOS4, and + * for exynos5, should be re-mapped at function + */ + +#define IRQ_TIMER0_VIC EXYNOS4_IRQ_TIMER0_VIC +#define IRQ_TIMER1_VIC EXYNOS4_IRQ_TIMER1_VIC +#define IRQ_TIMER2_VIC EXYNOS4_IRQ_TIMER2_VIC +#define IRQ_TIMER3_VIC EXYNOS4_IRQ_TIMER3_VIC +#define IRQ_TIMER4_VIC EXYNOS4_IRQ_TIMER4_VIC + +#define IRQ_WDT EXYNOS4_IRQ_WDT +#define IRQ_RTC_ALARM EXYNOS4_IRQ_RTC_ALARM +#define IRQ_RTC_TIC EXYNOS4_IRQ_RTC_TIC +#define IRQ_GPIO_XB EXYNOS4_IRQ_GPIO_XB +#define IRQ_GPIO_XA EXYNOS4_IRQ_GPIO_XA + +#define IRQ_IIC 
EXYNOS4_IRQ_IIC +#define IRQ_IIC1 EXYNOS4_IRQ_IIC1 +#define IRQ_IIC3 EXYNOS4_IRQ_IIC3 +#define IRQ_IIC5 EXYNOS4_IRQ_IIC5 +#define IRQ_IIC6 EXYNOS4_IRQ_IIC6 +#define IRQ_IIC7 EXYNOS4_IRQ_IIC7 + +#define IRQ_USB_HOST EXYNOS4_IRQ_USB_HOST + +#define IRQ_HSMMC0 EXYNOS4_IRQ_HSMMC0 +#define IRQ_HSMMC1 EXYNOS4_IRQ_HSMMC1 +#define IRQ_HSMMC2 EXYNOS4_IRQ_HSMMC2 +#define IRQ_HSMMC3 EXYNOS4_IRQ_HSMMC3 + +#define IRQ_MIPI_CSIS0 EXYNOS4_IRQ_MIPI_CSIS0 + +#define IRQ_ONENAND_AUDI EXYNOS4_IRQ_ONENAND_AUDI + +#define IRQ_FIMC0 EXYNOS4_IRQ_FIMC0 +#define IRQ_FIMC1 EXYNOS4_IRQ_FIMC1 +#define IRQ_FIMC2 EXYNOS4_IRQ_FIMC2 +#define IRQ_FIMC3 EXYNOS4_IRQ_FIMC3 +#define IRQ_JPEG EXYNOS4_IRQ_JPEG +#define IRQ_2D EXYNOS4_IRQ_2D + +#define IRQ_MIXER EXYNOS4_IRQ_MIXER +#define IRQ_HDMI EXYNOS4_IRQ_HDMI +#define IRQ_IIC_HDMIPHY EXYNOS4_IRQ_IIC_HDMIPHY +#define IRQ_MFC EXYNOS4_IRQ_MFC +#define IRQ_SDO EXYNOS4_IRQ_SDO + +#define IRQ_ADC EXYNOS4_IRQ_ADC0 +#define IRQ_TC EXYNOS4_IRQ_PEN0 + +#define IRQ_KEYPAD EXYNOS4_IRQ_KEYPAD +#define IRQ_PMU EXYNOS4_IRQ_PMU + +#define IRQ_SYSMMU_MDMA0_0 EXYNOS4_IRQ_SYSMMU_MDMA0_0 +#define IRQ_SYSMMU_SSS_0 EXYNOS4_IRQ_SYSMMU_SSS_0 +#define IRQ_SYSMMU_FIMC0_0 EXYNOS4_IRQ_SYSMMU_FIMC0_0 +#define IRQ_SYSMMU_FIMC1_0 EXYNOS4_IRQ_SYSMMU_FIMC1_0 +#define IRQ_SYSMMU_FIMC2_0 EXYNOS4_IRQ_SYSMMU_FIMC2_0 +#define IRQ_SYSMMU_FIMC3_0 EXYNOS4_IRQ_SYSMMU_FIMC3_0 +#define IRQ_SYSMMU_JPEG_0 EXYNOS4_IRQ_SYSMMU_JPEG_0 +#define IRQ_SYSMMU_2D_0 EXYNOS4_IRQ_SYSMMU_2D_0 + +#define IRQ_SYSMMU_ROTATOR_0 EXYNOS4_IRQ_SYSMMU_ROTATOR_0 +#define IRQ_SYSMMU_MDMA1_0 EXYNOS4_IRQ_SYSMMU_MDMA1_0 +#define IRQ_SYSMMU_LCD0_M0_0 EXYNOS4_IRQ_SYSMMU_LCD0_M0_0 +#define IRQ_SYSMMU_LCD1_M1_0 EXYNOS4_IRQ_SYSMMU_LCD1_M1_0 +#define IRQ_SYSMMU_TV_M0_0 EXYNOS4_IRQ_SYSMMU_TV_M0_0 +#define IRQ_SYSMMU_MFC_M0_0 EXYNOS4_IRQ_SYSMMU_MFC_M0_0 +#define IRQ_SYSMMU_MFC_M1_0 EXYNOS4_IRQ_SYSMMU_MFC_M1_0 +#define IRQ_SYSMMU_PCIE_0 EXYNOS4_IRQ_SYSMMU_PCIE_0 + +#define IRQ_FIMD0_FIFO EXYNOS4_IRQ_FIMD0_FIFO +#define IRQ_FIMD0_VSYNC EXYNOS4_IRQ_FIMD0_VSYNC +#define IRQ_FIMD0_SYSTEM EXYNOS4_IRQ_FIMD0_SYSTEM + +#define IRQ_GPIO1_NR_GROUPS EXYNOS4_IRQ_GPIO1_NR_GROUPS +#define IRQ_GPIO2_NR_GROUPS EXYNOS4_IRQ_GPIO2_NR_GROUPS + +/* For EXYNOS5 SoCs */ + +#define EXYNOS5_IRQ_MDMA0 IRQ_SPI(33) +#define EXYNOS5_IRQ_PDMA0 IRQ_SPI(34) +#define EXYNOS5_IRQ_PDMA1 IRQ_SPI(35) +#define EXYNOS5_IRQ_TIMER0_VIC IRQ_SPI(36) +#define EXYNOS5_IRQ_TIMER1_VIC IRQ_SPI(37) +#define EXYNOS5_IRQ_TIMER2_VIC IRQ_SPI(38) +#define EXYNOS5_IRQ_TIMER3_VIC IRQ_SPI(39) +#define EXYNOS5_IRQ_TIMER4_VIC IRQ_SPI(40) +#define EXYNOS5_IRQ_RTIC IRQ_SPI(41) +#define EXYNOS5_IRQ_WDT IRQ_SPI(42) +#define EXYNOS5_IRQ_RTC_ALARM IRQ_SPI(43) +#define EXYNOS5_IRQ_RTC_TIC IRQ_SPI(44) +#define EXYNOS5_IRQ_GPIO_XB IRQ_SPI(45) +#define EXYNOS5_IRQ_GPIO_XA IRQ_SPI(46) +#define EXYNOS5_IRQ_GPIO IRQ_SPI(47) +#define EXYNOS5_IRQ_IEM_IEC IRQ_SPI(48) +#define EXYNOS5_IRQ_IEM_APC IRQ_SPI(49) +#define EXYNOS5_IRQ_GPIO_C2C IRQ_SPI(50) +#define EXYNOS5_IRQ_UART0 IRQ_SPI(51) +#define EXYNOS5_IRQ_UART1 IRQ_SPI(52) +#define EXYNOS5_IRQ_UART2 IRQ_SPI(53) +#define EXYNOS5_IRQ_UART3 IRQ_SPI(54) +#define EXYNOS5_IRQ_UART4 IRQ_SPI(55) +#define EXYNOS5_IRQ_IIC IRQ_SPI(56) +#define EXYNOS5_IRQ_IIC1 IRQ_SPI(57) +#define EXYNOS5_IRQ_IIC2 IRQ_SPI(58) +#define EXYNOS5_IRQ_IIC3 IRQ_SPI(59) +#define EXYNOS5_IRQ_IIC4 IRQ_SPI(60) +#define EXYNOS5_IRQ_IIC5 IRQ_SPI(61) +#define EXYNOS5_IRQ_IIC6 IRQ_SPI(62) +#define EXYNOS5_IRQ_IIC7 IRQ_SPI(63) +#define EXYNOS5_IRQ_IIC_HDMIPHY IRQ_SPI(64) +#define EXYNOS5_IRQ_TMU IRQ_SPI(65) +#define 
EXYNOS5_IRQ_FIQ_0 IRQ_SPI(66) +#define EXYNOS5_IRQ_FIQ_1 IRQ_SPI(67) +#define EXYNOS5_IRQ_SPI0 IRQ_SPI(68) +#define EXYNOS5_IRQ_SPI1 IRQ_SPI(69) +#define EXYNOS5_IRQ_SPI2 IRQ_SPI(70) +#define EXYNOS5_IRQ_USB_HOST IRQ_SPI(71) +#define EXYNOS5_IRQ_USB3_DRD IRQ_SPI(72) +#define EXYNOS5_IRQ_MIPI_HSI IRQ_SPI(73) +#define EXYNOS5_IRQ_USB_HSOTG IRQ_SPI(74) +#define EXYNOS5_IRQ_HSMMC0 IRQ_SPI(75) +#define EXYNOS5_IRQ_HSMMC1 IRQ_SPI(76) +#define EXYNOS5_IRQ_HSMMC2 IRQ_SPI(77) +#define EXYNOS5_IRQ_HSMMC3 IRQ_SPI(78) +#define EXYNOS5_IRQ_MIPICSI0 IRQ_SPI(79) +#define EXYNOS5_IRQ_MIPICSI1 IRQ_SPI(80) +#define EXYNOS5_IRQ_EFNFCON_DMA_ABORT IRQ_SPI(81) +#define EXYNOS5_IRQ_MIPIDSI0 IRQ_SPI(82) +#define EXYNOS5_IRQ_ROTATOR IRQ_SPI(84) +#define EXYNOS5_IRQ_GSC0 IRQ_SPI(85) +#define EXYNOS5_IRQ_GSC1 IRQ_SPI(86) +#define EXYNOS5_IRQ_GSC2 IRQ_SPI(87) +#define EXYNOS5_IRQ_GSC3 IRQ_SPI(88) +#define EXYNOS5_IRQ_JPEG IRQ_SPI(89) +#define EXYNOS5_IRQ_EFNFCON_DMA IRQ_SPI(90) +#define EXYNOS5_IRQ_2D IRQ_SPI(91) +#define EXYNOS5_IRQ_SFMC0 IRQ_SPI(92) +#define EXYNOS5_IRQ_SFMC1 IRQ_SPI(93) +#define EXYNOS5_IRQ_MIXER IRQ_SPI(94) +#define EXYNOS5_IRQ_HDMI IRQ_SPI(95) +#define EXYNOS5_IRQ_MFC IRQ_SPI(96) +#define EXYNOS5_IRQ_AUDIO_SS IRQ_SPI(97) +#define EXYNOS5_IRQ_I2S0 IRQ_SPI(98) +#define EXYNOS5_IRQ_I2S1 IRQ_SPI(99) +#define EXYNOS5_IRQ_I2S2 IRQ_SPI(100) +#define EXYNOS5_IRQ_AC97 IRQ_SPI(101) +#define EXYNOS5_IRQ_PCM0 IRQ_SPI(102) +#define EXYNOS5_IRQ_PCM1 IRQ_SPI(103) +#define EXYNOS5_IRQ_PCM2 IRQ_SPI(104) +#define EXYNOS5_IRQ_SPDIF IRQ_SPI(105) +#define EXYNOS5_IRQ_ADC0 IRQ_SPI(106) + +#define EXYNOS5_IRQ_SATA_PHY IRQ_SPI(108) +#define EXYNOS5_IRQ_SATA_PMEMREQ IRQ_SPI(109) +#define EXYNOS5_IRQ_CAM_C IRQ_SPI(110) +#define EXYNOS5_IRQ_EAGLE_PMU IRQ_SPI(111) +#define EXYNOS5_IRQ_INTFEEDCTRL_SSS IRQ_SPI(112) +#define EXYNOS5_IRQ_DP1_INTP1 IRQ_SPI(113) +#define EXYNOS5_IRQ_CEC IRQ_SPI(114) +#define EXYNOS5_IRQ_SATA IRQ_SPI(115) +#define EXYNOS5_IRQ_NFCON IRQ_SPI(116) + +#define EXYNOS5_IRQ_MMC44 IRQ_SPI(123) +#define EXYNOS5_IRQ_MDMA1 IRQ_SPI(124) +#define EXYNOS5_IRQ_FIMC_LITE0 IRQ_SPI(125) +#define EXYNOS5_IRQ_FIMC_LITE1 IRQ_SPI(126) +#define EXYNOS5_IRQ_RP_TIMER IRQ_SPI(127) + +#define EXYNOS5_IRQ_PMU COMBINER_IRQ(1, 2) +#define EXYNOS5_IRQ_PMU_CPU1 COMBINER_IRQ(1, 6) + +#define EXYNOS5_IRQ_SYSMMU_GSC0_0 COMBINER_IRQ(2, 0) +#define EXYNOS5_IRQ_SYSMMU_GSC0_1 COMBINER_IRQ(2, 1) +#define EXYNOS5_IRQ_SYSMMU_GSC1_0 COMBINER_IRQ(2, 2) +#define EXYNOS5_IRQ_SYSMMU_GSC1_1 COMBINER_IRQ(2, 3) +#define EXYNOS5_IRQ_SYSMMU_GSC2_0 COMBINER_IRQ(2, 4) +#define EXYNOS5_IRQ_SYSMMU_GSC2_1 COMBINER_IRQ(2, 5) +#define EXYNOS5_IRQ_SYSMMU_GSC3_0 COMBINER_IRQ(2, 6) +#define EXYNOS5_IRQ_SYSMMU_GSC3_1 COMBINER_IRQ(2, 7) + +#define EXYNOS5_IRQ_SYSMMU_FIMD1_0 COMBINER_IRQ(3, 2) +#define EXYNOS5_IRQ_SYSMMU_FIMD1_1 COMBINER_IRQ(3, 3) +#define EXYNOS5_IRQ_SYSMMU_LITE0_0 COMBINER_IRQ(3, 4) +#define EXYNOS5_IRQ_SYSMMU_LITE0_1 COMBINER_IRQ(3, 5) +#define EXYNOS5_IRQ_SYSMMU_SCALERPISP_0 COMBINER_IRQ(3, 6) +#define EXYNOS5_IRQ_SYSMMU_SCALERPISP_1 COMBINER_IRQ(3, 7) + +#define EXYNOS5_IRQ_SYSMMU_ROTATOR_0 COMBINER_IRQ(4, 0) +#define EXYNOS5_IRQ_SYSMMU_ROTATOR_1 COMBINER_IRQ(4, 1) +#define EXYNOS5_IRQ_SYSMMU_JPEG_0 COMBINER_IRQ(4, 2) +#define EXYNOS5_IRQ_SYSMMU_JPEG_1 COMBINER_IRQ(4, 3) + +#define EXYNOS5_IRQ_SYSMMU_FD_0 COMBINER_IRQ(5, 0) +#define EXYNOS5_IRQ_SYSMMU_FD_1 COMBINER_IRQ(5, 1) +#define EXYNOS5_IRQ_SYSMMU_SCALERCISP_0 COMBINER_IRQ(5, 2) +#define EXYNOS5_IRQ_SYSMMU_SCALERCISP_1 COMBINER_IRQ(5, 3) +#define EXYNOS5_IRQ_SYSMMU_MCUISP_0 
COMBINER_IRQ(5, 4) +#define EXYNOS5_IRQ_SYSMMU_MCUISP_1 COMBINER_IRQ(5, 5) +#define EXYNOS5_IRQ_SYSMMU_3DNR_0 COMBINER_IRQ(5, 6) +#define EXYNOS5_IRQ_SYSMMU_3DNR_1 COMBINER_IRQ(5, 7) + +#define EXYNOS5_IRQ_SYSMMU_ARM_0 COMBINER_IRQ(6, 0) +#define EXYNOS5_IRQ_SYSMMU_ARM_1 COMBINER_IRQ(6, 1) +#define EXYNOS5_IRQ_SYSMMU_MFC_L_0 COMBINER_IRQ(6, 2) +#define EXYNOS5_IRQ_SYSMMU_MFC_L_1 COMBINER_IRQ(6, 3) +#define EXYNOS5_IRQ_SYSMMU_RTIC_0 COMBINER_IRQ(6, 4) +#define EXYNOS5_IRQ_SYSMMU_RTIC_1 COMBINER_IRQ(6, 5) +#define EXYNOS5_IRQ_SYSMMU_SSS_0 COMBINER_IRQ(6, 6) +#define EXYNOS5_IRQ_SYSMMU_SSS_1 COMBINER_IRQ(6, 7) + +#define EXYNOS5_IRQ_SYSMMU_MDMA0_0 COMBINER_IRQ(7, 0) +#define EXYNOS5_IRQ_SYSMMU_MDMA0_1 COMBINER_IRQ(7, 1) +#define EXYNOS5_IRQ_SYSMMU_MDMA1_0 COMBINER_IRQ(7, 2) +#define EXYNOS5_IRQ_SYSMMU_MDMA1_1 COMBINER_IRQ(7, 3) +#define EXYNOS5_IRQ_SYSMMU_TV_0 COMBINER_IRQ(7, 4) +#define EXYNOS5_IRQ_SYSMMU_TV_1 COMBINER_IRQ(7, 5) +#define EXYNOS5_IRQ_SYSMMU_GPSX_0 COMBINER_IRQ(7, 6) +#define EXYNOS5_IRQ_SYSMMU_GPSX_1 COMBINER_IRQ(7, 7) + +#define EXYNOS5_IRQ_SYSMMU_MFC_R_0 COMBINER_IRQ(8, 5) +#define EXYNOS5_IRQ_SYSMMU_MFC_R_1 COMBINER_IRQ(8, 6) + +#define EXYNOS5_IRQ_SYSMMU_DIS1_0 COMBINER_IRQ(9, 4) +#define EXYNOS5_IRQ_SYSMMU_DIS1_1 COMBINER_IRQ(9, 5) + +#define EXYNOS5_IRQ_DP COMBINER_IRQ(10, 3) +#define EXYNOS5_IRQ_SYSMMU_DIS0_0 COMBINER_IRQ(10, 4) +#define EXYNOS5_IRQ_SYSMMU_DIS0_1 COMBINER_IRQ(10, 5) +#define EXYNOS5_IRQ_SYSMMU_ISP_0 COMBINER_IRQ(10, 6) +#define EXYNOS5_IRQ_SYSMMU_ISP_1 COMBINER_IRQ(10, 7) + +#define EXYNOS5_IRQ_SYSMMU_ODC_0 COMBINER_IRQ(11, 0) +#define EXYNOS5_IRQ_SYSMMU_ODC_1 COMBINER_IRQ(11, 1) +#define EXYNOS5_IRQ_SYSMMU_DRC_0 COMBINER_IRQ(11, 6) +#define EXYNOS5_IRQ_SYSMMU_DRC_1 COMBINER_IRQ(11, 7) + +#define EXYNOS5_IRQ_FIMD1_FIFO COMBINER_IRQ(18, 4) +#define EXYNOS5_IRQ_FIMD1_VSYNC COMBINER_IRQ(18, 5) +#define EXYNOS5_IRQ_FIMD1_SYSTEM COMBINER_IRQ(18, 6) + +#define EXYNOS5_IRQ_EINT0 COMBINER_IRQ(23, 0) +#define EXYNOS5_IRQ_MCT_L0 COMBINER_IRQ(23, 1) +#define EXYNOS5_IRQ_MCT_L1 COMBINER_IRQ(23, 2) +#define EXYNOS5_IRQ_MCT_G0 COMBINER_IRQ(23, 3) +#define EXYNOS5_IRQ_MCT_G1 COMBINER_IRQ(23, 4) +#define EXYNOS5_IRQ_MCT_G2 COMBINER_IRQ(23, 5) +#define EXYNOS5_IRQ_MCT_G3 COMBINER_IRQ(23, 6) + +#define EXYNOS5_IRQ_EINT1 COMBINER_IRQ(24, 0) +#define EXYNOS5_IRQ_SYSMMU_LITE1_0 COMBINER_IRQ(24, 1) +#define EXYNOS5_IRQ_SYSMMU_LITE1_1 COMBINER_IRQ(24, 2) +#define EXYNOS5_IRQ_SYSMMU_2D_0 COMBINER_IRQ(24, 5) +#define EXYNOS5_IRQ_SYSMMU_2D_1 COMBINER_IRQ(24, 6) + +#define EXYNOS5_IRQ_EINT2 COMBINER_IRQ(25, 0) +#define EXYNOS5_IRQ_EINT3 COMBINER_IRQ(25, 1) + +#define EXYNOS5_IRQ_EINT4 COMBINER_IRQ(26, 0) +#define EXYNOS5_IRQ_EINT5 COMBINER_IRQ(26, 1) + +#define EXYNOS5_IRQ_EINT6 COMBINER_IRQ(27, 0) +#define EXYNOS5_IRQ_EINT7 COMBINER_IRQ(27, 1) + +#define EXYNOS5_IRQ_EINT8 COMBINER_IRQ(28, 0) +#define EXYNOS5_IRQ_EINT9 COMBINER_IRQ(28, 1) + +#define EXYNOS5_IRQ_EINT10 COMBINER_IRQ(29, 0) +#define EXYNOS5_IRQ_EINT11 COMBINER_IRQ(29, 1) + +#define EXYNOS5_IRQ_EINT12 COMBINER_IRQ(30, 0) +#define EXYNOS5_IRQ_EINT13 COMBINER_IRQ(30, 1) + +#define EXYNOS5_IRQ_EINT14 COMBINER_IRQ(31, 0) +#define EXYNOS5_IRQ_EINT15 COMBINER_IRQ(31, 1) + +#define EXYNOS5_MAX_COMBINER_NR 32 + +#define EXYNOS5_IRQ_GPIO1_NR_GROUPS 13 +#define EXYNOS5_IRQ_GPIO2_NR_GROUPS 9 +#define EXYNOS5_IRQ_GPIO3_NR_GROUPS 5 +#define EXYNOS5_IRQ_GPIO4_NR_GROUPS 1 + +#define MAX_COMBINER_NR (EXYNOS4_MAX_COMBINER_NR > EXYNOS5_MAX_COMBINER_NR ? 
\ + EXYNOS4_MAX_COMBINER_NR : EXYNOS5_MAX_COMBINER_NR) + +#define S5P_EINT_BASE1 COMBINER_IRQ(MAX_COMBINER_NR, 0) +#define S5P_EINT_BASE2 (S5P_EINT_BASE1 + 16) +#define S5P_GPIOINT_BASE (S5P_EINT_BASE1 + 32) +#define IRQ_GPIO_END (S5P_GPIOINT_BASE + S5P_GPIOINT_COUNT) +#define IRQ_TIMER_BASE (IRQ_GPIO_END + 64) /* Set the default NR_IRQS */ -#define NR_IRQS (IRQ_TIMER_BASE + IRQ_TIMER_COUNT) + +#define NR_IRQS (IRQ_TIMER_BASE + IRQ_TIMER_COUNT) #endif /* __ASM_ARCH_IRQS_H */ diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h index 609127df9b02..024d38ff1718 100644 --- a/arch/arm/mach-exynos/include/mach/map.h +++ b/arch/arm/mach-exynos/include/mach/map.h @@ -25,6 +25,7 @@ #define EXYNOS4_PA_SYSRAM0 0x02025000 #define EXYNOS4_PA_SYSRAM1 0x02020000 +#define EXYNOS5_PA_SYSRAM 0x02020000 #define EXYNOS4_PA_FIMC0 0x11800000 #define EXYNOS4_PA_FIMC1 0x11810000 @@ -48,14 +49,23 @@ #define EXYNOS4_PA_ONENAND 0x0C000000 #define EXYNOS4_PA_ONENAND_DMA 0x0C600000 -#define EXYNOS4_PA_CHIPID 0x10000000 +#define EXYNOS_PA_CHIPID 0x10000000 #define EXYNOS4_PA_SYSCON 0x10010000 +#define EXYNOS5_PA_SYSCON 0x10050100 + #define EXYNOS4_PA_PMU 0x10020000 +#define EXYNOS5_PA_PMU 0x10040000 + #define EXYNOS4_PA_CMU 0x10030000 +#define EXYNOS5_PA_CMU 0x10010000 #define EXYNOS4_PA_SYSTIMER 0x10050000 +#define EXYNOS5_PA_SYSTIMER 0x101C0000 + #define EXYNOS4_PA_WATCHDOG 0x10060000 +#define EXYNOS5_PA_WATCHDOG 0x101D0000 + #define EXYNOS4_PA_RTC 0x10070000 #define EXYNOS4_PA_KEYPAD 0x100A0000 @@ -64,9 +74,12 @@ #define EXYNOS4_PA_DMC1 0x10410000 #define EXYNOS4_PA_COMBINER 0x10440000 +#define EXYNOS5_PA_COMBINER 0x10440000 #define EXYNOS4_PA_GIC_CPU 0x10480000 #define EXYNOS4_PA_GIC_DIST 0x10490000 +#define EXYNOS5_PA_GIC_CPU 0x10480000 +#define EXYNOS5_PA_GIC_DIST 0x10490000 #define EXYNOS4_PA_COREPERI 0x10500000 #define EXYNOS4_PA_TWD 0x10500600 @@ -97,10 +110,13 @@ #define EXYNOS4_PA_SPI1 0x13930000 #define EXYNOS4_PA_SPI2 0x13940000 - #define EXYNOS4_PA_GPIO1 0x11400000 #define EXYNOS4_PA_GPIO2 0x11000000 #define EXYNOS4_PA_GPIO3 0x03860000 +#define EXYNOS5_PA_GPIO1 0x11400000 +#define EXYNOS5_PA_GPIO2 0x13400000 +#define EXYNOS5_PA_GPIO3 0x10D10000 +#define EXYNOS5_PA_GPIO4 0x03860000 #define EXYNOS4_PA_MIPI_CSIS0 0x11880000 #define EXYNOS4_PA_MIPI_CSIS1 0x11890000 @@ -115,6 +131,7 @@ #define EXYNOS4_PA_SATAPHY_CTRL 0x126B0000 #define EXYNOS4_PA_SROMC 0x12570000 +#define EXYNOS5_PA_SROMC 0x12250000 #define EXYNOS4_PA_EHCI 0x12580000 #define EXYNOS4_PA_OHCI 0x12590000 @@ -122,6 +139,7 @@ #define EXYNOS4_PA_MFC 0x13400000 #define EXYNOS4_PA_UART 0x13800000 +#define EXYNOS5_PA_UART 0x12C00000 #define EXYNOS4_PA_VP 0x12C00000 #define EXYNOS4_PA_MIXER 0x12C10000 @@ -130,6 +148,7 @@ #define EXYNOS4_PA_IIC_HDMIPHY 0x138E0000 #define EXYNOS4_PA_IIC(x) (0x13860000 + ((x) * 0x10000)) +#define EXYNOS5_PA_IIC(x) (0x12C60000 + ((x) * 0x10000)) #define EXYNOS4_PA_ADC 0x13910000 #define EXYNOS4_PA_ADC1 0x13911000 @@ -139,8 +158,10 @@ #define EXYNOS4_PA_SPDIF 0x139B0000 #define EXYNOS4_PA_TIMER 0x139D0000 +#define EXYNOS5_PA_TIMER 0x12DD0000 #define EXYNOS4_PA_SDRAM 0x40000000 +#define EXYNOS5_PA_SDRAM 0x40000000 /* Compatibiltiy Defines */ @@ -158,7 +179,6 @@ #define S3C_PA_IIC7 EXYNOS4_PA_IIC(7) #define S3C_PA_RTC EXYNOS4_PA_RTC #define S3C_PA_WDT EXYNOS4_PA_WATCHDOG -#define S3C_PA_UART EXYNOS4_PA_UART #define S3C_PA_SPI0 EXYNOS4_PA_SPI0 #define S3C_PA_SPI1 EXYNOS4_PA_SPI1 #define S3C_PA_SPI2 EXYNOS4_PA_SPI2 @@ -189,15 +209,18 @@ /* Compatibility UART */ -#define S3C_VA_UARTx(x) 
(S3C_VA_UART + ((x) * S3C_UART_OFFSET)) +#define EXYNOS4_PA_UART0 0x13800000 +#define EXYNOS4_PA_UART1 0x13810000 +#define EXYNOS4_PA_UART2 0x13820000 +#define EXYNOS4_PA_UART3 0x13830000 +#define EXYNOS4_SZ_UART SZ_256 -#define S5P_PA_UART(x) (EXYNOS4_PA_UART + ((x) * S3C_UART_OFFSET)) -#define S5P_PA_UART0 S5P_PA_UART(0) -#define S5P_PA_UART1 S5P_PA_UART(1) -#define S5P_PA_UART2 S5P_PA_UART(2) -#define S5P_PA_UART3 S5P_PA_UART(3) -#define S5P_PA_UART4 S5P_PA_UART(4) +#define EXYNOS5_PA_UART0 0x12C00000 +#define EXYNOS5_PA_UART1 0x12C10000 +#define EXYNOS5_PA_UART2 0x12C20000 +#define EXYNOS5_PA_UART3 0x12C30000 +#define EXYNOS5_SZ_UART SZ_256 -#define S5P_SZ_UART SZ_256 +#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET)) #endif /* __ASM_ARCH_MAP_H */ diff --git a/arch/arm/mach-exynos/include/mach/regs-clock.h b/arch/arm/mach-exynos/include/mach/regs-clock.h index 1e4abd64a547..e141c1fd68d8 100644 --- a/arch/arm/mach-exynos/include/mach/regs-clock.h +++ b/arch/arm/mach-exynos/include/mach/regs-clock.h @@ -253,6 +253,68 @@ #define EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT (0) #define EXYNOS4_CLKDIV_CAM1_JPEG_MASK (0xf << EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT) +/* For EXYNOS5250 */ + +#define EXYNOS5_APLL_CON0 EXYNOS_CLKREG(0x00100) +#define EXYNOS5_CLKSRC_CPU EXYNOS_CLKREG(0x00200) +#define EXYNOS5_CLKDIV_CPU0 EXYNOS_CLKREG(0x00500) +#define EXYNOS5_MPLL_CON0 EXYNOS_CLKREG(0x04100) +#define EXYNOS5_CLKSRC_CORE1 EXYNOS_CLKREG(0x04204) + +#define EXYNOS5_CLKGATE_IP_CORE EXYNOS_CLKREG(0x04900) + +#define EXYNOS5_CLKDIV_ACP EXYNOS_CLKREG(0x08500) + +#define EXYNOS5_CLKSRC_TOP2 EXYNOS_CLKREG(0x10218) +#define EXYNOS5_EPLL_CON0 EXYNOS_CLKREG(0x10130) +#define EXYNOS5_EPLL_CON1 EXYNOS_CLKREG(0x10134) +#define EXYNOS5_VPLL_CON0 EXYNOS_CLKREG(0x10140) +#define EXYNOS5_VPLL_CON1 EXYNOS_CLKREG(0x10144) +#define EXYNOS5_CPLL_CON0 EXYNOS_CLKREG(0x10120) + +#define EXYNOS5_CLKSRC_TOP0 EXYNOS_CLKREG(0x10210) +#define EXYNOS5_CLKSRC_TOP3 EXYNOS_CLKREG(0x1021C) +#define EXYNOS5_CLKSRC_GSCL EXYNOS_CLKREG(0x10220) +#define EXYNOS5_CLKSRC_DISP1_0 EXYNOS_CLKREG(0x1022C) +#define EXYNOS5_CLKSRC_FSYS EXYNOS_CLKREG(0x10244) +#define EXYNOS5_CLKSRC_PERIC0 EXYNOS_CLKREG(0x10250) + +#define EXYNOS5_CLKSRC_MASK_TOP EXYNOS_CLKREG(0x10310) +#define EXYNOS5_CLKSRC_MASK_GSCL EXYNOS_CLKREG(0x10320) +#define EXYNOS5_CLKSRC_MASK_DISP1_0 EXYNOS_CLKREG(0x1032C) +#define EXYNOS5_CLKSRC_MASK_FSYS EXYNOS_CLKREG(0x10340) +#define EXYNOS5_CLKSRC_MASK_PERIC0 EXYNOS_CLKREG(0x10350) + +#define EXYNOS5_CLKDIV_TOP0 EXYNOS_CLKREG(0x10510) +#define EXYNOS5_CLKDIV_TOP1 EXYNOS_CLKREG(0x10514) +#define EXYNOS5_CLKDIV_GSCL EXYNOS_CLKREG(0x10520) +#define EXYNOS5_CLKDIV_DISP1_0 EXYNOS_CLKREG(0x1052C) +#define EXYNOS5_CLKDIV_GEN EXYNOS_CLKREG(0x1053C) +#define EXYNOS5_CLKDIV_FSYS0 EXYNOS_CLKREG(0x10548) +#define EXYNOS5_CLKDIV_FSYS1 EXYNOS_CLKREG(0x1054C) +#define EXYNOS5_CLKDIV_FSYS2 EXYNOS_CLKREG(0x10550) +#define EXYNOS5_CLKDIV_FSYS3 EXYNOS_CLKREG(0x10554) +#define EXYNOS5_CLKDIV_PERIC0 EXYNOS_CLKREG(0x10558) + +#define EXYNOS5_CLKGATE_IP_ACP EXYNOS_CLKREG(0x08800) +#define EXYNOS5_CLKGATE_IP_GSCL EXYNOS_CLKREG(0x10920) +#define EXYNOS5_CLKGATE_IP_DISP1 EXYNOS_CLKREG(0x10928) +#define EXYNOS5_CLKGATE_IP_MFC EXYNOS_CLKREG(0x1092C) +#define EXYNOS5_CLKGATE_IP_GEN EXYNOS_CLKREG(0x10934) +#define EXYNOS5_CLKGATE_IP_FSYS EXYNOS_CLKREG(0x10944) +#define EXYNOS5_CLKGATE_IP_GPS EXYNOS_CLKREG(0x1094C) +#define EXYNOS5_CLKGATE_IP_PERIC EXYNOS_CLKREG(0x10950) +#define EXYNOS5_CLKGATE_IP_PERIS EXYNOS_CLKREG(0x10960) +#define EXYNOS5_CLKGATE_BLOCK 
EXYNOS_CLKREG(0x10980) + +#define EXYNOS5_BPLL_CON0 EXYNOS_CLKREG(0x20110) +#define EXYNOS5_CLKSRC_CDREX EXYNOS_CLKREG(0x20200) +#define EXYNOS5_CLKDIV_CDREX EXYNOS_CLKREG(0x20500) + +#define EXYNOS5_EPLL_LOCK EXYNOS_CLKREG(0x10030) + +#define EXYNOS5_EPLLCON0_LOCKED_SHIFT (29) + /* Compatibility defines and inclusion */ #include <mach/regs-pmu.h> diff --git a/arch/arm/mach-exynos/include/mach/regs-gpio.h b/arch/arm/mach-exynos/include/mach/regs-gpio.h index 1401b21663a5..e4b5b60dcb85 100644 --- a/arch/arm/mach-exynos/include/mach/regs-gpio.h +++ b/arch/arm/mach-exynos/include/mach/regs-gpio.h @@ -16,6 +16,15 @@ #include <mach/map.h> #include <mach/irqs.h> +#define EINT_REG_NR(x) (EINT_OFFSET(x) >> 3) +#define EINT_CON(b, x) (b + 0xE00 + (EINT_REG_NR(x) * 4)) +#define EINT_FLTCON(b, x) (b + 0xE80 + (EINT_REG_NR(x) * 4)) +#define EINT_MASK(b, x) (b + 0xF00 + (EINT_REG_NR(x) * 4)) +#define EINT_PEND(b, x) (b + 0xF40 + (EINT_REG_NR(x) * 4)) + +#define EINT_OFFSET_BIT(x) (1 << (EINT_OFFSET(x) & 0x7)) + +/* compatibility for plat-s5p/irq-pm.c */ #define EXYNOS4_EINT40CON (S5P_VA_GPIO2 + 0xE00) #define S5P_EINT_CON(x) (EXYNOS4_EINT40CON + ((x) * 0x4)) @@ -28,15 +37,4 @@ #define EXYNOS4_EINT40PEND (S5P_VA_GPIO2 + 0xF40) #define S5P_EINT_PEND(x) (EXYNOS4_EINT40PEND + ((x) * 0x4)) -#define EINT_REG_NR(x) (EINT_OFFSET(x) >> 3) - -#define eint_irq_to_bit(irq) (1 << (EINT_OFFSET(irq) & 0x7)) - -#define EINT_MODE S3C_GPIO_SFN(0xf) - -#define EINT_GPIO_0(x) EXYNOS4_GPX0(x) -#define EINT_GPIO_1(x) EXYNOS4_GPX1(x) -#define EINT_GPIO_2(x) EXYNOS4_GPX2(x) -#define EINT_GPIO_3(x) EXYNOS4_GPX3(x) - #endif /* __ASM_ARCH_REGS_GPIO_H */ diff --git a/arch/arm/mach-exynos/include/mach/regs-pmu.h b/arch/arm/mach-exynos/include/mach/regs-pmu.h index 4fff8e938fec..4c53f38b5a9e 100644 --- a/arch/arm/mach-exynos/include/mach/regs-pmu.h +++ b/arch/arm/mach-exynos/include/mach/regs-pmu.h @@ -31,6 +31,7 @@ #define S5P_USE_STANDBYWFE_ISP_ARM (1 << 26) #define S5P_SWRESET S5P_PMUREG(0x0400) +#define EXYNOS_SWRESET S5P_PMUREG(0x0400) #define S5P_WAKEUP_STAT S5P_PMUREG(0x0600) #define S5P_EINT_WAKEUP_MASK S5P_PMUREG(0x0604) diff --git a/arch/arm/mach-exynos/include/mach/uncompress.h b/arch/arm/mach-exynos/include/mach/uncompress.h index 21d97bcd9acb..493f4f365ddf 100644 --- a/arch/arm/mach-exynos/include/mach/uncompress.h +++ b/arch/arm/mach-exynos/include/mach/uncompress.h @@ -1,9 +1,8 @@ -/* linux/arch/arm/mach-exynos4/include/mach/uncompress.h - * - * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. +/* + * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * EXYNOS4 - uncompress code + * EXYNOS - uncompress code * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -13,12 +12,20 @@ #ifndef __ASM_ARCH_UNCOMPRESS_H #define __ASM_ARCH_UNCOMPRESS_H __FILE__ +#include <asm/mach-types.h> + #include <mach/map.h> + +volatile u8 *uart_base; + #include <plat/uncompress.h> static void arch_detect_cpu(void) { - /* we do not need to do any cpu detection here at the moment. 
*/ + if (machine_is_smdk5250()) + uart_base = (volatile u8 *)EXYNOS5_PA_UART + (S3C_UART_OFFSET * CONFIG_S3C_LOWLEVEL_UART_PORT); + else + uart_base = (volatile u8 *)EXYNOS4_PA_UART + (S3C_UART_OFFSET * CONFIG_S3C_LOWLEVEL_UART_PORT); /* * For preventing FIFO overrun or infinite loop of UART console, diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c index e6b02fdf1b09..8245f1c761d9 100644 --- a/arch/arm/mach-exynos/mach-exynos4-dt.c +++ b/arch/arm/mach-exynos/mach-exynos4-dt.c @@ -37,13 +37,13 @@ * data from the device tree. */ static const struct of_dev_auxdata exynos4210_auxdata_lookup[] __initconst = { - OF_DEV_AUXDATA("samsung,exynos4210-uart", S5P_PA_UART0, + OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS4_PA_UART0, "exynos4210-uart.0", NULL), - OF_DEV_AUXDATA("samsung,exynos4210-uart", S5P_PA_UART1, + OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS4_PA_UART1, "exynos4210-uart.1", NULL), - OF_DEV_AUXDATA("samsung,exynos4210-uart", S5P_PA_UART2, + OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS4_PA_UART2, "exynos4210-uart.2", NULL), - OF_DEV_AUXDATA("samsung,exynos4210-uart", S5P_PA_UART3, + OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS4_PA_UART3, "exynos4210-uart.3", NULL), OF_DEV_AUXDATA("samsung,exynos4210-sdhci", EXYNOS4_PA_HSMMC(0), "exynos4-sdhci.0", NULL), diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c new file mode 100644 index 000000000000..0d26f50081ad --- /dev/null +++ b/arch/arm/mach-exynos/mach-exynos5-dt.c @@ -0,0 +1,78 @@ +/* + * SAMSUNG EXYNOS5250 Flattened Device Tree enabled machine + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include <linux/of_platform.h> +#include <linux/serial_core.h> + +#include <asm/mach/arch.h> +#include <asm/hardware/gic.h> +#include <mach/map.h> + +#include <plat/cpu.h> +#include <plat/regs-serial.h> + +#include "common.h" + +/* + * The following lookup table is used to override device names when devices + * are registered from device tree. This is temporarily added to enable + * device tree support addition for the EXYNOS5 architecture. + * + * For drivers that require platform data to be provided from the machine + * file, a platform data pointer can also be supplied along with the + * devices names. Usually, the platform data elements that cannot be parsed + * from the device tree by the drivers (example: function pointers) are + * supplied. But it should be noted that this is a temporary mechanism and + * at some point, the drivers should be capable of parsing all the platform + * data from the device tree. 
+ */ +static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = { + OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS5_PA_UART0, + "exynos4210-uart.0", NULL), + OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS5_PA_UART1, + "exynos4210-uart.1", NULL), + OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS5_PA_UART2, + "exynos4210-uart.2", NULL), + OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS5_PA_UART3, + "exynos4210-uart.3", NULL), + OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL), + OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL), + OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.2", NULL), + {}, +}; + +static void __init exynos5250_dt_map_io(void) +{ + exynos_init_io(NULL, 0); + s3c24xx_init_clocks(24000000); +} + +static void __init exynos5250_dt_machine_init(void) +{ + of_platform_populate(NULL, of_default_bus_match_table, + exynos5250_auxdata_lookup, NULL); +} + +static char const *exynos5250_dt_compat[] __initdata = { + "samsung,exynos5250", + NULL +}; + +DT_MACHINE_START(EXYNOS5_DT, "SAMSUNG EXYNOS5 (Flattened Device Tree)") + /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */ + .init_irq = exynos5_init_irq, + .map_io = exynos5250_dt_map_io, + .handle_irq = gic_handle_irq, + .init_machine = exynos5250_dt_machine_init, + .timer = &exynos4_timer, + .dt_compat = exynos5250_dt_compat, + .restart = exynos5_restart, +MACHINE_END diff --git a/arch/arm/mach-exynos/mct.c b/arch/arm/mach-exynos/mct.c index e8a1caaf1902..897d9a9cf226 100644 --- a/arch/arm/mach-exynos/mct.c +++ b/arch/arm/mach-exynos/mct.c @@ -261,7 +261,10 @@ static void exynos4_clockevent_init(void) mct_comp_device.cpumask = cpumask_of(0); clockevents_register_device(&mct_comp_device); - setup_irq(IRQ_MCT_G0, &mct_comp_event_irq); + if (soc_is_exynos5250()) + setup_irq(EXYNOS5_IRQ_MCT_G0, &mct_comp_event_irq); + else + setup_irq(EXYNOS4_IRQ_MCT_G0, &mct_comp_event_irq); } #ifdef CONFIG_LOCAL_TIMERS @@ -412,16 +415,16 @@ static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt) if (mct_int_type == MCT_INT_SPI) { if (cpu == 0) { mct_tick0_event_irq.dev_id = mevt; - evt->irq = IRQ_MCT_L0; - setup_irq(IRQ_MCT_L0, &mct_tick0_event_irq); + evt->irq = EXYNOS4_IRQ_MCT_L0; + setup_irq(EXYNOS4_IRQ_MCT_L0, &mct_tick0_event_irq); } else { mct_tick1_event_irq.dev_id = mevt; - evt->irq = IRQ_MCT_L1; - setup_irq(IRQ_MCT_L1, &mct_tick1_event_irq); - irq_set_affinity(IRQ_MCT_L1, cpumask_of(1)); + evt->irq = EXYNOS4_IRQ_MCT_L1; + setup_irq(EXYNOS4_IRQ_MCT_L1, &mct_tick1_event_irq); + irq_set_affinity(EXYNOS4_IRQ_MCT_L1, cpumask_of(1)); } } else { - enable_percpu_irq(IRQ_MCT_LOCALTIMER, 0); + enable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER, 0); } return 0; @@ -437,7 +440,7 @@ static void exynos4_local_timer_stop(struct clock_event_device *evt) else remove_irq(evt->irq, &mct_tick1_event_irq); else - disable_percpu_irq(IRQ_MCT_LOCALTIMER); + disable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER); } static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = { @@ -457,11 +460,11 @@ static void __init exynos4_timer_resources(void) if (mct_int_type == MCT_INT_PPI) { int err; - err = request_percpu_irq(IRQ_MCT_LOCALTIMER, + err = request_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER, exynos4_mct_tick_isr, "MCT", &percpu_mct_tick); WARN(err, "MCT: can't request IRQ %d (%d)\n", - IRQ_MCT_LOCALTIMER, err); + EXYNOS_IRQ_MCT_LOCALTIMER, err); } local_timer_register(&exynos4_mct_tick_ops); diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index 
0f2035a1eb6e..36c3984aaa47 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -166,7 +166,10 @@ void __init smp_init_cpus(void) void __iomem *scu_base = scu_base_addr(); unsigned int i, ncores; - ncores = scu_base ? scu_get_core_count(scu_base) : 1; + if (soc_is_exynos5250()) + ncores = 2; + else + ncores = scu_base ? scu_get_core_count(scu_base) : 1; /* sanity check */ if (ncores > nr_cpu_ids) { @@ -183,8 +186,8 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - - scu_enable(scu_base_addr()); + if (!soc_is_exynos5250()) + scu_enable(scu_base_addr()); /* * Write the address of secondary startup into the diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index 0b04af2b13cc..13b306808b42 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c @@ -183,6 +183,12 @@ static __init int exynos4_pm_init_power_domain(void) #ifdef CONFIG_S5P_DEV_CSIS1 exynos_pm_add_dev_to_genpd(&s5p_device_mipi_csis1, &exynos4_pd_cam); #endif +#ifdef CONFIG_S5P_DEV_G2D + exynos_pm_add_dev_to_genpd(&s5p_device_g2d, &exynos4_pd_lcd0); +#endif +#ifdef CONFIG_S5P_DEV_JPEG + exynos_pm_add_dev_to_genpd(&s5p_device_jpeg, &exynos4_pd_cam); +#endif return 0; } arch_initcall(exynos4_pm_init_power_domain); diff --git a/arch/arm/mach-exynos/setup-i2c0.c b/arch/arm/mach-exynos/setup-i2c0.c index d395bd17c38b..b90d94c17f7c 100644 --- a/arch/arm/mach-exynos/setup-i2c0.c +++ b/arch/arm/mach-exynos/setup-i2c0.c @@ -1,7 +1,5 @@ /* - * linux/arch/arm/mach-exynos4/setup-i2c0.c - * - * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. + * Copyright (c) 2009-2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * I2C0 GPIO configuration. @@ -18,9 +16,14 @@ struct platform_device; /* don't need the contents */ #include <linux/gpio.h> #include <plat/iic.h> #include <plat/gpio-cfg.h> +#include <plat/cpu.h> void s3c_i2c0_cfg_gpio(struct platform_device *dev) { + if (soc_is_exynos5250()) + /* will be implemented with gpio function */ + return; + s3c_gpio_cfgall_range(EXYNOS4_GPD1(0), 2, S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP); } diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c index 41978ee4f9d0..3e6aaa6361da 100644 --- a/arch/arm/mach-footbridge/common.c +++ b/arch/arm/mach-footbridge/common.c @@ -21,6 +21,7 @@ #include <asm/irq.h> #include <asm/mach-types.h> #include <asm/setup.h> +#include <asm/system_misc.h> #include <asm/hardware/dec21285.h> #include <asm/mach/irq.h> diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c index 121ad1d4fa39..3b54196447c7 100644 --- a/arch/arm/mach-footbridge/dc21285-timer.c +++ b/arch/arm/mach-footbridge/dc21285-timer.c @@ -14,6 +14,7 @@ #include <asm/hardware/dec21285.h> #include <asm/mach/time.h> +#include <asm/system_info.h> #include "common.h" diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c index 3194d3f73503..e17e11de4f5e 100644 --- a/arch/arm/mach-footbridge/dc21285.c +++ b/arch/arm/mach-footbridge/dc21285.c @@ -21,7 +21,6 @@ #include <video/vga.h> #include <asm/irq.h> -#include <asm/system.h> #include <asm/mach/pci.h> #include <asm/hardware/dec21285.h> diff --git a/arch/arm/mach-footbridge/ebsa285-leds.c b/arch/arm/mach-footbridge/ebsa285-leds.c index 4e10090cd87f..5bd266754b95 100644 --- a/arch/arm/mach-footbridge/ebsa285-leds.c +++ b/arch/arm/mach-footbridge/ebsa285-leds.c @@ -24,7 +24,6 @@ #include <mach/hardware.h> #include 
<asm/leds.h> #include <asm/mach-types.h> -#include <asm/system.h> #define LED_STATE_ENABLED 1 #define LED_STATE_CLAIMED 2 diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c index 80a1c5cc9071..cac9f67e7da7 100644 --- a/arch/arm/mach-footbridge/netwinder-hw.c +++ b/arch/arm/mach-footbridge/netwinder-hw.c @@ -17,6 +17,7 @@ #include <asm/leds.h> #include <asm/mach-types.h> #include <asm/setup.h> +#include <asm/system_misc.h> #include <asm/mach/arch.h> diff --git a/arch/arm/mach-footbridge/netwinder-leds.c b/arch/arm/mach-footbridge/netwinder-leds.c index e57102e871fc..5a2bd89cbdca 100644 --- a/arch/arm/mach-footbridge/netwinder-leds.c +++ b/arch/arm/mach-footbridge/netwinder-leds.c @@ -24,7 +24,6 @@ #include <mach/hardware.h> #include <asm/leds.h> #include <asm/mach-types.h> -#include <asm/system.h> #define LED_STATE_ENABLED 1 #define LED_STATE_CLAIMED 2 diff --git a/arch/arm/mach-imx/dma-v1.c b/arch/arm/mach-imx/dma-v1.c index 42afc29a7da8..3189a6004cf9 100644 --- a/arch/arm/mach-imx/dma-v1.c +++ b/arch/arm/mach-imx/dma-v1.c @@ -32,7 +32,6 @@ #include <linux/scatterlist.h> #include <linux/io.h> -#include <asm/system.h> #include <asm/irq.h> #include <mach/hardware.h> #include <mach/dma-v1.h> diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index 7696dfa2bdba..da6c1d9af768 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c @@ -26,6 +26,7 @@ #include <asm/hardware/gic.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> +#include <asm/system_misc.h> #include <mach/common.h> #include <mach/hardware.h> diff --git a/arch/arm/mach-imx/mach-mx51_efikamx.c b/arch/arm/mach-imx/mach-mx51_efikamx.c index 3a5ed2dd885a..586e9f822124 100644 --- a/arch/arm/mach-imx/mach-mx51_efikamx.c +++ b/arch/arm/mach-imx/mach-mx51_efikamx.c @@ -33,6 +33,7 @@ #include <mach/iomux-mx51.h> #include <asm/setup.h> +#include <asm/system_info.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> diff --git a/arch/arm/mach-imx/mach-mx51_efikasb.c b/arch/arm/mach-imx/mach-mx51_efikasb.c index ea5f65b0381a..24aded9e109f 100644 --- a/arch/arm/mach-imx/mach-mx51_efikasb.c +++ b/arch/arm/mach-imx/mach-mx51_efikasb.c @@ -36,6 +36,7 @@ #include <mach/iomux-mx51.h> #include <asm/setup.h> +#include <asm/system_info.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c index 15b87f26ac96..1a65d77bd55d 100644 --- a/arch/arm/mach-integrator/core.c +++ b/arch/arm/mach-integrator/core.c @@ -27,7 +27,6 @@ #include <mach/platform.h> #include <asm/irq.h> #include <mach/cm.h> -#include <asm/system.h> #include <asm/leds.h> #include <asm/mach-types.h> #include <asm/mach/time.h> diff --git a/arch/arm/mach-integrator/leds.c b/arch/arm/mach-integrator/leds.c index 28be186adb89..466defa97842 100644 --- a/arch/arm/mach-integrator/leds.c +++ b/arch/arm/mach-integrator/leds.c @@ -29,7 +29,6 @@ #include <mach/hardware.h> #include <mach/platform.h> #include <asm/leds.h> -#include <asm/system.h> #include <asm/mach-types.h> #include <mach/cm.h> diff --git a/arch/arm/mach-integrator/pci.c b/arch/arm/mach-integrator/pci.c index 520b6bf81bb1..36068f438f2b 100644 --- a/arch/arm/mach-integrator/pci.c +++ b/arch/arm/mach-integrator/pci.c @@ -27,7 +27,6 @@ #include <linux/init.h> #include <asm/irq.h> -#include <asm/system.h> #include <asm/mach/pci.h> #include <asm/mach-types.h> diff --git 
a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c index 015be770c1d8..4be172c3cbe0 100644 --- a/arch/arm/mach-integrator/pci_v3.c +++ b/arch/arm/mach-integrator/pci_v3.c @@ -32,7 +32,6 @@ #include <mach/platform.h> #include <asm/irq.h> #include <asm/signal.h> -#include <asm/system.h> #include <asm/mach/pci.h> #include <asm/irq_regs.h> diff --git a/arch/arm/mach-iop33x/uart.c b/arch/arm/mach-iop33x/uart.c index cdae24e46eea..bbf54d794ce8 100644 --- a/arch/arm/mach-iop33x/uart.c +++ b/arch/arm/mach-iop33x/uart.c @@ -22,7 +22,6 @@ #include <asm/page.h> #include <asm/mach/map.h> #include <asm/setup.h> -#include <asm/system.h> #include <asm/memory.h> #include <mach/hardware.h> #include <asm/hardware/iop3xx.h> diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c index 81c45370a4e6..f214cdff01cb 100644 --- a/arch/arm/mach-ixp2000/core.c +++ b/arch/arm/mach-ixp2000/core.c @@ -32,7 +32,6 @@ #include <asm/memory.h> #include <mach/hardware.h> #include <asm/irq.h> -#include <asm/system.h> #include <asm/tlbflush.h> #include <asm/pgtable.h> diff --git a/arch/arm/mach-ixp2000/enp2611.c b/arch/arm/mach-ixp2000/enp2611.c index e872d238cd0f..4867f408617c 100644 --- a/arch/arm/mach-ixp2000/enp2611.c +++ b/arch/arm/mach-ixp2000/enp2611.c @@ -36,7 +36,6 @@ #include <asm/irq.h> #include <asm/pgtable.h> #include <asm/page.h> -#include <asm/system.h> #include <mach/hardware.h> #include <asm/mach-types.h> diff --git a/arch/arm/mach-ixp2000/ixdp2400.c b/arch/arm/mach-ixp2000/ixdp2400.c index d519944653ad..915ad49e3b8f 100644 --- a/arch/arm/mach-ixp2000/ixdp2400.c +++ b/arch/arm/mach-ixp2000/ixdp2400.c @@ -29,7 +29,6 @@ #include <asm/irq.h> #include <asm/pgtable.h> #include <asm/page.h> -#include <asm/system.h> #include <mach/hardware.h> #include <asm/mach-types.h> diff --git a/arch/arm/mach-ixp2000/ixdp2800.c b/arch/arm/mach-ixp2000/ixdp2800.c index b415febd2025..a9f1819ea049 100644 --- a/arch/arm/mach-ixp2000/ixdp2800.c +++ b/arch/arm/mach-ixp2000/ixdp2800.c @@ -29,7 +29,6 @@ #include <asm/irq.h> #include <asm/pgtable.h> #include <asm/page.h> -#include <asm/system.h> #include <mach/hardware.h> #include <asm/mach-types.h> diff --git a/arch/arm/mach-ixp2000/ixdp2x00.c b/arch/arm/mach-ixp2000/ixdp2x00.c index dd9838299068..421e38dc0fac 100644 --- a/arch/arm/mach-ixp2000/ixdp2x00.c +++ b/arch/arm/mach-ixp2000/ixdp2x00.c @@ -30,7 +30,6 @@ #include <asm/irq.h> #include <asm/pgtable.h> #include <asm/page.h> -#include <asm/system.h> #include <mach/hardware.h> #include <asm/mach-types.h> diff --git a/arch/arm/mach-ixp2000/ixdp2x01.c b/arch/arm/mach-ixp2000/ixdp2x01.c index 7632beadabf6..5196c39cdba4 100644 --- a/arch/arm/mach-ixp2000/ixdp2x01.c +++ b/arch/arm/mach-ixp2000/ixdp2x01.c @@ -34,7 +34,6 @@ #include <asm/irq.h> #include <asm/pgtable.h> #include <asm/page.h> -#include <asm/system.h> #include <mach/hardware.h> #include <asm/mach-types.h> diff --git a/arch/arm/mach-ixp2000/pci.c b/arch/arm/mach-ixp2000/pci.c index 49c36f3cd602..9c02de932fac 100644 --- a/arch/arm/mach-ixp2000/pci.c +++ b/arch/arm/mach-ixp2000/pci.c @@ -26,7 +26,6 @@ #include <linux/io.h> #include <asm/irq.h> -#include <asm/system.h> #include <mach/hardware.h> #include <asm/mach/pci.h> diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c index 7c1495e4fe7a..d2c2dc35cbdd 100644 --- a/arch/arm/mach-ixp23xx/core.c +++ b/arch/arm/mach-ixp23xx/core.c @@ -34,7 +34,6 @@ #include <asm/memory.h> #include <mach/hardware.h> #include <asm/irq.h> -#include <asm/system.h> #include <asm/tlbflush.h> 
#include <asm/pgtable.h> diff --git a/arch/arm/mach-ixp23xx/espresso.c b/arch/arm/mach-ixp23xx/espresso.c index 8f2487e1fc4e..d142d45dea12 100644 --- a/arch/arm/mach-ixp23xx/espresso.c +++ b/arch/arm/mach-ixp23xx/espresso.c @@ -32,7 +32,6 @@ #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/irq.h> -#include <asm/system.h> #include <asm/tlbflush.h> #include <asm/pgtable.h> diff --git a/arch/arm/mach-ixp23xx/ixdp2351.c b/arch/arm/mach-ixp23xx/ixdp2351.c index 5d5dd3e8d069..b0e07db5ceaf 100644 --- a/arch/arm/mach-ixp23xx/ixdp2351.c +++ b/arch/arm/mach-ixp23xx/ixdp2351.c @@ -36,7 +36,6 @@ #include <asm/memory.h> #include <mach/hardware.h> #include <asm/mach-types.h> -#include <asm/system.h> #include <asm/tlbflush.h> #include <asm/pgtable.h> diff --git a/arch/arm/mach-ixp23xx/pci.c b/arch/arm/mach-ixp23xx/pci.c index 3cbbd3208fa8..911f5a58e006 100644 --- a/arch/arm/mach-ixp23xx/pci.c +++ b/arch/arm/mach-ixp23xx/pci.c @@ -28,7 +28,6 @@ #include <asm/irq.h> #include <asm/sizes.h> -#include <asm/system.h> #include <asm/mach/pci.h> #include <mach/hardware.h> diff --git a/arch/arm/mach-ixp23xx/roadrunner.c b/arch/arm/mach-ixp23xx/roadrunner.c index 377283fc658c..eaaa3fa9fd05 100644 --- a/arch/arm/mach-ixp23xx/roadrunner.c +++ b/arch/arm/mach-ixp23xx/roadrunner.c @@ -36,7 +36,6 @@ #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/irq.h> -#include <asm/system.h> #include <asm/tlbflush.h> #include <asm/pgtable.h> diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index 8508882b13f0..d5719eb42591 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -32,7 +32,6 @@ #include <asm/cputype.h> #include <asm/irq.h> #include <asm/sizes.h> -#include <asm/system.h> #include <asm/mach/pci.h> #include <mach/hardware.h> diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c index c0e3d69a8aec..78ae12c46261 100644 --- a/arch/arm/mach-ixp4xx/goramo_mlr.c +++ b/arch/arm/mach-ixp4xx/goramo_mlr.c @@ -12,7 +12,6 @@ #include <linux/pci.h> #include <linux/serial_8250.h> #include <asm/mach-types.h> -#include <asm/system.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> #include <asm/mach/pci.h> diff --git a/arch/arm/mach-kirkwood/Makefile b/arch/arm/mach-kirkwood/Makefile index acbc5e1db06f..e299a9576bf0 100644 --- a/arch/arm/mach-kirkwood/Makefile +++ b/arch/arm/mach-kirkwood/Makefile @@ -21,3 +21,4 @@ obj-$(CONFIG_MACH_T5325) += t5325-setup.o obj-$(CONFIG_CPU_IDLE) += cpuidle.o obj-$(CONFIG_ARCH_KIRKWOOD_DT) += board-dt.o +obj-$(CONFIG_MACH_DREAMPLUG_DT) += board-dreamplug.o diff --git a/arch/arm/mach-kirkwood/board-dreamplug.c b/arch/arm/mach-kirkwood/board-dreamplug.c new file mode 100644 index 000000000000..985453994dd3 --- /dev/null +++ b/arch/arm/mach-kirkwood/board-dreamplug.c @@ -0,0 +1,152 @@ +/* + * Copyright 2012 (C), Jason Cooper <jason@lakedaemon.net> + * + * arch/arm/mach-kirkwood/board-dreamplug.c + * + * Marvell DreamPlug Reference Board Init for drivers not converted to + * flattened device tree yet. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/mtd/partitions.h> +#include <linux/ata_platform.h> +#include <linux/mv643xx_eth.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/gpio.h> +#include <linux/leds.h> +#include <linux/mtd/physmap.h> +#include <linux/spi/flash.h> +#include <linux/spi/spi.h> +#include <linux/spi/orion_spi.h> +#include <asm/mach-types.h> +#include <asm/mach/arch.h> +#include <asm/mach/map.h> +#include <mach/kirkwood.h> +#include <mach/bridge-regs.h> +#include <plat/mvsdio.h> +#include "common.h" +#include "mpp.h" + +struct mtd_partition dreamplug_partitions[] = { + { + .name = "u-boot", + .size = SZ_512K, + .offset = 0, + }, + { + .name = "u-boot env", + .size = SZ_64K, + .offset = SZ_512K + SZ_512K, + }, + { + .name = "dtb", + .size = SZ_64K, + .offset = SZ_512K + SZ_512K + SZ_512K, + }, +}; + +static const struct flash_platform_data dreamplug_spi_slave_data = { + .type = "mx25l1606e", + .name = "spi_flash", + .parts = dreamplug_partitions, + .nr_parts = ARRAY_SIZE(dreamplug_partitions), +}; + +static struct spi_board_info __initdata dreamplug_spi_slave_info[] = { + { + .modalias = "m25p80", + .platform_data = &dreamplug_spi_slave_data, + .irq = -1, + .max_speed_hz = 50000000, + .bus_num = 0, + .chip_select = 0, + }, +}; + +static struct mv643xx_eth_platform_data dreamplug_ge00_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(0), +}; + +static struct mv643xx_eth_platform_data dreamplug_ge01_data = { + .phy_addr = MV643XX_ETH_PHY_ADDR(1), +}; + +static struct mv_sata_platform_data dreamplug_sata_data = { + .n_ports = 1, +}; + +static struct mvsdio_platform_data dreamplug_mvsdio_data = { + /* unfortunately the CD signal has not been connected */ +}; + +static struct gpio_led dreamplug_led_pins[] = { + { + .name = "dreamplug:blue:bluetooth", + .gpio = 47, + .active_low = 1, + }, + { + .name = "dreamplug:green:wifi", + .gpio = 48, + .active_low = 1, + }, + { + .name = "dreamplug:green:wifi_ap", + .gpio = 49, + .active_low = 1, + }, +}; + +static struct gpio_led_platform_data dreamplug_led_data = { + .leds = dreamplug_led_pins, + .num_leds = ARRAY_SIZE(dreamplug_led_pins), +}; + +static struct platform_device dreamplug_leds = { + .name = "leds-gpio", + .id = -1, + .dev = { + .platform_data = &dreamplug_led_data, + } +}; + +static unsigned int dreamplug_mpp_config[] __initdata = { + MPP0_SPI_SCn, + MPP1_SPI_MOSI, + MPP2_SPI_SCK, + MPP3_SPI_MISO, + MPP47_GPIO, /* Bluetooth LED */ + MPP48_GPIO, /* Wifi LED */ + MPP49_GPIO, /* Wifi AP LED */ + 0 +}; + +void __init dreamplug_init(void) +{ + /* + * Basic setup. Needs to be called early. 
+ */ + kirkwood_mpp_conf(dreamplug_mpp_config); + + spi_register_board_info(dreamplug_spi_slave_info, + ARRAY_SIZE(dreamplug_spi_slave_info)); + kirkwood_spi_init(); + + kirkwood_ehci_init(); + kirkwood_ge00_init(&dreamplug_ge00_data); + kirkwood_ge01_init(&dreamplug_ge01_data); + kirkwood_sata_init(&dreamplug_sata_data); + kirkwood_sdio_init(&dreamplug_mvsdio_data); + + platform_device_register(&dreamplug_leds); +} diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c index fbe6405602ed..1c672d9e6656 100644 --- a/arch/arm/mach-kirkwood/board-dt.c +++ b/arch/arm/mach-kirkwood/board-dt.c @@ -3,7 +3,7 @@ * * arch/arm/mach-kirkwood/board-dt.c * - * Marvell DreamPlug Reference Board Setup + * Flattened Device Tree board initialization * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any @@ -12,150 +12,45 @@ #include <linux/kernel.h> #include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/mtd/partitions.h> -#include <linux/ata_platform.h> -#include <linux/mv643xx_eth.h> #include <linux/of.h> -#include <linux/of_address.h> -#include <linux/of_fdt.h> -#include <linux/of_irq.h> #include <linux/of_platform.h> -#include <linux/gpio.h> -#include <linux/leds.h> -#include <linux/mtd/physmap.h> -#include <linux/spi/flash.h> -#include <linux/spi/spi.h> -#include <linux/spi/orion_spi.h> -#include <asm/mach-types.h> #include <asm/mach/arch.h> -#include <mach/kirkwood.h> -#include <plat/mvsdio.h> +#include <asm/mach/map.h> +#include <mach/bridge-regs.h> #include "common.h" -#include "mpp.h" static struct of_device_id kirkwood_dt_match_table[] __initdata = { { .compatible = "simple-bus", }, { } }; -struct mtd_partition dreamplug_partitions[] = { - { - .name = "u-boot", - .size = SZ_512K, - .offset = 0, - }, - { - .name = "u-boot env", - .size = SZ_64K, - .offset = SZ_512K + SZ_512K, - }, - { - .name = "dtb", - .size = SZ_64K, - .offset = SZ_512K + SZ_512K + SZ_512K, - }, -}; - -static const struct flash_platform_data dreamplug_spi_slave_data = { - .type = "mx25l1606e", - .name = "spi_flash", - .parts = dreamplug_partitions, - .nr_parts = ARRAY_SIZE(dreamplug_partitions), -}; - -static struct spi_board_info __initdata dreamplug_spi_slave_info[] = { - { - .modalias = "m25p80", - .platform_data = &dreamplug_spi_slave_data, - .irq = -1, - .max_speed_hz = 50000000, - .bus_num = 0, - .chip_select = 0, - }, -}; - -static struct mv643xx_eth_platform_data dreamplug_ge00_data = { - .phy_addr = MV643XX_ETH_PHY_ADDR(0), -}; - -static struct mv643xx_eth_platform_data dreamplug_ge01_data = { - .phy_addr = MV643XX_ETH_PHY_ADDR(1), -}; - -static struct mv_sata_platform_data dreamplug_sata_data = { - .n_ports = 1, -}; - -static struct mvsdio_platform_data dreamplug_mvsdio_data = { - /* unfortunately the CD signal has not been connected */ -}; - -static struct gpio_led dreamplug_led_pins[] = { - { - .name = "dreamplug:blue:bluetooth", - .gpio = 47, - .active_low = 1, - }, - { - .name = "dreamplug:green:wifi", - .gpio = 48, - .active_low = 1, - }, - { - .name = "dreamplug:green:wifi_ap", - .gpio = 49, - .active_low = 1, - }, -}; - -static struct gpio_led_platform_data dreamplug_led_data = { - .leds = dreamplug_led_pins, - .num_leds = ARRAY_SIZE(dreamplug_led_pins), -}; - -static struct platform_device dreamplug_leds = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &dreamplug_led_data, - } -}; - -static unsigned int dreamplug_mpp_config[] __initdata = { - MPP0_SPI_SCn, - 
MPP1_SPI_MOSI, - MPP2_SPI_SCK, - MPP3_SPI_MISO, - MPP47_GPIO, /* Bluetooth LED */ - MPP48_GPIO, /* Wifi LED */ - MPP49_GPIO, /* Wifi AP LED */ - 0 -}; - -static void __init dreamplug_init(void) +static void __init kirkwood_dt_init(void) { + pr_info("Kirkwood: %s, TCLK=%d.\n", kirkwood_id(), kirkwood_tclk); + /* - * Basic setup. Needs to be called early. + * Disable propagation of mbus errors to the CPU local bus, + * as this causes mbus errors (which can occur for example + * for PCI aborts) to throw CPU aborts, which we're not set + * up to deal with. */ - kirkwood_mpp_conf(dreamplug_mpp_config); + writel(readl(CPU_CONFIG) & ~CPU_CONFIG_ERROR_PROP, CPU_CONFIG); - spi_register_board_info(dreamplug_spi_slave_info, - ARRAY_SIZE(dreamplug_spi_slave_info)); - kirkwood_spi_init(); + kirkwood_setup_cpu_mbus(); - kirkwood_ehci_init(); - kirkwood_ge00_init(&dreamplug_ge00_data); - kirkwood_ge01_init(&dreamplug_ge01_data); - kirkwood_sata_init(&dreamplug_sata_data); - kirkwood_sdio_init(&dreamplug_mvsdio_data); +#ifdef CONFIG_CACHE_FEROCEON_L2 + kirkwood_l2_init(); +#endif - platform_device_register(&dreamplug_leds); -} + /* internal devices that every board has */ + kirkwood_wdt_init(); + kirkwood_xor0_init(); + kirkwood_xor1_init(); + kirkwood_crypto_init(); -static void __init kirkwood_dt_init(void) -{ - kirkwood_init(); +#ifdef CONFIG_KEXEC + kexec_reinit = kirkwood_enable_pcie; +#endif if (of_machine_is_compatible("globalscale,dreamplug")) dreamplug_init(); diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index 77d4852e19f2..a02cae881f2f 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c @@ -279,7 +279,7 @@ void __init kirkwood_crypto_init(void) /***************************************************************************** * XOR0 ****************************************************************************/ -static void __init kirkwood_xor0_init(void) +void __init kirkwood_xor0_init(void) { kirkwood_clk_ctrl |= CGC_XOR0; @@ -291,7 +291,7 @@ static void __init kirkwood_xor0_init(void) /***************************************************************************** * XOR1 ****************************************************************************/ -static void __init kirkwood_xor1_init(void) +void __init kirkwood_xor1_init(void) { kirkwood_clk_ctrl |= CGC_XOR1; @@ -303,7 +303,7 @@ static void __init kirkwood_xor1_init(void) /***************************************************************************** * Watchdog ****************************************************************************/ -static void __init kirkwood_wdt_init(void) +void __init kirkwood_wdt_init(void) { orion_wdt_init(kirkwood_tclk); } @@ -392,7 +392,7 @@ void __init kirkwood_audio_init(void) /* * Identify device ID and revision. 
*/ -static char * __init kirkwood_id(void) +char * __init kirkwood_id(void) { u32 dev, rev; @@ -435,7 +435,7 @@ static char * __init kirkwood_id(void) } } -static void __init kirkwood_l2_init(void) +void __init kirkwood_l2_init(void) { #ifdef CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH writel(readl(L2_CONFIG_REG) | L2_WRITETHROUGH, L2_CONFIG_REG); @@ -450,7 +450,6 @@ void __init kirkwood_init(void) { printk(KERN_INFO "Kirkwood: %s, TCLK=%d.\n", kirkwood_id(), kirkwood_tclk); - kirkwood_i2s_data.tclk = kirkwood_tclk; /* * Disable propagation of mbus errors to the CPU local bus, diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h index 9071a397136d..fa8e7689c436 100644 --- a/arch/arm/mach-kirkwood/common.h +++ b/arch/arm/mach-kirkwood/common.h @@ -51,6 +51,21 @@ void kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, int (*dev void kirkwood_audio_init(void); void kirkwood_restart(char, const char *); +/* board init functions for boards not fully converted to fdt */ +#ifdef CONFIG_MACH_DREAMPLUG_DT +void dreamplug_init(void); +#else +static inline void dreamplug_init(void) {}; +#endif + +/* early init functions not converted to fdt yet */ +char *kirkwood_id(void); +void kirkwood_l2_init(void); +void kirkwood_wdt_init(void); +void kirkwood_xor0_init(void); +void kirkwood_xor1_init(void); +void kirkwood_crypto_init(void); + extern int kirkwood_tclk; extern struct sys_timer kirkwood_timer; diff --git a/arch/arm/mach-ks8695/time.c b/arch/arm/mach-ks8695/time.c index 37dfcd5bd2ad..ec783a3070ae 100644 --- a/arch/arm/mach-ks8695/time.c +++ b/arch/arm/mach-ks8695/time.c @@ -27,6 +27,7 @@ #include <linux/io.h> #include <asm/mach/time.h> +#include <asm/system_misc.h> #include <mach/regs-timer.h> #include <mach/regs-irq.h> diff --git a/arch/arm/mach-lpc32xx/Kconfig b/arch/arm/mach-lpc32xx/Kconfig index fde663508696..75946ac89ee9 100644 --- a/arch/arm/mach-lpc32xx/Kconfig +++ b/arch/arm/mach-lpc32xx/Kconfig @@ -29,5 +29,30 @@ config ARCH_LPC32XX_UART6_SELECT endmenu +menu "LPC32XX chip components" + +config ARCH_LPC32XX_IRAM_FOR_NET + bool "Use IRAM for network buffers" + default y + help + Say Y here to use the LPC internal fast IRAM (i.e. 256KB SRAM) as + network buffer. If the total combined required buffer sizes is + larger than the size of IRAM, then SDRAM will be used instead. + + This can be enabled safely if the IRAM is not intended for other + uses. + +config ARCH_LPC32XX_MII_SUPPORT + bool "Check to enable MII support or leave disabled for RMII support" + help + Say Y here to enable MII support, or N for RMII support. Regardless of + which support is selected, the ethernet interface driver needs to be + selected in the device driver networking section. + + The PHY3250 reference board uses RMII, so users of this board should + say N. 
+ +endmenu + endif diff --git a/arch/arm/mach-lpc32xx/clock.c b/arch/arm/mach-lpc32xx/clock.c index f55c772d1816..b7ef51119d37 100644 --- a/arch/arm/mach-lpc32xx/clock.c +++ b/arch/arm/mach-lpc32xx/clock.c @@ -87,6 +87,7 @@ #include <linux/list.h> #include <linux/errno.h> #include <linux/device.h> +#include <linux/delay.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/amba/bus.h> @@ -100,6 +101,8 @@ static DEFINE_SPINLOCK(global_clkregs_lock); +static int usb_pll_enable, usb_pll_valid; + static struct clk clk_armpll; static struct clk clk_usbpll; @@ -384,30 +387,62 @@ static u32 local_clk_usbpll_setup(struct clk_pll_setup *pHCLKPllSetup) static int local_usbpll_enable(struct clk *clk, int enable) { u32 reg; - int ret = -ENODEV; - unsigned long timeout = jiffies + msecs_to_jiffies(10); + int ret = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(20); reg = __raw_readl(LPC32XX_CLKPWR_USB_CTRL); - if (enable == 0) { - reg &= ~(LPC32XX_CLKPWR_USBCTRL_CLK_EN1 | - LPC32XX_CLKPWR_USBCTRL_CLK_EN2); - __raw_writel(reg, LPC32XX_CLKPWR_USB_CTRL); - } else if (reg & LPC32XX_CLKPWR_USBCTRL_PLL_PWRUP) { + __raw_writel(reg & ~(LPC32XX_CLKPWR_USBCTRL_CLK_EN2 | + LPC32XX_CLKPWR_USBCTRL_PLL_PWRUP), + LPC32XX_CLKPWR_USB_CTRL); + __raw_writel(reg & ~LPC32XX_CLKPWR_USBCTRL_CLK_EN1, + LPC32XX_CLKPWR_USB_CTRL); + + if (enable && usb_pll_valid && usb_pll_enable) { + ret = -ENODEV; + /* + * If the PLL rate has been previously set, then the rate + * in the PLL register is valid and can be enabled here. + * Otherwise, it needs to be enabled as part of setrate. + */ + + /* + * Gate clock into PLL + */ reg |= LPC32XX_CLKPWR_USBCTRL_CLK_EN1; __raw_writel(reg, LPC32XX_CLKPWR_USB_CTRL); - /* Wait for PLL lock */ + /* + * Enable PLL + */ + reg |= LPC32XX_CLKPWR_USBCTRL_PLL_PWRUP; + __raw_writel(reg, LPC32XX_CLKPWR_USB_CTRL); + + /* + * Wait for PLL to lock + */ while (time_before(jiffies, timeout) && (ret == -ENODEV)) { reg = __raw_readl(LPC32XX_CLKPWR_USB_CTRL); if (reg & LPC32XX_CLKPWR_USBCTRL_PLL_STS) ret = 0; + else + udelay(10); } + /* + * Gate clock from PLL if PLL is locked + */ if (ret == 0) { - reg |= LPC32XX_CLKPWR_USBCTRL_CLK_EN2; - __raw_writel(reg, LPC32XX_CLKPWR_USB_CTRL); + __raw_writel(reg | LPC32XX_CLKPWR_USBCTRL_CLK_EN2, + LPC32XX_CLKPWR_USB_CTRL); + } else { + __raw_writel(reg & ~(LPC32XX_CLKPWR_USBCTRL_CLK_EN1 | + LPC32XX_CLKPWR_USBCTRL_PLL_PWRUP), + LPC32XX_CLKPWR_USB_CTRL); } + } else if ((enable == 0) && usb_pll_valid && usb_pll_enable) { + usb_pll_valid = 0; + usb_pll_enable = 0; } return ret; @@ -425,7 +460,7 @@ static unsigned long local_usbpll_round_rate(struct clk *clk, */ rate = rate * 1000; - clkin = clk->parent->rate; + clkin = clk->get_rate(clk); usbdiv = (__raw_readl(LPC32XX_CLKPWR_USBCLK_PDIV) & LPC32XX_CLKPWR_USBPDIV_PLL_MASK) + 1; clkin = clkin / usbdiv; @@ -439,7 +474,8 @@ static unsigned long local_usbpll_round_rate(struct clk *clk, static int local_usbpll_set_rate(struct clk *clk, unsigned long rate) { - u32 clkin, reg, usbdiv; + int ret = -ENODEV; + u32 clkin, usbdiv; struct clk_pll_setup pllsetup; /* @@ -448,7 +484,7 @@ static int local_usbpll_set_rate(struct clk *clk, unsigned long rate) */ rate = rate * 1000; - clkin = clk->get_rate(clk); + clkin = clk->get_rate(clk->parent); usbdiv = (__raw_readl(LPC32XX_CLKPWR_USBCLK_PDIV) & LPC32XX_CLKPWR_USBPDIV_PLL_MASK) + 1; clkin = clkin / usbdiv; @@ -457,22 +493,25 @@ static int local_usbpll_set_rate(struct clk *clk, unsigned long rate) if (local_clk_find_pll_cfg(clkin, rate, &pllsetup) == 0) return -EINVAL; + /* + 
* Disable PLL clocks during PLL change + */ local_usbpll_enable(clk, 0); - - reg = __raw_readl(LPC32XX_CLKPWR_USB_CTRL); - reg |= LPC32XX_CLKPWR_USBCTRL_CLK_EN1; - __raw_writel(reg, LPC32XX_CLKPWR_USB_CTRL); - - pllsetup.analog_on = 1; + pllsetup.analog_on = 0; local_clk_usbpll_setup(&pllsetup); - clk->rate = clk_check_pll_setup(clkin, &pllsetup); + /* + * Start USB PLL and check PLL status + */ + + usb_pll_valid = 1; + usb_pll_enable = 1; - reg = __raw_readl(LPC32XX_CLKPWR_USB_CTRL); - reg |= LPC32XX_CLKPWR_USBCTRL_CLK_EN2; - __raw_writel(reg, LPC32XX_CLKPWR_USB_CTRL); + ret = local_usbpll_enable(clk, 1); + if (ret >= 0) + clk->rate = clk_check_pll_setup(clkin, &pllsetup); - return 0; + return ret; } static struct clk clk_usbpll = { diff --git a/arch/arm/mach-lpc32xx/common.c b/arch/arm/mach-lpc32xx/common.c index 6c76bb36559b..bbbf063a74c2 100644 --- a/arch/arm/mach-lpc32xx/common.c +++ b/arch/arm/mach-lpc32xx/common.c @@ -160,6 +160,53 @@ struct platform_device lpc32xx_adc_device = { }; /* + * USB support + */ +/* The dmamask must be set for OHCI to work */ +static u64 ohci_dmamask = ~(u32) 0; +static struct resource ohci_resources[] = { + { + .start = IO_ADDRESS(LPC32XX_USB_BASE), + .end = IO_ADDRESS(LPC32XX_USB_BASE + 0x100 - 1), + .flags = IORESOURCE_MEM, + }, { + .start = IRQ_LPC32XX_USB_HOST, + .flags = IORESOURCE_IRQ, + }, +}; +struct platform_device lpc32xx_ohci_device = { + .name = "usb-ohci", + .id = -1, + .dev = { + .dma_mask = &ohci_dmamask, + .coherent_dma_mask = 0xFFFFFFFF, + }, + .num_resources = ARRAY_SIZE(ohci_resources), + .resource = ohci_resources, +}; + +/* + * Network Support + */ +static struct resource net_resources[] = { + [0] = DEFINE_RES_MEM(LPC32XX_ETHERNET_BASE, SZ_4K), + [1] = DEFINE_RES_MEM(LPC32XX_IRAM_BASE, SZ_128K), + [2] = DEFINE_RES_IRQ(IRQ_LPC32XX_ETHERNET), +}; + +static u64 lpc32xx_mac_dma_mask = 0xffffffffUL; +struct platform_device lpc32xx_net_device = { + .name = "lpc-eth", + .id = 0, + .dev = { + .dma_mask = &lpc32xx_mac_dma_mask, + .coherent_dma_mask = 0xffffffffUL, + }, + .num_resources = ARRAY_SIZE(net_resources), + .resource = net_resources, +}; + +/* * Returns the unique ID for the device */ void lpc32xx_get_uid(u32 devid[4]) diff --git a/arch/arm/mach-lpc32xx/common.h b/arch/arm/mach-lpc32xx/common.h index 68f2e46d98ad..68e45e8c9486 100644 --- a/arch/arm/mach-lpc32xx/common.h +++ b/arch/arm/mach-lpc32xx/common.h @@ -19,6 +19,7 @@ #ifndef __LPC32XX_COMMON_H #define __LPC32XX_COMMON_H +#include <mach/board.h> #include <linux/platform_device.h> /* @@ -31,6 +32,8 @@ extern struct platform_device lpc32xx_i2c2_device; extern struct platform_device lpc32xx_tsc_device; extern struct platform_device lpc32xx_adc_device; extern struct platform_device lpc32xx_rtc_device; +extern struct platform_device lpc32xx_ohci_device; +extern struct platform_device lpc32xx_net_device; /* * Other arch specific structures and functions @@ -67,7 +70,6 @@ extern u32 clk_get_pclk_div(void); extern void lpc32xx_get_uid(u32 devid[4]); extern u32 lpc32xx_return_iram_size(void); - /* * Pointers used for sizing and copying suspend function data */ diff --git a/arch/arm/mach-lpc32xx/include/mach/board.h b/arch/arm/mach-lpc32xx/include/mach/board.h new file mode 100644 index 000000000000..52531ca7bd1d --- /dev/null +++ b/arch/arm/mach-lpc32xx/include/mach/board.h @@ -0,0 +1,24 @@ +/* + * arm/arch/mach-lpc32xx/include/mach/board.h + * + * Author: Kevin Wells <kevin.wells@nxp.com> + * + * Copyright (C) 2010 NXP Semiconductors + * + * This program is free software; you can 
redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ASM_ARCH_BOARD_H +#define __ASM_ARCH_BOARD_H + +extern u32 lpc32xx_return_iram_size(void); + +#endif /* __ASM_ARCH_BOARD_H */ diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c index c74de01ab5b6..d080cb1123dd 100644 --- a/arch/arm/mach-lpc32xx/irq.c +++ b/arch/arm/mach-lpc32xx/irq.c @@ -150,6 +150,10 @@ static const struct lpc32xx_event_info lpc32xx_events[NR_IRQS] = { .event_group = &lpc32xx_event_int_regs, .mask = LPC32XX_CLKPWR_INTSRC_KEY_BIT, }, + [IRQ_LPC32XX_ETHERNET] = { + .event_group = &lpc32xx_event_int_regs, + .mask = LPC32XX_CLKPWR_INTSRC_MAC_BIT, + }, [IRQ_LPC32XX_USB_OTG_ATX] = { .event_group = &lpc32xx_event_int_regs, .mask = LPC32XX_CLKPWR_INTSRC_USBATXINT_BIT, diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c index 0d79a3f8a5e0..7f7401ec7487 100644 --- a/arch/arm/mach-lpc32xx/phy3250.c +++ b/arch/arm/mach-lpc32xx/phy3250.c @@ -37,6 +37,7 @@ #include <mach/hardware.h> #include <mach/platform.h> +#include <mach/board.h> #include <mach/gpio-lpc32xx.h> #include "common.h" @@ -255,6 +256,8 @@ static struct platform_device *phy3250_devs[] __initdata = { &lpc32xx_watchdog_device, &lpc32xx_gpio_led_device, &lpc32xx_adc_device, + &lpc32xx_ohci_device, + &lpc32xx_net_device, }; static struct amba_device *amba_devs[] __initdata = { diff --git a/arch/arm/mach-mmp/common.c b/arch/arm/mach-mmp/common.c index 062b5b93c50e..9292b7966e3b 100644 --- a/arch/arm/mach-mmp/common.c +++ b/arch/arm/mach-mmp/common.c @@ -14,6 +14,7 @@ #include <asm/page.h> #include <asm/mach/map.h> +#include <asm/system_misc.h> #include <mach/addr-map.h> #include <mach/cputype.h> diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c index f7d59c03fc67..b24d2c32cba9 100644 --- a/arch/arm/mach-mmp/pxa168.c +++ b/arch/arm/mach-mmp/pxa168.c @@ -16,6 +16,7 @@ #include <linux/platform_device.h> #include <asm/mach/time.h> +#include <asm/system_misc.h> #include <mach/addr-map.h> #include <mach/cputype.h> #include <mach/regs-apbc.h> diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c index 97b8191d9d38..4a8ea0d40b6f 100644 --- a/arch/arm/mach-msm/board-sapphire.c +++ b/arch/arm/mach-msm/board-sapphire.c @@ -27,7 +27,6 @@ #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/flash.h> -#include <asm/system.h> #include <mach/system.h> #include <mach/vreg.h> #include <mach/board.h> diff --git a/arch/arm/mach-mxs/system.c b/arch/arm/mach-mxs/system.c index 7aa5ac5d78bf..80ac1fca8a00 100644 --- a/arch/arm/mach-mxs/system.c +++ b/arch/arm/mach-mxs/system.c @@ -25,7 +25,7 @@ #include <linux/module.h> #include <asm/proc-fns.h> -#include <asm/system.h> +#include <asm/system_misc.h> #include <mach/mxs.h> #include <mach/common.h> diff --git a/arch/arm/mach-omap1/id.c b/arch/arm/mach-omap1/id.c index f24c1e2c5044..2b28e1da14b0 100644 --- a/arch/arm/mach-omap1/id.c +++ b/arch/arm/mach-omap1/id.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> +#include <asm/system_info.h> #include <plat/cpu.h> diff --git 
a/arch/arm/mach-omap1/leds-h2p2-debug.c b/arch/arm/mach-omap1/leds-h2p2-debug.c index 4b818eb9f911..f6b14a14a957 100644 --- a/arch/arm/mach-omap1/leds-h2p2-debug.c +++ b/arch/arm/mach-omap1/leds-h2p2-debug.c @@ -17,7 +17,6 @@ #include <mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include <asm/mach-types.h> #include <plat/fpga.h> diff --git a/arch/arm/mach-omap1/leds-innovator.c b/arch/arm/mach-omap1/leds-innovator.c index 9b99c2894623..3a066ee8d02c 100644 --- a/arch/arm/mach-omap1/leds-innovator.c +++ b/arch/arm/mach-omap1/leds-innovator.c @@ -5,7 +5,6 @@ #include <mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include "leds.h" diff --git a/arch/arm/mach-omap1/leds-osk.c b/arch/arm/mach-omap1/leds-osk.c index da09f4364979..936ed426b84f 100644 --- a/arch/arm/mach-omap1/leds-osk.c +++ b/arch/arm/mach-omap1/leds-osk.c @@ -8,7 +8,6 @@ #include <mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include "leds.h" diff --git a/arch/arm/mach-omap1/mux.c b/arch/arm/mach-omap1/mux.c index 5fdef7a34828..087dba0df47e 100644 --- a/arch/arm/mach-omap1/mux.c +++ b/arch/arm/mach-omap1/mux.c @@ -27,7 +27,6 @@ #include <linux/io.h> #include <linux/spinlock.h> -#include <asm/system.h> #include <plat/mux.h> diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index 2fae6a2740f1..4d8dd9a1b04c 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c @@ -44,7 +44,6 @@ #include <linux/clockchips.h> #include <linux/io.h> -#include <asm/system.h> #include <asm/leds.h> #include <asm/irq.h> #include <asm/sched_clock.h> diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c index a2e6d0709df2..325b9a0aa4a0 100644 --- a/arch/arm/mach-omap1/timer32k.c +++ b/arch/arm/mach-omap1/timer32k.c @@ -46,7 +46,6 @@ #include <linux/clockchips.h> #include <linux/io.h> -#include <asm/system.h> #include <asm/leds.h> #include <asm/irq.h> #include <asm/mach/irq.h> diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c index 30768c2f53fd..37dcb1bc025e 100644 --- a/arch/arm/mach-omap2/board-4430sdp.c +++ b/arch/arm/mach-omap2/board-4430sdp.c @@ -490,21 +490,22 @@ static struct platform_device omap_vwlan_device = { static int omap4_twl6030_hsmmc_late_init(struct device *dev) { - int ret = 0; + int irq = 0; struct platform_device *pdev = container_of(dev, struct platform_device, dev); struct omap_mmc_platform_data *pdata = dev->platform_data; /* Setting MMC1 Card detect Irq */ if (pdev->id == 0) { - ret = twl6030_mmc_card_detect_config(); - if (ret) + irq = twl6030_mmc_card_detect_config(); + if (irq < 0) { pr_err("Failed configuring MMC1 card detect\n"); - pdata->slots[0].card_detect_irq = TWL6030_IRQ_BASE + - MMCDETECT_INTR_OFFSET; + return irq; + } + pdata->slots[0].card_detect_irq = irq; pdata->slots[0].card_detect = twl6030_mmc_card_detect; } - return ret; + return 0; } static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev) diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c index 8842e04aef01..ae2251fa4a69 100644 --- a/arch/arm/mach-omap2/board-omap3touchbook.c +++ b/arch/arm/mach-omap2/board-omap3touchbook.c @@ -42,6 +42,7 @@ #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/flash.h> +#include <asm/system_info.h> #include <plat/board.h> #include "common.h" diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index e9071a57c37b..8bf8e99c358e 100644 --- 
a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c @@ -238,7 +238,7 @@ struct wl12xx_platform_data omap_panda_wlan_data __initdata = { static int omap4_twl6030_hsmmc_late_init(struct device *dev) { - int ret = 0; + int irq = 0; struct platform_device *pdev = container_of(dev, struct platform_device, dev); struct omap_mmc_platform_data *pdata = dev->platform_data; @@ -249,14 +249,15 @@ static int omap4_twl6030_hsmmc_late_init(struct device *dev) } /* Setting MMC1 Card detect Irq */ if (pdev->id == 0) { - ret = twl6030_mmc_card_detect_config(); - if (ret) + irq = twl6030_mmc_card_detect_config(); + if (irq < 0) { dev_err(dev, "%s: Error card detect config(%d)\n", - __func__, ret); - else - pdata->slots[0].card_detect = twl6030_mmc_card_detect; + __func__, irq); + return irq; + } + pdata->slots[0].card_detect = twl6030_mmc_card_detect; } - return ret; + return 0; } static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev) diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index 16aebfb8a7ec..f120997309af 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -25,6 +25,7 @@ #include <linux/gpio_keys.h> #include <linux/mmc/host.h> #include <linux/power/isp1704_charger.h> +#include <asm/system_info.h> #include <plat/mcspi.h> #include <plat/board.h> diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index f26b2faa1694..65c33911341f 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c @@ -35,7 +35,6 @@ #include <linux/irq.h> #include <linux/interrupt.h> -#include <asm/system.h> #include <plat/omap_hwmod.h> diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index 63ab686834c1..13670aa84e58 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -46,7 +46,6 @@ #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/smp_scu.h> -#include <asm/system.h> #include <asm/pgalloc.h> #include <asm/suspend.h> #include <asm/hardware/cache-l2x0.h> diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c index 5ca45ca76946..95442b69ae27 100644 --- a/arch/arm/mach-omap2/pm24xx.c +++ b/arch/arm/mach-omap2/pm24xx.c @@ -33,6 +33,7 @@ #include <asm/mach/time.h> #include <asm/mach/irq.h> #include <asm/mach-types.h> +#include <asm/system_misc.h> #include <plat/clock.h> #include <plat/sram.h> diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 027a537d72b2..238defc6f6df 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -31,6 +31,7 @@ #include <trace/events/power.h> #include <asm/suspend.h> +#include <asm/system_misc.h> #include <plat/sram.h> #include "clockdomain.h" diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index 91e0b1c9b76c..9ccaadc2cf07 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c @@ -16,6 +16,7 @@ #include <linux/list.h> #include <linux/err.h> #include <linux/slab.h> +#include <asm/system_misc.h> #include "common.h" #include "clockdomain.h" diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S index abd283400490..9f6b83d1b193 100644 --- a/arch/arm/mach-omap2/sleep44xx.S +++ b/arch/arm/mach-omap2/sleep44xx.S @@ -10,7 +10,6 @@ */ #include <linux/linkage.h> -#include <asm/system.h> #include <asm/smp_scu.h> #include <asm/memory.h> #include 
<asm/hardware/cache-l2x0.h> diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index 5dad38ec00ea..24481666d2cd 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -21,6 +21,7 @@ #include <net/dsa.h> #include <asm/page.h> #include <asm/setup.h> +#include <asm/system_misc.h> #include <asm/timex.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c index 91b0f4788597..c3ed15b8ea25 100644 --- a/arch/arm/mach-orion5x/dns323-setup.c +++ b/arch/arm/mach-orion5x/dns323-setup.c @@ -32,6 +32,7 @@ #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> +#include <asm/system_info.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" diff --git a/arch/arm/mach-orion5x/ls-chl-setup.c b/arch/arm/mach-orion5x/ls-chl-setup.c index 527213169db0..0c9e413b5805 100644 --- a/arch/arm/mach-orion5x/ls-chl-setup.c +++ b/arch/arm/mach-orion5x/ls-chl-setup.c @@ -22,7 +22,6 @@ #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> -#include <asm/system.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" diff --git a/arch/arm/mach-orion5x/ls_hgl-setup.c b/arch/arm/mach-orion5x/ls_hgl-setup.c index 9a8697b97dd7..c1b5d8a58037 100644 --- a/arch/arm/mach-orion5x/ls_hgl-setup.c +++ b/arch/arm/mach-orion5x/ls_hgl-setup.c @@ -21,7 +21,6 @@ #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> -#include <asm/system.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" diff --git a/arch/arm/mach-orion5x/lsmini-setup.c b/arch/arm/mach-orion5x/lsmini-setup.c index 09c73659f467..949eaa8f12e3 100644 --- a/arch/arm/mach-orion5x/lsmini-setup.c +++ b/arch/arm/mach-orion5x/lsmini-setup.c @@ -21,7 +21,6 @@ #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> -#include <asm/system.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" diff --git a/arch/arm/mach-pnx4008/core.c b/arch/arm/mach-pnx4008/core.c index 4cfb40b2ec19..be4c92858509 100644 --- a/arch/arm/mach-pnx4008/core.c +++ b/arch/arm/mach-pnx4008/core.c @@ -32,7 +32,7 @@ #include <asm/mach-types.h> #include <asm/pgtable.h> #include <asm/page.h> -#include <asm/system.h> +#include <asm/system_misc.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> diff --git a/arch/arm/mach-pnx4008/dma.c b/arch/arm/mach-pnx4008/dma.c index 7fa4bf2e2125..a4739e9fb2fb 100644 --- a/arch/arm/mach-pnx4008/dma.c +++ b/arch/arm/mach-pnx4008/dma.c @@ -24,7 +24,6 @@ #include <linux/io.h> #include <linux/gfp.h> -#include <asm/system.h> #include <mach/hardware.h> #include <mach/dma.h> #include <asm/dma-mapping.h> diff --git a/arch/arm/mach-pnx4008/irq.c b/arch/arm/mach-pnx4008/irq.c index 7608c7a288cf..41e4201972d5 100644 --- a/arch/arm/mach-pnx4008/irq.c +++ b/arch/arm/mach-pnx4008/irq.c @@ -28,7 +28,6 @@ #include <asm/setup.h> #include <asm/pgtable.h> #include <asm/page.h> -#include <asm/system.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/map.h> diff --git a/arch/arm/mach-pnx4008/time.c b/arch/arm/mach-pnx4008/time.c index 0c8aad4bb0dc..0cfe8af3d3be 100644 --- a/arch/arm/mach-pnx4008/time.c +++ b/arch/arm/mach-pnx4008/time.c @@ -24,7 +24,6 @@ #include <linux/irq.h> #include <linux/io.h> -#include <asm/system.h> #include <mach/hardware.h> #include <asm/leds.h> #include <asm/mach/time.h> diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c index 
4b981b82d2a5..895ee8c45009 100644 --- a/arch/arm/mach-pxa/cm-x300.c +++ b/arch/arm/mach-pxa/cm-x300.c @@ -44,6 +44,7 @@ #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/setup.h> +#include <asm/system_info.h> #include <mach/pxa300.h> #include <mach/pxa27x-udc.h> diff --git a/arch/arm/mach-pxa/colibri-pxa3xx.c b/arch/arm/mach-pxa/colibri-pxa3xx.c index 2b8ca0de8a3d..68cc75fac219 100644 --- a/arch/arm/mach-pxa/colibri-pxa3xx.c +++ b/arch/arm/mach-pxa/colibri-pxa3xx.c @@ -18,6 +18,7 @@ #include <asm/mach-types.h> #include <mach/hardware.h> #include <asm/sizes.h> +#include <asm/system_info.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <mach/pxa3xx-regs.h> diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index 11f1e735966e..de9d45e673fd 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c @@ -40,7 +40,6 @@ #include <asm/mach-types.h> #include <mach/hardware.h> #include <asm/irq.h> -#include <asm/system.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c index 5432ecb15def..42254175fcf4 100644 --- a/arch/arm/mach-pxa/generic.c +++ b/arch/arm/mach-pxa/generic.c @@ -22,7 +22,6 @@ #include <linux/init.h> #include <mach/hardware.h> -#include <asm/system.h> #include <asm/mach/map.h> #include <asm/mach-types.h> diff --git a/arch/arm/mach-pxa/leds-idp.c b/arch/arm/mach-pxa/leds-idp.c index 8b9c17142d5a..06b060025d11 100644 --- a/arch/arm/mach-pxa/leds-idp.c +++ b/arch/arm/mach-pxa/leds-idp.c @@ -16,7 +16,6 @@ #include <mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include <mach/pxa25x.h> #include <mach/idp.h> diff --git a/arch/arm/mach-pxa/leds-lubbock.c b/arch/arm/mach-pxa/leds-lubbock.c index e26d5efe1969..0bd85c884a7c 100644 --- a/arch/arm/mach-pxa/leds-lubbock.c +++ b/arch/arm/mach-pxa/leds-lubbock.c @@ -15,7 +15,6 @@ #include <mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include <mach/pxa25x.h> #include <mach/lubbock.h> diff --git a/arch/arm/mach-pxa/leds-mainstone.c b/arch/arm/mach-pxa/leds-mainstone.c index db4af5eee8b2..4058ab340fe6 100644 --- a/arch/arm/mach-pxa/leds-mainstone.c +++ b/arch/arm/mach-pxa/leds-mainstone.c @@ -14,7 +14,6 @@ #include <mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include <mach/pxa27x.h> #include <mach/mainstone.h> diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index 5e26f3e93fdd..6f4785b347c2 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -34,6 +34,7 @@ #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> +#include <asm/system_info.h> #include <mach/pxa27x.h> #include <mach/magician.h> diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c index 744baee12c0c..89d98c832189 100644 --- a/arch/arm/mach-pxa/poodle.c +++ b/arch/arm/mach-pxa/poodle.c @@ -34,7 +34,6 @@ #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/setup.h> -#include <asm/system.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c index c8497b00cdfe..b4528899ef08 100644 --- a/arch/arm/mach-pxa/reset.c +++ b/arch/arm/mach-pxa/reset.c @@ -9,6 +9,7 @@ #include <linux/gpio.h> #include <linux/io.h> #include <asm/proc-fns.h> +#include <asm/system_misc.h> #include <mach/regs-ost.h> #include <mach/reset.h> diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index 023d6ca789de..7a3d342a7732 100644 --- 
a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c @@ -57,6 +57,7 @@ #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/sizes.h> +#include <asm/system_info.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index a4dd1c347050..af3d4f7646d7 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -32,6 +32,7 @@ #include <asm/mach-types.h> #include <asm/suspend.h> +#include <asm/system_info.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index acd329afc3ac..45868bb43cbd 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -33,7 +33,6 @@ #include <linux/clkdev.h> #include <linux/mtd/physmap.h> -#include <asm/system.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/leds.h> diff --git a/arch/arm/mach-realview/hotplug.c b/arch/arm/mach-realview/hotplug.c index eb55f05bef3a..57d9efba2956 100644 --- a/arch/arm/mach-realview/hotplug.c +++ b/arch/arm/mach-realview/hotplug.c @@ -13,6 +13,7 @@ #include <linux/smp.h> #include <asm/cacheflush.h> +#include <asm/cp15.h> #include <asm/smp_plat.h> extern volatile int pen_release; diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c index 731552d68adf..f3fa259ce01f 100644 --- a/arch/arm/mach-rpc/riscpc.c +++ b/arch/arm/mach-rpc/riscpc.c @@ -28,6 +28,7 @@ #include <asm/page.h> #include <asm/domain.h> #include <asm/setup.h> +#include <asm/system_misc.h> #include <asm/mach/map.h> #include <asm/mach/arch.h> diff --git a/arch/arm/mach-s3c24xx/s3c2410.c b/arch/arm/mach-s3c24xx/s3c2410.c index 061b6bb1a557..a3c5cb086ee2 100644 --- a/arch/arm/mach-s3c24xx/s3c2410.c +++ b/arch/arm/mach-s3c24xx/s3c2410.c @@ -30,6 +30,7 @@ #include <mach/hardware.h> #include <asm/irq.h> +#include <asm/system_misc.h> #include <plat/cpu-freq.h> diff --git a/arch/arm/mach-s3c24xx/s3c2412.c b/arch/arm/mach-s3c24xx/s3c2412.c index c6eac9871093..d4bc7f960bbb 100644 --- a/arch/arm/mach-s3c24xx/s3c2412.c +++ b/arch/arm/mach-s3c24xx/s3c2412.c @@ -31,6 +31,7 @@ #include <mach/hardware.h> #include <asm/proc-fns.h> #include <asm/irq.h> +#include <asm/system_misc.h> #include <plat/cpu-freq.h> diff --git a/arch/arm/mach-s3c24xx/s3c2416.c b/arch/arm/mach-s3c24xx/s3c2416.c index 0e9a71c90ed7..7743fade50df 100644 --- a/arch/arm/mach-s3c24xx/s3c2416.c +++ b/arch/arm/mach-s3c24xx/s3c2416.c @@ -43,6 +43,7 @@ #include <mach/hardware.h> #include <asm/proc-fns.h> #include <asm/irq.h> +#include <asm/system_misc.h> #include <mach/regs-s3c2443-clock.h> diff --git a/arch/arm/mach-s3c24xx/s3c2443.c b/arch/arm/mach-s3c24xx/s3c2443.c index b7778a9dafaf..ab648ad8fa50 100644 --- a/arch/arm/mach-s3c24xx/s3c2443.c +++ b/arch/arm/mach-s3c24xx/s3c2443.c @@ -29,6 +29,7 @@ #include <mach/hardware.h> #include <asm/irq.h> +#include <asm/system_misc.h> #include <mach/regs-s3c2443-clock.h> diff --git a/arch/arm/mach-s3c24xx/s3c244x.c b/arch/arm/mach-s3c24xx/s3c244x.c index d15852f642b7..6f74118f60c6 100644 --- a/arch/arm/mach-s3c24xx/s3c244x.c +++ b/arch/arm/mach-s3c24xx/s3c244x.c @@ -23,6 +23,7 @@ #include <linux/clk.h> #include <linux/io.h> +#include <asm/system_misc.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c index bee7dcd4df7c..b313380342a5 100644 --- a/arch/arm/mach-s3c64xx/common.c +++ b/arch/arm/mach-s3c64xx/common.c @@ -29,6 +29,7 @@ #include <asm/mach/arch.h> 
#include <asm/mach/map.h> #include <asm/hardware/vic.h> +#include <asm/system_misc.h> #include <mach/map.h> #include <mach/hardware.h> diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c index b6a67728cc88..0ace108c3e3d 100644 --- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c +++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c @@ -17,6 +17,8 @@ #include <linux/mfd/wm831x/gpio.h> #include <linux/mfd/wm8994/pdata.h> +#include <linux/regulator/machine.h> + #include <sound/wm5100.h> #include <sound/wm8996.h> #include <sound/wm8962.h> @@ -153,6 +155,14 @@ static const struct i2c_board_info wm1259_devs[] = { }, }; +static struct regulator_init_data wm8994_ldo1 = { + .supply_regulator = "WALLVDD", +}; + +static struct regulator_init_data wm8994_ldo2 = { + .supply_regulator = "WALLVDD", +}; + static struct wm8994_pdata wm8994_pdata = { .gpio_base = CODEC_GPIO_BASE, .gpio_defaults = { @@ -160,8 +170,8 @@ static struct wm8994_pdata wm8994_pdata = { }, .irq_base = CODEC_IRQ_BASE, .ldo = { - { .supply = "WALLVDD" }, - { .supply = "WALLVDD" }, + { .init_data = &wm8994_ldo1, }, + { .init_data = &wm8994_ldo2, }, }, }; diff --git a/arch/arm/mach-s5p64x0/common.c b/arch/arm/mach-s5p64x0/common.c index 9143f8b19962..6e6a0a9d6778 100644 --- a/arch/arm/mach-s5p64x0/common.c +++ b/arch/arm/mach-s5p64x0/common.c @@ -27,6 +27,7 @@ #include <asm/irq.h> #include <asm/proc-fns.h> +#include <asm/system_misc.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> diff --git a/arch/arm/mach-s5pc100/common.c b/arch/arm/mach-s5pc100/common.c index ff71e2d467c6..621908658861 100644 --- a/arch/arm/mach-s5pc100/common.c +++ b/arch/arm/mach-s5pc100/common.c @@ -27,6 +27,7 @@ #include <asm/irq.h> #include <asm/proc-fns.h> +#include <asm/system_misc.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 1d0f71b17a26..7c524b4e415d 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -23,10 +23,10 @@ #include <video/sa1100fb.h> #include <asm/div64.h> -#include <asm/system.h> #include <asm/mach/map.h> #include <asm/mach/flash.h> #include <asm/irq.h> +#include <asm/system_misc.h> #include <mach/hardware.h> #include <mach/irqs.h> diff --git a/arch/arm/mach-sa1100/leds-assabet.c b/arch/arm/mach-sa1100/leds-assabet.c index 64e9b4b11b54..3699176bca94 100644 --- a/arch/arm/mach-sa1100/leds-assabet.c +++ b/arch/arm/mach-sa1100/leds-assabet.c @@ -13,7 +13,6 @@ #include <mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include <mach/assabet.h> #include "leds.h" diff --git a/arch/arm/mach-sa1100/leds-badge4.c b/arch/arm/mach-sa1100/leds-badge4.c index cf1e38458b81..f99fac3eedb6 100644 --- a/arch/arm/mach-sa1100/leds-badge4.c +++ b/arch/arm/mach-sa1100/leds-badge4.c @@ -14,7 +14,6 @@ #include <mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include "leds.h" diff --git a/arch/arm/mach-sa1100/leds-cerf.c b/arch/arm/mach-sa1100/leds-cerf.c index 259b48e0be89..040540fb7d8a 100644 --- a/arch/arm/mach-sa1100/leds-cerf.c +++ b/arch/arm/mach-sa1100/leds-cerf.c @@ -7,7 +7,6 @@ #include <mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include "leds.h" diff --git a/arch/arm/mach-sa1100/leds-hackkit.c b/arch/arm/mach-sa1100/leds-hackkit.c index 2bce137462e4..6a2352436e62 100644 --- a/arch/arm/mach-sa1100/leds-hackkit.c +++ b/arch/arm/mach-sa1100/leds-hackkit.c @@ -13,7 +13,6 @@ #include 
<mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include "leds.h" diff --git a/arch/arm/mach-sa1100/leds-lart.c b/arch/arm/mach-sa1100/leds-lart.c index 0505a1fdcdb2..a51830c60e53 100644 --- a/arch/arm/mach-sa1100/leds-lart.c +++ b/arch/arm/mach-sa1100/leds-lart.c @@ -13,7 +13,6 @@ #include <mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include "leds.h" diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c index bf85b8b259d5..2fa499ec6afe 100644 --- a/arch/arm/mach-sa1100/pm.c +++ b/arch/arm/mach-sa1100/pm.c @@ -30,7 +30,6 @@ #include <mach/hardware.h> #include <asm/memory.h> #include <asm/suspend.h> -#include <asm/system.h> #include <asm/mach/time.h> extern int sa1100_finish_suspend(unsigned long); diff --git a/arch/arm/mach-shark/leds.c b/arch/arm/mach-shark/leds.c index ccd49189bbd0..25609076921f 100644 --- a/arch/arm/mach-shark/leds.c +++ b/arch/arm/mach-shark/leds.c @@ -23,7 +23,6 @@ #include <linux/io.h> #include <asm/leds.h> -#include <asm/system.h> #define LED_STATE_ENABLED 1 #define LED_STATE_CLAIMED 2 diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 262f8def5577..b56dde2732bb 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -1186,6 +1186,7 @@ static struct i2c_board_info i2c1_devices[] = { }, }; + #define GPIO_PORT9CR 0xE6051009 #define GPIO_PORT10CR 0xE605100A #define USCCR1 0xE6058144 diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index bd4253ba05b6..ca609502d6cd 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1327,15 +1327,6 @@ static struct i2c_board_info i2c1_devices[] = { }, }; -static void __init mackerel_map_io(void) -{ - sh7372_map_io(); - /* DMA memory at 0xff200000 - 0xffdfffff. The default 2MB size isn't - * enough to allocate the frame buffer memory. 
- */ - init_consistent_dma_size(12 << 20); -} - #define GPIO_PORT9CR 0xE6051009 #define GPIO_PORT10CR 0xE605100A #define GPIO_PORT167CR 0xE60520A7 @@ -1555,7 +1546,7 @@ static void __init mackerel_init(void) } MACHINE_START(MACKEREL, "mackerel") - .map_io = mackerel_map_io, + .map_io = sh7372_map_io, .init_early = sh7372_add_early_devices, .init_irq = sh7372_init_irq, .handle_irq = shmobile_handle_irq_intc, diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c index 1b2334277e85..21b09b6455e4 100644 --- a/arch/arm/mach-shmobile/cpuidle.c +++ b/arch/arm/mach-shmobile/cpuidle.c @@ -13,7 +13,6 @@ #include <linux/suspend.h> #include <linux/module.h> #include <linux/err.h> -#include <asm/system.h> #include <asm/io.h> static void shmobile_enter_wfi(void) diff --git a/arch/arm/mach-shmobile/include/mach/system.h b/arch/arm/mach-shmobile/include/mach/system.h index 3bbcb3fa0775..540eaff08f34 100644 --- a/arch/arm/mach-shmobile/include/mach/system.h +++ b/arch/arm/mach-shmobile/include/mach/system.h @@ -1,6 +1,8 @@ #ifndef __ASM_ARCH_SYSTEM_H #define __ASM_ARCH_SYSTEM_H +#include <asm/system_misc.h> + static inline void arch_reset(char mode, const char *cmd) { soft_restart(0); diff --git a/arch/arm/mach-shmobile/pm-r8a7779.c b/arch/arm/mach-shmobile/pm-r8a7779.c index c38ba7b43ef8..a18a4ae16d2b 100644 --- a/arch/arm/mach-shmobile/pm-r8a7779.c +++ b/arch/arm/mach-shmobile/pm-r8a7779.c @@ -18,7 +18,6 @@ #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/console.h> -#include <asm/system.h> #include <asm/io.h> #include <mach/common.h> #include <mach/r8a7779.h> diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index fcf8b1761aef..a3bdb12acde9 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -21,7 +21,6 @@ #include <linux/irq.h> #include <linux/bitrev.h> #include <linux/console.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/tlbflush.h> #include <asm/suspend.h> diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index 5375325d7ca7..4e818b7de781 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -31,6 +31,7 @@ #include <linux/sh_intc.h> #include <linux/sh_timer.h> #include <linux/pm_domain.h> +#include <linux/dma-mapping.h> #include <mach/hardware.h> #include <mach/sh7372.h> #include <mach/common.h> @@ -54,6 +55,12 @@ static struct map_desc sh7372_io_desc[] __initdata = { void __init sh7372_map_io(void) { iotable_init(sh7372_io_desc, ARRAY_SIZE(sh7372_io_desc)); + + /* + * DMA memory at 0xff200000 - 0xffdfffff. The default 2MB size isn't + * enough to allocate the frame buffer memory. 
+ */ + init_consistent_dma_size(12 << 20); } /* SCIFA0 */ diff --git a/arch/arm/mach-shmobile/suspend.c b/arch/arm/mach-shmobile/suspend.c index c1febe13f709..4d1b86a49923 100644 --- a/arch/arm/mach-shmobile/suspend.c +++ b/arch/arm/mach-shmobile/suspend.c @@ -12,8 +12,8 @@ #include <linux/suspend.h> #include <linux/module.h> #include <linux/err.h> -#include <asm/system.h> #include <asm/io.h> +#include <asm/system_misc.h> static int shmobile_suspend_default_enter(suspend_state_t suspend_state) { diff --git a/arch/arm/mach-spear6xx/Kconfig b/arch/arm/mach-spear6xx/Kconfig index ff4ae5ba00f1..fbe298bd1d92 100644 --- a/arch/arm/mach-spear6xx/Kconfig +++ b/arch/arm/mach-spear6xx/Kconfig @@ -5,11 +5,12 @@ if ARCH_SPEAR6XX menu "SPEAr6xx Implementations" -config BOARD_SPEAR600_EVB - bool "SPEAr600 Evaluation Board" +config BOARD_SPEAR600_DT + bool "SPEAr600 generic board configured via device-tree" select MACH_SPEAR600 + select USE_OF help - Supports ST SPEAr600 Evaluation Board + Supports ST SPEAr600 boards configured via the device-tree endmenu diff --git a/arch/arm/mach-spear6xx/Makefile b/arch/arm/mach-spear6xx/Makefile index cc1a4d82d459..76e5750552fc 100644 --- a/arch/arm/mach-spear6xx/Makefile +++ b/arch/arm/mach-spear6xx/Makefile @@ -4,9 +4,3 @@ # common files obj-y += clock.o spear6xx.o - -# spear600 specific files -obj-$(CONFIG_MACH_SPEAR600) += spear600.o - -# spear600 boards files -obj-$(CONFIG_BOARD_SPEAR600_EVB) += spear600_evb.o diff --git a/arch/arm/mach-spear6xx/clock.c b/arch/arm/mach-spear6xx/clock.c index ac70e0d88fef..358f2800f17b 100644 --- a/arch/arm/mach-spear6xx/clock.c +++ b/arch/arm/mach-spear6xx/clock.c @@ -641,8 +641,8 @@ static struct clk_lookup spear_clk_lookups[] = { { .con_id = "gpt0_synth_clk", .clk = &gpt0_synth_clk}, { .con_id = "gpt2_synth_clk", .clk = &gpt2_synth_clk}, { .con_id = "gpt3_synth_clk", .clk = &gpt3_synth_clk}, - { .dev_id = "uart0", .clk = &uart0_clk}, - { .dev_id = "uart1", .clk = &uart1_clk}, + { .dev_id = "d0000000.serial", .clk = &uart0_clk}, + { .dev_id = "d0080000.serial", .clk = &uart1_clk}, { .dev_id = "firda", .clk = &firda_clk}, { .dev_id = "clcd", .clk = &clcd_clk}, { .dev_id = "gpt0", .clk = &gpt0_clk}, @@ -655,20 +655,20 @@ static struct clk_lookup spear_clk_lookups[] = { { .con_id = "usbh.1_clk", .clk = &usbh1_clk}, /* clock derived from ahb clk */ { .con_id = "apb_clk", .clk = &apb_clk}, - { .dev_id = "i2c_designware.0", .clk = &i2c_clk}, + { .dev_id = "d0200000.i2c", .clk = &i2c_clk}, { .dev_id = "dma", .clk = &dma_clk}, { .dev_id = "jpeg", .clk = &jpeg_clk}, { .dev_id = "gmac", .clk = &gmac_clk}, { .dev_id = "smi", .clk = &smi_clk}, - { .con_id = "fsmc", .clk = &fsmc_clk}, + { .dev_id = "fsmc-nand", .clk = &fsmc_clk}, /* clock derived from apb clk */ { .dev_id = "adc", .clk = &adc_clk}, { .dev_id = "ssp-pl022.0", .clk = &ssp0_clk}, { .dev_id = "ssp-pl022.1", .clk = &ssp1_clk}, { .dev_id = "ssp-pl022.2", .clk = &ssp2_clk}, - { .dev_id = "gpio0", .clk = &gpio0_clk}, - { .dev_id = "gpio1", .clk = &gpio1_clk}, - { .dev_id = "gpio2", .clk = &gpio2_clk}, + { .dev_id = "f0100000.gpio", .clk = &gpio0_clk}, + { .dev_id = "fc980000.gpio", .clk = &gpio1_clk}, + { .dev_id = "d8100000.gpio", .clk = &gpio2_clk}, }; void __init spear6xx_clk_init(void) diff --git a/arch/arm/mach-spear6xx/spear600.c b/arch/arm/mach-spear6xx/spear600.c deleted file mode 100644 index d0e6eeae9b04..000000000000 --- a/arch/arm/mach-spear6xx/spear600.c +++ /dev/null @@ -1,25 +0,0 @@ -/* - * arch/arm/mach-spear6xx/spear600.c - * - * SPEAr600 machine source file - * - 
* Copyright (C) 2009 ST Microelectronics - * Rajeev Kumar<rajeev-dlh.kumar@st.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#include <linux/ptrace.h> -#include <asm/irq.h> -#include <mach/generic.h> -#include <mach/hardware.h> - -/* Add spear600 specific devices here */ - -void __init spear600_init(void) -{ - /* call spear6xx family common init function */ - spear6xx_init(); -} diff --git a/arch/arm/mach-spear6xx/spear600_evb.c b/arch/arm/mach-spear6xx/spear600_evb.c deleted file mode 100644 index c6e4254741cc..000000000000 --- a/arch/arm/mach-spear6xx/spear600_evb.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * arch/arm/mach-spear6xx/spear600_evb.c - * - * SPEAr600 evaluation board source file - * - * Copyright (C) 2009 ST Microelectronics - * Viresh Kumar<viresh.kumar@st.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#include <asm/hardware/vic.h> -#include <asm/mach/arch.h> -#include <asm/mach-types.h> -#include <mach/generic.h> -#include <mach/hardware.h> - -static struct amba_device *amba_devs[] __initdata = { - &gpio_device[0], - &gpio_device[1], - &gpio_device[2], - &uart_device[0], - &uart_device[1], -}; - -static struct platform_device *plat_devs[] __initdata = { -}; - -static void __init spear600_evb_init(void) -{ - unsigned int i; - - /* call spear600 machine init function */ - spear600_init(); - - /* Add Platform Devices */ - platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs)); - - /* Add Amba Devices */ - for (i = 0; i < ARRAY_SIZE(amba_devs); i++) - amba_device_register(amba_devs[i], &iomem_resource); -} - -MACHINE_START(SPEAR600, "ST-SPEAR600-EVB") - .atag_offset = 0x100, - .map_io = spear6xx_map_io, - .init_irq = spear6xx_init_irq, - .handle_irq = vic_handle_irq, - .timer = &spear6xx_timer, - .init_machine = spear600_evb_init, - .restart = spear_restart, -MACHINE_END diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c index b997b1b10ba0..2ed8b14c82c8 100644 --- a/arch/arm/mach-spear6xx/spear6xx.c +++ b/arch/arm/mach-spear6xx/spear6xx.c @@ -6,111 +6,21 @@ * Copyright (C) 2009 ST Microelectronics * Rajeev Kumar<rajeev-dlh.kumar@st.com> * + * Copyright 2012 Stefan Roese <sr@denx.de> + * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. 
*/ -#include <linux/types.h> -#include <linux/amba/pl061.h> -#include <linux/ptrace.h> -#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> #include <asm/hardware/vic.h> -#include <asm/irq.h> #include <asm/mach/arch.h> #include <mach/generic.h> #include <mach/hardware.h> -#include <mach/irqs.h> - -/* Add spear6xx machines common devices here */ -/* uart device registration */ -struct amba_device uart_device[] = { - { - .dev = { - .init_name = "uart0", - }, - .res = { - .start = SPEAR6XX_ICM1_UART0_BASE, - .end = SPEAR6XX_ICM1_UART0_BASE + SZ_4K - 1, - .flags = IORESOURCE_MEM, - }, - .irq = {IRQ_UART_0}, - }, { - .dev = { - .init_name = "uart1", - }, - .res = { - .start = SPEAR6XX_ICM1_UART1_BASE, - .end = SPEAR6XX_ICM1_UART1_BASE + SZ_4K - 1, - .flags = IORESOURCE_MEM, - }, - .irq = {IRQ_UART_1}, - } -}; - -/* gpio device registration */ -static struct pl061_platform_data gpio_plat_data[] = { - { - .gpio_base = 0, - .irq_base = SPEAR_GPIO0_INT_BASE, - }, { - .gpio_base = 8, - .irq_base = SPEAR_GPIO1_INT_BASE, - }, { - .gpio_base = 16, - .irq_base = SPEAR_GPIO2_INT_BASE, - }, -}; - -struct amba_device gpio_device[] = { - { - .dev = { - .init_name = "gpio0", - .platform_data = &gpio_plat_data[0], - }, - .res = { - .start = SPEAR6XX_CPU_GPIO_BASE, - .end = SPEAR6XX_CPU_GPIO_BASE + SZ_4K - 1, - .flags = IORESOURCE_MEM, - }, - .irq = {IRQ_LOCAL_GPIO}, - }, { - .dev = { - .init_name = "gpio1", - .platform_data = &gpio_plat_data[1], - }, - .res = { - .start = SPEAR6XX_ICM3_GPIO_BASE, - .end = SPEAR6XX_ICM3_GPIO_BASE + SZ_4K - 1, - .flags = IORESOURCE_MEM, - }, - .irq = {IRQ_BASIC_GPIO}, - }, { - .dev = { - .init_name = "gpio2", - .platform_data = &gpio_plat_data[2], - }, - .res = { - .start = SPEAR6XX_ICM2_GPIO_BASE, - .end = SPEAR6XX_ICM2_GPIO_BASE + SZ_4K - 1, - .flags = IORESOURCE_MEM, - }, - .irq = {IRQ_APPL_GPIO}, - } -}; - -/* This will add devices, and do machine specific tasks */ -void __init spear6xx_init(void) -{ - /* nothing to do for now */ -} - -/* This will initialize vic */ -void __init spear6xx_init_irq(void) -{ - vic_init((void __iomem *)VA_SPEAR6XX_CPU_VIC_PRI_BASE, 0, ~0, 0); - vic_init((void __iomem *)VA_SPEAR6XX_CPU_VIC_SEC_BASE, 32, ~0, 0); -} /* Following will create static virtual/physical mappings */ static struct map_desc spear6xx_io_desc[] __initdata = { @@ -181,3 +91,33 @@ static void __init spear6xx_timer_init(void) struct sys_timer spear6xx_timer = { .init = spear6xx_timer_init, }; + +static void __init spear600_dt_init(void) +{ + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); +} + +static const char *spear600_dt_board_compat[] = { + "st,spear600", + NULL +}; + +static const struct of_device_id vic_of_match[] __initconst = { + { .compatible = "arm,pl190-vic", .data = vic_of_init, }, + { /* Sentinel */ } +}; + +static void __init spear6xx_dt_init_irq(void) +{ + of_irq_init(vic_of_match); +} + +DT_MACHINE_START(SPEAR600_DT, "ST SPEAr600 (Flattened Device Tree)") + .map_io = spear6xx_map_io, + .init_irq = spear6xx_dt_init_irq, + .handle_irq = vic_handle_irq, + .timer = &spear6xx_timer, + .init_machine = spear600_dt_init, + .restart = spear_restart, + .dt_compat = spear600_dt_board_compat, +MACHINE_END diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile index 1dd2726986cf..d87d968115ec 100644 --- a/arch/arm/mach-tegra/Makefile +++ b/arch/arm/mach-tegra/Makefile @@ -8,6 +8,7 @@ obj-y += timer.o obj-y += pinmux.o obj-y += fuse.o obj-y += pmc.o +obj-y 
+= flowctrl.o obj-$(CONFIG_CPU_IDLE) += cpuidle.o obj-$(CONFIG_CPU_IDLE) += sleep.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += powergate.o @@ -18,6 +19,7 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += pinmux-tegra30-tables.o obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += board-dt-tegra30.o obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30_clocks.o obj-$(CONFIG_SMP) += platsmp.o headsmp.o +obj-$(CONFIG_SMP) += reset.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o apbio.o obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o diff --git a/arch/arm/mach-tegra/board-dt-tegra30.c b/arch/arm/mach-tegra/board-dt-tegra30.c index 96f6c0d030bd..5f7c03e972f3 100644 --- a/arch/arm/mach-tegra/board-dt-tegra30.c +++ b/arch/arm/mach-tegra/board-dt-tegra30.c @@ -56,7 +56,7 @@ struct of_dev_auxdata tegra30_auxdata_lookup[] __initdata = { static __initdata struct tegra_clk_init_table tegra_dt_clk_init_table[] = { /* name parent rate enabled */ - { "uartd", "pll_p", 408000000, true }, + { "uarta", "pll_p", 408000000, true }, { NULL, NULL, 0, 0}, }; diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c index 2f86fcca64a6..22df10fb9972 100644 --- a/arch/arm/mach-tegra/common.c +++ b/arch/arm/mach-tegra/common.c @@ -27,6 +27,7 @@ #include <asm/hardware/gic.h> #include <mach/iomap.h> +#include <mach/powergate.h> #include "board.h" #include "clock.h" @@ -118,13 +119,16 @@ void __init tegra20_init_early(void) tegra_clk_init_from_table(tegra20_clk_init_table); tegra_init_cache(0x331, 0x441); tegra_pmc_init(); + tegra_powergate_init(); } #endif #ifdef CONFIG_ARCH_TEGRA_3x_SOC void __init tegra30_init_early(void) { + tegra_init_fuse(); tegra30_init_clocks(); tegra_init_cache(0x441, 0x551); tegra_pmc_init(); + tegra_powergate_init(); } #endif diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c index bb5ce39b733b..7a065f0cf633 100644 --- a/arch/arm/mach-tegra/cpu-tegra.c +++ b/arch/arm/mach-tegra/cpu-tegra.c @@ -30,7 +30,6 @@ #include <linux/io.h> #include <linux/suspend.h> -#include <asm/system.h> #include <mach/clk.h> diff --git a/arch/arm/mach-tegra/flowctrl.c b/arch/arm/mach-tegra/flowctrl.c new file mode 100644 index 000000000000..fef66a7486ed --- /dev/null +++ b/arch/arm/mach-tegra/flowctrl.c @@ -0,0 +1,62 @@ +/* + * arch/arm/mach-tegra/flowctrl.c + * + * functions and macros to control the flowcontroller + * + * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> + +#include <mach/iomap.h> + +#include "flowctrl.h" + +u8 flowctrl_offset_halt_cpu[] = { + FLOW_CTRL_HALT_CPU0_EVENTS, + FLOW_CTRL_HALT_CPU1_EVENTS, + FLOW_CTRL_HALT_CPU1_EVENTS + 8, + FLOW_CTRL_HALT_CPU1_EVENTS + 16, +}; + +u8 flowctrl_offset_cpu_csr[] = { + FLOW_CTRL_CPU0_CSR, + FLOW_CTRL_CPU1_CSR, + FLOW_CTRL_CPU1_CSR + 8, + FLOW_CTRL_CPU1_CSR + 16, +}; + +static void flowctrl_update(u8 offset, u32 value) +{ + void __iomem *addr = IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + offset; + + writel(value, addr); + + /* ensure the update has reached the flow controller */ + wmb(); + readl_relaxed(addr); +} + +void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value) +{ + return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value); +} + +void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value) +{ + return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value); +} diff --git a/arch/arm/mach-tegra/flowctrl.h b/arch/arm/mach-tegra/flowctrl.h index 74c6efbe52fa..19428173855e 100644 --- a/arch/arm/mach-tegra/flowctrl.h +++ b/arch/arm/mach-tegra/flowctrl.h @@ -34,4 +34,9 @@ #define FLOW_CTRL_HALT_CPU1_EVENTS 0x14 #define FLOW_CTRL_CPU1_CSR 0x18 +#ifndef __ASSEMBLY__ +void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value); +void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value); +#endif + #endif diff --git a/arch/arm/mach-tegra/fuse.c b/arch/arm/mach-tegra/fuse.c index c1afb2738769..f946d129423c 100644 --- a/arch/arm/mach-tegra/fuse.c +++ b/arch/arm/mach-tegra/fuse.c @@ -34,6 +34,7 @@ int tegra_sku_id; int tegra_cpu_process_id; int tegra_core_process_id; +int tegra_chip_id; enum tegra_revision tegra_revision; /* The BCT to use at boot is specified by board straps that can be read @@ -66,12 +67,9 @@ static inline bool get_spare_fuse(int bit) return tegra_fuse_readl(FUSE_SPARE_BIT + bit * 4); } -static enum tegra_revision tegra_get_revision(void) +static enum tegra_revision tegra_get_revision(u32 id) { - void __iomem *chip_id = IO_ADDRESS(TEGRA_APB_MISC_BASE) + 0x804; - u32 id = readl(chip_id); u32 minor_rev = (id >> 16) & 0xf; - u32 chipid = (id >> 8) & 0xff; switch (minor_rev) { case 1: @@ -79,7 +77,8 @@ static enum tegra_revision tegra_get_revision(void) case 2: return TEGRA_REVISION_A02; case 3: - if (chipid == 0x20 && (get_spare_fuse(18) || get_spare_fuse(19))) + if (tegra_chip_id == TEGRA20 && + (get_spare_fuse(18) || get_spare_fuse(19))) return TEGRA_REVISION_A03p; else return TEGRA_REVISION_A03; @@ -92,6 +91,8 @@ static enum tegra_revision tegra_get_revision(void) void tegra_init_fuse(void) { + u32 id; + u32 reg = readl(IO_TO_VIRT(TEGRA_CLK_RESET_BASE + 0x48)); reg |= 1 << 28; writel(reg, IO_TO_VIRT(TEGRA_CLK_RESET_BASE + 0x48)); @@ -108,10 +109,13 @@ void tegra_init_fuse(void) reg = tegra_apb_readl(TEGRA_APB_MISC_BASE + STRAP_OPT); tegra_bct_strapping = (reg & RAM_ID_MASK) >> RAM_CODE_SHIFT; - tegra_revision = tegra_get_revision(); + id = readl_relaxed(IO_ADDRESS(TEGRA_APB_MISC_BASE) + 0x804); + tegra_chip_id = (id >> 8) & 0xff; + + tegra_revision = tegra_get_revision(id); pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n", - tegra_revision_name[tegra_get_revision()], + tegra_revision_name[tegra_revision], tegra_sku_id, tegra_cpu_process_id, tegra_core_process_id); } diff --git a/arch/arm/mach-tegra/fuse.h b/arch/arm/mach-tegra/fuse.h index d65d2abf803b..d2107b2cb85a 100644 --- a/arch/arm/mach-tegra/fuse.h +++ b/arch/arm/mach-tegra/fuse.h @@ -35,9 +35,13 @@ enum tegra_revision { #define
SKU_ID_AP25E 27 #define SKU_ID_T25E 28 +#define TEGRA20 0x20 +#define TEGRA30 0x30 + extern int tegra_sku_id; extern int tegra_cpu_process_id; extern int tegra_core_process_id; +extern int tegra_chip_id; extern enum tegra_revision tegra_revision; extern int tegra_bct_strapping; diff --git a/arch/arm/mach-tegra/headsmp.S b/arch/arm/mach-tegra/headsmp.S index b5349b2f13d2..fef9c2c51370 100644 --- a/arch/arm/mach-tegra/headsmp.S +++ b/arch/arm/mach-tegra/headsmp.S @@ -1,6 +1,23 @@ #include <linux/linkage.h> #include <linux/init.h> +#include <asm/cache.h> + +#include <mach/iomap.h> + +#include "flowctrl.h" +#include "reset.h" + +#define APB_MISC_GP_HIDREV 0x804 +#define PMC_SCRATCH41 0x140 + +#define RESET_DATA(x) ((TEGRA_RESET_##x)*4) + + .macro mov32, reg, val + movw \reg, #:lower16:\val + movt \reg, #:upper16:\val + .endm + .section ".text.head", "ax" __CPUINIT @@ -47,15 +64,149 @@ ENTRY(v7_invalidate_l1) mov pc, lr ENDPROC(v7_invalidate_l1) + ENTRY(tegra_secondary_startup) - msr cpsr_fsxc, #0xd3 bl v7_invalidate_l1 - mrc p15, 0, r0, c0, c0, 5 - and r0, r0, #15 - ldr r1, =0x6000f100 - str r0, [r1] -1: ldr r2, [r1] - cmp r0, r2 - beq 1b + /* Enable coresight */ + mov32 r0, 0xC5ACCE55 + mcr p14, 0, r0, c7, c12, 6 b secondary_startup ENDPROC(tegra_secondary_startup) + + .align L1_CACHE_SHIFT +ENTRY(__tegra_cpu_reset_handler_start) + +/* + * __tegra_cpu_reset_handler: + * + * Common handler for all CPU reset events. + * + * Register usage within the reset handler: + * + * R7 = CPU present (to the OS) mask + * R8 = CPU in LP1 state mask + * R9 = CPU in LP2 state mask + * R10 = CPU number + * R11 = CPU mask + * R12 = pointer to reset handler data + * + * NOTE: This code is copied to IRAM. All code and data accesses + * must be position-independent. + */ + + .align L1_CACHE_SHIFT +ENTRY(__tegra_cpu_reset_handler) + + cpsid aif, 0x13 @ SVC mode, interrupts disabled + mrc p15, 0, r10, c0, c0, 5 @ MPIDR + and r10, r10, #0x3 @ R10 = CPU number + mov r11, #1 + mov r11, r11, lsl r10 @ R11 = CPU mask + adr r12, __tegra_cpu_reset_handler_data + +#ifdef CONFIG_SMP + /* Does the OS know about this CPU? */ + ldr r7, [r12, #RESET_DATA(MASK_PRESENT)] + tst r7, r11 @ if !present + bleq __die @ CPU not present (to OS) +#endif + +#ifdef CONFIG_ARCH_TEGRA_2x_SOC + /* Are we on Tegra20? */ + mov32 r6, TEGRA_APB_MISC_BASE + ldr r0, [r6, #APB_MISC_GP_HIDREV] + and r0, r0, #0xff00 + cmp r0, #(0x20 << 8) + bne 1f + /* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */ + mov32 r6, TEGRA_PMC_BASE + mov r0, #0 + cmp r10, #0 + strne r0, [r6, #PMC_SCRATCH41] +1: +#endif + +#ifdef CONFIG_SMP + /* + * Can only be secondary boot (initial or hotplug) but CPU 0 + * cannot be here. + */ + cmp r10, #0 + bleq __die @ CPU0 cannot be here + ldr lr, [r12, #RESET_DATA(STARTUP_SECONDARY)] + cmp lr, #0 + bleq __die @ no secondary startup handler + bx lr +#endif + +/* + * We don't know why the CPU reset. Just kill it. + * The LR register will contain the address we died at + 4. + */ + +__die: + sub lr, lr, #4 + mov32 r7, TEGRA_PMC_BASE + str lr, [r7, #PMC_SCRATCH41] + + mov32 r7, TEGRA_CLK_RESET_BASE + + /* Are we on Tegra20? 
 */ + mov32 r6, TEGRA_APB_MISC_BASE + ldr r0, [r6, #APB_MISC_GP_HIDREV] + and r0, r0, #0xff00 + cmp r0, #(0x20 << 8) + bne 1f + +#ifdef CONFIG_ARCH_TEGRA_2x_SOC + mov32 r0, 0x1111 + mov r1, r0, lsl r10 + str r1, [r7, #0x340] @ CLK_RST_CPU_CMPLX_SET +#endif +1: +#ifdef CONFIG_ARCH_TEGRA_3x_SOC + mov32 r6, TEGRA_FLOW_CTRL_BASE + + cmp r10, #0 + moveq r1, #FLOW_CTRL_HALT_CPU0_EVENTS + moveq r2, #FLOW_CTRL_CPU0_CSR + movne r1, r10, lsl #3 + addne r2, r1, #(FLOW_CTRL_CPU1_CSR-8) + addne r1, r1, #(FLOW_CTRL_HALT_CPU1_EVENTS-8) + + /* Clear CPU "event" and "interrupt" flags and power gate + it when halting but not before it is in the "WFI" state. */ + ldr r0, [r6, +r2] + orr r0, r0, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG + orr r0, r0, #FLOW_CTRL_CSR_ENABLE + str r0, [r6, +r2] + + /* Unconditionally halt this CPU */ + mov r0, #FLOW_CTRL_WAITEVENT + str r0, [r6, +r1] + ldr r0, [r6, +r1] @ memory barrier + + dsb + isb + wfi @ CPU should be power gated here + + /* If the CPU didn't power gate above just kill its clock. */ + + mov r0, r11, lsl #8 + str r0, [r7, #0x348] @ CLK_CPU_CMPLX_SET +#endif + + /* If the CPU still isn't dead, just spin here. */ + b . +ENDPROC(__tegra_cpu_reset_handler) + + .align L1_CACHE_SHIFT + .type __tegra_cpu_reset_handler_data, %object + .globl __tegra_cpu_reset_handler_data +__tegra_cpu_reset_handler_data: + .rept TEGRA_RESET_DATA_SIZE + .long 0 + .endr + .align L1_CACHE_SHIFT + +ENTRY(__tegra_cpu_reset_handler_end) diff --git a/arch/arm/mach-tegra/hotplug.c b/arch/arm/mach-tegra/hotplug.c index f3294040d357..d8dc9ddd6d18 100644 --- a/arch/arm/mach-tegra/hotplug.c +++ b/arch/arm/mach-tegra/hotplug.c @@ -13,6 +13,7 @@ #include <linux/smp.h> #include <asm/cacheflush.h> +#include <asm/cp15.h> static inline void cpu_enter_lowpower(void) { diff --git a/arch/arm/mach-tegra/include/mach/iomap.h b/arch/arm/mach-tegra/include/mach/iomap.h index 67644c905d8e..cff672a344f4 100644 --- a/arch/arm/mach-tegra/include/mach/iomap.h +++ b/arch/arm/mach-tegra/include/mach/iomap.h @@ -113,6 +113,9 @@ #define TEGRA_AHB_GIZMO_BASE 0x6000C004 #define TEGRA_AHB_GIZMO_SIZE 0x10C +#define TEGRA_SB_BASE 0x6000C200 +#define TEGRA_SB_SIZE 256 + #define TEGRA_STATMON_BASE 0x6000C400 #define TEGRA_STATMON_SIZE SZ_1K diff --git a/arch/arm/mach-tegra/include/mach/powergate.h b/arch/arm/mach-tegra/include/mach/powergate.h index 39c396d2ddb0..4752b1a68f35 100644 --- a/arch/arm/mach-tegra/include/mach/powergate.h +++ b/arch/arm/mach-tegra/include/mach/powergate.h @@ -27,8 +27,21 @@ #define TEGRA_POWERGATE_VDEC 4 #define TEGRA_POWERGATE_L2 5 #define TEGRA_POWERGATE_MPE 6 -#define TEGRA_NUM_POWERGATE 7 +#define TEGRA_POWERGATE_HEG 7 +#define TEGRA_POWERGATE_SATA 8 +#define TEGRA_POWERGATE_CPU1 9 +#define TEGRA_POWERGATE_CPU2 10 +#define TEGRA_POWERGATE_CPU3 11 +#define TEGRA_POWERGATE_CELP 12 +#define TEGRA_POWERGATE_3D1 13 +#define TEGRA_POWERGATE_CPU0 TEGRA_POWERGATE_CPU +#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D + +int __init tegra_powergate_init(void); + +int tegra_cpu_powergate_id(int cpuid); +int tegra_powergate_is_powered(int id); int tegra_powergate_power_on(int id); int tegra_powergate_power_off(int id); int tegra_powergate_remove_clamping(int id); diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c index 7d2b5d03c1df..1a208dbf682f 100644 --- a/arch/arm/mach-tegra/platsmp.c +++ b/arch/arm/mach-tegra/platsmp.c @@ -24,19 +24,31 @@ #include <asm/mach-types.h> #include <asm/smp_scu.h> +#include <mach/clk.h> #include <mach/iomap.h> +#include <mach/powergate.h> +
+#include "fuse.h" +#include "flowctrl.h" +#include "reset.h" extern void tegra_secondary_startup(void); -static DEFINE_SPINLOCK(boot_lock); static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE); #define EVP_CPU_RESET_VECTOR \ (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x100) #define CLK_RST_CONTROLLER_CLK_CPU_CMPLX \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x4c) +#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET \ + (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x340) #define CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR \ (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x344) +#define CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR \ + (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x34c) + +#define CPU_CLOCK(cpu) (0x1<<(8+cpu)) +#define CPU_RESET(cpu) (0x1111ul<<(cpu)) void __cpuinit platform_secondary_init(unsigned int cpu) { @@ -47,63 +59,106 @@ void __cpuinit platform_secondary_init(unsigned int cpu) */ gic_secondary_init(0); - /* - * Synchronise with the boot thread. - */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); } -int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) +static int tegra20_power_up_cpu(unsigned int cpu) { - unsigned long old_boot_vector; - unsigned long boot_vector; - unsigned long timeout; u32 reg; - /* - * set synchronisation state between this boot processor - * and the secondary one - */ - spin_lock(&boot_lock); + /* Enable the CPU clock. */ + reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX); + writel(reg & ~CPU_CLOCK(cpu), CLK_RST_CONTROLLER_CLK_CPU_CMPLX); + barrier(); + reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX); + /* Clear flow controller CSR. */ + flowctrl_write_cpu_csr(cpu, 0); - /* set the reset vector to point to the secondary_startup routine */ + return 0; +} - boot_vector = virt_to_phys(tegra_secondary_startup); - old_boot_vector = readl(EVP_CPU_RESET_VECTOR); - writel(boot_vector, EVP_CPU_RESET_VECTOR); +static int tegra30_power_up_cpu(unsigned int cpu) +{ + u32 reg; + int ret, pwrgateid; + unsigned long timeout; - /* enable cpu clock on cpu1 */ - reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX); - writel(reg & ~(1<<9), CLK_RST_CONTROLLER_CLK_CPU_CMPLX); + pwrgateid = tegra_cpu_powergate_id(cpu); + if (pwrgateid < 0) + return pwrgateid; + + /* If this is the first boot, toggle powergates directly. */ + if (!tegra_powergate_is_powered(pwrgateid)) { + ret = tegra_powergate_power_on(pwrgateid); + if (ret) + return ret; + + /* Wait for the power to come up. */ + timeout = jiffies + 10*HZ; + while (tegra_powergate_is_powered(pwrgateid)) { + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + udelay(10); + } + } - reg = (1<<13) | (1<<9) | (1<<5) | (1<<1); - writel(reg, CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR); + /* CPU partition is powered. Enable the CPU clock. */ + writel(CPU_CLOCK(cpu), CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR); + reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX_CLR); + udelay(10); - smp_wmb(); - flush_cache_all(); + /* Remove I/O clamps. */ + ret = tegra_powergate_remove_clamping(pwrgateid); + udelay(10); - /* unhalt the cpu */ - writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14); + /* Clear flow controller CSR. */ + flowctrl_write_cpu_csr(cpu, 0); - timeout = jiffies + (1 * HZ); - while (time_before(jiffies, timeout)) { - if (readl(EVP_CPU_RESET_VECTOR) != boot_vector) - break; - udelay(10); - } + return 0; +} + +int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) +{ + int status; - /* put the old boot vector back */ - writel(old_boot_vector, EVP_CPU_RESET_VECTOR); + /* + * Force the CPU into reset. 
The CPU must remain in reset when the + * flow controller state is cleared (which will cause the flow + * controller to stop driving reset if the CPU has been power-gated + * via the flow controller). This will have no effect on first boot + * of the CPU since it should already be in reset. + */ + writel(CPU_RESET(cpu), CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET); + dmb(); /* - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish + * Unhalt the CPU. If the flow controller was used to power-gate the + * CPU this will cause the flow controller to stop driving reset. + * The CPU will remain in reset because the clock and reset block + * is now driving reset. */ - spin_unlock(&boot_lock); + flowctrl_write_cpu_halt(cpu, 0); + + switch (tegra_chip_id) { + case TEGRA20: + status = tegra20_power_up_cpu(cpu); + break; + case TEGRA30: + status = tegra30_power_up_cpu(cpu); + break; + default: + status = -EINVAL; + break; + } - return 0; + if (status) + goto done; + + /* Take the CPU out of reset. */ + writel(CPU_RESET(cpu), CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR); + wmb(); +done: + return status; } /* @@ -128,6 +183,6 @@ void __init smp_init_cpus(void) void __init platform_smp_prepare_cpus(unsigned int max_cpus) { - + tegra_cpu_reset_handler_init(); scu_enable(scu_base); } diff --git a/arch/arm/mach-tegra/powergate.c b/arch/arm/mach-tegra/powergate.c index 948306491a59..c238699ae86f 100644 --- a/arch/arm/mach-tegra/powergate.c +++ b/arch/arm/mach-tegra/powergate.c @@ -31,6 +31,8 @@ #include <mach/iomap.h> #include <mach/powergate.h> +#include "fuse.h" + #define PWRGATE_TOGGLE 0x30 #define PWRGATE_TOGGLE_START (1 << 8) @@ -38,6 +40,16 @@ #define PWRGATE_STATUS 0x38 +static int tegra_num_powerdomains; +static int tegra_num_cpu_domains; +static u8 *tegra_cpu_domains; +static u8 tegra30_cpu_domains[] = { + TEGRA_POWERGATE_CPU0, + TEGRA_POWERGATE_CPU1, + TEGRA_POWERGATE_CPU2, + TEGRA_POWERGATE_CPU3, +}; + static DEFINE_SPINLOCK(tegra_powergate_lock); static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE); @@ -75,7 +87,7 @@ static int tegra_powergate_set(int id, bool new_state) int tegra_powergate_power_on(int id) { - if (id < 0 || id >= TEGRA_NUM_POWERGATE) + if (id < 0 || id >= tegra_num_powerdomains) return -EINVAL; return tegra_powergate_set(id, true); @@ -83,17 +95,18 @@ int tegra_powergate_power_on(int id) int tegra_powergate_power_off(int id) { - if (id < 0 || id >= TEGRA_NUM_POWERGATE) + if (id < 0 || id >= tegra_num_powerdomains) return -EINVAL; return tegra_powergate_set(id, false); } -static bool tegra_powergate_is_powered(int id) +int tegra_powergate_is_powered(int id) { u32 status; - WARN_ON(id < 0 || id >= TEGRA_NUM_POWERGATE); + if (id < 0 || id >= tegra_num_powerdomains) + return -EINVAL; status = pmc_read(PWRGATE_STATUS) & (1 << id); return !!status; @@ -103,7 +116,7 @@ int tegra_powergate_remove_clamping(int id) { u32 mask; - if (id < 0 || id >= TEGRA_NUM_POWERGATE) + if (id < 0 || id >= tegra_num_powerdomains) return -EINVAL; /* @@ -156,6 +169,34 @@ err_power: return ret; } +int tegra_cpu_powergate_id(int cpuid) +{ + if (cpuid > 0 && cpuid < tegra_num_cpu_domains) + return tegra_cpu_domains[cpuid]; + + return -EINVAL; +} + +int __init tegra_powergate_init(void) +{ + switch (tegra_chip_id) { + case TEGRA20: + tegra_num_powerdomains = 7; + break; + case TEGRA30: + tegra_num_powerdomains = 14; + tegra_num_cpu_domains = 4; + tegra_cpu_domains = tegra30_cpu_domains; + break; + default: + /* Unknown Tegra variant. 
Disable powergating */ + tegra_num_powerdomains = 0; + break; + } + + return 0; +} + #ifdef CONFIG_DEBUG_FS static const char * const powergate_name[] = { @@ -175,7 +216,7 @@ static int powergate_show(struct seq_file *s, void *data) seq_printf(s, " powergate powered\n"); seq_printf(s, "------------------\n"); - for (i = 0; i < TEGRA_NUM_POWERGATE; i++) + for (i = 0; i < tegra_num_powerdomains; i++) seq_printf(s, " %9s %7s\n", powergate_name[i], tegra_powergate_is_powered(i) ? "yes" : "no"); return 0; diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c new file mode 100644 index 000000000000..4d6a2ee99c3b --- /dev/null +++ b/arch/arm/mach-tegra/reset.c @@ -0,0 +1,84 @@ +/* + * arch/arm/mach-tegra/reset.c + * + * Copyright (C) 2011,2012 NVIDIA Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/init.h> +#include <linux/io.h> +#include <linux/cpumask.h> +#include <linux/bitops.h> + +#include <asm/cacheflush.h> +#include <asm/hardware/cache-l2x0.h> + +#include <mach/iomap.h> +#include <mach/irammap.h> + +#include "reset.h" +#include "fuse.h" + +#define TEGRA_IRAM_RESET_BASE (TEGRA_IRAM_BASE + \ + TEGRA_IRAM_RESET_HANDLER_OFFSET) + +static bool is_enabled; + +static void tegra_cpu_reset_handler_enable(void) +{ + void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_RESET_BASE); + void __iomem *evp_cpu_reset = + IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE + 0x100); + void __iomem *sb_ctrl = IO_ADDRESS(TEGRA_SB_BASE); + u32 reg; + + BUG_ON(is_enabled); + BUG_ON(tegra_cpu_reset_handler_size > TEGRA_IRAM_RESET_HANDLER_SIZE); + + memcpy(iram_base, (void *)__tegra_cpu_reset_handler_start, + tegra_cpu_reset_handler_size); + + /* + * NOTE: This must be the one and only write to the EVP CPU reset + * vector in the entire system. + */ + writel(TEGRA_IRAM_RESET_BASE + tegra_cpu_reset_handler_offset, + evp_cpu_reset); + wmb(); + reg = readl(evp_cpu_reset); + + /* + * Prevent further modifications to the physical reset vector. + * NOTE: Has no effect on chips prior to Tegra30. + */ + if (tegra_chip_id != TEGRA20) { + reg = readl(sb_ctrl); + reg |= 2; + writel(reg, sb_ctrl); + wmb(); + } + + is_enabled = true; +} + +void __init tegra_cpu_reset_handler_init(void) +{ + +#ifdef CONFIG_SMP + __tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] = + *((u32 *)cpu_present_mask); + __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] = + virt_to_phys((void *)tegra_secondary_startup); +#endif + + tegra_cpu_reset_handler_enable(); +} diff --git a/arch/arm/mach-tegra/reset.h b/arch/arm/mach-tegra/reset.h new file mode 100644 index 000000000000..de88bf851dd3 --- /dev/null +++ b/arch/arm/mach-tegra/reset.h @@ -0,0 +1,50 @@ +/* + * arch/arm/mach-tegra/reset.h + * + * CPU reset dispatcher. + * + * Copyright (c) 2011, NVIDIA Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __MACH_TEGRA_RESET_H +#define __MACH_TEGRA_RESET_H + +#define TEGRA_RESET_MASK_PRESENT 0 +#define TEGRA_RESET_MASK_LP1 1 +#define TEGRA_RESET_MASK_LP2 2 +#define TEGRA_RESET_STARTUP_SECONDARY 3 +#define TEGRA_RESET_STARTUP_LP2 4 +#define TEGRA_RESET_STARTUP_LP1 5 +#define TEGRA_RESET_DATA_SIZE 6 + +#ifndef __ASSEMBLY__ + +extern unsigned long __tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE]; + +void __tegra_cpu_reset_handler_start(void); +void __tegra_cpu_reset_handler(void); +void __tegra_cpu_reset_handler_end(void); +void tegra_secondary_startup(void); + +#define tegra_cpu_reset_handler_offset \ + ((u32)__tegra_cpu_reset_handler - \ + (u32)__tegra_cpu_reset_handler_start) + +#define tegra_cpu_reset_handler_size \ + (__tegra_cpu_reset_handler_end - \ + __tegra_cpu_reset_handler_start) + +void __init tegra_cpu_reset_handler_init(void); + +#endif +#endif diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index 9ec635812349..880d02ec89d4 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig @@ -27,6 +27,7 @@ config MACH_MOP500 select UX500_SOC_DB8500 select I2C select I2C_NOMADIK + select SOC_BUS help Include support for the MOP500 development platform. @@ -57,6 +58,12 @@ config UX500_AUTO_PLATFORM At least one platform needs to be selected in order to build a working kernel. If everything else is disabled, this automatically enables MACH_MOP500. + +config MACH_UX500_DT + bool "Generic U8500 support using device tree" + depends on MACH_MOP500 + select USE_OF + endmenu config UX500_DEBUG_UART diff --git a/arch/arm/mach-ux500/Makefile.boot b/arch/arm/mach-ux500/Makefile.boot index ff0a4b5b0a82..dd5cd00e2554 100644 --- a/arch/arm/mach-ux500/Makefile.boot +++ b/arch/arm/mach-ux500/Makefile.boot @@ -2,3 +2,4 @@ params_phys-y := 0x00000100 initrd_phys-y := 0x00800000 +dtb-$(CONFIG_MACH_SNOWBALL) += snowball.dtb diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c index 1daead3e583e..920251cf834c 100644 --- a/arch/arm/mach-ux500/board-mop500-sdi.c +++ b/arch/arm/mach-ux500/board-mop500-sdi.c @@ -99,7 +99,7 @@ static struct mmci_platform_data mop500_sdi0_data = { #endif }; -static void sdi0_configure(void) +static void sdi0_configure(struct device *parent) { int ret; @@ -118,15 +118,15 @@ static void sdi0_configure(void) gpio_direction_output(sdi0_en, 1); /* Add the device, force v2 to subrevision 1 */ - db8500_add_sdi0(&mop500_sdi0_data, U8500_SDI_V2_PERIPHID); + db8500_add_sdi0(parent, &mop500_sdi0_data, U8500_SDI_V2_PERIPHID); } -void mop500_sdi_tc35892_init(void) +void mop500_sdi_tc35892_init(struct device *parent) { mop500_sdi0_data.gpio_cd = GPIO_SDMMC_CD; sdi0_en = GPIO_SDMMC_EN; sdi0_vsel = GPIO_SDMMC_1V8_3V_SEL; - sdi0_configure(); + sdi0_configure(parent); } /* @@ -241,12 +241,13 @@ static struct mmci_platform_data mop500_sdi4_data = { #endif }; -void __init mop500_sdi_init(void) +void __init mop500_sdi_init(struct device *parent) { /* PoP:ed eMMC */ - db8500_add_sdi2(&mop500_sdi2_data, U8500_SDI_V2_PERIPHID); + db8500_add_sdi2(parent, &mop500_sdi2_data, U8500_SDI_V2_PERIPHID); /* On-board eMMC */ - db8500_add_sdi4(&mop500_sdi4_data, U8500_SDI_V2_PERIPHID); + db8500_add_sdi4(parent, &mop500_sdi4_data, U8500_SDI_V2_PERIPHID); + /* * On 
boards with the TC35892 GPIO expander, sdi0 will finally * be added when the TC35892 initializes and calls @@ -254,31 +255,31 @@ void __init mop500_sdi_init(void) */ } -void __init snowball_sdi_init(void) +void __init snowball_sdi_init(struct device *parent) { /* On Snowball MMC_CAP_SD_HIGHSPEED isn't supported (Hardware issue?) */ mop500_sdi0_data.capabilities &= ~MMC_CAP_SD_HIGHSPEED; /* On-board eMMC */ - db8500_add_sdi4(&mop500_sdi4_data, U8500_SDI_V2_PERIPHID); + db8500_add_sdi4(parent, &mop500_sdi4_data, U8500_SDI_V2_PERIPHID); /* External Micro SD slot */ mop500_sdi0_data.gpio_cd = SNOWBALL_SDMMC_CD_GPIO; mop500_sdi0_data.cd_invert = true; sdi0_en = SNOWBALL_SDMMC_EN_GPIO; sdi0_vsel = SNOWBALL_SDMMC_1V8_3V_GPIO; - sdi0_configure(); + sdi0_configure(parent); } -void __init hrefv60_sdi_init(void) +void __init hrefv60_sdi_init(struct device *parent) { /* PoP:ed eMMC */ - db8500_add_sdi2(&mop500_sdi2_data, U8500_SDI_V2_PERIPHID); + db8500_add_sdi2(parent, &mop500_sdi2_data, U8500_SDI_V2_PERIPHID); /* On-board eMMC */ - db8500_add_sdi4(&mop500_sdi4_data, U8500_SDI_V2_PERIPHID); + db8500_add_sdi4(parent, &mop500_sdi4_data, U8500_SDI_V2_PERIPHID); /* External Micro SD slot */ mop500_sdi0_data.gpio_cd = HREFV60_SDMMC_CD_GPIO; sdi0_en = HREFV60_SDMMC_EN_GPIO; sdi0_vsel = HREFV60_SDMMC_1V8_3V_GPIO; - sdi0_configure(); + sdi0_configure(parent); /* WLAN SDIO channel */ - db8500_add_sdi1(&mop500_sdi1_data, U8500_SDI_V2_PERIPHID); + db8500_add_sdi1(parent, &mop500_sdi1_data, U8500_SDI_V2_PERIPHID); } diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index 6d672a556df8..77d03c1fbd04 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -30,6 +30,9 @@ #include <linux/gpio_keys.h> #include <linux/delay.h> +#include <linux/of.h> +#include <linux/of_platform.h> + #include <linux/leds.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -226,7 +229,12 @@ static struct tps6105x_platform_data mop500_tps61052_data = { static void mop500_tc35892_init(struct tc3589x *tc3589x, unsigned int base) { - mop500_sdi_tc35892_init(); + struct device *parent = NULL; +#if 0 + /* FIXME: Is the sdi actually part of tc3589x? 
*/ + parent = tc3589x->dev; +#endif + mop500_sdi_tc35892_init(parent); } static struct tc3589x_gpio_platform_data mop500_tc35892_gpio_data = { @@ -353,12 +361,12 @@ U8500_I2C_CONTROLLER(1, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST); U8500_I2C_CONTROLLER(2, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST); U8500_I2C_CONTROLLER(3, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST); -static void __init mop500_i2c_init(void) +static void __init mop500_i2c_init(struct device *parent) { - db8500_add_i2c0(&u8500_i2c0_data); - db8500_add_i2c1(&u8500_i2c1_data); - db8500_add_i2c2(&u8500_i2c2_data); - db8500_add_i2c3(&u8500_i2c3_data); + db8500_add_i2c0(parent, &u8500_i2c0_data); + db8500_add_i2c1(parent, &u8500_i2c1_data); + db8500_add_i2c2(parent, &u8500_i2c2_data); + db8500_add_i2c3(parent, &u8500_i2c3_data); } static struct gpio_keys_button mop500_gpio_keys[] = { @@ -435,7 +443,7 @@ static struct stedma40_chan_cfg ssp0_dma_cfg_tx = { }; #endif -static struct pl022_ssp_controller ssp0_platform_data = { +static struct pl022_ssp_controller ssp0_plat = { .bus_id = 0, #ifdef CONFIG_STE_DMA40 .enable_dma = 1, @@ -451,9 +459,9 @@ static struct pl022_ssp_controller ssp0_platform_data = { .num_chipselect = 5, }; -static void __init mop500_spi_init(void) +static void __init mop500_spi_init(struct device *parent) { - db8500_add_ssp0(&ssp0_platform_data); + db8500_add_ssp0(parent, &ssp0_plat); } #ifdef CONFIG_STE_DMA40 @@ -587,11 +595,11 @@ static struct amba_pl011_data uart2_plat = { #endif }; -static void __init mop500_uart_init(void) +static void __init mop500_uart_init(struct device *parent) { - db8500_add_uart0(&uart0_plat); - db8500_add_uart1(&uart1_plat); - db8500_add_uart2(&uart2_plat); + db8500_add_uart0(parent, &uart0_plat); + db8500_add_uart1(parent, &uart1_plat); + db8500_add_uart2(parent, &uart2_plat); } static struct platform_device *snowball_platform_devs[] __initdata = { @@ -603,21 +611,27 @@ static struct platform_device *snowball_platform_devs[] __initdata = { static void __init mop500_init_machine(void) { + struct device *parent = NULL; int i2c0_devs; + int i; mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR; - u8500_init_devices(); + parent = u8500_init_devices(); mop500_pins_init(); + /* FIXME: parent of ab8500 should be prcmu */ + for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) + mop500_platform_devs[i]->dev.parent = parent; + platform_add_devices(mop500_platform_devs, ARRAY_SIZE(mop500_platform_devs)); - mop500_i2c_init(); - mop500_sdi_init(); - mop500_spi_init(); - mop500_uart_init(); + mop500_i2c_init(parent); + mop500_sdi_init(parent); + mop500_spi_init(parent); + mop500_uart_init(parent); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); @@ -631,19 +645,24 @@ static void __init mop500_init_machine(void) static void __init snowball_init_machine(void) { + struct device *parent = NULL; int i2c0_devs; + int i; - u8500_init_devices(); + parent = u8500_init_devices(); snowball_pins_init(); + for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++) + snowball_platform_devs[i]->dev.parent = parent; + platform_add_devices(snowball_platform_devs, ARRAY_SIZE(snowball_platform_devs)); - mop500_i2c_init(); - snowball_sdi_init(); - mop500_spi_init(); - mop500_uart_init(); + mop500_i2c_init(parent); + snowball_sdi_init(parent); + mop500_spi_init(parent); + mop500_uart_init(parent); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); @@ -656,7 +675,9 @@ static void __init snowball_init_machine(void) static void __init hrefv60_init_machine(void) { + struct 
device *parent = NULL; int i2c0_devs; + int i; /* * The HREFv60 board removed a GPIO expander and routed @@ -665,17 +686,20 @@ static void __init hrefv60_init_machine(void) */ mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO; - u8500_init_devices(); + parent = u8500_init_devices(); hrefv60_pins_init(); + for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) + mop500_platform_devs[i]->dev.parent = parent; + platform_add_devices(mop500_platform_devs, ARRAY_SIZE(mop500_platform_devs)); - mop500_i2c_init(); - hrefv60_sdi_init(); - mop500_spi_init(); - mop500_uart_init(); + mop500_i2c_init(parent); + hrefv60_sdi_init(parent); + mop500_spi_init(parent); + mop500_uart_init(parent); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); @@ -718,3 +742,94 @@ MACHINE_START(SNOWBALL, "Calao Systems Snowball platform") .handle_irq = gic_handle_irq, .init_machine = snowball_init_machine, MACHINE_END + +#ifdef CONFIG_MACH_UX500_DT + +struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = { + OF_DEV_AUXDATA("arm,pl011", 0x80120000, "uart0", &uart0_plat), + OF_DEV_AUXDATA("arm,pl011", 0x80121000, "uart1", &uart1_plat), + OF_DEV_AUXDATA("arm,pl011", 0x80007000, "uart2", &uart2_plat), + OF_DEV_AUXDATA("arm,pl022", 0x80002000, "ssp0", &ssp0_plat), + {}, +}; + +static const struct of_device_id u8500_soc_node[] = { + /* only create devices below soc node */ + { .compatible = "stericsson,db8500", }, + { }, +}; + +static void __init u8500_init_machine(void) +{ + struct device *parent = NULL; + int i2c0_devs; + int i; + + parent = u8500_init_devices(); + i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); + + for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) + mop500_platform_devs[i]->dev.parent = parent; + for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++) + snowball_platform_devs[i]->dev.parent = parent; + + /* automatically probe child nodes of db8500 device */ + of_platform_populate(NULL, u8500_soc_node, u8500_auxdata_lookup, parent); + + if (of_machine_is_compatible("st-ericsson,mop500")) { + mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR; + mop500_pins_init(); + + platform_add_devices(mop500_platform_devs, + ARRAY_SIZE(mop500_platform_devs)); + + mop500_sdi_init(parent); + } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { + snowball_pins_init(); + platform_add_devices(snowball_platform_devs, + ARRAY_SIZE(snowball_platform_devs)); + + snowball_sdi_init(parent); + } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) { + /* + * The HREFv60 board removed a GPIO expander and routed + * all these GPIO pins to the internal GPIO controller + * instead. 
+ */ + mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO; + i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES; + hrefv60_pins_init(); + platform_add_devices(mop500_platform_devs, + ARRAY_SIZE(mop500_platform_devs)); + + hrefv60_sdi_init(parent); + } + mop500_i2c_init(parent); + + i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); + i2c_register_board_info(2, mop500_i2c2_devices, + ARRAY_SIZE(mop500_i2c2_devices)); + + /* This board has full regulator constraints */ + regulator_has_full_constraints(); +} + +static const char * u8500_dt_board_compat[] = { + "calaosystems,snowball-a9500", + "st-ericsson,hrefv60+", + "st-ericsson,u8500", + "st-ericsson,mop500", + NULL, +}; + + +DT_MACHINE_START(U8500_DT, "ST-Ericsson U8500 platform (Device Tree Support)") + .map_io = u8500_map_io, + .init_irq = ux500_init_irq, + /* we re-use nomadik timer here */ + .timer = &ux500_timer, + .handle_irq = gic_handle_irq, + .init_machine = u8500_init_machine, + .dt_compat = u8500_dt_board_compat, +MACHINE_END +#endif diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h index 7ff6cbffc104..fdcfa8721bb4 100644 --- a/arch/arm/mach-ux500/board-mop500.h +++ b/arch/arm/mach-ux500/board-mop500.h @@ -75,10 +75,10 @@ struct i2c_board_info; -extern void mop500_sdi_init(void); -extern void snowball_sdi_init(void); -extern void hrefv60_sdi_init(void); -extern void mop500_sdi_tc35892_init(void); +extern void mop500_sdi_init(struct device *parent); +extern void snowball_sdi_init(struct device *parent); +extern void hrefv60_sdi_init(struct device *parent); +extern void mop500_sdi_tc35892_init(struct device *parent); void __init mop500_u8500uib_init(void); void __init mop500_stuib_init(void); void __init mop500_pins_init(void); diff --git a/arch/arm/mach-ux500/board-u5500-sdi.c b/arch/arm/mach-ux500/board-u5500-sdi.c index 63c3f8058ffc..836112eedde7 100644 --- a/arch/arm/mach-ux500/board-u5500-sdi.c +++ b/arch/arm/mach-ux500/board-u5500-sdi.c @@ -66,9 +66,9 @@ static struct mmci_platform_data u5500_sdi0_data = { #endif }; -void __init u5500_sdi_init(void) +void __init u5500_sdi_init(struct device *parent) { nmk_config_pins(u5500_sdi_pins, ARRAY_SIZE(u5500_sdi_pins)); - db5500_add_sdi0(&u5500_sdi0_data); + db5500_add_sdi0(parent, &u5500_sdi0_data); } diff --git a/arch/arm/mach-ux500/board-u5500.c b/arch/arm/mach-ux500/board-u5500.c index 9de9e9c4dbbb..0ff4be72a809 100644 --- a/arch/arm/mach-ux500/board-u5500.c +++ b/arch/arm/mach-ux500/board-u5500.c @@ -97,9 +97,9 @@ static struct i2c_board_info __initdata u5500_i2c2_devices[] = { }, }; -static void __init u5500_i2c_init(void) +static void __init u5500_i2c_init(struct device *parent) { - db5500_add_i2c2(&u5500_i2c2_data); + db5500_add_i2c2(parent, &u5500_i2c2_data); i2c_register_board_info(2, ARRAY_AND_SIZE(u5500_i2c2_devices)); } @@ -126,20 +126,27 @@ static struct platform_device *u5500_platform_devices[] __initdata = { &ab5500_device, }; -static void __init u5500_uart_init(void) +static void __init u5500_uart_init(struct device *parent) { - db5500_add_uart0(NULL); - db5500_add_uart1(NULL); - db5500_add_uart2(NULL); + db5500_add_uart0(parent, NULL); + db5500_add_uart1(parent, NULL); + db5500_add_uart2(parent, NULL); } static void __init u5500_init_machine(void) { - u5500_init_devices(); + struct device *parent = NULL; + int i; + + parent = u5500_init_devices(); nmk_config_pins(u5500_pins, ARRAY_SIZE(u5500_pins)); - u5500_i2c_init(); - u5500_sdi_init(); - u5500_uart_init(); + + u5500_i2c_init(parent); + u5500_sdi_init(parent); + 
u5500_uart_init(parent); + + for (i = 0; i < ARRAY_SIZE(u5500_platform_devices); i++) + u5500_platform_devices[i]->dev.parent = parent; platform_add_devices(u5500_platform_devices, ARRAY_SIZE(u5500_platform_devices)); diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c index da5569d83d58..77a75ed0df67 100644 --- a/arch/arm/mach-ux500/cache-l2x0.c +++ b/arch/arm/mach-ux500/cache-l2x0.c @@ -5,6 +5,8 @@ */ #include <linux/io.h> +#include <linux/of.h> + #include <asm/cacheflush.h> #include <asm/hardware/cache-l2x0.h> #include <mach/hardware.h> @@ -45,7 +47,10 @@ static int __init ux500_l2x0_init(void) ux500_l2x0_unlock(); /* 64KB way size, 8 way associativity, force WA */ - l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff); + if (of_have_populated_dt()) + l2x0_of_init(0x3e060000, 0xc0000fff); + else + l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff); /* * We can't disable l2 as we are in non secure mode, currently diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c index 18aa5c05c69e..bca47f32082f 100644 --- a/arch/arm/mach-ux500/cpu-db5500.c +++ b/arch/arm/mach-ux500/cpu-db5500.c @@ -147,13 +147,13 @@ static resource_size_t __initdata db5500_gpio_base[] = { U5500_GPIOBANK7_BASE, }; -static void __init db5500_add_gpios(void) +static void __init db5500_add_gpios(struct device *parent) { struct nmk_gpio_platform_data pdata = { /* No custom data yet */ }; - dbx500_add_gpios(ARRAY_AND_SIZE(db5500_gpio_base), + dbx500_add_gpios(parent, ARRAY_AND_SIZE(db5500_gpio_base), IRQ_DB5500_GPIO0, &pdata); } @@ -212,14 +212,36 @@ static int usb_db5500_tx_dma_cfg[] = { DB5500_DMA_DEV38_USB_OTG_OEP_8 }; -void __init u5500_init_devices(void) +static const char *db5500_read_soc_id(void) { - db5500_add_gpios(); + return kasprintf(GFP_KERNEL, "u5500 currently unsupported\n"); +} + +static struct device * __init db5500_soc_device_init(void) +{ + const char *soc_id = db5500_read_soc_id(); + + return ux500_soc_device_init(soc_id); +} + +struct device * __init u5500_init_devices(void) +{ + struct device *parent; + int i; + + parent = db5500_soc_device_init(); + + db5500_add_gpios(parent); db5500_pmu_init(); - db5500_dma_init(); - db5500_add_rtc(); - db5500_add_usb(usb_db5500_rx_dma_cfg, usb_db5500_tx_dma_cfg); + db5500_dma_init(parent); + db5500_add_rtc(parent); + db5500_add_usb(parent, usb_db5500_rx_dma_cfg, usb_db5500_tx_dma_cfg); + + for (i = 0; i < ARRAY_SIZE(db5500_platform_devs); i++) + db5500_platform_devs[i]->dev.parent = parent; platform_add_devices(db5500_platform_devs, ARRAY_SIZE(db5500_platform_devs)); + + return parent; } diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 7176ee7491ab..9bd8163896cf 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -24,6 +24,7 @@ #include <mach/setup.h> #include <mach/devices.h> #include <mach/usb.h> +#include <mach/db8500-regs.h> #include "devices-db8500.h" #include "ste-dma40-db8500.h" @@ -132,13 +133,13 @@ static resource_size_t __initdata db8500_gpio_base[] = { U8500_GPIOBANK8_BASE, }; -static void __init db8500_add_gpios(void) +static void __init db8500_add_gpios(struct device *parent) { struct nmk_gpio_platform_data pdata = { .supports_sleepmode = true, }; - dbx500_add_gpios(ARRAY_AND_SIZE(db8500_gpio_base), + dbx500_add_gpios(parent, ARRAY_AND_SIZE(db8500_gpio_base), IRQ_DB8500_GPIO0, &pdata); } @@ -164,17 +165,44 @@ static int usb_db8500_tx_dma_cfg[] = { DB8500_DMA_DEV39_USB_OTG_OEP_8 }; +static const char *db8500_read_soc_id(void) +{ + void 
__iomem *uid = __io_address(U8500_BB_UID_BASE); + + return kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x", + readl((u32 *)uid+0), + readl((u32 *)uid+1), readl((u32 *)uid+2), + readl((u32 *)uid+3), readl((u32 *)uid+4)); +} + +static struct device * __init db8500_soc_device_init(void) +{ + const char *soc_id = db8500_read_soc_id(); + + return ux500_soc_device_init(soc_id); +} + /* * This function is called from the board init */ -void __init u8500_init_devices(void) +struct device * __init u8500_init_devices(void) { - db8500_add_rtc(); - db8500_add_gpios(); - db8500_add_usb(usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg); + struct device *parent; + int i; + + parent = db8500_soc_device_init(); + + db8500_add_rtc(parent); + db8500_add_gpios(parent); + db8500_add_usb(parent, usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg); + + platform_device_register_data(parent, + "cpufreq-u8500", -1, NULL, 0); + + for (i = 0; i < ARRAY_SIZE(platform_devs); i++) + platform_devs[i]->dev.parent = parent; - platform_device_register_simple("cpufreq-u8500", -1, NULL, 0); platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs)); - return ; + return parent; } diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c index 851308bf6424..d11f3892a27d 100644 --- a/arch/arm/mach-ux500/cpu.c +++ b/arch/arm/mach-ux500/cpu.c @@ -2,6 +2,7 @@ * Copyright (C) ST-Ericsson SA 2010 * * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson + * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson * License terms: GNU General Public License (GPL) version 2 */ @@ -11,6 +12,12 @@ #include <linux/mfd/db8500-prcmu.h> #include <linux/mfd/db5500-prcmu.h> #include <linux/clksrc-dbx500-prcmu.h> +#include <linux/sys_soc.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/stat.h> +#include <linux/of.h> +#include <linux/of_irq.h> #include <asm/hardware/gic.h> #include <asm/mach/map.h> @@ -23,6 +30,11 @@ void __iomem *_PRCMU_BASE; +static const struct of_device_id ux500_dt_irq_match[] = { + { .compatible = "arm,cortex-a9-gic", .data = gic_of_init, }, + {}, +}; + void __init ux500_init_irq(void) { void __iomem *dist_base; @@ -37,7 +49,12 @@ void __init ux500_init_irq(void) } else ux500_unknown_soc(); - gic_init(0, 29, dist_base, cpu_base); +#ifdef CONFIG_OF + if (of_have_populated_dt()) + of_irq_init(ux500_dt_irq_match); + else +#endif + gic_init(0, 29, dist_base, cpu_base); /* * Init clocks here so that they are available for system timer @@ -49,3 +66,73 @@ void __init ux500_init_irq(void) db8500_prcmu_early_init(); clk_init(); } + +static const char * __init ux500_get_machine(void) +{ + return kasprintf(GFP_KERNEL, "DB%4x", dbx500_partnumber()); +} + +static const char * __init ux500_get_family(void) +{ + return kasprintf(GFP_KERNEL, "ux500"); +} + +static const char * __init ux500_get_revision(void) +{ + unsigned int rev = dbx500_revision(); + + if (rev == 0x01) + return kasprintf(GFP_KERNEL, "%s", "ED"); + else if (rev >= 0xA0) + return kasprintf(GFP_KERNEL, "%d.%d", + (rev >> 4) - 0xA + 1, rev & 0xf); + + return kasprintf(GFP_KERNEL, "%s", "Unknown"); +} + +static ssize_t ux500_get_process(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + if (dbx500_id.process == 0x00) + return sprintf(buf, "Standard\n"); + + return sprintf(buf, "%02xnm\n", dbx500_id.process); +} + +static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr, + const char *soc_id) +{ + soc_dev_attr->soc_id = soc_id; + soc_dev_attr->machine = ux500_get_machine(); + soc_dev_attr->family =
ux500_get_family(); + soc_dev_attr->revision = ux500_get_revision(); +} + +struct device_attribute ux500_soc_attr = + __ATTR(process, S_IRUGO, ux500_get_process, NULL); + +struct device * __init ux500_soc_device_init(const char *soc_id) +{ + struct device *parent; + struct soc_device *soc_dev; + struct soc_device_attribute *soc_dev_attr; + + soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + if (!soc_dev_attr) + return ERR_PTR(-ENOMEM); + + soc_info_populate(soc_dev_attr, soc_id); + + soc_dev = soc_device_register(soc_dev_attr); + if (IS_ERR_OR_NULL(soc_dev)) { + kfree(soc_dev_attr); + return NULL; + } + + parent = soc_device_to_device(soc_dev); + if (!IS_ERR_OR_NULL(parent)) + device_create_file(parent, &ux500_soc_attr); + + return parent; +} diff --git a/arch/arm/mach-ux500/devices-common.c b/arch/arm/mach-ux500/devices-common.c index 898a64517b09..c5312a4b49f5 100644 --- a/arch/arm/mach-ux500/devices-common.c +++ b/arch/arm/mach-ux500/devices-common.c @@ -20,8 +20,9 @@ #include "devices-common.h" struct amba_device * -dbx500_add_amba_device(const char *name, resource_size_t base, - int irq, void *pdata, unsigned int periphid) +dbx500_add_amba_device(struct device *parent, const char *name, + resource_size_t base, int irq, void *pdata, + unsigned int periphid) { struct amba_device *dev; int ret; @@ -39,6 +40,8 @@ dbx500_add_amba_device(const char *name, resource_size_t base, dev->dev.platform_data = pdata; + dev->dev.parent = parent; + ret = amba_device_add(dev, &iomem_resource); if (ret) { amba_device_put(dev); @@ -49,60 +52,7 @@ dbx500_add_amba_device(const char *name, resource_size_t base, } static struct platform_device * -dbx500_add_platform_device(const char *name, int id, void *pdata, - struct resource *res, int resnum) -{ - struct platform_device *dev; - int ret; - - dev = platform_device_alloc(name, id); - if (!dev) - return ERR_PTR(-ENOMEM); - - dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - dev->dev.dma_mask = &dev->dev.coherent_dma_mask; - - ret = platform_device_add_resources(dev, res, resnum); - if (ret) - goto out_free; - - dev->dev.platform_data = pdata; - - ret = platform_device_add(dev); - if (ret) - goto out_free; - - return dev; - -out_free: - platform_device_put(dev); - return ERR_PTR(ret); -} - -struct platform_device * -dbx500_add_platform_device_4k1irq(const char *name, int id, - resource_size_t base, - int irq, void *pdata) -{ - struct resource resources[] = { - [0] = { - .start = base, - .end = base + SZ_4K - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = irq, - .end = irq, - .flags = IORESOURCE_IRQ, - } - }; - - return dbx500_add_platform_device(name, id, pdata, resources, - ARRAY_SIZE(resources)); -} - -static struct platform_device * -dbx500_add_gpio(int id, resource_size_t addr, int irq, +dbx500_add_gpio(struct device *parent, int id, resource_size_t addr, int irq, struct nmk_gpio_platform_data *pdata) { struct resource resources[] = { @@ -118,13 +68,18 @@ dbx500_add_gpio(int id, resource_size_t addr, int irq, } }; - return platform_device_register_resndata(NULL, "gpio", id, - resources, ARRAY_SIZE(resources), - pdata, sizeof(*pdata)); + return platform_device_register_resndata( + parent, + "gpio", + id, + resources, + ARRAY_SIZE(resources), + pdata, + sizeof(*pdata)); } -void dbx500_add_gpios(resource_size_t *base, int num, int irq, - struct nmk_gpio_platform_data *pdata) +void dbx500_add_gpios(struct device *parent, resource_size_t *base, int num, + int irq, struct nmk_gpio_platform_data *pdata) { int first = 0; int i; @@ -134,6 +89,6 
@@ void dbx500_add_gpios(resource_size_t *base, int num, int irq, pdata->first_irq = NOMADIK_GPIO_TO_IRQ(first); pdata->num_gpio = 32; - dbx500_add_gpio(i, base[i], irq, pdata); + dbx500_add_gpio(parent, i, base[i], irq, pdata); } } diff --git a/arch/arm/mach-ux500/devices-common.h b/arch/arm/mach-ux500/devices-common.h index 7825705033bf..39c74ec82add 100644 --- a/arch/arm/mach-ux500/devices-common.h +++ b/arch/arm/mach-ux500/devices-common.h @@ -8,80 +8,89 @@ #ifndef __DEVICES_COMMON_H #define __DEVICES_COMMON_H -extern struct amba_device * -dbx500_add_amba_device(const char *name, resource_size_t base, - int irq, void *pdata, unsigned int periphid); +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/sys_soc.h> +#include <plat/i2c.h> -extern struct platform_device * -dbx500_add_platform_device_4k1irq(const char *name, int id, - resource_size_t base, - int irq, void *pdata); +extern struct amba_device * +dbx500_add_amba_device(struct device *parent, const char *name, + resource_size_t base, int irq, void *pdata, + unsigned int periphid); struct spi_master_cntlr; static inline struct amba_device * -dbx500_add_msp_spi(const char *name, resource_size_t base, int irq, +dbx500_add_msp_spi(struct device *parent, const char *name, + resource_size_t base, int irq, struct spi_master_cntlr *pdata) { - return dbx500_add_amba_device(name, base, irq, pdata, 0); + return dbx500_add_amba_device(parent, name, base, irq, + pdata, 0); } static inline struct amba_device * -dbx500_add_spi(const char *name, resource_size_t base, int irq, - struct spi_master_cntlr *pdata, +dbx500_add_spi(struct device *parent, const char *name, resource_size_t base, + int irq, struct spi_master_cntlr *pdata, u32 periphid) { - return dbx500_add_amba_device(name, base, irq, pdata, periphid); + return dbx500_add_amba_device(parent, name, base, irq, + pdata, periphid); } struct mmci_platform_data; static inline struct amba_device * -dbx500_add_sdi(const char *name, resource_size_t base, int irq, - struct mmci_platform_data *pdata, - u32 periphid) +dbx500_add_sdi(struct device *parent, const char *name, resource_size_t base, + int irq, struct mmci_platform_data *pdata, u32 periphid) { - return dbx500_add_amba_device(name, base, irq, pdata, periphid); + return dbx500_add_amba_device(parent, name, base, irq, + pdata, periphid); } struct amba_pl011_data; static inline struct amba_device * -dbx500_add_uart(const char *name, resource_size_t base, int irq, - struct amba_pl011_data *pdata) +dbx500_add_uart(struct device *parent, const char *name, resource_size_t base, + int irq, struct amba_pl011_data *pdata) { - return dbx500_add_amba_device(name, base, irq, pdata, 0); + return dbx500_add_amba_device(parent, name, base, irq, pdata, 0); } struct nmk_i2c_controller; static inline struct platform_device * -dbx500_add_i2c(int id, resource_size_t base, int irq, - struct nmk_i2c_controller *pdata) -{ - return dbx500_add_platform_device_4k1irq("nmk-i2c", id, base, irq, - pdata); -} - -struct msp_i2s_platform_data; - -static inline struct platform_device * -dbx500_add_msp_i2s(int id, resource_size_t base, int irq, - struct msp_i2s_platform_data *pdata) +dbx500_add_i2c(struct device *parent, int id, resource_size_t base, int irq, + struct nmk_i2c_controller *data) { - return dbx500_add_platform_device_4k1irq("MSP_I2S", id, base, irq, - pdata); + struct resource res[] = { + DEFINE_RES_MEM(base, SZ_4K), + DEFINE_RES_IRQ(irq), + }; + + struct platform_device_info pdevinfo = { + .parent = parent, + .name = "nmk-i2c", 
+ .id = id, + .res = res, + .num_res = ARRAY_SIZE(res), + .data = data, + .size_data = sizeof(*data), + .dma_mask = DMA_BIT_MASK(32), + }; + + return platform_device_register_full(&pdevinfo); } static inline struct amba_device * -dbx500_add_rtc(resource_size_t base, int irq) +dbx500_add_rtc(struct device *parent, resource_size_t base, int irq) { - return dbx500_add_amba_device("rtc-pl031", base, irq, NULL, 0); + return dbx500_add_amba_device(parent, "rtc-pl031", base, irq, NULL, 0); } struct nmk_gpio_platform_data; -void dbx500_add_gpios(resource_size_t *base, int num, int irq, - struct nmk_gpio_platform_data *pdata); +void dbx500_add_gpios(struct device *parent, resource_size_t *base, int num, + int irq, struct nmk_gpio_platform_data *pdata); #endif diff --git a/arch/arm/mach-ux500/devices-db5500.h b/arch/arm/mach-ux500/devices-db5500.h index 0c4bccd02b90..e70955502c35 100644 --- a/arch/arm/mach-ux500/devices-db5500.h +++ b/arch/arm/mach-ux500/devices-db5500.h @@ -10,70 +10,90 @@ #include "devices-common.h" -#define db5500_add_i2c1(pdata) \ - dbx500_add_i2c(1, U5500_I2C1_BASE, IRQ_DB5500_I2C1, pdata) -#define db5500_add_i2c2(pdata) \ - dbx500_add_i2c(2, U5500_I2C2_BASE, IRQ_DB5500_I2C2, pdata) -#define db5500_add_i2c3(pdata) \ - dbx500_add_i2c(3, U5500_I2C3_BASE, IRQ_DB5500_I2C3, pdata) +#define db5500_add_i2c1(parent, pdata) \ + dbx500_add_i2c(parent, 1, U5500_I2C1_BASE, IRQ_DB5500_I2C1, pdata) +#define db5500_add_i2c2(parent, pdata) \ + dbx500_add_i2c(parent, 2, U5500_I2C2_BASE, IRQ_DB5500_I2C2, pdata) +#define db5500_add_i2c3(parent, pdata) \ + dbx500_add_i2c(parent, 3, U5500_I2C3_BASE, IRQ_DB5500_I2C3, pdata) -#define db5500_add_msp0_i2s(pdata) \ - dbx500_add_msp_i2s(0, U5500_MSP0_BASE, IRQ_DB5500_MSP0, pdata) -#define db5500_add_msp1_i2s(pdata) \ - dbx500_add_msp_i2s(1, U5500_MSP1_BASE, IRQ_DB5500_MSP1, pdata) -#define db5500_add_msp2_i2s(pdata) \ - dbx500_add_msp_i2s(2, U5500_MSP2_BASE, IRQ_DB5500_MSP2, pdata) +#define db5500_add_msp0_spi(parent, pdata) \ + dbx500_add_msp_spi(parent, "msp0", U5500_MSP0_BASE, \ + IRQ_DB5500_MSP0, pdata) +#define db5500_add_msp1_spi(parent, pdata) \ + dbx500_add_msp_spi(parent, "msp1", U5500_MSP1_BASE, \ + IRQ_DB5500_MSP1, pdata) +#define db5500_add_msp2_spi(parent, pdata) \ + dbx500_add_msp_spi(parent, "msp2", U5500_MSP2_BASE, \ + IRQ_DB5500_MSP2, pdata) -#define db5500_add_msp0_spi(pdata) \ - dbx500_add_msp_spi("msp0", U5500_MSP0_BASE, IRQ_DB5500_MSP0, pdata) -#define db5500_add_msp1_spi(pdata) \ - dbx500_add_msp_spi("msp1", U5500_MSP1_BASE, IRQ_DB5500_MSP1, pdata) -#define db5500_add_msp2_spi(pdata) \ - dbx500_add_msp_spi("msp2", U5500_MSP2_BASE, IRQ_DB5500_MSP2, pdata) +#define db5500_add_msp0_spi(parent, pdata) \ + dbx500_add_msp_spi(parent, "msp0", U5500_MSP0_BASE, \ + IRQ_DB5500_MSP0, pdata) +#define db5500_add_msp1_spi(parent, pdata) \ + dbx500_add_msp_spi(parent, "msp1", U5500_MSP1_BASE, \ + IRQ_DB5500_MSP1, pdata) +#define db5500_add_msp2_spi(parent, pdata) \ + dbx500_add_msp_spi(parent, "msp2", U5500_MSP2_BASE, \ + IRQ_DB5500_MSP2, pdata) -#define db5500_add_rtc() \ - dbx500_add_rtc(U5500_RTC_BASE, IRQ_DB5500_RTC); +#define db5500_add_rtc(parent) \ + dbx500_add_rtc(parent, U5500_RTC_BASE, IRQ_DB5500_RTC); -#define db5500_add_usb(rx_cfg, tx_cfg) \ - ux500_add_usb(U5500_USBOTG_BASE, IRQ_DB5500_USBOTG, rx_cfg, tx_cfg) +#define db5500_add_usb(parent, rx_cfg, tx_cfg) \ + ux500_add_usb(parent, U5500_USBOTG_BASE, \ + IRQ_DB5500_USBOTG, rx_cfg, tx_cfg) -#define db5500_add_sdi0(pdata) \ - dbx500_add_sdi("sdi0", U5500_SDI0_BASE, IRQ_DB5500_SDMMC0, 
pdata, \ +#define db5500_add_sdi0(parent, pdata) \ + dbx500_add_sdi(parent, "sdi0", U5500_SDI0_BASE, \ + IRQ_DB5500_SDMMC0, pdata, \ 0x10480180) -#define db5500_add_sdi1(pdata) \ - dbx500_add_sdi("sdi1", U5500_SDI1_BASE, IRQ_DB5500_SDMMC1, pdata, \ +#define db5500_add_sdi1(parent, pdata) \ + dbx500_add_sdi(parent, "sdi1", U5500_SDI1_BASE, \ + IRQ_DB5500_SDMMC1, pdata, \ 0x10480180) -#define db5500_add_sdi2(pdata) \ - dbx500_add_sdi("sdi2", U5500_SDI2_BASE, IRQ_DB5500_SDMMC2, pdata \ +#define db5500_add_sdi2(parent, pdata) \ + dbx500_add_sdi(parent, "sdi2", U5500_SDI2_BASE, \ + IRQ_DB5500_SDMMC2, pdata \ 0x10480180) -#define db5500_add_sdi3(pdata) \ - dbx500_add_sdi("sdi3", U5500_SDI3_BASE, IRQ_DB5500_SDMMC3, pdata \ +#define db5500_add_sdi3(parent, pdata) \ + dbx500_add_sdi(parent, "sdi3", U5500_SDI3_BASE, \ + IRQ_DB5500_SDMMC3, pdata \ 0x10480180) -#define db5500_add_sdi4(pdata) \ - dbx500_add_sdi("sdi4", U5500_SDI4_BASE, IRQ_DB5500_SDMMC4, pdata \ +#define db5500_add_sdi4(parent, pdata) \ + dbx500_add_sdi(parent, "sdi4", U5500_SDI4_BASE, \ + IRQ_DB5500_SDMMC4, pdata \ 0x10480180) /* This one has a bad peripheral ID in the U5500 silicon */ -#define db5500_add_spi0(pdata) \ - dbx500_add_spi("spi0", U5500_SPI0_BASE, IRQ_DB5500_SPI0, pdata, \ +#define db5500_add_spi0(parent, pdata) \ + dbx500_add_spi(parent, "spi0", U5500_SPI0_BASE, \ + IRQ_DB5500_SPI0, pdata, \ 0x10080023) -#define db5500_add_spi1(pdata) \ - dbx500_add_spi("spi1", U5500_SPI1_BASE, IRQ_DB5500_SPI1, pdata, \ +#define db5500_add_spi1(parent, pdata) \ + dbx500_add_spi(parent, "spi1", U5500_SPI1_BASE, \ + IRQ_DB5500_SPI1, pdata, \ 0x10080023) -#define db5500_add_spi2(pdata) \ - dbx500_add_spi("spi2", U5500_SPI2_BASE, IRQ_DB5500_SPI2, pdata \ +#define db5500_add_spi2(parent, pdata) \ + dbx500_add_spi(parent, "spi2", U5500_SPI2_BASE, \ + IRQ_DB5500_SPI2, pdata \ 0x10080023) -#define db5500_add_spi3(pdata) \ - dbx500_add_spi("spi3", U5500_SPI3_BASE, IRQ_DB5500_SPI3, pdata \ +#define db5500_add_spi3(parent, pdata) \ + dbx500_add_spi(parent, "spi3", U5500_SPI3_BASE, \ + IRQ_DB5500_SPI3, pdata \ 0x10080023) -#define db5500_add_uart0(plat) \ - dbx500_add_uart("uart0", U5500_UART0_BASE, IRQ_DB5500_UART0, plat) -#define db5500_add_uart1(plat) \ - dbx500_add_uart("uart1", U5500_UART1_BASE, IRQ_DB5500_UART1, plat) -#define db5500_add_uart2(plat) \ - dbx500_add_uart("uart2", U5500_UART2_BASE, IRQ_DB5500_UART2, plat) -#define db5500_add_uart3(plat) \ - dbx500_add_uart("uart3", U5500_UART3_BASE, IRQ_DB5500_UART3, plat) +#define db5500_add_uart0(parent, plat) \ + dbx500_add_uart(parent, "uart0", U5500_UART0_BASE, \ + IRQ_DB5500_UART0, plat) +#define db5500_add_uart1(parent, plat) \ + dbx500_add_uart(parent, "uart1", U5500_UART1_BASE, \ + IRQ_DB5500_UART1, plat) +#define db5500_add_uart2(parent, plat) \ + dbx500_add_uart(parent, "uart2", U5500_UART2_BASE, \ + IRQ_DB5500_UART2, plat) +#define db5500_add_uart3(parent, plat) \ + dbx500_add_uart(parent, "uart3", U5500_UART3_BASE, \ + IRQ_DB5500_UART3, plat) #endif diff --git a/arch/arm/mach-ux500/devices-db8500.h b/arch/arm/mach-ux500/devices-db8500.h index cbd4a9ae8109..9fd93e9da529 100644 --- a/arch/arm/mach-ux500/devices-db8500.h +++ b/arch/arm/mach-ux500/devices-db8500.h @@ -14,88 +14,114 @@ struct ske_keypad_platform_data; struct pl022_ssp_controller; static inline struct platform_device * -db8500_add_ske_keypad(struct ske_keypad_platform_data *pdata) +db8500_add_ske_keypad(struct device *parent, + struct ske_keypad_platform_data *pdata, + size_t size) { - return 
dbx500_add_platform_device_4k1irq("nmk-ske-keypad", -1, - U8500_SKE_BASE, - IRQ_DB8500_KB, pdata); + struct resource resources[] = { + DEFINE_RES_MEM(U8500_SKE_BASE, SZ_4K), + DEFINE_RES_IRQ(IRQ_DB8500_KB), + }; + + return platform_device_register_resndata(parent, "nmk-ske-keypad", -1, + resources, 2, pdata, size); } static inline struct amba_device * -db8500_add_ssp(const char *name, resource_size_t base, int irq, - struct pl022_ssp_controller *pdata) +db8500_add_ssp(struct device *parent, const char *name, resource_size_t base, + int irq, struct pl022_ssp_controller *pdata) { - return dbx500_add_amba_device(name, base, irq, pdata, 0); + return dbx500_add_amba_device(parent, name, base, irq, pdata, 0); } -#define db8500_add_i2c0(pdata) \ - dbx500_add_i2c(0, U8500_I2C0_BASE, IRQ_DB8500_I2C0, pdata) -#define db8500_add_i2c1(pdata) \ - dbx500_add_i2c(1, U8500_I2C1_BASE, IRQ_DB8500_I2C1, pdata) -#define db8500_add_i2c2(pdata) \ - dbx500_add_i2c(2, U8500_I2C2_BASE, IRQ_DB8500_I2C2, pdata) -#define db8500_add_i2c3(pdata) \ - dbx500_add_i2c(3, U8500_I2C3_BASE, IRQ_DB8500_I2C3, pdata) -#define db8500_add_i2c4(pdata) \ - dbx500_add_i2c(4, U8500_I2C4_BASE, IRQ_DB8500_I2C4, pdata) - -#define db8500_add_msp0_i2s(pdata) \ - dbx500_add_msp_i2s(0, U8500_MSP0_BASE, IRQ_DB8500_MSP0, pdata) -#define db8500_add_msp1_i2s(pdata) \ - dbx500_add_msp_i2s(1, U8500_MSP1_BASE, IRQ_DB8500_MSP1, pdata) -#define db8500_add_msp2_i2s(pdata) \ - dbx500_add_msp_i2s(2, U8500_MSP2_BASE, IRQ_DB8500_MSP2, pdata) -#define db8500_add_msp3_i2s(pdata) \ - dbx500_add_msp_i2s(3, U8500_MSP3_BASE, IRQ_DB8500_MSP1, pdata) - -#define db8500_add_msp0_spi(pdata) \ - dbx500_add_msp_spi("msp0", U8500_MSP0_BASE, IRQ_DB8500_MSP0, pdata) -#define db8500_add_msp1_spi(pdata) \ - dbx500_add_msp_spi("msp1", U8500_MSP1_BASE, IRQ_DB8500_MSP1, pdata) -#define db8500_add_msp2_spi(pdata) \ - dbx500_add_msp_spi("msp2", U8500_MSP2_BASE, IRQ_DB8500_MSP2, pdata) -#define db8500_add_msp3_spi(pdata) \ - dbx500_add_msp_spi("msp3", U8500_MSP3_BASE, IRQ_DB8500_MSP1, pdata) - -#define db8500_add_rtc() \ - dbx500_add_rtc(U8500_RTC_BASE, IRQ_DB8500_RTC); - -#define db8500_add_usb(rx_cfg, tx_cfg) \ - ux500_add_usb(U8500_USBOTG_BASE, IRQ_DB8500_USBOTG, rx_cfg, tx_cfg) - -#define db8500_add_sdi0(pdata, pid) \ - dbx500_add_sdi("sdi0", U8500_SDI0_BASE, IRQ_DB8500_SDMMC0, pdata, pid) -#define db8500_add_sdi1(pdata, pid) \ - dbx500_add_sdi("sdi1", U8500_SDI1_BASE, IRQ_DB8500_SDMMC1, pdata, pid) -#define db8500_add_sdi2(pdata, pid) \ - dbx500_add_sdi("sdi2", U8500_SDI2_BASE, IRQ_DB8500_SDMMC2, pdata, pid) -#define db8500_add_sdi3(pdata, pid) \ - dbx500_add_sdi("sdi3", U8500_SDI3_BASE, IRQ_DB8500_SDMMC3, pdata, pid) -#define db8500_add_sdi4(pdata, pid) \ - dbx500_add_sdi("sdi4", U8500_SDI4_BASE, IRQ_DB8500_SDMMC4, pdata, pid) -#define db8500_add_sdi5(pdata, pid) \ - dbx500_add_sdi("sdi5", U8500_SDI5_BASE, IRQ_DB8500_SDMMC5, pdata, pid) - -#define db8500_add_ssp0(pdata) \ - db8500_add_ssp("ssp0", U8500_SSP0_BASE, IRQ_DB8500_SSP0, pdata) -#define db8500_add_ssp1(pdata) \ - db8500_add_ssp("ssp1", U8500_SSP1_BASE, IRQ_DB8500_SSP1, pdata) - -#define db8500_add_spi0(pdata) \ - dbx500_add_spi("spi0", U8500_SPI0_BASE, IRQ_DB8500_SPI0, pdata, 0) -#define db8500_add_spi1(pdata) \ - dbx500_add_spi("spi1", U8500_SPI1_BASE, IRQ_DB8500_SPI1, pdata, 0) -#define db8500_add_spi2(pdata) \ - dbx500_add_spi("spi2", U8500_SPI2_BASE, IRQ_DB8500_SPI2, pdata, 0) -#define db8500_add_spi3(pdata) \ - dbx500_add_spi("spi3", U8500_SPI3_BASE, IRQ_DB8500_SPI3, pdata, 0) - -#define 
db8500_add_uart0(pdata) \ - dbx500_add_uart("uart0", U8500_UART0_BASE, IRQ_DB8500_UART0, pdata) -#define db8500_add_uart1(pdata) \ - dbx500_add_uart("uart1", U8500_UART1_BASE, IRQ_DB8500_UART1, pdata) -#define db8500_add_uart2(pdata) \ - dbx500_add_uart("uart2", U8500_UART2_BASE, IRQ_DB8500_UART2, pdata) +#define db8500_add_i2c0(parent, pdata) \ + dbx500_add_i2c(parent, 0, U8500_I2C0_BASE, IRQ_DB8500_I2C0, pdata) +#define db8500_add_i2c1(parent, pdata) \ + dbx500_add_i2c(parent, 1, U8500_I2C1_BASE, IRQ_DB8500_I2C1, pdata) +#define db8500_add_i2c2(parent, pdata) \ + dbx500_add_i2c(parent, 2, U8500_I2C2_BASE, IRQ_DB8500_I2C2, pdata) +#define db8500_add_i2c3(parent, pdata) \ + dbx500_add_i2c(parent, 3, U8500_I2C3_BASE, IRQ_DB8500_I2C3, pdata) +#define db8500_add_i2c4(parent, pdata) \ + dbx500_add_i2c(parent, 4, U8500_I2C4_BASE, IRQ_DB8500_I2C4, pdata) + +#define db8500_add_msp0_i2s(parent, pdata) \ + dbx500_add_msp_i2s(parent, 0, U8500_MSP0_BASE, IRQ_DB8500_MSP0, pdata) +#define db8500_add_msp1_i2s(parent, pdata) \ + dbx500_add_msp_i2s(parent, 1, U8500_MSP1_BASE, IRQ_DB8500_MSP1, pdata) +#define db8500_add_msp2_i2s(parent, pdata) \ + dbx500_add_msp_i2s(parent, 2, U8500_MSP2_BASE, IRQ_DB8500_MSP2, pdata) +#define db8500_add_msp3_i2s(parent, pdata) \ + dbx500_add_msp_i2s(parent, 3, U8500_MSP3_BASE, IRQ_DB8500_MSP1, pdata) + +#define db8500_add_msp0_spi(parent, pdata) \ + dbx500_add_msp_spi(parent, "msp0", U8500_MSP0_BASE, \ + IRQ_DB8500_MSP0, pdata) +#define db8500_add_msp1_spi(parent, pdata) \ + dbx500_add_msp_spi(parent, "msp1", U8500_MSP1_BASE, \ + IRQ_DB8500_MSP1, pdata) +#define db8500_add_msp2_spi(parent, pdata) \ + dbx500_add_msp_spi(parent, "msp2", U8500_MSP2_BASE, \ + IRQ_DB8500_MSP2, pdata) +#define db8500_add_msp3_spi(parent, pdata) \ + dbx500_add_msp_spi(parent, "msp3", U8500_MSP3_BASE, \ + IRQ_DB8500_MSP1, pdata) + +#define db8500_add_rtc(parent) \ + dbx500_add_rtc(parent, U8500_RTC_BASE, IRQ_DB8500_RTC); + +#define db8500_add_usb(parent, rx_cfg, tx_cfg) \ + ux500_add_usb(parent, U8500_USBOTG_BASE, \ + IRQ_DB8500_USBOTG, rx_cfg, tx_cfg) + +#define db8500_add_sdi0(parent, pdata, pid) \ + dbx500_add_sdi(parent, "sdi0", U8500_SDI0_BASE, \ + IRQ_DB8500_SDMMC0, pdata, pid) +#define db8500_add_sdi1(parent, pdata, pid) \ + dbx500_add_sdi(parent, "sdi1", U8500_SDI1_BASE, \ + IRQ_DB8500_SDMMC1, pdata, pid) +#define db8500_add_sdi2(parent, pdata, pid) \ + dbx500_add_sdi(parent, "sdi2", U8500_SDI2_BASE, \ + IRQ_DB8500_SDMMC2, pdata, pid) +#define db8500_add_sdi3(parent, pdata, pid) \ + dbx500_add_sdi(parent, "sdi3", U8500_SDI3_BASE, \ + IRQ_DB8500_SDMMC3, pdata, pid) +#define db8500_add_sdi4(parent, pdata, pid) \ + dbx500_add_sdi(parent, "sdi4", U8500_SDI4_BASE, \ + IRQ_DB8500_SDMMC4, pdata, pid) +#define db8500_add_sdi5(parent, pdata, pid) \ + dbx500_add_sdi(parent, "sdi5", U8500_SDI5_BASE, \ + IRQ_DB8500_SDMMC5, pdata, pid) + +#define db8500_add_ssp0(parent, pdata) \ + db8500_add_ssp(parent, "ssp0", U8500_SSP0_BASE, \ + IRQ_DB8500_SSP0, pdata) +#define db8500_add_ssp1(parent, pdata) \ + db8500_add_ssp(parent, "ssp1", U8500_SSP1_BASE, \ + IRQ_DB8500_SSP1, pdata) + +#define db8500_add_spi0(parent, pdata) \ + dbx500_add_spi(parent, "spi0", U8500_SPI0_BASE, \ + IRQ_DB8500_SPI0, pdata, 0) +#define db8500_add_spi1(parent, pdata) \ + dbx500_add_spi(parent, "spi1", U8500_SPI1_BASE, \ + IRQ_DB8500_SPI1, pdata, 0) +#define db8500_add_spi2(parent, pdata) \ + dbx500_add_spi(parent, "spi2", U8500_SPI2_BASE, \ + IRQ_DB8500_SPI2, pdata, 0) +#define db8500_add_spi3(parent, pdata) \ + dbx500_add_spi(parent, 
"spi3", U8500_SPI3_BASE, \ + IRQ_DB8500_SPI3, pdata, 0) + +#define db8500_add_uart0(parent, pdata) \ + dbx500_add_uart(parent, "uart0", U8500_UART0_BASE, \ + IRQ_DB8500_UART0, pdata) +#define db8500_add_uart1(parent, pdata) \ + dbx500_add_uart(parent, "uart1", U8500_UART1_BASE, \ + IRQ_DB8500_UART1, pdata) +#define db8500_add_uart2(parent, pdata) \ + dbx500_add_uart(parent, "uart2", U8500_UART2_BASE, \ + IRQ_DB8500_UART2, pdata) #endif diff --git a/arch/arm/mach-ux500/dma-db5500.c b/arch/arm/mach-ux500/dma-db5500.c index 1cfab68ae417..41e9470fa0e6 100644 --- a/arch/arm/mach-ux500/dma-db5500.c +++ b/arch/arm/mach-ux500/dma-db5500.c @@ -125,10 +125,11 @@ static struct platform_device dma40_device = { .resource = dma40_resources }; -void __init db5500_dma_init(void) +void __init db5500_dma_init(struct device *parent) { int ret; + dma40_device.dev.parent = parent; ret = platform_device_register(&dma40_device); if (ret) dev_err(&dma40_device.dev, "unable to register device: %d\n", ret); diff --git a/arch/arm/mach-ux500/include/mach/db8500-regs.h b/arch/arm/mach-ux500/include/mach/db8500-regs.h index 80e10f50282e..9ec20b96d8f2 100644 --- a/arch/arm/mach-ux500/include/mach/db8500-regs.h +++ b/arch/arm/mach-ux500/include/mach/db8500-regs.h @@ -161,4 +161,7 @@ #define U8500_MODEM_BASE 0xe000000 #define U8500_APE_BASE 0x6000000 +/* SoC identification number information */ +#define U8500_BB_UID_BASE (U8500_BACKUPRAM1_BASE + 0xFC0) + #endif diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h index d2d4131435a6..7d34c52798b5 100644 --- a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h +++ b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h @@ -13,7 +13,7 @@ #define MOP500_AB8500_IRQ_BASE IRQ_BOARD_START #define MOP500_AB8500_IRQ_END (MOP500_AB8500_IRQ_BASE \ - + AB8500_NR_IRQS) + + AB8500_MAX_NR_IRQS) /* TC35892 */ #define TC35892_NR_INTERNAL_IRQS 8 diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h index 93d403955eaa..3dc00ffa7bfa 100644 --- a/arch/arm/mach-ux500/include/mach/setup.h +++ b/arch/arm/mach-ux500/include/mach/setup.h @@ -18,14 +18,16 @@ void __init ux500_map_io(void); extern void __init u5500_map_io(void); extern void __init u8500_map_io(void); -extern void __init u5500_init_devices(void); -extern void __init u8500_init_devices(void); +extern struct device * __init u5500_init_devices(void); +extern struct device * __init u8500_init_devices(void); extern void __init ux500_init_irq(void); -extern void __init u5500_sdi_init(void); +extern void __init u5500_sdi_init(struct device *parent); -extern void __init db5500_dma_init(void); +extern void __init db5500_dma_init(struct device *parent); + +extern struct device *ux500_soc_device_init(const char *soc_id); struct amba_device; extern void __init amba_add_devices(struct amba_device *devs[], int num); diff --git a/arch/arm/mach-ux500/include/mach/usb.h b/arch/arm/mach-ux500/include/mach/usb.h index d3739d418813..4c1cc50a595a 100644 --- a/arch/arm/mach-ux500/include/mach/usb.h +++ b/arch/arm/mach-ux500/include/mach/usb.h @@ -20,6 +20,6 @@ struct ux500_musb_board_data { bool (*dma_filter)(struct dma_chan *chan, void *filter_param); }; -void ux500_add_usb(resource_size_t base, int irq, int *dma_rx_cfg, - int *dma_tx_cfg); +void ux500_add_usb(struct device *parent, resource_size_t base, + int irq, int *dma_rx_cfg, int *dma_tx_cfg); #endif diff --git a/arch/arm/mach-ux500/timer.c b/arch/arm/mach-ux500/timer.c index 
e9d580702fbb..d37df98b5c32 100644 --- a/arch/arm/mach-ux500/timer.c +++ b/arch/arm/mach-ux500/timer.c @@ -7,6 +7,7 @@ #include <linux/io.h> #include <linux/errno.h> #include <linux/clksrc-dbx500-prcmu.h> +#include <linux/of.h> #include <asm/smp_twd.h> @@ -30,9 +31,13 @@ static void __init ux500_twd_init(void) twd_local_timer = cpu_is_u5500() ? &u5500_twd_local_timer : &u8500_twd_local_timer; - err = twd_local_timer_register(twd_local_timer); - if (err) - pr_err("twd_local_timer_register failed %d\n", err); + if (of_have_populated_dt()) + twd_local_timer_of_register(); + else { + err = twd_local_timer_register(twd_local_timer); + if (err) + pr_err("twd_local_timer_register failed %d\n", err); + } } #else #define ux500_twd_init() do { } while(0) diff --git a/arch/arm/mach-ux500/usb.c b/arch/arm/mach-ux500/usb.c index 9f9e1c203061..a74af389bc63 100644 --- a/arch/arm/mach-ux500/usb.c +++ b/arch/arm/mach-ux500/usb.c @@ -7,6 +7,7 @@ #include <linux/platform_device.h> #include <linux/usb/musb.h> #include <linux/dma-mapping.h> + #include <plat/ste_dma40.h> #include <mach/hardware.h> #include <mach/usb.h> @@ -140,8 +141,8 @@ static inline void ux500_usb_dma_update_tx_ch_config(int *dst_dev_type) musb_dma_tx_ch[idx].dst_dev_type = dst_dev_type[idx]; } -void ux500_add_usb(resource_size_t base, int irq, int *dma_rx_cfg, - int *dma_tx_cfg) +void ux500_add_usb(struct device *parent, resource_size_t base, int irq, + int *dma_rx_cfg, int *dma_tx_cfg) { ux500_musb_device.resource[0].start = base; ux500_musb_device.resource[0].end = base + SZ_64K - 1; @@ -151,5 +152,7 @@ void ux500_add_usb(resource_size_t base, int irq, int *dma_rx_cfg, ux500_usb_dma_update_rx_ch_config(dma_rx_cfg); ux500_usb_dma_update_tx_ch_config(dma_tx_cfg); + ux500_musb_device.dev.parent = parent; + platform_device_register(&ux500_musb_device); } diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 0968772aedbe..6bbd74e950ab 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -36,7 +36,6 @@ #include <linux/clkdev.h> #include <linux/mtd/physmap.h> -#include <asm/system.h> #include <asm/irq.h> #include <asm/leds.h> #include <asm/hardware/arm_timer.h> diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c index 51733b022d04..a6e23f464528 100644 --- a/arch/arm/mach-versatile/pci.c +++ b/arch/arm/mach-versatile/pci.c @@ -24,7 +24,6 @@ #include <mach/hardware.h> #include <asm/irq.h> -#include <asm/system.h> #include <asm/mach/pci.h> /* diff --git a/arch/arm/mach-vexpress/hotplug.c b/arch/arm/mach-vexpress/hotplug.c index 3034a4dab4a1..c504a72b94d6 100644 --- a/arch/arm/mach-vexpress/hotplug.c +++ b/arch/arm/mach-vexpress/hotplug.c @@ -14,7 +14,7 @@ #include <asm/cacheflush.h> #include <asm/smp_plat.h> -#include <asm/system.h> +#include <asm/cp15.h> extern volatile int pen_release; diff --git a/arch/arm/mach-w90x900/cpu.c b/arch/arm/mach-w90x900/cpu.c index 9a0661992909..9e4dd8b63c4a 100644 --- a/arch/arm/mach-w90x900/cpu.c +++ b/arch/arm/mach-w90x900/cpu.c @@ -28,6 +28,7 @@ #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <asm/irq.h> +#include <asm/system_misc.h> #include <mach/hardware.h> #include <mach/regs-serial.h> diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index caf14dc059e5..9107231aacc5 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -22,7 +22,8 @@ #include <linux/sched.h> #include <linux/uaccess.h> -#include <asm/system.h> +#include <asm/cp15.h> +#include <asm/system_info.h> #include 
<asm/unaligned.h> #include "fault.h" diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index e0b0e7a4ec68..dd3d59122cc3 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/highmem.h> #include <asm/cacheflush.h> +#include <asm/cp15.h> #include <plat/cache-feroceon-l2.h> /* diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c index 50868651890f..1fbca05fe906 100644 --- a/arch/arm/mm/cache-tauros2.c +++ b/arch/arm/mm/cache-tauros2.c @@ -16,6 +16,7 @@ #include <linux/init.h> #include <asm/cacheflush.h> +#include <asm/cp15.h> #include <asm/hardware/cache-tauros2.h> diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c index 5a32020471e3..6c3edeb66e74 100644 --- a/arch/arm/mm/cache-xsc3l2.c +++ b/arch/arm/mm/cache-xsc3l2.c @@ -18,7 +18,7 @@ */ #include <linux/init.h> #include <linux/highmem.h> -#include <asm/system.h> +#include <asm/cp15.h> #include <asm/cputype.h> #include <asm/cacheflush.h> diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index bb7eac381a8e..5bdff5c3e6cb 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -21,8 +21,9 @@ #include <linux/perf_event.h> #include <asm/exception.h> -#include <asm/system.h> #include <asm/pgtable.h> +#include <asm/system_misc.h> +#include <asm/system_info.h> #include <asm/tlbflush.h> #include "fault.h" diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 1a8d4aa821be..062d61a1f87d 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -16,7 +16,6 @@ #include <asm/cachetype.h> #include <asm/highmem.h> #include <asm/smp_plat.h> -#include <asm/system.h> #include <asm/tlbflush.h> #include "mm.h" diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index feacf4c76712..ab88ed4f8e08 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -5,6 +5,7 @@ #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/sections.h> +#include <asm/system_info.h> pgd_t *idmap_pgd; diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 80632e8d7538..6780b49f2c69 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -26,12 +26,14 @@ #include <linux/vmalloc.h> #include <linux/io.h> +#include <asm/cp15.h> #include <asm/cputype.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/sizes.h> +#include <asm/system_info.h> #include <asm/mach/map.h> #include "mm.h" diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 94c5a0c94f5e..cd439c1dd506 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -17,6 +17,7 @@ #include <linux/fs.h> #include <linux/vmalloc.h> +#include <asm/cp15.h> #include <asm/cputype.h> #include <asm/sections.h> #include <asm/cachetype.h> @@ -25,6 +26,7 @@ #include <asm/smp_plat.h> #include <asm/tlb.h> #include <asm/highmem.h> +#include <asm/system_info.h> #include <asm/traps.h> #include <asm/mach/arch.h> diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index a3e78ccabd65..0acb089d0f70 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -12,6 +12,7 @@ #include <linux/highmem.h> #include <linux/slab.h> +#include <asm/cp15.h> #include <asm/pgalloc.h> #include <asm/page.h> #include <asm/tlbflush.h> diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index 272558a133a3..d217e9795d74 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S @@ -22,7 +22,6 @@ #include <asm/pgtable.h> #include <asm/page.h> #include <asm/ptrace.h> -#include 
<asm/system.h> #include "proc-macros.S" diff --git a/arch/arm/nwfpe/fpa11.c b/arch/arm/nwfpe/fpa11.c index cc60acde84d9..2782ebcc2ed3 100644 --- a/arch/arm/nwfpe/fpa11.c +++ b/arch/arm/nwfpe/fpa11.c @@ -28,7 +28,6 @@ #include <linux/compiler.h> #include <linux/string.h> -#include <asm/system.h> /* Reset the FPA11 chip. Called to initialize and reset the emulator. */ static void resetFPA11(void) diff --git a/arch/arm/plat-iop/i2c.c b/arch/arm/plat-iop/i2c.c index 4efe392859ee..88215ad031a2 100644 --- a/arch/arm/plat-iop/i2c.c +++ b/arch/arm/plat-iop/i2c.c @@ -23,7 +23,6 @@ #include <asm/page.h> #include <asm/mach/map.h> #include <asm/setup.h> -#include <asm/system.h> #include <asm/memory.h> #include <mach/hardware.h> #include <asm/hardware/iop3xx.h> diff --git a/arch/arm/plat-iop/pci.c b/arch/arm/plat-iop/pci.c index 72768356447a..0da42058a20f 100644 --- a/arch/arm/plat-iop/pci.c +++ b/arch/arm/plat-iop/pci.c @@ -20,7 +20,6 @@ #include <linux/io.h> #include <asm/irq.h> #include <asm/signal.h> -#include <asm/system.h> #include <mach/hardware.h> #include <asm/mach/pci.h> #include <asm/hardware/iop3xx.h> diff --git a/arch/arm/plat-iop/restart.c b/arch/arm/plat-iop/restart.c index 6a85a0c502e6..33fa699a4d28 100644 --- a/arch/arm/plat-iop/restart.c +++ b/arch/arm/plat-iop/restart.c @@ -8,6 +8,7 @@ * published by the Free Software Foundation. */ #include <asm/hardware/iop3xx.h> +#include <asm/system_misc.h> #include <mach/hardware.h> void iop3xx_restart(char mode, const char *cmd) diff --git a/arch/arm/plat-mxc/system.c b/arch/arm/plat-mxc/system.c index f30dcacbbd0a..1996c3e3b8fe 100644 --- a/arch/arm/plat-mxc/system.c +++ b/arch/arm/plat-mxc/system.c @@ -25,8 +25,8 @@ #include <mach/hardware.h> #include <mach/common.h> +#include <asm/system_misc.h> #include <asm/proc-fns.h> -#include <asm/system.h> #include <asm/mach-types.h> void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int) = NULL; diff --git a/arch/arm/plat-omap/debug-leds.c b/arch/arm/plat-omap/debug-leds.c index 61a1ec2a6af4..39407cbe34c6 100644 --- a/arch/arm/plat-omap/debug-leds.c +++ b/arch/arm/plat-omap/debug-leds.c @@ -15,7 +15,6 @@ #include <mach/hardware.h> #include <asm/leds.h> -#include <asm/system.h> #include <asm/mach-types.h> #include <plat/fpga.h> diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index 74300ae29b71..ecdb3da0dea9 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c @@ -36,7 +36,6 @@ #include <linux/slab.h> #include <linux/delay.h> -#include <asm/system.h> #include <mach/hardware.h> #include <plat/dma.h> diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h index cb75b657b04b..b8a96c6a1a30 100644 --- a/arch/arm/plat-omap/include/plat/gpio.h +++ b/arch/arm/plat-omap/include/plat/gpio.h @@ -218,30 +218,14 @@ extern void omap_set_gpio_debounce(int gpio, int enable); extern void omap_set_gpio_debounce_time(int gpio, int enable); /*-------------------------------------------------------------------------*/ -/* Wrappers for "new style" GPIO calls, using the new infrastructure +/* + * Wrappers for "new style" GPIO calls, using the new infrastructure * which lets us plug in FPGA, I2C, and other implementations. - * * + * * The original OMAP-specific calls should eventually be removed. 
*/ #include <linux/errno.h> #include <asm-generic/gpio.h> -static inline int irq_to_gpio(unsigned irq) -{ - int tmp; - - /* omap1 SOC mpuio */ - if (cpu_class_is_omap1() && (irq < (IH_MPUIO_BASE + 16))) - return (irq - IH_MPUIO_BASE) + OMAP_MAX_GPIO_LINES; - - /* SOC gpio */ - tmp = irq - IH_GPIO_BASE; - if (tmp < OMAP_MAX_GPIO_LINES) - return tmp; - - /* we don't supply reverse mappings for non-SOC gpios */ - return -EIO; -} - #endif diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index 089899a7db72..74daf5ed1432 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -21,6 +21,7 @@ #include <plat/orion_wdt.h> #include <plat/mv_xor.h> #include <plat/ehci-orion.h> +#include <mach/bridge-regs.h> /* Fill in the resources structure and link it into the platform device structure. There is always a memory region, and nearly @@ -568,13 +569,17 @@ void __init orion_spi_1_init(unsigned long mapbase, ****************************************************************************/ static struct orion_wdt_platform_data orion_wdt_data; +static struct resource orion_wdt_resource = + DEFINE_RES_MEM(TIMER_VIRT_BASE, 0x28); + static struct platform_device orion_wdt_device = { .name = "orion_wdt", .id = -1, .dev = { .platform_data = &orion_wdt_data, }, - .num_resources = 0, + .resource = &orion_wdt_resource, + .num_resources = 1, }; void __init orion_wdt_init(unsigned long tclk) diff --git a/arch/arm/plat-orion/include/plat/audio.h b/arch/arm/plat-orion/include/plat/audio.h index 885f8abd927b..d6a55bd2e578 100644 --- a/arch/arm/plat-orion/include/plat/audio.h +++ b/arch/arm/plat-orion/include/plat/audio.h @@ -2,7 +2,6 @@ #define __PLAT_AUDIO_H struct kirkwood_asoc_platform_data { - u32 tclk; int burst; }; #endif diff --git a/arch/arm/plat-pxa/dma.c b/arch/arm/plat-pxa/dma.c index 2d3c19d7c7b1..79ef102e3b2b 100644 --- a/arch/arm/plat-pxa/dma.c +++ b/arch/arm/plat-pxa/dma.c @@ -20,7 +20,6 @@ #include <linux/errno.h> #include <linux/dma-mapping.h> -#include <asm/system.h> #include <asm/irq.h> #include <asm/memory.h> #include <mach/hardware.h> diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/plat-s3c24xx/cpu.c index 32a09931350c..0db73ae646bc 100644 --- a/arch/arm/plat-s3c24xx/cpu.c +++ b/arch/arm/plat-s3c24xx/cpu.c @@ -35,6 +35,7 @@ #include <mach/regs-clock.h> #include <asm/irq.h> #include <asm/cacheflush.h> +#include <asm/system_info.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index 2bab4c99a234..28f898f75380 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c @@ -27,7 +27,6 @@ #include <linux/errno.h> #include <linux/io.h> -#include <asm/system.h> #include <asm/irq.h> #include <mach/hardware.h> #include <mach/dma.h> diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig index 7a308699f816..96bea3202304 100644 --- a/arch/arm/plat-s5p/Kconfig +++ b/arch/arm/plat-s5p/Kconfig @@ -9,8 +9,8 @@ config PLAT_S5P bool depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS) default y - select ARM_VIC if !ARCH_EXYNOS4 - select ARM_GIC if ARCH_EXYNOS4 + select ARM_VIC if !ARCH_EXYNOS + select ARM_GIC if ARCH_EXYNOS select GIC_NON_BANKED if ARCH_EXYNOS4 select NO_IOPORT select ARCH_REQUIRE_GPIOLIB @@ -40,6 +40,10 @@ config S5P_HRT help Use the High Resolution timer support +config S5P_DEV_UART + def_bool y + depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210) + config S5P_PM bool help diff --git a/arch/arm/plat-s5p/Makefile 
b/arch/arm/plat-s5p/Makefile index 30d8c3016e6b..4bd824136659 100644 --- a/arch/arm/plat-s5p/Makefile +++ b/arch/arm/plat-s5p/Makefile @@ -12,7 +12,6 @@ obj- := # Core files -obj-y += dev-uart.o obj-y += clock.o obj-y += irq.o obj-$(CONFIG_S5P_EXT_INT) += irq-eint.o @@ -23,5 +22,7 @@ obj-$(CONFIG_S5P_SLEEP) += sleep.o obj-$(CONFIG_S5P_HRT) += s5p-time.o # devices + +obj-$(CONFIG_S5P_DEV_UART) += dev-uart.o obj-$(CONFIG_S5P_DEV_MFC) += dev-mfc.o obj-$(CONFIG_S5P_SETUP_MIPIPHY) += setup-mipiphy.o diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c index 963edea7f7e7..f68a9bb11948 100644 --- a/arch/arm/plat-s5p/clock.c +++ b/arch/arm/plat-s5p/clock.c @@ -61,6 +61,20 @@ struct clk clk_fout_apll = { .id = -1, }; +/* BPLL clock output */ + +struct clk clk_fout_bpll = { + .name = "fout_bpll", + .id = -1, +}; + +/* CPLL clock output */ + +struct clk clk_fout_cpll = { + .name = "fout_cpll", + .id = -1, +}; + /* MPLL clock output * No need .ctrlbit, this is always on */ @@ -101,6 +115,28 @@ struct clksrc_sources clk_src_apll = { .nr_sources = ARRAY_SIZE(clk_src_apll_list), }; +/* Possible clock sources for BPLL Mux */ +static struct clk *clk_src_bpll_list[] = { + [0] = &clk_fin_bpll, + [1] = &clk_fout_bpll, +}; + +struct clksrc_sources clk_src_bpll = { + .sources = clk_src_bpll_list, + .nr_sources = ARRAY_SIZE(clk_src_bpll_list), +}; + +/* Possible clock sources for CPLL Mux */ +static struct clk *clk_src_cpll_list[] = { + [0] = &clk_fin_cpll, + [1] = &clk_fout_cpll, +}; + +struct clksrc_sources clk_src_cpll = { + .sources = clk_src_cpll_list, + .nr_sources = ARRAY_SIZE(clk_src_cpll_list), +}; + /* Possible clock sources for MPLL Mux */ static struct clk *clk_src_mpll_list[] = { [0] = &clk_fin_mpll, diff --git a/arch/arm/plat-s5p/irq-pm.c b/arch/arm/plat-s5p/irq-pm.c index 327acb3a4464..d1bfecae6c9f 100644 --- a/arch/arm/plat-s5p/irq-pm.c +++ b/arch/arm/plat-s5p/irq-pm.c @@ -39,19 +39,32 @@ unsigned long s3c_irqwake_eintallow = 0xffffffffL; int s3c_irq_wake(struct irq_data *data, unsigned int state) { unsigned long irqbit; + unsigned int irq_rtc_tic, irq_rtc_alarm; + +#ifdef CONFIG_ARCH_EXYNOS + if (soc_is_exynos5250()) { + irq_rtc_tic = EXYNOS5_IRQ_RTC_TIC; + irq_rtc_alarm = EXYNOS5_IRQ_RTC_ALARM; + } else { + irq_rtc_tic = EXYNOS4_IRQ_RTC_TIC; + irq_rtc_alarm = EXYNOS4_IRQ_RTC_ALARM; + } +#else + irq_rtc_tic = IRQ_RTC_TIC; + irq_rtc_alarm = IRQ_RTC_ALARM; +#endif + + if (data->irq == irq_rtc_tic || data->irq == irq_rtc_alarm) { + irqbit = 1 << (data->irq + 1 - irq_rtc_alarm); - switch (data->irq) { - case IRQ_RTC_TIC: - case IRQ_RTC_ALARM: - irqbit = 1 << (data->irq + 1 - IRQ_RTC_ALARM); if (!state) s3c_irqwake_intmask |= irqbit; else s3c_irqwake_intmask &= ~irqbit; - break; - default: + } else { return -ENOENT; } + return 0; } diff --git a/arch/arm/plat-samsung/cpu.c b/arch/arm/plat-samsung/cpu.c index 81c06d44c11e..46b426e8aff5 100644 --- a/arch/arm/plat-samsung/cpu.c +++ b/arch/arm/plat-samsung/cpu.c @@ -15,7 +15,6 @@ #include <linux/init.h> #include <linux/io.h> -#include <asm/system.h> #include <mach/map.h> #include <plat/cpu.h> diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h index 73cb3cfd0685..787ceaca0be8 100644 --- a/arch/arm/plat-samsung/include/plat/cpu.h +++ b/arch/arm/plat-samsung/include/plat/cpu.h @@ -42,6 +42,9 @@ extern unsigned long samsung_cpu_id; #define EXYNOS4412_CPU_ID 0xE4412200 #define EXYNOS4_CPU_MASK 0xFFFE0000 +#define EXYNOS5250_SOC_ID 0x43520000 +#define EXYNOS5_SOC_MASK 0xFFFFF000 + #define 
IS_SAMSUNG_CPU(name, id, mask) \ static inline int is_samsung_##name(void) \ { \ @@ -58,6 +61,7 @@ IS_SAMSUNG_CPU(s5pv210, S5PV210_CPU_ID, S5PV210_CPU_MASK) IS_SAMSUNG_CPU(exynos4210, EXYNOS4210_CPU_ID, EXYNOS4_CPU_MASK) IS_SAMSUNG_CPU(exynos4212, EXYNOS4212_CPU_ID, EXYNOS4_CPU_MASK) IS_SAMSUNG_CPU(exynos4412, EXYNOS4412_CPU_ID, EXYNOS4_CPU_MASK) +IS_SAMSUNG_CPU(exynos5250, EXYNOS5250_SOC_ID, EXYNOS5_SOC_MASK) #if defined(CONFIG_CPU_S3C2410) || defined(CONFIG_CPU_S3C2412) || \ defined(CONFIG_CPU_S3C2416) || defined(CONFIG_CPU_S3C2440) || \ @@ -120,6 +124,12 @@ IS_SAMSUNG_CPU(exynos4412, EXYNOS4412_CPU_ID, EXYNOS4_CPU_MASK) #define EXYNOS4210_REV_1_0 (0x10) #define EXYNOS4210_REV_1_1 (0x11) +#if defined(CONFIG_SOC_EXYNOS5250) +# define soc_is_exynos5250() is_samsung_exynos5250() +#else +# define soc_is_exynos5250() 0 +#endif + #define IODESC_ENT(x) { (unsigned long)S3C24XX_VA_##x, __phys_to_pfn(S3C24XX_PA_##x), S3C24XX_SZ_##x, MT_DEVICE } #ifndef MHZ diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h index 5e7972de3ed5..2155d4af62a3 100644 --- a/arch/arm/plat-samsung/include/plat/devs.h +++ b/arch/arm/plat-samsung/include/plat/devs.h @@ -26,6 +26,8 @@ struct s3c24xx_uart_resources { extern struct s3c24xx_uart_resources s3c2410_uart_resources[]; extern struct s3c24xx_uart_resources s3c64xx_uart_resources[]; extern struct s3c24xx_uart_resources s5p_uart_resources[]; +extern struct s3c24xx_uart_resources exynos4_uart_resources[]; +extern struct s3c24xx_uart_resources exynos5_uart_resources[]; extern struct platform_device *s3c24xx_uart_devs[]; extern struct platform_device *s3c24xx_uart_src[]; diff --git a/arch/arm/plat-samsung/include/plat/s5p-clock.h b/arch/arm/plat-samsung/include/plat/s5p-clock.h index 984bf9e7bc89..1de4b32f98e9 100644 --- a/arch/arm/plat-samsung/include/plat/s5p-clock.h +++ b/arch/arm/plat-samsung/include/plat/s5p-clock.h @@ -18,6 +18,8 @@ #define GET_DIV(clk, field) ((((clk) & field##_MASK) >> field##_SHIFT) + 1) #define clk_fin_apll clk_ext_xtal_mux +#define clk_fin_bpll clk_ext_xtal_mux +#define clk_fin_cpll clk_ext_xtal_mux #define clk_fin_mpll clk_ext_xtal_mux #define clk_fin_epll clk_ext_xtal_mux #define clk_fin_dpll clk_ext_xtal_mux @@ -29,6 +31,8 @@ extern struct clk clk_xusbxti; extern struct clk clk_48m; extern struct clk s5p_clk_27m; extern struct clk clk_fout_apll; +extern struct clk clk_fout_bpll; +extern struct clk clk_fout_cpll; extern struct clk clk_fout_mpll; extern struct clk clk_fout_epll; extern struct clk clk_fout_dpll; @@ -37,6 +41,8 @@ extern struct clk clk_arm; extern struct clk clk_vpll; extern struct clksrc_sources clk_src_apll; +extern struct clksrc_sources clk_src_bpll; +extern struct clksrc_sources clk_src_cpll; extern struct clksrc_sources clk_src_mpll; extern struct clksrc_sources clk_src_epll; extern struct clksrc_sources clk_src_dpll; diff --git a/arch/arm/plat-samsung/include/plat/uncompress.h b/arch/arm/plat-samsung/include/plat/uncompress.h index ee48e12a1e72..7e068d182c3d 100644 --- a/arch/arm/plat-samsung/include/plat/uncompress.h +++ b/arch/arm/plat-samsung/include/plat/uncompress.h @@ -37,7 +37,9 @@ static void arch_detect_cpu(void); /* how many bytes we allow into the FIFO at a time in FIFO mode */ #define FIFO_MAX (14) +#ifdef S3C_PA_UART #define uart_base S3C_PA_UART + (S3C_UART_OFFSET * CONFIG_S3C_LOWLEVEL_UART_PORT) +#endif static __inline__ void uart_wr(unsigned int reg, unsigned int val) diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c 
index 51583cd30164..f980cf3d2baa 100644 --- a/arch/arm/plat-samsung/irq-vic-timer.c +++ b/arch/arm/plat-samsung/irq-vic-timer.c @@ -19,6 +19,7 @@ #include <linux/io.h> #include <mach/map.h> +#include <plat/cpu.h> #include <plat/irq-vic-timer.h> #include <plat/regs-timer.h> @@ -57,6 +58,21 @@ void __init s3c_init_vic_timer_irq(unsigned int num, unsigned int timer_irq) struct irq_chip_type *ct; unsigned int i; +#ifdef CONFIG_ARCH_EXYNOS + if (soc_is_exynos5250()) { + pirq[0] = EXYNOS5_IRQ_TIMER0_VIC; + pirq[1] = EXYNOS5_IRQ_TIMER1_VIC; + pirq[2] = EXYNOS5_IRQ_TIMER2_VIC; + pirq[3] = EXYNOS5_IRQ_TIMER3_VIC; + pirq[4] = EXYNOS5_IRQ_TIMER4_VIC; + } else { + pirq[0] = EXYNOS4_IRQ_TIMER0_VIC; + pirq[1] = EXYNOS4_IRQ_TIMER1_VIC; + pirq[2] = EXYNOS4_IRQ_TIMER2_VIC; + pirq[3] = EXYNOS4_IRQ_TIMER3_VIC; + pirq[4] = EXYNOS4_IRQ_TIMER4_VIC; + } +#endif s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq, S3C64XX_TINT_CSTAT, handle_level_irq); diff --git a/arch/arm/plat-samsung/time.c b/arch/arm/plat-samsung/time.c index e3bb806bbafe..4dcb11c3d894 100644 --- a/arch/arm/plat-samsung/time.c +++ b/arch/arm/plat-samsung/time.c @@ -28,7 +28,6 @@ #include <linux/io.h> #include <linux/platform_device.h> -#include <asm/system.h> #include <asm/leds.h> #include <asm/mach-types.h> diff --git a/arch/arm/plat-spear/restart.c b/arch/arm/plat-spear/restart.c index 2b4e3d82957c..16f203e78d89 100644 --- a/arch/arm/plat-spear/restart.c +++ b/arch/arm/plat-spear/restart.c @@ -11,6 +11,7 @@ * warranty of any kind, whether express or implied. */ #include <linux/io.h> +#include <asm/system_misc.h> #include <asm/hardware/sp810.h> #include <mach/hardware.h> #include <mach/generic.h> diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 8f3ccddbdafd..858748eaa144 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -18,7 +18,9 @@ #include <linux/smp.h> #include <linux/init.h> +#include <asm/cp15.h> #include <asm/cputype.h> +#include <asm/system_info.h> #include <asm/thread_notify.h> #include <asm/vfp.h> diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c index 7c756fb189f7..afeae8978a8d 100644 --- a/arch/avr32/boards/atngw100/setup.c +++ b/arch/avr32/boards/atngw100/setup.c @@ -97,6 +97,7 @@ static struct atmel_nand_data atngw100mkii_nand_data __initdata = { .rdy_pin = GPIO_PIN_PB(28), .enable_pin = GPIO_PIN_PE(23), .bus_width_16 = true, + .ecc_mode = NAND_ECC_SOFT, .parts = nand_partitions, .num_parts = ARRAY_SIZE(nand_partitions), }; diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c index c56ddac85d61..dc5263321480 100644 --- a/arch/avr32/boards/atstk1000/atstk1002.c +++ b/arch/avr32/boards/atstk1000/atstk1002.c @@ -95,6 +95,7 @@ static struct atmel_nand_data atstk1006_nand_data __initdata = { .ale = 22, .rdy_pin = GPIO_PIN_PB(30), .enable_pin = GPIO_PIN_PB(29), + .ecc_mode = NAND_ECC_SOFT, .parts = nand_partitions, .num_parts = ARRAY_SIZE(num_partitions), }; diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h index e0ac2631c87e..61407279208a 100644 --- a/arch/avr32/include/asm/atomic.h +++ b/arch/avr32/include/asm/atomic.h @@ -15,7 +15,7 @@ #define __ASM_AVR32_ATOMIC_H #include <linux/types.h> -#include <asm/system.h> +#include <asm/cmpxchg.h> #define ATOMIC_INIT(i) { (i) } diff --git a/arch/avr32/include/asm/barrier.h b/arch/avr32/include/asm/barrier.h new file mode 100644 index 000000000000..808001c9cf8c --- /dev/null +++ b/arch/avr32/include/asm/barrier.h @@ -0,0 +1,27 @@ +/* + 
* Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_AVR32_BARRIER_H +#define __ASM_AVR32_BARRIER_H + +#define mb() asm volatile("" : : : "memory") +#define rmb() mb() +#define wmb() asm volatile("sync 0" : : : "memory") +#define read_barrier_depends() do { } while(0) +#define set_mb(var, value) do { var = value; mb(); } while(0) + +#ifdef CONFIG_SMP +# error "The AVR32 port does not support SMP" +#else +# define smp_mb() barrier() +# define smp_rmb() barrier() +# define smp_wmb() barrier() +# define smp_read_barrier_depends() do { } while(0) +#endif + + +#endif /* __ASM_AVR32_BARRIER_H */ diff --git a/arch/avr32/include/asm/bitops.h b/arch/avr32/include/asm/bitops.h index b70c19bab63a..ebe7ad3f490b 100644 --- a/arch/avr32/include/asm/bitops.h +++ b/arch/avr32/include/asm/bitops.h @@ -13,7 +13,6 @@ #endif #include <asm/byteorder.h> -#include <asm/system.h> /* * clear_bit() doesn't provide any barrier for the compiler diff --git a/arch/avr32/include/asm/bug.h b/arch/avr32/include/asm/bug.h index 2aa373cc61b5..85a92d099adb 100644 --- a/arch/avr32/include/asm/bug.h +++ b/arch/avr32/include/asm/bug.h @@ -70,4 +70,9 @@ #include <asm-generic/bug.h> +struct pt_regs; +void die(const char *str, struct pt_regs *regs, long err); +void _exception(long signr, struct pt_regs *regs, int code, + unsigned long addr); + #endif /* __ASM_AVR32_BUG_H */ diff --git a/arch/avr32/include/asm/system.h b/arch/avr32/include/asm/cmpxchg.h index 62d9ded01635..962a6aeab787 100644 --- a/arch/avr32/include/asm/system.h +++ b/arch/avr32/include/asm/cmpxchg.h @@ -1,76 +1,22 @@ /* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc. + * + * But use these as seldom as possible since they are slower than + * regular operations. + * * Copyright (C) 2004-2006 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#ifndef __ASM_AVR32_SYSTEM_H -#define __ASM_AVR32_SYSTEM_H - -#include <linux/compiler.h> -#include <linux/linkage.h> -#include <linux/types.h> - -#include <asm/ptrace.h> -#include <asm/sysreg.h> +#ifndef __ASM_AVR32_CMPXCHG_H +#define __ASM_AVR32_CMPXCHG_H #define xchg(ptr,x) \ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) -#define nop() asm volatile("nop") - -#define mb() asm volatile("" : : : "memory") -#define rmb() mb() -#define wmb() asm volatile("sync 0" : : : "memory") -#define read_barrier_depends() do { } while(0) -#define set_mb(var, value) do { var = value; mb(); } while(0) - -/* - * Help PathFinder and other Nexus-compliant debuggers keep track of - * the current PID by emitting an Ownership Trace Message each time we - * switch task. - */ -#ifdef CONFIG_OWNERSHIP_TRACE -#include <asm/ocd.h> -#define finish_arch_switch(prev) \ - do { \ - ocd_write(PID, prev->pid); \ - ocd_write(PID, current->pid); \ - } while(0) -#endif - -/* - * switch_to(prev, next, last) should switch from task `prev' to task - * `next'. `prev' will never be the same as `next'. - * - * We just delegate everything to the __switch_to assembly function, - * which is implemented in arch/avr32/kernel/switch_to.S - * - * mb() tells GCC not to cache `current' across this call. 
- */ -struct cpu_context; -struct task_struct; -extern struct task_struct *__switch_to(struct task_struct *, - struct cpu_context *, - struct cpu_context *); -#define switch_to(prev, next, last) \ - do { \ - last = __switch_to(prev, &prev->thread.cpu_context + 1, \ - &next->thread.cpu_context); \ - } while (0) - -#ifdef CONFIG_SMP -# error "The AVR32 port does not support SMP" -#else -# define smp_mb() barrier() -# define smp_rmb() barrier() -# define smp_wmb() barrier() -# define smp_read_barrier_depends() do { } while(0) -#endif - -#include <linux/irqflags.h> - extern void __xchg_called_with_bad_pointer(void); static inline unsigned long xchg_u32(u32 val, volatile u32 *m) @@ -168,11 +114,4 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) -struct pt_regs; -void die(const char *str, struct pt_regs *regs, long err); -void _exception(long signr, struct pt_regs *regs, int code, - unsigned long addr); - -#define arch_align_stack(x) (x) - -#endif /* __ASM_AVR32_SYSTEM_H */ +#endif /* __ASM_AVR32_CMPXCHG_H */ diff --git a/arch/avr32/include/asm/exec.h b/arch/avr32/include/asm/exec.h new file mode 100644 index 000000000000..f467be8bf823 --- /dev/null +++ b/arch/avr32/include/asm/exec.h @@ -0,0 +1,13 @@ +/* + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_AVR32_EXEC_H +#define __ASM_AVR32_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* __ASM_AVR32_EXEC_H */ diff --git a/arch/avr32/include/asm/special_insns.h b/arch/avr32/include/asm/special_insns.h new file mode 100644 index 000000000000..f922218dfaa5 --- /dev/null +++ b/arch/avr32/include/asm/special_insns.h @@ -0,0 +1,13 @@ +/* + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_AVR32_SPECIAL_INSNS_H +#define __ASM_AVR32_SPECIAL_INSNS_H + +#define nop() asm volatile("nop") + +#endif /* __ASM_AVR32_SPECIAL_INSNS_H */ diff --git a/arch/avr32/include/asm/switch_to.h b/arch/avr32/include/asm/switch_to.h new file mode 100644 index 000000000000..9a8e9d5208d4 --- /dev/null +++ b/arch/avr32/include/asm/switch_to.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __ASM_AVR32_SWITCH_TO_H +#define __ASM_AVR32_SWITCH_TO_H + +/* + * Help PathFinder and other Nexus-compliant debuggers keep track of + * the current PID by emitting an Ownership Trace Message each time we + * switch task. + */ +#ifdef CONFIG_OWNERSHIP_TRACE +#include <asm/ocd.h> +#define finish_arch_switch(prev) \ + do { \ + ocd_write(PID, prev->pid); \ + ocd_write(PID, current->pid); \ + } while(0) +#endif + +/* + * switch_to(prev, next, last) should switch from task `prev' to task + * `next'. `prev' will never be the same as `next'. + * + * We just delegate everything to the __switch_to assembly function, + * which is implemented in arch/avr32/kernel/switch_to.S + * + * mb() tells GCC not to cache `current' across this call. 
+ */ +struct cpu_context; +struct task_struct; +extern struct task_struct *__switch_to(struct task_struct *, + struct cpu_context *, + struct cpu_context *); +#define switch_to(prev, next, last) \ + do { \ + last = __switch_to(prev, &prev->thread.cpu_context + 1, \ + &next->thread.cpu_context); \ + } while (0) + + +#endif /* __ASM_AVR32_SWITCH_TO_H */ diff --git a/arch/avr32/mach-at32ap/cpufreq.c b/arch/avr32/mach-at32ap/cpufreq.c index 627743326253..18b765629a0c 100644 --- a/arch/avr32/mach-at32ap/cpufreq.c +++ b/arch/avr32/mach-at32ap/cpufreq.c @@ -19,7 +19,6 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/export.h> -#include <asm/system.h> static struct clk *cpuclk; diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h index 67b111ce332d..71733866cb4f 100644 --- a/arch/avr32/mach-at32ap/include/mach/board.h +++ b/arch/avr32/mach-at32ap/include/mach/board.h @@ -7,6 +7,7 @@ #include <linux/types.h> #include <linux/serial.h> #include <linux/platform_data/macb.h> +#include <linux/platform_data/atmel_nand.h> #define GPIO_PIN_NONE (-1) @@ -116,18 +117,6 @@ struct platform_device * at32_add_device_cf(unsigned int id, unsigned int extint, struct cf_platform_data *data); -/* NAND / SmartMedia */ -struct atmel_nand_data { - int enable_pin; /* chip enable */ - int det_pin; /* card detect */ - int rdy_pin; /* ready/busy */ - u8 rdy_pin_active_low; /* rdy_pin value is inverted */ - u8 ale; /* address line number connected to ALE */ - u8 cle; /* address line number connected to CLE */ - u8 bus_width_16; /* buswidth is 16 bit */ - struct mtd_partition *parts; - unsigned int num_parts; -}; struct platform_device * at32_add_device_nand(unsigned int id, struct atmel_nand_data *data); diff --git a/arch/avr32/oprofile/op_model_avr32.c b/arch/avr32/oprofile/op_model_avr32.c index a3e9b3c4845a..f74b7809e089 100644 --- a/arch/avr32/oprofile/op_model_avr32.c +++ b/arch/avr32/oprofile/op_model_avr32.c @@ -17,7 +17,6 @@ #include <linux/types.h> #include <asm/sysreg.h> -#include <asm/system.h> #define AVR32_PERFCTR_IRQ_GROUP 0 #define AVR32_PERFCTR_IRQ_LINE 1 diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h deleted file mode 100644 index a7f40578587c..000000000000 --- a/arch/blackfin/include/asm/system.h +++ /dev/null @@ -1,5 +0,0 @@ -/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */ -#include <asm/barrier.h> -#include <asm/cmpxchg.h> -#include <asm/exec.h> -#include <asm/switch_to.h> diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index 13dcf78adf91..3af601e31e66 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -3,7 +3,6 @@ include include/asm-generic/Kbuild.asm generic-y += atomic.h generic-y += auxvec.h generic-y += bitsperlong.h -generic-y += bug.h generic-y += bugs.h generic-y += cputime.h generic-y += current.h diff --git a/arch/c6x/include/asm/barrier.h b/arch/c6x/include/asm/barrier.h new file mode 100644 index 000000000000..538240e85909 --- /dev/null +++ b/arch/c6x/include/asm/barrier.h @@ -0,0 +1,27 @@ +/* + * Port on Texas Instruments TMS320C6x architecture + * + * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated + * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef _ASM_C6X_BARRIER_H +#define _ASM_C6X_BARRIER_H + +#define nop() asm("NOP\n"); + +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define set_mb(var, value) do { var = value; mb(); } while (0) +#define set_wmb(var, value) do { var = value; wmb(); } while (0) + +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while (0) + +#endif /* _ASM_C6X_BARRIER_H */ diff --git a/arch/c6x/include/asm/bitops.h b/arch/c6x/include/asm/bitops.h index 39ab7e874d96..0bec7e5036a8 100644 --- a/arch/c6x/include/asm/bitops.h +++ b/arch/c6x/include/asm/bitops.h @@ -15,7 +15,6 @@ #include <linux/bitops.h> -#include <asm/system.h> #include <asm/byteorder.h> /* diff --git a/arch/c6x/include/asm/bug.h b/arch/c6x/include/asm/bug.h new file mode 100644 index 000000000000..8d59933dd6fe --- /dev/null +++ b/arch/c6x/include/asm/bug.h @@ -0,0 +1,23 @@ +/* + * Port on Texas Instruments TMS320C6x architecture + * + * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated + * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_C6X_BUG_H +#define _ASM_C6X_BUG_H + +#include <linux/linkage.h> +#include <asm-generic/bug.h> + +struct pt_regs; + +extern void die(char *str, struct pt_regs *fp, int nr); +extern asmlinkage int process_exception(struct pt_regs *regs); +extern asmlinkage void enable_exception(void); + +#endif /* _ASM_C6X_BUG_H */ diff --git a/arch/c6x/include/asm/cmpxchg.h b/arch/c6x/include/asm/cmpxchg.h new file mode 100644 index 000000000000..b27c8cefb8c3 --- /dev/null +++ b/arch/c6x/include/asm/cmpxchg.h @@ -0,0 +1,68 @@ +/* + * Port on Texas Instruments TMS320C6x architecture + * + * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated + * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_C6X_CMPXCHG_H +#define _ASM_C6X_CMPXCHG_H + +#include <linux/irqflags.h> + +/* + * Misc. functions + */ +static inline unsigned int __xchg(unsigned int x, volatile void *ptr, int size) +{ + unsigned int tmp; + unsigned long flags; + + local_irq_save(flags); + + switch (size) { + case 1: + tmp = 0; + tmp = *((unsigned char *) ptr); + *((unsigned char *) ptr) = (unsigned char) x; + break; + case 2: + tmp = 0; + tmp = *((unsigned short *) ptr); + *((unsigned short *) ptr) = x; + break; + case 4: + tmp = 0; + tmp = *((unsigned int *) ptr); + *((unsigned int *) ptr) = x; + break; + } + local_irq_restore(flags); + return tmp; +} + +#define xchg(ptr, x) \ + ((__typeof__(*(ptr)))__xchg((unsigned int)(x), (void *) (ptr), \ + sizeof(*(ptr)))) +#define tas(ptr) xchg((ptr), 1) + + +#include <asm-generic/cmpxchg-local.h> + +/* + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. 
+ */ +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \ + (unsigned long)(o), \ + (unsigned long)(n), \ + sizeof(*(ptr)))) +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +#include <asm-generic/cmpxchg.h> + +#endif /* _ASM_C6X_CMPXCHG_H */ diff --git a/arch/c6x/include/asm/exec.h b/arch/c6x/include/asm/exec.h new file mode 100644 index 000000000000..0fea482cdc84 --- /dev/null +++ b/arch/c6x/include/asm/exec.h @@ -0,0 +1,6 @@ +#ifndef _ASM_C6X_EXEC_H +#define _ASM_C6X_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* _ASM_C6X_EXEC_H */ diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h index 77ecbded1f37..3ff7fab956ba 100644 --- a/arch/c6x/include/asm/processor.h +++ b/arch/c6x/include/asm/processor.h @@ -129,4 +129,13 @@ extern unsigned long get_wchan(struct task_struct *p); extern const struct seq_operations cpuinfo_op; +/* Reset the board */ +#define HARD_RESET_NOW() + +extern unsigned int c6x_core_freq; + + +extern void (*c6x_restart)(void); +extern void (*c6x_halt)(void); + #endif /* ASM_C6X_PROCESSOR_H */ diff --git a/arch/c6x/include/asm/setup.h b/arch/c6x/include/asm/setup.h index 1808f279f82e..a01e31896fa9 100644 --- a/arch/c6x/include/asm/setup.h +++ b/arch/c6x/include/asm/setup.h @@ -27,6 +27,7 @@ extern unsigned int c6x_devstat; extern unsigned char c6x_fuse_mac[6]; extern void machine_init(unsigned long dt_ptr); +extern void time_init(void); #endif /* !__ASSEMBLY__ */ #endif /* _ASM_C6X_SETUP_H */ diff --git a/arch/c6x/include/asm/special_insns.h b/arch/c6x/include/asm/special_insns.h new file mode 100644 index 000000000000..59672bca841d --- /dev/null +++ b/arch/c6x/include/asm/special_insns.h @@ -0,0 +1,63 @@ +/* + * Port on Texas Instruments TMS320C6x architecture + * + * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated + * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef _ASM_C6X_SPECIAL_INSNS_H +#define _ASM_C6X_SPECIAL_INSNS_H + + +#define get_creg(reg) \ + ({ unsigned int __x; \ + asm volatile ("mvc .s2 " #reg ",%0\n" : "=b"(__x)); __x; }) + +#define set_creg(reg, v) \ + do { unsigned int __x = (unsigned int)(v); \ + asm volatile ("mvc .s2 %0," #reg "\n" : : "b"(__x)); \ + } while (0) + +#define or_creg(reg, n) \ + do { unsigned __x, __n = (unsigned)(n); \ + asm volatile ("mvc .s2 " #reg ",%0\n" \ + "or .l2 %1,%0,%0\n" \ + "mvc .s2 %0," #reg "\n" \ + "nop\n" \ + : "=&b"(__x) : "b"(__n)); \ + } while (0) + +#define and_creg(reg, n) \ + do { unsigned __x, __n = (unsigned)(n); \ + asm volatile ("mvc .s2 " #reg ",%0\n" \ + "and .l2 %1,%0,%0\n" \ + "mvc .s2 %0," #reg "\n" \ + "nop\n" \ + : "=&b"(__x) : "b"(__n)); \ + } while (0) + +#define get_coreid() (get_creg(DNUM) & 0xff) + +/* Set/get IST */ +#define set_ist(x) set_creg(ISTP, x) +#define get_ist() get_creg(ISTP) + +/* + * Exception management + */ +#define disable_exception() +#define get_except_type() get_creg(EFR) +#define ack_exception(type) set_creg(ECR, 1 << (type)) +#define get_iexcept() get_creg(IERR) +#define set_iexcept(mask) set_creg(IERR, (mask)) + +#define _extu(x, s, e) \ + ({ unsigned int __x; \ + asm volatile ("extu .S2 %3,%1,%2,%0\n" : \ + "=b"(__x) : "n"(s), "n"(e), "b"(x)); \ + __x; }) + +#endif /* _ASM_C6X_SPECIAL_INSNS_H */ diff --git a/arch/c6x/include/asm/switch_to.h b/arch/c6x/include/asm/switch_to.h new file mode 100644 index 000000000000..af6c71fe75ec --- /dev/null +++ b/arch/c6x/include/asm/switch_to.h @@ -0,0 +1,33 @@ +/* + * Port on Texas Instruments TMS320C6x architecture + * + * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated + * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_C6X_SWITCH_TO_H +#define _ASM_C6X_SWITCH_TO_H + +#include <linux/linkage.h> + +#define prepare_to_switch() do { } while (0) + +struct task_struct; +struct thread_struct; +asmlinkage void *__switch_to(struct thread_struct *prev, + struct thread_struct *next, + struct task_struct *tsk); + +#define switch_to(prev, next, last) \ + do { \ + current->thread.wchan = (u_long) __builtin_return_address(0); \ + (last) = __switch_to(&(prev)->thread, \ + &(next)->thread, (prev)); \ + mb(); \ + current->thread.wchan = 0; \ + } while (0) + +#endif /* _ASM_C6X_SWITCH_TO_H */ diff --git a/arch/c6x/include/asm/system.h b/arch/c6x/include/asm/system.h deleted file mode 100644 index e076dc0eacc8..000000000000 --- a/arch/c6x/include/asm/system.h +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Port on Texas Instruments TMS320C6x architecture - * - * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated - * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef _ASM_C6X_SYSTEM_H -#define _ASM_C6X_SYSTEM_H - -#include <linux/linkage.h> -#include <linux/irqflags.h> - -#define prepare_to_switch() do { } while (0) - -struct task_struct; -struct thread_struct; -asmlinkage void *__switch_to(struct thread_struct *prev, - struct thread_struct *next, - struct task_struct *tsk); - -#define switch_to(prev, next, last) \ - do { \ - current->thread.wchan = (u_long) __builtin_return_address(0); \ - (last) = __switch_to(&(prev)->thread, \ - &(next)->thread, (prev)); \ - mb(); \ - current->thread.wchan = 0; \ - } while (0) - -/* Reset the board */ -#define HARD_RESET_NOW() - -#define get_creg(reg) \ - ({ unsigned int __x; \ - asm volatile ("mvc .s2 " #reg ",%0\n" : "=b"(__x)); __x; }) - -#define set_creg(reg, v) \ - do { unsigned int __x = (unsigned int)(v); \ - asm volatile ("mvc .s2 %0," #reg "\n" : : "b"(__x)); \ - } while (0) - -#define or_creg(reg, n) \ - do { unsigned __x, __n = (unsigned)(n); \ - asm volatile ("mvc .s2 " #reg ",%0\n" \ - "or .l2 %1,%0,%0\n" \ - "mvc .s2 %0," #reg "\n" \ - "nop\n" \ - : "=&b"(__x) : "b"(__n)); \ - } while (0) - -#define and_creg(reg, n) \ - do { unsigned __x, __n = (unsigned)(n); \ - asm volatile ("mvc .s2 " #reg ",%0\n" \ - "and .l2 %1,%0,%0\n" \ - "mvc .s2 %0," #reg "\n" \ - "nop\n" \ - : "=&b"(__x) : "b"(__n)); \ - } while (0) - -#define get_coreid() (get_creg(DNUM) & 0xff) - -/* Set/get IST */ -#define set_ist(x) set_creg(ISTP, x) -#define get_ist() get_creg(ISTP) - -/* - * Exception management - */ -asmlinkage void enable_exception(void); -#define disable_exception() -#define get_except_type() get_creg(EFR) -#define ack_exception(type) set_creg(ECR, 1 << (type)) -#define get_iexcept() get_creg(IERR) -#define set_iexcept(mask) set_creg(IERR, (mask)) - -/* - * Misc. functions - */ -#define nop() asm("NOP\n"); -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) - -#define xchg(ptr, x) \ - ((__typeof__(*(ptr)))__xchg((unsigned int)(x), (void *) (ptr), \ - sizeof(*(ptr)))) -#define tas(ptr) xchg((ptr), 1) - -unsigned int _lmbd(unsigned int, unsigned int); -unsigned int _bitr(unsigned int); - -struct __xchg_dummy { unsigned int a[100]; }; -#define __xg(x) ((volatile struct __xchg_dummy *)(x)) - -static inline unsigned int __xchg(unsigned int x, volatile void *ptr, int size) -{ - unsigned int tmp; - unsigned long flags; - - local_irq_save(flags); - - switch (size) { - case 1: - tmp = 0; - tmp = *((unsigned char *) ptr); - *((unsigned char *) ptr) = (unsigned char) x; - break; - case 2: - tmp = 0; - tmp = *((unsigned short *) ptr); - *((unsigned short *) ptr) = x; - break; - case 4: - tmp = 0; - tmp = *((unsigned int *) ptr); - *((unsigned int *) ptr) = x; - break; - } - local_irq_restore(flags); - return tmp; -} - -#include <asm-generic/cmpxchg-local.h> - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. 
- */ -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \ - (unsigned long)(o), \ - (unsigned long)(n), \ - sizeof(*(ptr)))) -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - -#include <asm-generic/cmpxchg.h> - -#define _extu(x, s, e) \ - ({ unsigned int __x; \ - asm volatile ("extu .S2 %3,%1,%2,%0\n" : \ - "=b"(__x) : "n"(s), "n"(e), "b"(x)); \ - __x; }) - - -extern unsigned int c6x_core_freq; - -struct pt_regs; - -extern void die(char *str, struct pt_regs *fp, int nr); -extern asmlinkage int process_exception(struct pt_regs *regs); -extern void time_init(void); -extern void free_initmem(void); - -extern void (*c6x_restart)(void); -extern void (*c6x_halt)(void); - -#endif /* _ASM_C6X_SYSTEM_H */ diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c index d77bcfdf0d8e..65b8ddf54b44 100644 --- a/arch/c6x/kernel/irq.c +++ b/arch/c6x/kernel/irq.c @@ -27,6 +27,7 @@ #include <linux/kernel_stat.h> #include <asm/megamod-pic.h> +#include <asm/special_insns.h> unsigned long irq_err_count; diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c index 0c07921747f4..ce46186600c5 100644 --- a/arch/c6x/kernel/setup.c +++ b/arch/c6x/kernel/setup.c @@ -34,6 +34,7 @@ #include <asm/dscr.h> #include <asm/clock.h> #include <asm/soc.h> +#include <asm/special_insns.h> static const char *c6x_soc_name; diff --git a/arch/c6x/kernel/soc.c b/arch/c6x/kernel/soc.c index dd45bc39af0e..0748c94ebef6 100644 --- a/arch/c6x/kernel/soc.c +++ b/arch/c6x/kernel/soc.c @@ -11,7 +11,6 @@ #include <linux/module.h> #include <linux/ctype.h> #include <linux/etherdevice.h> -#include <asm/system.h> #include <asm/setup.h> #include <asm/soc.h> diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c index 4c9f136165f7..356ee84cad95 100644 --- a/arch/c6x/kernel/time.c +++ b/arch/c6x/kernel/time.c @@ -20,6 +20,7 @@ #include <linux/timex.h> #include <linux/profile.h> +#include <asm/special_insns.h> #include <asm/timer64.h> static u32 sched_clock_multiplier; diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c index f50e3edd6dad..1be74e5b4788 100644 --- a/arch/c6x/kernel/traps.c +++ b/arch/c6x/kernel/traps.c @@ -14,6 +14,7 @@ #include <linux/bug.h> #include <asm/soc.h> +#include <asm/special_insns.h> #include <asm/traps.h> int (*c6x_nmi_handler)(struct pt_regs *regs); diff --git a/arch/c6x/platforms/timer64.c b/arch/c6x/platforms/timer64.c index 03c03c249191..3c73d74a4674 100644 --- a/arch/c6x/platforms/timer64.c +++ b/arch/c6x/platforms/timer64.c @@ -15,6 +15,7 @@ #include <linux/of_address.h> #include <asm/soc.h> #include <asm/dscr.h> +#include <asm/special_insns.h> #include <asm/timer64.h> struct timer_regs { diff --git a/arch/cris/arch-v10/drivers/ds1302.c b/arch/cris/arch-v10/drivers/ds1302.c index 3d655dcc65da..74f99c688c8d 100644 --- a/arch/cris/arch-v10/drivers/ds1302.c +++ b/arch/cris/arch-v10/drivers/ds1302.c @@ -24,7 +24,6 @@ #include <linux/capability.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <arch/svinto.h> #include <asm/io.h> #include <asm/rtc.h> diff --git a/arch/cris/arch-v10/drivers/gpio.c b/arch/cris/arch-v10/drivers/gpio.c index a276f0811731..609d5510410e 100644 --- a/arch/cris/arch-v10/drivers/gpio.c +++ b/arch/cris/arch-v10/drivers/gpio.c @@ -24,7 +24,6 @@ #include <asm/etraxgpio.h> #include <arch/svinto.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/irq.h> #include <arch/io_interface_mux.h> diff --git a/arch/cris/arch-v10/drivers/i2c.c b/arch/cris/arch-v10/drivers/i2c.c index 
c413539d4205..b3d1f9ed1b98 100644 --- a/arch/cris/arch-v10/drivers/i2c.c +++ b/arch/cris/arch-v10/drivers/i2c.c @@ -22,7 +22,6 @@ #include <asm/etraxi2c.h> -#include <asm/system.h> #include <arch/svinto.h> #include <asm/io.h> #include <asm/delay.h> diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c index 1391b731ad1c..9da056860c92 100644 --- a/arch/cris/arch-v10/drivers/pcf8563.c +++ b/arch/cris/arch-v10/drivers/pcf8563.c @@ -29,7 +29,6 @@ #include <linux/mutex.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/rtc.h> diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c index 466af40c5822..c4b71710fb0e 100644 --- a/arch/cris/arch-v10/drivers/sync_serial.c +++ b/arch/cris/arch-v10/drivers/sync_serial.c @@ -27,7 +27,6 @@ #include <asm/io.h> #include <arch/svinto.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/sync_serial.h> #include <arch/io_interface_mux.h> diff --git a/arch/cris/arch-v10/kernel/debugport.c b/arch/cris/arch-v10/kernel/debugport.c index 99851ba8e5fa..f932c85fbde4 100644 --- a/arch/cris/arch-v10/kernel/debugport.c +++ b/arch/cris/arch-v10/kernel/debugport.c @@ -18,7 +18,6 @@ #include <linux/major.h> #include <linux/delay.h> #include <linux/tty.h> -#include <asm/system.h> #include <arch/svinto.h> #include <asm/io.h> /* Get SIMCOUT. */ diff --git a/arch/cris/arch-v10/kernel/dma.c b/arch/cris/arch-v10/kernel/dma.c index d31504b4a19e..5795047359b2 100644 --- a/arch/cris/arch-v10/kernel/dma.c +++ b/arch/cris/arch-v10/kernel/dma.c @@ -8,6 +8,7 @@ #include <asm/dma.h> #include <arch/svinto.h> +#include <arch/system.h> /* Macro to access ETRAX 100 registers */ #define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \ diff --git a/arch/cris/arch-v10/kernel/io_interface_mux.c b/arch/cris/arch-v10/kernel/io_interface_mux.c index 29f97e962795..ad64cd1c861a 100644 --- a/arch/cris/arch-v10/kernel/io_interface_mux.c +++ b/arch/cris/arch-v10/kernel/io_interface_mux.c @@ -14,6 +14,7 @@ #include <arch/svinto.h> #include <asm/io.h> #include <arch/io_interface_mux.h> +#include <arch/system.h> #define DBG(s) diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c index 9a57db6907f5..bee8df43c201 100644 --- a/arch/cris/arch-v10/kernel/process.c +++ b/arch/cris/arch-v10/kernel/process.c @@ -16,6 +16,7 @@ #include <linux/fs.h> #include <arch/svinto.h> #include <linux/init.h> +#include <arch/system.h> #ifdef CONFIG_ETRAX_GPIO void etrax_gpio_wake_up_check(void); /* drivers/gpio.c */ diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c index 320065f3cbe5..bfddfb99401f 100644 --- a/arch/cris/arch-v10/kernel/ptrace.c +++ b/arch/cris/arch-v10/kernel/ptrace.c @@ -15,7 +15,6 @@ #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> /* diff --git a/arch/cris/arch-v10/kernel/setup.c b/arch/cris/arch-v10/kernel/setup.c index de27b50b72a2..4f96d71b5154 100644 --- a/arch/cris/arch-v10/kernel/setup.c +++ b/arch/cris/arch-v10/kernel/setup.c @@ -14,6 +14,7 @@ #include <linux/proc_fs.h> #include <linux/delay.h> #include <linux/param.h> +#include <arch/system.h> #ifdef CONFIG_PROC_FS #define HAS_FPU 0x0001 diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c index e78fe49a9849..289c584ba499 100644 --- a/arch/cris/arch-v10/kernel/signal.c +++ b/arch/cris/arch-v10/kernel/signal.c @@ -27,6 +27,7 @@ 
#include <asm/processor.h> #include <asm/ucontext.h> #include <asm/uaccess.h> +#include <arch/system.h> #define DEBUG_SIG 0 diff --git a/arch/cris/arch-v10/kernel/traps.c b/arch/cris/arch-v10/kernel/traps.c index 8bebb96bbca1..7001beda716c 100644 --- a/arch/cris/arch-v10/kernel/traps.c +++ b/arch/cris/arch-v10/kernel/traps.c @@ -11,6 +11,7 @@ #include <linux/ptrace.h> #include <asm/uaccess.h> #include <arch/sv_addr_ag.h> +#include <arch/system.h> void show_registers(struct pt_regs *regs) diff --git a/arch/cris/arch-v32/drivers/i2c.c b/arch/cris/arch-v32/drivers/i2c.c index ddb23996f11a..3b2c82ce8147 100644 --- a/arch/cris/arch-v32/drivers/i2c.c +++ b/arch/cris/arch-v32/drivers/i2c.c @@ -36,7 +36,6 @@ #include <asm/etraxi2c.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/delay.h> diff --git a/arch/cris/arch-v32/drivers/mach-a3/gpio.c b/arch/cris/arch-v32/drivers/mach-a3/gpio.c index c845831e2225..0b86deedacb9 100644 --- a/arch/cris/arch-v32/drivers/mach-a3/gpio.c +++ b/arch/cris/arch-v32/drivers/mach-a3/gpio.c @@ -31,7 +31,6 @@ #include <hwregs/gio_defs.h> #include <hwregs/intr_vect_defs.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/irq.h> #include <mach/pinmux.h> diff --git a/arch/cris/arch-v32/drivers/mach-fs/gpio.c b/arch/cris/arch-v32/drivers/mach-fs/gpio.c index ee90d2659be7..a2ac0917f1a6 100644 --- a/arch/cris/arch-v32/drivers/mach-fs/gpio.c +++ b/arch/cris/arch-v32/drivers/mach-fs/gpio.c @@ -30,7 +30,6 @@ #include <hwregs/gio_defs.h> #include <hwregs/intr_vect_defs.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/irq.h> #ifdef CONFIG_ETRAX_VIRTUAL_GPIO diff --git a/arch/cris/arch-v32/kernel/debugport.c b/arch/cris/arch-v32/kernel/debugport.c index 794b364d9f7d..610909b003f6 100644 --- a/arch/cris/arch-v32/kernel/debugport.c +++ b/arch/cris/arch-v32/kernel/debugport.c @@ -4,7 +4,6 @@ #include <linux/console.h> #include <linux/init.h> -#include <asm/system.h> #include <hwregs/reg_rdwr.h> #include <hwregs/reg_map.h> #include <hwregs/ser_defs.h> diff --git a/arch/cris/arch-v32/kernel/fasttimer.c b/arch/cris/arch-v32/kernel/fasttimer.c index 111caa1a2efb..ab1551ee43c5 100644 --- a/arch/cris/arch-v32/kernel/fasttimer.c +++ b/arch/cris/arch-v32/kernel/fasttimer.c @@ -17,7 +17,6 @@ #include <linux/delay.h> #include <asm/irq.h> -#include <asm/system.h> #include <hwregs/reg_map.h> #include <hwregs/reg_rdwr.h> diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c index 511ece94a574..f7ad9e8637df 100644 --- a/arch/cris/arch-v32/kernel/ptrace.c +++ b/arch/cris/arch-v32/kernel/ptrace.c @@ -15,7 +15,6 @@ #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> #include <arch/hwregs/supp_reg.h> diff --git a/arch/cris/arch-v32/mach-a3/dma.c b/arch/cris/arch-v32/mach-a3/dma.c index f35e4f65f4ef..47c64bf40eae 100644 --- a/arch/cris/arch-v32/mach-a3/dma.c +++ b/arch/cris/arch-v32/mach-a3/dma.c @@ -9,7 +9,6 @@ #include <hwregs/clkgen_defs.h> #include <hwregs/strmux_defs.h> #include <linux/errno.h> -#include <asm/system.h> #include <arbiter.h> static char used_dma_channels[MAX_DMA_CHANNELS]; diff --git a/arch/cris/arch-v32/mach-fs/dma.c b/arch/cris/arch-v32/mach-fs/dma.c index 2d970d7505c9..fc6416a671ea 100644 --- a/arch/cris/arch-v32/mach-fs/dma.c +++ b/arch/cris/arch-v32/mach-fs/dma.c @@ -9,7 +9,6 @@ #include <hwregs/config_defs.h> #include <hwregs/strmux_defs.h> #include <linux/errno.h> -#include <asm/system.h> #include <mach/arbiter.h> static char 
used_dma_channels[MAX_DMA_CHANNELS]; diff --git a/arch/cris/include/arch-v10/arch/elf.h b/arch/cris/include/arch-v10/arch/elf.h index 1c38ee728b17..1eb638aeddb4 100644 --- a/arch/cris/include/arch-v10/arch/elf.h +++ b/arch/cris/include/arch-v10/arch/elf.h @@ -1,6 +1,8 @@ #ifndef __ASMCRIS_ARCH_ELF_H #define __ASMCRIS_ARCH_ELF_H +#include <arch/system.h> + #define ELF_MACH EF_CRIS_VARIANT_ANY_V0_V10 /* diff --git a/arch/cris/include/arch-v32/arch/elf.h b/arch/cris/include/arch-v32/arch/elf.h index 1324e505a4d8..c46d58291166 100644 --- a/arch/cris/include/arch-v32/arch/elf.h +++ b/arch/cris/include/arch-v32/arch/elf.h @@ -1,6 +1,8 @@ #ifndef _ASM_CRIS_ELF_H #define _ASM_CRIS_ELF_H +#include <arch/system.h> + #define ELF_CORE_EFLAGS EF_CRIS_VARIANT_V32 /* diff --git a/arch/cris/include/arch-v32/arch/system.h b/arch/cris/include/arch-v32/arch/system.h index 76cea99eaa60..db853fb3a458 100644 --- a/arch/cris/include/arch-v32/arch/system.h +++ b/arch/cris/include/arch-v32/arch/system.h @@ -34,14 +34,4 @@ static inline unsigned long rdsp(void) /* Write the user-mode stack pointer. */ #define wrusp(usp) __asm__ __volatile__ ("move %0, $usp" : : "rm" (usp)) -#define nop() __asm__ __volatile__ ("nop"); - -#define xchg(ptr,x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long) (x),(ptr),sizeof(*(ptr)))) - -#define tas(ptr) (xchg((ptr),1)) - -struct __xchg_dummy { unsigned long a[100]; }; -#define __xg(x) ((struct __xchg_dummy *)(x)) - #endif /* _ASM_CRIS_ARCH_SYSTEM_H */ diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h index bbf093814db2..1056a5dfe04f 100644 --- a/arch/cris/include/asm/atomic.h +++ b/arch/cris/include/asm/atomic.h @@ -5,7 +5,7 @@ #include <linux/compiler.h> #include <linux/types.h> -#include <asm/system.h> +#include <asm/cmpxchg.h> #include <arch/atomic.h> /* diff --git a/arch/cris/include/asm/barrier.h b/arch/cris/include/asm/barrier.h new file mode 100644 index 000000000000..198ad7fa6b25 --- /dev/null +++ b/arch/cris/include/asm/barrier.h @@ -0,0 +1,25 @@ +#ifndef __ASM_CRIS_BARRIER_H +#define __ASM_CRIS_BARRIER_H + +#define nop() __asm__ __volatile__ ("nop"); + +#define barrier() __asm__ __volatile__("": : :"memory") +#define mb() barrier() +#define rmb() mb() +#define wmb() mb() +#define read_barrier_depends() do { } while(0) +#define set_mb(var, value) do { var = value; mb(); } while (0) + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) +#endif + +#endif /* __ASM_CRIS_BARRIER_H */ diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h index a78a2d70cd8b..184066ceb1f6 100644 --- a/arch/cris/include/asm/bitops.h +++ b/arch/cris/include/asm/bitops.h @@ -19,7 +19,6 @@ #endif #include <arch/bitops.h> -#include <asm/system.h> #include <linux/atomic.h> #include <linux/compiler.h> diff --git a/arch/cris/include/asm/system.h b/arch/cris/include/asm/cmpxchg.h index ea10592f7d75..b756dac8aa3f 100644 --- a/arch/cris/include/asm/system.h +++ b/arch/cris/include/asm/cmpxchg.h @@ -1,44 +1,7 @@ -#ifndef __ASM_CRIS_SYSTEM_H -#define __ASM_CRIS_SYSTEM_H +#ifndef __ASM_CRIS_CMPXCHG__ +#define __ASM_CRIS_CMPXCHG__ #include <linux/irqflags.h> -#include <arch/system.h> - -/* the switch_to macro calls resume, an asm function in entry.S which does the actual - * task switching. 
- */ - -extern struct task_struct *resume(struct task_struct *prev, struct task_struct *next, int); -#define switch_to(prev,next,last) last = resume(prev,next, \ - (int)&((struct task_struct *)0)->thread) - -#define barrier() __asm__ __volatile__("": : :"memory") -#define mb() barrier() -#define rmb() mb() -#define wmb() mb() -#define read_barrier_depends() do { } while(0) -#define set_mb(var, value) do { var = value; mb(); } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#endif - -#define iret() - -/* - * disable hlt during certain critical i/o operations - */ -#define HAVE_DISABLE_HLT -void disable_hlt(void); -void enable_hlt(void); static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) { @@ -67,6 +30,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz return x; } +#define xchg(ptr,x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +#define tas(ptr) (xchg((ptr),1)) + #include <asm-generic/cmpxchg-local.h> /* @@ -82,8 +50,4 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz #include <asm-generic/cmpxchg.h> #endif -#define arch_align_stack(x) (x) - -void default_idle(void); - -#endif +#endif /* __ASM_CRIS_CMPXCHG__ */ diff --git a/arch/cris/include/asm/exec.h b/arch/cris/include/asm/exec.h new file mode 100644 index 000000000000..9665dab7e25b --- /dev/null +++ b/arch/cris/include/asm/exec.h @@ -0,0 +1,6 @@ +#ifndef __ASM_CRIS_EXEC_H +#define __ASM_CRIS_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* __ASM_CRIS_EXEC_H */ diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h index 3f7248f7a1c9..4210d72a6667 100644 --- a/arch/cris/include/asm/processor.h +++ b/arch/cris/include/asm/processor.h @@ -10,10 +10,10 @@ #ifndef __ASM_CRIS_PROCESSOR_H #define __ASM_CRIS_PROCESSOR_H -#include <asm/system.h> #include <asm/page.h> #include <asm/ptrace.h> #include <arch/processor.h> +#include <arch/system.h> struct task_struct; @@ -72,4 +72,13 @@ static inline void release_thread(struct task_struct *dead_task) #define cpu_relax() barrier() +/* + * disable hlt during certain critical i/o operations + */ +#define HAVE_DISABLE_HLT +void disable_hlt(void); +void enable_hlt(void); + +void default_idle(void); + #endif /* __ASM_CRIS_PROCESSOR_H */ diff --git a/arch/cris/include/asm/switch_to.h b/arch/cris/include/asm/switch_to.h new file mode 100644 index 000000000000..d842e1163ba1 --- /dev/null +++ b/arch/cris/include/asm/switch_to.h @@ -0,0 +1,12 @@ +#ifndef __ASM_CRIS_SWITCH_TO_H +#define __ASM_CRIS_SWITCH_TO_H + +/* the switch_to macro calls resume, an asm function in entry.S which does the actual + * task switching. 
+ */ + +extern struct task_struct *resume(struct task_struct *prev, struct task_struct *next, int); +#define switch_to(prev,next,last) last = resume(prev,next, \ + (int)&((struct task_struct *)0)->thread) + +#endif /* __ASM_CRIS_SWITCH_TO_H */ diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c index 788eb2248916..d36836dbbc07 100644 --- a/arch/cris/kernel/irq.c +++ b/arch/cris/kernel/irq.c @@ -36,6 +36,7 @@ #include <linux/spinlock.h> #include <asm/io.h> +#include <arch/system.h> /* called by the assembler IRQ entry functions defined in irq.h * to dispatch the interrupts to registered handlers diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c index d8f50ff6fadd..891dad85e8bd 100644 --- a/arch/cris/kernel/process.c +++ b/arch/cris/kernel/process.c @@ -16,7 +16,6 @@ #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/irq.h> -#include <asm/system.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/init_task.h> diff --git a/arch/cris/kernel/ptrace.c b/arch/cris/kernel/ptrace.c index 48b0f3912632..d114ad3da9b1 100644 --- a/arch/cris/kernel/ptrace.c +++ b/arch/cris/kernel/ptrace.c @@ -21,7 +21,6 @@ #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> diff --git a/arch/cris/kernel/setup.c b/arch/cris/kernel/setup.c index b712f4934c4b..32c3d248868e 100644 --- a/arch/cris/kernel/setup.c +++ b/arch/cris/kernel/setup.c @@ -20,6 +20,7 @@ #include <linux/pfn.h> #include <linux/cpu.h> #include <asm/setup.h> +#include <arch/system.h> /* * Setup options diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c index 8da53f34c7a7..a11ad3229f8c 100644 --- a/arch/cris/kernel/traps.c +++ b/arch/cris/kernel/traps.c @@ -17,6 +17,7 @@ #include <asm/pgtable.h> #include <asm/uaccess.h> +#include <arch/system.h> extern void arch_enable_nmi(void); extern void stop_watchdog(void); diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index 9dcac8ec8fa0..b4760d86e1bb 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c @@ -9,6 +9,7 @@ #include <linux/module.h> #include <linux/wait.h> #include <asm/uaccess.h> +#include <arch/system.h> extern int find_fixup_code(struct pt_regs *); extern void die_if_kernel(const char *, struct pt_regs *, long); diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index 0d8a7d661740..b86329d0e316 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -16,7 +16,7 @@ #include <linux/types.h> #include <asm/spr-regs.h> -#include <asm/system.h> +#include <asm/cmpxchg.h> #ifdef CONFIG_SMP #error not SMP safe @@ -181,61 +181,6 @@ static inline void atomic64_dec(atomic64_t *v) #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) #define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) -/*****************************************************************************/ -/* - * exchange value with memory - */ -extern uint64_t __xchg_64(uint64_t i, volatile void *v); - -#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS - -#define xchg(ptr, x) \ -({ \ - __typeof__(ptr) __xg_ptr = (ptr); \ - __typeof__(*(ptr)) __xg_orig; \ - \ - switch (sizeof(__xg_orig)) { \ - case 4: \ - asm volatile( \ - "swap%I0 %M0,%1" \ - : "+m"(*__xg_ptr), "=r"(__xg_orig) \ - : "1"(x) \ - : "memory" \ - ); \ - break; \ - \ - default: \ - __xg_orig = (__typeof__(__xg_orig))0; \ - asm volatile("break"); \ - break; \ - } \ - \ - __xg_orig; \ -}) - -#else - -extern uint32_t __xchg_32(uint32_t i, volatile void *v); - -#define 
xchg(ptr, x) \ -({ \ - __typeof__(ptr) __xg_ptr = (ptr); \ - __typeof__(*(ptr)) __xg_orig; \ - \ - switch (sizeof(__xg_orig)) { \ - case 4: __xg_orig = (__typeof__(*(ptr))) __xchg_32((uint32_t) x, __xg_ptr); break; \ - default: \ - __xg_orig = (__typeof__(__xg_orig))0; \ - asm volatile("break"); \ - break; \ - } \ - __xg_orig; \ -}) - -#endif - -#define tas(ptr) (xchg((ptr), 1)) - #define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) #define atomic_xchg(v, new) (xchg(&(v)->counter, new)) #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) diff --git a/arch/frv/include/asm/barrier.h b/arch/frv/include/asm/barrier.h new file mode 100644 index 000000000000..06776ad9f5e9 --- /dev/null +++ b/arch/frv/include/asm/barrier.h @@ -0,0 +1,29 @@ +/* FR-V CPU memory barrier definitions + * + * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _ASM_BARRIER_H +#define _ASM_BARRIER_H + +#define nop() asm volatile ("nop"::) + +#define mb() asm volatile ("membar" : : :"memory") +#define rmb() asm volatile ("membar" : : :"memory") +#define wmb() asm volatile ("membar" : : :"memory") +#define read_barrier_depends() do { } while (0) + +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do {} while(0) +#define set_mb(var, value) \ + do { var = (value); barrier(); } while (0) + +#endif /* _ASM_BARRIER_H */ diff --git a/arch/frv/include/asm/bug.h b/arch/frv/include/asm/bug.h index 2e054508a2f6..dd01bcf42ee6 100644 --- a/arch/frv/include/asm/bug.h +++ b/arch/frv/include/asm/bug.h @@ -51,4 +51,6 @@ do { \ #include <asm-generic/bug.h> +extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2))); + #endif diff --git a/arch/frv/include/asm/system.h b/arch/frv/include/asm/cmpxchg.h index 6c10fd2c626d..5b04dd0aecab 100644 --- a/arch/frv/include/asm/system.h +++ b/arch/frv/include/asm/cmpxchg.h @@ -1,6 +1,9 @@ -/* system.h: FR-V CPU control definitions +/* xchg and cmpxchg operation emulation for FR-V * - * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved. + * For an explanation of how atomic ops work in this arch, see: + * Documentation/frv/atomic-ops.txt + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or @@ -8,54 +11,65 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ - -#ifndef _ASM_SYSTEM_H -#define _ASM_SYSTEM_H +#ifndef _ASM_CMPXCHG_H +#define _ASM_CMPXCHG_H #include <linux/types.h> -#include <linux/linkage.h> -#include <linux/kernel.h> - -struct thread_struct; +/*****************************************************************************/ /* - * switch_to(prev, next) should switch from task `prev' to `next' - * `prev' will never be the same as `next'. - * The `mb' is to tell GCC not to cache `current' across this call. 
+ * exchange value with memory */ -extern asmlinkage -struct task_struct *__switch_to(struct thread_struct *prev_thread, - struct thread_struct *next_thread, - struct task_struct *prev); - -#define switch_to(prev, next, last) \ -do { \ - (prev)->thread.sched_lr = \ - (unsigned long) __builtin_return_address(0); \ - (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \ - mb(); \ -} while(0) +extern uint64_t __xchg_64(uint64_t i, volatile void *v); -/* - * Force strict CPU ordering. - */ -#define nop() asm volatile ("nop"::) -#define mb() asm volatile ("membar" : : :"memory") -#define rmb() asm volatile ("membar" : : :"memory") -#define wmb() asm volatile ("membar" : : :"memory") -#define read_barrier_depends() do { } while (0) +#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS + +#define xchg(ptr, x) \ +({ \ + __typeof__(ptr) __xg_ptr = (ptr); \ + __typeof__(*(ptr)) __xg_orig; \ + \ + switch (sizeof(__xg_orig)) { \ + case 4: \ + asm volatile( \ + "swap%I0 %M0,%1" \ + : "+m"(*__xg_ptr), "=r"(__xg_orig) \ + : "1"(x) \ + : "memory" \ + ); \ + break; \ + \ + default: \ + __xg_orig = (__typeof__(__xg_orig))0; \ + asm volatile("break"); \ + break; \ + } \ + \ + __xg_orig; \ +}) -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do {} while(0) -#define set_mb(var, value) \ - do { var = (value); barrier(); } while (0) +#else -extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2))); -extern void free_initmem(void); +extern uint32_t __xchg_32(uint32_t i, volatile void *v); + +#define xchg(ptr, x) \ +({ \ + __typeof__(ptr) __xg_ptr = (ptr); \ + __typeof__(*(ptr)) __xg_orig; \ + \ + switch (sizeof(__xg_orig)) { \ + case 4: __xg_orig = (__typeof__(*(ptr))) __xchg_32((uint32_t) x, __xg_ptr); break; \ + default: \ + __xg_orig = (__typeof__(__xg_orig))0; \ + asm volatile("break"); \ + break; \ + } \ + __xg_orig; \ +}) + +#endif -#define arch_align_stack(x) (x) +#define tas(ptr) (xchg((ptr), 1)) /*****************************************************************************/ /* @@ -155,4 +169,4 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, (unsigned long)(n), sizeof(*(ptr)))) #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) -#endif /* _ASM_SYSTEM_H */ +#endif /* _ASM_CMPXCHG_H */ diff --git a/arch/frv/include/asm/exec.h b/arch/frv/include/asm/exec.h new file mode 100644 index 000000000000..65c91305d4a7 --- /dev/null +++ b/arch/frv/include/asm/exec.h @@ -0,0 +1,17 @@ +/* FR-V CPU executable handling + * + * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _ASM_EXEC_H +#define _ASM_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* _ASM_EXEC_H */ diff --git a/arch/frv/include/asm/switch_to.h b/arch/frv/include/asm/switch_to.h new file mode 100644 index 000000000000..2cf0f6a7fbb1 --- /dev/null +++ b/arch/frv/include/asm/switch_to.h @@ -0,0 +1,35 @@ +/* FR-V CPU basic task switching + * + * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _ASM_SWITCH_TO_H +#define _ASM_SWITCH_TO_H + +#include <linux/thread_info.h> + +/* + * switch_to(prev, next) should switch from task `prev' to `next' + * `prev' will never be the same as `next'. + * The `mb' is to tell GCC not to cache `current' across this call. + */ +extern asmlinkage +struct task_struct *__switch_to(struct thread_struct *prev_thread, + struct thread_struct *next_thread, + struct task_struct *prev); + +#define switch_to(prev, next, last) \ +do { \ + (prev)->thread.sched_lr = \ + (unsigned long) __builtin_return_address(0); \ + (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \ + mb(); \ +} while(0) + +#endif /* _ASM_SWITCH_TO_H */ diff --git a/arch/frv/kernel/debug-stub.c b/arch/frv/kernel/debug-stub.c index 2845139c8077..a0228f717ef2 100644 --- a/arch/frv/kernel/debug-stub.c +++ b/arch/frv/kernel/debug-stub.c @@ -17,7 +17,6 @@ #include <linux/serial_reg.h> #include <linux/start_kernel.h> -#include <asm/system.h> #include <asm/serial-regs.h> #include <asm/timer-regs.h> #include <asm/irc-regs.h> diff --git a/arch/frv/kernel/gdb-io.c b/arch/frv/kernel/gdb-io.c index 2ca641d199f8..0707d35079ba 100644 --- a/arch/frv/kernel/gdb-io.c +++ b/arch/frv/kernel/gdb-io.c @@ -19,7 +19,6 @@ #include <linux/serial_reg.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/irc-regs.h> #include <asm/timer-regs.h> #include <asm/gdb-stub.h> diff --git a/arch/frv/kernel/gdb-stub.c b/arch/frv/kernel/gdb-stub.c index a6d5381c94fe..bbe78b0bffec 100644 --- a/arch/frv/kernel/gdb-stub.c +++ b/arch/frv/kernel/gdb-stub.c @@ -126,7 +126,6 @@ #include <asm/asm-offsets.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/gdb-stub.h> #define LEDS(x) do { /* *(u32*)0xe1200004 = ~(x); mb(); */ } while(0) diff --git a/arch/frv/kernel/irq-mb93091.c b/arch/frv/kernel/irq-mb93091.c index 9afc2ea400dc..2cc327a1ca44 100644 --- a/arch/frv/kernel/irq-mb93091.c +++ b/arch/frv/kernel/irq-mb93091.c @@ -20,7 +20,6 @@ #include <linux/bitops.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/delay.h> #include <asm/irq.h> #include <asm/irc-regs.h> diff --git a/arch/frv/kernel/irq-mb93093.c b/arch/frv/kernel/irq-mb93093.c index 4d4ad09d3c91..95e4eb4f1f38 100644 --- a/arch/frv/kernel/irq-mb93093.c +++ b/arch/frv/kernel/irq-mb93093.c @@ -20,7 +20,6 @@ #include <linux/bitops.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/delay.h> #include <asm/irq.h> #include <asm/irc-regs.h> diff --git a/arch/frv/kernel/irq-mb93493.c b/arch/frv/kernel/irq-mb93493.c index 4d034c7840c9..ba648da0932d 100644 --- a/arch/frv/kernel/irq-mb93493.c +++ b/arch/frv/kernel/irq-mb93493.c @@ -20,7 +20,6 @@ #include <linux/bitops.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/delay.h> #include <asm/irq.h> #include <asm/irc-regs.h> diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c index 3facbc28cbbc..2239346fa3db 100644 --- a/arch/frv/kernel/irq.c +++ b/arch/frv/kernel/irq.c @@ -28,7 +28,6 @@ #include <linux/atomic.h> #include <asm/io.h> #include <asm/smp.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/delay.h> diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c index 
29cc49783787..d4de48bd5efe 100644 --- a/arch/frv/kernel/process.c +++ b/arch/frv/kernel/process.c @@ -28,7 +28,6 @@ #include <asm/asm-offsets.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/setup.h> #include <asm/pgtable.h> #include <asm/tlb.h> diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c index 9d68f7fac730..3987ff88dab0 100644 --- a/arch/frv/kernel/ptrace.c +++ b/arch/frv/kernel/ptrace.c @@ -26,7 +26,6 @@ #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/unistd.h> diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c index 1d2dfe67d442..5cfd1420b091 100644 --- a/arch/frv/kernel/traps.c +++ b/arch/frv/kernel/traps.c @@ -23,7 +23,6 @@ #include <asm/asm-offsets.h> #include <asm/setup.h> #include <asm/fpu.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/siginfo.h> diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c index a325d57a83d5..331c1e2cfb67 100644 --- a/arch/frv/mm/fault.c +++ b/arch/frv/mm/fault.c @@ -20,7 +20,6 @@ #include <linux/ptrace.h> #include <linux/hardirq.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/gdb-stub.h> diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c index fbe5f0dbae06..a19effcccb34 100644 --- a/arch/frv/mm/init.c +++ b/arch/frv/mm/init.c @@ -33,7 +33,6 @@ #include <asm/segment.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/mmu_context.h> #include <asm/virtconvert.h> #include <asm/sections.h> diff --git a/arch/frv/mm/kmap.c b/arch/frv/mm/kmap.c index fb78be38ea02..e9217e605aa8 100644 --- a/arch/frv/mm/kmap.c +++ b/arch/frv/mm/kmap.c @@ -21,7 +21,6 @@ #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/io.h> -#include <asm/system.h> #undef DEBUG diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index f5a38c1f5489..40901e353c21 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -2,6 +2,7 @@ #define __ARCH_H8300_ATOMIC__ #include <linux/types.h> +#include <asm/cmpxchg.h> /* * Atomic operations that C can't guarantee us. Useful for @@ -13,7 +14,6 @@ #define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v, i) (((v)->counter) = i) -#include <asm/system.h> #include <linux/kernel.h> static __inline__ int atomic_add_return(int i, atomic_t *v) @@ -102,8 +102,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) - static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int ret; diff --git a/arch/h8300/include/asm/barrier.h b/arch/h8300/include/asm/barrier.h new file mode 100644 index 000000000000..c7283c343c55 --- /dev/null +++ b/arch/h8300/include/asm/barrier.h @@ -0,0 +1,27 @@ +#ifndef _H8300_BARRIER_H +#define _H8300_BARRIER_H + +#define nop() asm volatile ("nop"::) + +/* + * Force strict CPU ordering. + * Not really required on H8... 
+ */ +#define mb() asm volatile ("" : : :"memory") +#define rmb() asm volatile ("" : : :"memory") +#define wmb() asm volatile ("" : : :"memory") +#define set_mb(var, value) do { xchg(&var, value); } while (0) + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) +#endif + +#endif /* _H8300_BARRIER_H */ diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h index e856c1bb3415..eb34e0cd33d5 100644 --- a/arch/h8300/include/asm/bitops.h +++ b/arch/h8300/include/asm/bitops.h @@ -7,7 +7,6 @@ */ #include <linux/compiler.h> -#include <asm/system.h> #ifdef __KERNEL__ diff --git a/arch/h8300/include/asm/bug.h b/arch/h8300/include/asm/bug.h index 887c19773185..1e1be8119935 100644 --- a/arch/h8300/include/asm/bug.h +++ b/arch/h8300/include/asm/bug.h @@ -5,4 +5,8 @@ #define is_valid_bugaddr(addr) (1) #include <asm-generic/bug.h> + +struct pt_regs; +extern void die(const char *str, struct pt_regs *fp, unsigned long err); + #endif diff --git a/arch/h8300/include/asm/cmpxchg.h b/arch/h8300/include/asm/cmpxchg.h new file mode 100644 index 000000000000..cdb203ef681f --- /dev/null +++ b/arch/h8300/include/asm/cmpxchg.h @@ -0,0 +1,60 @@ +#ifndef __ARCH_H8300_CMPXCHG__ +#define __ARCH_H8300_CMPXCHG__ + +#include <linux/irqflags.h> + +#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +struct __xchg_dummy { unsigned long a[100]; }; +#define __xg(x) ((volatile struct __xchg_dummy *)(x)) + +static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) +{ + unsigned long tmp, flags; + + local_irq_save(flags); + + switch (size) { + case 1: + __asm__ __volatile__ + ("mov.b %2,%0\n\t" + "mov.b %1,%2" + : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); + break; + case 2: + __asm__ __volatile__ + ("mov.w %2,%0\n\t" + "mov.w %1,%2" + : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); + break; + case 4: + __asm__ __volatile__ + ("mov.l %2,%0\n\t" + "mov.l %1,%2" + : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); + break; + default: + tmp = 0; + } + local_irq_restore(flags); + return tmp; +} + +#include <asm-generic/cmpxchg-local.h> + +/* + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. 
+ */ +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ + (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +#ifndef CONFIG_SMP +#include <asm-generic/cmpxchg.h> +#endif + +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +#endif /* __ARCH_H8300_CMPXCHG__ */ diff --git a/arch/h8300/include/asm/exec.h b/arch/h8300/include/asm/exec.h new file mode 100644 index 000000000000..c01c45ccadf9 --- /dev/null +++ b/arch/h8300/include/asm/exec.h @@ -0,0 +1,6 @@ +#ifndef _H8300_EXEC_H +#define _H8300_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* _H8300_EXEC_H */ diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h index e834b6018897..61fabf1788c6 100644 --- a/arch/h8300/include/asm/processor.h +++ b/arch/h8300/include/asm/processor.h @@ -135,4 +135,9 @@ unsigned long get_wchan(struct task_struct *p); #define cpu_relax() barrier() +#define HARD_RESET_NOW() ({ \ + local_irq_disable(); \ + asm("jmp @@0"); \ +}) + #endif diff --git a/arch/h8300/include/asm/switch_to.h b/arch/h8300/include/asm/switch_to.h new file mode 100644 index 000000000000..cdd8731ce487 --- /dev/null +++ b/arch/h8300/include/asm/switch_to.h @@ -0,0 +1,50 @@ +#ifndef _H8300_SWITCH_TO_H +#define _H8300_SWITCH_TO_H + +/* + * switch_to(n) should switch tasks to task ptr, first checking that + * ptr isn't the current task, in which case it does nothing. This + * also clears the TS-flag if the task we switched to has used the + * math co-processor latest. + */ +/* + * switch_to() saves the extra registers, that are not saved + * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and + * a0-a1. Some of these are used by schedule() and its predecessors + * and so we might get see unexpected behaviors when a task returns + * with unexpected register values. + * + * syscall stores these registers itself and none of them are used + * by syscall after the function in the syscall has been called. + * + * Beware that resume now expects *next to be in d1 and the offset of + * tss to be in a1. This saves a few instructions as we no longer have + * to push them onto the stack and read them back right after. + * + * 02/17/96 - Jes Sorensen (jds@kom.auc.dk) + * + * Changed 96/09/19 by Andreas Schwab + * pass prev in a0, next in a1, offset of tss in d1, and whether + * the mm structures are shared in d2 (to avoid atc flushing). + * + * H8/300 Porting 2002/09/04 Yoshinori Sato + */ + +asmlinkage void resume(void); +#define switch_to(prev,next,last) { \ + void *_last; \ + __asm__ __volatile__( \ + "mov.l %1, er0\n\t" \ + "mov.l %2, er1\n\t" \ + "mov.l %3, er2\n\t" \ + "jsr @_resume\n\t" \ + "mov.l er2,%0\n\t" \ + : "=r" (_last) \ + : "r" (&(prev->thread)), \ + "r" (&(next->thread)), \ + "g" (prev) \ + : "cc", "er0", "er1", "er2", "er3"); \ + (last) = _last; \ +} + +#endif /* _H8300_SWITCH_TO_H */ diff --git a/arch/h8300/include/asm/system.h b/arch/h8300/include/asm/system.h deleted file mode 100644 index 2c2382e50d93..000000000000 --- a/arch/h8300/include/asm/system.h +++ /dev/null @@ -1,140 +0,0 @@ -#ifndef _H8300_SYSTEM_H -#define _H8300_SYSTEM_H - -#include <linux/linkage.h> -#include <linux/irqflags.h> - -struct pt_regs; - -/* - * switch_to(n) should switch tasks to task ptr, first checking that - * ptr isn't the current task, in which case it does nothing. This - * also clears the TS-flag if the task we switched to has used the - * math co-processor latest. 
- */ -/* - * switch_to() saves the extra registers, that are not saved - * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and - * a0-a1. Some of these are used by schedule() and its predecessors - * and so we might get see unexpected behaviors when a task returns - * with unexpected register values. - * - * syscall stores these registers itself and none of them are used - * by syscall after the function in the syscall has been called. - * - * Beware that resume now expects *next to be in d1 and the offset of - * tss to be in a1. This saves a few instructions as we no longer have - * to push them onto the stack and read them back right after. - * - * 02/17/96 - Jes Sorensen (jds@kom.auc.dk) - * - * Changed 96/09/19 by Andreas Schwab - * pass prev in a0, next in a1, offset of tss in d1, and whether - * the mm structures are shared in d2 (to avoid atc flushing). - * - * H8/300 Porting 2002/09/04 Yoshinori Sato - */ - -asmlinkage void resume(void); -#define switch_to(prev,next,last) { \ - void *_last; \ - __asm__ __volatile__( \ - "mov.l %1, er0\n\t" \ - "mov.l %2, er1\n\t" \ - "mov.l %3, er2\n\t" \ - "jsr @_resume\n\t" \ - "mov.l er2,%0\n\t" \ - : "=r" (_last) \ - : "r" (&(prev->thread)), \ - "r" (&(next->thread)), \ - "g" (prev) \ - : "cc", "er0", "er1", "er2", "er3"); \ - (last) = _last; \ -} - -#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc") - -/* - * Force strict CPU ordering. - * Not really required on H8... - */ -#define nop() asm volatile ("nop"::) -#define mb() asm volatile ("" : : :"memory") -#define rmb() asm volatile ("" : : :"memory") -#define wmb() asm volatile ("" : : :"memory") -#define set_mb(var, value) do { xchg(&var, value); } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#endif - -#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) - -struct __xchg_dummy { unsigned long a[100]; }; -#define __xg(x) ((volatile struct __xchg_dummy *)(x)) - -static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) -{ - unsigned long tmp, flags; - - local_irq_save(flags); - - switch (size) { - case 1: - __asm__ __volatile__ - ("mov.b %2,%0\n\t" - "mov.b %1,%2" - : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); - break; - case 2: - __asm__ __volatile__ - ("mov.w %2,%0\n\t" - "mov.w %1,%2" - : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); - break; - case 4: - __asm__ __volatile__ - ("mov.l %2,%0\n\t" - "mov.l %1,%2" - : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); - break; - default: - tmp = 0; - } - local_irq_restore(flags); - return tmp; -} - -#define HARD_RESET_NOW() ({ \ - local_irq_disable(); \ - asm("jmp @@0"); \ -}) - -#include <asm-generic/cmpxchg-local.h> - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. 
- */ -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ - (unsigned long)(n), sizeof(*(ptr)))) -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - -#ifndef CONFIG_SMP -#include <asm-generic/cmpxchg.h> -#endif - -#define arch_align_stack(x) (x) - -extern void die(const char *str, struct pt_regs *fp, unsigned long err); - -#endif /* _H8300_SYSTEM_H */ diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c index 1f67fed476af..2fa8ac7b79b5 100644 --- a/arch/h8300/kernel/irq.c +++ b/arch/h8300/kernel/irq.c @@ -16,7 +16,6 @@ #include <linux/irq.h> #include <linux/interrupt.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/io.h> #include <asm/setup.h> diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c index 1a173b35f475..0e9c315be104 100644 --- a/arch/h8300/kernel/process.c +++ b/arch/h8300/kernel/process.c @@ -38,7 +38,6 @@ #include <linux/slab.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/setup.h> #include <asm/pgtable.h> diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c index 497fa89b5df4..748cf6585aa4 100644 --- a/arch/h8300/kernel/ptrace.c +++ b/arch/h8300/kernel/ptrace.c @@ -27,7 +27,6 @@ #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/signal.h> diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c index dfa05bd908b6..7833aa3e7c7d 100644 --- a/arch/h8300/kernel/traps.c +++ b/arch/h8300/kernel/traps.c @@ -22,7 +22,6 @@ #include <linux/module.h> #include <linux/bug.h> -#include <asm/system.h> #include <asm/irq.h> #include <asm/traps.h> #include <asm/page.h> diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c index 1d092abebf03..472535977006 100644 --- a/arch/h8300/mm/fault.c +++ b/arch/h8300/mm/fault.c @@ -17,7 +17,6 @@ #include <linux/kernel.h> #include <linux/ptrace.h> -#include <asm/system.h> #include <asm/pgtable.h> /* diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c index 7cc3380f250c..973369c32a95 100644 --- a/arch/h8300/mm/init.c +++ b/arch/h8300/mm/init.c @@ -36,7 +36,6 @@ #include <asm/segment.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #undef DEBUG diff --git a/arch/h8300/mm/kmap.c b/arch/h8300/mm/kmap.c index 944a502c2e56..f79edcdadf39 100644 --- a/arch/h8300/mm/kmap.c +++ b/arch/h8300/mm/kmap.c @@ -19,7 +19,6 @@ #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/io.h> -#include <asm/system.h> #undef DEBUG diff --git a/arch/h8300/mm/memory.c b/arch/h8300/mm/memory.c index 5552ddfaab5e..06e364641392 100644 --- a/arch/h8300/mm/memory.c +++ b/arch/h8300/mm/memory.c @@ -26,7 +26,6 @@ #include <asm/segment.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/io.h> diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index e220f9053035..3e258043337b 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -23,6 +23,7 @@ #define _ASM_ATOMIC_H #include <linux/types.h> +#include <asm/cmpxchg.h> #define ATOMIC_INIT(i) { (i) } #define atomic_set(v, i) ((v)->counter = (i)) diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h new file mode 100644 index 000000000000..a4ed6e26cb1d --- /dev/null +++ b/arch/hexagon/include/asm/barrier.h @@ -0,0 +1,41 @@ +/* + * Memory barrier definitions for the 
Hexagon architecture + * + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#ifndef _ASM_BARRIER_H +#define _ASM_BARRIER_H + +#define rmb() barrier() +#define read_barrier_depends() barrier() +#define wmb() barrier() +#define mb() barrier() +#define smp_rmb() barrier() +#define smp_read_barrier_depends() barrier() +#define smp_wmb() barrier() +#define smp_mb() barrier() +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + +/* Set a value and use a memory barrier. Used by the scheduler somewhere. */ +#define set_mb(var, value) \ + do { var = value; mb(); } while (0) + +#endif /* _ASM_BARRIER_H */ diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h index d23461e080ff..4caa649ad78b 100644 --- a/arch/hexagon/include/asm/bitops.h +++ b/arch/hexagon/include/asm/bitops.h @@ -24,7 +24,6 @@ #include <linux/compiler.h> #include <asm/byteorder.h> -#include <asm/system.h> #include <asm/atomic.h> #ifdef __KERNEL__ diff --git a/arch/hexagon/include/asm/system.h b/arch/hexagon/include/asm/cmpxchg.h index 323ed1dd65e2..c5f9527e1df6 100644 --- a/arch/hexagon/include/asm/system.h +++ b/arch/hexagon/include/asm/cmpxchg.h @@ -1,8 +1,9 @@ /* - * System level definitions for the Hexagon architecture + * xchg/cmpxchg operations for the Hexagon architecture * * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. @@ -18,37 +19,8 @@ * 02110-1301, USA. 
*/ -#ifndef _ASM_SYSTEM_H -#define _ASM_SYSTEM_H - -#include <linux/linkage.h> -#include <linux/irqflags.h> -#include <asm/atomic.h> -#include <asm/hexagon_vm.h> - -struct thread_struct; - -extern struct task_struct *__switch_to(struct task_struct *, - struct task_struct *, - struct task_struct *); - -#define switch_to(p, n, r) do {\ - r = __switch_to((p), (n), (r));\ -} while (0) - - -#define rmb() barrier() -#define read_barrier_depends() barrier() -#define wmb() barrier() -#define mb() barrier() -#define smp_rmb() barrier() -#define smp_read_barrier_depends() barrier() -#define smp_wmb() barrier() -#define smp_mb() barrier() -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() +#ifndef _ASM_CMPXCHG_H +#define _ASM_CMPXCHG_H /* * __xchg - atomically exchange a register and a memory location @@ -87,10 +59,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, #define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \ sizeof(*(ptr)))) -/* Set a value and use a memory barrier. Used by the scheduler somewhere. */ -#define set_mb(var, value) \ - do { var = value; mb(); } while (0) - /* * see rt-mutex-design.txt; cmpxchg supposedly checks if *ptr == A and swaps. * looks just like atomic_cmpxchg on our arch currently with a bunch of @@ -119,8 +87,4 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, __oldval; \ }) -/* Should probably shoot for an 8-byte aligned stack pointer */ -#define STACK_MASK (~7) -#define arch_align_stack(x) (x & STACK_MASK) - -#endif +#endif /* _ASM_CMPXCHG_H */ diff --git a/arch/hexagon/include/asm/exec.h b/arch/hexagon/include/asm/exec.h new file mode 100644 index 000000000000..350e6d497d44 --- /dev/null +++ b/arch/hexagon/include/asm/exec.h @@ -0,0 +1,28 @@ +/* + * Process execution related definitions for the Hexagon architecture + * + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#ifndef _ASM_EXEC_H +#define _ASM_EXEC_H + +/* Should probably shoot for an 8-byte aligned stack pointer */ +#define STACK_MASK (~7) +#define arch_align_stack(x) (x & STACK_MASK) + +#endif /* _ASM_EXEC_H */ diff --git a/arch/hexagon/include/asm/switch_to.h b/arch/hexagon/include/asm/switch_to.h new file mode 100644 index 000000000000..28ca0dfb6064 --- /dev/null +++ b/arch/hexagon/include/asm/switch_to.h @@ -0,0 +1,34 @@ +/* + * Task switching definitions for the Hexagon architecture + * + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#ifndef _ASM_SWITCH_TO_H +#define _ASM_SWITCH_TO_H + +struct thread_struct; + +extern struct task_struct *__switch_to(struct task_struct *, + struct task_struct *, + struct task_struct *); + +#define switch_to(p, n, r) do {\ + r = __switch_to((p), (n), (r));\ +} while (0) + +#endif /* _ASM_SWITCH_TO_H */ diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c index bea3f08470fd..32342de1a79c 100644 --- a/arch/hexagon/kernel/ptrace.c +++ b/arch/hexagon/kernel/ptrace.c @@ -29,7 +29,6 @@ #include <linux/regset.h> #include <linux/user.h> -#include <asm/system.h> #include <asm/user.h> static int genregs_get(struct task_struct *target, diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c index 0123c63e9a3a..15d1fd22bbc5 100644 --- a/arch/hexagon/kernel/smp.c +++ b/arch/hexagon/kernel/smp.c @@ -29,7 +29,6 @@ #include <linux/smp.h> #include <linux/spinlock.h> -#include <asm/system.h> /* xchg */ #include <asm/time.h> /* timer_interrupt */ #include <asm/hexagon_vm.h> diff --git a/arch/hexagon/kernel/vm_events.c b/arch/hexagon/kernel/vm_events.c index 986a081e32ec..591fc1b68635 100644 --- a/arch/hexagon/kernel/vm_events.c +++ b/arch/hexagon/kernel/vm_events.c @@ -22,7 +22,6 @@ #include <asm/registers.h> #include <linux/irq.h> #include <linux/hardirq.h> -#include <asm/system.h> /* * show_regs - print pt_regs structure diff --git a/arch/ia64/dig/setup.c b/arch/ia64/dig/setup.c index 9196b330ff7f..98131e1db7a0 100644 --- a/arch/ia64/dig/setup.c +++ b/arch/ia64/dig/setup.c @@ -22,7 +22,7 @@ #include <asm/io.h> #include <asm/machvec.h> -#include <asm/system.h> +#include <asm/setup.h> void __init dig_setup (char **cmdline_p) diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index f5f4ef149aac..f6ea3a3b4a84 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -43,7 +43,6 @@ #include <asm/io.h> #include <asm/page.h> /* PAGE_OFFSET */ #include <asm/dma.h> -#include <asm/system.h> /* wmb() */ #include <asm/acpi-ext.h> diff --git a/arch/ia64/hp/sim/boot/bootloader.c b/arch/ia64/hp/sim/boot/bootloader.c index c5e9baafafe0..28f4b230b8cb 100644 --- a/arch/ia64/hp/sim/boot/bootloader.c +++ b/arch/ia64/hp/sim/boot/bootloader.c @@ -20,7 +20,6 @@ struct task_struct; /* forward declaration for elf.h */ #include <asm/pal.h> #include <asm/pgtable.h> #include <asm/sal.h> -#include <asm/system.h> #include "ssc.h" diff --git a/arch/ia64/hp/sim/boot/fw-emu.c b/arch/ia64/hp/sim/boot/fw-emu.c index 0216e28300fa..271f412bda1a 100644 --- a/arch/ia64/hp/sim/boot/fw-emu.c +++ b/arch/ia64/hp/sim/boot/fw-emu.c @@ -13,6 +13,7 @@ #include <asm/io.h> #include <asm/pal.h> #include <asm/sal.h> +#include <asm/setup.h> #include "ssc.h" diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c index a63218e1f6c9..c13064e422df 100644 --- a/arch/ia64/hp/sim/simeth.c +++ b/arch/ia64/hp/sim/simeth.c @@ -20,7 +20,6 @@ #include <linux/skbuff.h> #include <linux/notifier.h> #include <linux/bitops.h> -#include <asm/system.h> #include <asm/irq.h> #include <asm/hpsim.h> diff 
--git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index a06dfb13d518..301609c3fcec 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -32,7 +32,6 @@ #include <linux/init.h> #include <linux/numa.h> -#include <asm/system.h> #include <asm/numa.h> #define COMPILER_DEPENDENT_INT64 long diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 3fad89ee01cb..7d9116600a36 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -15,7 +15,6 @@ #include <linux/types.h> #include <asm/intrinsics.h> -#include <asm/system.h> #define ATOMIC_INIT(i) ((atomic_t) { (i) }) diff --git a/arch/ia64/include/asm/auxvec.h b/arch/ia64/include/asm/auxvec.h index 23cebe5685b9..58277fc650ef 100644 --- a/arch/ia64/include/asm/auxvec.h +++ b/arch/ia64/include/asm/auxvec.h @@ -8,4 +8,6 @@ #define AT_SYSINFO 32 #define AT_SYSINFO_EHDR 33 +#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */ + #endif /* _ASM_IA64_AUXVEC_H */ diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h new file mode 100644 index 000000000000..60576e06b6fb --- /dev/null +++ b/arch/ia64/include/asm/barrier.h @@ -0,0 +1,68 @@ +/* + * Memory barrier definitions. This is based on information published + * in the Processor Abstraction Layer and the System Abstraction Layer + * manual. + * + * Copyright (C) 1998-2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> + * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> + */ +#ifndef _ASM_IA64_BARRIER_H +#define _ASM_IA64_BARRIER_H + +#include <linux/compiler.h> + +/* + * Macros to force memory ordering. In these descriptions, "previous" + * and "subsequent" refer to program order; "visible" means that all + * architecturally visible effects of a memory access have occurred + * (at a minimum, this means the memory has been read or written). + * + * wmb(): Guarantees that all preceding stores to memory- + * like regions are visible before any subsequent + * stores and that all following stores will be + * visible only after all previous stores. + * rmb(): Like wmb(), but for reads. + * mb(): wmb()/rmb() combo, i.e., all previous memory + * accesses are visible before all subsequent + * accesses and vice versa. This is also known as + * a "fence." + * + * Note: "mb()" and its variants cannot be used as a fence to order + * accesses to memory mapped I/O registers. For that, mf.a needs to + * be used. However, we don't want to always use mf.a because (a) + * it's (presumably) much slower than mf and (b) mf.a is supported for + * sequential memory pages only. + */ +#define mb() ia64_mf() +#define rmb() mb() +#define wmb() mb() +#define read_barrier_depends() do { } while(0) + +#ifdef CONFIG_SMP +# define smp_mb() mb() +# define smp_rmb() rmb() +# define smp_wmb() wmb() +# define smp_read_barrier_depends() read_barrier_depends() +#else +# define smp_mb() barrier() +# define smp_rmb() barrier() +# define smp_wmb() barrier() +# define smp_read_barrier_depends() do { } while(0) +#endif + +/* + * XXX check on this ---I suspect what Linus really wants here is + * acquire vs release semantics but we can't discuss this stuff with + * Linus just yet. Grrr... + */ +#define set_mb(var, value) do { (var) = (value); mb(); } while (0) + +/* + * The group barrier in front of the rsm & ssm are necessary to ensure + * that none of the previous instructions in the same group are + * affected by the rsm/ssm. 
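/*
 * Editor's note, not part of the patch: the classic producer/consumer
 * pairing of smp_wmb()/smp_rmb() that the comments in this header
 * describe in the abstract. Only the barrier macros defined above are
 * assumed; the structure and function names are hypothetical.
 */
struct msg_sketch {
	int payload;
	int ready;
};

static void publish_sketch(struct msg_sketch *m, int v)
{
	m->payload = v;		/* 1: fill in the data */
	smp_wmb();		/* 2: make the data visible before the flag */
	m->ready = 1;		/* 3: only then publish the flag */
}

static int consume_sketch(struct msg_sketch *m)
{
	if (!m->ready)		/* 1: flag not seen yet, nothing to read */
		return -1;
	smp_rmb();		/* 2: order the flag read before the data read */
	return m->payload;	/* 3: guaranteed to see the payload from step 1 */
}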
+ */ + +#endif /* _ASM_IA64_BARRIER_H */ diff --git a/arch/ia64/include/asm/exec.h b/arch/ia64/include/asm/exec.h new file mode 100644 index 000000000000..b26242490e36 --- /dev/null +++ b/arch/ia64/include/asm/exec.h @@ -0,0 +1,14 @@ +/* + * Process execution defines. + * + * Copyright (C) 1998-2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> + * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> + */ +#ifndef _ASM_IA64_EXEC_H +#define _ASM_IA64_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* _ASM_IA64_EXEC_H */ diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h index 8428525ddb22..0ab82cc2dc8f 100644 --- a/arch/ia64/include/asm/futex.h +++ b/arch/ia64/include/asm/futex.h @@ -4,7 +4,6 @@ #include <linux/futex.h> #include <linux/uaccess.h> #include <asm/errno.h> -#include <asm/system.h> #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ do { \ diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index e5a6c3530c6c..2c26321c28c3 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h @@ -71,7 +71,6 @@ extern unsigned int num_io_spaces; #include <asm/intrinsics.h> #include <asm/machvec.h> #include <asm/page.h> -#include <asm/system.h> #include <asm-generic/iomap.h> /* diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h index f82d6be2ecd2..2b68d856dc78 100644 --- a/arch/ia64/include/asm/irqflags.h +++ b/arch/ia64/include/asm/irqflags.h @@ -10,6 +10,8 @@ #ifndef _ASM_IA64_IRQFLAGS_H #define _ASM_IA64_IRQFLAGS_H +#include <asm/pal.h> + #ifdef CONFIG_IA64_DEBUG_IRQ extern unsigned long last_cli_ip; static inline void arch_maybe_save_ip(unsigned long flags) diff --git a/arch/ia64/include/asm/kexec.h b/arch/ia64/include/asm/kexec.h index e1d58f819d78..aea2b81b03a3 100644 --- a/arch/ia64/include/asm/kexec.h +++ b/arch/ia64/include/asm/kexec.h @@ -1,6 +1,7 @@ #ifndef _ASM_IA64_KEXEC_H #define _ASM_IA64_KEXEC_H +#include <asm/setup.h> /* Maximum physical address we can use pages from */ #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h index bc90c75adf67..b9f82c84f093 100644 --- a/arch/ia64/include/asm/kvm.h +++ b/arch/ia64/include/asm/kvm.h @@ -261,4 +261,8 @@ struct kvm_debug_exit_arch { struct kvm_guest_debug_arch { }; +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + #endif diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index 2689ee54a1c9..e35b3a84a40b 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h @@ -459,6 +459,9 @@ struct kvm_sal_data { unsigned long boot_gp; }; +struct kvm_arch_memory_slot { +}; + struct kvm_arch { spinlock_t dirty_log_lock; diff --git a/arch/ia64/include/asm/mca_asm.h b/arch/ia64/include/asm/mca_asm.h index dd2a5b134390..13c1d4994d49 100644 --- a/arch/ia64/include/asm/mca_asm.h +++ b/arch/ia64/include/asm/mca_asm.h @@ -15,6 +15,8 @@ #ifndef _ASM_IA64_MCA_ASM_H #define _ASM_IA64_MCA_ASM_H +#include <asm/percpu.h> + #define PSR_IC 13 #define PSR_I 14 #define PSR_DT 17 diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h index 961a16f43e6b..f1e1b2e3cdb3 100644 --- a/arch/ia64/include/asm/page.h +++ b/arch/ia64/include/asm/page.h @@ -221,4 +221,14 @@ get_order (unsigned long size) (((current->personality & READ_IMPLIES_EXEC) != 0) \ ? 
VM_EXEC : 0)) +#define GATE_ADDR RGN_BASE(RGN_GATE) + +/* + * 0xa000000000000000+2*PERCPU_PAGE_SIZE + * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page) + */ +#define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000)) +#define PERCPU_ADDR (-PERCPU_PAGE_SIZE) +#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE) + #endif /* _ASM_IA64_PAGE_H */ diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index b22e5f5fa593..5e04b591e423 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h @@ -11,6 +11,14 @@ #include <asm/scatterlist.h> #include <asm/hw_irq.h> +struct pci_vector_struct { + __u16 segment; /* PCI Segment number */ + __u16 bus; /* PCI Bus number */ + __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */ + __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */ + __u32 irq; /* IRQ assigned */ +}; + /* * Can be used to override the logic in pci_scan_bus for skipping already-configured bus * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 1a97af31ef17..815810cbbedc 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -16,7 +16,6 @@ #include <asm/mman.h> #include <asm/page.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/types.h> #define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */ diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index 691be0b95c1e..483f6c6a4238 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h @@ -19,6 +19,9 @@ #include <asm/ptrace.h> #include <asm/ustack.h> +#define __ARCH_WANT_UNLOCKED_CTXSW +#define ARCH_HAS_PREFETCH_SWITCH_STACK + #define IA64_NUM_PHYS_STACK_REG 96 #define IA64_NUM_DBG_REGS 8 @@ -720,6 +723,11 @@ extern unsigned long boot_option_idle_override; enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT, IDLE_NOMWAIT, IDLE_POLL}; +void cpu_idle_wait(void); +void default_idle(void); + +#define ia64_platform_is(x) (strcmp(x, platform_name) == 0) + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_IA64_PROCESSOR_H */ diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h index d19ddba4e327..e504f382115e 100644 --- a/arch/ia64/include/asm/sal.h +++ b/arch/ia64/include/asm/sal.h @@ -40,7 +40,6 @@ #include <linux/efi.h> #include <asm/pal.h> -#include <asm/system.h> #include <asm/fpu.h> extern spinlock_t sal_lock; diff --git a/arch/ia64/include/asm/setup.h b/arch/ia64/include/asm/setup.h index 4399a44355b3..8d56458310b3 100644 --- a/arch/ia64/include/asm/setup.h +++ b/arch/ia64/include/asm/setup.h @@ -3,4 +3,22 @@ #define COMMAND_LINE_SIZE 2048 +extern struct ia64_boot_param { + __u64 command_line; /* physical address of command line arguments */ + __u64 efi_systab; /* physical address of EFI system table */ + __u64 efi_memmap; /* physical address of EFI memory map */ + __u64 efi_memmap_size; /* size of EFI memory map */ + __u64 efi_memdesc_size; /* size of an EFI memory map descriptor */ + __u32 efi_memdesc_version; /* memory descriptor version */ + struct { + __u16 num_cols; /* number of columns on console output device */ + __u16 num_rows; /* number of rows on console output device */ + __u16 orig_x; /* cursor's x position */ + __u16 orig_y; /* cursor's y position */ + } console_info; + __u64 fpswa; /* physical address of the fpswa interface */ + __u64 initrd_start; 
+ __u64 initrd_size; +} *ia64_boot_param; + #endif diff --git a/arch/ia64/include/asm/sn/pda.h b/arch/ia64/include/asm/sn/pda.h index 1c5108d44d8b..22ae358c8d16 100644 --- a/arch/ia64/include/asm/sn/pda.h +++ b/arch/ia64/include/asm/sn/pda.h @@ -10,7 +10,6 @@ #include <linux/cache.h> #include <asm/percpu.h> -#include <asm/system.h> /* diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index b77768d35f93..54ff557d474e 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -15,7 +15,6 @@ #include <linux/atomic.h> #include <asm/intrinsics.h> -#include <asm/system.h> #define arch_spin_lock_init(x) ((x)->lock = 0) diff --git a/arch/ia64/include/asm/switch_to.h b/arch/ia64/include/asm/switch_to.h new file mode 100644 index 000000000000..cb2412fcd17f --- /dev/null +++ b/arch/ia64/include/asm/switch_to.h @@ -0,0 +1,87 @@ +/* + * Low-level task switching. This is based on information published in + * the Processor Abstraction Layer and the System Abstraction Layer + * manual. + * + * Copyright (C) 1998-2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> + * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> + */ +#ifndef _ASM_IA64_SWITCH_TO_H +#define _ASM_IA64_SWITCH_TO_H + +#include <linux/percpu.h> + +struct task_struct; + +/* + * Context switch from one thread to another. If the two threads have + * different address spaces, schedule() has already taken care of + * switching to the new address space by calling switch_mm(). + * + * Disabling access to the fph partition and the debug-register + * context switch MUST be done before calling ia64_switch_to() since a + * newly created thread returns directly to + * ia64_ret_from_syscall_clear_r8. + */ +extern struct task_struct *ia64_switch_to (void *next_task); + +extern void ia64_save_extra (struct task_struct *task); +extern void ia64_load_extra (struct task_struct *task); + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING +extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next); +# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n) +#else +# define IA64_ACCOUNT_ON_SWITCH(p,n) +#endif + +#ifdef CONFIG_PERFMON + DECLARE_PER_CPU(unsigned long, pfm_syst_info); +# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1) +#else +# define PERFMON_IS_SYSWIDE() (0) +#endif + +#define IA64_HAS_EXTRA_STATE(t) \ + ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \ + || PERFMON_IS_SYSWIDE()) + +#define __switch_to(prev,next,last) do { \ + IA64_ACCOUNT_ON_SWITCH(prev, next); \ + if (IA64_HAS_EXTRA_STATE(prev)) \ + ia64_save_extra(prev); \ + if (IA64_HAS_EXTRA_STATE(next)) \ + ia64_load_extra(next); \ + ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \ + (last) = ia64_switch_to((next)); \ +} while (0) + +#ifdef CONFIG_SMP +/* + * In the SMP case, we save the fph state when context-switching away from a thread that + * modified fph. This way, when the thread gets scheduled on another CPU, the CPU can + * pick up the state from task->thread.fph, avoiding the complication of having to fetch + * the latest fph state from another CPU. In other words: eager save, lazy restore. 
+ */ +# define switch_to(prev,next,last) do { \ + if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \ + ia64_psr(task_pt_regs(prev))->mfh = 0; \ + (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \ + __ia64_save_fpu((prev)->thread.fph); \ + } \ + __switch_to(prev, next, last); \ + /* "next" in old context is "current" in new context */ \ + if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) && \ + (task_cpu(current) != \ + task_thread_info(current)->last_cpu))) { \ + platform_migrate(current); \ + task_thread_info(current)->last_cpu = task_cpu(current); \ + } \ +} while (0) +#else +# define switch_to(prev,next,last) __switch_to(prev, next, last) +#endif + +#endif /* _ASM_IA64_SWITCH_TO_H */ diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h deleted file mode 100644 index 6cca30705d50..000000000000 --- a/arch/ia64/include/asm/system.h +++ /dev/null @@ -1,203 +0,0 @@ -#ifndef _ASM_IA64_SYSTEM_H -#define _ASM_IA64_SYSTEM_H - -/* - * System defines. Note that this is included both from .c and .S - * files, so it does only defines, not any C code. This is based - * on information published in the Processor Abstraction Layer - * and the System Abstraction Layer manual. - * - * Copyright (C) 1998-2003 Hewlett-Packard Co - * David Mosberger-Tang <davidm@hpl.hp.com> - * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> - * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> - */ - -#include <asm/kregs.h> -#include <asm/page.h> -#include <asm/pal.h> -#include <asm/percpu.h> - -#define GATE_ADDR RGN_BASE(RGN_GATE) - -/* - * 0xa000000000000000+2*PERCPU_PAGE_SIZE - * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page) - */ -#define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000)) -#define PERCPU_ADDR (-PERCPU_PAGE_SIZE) -#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE) - -#ifndef __ASSEMBLY__ - -#include <linux/kernel.h> -#include <linux/types.h> - -#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */ - -struct pci_vector_struct { - __u16 segment; /* PCI Segment number */ - __u16 bus; /* PCI Bus number */ - __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */ - __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */ - __u32 irq; /* IRQ assigned */ -}; - -extern struct ia64_boot_param { - __u64 command_line; /* physical address of command line arguments */ - __u64 efi_systab; /* physical address of EFI system table */ - __u64 efi_memmap; /* physical address of EFI memory map */ - __u64 efi_memmap_size; /* size of EFI memory map */ - __u64 efi_memdesc_size; /* size of an EFI memory map descriptor */ - __u32 efi_memdesc_version; /* memory descriptor version */ - struct { - __u16 num_cols; /* number of columns on console output device */ - __u16 num_rows; /* number of rows on console output device */ - __u16 orig_x; /* cursor's x position */ - __u16 orig_y; /* cursor's y position */ - } console_info; - __u64 fpswa; /* physical address of the fpswa interface */ - __u64 initrd_start; - __u64 initrd_size; -} *ia64_boot_param; - -/* - * Macros to force memory ordering. In these descriptions, "previous" - * and "subsequent" refer to program order; "visible" means that all - * architecturally visible effects of a memory access have occurred - * (at a minimum, this means the memory has been read or written). 
- * - * wmb(): Guarantees that all preceding stores to memory- - * like regions are visible before any subsequent - * stores and that all following stores will be - * visible only after all previous stores. - * rmb(): Like wmb(), but for reads. - * mb(): wmb()/rmb() combo, i.e., all previous memory - * accesses are visible before all subsequent - * accesses and vice versa. This is also known as - * a "fence." - * - * Note: "mb()" and its variants cannot be used as a fence to order - * accesses to memory mapped I/O registers. For that, mf.a needs to - * be used. However, we don't want to always use mf.a because (a) - * it's (presumably) much slower than mf and (b) mf.a is supported for - * sequential memory pages only. - */ -#define mb() ia64_mf() -#define rmb() mb() -#define wmb() mb() -#define read_barrier_depends() do { } while(0) - -#ifdef CONFIG_SMP -# define smp_mb() mb() -# define smp_rmb() rmb() -# define smp_wmb() wmb() -# define smp_read_barrier_depends() read_barrier_depends() -#else -# define smp_mb() barrier() -# define smp_rmb() barrier() -# define smp_wmb() barrier() -# define smp_read_barrier_depends() do { } while(0) -#endif - -/* - * XXX check on this ---I suspect what Linus really wants here is - * acquire vs release semantics but we can't discuss this stuff with - * Linus just yet. Grrr... - */ -#define set_mb(var, value) do { (var) = (value); mb(); } while (0) - -/* - * The group barrier in front of the rsm & ssm are necessary to ensure - * that none of the previous instructions in the same group are - * affected by the rsm/ssm. - */ - -#ifdef __KERNEL__ - -/* - * Context switch from one thread to another. If the two threads have - * different address spaces, schedule() has already taken care of - * switching to the new address space by calling switch_mm(). - * - * Disabling access to the fph partition and the debug-register - * context switch MUST be done before calling ia64_switch_to() since a - * newly created thread returns directly to - * ia64_ret_from_syscall_clear_r8. - */ -extern struct task_struct *ia64_switch_to (void *next_task); - -struct task_struct; - -extern void ia64_save_extra (struct task_struct *task); -extern void ia64_load_extra (struct task_struct *task); - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING -extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next); -# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n) -#else -# define IA64_ACCOUNT_ON_SWITCH(p,n) -#endif - -#ifdef CONFIG_PERFMON - DECLARE_PER_CPU(unsigned long, pfm_syst_info); -# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1) -#else -# define PERFMON_IS_SYSWIDE() (0) -#endif - -#define IA64_HAS_EXTRA_STATE(t) \ - ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \ - || PERFMON_IS_SYSWIDE()) - -#define __switch_to(prev,next,last) do { \ - IA64_ACCOUNT_ON_SWITCH(prev, next); \ - if (IA64_HAS_EXTRA_STATE(prev)) \ - ia64_save_extra(prev); \ - if (IA64_HAS_EXTRA_STATE(next)) \ - ia64_load_extra(next); \ - ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \ - (last) = ia64_switch_to((next)); \ -} while (0) - -#ifdef CONFIG_SMP -/* - * In the SMP case, we save the fph state when context-switching away from a thread that - * modified fph. This way, when the thread gets scheduled on another CPU, the CPU can - * pick up the state from task->thread.fph, avoiding the complication of having to fetch - * the latest fph state from another CPU. In other words: eager save, lazy restore. 
- */ -# define switch_to(prev,next,last) do { \ - if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \ - ia64_psr(task_pt_regs(prev))->mfh = 0; \ - (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \ - __ia64_save_fpu((prev)->thread.fph); \ - } \ - __switch_to(prev, next, last); \ - /* "next" in old context is "current" in new context */ \ - if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) && \ - (task_cpu(current) != \ - task_thread_info(current)->last_cpu))) { \ - platform_migrate(current); \ - task_thread_info(current)->last_cpu = task_cpu(current); \ - } \ -} while (0) -#else -# define switch_to(prev,next,last) __switch_to(prev, next, last) -#endif - -#define __ARCH_WANT_UNLOCKED_CTXSW -#define ARCH_HAS_PREFETCH_SWITCH_STACK -#define ia64_platform_is(x) (strcmp(x, platform_name) == 0) - -void cpu_idle_wait(void); - -#define arch_align_stack(x) (x) - -void default_idle(void); - -#endif /* __KERNEL__ */ - -#endif /* __ASSEMBLY__ */ - -#endif /* _ASM_IA64_SYSTEM_H */ diff --git a/arch/ia64/include/asm/uv/uv.h b/arch/ia64/include/asm/uv/uv.h index 61b5bdfd980e..8f6cbaa742e9 100644 --- a/arch/ia64/include/asm/uv/uv.h +++ b/arch/ia64/include/asm/uv/uv.h @@ -1,7 +1,6 @@ #ifndef _ASM_IA64_UV_UV_H #define _ASM_IA64_UV_UV_H -#include <asm/system.h> #include <asm/sn/simulator.h> static inline int is_uv_system(void) diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 19bb1eefffb4..ac795d311f44 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -50,7 +50,6 @@ #include <asm/iosapic.h> #include <asm/machvec.h> #include <asm/page.h> -#include <asm/system.h> #include <asm/numa.h> #include <asm/sal.h> #include <asm/cyclone.h> diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index c38d22e5e902..d37bbd48637f 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -39,6 +39,7 @@ #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/mca.h> +#include <asm/setup.h> #include <asm/tlbflush.h> #define EFI_DEBUG 0 diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 331d42bda77a..f15d8601827f 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -21,7 +21,6 @@ #include <asm/thread_info.h> #include <asm/sal.h> #include <asm/signal.h> -#include <asm/system.h> #include <asm/unistd.h> #include "entry.h" diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S index 245d3e1ec7e1..b5f8bdd8618e 100644 --- a/arch/ia64/kernel/gate.S +++ b/arch/ia64/kernel/gate.S @@ -11,8 +11,9 @@ #include <asm/errno.h> #include <asm/asm-offsets.h> #include <asm/sigcontext.h> -#include <asm/system.h> #include <asm/unistd.h> +#include <asm/kregs.h> +#include <asm/page.h> #include "paravirt_inst.h" /* diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S index d32b0855110a..e518f7902af6 100644 --- a/arch/ia64/kernel/gate.lds.S +++ b/arch/ia64/kernel/gate.lds.S @@ -5,8 +5,7 @@ * its layout. 
*/ - -#include <asm/system.h> +#include <asm/page.h> #include "paravirt_patchlist.h" SECTIONS diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 17a9fba38930..629a250f7c19 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -30,7 +30,6 @@ #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/mca_asm.h> #include <linux/init.h> #include <linux/linkage.h> diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index b0f9afebb146..ef4b5d877cf2 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -98,7 +98,6 @@ #include <asm/machvec.h> #include <asm/processor.h> #include <asm/ptrace.h> -#include <asm/system.h> #undef DEBUG_INTERRUPT_ROUTING diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 51da77226b29..5c3e0888265a 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -39,7 +39,6 @@ #include <asm/hw_irq.h> #include <asm/machvec.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/tlbflush.h> #ifdef CONFIG_PERFMON diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index d93e396bf599..fa25689fc453 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S @@ -54,7 +54,6 @@ #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/thread_info.h> #include <asm/unistd.h> #include <asm/errno.h> diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c index d41a40ef80c0..f5a1e5246b3e 100644 --- a/arch/ia64/kernel/machvec.c +++ b/arch/ia64/kernel/machvec.c @@ -1,7 +1,6 @@ #include <linux/module.h> #include <linux/dma-mapping.h> #include <asm/machvec.h> -#include <asm/system.h> #ifdef CONFIG_IA64_GENERIC diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 26dbbd3c3053..65bf9cd39044 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -92,7 +92,6 @@ #include <asm/meminit.h> #include <asm/page.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/sal.h> #include <asm/mca.h> #include <asm/kexec.h> diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index 09b4d6828c45..1c2e89406721 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -28,7 +28,6 @@ #include <asm/machvec.h> #include <asm/page.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/sal.h> #include <asm/mca.h> diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c index 68a1311db806..1cf091793714 100644 --- a/arch/ia64/kernel/patch.c +++ b/arch/ia64/kernel/patch.c @@ -11,7 +11,6 @@ #include <asm/patch.h> #include <asm/processor.h> #include <asm/sections.h> -#include <asm/system.h> #include <asm/unistd.h> /* diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c index eb1175720050..7cdc89b2483c 100644 --- a/arch/ia64/kernel/pci-dma.c +++ b/arch/ia64/kernel/pci-dma.c @@ -12,7 +12,6 @@ #include <asm/machvec.h> #include <linux/dma-mapping.h> -#include <asm/system.h> #ifdef CONFIG_INTEL_IOMMU diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index b2c65e034f5d..9d0fd7d5bb82 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -49,7 +49,6 @@ #include <asm/perfmon.h> #include <asm/processor.h> #include <asm/signal.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/delay.h> diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index dad91661ddf9..4265ff64219b 100644 --- 
a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -26,7 +26,6 @@ #include <asm/processor.h> #include <asm/ptrace_offsets.h> #include <asm/rse.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/unwind.h> #ifdef CONFIG_PERFMON diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 4d1a5508a0ed..aaefd9b94f2f 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -59,7 +59,6 @@ #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp.h> -#include <asm/system.h> #include <asm/tlbflush.h> #include <asm/unistd.h> #include <asm/hpsim.h> diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 855197981962..9fcd4e63048f 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -44,7 +44,6 @@ #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/sal.h> -#include <asm/system.h> #include <asm/tlbflush.h> #include <asm/unistd.h> #include <asm/mca.h> diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 90916beddf07..796f6a5b966a 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -55,7 +55,6 @@ #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/sal.h> -#include <asm/system.h> #include <asm/tlbflush.h> #include <asm/unistd.h> #include <asm/sn/arch.h> diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 43920de425f1..aa94bdda9de8 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -29,7 +29,6 @@ #include <asm/ptrace.h> #include <asm/sal.h> #include <asm/sections.h> -#include <asm/system.h> #include "fsyscall_gtod_data.h" diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index fd80e70018a9..bd42b76000d1 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -22,6 +22,7 @@ #include <asm/intrinsics.h> #include <asm/processor.h> #include <asm/uaccess.h> +#include <asm/setup.h> fpswa_interface_t *fpswa_interface; EXPORT_SYMBOL(fpswa_interface); diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index 6a867dc45c05..a96bcf83a735 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -23,7 +23,6 @@ #include <linux/gfp.h> #include <asm/page.h> #include <asm/pal.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <linux/atomic.h> #include <asm/tlbflush.h> diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c index fed6afa2e8a9..8f66195999e7 100644 --- a/arch/ia64/kernel/unwind.c +++ b/arch/ia64/kernel/unwind.c @@ -41,7 +41,6 @@ #include <asm/ptrace_offsets.h> #include <asm/rse.h> #include <asm/sections.h> -#include <asm/system.h> #include <asm/uaccess.h> #include "entry.h" diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 53c0ba004e9e..0ccb28fab27e 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -1,7 +1,6 @@ #include <asm/cache.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm-generic/vmlinux.lds.h> diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 405052002493..f5104b7c52cd 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -809,10 +809,13 @@ static void kvm_build_io_pmt(struct kvm *kvm) #define GUEST_PHYSICAL_RR4 0x2739 #define VMM_INIT_RR 0x1660 -int kvm_arch_init_vm(struct kvm *kvm) +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { BUG_ON(!kvm); + if (type) + return -EINVAL; + kvm->arch.is_sn2 = ia64_platform_is("sn2"); kvm->arch.metaphysical_rr0 = 
GUEST_PHYSICAL_RR0; @@ -1169,6 +1172,11 @@ out: #define PALE_RESET_ENTRY 0x80000000ffffffb0UL +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) +{ + return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL); +} + int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct kvm_vcpu *v; @@ -1563,6 +1571,21 @@ out: return r; } +int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ + return VM_FAULT_SIGBUS; +} + +void kvm_arch_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ +} + +int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +{ + return 0; +} + int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 20b359376128..02d29c2a132a 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -14,7 +14,6 @@ #include <asm/pgtable.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> extern int die(char *, struct pt_regs *, long); diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 13df239dbed1..0eab454867a2 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -30,7 +30,6 @@ #include <asm/pgalloc.h> #include <asm/sal.h> #include <asm/sections.h> -#include <asm/system.h> #include <asm/tlb.h> #include <asm/uaccess.h> #include <asm/unistd.h> diff --git a/arch/ia64/oprofile/backtrace.c b/arch/ia64/oprofile/backtrace.c index f7b798993cea..6a219a946050 100644 --- a/arch/ia64/oprofile/backtrace.c +++ b/arch/ia64/oprofile/backtrace.c @@ -14,7 +14,6 @@ #include <linux/sched.h> #include <linux/mm.h> #include <asm/ptrace.h> -#include <asm/system.h> /* * For IA64 we need to perform a complex little dance to get both diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index d1ce3200147c..524df4295c90 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -24,7 +24,6 @@ #include <asm/machvec.h> #include <asm/page.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/sal.h> #include <asm/smp.h> diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 77db0b514fa4..f82e7b462b7b 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -33,9 +33,9 @@ #include <asm/io.h> #include <asm/sal.h> #include <asm/machvec.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/vga.h> +#include <asm/setup.h> #include <asm/sn/arch.h> #include <asm/sn/addrs.h> #include <asm/sn/pda.h> diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c index e63328818643..20b88cb1881a 100644 --- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c +++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c @@ -12,7 +12,6 @@ #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/nodemask.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/sn/sn_sal.h> #include <asm/sn/sn_cpuid.h> diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index e884ba4e031d..68c845411624 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c @@ -26,7 +26,6 @@ #include <asm/processor.h> #include <asm/irq.h> #include <asm/sal.h> -#include <asm/system.h> #include <asm/delay.h> #include <asm/io.h> #include <asm/smp.h> diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c index 0f8844e49363..abab8f99e913 100644 --- a/arch/ia64/sn/kernel/sn2/timer.c +++ b/arch/ia64/sn/kernel/sn2/timer.c @@ -14,7 +14,6 @@ #include <linux/clocksource.h> 
#include <asm/hw_irq.h> -#include <asm/system.h> #include <asm/timex.h> #include <asm/sn/leds.h> diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 2f406f509d44..14c1711238c0 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c @@ -14,7 +14,6 @@ #include <linux/capability.h> #include <linux/device.h> #include <linux/delay.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/sn/sn_sal.h> #include <asm/sn/addrs.h> diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S index b820ed02ab9f..e29519ebe2d2 100644 --- a/arch/ia64/xen/xensetup.S +++ b/arch/ia64/xen/xensetup.S @@ -7,7 +7,6 @@ #include <asm/processor.h> #include <asm/asmmacro.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/paravirt.h> #include <asm/xen/privop.h> #include <linux/elfnote.h> diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index 1e7f29fb21f2..0d81697c326c 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h @@ -11,7 +11,8 @@ #include <linux/types.h> #include <asm/assembler.h> -#include <asm/system.h> +#include <asm/cmpxchg.h> +#include <asm/dcache_clear.h> /* * Atomic operations that C can't guarantee us. Useful for diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h new file mode 100644 index 000000000000..6976621efd3f --- /dev/null +++ b/arch/m32r/include/asm/barrier.h @@ -0,0 +1,94 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto + * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org> + */ +#ifndef _ASM_M32R_BARRIER_H +#define _ASM_M32R_BARRIER_H + +#define nop() __asm__ __volatile__ ("nop" : : ) + +/* + * Memory barrier. + * + * mb() prevents loads and stores being reordered across this point. + * rmb() prevents loads being reordered across this point. + * wmb() prevents stores being reordered across this point. + */ +#define mb() barrier() +#define rmb() mb() +#define wmb() mb() + +/** + * read_barrier_depends - Flush all pending reads that subsequents reads + * depend on. + * + * No data-dependent reads from memory-like regions are ever reordered + * over this barrier. All reads preceding this primitive are guaranteed + * to access memory (but not necessarily other CPUs' caches) before any + * reads following this primitive that depend on the data return by + * any of the preceding reads. This primitive is much lighter weight than + * rmb() on most CPUs, and is never heavier weight than is + * rmb(). + * + * These ordering constraints are respected by both the local CPU + * and the compiler. + * + * Ordering is not guaranteed by anything other than these primitives, + * not even by data dependencies. See the documentation for + * memory_barrier() for examples and URLs to more information. + * + * For example, the following code would force ordering (the initial + * value of "a" is zero, "b" is one, and "p" is "&a"): + * + * <programlisting> + * CPU 0 CPU 1 + * + * b = 2; + * memory_barrier(); + * p = &b; q = p; + * read_barrier_depends(); + * d = *q; + * </programlisting> + * + * + * because the read of "*q" depends on the read of "p" and these + * two reads are separated by a read_barrier_depends(). 
However, + * the following code, with the same initial values for "a" and "b": + * + * <programlisting> + * CPU 0 CPU 1 + * + * a = 2; + * memory_barrier(); + * b = 3; y = b; + * read_barrier_depends(); + * x = a; + * </programlisting> + * + * does not enforce ordering, since there is no data dependency between + * the read of "a" and the read of "b". Therefore, on some CPUs, such + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() + * in cases like this where there are no data dependencies. + **/ + +#define read_barrier_depends() do { } while (0) + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() +#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while (0) +#define set_mb(var, value) do { var = value; barrier(); } while (0) +#endif + +#endif /* _ASM_M32R_BARRIER_H */ diff --git a/arch/m32r/include/asm/bitops.h b/arch/m32r/include/asm/bitops.h index 6300f22cdbdb..d3dea9ac7d4e 100644 --- a/arch/m32r/include/asm/bitops.h +++ b/arch/m32r/include/asm/bitops.h @@ -16,9 +16,10 @@ #endif #include <linux/compiler.h> +#include <linux/irqflags.h> #include <asm/assembler.h> -#include <asm/system.h> #include <asm/byteorder.h> +#include <asm/dcache_clear.h> #include <asm/types.h> /* diff --git a/arch/m32r/include/asm/cmpxchg.h b/arch/m32r/include/asm/cmpxchg.h new file mode 100644 index 000000000000..de651db20b43 --- /dev/null +++ b/arch/m32r/include/asm/cmpxchg.h @@ -0,0 +1,221 @@ +#ifndef _ASM_M32R_CMPXCHG_H +#define _ASM_M32R_CMPXCHG_H + +/* + * M32R version: + * Copyright (C) 2001, 2002 Hitoshi Yamamoto + * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org> + */ + +#include <linux/irqflags.h> +#include <asm/assembler.h> +#include <asm/dcache_clear.h> + +extern void __xchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +__xchg(unsigned long x, volatile void *ptr, int size) +{ + unsigned long flags; + unsigned long tmp = 0; + + local_irq_save(flags); + + switch (size) { +#ifndef CONFIG_SMP + case 1: + __asm__ __volatile__ ( + "ldb %0, @%2 \n\t" + "stb %1, @%2 \n\t" + : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); + break; + case 2: + __asm__ __volatile__ ( + "ldh %0, @%2 \n\t" + "sth %1, @%2 \n\t" + : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); + break; + case 4: + __asm__ __volatile__ ( + "ld %0, @%2 \n\t" + "st %1, @%2 \n\t" + : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); + break; +#else /* CONFIG_SMP */ + case 4: + __asm__ __volatile__ ( + DCACHE_CLEAR("%0", "r4", "%2") + "lock %0, @%2; \n\t" + "unlock %1, @%2; \n\t" + : "=&r" (tmp) : "r" (x), "r" (ptr) + : "memory" +#ifdef CONFIG_CHIP_M32700_TS1 + , "r4" +#endif /* CONFIG_CHIP_M32700_TS1 */ + ); + break; +#endif /* CONFIG_SMP */ + default: + __xchg_called_with_bad_pointer(); + } + + local_irq_restore(flags); + + return (tmp); +} + +#define xchg(ptr, x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) + +static __always_inline unsigned long +__xchg_local(unsigned long x, volatile void *ptr, int size) +{ + unsigned long flags; + unsigned long tmp = 0; + + local_irq_save(flags); + + switch (size) { + case 1: + __asm__ __volatile__ ( + "ldb %0, @%2 \n\t" + "stb %1, @%2 \n\t" + : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); + break; + case 2: + __asm__ __volatile__ ( + "ldh %0, @%2 \n\t" + "sth %1, @%2 \n\t" + : "=&r" (tmp) 
: "r" (x), "r" (ptr) : "memory"); + break; + case 4: + __asm__ __volatile__ ( + "ld %0, @%2 \n\t" + "st %1, @%2 \n\t" + : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); + break; + default: + __xchg_called_with_bad_pointer(); + } + + local_irq_restore(flags); + + return (tmp); +} + +#define xchg_local(ptr, x) \ + ((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr), \ + sizeof(*(ptr)))) + +#define __HAVE_ARCH_CMPXCHG 1 + +static inline unsigned long +__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new) +{ + unsigned long flags; + unsigned int retval; + + local_irq_save(flags); + __asm__ __volatile__ ( + DCACHE_CLEAR("%0", "r4", "%1") + M32R_LOCK" %0, @%1; \n" + " bne %0, %2, 1f; \n" + M32R_UNLOCK" %3, @%1; \n" + " bra 2f; \n" + " .fillinsn \n" + "1:" + M32R_UNLOCK" %0, @%1; \n" + " .fillinsn \n" + "2:" + : "=&r" (retval) + : "r" (p), "r" (old), "r" (new) + : "cbit", "memory" +#ifdef CONFIG_CHIP_M32700_TS1 + , "r4" +#endif /* CONFIG_CHIP_M32700_TS1 */ + ); + local_irq_restore(flags); + + return retval; +} + +static inline unsigned long +__cmpxchg_local_u32(volatile unsigned int *p, unsigned int old, + unsigned int new) +{ + unsigned long flags; + unsigned int retval; + + local_irq_save(flags); + __asm__ __volatile__ ( + DCACHE_CLEAR("%0", "r4", "%1") + "ld %0, @%1; \n" + " bne %0, %2, 1f; \n" + "st %3, @%1; \n" + " bra 2f; \n" + " .fillinsn \n" + "1:" + "st %0, @%1; \n" + " .fillinsn \n" + "2:" + : "=&r" (retval) + : "r" (p), "r" (old), "r" (new) + : "cbit", "memory" +#ifdef CONFIG_CHIP_M32700_TS1 + , "r4" +#endif /* CONFIG_CHIP_M32700_TS1 */ + ); + local_irq_restore(flags); + + return retval; +} + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid cmpxchg(). */ +extern void __cmpxchg_called_with_bad_pointer(void); + +static inline unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) +{ + switch (size) { + case 4: + return __cmpxchg_u32(ptr, old, new); +#if 0 /* we don't have __cmpxchg_u64 */ + case 8: + return __cmpxchg_u64(ptr, old, new); +#endif /* 0 */ + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#define cmpxchg(ptr, o, n) \ + ((__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr)))) + +#include <asm-generic/cmpxchg-local.h> + +static inline unsigned long __cmpxchg_local(volatile void *ptr, + unsigned long old, + unsigned long new, int size) +{ + switch (size) { + case 4: + return __cmpxchg_local_u32(ptr, old, new); + default: + return __cmpxchg_local_generic(ptr, old, new, size); + } + + return old; +} + +/* + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. + */ +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +#endif /* _ASM_M32R_CMPXCHG_H */ diff --git a/arch/m32r/include/asm/dcache_clear.h b/arch/m32r/include/asm/dcache_clear.h new file mode 100644 index 000000000000..a0ae06c2e9e7 --- /dev/null +++ b/arch/m32r/include/asm/dcache_clear.h @@ -0,0 +1,29 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
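/*
 * Editor's note, not part of the patch: a minimal sketch of the
 * link-time size check used by __xchg_called_with_bad_pointer() and
 * __cmpxchg_called_with_bad_pointer() above. The helper is declared
 * but never defined, so when the operand size is a supported constant
 * the default branch is discarded as dead code, while an unsupported
 * size leaves a call to an undefined symbol and the build fails at
 * link time instead of misbehaving at run time. Names below are
 * hypothetical.
 */
extern void __bad_size_sketch(void);	/* deliberately never defined */

static inline unsigned long load_sketch(const volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return *(const volatile unsigned int *)ptr;	/* supported */
	default:
		__bad_size_sketch();	/* compiles, but can never link */
		return 0;
	}
}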
+ * + * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto + * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org> + */ +#ifndef _ASM_M32R_DCACHE_CLEAR_H +#define _ASM_M32R_DCACHE_CLEAR_H + +#ifdef CONFIG_CHIP_M32700_TS1 +#define DCACHE_CLEAR(reg0, reg1, addr) \ + "seth "reg1", #high(dcache_dummy); \n\t" \ + "or3 "reg1", "reg1", #low(dcache_dummy); \n\t" \ + "lock "reg0", @"reg1"; \n\t" \ + "add3 "reg0", "addr", #0x1000; \n\t" \ + "ld "reg0", @"reg0"; \n\t" \ + "add3 "reg0", "addr", #0x2000; \n\t" \ + "ld "reg0", @"reg0"; \n\t" \ + "unlock "reg0", @"reg1"; \n\t" + /* FIXME: This workaround code cannot handle kernel modules + * correctly under SMP environment. + */ +#else /* CONFIG_CHIP_M32700_TS1 */ +#define DCACHE_CLEAR(reg0, reg1, addr) +#endif /* CONFIG_CHIP_M32700_TS1 */ + +#endif /* _ASM_M32R_DCACHE_CLEAR_H */ diff --git a/arch/m32r/include/asm/exec.h b/arch/m32r/include/asm/exec.h new file mode 100644 index 000000000000..c805dbd75b5d --- /dev/null +++ b/arch/m32r/include/asm/exec.h @@ -0,0 +1,14 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto + * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org> + */ +#ifndef _ASM_M32R_EXEC_H +#define _ASM_M32R_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* _ASM_M32R_EXEC_H */ diff --git a/arch/m32r/include/asm/local.h b/arch/m32r/include/asm/local.h index 734bca87018a..4045db3e4f65 100644 --- a/arch/m32r/include/asm/local.h +++ b/arch/m32r/include/asm/local.h @@ -12,7 +12,6 @@ #include <linux/percpu.h> #include <asm/assembler.h> -#include <asm/system.h> #include <asm/local.h> /* diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index b0ea2f26da3b..fa13694eaae3 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h @@ -11,6 +11,7 @@ #include <linux/compiler.h> #include <linux/atomic.h> +#include <asm/dcache_clear.h> #include <asm/page.h> /* diff --git a/arch/m32r/include/asm/switch_to.h b/arch/m32r/include/asm/switch_to.h new file mode 100644 index 000000000000..4b262f7a8fe9 --- /dev/null +++ b/arch/m32r/include/asm/switch_to.h @@ -0,0 +1,51 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto + * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org> + */ +#ifndef _ASM_M32R_SWITCH_TO_H +#define _ASM_M32R_SWITCH_TO_H + +/* + * switch_to(prev, next) should switch from task `prev' to `next' + * `prev' will never be the same as `next'. 
+ * + * `next' and `prev' should be struct task_struct, but it isn't always defined + */ + +#if defined(CONFIG_FRAME_POINTER) || \ + !defined(CONFIG_SCHED_OMIT_FRAME_POINTER) +#define M32R_PUSH_FP " push fp\n" +#define M32R_POP_FP " pop fp\n" +#else +#define M32R_PUSH_FP "" +#define M32R_POP_FP "" +#endif + +#define switch_to(prev, next, last) do { \ + __asm__ __volatile__ ( \ + " seth lr, #high(1f) \n" \ + " or3 lr, lr, #low(1f) \n" \ + " st lr, @%4 ; store old LR \n" \ + " ld lr, @%5 ; load new LR \n" \ + M32R_PUSH_FP \ + " st sp, @%2 ; store old SP \n" \ + " ld sp, @%3 ; load new SP \n" \ + " push %1 ; store `prev' on new stack \n" \ + " jmp lr \n" \ + " .fillinsn \n" \ + "1: \n" \ + " pop %0 ; restore `__last' from new stack \n" \ + M32R_POP_FP \ + : "=r" (last) \ + : "0" (prev), \ + "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \ + "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \ + : "memory", "lr" \ + ); \ +} while(0) + +#endif /* _ASM_M32R_SWITCH_TO_H */ diff --git a/arch/m32r/include/asm/system.h b/arch/m32r/include/asm/system.h deleted file mode 100644 index 13c46794ccb1..000000000000 --- a/arch/m32r/include/asm/system.h +++ /dev/null @@ -1,367 +0,0 @@ -#ifndef _ASM_M32R_SYSTEM_H -#define _ASM_M32R_SYSTEM_H - -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto - * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org> - */ - -#include <linux/compiler.h> -#include <linux/irqflags.h> -#include <asm/assembler.h> - -#ifdef __KERNEL__ - -/* - * switch_to(prev, next) should switch from task `prev' to `next' - * `prev' will never be the same as `next'. 
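/*
 * Editor's note, not part of the patch: a sketch of how the scheduler
 * core uses the three-argument switch_to() defined above. When a task
 * is switched back in it resumes right here, on its own old stack, so
 * the local 'prev' it sees there is stale; the task we really came
 * from is handed back through 'last' instead. Assumes the full
 * task_struct definition from <linux/sched.h>; the caller and cleanup
 * hook below are hypothetical simplifications of the real scheduler
 * path.
 */
extern void finish_switch_sketch(struct task_struct *last);

static void context_switch_sketch(struct task_struct *prev,
				  struct task_struct *next)
{
	struct task_struct *last;

	switch_to(prev, next, last);	/* sp and lr change to those of 'next' */
	finish_switch_sketch(last);	/* 'last' is whoever ran just before us */
}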
- * - * `next' and `prev' should be struct task_struct, but it isn't always defined - */ - -#if defined(CONFIG_FRAME_POINTER) || \ - !defined(CONFIG_SCHED_OMIT_FRAME_POINTER) -#define M32R_PUSH_FP " push fp\n" -#define M32R_POP_FP " pop fp\n" -#else -#define M32R_PUSH_FP "" -#define M32R_POP_FP "" -#endif - -#define switch_to(prev, next, last) do { \ - __asm__ __volatile__ ( \ - " seth lr, #high(1f) \n" \ - " or3 lr, lr, #low(1f) \n" \ - " st lr, @%4 ; store old LR \n" \ - " ld lr, @%5 ; load new LR \n" \ - M32R_PUSH_FP \ - " st sp, @%2 ; store old SP \n" \ - " ld sp, @%3 ; load new SP \n" \ - " push %1 ; store `prev' on new stack \n" \ - " jmp lr \n" \ - " .fillinsn \n" \ - "1: \n" \ - " pop %0 ; restore `__last' from new stack \n" \ - M32R_POP_FP \ - : "=r" (last) \ - : "0" (prev), \ - "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \ - "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \ - : "memory", "lr" \ - ); \ -} while(0) - -#define nop() __asm__ __volatile__ ("nop" : : ) - -#define xchg(ptr, x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) -#define xchg_local(ptr, x) \ - ((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr), \ - sizeof(*(ptr)))) - -extern void __xchg_called_with_bad_pointer(void); - -#ifdef CONFIG_CHIP_M32700_TS1 -#define DCACHE_CLEAR(reg0, reg1, addr) \ - "seth "reg1", #high(dcache_dummy); \n\t" \ - "or3 "reg1", "reg1", #low(dcache_dummy); \n\t" \ - "lock "reg0", @"reg1"; \n\t" \ - "add3 "reg0", "addr", #0x1000; \n\t" \ - "ld "reg0", @"reg0"; \n\t" \ - "add3 "reg0", "addr", #0x2000; \n\t" \ - "ld "reg0", @"reg0"; \n\t" \ - "unlock "reg0", @"reg1"; \n\t" - /* FIXME: This workaround code cannot handle kernel modules - * correctly under SMP environment. - */ -#else /* CONFIG_CHIP_M32700_TS1 */ -#define DCACHE_CLEAR(reg0, reg1, addr) -#endif /* CONFIG_CHIP_M32700_TS1 */ - -static __always_inline unsigned long -__xchg(unsigned long x, volatile void *ptr, int size) -{ - unsigned long flags; - unsigned long tmp = 0; - - local_irq_save(flags); - - switch (size) { -#ifndef CONFIG_SMP - case 1: - __asm__ __volatile__ ( - "ldb %0, @%2 \n\t" - "stb %1, @%2 \n\t" - : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); - break; - case 2: - __asm__ __volatile__ ( - "ldh %0, @%2 \n\t" - "sth %1, @%2 \n\t" - : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); - break; - case 4: - __asm__ __volatile__ ( - "ld %0, @%2 \n\t" - "st %1, @%2 \n\t" - : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); - break; -#else /* CONFIG_SMP */ - case 4: - __asm__ __volatile__ ( - DCACHE_CLEAR("%0", "r4", "%2") - "lock %0, @%2; \n\t" - "unlock %1, @%2; \n\t" - : "=&r" (tmp) : "r" (x), "r" (ptr) - : "memory" -#ifdef CONFIG_CHIP_M32700_TS1 - , "r4" -#endif /* CONFIG_CHIP_M32700_TS1 */ - ); - break; -#endif /* CONFIG_SMP */ - default: - __xchg_called_with_bad_pointer(); - } - - local_irq_restore(flags); - - return (tmp); -} - -static __always_inline unsigned long -__xchg_local(unsigned long x, volatile void *ptr, int size) -{ - unsigned long flags; - unsigned long tmp = 0; - - local_irq_save(flags); - - switch (size) { - case 1: - __asm__ __volatile__ ( - "ldb %0, @%2 \n\t" - "stb %1, @%2 \n\t" - : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); - break; - case 2: - __asm__ __volatile__ ( - "ldh %0, @%2 \n\t" - "sth %1, @%2 \n\t" - : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); - break; - case 4: - __asm__ __volatile__ ( - "ld %0, @%2 \n\t" - "st %1, @%2 \n\t" - : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); - break; - default: - __xchg_called_with_bad_pointer(); - } - - 
local_irq_restore(flags); - - return (tmp); -} - -#define __HAVE_ARCH_CMPXCHG 1 - -static inline unsigned long -__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new) -{ - unsigned long flags; - unsigned int retval; - - local_irq_save(flags); - __asm__ __volatile__ ( - DCACHE_CLEAR("%0", "r4", "%1") - M32R_LOCK" %0, @%1; \n" - " bne %0, %2, 1f; \n" - M32R_UNLOCK" %3, @%1; \n" - " bra 2f; \n" - " .fillinsn \n" - "1:" - M32R_UNLOCK" %0, @%1; \n" - " .fillinsn \n" - "2:" - : "=&r" (retval) - : "r" (p), "r" (old), "r" (new) - : "cbit", "memory" -#ifdef CONFIG_CHIP_M32700_TS1 - , "r4" -#endif /* CONFIG_CHIP_M32700_TS1 */ - ); - local_irq_restore(flags); - - return retval; -} - -static inline unsigned long -__cmpxchg_local_u32(volatile unsigned int *p, unsigned int old, - unsigned int new) -{ - unsigned long flags; - unsigned int retval; - - local_irq_save(flags); - __asm__ __volatile__ ( - DCACHE_CLEAR("%0", "r4", "%1") - "ld %0, @%1; \n" - " bne %0, %2, 1f; \n" - "st %3, @%1; \n" - " bra 2f; \n" - " .fillinsn \n" - "1:" - "st %0, @%1; \n" - " .fillinsn \n" - "2:" - : "=&r" (retval) - : "r" (p), "r" (old), "r" (new) - : "cbit", "memory" -#ifdef CONFIG_CHIP_M32700_TS1 - , "r4" -#endif /* CONFIG_CHIP_M32700_TS1 */ - ); - local_irq_restore(flags); - - return retval; -} - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid cmpxchg(). */ -extern void __cmpxchg_called_with_bad_pointer(void); - -static inline unsigned long -__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) -{ - switch (size) { - case 4: - return __cmpxchg_u32(ptr, old, new); -#if 0 /* we don't have __cmpxchg_u64 */ - case 8: - return __cmpxchg_u64(ptr, old, new); -#endif /* 0 */ - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#define cmpxchg(ptr, o, n) \ - ((__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) - -#include <asm-generic/cmpxchg-local.h> - -static inline unsigned long __cmpxchg_local(volatile void *ptr, - unsigned long old, - unsigned long new, int size) -{ - switch (size) { - case 4: - return __cmpxchg_local_u32(ptr, old, new); - default: - return __cmpxchg_local_generic(ptr, old, new, size); - } - - return old; -} - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. - */ -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - -#endif /* __KERNEL__ */ - -/* - * Memory barrier. - * - * mb() prevents loads and stores being reordered across this point. - * rmb() prevents loads being reordered across this point. - * wmb() prevents stores being reordered across this point. - */ -#define mb() barrier() -#define rmb() mb() -#define wmb() mb() - -/** - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. 
- * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * <programlisting> - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * </programlisting> - * - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). However, - * the following code, with the same initial values for "a" and "b": - * - * <programlisting> - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * </programlisting> - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. - **/ - -#define read_barrier_depends() do { } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) -#define set_mb(var, value) do { var = value; barrier(); } while (0) -#endif - -#define arch_align_stack(x) (x) - -#endif /* _ASM_M32R_SYSTEM_H */ diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c index 20743754f2b2..4c03361537aa 100644 --- a/arch/m32r/kernel/ptrace.c +++ b/arch/m32r/kernel/ptrace.c @@ -29,7 +29,6 @@ #include <asm/io.h> #include <asm/uaccess.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/mmu_context.h> diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index ee6a9199561c..3bcb207e5b6d 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -18,7 +18,6 @@ #include <asm/page.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/atomic.h> diff --git a/arch/m32r/mm/fault-nommu.c b/arch/m32r/mm/fault-nommu.c index 888aab1157ed..80f18cc6f547 100644 --- a/arch/m32r/mm/fault-nommu.c +++ b/arch/m32r/mm/fault-nommu.c @@ -22,7 +22,6 @@ #include <linux/vt_kern.h> /* For unblank_screen() */ #include <asm/m32r.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c index 2c9aeb453847..3cdfa9c1d091 100644 --- a/arch/m32r/mm/fault.c +++ b/arch/m32r/mm/fault.c @@ -26,7 +26,6 @@ #include <linux/module.h> #include <asm/m32r.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/hardirq.h> #include <asm/mmu_context.h> diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c index 34671d32cefc..e2dd778aeac7 100644 --- a/arch/m32r/platforms/m32104ut/setup.c +++ b/arch/m32r/platforms/m32104ut/setup.c @@ -13,7 +13,6 @@ #include <linux/init.h> #include <linux/device.h> -#include <asm/system.h> #include <asm/m32r.h> #include <asm/io.h> diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c index 1053e1cb7401..9a4ba8a8589d 100644 --- 
a/arch/m32r/platforms/m32700ut/setup.c +++ b/arch/m32r/platforms/m32700ut/setup.c @@ -16,7 +16,6 @@ #include <linux/init.h> #include <linux/platform_device.h> -#include <asm/system.h> #include <asm/m32r.h> #include <asm/io.h> diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c index 35130ac3f8d1..767d2f4d6ded 100644 --- a/arch/m32r/platforms/mappi/setup.c +++ b/arch/m32r/platforms/mappi/setup.c @@ -12,7 +12,6 @@ #include <linux/init.h> #include <linux/platform_device.h> -#include <asm/system.h> #include <asm/m32r.h> #include <asm/io.h> diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c index f3ed6b60a5f8..76d665abf51e 100644 --- a/arch/m32r/platforms/mappi2/setup.c +++ b/arch/m32r/platforms/mappi2/setup.c @@ -12,7 +12,6 @@ #include <linux/init.h> #include <linux/platform_device.h> -#include <asm/system.h> #include <asm/m32r.h> #include <asm/io.h> diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c index 2408e356ad10..a3646d4b05bd 100644 --- a/arch/m32r/platforms/mappi3/setup.c +++ b/arch/m32r/platforms/mappi3/setup.c @@ -12,7 +12,6 @@ #include <linux/init.h> #include <linux/platform_device.h> -#include <asm/system.h> #include <asm/m32r.h> #include <asm/io.h> diff --git a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c index 83b46b067a17..f8373c069524 100644 --- a/arch/m32r/platforms/oaks32r/setup.c +++ b/arch/m32r/platforms/oaks32r/setup.c @@ -11,7 +11,6 @@ #include <linux/kernel.h> #include <linux/init.h> -#include <asm/system.h> #include <asm/m32r.h> #include <asm/io.h> diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c index 32660705f5fd..cd0170483e83 100644 --- a/arch/m32r/platforms/opsput/setup.c +++ b/arch/m32r/platforms/opsput/setup.c @@ -17,7 +17,6 @@ #include <linux/init.h> #include <linux/platform_device.h> -#include <asm/system.h> #include <asm/m32r.h> #include <asm/io.h> diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c index 0c7a1e8c77b0..dcde0ec777f6 100644 --- a/arch/m32r/platforms/usrv/setup.c +++ b/arch/m32r/platforms/usrv/setup.c @@ -11,7 +11,6 @@ #include <linux/kernel.h> #include <linux/init.h> -#include <asm/system.h> #include <asm/m32r.h> #include <asm/io.h> diff --git a/arch/m68k/amiga/amisound.c b/arch/m68k/amiga/amisound.c index 61e5c54625ae..2559eefc6aff 100644 --- a/arch/m68k/amiga/amisound.c +++ b/arch/m68k/amiga/amisound.c @@ -14,7 +14,6 @@ #include <linux/string.h> #include <linux/module.h> -#include <asm/system.h> #include <asm/amigahw.h> static unsigned short *snd_data; diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c index b95a451b1c3a..ee01b7a38e58 100644 --- a/arch/m68k/amiga/config.c +++ b/arch/m68k/amiga/config.c @@ -29,7 +29,6 @@ #include <asm/bootinfo.h> #include <asm/setup.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/amigahw.h> #include <asm/amigaints.h> diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c index 8d3eafab1ffe..0a30406b9442 100644 --- a/arch/m68k/apollo/config.c +++ b/arch/m68k/apollo/config.c @@ -9,7 +9,6 @@ #include <asm/setup.h> #include <asm/bootinfo.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/apollohw.h> #include <asm/irq.h> diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c index 8048e1b7e552..783d8f02360d 100644 --- a/arch/m68k/atari/ataints.c +++ b/arch/m68k/atari/ataints.c @@ -42,7 +42,6 @@ #include <linux/seq_file.h> #include <linux/module.h> 
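For context on the m32r helpers removed earlier in this patch: __xchg() and __xchg_local() emulate an atomic swap either by disabling interrupts (UP) or with the chip's lock/unlock instructions (SMP). Below is a minimal caller-side sketch of the kind of construct this primitive supports, a test-and-set lock; it is not part of the patch, the example_* names are invented, and it assumes the usual kernel xchg()/cpu_relax()/smp_mb() definitions are in scope:

        static int example_lock_word;           /* 0 = free, 1 = held */

        static void example_lock(void)
        {
                /* atomically store 1 and look at the previous value;
                 * if it was already 1, somebody else holds the lock */
                while (xchg(&example_lock_word, 1) != 0)
                        cpu_relax();
        }

        static void example_unlock(void)
        {
                smp_mb();               /* critical section before release */
                example_lock_word = 0;  /* mark the lock free again */
        }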
-#include <asm/system.h> #include <asm/traps.h> #include <asm/atarihw.h> diff --git a/arch/m68k/atari/atasound.c b/arch/m68k/atari/atasound.c index d266fe89c125..1c1181ebb947 100644 --- a/arch/m68k/atari/atasound.c +++ b/arch/m68k/atari/atasound.c @@ -25,7 +25,6 @@ #include <linux/module.h> #include <asm/atarihw.h> -#include <asm/system.h> #include <asm/irq.h> #include <asm/pgtable.h> #include <asm/atariints.h> diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c index c4ac15c4f065..d8eb32747ac5 100644 --- a/arch/m68k/atari/config.c +++ b/arch/m68k/atari/config.c @@ -39,7 +39,6 @@ #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/atari_stram.h> -#include <asm/system.h> #include <asm/machdep.h> #include <asm/hwtest.h> #include <asm/io.h> diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c index 81286476f740..0bf850a20ea2 100644 --- a/arch/m68k/bvme6000/config.c +++ b/arch/m68k/bvme6000/config.c @@ -28,7 +28,6 @@ #include <linux/bcd.h> #include <asm/bootinfo.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/irq.h> diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c index 1c4d4c7bf4d4..cf12a17dc289 100644 --- a/arch/m68k/bvme6000/rtc.c +++ b/arch/m68k/bvme6000/rtc.c @@ -21,7 +21,6 @@ #include <asm/io.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/setup.h> /* diff --git a/arch/m68k/hp300/time.c b/arch/m68k/hp300/time.c index c87fe69b0728..29a71be9fa5b 100644 --- a/arch/m68k/hp300/time.c +++ b/arch/m68k/hp300/time.c @@ -15,7 +15,6 @@ #include <asm/machdep.h> #include <asm/irq.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/blinken.h> diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index 4eba796c00d4..336e6173794f 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h @@ -2,7 +2,7 @@ #define __ARCH_M68K_ATOMIC__ #include <linux/types.h> -#include <asm/system.h> +#include <linux/irqflags.h> /* * Atomic operations that C can't guarantee us. Useful for diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h new file mode 100644 index 000000000000..445ce22c23cb --- /dev/null +++ b/arch/m68k/include/asm/barrier.h @@ -0,0 +1,20 @@ +#ifndef _M68K_BARRIER_H +#define _M68K_BARRIER_H + +/* + * Force strict CPU ordering. + * Not really required on m68k... + */ +#define nop() do { asm volatile ("nop"); barrier(); } while (0) +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define read_barrier_depends() ((void)0) +#define set_mb(var, value) ({ (var) = (value); wmb(); }) + +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() ((void)0) + +#endif /* _M68K_BARRIER_H */ diff --git a/arch/m68k/include/asm/system.h b/arch/m68k/include/asm/cmpxchg.h index 8dc68178716c..5c81d0eae5cf 100644 --- a/arch/m68k/include/asm/system.h +++ b/arch/m68k/include/asm/cmpxchg.h @@ -1,74 +1,13 @@ -#ifndef _M68K_SYSTEM_H -#define _M68K_SYSTEM_H +#ifndef __ARCH_M68K_CMPXCHG__ +#define __ARCH_M68K_CMPXCHG__ -#include <linux/linkage.h> -#include <linux/kernel.h> -#include <linux/bug.h> #include <linux/irqflags.h> -#include <asm/segment.h> -#include <asm/entry.h> - -#ifdef __KERNEL__ - -/* - * switch_to(n) should switch tasks to task ptr, first checking that - * ptr isn't the current task, in which case it does nothing. 
This - * also clears the TS-flag if the task we switched to has used the - * math co-processor latest. - */ -/* - * switch_to() saves the extra registers, that are not saved - * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and - * a0-a1. Some of these are used by schedule() and its predecessors - * and so we might get see unexpected behaviors when a task returns - * with unexpected register values. - * - * syscall stores these registers itself and none of them are used - * by syscall after the function in the syscall has been called. - * - * Beware that resume now expects *next to be in d1 and the offset of - * tss to be in a1. This saves a few instructions as we no longer have - * to push them onto the stack and read them back right after. - * - * 02/17/96 - Jes Sorensen (jds@kom.auc.dk) - * - * Changed 96/09/19 by Andreas Schwab - * pass prev in a0, next in a1 - */ -asmlinkage void resume(void); -#define switch_to(prev,next,last) do { \ - register void *_prev __asm__ ("a0") = (prev); \ - register void *_next __asm__ ("a1") = (next); \ - register void *_last __asm__ ("d1"); \ - __asm__ __volatile__("jbsr resume" \ - : "=a" (_prev), "=a" (_next), "=d" (_last) \ - : "0" (_prev), "1" (_next) \ - : "d0", "d2", "d3", "d4", "d5"); \ - (last) = _last; \ -} while (0) - - -/* - * Force strict CPU ordering. - * Not really required on m68k... - */ -#define nop() do { asm volatile ("nop"); barrier(); } while (0) -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define read_barrier_depends() ((void)0) -#define set_mb(var, value) ({ (var) = (value); wmb(); }) - -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() ((void)0) - -#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) struct __xchg_dummy { unsigned long a[100]; }; #define __xg(x) ((volatile struct __xchg_dummy *)(x)) +extern unsigned long __invalid_xchg_size(unsigned long, volatile void *, int); + #ifndef CONFIG_RMW_INSNS static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) { @@ -93,7 +32,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz x = tmp; break; default: - BUG(); + tmp = __invalid_xchg_size(x, ptr, size); + break; } local_irq_restore(flags); @@ -103,7 +43,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) { switch (size) { - case 1: + case 1: __asm__ __volatile__ ("moveb %2,%0\n\t" "1:\n\t" @@ -111,7 +51,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz "jne 1b" : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory"); break; - case 2: + case 2: __asm__ __volatile__ ("movew %2,%0\n\t" "1:\n\t" @@ -119,7 +59,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz "jne 1b" : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory"); break; - case 4: + case 4: __asm__ __volatile__ ("movel %2,%0\n\t" "1:\n\t" @@ -127,15 +67,23 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz "jne 1b" : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory"); break; + default: + x = __invalid_xchg_size(x, ptr, size); + break; } return x; } #endif +#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + #include <asm-generic/cmpxchg-local.h> #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) 
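cmpxchg(), defined for m68k just above and reworked per-architecture throughout this series, returns the value that was in memory before the operation. That return convention is what drives the usual compare-and-swap retry loop; a minimal caller-side sketch follows — not part of the patch, with an invented example_add() name and standard kernel cmpxchg() semantics assumed:

        static void example_add(int *counter, int delta)
        {
                int old, new;

                do {
                        old = *counter;         /* snapshot the current value */
                        new = old + delta;      /* value we would like to install */
                        /* retry if *counter changed under us in the meantime */
                } while (cmpxchg(counter, old, new) != old);
        }

Returning the prior value rather than a success flag lets the caller detect a lost race without issuing a second read.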
+extern unsigned long __invalid_cmpxchg_size(volatile void *, + unsigned long, unsigned long, int); + /* * Atomic compare and exchange. Compare OLD with MEM, if identical, * store NEW in MEM. Return the initial value in MEM. Success is @@ -163,6 +111,9 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old, : "=d" (old), "=m" (*(int *)p) : "d" (new), "0" (old), "m" (*(int *)p)); break; + default: + old = __invalid_cmpxchg_size(p, old, new, size); + break; } return old; } @@ -187,8 +138,4 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old, #endif -#define arch_align_stack(x) (x) - -#endif /* __KERNEL__ */ - -#endif /* _M68K_SYSTEM_H */ +#endif /* __ARCH_M68K_CMPXCHG__ */ diff --git a/arch/m68k/include/asm/exec.h b/arch/m68k/include/asm/exec.h new file mode 100644 index 000000000000..0499adf90230 --- /dev/null +++ b/arch/m68k/include/asm/exec.h @@ -0,0 +1,6 @@ +#ifndef _M68K_EXEC_H +#define _M68K_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* _M68K_EXEC_H */ diff --git a/arch/m68k/include/asm/sun3xflop.h b/arch/m68k/include/asm/sun3xflop.h index 32c45f84ac60..95231e2f9d64 100644 --- a/arch/m68k/include/asm/sun3xflop.h +++ b/arch/m68k/include/asm/sun3xflop.h @@ -11,7 +11,6 @@ #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/irq.h> #include <asm/sun3x.h> diff --git a/arch/m68k/include/asm/switch_to.h b/arch/m68k/include/asm/switch_to.h new file mode 100644 index 000000000000..16fd6b634982 --- /dev/null +++ b/arch/m68k/include/asm/switch_to.h @@ -0,0 +1,41 @@ +#ifndef _M68K_SWITCH_TO_H +#define _M68K_SWITCH_TO_H + +/* + * switch_to(n) should switch tasks to task ptr, first checking that + * ptr isn't the current task, in which case it does nothing. This + * also clears the TS-flag if the task we switched to has used the + * math co-processor latest. + */ +/* + * switch_to() saves the extra registers, that are not saved + * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and + * a0-a1. Some of these are used by schedule() and its predecessors + * and so we might get see unexpected behaviors when a task returns + * with unexpected register values. + * + * syscall stores these registers itself and none of them are used + * by syscall after the function in the syscall has been called. + * + * Beware that resume now expects *next to be in d1 and the offset of + * tss to be in a1. This saves a few instructions as we no longer have + * to push them onto the stack and read them back right after. 
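The switch_to() implementation being moved into this header leans on GCC's explicit-register local variables to place prev and next where resume() expects them (a0/a1) and to collect the result from d1. A stripped-down, self-contained sketch of that idiom, not part of the patch; the trivial asm body merely stands in for the real jbsr to resume():

        static inline void *pinned_identity(void *arg)
        {
                register void *in  __asm__("a0") = arg; /* pinned to %a0 */
                register void *out __asm__("d1");       /* pinned to %d1 */

                /* copy %a0 into %d1, standing in for the call to resume() */
                __asm__ __volatile__("move.l %1,%0"
                                     : "=d" (out)
                                     : "a" (in));
                return out;
        }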
+ * + * 02/17/96 - Jes Sorensen (jds@kom.auc.dk) + * + * Changed 96/09/19 by Andreas Schwab + * pass prev in a0, next in a1 + */ +asmlinkage void resume(void); +#define switch_to(prev,next,last) do { \ + register void *_prev __asm__ ("a0") = (prev); \ + register void *_next __asm__ ("a1") = (next); \ + register void *_last __asm__ ("d1"); \ + __asm__ __volatile__("jbsr resume" \ + : "=a" (_prev), "=a" (_next), "=d" (_last) \ + : "0" (_prev), "1" (_next) \ + : "d0", "d2", "d3", "d4", "d5"); \ + (last) = _last; \ +} while (0) + +#endif /* _M68K_SWITCH_TO_H */ diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c index 74fefac00899..6b32b64bac35 100644 --- a/arch/m68k/kernel/ints.c +++ b/arch/m68k/kernel/ints.c @@ -15,7 +15,6 @@ #include <linux/init.h> #include <asm/setup.h> -#include <asm/system.h> #include <asm/irq.h> #include <asm/traps.h> #include <asm/page.h> diff --git a/arch/m68k/kernel/irq.c b/arch/m68k/kernel/irq.c index c73988cfa90f..9ab4f550342e 100644 --- a/arch/m68k/kernel/irq.c +++ b/arch/m68k/kernel/irq.c @@ -15,7 +15,6 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/seq_file.h> -#include <asm/system.h> #include <asm/traps.h> asmlinkage void do_IRQ(int irq, struct pt_regs *regs) diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index c54ef927e483..c488e3cfab53 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -27,7 +27,6 @@ #include <linux/mqueue.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/machdep.h> #include <asm/setup.h> diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c index 149a05f8b9ee..8b4a2222e658 100644 --- a/arch/m68k/kernel/ptrace.c +++ b/arch/m68k/kernel/ptrace.c @@ -23,7 +23,6 @@ #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> /* diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index daaa9187654c..388e5cc89599 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -32,7 +32,6 @@ #include <asm/setup.h> #include <asm/fpu.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/traps.h> #include <asm/pgalloc.h> diff --git a/arch/m68k/kernel/vectors.c b/arch/m68k/kernel/vectors.c index 147b03fbc71e..322c977bb9ec 100644 --- a/arch/m68k/kernel/vectors.c +++ b/arch/m68k/kernel/vectors.c @@ -25,7 +25,6 @@ #include <asm/setup.h> #include <asm/fpu.h> -#include <asm/system.h> #include <asm/traps.h> /* assembler routines */ diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index f60ff5f59205..96fa6ed7e799 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -30,7 +30,6 @@ #include <asm/setup.h> #include <asm/bootinfo.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/pgtable.h> diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index eb915551de69..5e085554ac7f 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c @@ -19,7 +19,6 @@ #include <asm/uaccess.h> #include <asm/io.h> #include <asm/rtc.h> -#include <asm/system.h> #include <asm/segment.h> #include <asm/setup.h> #include <asm/macintosh.h> diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 2db6099784ba..6b020a8461e7 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -13,7 +13,6 @@ #include <asm/setup.h> #include <asm/traps.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c index 
89f3b203814b..f77f258dce3a 100644 --- a/arch/m68k/mm/init_mm.c +++ b/arch/m68k/mm/init_mm.c @@ -23,7 +23,6 @@ #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgalloc.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/machdep.h> #include <asm/io.h> diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c index 1e33d39ca9a0..345ec0d83e3d 100644 --- a/arch/m68k/mm/init_no.c +++ b/arch/m68k/mm/init_no.c @@ -36,7 +36,6 @@ #include <asm/segment.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/machdep.h> /* diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c index 1cc2bed4c3dd..568cfad3ceb8 100644 --- a/arch/m68k/mm/kmap.c +++ b/arch/m68k/mm/kmap.c @@ -20,7 +20,6 @@ #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/io.h> -#include <asm/system.h> #undef DEBUG diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c index a5dbb74fe1de..250b8b786f4f 100644 --- a/arch/m68k/mm/memory.c +++ b/arch/m68k/mm/memory.c @@ -17,7 +17,6 @@ #include <asm/segment.h> #include <asm/page.h> #include <asm/pgalloc.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/machdep.h> diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 8b3db1c587fc..0dafa693515b 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -24,7 +24,6 @@ #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgalloc.h> -#include <asm/system.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/dma.h> diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c index 1b902dbd4376..e0804060501e 100644 --- a/arch/m68k/mm/sun3mmu.c +++ b/arch/m68k/mm/sun3mmu.c @@ -21,7 +21,6 @@ #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/machdep.h> #include <asm/io.h> diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c index 5de924ef42ed..a41c09149e23 100644 --- a/arch/m68k/mvme147/config.c +++ b/arch/m68k/mvme147/config.c @@ -26,7 +26,6 @@ #include <linux/interrupt.h> #include <asm/bootinfo.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/irq.h> diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index c3fb3bdd7ed9..b6d7d8a7a3dd 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c @@ -29,7 +29,6 @@ #include <linux/module.h> #include <asm/bootinfo.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/irq.h> diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c index 39c79ebcd18a..6ef7a81a3b12 100644 --- a/arch/m68k/mvme16x/rtc.c +++ b/arch/m68k/mvme16x/rtc.c @@ -20,7 +20,6 @@ #include <asm/io.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/setup.h> /* diff --git a/arch/m68k/platform/68328/config.c b/arch/m68k/platform/68328/config.c index 44b866544314..8c20e891e981 100644 --- a/arch/m68k/platform/68328/config.c +++ b/arch/m68k/platform/68328/config.c @@ -18,7 +18,6 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/rtc.h> -#include <asm/system.h> #include <asm/machdep.h> #include <asm/MC68328.h> #if defined(CONFIG_PILOT) || defined(CONFIG_INIT_LCD) diff --git a/arch/m68k/platform/68328/timers.c b/arch/m68k/platform/68328/timers.c index b15ddef1ec76..c801c172b822 100644 --- a/arch/m68k/platform/68328/timers.c +++ b/arch/m68k/platform/68328/timers.c @@ -22,7 +22,6 @@ #include <linux/clocksource.h> #include <linux/rtc.h> #include <asm/setup.h> -#include <asm/system.h> #include 
<asm/pgtable.h> #include <asm/machdep.h> #include <asm/MC68VZ328.h> diff --git a/arch/m68k/platform/68360/config.c b/arch/m68k/platform/68360/config.c index 599a5949f320..255fc03913e9 100644 --- a/arch/m68k/platform/68360/config.c +++ b/arch/m68k/platform/68360/config.c @@ -18,7 +18,6 @@ #include <linux/irq.h> #include <asm/setup.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/machdep.h> #include <asm/m68360.h> diff --git a/arch/m68k/platform/68EZ328/config.c b/arch/m68k/platform/68EZ328/config.c index dd2c53554341..4f158d551f02 100644 --- a/arch/m68k/platform/68EZ328/config.c +++ b/arch/m68k/platform/68EZ328/config.c @@ -16,7 +16,6 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/rtc.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/machdep.h> #include <asm/MC68EZ328.h> diff --git a/arch/m68k/platform/68VZ328/config.c b/arch/m68k/platform/68VZ328/config.c index 25ec673edc25..2ed8dc305e42 100644 --- a/arch/m68k/platform/68VZ328/config.c +++ b/arch/m68k/platform/68VZ328/config.c @@ -22,7 +22,6 @@ #include <linux/irq.h> #include <linux/rtc.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/machdep.h> #include <asm/MC68VZ328.h> diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c index be936480b964..512adb64f7dd 100644 --- a/arch/m68k/q40/config.c +++ b/arch/m68k/q40/config.c @@ -29,7 +29,6 @@ #include <asm/io.h> #include <asm/rtc.h> #include <asm/bootinfo.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/irq.h> diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c index 2b888491f29a..513f9bb17b9c 100644 --- a/arch/m68k/q40/q40ints.c +++ b/arch/m68k/q40/q40ints.c @@ -18,7 +18,6 @@ #include <linux/irq.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/q40_master.h> diff --git a/arch/m68k/sun3/intersil.c b/arch/m68k/sun3/intersil.c index 0116d208d300..94fe8016f1f0 100644 --- a/arch/m68k/sun3/intersil.c +++ b/arch/m68k/sun3/intersil.c @@ -14,7 +14,6 @@ #include <linux/rtc.h> #include <asm/errno.h> -#include <asm/system.h> #include <asm/rtc.h> #include <asm/intersil.h> diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c index 94f81ecfe3f8..8edc510a21be 100644 --- a/arch/m68k/sun3/mmu_emu.c +++ b/arch/m68k/sun3/mmu_emu.c @@ -17,7 +17,6 @@ #include <asm/setup.h> #include <asm/traps.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> diff --git a/arch/m68k/sun3/prom/console.c b/arch/m68k/sun3/prom/console.c index 2bcb6e4bfe54..e92364373b07 100644 --- a/arch/m68k/sun3/prom/console.c +++ b/arch/m68k/sun3/prom/console.c @@ -10,7 +10,6 @@ #include <linux/sched.h> #include <asm/openprom.h> #include <asm/oplib.h> -#include <asm/system.h> #include <linux/string.h> /* Non blocking get character from console input device, returns -1 diff --git a/arch/m68k/sun3x/config.c b/arch/m68k/sun3x/config.c index fc599fad4a54..dd306c84d36d 100644 --- a/arch/m68k/sun3x/config.c +++ b/arch/m68k/sun3x/config.c @@ -12,7 +12,6 @@ #include <linux/console.h> #include <linux/init.h> -#include <asm/system.h> #include <asm/machdep.h> #include <asm/irq.h> #include <asm/sun3xprom.h> diff --git a/arch/m68k/sun3x/time.c b/arch/m68k/sun3x/time.c index 536a04aaf22f..1d0a72480409 100644 --- a/arch/m68k/sun3x/time.c +++ b/arch/m68k/sun3x/time.c @@ -15,7 +15,6 @@ #include <asm/irq.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/sun3x.h> #include <asm/sun3ints.h> diff --git 
a/arch/microblaze/include/asm/atomic.h b/arch/microblaze/include/asm/atomic.h index 615f53992c65..472d8bf726df 100644 --- a/arch/microblaze/include/asm/atomic.h +++ b/arch/microblaze/include/asm/atomic.h @@ -1,6 +1,7 @@ #ifndef _ASM_MICROBLAZE_ATOMIC_H #define _ASM_MICROBLAZE_ATOMIC_H +#include <asm/cmpxchg.h> #include <asm-generic/atomic.h> #include <asm-generic/atomic64.h> diff --git a/arch/microblaze/include/asm/barrier.h b/arch/microblaze/include/asm/barrier.h new file mode 100644 index 000000000000..df5be3e87044 --- /dev/null +++ b/arch/microblaze/include/asm/barrier.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_MICROBLAZE_BARRIER_H +#define _ASM_MICROBLAZE_BARRIER_H + +#define nop() asm volatile ("nop") + +#define smp_read_barrier_depends() do {} while (0) +#define read_barrier_depends() do {} while (0) + +#define mb() barrier() +#define rmb() mb() +#define wmb() mb() +#define set_mb(var, value) do { var = value; mb(); } while (0) +#define set_wmb(var, value) do { var = value; wmb(); } while (0) + +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() + +#endif /* _ASM_MICROBLAZE_BARRIER_H */ diff --git a/arch/microblaze/include/asm/cmpxchg.h b/arch/microblaze/include/asm/cmpxchg.h new file mode 100644 index 000000000000..0094859abd9b --- /dev/null +++ b/arch/microblaze/include/asm/cmpxchg.h @@ -0,0 +1,40 @@ +#ifndef _ASM_MICROBLAZE_CMPXCHG_H +#define _ASM_MICROBLAZE_CMPXCHG_H + +void __bad_xchg(volatile void *ptr, int size); + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, + int size) +{ + unsigned long ret; + unsigned long flags; + + switch (size) { + case 1: + local_irq_save(flags); + ret = *(volatile unsigned char *)ptr; + *(volatile unsigned char *)ptr = x; + local_irq_restore(flags); + break; + + case 4: + local_irq_save(flags); + ret = *(volatile unsigned long *)ptr; + *(volatile unsigned long *)ptr = x; + local_irq_restore(flags); + break; + default: + __bad_xchg(ptr, size), ret = 0; + break; + } + + return ret; +} + +#define xchg(ptr, x) \ + ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) + +#include <asm-generic/cmpxchg.h> +#include <asm-generic/cmpxchg-local.h> + +#endif /* _ASM_MICROBLAZE_CMPXCHG_H */ diff --git a/arch/microblaze/include/asm/exec.h b/arch/microblaze/include/asm/exec.h new file mode 100644 index 000000000000..e750de1fe8fb --- /dev/null +++ b/arch/microblaze/include/asm/exec.h @@ -0,0 +1,14 @@ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef _ASM_MICROBLAZE_EXEC_H +#define _ASM_MICROBLAZE_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* _ASM_MICROBLAZE_EXEC_H */ diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index 352cc2352bd5..287c5485d286 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h @@ -138,6 +138,8 @@ extern unsigned long memory_start; extern unsigned long memory_size; extern unsigned long lowmem_size; +extern unsigned long kernel_tlb; + extern int page_is_ram(unsigned long pfn); # define phys_to_pfn(phys) (PFN_DOWN(phys)) diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h index 7283bfb2f7e4..510a8e1c16ba 100644 --- a/arch/microblaze/include/asm/processor.h +++ b/arch/microblaze/include/asm/processor.h @@ -125,7 +125,6 @@ struct thread_struct { .pgdir = swapper_pg_dir, \ } - /* Free all resources held by a thread. */ extern inline void release_thread(struct task_struct *dead_task) { @@ -144,6 +143,8 @@ static inline void exit_thread(void) unsigned long get_wchan(struct task_struct *p); +extern void ret_from_fork(void); + /* The size allocated for kernel stacks. This _must_ be a power of two! */ # define KERNEL_STACK_SIZE 0x2000 @@ -166,6 +167,14 @@ unsigned long get_wchan(struct task_struct *p); # define STACK_TOP TASK_SIZE # define STACK_TOP_MAX STACK_TOP +void disable_hlt(void); +void enable_hlt(void); +void default_idle(void); + +#ifdef CONFIG_DEBUG_FS +extern struct dentry *of_debugfs_root; +#endif + # endif /* __ASSEMBLY__ */ # endif /* CONFIG_MMU */ #endif /* _ASM_MICROBLAZE_PROCESSOR_H */ diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h index 9f195c094731..0061aa13a340 100644 --- a/arch/microblaze/include/asm/setup.h +++ b/arch/microblaze/include/asm/setup.h @@ -20,6 +20,8 @@ extern unsigned int boot_cpuid; /* move to smp.h */ extern char cmd_line[COMMAND_LINE_SIZE]; +extern char *klimit; + void early_printk(const char *fmt, ...); int setup_early_printk(char *opt); @@ -47,6 +49,10 @@ void machine_shutdown(void); void machine_halt(void); void machine_power_off(void); +void free_init_pages(char *what, unsigned long begin, unsigned long end); +extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); +extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); + # endif/* __KERNEL__ */ # endif /* __ASSEMBLY__ */ #endif /* _ASM_MICROBLAZE_SETUP_H */ diff --git a/arch/microblaze/include/asm/switch_to.h b/arch/microblaze/include/asm/switch_to.h new file mode 100644 index 000000000000..f45baa2c5e09 --- /dev/null +++ b/arch/microblaze/include/asm/switch_to.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2006 Atmark Techno, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef _ASM_MICROBLAZE_SWITCH_TO_H +#define _ASM_MICROBLAZE_SWITCH_TO_H + +struct task_struct; +struct thread_info; + +extern struct task_struct *_switch_to(struct thread_info *prev, + struct thread_info *next); + +#define switch_to(prev, next, last) \ + do { \ + (last) = _switch_to(task_thread_info(prev), \ + task_thread_info(next)); \ + } while (0) + +#endif /* _ASM_MICROBLAZE_SWITCH_TO_H */ diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h deleted file mode 100644 index 01228d2b1351..000000000000 --- a/arch/microblaze/include/asm/system.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (C) 2006 Atmark Techno, Inc. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#ifndef _ASM_MICROBLAZE_SYSTEM_H -#define _ASM_MICROBLAZE_SYSTEM_H - -#include <asm/registers.h> -#include <asm/setup.h> -#include <asm/irqflags.h> -#include <asm/cache.h> - -#include <asm-generic/cmpxchg.h> -#include <asm-generic/cmpxchg-local.h> - -struct task_struct; -struct thread_info; - -extern struct task_struct *_switch_to(struct thread_info *prev, - struct thread_info *next); - -#define switch_to(prev, next, last) \ - do { \ - (last) = _switch_to(task_thread_info(prev), \ - task_thread_info(next)); \ - } while (0) - -#define smp_read_barrier_depends() do {} while (0) -#define read_barrier_depends() do {} while (0) - -#define nop() asm volatile ("nop") -#define mb() barrier() -#define rmb() mb() -#define wmb() mb() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() - -void __bad_xchg(volatile void *ptr, int size); - -static inline unsigned long __xchg(unsigned long x, volatile void *ptr, - int size) -{ - unsigned long ret; - unsigned long flags; - - switch (size) { - case 1: - local_irq_save(flags); - ret = *(volatile unsigned char *)ptr; - *(volatile unsigned char *)ptr = x; - local_irq_restore(flags); - break; - - case 4: - local_irq_save(flags); - ret = *(volatile unsigned long *)ptr; - *(volatile unsigned long *)ptr = x; - local_irq_restore(flags); - break; - default: - __bad_xchg(ptr, size), ret = 0; - break; - } - - return ret; -} - -void disable_hlt(void); -void enable_hlt(void); -void default_idle(void); - -#define xchg(ptr, x) \ - ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) - -void free_init_pages(char *what, unsigned long begin, unsigned long end); -void free_initmem(void); -extern char *klimit; -extern unsigned long kernel_tlb; -extern void ret_from_fork(void); - -extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); -extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); - -#ifdef CONFIG_DEBUG_FS -extern struct dentry *of_debugfs_root; -#endif - -#define arch_align_stack(x) (x) - -#endif /* _ASM_MICROBLAZE_SYSTEM_H */ diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c index 488c1ed24e38..3a749d5e71fd 100644 --- a/arch/microblaze/kernel/cpu/pvr.c +++ b/arch/microblaze/kernel/cpu/pvr.c @@ -12,7 +12,6 @@ #include <linux/kernel.h> #include <linux/compiler.h> -#include <asm/system.h> #include <asm/exceptions.h> #include <asm/pvr.h> diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c index 49faeb429599..bb4907c828dc 100644 --- 
a/arch/microblaze/kernel/microblaze_ksyms.c +++ b/arch/microblaze/kernel/microblaze_ksyms.c @@ -18,7 +18,6 @@ #include <asm/cacheflush.h> #include <linux/io.h> #include <asm/page.h> -#include <asm/system.h> #include <linux/ftrace.h> #include <linux/uaccess.h> diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c index 9155f7d92669..883b92789cdf 100644 --- a/arch/microblaze/kernel/process.c +++ b/arch/microblaze/kernel/process.c @@ -13,7 +13,6 @@ #include <linux/pm.h> #include <linux/tick.h> #include <linux/bitops.h> -#include <asm/system.h> #include <asm/pgalloc.h> #include <asm/uaccess.h> /* for USER_DS macros */ #include <asm/cacheflush.h> diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c index 80d314e81901..4a764ccb9f26 100644 --- a/arch/microblaze/kernel/prom.c +++ b/arch/microblaze/kernel/prom.c @@ -36,7 +36,6 @@ #include <asm/processor.h> #include <asm/irq.h> #include <linux/io.h> -#include <asm/system.h> #include <asm/mmu.h> #include <asm/pgtable.h> #include <asm/sections.h> diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index 9f79fb3bbfa0..71af974aa24a 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -30,7 +30,6 @@ #include <asm/entry.h> #include <asm/cpuinfo.h> -#include <asm/system.h> #include <asm/prom.h> #include <asm/pgtable.h> diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index cadfd5608afb..522defa7d41f 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c @@ -27,7 +27,6 @@ #include <asm/setup.h> #include <asm/prom.h> #include <asm/irq.h> -#include <asm/system.h> #include <linux/cnt32_to_63.h> #ifdef CONFIG_SELFMOD_TIMER diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c index ba034d421ec2..5541ac559593 100644 --- a/arch/microblaze/kernel/traps.c +++ b/arch/microblaze/kernel/traps.c @@ -15,7 +15,6 @@ #include <linux/debug_locks.h> #include <asm/exceptions.h> -#include <asm/system.h> #include <asm/unwind.h> void trap_init(void) diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c index 52746e718dfa..fe9c53fafdea 100644 --- a/arch/microblaze/lib/memcpy.c +++ b/arch/microblaze/lib/memcpy.c @@ -30,7 +30,6 @@ #include <linux/module.h> #include <linux/string.h> -#include <asm/system.h> #ifdef __HAVE_ARCH_MEMCPY #ifndef CONFIG_OPT_LIB_FUNCTION diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index ae97d2ccdc22..c38a265846de 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -33,7 +33,6 @@ #include <asm/pgtable.h> #include <asm/mmu.h> #include <asm/mmu_context.h> -#include <asm/system.h> #include <linux/uaccess.h> #include <asm/exceptions.h> diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 260b27367347..d3a9f012aa0a 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -24,7 +24,6 @@ #include <asm/processor.h> #include <asm/reboot.h> #include <asm/smp-ops.h> -#include <asm/system.h> #include <asm/irq_cpu.h> #include <asm/mipsregs.h> #include <asm/bootinfo.h> diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index b1535fe409d4..c3e2b85c3b02 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -15,8 +15,8 @@ #include <linux/module.h> #include <asm/mmu_context.h> -#include <asm/system.h> #include <asm/time.h> +#include <asm/setup.h> #include <asm/octeon/octeon.h> diff --git a/arch/mips/dec/ecc-berr.c 
b/arch/mips/dec/ecc-berr.c index 7abce661b90f..5abf4e894216 100644 --- a/arch/mips/dec/ecc-berr.c +++ b/arch/mips/dec/ecc-berr.c @@ -24,7 +24,6 @@ #include <asm/irq_regs.h> #include <asm/processor.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/dec/ecc.h> diff --git a/arch/mips/dec/kn01-berr.c b/arch/mips/dec/kn01-berr.c index 94d23b4a7dc3..44d8a87a8a68 100644 --- a/arch/mips/dec/kn01-berr.c +++ b/arch/mips/dec/kn01-berr.c @@ -22,7 +22,6 @@ #include <asm/mipsregs.h> #include <asm/page.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/uaccess.h> diff --git a/arch/mips/dec/kn02xa-berr.c b/arch/mips/dec/kn02xa-berr.c index 07ca5405d48d..ebb73c51d821 100644 --- a/arch/mips/dec/kn02xa-berr.c +++ b/arch/mips/dec/kn02xa-berr.c @@ -21,7 +21,6 @@ #include <asm/addrspace.h> #include <asm/irq_regs.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/dec/kn02ca.h> diff --git a/arch/mips/dec/wbflush.c b/arch/mips/dec/wbflush.c index 925c0525344b..43feddd5e19c 100644 --- a/arch/mips/dec/wbflush.c +++ b/arch/mips/dec/wbflush.c @@ -17,8 +17,8 @@ #include <linux/init.h> #include <asm/bootinfo.h> -#include <asm/system.h> #include <asm/wbflush.h> +#include <asm/barrier.h> static void wbflush_kn01(void); static void wbflush_kn210(void); diff --git a/arch/mips/emma/markeins/irq.c b/arch/mips/emma/markeins/irq.c index 7798887a1288..b5f08255d9c7 100644 --- a/arch/mips/emma/markeins/irq.c +++ b/arch/mips/emma/markeins/irq.c @@ -27,7 +27,6 @@ #include <linux/delay.h> #include <asm/irq_cpu.h> -#include <asm/system.h> #include <asm/mipsregs.h> #include <asm/addrspace.h> #include <asm/bootinfo.h> diff --git a/arch/mips/fw/arc/misc.c b/arch/mips/fw/arc/misc.c index 29627fbae7ad..7cf80ca2c1d2 100644 --- a/arch/mips/fw/arc/misc.c +++ b/arch/mips/fw/arc/misc.c @@ -17,7 +17,6 @@ #include <asm/fw/arc/types.h> #include <asm/sgialib.h> #include <asm/bootinfo.h> -#include <asm/system.h> VOID ArcHalt(VOID) diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 1d93f81d57e7..3f4c5cb6433e 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -18,8 +18,8 @@ #include <linux/types.h> #include <asm/barrier.h> #include <asm/cpu-features.h> +#include <asm/cmpxchg.h> #include <asm/war.h> -#include <asm/system.h> #define ATOMIC_INIT(i) { (i) } diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index c0884f02d3a6..f7fdc24e972d 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -8,6 +8,8 @@ #ifndef __ASM_BARRIER_H #define __ASM_BARRIER_H +#include <asm/addrspace.h> + /* * read_barrier_depends - Flush all pending reads that subsequents reads * depend on. 
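Both the m32r documentation removed earlier and this MIPS barrier.h describe read_barrier_depends() in terms of publishing data through a pointer. A compact sketch of that publish/consume pattern, not taken from the patch — the struct and function names are invented, and the generic smp_wmb()/smp_read_barrier_depends() definitions are assumed:

        struct item {
                int payload;
        };

        static struct item *published;

        static void example_writer(struct item *p)
        {
                p->payload = 42;
                smp_wmb();              /* initialise the item before publishing it */
                published = p;
        }

        static int example_reader(void)
        {
                struct item *q = published;

                if (!q)
                        return -1;
                smp_read_barrier_depends();     /* pointer load before dependent read */
                return q->payload;
        }

Only the dependent read of q->payload is ordered this way; an independent read of some other variable would still need rmb(), which is the distinction the deleted m32r comment spells out with its two CPU 0 / CPU 1 listings.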
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index d8d1c2805ac7..285a41fa0b18 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -9,6 +9,130 @@ #define __ASM_CMPXCHG_H #include <linux/irqflags.h> +#include <asm/war.h> + +static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) +{ + __u32 retval; + + smp_mb__before_llsc(); + + if (kernel_uses_llsc && R10000_LLSC_WAR) { + unsigned long dummy; + + __asm__ __volatile__( + " .set mips3 \n" + "1: ll %0, %3 # xchg_u32 \n" + " .set mips0 \n" + " move %2, %z4 \n" + " .set mips3 \n" + " sc %2, %1 \n" + " beqzl %2, 1b \n" + " .set mips0 \n" + : "=&r" (retval), "=m" (*m), "=&r" (dummy) + : "R" (*m), "Jr" (val) + : "memory"); + } else if (kernel_uses_llsc) { + unsigned long dummy; + + do { + __asm__ __volatile__( + " .set mips3 \n" + " ll %0, %3 # xchg_u32 \n" + " .set mips0 \n" + " move %2, %z4 \n" + " .set mips3 \n" + " sc %2, %1 \n" + " .set mips0 \n" + : "=&r" (retval), "=m" (*m), "=&r" (dummy) + : "R" (*m), "Jr" (val) + : "memory"); + } while (unlikely(!dummy)); + } else { + unsigned long flags; + + raw_local_irq_save(flags); + retval = *m; + *m = val; + raw_local_irq_restore(flags); /* implies memory barrier */ + } + + smp_llsc_mb(); + + return retval; +} + +#ifdef CONFIG_64BIT +static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) +{ + __u64 retval; + + smp_mb__before_llsc(); + + if (kernel_uses_llsc && R10000_LLSC_WAR) { + unsigned long dummy; + + __asm__ __volatile__( + " .set mips3 \n" + "1: lld %0, %3 # xchg_u64 \n" + " move %2, %z4 \n" + " scd %2, %1 \n" + " beqzl %2, 1b \n" + " .set mips0 \n" + : "=&r" (retval), "=m" (*m), "=&r" (dummy) + : "R" (*m), "Jr" (val) + : "memory"); + } else if (kernel_uses_llsc) { + unsigned long dummy; + + do { + __asm__ __volatile__( + " .set mips3 \n" + " lld %0, %3 # xchg_u64 \n" + " move %2, %z4 \n" + " scd %2, %1 \n" + " .set mips0 \n" + : "=&r" (retval), "=m" (*m), "=&r" (dummy) + : "R" (*m), "Jr" (val) + : "memory"); + } while (unlikely(!dummy)); + } else { + unsigned long flags; + + raw_local_irq_save(flags); + retval = *m; + *m = val; + raw_local_irq_restore(flags); /* implies memory barrier */ + } + + smp_llsc_mb(); + + return retval; +} +#else +extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val); +#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels +#endif + +static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) +{ + switch (size) { + case 4: + return __xchg_u32(ptr, x); + case 8: + return __xchg_u64(ptr, x); + } + + return x; +} + +#define xchg(ptr, x) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc); \ + \ + ((__typeof__(*(ptr))) \ + __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \ +}) #define __HAVE_ARCH_CMPXCHG 1 diff --git a/arch/mips/include/asm/dma.h b/arch/mips/include/asm/dma.h index 2d47da62d5a7..f5097f65a8ab 100644 --- a/arch/mips/include/asm/dma.h +++ b/arch/mips/include/asm/dma.h @@ -15,7 +15,6 @@ #include <asm/io.h> /* need byte IO */ #include <linux/spinlock.h> /* And spinlocks */ #include <linux/delay.h> -#include <asm/system.h> #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h new file mode 100644 index 000000000000..c1f6afa4bc4f --- /dev/null +++ b/arch/mips/include/asm/exec.h @@ -0,0 +1,17 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle + * Copyright (C) 1996 by Paul M. Antoine + * Copyright (C) 1999 Silicon Graphics + * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. + */ +#ifndef _ASM_EXEC_H +#define _ASM_EXEC_H + +extern unsigned long arch_align_stack(unsigned long sp); + +#endif /* _ASM_EXEC_H */ diff --git a/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/arch/mips/include/asm/mach-au1x00/au1000_dma.h index 59f5b55b2200..ba4cf0e91c8b 100644 --- a/arch/mips/include/asm/mach-au1x00/au1000_dma.h +++ b/arch/mips/include/asm/mach-au1x00/au1000_dma.h @@ -33,7 +33,6 @@ #include <linux/io.h> /* need byte IO */ #include <linux/spinlock.h> /* And spinlocks */ #include <linux/delay.h> -#include <asm/system.h> #define NUM_AU1000_DMA_CHANNELS 8 diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index c104f1039a69..20e9dcf42b27 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -19,7 +19,6 @@ #include <asm/cpu-info.h> #include <asm/mipsregs.h> #include <asm/prefetch.h> -#include <asm/system.h> /* * Return current * instruction pointer ("program counter"). @@ -356,6 +355,12 @@ unsigned long get_wchan(struct task_struct *p); #define ARCH_HAS_PREFETCHW #define prefetchw(x) __builtin_prefetch((x), 1, 1) +/* + * See Documentation/scheduler/sched-arch.txt; prevents deadlock on SMP + * systems. + */ +#define __ARCH_WANT_UNLOCKED_CTXSW + #endif #endif /* _ASM_PROCESSOR_H */ diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h index 50511aac04e9..6dce6d8d09ab 100644 --- a/arch/mips/include/asm/setup.h +++ b/arch/mips/include/asm/setup.h @@ -5,6 +5,17 @@ #ifdef __KERNEL__ extern void setup_early_printk(void); + +extern void set_handler(unsigned long offset, void *addr, unsigned long len); +extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len); + +typedef void (*vi_handler_t)(void); +extern void *set_vi_handler(int n, vi_handler_t addr); + +extern void *set_except_vector(int n, void *addr); +extern unsigned long ebase; +extern void per_cpu_trap_init(void); + #endif /* __KERNEL__ */ #endif /* __SETUP_H */ diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h new file mode 100644 index 000000000000..5d33621b5658 --- /dev/null +++ b/arch/mips/include/asm/switch_to.h @@ -0,0 +1,85 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle + * Copyright (C) 1996 by Paul M. Antoine + * Copyright (C) 1999 Silicon Graphics + * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. + */ +#ifndef _ASM_SWITCH_TO_H +#define _ASM_SWITCH_TO_H + +#include <asm/cpu-features.h> +#include <asm/watch.h> +#include <asm/dsp.h> + +struct task_struct; + +/* + * switch_to(n) should switch tasks to task nr n, first + * checking that n isn't the current task, in which case it does nothing. + */ +extern asmlinkage void *resume(void *last, void *next, void *next_ti); + +extern unsigned int ll_bit; +extern struct task_struct *ll_task; + +#ifdef CONFIG_MIPS_MT_FPAFF + +/* + * Handle the scheduler resume end of FPU affinity management. 
We do this + * inline to try to keep the overhead down. If we have been forced to run on + * a "CPU" with an FPU because of a previous high level of FP computation, + * but did not actually use the FPU during the most recent time-slice (CU1 + * isn't set), we undo the restriction on cpus_allowed. + * + * We're not calling set_cpus_allowed() here, because we have no need to + * force prompt migration - we're already switching the current CPU to a + * different thread. + */ + +#define __mips_mt_fpaff_switch_to(prev) \ +do { \ + struct thread_info *__prev_ti = task_thread_info(prev); \ + \ + if (cpu_has_fpu && \ + test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \ + (!(KSTK_STATUS(prev) & ST0_CU1))) { \ + clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \ + prev->cpus_allowed = prev->thread.user_cpus_allowed; \ + } \ + next->thread.emulated_fp = 0; \ +} while(0) + +#else +#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0) +#endif + +#define __clear_software_ll_bit() \ +do { \ + if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \ + ll_bit = 0; \ +} while (0) + +#define switch_to(prev, next, last) \ +do { \ + __mips_mt_fpaff_switch_to(prev); \ + if (cpu_has_dsp) \ + __save_dsp(prev); \ + __clear_software_ll_bit(); \ + (last) = resume(prev, next, task_thread_info(next)); \ +} while (0) + +#define finish_arch_switch(prev) \ +do { \ + if (cpu_has_dsp) \ + __restore_dsp(current); \ + if (cpu_has_userlocal) \ + write_c0_userlocal(current_thread_info()->tp_value); \ + __restore_watch(); \ +} while (0) + +#endif /* _ASM_SWITCH_TO_H */ diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h deleted file mode 100644 index 6018c80ce37a..000000000000 --- a/arch/mips/include/asm/system.h +++ /dev/null @@ -1,235 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle - * Copyright (C) 1996 by Paul M. Antoine - * Copyright (C) 1999 Silicon Graphics - * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com - * Copyright (C) 2000 MIPS Technologies, Inc. - */ -#ifndef _ASM_SYSTEM_H -#define _ASM_SYSTEM_H - -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/irqflags.h> - -#include <asm/addrspace.h> -#include <asm/barrier.h> -#include <asm/cmpxchg.h> -#include <asm/cpu-features.h> -#include <asm/dsp.h> -#include <asm/watch.h> -#include <asm/war.h> - - -/* - * switch_to(n) should switch tasks to task nr n, first - * checking that n isn't the current task, in which case it does nothing. - */ -extern asmlinkage void *resume(void *last, void *next, void *next_ti); - -struct task_struct; - -extern unsigned int ll_bit; -extern struct task_struct *ll_task; - -#ifdef CONFIG_MIPS_MT_FPAFF - -/* - * Handle the scheduler resume end of FPU affinity management. We do this - * inline to try to keep the overhead down. If we have been forced to run on - * a "CPU" with an FPU because of a previous high level of FP computation, - * but did not actually use the FPU during the most recent time-slice (CU1 - * isn't set), we undo the restriction on cpus_allowed. - * - * We're not calling set_cpus_allowed() here, because we have no need to - * force prompt migration - we're already switching the current CPU to a - * different thread. 
- */ - -#define __mips_mt_fpaff_switch_to(prev) \ -do { \ - struct thread_info *__prev_ti = task_thread_info(prev); \ - \ - if (cpu_has_fpu && \ - test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \ - (!(KSTK_STATUS(prev) & ST0_CU1))) { \ - clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \ - prev->cpus_allowed = prev->thread.user_cpus_allowed; \ - } \ - next->thread.emulated_fp = 0; \ -} while(0) - -#else -#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0) -#endif - -#define __clear_software_ll_bit() \ -do { \ - if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \ - ll_bit = 0; \ -} while (0) - -#define switch_to(prev, next, last) \ -do { \ - __mips_mt_fpaff_switch_to(prev); \ - if (cpu_has_dsp) \ - __save_dsp(prev); \ - __clear_software_ll_bit(); \ - (last) = resume(prev, next, task_thread_info(next)); \ -} while (0) - -#define finish_arch_switch(prev) \ -do { \ - if (cpu_has_dsp) \ - __restore_dsp(current); \ - if (cpu_has_userlocal) \ - write_c0_userlocal(current_thread_info()->tp_value); \ - __restore_watch(); \ -} while (0) - -static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) -{ - __u32 retval; - - smp_mb__before_llsc(); - - if (kernel_uses_llsc && R10000_LLSC_WAR) { - unsigned long dummy; - - __asm__ __volatile__( - " .set mips3 \n" - "1: ll %0, %3 # xchg_u32 \n" - " .set mips0 \n" - " move %2, %z4 \n" - " .set mips3 \n" - " sc %2, %1 \n" - " beqzl %2, 1b \n" - " .set mips0 \n" - : "=&r" (retval), "=m" (*m), "=&r" (dummy) - : "R" (*m), "Jr" (val) - : "memory"); - } else if (kernel_uses_llsc) { - unsigned long dummy; - - do { - __asm__ __volatile__( - " .set mips3 \n" - " ll %0, %3 # xchg_u32 \n" - " .set mips0 \n" - " move %2, %z4 \n" - " .set mips3 \n" - " sc %2, %1 \n" - " .set mips0 \n" - : "=&r" (retval), "=m" (*m), "=&r" (dummy) - : "R" (*m), "Jr" (val) - : "memory"); - } while (unlikely(!dummy)); - } else { - unsigned long flags; - - raw_local_irq_save(flags); - retval = *m; - *m = val; - raw_local_irq_restore(flags); /* implies memory barrier */ - } - - smp_llsc_mb(); - - return retval; -} - -#ifdef CONFIG_64BIT -static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) -{ - __u64 retval; - - smp_mb__before_llsc(); - - if (kernel_uses_llsc && R10000_LLSC_WAR) { - unsigned long dummy; - - __asm__ __volatile__( - " .set mips3 \n" - "1: lld %0, %3 # xchg_u64 \n" - " move %2, %z4 \n" - " scd %2, %1 \n" - " beqzl %2, 1b \n" - " .set mips0 \n" - : "=&r" (retval), "=m" (*m), "=&r" (dummy) - : "R" (*m), "Jr" (val) - : "memory"); - } else if (kernel_uses_llsc) { - unsigned long dummy; - - do { - __asm__ __volatile__( - " .set mips3 \n" - " lld %0, %3 # xchg_u64 \n" - " move %2, %z4 \n" - " scd %2, %1 \n" - " .set mips0 \n" - : "=&r" (retval), "=m" (*m), "=&r" (dummy) - : "R" (*m), "Jr" (val) - : "memory"); - } while (unlikely(!dummy)); - } else { - unsigned long flags; - - raw_local_irq_save(flags); - retval = *m; - *m = val; - raw_local_irq_restore(flags); /* implies memory barrier */ - } - - smp_llsc_mb(); - - return retval; -} -#else -extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val); -#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels -#endif - -static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) -{ - switch (size) { - case 4: - return __xchg_u32(ptr, x); - case 8: - return __xchg_u64(ptr, x); - } - - return x; -} - -#define xchg(ptr, x) \ -({ \ - BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc); \ - \ - ((__typeof__(*(ptr))) \ - __xchg((unsigned long)(x), (ptr), 
sizeof(*(ptr)))); \ -}) - -extern void set_handler(unsigned long offset, void *addr, unsigned long len); -extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len); - -typedef void (*vi_handler_t)(void); -extern void *set_vi_handler(int n, vi_handler_t addr); - -extern void *set_except_vector(int n, void *addr); -extern unsigned long ebase; -extern void per_cpu_trap_init(void); - -/* - * See include/asm-ia64/system.h; prevents deadlock on SMP - * systems. - */ -#define __ARCH_WANT_UNLOCKED_CTXSW - -extern unsigned long arch_align_stack(unsigned long sp); - -#endif /* _ASM_SYSTEM_H */ diff --git a/arch/mips/include/asm/txx9/jmr3927.h b/arch/mips/include/asm/txx9/jmr3927.h index a409c446bf18..8808d7f82da0 100644 --- a/arch/mips/include/asm/txx9/jmr3927.h +++ b/arch/mips/include/asm/txx9/jmr3927.h @@ -12,7 +12,6 @@ #include <asm/txx9/tx3927.h> #include <asm/addrspace.h> -#include <asm/system.h> #include <asm/txx9irq.h> /* CS */ diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c index f305ca14351b..d6a18644365a 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c @@ -16,7 +16,7 @@ #include <asm/cpu.h> #include <asm/fpu.h> #include <asm/mipsregs.h> -#include <asm/system.h> +#include <asm/setup.h> static char bug64hit[] __initdata = "reliable operation impossible!\n%s"; diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 0bab464b8e33..5099201fb7bc 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -22,7 +22,6 @@ #include <asm/cpu.h> #include <asm/fpu.h> #include <asm/mipsregs.h> -#include <asm/system.h> #include <asm/watch.h> #include <asm/elf.h> #include <asm/spram.h> diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c index a8a8977d5887..b0662cf97ea8 100644 --- a/arch/mips/kernel/irq-rm7000.c +++ b/arch/mips/kernel/irq-rm7000.c @@ -16,7 +16,6 @@ #include <asm/irq_cpu.h> #include <asm/mipsregs.h> -#include <asm/system.h> static inline void unmask_rm7k_irq(struct irq_data *d) { diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c index 38874a4b9255..1282b9ae81c4 100644 --- a/arch/mips/kernel/irq-rm9000.c +++ b/arch/mips/kernel/irq-rm9000.c @@ -17,7 +17,6 @@ #include <asm/irq_cpu.h> #include <asm/mipsregs.h> -#include <asm/system.h> static inline void unmask_rm9k_irq(struct irq_data *d) { diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 7f50318061b5..a5aa43d07c8e 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -23,7 +23,6 @@ #include <linux/ftrace.h> #include <linux/atomic.h> -#include <asm/system.h> #include <asm/uaccess.h> #ifdef CONFIG_KGDB diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index 191eb52228c4..972263bcf403 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c @@ -35,7 +35,6 @@ #include <asm/irq_cpu.h> #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> -#include <asm/system.h> static inline void unmask_mips_irq(struct irq_data *d) { diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index c23d11f6851d..7f3376b1c219 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -13,7 +13,6 @@ #include <asm/cpu.h> #include <asm/processor.h> #include <linux/atomic.h> -#include <asm/system.h> #include <asm/hardirq.h> #include <asm/mmu_context.h> #include <asm/mipsmtregs.h> diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 61f1cb45a1d5..e9a5fd7277f4 100644 --- 
a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -32,7 +32,6 @@ #include <asm/dsp.h> #include <asm/fpu.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/mipsregs.h> #include <asm/processor.h> #include <asm/uaccess.h> diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 7786b608d932..7c24c2973c6d 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -34,7 +34,6 @@ #include <asm/mipsmtregs.h> #include <asm/pgtable.h> #include <asm/page.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/bootinfo.h> #include <asm/reg.h> diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index 32644b4a0714..a3b017815eff 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -32,7 +32,6 @@ #include <asm/mipsmtregs.h> #include <asm/pgtable.h> #include <asm/page.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/bootinfo.h> diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index a9d801dec6b0..b8c18dcdd2c4 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -38,7 +38,6 @@ #include <linux/atomic.h> #include <asm/cpu.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/vpe.h> #include <asm/rtlx.h> diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 058e964e7303..c504b212f8f3 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -31,7 +31,6 @@ #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp-ops.h> -#include <asm/system.h> #include <asm/prom.h> struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index f8524003676a..185ca00c4c84 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -34,6 +34,7 @@ #include <asm/cpu-features.h> #include <asm/war.h> #include <asm/vdso.h> +#include <asm/dsp.h> #include "signal-common.h" diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index aae986613795..06b5da392e24 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -29,10 +29,10 @@ #include <asm/cacheflush.h> #include <asm/sim.h> #include <asm/ucontext.h> -#include <asm/system.h> #include <asm/fpu.h> #include <asm/war.h> #include <asm/vdso.h> +#include <asm/dsp.h> #include "signal-common.h" diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index ee24d814d5b9..ae29e894ab8d 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -35,7 +35,6 @@ #include <asm/sim.h> #include <asm/uaccess.h> #include <asm/ucontext.h> -#include <asm/system.h> #include <asm/fpu.h> #include <asm/cpu-features.h> #include <asm/war.h> diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index d5e950ab8527..ca673569fd24 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -28,7 +28,6 @@ #include <asm/time.h> #include <asm/pgtable.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/bootinfo.h> #include <asm/pmon.h> #include <asm/cacheflush.h> diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index fe3095160655..e7e03ecf5495 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -29,7 +29,6 @@ #include <asm/cacheflush.h> #include <asm/cpu.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/hardirq.h> #include <asm/mmu_context.h> #include <asm/smp.h> diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c 
index ce9e286f0a74..ff17868734cf 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -28,7 +28,6 @@ #include <asm/cacheflush.h> #include <asm/cpu.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/hardirq.h> #include <asm/mmu_context.h> #include <asm/time.h> diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 32c1e954cd37..9c1cce9de35f 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -38,9 +38,9 @@ #include <asm/cpu.h> #include <asm/processor.h> #include <asm/r4k-timer.h> -#include <asm/system.h> #include <asm/mmu_context.h> #include <asm/time.h> +#include <asm/setup.h> #ifdef CONFIG_MIPS_MT_SMTC #include <asm/mipsmtregs.h> diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c index 928a5a61e1a6..145771c0ed7a 100644 --- a/arch/mips/kernel/smtc-proc.c +++ b/arch/mips/kernel/smtc-proc.c @@ -11,7 +11,6 @@ #include <asm/cpu.h> #include <asm/processor.h> #include <linux/atomic.h> -#include <asm/system.h> #include <asm/hardirq.h> #include <asm/mmu_context.h> #include <asm/mipsregs.h> diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 0a42ff3ff6a1..c4f75bbc0bd6 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -31,7 +31,6 @@ #include <asm/cpu.h> #include <asm/processor.h> #include <linux/atomic.h> -#include <asm/system.h> #include <asm/hardirq.h> #include <asm/hazards.h> #include <asm/irq.h> diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c index 1821d12a6410..6af08d896e20 100644 --- a/arch/mips/kernel/spram.c +++ b/arch/mips/kernel/spram.c @@ -15,7 +15,6 @@ #include <asm/fpu.h> #include <asm/mipsregs.h> -#include <asm/system.h> #include <asm/r4kcache.h> #include <asm/hazards.h> diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index d02765708ddb..b08220c82113 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -37,6 +37,7 @@ #include <asm/shmparam.h> #include <asm/sysmips.h> #include <asm/uaccess.h> +#include <asm/switch_to.h> /* * For historic reasons the pipe(2) syscall on MIPS has an unusual calling diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index d79ae5437b58..cfdaaa4cffc0 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -45,7 +45,6 @@ #include <asm/pgtable.h> #include <asm/ptrace.h> #include <asm/sections.h> -#include <asm/system.h> #include <asm/tlbdebug.h> #include <asm/traps.h> #include <asm/uaccess.h> diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index aedb8941caa5..9c58bdf58f23 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -85,7 +85,6 @@ #include <asm/cop2.h> #include <asm/inst.h> #include <asm/uaccess.h> -#include <asm/system.h> #define STR(x) __STR(x) #define __STR(x) #x diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index bfa12a4f97b9..f6f91523cb1c 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -49,7 +49,6 @@ #include <asm/cpu.h> #include <asm/mips_mt.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/vpe.h> #include <asm/kspd.h> diff --git a/arch/mips/lasat/reset.c b/arch/mips/lasat/reset.c index b1e7a89fb730..e21f0b9a586e 100644 --- a/arch/mips/lasat/reset.c +++ b/arch/mips/lasat/reset.c @@ -21,7 +21,6 @@ #include <linux/pm.h> #include <asm/reboot.h> -#include <asm/system.h> #include <asm/lasat/lasat.h> #include "picvue.h" diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c index 3c4a8c5ba7f2..384a3b0091ea 100644 --- 
a/arch/mips/math-emu/dsemul.c +++ b/arch/mips/math-emu/dsemul.c @@ -12,7 +12,6 @@ #include <asm/uaccess.h> #include <asm/branch.h> #include <asm/mipsregs.h> -#include <asm/system.h> #include <asm/cacheflush.h> #include <asm/fpu_emulator.h> diff --git a/arch/mips/mipssim/sim_smtc.c b/arch/mips/mipssim/sim_smtc.c index 915063991f6e..3c104abd8aa5 100644 --- a/arch/mips/mipssim/sim_smtc.c +++ b/arch/mips/mipssim/sim_smtc.c @@ -28,7 +28,6 @@ #include <asm/cpu.h> #include <asm/processor.h> #include <asm/smtc.h> -#include <asm/system.h> #include <asm/mmu_context.h> #include <asm/smtc_ipi.h> diff --git a/arch/mips/mipssim/sim_time.c b/arch/mips/mipssim/sim_time.c index 5492c42f7650..77bad3c04280 100644 --- a/arch/mips/mipssim/sim_time.c +++ b/arch/mips/mipssim/sim_time.c @@ -11,6 +11,7 @@ #include <asm/hardirq.h> #include <asm/div64.h> #include <asm/cpu.h> +#include <asm/setup.h> #include <asm/time.h> #include <asm/irq.h> #include <asm/mc146818-time.h> diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c index cf7895db0739..1f9ca07f53c8 100644 --- a/arch/mips/mm/c-octeon.c +++ b/arch/mips/mm/c-octeon.c @@ -21,7 +21,6 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <asm/r4kcache.h> -#include <asm/system.h> #include <asm/mmu_context.h> #include <asm/war.h> diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c index 0765583d0c92..031c4c2cdf2e 100644 --- a/arch/mips/mm/c-r3k.c +++ b/arch/mips/mm/c-r3k.c @@ -18,7 +18,6 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> -#include <asm/system.h> #include <asm/isadep.h> #include <asm/io.h> #include <asm/bootinfo.h> diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index c97087d12d07..bda8eb26ece7 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -29,7 +29,6 @@ #include <asm/pgtable.h> #include <asm/r4kcache.h> #include <asm/sections.h> -#include <asm/system.h> #include <asm/mmu_context.h> #include <asm/war.h> #include <asm/cacheflush.h> /* for run_uncached() */ diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c index a43c197ccf8c..87d23cada6d6 100644 --- a/arch/mips/mm/c-tx39.c +++ b/arch/mips/mm/c-tx39.c @@ -18,7 +18,6 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> -#include <asm/system.h> #include <asm/isadep.h> #include <asm/io.h> #include <asm/bootinfo.h> diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 69ebd586d7ff..c14f6dfed995 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -22,7 +22,6 @@ #include <asm/branch.h> #include <asm/mmu_context.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/ptrace.h> #include <asm/highmem.h> /* For VMALLOC_END */ diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index 36272f7d3744..cc0b626858b3 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -22,7 +22,6 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <asm/prefetch.h> -#include <asm/system.h> #include <asm/bootinfo.h> #include <asm/mipsregs.h> #include <asm/mmu_context.h> diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c index a6bd11fba7bf..1eb708ef75ff 100644 --- a/arch/mips/mm/sc-ip22.c +++ b/arch/mips/mm/sc-ip22.c @@ -12,7 +12,6 @@ #include <asm/bcache.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/bootinfo.h> #include <asm/sgi/ip22.h> #include <asm/sgi/mc.h> diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 9cca8de00545..93d937b4b1ba 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -11,7 
+11,6 @@ #include <asm/cacheops.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/mmu_context.h> #include <asm/r4kcache.h> diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c index ae1e533a096e..8d90ff25b123 100644 --- a/arch/mips/mm/sc-r5k.c +++ b/arch/mips/mm/sc-r5k.c @@ -12,7 +12,6 @@ #include <asm/cacheops.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/mmu_context.h> #include <asm/r4kcache.h> diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c index ed1fa460f84e..a63d1ed0827f 100644 --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c @@ -19,7 +19,6 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> -#include <asm/system.h> #include <asm/tlbmisc.h> #include <asm/isadep.h> #include <asm/io.h> diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 2dc625346c40..d2572cb232db 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -18,7 +18,6 @@ #include <asm/bootinfo.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/tlbmisc.h> extern void build_tlb_refill_handler(void); diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c index 3d95f76c106b..91c2499f806a 100644 --- a/arch/mips/mm/tlb-r8k.c +++ b/arch/mips/mm/tlb-r8k.c @@ -17,7 +17,6 @@ #include <asm/bootinfo.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> -#include <asm/system.h> extern void build_tlb_refill_handler(void); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index e06370f58ef3..0bc485b3cd60 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -32,6 +32,7 @@ #include <asm/pgtable.h> #include <asm/war.h> #include <asm/uasm.h> +#include <asm/setup.h> /* * TLB load/store/modify handlers. 
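The MIPS conversions above all follow the same mechanical pattern: the catch-all <asm/system.h> include is dropped, and a file that still needs one of the symbols it used to provide pulls in the specific split-out header instead (<asm/setup.h>, <asm/switch_to.h>, <asm/dsp.h>, and so on, as the swaps in cpu-bugs64.c, syscall.c and tlbex.c show). A minimal sketch of what that looks like for a hypothetical out-of-tree MIPS file; the file and function are illustrative only, not part of this patch:

/* example.c -- illustrative, not a file touched by this series */
#include <linux/kernel.h>
#include <asm/cmpxchg.h>	/* cmpxchg(); formerly dragged in via <asm/system.h> */

static unsigned long example_state;

static void example_claim(void)
{
	/* behaviour is unchanged by the header split; only the include moves */
	if (cmpxchg(&example_state, 0UL, 1UL) == 0UL)
		pr_info("example_state claimed\n");
}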
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c index 4b988b9a30d5..27a6cdb36e37 100644 --- a/arch/mips/mti-malta/malta-init.c +++ b/arch/mips/mti-malta/malta-init.c @@ -26,7 +26,6 @@ #include <asm/bootinfo.h> #include <asm/gt64120.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/cacheflush.h> #include <asm/smp-ops.h> #include <asm/traps.h> diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index a588b5cef8d2..7b13a4caeea4 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -44,6 +44,7 @@ #include <asm/msc01_ic.h> #include <asm/gic.h> #include <asm/gcmpregs.h> +#include <asm/setup.h> int gcmp_present = -1; int gic_present; diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index f8ee945ee411..115f5bc06003 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -35,6 +35,7 @@ #include <asm/irq.h> #include <asm/div64.h> #include <asm/cpu.h> +#include <asm/setup.h> #include <asm/time.h> #include <asm/mc146818-time.h> #include <asm/msc01_ic.h> diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c index 49a4f6cf71e5..e52bfcbce093 100644 --- a/arch/mips/netlogic/common/irq.c +++ b/arch/mips/netlogic/common/irq.c @@ -43,7 +43,6 @@ #include <asm/errno.h> #include <asm/signal.h> -#include <asm/system.h> #include <asm/ptrace.h> #include <asm/mipsregs.h> #include <asm/thread_info.h> diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c index c4fa2d775d8b..2e6f7cab24c1 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c @@ -16,7 +16,6 @@ #include <linux/irq.h> #include <asm/mipsregs.h> -#include <asm/system.h> #include <msp_cic_int.h> #include <msp_regs.h> diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c index 98fd0099d964..598b6a66b970 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c @@ -16,7 +16,6 @@ #include <linux/bitops.h> #include <asm/mipsregs.h> -#include <asm/system.h> #include <msp_cic_int.h> #include <msp_regs.h> diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c index 5bbcc47da6b9..83a1c5eae3f8 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c @@ -16,7 +16,6 @@ #include <linux/bitops.h> #include <asm/mipsregs.h> -#include <asm/system.h> #include <msp_slp_int.h> #include <msp_regs.h> diff --git a/arch/mips/pmc-sierra/yosemite/irq.c b/arch/mips/pmc-sierra/yosemite/irq.c index 25bbbf428be9..6590812daa56 100644 --- a/arch/mips/pmc-sierra/yosemite/irq.c +++ b/arch/mips/pmc-sierra/yosemite/irq.c @@ -44,7 +44,6 @@ #include <asm/irq.h> #include <asm/irq_cpu.h> #include <asm/mipsregs.h> -#include <asm/system.h> #include <asm/titan_dep.h> /* Hypertransport specific */ diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c index dcc926e06fce..6a2754c4f106 100644 --- a/arch/mips/pmc-sierra/yosemite/prom.c +++ b/arch/mips/pmc-sierra/yosemite/prom.c @@ -20,7 +20,6 @@ #include <asm/processor.h> #include <asm/reboot.h> #include <asm/smp-ops.h> -#include <asm/system.h> #include <asm/bootinfo.h> #include <asm/pmon.h> diff --git a/arch/mips/pnx833x/common/interrupts.c b/arch/mips/pnx833x/common/interrupts.c index adc171c8846f..a86d5d5fceb0 100644 --- 
a/arch/mips/pnx833x/common/interrupts.c +++ b/arch/mips/pnx833x/common/interrupts.c @@ -25,6 +25,7 @@ #include <linux/interrupt.h> #include <asm/mipsregs.h> #include <asm/irq_cpu.h> +#include <asm/setup.h> #include <irq.h> #include <irq-mapping.h> #include <gpio.h> diff --git a/arch/mips/powertv/asic/asic_int.c b/arch/mips/powertv/asic/asic_int.c index 529c44a52d64..99d82e10000b 100644 --- a/arch/mips/powertv/asic/asic_int.c +++ b/arch/mips/powertv/asic/asic_int.c @@ -34,6 +34,7 @@ #include <asm/irq_cpu.h> #include <linux/io.h> #include <asm/irq_regs.h> +#include <asm/setup.h> #include <asm/mips-boards/generic.h> #include <asm/mach-powertv/asic_regs.h> diff --git a/arch/mips/powertv/asic/irq_asic.c b/arch/mips/powertv/asic/irq_asic.c index 7fb97fb0931e..fa9ae9584710 100644 --- a/arch/mips/powertv/asic/irq_asic.c +++ b/arch/mips/powertv/asic/irq_asic.c @@ -17,7 +17,6 @@ #include <asm/irq_cpu.h> #include <asm/mipsregs.h> -#include <asm/system.h> #include <asm/mach-powertv/asic_regs.h> diff --git a/arch/mips/powertv/init.c b/arch/mips/powertv/init.c index 83552288e802..1cf5abbef715 100644 --- a/arch/mips/powertv/init.c +++ b/arch/mips/powertv/init.c @@ -26,7 +26,6 @@ #include <asm/bootinfo.h> #include <linux/io.h> -#include <asm/system.h> #include <asm/cacheflush.h> #include <asm/traps.h> diff --git a/arch/mips/rb532/irq.c b/arch/mips/rb532/irq.c index 7c6db74e3fad..f298430cff07 100644 --- a/arch/mips/rb532/irq.c +++ b/arch/mips/rb532/irq.c @@ -42,7 +42,6 @@ #include <asm/bootinfo.h> #include <asm/time.h> #include <asm/mipsregs.h> -#include <asm/system.h> #include <asm/mach-rc32434/irq.h> #include <asm/mach-rc32434/gpio.h> diff --git a/arch/mips/sgi-ip22/ip22-berr.c b/arch/mips/sgi-ip22/ip22-berr.c index 911d3999c0c7..3f6ccd53c15d 100644 --- a/arch/mips/sgi-ip22/ip22-berr.c +++ b/arch/mips/sgi-ip22/ip22-berr.c @@ -9,7 +9,6 @@ #include <linux/sched.h> #include <asm/addrspace.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/branch.h> #include <asm/irq_regs.h> diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c index 45b6694c2079..20363d29cb58 100644 --- a/arch/mips/sgi-ip22/ip22-reset.c +++ b/arch/mips/sgi-ip22/ip22-reset.c @@ -18,7 +18,6 @@ #include <asm/io.h> #include <asm/irq.h> -#include <asm/system.h> #include <asm/reboot.h> #include <asm/sgialib.h> #include <asm/sgi/ioc.h> diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c index 88c684e05a3d..0626555fd1a3 100644 --- a/arch/mips/sgi-ip22/ip28-berr.c +++ b/arch/mips/sgi-ip22/ip28-berr.c @@ -11,7 +11,6 @@ #include <linux/seq_file.h> #include <asm/addrspace.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/branch.h> #include <asm/irq_regs.h> diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 23642238c689..69a939ae65e4 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -27,7 +27,6 @@ #include <asm/bootinfo.h> #include <asm/io.h> #include <asm/mipsregs.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/pci/bridge.h> diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c index c17076108d47..f347bc6b7954 100644 --- a/arch/mips/sgi-ip27/ip27-reset.c +++ b/arch/mips/sgi-ip27/ip27-reset.c @@ -19,7 +19,6 @@ #include <asm/io.h> #include <asm/irq.h> #include <asm/reboot.h> -#include <asm/system.h> #include <asm/sgialib.h> #include <asm/sn/addrs.h> #include <asm/sn/arch.h> diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c index 
a092860d5196..e7d5054de8c8 100644 --- a/arch/mips/sgi-ip32/ip32-irq.c +++ b/arch/mips/sgi-ip32/ip32-irq.c @@ -22,7 +22,6 @@ #include <asm/irq_cpu.h> #include <asm/mipsregs.h> #include <asm/signal.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/ip32/crime.h> #include <asm/ip32/mace.h> diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c index 9b95d80ebc6e..1f823da4c77b 100644 --- a/arch/mips/sgi-ip32/ip32-reset.c +++ b/arch/mips/sgi-ip32/ip32-reset.c @@ -20,7 +20,6 @@ #include <asm/addrspace.h> #include <asm/irq.h> #include <asm/reboot.h> -#include <asm/system.h> #include <asm/wbflush.h> #include <asm/ip32/mace.h> #include <asm/ip32/crime.h> diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c index 09740d60e187..215713e1f3c4 100644 --- a/arch/mips/sibyte/bcm1480/irq.c +++ b/arch/mips/sibyte/bcm1480/irq.c @@ -27,7 +27,6 @@ #include <asm/errno.h> #include <asm/irq_regs.h> #include <asm/signal.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/sibyte/bcm1480_regs.h> diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c index 48853ab5bcf0..e8c4538c5f61 100644 --- a/arch/mips/sibyte/common/sb_tbprof.c +++ b/arch/mips/sibyte/common/sb_tbprof.c @@ -53,7 +53,6 @@ #define K_INT_PERF_CNT K_BCM1480_INT_PERF_CNT #endif -#include <asm/system.h> #include <asm/uaccess.h> #define SBPROF_TB_MAJOR 240 diff --git a/arch/mips/sibyte/sb1250/bus_watcher.c b/arch/mips/sibyte/sb1250/bus_watcher.c index 45274bd3cd8b..86e6e54dd15d 100644 --- a/arch/mips/sibyte/sb1250/bus_watcher.c +++ b/arch/mips/sibyte/sb1250/bus_watcher.c @@ -30,7 +30,6 @@ #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/proc_fs.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/sibyte/sb1250.h> diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c index 76ee045e2ce4..340aaf626659 100644 --- a/arch/mips/sibyte/sb1250/irq.c +++ b/arch/mips/sibyte/sb1250/irq.c @@ -26,7 +26,6 @@ #include <asm/errno.h> #include <asm/signal.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/io.h> diff --git a/arch/mips/sni/reset.c b/arch/mips/sni/reset.c index 79f8d70f48c9..244f9427625b 100644 --- a/arch/mips/sni/reset.c +++ b/arch/mips/sni/reset.c @@ -5,7 +5,6 @@ */ #include <asm/io.h> #include <asm/reboot.h> -#include <asm/system.h> #include <asm/sni.h> /* diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c index fad2bef432cd..ae0e4ee6c617 100644 --- a/arch/mips/vr41xx/common/irq.c +++ b/arch/mips/vr41xx/common/irq.c @@ -22,7 +22,6 @@ #include <linux/irq.h> #include <asm/irq_cpu.h> -#include <asm/system.h> #include <asm/vr41xx/irq.h> typedef struct irq_cascade { diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c index 692b4e85b7fc..9fbf5f0d1faf 100644 --- a/arch/mips/vr41xx/common/pmu.c +++ b/arch/mips/vr41xx/common/pmu.c @@ -30,7 +30,6 @@ #include <asm/io.h> #include <asm/processor.h> #include <asm/reboot.h> -#include <asm/system.h> #define PMU_TYPE1_BASE 0x0b0000a0UL #define PMU_TYPE1_SIZE 0x0eUL diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index b9a8f8461262..975e1841ca64 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -12,112 +12,7 @@ #define _ASM_ATOMIC_H #include <asm/irqflags.h> - -#ifndef __ASSEMBLY__ - -#ifdef CONFIG_SMP -#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT -static inline -unsigned long __xchg(volatile unsigned long *m, unsigned long val) -{ - unsigned 
long status; - unsigned long oldval; - - asm volatile( - "1: mov %4,(_AAR,%3) \n" - " mov (_ADR,%3),%1 \n" - " mov %5,(_ADR,%3) \n" - " mov (_ADR,%3),%0 \n" /* flush */ - " mov (_ASR,%3),%0 \n" - " or %0,%0 \n" - " bne 1b \n" - : "=&r"(status), "=&r"(oldval), "=m"(*m) - : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val) - : "memory", "cc"); - - return oldval; -} - -static inline unsigned long __cmpxchg(volatile unsigned long *m, - unsigned long old, unsigned long new) -{ - unsigned long status; - unsigned long oldval; - - asm volatile( - "1: mov %4,(_AAR,%3) \n" - " mov (_ADR,%3),%1 \n" - " cmp %5,%1 \n" - " bne 2f \n" - " mov %6,(_ADR,%3) \n" - "2: mov (_ADR,%3),%0 \n" /* flush */ - " mov (_ASR,%3),%0 \n" - " or %0,%0 \n" - " bne 1b \n" - : "=&r"(status), "=&r"(oldval), "=m"(*m) - : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), - "r"(old), "r"(new) - : "memory", "cc"); - - return oldval; -} -#else /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */ -#error "No SMP atomic operation support!" -#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */ - -#else /* CONFIG_SMP */ - -/* - * Emulate xchg for non-SMP MN10300 - */ -struct __xchg_dummy { unsigned long a[100]; }; -#define __xg(x) ((struct __xchg_dummy *)(x)) - -static inline -unsigned long __xchg(volatile unsigned long *m, unsigned long val) -{ - unsigned long oldval; - unsigned long flags; - - flags = arch_local_cli_save(); - oldval = *m; - *m = val; - arch_local_irq_restore(flags); - return oldval; -} - -/* - * Emulate cmpxchg for non-SMP MN10300 - */ -static inline unsigned long __cmpxchg(volatile unsigned long *m, - unsigned long old, unsigned long new) -{ - unsigned long oldval; - unsigned long flags; - - flags = arch_local_cli_save(); - oldval = *m; - if (oldval == old) - *m = new; - arch_local_irq_restore(flags); - return oldval; -} - -#endif /* CONFIG_SMP */ - -#define xchg(ptr, v) \ - ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \ - (unsigned long)(v))) - -#define cmpxchg(ptr, o, n) \ - ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \ - (unsigned long)(o), \ - (unsigned long)(n))) - -#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) -#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) - -#endif /* !__ASSEMBLY__ */ +#include <asm/cmpxchg.h> #ifndef CONFIG_SMP #include <asm-generic/atomic.h> @@ -269,6 +164,8 @@ static inline void atomic_dec(atomic_t *v) c; \ }) +#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) +#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) /** * atomic_clear_mask - Atomically clear bits in memory diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h new file mode 100644 index 000000000000..2bd97a5c8af7 --- /dev/null +++ b/arch/mn10300/include/asm/barrier.h @@ -0,0 +1,37 @@ +/* MN10300 memory barrier definitions + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ +#ifndef _ASM_BARRIER_H +#define _ASM_BARRIER_H + +#define nop() asm volatile ("nop") + +#define mb() asm volatile ("": : :"memory") +#define rmb() mb() +#define wmb() asm volatile ("": : :"memory") + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define set_mb(var, value) do { xchg(&var, value); } while (0) +#else /* CONFIG_SMP */ +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define set_mb(var, value) do { var = value; mb(); } while (0) +#endif /* CONFIG_SMP */ + +#define set_wmb(var, value) do { var = value; wmb(); } while (0) + +#define read_barrier_depends() do {} while (0) +#define smp_read_barrier_depends() do {} while (0) + +#endif /* _ASM_BARRIER_H */ diff --git a/arch/mn10300/include/asm/cmpxchg.h b/arch/mn10300/include/asm/cmpxchg.h new file mode 100644 index 000000000000..97a4aaf387a6 --- /dev/null +++ b/arch/mn10300/include/asm/cmpxchg.h @@ -0,0 +1,115 @@ +/* MN10300 Atomic xchg/cmpxchg operations + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#ifndef _ASM_CMPXCHG_H +#define _ASM_CMPXCHG_H + +#include <asm/irqflags.h> + +#ifdef CONFIG_SMP +#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT +static inline +unsigned long __xchg(volatile unsigned long *m, unsigned long val) +{ + unsigned long status; + unsigned long oldval; + + asm volatile( + "1: mov %4,(_AAR,%3) \n" + " mov (_ADR,%3),%1 \n" + " mov %5,(_ADR,%3) \n" + " mov (_ADR,%3),%0 \n" /* flush */ + " mov (_ASR,%3),%0 \n" + " or %0,%0 \n" + " bne 1b \n" + : "=&r"(status), "=&r"(oldval), "=m"(*m) + : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val) + : "memory", "cc"); + + return oldval; +} + +static inline unsigned long __cmpxchg(volatile unsigned long *m, + unsigned long old, unsigned long new) +{ + unsigned long status; + unsigned long oldval; + + asm volatile( + "1: mov %4,(_AAR,%3) \n" + " mov (_ADR,%3),%1 \n" + " cmp %5,%1 \n" + " bne 2f \n" + " mov %6,(_ADR,%3) \n" + "2: mov (_ADR,%3),%0 \n" /* flush */ + " mov (_ASR,%3),%0 \n" + " or %0,%0 \n" + " bne 1b \n" + : "=&r"(status), "=&r"(oldval), "=m"(*m) + : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), + "r"(old), "r"(new) + : "memory", "cc"); + + return oldval; +} +#else /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */ +#error "No SMP atomic operation support!" 
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */ + +#else /* CONFIG_SMP */ + +/* + * Emulate xchg for non-SMP MN10300 + */ +struct __xchg_dummy { unsigned long a[100]; }; +#define __xg(x) ((struct __xchg_dummy *)(x)) + +static inline +unsigned long __xchg(volatile unsigned long *m, unsigned long val) +{ + unsigned long oldval; + unsigned long flags; + + flags = arch_local_cli_save(); + oldval = *m; + *m = val; + arch_local_irq_restore(flags); + return oldval; +} + +/* + * Emulate cmpxchg for non-SMP MN10300 + */ +static inline unsigned long __cmpxchg(volatile unsigned long *m, + unsigned long old, unsigned long new) +{ + unsigned long oldval; + unsigned long flags; + + flags = arch_local_cli_save(); + oldval = *m; + if (oldval == old) + *m = new; + arch_local_irq_restore(flags); + return oldval; +} + +#endif /* CONFIG_SMP */ + +#define xchg(ptr, v) \ + ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \ + (unsigned long)(v))) + +#define cmpxchg(ptr, o, n) \ + ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \ + (unsigned long)(o), \ + (unsigned long)(n))) + +#endif /* _ASM_CMPXCHG_H */ diff --git a/arch/mn10300/include/asm/dma.h b/arch/mn10300/include/asm/dma.h index 098df2e617ab..10b77d4628c2 100644 --- a/arch/mn10300/include/asm/dma.h +++ b/arch/mn10300/include/asm/dma.h @@ -11,7 +11,6 @@ #ifndef _ASM_DMA_H #define _ASM_DMA_H -#include <asm/system.h> #include <linux/spinlock.h> #include <asm/io.h> #include <linux/delay.h> diff --git a/arch/mn10300/include/asm/exec.h b/arch/mn10300/include/asm/exec.h new file mode 100644 index 000000000000..c74e367f4b9d --- /dev/null +++ b/arch/mn10300/include/asm/exec.h @@ -0,0 +1,16 @@ +/* MN10300 process execution definitions + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#ifndef _ASM_EXEC_H +#define _ASM_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* _ASM_EXEC_H */ diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h new file mode 100644 index 000000000000..393d311735c8 --- /dev/null +++ b/arch/mn10300/include/asm/switch_to.h @@ -0,0 +1,49 @@ +/* MN10300 task switching definitions + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ +#ifndef _ASM_SWITCH_TO_H +#define _ASM_SWITCH_TO_H + +#include <asm/barrier.h> + +struct task_struct; +struct thread_struct; + +#if !defined(CONFIG_LAZY_SAVE_FPU) +struct fpu_state_struct; +extern asmlinkage void fpu_save(struct fpu_state_struct *); +#define switch_fpu(prev, next) \ + do { \ + if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) { \ + (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU; \ + (prev)->thread.uregs->epsw &= ~EPSW_FE; \ + fpu_save(&(prev)->thread.fpu_state); \ + } \ + } while (0) +#else +#define switch_fpu(prev, next) do {} while (0) +#endif + +/* context switching is now performed out-of-line in switch_to.S */ +extern asmlinkage +struct task_struct *__switch_to(struct thread_struct *prev, + struct thread_struct *next, + struct task_struct *prev_task); + +#define switch_to(prev, next, last) \ +do { \ + switch_fpu(prev, next); \ + current->thread.wchan = (u_long) __builtin_return_address(0); \ + (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \ + mb(); \ + current->thread.wchan = 0; \ +} while (0) + +#endif /* _ASM_SWITCH_TO_H */ diff --git a/arch/mn10300/include/asm/system.h b/arch/mn10300/include/asm/system.h deleted file mode 100644 index 94b4c5e1491b..000000000000 --- a/arch/mn10300/include/asm/system.h +++ /dev/null @@ -1,102 +0,0 @@ -/* MN10300 System definitions - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ -#ifndef _ASM_SYSTEM_H -#define _ASM_SYSTEM_H - -#include <asm/cpu-regs.h> -#include <asm/intctl-regs.h> - -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ - -#include <linux/kernel.h> -#include <linux/irqflags.h> -#include <linux/atomic.h> - -#if !defined(CONFIG_LAZY_SAVE_FPU) -struct fpu_state_struct; -extern asmlinkage void fpu_save(struct fpu_state_struct *); -#define switch_fpu(prev, next) \ - do { \ - if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) { \ - (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU; \ - (prev)->thread.uregs->epsw &= ~EPSW_FE; \ - fpu_save(&(prev)->thread.fpu_state); \ - } \ - } while (0) -#else -#define switch_fpu(prev, next) do {} while (0) -#endif - -struct task_struct; -struct thread_struct; - -extern asmlinkage -struct task_struct *__switch_to(struct thread_struct *prev, - struct thread_struct *next, - struct task_struct *prev_task); - -/* context switching is now performed out-of-line in switch_to.S */ -#define switch_to(prev, next, last) \ -do { \ - switch_fpu(prev, next); \ - current->thread.wchan = (u_long) __builtin_return_address(0); \ - (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \ - mb(); \ - current->thread.wchan = 0; \ -} while (0) - -#define arch_align_stack(x) (x) - -#define nop() asm volatile ("nop") - -/* - * Force strict CPU ordering. - * And yes, this is required on UP too when we're talking - * to devices. - * - * For now, "wmb()" doesn't actually do anything, as all - * Intel CPU's follow what Intel calls a *Processor Order*, - * in which all writes are seen in the program order even - * outside the CPU. - * - * I expect future Intel CPU's to have a weaker ordering, - * but I'd also expect them to finally get their act together - * and add some real memory barriers if so. - * - * Some non intel clones support out of order store. 
wmb() ceases to be a - * nop for these. - */ - -#define mb() asm volatile ("": : :"memory") -#define rmb() mb() -#define wmb() asm volatile ("": : :"memory") - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define set_mb(var, value) do { xchg(&var, value); } while (0) -#else /* CONFIG_SMP */ -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#endif /* CONFIG_SMP */ - -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - -#define read_barrier_depends() do {} while (0) -#define smp_read_barrier_depends() do {} while (0) - -#endif /* !__ASSEMBLY__ */ -#endif /* __KERNEL__ */ -#endif /* _ASM_SYSTEM_H */ diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S index 3e3620d9fc45..8e11f9f48999 100644 --- a/arch/mn10300/kernel/entry.S +++ b/arch/mn10300/kernel/entry.S @@ -15,7 +15,6 @@ #include <linux/sys.h> #include <linux/linkage.h> #include <asm/smp.h> -#include <asm/system.h> #include <asm/irqflags.h> #include <asm/thread_info.h> #include <asm/intctl-regs.h> diff --git a/arch/mn10300/kernel/fpu.c b/arch/mn10300/kernel/fpu.c index bb5fa7df6c44..064fa194de2b 100644 --- a/arch/mn10300/kernel/fpu.c +++ b/arch/mn10300/kernel/fpu.c @@ -12,7 +12,6 @@ #include <asm/fpu.h> #include <asm/elf.h> #include <asm/exceptions.h> -#include <asm/system.h> #ifdef CONFIG_LAZY_SAVE_FPU struct task_struct *fpu_state_owner; diff --git a/arch/mn10300/kernel/gdb-io-serial.c b/arch/mn10300/kernel/gdb-io-serial.c index f28dc99c6f72..df51242744cc 100644 --- a/arch/mn10300/kernel/gdb-io-serial.c +++ b/arch/mn10300/kernel/gdb-io-serial.c @@ -18,7 +18,6 @@ #include <linux/nmi.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/gdb-stub.h> #include <asm/exceptions.h> #include <asm/serial-regs.h> diff --git a/arch/mn10300/kernel/gdb-io-ttysm.c b/arch/mn10300/kernel/gdb-io-ttysm.c index c859cacbb9c3..caae8cac9db1 100644 --- a/arch/mn10300/kernel/gdb-io-ttysm.c +++ b/arch/mn10300/kernel/gdb-io-ttysm.c @@ -17,7 +17,6 @@ #include <linux/init.h> #include <linux/tty.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/gdb-stub.h> #include <asm/exceptions.h> #include <unit/clock.h> diff --git a/arch/mn10300/kernel/gdb-stub.c b/arch/mn10300/kernel/gdb-stub.c index 522eb8a9b60d..a128c57b586b 100644 --- a/arch/mn10300/kernel/gdb-stub.c +++ b/arch/mn10300/kernel/gdb-stub.c @@ -130,7 +130,6 @@ #include <linux/bug.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/gdb-stub.h> #include <asm/exceptions.h> #include <asm/debugger.h> diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c index 94901c56baf1..339cef4c8256 100644 --- a/arch/mn10300/kernel/mn10300-serial.c +++ b/arch/mn10300/kernel/mn10300-serial.c @@ -36,7 +36,6 @@ static const char serial_revdate[] = "2007-11-06"; #include <linux/console.h> #include <linux/sysrq.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/bitops.h> diff --git a/arch/mn10300/kernel/mn10300-watchdog.c b/arch/mn10300/kernel/mn10300-watchdog.c index a45f0c7549a6..db64a7166c09 100644 --- a/arch/mn10300/kernel/mn10300-watchdog.c +++ b/arch/mn10300/kernel/mn10300-watchdog.c @@ -18,7 +18,6 @@ #include <linux/kernel_stat.h> #include <linux/nmi.h> #include <asm/processor.h> -#include <asm/system.h> #include <linux/atomic.h> #include <asm/intctl-regs.h> #include <asm/rtc-regs.h> diff --git a/arch/mn10300/kernel/process.c 
b/arch/mn10300/kernel/process.c index cac401d37f75..14707f25153b 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c @@ -27,7 +27,6 @@ #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/mmu_context.h> diff --git a/arch/mn10300/kernel/ptrace.c b/arch/mn10300/kernel/ptrace.c index 5c0b07e61006..5bd58514e739 100644 --- a/arch/mn10300/kernel/ptrace.c +++ b/arch/mn10300/kernel/ptrace.c @@ -21,7 +21,6 @@ #include <linux/tracehook.h> #include <asm/uaccess.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/cacheflush.h> #include <asm/fpu.h> diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c index 9e7a3209a3e1..33c3bd1e5c6d 100644 --- a/arch/mn10300/kernel/setup.c +++ b/arch/mn10300/kernel/setup.c @@ -26,7 +26,6 @@ #include <asm/processor.h> #include <linux/console.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/setup.h> #include <asm/io.h> #include <asm/smp.h> diff --git a/arch/mn10300/kernel/smp-low.S b/arch/mn10300/kernel/smp-low.S index 72938cefc05e..71f1b2faaa0b 100644 --- a/arch/mn10300/kernel/smp-low.S +++ b/arch/mn10300/kernel/smp-low.S @@ -13,9 +13,9 @@ #include <linux/sys.h> #include <linux/linkage.h> #include <asm/smp.h> -#include <asm/system.h> #include <asm/thread_info.h> #include <asm/cpu-regs.h> +#include <asm/intctl-regs.h> #include <proc/smp-regs.h> #include <asm/asm-offsets.h> #include <asm/frame.inc> diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c index 9242e9fcc564..910dddf65e44 100644 --- a/arch/mn10300/kernel/smp.c +++ b/arch/mn10300/kernel/smp.c @@ -25,7 +25,6 @@ #include <linux/profile.h> #include <linux/smp.h> #include <asm/tlbflush.h> -#include <asm/system.h> #include <asm/bitops.h> #include <asm/processor.h> #include <asm/bug.h> diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c index 9220a75a7b43..94a9c6d53e1b 100644 --- a/arch/mn10300/kernel/traps.c +++ b/arch/mn10300/kernel/traps.c @@ -27,7 +27,6 @@ #include <linux/bug.h> #include <linux/irq.h> #include <asm/processor.h> -#include <asm/system.h> #include <linux/uaccess.h> #include <asm/io.h> #include <linux/atomic.h> diff --git a/arch/mn10300/lib/bitops.c b/arch/mn10300/lib/bitops.c index a66c6cdaf442..37309cdb7584 100644 --- a/arch/mn10300/lib/bitops.c +++ b/arch/mn10300/lib/bitops.c @@ -10,7 +10,6 @@ */ #include <linux/module.h> #include <asm/bitops.h> -#include <asm/system.h> /* * try flipping a bit using BSET and BCLR diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c index 0945409a8022..90f346f7392d 100644 --- a/arch/mn10300/mm/fault.c +++ b/arch/mn10300/mm/fault.c @@ -24,7 +24,6 @@ #include <linux/init.h> #include <linux/vt_kern.h> /* For unblank_screen() */ -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/hardirq.h> diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c index 13801824e3ee..e57e5bc23562 100644 --- a/arch/mn10300/mm/init.c +++ b/arch/mn10300/mm/init.c @@ -29,7 +29,6 @@ #include <linux/gfp.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index f9bb8cb1c14a..b9920b1edd5a 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -23,7 +23,6 @@ #include <linux/interrupt.h> #include <linux/pci.h> #include 
<asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/atomic.h> diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c index 450f7ba3f8f2..4ebf117c3285 100644 --- a/arch/mn10300/mm/pgtable.c +++ b/arch/mn10300/mm/pgtable.c @@ -21,7 +21,6 @@ #include <linux/spinlock.h> #include <linux/quicklist.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/tlb.h> diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c index 9a777498a916..3e57faf04083 100644 --- a/arch/mn10300/mm/tlb-smp.c +++ b/arch/mn10300/mm/tlb-smp.c @@ -24,7 +24,6 @@ #include <linux/profile.h> #include <linux/smp.h> #include <asm/tlbflush.h> -#include <asm/system.h> #include <asm/bitops.h> #include <asm/processor.h> #include <asm/bug.h> diff --git a/arch/mn10300/proc-mn2ws0050/proc-init.c b/arch/mn10300/proc-mn2ws0050/proc-init.c index fe6e24906ffc..ee6d03dbc8d8 100644 --- a/arch/mn10300/proc-mn2ws0050/proc-init.c +++ b/arch/mn10300/proc-mn2ws0050/proc-init.c @@ -15,7 +15,6 @@ #include <linux/interrupt.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/atomic.h> diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index 11162e6c878f..dcea5a0308ae 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -4,6 +4,7 @@ header-y += spr_defs.h generic-y += atomic.h generic-y += auxvec.h +generic-y += barrier.h generic-y += bitsperlong.h generic-y += bug.h generic-y += bugs.h @@ -19,6 +20,7 @@ generic-y += div64.h generic-y += dma.h generic-y += emergency-restart.h generic-y += errno.h +generic-y += exec.h generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h @@ -55,6 +57,7 @@ generic-y += sockios.h generic-y += statfs.h generic-y += stat.h generic-y += string.h +generic-y += switch_to.h generic-y += swab.h generic-y += termbits.h generic-y += termios.h diff --git a/arch/openrisc/include/asm/system.h b/arch/openrisc/include/asm/system.h deleted file mode 100644 index cf658882186b..000000000000 --- a/arch/openrisc/include/asm/system.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * OpenRISC Linux - * - * Linux architectural port borrowing liberally from similar works of - * others. All original copyrights apply as per the original source - * declaration. - * - * OpenRISC implementation: - * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> - * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> - * et al. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef __ASM_OPENRISC_SYSTEM_H -#define __ASM_OPENRISC_SYSTEM_H - -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ - -#include <asm/spr.h> -#include <asm-generic/system.h> - -/* We probably need this definition, but the generic system.h provides it - * and it's not used on our arch anyway... 
- */ -/*#define nop() __asm__ __volatile__ ("l.nop"::)*/ - -#endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ -#endif /* __ASM_OPENRISC_SYSTEM_H */ diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c index e5fc78877830..7d618feb1b72 100644 --- a/arch/openrisc/kernel/idle.c +++ b/arch/openrisc/kernel/idle.c @@ -31,7 +31,6 @@ #include <asm/pgtable.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/mmu.h> diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c index e4209af879ec..55210f37d1a3 100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ -38,7 +38,6 @@ #include <asm/uaccess.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/spr_defs.h> diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c index 3d4478f6c942..5869e3fa5dd3 100644 --- a/arch/openrisc/kernel/prom.c +++ b/arch/openrisc/kernel/prom.c @@ -42,7 +42,6 @@ #include <asm/processor.h> #include <asm/irq.h> #include <linux/io.h> -#include <asm/system.h> #include <asm/mmu.h> #include <asm/pgtable.h> #include <asm/sections.h> diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c index 6deacb6b95a4..e71781d24b0e 100644 --- a/arch/openrisc/kernel/ptrace.c +++ b/arch/openrisc/kernel/ptrace.c @@ -33,7 +33,6 @@ #include <asm/segment.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> /* * Copy the thread state to a regset that can be interpreted by userspace. diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c index bf5eba22ce9e..f4d5bedc3b4f 100644 --- a/arch/openrisc/kernel/setup.c +++ b/arch/openrisc/kernel/setup.c @@ -41,7 +41,6 @@ #include <linux/of_platform.h> #include <asm/segment.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/types.h> #include <asm/setup.h> diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c index a2ee12948f40..5cce396016d0 100644 --- a/arch/openrisc/kernel/traps.c +++ b/arch/openrisc/kernel/traps.c @@ -33,7 +33,6 @@ #include <linux/kallsyms.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/segment.h> #include <asm/io.h> #include <asm/pgtable.h> diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index 736f6b2f30af..79dea9740a3c 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -33,7 +33,6 @@ #include <linux/pagemap.h> #include <linux/memblock.h> -#include <asm/system.h> #include <asm/segment.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c index 56b0b89624af..683bd4d31c7c 100644 --- a/arch/openrisc/mm/tlb.c +++ b/arch/openrisc/mm/tlb.c @@ -26,7 +26,6 @@ #include <linux/mm.h> #include <linux/init.h> -#include <asm/system.h> #include <asm/segment.h> #include <asm/tlbflush.h> #include <asm/pgtable.h> diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 4054b31e0fa9..3ae56073cc3d 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -6,7 +6,6 @@ #define _ASM_PARISC_ATOMIC_H_ #include <linux/types.h> -#include <asm/system.h> /* * Atomic operations that C can't guarantee us. 
Useful for diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h new file mode 100644 index 000000000000..e77d834aa803 --- /dev/null +++ b/arch/parisc/include/asm/barrier.h @@ -0,0 +1,35 @@ +#ifndef __PARISC_BARRIER_H +#define __PARISC_BARRIER_H + +/* +** This is simply the barrier() macro from linux/kernel.h but when serial.c +** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h +** hasn't yet been included yet so it fails, thus repeating the macro here. +** +** PA-RISC architecture allows for weakly ordered memory accesses although +** none of the processors use it. There is a strong ordered bit that is +** set in the O-bit of the page directory entry. Operating systems that +** can not tolerate out of order accesses should set this bit when mapping +** pages. The O-bit of the PSW should also be set to 1 (I don't believe any +** of the processor implemented the PSW O-bit). The PCX-W ERS states that +** the TLB O-bit is not implemented so the page directory does not need to +** have the O-bit set when mapping pages (section 3.1). This section also +** states that the PSW Y, Z, G, and O bits are not implemented. +** So it looks like nothing needs to be done for parisc-linux (yet). +** (thanks to chada for the above comment -ggg) +** +** The __asm__ op below simple prevents gcc/ld from reordering +** instructions across the mb() "call". +*/ +#define mb() __asm__ __volatile__("":::"memory") /* barrier() */ +#define rmb() mb() +#define wmb() mb() +#define smp_mb() mb() +#define smp_rmb() mb() +#define smp_wmb() mb() +#define smp_read_barrier_depends() do { } while(0) +#define read_barrier_depends() do { } while(0) + +#define set_mb(var, value) do { var = value; mb(); } while (0) + +#endif /* __PARISC_BARRIER_H */ diff --git a/arch/parisc/include/asm/delay.h b/arch/parisc/include/asm/delay.h index 7a75e984674b..912ee7e6a579 100644 --- a/arch/parisc/include/asm/delay.h +++ b/arch/parisc/include/asm/delay.h @@ -1,7 +1,7 @@ #ifndef _PARISC_DELAY_H #define _PARISC_DELAY_H -#include <asm/system.h> /* for mfctl() */ +#include <asm/special_insns.h> /* for mfctl() */ #include <asm/processor.h> /* for boot_cpu_data */ diff --git a/arch/parisc/include/asm/dma.h b/arch/parisc/include/asm/dma.h index f7a18f968703..fd48ae2de950 100644 --- a/arch/parisc/include/asm/dma.h +++ b/arch/parisc/include/asm/dma.h @@ -9,7 +9,6 @@ #define _ASM_DMA_H #include <asm/io.h> /* need byte IO */ -#include <asm/system.h> #define dma_outb outb #define dma_inb inb diff --git a/arch/parisc/include/asm/exec.h b/arch/parisc/include/asm/exec.h new file mode 100644 index 000000000000..6bb5af75b17a --- /dev/null +++ b/arch/parisc/include/asm/exec.h @@ -0,0 +1,6 @@ +#ifndef __PARISC_EXEC_H +#define __PARISC_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* __PARISC_EXEC_H */ diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h new file mode 100644 index 000000000000..d2d11b7055ba --- /dev/null +++ b/arch/parisc/include/asm/ldcw.h @@ -0,0 +1,48 @@ +#ifndef __PARISC_LDCW_H +#define __PARISC_LDCW_H + +#ifndef CONFIG_PA20 +/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data, + and GCC only guarantees 8-byte alignment for stack locals, we can't + be assured of 16-byte alignment for atomic lock data even if we + specify "__attribute ((aligned(16)))" in the type declaration. So, + we use a struct containing an array of four ints for the atomic lock + type and dynamically select the 16-byte aligned int from the array + for the semaphore. 
*/ + +#define __PA_LDCW_ALIGNMENT 16 +#define __ldcw_align(a) ({ \ + unsigned long __ret = (unsigned long) &(a)->lock[0]; \ + __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \ + & ~(__PA_LDCW_ALIGNMENT - 1); \ + (volatile unsigned int *) __ret; \ +}) +#define __LDCW "ldcw" + +#else /*CONFIG_PA20*/ +/* From: "Jim Hull" <jim.hull of hp.com> + I've attached a summary of the change, but basically, for PA 2.0, as + long as the ",CO" (coherent operation) completer is specified, then the + 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead + they only require "natural" alignment (4-byte for ldcw, 8-byte for + ldcd). */ + +#define __PA_LDCW_ALIGNMENT 4 +#define __ldcw_align(a) (&(a)->slock) +#define __LDCW "ldcw,co" + +#endif /*!CONFIG_PA20*/ + +/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */ +#define __ldcw(a) ({ \ + unsigned __ret; \ + __asm__ __volatile__(__LDCW " 0(%2),%0" \ + : "=r" (__ret), "+m" (*(a)) : "r" (a)); \ + __ret; \ +}) + +#ifdef CONFIG_SMP +# define __lock_aligned __attribute__((__section__(".data..lock_aligned"))) +#endif + +#endif /* __PARISC_LDCW_H */ diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 7213ec9e594c..acdf4cad6125 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -16,7 +16,6 @@ #include <asm/pdc.h> #include <asm/ptrace.h> #include <asm/types.h> -#include <asm/system.h> #include <asm/percpu.h> #endif /* __ASSEMBLY__ */ @@ -169,6 +168,7 @@ struct thread_struct { * Return saved PC of a blocked thread. This is used by ps mostly. */ +struct task_struct; unsigned long thread_saved_pc(struct task_struct *t); void show_trace(struct task_struct *task, unsigned long *stack); diff --git a/arch/parisc/include/asm/psw.h b/arch/parisc/include/asm/psw.h index 5a3e23c9ce63..ad69a35e9c0f 100644 --- a/arch/parisc/include/asm/psw.h +++ b/arch/parisc/include/asm/psw.h @@ -59,4 +59,45 @@ #define USER_PSW_MASK (WIDE_PSW | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB) #define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I) +#ifndef __ASSEMBLY__ + +/* The program status word as bitfields. 
*/ +struct pa_psw { + unsigned int y:1; + unsigned int z:1; + unsigned int rv:2; + unsigned int w:1; + unsigned int e:1; + unsigned int s:1; + unsigned int t:1; + + unsigned int h:1; + unsigned int l:1; + unsigned int n:1; + unsigned int x:1; + unsigned int b:1; + unsigned int c:1; + unsigned int v:1; + unsigned int m:1; + + unsigned int cb:8; + + unsigned int o:1; + unsigned int g:1; + unsigned int f:1; + unsigned int r:1; + unsigned int q:1; + unsigned int p:1; + unsigned int d:1; + unsigned int i:1; +}; + +#ifdef CONFIG_64BIT +#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4)) +#else +#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW)) +#endif + +#endif /* !__ASSEMBLY__ */ + #endif diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h new file mode 100644 index 000000000000..d306b75bc77f --- /dev/null +++ b/arch/parisc/include/asm/special_insns.h @@ -0,0 +1,40 @@ +#ifndef __PARISC_SPECIAL_INSNS_H +#define __PARISC_SPECIAL_INSNS_H + +#define mfctl(reg) ({ \ + unsigned long cr; \ + __asm__ __volatile__( \ + "mfctl " #reg ",%0" : \ + "=r" (cr) \ + ); \ + cr; \ +}) + +#define mtctl(gr, cr) \ + __asm__ __volatile__("mtctl %0,%1" \ + : /* no outputs */ \ + : "r" (gr), "i" (cr) : "memory") + +/* these are here to de-mystefy the calling code, and to provide hooks */ +/* which I needed for debugging EIEM problems -PB */ +#define get_eiem() mfctl(15) +static inline void set_eiem(unsigned long val) +{ + mtctl(val, 15); +} + +#define mfsp(reg) ({ \ + unsigned long cr; \ + __asm__ __volatile__( \ + "mfsp " #reg ",%0" : \ + "=r" (cr) \ + ); \ + cr; \ +}) + +#define mtsp(gr, cr) \ + __asm__ __volatile__("mtsp %0,%1" \ + : /* no outputs */ \ + : "r" (gr), "i" (cr) : "memory") + +#endif /* __PARISC_SPECIAL_INSNS_H */ diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 74036f436a3b..804aa28ab1d6 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -1,7 +1,6 @@ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H -#include <asm/system.h> #include <asm/processor.h> #include <asm/spinlock_types.h> diff --git a/arch/parisc/include/asm/switch_to.h b/arch/parisc/include/asm/switch_to.h new file mode 100644 index 000000000000..8ed8fea1e784 --- /dev/null +++ b/arch/parisc/include/asm/switch_to.h @@ -0,0 +1,12 @@ +#ifndef __PARISC_SWITCH_TO_H +#define __PARISC_SWITCH_TO_H + +struct task_struct; + +extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *); + +#define switch_to(prev, next, last) do { \ + (last) = _switch_to(prev, next); \ +} while(0) + +#endif /* __PARISC_SWITCH_TO_H */ diff --git a/arch/parisc/include/asm/system.h b/arch/parisc/include/asm/system.h deleted file mode 100644 index b19e63a8e848..000000000000 --- a/arch/parisc/include/asm/system.h +++ /dev/null @@ -1,165 +0,0 @@ -#ifndef __PARISC_SYSTEM_H -#define __PARISC_SYSTEM_H - -#include <linux/irqflags.h> - -/* The program status word as bitfields. 
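mfctl()/mtctl() in the new asm/special_insns.h read and write PA-RISC control registers; asm/delay.h (updated above to include special_insns.h) relies on mfctl(16), the interval timer, as its cycle counter. A hedged sketch of that kind of use, not taken from this patch:

#include <asm/special_insns.h>

/* CR16 is the PA-RISC interval timer; it advances at the processor clock rate. */
static inline void example_delay_cycles(unsigned long cycles)
{
	unsigned long start = mfctl(16);

	while (mfctl(16) - start < cycles)
		;	/* busy-wait until enough cycles have elapsed */
}
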
*/ -struct pa_psw { - unsigned int y:1; - unsigned int z:1; - unsigned int rv:2; - unsigned int w:1; - unsigned int e:1; - unsigned int s:1; - unsigned int t:1; - - unsigned int h:1; - unsigned int l:1; - unsigned int n:1; - unsigned int x:1; - unsigned int b:1; - unsigned int c:1; - unsigned int v:1; - unsigned int m:1; - - unsigned int cb:8; - - unsigned int o:1; - unsigned int g:1; - unsigned int f:1; - unsigned int r:1; - unsigned int q:1; - unsigned int p:1; - unsigned int d:1; - unsigned int i:1; -}; - -#ifdef CONFIG_64BIT -#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4)) -#else -#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW)) -#endif - -struct task_struct; - -extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *); - -#define switch_to(prev, next, last) do { \ - (last) = _switch_to(prev, next); \ -} while(0) - -#define mfctl(reg) ({ \ - unsigned long cr; \ - __asm__ __volatile__( \ - "mfctl " #reg ",%0" : \ - "=r" (cr) \ - ); \ - cr; \ -}) - -#define mtctl(gr, cr) \ - __asm__ __volatile__("mtctl %0,%1" \ - : /* no outputs */ \ - : "r" (gr), "i" (cr) : "memory") - -/* these are here to de-mystefy the calling code, and to provide hooks */ -/* which I needed for debugging EIEM problems -PB */ -#define get_eiem() mfctl(15) -static inline void set_eiem(unsigned long val) -{ - mtctl(val, 15); -} - -#define mfsp(reg) ({ \ - unsigned long cr; \ - __asm__ __volatile__( \ - "mfsp " #reg ",%0" : \ - "=r" (cr) \ - ); \ - cr; \ -}) - -#define mtsp(gr, cr) \ - __asm__ __volatile__("mtsp %0,%1" \ - : /* no outputs */ \ - : "r" (gr), "i" (cr) : "memory") - - -/* -** This is simply the barrier() macro from linux/kernel.h but when serial.c -** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h -** hasn't yet been included yet so it fails, thus repeating the macro here. -** -** PA-RISC architecture allows for weakly ordered memory accesses although -** none of the processors use it. There is a strong ordered bit that is -** set in the O-bit of the page directory entry. Operating systems that -** can not tolerate out of order accesses should set this bit when mapping -** pages. The O-bit of the PSW should also be set to 1 (I don't believe any -** of the processor implemented the PSW O-bit). The PCX-W ERS states that -** the TLB O-bit is not implemented so the page directory does not need to -** have the O-bit set when mapping pages (section 3.1). This section also -** states that the PSW Y, Z, G, and O bits are not implemented. -** So it looks like nothing needs to be done for parisc-linux (yet). -** (thanks to chada for the above comment -ggg) -** -** The __asm__ op below simple prevents gcc/ld from reordering -** instructions across the mb() "call". -*/ -#define mb() __asm__ __volatile__("":::"memory") /* barrier() */ -#define rmb() mb() -#define wmb() mb() -#define smp_mb() mb() -#define smp_rmb() mb() -#define smp_wmb() mb() -#define smp_read_barrier_depends() do { } while(0) -#define read_barrier_depends() do { } while(0) - -#define set_mb(var, value) do { var = value; mb(); } while (0) - -#ifndef CONFIG_PA20 -/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data, - and GCC only guarantees 8-byte alignment for stack locals, we can't - be assured of 16-byte alignment for atomic lock data even if we - specify "__attribute ((aligned(16)))" in the type declaration. 
So, - we use a struct containing an array of four ints for the atomic lock - type and dynamically select the 16-byte aligned int from the array - for the semaphore. */ - -#define __PA_LDCW_ALIGNMENT 16 -#define __ldcw_align(a) ({ \ - unsigned long __ret = (unsigned long) &(a)->lock[0]; \ - __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \ - & ~(__PA_LDCW_ALIGNMENT - 1); \ - (volatile unsigned int *) __ret; \ -}) -#define __LDCW "ldcw" - -#else /*CONFIG_PA20*/ -/* From: "Jim Hull" <jim.hull of hp.com> - I've attached a summary of the change, but basically, for PA 2.0, as - long as the ",CO" (coherent operation) completer is specified, then the - 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead - they only require "natural" alignment (4-byte for ldcw, 8-byte for - ldcd). */ - -#define __PA_LDCW_ALIGNMENT 4 -#define __ldcw_align(a) (&(a)->slock) -#define __LDCW "ldcw,co" - -#endif /*!CONFIG_PA20*/ - -/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */ -#define __ldcw(a) ({ \ - unsigned __ret; \ - __asm__ __volatile__(__LDCW " 0(%2),%0" \ - : "=r" (__ret), "+m" (*(a)) : "r" (a)); \ - __ret; \ -}) - -#ifdef CONFIG_SMP -# define __lock_aligned __attribute__((__section__(".data..lock_aligned"))) -#endif - -#define arch_align_stack(x) (x) - -#endif diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 6d9c7c7973d0..83ae7dd4d99e 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -5,6 +5,7 @@ #ifndef __ASSEMBLY__ #include <asm/processor.h> +#include <asm/special_insns.h> struct thread_info { struct task_struct *task; /* main task structure */ diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h index 3b68d77273d9..2bd51f6d832b 100644 --- a/arch/parisc/include/asm/timex.h +++ b/arch/parisc/include/asm/timex.h @@ -6,7 +6,6 @@ #ifndef _ASMPARISC_TIMEX_H #define _ASMPARISC_TIMEX_H -#include <asm/system.h> #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index ff4cf9dab8d2..9ac066086f03 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -5,7 +5,6 @@ * User space memory access functions */ #include <asm/page.h> -#include <asm/system.h> #include <asm/cache.h> #include <asm/errno.h> #include <asm-generic/uaccess-unaligned.h> diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 83335f3da5fc..9d181890a7e3 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -22,7 +22,6 @@ #include <asm/cache.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/processor.h> diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 4896ed090585..f65fa480c905 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -67,7 +67,6 @@ #include <asm/page.h> #include <asm/pdc.h> #include <asm/pdcpat.h> -#include <asm/system.h> #include <asm/processor.h> /* for boot_cpu_data */ static DEFINE_SPINLOCK(pdc_lock); diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c index 74d544b1cd22..24644aca10cb 100644 --- a/arch/parisc/kernel/pci.c +++ b/arch/parisc/kernel/pci.c @@ -16,7 +16,6 @@ #include <linux/types.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/superio.h> #define DEBUG_RESOURCES 0 diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 
2905b1f52d30..857c2f545470 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -22,7 +22,6 @@ #include <asm/uaccess.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/asm-offsets.h> diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 32d588488f04..5006e8ea3051 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -32,7 +32,6 @@ #include <linux/bitops.h> #include <linux/ftrace.h> -#include <asm/system.h> #include <linux/atomic.h> #include <asm/current.h> #include <asm/delay.h> diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index f19e6604026a..45ba99f5080b 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -27,7 +27,6 @@ #include <linux/bug.h> #include <asm/assembly.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/irq.h> diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index a8bffd8af77d..187118841af1 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -8,7 +8,6 @@ #include <linux/kernel.h> #include <linux/spinlock.h> -#include <asm/system.h> #include <linux/atomic.h> #ifdef CONFIG_SMP diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore index 12da77ec0228..1c1aadc8c48f 100644 --- a/arch/powerpc/boot/.gitignore +++ b/arch/powerpc/boot/.gitignore @@ -27,7 +27,6 @@ zImage.bin.* zImage.chrp zImage.coff zImage.holly -zImage.iseries zImage.*lds zImage.miboot zImage.pmac diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 14174e838ad9..da29032ae38f 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -5,13 +5,9 @@ * PowerPC atomic operations */ -#include <linux/types.h> - #ifdef __KERNEL__ -#include <linux/compiler.h> -#include <asm/synch.h> -#include <asm/asm-compat.h> -#include <asm/system.h> +#include <linux/types.h> +#include <asm/cmpxchg.h> #define ATOMIC_INIT(i) { (i) } diff --git a/arch/powerpc/include/asm/auxvec.h b/arch/powerpc/include/asm/auxvec.h index 19a099b62cd6..ce17d2c9eb4e 100644 --- a/arch/powerpc/include/asm/auxvec.h +++ b/arch/powerpc/include/asm/auxvec.h @@ -16,4 +16,6 @@ */ #define AT_SYSINFO_EHDR 33 +#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */ + #endif diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h new file mode 100644 index 000000000000..ae782254e731 --- /dev/null +++ b/arch/powerpc/include/asm/barrier.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> + */ +#ifndef _ASM_POWERPC_BARRIER_H +#define _ASM_POWERPC_BARRIER_H + +/* + * Memory barrier. + * The sync instruction guarantees that all memory accesses initiated + * by this processor have been performed (with respect to all other + * mechanisms that access memory). The eieio instruction is a barrier + * providing an ordering (separately) for (a) cacheable stores and (b) + * loads and stores to non-cacheable memory (e.g. I/O devices). + * + * mb() prevents loads and stores being reordered across this point. + * rmb() prevents loads being reordered across this point. + * wmb() prevents stores being reordered across this point. + * read_barrier_depends() prevents data-dependent loads being reordered + * across this point (nop on PPC). + * + * *mb() variants without smp_ prefix must order all types of memory + * operations with one another. sync is the only instruction sufficient + * to do this. 
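The smp_* variants described next map to lwsync/eieio on SMP builds and collapse to plain compiler barriers otherwise; their canonical use is the publish/consume pairing shown here as an illustrative sketch only (ACCESS_ONCE()-style annotations omitted):

#include <asm/barrier.h>

static int payload;
static int ready;

static void example_producer(void)
{
	payload = 42;
	smp_wmb();		/* order the payload store before the flag store */
	ready = 1;
}

static int example_consumer(void)
{
	if (ready) {
		smp_rmb();	/* order the flag load before the payload load */
		return payload;
	}
	return -1;		/* nothing published yet */
}
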
+ * + * For the smp_ barriers, ordering is for cacheable memory operations + * only. We have to use the sync instruction for smp_mb(), since lwsync + * doesn't order loads with respect to previous stores. Lwsync can be + * used for smp_rmb() and smp_wmb(). + * + * However, on CPUs that don't support lwsync, lwsync actually maps to a + * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio. + */ +#define mb() __asm__ __volatile__ ("sync" : : : "memory") +#define rmb() __asm__ __volatile__ ("sync" : : : "memory") +#define wmb() __asm__ __volatile__ ("sync" : : : "memory") +#define read_barrier_depends() do { } while(0) + +#define set_mb(var, value) do { var = value; mb(); } while (0) + +#ifdef CONFIG_SMP + +#ifdef __SUBARCH_HAS_LWSYNC +# define SMPWMB LWSYNC +#else +# define SMPWMB eieio +#endif + +#define smp_mb() mb() +#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") +#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") +#define smp_read_barrier_depends() read_barrier_depends() +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) +#endif /* CONFIG_SMP */ + +/* + * This is a barrier which prevents following instructions from being + * started until the value of the argument x is known. For example, if + * x is a variable loaded from memory, this prevents following + * instructions from being executed until the load has been performed. + */ +#define data_barrier(x) \ + asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); + +#endif /* _ASM_POWERPC_BARRIER_H */ diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index 065c590c991d..3eb53d741070 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -126,5 +126,16 @@ #include <asm-generic/bug.h> +#ifndef __ASSEMBLY__ + +struct pt_regs; +extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); +extern void bad_page_fault(struct pt_regs *, unsigned long, int); +extern void _exception(int, struct pt_regs *, int, unsigned long); +extern void die(const char *, struct pt_regs *, long); +extern void print_backtrace(unsigned long *); + +#endif /* !__ASSEMBLY__ */ + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_BUG_H */ diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index 4b509411ad8a..9e495c9a6a88 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -42,8 +42,24 @@ extern struct ppc64_caches ppc64_caches; #endif /* __powerpc64__ && ! 
__ASSEMBLY__ */ #if !defined(__ASSEMBLY__) + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) + +#ifdef CONFIG_6xx +extern long _get_L2CR(void); +extern long _get_L3CR(void); +extern void _set_L2CR(unsigned long); +extern void _set_L3CR(unsigned long); +#else +#define _get_L2CR() 0L +#define _get_L3CR() 0L +#define _set_L2CR(val) do { } while(0) +#define _set_L3CR(val) do { } while(0) #endif +extern void cacheable_memzero(void *p, unsigned int nb); +extern void *cacheable_memcpy(void *, const void *, unsigned int); + +#endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_CACHE_H */ diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h new file mode 100644 index 000000000000..e245aab7f191 --- /dev/null +++ b/arch/powerpc/include/asm/cmpxchg.h @@ -0,0 +1,309 @@ +#ifndef _ASM_POWERPC_CMPXCHG_H_ +#define _ASM_POWERPC_CMPXCHG_H_ + +#ifdef __KERNEL__ +#include <linux/compiler.h> +#include <asm/synch.h> +#include <asm/asm-compat.h> + +/* + * Atomic exchange + * + * Changes the memory location '*ptr' to be val and returns + * the previous value stored there. + */ +static __always_inline unsigned long +__xchg_u32(volatile void *p, unsigned long val) +{ + unsigned long prev; + + __asm__ __volatile__( + PPC_RELEASE_BARRIER +"1: lwarx %0,0,%2 \n" + PPC405_ERR77(0,%2) +" stwcx. %3,0,%2 \n\ + bne- 1b" + PPC_ACQUIRE_BARRIER + : "=&r" (prev), "+m" (*(volatile unsigned int *)p) + : "r" (p), "r" (val) + : "cc", "memory"); + + return prev; +} + +/* + * Atomic exchange + * + * Changes the memory location '*ptr' to be val and returns + * the previous value stored there. + */ +static __always_inline unsigned long +__xchg_u32_local(volatile void *p, unsigned long val) +{ + unsigned long prev; + + __asm__ __volatile__( +"1: lwarx %0,0,%2 \n" + PPC405_ERR77(0,%2) +" stwcx. %3,0,%2 \n\ + bne- 1b" + : "=&r" (prev), "+m" (*(volatile unsigned int *)p) + : "r" (p), "r" (val) + : "cc", "memory"); + + return prev; +} + +#ifdef CONFIG_PPC64 +static __always_inline unsigned long +__xchg_u64(volatile void *p, unsigned long val) +{ + unsigned long prev; + + __asm__ __volatile__( + PPC_RELEASE_BARRIER +"1: ldarx %0,0,%2 \n" + PPC405_ERR77(0,%2) +" stdcx. %3,0,%2 \n\ + bne- 1b" + PPC_ACQUIRE_BARRIER + : "=&r" (prev), "+m" (*(volatile unsigned long *)p) + : "r" (p), "r" (val) + : "cc", "memory"); + + return prev; +} + +static __always_inline unsigned long +__xchg_u64_local(volatile void *p, unsigned long val) +{ + unsigned long prev; + + __asm__ __volatile__( +"1: ldarx %0,0,%2 \n" + PPC405_ERR77(0,%2) +" stdcx. %3,0,%2 \n\ + bne- 1b" + : "=&r" (prev), "+m" (*(volatile unsigned long *)p) + : "r" (p), "r" (val) + : "cc", "memory"); + + return prev; +} +#endif + +/* + * This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid xchg(). 
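The xchg()/xchg_local() wrappers defined a little further down dispatch into these size-specific helpers. A typical caller, sketched purely as an illustration (the work_item type and helper are hypothetical), uses xchg() as an atomic take-and-clear:

#include <asm/cmpxchg.h>

struct work_item;

/* Atomically fetch the currently queued item and leave NULL in its place. */
static inline struct work_item *example_take_pending(struct work_item **slot)
{
	return xchg(slot, NULL);	/* old pointer comes back, slot is now empty */
}
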
+ */ +extern void __xchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +__xchg(volatile void *ptr, unsigned long x, unsigned int size) +{ + switch (size) { + case 4: + return __xchg_u32(ptr, x); +#ifdef CONFIG_PPC64 + case 8: + return __xchg_u64(ptr, x); +#endif + } + __xchg_called_with_bad_pointer(); + return x; +} + +static __always_inline unsigned long +__xchg_local(volatile void *ptr, unsigned long x, unsigned int size) +{ + switch (size) { + case 4: + return __xchg_u32_local(ptr, x); +#ifdef CONFIG_PPC64 + case 8: + return __xchg_u64_local(ptr, x); +#endif + } + __xchg_called_with_bad_pointer(); + return x; +} +#define xchg(ptr,x) \ + ({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ + }) + +#define xchg_local(ptr,x) \ + ({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg_local((ptr), \ + (unsigned long)_x_, sizeof(*(ptr))); \ + }) + +/* + * Compare and exchange - if *p == old, set it to new, + * and return the old value of *p. + */ +#define __HAVE_ARCH_CMPXCHG 1 + +static __always_inline unsigned long +__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) +{ + unsigned int prev; + + __asm__ __volatile__ ( + PPC_RELEASE_BARRIER +"1: lwarx %0,0,%2 # __cmpxchg_u32\n\ + cmpw 0,%0,%3\n\ + bne- 2f\n" + PPC405_ERR77(0,%2) +" stwcx. %4,0,%2\n\ + bne- 1b" + PPC_ACQUIRE_BARRIER + "\n\ +2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} + +static __always_inline unsigned long +__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old, + unsigned long new) +{ + unsigned int prev; + + __asm__ __volatile__ ( +"1: lwarx %0,0,%2 # __cmpxchg_u32\n\ + cmpw 0,%0,%3\n\ + bne- 2f\n" + PPC405_ERR77(0,%2) +" stwcx. %4,0,%2\n\ + bne- 1b" + "\n\ +2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} + +#ifdef CONFIG_PPC64 +static __always_inline unsigned long +__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) +{ + unsigned long prev; + + __asm__ __volatile__ ( + PPC_RELEASE_BARRIER +"1: ldarx %0,0,%2 # __cmpxchg_u64\n\ + cmpd 0,%0,%3\n\ + bne- 2f\n\ + stdcx. %4,0,%2\n\ + bne- 1b" + PPC_ACQUIRE_BARRIER + "\n\ +2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} + +static __always_inline unsigned long +__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old, + unsigned long new) +{ + unsigned long prev; + + __asm__ __volatile__ ( +"1: ldarx %0,0,%2 # __cmpxchg_u64\n\ + cmpd 0,%0,%3\n\ + bne- 2f\n\ + stdcx. %4,0,%2\n\ + bne- 1b" + "\n\ +2:" + : "=&r" (prev), "+m" (*p) + : "r" (p), "r" (old), "r" (new) + : "cc", "memory"); + + return prev; +} +#endif + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid cmpxchg(). 
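cmpxchg(), wrapped just below, is the building block for the usual compare-and-swap retry loop. A minimal sketch, illustrative only (example_add_return is hypothetical, not part of this patch):

#include <asm/cmpxchg.h>

static inline unsigned long example_add_return(unsigned long *counter, unsigned long delta)
{
	unsigned long old, new;

	do {
		old = *counter;			/* snapshot the current value */
		new = old + delta;
	} while (cmpxchg(counter, old, new) != old);	/* retry if someone raced us */

	return new;
}
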
*/ +extern void __cmpxchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, + unsigned int size) +{ + switch (size) { + case 4: + return __cmpxchg_u32(ptr, old, new); +#ifdef CONFIG_PPC64 + case 8: + return __cmpxchg_u64(ptr, old, new); +#endif + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +static __always_inline unsigned long +__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, + unsigned int size) +{ + switch (size) { + case 4: + return __cmpxchg_u32_local(ptr, old, new); +#ifdef CONFIG_PPC64 + case 8: + return __cmpxchg_u64_local(ptr, old, new); +#endif + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#define cmpxchg(ptr, o, n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ + }) + + +#define cmpxchg_local(ptr, o, n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ + }) + +#ifdef CONFIG_PPC64 +#define cmpxchg64(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg((ptr), (o), (n)); \ + }) +#define cmpxchg64_local(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg_local((ptr), (o), (n)); \ + }) +#else +#include <asm-generic/cmpxchg-local.h> +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#endif + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_CMPXCHG_H_ */ diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h new file mode 100644 index 000000000000..716d2f089eb6 --- /dev/null +++ b/arch/powerpc/include/asm/debug.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> + */ +#ifndef _ASM_POWERPC_DEBUG_H +#define _ASM_POWERPC_DEBUG_H + +struct pt_regs; + +extern struct dentry *powerpc_debugfs_root; + +#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) + +extern int (*__debugger)(struct pt_regs *regs); +extern int (*__debugger_ipi)(struct pt_regs *regs); +extern int (*__debugger_bpt)(struct pt_regs *regs); +extern int (*__debugger_sstep)(struct pt_regs *regs); +extern int (*__debugger_iabr_match)(struct pt_regs *regs); +extern int (*__debugger_dabr_match)(struct pt_regs *regs); +extern int (*__debugger_fault_handler)(struct pt_regs *regs); + +#define DEBUGGER_BOILERPLATE(__NAME) \ +static inline int __NAME(struct pt_regs *regs) \ +{ \ + if (unlikely(__ ## __NAME)) \ + return __ ## __NAME(regs); \ + return 0; \ +} + +DEBUGGER_BOILERPLATE(debugger) +DEBUGGER_BOILERPLATE(debugger_ipi) +DEBUGGER_BOILERPLATE(debugger_bpt) +DEBUGGER_BOILERPLATE(debugger_sstep) +DEBUGGER_BOILERPLATE(debugger_iabr_match) +DEBUGGER_BOILERPLATE(debugger_dabr_match) +DEBUGGER_BOILERPLATE(debugger_fault_handler) + +#else +static inline int debugger(struct pt_regs *regs) { return 0; } +static inline int debugger_ipi(struct pt_regs *regs) { return 0; } +static inline int debugger_bpt(struct pt_regs *regs) { return 0; } +static inline int debugger_sstep(struct pt_regs *regs) { return 0; } +static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; } +static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; } +static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } +#endif + +extern int set_dabr(unsigned long dabr); +#ifdef CONFIG_PPC_ADV_DEBUG_REGS +extern 
void do_send_trap(struct pt_regs *regs, unsigned long address, + unsigned long error_code, int signal_code, int brkpt); +#else +extern void do_dabr(struct pt_regs *regs, unsigned long address, + unsigned long error_code); +#endif + +#endif /* _ASM_POWERPC_DEBUG_H */ diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h index adadb9943610..f6813e919bb2 100644 --- a/arch/powerpc/include/asm/dma.h +++ b/arch/powerpc/include/asm/dma.h @@ -24,7 +24,6 @@ #include <asm/io.h> #include <linux/spinlock.h> -#include <asm/system.h> #ifndef MAX_DMA_CHANNELS #define MAX_DMA_CHANNELS 8 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h new file mode 100644 index 000000000000..8196e9c7d7e8 --- /dev/null +++ b/arch/powerpc/include/asm/exec.h @@ -0,0 +1,9 @@ +/* + * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> + */ +#ifndef _ASM_POWERPC_EXEC_H +#define _ASM_POWERPC_EXEC_H + +extern unsigned long arch_align_stack(unsigned long sp); + +#endif /* _ASM_POWERPC_EXEC_H */ diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 80fd4d2b4a62..be04330af751 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h @@ -35,7 +35,7 @@ struct arch_hw_breakpoint { #include <linux/kdebug.h> #include <asm/reg.h> -#include <asm/system.h> +#include <asm/debug.h> struct perf_event; struct pmu; diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index edfc9803ec91..957a83f43646 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -112,7 +112,6 @@ extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, struct dma_attrs *attrs); extern void iommu_init_early_pSeries(void); -extern void iommu_init_early_iSeries(void); extern void iommu_init_early_dart(void); extern void iommu_init_early_pasemi(void); diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index fe0b09dceb7d..cf417e510736 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -27,12 +27,6 @@ extern atomic_t ppc_n_lost_interrupts; /* This number is used when no interrupt has been assigned */ #define NO_IRQ (0) -/* This is a special irq number to return from get_irq() to tell that - * no interrupt happened _and_ ignore it (don't count it as bad). Some - * platforms like iSeries rely on that. - */ -#define NO_IRQ_IGNORE ((unsigned int)-1) - /* Total number of virq in the platform */ #define NR_IRQS CONFIG_NR_IRQS diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h index f7727d91ac6b..b921c3f48928 100644 --- a/arch/powerpc/include/asm/kvm.h +++ b/arch/powerpc/include/asm/kvm.h @@ -265,12 +265,9 @@ struct kvm_debug_exit_arch { struct kvm_guest_debug_arch { }; -#define KVM_REG_MASK 0x001f -#define KVM_REG_EXT_MASK 0xffe0 -#define KVM_REG_GPR 0x0000 -#define KVM_REG_FPR 0x0020 -#define KVM_REG_QPR 0x0040 -#define KVM_REG_FQPR 0x0060 +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; #define KVM_INTERRUPT_SET -1U #define KVM_INTERRUPT_UNSET -2U @@ -292,4 +289,41 @@ struct kvm_allocate_rma { __u64 rma_size; }; +struct kvm_book3e_206_tlb_entry { + __u32 mas8; + __u32 mas1; + __u64 mas2; + __u64 mas7_3; +}; + +struct kvm_book3e_206_tlb_params { + /* + * For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV: + * + * - The number of ways of TLB0 must be a power of two between 2 and + * 16. + * - TLB1 must be fully associative. 
+ * - The size of TLB0 must be a multiple of the number of ways, and + * the number of sets must be a power of two. + * - The size of TLB1 may not exceed 64 entries. + * - TLB0 supports 4 KiB pages. + * - The page sizes supported by TLB1 are as indicated by + * TLB1CFG (if MMUCFG[MAVN] = 0) or TLB1PS (if MMUCFG[MAVN] = 1) + * as returned by KVM_GET_SREGS. + * - TLB2 and TLB3 are reserved, and their entries in tlb_sizes[] + * and tlb_ways[] must be zero. + * + * tlb_ways[n] = tlb_sizes[n] means the array is fully associative. + * + * KVM will adjust TLBnCFG based on the sizes configured here, + * though arrays greater than 2048 entries will have TLBnCFG[NENTRY] + * set to zero. + */ + __u32 tlb_sizes[4]; + __u32 tlb_ways[4]; + __u32 reserved[8]; +}; + +#define KVM_REG_PPC_HIOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1) + #endif /* __LINUX_KVM_POWERPC_H */ diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 69c7377d2071..aa795ccef294 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -90,6 +90,8 @@ struct kvmppc_vcpu_book3s { #endif int context_id[SID_CONTEXTS]; + bool hior_explicit; /* HIOR is set by ioctl, not PVR */ + struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG]; struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; @@ -119,6 +121,11 @@ extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu); extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte); extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr); extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu); +extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run, + struct kvm_vcpu *vcpu, unsigned long addr, + unsigned long status); +extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, + unsigned long slb_v, unsigned long valid); extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte); extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu); @@ -138,6 +145,21 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); +extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, + unsigned long *rmap, long pte_index, int realmode); +extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep, + unsigned long pte_index); +void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep, + unsigned long pte_index); +extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr, + unsigned long *nb_ret); +extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr); +extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, + long pte_index, unsigned long pteh, unsigned long ptel); +extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, + long pte_index, unsigned long pteh, unsigned long ptel); +extern long kvmppc_hv_get_dirty_log(struct kvm *kvm, + struct kvm_memory_slot *memslot); extern void kvmppc_entry_trampoline(void); extern void kvmppc_hv_entry_trampoline(void); @@ -183,7 +205,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) { if ( num < 14 ) { - 
to_svcpu(vcpu)->gpr[num] = val; + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + svcpu->gpr[num] = val; + svcpu_put(svcpu); to_book3s(vcpu)->shadow_vcpu->gpr[num] = val; } else vcpu->arch.gpr[num] = val; @@ -191,80 +215,120 @@ static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) { - if ( num < 14 ) - return to_svcpu(vcpu)->gpr[num]; - else + if ( num < 14 ) { + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + ulong r = svcpu->gpr[num]; + svcpu_put(svcpu); + return r; + } else return vcpu->arch.gpr[num]; } static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) { - to_svcpu(vcpu)->cr = val; + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + svcpu->cr = val; + svcpu_put(svcpu); to_book3s(vcpu)->shadow_vcpu->cr = val; } static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) { - return to_svcpu(vcpu)->cr; + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + u32 r; + r = svcpu->cr; + svcpu_put(svcpu); + return r; } static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) { - to_svcpu(vcpu)->xer = val; + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + svcpu->xer = val; to_book3s(vcpu)->shadow_vcpu->xer = val; + svcpu_put(svcpu); } static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) { - return to_svcpu(vcpu)->xer; + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + u32 r; + r = svcpu->xer; + svcpu_put(svcpu); + return r; } static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) { - to_svcpu(vcpu)->ctr = val; + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + svcpu->ctr = val; + svcpu_put(svcpu); } static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) { - return to_svcpu(vcpu)->ctr; + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + ulong r; + r = svcpu->ctr; + svcpu_put(svcpu); + return r; } static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) { - to_svcpu(vcpu)->lr = val; + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + svcpu->lr = val; + svcpu_put(svcpu); } static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) { - return to_svcpu(vcpu)->lr; + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + ulong r; + r = svcpu->lr; + svcpu_put(svcpu); + return r; } static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) { - to_svcpu(vcpu)->pc = val; + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + svcpu->pc = val; + svcpu_put(svcpu); } static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) { - return to_svcpu(vcpu)->pc; + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + ulong r; + r = svcpu->pc; + svcpu_put(svcpu); + return r; } static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) { ulong pc = kvmppc_get_pc(vcpu); - struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu); + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + u32 r; /* Load the instruction manually if it failed to do so in the * exit path */ if (svcpu->last_inst == KVM_INST_FETCH_FAILED) kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false); - return svcpu->last_inst; + r = svcpu->last_inst; + svcpu_put(svcpu); + return r; } static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) { - return to_svcpu(vcpu)->fault_dar; + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + ulong r; + r = svcpu->fault_dar; + svcpu_put(svcpu); + return r; } static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) 
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h index de604db135f5..38040ff82063 100644 --- a/arch/powerpc/include/asm/kvm_book3s_32.h +++ b/arch/powerpc/include/asm/kvm_book3s_32.h @@ -20,11 +20,15 @@ #ifndef __ASM_KVM_BOOK3S_32_H__ #define __ASM_KVM_BOOK3S_32_H__ -static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) +static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu) { return to_book3s(vcpu)->shadow_vcpu; } +static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu) +{ +} + #define PTE_SIZE 12 #define VSID_ALL 0 #define SR_INVALID 0x00000001 /* VSID 1 should always be unused */ diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index d0ac94f98f9e..b0c08b142770 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -21,14 +21,56 @@ #define __ASM_KVM_BOOK3S_64_H__ #ifdef CONFIG_KVM_BOOK3S_PR -static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) +static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu) { + preempt_disable(); return &get_paca()->shadow_vcpu; } + +static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu) +{ + preempt_enable(); +} #endif #define SPAPR_TCE_SHIFT 12 +#ifdef CONFIG_KVM_BOOK3S_64_HV +/* For now use fixed-size 16MB page table */ +#define HPT_ORDER 24 +#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */ +#define HPT_NPTE (HPT_NPTEG << 3) /* 8 PTEs per PTEG */ +#define HPT_HASH_MASK (HPT_NPTEG - 1) +#endif + +#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */ + +/* + * We use a lock bit in HPTE dword 0 to synchronize updates and + * accesses to each HPTE, and another bit to indicate non-present + * HPTEs. + */ +#define HPTE_V_HVLOCK 0x40UL +#define HPTE_V_ABSENT 0x20UL + +static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits) +{ + unsigned long tmp, old; + + asm volatile(" ldarx %0,0,%2\n" + " and. %1,%0,%3\n" + " bne 2f\n" + " ori %0,%0,%4\n" + " stdcx. 
%0,0,%2\n" + " beq+ 2f\n" + " li %1,%3\n" + "2: isync" + : "=&r" (tmp), "=&r" (old) + : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK) + : "cc", "memory"); + return old == 0; +} + static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, unsigned long pte_index) { @@ -62,4 +104,140 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, return rb; } +static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) +{ + /* only handle 4k, 64k and 16M pages for now */ + if (!(h & HPTE_V_LARGE)) + return 1ul << 12; /* 4k page */ + if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206)) + return 1ul << 16; /* 64k page */ + if ((l & 0xff000) == 0) + return 1ul << 24; /* 16M page */ + return 0; /* error */ +} + +static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize) +{ + return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT; +} + +static inline int hpte_is_writable(unsigned long ptel) +{ + unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP); + + return pp != PP_RXRX && pp != PP_RXXX; +} + +static inline unsigned long hpte_make_readonly(unsigned long ptel) +{ + if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX) + ptel = (ptel & ~HPTE_R_PP) | PP_RXXX; + else + ptel |= PP_RXRX; + return ptel; +} + +static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type) +{ + unsigned int wimg = ptel & HPTE_R_WIMG; + + /* Handle SAO */ + if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) && + cpu_has_feature(CPU_FTR_ARCH_206)) + wimg = HPTE_R_M; + + if (!io_type) + return wimg == HPTE_R_M; + + return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type; +} + +/* + * Lock and read a linux PTE. If it's present and writable, atomically + * set dirty and referenced bits and return the PTE, otherwise return 0. + */ +static inline pte_t kvmppc_read_update_linux_pte(pte_t *p, int writing) +{ + pte_t pte, tmp; + + /* wait until _PAGE_BUSY is clear then set it atomically */ + __asm__ __volatile__ ( + "1: ldarx %0,0,%3\n" + " andi. %1,%0,%4\n" + " bne- 1b\n" + " ori %1,%0,%4\n" + " stdcx. %1,0,%3\n" + " bne- 1b" + : "=&r" (pte), "=&r" (tmp), "=m" (*p) + : "r" (p), "i" (_PAGE_BUSY) + : "cc"); + + if (pte_present(pte)) { + pte = pte_mkyoung(pte); + if (writing && pte_write(pte)) + pte = pte_mkdirty(pte); + } + + *p = pte; /* clears _PAGE_BUSY */ + + return pte; +} + +/* Return HPTE cache control bits corresponding to Linux pte bits */ +static inline unsigned long hpte_cache_bits(unsigned long pte_val) +{ +#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W + return pte_val & (HPTE_R_W | HPTE_R_I); +#else + return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) + + ((pte_val & _PAGE_WRITETHRU) ? 
HPTE_R_W : 0); +#endif +} + +static inline bool hpte_read_permission(unsigned long pp, unsigned long key) +{ + if (key) + return PP_RWRX <= pp && pp <= PP_RXRX; + return 1; +} + +static inline bool hpte_write_permission(unsigned long pp, unsigned long key) +{ + if (key) + return pp == PP_RWRW; + return pp <= PP_RWRW; +} + +static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr) +{ + unsigned long skey; + + skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) | + ((hpte_r & HPTE_R_KEY_LO) >> 9); + return (amr >> (62 - 2 * skey)) & 3; +} + +static inline void lock_rmap(unsigned long *rmap) +{ + do { + while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap)) + cpu_relax(); + } while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap)); +} + +static inline void unlock_rmap(unsigned long *rmap) +{ + __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap); +} + +static inline bool slot_is_aligned(struct kvm_memory_slot *memslot, + unsigned long pagesize) +{ + unsigned long mask = (pagesize >> PAGE_SHIFT) - 1; + + if (pagesize <= PAGE_SIZE) + return 1; + return !(memslot->base_gfn & mask) && !(memslot->npages & mask); +} + #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h index adbfca9dd100..8cd50a514271 100644 --- a/arch/powerpc/include/asm/kvm_e500.h +++ b/arch/powerpc/include/asm/kvm_e500.h @@ -22,46 +22,55 @@ #define E500_PID_NUM 3 #define E500_TLB_NUM 2 -struct tlbe{ - u32 mas1; - u32 mas2; - u32 mas3; - u32 mas7; -}; - #define E500_TLB_VALID 1 #define E500_TLB_DIRTY 2 -struct tlbe_priv { +struct tlbe_ref { pfn_t pfn; unsigned int flags; /* E500_TLB_* */ }; +struct tlbe_priv { + struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */ +}; + struct vcpu_id_table; +struct kvmppc_e500_tlb_params { + int entries, ways, sets; +}; + struct kvmppc_vcpu_e500 { - /* Unmodified copy of the guest's TLB. */ - struct tlbe *gtlb_arch[E500_TLB_NUM]; + /* Unmodified copy of the guest's TLB -- shared with host userspace. */ + struct kvm_book3e_206_tlb_entry *gtlb_arch; + + /* Starting entry number in gtlb_arch[] */ + int gtlb_offset[E500_TLB_NUM]; /* KVM internal information associated with each guest TLB entry */ struct tlbe_priv *gtlb_priv[E500_TLB_NUM]; - unsigned int gtlb_size[E500_TLB_NUM]; + struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM]; + unsigned int gtlb_nv[E500_TLB_NUM]; + /* + * information associated with each host TLB entry -- + * TLB1 only for now. If/when guest TLB1 entries can be + * mapped with host TLB0, this will be used for that too. + * + * We don't want to use this for guest TLB0 because then we'd + * have the overhead of doing the translation again even if + * the entry is still in the guest TLB (e.g. we swapped out + * and back, and our host TLB entries got evicted). 
+ */ + struct tlbe_ref *tlb_refs[E500_TLB_NUM]; + unsigned int host_tlb1_nv; + u32 host_pid[E500_PID_NUM]; u32 pid[E500_PID_NUM]; u32 svr; - u32 mas0; - u32 mas1; - u32 mas2; - u32 mas3; - u32 mas4; - u32 mas5; - u32 mas6; - u32 mas7; - /* vcpu id table */ struct vcpu_id_table *idt; @@ -73,6 +82,9 @@ struct kvmppc_vcpu_e500 { u32 tlb1cfg; u64 mcar; + struct page **shared_tlb_pages; + int num_shared_tlb_pages; + struct kvm_vcpu vcpu; }; diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index bf8af5d5d5dc..52eb9c1f4fe0 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -32,17 +32,32 @@ #include <linux/atomic.h> #include <asm/kvm_asm.h> #include <asm/processor.h> +#include <asm/page.h> #define KVM_MAX_VCPUS NR_CPUS #define KVM_MAX_VCORES NR_CPUS #define KVM_MEMORY_SLOTS 32 /* memory slots that does not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 4 +#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) #ifdef CONFIG_KVM_MMIO #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #endif +#ifdef CONFIG_KVM_BOOK3S_64_HV +#include <linux/mmu_notifier.h> + +#define KVM_ARCH_WANT_MMU_NOTIFIER + +struct kvm; +extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); +extern int kvm_age_hva(struct kvm *kvm, unsigned long hva); +extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); +extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); + +#endif + /* We don't currently support large pages. */ #define KVM_HPAGE_GFN_SHIFT(x) 0 #define KVM_NR_PAGE_SIZES 1 @@ -158,34 +173,72 @@ struct kvmppc_spapr_tce_table { struct page *pages[0]; }; -struct kvmppc_rma_info { +struct kvmppc_linear_info { void *base_virt; unsigned long base_pfn; unsigned long npages; struct list_head list; - atomic_t use_count; + atomic_t use_count; + int type; +}; + +/* + * The reverse mapping array has one entry for each HPTE, + * which stores the guest's view of the second word of the HPTE + * (including the guest physical address of the mapping), + * plus forward and backward pointers in a doubly-linked ring + * of HPTEs that map the same host page. The pointers in this + * ring are 32-bit HPTE indexes, to save space. + */ +struct revmap_entry { + unsigned long guest_rpte; + unsigned int forw, back; +}; + +/* + * We use the top bit of each memslot->rmap entry as a lock bit, + * and bit 32 as a present flag. The bottom 32 bits are the + * index in the guest HPT of a HPTE that points to the page. 
+ */ +#define KVMPPC_RMAP_LOCK_BIT 63 +#define KVMPPC_RMAP_RC_SHIFT 32 +#define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT) +#define KVMPPC_RMAP_CHANGED (HPTE_R_C << KVMPPC_RMAP_RC_SHIFT) +#define KVMPPC_RMAP_PRESENT 0x100000000ul +#define KVMPPC_RMAP_INDEX 0xfffffffful + +/* Low-order bits in kvm->arch.slot_phys[][] */ +#define KVMPPC_PAGE_ORDER_MASK 0x1f +#define KVMPPC_PAGE_NO_CACHE HPTE_R_I /* 0x20 */ +#define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */ +#define KVMPPC_GOT_PAGE 0x80 + +struct kvm_arch_memory_slot { }; struct kvm_arch { #ifdef CONFIG_KVM_BOOK3S_64_HV unsigned long hpt_virt; - unsigned long ram_npages; - unsigned long ram_psize; - unsigned long ram_porder; - struct kvmppc_pginfo *ram_pginfo; + struct revmap_entry *revmap; unsigned int lpid; unsigned int host_lpid; unsigned long host_lpcr; unsigned long sdr1; unsigned long host_sdr1; int tlbie_lock; - int n_rma_pages; unsigned long lpcr; unsigned long rmor; - struct kvmppc_rma_info *rma; + struct kvmppc_linear_info *rma; + unsigned long vrma_slb_v; + int rma_setup_done; + int using_mmu_notifiers; struct list_head spapr_tce_tables; + spinlock_t slot_phys_lock; + unsigned long *slot_phys[KVM_MEM_SLOTS_NUM]; + int slot_npages[KVM_MEM_SLOTS_NUM]; unsigned short last_vcpu[NR_CPUS]; struct kvmppc_vcore *vcores[KVM_MAX_VCORES]; + struct kvmppc_linear_info *hpt_li; #endif /* CONFIG_KVM_BOOK3S_64_HV */ }; @@ -318,10 +371,6 @@ struct kvm_vcpu_arch { u32 vrsave; /* also USPRG0 */ u32 mmucr; ulong shadow_msr; - ulong sprg4; - ulong sprg5; - ulong sprg6; - ulong sprg7; ulong csrr0; ulong csrr1; ulong dsrr0; @@ -329,16 +378,14 @@ struct kvm_vcpu_arch { ulong mcsrr0; ulong mcsrr1; ulong mcsr; - ulong esr; u32 dec; u32 decar; u32 tbl; u32 tbu; u32 tcr; - u32 tsr; + ulong tsr; /* we need to perform set/clr_bits() which requires ulong */ u32 ivor[64]; ulong ivpr; - u32 pir; u32 pvr; u32 shadow_pid; @@ -427,9 +474,14 @@ struct kvm_vcpu_arch { #ifdef CONFIG_KVM_BOOK3S_64_HV struct kvm_vcpu_arch_shared shregs; + unsigned long pgfault_addr; + long pgfault_index; + unsigned long pgfault_hpte[2]; + struct list_head run_list; struct task_struct *run_task; struct kvm_run *kvm_run; + pgd_t *pgdir; #endif }; @@ -438,4 +490,12 @@ struct kvm_vcpu_arch { #define KVMPPC_VCPU_BUSY_IN_HOST 1 #define KVMPPC_VCPU_RUNNABLE 2 +/* Values for vcpu->arch.io_gpr */ +#define KVM_MMIO_REG_MASK 0x001f +#define KVM_MMIO_REG_EXT_MASK 0xffe0 +#define KVM_MMIO_REG_GPR 0x0000 +#define KVM_MMIO_REG_FPR 0x0020 +#define KVM_MMIO_REG_QPR 0x0040 +#define KVM_MMIO_REG_FQPR 0x0060 + #endif /* __POWERPC_KVM_HOST_H__ */ diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h index 50533f9adf40..7b754e743003 100644 --- a/arch/powerpc/include/asm/kvm_para.h +++ b/arch/powerpc/include/asm/kvm_para.h @@ -22,6 +22,16 @@ #include <linux/types.h> +/* + * Additions to this struct must only occur at the end, and should be + * accompanied by a KVM_MAGIC_FEAT flag to advertise that they are present + * (albeit not necessarily relevant to the current target hardware platform). + * + * Struct fields are always 32 or 64 bit aligned, depending on them being 32 + * or 64 bit wide respectively. 
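Looping back to the rmap encoding described above: a grossly simplified, hypothetical helper (the real chain maintenance lives in kvmppc_add_revmap_chain()) showing how an entry is packed under the lock bit:

/* Record that hpte_index (bottom 32 bits) currently maps this guest page. */
static inline void example_set_rmap(unsigned long *rmap, unsigned long hpte_index)
{
	lock_rmap(rmap);			/* spin on KVMPPC_RMAP_LOCK_BIT */
	*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
		KVMPPC_RMAP_PRESENT | hpte_index;
	unlock_rmap(rmap);			/* clears the lock bit with release semantics */
}
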
+ * + * See Documentation/virtual/kvm/ppc-pv.txt + */ struct kvm_vcpu_arch_shared { __u64 scratch1; __u64 scratch2; @@ -33,11 +43,35 @@ struct kvm_vcpu_arch_shared { __u64 sprg3; __u64 srr0; __u64 srr1; - __u64 dar; + __u64 dar; /* dear on BookE */ __u64 msr; __u32 dsisr; __u32 int_pending; /* Tells the guest if we have an interrupt */ __u32 sr[16]; + __u32 mas0; + __u32 mas1; + __u64 mas7_3; + __u64 mas2; + __u32 mas4; + __u32 mas6; + __u32 esr; + __u32 pir; + + /* + * SPRG4-7 are user-readable, so we can only keep these consistent + * between the shared area and the real registers when there's an + * intervening exit to KVM. This also applies to SPRG3 on some + * chips. + * + * This suffices for access by guest userspace, since in PR-mode + * KVM, an exit must occur when changing the guest's MSR[PR]. + * If the guest kernel writes to SPRG3-7 via the shared area, it + * must also use the shared area for reading while in kernel space. + */ + __u64 sprg4; + __u64 sprg5; + __u64 sprg6; + __u64 sprg7; }; #define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */ @@ -47,7 +81,10 @@ struct kvm_vcpu_arch_shared { #define KVM_FEATURE_MAGIC_PAGE 1 -#define KVM_MAGIC_FEAT_SR (1 << 0) +#define KVM_MAGIC_FEAT_SR (1 << 0) + +/* MASn, ESR, PIR, and high SPRGs */ +#define KVM_MAGIC_FEAT_MAS0_TO_SPRG7 (1 << 1) #ifdef __KERNEL__ diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 46efd1a265c9..9d6dee0f7d48 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -66,6 +66,7 @@ extern int kvmppc_emulate_instruction(struct kvm_run *run, extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb); +extern void kvmppc_decrementer_func(unsigned long data); extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu); /* Core-specific hooks */ @@ -94,7 +95,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu); extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu); -extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu); +extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu); extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags); extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); @@ -120,15 +121,17 @@ extern long kvmppc_alloc_hpt(struct kvm *kvm); extern void kvmppc_free_hpt(struct kvm *kvm); extern long kvmppc_prepare_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem); -extern void kvmppc_map_vrma(struct kvm *kvm, - struct kvm_userspace_memory_region *mem); +extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu, + struct kvm_memory_slot *memslot, unsigned long porder); extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu); extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvm_create_spapr_tce *args); extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *rma); -extern struct kvmppc_rma_info *kvm_alloc_rma(void); -extern void kvm_release_rma(struct kvmppc_rma_info *ri); +extern struct kvmppc_linear_info *kvm_alloc_rma(void); +extern void kvm_release_rma(struct kvmppc_linear_info *ri); +extern struct kvmppc_linear_info *kvm_alloc_hpt(void); +extern void kvm_release_hpt(struct kvmppc_linear_info *li); extern int kvmppc_core_init_vm(struct kvm *kvm); extern void kvmppc_core_destroy_vm(struct kvm 
*kvm); extern int kvmppc_core_prepare_memory_region(struct kvm *kvm, @@ -175,6 +178,9 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); +int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg); +int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg); + void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid); #ifdef CONFIG_KVM_BOOK3S_64_HV @@ -183,14 +189,19 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) paca[cpu].kvm_hstate.xics_phys = addr; } -extern void kvm_rma_init(void); +extern void kvm_linear_init(void); #else static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) {} -static inline void kvm_rma_init(void) +static inline void kvm_linear_init(void) {} #endif +int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, + struct kvm_config_tlb *cfg); +int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, + struct kvm_dirty_tlb *cfg); + #endif /* __POWERPC_KVM_PPC_H__ */ diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index bf37931d1ad6..42ce570812c1 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -99,9 +99,7 @@ struct machdep_calls { void (*init_IRQ)(void); - /* Return an irq, or NO_IRQ to indicate there are none pending. - * If for some reason there is no irq, but the interrupt - * shouldn't be counted as spurious, return NO_IRQ_IGNORE. */ + /* Return an irq, or NO_IRQ to indicate there are none pending. */ unsigned int (*get_irq)(void); /* PCI stuff */ diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index f5f89cafebd0..cdb5421877e2 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h @@ -41,9 +41,10 @@ /* MAS registers bit definitions */ #define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000) -#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000) -#define MAS0_NV(x) ((x) & 0x00000FFF) #define MAS0_ESEL_MASK 0x0FFF0000 +#define MAS0_ESEL_SHIFT 16 +#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK) +#define MAS0_NV(x) ((x) & 0x00000FFF) #define MAS0_HES 0x00004000 #define MAS0_WQ_ALLWAYS 0x00000000 #define MAS0_WQ_COND 0x00001000 @@ -167,6 +168,7 @@ #define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */ #define TLBnCFG_MAXSIZE_SHIFT 16 #define TLBnCFG_ASSOC 0xff000000 /* Associativity */ +#define TLBnCFG_ASSOC_SHIFT 24 /* TLBnPS encoding */ #define TLBnPS_4K 0x00000004 diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 412ba493cb98..1c65a59881ea 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -108,11 +108,11 @@ extern char initial_stab[]; #define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000) /* Values for PP (assumes Ks=0, Kp=1) */ -/* pp0 will always be 0 for linux */ #define PP_RWXX 0 /* Supervisor read/write, User none */ #define PP_RWRX 1 /* Supervisor read/write, User read */ #define PP_RWRW 2 /* Supervisor read/write, User read/write */ #define PP_RXRX 3 /* Supervisor read, User read */ +#define PP_RXXX (HPTE_R_PP0 | 2) /* Supervisor read, user none */ #ifndef __ASSEMBLY__ @@ -267,7 +267,6 @@ extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr); extern void hpte_init_native(void); extern void hpte_init_lpar(void); -extern void 
hpte_init_iSeries(void); extern void hpte_init_beat(void); extern void hpte_init_beat_v3(void); @@ -325,9 +324,6 @@ extern void slb_set_size(u16 size); * WARNING - If you change these you must make sure the asm * implementations in slb_allocate (slb_low.S), do_stab_bolted * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly. - * - * You'll also need to change the precomputed VSID values in head.S - * which are used by the iSeries firmware. */ #define VSID_MULTIPLIER_256M ASM_CONST(200730139) /* 28-bit prime */ @@ -484,14 +480,6 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea, | (ea >> SID_SHIFT_1T), 1T); } -/* - * This is only used on legacy iSeries in lparmap.c, - * hence the 256MB segment assumption. - */ -#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER_256M) % \ - VSID_MODULUS_256M) -#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea)) - #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_MMU_HASH64_H_ */ diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 5d487657322e..ac39e6a3b25a 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -155,14 +155,7 @@ struct pci_dn { struct pci_dev *pcidev; /* back-pointer to the pci device */ #ifdef CONFIG_EEH - int class_code; /* pci device class */ - int eeh_mode; /* See eeh.h for possible EEH_MODEs */ - int eeh_config_addr; - int eeh_pe_config_addr; /* new-style partition endpoint address */ - int eeh_check_count; /* # times driver ignored error */ - int eeh_freeze_count; /* # times this device froze up. */ - int eeh_false_positives; /* # times this device reported #ff's */ - u32 config_space[16]; /* saved PCI config space */ + struct eeh_dev *edev; /* eeh device */ #endif #define IODA_INVALID_PE (-1) #ifdef CONFIG_PPC_POWERNV @@ -185,6 +178,13 @@ static inline int pci_device_from_OF_node(struct device_node *np, return 0; } +#if defined(CONFIG_EEH) +static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn) +{ + return PCI_DN(dn)->edev; +} +#endif + /** Find the bus corresponding to the indicated device node */ extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn); diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 1a8093fa8f71..078019b5b353 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -47,6 +47,8 @@ struct power_pmu { */ #define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */ #define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */ +#define PPMU_NO_SIPR 4 /* no SIPR/HV in MMCRA at all */ +#define PPMU_NO_CONT_SAMPLING 8 /* no continuous sampling */ /* * Values for flags to get_alternatives() diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index e980faae4225..d81f99430fe7 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -45,6 +45,7 @@ #define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff #define PPC_INST_MTSPR_DSCR 0x7c1103a6 #define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff +#define PPC_INST_SLBFEE 0x7c0007a7 #define PPC_INST_STRING 0x7c00042a #define PPC_INST_STRING_MASK 0xfc0007fe @@ -183,7 +184,8 @@ __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b)) #define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_INST_ERATSX_DOT | \ __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b)) - +#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \ + __PPC_RT(t) | __PPC_RB(b)) /* * Define 
what the VSX XX1 form instructions will look like, then add diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index b585bff1a022..8e2d0371fe1e 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -385,6 +385,36 @@ static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) extern unsigned long cpuidle_disable; enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF}; +extern int powersave_nap; /* set if nap mode can be used in idle loop */ +void cpu_idle_wait(void); + +#ifdef CONFIG_PSERIES_IDLE +extern void update_smt_snooze_delay(int snooze); +extern int pseries_notify_cpuidle_add_cpu(int cpu); +#else +static inline void update_smt_snooze_delay(int snooze) {} +static inline int pseries_notify_cpuidle_add_cpu(int cpu) { return 0; } +#endif + +extern void flush_instruction_cache(void); +extern void hard_reset_now(void); +extern void poweroff_now(void); +extern int fix_alignment(struct pt_regs *); +extern void cvt_fd(float *from, double *to); +extern void cvt_df(double *from, float *to); +extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val); + +#ifdef CONFIG_PPC64 +/* + * We handle most unaligned accesses in hardware. On the other hand + * unaligned DMA can be very expensive on some ppc64 IO chips (it does + * powers of 2 writes until it reaches sufficient alignment). + * + * Based on this we disable the IP header alignment in network drivers. + */ +#define NET_IP_ALIGN 0 +#endif + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PROCESSOR_H */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index b1a215eabef6..9d7f0fb69028 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -216,6 +216,7 @@ #define DSISR_ISSTORE 0x02000000 /* access was a store */ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ #define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */ +#define DSISR_KEYFAULT 0x00200000 /* Key fault */ #define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */ #define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ #define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ @@ -237,6 +238,7 @@ #define LPCR_ISL (1ul << (63-2)) #define LPCR_VC_SH (63-2) #define LPCR_DPFD_SH (63-11) +#define LPCR_VRMASD (0x1ful << (63-16)) #define LPCR_VRMA_L (1ul << (63-12)) #define LPCR_VRMA_LP0 (1ul << (63-15)) #define LPCR_VRMA_LP1 (1ul << (63-16)) @@ -493,6 +495,9 @@ #define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */ #define SPRN_SRR0 0x01A /* Save/Restore Register 0 */ #define SPRN_SRR1 0x01B /* Save/Restore Register 1 */ +#define SRR1_ISI_NOPT 0x40000000 /* ISI: Not found in hash */ +#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ +#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ #define SRR1_WAKESYSERR 0x00300000 /* System error */ #define SRR1_WAKEEE 0x00200000 /* External interrupt */ diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 8a97aa7289d3..b86faa9107da 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -15,6 +15,11 @@ #ifndef __ASM_POWERPC_REG_BOOKE_H__ #define __ASM_POWERPC_REG_BOOKE_H__ +#ifdef CONFIG_BOOKE_WDT +extern u32 booke_wdt_enabled; +extern u32 booke_wdt_period; +#endif /* CONFIG_BOOKE_WDT */ + /* Machine State Register (MSR) 
Fields */ #define MSR_GS (1<<28) /* Guest state */ #define MSR_UCLE (1<<26) /* User-mode cache lock enable */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 01c143bb77ae..557cff845dee 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -74,7 +74,6 @@ struct rtas_suspend_me_data { /* RTAS event classes */ #define RTAS_INTERNAL_ERROR 0x80000000 /* set bit 0 */ #define RTAS_EPOW_WARNING 0x40000000 /* set bit 1 */ -#define RTAS_POWERMGM_EVENTS 0x20000000 /* set bit 2 */ #define RTAS_HOTPLUG_EVENTS 0x10000000 /* set bit 3 */ #define RTAS_IO_EVENTS 0x08000000 /* set bit 4 */ #define RTAS_EVENT_SCAN_ALL_EVENTS 0xffffffff @@ -204,6 +203,39 @@ struct rtas_ext_event_log_v6 { /* Variable length. */ }; +/* pSeries event log format */ + +/* Two bytes ASCII section IDs */ +#define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H') +#define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H') +#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S') +#define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H') +#define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T') +#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S') +#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H') +#define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W') +#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P') +#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R') +#define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M') +#define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P') +#define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E') +#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I') +#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H') +#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D') + +/* Vendor specific Platform Event Log Format, Version 6, section header */ +struct pseries_errorlog { + uint16_t id; /* 0x00 2-byte ASCII section ID */ + uint16_t length; /* 0x02 Section length in bytes */ + uint8_t version; /* 0x04 Section version */ + uint8_t subtype; /* 0x05 Section subtype */ + uint16_t creator_component; /* 0x06 Creator component ID */ + uint8_t data[]; /* 0x08 Start of section data */ +}; + +struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, + uint16_t section_id); + /* * This can be set by the rtas_flash module so that it can get called * as the absolutely last thing before the kernel terminates. @@ -325,5 +357,7 @@ static inline int page_is_rtas_user_buf(unsigned long pfn) static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;} #endif +extern int call_rtas(const char *, int, int, unsigned long *, ...); + #endif /* __KERNEL__ */ #endif /* _POWERPC_RTAS_H */ diff --git a/arch/powerpc/include/asm/runlatch.h b/arch/powerpc/include/asm/runlatch.h new file mode 100644 index 000000000000..54e9b963876e --- /dev/null +++ b/arch/powerpc/include/asm/runlatch.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> + */ +#ifndef _ASM_POWERPC_RUNLATCH_H +#define _ASM_POWERPC_RUNLATCH_H + +#ifdef CONFIG_PPC64 + +extern void __ppc64_runlatch_on(void); +extern void __ppc64_runlatch_off(void); + +/* + * We manually hard enable-disable, this is called + * in the idle loop and we don't want to mess up + * with soft-disable/enable & interrupt replay. 
+ */ +#define ppc64_runlatch_off() \ + do { \ + if (cpu_has_feature(CPU_FTR_CTRL) && \ + test_thread_local_flags(_TLF_RUNLATCH)) { \ + unsigned long msr = mfmsr(); \ + __hard_irq_disable(); \ + __ppc64_runlatch_off(); \ + if (msr & MSR_EE) \ + __hard_irq_enable(); \ + } \ + } while (0) + +#define ppc64_runlatch_on() \ + do { \ + if (cpu_has_feature(CPU_FTR_CTRL) && \ + !test_thread_local_flags(_TLF_RUNLATCH)) { \ + unsigned long msr = mfmsr(); \ + __hard_irq_disable(); \ + __ppc64_runlatch_on(); \ + if (msr & MSR_EE) \ + __hard_irq_enable(); \ + } \ + } while (0) +#else +#define ppc64_runlatch_on() +#define ppc64_runlatch_off() +#endif /* CONFIG_PPC64 */ + +#endif /* _ASM_POWERPC_RUNLATCH_H */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 186e0fb835bd..d084ce195fc3 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -5,6 +5,28 @@ #ifndef __ASSEMBLY__ extern void ppc_printk_progress(char *s, unsigned short hex); -#endif + +extern unsigned int rtas_data; +extern int mem_init_done; /* set on boot once kmalloc can be called */ +extern int init_bootmem_done; /* set once bootmem is available */ +extern phys_addr_t memory_limit; +extern unsigned long klimit; +extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); + +extern void via_cuda_init(void); +extern void read_rtc_time(void); +extern void pmac_find_display(void); + +struct device_node; +extern void note_scsi_host(struct device_node *, void *); + +/* Used in very early kernel initialization. */ +extern unsigned long reloc_offset(void); +extern unsigned long add_reloc_offset(unsigned long); +extern void reloc_got2(unsigned long); + +#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) + +#endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index adba970ce918..ebc24dc5b1a1 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -122,7 +122,6 @@ extern void smp_muxed_ipi_set_data(int cpu, unsigned long data); extern void smp_muxed_ipi_message_pass(int cpu, int msg); extern irqreturn_t smp_ipi_demux(void); -void smp_init_iSeries(void); void smp_init_pSeries(void); void smp_init_cell(void); void smp_init_celleb(void); diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h new file mode 100644 index 000000000000..caf82d0a00de --- /dev/null +++ b/arch/powerpc/include/asm/switch_to.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> + */ +#ifndef _ASM_POWERPC_SWITCH_TO_H +#define _ASM_POWERPC_SWITCH_TO_H + +struct thread_struct; +struct task_struct; +struct pt_regs; + +extern struct task_struct *__switch_to(struct task_struct *, + struct task_struct *); +#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) + +struct thread_struct; +extern struct task_struct *_switch(struct thread_struct *prev, + struct thread_struct *next); + +extern void giveup_fpu(struct task_struct *); +extern void disable_kernel_fp(void); +extern void enable_kernel_fp(void); +extern void flush_fp_to_thread(struct task_struct *); +extern void enable_kernel_altivec(void); +extern void giveup_altivec(struct task_struct *); +extern void load_up_altivec(struct task_struct *); +extern int emulate_altivec(struct pt_regs *); +extern void __giveup_vsx(struct task_struct *); +extern void giveup_vsx(struct task_struct *); +extern void enable_kernel_spe(void); +extern void giveup_spe(struct task_struct 
*); +extern void load_up_spe(struct task_struct *); + +#ifndef CONFIG_SMP +extern void discard_lazy_cpu_state(void); +#else +static inline void discard_lazy_cpu_state(void) +{ +} +#endif + +#ifdef CONFIG_ALTIVEC +extern void flush_altivec_to_thread(struct task_struct *); +#else +static inline void flush_altivec_to_thread(struct task_struct *t) +{ +} +#endif + +#ifdef CONFIG_VSX +extern void flush_vsx_to_thread(struct task_struct *); +#else +static inline void flush_vsx_to_thread(struct task_struct *t) +{ +} +#endif + +#ifdef CONFIG_SPE +extern void flush_spe_to_thread(struct task_struct *); +#else +static inline void flush_spe_to_thread(struct task_struct *t) +{ +} +#endif + +#endif /* _ASM_POWERPC_SWITCH_TO_H */ diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h deleted file mode 100644 index a02883d5af43..000000000000 --- a/arch/powerpc/include/asm/system.h +++ /dev/null @@ -1,592 +0,0 @@ -/* - * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> - */ -#ifndef _ASM_POWERPC_SYSTEM_H -#define _ASM_POWERPC_SYSTEM_H - -#include <linux/kernel.h> -#include <linux/irqflags.h> - -#include <asm/hw_irq.h> - -/* - * Memory barrier. - * The sync instruction guarantees that all memory accesses initiated - * by this processor have been performed (with respect to all other - * mechanisms that access memory). The eieio instruction is a barrier - * providing an ordering (separately) for (a) cacheable stores and (b) - * loads and stores to non-cacheable memory (e.g. I/O devices). - * - * mb() prevents loads and stores being reordered across this point. - * rmb() prevents loads being reordered across this point. - * wmb() prevents stores being reordered across this point. - * read_barrier_depends() prevents data-dependent loads being reordered - * across this point (nop on PPC). - * - * *mb() variants without smp_ prefix must order all types of memory - * operations with one another. sync is the only instruction sufficient - * to do this. - * - * For the smp_ barriers, ordering is for cacheable memory operations - * only. We have to use the sync instruction for smp_mb(), since lwsync - * doesn't order loads with respect to previous stores. Lwsync can be - * used for smp_rmb() and smp_wmb(). - * - * However, on CPUs that don't support lwsync, lwsync actually maps to a - * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio. - */ -#define mb() __asm__ __volatile__ ("sync" : : : "memory") -#define rmb() __asm__ __volatile__ ("sync" : : : "memory") -#define wmb() __asm__ __volatile__ ("sync" : : : "memory") -#define read_barrier_depends() do { } while(0) - -#define set_mb(var, value) do { var = value; mb(); } while (0) - -#ifdef __KERNEL__ -#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */ -#ifdef CONFIG_SMP - -#ifdef __SUBARCH_HAS_LWSYNC -# define SMPWMB LWSYNC -#else -# define SMPWMB eieio -#endif - -#define smp_mb() mb() -#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") -#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#endif /* CONFIG_SMP */ - -/* - * This is a barrier which prevents following instructions from being - * started until the value of the argument x is known. 
For example, if - * x is a variable loaded from memory, this prevents following - * instructions from being executed until the load has been performed. - */ -#define data_barrier(x) \ - asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); - -struct task_struct; -struct pt_regs; - -#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) - -extern int (*__debugger)(struct pt_regs *regs); -extern int (*__debugger_ipi)(struct pt_regs *regs); -extern int (*__debugger_bpt)(struct pt_regs *regs); -extern int (*__debugger_sstep)(struct pt_regs *regs); -extern int (*__debugger_iabr_match)(struct pt_regs *regs); -extern int (*__debugger_dabr_match)(struct pt_regs *regs); -extern int (*__debugger_fault_handler)(struct pt_regs *regs); - -#define DEBUGGER_BOILERPLATE(__NAME) \ -static inline int __NAME(struct pt_regs *regs) \ -{ \ - if (unlikely(__ ## __NAME)) \ - return __ ## __NAME(regs); \ - return 0; \ -} - -DEBUGGER_BOILERPLATE(debugger) -DEBUGGER_BOILERPLATE(debugger_ipi) -DEBUGGER_BOILERPLATE(debugger_bpt) -DEBUGGER_BOILERPLATE(debugger_sstep) -DEBUGGER_BOILERPLATE(debugger_iabr_match) -DEBUGGER_BOILERPLATE(debugger_dabr_match) -DEBUGGER_BOILERPLATE(debugger_fault_handler) - -#else -static inline int debugger(struct pt_regs *regs) { return 0; } -static inline int debugger_ipi(struct pt_regs *regs) { return 0; } -static inline int debugger_bpt(struct pt_regs *regs) { return 0; } -static inline int debugger_sstep(struct pt_regs *regs) { return 0; } -static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; } -static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; } -static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; } -#endif - -extern int set_dabr(unsigned long dabr); -#ifdef CONFIG_PPC_ADV_DEBUG_REGS -extern void do_send_trap(struct pt_regs *regs, unsigned long address, - unsigned long error_code, int signal_code, int brkpt); -#else -extern void do_dabr(struct pt_regs *regs, unsigned long address, - unsigned long error_code); -#endif -extern void print_backtrace(unsigned long *); -extern void flush_instruction_cache(void); -extern void hard_reset_now(void); -extern void poweroff_now(void); - -#ifdef CONFIG_6xx -extern long _get_L2CR(void); -extern long _get_L3CR(void); -extern void _set_L2CR(unsigned long); -extern void _set_L3CR(unsigned long); -#else -#define _get_L2CR() 0L -#define _get_L3CR() 0L -#define _set_L2CR(val) do { } while(0) -#define _set_L3CR(val) do { } while(0) -#endif - -extern void via_cuda_init(void); -extern void read_rtc_time(void); -extern void pmac_find_display(void); -extern void giveup_fpu(struct task_struct *); -extern void disable_kernel_fp(void); -extern void enable_kernel_fp(void); -extern void flush_fp_to_thread(struct task_struct *); -extern void enable_kernel_altivec(void); -extern void giveup_altivec(struct task_struct *); -extern void load_up_altivec(struct task_struct *); -extern int emulate_altivec(struct pt_regs *); -extern void __giveup_vsx(struct task_struct *); -extern void giveup_vsx(struct task_struct *); -extern void enable_kernel_spe(void); -extern void giveup_spe(struct task_struct *); -extern void load_up_spe(struct task_struct *); -extern int fix_alignment(struct pt_regs *); -extern void cvt_fd(float *from, double *to); -extern void cvt_df(double *from, float *to); - -#ifndef CONFIG_SMP -extern void discard_lazy_cpu_state(void); -#else -static inline void discard_lazy_cpu_state(void) -{ -} -#endif - -#ifdef CONFIG_ALTIVEC -extern void flush_altivec_to_thread(struct task_struct *); -#else 
-static inline void flush_altivec_to_thread(struct task_struct *t) -{ -} -#endif - -#ifdef CONFIG_VSX -extern void flush_vsx_to_thread(struct task_struct *); -#else -static inline void flush_vsx_to_thread(struct task_struct *t) -{ -} -#endif - -#ifdef CONFIG_SPE -extern void flush_spe_to_thread(struct task_struct *); -#else -static inline void flush_spe_to_thread(struct task_struct *t) -{ -} -#endif - -extern int call_rtas(const char *, int, int, unsigned long *, ...); -extern void cacheable_memzero(void *p, unsigned int nb); -extern void *cacheable_memcpy(void *, const void *, unsigned int); -extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); -extern void bad_page_fault(struct pt_regs *, unsigned long, int); -extern void _exception(int, struct pt_regs *, int, unsigned long); -extern void die(const char *, struct pt_regs *, long); -extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val); - -#ifdef CONFIG_BOOKE_WDT -extern u32 booke_wdt_enabled; -extern u32 booke_wdt_period; -#endif /* CONFIG_BOOKE_WDT */ - -struct device_node; -extern void note_scsi_host(struct device_node *, void *); - -extern struct task_struct *__switch_to(struct task_struct *, - struct task_struct *); -#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) - -struct thread_struct; -extern struct task_struct *_switch(struct thread_struct *prev, - struct thread_struct *next); - -extern unsigned int rtas_data; -extern int mem_init_done; /* set on boot once kmalloc can be called */ -extern int init_bootmem_done; /* set once bootmem is available */ -extern phys_addr_t memory_limit; -extern unsigned long klimit; -extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); - -extern int powersave_nap; /* set if nap mode can be used in idle loop */ -void cpu_idle_wait(void); - -#ifdef CONFIG_PSERIES_IDLE -extern void update_smt_snooze_delay(int snooze); -extern int pseries_notify_cpuidle_add_cpu(int cpu); -#else -static inline void update_smt_snooze_delay(int snooze) {} -static inline int pseries_notify_cpuidle_add_cpu(int cpu) { return 0; } -#endif - -/* - * Atomic exchange - * - * Changes the memory location '*ptr' to be val and returns - * the previous value stored there. - */ -static __always_inline unsigned long -__xchg_u32(volatile void *p, unsigned long val) -{ - unsigned long prev; - - __asm__ __volatile__( - PPC_RELEASE_BARRIER -"1: lwarx %0,0,%2 \n" - PPC405_ERR77(0,%2) -" stwcx. %3,0,%2 \n\ - bne- 1b" - PPC_ACQUIRE_BARRIER - : "=&r" (prev), "+m" (*(volatile unsigned int *)p) - : "r" (p), "r" (val) - : "cc", "memory"); - - return prev; -} - -/* - * Atomic exchange - * - * Changes the memory location '*ptr' to be val and returns - * the previous value stored there. - */ -static __always_inline unsigned long -__xchg_u32_local(volatile void *p, unsigned long val) -{ - unsigned long prev; - - __asm__ __volatile__( -"1: lwarx %0,0,%2 \n" - PPC405_ERR77(0,%2) -" stwcx. %3,0,%2 \n\ - bne- 1b" - : "=&r" (prev), "+m" (*(volatile unsigned int *)p) - : "r" (p), "r" (val) - : "cc", "memory"); - - return prev; -} - -#ifdef CONFIG_PPC64 -static __always_inline unsigned long -__xchg_u64(volatile void *p, unsigned long val) -{ - unsigned long prev; - - __asm__ __volatile__( - PPC_RELEASE_BARRIER -"1: ldarx %0,0,%2 \n" - PPC405_ERR77(0,%2) -" stdcx. 
%3,0,%2 \n\ - bne- 1b" - PPC_ACQUIRE_BARRIER - : "=&r" (prev), "+m" (*(volatile unsigned long *)p) - : "r" (p), "r" (val) - : "cc", "memory"); - - return prev; -} - -static __always_inline unsigned long -__xchg_u64_local(volatile void *p, unsigned long val) -{ - unsigned long prev; - - __asm__ __volatile__( -"1: ldarx %0,0,%2 \n" - PPC405_ERR77(0,%2) -" stdcx. %3,0,%2 \n\ - bne- 1b" - : "=&r" (prev), "+m" (*(volatile unsigned long *)p) - : "r" (p), "r" (val) - : "cc", "memory"); - - return prev; -} -#endif - -/* - * This function doesn't exist, so you'll get a linker error - * if something tries to do an invalid xchg(). - */ -extern void __xchg_called_with_bad_pointer(void); - -static __always_inline unsigned long -__xchg(volatile void *ptr, unsigned long x, unsigned int size) -{ - switch (size) { - case 4: - return __xchg_u32(ptr, x); -#ifdef CONFIG_PPC64 - case 8: - return __xchg_u64(ptr, x); -#endif - } - __xchg_called_with_bad_pointer(); - return x; -} - -static __always_inline unsigned long -__xchg_local(volatile void *ptr, unsigned long x, unsigned int size) -{ - switch (size) { - case 4: - return __xchg_u32_local(ptr, x); -#ifdef CONFIG_PPC64 - case 8: - return __xchg_u64_local(ptr, x); -#endif - } - __xchg_called_with_bad_pointer(); - return x; -} -#define xchg(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ - }) - -#define xchg_local(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_local((ptr), \ - (unsigned long)_x_, sizeof(*(ptr))); \ - }) - -/* - * Compare and exchange - if *p == old, set it to new, - * and return the old value of *p. - */ -#define __HAVE_ARCH_CMPXCHG 1 - -static __always_inline unsigned long -__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) -{ - unsigned int prev; - - __asm__ __volatile__ ( - PPC_RELEASE_BARRIER -"1: lwarx %0,0,%2 # __cmpxchg_u32\n\ - cmpw 0,%0,%3\n\ - bne- 2f\n" - PPC405_ERR77(0,%2) -" stwcx. %4,0,%2\n\ - bne- 1b" - PPC_ACQUIRE_BARRIER - "\n\ -2:" - : "=&r" (prev), "+m" (*p) - : "r" (p), "r" (old), "r" (new) - : "cc", "memory"); - - return prev; -} - -static __always_inline unsigned long -__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old, - unsigned long new) -{ - unsigned int prev; - - __asm__ __volatile__ ( -"1: lwarx %0,0,%2 # __cmpxchg_u32\n\ - cmpw 0,%0,%3\n\ - bne- 2f\n" - PPC405_ERR77(0,%2) -" stwcx. %4,0,%2\n\ - bne- 1b" - "\n\ -2:" - : "=&r" (prev), "+m" (*p) - : "r" (p), "r" (old), "r" (new) - : "cc", "memory"); - - return prev; -} - -#ifdef CONFIG_PPC64 -static __always_inline unsigned long -__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) -{ - unsigned long prev; - - __asm__ __volatile__ ( - PPC_RELEASE_BARRIER -"1: ldarx %0,0,%2 # __cmpxchg_u64\n\ - cmpd 0,%0,%3\n\ - bne- 2f\n\ - stdcx. %4,0,%2\n\ - bne- 1b" - PPC_ACQUIRE_BARRIER - "\n\ -2:" - : "=&r" (prev), "+m" (*p) - : "r" (p), "r" (old), "r" (new) - : "cc", "memory"); - - return prev; -} - -static __always_inline unsigned long -__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old, - unsigned long new) -{ - unsigned long prev; - - __asm__ __volatile__ ( -"1: ldarx %0,0,%2 # __cmpxchg_u64\n\ - cmpd 0,%0,%3\n\ - bne- 2f\n\ - stdcx. %4,0,%2\n\ - bne- 1b" - "\n\ -2:" - : "=&r" (prev), "+m" (*p) - : "r" (p), "r" (old), "r" (new) - : "cc", "memory"); - - return prev; -} -#endif - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid cmpxchg(). 
*/ -extern void __cmpxchg_called_with_bad_pointer(void); - -static __always_inline unsigned long -__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, - unsigned int size) -{ - switch (size) { - case 4: - return __cmpxchg_u32(ptr, old, new); -#ifdef CONFIG_PPC64 - case 8: - return __cmpxchg_u64(ptr, old, new); -#endif - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -static __always_inline unsigned long -__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, - unsigned int size) -{ - switch (size) { - case 4: - return __cmpxchg_u32_local(ptr, old, new); -#ifdef CONFIG_PPC64 - case 8: - return __cmpxchg_u64_local(ptr, old, new); -#endif - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#define cmpxchg(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ - }) - - -#define cmpxchg_local(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ - }) - -#ifdef CONFIG_PPC64 -/* - * We handle most unaligned accesses in hardware. On the other hand - * unaligned DMA can be very expensive on some ppc64 IO chips (it does - * powers of 2 writes until it reaches sufficient alignment). - * - * Based on this we disable the IP header alignment in network drivers. - */ -#define NET_IP_ALIGN 0 - -#define cmpxchg64(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg((ptr), (o), (n)); \ - }) -#define cmpxchg64_local(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg_local((ptr), (o), (n)); \ - }) -#else -#include <asm-generic/cmpxchg-local.h> -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) -#endif - -extern unsigned long arch_align_stack(unsigned long sp); - -/* Used in very early kernel initialization. */ -extern unsigned long reloc_offset(void); -extern unsigned long add_reloc_offset(unsigned long); -extern void reloc_got2(unsigned long); - -#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) - -extern struct dentry *powerpc_debugfs_root; - -#ifdef CONFIG_PPC64 - -extern void __ppc64_runlatch_on(void); -extern void __ppc64_runlatch_off(void); - -/* - * We manually hard enable-disable, this is called - * in the idle loop and we don't want to mess up - * with soft-disable/enable & interrupt replay. 
- */ -#define ppc64_runlatch_off() \ - do { \ - if (cpu_has_feature(CPU_FTR_CTRL) && \ - test_thread_local_flags(_TLF_RUNLATCH)) { \ - unsigned long msr = mfmsr(); \ - __hard_irq_disable(); \ - __ppc64_runlatch_off(); \ - if (msr & MSR_EE) \ - __hard_irq_enable(); \ - } \ - } while (0) - -#define ppc64_runlatch_on() \ - do { \ - if (cpu_has_feature(CPU_FTR_CTRL) && \ - !test_thread_local_flags(_TLF_RUNLATCH)) { \ - unsigned long msr = mfmsr(); \ - __hard_irq_disable(); \ - __ppc64_runlatch_on(); \ - if (msr & MSR_EE) \ - __hard_irq_enable(); \ - } \ - } while (0) -#else -#define ppc64_runlatch_on() -#define ppc64_runlatch_off() -#endif /* CONFIG_PPC64 */ - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_SYSTEM_H */ diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index 8338aef5a4d3..b3038817b8dc 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h @@ -44,7 +44,6 @@ extern void __init udbg_init_debug_lpar_hvsi(void); extern void __init udbg_init_pmac_realmode(void); extern void __init udbg_init_maple_realmode(void); extern void __init udbg_init_pas_realmode(void); -extern void __init udbg_init_iseries(void); extern void __init udbg_init_rtas_panel(void); extern void __init udbg_init_rtas_console(void); extern void __init udbg_init_debug_beat(void); diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h index 0a290a195946..6bfd5ffe1d4f 100644 --- a/arch/powerpc/include/asm/vio.h +++ b/arch/powerpc/include/asm/vio.h @@ -69,6 +69,7 @@ struct vio_dev { }; struct vio_driver { + const char *name; const struct vio_device_id *id_table; int (*probe)(struct vio_dev *dev, const struct vio_device_id *id); int (*remove)(struct vio_dev *dev); @@ -76,10 +77,17 @@ struct vio_driver { * be loaded in a CMO environment if it uses DMA. 
*/ unsigned long (*get_desired_dma)(struct vio_dev *dev); + const struct dev_pm_ops *pm; struct device_driver driver; }; -extern int vio_register_driver(struct vio_driver *drv); +extern int __vio_register_driver(struct vio_driver *drv, struct module *owner, + const char *mod_name); +/* + * vio_register_driver must be a macro so that KBUILD_MODNAME can be expanded + */ +#define vio_register_driver(driver) \ + __vio_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) extern void vio_unregister_driver(struct vio_driver *drv); extern int vio_cmo_entitlement_update(size_t); diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 8184ee97e484..ee5b690a0bed 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -21,10 +21,10 @@ #include <linux/mm.h> #include <asm/processor.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/cache.h> #include <asm/cputable.h> #include <asm/emulated_ops.h> +#include <asm/switch_to.h> struct aligninfo { unsigned char len; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index cc492e48ddfa..34b8afe94a50 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -412,16 +412,23 @@ int main(void) DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2)); DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3)); #endif - DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4)); - DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); - DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6)); - DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7)); + DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4)); + DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5)); + DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6)); + DEFINE(VCPU_SHARED_SPRG7, offsetof(struct kvm_vcpu_arch_shared, sprg7)); DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1)); DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); + DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0)); + DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1)); + DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2)); + DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3)); + DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4)); + DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6)); + /* book3s */ #ifdef CONFIG_KVM_BOOK3S_64_HV DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); @@ -434,6 +441,7 @@ int main(void) DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu)); DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr)); DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor)); + DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v)); DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr)); DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); #endif diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 138ae183c440..455faa389876 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -20,6 +20,7 @@ #include <asm/cputable.h> #include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */ #include <asm/mmu.h> +#include <asm/setup.h> 
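The vio.h hunk above turns vio_register_driver() into a macro so that THIS_MODULE and KBUILD_MODNAME are supplied by the calling driver rather than filled in by hand, and struct vio_driver grows a name field. A minimal sketch of how a vio driver registers under the new interface follows; the driver name, id-table entry and empty callbacks are illustrative placeholders, not code from this series:

/* Illustrative only: registration against the reworked struct vio_driver. */
#include <linux/module.h>
#include <asm/vio.h>

static const struct vio_device_id example_vio_ids[] = {
	{ "network", "IBM,l-lan" },		/* placeholder match entry */
	{ "", "" },
};

static int example_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	return 0;				/* placeholder */
}

static int example_remove(struct vio_dev *vdev)
{
	return 0;				/* placeholder */
}

static struct vio_driver example_vio_driver = {
	.name		= "example_vio",	/* field added by the hunk above */
	.id_table	= example_vio_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	/* Expands to __vio_register_driver(&example_vio_driver,
	 * THIS_MODULE, KBUILD_MODNAME). */
	return vio_register_driver(&example_vio_driver);
}
module_init(example_init);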
struct cpu_spec* cur_cpu_spec = NULL; EXPORT_SYMBOL(cur_cpu_spec); diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index abef75176c07..fdcd8f551aff 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -27,8 +27,8 @@ #include <asm/kdump.h> #include <asm/prom.h> #include <asm/smp.h> -#include <asm/system.h> #include <asm/setjmp.h> +#include <asm/debug.h> /* * The primary CPU waits a while for all secondary CPUs to enter. This is to diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 2d0868a4e2f0..cb705fdbb458 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -101,14 +101,14 @@ data_access_not_stab: END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) #endif EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD, - KVMTEST_PR, 0x300) + KVMTEST, 0x300) . = 0x380 .globl data_access_slb_pSeries data_access_slb_pSeries: HMT_MEDIUM SET_SCRATCH0(r13) - EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380) + EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380) std r3,PACA_EXSLB+EX_R3(r13) mfspr r3,SPRN_DAR #ifdef __DISABLED__ @@ -330,8 +330,8 @@ do_stab_bolted_pSeries: EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) #endif /* CONFIG_POWER4_ONLY */ - KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300) - KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380) + KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300) + KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380) KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400) KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480) KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900) diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index e8e821146f38..6d2209ac0c44 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -26,11 +26,11 @@ #include <linux/sysctl.h> #include <linux/tick.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/time.h> #include <asm/machdep.h> +#include <asm/runlatch.h> #include <asm/smp.h> #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index a3d128e94cff..243dbabfe74d 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -57,7 +57,6 @@ #include <linux/of_irq.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/irq.h> @@ -67,6 +66,7 @@ #include <asm/machdep.h> #include <asm/udbg.h> #include <asm/smp.h> +#include <asm/debug.h> #ifdef CONFIG_PPC64 #include <asm/paca.h> @@ -208,8 +208,8 @@ notrace void arch_local_irq_restore(unsigned long en) * we are checking the "new" CPU instead of the old one. This * is only a problem if an event happened on the "old" CPU. * - * External interrupt events on non-iseries will have caused - * interrupts to be hard-disabled, so there is no problem, we + * External interrupt events will have caused interrupts to + * be hard-disabled, so there is no problem, we * cannot have preempted. 
*/ irq_happened = get_irq_happened(); @@ -445,9 +445,9 @@ void do_IRQ(struct pt_regs *regs) may_hard_irq_enable(); /* And finally process it */ - if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) + if (irq != NO_IRQ) handle_one_irq(irq); - else if (irq != NO_IRQ_IGNORE) + else __get_cpu_var(irq_stat).spurious_irqs++; irq_exit(); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index bc47352deb1f..e88c64331819 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -35,7 +35,6 @@ #include <asm/cacheflush.h> #include <asm/sstep.h> #include <asm/uaccess.h> -#include <asm/system.h> #ifdef CONFIG_PPC_ADV_DEBUG_REGS #define MSR_SINGLESTEP (MSR_DE) diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 2985338d0e10..62bdf2389669 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -1,5 +1,6 @@ /* * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved. + * Copyright 2010-2011 Freescale Semiconductor, Inc. * * Authors: * Alexander Graf <agraf@suse.de> @@ -29,6 +30,7 @@ #include <asm/sections.h> #include <asm/cacheflush.h> #include <asm/disassemble.h> +#include <asm/ppc-opcode.h> #define KVM_MAGIC_PAGE (-4096L) #define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x) @@ -41,34 +43,30 @@ #define KVM_INST_B 0x48000000 #define KVM_INST_B_MASK 0x03ffffff #define KVM_INST_B_MAX 0x01ffffff +#define KVM_INST_LI 0x38000000 #define KVM_MASK_RT 0x03e00000 #define KVM_RT_30 0x03c00000 #define KVM_MASK_RB 0x0000f800 #define KVM_INST_MFMSR 0x7c0000a6 -#define KVM_INST_MFSPR_SPRG0 0x7c1042a6 -#define KVM_INST_MFSPR_SPRG1 0x7c1142a6 -#define KVM_INST_MFSPR_SPRG2 0x7c1242a6 -#define KVM_INST_MFSPR_SPRG3 0x7c1342a6 -#define KVM_INST_MFSPR_SRR0 0x7c1a02a6 -#define KVM_INST_MFSPR_SRR1 0x7c1b02a6 -#define KVM_INST_MFSPR_DAR 0x7c1302a6 -#define KVM_INST_MFSPR_DSISR 0x7c1202a6 - -#define KVM_INST_MTSPR_SPRG0 0x7c1043a6 -#define KVM_INST_MTSPR_SPRG1 0x7c1143a6 -#define KVM_INST_MTSPR_SPRG2 0x7c1243a6 -#define KVM_INST_MTSPR_SPRG3 0x7c1343a6 -#define KVM_INST_MTSPR_SRR0 0x7c1a03a6 -#define KVM_INST_MTSPR_SRR1 0x7c1b03a6 -#define KVM_INST_MTSPR_DAR 0x7c1303a6 -#define KVM_INST_MTSPR_DSISR 0x7c1203a6 + +#define SPR_FROM 0 +#define SPR_TO 0x100 + +#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \ + (((sprn) & 0x1f) << 16) | \ + (((sprn) & 0x3e0) << 6) | \ + (moveto)) + +#define KVM_INST_MFSPR(sprn) KVM_INST_SPR(sprn, SPR_FROM) +#define KVM_INST_MTSPR(sprn) KVM_INST_SPR(sprn, SPR_TO) #define KVM_INST_TLBSYNC 0x7c00046c #define KVM_INST_MTMSRD_L0 0x7c000164 #define KVM_INST_MTMSRD_L1 0x7c010164 #define KVM_INST_MTMSR 0x7c000124 +#define KVM_INST_WRTEE 0x7c000106 #define KVM_INST_WRTEEI_0 0x7c000146 #define KVM_INST_WRTEEI_1 0x7c008146 @@ -270,26 +268,27 @@ static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt) #ifdef CONFIG_BOOKE -extern u32 kvm_emulate_wrteei_branch_offs; -extern u32 kvm_emulate_wrteei_ee_offs; -extern u32 kvm_emulate_wrteei_len; -extern u32 kvm_emulate_wrteei[]; +extern u32 kvm_emulate_wrtee_branch_offs; +extern u32 kvm_emulate_wrtee_reg_offs; +extern u32 kvm_emulate_wrtee_orig_ins_offs; +extern u32 kvm_emulate_wrtee_len; +extern u32 kvm_emulate_wrtee[]; -static void kvm_patch_ins_wrteei(u32 *inst) +static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one) { u32 *p; int distance_start; int distance_end; ulong next_inst; - p = kvm_alloc(kvm_emulate_wrteei_len * 4); + p = kvm_alloc(kvm_emulate_wrtee_len * 4); if (!p) return; /* Find out where we are and put everything there */ distance_start 
= (ulong)p - (ulong)inst; next_inst = ((ulong)inst + 4); - distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_branch_offs]; + distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs]; /* Make sure we only write valid b instructions */ if (distance_start > KVM_INST_B_MAX) { @@ -298,10 +297,65 @@ static void kvm_patch_ins_wrteei(u32 *inst) } /* Modify the chunk to fit the invocation */ - memcpy(p, kvm_emulate_wrteei, kvm_emulate_wrteei_len * 4); - p[kvm_emulate_wrteei_branch_offs] |= distance_end & KVM_INST_B_MASK; - p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE); - flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_len * 4); + memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4); + p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK; + + if (imm_one) { + p[kvm_emulate_wrtee_reg_offs] = + KVM_INST_LI | __PPC_RT(30) | MSR_EE; + } else { + /* Make clobbered registers work too */ + switch (get_rt(rt)) { + case 30: + kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs], + magic_var(scratch2), KVM_RT_30); + break; + case 31: + kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs], + magic_var(scratch1), KVM_RT_30); + break; + default: + p[kvm_emulate_wrtee_reg_offs] |= rt; + break; + } + } + + p[kvm_emulate_wrtee_orig_ins_offs] = *inst; + flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4); + + /* Patch the invocation */ + kvm_patch_ins_b(inst, distance_start); +} + +extern u32 kvm_emulate_wrteei_0_branch_offs; +extern u32 kvm_emulate_wrteei_0_len; +extern u32 kvm_emulate_wrteei_0[]; + +static void kvm_patch_ins_wrteei_0(u32 *inst) +{ + u32 *p; + int distance_start; + int distance_end; + ulong next_inst; + + p = kvm_alloc(kvm_emulate_wrteei_0_len * 4); + if (!p) + return; + + /* Find out where we are and put everything there */ + distance_start = (ulong)p - (ulong)inst; + next_inst = ((ulong)inst + 4); + distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs]; + + /* Make sure we only write valid b instructions */ + if (distance_start > KVM_INST_B_MAX) { + kvm_patching_worked = false; + return; + } + + memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4); + p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK; + flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4); /* Patch the invocation */ kvm_patch_ins_b(inst, distance_start); @@ -380,56 +434,191 @@ static void kvm_check_ins(u32 *inst, u32 features) case KVM_INST_MFMSR: kvm_patch_ins_ld(inst, magic_var(msr), inst_rt); break; - case KVM_INST_MFSPR_SPRG0: + case KVM_INST_MFSPR(SPRN_SPRG0): kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt); break; - case KVM_INST_MFSPR_SPRG1: + case KVM_INST_MFSPR(SPRN_SPRG1): kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt); break; - case KVM_INST_MFSPR_SPRG2: + case KVM_INST_MFSPR(SPRN_SPRG2): kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt); break; - case KVM_INST_MFSPR_SPRG3: + case KVM_INST_MFSPR(SPRN_SPRG3): kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt); break; - case KVM_INST_MFSPR_SRR0: + case KVM_INST_MFSPR(SPRN_SRR0): kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt); break; - case KVM_INST_MFSPR_SRR1: + case KVM_INST_MFSPR(SPRN_SRR1): kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt); break; - case KVM_INST_MFSPR_DAR: +#ifdef CONFIG_BOOKE + case KVM_INST_MFSPR(SPRN_DEAR): +#else + case KVM_INST_MFSPR(SPRN_DAR): +#endif kvm_patch_ins_ld(inst, magic_var(dar), inst_rt); break; - case KVM_INST_MFSPR_DSISR: + case KVM_INST_MFSPR(SPRN_DSISR): kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt); 
break; +#ifdef CONFIG_PPC_BOOK3E_MMU + case KVM_INST_MFSPR(SPRN_MAS0): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt); + break; + case KVM_INST_MFSPR(SPRN_MAS1): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt); + break; + case KVM_INST_MFSPR(SPRN_MAS2): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt); + break; + case KVM_INST_MFSPR(SPRN_MAS3): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt); + break; + case KVM_INST_MFSPR(SPRN_MAS4): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt); + break; + case KVM_INST_MFSPR(SPRN_MAS6): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt); + break; + case KVM_INST_MFSPR(SPRN_MAS7): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt); + break; +#endif /* CONFIG_PPC_BOOK3E_MMU */ + + case KVM_INST_MFSPR(SPRN_SPRG4): +#ifdef CONFIG_BOOKE + case KVM_INST_MFSPR(SPRN_SPRG4R): +#endif + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt); + break; + case KVM_INST_MFSPR(SPRN_SPRG5): +#ifdef CONFIG_BOOKE + case KVM_INST_MFSPR(SPRN_SPRG5R): +#endif + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt); + break; + case KVM_INST_MFSPR(SPRN_SPRG6): +#ifdef CONFIG_BOOKE + case KVM_INST_MFSPR(SPRN_SPRG6R): +#endif + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt); + break; + case KVM_INST_MFSPR(SPRN_SPRG7): +#ifdef CONFIG_BOOKE + case KVM_INST_MFSPR(SPRN_SPRG7R): +#endif + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt); + break; + +#ifdef CONFIG_BOOKE + case KVM_INST_MFSPR(SPRN_ESR): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt); + break; +#endif + + case KVM_INST_MFSPR(SPRN_PIR): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt); + break; + + /* Stores */ - case KVM_INST_MTSPR_SPRG0: + case KVM_INST_MTSPR(SPRN_SPRG0): kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt); break; - case KVM_INST_MTSPR_SPRG1: + case KVM_INST_MTSPR(SPRN_SPRG1): kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt); break; - case KVM_INST_MTSPR_SPRG2: + case KVM_INST_MTSPR(SPRN_SPRG2): kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt); break; - case KVM_INST_MTSPR_SPRG3: + case KVM_INST_MTSPR(SPRN_SPRG3): kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt); break; - case KVM_INST_MTSPR_SRR0: + case KVM_INST_MTSPR(SPRN_SRR0): kvm_patch_ins_std(inst, magic_var(srr0), inst_rt); break; - case KVM_INST_MTSPR_SRR1: + case KVM_INST_MTSPR(SPRN_SRR1): kvm_patch_ins_std(inst, magic_var(srr1), inst_rt); break; - case KVM_INST_MTSPR_DAR: +#ifdef CONFIG_BOOKE + case KVM_INST_MTSPR(SPRN_DEAR): +#else + case KVM_INST_MTSPR(SPRN_DAR): +#endif kvm_patch_ins_std(inst, magic_var(dar), inst_rt); break; - case KVM_INST_MTSPR_DSISR: + case KVM_INST_MTSPR(SPRN_DSISR): kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt); break; +#ifdef CONFIG_PPC_BOOK3E_MMU + case KVM_INST_MTSPR(SPRN_MAS0): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt); + break; + case KVM_INST_MTSPR(SPRN_MAS1): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + 
kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt); + break; + case KVM_INST_MTSPR(SPRN_MAS2): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_std(inst, magic_var(mas2), inst_rt); + break; + case KVM_INST_MTSPR(SPRN_MAS3): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt); + break; + case KVM_INST_MTSPR(SPRN_MAS4): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt); + break; + case KVM_INST_MTSPR(SPRN_MAS6): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt); + break; + case KVM_INST_MTSPR(SPRN_MAS7): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt); + break; +#endif /* CONFIG_PPC_BOOK3E_MMU */ + + case KVM_INST_MTSPR(SPRN_SPRG4): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt); + break; + case KVM_INST_MTSPR(SPRN_SPRG5): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt); + break; + case KVM_INST_MTSPR(SPRN_SPRG6): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt); + break; + case KVM_INST_MTSPR(SPRN_SPRG7): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt); + break; + +#ifdef CONFIG_BOOKE + case KVM_INST_MTSPR(SPRN_ESR): + if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) + kvm_patch_ins_stw(inst, magic_var(esr), inst_rt); + break; +#endif /* Nops */ case KVM_INST_TLBSYNC: @@ -444,6 +633,11 @@ static void kvm_check_ins(u32 *inst, u32 features) case KVM_INST_MTMSRD_L0: kvm_patch_ins_mtmsr(inst, inst_rt); break; +#ifdef CONFIG_BOOKE + case KVM_INST_WRTEE: + kvm_patch_ins_wrtee(inst, inst_rt, 0); + break; +#endif } switch (inst_no_rt & ~KVM_MASK_RB) { @@ -461,13 +655,19 @@ static void kvm_check_ins(u32 *inst, u32 features) switch (_inst) { #ifdef CONFIG_BOOKE case KVM_INST_WRTEEI_0: + kvm_patch_ins_wrteei_0(inst); + break; + case KVM_INST_WRTEEI_1: - kvm_patch_ins_wrteei(inst); + kvm_patch_ins_wrtee(inst, 0, 1); break; #endif } } +extern u32 kvm_template_start[]; +extern u32 kvm_template_end[]; + static void kvm_use_magic_page(void) { u32 *p; @@ -488,8 +688,23 @@ static void kvm_use_magic_page(void) start = (void*)_stext; end = (void*)_etext; - for (p = start; p < end; p++) + /* + * Being interrupted in the middle of patching would + * be bad for SPRG4-7, which KVM can't keep in sync + * with emulated accesses because reads don't trap. + */ + local_irq_disable(); + + for (p = start; p < end; p++) { + /* Avoid patching the template code */ + if (p >= kvm_template_start && p < kvm_template_end) { + p = kvm_template_end - 1; + continue; + } kvm_check_ins(p, features); + } + + local_irq_enable(); printk(KERN_INFO "KVM: Live patching for a fast VM %s\n", kvm_patching_worked ? "worked" : "failed"); diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S index f2b1b2523e61..e291cf3cf954 100644 --- a/arch/powerpc/kernel/kvm_emul.S +++ b/arch/powerpc/kernel/kvm_emul.S @@ -13,6 +13,7 @@ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright SUSE Linux Products GmbH 2010 + * Copyright 2010-2011 Freescale Semiconductor, Inc. 
* * Authors: Alexander Graf <agraf@suse.de> */ @@ -65,6 +66,9 @@ kvm_hypercall_start: shared->critical == r1 and r2 is always != r1 */ \ STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); +.global kvm_template_start +kvm_template_start: + .global kvm_emulate_mtmsrd kvm_emulate_mtmsrd: @@ -167,6 +171,9 @@ maybe_stay_in_guest: kvm_emulate_mtmsr_reg2: ori r30, r0, 0 + /* Put MSR into magic page because we don't call mtmsr */ + STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) + /* Check if we have to fetch an interrupt */ lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0) cmpwi r31, 0 @@ -174,15 +181,10 @@ kvm_emulate_mtmsr_reg2: /* Check if we may trigger an interrupt */ andi. r31, r30, MSR_EE - beq no_mtmsr - - b do_mtmsr + bne do_mtmsr no_mtmsr: - /* Put MSR into magic page because we don't call mtmsr */ - STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) - SCRATCH_RESTORE /* Go back to caller */ @@ -210,24 +212,80 @@ kvm_emulate_mtmsr_orig_ins_offs: kvm_emulate_mtmsr_len: .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4 +/* also used for wrteei 1 */ +.global kvm_emulate_wrtee +kvm_emulate_wrtee: + + SCRATCH_SAVE + + /* Fetch old MSR in r31 */ + LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) + + /* Insert new MSR[EE] */ +kvm_emulate_wrtee_reg: + ori r30, r0, 0 + rlwimi r31, r30, 0, MSR_EE + + /* + * If MSR[EE] is now set, check for a pending interrupt. + * We could skip this if MSR[EE] was already on, but that + * should be rare, so don't bother. + */ + andi. r30, r30, MSR_EE + + /* Put MSR into magic page because we don't call wrtee */ + STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) + + beq no_wrtee + + /* Check if we have to fetch an interrupt */ + lwz r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0) + cmpwi r30, 0 + bne do_wrtee + +no_wrtee: + SCRATCH_RESTORE + + /* Go back to caller */ +kvm_emulate_wrtee_branch: + b . + +do_wrtee: + SCRATCH_RESTORE + /* Just fire off the wrtee if it's critical */ +kvm_emulate_wrtee_orig_ins: + wrtee r0 -.global kvm_emulate_wrteei -kvm_emulate_wrteei: + b kvm_emulate_wrtee_branch +kvm_emulate_wrtee_end: + +.global kvm_emulate_wrtee_branch_offs +kvm_emulate_wrtee_branch_offs: + .long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4 + +.global kvm_emulate_wrtee_reg_offs +kvm_emulate_wrtee_reg_offs: + .long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4 + +.global kvm_emulate_wrtee_orig_ins_offs +kvm_emulate_wrtee_orig_ins_offs: + .long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4 + +.global kvm_emulate_wrtee_len +kvm_emulate_wrtee_len: + .long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4 + +.global kvm_emulate_wrteei_0 +kvm_emulate_wrteei_0: SCRATCH_SAVE /* Fetch old MSR in r31 */ LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) /* Remove MSR_EE from old MSR */ - li r30, 0 - ori r30, r30, MSR_EE - andc r31, r31, r30 - - /* OR new MSR_EE onto the old MSR */ -kvm_emulate_wrteei_ee: - ori r31, r31, 0 + rlwinm r31, r31, 0, ~MSR_EE /* Write new MSR value back */ STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) @@ -235,22 +293,17 @@ kvm_emulate_wrteei_ee: SCRATCH_RESTORE /* Go back to caller */ -kvm_emulate_wrteei_branch: +kvm_emulate_wrteei_0_branch: b . 
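The kvm.c hunk above replaces the per-SPR KVM_INST_MFSPR_*/KVM_INST_MTSPR_* constants with a generic KVM_INST_SPR() encoder that splits the 10-bit SPR number into the two swapped 5-bit fields of the mfspr/mtspr opcode. As a standalone sanity check (userspace C, not part of the patch), the helper below mirrors that macro and confirms it reproduces two of the removed constants; the SPR numbers 0x01A (SRR0) and 0x110 (SPRG0) are taken from asm/reg.h:

/* Standalone check that the generic encoder matches the old opcodes. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SPR_FROM	0		/* mfspr */
#define SPR_TO		0x100		/* mtspr */

/* Same bit layout as KVM_INST_SPR() in arch/powerpc/kernel/kvm.c. */
static uint32_t kvm_inst_spr(uint32_t sprn, uint32_t moveto)
{
	return 0x7c0002a6 | ((sprn & 0x1f) << 16) |
			    ((sprn & 0x3e0) << 6) | moveto;
}

int main(void)
{
	/* Old hard-coded values removed by the patch above (RT field 0). */
	assert(kvm_inst_spr(0x01A, SPR_FROM) == 0x7c1a02a6); /* mfspr SRR0  */
	assert(kvm_inst_spr(0x01A, SPR_TO)   == 0x7c1a03a6); /* mtspr SRR0  */
	assert(kvm_inst_spr(0x110, SPR_FROM) == 0x7c1042a6); /* mfspr SPRG0 */
	printf("KVM_INST_SPR encodings match the old constants\n");
	return 0;
}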
-kvm_emulate_wrteei_end: - -.global kvm_emulate_wrteei_branch_offs -kvm_emulate_wrteei_branch_offs: - .long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4 +kvm_emulate_wrteei_0_end: -.global kvm_emulate_wrteei_ee_offs -kvm_emulate_wrteei_ee_offs: - .long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4 - -.global kvm_emulate_wrteei_len -kvm_emulate_wrteei_len: - .long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4 +.global kvm_emulate_wrteei_0_branch_offs +kvm_emulate_wrteei_0_branch_offs: + .long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4 +.global kvm_emulate_wrteei_0_len +kvm_emulate_wrteei_0_len: + .long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4 .global kvm_emulate_mtsrin kvm_emulate_mtsrin: @@ -300,3 +353,6 @@ kvm_emulate_mtsrin_orig_ins_offs: .global kvm_emulate_mtsrin_len kvm_emulate_mtsrin_len: .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4 + +.global kvm_template_end +kvm_template_end: diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index ac12bd80ad95..f5725bce9ed2 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -30,7 +30,6 @@ #include <asm/hvcall.h> #include <asm/firmware.h> #include <asm/rtas.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/prom.h> #include <asm/vdso_datapage.h> diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index d3114a71dd32..786a2700ec2d 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -26,7 +26,6 @@ #include <linux/cuda.h> #include <linux/pmu.h> #include <asm/prom.h> -#include <asm/system.h> #include <asm/pci-bridge.h> #include <asm/irq.h> #include <asm/pmac_feature.h> @@ -43,6 +42,7 @@ #include <asm/signal.h> #include <asm/dcr.h> #include <asm/ftrace.h> +#include <asm/switch_to.h> #ifdef CONFIG_PPC32 extern void transfer_to_handler(void); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index e40707032ac3..f88698c0f332 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -41,14 +41,16 @@ #include <asm/pgtable.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/time.h> +#include <asm/runlatch.h> #include <asm/syscalls.h> +#include <asm/switch_to.h> +#include <asm/debug.h> #ifdef CONFIG_PPC64 #include <asm/firmware.h> #endif diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 89e850af3dd6..f191bf02943a 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -41,7 +41,6 @@ #include <asm/io.h> #include <asm/kdump.h> #include <asm/smp.h> -#include <asm/system.h> #include <asm/mmu.h> #include <asm/paca.h> #include <asm/pgtable.h> diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index e2d599048142..99860273211b 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -35,7 +35,6 @@ #include <asm/irq.h> #include <asm/io.h> #include <asm/smp.h> -#include <asm/system.h> #include <asm/mmu.h> #include <asm/pgtable.h> #include <asm/pci.h> @@ -447,7 +446,7 @@ static void __init __attribute__((noreturn)) prom_panic(const char *reason) if (RELOC(of_platform) == PLATFORM_POWERMAC) asm("trap\n"); - /* ToDo: should put up an SRC here on p/iSeries */ + /* ToDo: should put up an SRC here on pSeries */ call_prom("exit", 0, 0); for (;;) /* should never get here */ diff --git a/arch/powerpc/kernel/ptrace.c 
b/arch/powerpc/kernel/ptrace.c index 5b43325402bc..8d8e028893be 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -36,7 +36,7 @@ #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> +#include <asm/switch_to.h> #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c index 69c4be917d07..469349d14a97 100644 --- a/arch/powerpc/kernel/ptrace32.c +++ b/arch/powerpc/kernel/ptrace32.c @@ -32,7 +32,7 @@ #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> +#include <asm/switch_to.h> /* * does not yet catch signals sent when the child dies. diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 9f843cdfee9e..fcec38241f79 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -33,7 +33,6 @@ #include <asm/firmware.h> #include <asm/page.h> #include <asm/param.h> -#include <asm/system.h> #include <asm/delay.h> #include <asm/uaccess.h> #include <asm/udbg.h> @@ -868,6 +867,40 @@ int rtas_ibm_suspend_me(struct rtas_args *args) } #endif +/** + * Find a specific pseries error log in an RTAS extended event log. + * @log: RTAS error/event log + * @section_id: two character section identifier + * + * Returns a pointer to the specified errorlog or NULL if not found. + */ +struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, + uint16_t section_id) +{ + struct rtas_ext_event_log_v6 *ext_log = + (struct rtas_ext_event_log_v6 *)log->buffer; + struct pseries_errorlog *sect; + unsigned char *p, *log_end; + + /* Check that we understand the format */ + if (log->extended_log_length < sizeof(struct rtas_ext_event_log_v6) || + ext_log->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG || + ext_log->company_id != RTAS_V6EXT_COMPANY_ID_IBM) + return NULL; + + log_end = log->buffer + log->extended_log_length; + p = ext_log->vendor_log; + + while (p < log_end) { + sect = (struct pseries_errorlog *)p; + if (sect->id == section_id) + return sect; + p += sect->length; + } + + return NULL; +} + asmlinkage int ppc_rtas(struct rtas_args __user *uargs) { struct rtas_args args; diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index b0ebdeab9494..afd4f051f3f2 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -51,7 +51,6 @@ #include <asm/btext.h> #include <asm/nvram.h> #include <asm/setup.h> -#include <asm/system.h> #include <asm/rtas.h> #include <asm/iommu.h> #include <asm/serial.h> diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index ac7610815113..9825f29d1faf 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -30,7 +30,6 @@ #include <asm/btext.h> #include <asm/machdep.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/pmac_feature.h> #include <asm/sections.h> #include <asm/nvram.h> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 4cb8f1e9d044..389bd4f0cdb1 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -52,7 +52,6 @@ #include <asm/btext.h> #include <asm/nvram.h> #include <asm/setup.h> -#include <asm/system.h> #include <asm/rtas.h> #include <asm/iommu.h> #include <asm/serial.h> @@ -598,7 +597,7 @@ void __init setup_arch(char **cmdline_p) /* Initialize the MMU context management stuff */ mmu_context_init(); - kvm_rma_init(); + kvm_linear_init(); 
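The rtas.h and rtas.c hunks above add the pseries extended-error-log section IDs and the get_pseries_errorlog() lookup helper. A minimal sketch of a caller, assuming elog points at an RTAS error log already fetched by an event-scan or check-exception call; the handler name and printout are illustrative only:

#include <linux/printk.h>
#include <asm/rtas.h>

static void example_handle_epow(struct rtas_error_log *elog)
{
	struct pseries_errorlog *sect;

	/* Look up the EPOW section by its two-byte ASCII ID ('E','P'). */
	sect = get_pseries_errorlog(elog, PSERIES_ELOG_SECT_ID_EPOW);
	if (!sect)
		return;		/* log carries no EPOW section */

	/* sect->data[] holds the section payload; its layout is defined
	 * by the EPOW section format and is not decoded here. */
	pr_info("EPOW section: length %u, version %u\n",
		sect->length, sect->version);
}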
ppc64_boot_msg(0x15, "Setup Done"); } diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 7006b7f4267a..651c5963662b 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c @@ -15,6 +15,7 @@ #include <asm/hw_breakpoint.h> #include <asm/uaccess.h> #include <asm/unistd.h> +#include <asm/debug.h> #include "signal.h" diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index e061ef5dd449..45eb998557f8 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -43,6 +43,7 @@ #include <asm/syscalls.h> #include <asm/sigcontext.h> #include <asm/vdso.h> +#include <asm/switch_to.h> #ifdef CONFIG_PPC64 #include "ppc32.h" #include <asm/unistd.h> diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index a50b5ec281dc..2692efdb154e 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -33,6 +33,7 @@ #include <asm/cacheflush.h> #include <asm/syscalls.h> #include <asm/vdso.h> +#include <asm/switch_to.h> #include "signal.h" diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 46695febc09f..d9f94410fd7f 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -43,12 +43,12 @@ #include <asm/machdep.h> #include <asm/cputhreads.h> #include <asm/cputable.h> -#include <asm/system.h> #include <asm/mpic.h> #include <asm/vdso_datapage.h> #ifdef CONFIG_PPC64 #include <asm/paca.h> #endif +#include <asm/debug.h> #ifdef DEBUG #include <asm/udbg.h> diff --git a/arch/powerpc/kernel/softemu8xx.c b/arch/powerpc/kernel/softemu8xx.c index af0e8290b4fc..29b2f81dd709 100644 --- a/arch/powerpc/kernel/softemu8xx.c +++ b/arch/powerpc/kernel/softemu8xx.c @@ -26,7 +26,6 @@ #include <asm/pgtable.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/io.h> /* Eventually we may need a look-up table, but this works for now. 
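For context on the get_pseries_errorlog() helper added in the rtas.c hunk above: it walks the vendor sections of a v6 extended event log and returns the first section whose two-character id matches, or NULL. A minimal, hypothetical caller might look like the sketch below; the "MI" section id and the example function name are illustrative only and are not part of this patch.

static void example_scan_rtas_event(struct rtas_error_log *log)
{
	struct pseries_errorlog *sect;

	/* Section ids are two ASCII characters packed into a u16 */
	sect = get_pseries_errorlog(log, ('M' << 8) | 'I');
	if (!sect)
		return;

	pr_info("found section id 0x%x, length %u\n",
		sect->id, sect->length);
}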
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c index 641f9adc6205..eae33e10b65f 100644 --- a/arch/powerpc/kernel/swsusp.c +++ b/arch/powerpc/kernel/swsusp.c @@ -10,9 +10,9 @@ */ #include <linux/sched.h> -#include <asm/system.h> #include <asm/current.h> #include <asm/mmu_context.h> +#include <asm/switch_to.h> void save_processor_state(void) { diff --git a/arch/powerpc/kernel/swsusp_64.c b/arch/powerpc/kernel/swsusp_64.c index 168e88480223..0e899e47c325 100644 --- a/arch/powerpc/kernel/swsusp_64.c +++ b/arch/powerpc/kernel/swsusp_64.c @@ -6,7 +6,6 @@ * GPLv2 */ -#include <asm/system.h> #include <asm/iommu.h> #include <linux/irq.h> #include <linux/sched.h> diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index 4e5bf1edc0f2..81c570633ead 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -50,6 +50,7 @@ #include <asm/mmu_context.h> #include <asm/ppc-pci.h> #include <asm/syscalls.h> +#include <asm/switch_to.h> asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp, diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 0c683d376b1c..3529446c2abd 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -17,7 +17,6 @@ #include <asm/machdep.h> #include <asm/smp.h> #include <asm/pmc.h> -#include <asm/system.h> #include "cacheinfo.h" diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index a750409ccc4e..6aa0c663e247 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -39,7 +39,6 @@ #include <asm/emulated_ops.h> #include <asm/pgtable.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/rtas.h> @@ -58,6 +57,8 @@ #include <asm/ppc-opcode.h> #include <asm/rio.h> #include <asm/fadump.h> +#include <asm/switch_to.h> +#include <asm/debug.h> #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) int (*__debugger)(struct pt_regs *regs) __read_mostly; diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index 57fa2c0a531c..c39c1ca77f46 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -46,9 +46,6 @@ void __init udbg_early_init(void) #elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE) /* Maple real mode debug */ udbg_init_maple_realmode(); -#elif defined(CONFIG_PPC_EARLY_DEBUG_ISERIES) - /* For iSeries - hit Ctrl-x Ctrl-x to see the output */ - udbg_init_iseries(); #elif defined(CONFIG_PPC_EARLY_DEBUG_BEAT) udbg_init_debug_beat(); #elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE) diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index d36ee1055f88..9eb5b9b536a7 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -24,7 +24,6 @@ #include <linux/memblock.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/mmu_context.h> @@ -721,10 +720,10 @@ static int __init vdso_init(void) vdso_data->version.minor = SYSTEMCFG_MINOR; vdso_data->processor = mfspr(SPRN_PVR); /* - * Fake the old platform number for pSeries and iSeries and add + * Fake the old platform number for pSeries and add * in LPAR bit if necessary */ - vdso_data->platform = machine_is(iseries) ? 
0x200 : 0x100; + vdso_data->platform = 0x100; if (firmware_has_feature(FW_FEATURE_LPAR)) vdso_data->platform |= 1; vdso_data->physicalMemorySize = memblock_phys_mem_size(); diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index bca3fc427b45..b2f7c8480bf6 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -1159,17 +1159,21 @@ static int vio_bus_remove(struct device *dev) * vio_register_driver: - Register a new vio driver * @drv: The vio_driver structure to be registered. */ -int vio_register_driver(struct vio_driver *viodrv) +int __vio_register_driver(struct vio_driver *viodrv, struct module *owner, + const char *mod_name) { - printk(KERN_DEBUG "%s: driver %s registering\n", __func__, - viodrv->driver.name); + pr_debug("%s: driver %s registering\n", __func__, viodrv->name); /* fill in 'struct driver' fields */ + viodrv->driver.name = viodrv->name; + viodrv->driver.pm = viodrv->pm; viodrv->driver.bus = &vio_bus_type; + viodrv->driver.owner = owner; + viodrv->driver.mod_name = mod_name; return driver_register(&viodrv->driver); } -EXPORT_SYMBOL(vio_register_driver); +EXPORT_SYMBOL(__vio_register_driver); /** * vio_unregister_driver - Remove registration of vio driver. diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 78133deb4b64..8f64709ae331 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -69,6 +69,7 @@ config KVM_BOOK3S_64 config KVM_BOOK3S_64_HV bool "KVM support for POWER7 and PPC970 using hypervisor mode in host" depends on KVM_BOOK3S_64 + select MMU_NOTIFIER ---help--- Support running unmodified book3s_64 guest kernels in virtual machines on POWER7 and PPC970 processors that have diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index e41ac6f7dcf1..7d54f4ed6d96 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority) return true; } -void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) +void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) { unsigned long *pending = &vcpu->arch.pending_exceptions; unsigned long old_pending = vcpu->arch.pending_exceptions; @@ -423,10 +423,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) regs->sprg1 = vcpu->arch.shared->sprg1; regs->sprg2 = vcpu->arch.shared->sprg2; regs->sprg3 = vcpu->arch.shared->sprg3; - regs->sprg4 = vcpu->arch.sprg4; - regs->sprg5 = vcpu->arch.sprg5; - regs->sprg6 = vcpu->arch.sprg6; - regs->sprg7 = vcpu->arch.sprg7; + regs->sprg4 = vcpu->arch.shared->sprg4; + regs->sprg5 = vcpu->arch.shared->sprg5; + regs->sprg6 = vcpu->arch.shared->sprg6; + regs->sprg7 = vcpu->arch.shared->sprg7; for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) regs->gpr[i] = kvmppc_get_gpr(vcpu, i); @@ -450,10 +450,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) vcpu->arch.shared->sprg1 = regs->sprg1; vcpu->arch.shared->sprg2 = regs->sprg2; vcpu->arch.shared->sprg3 = regs->sprg3; - vcpu->arch.sprg4 = regs->sprg4; - vcpu->arch.sprg5 = regs->sprg5; - vcpu->arch.sprg6 = regs->sprg6; - vcpu->arch.sprg7 = regs->sprg7; + vcpu->arch.shared->sprg4 = regs->sprg4; + vcpu->arch.shared->sprg5 = regs->sprg5; + vcpu->arch.shared->sprg6 = regs->sprg6; + vcpu->arch.shared->sprg7 = regs->sprg7; for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) kvmppc_set_gpr(vcpu, i, regs->gpr[i]); @@ -477,41 +477,10 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, return 0; } -/* - * Get (and clear) the dirty 
memory log for a memory slot. - */ -int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, - struct kvm_dirty_log *log) +void kvmppc_decrementer_func(unsigned long data) { - struct kvm_memory_slot *memslot; - struct kvm_vcpu *vcpu; - ulong ga, ga_end; - int is_dirty = 0; - int r; - unsigned long n; - - mutex_lock(&kvm->slots_lock); - - r = kvm_get_dirty_log(kvm, log, &is_dirty); - if (r) - goto out; - - /* If nothing is dirty, don't bother messing with page tables. */ - if (is_dirty) { - memslot = id_to_memslot(kvm->memslots, log->slot); + struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; - ga = memslot->base_gfn << PAGE_SHIFT; - ga_end = ga + (memslot->npages << PAGE_SHIFT); - - kvm_for_each_vcpu(n, vcpu, kvm) - kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); - - n = kvm_dirty_bitmap_bytes(memslot); - memset(memslot->dirty_bitmap, 0, n); - } - - r = 0; -out: - mutex_unlock(&kvm->slots_lock); - return r; + kvmppc_core_queue_dec(vcpu); + kvm_vcpu_kick(vcpu); } diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c index 9fecbfbce773..f922c29bb234 100644 --- a/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -151,13 +151,15 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) bool primary = false; bool evict = false; struct hpte_cache *pte; + int r = 0; /* Get host physical address for gpa */ hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); if (is_error_pfn(hpaddr)) { printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); - return -EINVAL; + r = -EINVAL; + goto out; } hpaddr <<= PAGE_SHIFT; @@ -249,7 +251,8 @@ next_pteg: kvmppc_mmu_hpte_cache_map(vcpu, pte); - return 0; +out: + return r; } static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) @@ -297,12 +300,14 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) u64 gvsid; u32 sr; struct kvmppc_sid_map *map; - struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu); + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + int r = 0; if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { /* Invalidate an entry */ svcpu->sr[esid] = SR_INVALID; - return -ENOENT; + r = -ENOENT; + goto out; } map = find_sid_vsid(vcpu, gvsid); @@ -315,17 +320,21 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr); - return 0; +out: + svcpu_put(svcpu); + return r; } void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) { int i; - struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu); + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr)); for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++) svcpu->sr[i] = SR_INVALID; + + svcpu_put(svcpu); } void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index fa2f08434ba5..6f87f39a1ac2 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -88,12 +88,14 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) int vflags = 0; int attempt = 0; struct kvmppc_sid_map *map; + int r = 0; /* Get host physical address for gpa */ hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); if (is_error_pfn(hpaddr)) { printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); - return -EINVAL; + r = -EINVAL; + goto out; } hpaddr <<= PAGE_SHIFT; hpaddr |= orig_pte->raddr & 
(~0xfffULL & ~PAGE_MASK); @@ -110,7 +112,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n", vsid, orig_pte->eaddr); WARN_ON(true); - return -EINVAL; + r = -EINVAL; + goto out; } vsid = map->host_vsid; @@ -131,8 +134,10 @@ map_again: /* In case we tried normal mapping already, let's nuke old entries */ if (attempt > 1) - if (ppc_md.hpte_remove(hpteg) < 0) - return -1; + if (ppc_md.hpte_remove(hpteg) < 0) { + r = -1; + goto out; + } ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M); @@ -162,7 +167,8 @@ map_again: kvmppc_mmu_hpte_cache_map(vcpu, pte); } - return 0; +out: + return r; } static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) @@ -207,25 +213,30 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) { + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); int i; int max_slb_size = 64; int found_inval = -1; int r; - if (!to_svcpu(vcpu)->slb_max) - to_svcpu(vcpu)->slb_max = 1; + if (!svcpu->slb_max) + svcpu->slb_max = 1; /* Are we overwriting? */ - for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) { - if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V)) + for (i = 1; i < svcpu->slb_max; i++) { + if (!(svcpu->slb[i].esid & SLB_ESID_V)) found_inval = i; - else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid) - return i; + else if ((svcpu->slb[i].esid & ESID_MASK) == esid) { + r = i; + goto out; + } } /* Found a spare entry that was invalidated before */ - if (found_inval > 0) - return found_inval; + if (found_inval > 0) { + r = found_inval; + goto out; + } /* No spare invalid entry, so create one */ @@ -233,30 +244,35 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) max_slb_size = mmu_slb_size; /* Overflowing -> purge */ - if ((to_svcpu(vcpu)->slb_max) == max_slb_size) + if ((svcpu->slb_max) == max_slb_size) kvmppc_mmu_flush_segments(vcpu); - r = to_svcpu(vcpu)->slb_max; - to_svcpu(vcpu)->slb_max++; + r = svcpu->slb_max; + svcpu->slb_max++; +out: + svcpu_put(svcpu); return r; } int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) { + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); u64 esid = eaddr >> SID_SHIFT; u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V; u64 slb_vsid = SLB_VSID_USER; u64 gvsid; int slb_index; struct kvmppc_sid_map *map; + int r = 0; slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK); if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { /* Invalidate an entry */ - to_svcpu(vcpu)->slb[slb_index].esid = 0; - return -ENOENT; + svcpu->slb[slb_index].esid = 0; + r = -ENOENT; + goto out; } map = find_sid_vsid(vcpu, gvsid); @@ -269,18 +285,22 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) slb_vsid &= ~SLB_VSID_KP; slb_esid |= slb_index; - to_svcpu(vcpu)->slb[slb_index].esid = slb_esid; - to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid; + svcpu->slb[slb_index].esid = slb_esid; + svcpu->slb[slb_index].vsid = slb_vsid; trace_kvm_book3s_slbmte(slb_vsid, slb_esid); - return 0; +out: + svcpu_put(svcpu); + return r; } void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) { - to_svcpu(vcpu)->slb_max = 1; - to_svcpu(vcpu)->slb[0].esid = 0; + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + svcpu->slb_max = 1; + svcpu->slb[0].esid = 0; + svcpu_put(svcpu); } void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) diff --git 
a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index bc3a2ea94217..ddc485a529f2 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -23,6 +23,7 @@ #include <linux/gfp.h> #include <linux/slab.h> #include <linux/hugetlb.h> +#include <linux/vmalloc.h> #include <asm/tlbflush.h> #include <asm/kvm_ppc.h> @@ -33,15 +34,6 @@ #include <asm/ppc-opcode.h> #include <asm/cputable.h> -/* For now use fixed-size 16MB page table */ -#define HPT_ORDER 24 -#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */ -#define HPT_HASH_MASK (HPT_NPTEG - 1) - -/* Pages in the VRMA are 16MB pages */ -#define VRMA_PAGE_ORDER 24 -#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */ - /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */ #define MAX_LPID_970 63 #define NR_LPIDS (LPID_RSVD + 1) @@ -51,21 +43,41 @@ long kvmppc_alloc_hpt(struct kvm *kvm) { unsigned long hpt; unsigned long lpid; + struct revmap_entry *rev; + struct kvmppc_linear_info *li; + + /* Allocate guest's hashed page table */ + li = kvm_alloc_hpt(); + if (li) { + /* using preallocated memory */ + hpt = (ulong)li->base_virt; + kvm->arch.hpt_li = li; + } else { + /* using dynamic memory */ + hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT| + __GFP_NOWARN, HPT_ORDER - PAGE_SHIFT); + } - hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN, - HPT_ORDER - PAGE_SHIFT); if (!hpt) { pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n"); return -ENOMEM; } kvm->arch.hpt_virt = hpt; + /* Allocate reverse map array */ + rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE); + if (!rev) { + pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n"); + goto out_freehpt; + } + kvm->arch.revmap = rev; + + /* Allocate the guest's logical partition ID */ do { lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS); if (lpid >= NR_LPIDS) { pr_err("kvm_alloc_hpt: No LPIDs free\n"); - free_pages(hpt, HPT_ORDER - PAGE_SHIFT); - return -ENOMEM; + goto out_freeboth; } } while (test_and_set_bit(lpid, lpid_inuse)); @@ -74,37 +86,64 @@ long kvmppc_alloc_hpt(struct kvm *kvm) pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid); return 0; + + out_freeboth: + vfree(rev); + out_freehpt: + free_pages(hpt, HPT_ORDER - PAGE_SHIFT); + return -ENOMEM; } void kvmppc_free_hpt(struct kvm *kvm) { clear_bit(kvm->arch.lpid, lpid_inuse); - free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT); + vfree(kvm->arch.revmap); + if (kvm->arch.hpt_li) + kvm_release_hpt(kvm->arch.hpt_li); + else + free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT); +} + +/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */ +static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize) +{ + return (pgsize > 0x1000) ? HPTE_V_LARGE : 0; +} + +/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */ +static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize) +{ + return (pgsize == 0x10000) ? 
0x1000 : 0; } -void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem) +void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, + unsigned long porder) { unsigned long i; - unsigned long npages = kvm->arch.ram_npages; - unsigned long pfn; - unsigned long *hpte; - unsigned long hash; - struct kvmppc_pginfo *pginfo = kvm->arch.ram_pginfo; + unsigned long npages; + unsigned long hp_v, hp_r; + unsigned long addr, hash; + unsigned long psize; + unsigned long hp0, hp1; + long ret; - if (!pginfo) - return; + psize = 1ul << porder; + npages = memslot->npages >> (porder - PAGE_SHIFT); /* VRMA can't be > 1TB */ - if (npages > 1ul << (40 - kvm->arch.ram_porder)) - npages = 1ul << (40 - kvm->arch.ram_porder); + if (npages > 1ul << (40 - porder)) + npages = 1ul << (40 - porder); /* Can't use more than 1 HPTE per HPTEG */ if (npages > HPT_NPTEG) npages = HPT_NPTEG; + hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) | + HPTE_V_BOLTED | hpte0_pgsize_encoding(psize); + hp1 = hpte1_pgsize_encoding(psize) | + HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX; + for (i = 0; i < npages; ++i) { - pfn = pginfo[i].pfn; - if (!pfn) - break; + addr = i << porder; /* can't use hpt_hash since va > 64 bits */ hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK; /* @@ -113,15 +152,15 @@ void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem) * at most one HPTE per HPTEG, we just assume entry 7 * is available and use it. */ - hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 7)); - hpte += 7 * 2; - /* HPTE low word - RPN, protection, etc. */ - hpte[1] = (pfn << PAGE_SHIFT) | HPTE_R_R | HPTE_R_C | - HPTE_R_M | PP_RWXX; - wmb(); - hpte[0] = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) | - (i << (VRMA_PAGE_ORDER - 16)) | HPTE_V_BOLTED | - HPTE_V_LARGE | HPTE_V_VALID; + hash = (hash << 3) + 7; + hp_v = hp0 | ((addr >> 16) & ~0x7fUL); + hp_r = hp1 | addr; + ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r); + if (ret != H_SUCCESS) { + pr_err("KVM: map_vrma at %lx failed, ret=%ld\n", + addr, ret); + break; + } } } @@ -158,10 +197,814 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) kvmppc_set_msr(vcpu, MSR_SF | MSR_ME); } +/* + * This is called to get a reference to a guest page if there isn't + * one already in the kvm->arch.slot_phys[][] arrays. + */ +static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, + struct kvm_memory_slot *memslot, + unsigned long psize) +{ + unsigned long start; + long np, err; + struct page *page, *hpage, *pages[1]; + unsigned long s, pgsize; + unsigned long *physp; + unsigned int is_io, got, pgorder; + struct vm_area_struct *vma; + unsigned long pfn, i, npages; + + physp = kvm->arch.slot_phys[memslot->id]; + if (!physp) + return -EINVAL; + if (physp[gfn - memslot->base_gfn]) + return 0; + + is_io = 0; + got = 0; + page = NULL; + pgsize = psize; + err = -EINVAL; + start = gfn_to_hva_memslot(memslot, gfn); + + /* Instantiate and get the page we want access to */ + np = get_user_pages_fast(start, 1, 1, pages); + if (np != 1) { + /* Look up the vma for the page */ + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, start); + if (!vma || vma->vm_start > start || + start + psize > vma->vm_end || + !(vma->vm_flags & VM_PFNMAP)) + goto up_err; + is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot)); + pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); + /* check alignment of pfn vs. 
requested page size */ + if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1))) + goto up_err; + up_read(¤t->mm->mmap_sem); + + } else { + page = pages[0]; + got = KVMPPC_GOT_PAGE; + + /* See if this is a large page */ + s = PAGE_SIZE; + if (PageHuge(page)) { + hpage = compound_head(page); + s <<= compound_order(hpage); + /* Get the whole large page if slot alignment is ok */ + if (s > psize && slot_is_aligned(memslot, s) && + !(memslot->userspace_addr & (s - 1))) { + start &= ~(s - 1); + pgsize = s; + page = hpage; + } + } + if (s < psize) + goto out; + pfn = page_to_pfn(page); + } + + npages = pgsize >> PAGE_SHIFT; + pgorder = __ilog2(npages); + physp += (gfn - memslot->base_gfn) & ~(npages - 1); + spin_lock(&kvm->arch.slot_phys_lock); + for (i = 0; i < npages; ++i) { + if (!physp[i]) { + physp[i] = ((pfn + i) << PAGE_SHIFT) + + got + is_io + pgorder; + got = 0; + } + } + spin_unlock(&kvm->arch.slot_phys_lock); + err = 0; + + out: + if (got) { + if (PageHuge(page)) + page = compound_head(page); + put_page(page); + } + return err; + + up_err: + up_read(¤t->mm->mmap_sem); + return err; +} + +/* + * We come here on a H_ENTER call from the guest when we are not + * using mmu notifiers and we don't have the requested page pinned + * already. + */ +long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, + long pte_index, unsigned long pteh, unsigned long ptel) +{ + struct kvm *kvm = vcpu->kvm; + unsigned long psize, gpa, gfn; + struct kvm_memory_slot *memslot; + long ret; + + if (kvm->arch.using_mmu_notifiers) + goto do_insert; + + psize = hpte_page_size(pteh, ptel); + if (!psize) + return H_PARAMETER; + + pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID); + + /* Find the memslot (if any) for this address */ + gpa = (ptel & HPTE_R_RPN) & ~(psize - 1); + gfn = gpa >> PAGE_SHIFT; + memslot = gfn_to_memslot(kvm, gfn); + if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) { + if (!slot_is_aligned(memslot, psize)) + return H_PARAMETER; + if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0) + return H_PARAMETER; + } + + do_insert: + /* Protect linux PTE lookup from page table destruction */ + rcu_read_lock_sched(); /* this disables preemption too */ + vcpu->arch.pgdir = current->mm->pgd; + ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel); + rcu_read_unlock_sched(); + if (ret == H_TOO_HARD) { + /* this can't happen */ + pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n"); + ret = H_RESOURCE; /* or something */ + } + return ret; + +} + +static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu, + gva_t eaddr) +{ + u64 mask; + int i; + + for (i = 0; i < vcpu->arch.slb_nr; i++) { + if (!(vcpu->arch.slb[i].orige & SLB_ESID_V)) + continue; + + if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T) + mask = ESID_MASK_1T; + else + mask = ESID_MASK; + + if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0) + return &vcpu->arch.slb[i]; + } + return NULL; +} + +static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r, + unsigned long ea) +{ + unsigned long ra_mask; + + ra_mask = hpte_page_size(v, r) - 1; + return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask); +} + static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, - struct kvmppc_pte *gpte, bool data) + struct kvmppc_pte *gpte, bool data) +{ + struct kvm *kvm = vcpu->kvm; + struct kvmppc_slb *slbe; + unsigned long slb_v; + unsigned long pp, key; + unsigned long v, gr; + unsigned long *hptep; + int index; + int virtmode = vcpu->arch.shregs.msr & (data ? 
MSR_DR : MSR_IR); + + /* Get SLB entry */ + if (virtmode) { + slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr); + if (!slbe) + return -EINVAL; + slb_v = slbe->origv; + } else { + /* real mode access */ + slb_v = vcpu->kvm->arch.vrma_slb_v; + } + + /* Find the HPTE in the hash table */ + index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, + HPTE_V_VALID | HPTE_V_ABSENT); + if (index < 0) + return -ENOENT; + hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); + v = hptep[0] & ~HPTE_V_HVLOCK; + gr = kvm->arch.revmap[index].guest_rpte; + + /* Unlock the HPTE */ + asm volatile("lwsync" : : : "memory"); + hptep[0] = v; + + gpte->eaddr = eaddr; + gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); + + /* Get PP bits and key for permission check */ + pp = gr & (HPTE_R_PP0 | HPTE_R_PP); + key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS; + key &= slb_v; + + /* Calculate permissions */ + gpte->may_read = hpte_read_permission(pp, key); + gpte->may_write = hpte_write_permission(pp, key); + gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G)); + + /* Storage key permission check for POWER7 */ + if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) { + int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr); + if (amrfield & 1) + gpte->may_read = 0; + if (amrfield & 2) + gpte->may_write = 0; + } + + /* Get the guest physical address */ + gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr); + return 0; +} + +/* + * Quick test for whether an instruction is a load or a store. + * If the instruction is a load or a store, then this will indicate + * which it is, at least on server processors. (Embedded processors + * have some external PID instructions that don't follow the rule + * embodied here.) If the instruction isn't a load or store, then + * this doesn't return anything useful. + */ +static int instruction_is_store(unsigned int instr) +{ + unsigned int mask; + + mask = 0x10000000; + if ((instr & 0xfc000000) == 0x7c000000) + mask = 0x100; /* major opcode 31 */ + return (instr & mask) != 0; +} + +static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned long gpa, int is_store) +{ + int ret; + u32 last_inst; + unsigned long srr0 = kvmppc_get_pc(vcpu); + + /* We try to load the last instruction. We don't let + * emulate_instruction do it as it doesn't check what + * kvmppc_ld returns. + * If we fail, we just return to the guest and try executing it again. + */ + if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) { + ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); + if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED) + return RESUME_GUEST; + vcpu->arch.last_inst = last_inst; + } + + /* + * WARNING: We do not know for sure whether the instruction we just + * read from memory is the same that caused the fault in the first + * place. If the instruction we read is neither an load or a store, + * then it can't access memory, so we don't need to worry about + * enforcing access permissions. So, assuming it is a load or + * store, we just check that its direction (load or store) is + * consistent with the original fault, since that's what we + * checked the access permissions against. If there is a mismatch + * we just return and retry the instruction. + */ + + if (instruction_is_store(vcpu->arch.last_inst) != !!is_store) + return RESUME_GUEST; + + /* + * Emulated accesses are emulated by looking at the hash for + * translation once, then performing the access later. 
The + * translation could be invalidated in the meantime in which + * point performing the subsequent memory access on the old + * physical address could possibly be a security hole for the + * guest (but not the host). + * + * This is less of an issue for MMIO stores since they aren't + * globally visible. It could be an issue for MMIO loads to + * a certain extent but we'll ignore it for now. + */ + + vcpu->arch.paddr_accessed = gpa; + return kvmppc_emulate_mmio(run, vcpu); +} + +int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned long ea, unsigned long dsisr) +{ + struct kvm *kvm = vcpu->kvm; + unsigned long *hptep, hpte[3], r; + unsigned long mmu_seq, psize, pte_size; + unsigned long gfn, hva, pfn; + struct kvm_memory_slot *memslot; + unsigned long *rmap; + struct revmap_entry *rev; + struct page *page, *pages[1]; + long index, ret, npages; + unsigned long is_io; + unsigned int writing, write_ok; + struct vm_area_struct *vma; + unsigned long rcbits; + + /* + * Real-mode code has already searched the HPT and found the + * entry we're interested in. Lock the entry and check that + * it hasn't changed. If it has, just return and re-execute the + * instruction. + */ + if (ea != vcpu->arch.pgfault_addr) + return RESUME_GUEST; + index = vcpu->arch.pgfault_index; + hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); + rev = &kvm->arch.revmap[index]; + preempt_disable(); + while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) + cpu_relax(); + hpte[0] = hptep[0] & ~HPTE_V_HVLOCK; + hpte[1] = hptep[1]; + hpte[2] = r = rev->guest_rpte; + asm volatile("lwsync" : : : "memory"); + hptep[0] = hpte[0]; + preempt_enable(); + + if (hpte[0] != vcpu->arch.pgfault_hpte[0] || + hpte[1] != vcpu->arch.pgfault_hpte[1]) + return RESUME_GUEST; + + /* Translate the logical address and get the page */ + psize = hpte_page_size(hpte[0], r); + gfn = hpte_rpn(r, psize); + memslot = gfn_to_memslot(kvm, gfn); + + /* No memslot means it's an emulated MMIO region */ + if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { + unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1)); + return kvmppc_hv_emulate_mmio(run, vcpu, gpa, + dsisr & DSISR_ISSTORE); + } + + if (!kvm->arch.using_mmu_notifiers) + return -EFAULT; /* should never get here */ + + /* used to check for invalidations in progress */ + mmu_seq = kvm->mmu_notifier_seq; + smp_rmb(); + + is_io = 0; + pfn = 0; + page = NULL; + pte_size = PAGE_SIZE; + writing = (dsisr & DSISR_ISSTORE) != 0; + /* If writing != 0, then the HPTE must allow writing, if we get here */ + write_ok = writing; + hva = gfn_to_hva_memslot(memslot, gfn); + npages = get_user_pages_fast(hva, 1, writing, pages); + if (npages < 1) { + /* Check if it's an I/O mapping */ + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, hva); + if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end && + (vma->vm_flags & VM_PFNMAP)) { + pfn = vma->vm_pgoff + + ((hva - vma->vm_start) >> PAGE_SHIFT); + pte_size = psize; + is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot)); + write_ok = vma->vm_flags & VM_WRITE; + } + up_read(¤t->mm->mmap_sem); + if (!pfn) + return -EFAULT; + } else { + page = pages[0]; + if (PageHuge(page)) { + page = compound_head(page); + pte_size <<= compound_order(page); + } + /* if the guest wants write access, see if that is OK */ + if (!writing && hpte_is_writable(r)) { + pte_t *ptep, pte; + + /* + * We need to protect against page table destruction + * while looking up and updating the pte. 
+ */ + rcu_read_lock_sched(); + ptep = find_linux_pte_or_hugepte(current->mm->pgd, + hva, NULL); + if (ptep && pte_present(*ptep)) { + pte = kvmppc_read_update_linux_pte(ptep, 1); + if (pte_write(pte)) + write_ok = 1; + } + rcu_read_unlock_sched(); + } + pfn = page_to_pfn(page); + } + + ret = -EFAULT; + if (psize > pte_size) + goto out_put; + + /* Check WIMG vs. the actual page we're accessing */ + if (!hpte_cache_flags_ok(r, is_io)) { + if (is_io) + return -EFAULT; + /* + * Allow guest to map emulated device memory as + * uncacheable, but actually make it cacheable. + */ + r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; + } + + /* Set the HPTE to point to pfn */ + r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT); + if (hpte_is_writable(r) && !write_ok) + r = hpte_make_readonly(r); + ret = RESUME_GUEST; + preempt_disable(); + while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) + cpu_relax(); + if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] || + rev->guest_rpte != hpte[2]) + /* HPTE has been changed under us; let the guest retry */ + goto out_unlock; + hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID; + + rmap = &memslot->rmap[gfn - memslot->base_gfn]; + lock_rmap(rmap); + + /* Check if we might have been invalidated; let the guest retry if so */ + ret = RESUME_GUEST; + if (mmu_notifier_retry(vcpu, mmu_seq)) { + unlock_rmap(rmap); + goto out_unlock; + } + + /* Only set R/C in real HPTE if set in both *rmap and guest_rpte */ + rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT; + r &= rcbits | ~(HPTE_R_R | HPTE_R_C); + + if (hptep[0] & HPTE_V_VALID) { + /* HPTE was previously valid, so we need to invalidate it */ + unlock_rmap(rmap); + hptep[0] |= HPTE_V_ABSENT; + kvmppc_invalidate_hpte(kvm, hptep, index); + /* don't lose previous R and C bits */ + r |= hptep[1] & (HPTE_R_R | HPTE_R_C); + } else { + kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0); + } + + hptep[1] = r; + eieio(); + hptep[0] = hpte[0]; + asm volatile("ptesync" : : : "memory"); + preempt_enable(); + if (page && hpte_is_writable(r)) + SetPageDirty(page); + + out_put: + if (page) + put_page(page); + return ret; + + out_unlock: + hptep[0] &= ~HPTE_V_HVLOCK; + preempt_enable(); + goto out_put; +} + +static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, + int (*handler)(struct kvm *kvm, unsigned long *rmapp, + unsigned long gfn)) +{ + int ret; + int retval = 0; + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + + slots = kvm_memslots(kvm); + kvm_for_each_memslot(memslot, slots) { + unsigned long start = memslot->userspace_addr; + unsigned long end; + + end = start + (memslot->npages << PAGE_SHIFT); + if (hva >= start && hva < end) { + gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; + + ret = handler(kvm, &memslot->rmap[gfn_offset], + memslot->base_gfn + gfn_offset); + retval |= ret; + } + } + + return retval; +} + +static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, + unsigned long gfn) +{ + struct revmap_entry *rev = kvm->arch.revmap; + unsigned long h, i, j; + unsigned long *hptep; + unsigned long ptel, psize, rcbits; + + for (;;) { + lock_rmap(rmapp); + if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { + unlock_rmap(rmapp); + break; + } + + /* + * To avoid an ABBA deadlock with the HPTE lock bit, + * we can't spin on the HPTE lock while holding the + * rmap chain lock. 
+ */ + i = *rmapp & KVMPPC_RMAP_INDEX; + hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); + if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { + /* unlock rmap before spinning on the HPTE lock */ + unlock_rmap(rmapp); + while (hptep[0] & HPTE_V_HVLOCK) + cpu_relax(); + continue; + } + j = rev[i].forw; + if (j == i) { + /* chain is now empty */ + *rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX); + } else { + /* remove i from chain */ + h = rev[i].back; + rev[h].forw = j; + rev[j].back = h; + rev[i].forw = rev[i].back = i; + *rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j; + } + + /* Now check and modify the HPTE */ + ptel = rev[i].guest_rpte; + psize = hpte_page_size(hptep[0], ptel); + if ((hptep[0] & HPTE_V_VALID) && + hpte_rpn(ptel, psize) == gfn) { + hptep[0] |= HPTE_V_ABSENT; + kvmppc_invalidate_hpte(kvm, hptep, i); + /* Harvest R and C */ + rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C); + *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT; + rev[i].guest_rpte = ptel | rcbits; + } + unlock_rmap(rmapp); + hptep[0] &= ~HPTE_V_HVLOCK; + } + return 0; +} + +int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) +{ + if (kvm->arch.using_mmu_notifiers) + kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); + return 0; +} + +static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, + unsigned long gfn) +{ + struct revmap_entry *rev = kvm->arch.revmap; + unsigned long head, i, j; + unsigned long *hptep; + int ret = 0; + + retry: + lock_rmap(rmapp); + if (*rmapp & KVMPPC_RMAP_REFERENCED) { + *rmapp &= ~KVMPPC_RMAP_REFERENCED; + ret = 1; + } + if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { + unlock_rmap(rmapp); + return ret; + } + + i = head = *rmapp & KVMPPC_RMAP_INDEX; + do { + hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); + j = rev[i].forw; + + /* If this HPTE isn't referenced, ignore it */ + if (!(hptep[1] & HPTE_R_R)) + continue; + + if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { + /* unlock rmap before spinning on the HPTE lock */ + unlock_rmap(rmapp); + while (hptep[0] & HPTE_V_HVLOCK) + cpu_relax(); + goto retry; + } + + /* Now check and modify the HPTE */ + if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) { + kvmppc_clear_ref_hpte(kvm, hptep, i); + rev[i].guest_rpte |= HPTE_R_R; + ret = 1; + } + hptep[0] &= ~HPTE_V_HVLOCK; + } while ((i = j) != head); + + unlock_rmap(rmapp); + return ret; +} + +int kvm_age_hva(struct kvm *kvm, unsigned long hva) +{ + if (!kvm->arch.using_mmu_notifiers) + return 0; + return kvm_handle_hva(kvm, hva, kvm_age_rmapp); +} + +static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, + unsigned long gfn) +{ + struct revmap_entry *rev = kvm->arch.revmap; + unsigned long head, i, j; + unsigned long *hp; + int ret = 1; + + if (*rmapp & KVMPPC_RMAP_REFERENCED) + return 1; + + lock_rmap(rmapp); + if (*rmapp & KVMPPC_RMAP_REFERENCED) + goto out; + + if (*rmapp & KVMPPC_RMAP_PRESENT) { + i = head = *rmapp & KVMPPC_RMAP_INDEX; + do { + hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4)); + j = rev[i].forw; + if (hp[1] & HPTE_R_R) + goto out; + } while ((i = j) != head); + } + ret = 0; + + out: + unlock_rmap(rmapp); + return ret; +} + +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ + if (!kvm->arch.using_mmu_notifiers) + return 0; + return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp); +} + +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) { - return -ENOENT; + if (!kvm->arch.using_mmu_notifiers) + return; + kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); +} + +static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp) +{ + 
struct revmap_entry *rev = kvm->arch.revmap; + unsigned long head, i, j; + unsigned long *hptep; + int ret = 0; + + retry: + lock_rmap(rmapp); + if (*rmapp & KVMPPC_RMAP_CHANGED) { + *rmapp &= ~KVMPPC_RMAP_CHANGED; + ret = 1; + } + if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { + unlock_rmap(rmapp); + return ret; + } + + i = head = *rmapp & KVMPPC_RMAP_INDEX; + do { + hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); + j = rev[i].forw; + + if (!(hptep[1] & HPTE_R_C)) + continue; + + if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { + /* unlock rmap before spinning on the HPTE lock */ + unlock_rmap(rmapp); + while (hptep[0] & HPTE_V_HVLOCK) + cpu_relax(); + goto retry; + } + + /* Now check and modify the HPTE */ + if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) { + /* need to make it temporarily absent to clear C */ + hptep[0] |= HPTE_V_ABSENT; + kvmppc_invalidate_hpte(kvm, hptep, i); + hptep[1] &= ~HPTE_R_C; + eieio(); + hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID; + rev[i].guest_rpte |= HPTE_R_C; + ret = 1; + } + hptep[0] &= ~HPTE_V_HVLOCK; + } while ((i = j) != head); + + unlock_rmap(rmapp); + return ret; +} + +long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ + unsigned long i; + unsigned long *rmapp, *map; + + preempt_disable(); + rmapp = memslot->rmap; + map = memslot->dirty_bitmap; + for (i = 0; i < memslot->npages; ++i) { + if (kvm_test_clear_dirty(kvm, rmapp)) + __set_bit_le(i, map); + ++rmapp; + } + preempt_enable(); + return 0; +} + +void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, + unsigned long *nb_ret) +{ + struct kvm_memory_slot *memslot; + unsigned long gfn = gpa >> PAGE_SHIFT; + struct page *page, *pages[1]; + int npages; + unsigned long hva, psize, offset; + unsigned long pa; + unsigned long *physp; + + memslot = gfn_to_memslot(kvm, gfn); + if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) + return NULL; + if (!kvm->arch.using_mmu_notifiers) { + physp = kvm->arch.slot_phys[memslot->id]; + if (!physp) + return NULL; + physp += gfn - memslot->base_gfn; + pa = *physp; + if (!pa) { + if (kvmppc_get_guest_page(kvm, gfn, memslot, + PAGE_SIZE) < 0) + return NULL; + pa = *physp; + } + page = pfn_to_page(pa >> PAGE_SHIFT); + } else { + hva = gfn_to_hva_memslot(memslot, gfn); + npages = get_user_pages_fast(hva, 1, 1, pages); + if (npages < 1) + return NULL; + page = pages[0]; + } + psize = PAGE_SIZE; + if (PageHuge(page)) { + page = compound_head(page); + psize <<= compound_order(page); + } + if (!kvm->arch.using_mmu_notifiers) + get_page(page); + offset = gpa & (psize - 1); + if (nb_ret) + *nb_ret = psize - offset; + return page_address(page) + offset; +} + +void kvmppc_unpin_guest_page(struct kvm *kvm, void *va) +{ + struct page *page = virt_to_page(va); + + page = compound_head(page); + put_page(page); } void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 0c9dc62532d0..f1950d131827 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -230,9 +230,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, r = kvmppc_st(vcpu, &addr, 32, zeros, true); if ((r == -ENOENT) || (r == -EPERM)) { + struct kvmppc_book3s_shadow_vcpu *svcpu; + + svcpu = svcpu_get(vcpu); *advance = 0; vcpu->arch.shared->dar = vaddr; - to_svcpu(vcpu)->fault_dar = vaddr; + svcpu->fault_dar = vaddr; dsisr = DSISR_ISSTORE; if (r == -ENOENT) @@ -241,7 +244,8 @@ int kvmppc_core_emulate_op(struct kvm_run 
*run, struct kvm_vcpu *vcpu, dsisr |= DSISR_PROTFAULT; vcpu->arch.shared->dsisr = dsisr; - to_svcpu(vcpu)->fault_dsisr = dsisr; + svcpu->fault_dsisr = dsisr; + svcpu_put(svcpu); kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index a7267167a550..01294a5099dd 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -45,25 +45,18 @@ #include <asm/cputhreads.h> #include <asm/page.h> #include <asm/hvcall.h> +#include <asm/switch_to.h> #include <linux/gfp.h> #include <linux/vmalloc.h> #include <linux/highmem.h> - -/* - * For now, limit memory to 64GB and require it to be large pages. - * This value is chosen because it makes the ram_pginfo array be - * 64kB in size, which is about as large as we want to be trying - * to allocate with kmalloc. - */ -#define MAX_MEM_ORDER 36 - -#define LARGE_PAGE_ORDER 24 /* 16MB pages */ +#include <linux/hugetlb.h> /* #define EXIT_DEBUG */ /* #define EXIT_DEBUG_SIMPLE */ /* #define EXIT_DEBUG_INT */ static void kvmppc_end_cede(struct kvm_vcpu *vcpu); +static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu); void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { @@ -146,10 +139,10 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, unsigned long vcpuid, unsigned long vpa) { struct kvm *kvm = vcpu->kvm; - unsigned long pg_index, ra, len; - unsigned long pg_offset; + unsigned long len, nb; void *va; struct kvm_vcpu *tvcpu; + int err = H_PARAMETER; tvcpu = kvmppc_find_vcpu(kvm, vcpuid); if (!tvcpu) @@ -162,45 +155,41 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, if (flags < 4) { if (vpa & 0x7f) return H_PARAMETER; + if (flags >= 2 && !tvcpu->arch.vpa) + return H_RESOURCE; /* registering new area; convert logical addr to real */ - pg_index = vpa >> kvm->arch.ram_porder; - pg_offset = vpa & (kvm->arch.ram_psize - 1); - if (pg_index >= kvm->arch.ram_npages) + va = kvmppc_pin_guest_page(kvm, vpa, &nb); + if (va == NULL) return H_PARAMETER; - if (kvm->arch.ram_pginfo[pg_index].pfn == 0) - return H_PARAMETER; - ra = kvm->arch.ram_pginfo[pg_index].pfn << PAGE_SHIFT; - ra |= pg_offset; - va = __va(ra); if (flags <= 1) len = *(unsigned short *)(va + 4); else len = *(unsigned int *)(va + 4); - if (pg_offset + len > kvm->arch.ram_psize) - return H_PARAMETER; + if (len > nb) + goto out_unpin; switch (flags) { case 1: /* register VPA */ if (len < 640) - return H_PARAMETER; + goto out_unpin; + if (tvcpu->arch.vpa) + kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa); tvcpu->arch.vpa = va; init_vpa(vcpu, va); break; case 2: /* register DTL */ if (len < 48) - return H_PARAMETER; - if (!tvcpu->arch.vpa) - return H_RESOURCE; + goto out_unpin; len -= len % 48; + if (tvcpu->arch.dtl) + kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl); tvcpu->arch.dtl = va; tvcpu->arch.dtl_end = va + len; break; case 3: /* register SLB shadow buffer */ - if (len < 8) - return H_PARAMETER; - if (!tvcpu->arch.vpa) - return H_RESOURCE; - tvcpu->arch.slb_shadow = va; - len = (len - 16) / 16; + if (len < 16) + goto out_unpin; + if (tvcpu->arch.slb_shadow) + kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow); tvcpu->arch.slb_shadow = va; break; } @@ -209,17 +198,30 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, case 5: /* unregister VPA */ if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl) return H_RESOURCE; + if (!tvcpu->arch.vpa) + break; + kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa); tvcpu->arch.vpa = NULL; break; case 6: /* unregister DTL */ + if 
(!tvcpu->arch.dtl) + break; + kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl); tvcpu->arch.dtl = NULL; break; case 7: /* unregister SLB shadow buffer */ + if (!tvcpu->arch.slb_shadow) + break; + kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow); tvcpu->arch.slb_shadow = NULL; break; } } return H_SUCCESS; + + out_unpin: + kvmppc_unpin_guest_page(kvm, va); + return err; } int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) @@ -229,6 +231,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) struct kvm_vcpu *tvcpu; switch (req) { + case H_ENTER: + ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6), + kvmppc_get_gpr(vcpu, 7)); + break; case H_CEDE: break; case H_PROD: @@ -318,20 +326,19 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, break; } /* - * We get these next two if the guest does a bad real-mode access, - * as we have enabled VRMA (virtualized real mode area) mode in the - * LPCR. We just generate an appropriate DSI/ISI to the guest. + * We get these next two if the guest accesses a page which it thinks + * it has mapped but which is not actually present, either because + * it is for an emulated I/O device or because the corresonding + * host page has been paged out. Any other HDSI/HISI interrupts + * have been handled already. */ case BOOK3S_INTERRUPT_H_DATA_STORAGE: - vcpu->arch.shregs.dsisr = vcpu->arch.fault_dsisr; - vcpu->arch.shregs.dar = vcpu->arch.fault_dar; - kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0); - r = RESUME_GUEST; + r = kvmppc_book3s_hv_page_fault(run, vcpu, + vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); break; case BOOK3S_INTERRUPT_H_INST_STORAGE: - kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, - 0x08000000); - r = RESUME_GUEST; + r = kvmppc_book3s_hv_page_fault(run, vcpu, + kvmppc_get_pc(vcpu), 0); break; /* * This occurs if the guest executes an illegal instruction. 
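To make the ownership rules of the new helpers easier to follow: kvmppc_pin_guest_page(), defined in the book3s_64_mmu_hv.c hunk above and used by the reworked do_h_register_vpa(), returns a host-kernel pointer into the guest page backing a guest physical address together with the number of usable bytes, and every successful call must be balanced by kvmppc_unpin_guest_page(). A minimal sketch of that pairing, with a hypothetical caller name:

static int example_touch_guest_page(struct kvm *kvm, unsigned long gpa)
{
	unsigned long nb;
	void *va;

	va = kvmppc_pin_guest_page(kvm, gpa, &nb);
	if (!va)
		return -EINVAL;

	/* ... access at most nb bytes starting at va ... */

	kvmppc_unpin_guest_page(kvm, va);
	return 0;
}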
@@ -391,6 +398,42 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, return 0; } +int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) +{ + int r = -EINVAL; + + switch (reg->id) { + case KVM_REG_PPC_HIOR: + r = put_user(0, (u64 __user *)reg->addr); + break; + default: + break; + } + + return r; +} + +int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) +{ + int r = -EINVAL; + + switch (reg->id) { + case KVM_REG_PPC_HIOR: + { + u64 hior; + /* Only allow this to be set to zero */ + r = get_user(hior, (u64 __user *)reg->addr); + if (!r && (hior != 0)) + r = -EINVAL; + break; + } + default: + break; + } + + return r; +} + int kvmppc_core_check_processor_compat(void) { if (cpu_has_feature(CPU_FTR_HVMODE)) @@ -410,7 +453,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) goto out; err = -ENOMEM; - vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); + vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); if (!vcpu) goto out; @@ -462,15 +505,21 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) return vcpu; free_vcpu: - kfree(vcpu); + kmem_cache_free(kvm_vcpu_cache, vcpu); out: return ERR_PTR(err); } void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) { + if (vcpu->arch.dtl) + kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl); + if (vcpu->arch.slb_shadow) + kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow); + if (vcpu->arch.vpa) + kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa); kvm_vcpu_uninit(vcpu); - kfree(vcpu); + kmem_cache_free(kvm_vcpu_cache, vcpu); } static void kvmppc_set_timer(struct kvm_vcpu *vcpu) @@ -481,7 +530,7 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu) if (now > vcpu->arch.dec_expires) { /* decrementer has already gone negative */ kvmppc_core_queue_dec(vcpu); - kvmppc_core_deliver_interrupts(vcpu); + kvmppc_core_prepare_to_enter(vcpu); return; } dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC @@ -796,7 +845,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) list_for_each_entry_safe(v, vn, &vc->runnable_threads, arch.run_list) { - kvmppc_core_deliver_interrupts(v); + kvmppc_core_prepare_to_enter(v); if (signal_pending(v->arch.run_task)) { kvmppc_remove_runnable(vc, v); v->stat.signal_exits++; @@ -835,20 +884,26 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) return -EINVAL; } + kvmppc_core_prepare_to_enter(vcpu); + /* No need to go into the guest when all we'll do is come back out */ if (signal_pending(current)) { run->exit_reason = KVM_EXIT_INTR; return -EINTR; } - /* On PPC970, check that we have an RMA region */ - if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201)) - return -EPERM; + /* On the first time here, set up VRMA or RMA */ + if (!vcpu->kvm->arch.rma_setup_done) { + r = kvmppc_hv_setup_rma(vcpu); + if (r) + return r; + } flush_fp_to_thread(current); flush_altivec_to_thread(current); flush_vsx_to_thread(current); vcpu->arch.wqp = &vcpu->arch.vcore->wq; + vcpu->arch.pgdir = current->mm->pgd; do { r = kvmppc_run_vcpu(run, vcpu); @@ -856,7 +911,7 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) if (run->exit_reason == KVM_EXIT_PAPR_HCALL && !(vcpu->arch.shregs.msr & MSR_PR)) { r = kvmppc_pseries_do_hcall(vcpu); - kvmppc_core_deliver_interrupts(vcpu); + kvmppc_core_prepare_to_enter(vcpu); } } while (r == RESUME_GUEST); return r; @@ -1000,7 +1055,7 @@ static inline int lpcr_rmls(unsigned long rma_size) static int kvm_rma_fault(struct vm_area_struct *vma, 
struct vm_fault *vmf) { - struct kvmppc_rma_info *ri = vma->vm_file->private_data; + struct kvmppc_linear_info *ri = vma->vm_file->private_data; struct page *page; if (vmf->pgoff >= ri->npages) @@ -1025,7 +1080,7 @@ static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma) static int kvm_rma_release(struct inode *inode, struct file *filp) { - struct kvmppc_rma_info *ri = filp->private_data; + struct kvmppc_linear_info *ri = filp->private_data; kvm_release_rma(ri); return 0; @@ -1038,7 +1093,7 @@ static struct file_operations kvm_rma_fops = { long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret) { - struct kvmppc_rma_info *ri; + struct kvmppc_linear_info *ri; long fd; ri = kvm_alloc_rma(); @@ -1053,89 +1108,189 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret) return fd; } -static struct page *hva_to_page(unsigned long addr) +/* + * Get (and clear) the dirty memory log for a memory slot. + */ +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { - struct page *page[1]; - int npages; + struct kvm_memory_slot *memslot; + int r; + unsigned long n; - might_sleep(); + mutex_lock(&kvm->slots_lock); - npages = get_user_pages_fast(addr, 1, 1, page); + r = -EINVAL; + if (log->slot >= KVM_MEMORY_SLOTS) + goto out; - if (unlikely(npages != 1)) - return 0; + memslot = id_to_memslot(kvm->memslots, log->slot); + r = -ENOENT; + if (!memslot->dirty_bitmap) + goto out; + + n = kvm_dirty_bitmap_bytes(memslot); + memset(memslot->dirty_bitmap, 0, n); + + r = kvmppc_hv_get_dirty_log(kvm, memslot); + if (r) + goto out; - return page[0]; + r = -EFAULT; + if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) + goto out; + + r = 0; +out: + mutex_unlock(&kvm->slots_lock); + return r; +} + +static unsigned long slb_pgsize_encoding(unsigned long psize) +{ + unsigned long senc = 0; + + if (psize > 0x1000) { + senc = SLB_VSID_L; + if (psize == 0x10000) + senc |= SLB_VSID_LP_01; + } + return senc; } int kvmppc_core_prepare_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem) { - unsigned long psize, porder; - unsigned long i, npages, totalpages; - unsigned long pg_ix; - struct kvmppc_pginfo *pginfo; - unsigned long hva; - struct kvmppc_rma_info *ri = NULL; + unsigned long npages; + unsigned long *phys; + + /* Allocate a slot_phys array */ + phys = kvm->arch.slot_phys[mem->slot]; + if (!kvm->arch.using_mmu_notifiers && !phys) { + npages = mem->memory_size >> PAGE_SHIFT; + phys = vzalloc(npages * sizeof(unsigned long)); + if (!phys) + return -ENOMEM; + kvm->arch.slot_phys[mem->slot] = phys; + kvm->arch.slot_npages[mem->slot] = npages; + } + + return 0; +} + +static void unpin_slot(struct kvm *kvm, int slot_id) +{ + unsigned long *physp; + unsigned long j, npages, pfn; struct page *page; - /* For now, only allow 16MB pages */ - porder = LARGE_PAGE_ORDER; - psize = 1ul << porder; - if ((mem->memory_size & (psize - 1)) || - (mem->guest_phys_addr & (psize - 1))) { - pr_err("bad memory_size=%llx @ %llx\n", - mem->memory_size, mem->guest_phys_addr); - return -EINVAL; + physp = kvm->arch.slot_phys[slot_id]; + npages = kvm->arch.slot_npages[slot_id]; + if (physp) { + spin_lock(&kvm->arch.slot_phys_lock); + for (j = 0; j < npages; j++) { + if (!(physp[j] & KVMPPC_GOT_PAGE)) + continue; + pfn = physp[j] >> PAGE_SHIFT; + page = pfn_to_page(pfn); + if (PageHuge(page)) + page = compound_head(page); + SetPageDirty(page); + put_page(page); + } + kvm->arch.slot_phys[slot_id] = NULL; + 
spin_unlock(&kvm->arch.slot_phys_lock); + vfree(physp); } +} - npages = mem->memory_size >> porder; - totalpages = (mem->guest_phys_addr + mem->memory_size) >> porder; +void kvmppc_core_commit_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem) +{ +} - /* More memory than we have space to track? */ - if (totalpages > (1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER))) - return -EINVAL; +static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu) +{ + int err = 0; + struct kvm *kvm = vcpu->kvm; + struct kvmppc_linear_info *ri = NULL; + unsigned long hva; + struct kvm_memory_slot *memslot; + struct vm_area_struct *vma; + unsigned long lpcr, senc; + unsigned long psize, porder; + unsigned long rma_size; + unsigned long rmls; + unsigned long *physp; + unsigned long i, npages; - /* Do we already have an RMA registered? */ - if (mem->guest_phys_addr == 0 && kvm->arch.rma) - return -EINVAL; + mutex_lock(&kvm->lock); + if (kvm->arch.rma_setup_done) + goto out; /* another vcpu beat us to it */ - if (totalpages > kvm->arch.ram_npages) - kvm->arch.ram_npages = totalpages; + /* Look up the memslot for guest physical address 0 */ + memslot = gfn_to_memslot(kvm, 0); + + /* We must have some memory at 0 by now */ + err = -EINVAL; + if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) + goto out; + + /* Look up the VMA for the start of this memory slot */ + hva = memslot->userspace_addr; + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, hva); + if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO)) + goto up_out; + + psize = vma_kernel_pagesize(vma); + porder = __ilog2(psize); /* Is this one of our preallocated RMAs? */ - if (mem->guest_phys_addr == 0) { - struct vm_area_struct *vma; - - down_read(¤t->mm->mmap_sem); - vma = find_vma(current->mm, mem->userspace_addr); - if (vma && vma->vm_file && - vma->vm_file->f_op == &kvm_rma_fops && - mem->userspace_addr == vma->vm_start) - ri = vma->vm_file->private_data; - up_read(¤t->mm->mmap_sem); - if (!ri && cpu_has_feature(CPU_FTR_ARCH_201)) { - pr_err("CPU requires an RMO\n"); - return -EINVAL; + if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops && + hva == vma->vm_start) + ri = vma->vm_file->private_data; + + up_read(¤t->mm->mmap_sem); + + if (!ri) { + /* On POWER7, use VRMA; on PPC970, give up */ + err = -EPERM; + if (cpu_has_feature(CPU_FTR_ARCH_201)) { + pr_err("KVM: CPU requires an RMO\n"); + goto out; } - } - if (ri) { - unsigned long rma_size; - unsigned long lpcr; - long rmls; + /* We can handle 4k, 64k or 16M pages in the VRMA */ + err = -EINVAL; + if (!(psize == 0x1000 || psize == 0x10000 || + psize == 0x1000000)) + goto out; + + /* Update VRMASD field in the LPCR */ + senc = slb_pgsize_encoding(psize); + kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | + (VRMA_VSID << SLB_VSID_SHIFT_1T); + lpcr = kvm->arch.lpcr & ~LPCR_VRMASD; + lpcr |= senc << (LPCR_VRMASD_SH - 4); + kvm->arch.lpcr = lpcr; - rma_size = ri->npages << PAGE_SHIFT; - if (rma_size > mem->memory_size) - rma_size = mem->memory_size; + /* Create HPTEs in the hash page table for the VRMA */ + kvmppc_map_vrma(vcpu, memslot, porder); + + } else { + /* Set up to use an RMO region */ + rma_size = ri->npages; + if (rma_size > memslot->npages) + rma_size = memslot->npages; + rma_size <<= PAGE_SHIFT; rmls = lpcr_rmls(rma_size); + err = -EINVAL; if (rmls < 0) { - pr_err("Can't use RMA of 0x%lx bytes\n", rma_size); - return -EINVAL; + pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size); + goto out; } atomic_inc(&ri->use_count); kvm->arch.rma = ri; - 
kvm->arch.n_rma_pages = rma_size >> porder; /* Update LPCR and RMOR */ lpcr = kvm->arch.lpcr; @@ -1155,53 +1310,35 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT; } kvm->arch.lpcr = lpcr; - pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n", + pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n", ri->base_pfn << PAGE_SHIFT, rma_size, lpcr); - } - pg_ix = mem->guest_phys_addr >> porder; - pginfo = kvm->arch.ram_pginfo + pg_ix; - for (i = 0; i < npages; ++i, ++pg_ix) { - if (ri && pg_ix < kvm->arch.n_rma_pages) { - pginfo[i].pfn = ri->base_pfn + - (pg_ix << (porder - PAGE_SHIFT)); - continue; - } - hva = mem->userspace_addr + (i << porder); - page = hva_to_page(hva); - if (!page) { - pr_err("oops, no pfn for hva %lx\n", hva); - goto err; - } - /* Check it's a 16MB page */ - if (!PageHead(page) || - compound_order(page) != (LARGE_PAGE_ORDER - PAGE_SHIFT)) { - pr_err("page at %lx isn't 16MB (o=%d)\n", - hva, compound_order(page)); - goto err; - } - pginfo[i].pfn = page_to_pfn(page); + /* Initialize phys addrs of pages in RMO */ + npages = ri->npages; + porder = __ilog2(npages); + physp = kvm->arch.slot_phys[memslot->id]; + spin_lock(&kvm->arch.slot_phys_lock); + for (i = 0; i < npages; ++i) + physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder; + spin_unlock(&kvm->arch.slot_phys_lock); } - return 0; - - err: - return -EINVAL; -} + /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */ + smp_wmb(); + kvm->arch.rma_setup_done = 1; + err = 0; + out: + mutex_unlock(&kvm->lock); + return err; -void kvmppc_core_commit_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem) -{ - if (mem->guest_phys_addr == 0 && mem->memory_size != 0 && - !kvm->arch.rma) - kvmppc_map_vrma(kvm, mem); + up_out: + up_read(¤t->mm->mmap_sem); + goto out; } int kvmppc_core_init_vm(struct kvm *kvm) { long r; - unsigned long npages = 1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER); - long err = -ENOMEM; unsigned long lpcr; /* Allocate hashed page table */ @@ -1211,19 +1348,7 @@ int kvmppc_core_init_vm(struct kvm *kvm) INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); - kvm->arch.ram_pginfo = kzalloc(npages * sizeof(struct kvmppc_pginfo), - GFP_KERNEL); - if (!kvm->arch.ram_pginfo) { - pr_err("kvmppc_core_init_vm: couldn't alloc %lu bytes\n", - npages * sizeof(struct kvmppc_pginfo)); - goto out_free; - } - - kvm->arch.ram_npages = 0; - kvm->arch.ram_psize = 1ul << LARGE_PAGE_ORDER; - kvm->arch.ram_porder = LARGE_PAGE_ORDER; kvm->arch.rma = NULL; - kvm->arch.n_rma_pages = 0; kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); @@ -1241,30 +1366,25 @@ int kvmppc_core_init_vm(struct kvm *kvm) kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); lpcr &= LPCR_PECE | LPCR_LPES; lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | - LPCR_VPM0 | LPCR_VRMA_L; + LPCR_VPM0 | LPCR_VPM1; + kvm->arch.vrma_slb_v = SLB_VSID_B_1T | + (VRMA_VSID << SLB_VSID_SHIFT_1T); } kvm->arch.lpcr = lpcr; + kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206); + spin_lock_init(&kvm->arch.slot_phys_lock); return 0; - - out_free: - kvmppc_free_hpt(kvm); - return err; } void kvmppc_core_destroy_vm(struct kvm *kvm) { - struct kvmppc_pginfo *pginfo; unsigned long i; - if (kvm->arch.ram_pginfo) { - pginfo = kvm->arch.ram_pginfo; - kvm->arch.ram_pginfo = NULL; - for (i = kvm->arch.n_rma_pages; i < kvm->arch.ram_npages; ++i) - if (pginfo[i].pfn) - put_page(pfn_to_page(pginfo[i].pfn)); - kfree(pginfo); - } + if (!kvm->arch.using_mmu_notifiers) + for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) + 
unpin_slot(kvm, i); + if (kvm->arch.rma) { kvm_release_rma(kvm->arch.rma); kvm->arch.rma = NULL; diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index a795a13f4a70..bed1279aa6a8 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -18,6 +18,15 @@ #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> +#define KVM_LINEAR_RMA 0 +#define KVM_LINEAR_HPT 1 + +static void __init kvm_linear_init_one(ulong size, int count, int type); +static struct kvmppc_linear_info *kvm_alloc_linear(int type); +static void kvm_release_linear(struct kvmppc_linear_info *ri); + +/*************** RMA *************/ + /* * This maintains a list of RMAs (real mode areas) for KVM guests to use. * Each RMA has to be physically contiguous and of a size that the @@ -29,32 +38,6 @@ static unsigned long kvm_rma_size = 64 << 20; /* 64MB */ static unsigned long kvm_rma_count; -static int __init early_parse_rma_size(char *p) -{ - if (!p) - return 1; - - kvm_rma_size = memparse(p, &p); - - return 0; -} -early_param("kvm_rma_size", early_parse_rma_size); - -static int __init early_parse_rma_count(char *p) -{ - if (!p) - return 1; - - kvm_rma_count = simple_strtoul(p, NULL, 0); - - return 0; -} -early_param("kvm_rma_count", early_parse_rma_count); - -static struct kvmppc_rma_info *rma_info; -static LIST_HEAD(free_rmas); -static DEFINE_SPINLOCK(rma_lock); - /* Work out RMLS (real mode limit selector) field value for a given RMA size. Assumes POWER7 or PPC970. */ static inline int lpcr_rmls(unsigned long rma_size) @@ -81,45 +64,106 @@ static inline int lpcr_rmls(unsigned long rma_size) } } +static int __init early_parse_rma_size(char *p) +{ + if (!p) + return 1; + + kvm_rma_size = memparse(p, &p); + + return 0; +} +early_param("kvm_rma_size", early_parse_rma_size); + +static int __init early_parse_rma_count(char *p) +{ + if (!p) + return 1; + + kvm_rma_count = simple_strtoul(p, NULL, 0); + + return 0; +} +early_param("kvm_rma_count", early_parse_rma_count); + +struct kvmppc_linear_info *kvm_alloc_rma(void) +{ + return kvm_alloc_linear(KVM_LINEAR_RMA); +} +EXPORT_SYMBOL_GPL(kvm_alloc_rma); + +void kvm_release_rma(struct kvmppc_linear_info *ri) +{ + kvm_release_linear(ri); +} +EXPORT_SYMBOL_GPL(kvm_release_rma); + +/*************** HPT *************/ + /* - * Called at boot time while the bootmem allocator is active, - * to allocate contiguous physical memory for the real memory - * areas for guests. + * This maintains a list of big linear HPT tables that contain the GVA->HPA + * memory mappings. If we don't reserve those early on, we might not be able + * to get a big (usually 16MB) linear memory region from the kernel anymore. 
*/ -void __init kvm_rma_init(void) + +static unsigned long kvm_hpt_count; + +static int __init early_parse_hpt_count(char *p) +{ + if (!p) + return 1; + + kvm_hpt_count = simple_strtoul(p, NULL, 0); + + return 0; +} +early_param("kvm_hpt_count", early_parse_hpt_count); + +struct kvmppc_linear_info *kvm_alloc_hpt(void) +{ + return kvm_alloc_linear(KVM_LINEAR_HPT); +} +EXPORT_SYMBOL_GPL(kvm_alloc_hpt); + +void kvm_release_hpt(struct kvmppc_linear_info *li) +{ + kvm_release_linear(li); +} +EXPORT_SYMBOL_GPL(kvm_release_hpt); + +/*************** generic *************/ + +static LIST_HEAD(free_linears); +static DEFINE_SPINLOCK(linear_lock); + +static void __init kvm_linear_init_one(ulong size, int count, int type) { unsigned long i; unsigned long j, npages; - void *rma; + void *linear; struct page *pg; + const char *typestr; + struct kvmppc_linear_info *linear_info; - /* Only do this on PPC970 in HV mode */ - if (!cpu_has_feature(CPU_FTR_HVMODE) || - !cpu_has_feature(CPU_FTR_ARCH_201)) - return; - - if (!kvm_rma_size || !kvm_rma_count) + if (!count) return; - /* Check that the requested size is one supported in hardware */ - if (lpcr_rmls(kvm_rma_size) < 0) { - pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size); - return; - } - - npages = kvm_rma_size >> PAGE_SHIFT; - rma_info = alloc_bootmem(kvm_rma_count * sizeof(struct kvmppc_rma_info)); - for (i = 0; i < kvm_rma_count; ++i) { - rma = alloc_bootmem_align(kvm_rma_size, kvm_rma_size); - pr_info("Allocated KVM RMA at %p (%ld MB)\n", rma, - kvm_rma_size >> 20); - rma_info[i].base_virt = rma; - rma_info[i].base_pfn = __pa(rma) >> PAGE_SHIFT; - rma_info[i].npages = npages; - list_add_tail(&rma_info[i].list, &free_rmas); - atomic_set(&rma_info[i].use_count, 0); - - pg = pfn_to_page(rma_info[i].base_pfn); + typestr = (type == KVM_LINEAR_RMA) ? 
"RMA" : "HPT"; + + npages = size >> PAGE_SHIFT; + linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info)); + for (i = 0; i < count; ++i) { + linear = alloc_bootmem_align(size, size); + pr_info("Allocated KVM %s at %p (%ld MB)\n", typestr, linear, + size >> 20); + linear_info[i].base_virt = linear; + linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT; + linear_info[i].npages = npages; + linear_info[i].type = type; + list_add_tail(&linear_info[i].list, &free_linears); + atomic_set(&linear_info[i].use_count, 0); + + pg = pfn_to_page(linear_info[i].base_pfn); for (j = 0; j < npages; ++j) { atomic_inc(&pg->_count); ++pg; @@ -127,30 +171,59 @@ void __init kvm_rma_init(void) } } -struct kvmppc_rma_info *kvm_alloc_rma(void) +static struct kvmppc_linear_info *kvm_alloc_linear(int type) { - struct kvmppc_rma_info *ri; + struct kvmppc_linear_info *ri; ri = NULL; - spin_lock(&rma_lock); - if (!list_empty(&free_rmas)) { - ri = list_first_entry(&free_rmas, struct kvmppc_rma_info, list); + spin_lock(&linear_lock); + list_for_each_entry(ri, &free_linears, list) { + if (ri->type != type) + continue; + list_del(&ri->list); atomic_inc(&ri->use_count); + break; } - spin_unlock(&rma_lock); + spin_unlock(&linear_lock); + memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT); return ri; } -EXPORT_SYMBOL_GPL(kvm_alloc_rma); -void kvm_release_rma(struct kvmppc_rma_info *ri) +static void kvm_release_linear(struct kvmppc_linear_info *ri) { if (atomic_dec_and_test(&ri->use_count)) { - spin_lock(&rma_lock); - list_add_tail(&ri->list, &free_rmas); - spin_unlock(&rma_lock); + spin_lock(&linear_lock); + list_add_tail(&ri->list, &free_linears); + spin_unlock(&linear_lock); } } -EXPORT_SYMBOL_GPL(kvm_release_rma); +/* + * Called at boot time while the bootmem allocator is active, + * to allocate contiguous physical memory for the hash page + * tables for guests. + */ +void __init kvm_linear_init(void) +{ + /* HPT */ + kvm_linear_init_one(1 << HPT_ORDER, kvm_hpt_count, KVM_LINEAR_HPT); + + /* RMA */ + /* Only do this on PPC970 in HV mode */ + if (!cpu_has_feature(CPU_FTR_HVMODE) || + !cpu_has_feature(CPU_FTR_ARCH_201)) + return; + + if (!kvm_rma_size || !kvm_rma_count) + return; + + /* Check that the requested size is one supported in hardware */ + if (lpcr_rmls(kvm_rma_size) < 0) { + pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size); + return; + } + + kvm_linear_init_one(kvm_rma_size, kvm_rma_count, KVM_LINEAR_RMA); +} diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index bacb0cfa3602..def880aea63a 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -11,6 +11,7 @@ #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/hugetlb.h> +#include <linux/module.h> #include <asm/tlbflush.h> #include <asm/kvm_ppc.h> @@ -20,95 +21,307 @@ #include <asm/synch.h> #include <asm/ppc-opcode.h> -/* For now use fixed-size 16MB page table */ -#define HPT_ORDER 24 -#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */ -#define HPT_HASH_MASK (HPT_NPTEG - 1) +/* Translate address of a vmalloc'd thing to a linear map address */ +static void *real_vmalloc_addr(void *x) +{ + unsigned long addr = (unsigned long) x; + pte_t *p; -#define HPTE_V_HVLOCK 0x40UL + p = find_linux_pte(swapper_pg_dir, addr); + if (!p || !pte_present(*p)) + return NULL; + /* assume we don't have huge pages in vmalloc space... 
*/ + addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK); + return __va(addr); +} -static inline long lock_hpte(unsigned long *hpte, unsigned long bits) +/* + * Add this HPTE into the chain for the real page. + * Must be called with the chain locked; it unlocks the chain. + */ +void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, + unsigned long *rmap, long pte_index, int realmode) { - unsigned long tmp, old; + struct revmap_entry *head, *tail; + unsigned long i; - asm volatile(" ldarx %0,0,%2\n" - " and. %1,%0,%3\n" - " bne 2f\n" - " ori %0,%0,%4\n" - " stdcx. %0,0,%2\n" - " beq+ 2f\n" - " li %1,%3\n" - "2: isync" - : "=&r" (tmp), "=&r" (old) - : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK) - : "cc", "memory"); - return old == 0; + if (*rmap & KVMPPC_RMAP_PRESENT) { + i = *rmap & KVMPPC_RMAP_INDEX; + head = &kvm->arch.revmap[i]; + if (realmode) + head = real_vmalloc_addr(head); + tail = &kvm->arch.revmap[head->back]; + if (realmode) + tail = real_vmalloc_addr(tail); + rev->forw = i; + rev->back = head->back; + tail->forw = pte_index; + head->back = pte_index; + } else { + rev->forw = rev->back = pte_index; + i = pte_index; + } + smp_wmb(); + *rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */ +} +EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain); + +/* Remove this HPTE from the chain for a real page */ +static void remove_revmap_chain(struct kvm *kvm, long pte_index, + struct revmap_entry *rev, + unsigned long hpte_v, unsigned long hpte_r) +{ + struct revmap_entry *next, *prev; + unsigned long gfn, ptel, head; + struct kvm_memory_slot *memslot; + unsigned long *rmap; + unsigned long rcbits; + + rcbits = hpte_r & (HPTE_R_R | HPTE_R_C); + ptel = rev->guest_rpte |= rcbits; + gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel)); + memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn); + if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) + return; + + rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]); + lock_rmap(rmap); + + head = *rmap & KVMPPC_RMAP_INDEX; + next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]); + prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]); + next->back = rev->back; + prev->forw = rev->forw; + if (head == pte_index) { + head = rev->forw; + if (head == pte_index) + *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX); + else + *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head; + } + *rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT; + unlock_rmap(rmap); +} + +static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva, + int writing, unsigned long *pte_sizep) +{ + pte_t *ptep; + unsigned long ps = *pte_sizep; + unsigned int shift; + + ptep = find_linux_pte_or_hugepte(vcpu->arch.pgdir, hva, &shift); + if (!ptep) + return __pte(0); + if (shift) + *pte_sizep = 1ul << shift; + else + *pte_sizep = PAGE_SIZE; + if (ps > *pte_sizep) + return __pte(0); + if (!pte_present(*ptep)) + return __pte(0); + return kvmppc_read_update_linux_pte(ptep, writing); +} + +static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v) +{ + asm volatile(PPC_RELEASE_BARRIER "" : : : "memory"); + hpte[0] = hpte_v; } long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, long pte_index, unsigned long pteh, unsigned long ptel) { - unsigned long porder; struct kvm *kvm = vcpu->kvm; - unsigned long i, lpn, pa; + unsigned long i, pa, gpa, gfn, psize; + unsigned long slot_fn, hva; unsigned long *hpte; + struct revmap_entry *rev; + unsigned long g_ptel = ptel; + struct kvm_memory_slot *memslot; + unsigned long *physp, pte_size; + 
unsigned long is_io; + unsigned long *rmap; + pte_t pte; + unsigned int writing; + unsigned long mmu_seq; + unsigned long rcbits; + bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING; - /* only handle 4k, 64k and 16M pages for now */ - porder = 12; - if (pteh & HPTE_V_LARGE) { - if (cpu_has_feature(CPU_FTR_ARCH_206) && - (ptel & 0xf000) == 0x1000) { - /* 64k page */ - porder = 16; - } else if ((ptel & 0xff000) == 0) { - /* 16M page */ - porder = 24; - /* lowest AVA bit must be 0 for 16M pages */ - if (pteh & 0x80) - return H_PARAMETER; - } else + psize = hpte_page_size(pteh, ptel); + if (!psize) + return H_PARAMETER; + writing = hpte_is_writable(ptel); + pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID); + + /* used later to detect if we might have been invalidated */ + mmu_seq = kvm->mmu_notifier_seq; + smp_rmb(); + + /* Find the memslot (if any) for this address */ + gpa = (ptel & HPTE_R_RPN) & ~(psize - 1); + gfn = gpa >> PAGE_SHIFT; + memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn); + pa = 0; + is_io = ~0ul; + rmap = NULL; + if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) { + /* PPC970 can't do emulated MMIO */ + if (!cpu_has_feature(CPU_FTR_ARCH_206)) return H_PARAMETER; + /* Emulated MMIO - mark this with key=31 */ + pteh |= HPTE_V_ABSENT; + ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO; + goto do_insert; } - lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder; - if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder) - return H_PARAMETER; - pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT; - if (!pa) + + /* Check if the requested page fits entirely in the memslot. */ + if (!slot_is_aligned(memslot, psize)) return H_PARAMETER; - /* Check WIMG */ - if ((ptel & HPTE_R_WIMG) != HPTE_R_M && - (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M)) + slot_fn = gfn - memslot->base_gfn; + rmap = &memslot->rmap[slot_fn]; + + if (!kvm->arch.using_mmu_notifiers) { + physp = kvm->arch.slot_phys[memslot->id]; + if (!physp) + return H_PARAMETER; + physp += slot_fn; + if (realmode) + physp = real_vmalloc_addr(physp); + pa = *physp; + if (!pa) + return H_TOO_HARD; + is_io = pa & (HPTE_R_I | HPTE_R_W); + pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK); + pa &= PAGE_MASK; + } else { + /* Translate to host virtual address */ + hva = gfn_to_hva_memslot(memslot, gfn); + + /* Look up the Linux PTE for the backing page */ + pte_size = psize; + pte = lookup_linux_pte(vcpu, hva, writing, &pte_size); + if (pte_present(pte)) { + if (writing && !pte_write(pte)) + /* make the actual HPTE be read-only */ + ptel = hpte_make_readonly(ptel); + is_io = hpte_cache_bits(pte_val(pte)); + pa = pte_pfn(pte) << PAGE_SHIFT; + } + } + if (pte_size < psize) return H_PARAMETER; - pteh &= ~0x60UL; - ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize); + if (pa && pte_size > psize) + pa |= gpa & (pte_size - 1); + + ptel &= ~(HPTE_R_PP0 - psize); ptel |= pa; - if (pte_index >= (HPT_NPTEG << 3)) + + if (pa) + pteh |= HPTE_V_VALID; + else + pteh |= HPTE_V_ABSENT; + + /* Check WIMG */ + if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) { + if (is_io) + return H_PARAMETER; + /* + * Allow guest to map emulated device memory as + * uncacheable, but actually make it cacheable. 
+ */ + ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G); + ptel |= HPTE_R_M; + } + + /* Find and lock the HPTEG slot to use */ + do_insert: + if (pte_index >= HPT_NPTE) return H_PARAMETER; if (likely((flags & H_EXACT) == 0)) { pte_index &= ~7UL; hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); - for (i = 0; ; ++i) { - if (i == 8) - return H_PTEG_FULL; + for (i = 0; i < 8; ++i) { if ((*hpte & HPTE_V_VALID) == 0 && - lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) + try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | + HPTE_V_ABSENT)) break; hpte += 2; } + if (i == 8) { + /* + * Since try_lock_hpte doesn't retry (not even stdcx. + * failures), it could be that there is a free slot + * but we transiently failed to lock it. Try again, + * actually locking each slot and checking it. + */ + hpte -= 16; + for (i = 0; i < 8; ++i) { + while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) + cpu_relax(); + if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT))) + break; + *hpte &= ~HPTE_V_HVLOCK; + hpte += 2; + } + if (i == 8) + return H_PTEG_FULL; + } + pte_index += i; } else { - i = 0; hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); - if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) - return H_PTEG_FULL; + if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | + HPTE_V_ABSENT)) { + /* Lock the slot and check again */ + while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) + cpu_relax(); + if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) { + *hpte &= ~HPTE_V_HVLOCK; + return H_PTEG_FULL; + } + } } + + /* Save away the guest's idea of the second HPTE dword */ + rev = &kvm->arch.revmap[pte_index]; + if (realmode) + rev = real_vmalloc_addr(rev); + if (rev) + rev->guest_rpte = g_ptel; + + /* Link HPTE into reverse-map chain */ + if (pteh & HPTE_V_VALID) { + if (realmode) + rmap = real_vmalloc_addr(rmap); + lock_rmap(rmap); + /* Check for pending invalidations under the rmap chain lock */ + if (kvm->arch.using_mmu_notifiers && + mmu_notifier_retry(vcpu, mmu_seq)) { + /* inval in progress, write a non-present HPTE */ + pteh |= HPTE_V_ABSENT; + pteh &= ~HPTE_V_VALID; + unlock_rmap(rmap); + } else { + kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index, + realmode); + /* Only set R/C in real HPTE if already set in *rmap */ + rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT; + ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C); + } + } + hpte[1] = ptel; + + /* Write the first HPTE dword, unlocking the HPTE and making it valid */ eieio(); hpte[0] = pteh; asm volatile("ptesync" : : : "memory"); - atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt); - vcpu->arch.gpr[4] = pte_index + i; + + vcpu->arch.gpr[4] = pte_index; return H_SUCCESS; } +EXPORT_SYMBOL_GPL(kvmppc_h_enter); #define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token)) @@ -137,37 +350,46 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, struct kvm *kvm = vcpu->kvm; unsigned long *hpte; unsigned long v, r, rb; + struct revmap_entry *rev; - if (pte_index >= (HPT_NPTEG << 3)) + if (pte_index >= HPT_NPTE) return H_PARAMETER; hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); - while (!lock_hpte(hpte, HPTE_V_HVLOCK)) + while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); - if ((hpte[0] & HPTE_V_VALID) == 0 || + if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) || ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) { hpte[0] &= ~HPTE_V_HVLOCK; return H_NOT_FOUND; } - if (atomic_read(&kvm->online_vcpus) == 1) - flags |= H_LOCAL; - vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK; - vcpu->arch.gpr[5] = r = 
hpte[1]; - rb = compute_tlbie_rb(v, r, pte_index); - hpte[0] = 0; - if (!(flags & H_LOCAL)) { - while(!try_lock_tlbie(&kvm->arch.tlbie_lock)) - cpu_relax(); - asm volatile("ptesync" : : : "memory"); - asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync" - : : "r" (rb), "r" (kvm->arch.lpid)); - asm volatile("ptesync" : : : "memory"); - kvm->arch.tlbie_lock = 0; - } else { - asm volatile("ptesync" : : : "memory"); - asm volatile("tlbiel %0" : : "r" (rb)); - asm volatile("ptesync" : : : "memory"); + + rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); + v = hpte[0] & ~HPTE_V_HVLOCK; + if (v & HPTE_V_VALID) { + hpte[0] &= ~HPTE_V_VALID; + rb = compute_tlbie_rb(v, hpte[1], pte_index); + if (!(flags & H_LOCAL) && atomic_read(&kvm->online_vcpus) > 1) { + while (!try_lock_tlbie(&kvm->arch.tlbie_lock)) + cpu_relax(); + asm volatile("ptesync" : : : "memory"); + asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync" + : : "r" (rb), "r" (kvm->arch.lpid)); + asm volatile("ptesync" : : : "memory"); + kvm->arch.tlbie_lock = 0; + } else { + asm volatile("ptesync" : : : "memory"); + asm volatile("tlbiel %0" : : "r" (rb)); + asm volatile("ptesync" : : : "memory"); + } + /* Read PTE low word after tlbie to get final R/C values */ + remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]); } + r = rev->guest_rpte; + unlock_hpte(hpte, 0); + + vcpu->arch.gpr[4] = v; + vcpu->arch.gpr[5] = r; return H_SUCCESS; } @@ -175,78 +397,117 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; unsigned long *args = &vcpu->arch.gpr[4]; - unsigned long *hp, tlbrb[4]; - long int i, found; - long int n_inval = 0; - unsigned long flags, req, pte_index; + unsigned long *hp, *hptes[4], tlbrb[4]; + long int i, j, k, n, found, indexes[4]; + unsigned long flags, req, pte_index, rcbits; long int local = 0; long int ret = H_SUCCESS; + struct revmap_entry *rev, *revs[4]; if (atomic_read(&kvm->online_vcpus) == 1) local = 1; - for (i = 0; i < 4; ++i) { - pte_index = args[i * 2]; - flags = pte_index >> 56; - pte_index &= ((1ul << 56) - 1); - req = flags >> 6; - flags &= 3; - if (req == 3) - break; - if (req != 1 || flags == 3 || - pte_index >= (HPT_NPTEG << 3)) { - /* parameter error */ - args[i * 2] = ((0xa0 | flags) << 56) + pte_index; - ret = H_PARAMETER; - break; - } - hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); - while (!lock_hpte(hp, HPTE_V_HVLOCK)) - cpu_relax(); - found = 0; - if (hp[0] & HPTE_V_VALID) { - switch (flags & 3) { - case 0: /* absolute */ - found = 1; + for (i = 0; i < 4 && ret == H_SUCCESS; ) { + n = 0; + for (; i < 4; ++i) { + j = i * 2; + pte_index = args[j]; + flags = pte_index >> 56; + pte_index &= ((1ul << 56) - 1); + req = flags >> 6; + flags &= 3; + if (req == 3) { /* no more requests */ + i = 4; break; - case 1: /* andcond */ - if (!(hp[0] & args[i * 2 + 1])) - found = 1; + } + if (req != 1 || flags == 3 || pte_index >= HPT_NPTE) { + /* parameter error */ + args[j] = ((0xa0 | flags) << 56) + pte_index; + ret = H_PARAMETER; break; - case 2: /* AVPN */ - if ((hp[0] & ~0x7fUL) == args[i * 2 + 1]) + } + hp = (unsigned long *) + (kvm->arch.hpt_virt + (pte_index << 4)); + /* to avoid deadlock, don't spin except for first */ + if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) { + if (n) + break; + while (!try_lock_hpte(hp, HPTE_V_HVLOCK)) + cpu_relax(); + } + found = 0; + if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) { + switch (flags & 3) { + case 0: /* absolute */ found = 1; - break; + break; + case 1: /* andcond */ + if (!(hp[0] & args[j + 1])) + found = 1; + break; + case 2: /* AVPN */ + 
if ((hp[0] & ~0x7fUL) == args[j + 1]) + found = 1; + break; + } + } + if (!found) { + hp[0] &= ~HPTE_V_HVLOCK; + args[j] = ((0x90 | flags) << 56) + pte_index; + continue; } + + args[j] = ((0x80 | flags) << 56) + pte_index; + rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); + + if (!(hp[0] & HPTE_V_VALID)) { + /* insert R and C bits from PTE */ + rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); + args[j] |= rcbits << (56 - 5); + continue; + } + + hp[0] &= ~HPTE_V_VALID; /* leave it locked */ + tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index); + indexes[n] = j; + hptes[n] = hp; + revs[n] = rev; + ++n; + } + + if (!n) + break; + + /* Now that we've collected a batch, do the tlbies */ + if (!local) { + while(!try_lock_tlbie(&kvm->arch.tlbie_lock)) + cpu_relax(); + asm volatile("ptesync" : : : "memory"); + for (k = 0; k < n; ++k) + asm volatile(PPC_TLBIE(%1,%0) : : + "r" (tlbrb[k]), + "r" (kvm->arch.lpid)); + asm volatile("eieio; tlbsync; ptesync" : : : "memory"); + kvm->arch.tlbie_lock = 0; + } else { + asm volatile("ptesync" : : : "memory"); + for (k = 0; k < n; ++k) + asm volatile("tlbiel %0" : : "r" (tlbrb[k])); + asm volatile("ptesync" : : : "memory"); } - if (!found) { - hp[0] &= ~HPTE_V_HVLOCK; - args[i * 2] = ((0x90 | flags) << 56) + pte_index; - continue; + + /* Read PTE low words after tlbie to get final R/C values */ + for (k = 0; k < n; ++k) { + j = indexes[k]; + pte_index = args[j] & ((1ul << 56) - 1); + hp = hptes[k]; + rev = revs[k]; + remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]); + rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); + args[j] |= rcbits << (56 - 5); + hp[0] = 0; } - /* insert R and C bits from PTE */ - flags |= (hp[1] >> 5) & 0x0c; - args[i * 2] = ((0x80 | flags) << 56) + pte_index; - tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index); - hp[0] = 0; - } - if (n_inval == 0) - return ret; - - if (!local) { - while(!try_lock_tlbie(&kvm->arch.tlbie_lock)) - cpu_relax(); - asm volatile("ptesync" : : : "memory"); - for (i = 0; i < n_inval; ++i) - asm volatile(PPC_TLBIE(%1,%0) - : : "r" (tlbrb[i]), "r" (kvm->arch.lpid)); - asm volatile("eieio; tlbsync; ptesync" : : : "memory"); - kvm->arch.tlbie_lock = 0; - } else { - asm volatile("ptesync" : : : "memory"); - for (i = 0; i < n_inval; ++i) - asm volatile("tlbiel %0" : : "r" (tlbrb[i])); - asm volatile("ptesync" : : : "memory"); } + return ret; } @@ -256,40 +517,55 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, { struct kvm *kvm = vcpu->kvm; unsigned long *hpte; - unsigned long v, r, rb; + struct revmap_entry *rev; + unsigned long v, r, rb, mask, bits; - if (pte_index >= (HPT_NPTEG << 3)) + if (pte_index >= HPT_NPTE) return H_PARAMETER; + hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); - while (!lock_hpte(hpte, HPTE_V_HVLOCK)) + while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); - if ((hpte[0] & HPTE_V_VALID) == 0 || + if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) { hpte[0] &= ~HPTE_V_HVLOCK; return H_NOT_FOUND; } + if (atomic_read(&kvm->online_vcpus) == 1) flags |= H_LOCAL; v = hpte[0]; - r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | - HPTE_R_KEY_HI | HPTE_R_KEY_LO); - r |= (flags << 55) & HPTE_R_PP0; - r |= (flags << 48) & HPTE_R_KEY_HI; - r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); - rb = compute_tlbie_rb(v, r, pte_index); - hpte[0] = v & ~HPTE_V_VALID; - if (!(flags & H_LOCAL)) { - while(!try_lock_tlbie(&kvm->arch.tlbie_lock)) - cpu_relax(); - asm volatile("ptesync" : : : 
"memory"); - asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync" - : : "r" (rb), "r" (kvm->arch.lpid)); - asm volatile("ptesync" : : : "memory"); - kvm->arch.tlbie_lock = 0; - } else { - asm volatile("ptesync" : : : "memory"); - asm volatile("tlbiel %0" : : "r" (rb)); - asm volatile("ptesync" : : : "memory"); + bits = (flags << 55) & HPTE_R_PP0; + bits |= (flags << 48) & HPTE_R_KEY_HI; + bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); + + /* Update guest view of 2nd HPTE dword */ + mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | + HPTE_R_KEY_HI | HPTE_R_KEY_LO; + rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); + if (rev) { + r = (rev->guest_rpte & ~mask) | bits; + rev->guest_rpte = r; + } + r = (hpte[1] & ~mask) | bits; + + /* Update HPTE */ + if (v & HPTE_V_VALID) { + rb = compute_tlbie_rb(v, r, pte_index); + hpte[0] = v & ~HPTE_V_VALID; + if (!(flags & H_LOCAL)) { + while(!try_lock_tlbie(&kvm->arch.tlbie_lock)) + cpu_relax(); + asm volatile("ptesync" : : : "memory"); + asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync" + : : "r" (rb), "r" (kvm->arch.lpid)); + asm volatile("ptesync" : : : "memory"); + kvm->arch.tlbie_lock = 0; + } else { + asm volatile("ptesync" : : : "memory"); + asm volatile("tlbiel %0" : : "r" (rb)); + asm volatile("ptesync" : : : "memory"); + } } hpte[1] = r; eieio(); @@ -298,40 +574,243 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, return H_SUCCESS; } -static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr) -{ - long int i; - unsigned long offset, rpn; - - offset = realaddr & (kvm->arch.ram_psize - 1); - rpn = (realaddr - offset) >> PAGE_SHIFT; - for (i = 0; i < kvm->arch.ram_npages; ++i) - if (rpn == kvm->arch.ram_pginfo[i].pfn) - return (i << PAGE_SHIFT) + offset; - return HPTE_R_RPN; /* all 1s in the RPN field */ -} - long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index) { struct kvm *kvm = vcpu->kvm; - unsigned long *hpte, r; + unsigned long *hpte, v, r; int i, n = 1; + struct revmap_entry *rev = NULL; - if (pte_index >= (HPT_NPTEG << 3)) + if (pte_index >= HPT_NPTE) return H_PARAMETER; if (flags & H_READ_4) { pte_index &= ~3; n = 4; } + rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]); for (i = 0; i < n; ++i, ++pte_index) { hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); + v = hpte[0] & ~HPTE_V_HVLOCK; r = hpte[1]; - if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID)) - r = reverse_xlate(kvm, r & HPTE_R_RPN) | - (r & ~HPTE_R_RPN); - vcpu->arch.gpr[4 + i * 2] = hpte[0]; + if (v & HPTE_V_ABSENT) { + v &= ~HPTE_V_ABSENT; + v |= HPTE_V_VALID; + } + if (v & HPTE_V_VALID) + r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C)); + vcpu->arch.gpr[4 + i * 2] = v; vcpu->arch.gpr[5 + i * 2] = r; } return H_SUCCESS; } + +void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep, + unsigned long pte_index) +{ + unsigned long rb; + + hptep[0] &= ~HPTE_V_VALID; + rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index); + while (!try_lock_tlbie(&kvm->arch.tlbie_lock)) + cpu_relax(); + asm volatile("ptesync" : : : "memory"); + asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync" + : : "r" (rb), "r" (kvm->arch.lpid)); + asm volatile("ptesync" : : : "memory"); + kvm->arch.tlbie_lock = 0; +} +EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte); + +void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep, + unsigned long pte_index) +{ + unsigned long rb; + unsigned char rbyte; + + rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index); + rbyte = (hptep[1] & ~HPTE_R_R) >> 8; + /* modify 
only the second-last byte, which contains the ref bit */ + *((char *)hptep + 14) = rbyte; + while (!try_lock_tlbie(&kvm->arch.tlbie_lock)) + cpu_relax(); + asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync" + : : "r" (rb), "r" (kvm->arch.lpid)); + asm volatile("ptesync" : : : "memory"); + kvm->arch.tlbie_lock = 0; +} +EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte); + +static int slb_base_page_shift[4] = { + 24, /* 16M */ + 16, /* 64k */ + 34, /* 16G */ + 20, /* 1M, unsupported */ +}; + +long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, + unsigned long valid) +{ + unsigned int i; + unsigned int pshift; + unsigned long somask; + unsigned long vsid, hash; + unsigned long avpn; + unsigned long *hpte; + unsigned long mask, val; + unsigned long v, r; + + /* Get page shift, work out hash and AVPN etc. */ + mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY; + val = 0; + pshift = 12; + if (slb_v & SLB_VSID_L) { + mask |= HPTE_V_LARGE; + val |= HPTE_V_LARGE; + pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4]; + } + if (slb_v & SLB_VSID_B_1T) { + somask = (1UL << 40) - 1; + vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T; + vsid ^= vsid << 25; + } else { + somask = (1UL << 28) - 1; + vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT; + } + hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK; + avpn = slb_v & ~(somask >> 16); /* also includes B */ + avpn |= (eaddr & somask) >> 16; + + if (pshift >= 24) + avpn &= ~((1UL << (pshift - 16)) - 1); + else + avpn &= ~0x7fUL; + val |= avpn; + + for (;;) { + hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7)); + + for (i = 0; i < 16; i += 2) { + /* Read the PTE racily */ + v = hpte[i] & ~HPTE_V_HVLOCK; + + /* Check valid/absent, hash, segment size and AVPN */ + if (!(v & valid) || (v & mask) != val) + continue; + + /* Lock the PTE and read it under the lock */ + while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK)) + cpu_relax(); + v = hpte[i] & ~HPTE_V_HVLOCK; + r = hpte[i+1]; + + /* + * Check the HPTE again, including large page size + * Since we don't currently allow any MPSS (mixed + * page-size segment) page sizes, it is sufficient + * to check against the actual page size. + */ + if ((v & valid) && (v & mask) == val && + hpte_page_size(v, r) == (1ul << pshift)) + /* Return with the HPTE still locked */ + return (hash << 3) + (i >> 1); + + /* Unlock and move on */ + hpte[i] = v; + } + + if (val & HPTE_V_SECONDARY) + break; + val |= HPTE_V_SECONDARY; + hash = hash ^ HPT_HASH_MASK; + } + return -1; +} +EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte); + +/* + * Called in real mode to check whether an HPTE not found fault + * is due to accessing a paged-out page or an emulated MMIO page, + * or if a protection fault is due to accessing a page that the + * guest wanted read/write access to but which we made read-only. + * Returns a possibly modified status (DSISR) value if not + * (i.e. pass the interrupt to the guest), + * -1 to pass the fault up to host kernel mode code, -2 to do that + * and also load the instruction word (for MMIO emulation), + * or 0 if we should make the guest retry the access. 
+ */ +long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr, + unsigned long slb_v, unsigned int status, bool data) +{ + struct kvm *kvm = vcpu->kvm; + long int index; + unsigned long v, r, gr; + unsigned long *hpte; + unsigned long valid; + struct revmap_entry *rev; + unsigned long pp, key; + + /* For protection fault, expect to find a valid HPTE */ + valid = HPTE_V_VALID; + if (status & DSISR_NOHPTE) + valid |= HPTE_V_ABSENT; + + index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid); + if (index < 0) { + if (status & DSISR_NOHPTE) + return status; /* there really was no HPTE */ + return 0; /* for prot fault, HPTE disappeared */ + } + hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); + v = hpte[0] & ~HPTE_V_HVLOCK; + r = hpte[1]; + rev = real_vmalloc_addr(&kvm->arch.revmap[index]); + gr = rev->guest_rpte; + + unlock_hpte(hpte, v); + + /* For not found, if the HPTE is valid by now, retry the instruction */ + if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID)) + return 0; + + /* Check access permissions to the page */ + pp = gr & (HPTE_R_PP0 | HPTE_R_PP); + key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS; + status &= ~DSISR_NOHPTE; /* DSISR_NOHPTE == SRR1_ISI_NOPT */ + if (!data) { + if (gr & (HPTE_R_N | HPTE_R_G)) + return status | SRR1_ISI_N_OR_G; + if (!hpte_read_permission(pp, slb_v & key)) + return status | SRR1_ISI_PROT; + } else if (status & DSISR_ISSTORE) { + /* check write permission */ + if (!hpte_write_permission(pp, slb_v & key)) + return status | DSISR_PROTFAULT; + } else { + if (!hpte_read_permission(pp, slb_v & key)) + return status | DSISR_PROTFAULT; + } + + /* Check storage key, if applicable */ + if (data && (vcpu->arch.shregs.msr & MSR_DR)) { + unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr); + if (status & DSISR_ISSTORE) + perm >>= 1; + if (perm & 1) + return status | DSISR_KEYFAULT; + } + + /* Save HPTE info for virtual-mode handler */ + vcpu->arch.pgfault_addr = addr; + vcpu->arch.pgfault_index = index; + vcpu->arch.pgfault_hpte[0] = v; + vcpu->arch.pgfault_hpte[1] = r; + + /* Check the storage key to see if it is possibly emulated MMIO */ + if (data && (vcpu->arch.shregs.msr & MSR_IR) && + (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) == + (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) + return -2; /* MMIO emulation - load instr word */ + + return -1; /* send fault up to host kernel mode */ +} diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 5c8b26183f50..b70bf22a3ff3 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -601,6 +601,30 @@ kvmppc_interrupt: stw r12,VCPU_TRAP(r9) + /* Save HEIR (HV emulation assist reg) in last_inst + if this is an HEI (HV emulation interrupt, e40) */ + li r3,KVM_INST_FETCH_FAILED +BEGIN_FTR_SECTION + cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST + bne 11f + mfspr r3,SPRN_HEIR +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) +11: stw r3,VCPU_LAST_INST(r9) + + /* these are volatile across C function calls */ + mfctr r3 + mfxer r4 + std r3, VCPU_CTR(r9) + stw r4, VCPU_XER(r9) + +BEGIN_FTR_SECTION + /* If this is a page table miss then see if it's theirs or ours */ + cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE + beq kvmppc_hdsi + cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE + beq kvmppc_hisi +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) + /* See if this is a leftover HDEC interrupt */ cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER bne 2f @@ -608,7 +632,7 @@ kvmppc_interrupt: cmpwi r3,0 bge ignore_hdec 2: - /* See if this is 
something we can handle in real mode */ + /* See if this is an hcall we can handle in real mode */ cmpwi r12,BOOK3S_INTERRUPT_SYSCALL beq hcall_try_real_mode @@ -624,6 +648,7 @@ BEGIN_FTR_SECTION 1: END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) +nohpte_cont: hcall_real_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ /* Save DEC */ mfspr r5,SPRN_DEC @@ -632,36 +657,21 @@ hcall_real_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ add r5,r5,r6 std r5,VCPU_DEC_EXPIRES(r9) - /* Save HEIR (HV emulation assist reg) in last_inst - if this is an HEI (HV emulation interrupt, e40) */ - li r3,-1 -BEGIN_FTR_SECTION - cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST - bne 11f - mfspr r3,SPRN_HEIR -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) -11: stw r3,VCPU_LAST_INST(r9) - /* Save more register state */ - mfxer r5 mfdar r6 mfdsisr r7 - mfctr r8 - - stw r5, VCPU_XER(r9) std r6, VCPU_DAR(r9) stw r7, VCPU_DSISR(r9) - std r8, VCPU_CTR(r9) - /* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */ BEGIN_FTR_SECTION + /* don't overwrite fault_dar/fault_dsisr if HDSI */ cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE beq 6f END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) -7: std r6, VCPU_FAULT_DAR(r9) + std r6, VCPU_FAULT_DAR(r9) stw r7, VCPU_FAULT_DSISR(r9) /* Save guest CTRL register, set runlatch to 1 */ - mfspr r6,SPRN_CTRLF +6: mfspr r6,SPRN_CTRLF stw r6,VCPU_CTRL(r9) andi. r0,r6,1 bne 4f @@ -1094,9 +1104,131 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) mtspr SPRN_HSRR1, r7 ba 0x500 -6: mfspr r6,SPRN_HDAR - mfspr r7,SPRN_HDSISR - b 7b +/* + * Check whether an HDSI is an HPTE not found fault or something else. + * If it is an HPTE not found fault that is due to the guest accessing + * a page that they have mapped but which we have paged out, then + * we continue on with the guest exit path. In all other cases, + * reflect the HDSI to the guest as a DSI. + */ +kvmppc_hdsi: + mfspr r4, SPRN_HDAR + mfspr r6, SPRN_HDSISR + /* HPTE not found fault or protection fault? */ + andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h + beq 1f /* if not, send it to the guest */ + andi. r0, r11, MSR_DR /* data relocation enabled? */ + beq 3f + clrrdi r0, r4, 28 + PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */ + bne 1f /* if no SLB entry found */ +4: std r4, VCPU_FAULT_DAR(r9) + stw r6, VCPU_FAULT_DSISR(r9) + + /* Search the hash table. */ + mr r3, r9 /* vcpu pointer */ + li r7, 1 /* data fault */ + bl .kvmppc_hpte_hv_fault + ld r9, HSTATE_KVM_VCPU(r13) + ld r10, VCPU_PC(r9) + ld r11, VCPU_MSR(r9) + li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE + cmpdi r3, 0 /* retry the instruction */ + beq 6f + cmpdi r3, -1 /* handle in kernel mode */ + beq nohpte_cont + cmpdi r3, -2 /* MMIO emulation; need instr word */ + beq 2f + + /* Synthesize a DSI for the guest */ + ld r4, VCPU_FAULT_DAR(r9) + mr r6, r3 +1: mtspr SPRN_DAR, r4 + mtspr SPRN_DSISR, r6 + mtspr SPRN_SRR0, r10 + mtspr SPRN_SRR1, r11 + li r10, BOOK3S_INTERRUPT_DATA_STORAGE + li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ + rotldi r11, r11, 63 +6: ld r7, VCPU_CTR(r9) + lwz r8, VCPU_XER(r9) + mtctr r7 + mtxer r8 + mr r4, r9 + b fast_guest_return + +3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */ + ld r5, KVM_VRMA_SLB_V(r5) + b 4b + + /* If this is for emulated MMIO, load the instruction word */ +2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */ + + /* Set guest mode to 'jump over instruction' so if lwz faults + * we'll just continue at the next IP. 
*/ + li r0, KVM_GUEST_MODE_SKIP + stb r0, HSTATE_IN_GUEST(r13) + + /* Do the access with MSR:DR enabled */ + mfmsr r3 + ori r4, r3, MSR_DR /* Enable paging for data */ + mtmsrd r4 + lwz r8, 0(r10) + mtmsrd r3 + + /* Store the result */ + stw r8, VCPU_LAST_INST(r9) + + /* Unset guest mode. */ + li r0, KVM_GUEST_MODE_NONE + stb r0, HSTATE_IN_GUEST(r13) + b nohpte_cont + +/* + * Similarly for an HISI, reflect it to the guest as an ISI unless + * it is an HPTE not found fault for a page that we have paged out. + */ +kvmppc_hisi: + andis. r0, r11, SRR1_ISI_NOPT@h + beq 1f + andi. r0, r11, MSR_IR /* instruction relocation enabled? */ + beq 3f + clrrdi r0, r10, 28 + PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */ + bne 1f /* if no SLB entry found */ +4: + /* Search the hash table. */ + mr r3, r9 /* vcpu pointer */ + mr r4, r10 + mr r6, r11 + li r7, 0 /* instruction fault */ + bl .kvmppc_hpte_hv_fault + ld r9, HSTATE_KVM_VCPU(r13) + ld r10, VCPU_PC(r9) + ld r11, VCPU_MSR(r9) + li r12, BOOK3S_INTERRUPT_H_INST_STORAGE + cmpdi r3, 0 /* retry the instruction */ + beq 6f + cmpdi r3, -1 /* handle in kernel mode */ + beq nohpte_cont + + /* Synthesize an ISI for the guest */ + mr r11, r3 +1: mtspr SPRN_SRR0, r10 + mtspr SPRN_SRR1, r11 + li r10, BOOK3S_INTERRUPT_INST_STORAGE + li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ + rotldi r11, r11, 63 +6: ld r7, VCPU_CTR(r9) + lwz r8, VCPU_XER(r9) + mtctr r7 + mtxer r8 + mr r4, r9 + b fast_guest_return + +3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ + ld r5, KVM_VRMA_SLB_V(r6) + b 4b /* * Try to handle an hcall in real mode. diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c index 7b0ee96c1bed..e70ef2d86431 100644 --- a/arch/powerpc/kvm/book3s_paired_singles.c +++ b/arch/powerpc/kvm/book3s_paired_singles.c @@ -196,7 +196,8 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_inject_pf(vcpu, addr, false); goto done_load; } else if (r == EMULATE_DO_MMIO) { - emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, len, 1); + emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs, + len, 1); goto done_load; } @@ -286,11 +287,13 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_inject_pf(vcpu, addr, false); goto done_load; } else if ((r == EMULATE_DO_MMIO) && w) { - emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, 4, 1); + emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs, + 4, 1); vcpu->arch.qpr[rs] = tmp[1]; goto done_load; } else if (r == EMULATE_DO_MMIO) { - emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FQPR | rs, 8, 1); + emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs, + 8, 1); goto done_load; } diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 220fcdf26978..7340e1090b77 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -51,15 +51,19 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, #define MSR_USER32 MSR_USER #define MSR_USER64 MSR_USER #define HW_PAGE_SIZE PAGE_SIZE +#define __hard_irq_disable local_irq_disable +#define __hard_irq_enable local_irq_enable #endif void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { #ifdef CONFIG_PPC_BOOK3S_64 - memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb)); + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); 
memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu, sizeof(get_paca()->shadow_vcpu)); - to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max; + svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; + svcpu_put(svcpu); #endif #ifdef CONFIG_PPC_BOOK3S_32 @@ -70,10 +74,12 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) { #ifdef CONFIG_PPC_BOOK3S_64 - memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb)); + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu, sizeof(get_paca()->shadow_vcpu)); - to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max; + to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; + svcpu_put(svcpu); #endif kvmppc_giveup_ext(vcpu, MSR_FP); @@ -151,14 +157,16 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) #ifdef CONFIG_PPC_BOOK3S_64 if ((pvr >= 0x330000) && (pvr < 0x70330000)) { kvmppc_mmu_book3s_64_init(vcpu); - to_book3s(vcpu)->hior = 0xfff00000; + if (!to_book3s(vcpu)->hior_explicit) + to_book3s(vcpu)->hior = 0xfff00000; to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; vcpu->arch.cpu_type = KVM_CPU_3S_64; } else #endif { kvmppc_mmu_book3s_32_init(vcpu); - to_book3s(vcpu)->hior = 0; + if (!to_book3s(vcpu)->hior_explicit) + to_book3s(vcpu)->hior = 0; to_book3s(vcpu)->msr_mask = 0xffffffffULL; vcpu->arch.cpu_type = KVM_CPU_3S_32; } @@ -308,19 +316,22 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, if (page_found == -ENOENT) { /* Page not found in guest PTE entries */ + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); - vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr; + vcpu->arch.shared->dsisr = svcpu->fault_dsisr; vcpu->arch.shared->msr |= - (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); + (svcpu->shadow_srr1 & 0x00000000f8000000ULL); + svcpu_put(svcpu); kvmppc_book3s_queue_irqprio(vcpu, vec); } else if (page_found == -EPERM) { /* Storage protection */ + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); - vcpu->arch.shared->dsisr = - to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE; + vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE; vcpu->arch.shared->dsisr |= DSISR_PROTFAULT; vcpu->arch.shared->msr |= - (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); + svcpu->shadow_srr1 & 0x00000000f8000000ULL; + svcpu_put(svcpu); kvmppc_book3s_queue_irqprio(vcpu, vec); } else if (page_found == -EINVAL) { /* Page not found in guest SLB */ @@ -517,24 +528,29 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, run->ready_for_interrupt_injection = 1; trace_kvm_book3s_exit(exit_nr, vcpu); + preempt_enable(); kvm_resched(vcpu); switch (exit_nr) { case BOOK3S_INTERRUPT_INST_STORAGE: + { + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + ulong shadow_srr1 = svcpu->shadow_srr1; vcpu->stat.pf_instruc++; #ifdef CONFIG_PPC_BOOK3S_32 /* We set segments as unused segments when invalidating them. So * treat the respective fault as segment fault. 
*/ - if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] - == SR_INVALID) { + if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) { kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); r = RESUME_GUEST; + svcpu_put(svcpu); break; } #endif + svcpu_put(svcpu); /* only care about PTEG not found errors, but leave NX alone */ - if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) { + if (shadow_srr1 & 0x40000000) { r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); vcpu->stat.sp_instruc++; } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && @@ -547,33 +563,37 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); r = RESUME_GUEST; } else { - vcpu->arch.shared->msr |= - to_svcpu(vcpu)->shadow_srr1 & 0x58000000; + vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000; kvmppc_book3s_queue_irqprio(vcpu, exit_nr); r = RESUME_GUEST; } break; + } case BOOK3S_INTERRUPT_DATA_STORAGE: { ulong dar = kvmppc_get_fault_dar(vcpu); + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + u32 fault_dsisr = svcpu->fault_dsisr; vcpu->stat.pf_storage++; #ifdef CONFIG_PPC_BOOK3S_32 /* We set segments as unused segments when invalidating them. So * treat the respective fault as segment fault. */ - if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) { + if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) { kvmppc_mmu_map_segment(vcpu, dar); r = RESUME_GUEST; + svcpu_put(svcpu); break; } #endif + svcpu_put(svcpu); /* The only case we need to handle is missing shadow PTEs */ - if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) { + if (fault_dsisr & DSISR_NOHPTE) { r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); } else { vcpu->arch.shared->dar = dar; - vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr; + vcpu->arch.shared->dsisr = fault_dsisr; kvmppc_book3s_queue_irqprio(vcpu, exit_nr); r = RESUME_GUEST; } @@ -609,10 +629,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, case BOOK3S_INTERRUPT_PROGRAM: { enum emulation_result er; + struct kvmppc_book3s_shadow_vcpu *svcpu; ulong flags; program_interrupt: - flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull; + svcpu = svcpu_get(vcpu); + flags = svcpu->shadow_srr1 & 0x1f0000ull; + svcpu_put(svcpu); if (vcpu->arch.shared->msr & MSR_PR) { #ifdef EXIT_DEBUG @@ -740,20 +763,33 @@ program_interrupt: r = RESUME_GUEST; break; default: + { + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + ulong shadow_srr1 = svcpu->shadow_srr1; + svcpu_put(svcpu); /* Ugh - bork here! What did we get? */ printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", - exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1); + exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); r = RESUME_HOST; BUG(); break; } - + } if (!(r & RESUME_HOST)) { /* To avoid clobbering exit_reason, only check for signals if * we aren't already exiting to userspace for some other * reason. */ + + /* + * Interrupts could be timers for the guest which we have to + * inject again, so let's postpone them until we're in the guest + * and if we really did time things so badly, then we just exit + * again due to a host external interrupt. 
+ */ + __hard_irq_disable(); if (signal_pending(current)) { + __hard_irq_enable(); #ifdef EXIT_DEBUG printk(KERN_EMERG "KVM: Going back to host\n"); #endif @@ -761,10 +797,12 @@ program_interrupt: run->exit_reason = KVM_EXIT_INTR; r = -EINTR; } else { + preempt_disable(); + /* In case an interrupt came in that was triggered * from userspace (like DEC), we need to check what * to inject now! */ - kvmppc_core_deliver_interrupts(vcpu); + kvmppc_core_prepare_to_enter(vcpu); } } @@ -836,6 +874,38 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, return 0; } +int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) +{ + int r = -EINVAL; + + switch (reg->id) { + case KVM_REG_PPC_HIOR: + r = put_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr); + break; + default: + break; + } + + return r; +} + +int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) +{ + int r = -EINVAL; + + switch (reg->id) { + case KVM_REG_PPC_HIOR: + r = get_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr); + if (!r) + to_book3s(vcpu)->hior_explicit = true; + break; + default: + break; + } + + return r; +} + int kvmppc_core_check_processor_compat(void) { return 0; @@ -923,16 +993,31 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) #endif ulong ext_msr; + preempt_disable(); + /* Check if we can run the vcpu at all */ if (!vcpu->arch.sane) { kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return -EINVAL; + ret = -EINVAL; + goto out; } + kvmppc_core_prepare_to_enter(vcpu); + + /* + * Interrupts could be timers for the guest which we have to inject + * again, so let's postpone them until we're in the guest and if we + * really did time things so badly, then we just exit again due to + * a host external interrupt. + */ + __hard_irq_disable(); + /* No need to go into the guest when all we do is going out */ if (signal_pending(current)) { + __hard_irq_enable(); kvm_run->exit_reason = KVM_EXIT_INTR; - return -EINTR; + ret = -EINTR; + goto out; } /* Save FPU state in stack */ @@ -974,8 +1059,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) kvm_guest_exit(); - local_irq_disable(); - current->thread.regs->msr = ext_msr; /* Make sure we save the guest FPU/Altivec/VSX state */ @@ -1002,9 +1085,50 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) current->thread.used_vsr = used_vsr; #endif +out: + preempt_enable(); return ret; } +/* + * Get (and clear) the dirty memory log for a memory slot. + */ +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, + struct kvm_dirty_log *log) +{ + struct kvm_memory_slot *memslot; + struct kvm_vcpu *vcpu; + ulong ga, ga_end; + int is_dirty = 0; + int r; + unsigned long n; + + mutex_lock(&kvm->slots_lock); + + r = kvm_get_dirty_log(kvm, log, &is_dirty); + if (r) + goto out; + + /* If nothing is dirty, don't bother messing with page tables. 
*/ + if (is_dirty) { + memslot = id_to_memslot(kvm->memslots, log->slot); + + ga = memslot->base_gfn << PAGE_SHIFT; + ga_end = ga + (memslot->npages << PAGE_SHIFT); + + kvm_for_each_vcpu(n, vcpu, kvm) + kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); + + n = kvm_dirty_bitmap_bytes(memslot); + memset(memslot->dirty_bitmap, 0, n); + } + + r = 0; +out: + mutex_unlock(&kvm->slots_lock); + return r; +} + int kvmppc_core_prepare_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem) { diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index bb6c988f010a..ee9e1ee9c858 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -124,12 +124,6 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) vcpu->arch.shared->msr = new_msr; kvmppc_mmu_msr_notify(vcpu, old_msr); - - if (vcpu->arch.shared->msr & MSR_WE) { - kvm_vcpu_block(vcpu); - kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); - }; - kvmppc_vcpu_sync_spe(vcpu); } @@ -258,9 +252,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, allowed = vcpu->arch.shared->msr & MSR_ME; msr_mask = 0; break; - case BOOKE_IRQPRIO_EXTERNAL: case BOOKE_IRQPRIO_DECREMENTER: case BOOKE_IRQPRIO_FIT: + keep_irq = true; + /* fall through */ + case BOOKE_IRQPRIO_EXTERNAL: allowed = vcpu->arch.shared->msr & MSR_EE; allowed = allowed && !crit; msr_mask = MSR_CE|MSR_ME|MSR_DE; @@ -276,7 +272,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, vcpu->arch.shared->srr1 = vcpu->arch.shared->msr; vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; if (update_esr == true) - vcpu->arch.esr = vcpu->arch.queued_esr; + vcpu->arch.shared->esr = vcpu->arch.queued_esr; if (update_dear == true) vcpu->arch.shared->dar = vcpu->arch.queued_dear; kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); @@ -288,13 +284,26 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, return allowed; } -/* Check pending exceptions and deliver one, if possible. */ -void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) +static void update_timer_ints(struct kvm_vcpu *vcpu) +{ + if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS)) + kvmppc_core_queue_dec(vcpu); + else + kvmppc_core_dequeue_dec(vcpu); +} + +static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu) { unsigned long *pending = &vcpu->arch.pending_exceptions; - unsigned long old_pending = vcpu->arch.pending_exceptions; unsigned int priority; + if (vcpu->requests) { + if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) { + smp_mb(); + update_timer_ints(vcpu); + } + } + priority = __ffs(*pending); while (priority <= BOOKE_IRQPRIO_MAX) { if (kvmppc_booke_irqprio_deliver(vcpu, priority)) @@ -306,10 +315,24 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) } /* Tell the guest about our interrupt status */ - if (*pending) - vcpu->arch.shared->int_pending = 1; - else if (old_pending) - vcpu->arch.shared->int_pending = 0; + vcpu->arch.shared->int_pending = !!*pending; +} + +/* Check pending exceptions and deliver one, if possible. 
*/ +void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) +{ + WARN_ON_ONCE(!irqs_disabled()); + + kvmppc_core_check_exceptions(vcpu); + + if (vcpu->arch.shared->msr & MSR_WE) { + local_irq_enable(); + kvm_vcpu_block(vcpu); + local_irq_disable(); + + kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); + kvmppc_core_check_exceptions(vcpu); + }; } int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) @@ -322,11 +345,21 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) } local_irq_disable(); + + kvmppc_core_prepare_to_enter(vcpu); + + if (signal_pending(current)) { + kvm_run->exit_reason = KVM_EXIT_INTR; + ret = -EINTR; + goto out; + } + kvm_guest_enter(); ret = __kvmppc_vcpu_run(kvm_run, vcpu); kvm_guest_exit(); - local_irq_enable(); +out: + local_irq_enable(); return ret; } @@ -603,7 +636,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, local_irq_disable(); - kvmppc_core_deliver_interrupts(vcpu); + kvmppc_core_prepare_to_enter(vcpu); if (!(r & RESUME_HOST)) { /* To avoid clobbering exit_reason, only check for signals if @@ -628,6 +661,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.pc = 0; vcpu->arch.shared->msr = 0; vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; + vcpu->arch.shared->pir = vcpu->vcpu_id; kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ vcpu->arch.shadow_pid = 1; @@ -662,10 +696,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) regs->sprg1 = vcpu->arch.shared->sprg1; regs->sprg2 = vcpu->arch.shared->sprg2; regs->sprg3 = vcpu->arch.shared->sprg3; - regs->sprg4 = vcpu->arch.sprg4; - regs->sprg5 = vcpu->arch.sprg5; - regs->sprg6 = vcpu->arch.sprg6; - regs->sprg7 = vcpu->arch.sprg7; + regs->sprg4 = vcpu->arch.shared->sprg4; + regs->sprg5 = vcpu->arch.shared->sprg5; + regs->sprg6 = vcpu->arch.shared->sprg6; + regs->sprg7 = vcpu->arch.shared->sprg7; for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) regs->gpr[i] = kvmppc_get_gpr(vcpu, i); @@ -690,10 +724,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) vcpu->arch.shared->sprg1 = regs->sprg1; vcpu->arch.shared->sprg2 = regs->sprg2; vcpu->arch.shared->sprg3 = regs->sprg3; - vcpu->arch.sprg4 = regs->sprg4; - vcpu->arch.sprg5 = regs->sprg5; - vcpu->arch.sprg6 = regs->sprg6; - vcpu->arch.sprg7 = regs->sprg7; + vcpu->arch.shared->sprg4 = regs->sprg4; + vcpu->arch.shared->sprg5 = regs->sprg5; + vcpu->arch.shared->sprg6 = regs->sprg6; + vcpu->arch.shared->sprg7 = regs->sprg7; for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) kvmppc_set_gpr(vcpu, i, regs->gpr[i]); @@ -711,7 +745,7 @@ static void get_sregs_base(struct kvm_vcpu *vcpu, sregs->u.e.csrr0 = vcpu->arch.csrr0; sregs->u.e.csrr1 = vcpu->arch.csrr1; sregs->u.e.mcsr = vcpu->arch.mcsr; - sregs->u.e.esr = vcpu->arch.esr; + sregs->u.e.esr = vcpu->arch.shared->esr; sregs->u.e.dear = vcpu->arch.shared->dar; sregs->u.e.tsr = vcpu->arch.tsr; sregs->u.e.tcr = vcpu->arch.tcr; @@ -729,28 +763,19 @@ static int set_sregs_base(struct kvm_vcpu *vcpu, vcpu->arch.csrr0 = sregs->u.e.csrr0; vcpu->arch.csrr1 = sregs->u.e.csrr1; vcpu->arch.mcsr = sregs->u.e.mcsr; - vcpu->arch.esr = sregs->u.e.esr; + vcpu->arch.shared->esr = sregs->u.e.esr; vcpu->arch.shared->dar = sregs->u.e.dear; vcpu->arch.vrsave = sregs->u.e.vrsave; - vcpu->arch.tcr = sregs->u.e.tcr; + kvmppc_set_tcr(vcpu, sregs->u.e.tcr); - if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) + if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) { vcpu->arch.dec = 
sregs->u.e.dec; - - kvmppc_emulate_dec(vcpu); + kvmppc_emulate_dec(vcpu); + } if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) { - /* - * FIXME: existing KVM timer handling is incomplete. - * TSR cannot be read by the guest, and its value in - * vcpu->arch is always zero. For now, just handle - * the case where the caller is trying to inject a - * decrementer interrupt. - */ - - if ((sregs->u.e.tsr & TSR_DIS) && - (vcpu->arch.tcr & TCR_DIE)) - kvmppc_core_queue_dec(vcpu); + vcpu->arch.tsr = sregs->u.e.tsr; + update_timer_ints(vcpu); } return 0; @@ -761,7 +786,7 @@ static void get_sregs_arch206(struct kvm_vcpu *vcpu, { sregs->u.e.features |= KVM_SREGS_E_ARCH206; - sregs->u.e.pir = 0; + sregs->u.e.pir = vcpu->vcpu_id; sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0; sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1; sregs->u.e.decar = vcpu->arch.decar; @@ -774,7 +799,7 @@ static int set_sregs_arch206(struct kvm_vcpu *vcpu, if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206)) return 0; - if (sregs->u.e.pir != 0) + if (sregs->u.e.pir != vcpu->vcpu_id) return -EINVAL; vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0; @@ -862,6 +887,16 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, return kvmppc_core_set_sregs(vcpu, sregs); } +int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) +{ + return -EINVAL; +} + +int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) +{ + return -EINVAL; +} + int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -ENOTSUPP; @@ -906,6 +941,33 @@ void kvmppc_core_destroy_vm(struct kvm *kvm) { } +void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr) +{ + vcpu->arch.tcr = new_tcr; + update_timer_ints(vcpu); +} + +void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits) +{ + set_bits(tsr_bits, &vcpu->arch.tsr); + smp_wmb(); + kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); + kvm_vcpu_kick(vcpu); +} + +void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits) +{ + clear_bits(tsr_bits, &vcpu->arch.tsr); + update_timer_ints(vcpu); +} + +void kvmppc_decrementer_func(unsigned long data) +{ + struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; + + kvmppc_set_tsr_bits(vcpu, TSR_DIS); +} + int __init kvmppc_booke_init(void) { unsigned long ivor[16]; diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index 8e1fe33d64e5..2fe202705a3f 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h @@ -55,6 +55,10 @@ extern unsigned long kvmppc_booke_handlers; void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr); void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr); +void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr); +void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); +void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); + int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance); int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index 1260f5f24c0c..3e652da36534 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c @@ -13,6 +13,7 @@ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright IBM Corp. 2008 + * Copyright 2011 Freescale Semiconductor, Inc. 
* * Authors: Hollis Blanchard <hollisb@us.ibm.com> */ @@ -107,7 +108,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) case SPRN_DEAR: vcpu->arch.shared->dar = spr_val; break; case SPRN_ESR: - vcpu->arch.esr = spr_val; break; + vcpu->arch.shared->esr = spr_val; break; case SPRN_DBCR0: vcpu->arch.dbcr0 = spr_val; break; case SPRN_DBCR1: @@ -115,23 +116,23 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) case SPRN_DBSR: vcpu->arch.dbsr &= ~spr_val; break; case SPRN_TSR: - vcpu->arch.tsr &= ~spr_val; break; + kvmppc_clr_tsr_bits(vcpu, spr_val); + break; case SPRN_TCR: - vcpu->arch.tcr = spr_val; - kvmppc_emulate_dec(vcpu); + kvmppc_set_tcr(vcpu, spr_val); break; /* Note: SPRG4-7 are user-readable. These values are * loaded into the real SPRGs when resuming the * guest. */ case SPRN_SPRG4: - vcpu->arch.sprg4 = spr_val; break; + vcpu->arch.shared->sprg4 = spr_val; break; case SPRN_SPRG5: - vcpu->arch.sprg5 = spr_val; break; + vcpu->arch.shared->sprg5 = spr_val; break; case SPRN_SPRG6: - vcpu->arch.sprg6 = spr_val; break; + vcpu->arch.shared->sprg6 = spr_val; break; case SPRN_SPRG7: - vcpu->arch.sprg7 = spr_val; break; + vcpu->arch.shared->sprg7 = spr_val; break; case SPRN_IVPR: vcpu->arch.ivpr = spr_val; @@ -202,13 +203,17 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) case SPRN_DEAR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break; case SPRN_ESR: - kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break; case SPRN_DBCR0: kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break; case SPRN_DBCR1: kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break; case SPRN_DBSR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break; + case SPRN_TSR: + kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break; + case SPRN_TCR: + kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break; case SPRN_IVOR0: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]); diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 42f2fb1f66e9..10d8ef602e5c 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S @@ -402,19 +402,25 @@ lightweight_exit: /* Save vcpu pointer for the exception handlers. */ mtspr SPRN_SPRG_WVCPU, r4 + lwz r5, VCPU_SHARED(r4) + /* Can't switch the stack pointer until after IVPR is switched, * because host interrupt handlers would get confused. */ lwz r1, VCPU_GPR(r1)(r4) - /* Host interrupt handlers may have clobbered these guest-readable - * SPRGs, so we need to reload them here with the guest's values. */ - lwz r3, VCPU_SPRG4(r4) + /* + * Host interrupt handlers may have clobbered these + * guest-readable SPRGs, or the guest kernel may have + * written directly to the shared area, so we + * need to reload them here with the guest's values. 
+ */ + lwz r3, VCPU_SHARED_SPRG4(r5) mtspr SPRN_SPRG4W, r3 - lwz r3, VCPU_SPRG5(r4) + lwz r3, VCPU_SHARED_SPRG5(r5) mtspr SPRN_SPRG5W, r3 - lwz r3, VCPU_SPRG6(r4) + lwz r3, VCPU_SHARED_SPRG6(r5) mtspr SPRN_SPRG6W, r3 - lwz r3, VCPU_SPRG7(r4) + lwz r3, VCPU_SHARED_SPRG7(r5) mtspr SPRN_SPRG7W, r3 #ifdef CONFIG_KVM_EXIT_TIMING diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index 8c0d45a6faf7..ddcd896fa2ff 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c @@ -71,9 +71,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.pvr = mfspr(SPRN_PVR); vcpu_e500->svr = mfspr(SPRN_SVR); - /* Since booke kvm only support one core, update all vcpus' PIR to 0 */ - vcpu->vcpu_id = 0; - vcpu->arch.cpu_type = KVM_CPU_E500V2; return 0; @@ -118,12 +115,12 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; - sregs->u.e.mas0 = vcpu_e500->mas0; - sregs->u.e.mas1 = vcpu_e500->mas1; - sregs->u.e.mas2 = vcpu_e500->mas2; - sregs->u.e.mas7_3 = ((u64)vcpu_e500->mas7 << 32) | vcpu_e500->mas3; - sregs->u.e.mas4 = vcpu_e500->mas4; - sregs->u.e.mas6 = vcpu_e500->mas6; + sregs->u.e.mas0 = vcpu->arch.shared->mas0; + sregs->u.e.mas1 = vcpu->arch.shared->mas1; + sregs->u.e.mas2 = vcpu->arch.shared->mas2; + sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3; + sregs->u.e.mas4 = vcpu->arch.shared->mas4; + sregs->u.e.mas6 = vcpu->arch.shared->mas6; sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG); sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg; @@ -151,13 +148,12 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) } if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) { - vcpu_e500->mas0 = sregs->u.e.mas0; - vcpu_e500->mas1 = sregs->u.e.mas1; - vcpu_e500->mas2 = sregs->u.e.mas2; - vcpu_e500->mas7 = sregs->u.e.mas7_3 >> 32; - vcpu_e500->mas3 = (u32)sregs->u.e.mas7_3; - vcpu_e500->mas4 = sregs->u.e.mas4; - vcpu_e500->mas6 = sregs->u.e.mas6; + vcpu->arch.shared->mas0 = sregs->u.e.mas0; + vcpu->arch.shared->mas1 = sregs->u.e.mas1; + vcpu->arch.shared->mas2 = sregs->u.e.mas2; + vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3; + vcpu->arch.shared->mas4 = sregs->u.e.mas4; + vcpu->arch.shared->mas6 = sregs->u.e.mas6; } if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) @@ -233,6 +229,10 @@ static int __init kvmppc_e500_init(void) unsigned long ivor[3]; unsigned long max_ivor = 0; + r = kvmppc_core_check_processor_compat(); + if (r) + return r; + r = kvmppc_booke_init(); if (r) return r; diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index d48ae396f41e..6d0b2bd54fb0 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -89,19 +89,23 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) return EMULATE_FAIL; vcpu_e500->pid[2] = spr_val; break; case SPRN_MAS0: - vcpu_e500->mas0 = spr_val; break; + vcpu->arch.shared->mas0 = spr_val; break; case SPRN_MAS1: - vcpu_e500->mas1 = spr_val; break; + vcpu->arch.shared->mas1 = spr_val; break; case SPRN_MAS2: - vcpu_e500->mas2 = spr_val; break; + vcpu->arch.shared->mas2 = spr_val; break; case SPRN_MAS3: - vcpu_e500->mas3 = spr_val; break; + vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff; + vcpu->arch.shared->mas7_3 |= spr_val; + break; case SPRN_MAS4: - vcpu_e500->mas4 = spr_val; break; + vcpu->arch.shared->mas4 = spr_val; break; case SPRN_MAS6: - vcpu_e500->mas6 = spr_val; break; + vcpu->arch.shared->mas6 = spr_val; break; case SPRN_MAS7: - vcpu_e500->mas7 = spr_val; 
break; + vcpu->arch.shared->mas7_3 &= (u64)0xffffffff; + vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32; + break; case SPRN_L1CSR0: vcpu_e500->l1csr0 = spr_val; vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); @@ -143,6 +147,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int emulated = EMULATE_DONE; + unsigned long val; switch (sprn) { case SPRN_PID: @@ -152,20 +157,23 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) case SPRN_PID2: kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break; case SPRN_MAS0: - kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break; case SPRN_MAS1: - kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break; case SPRN_MAS2: - kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break; case SPRN_MAS3: - kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break; + val = (u32)vcpu->arch.shared->mas7_3; + kvmppc_set_gpr(vcpu, rt, val); + break; case SPRN_MAS4: - kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break; case SPRN_MAS6: - kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break; + kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break; case SPRN_MAS7: - kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break; - + val = vcpu->arch.shared->mas7_3 >> 32; + kvmppc_set_gpr(vcpu, rt, val); + break; case SPRN_TLB0CFG: kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break; case SPRN_TLB1CFG: diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index 13c432ea2fa8..6e53e4164de1 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c @@ -12,12 +12,19 @@ * published by the Free Software Foundation. */ +#include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/highmem.h> +#include <linux/log2.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/rwsem.h> +#include <linux/vmalloc.h> +#include <linux/hugetlb.h> #include <asm/kvm_ppc.h> #include <asm/kvm_e500.h> @@ -26,7 +33,7 @@ #include "trace.h" #include "timing.h" -#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1) +#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) struct id { unsigned long val; @@ -63,7 +70,14 @@ static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids); * The valid range of shadow ID is [1..255] */ static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid); -static unsigned int tlb1_entry_num; +static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; + +static struct kvm_book3e_206_tlb_entry *get_entry( + struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry) +{ + int offset = vcpu_e500->gtlb_offset[tlbsel]; + return &vcpu_e500->gtlb_arch[offset + entry]; +} /* * Allocate a free shadow id and setup a valid sid mapping in given entry. 
@@ -116,13 +130,11 @@ static inline int local_sid_lookup(struct id *entry) return -1; } -/* Invalidate all id mappings on local core */ +/* Invalidate all id mappings on local core -- call with preempt disabled */ static inline void local_sid_destroy_all(void) { - preempt_disable(); __get_cpu_var(pcpu_last_used_sid) = 0; memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids))); - preempt_enable(); } static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) @@ -218,34 +230,13 @@ void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500) preempt_enable(); } -void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) -{ - struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - struct tlbe *tlbe; - int i, tlbsel; - - printk("| %8s | %8s | %8s | %8s | %8s |\n", - "nr", "mas1", "mas2", "mas3", "mas7"); - - for (tlbsel = 0; tlbsel < 2; tlbsel++) { - printk("Guest TLB%d:\n", tlbsel); - for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) { - tlbe = &vcpu_e500->gtlb_arch[tlbsel][i]; - if (tlbe->mas1 & MAS1_VALID) - printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n", - tlbsel, i, tlbe->mas1, tlbe->mas2, - tlbe->mas3, tlbe->mas7); - } - } -} - -static inline unsigned int tlb0_get_next_victim( +static inline unsigned int gtlb0_get_next_victim( struct kvmppc_vcpu_e500 *vcpu_e500) { unsigned int victim; victim = vcpu_e500->gtlb_nv[0]++; - if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM)) + if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways)) vcpu_e500->gtlb_nv[0] = 0; return victim; @@ -254,12 +245,12 @@ static inline unsigned int tlb0_get_next_victim( static inline unsigned int tlb1_max_shadow_size(void) { /* reserve one entry for magic page */ - return tlb1_entry_num - tlbcam_index - 1; + return host_tlb_params[1].entries - tlbcam_index - 1; } -static inline int tlbe_is_writable(struct tlbe *tlbe) +static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) { - return tlbe->mas3 & (MAS3_SW|MAS3_UW); + return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); } static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) @@ -290,40 +281,66 @@ static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) /* * writing shadow tlb entry to host TLB */ -static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0) +static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, + uint32_t mas0) { unsigned long flags; local_irq_save(flags); mtspr(SPRN_MAS0, mas0); mtspr(SPRN_MAS1, stlbe->mas1); - mtspr(SPRN_MAS2, stlbe->mas2); - mtspr(SPRN_MAS3, stlbe->mas3); - mtspr(SPRN_MAS7, stlbe->mas7); + mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); + mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); + mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); asm volatile("isync; tlbwe" : : : "memory"); local_irq_restore(flags); + + trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, + stlbe->mas2, stlbe->mas7_3); +} + +/* + * Acquire a mas0 with victim hint, as if we just took a TLB miss. + * + * We don't care about the address we're searching for, other than that it's + * in the right set and is not present in the TLB. Using a zero PID and a + * userspace address means we don't have to set and then restore MAS5, or + * calculate a proper MAS6 value. 
+ */ +static u32 get_host_mas0(unsigned long eaddr) +{ + unsigned long flags; + u32 mas0; + + local_irq_save(flags); + mtspr(SPRN_MAS6, 0); + asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); + mas0 = mfspr(SPRN_MAS0); + local_irq_restore(flags); + + return mas0; } +/* sesel is for tlb1 only */ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, - int tlbsel, int esel, struct tlbe *stlbe) + int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe) { + u32 mas0; + if (tlbsel == 0) { - __write_host_tlbe(stlbe, - MAS0_TLBSEL(0) | - MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1))); + mas0 = get_host_mas0(stlbe->mas2); + __write_host_tlbe(stlbe, mas0); } else { __write_host_tlbe(stlbe, MAS0_TLBSEL(1) | - MAS0_ESEL(to_htlb1_esel(esel))); + MAS0_ESEL(to_htlb1_esel(sesel))); } - trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, - stlbe->mas3, stlbe->mas7); } void kvmppc_map_magic(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - struct tlbe magic; + struct kvm_book3e_206_tlb_entry magic; ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; unsigned int stid; pfn_t pfn; @@ -337,9 +354,9 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu) magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) | MAS1_TSIZE(BOOK3E_PAGESZ_4K); magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; - magic.mas3 = (pfn << PAGE_SHIFT) | - MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR; - magic.mas7 = pfn >> (32 - PAGE_SHIFT); + magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) | + MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR; + magic.mas8 = 0; __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); preempt_enable(); @@ -357,10 +374,11 @@ void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu) { } -static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, - int tlbsel, int esel) +static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, + int tlbsel, int esel) { - struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel]; + struct kvm_book3e_206_tlb_entry *gtlbe = + get_entry(vcpu_e500, tlbsel, esel); struct vcpu_id_table *idt = vcpu_e500->idt; unsigned int pr, tid, ts, pid; u32 val, eaddr; @@ -414,25 +432,57 @@ static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, preempt_enable(); } +static int tlb0_set_base(gva_t addr, int sets, int ways) +{ + int set_base; + + set_base = (addr >> PAGE_SHIFT) & (sets - 1); + set_base *= ways; + + return set_base; +} + +static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr) +{ + return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets, + vcpu_e500->gtlb_params[0].ways); +} + +static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + int esel = get_tlb_esel_bit(vcpu); + + if (tlbsel == 0) { + esel &= vcpu_e500->gtlb_params[0].ways - 1; + esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2); + } else { + esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1; + } + + return esel; +} + /* Search the guest TLB for a matching entry. 
*/ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t eaddr, int tlbsel, unsigned int pid, int as) { - int size = vcpu_e500->gtlb_size[tlbsel]; - int set_base; + int size = vcpu_e500->gtlb_params[tlbsel].entries; + unsigned int set_base, offset; int i; if (tlbsel == 0) { - int mask = size / KVM_E500_TLB0_WAY_NUM - 1; - set_base = (eaddr >> PAGE_SHIFT) & mask; - set_base *= KVM_E500_TLB0_WAY_NUM; - size = KVM_E500_TLB0_WAY_NUM; + set_base = gtlb0_set_base(vcpu_e500, eaddr); + size = vcpu_e500->gtlb_params[0].ways; } else { set_base = 0; } + offset = vcpu_e500->gtlb_offset[tlbsel]; + for (i = 0; i < size; i++) { - struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i]; + struct kvm_book3e_206_tlb_entry *tlbe = + &vcpu_e500->gtlb_arch[offset + set_base + i]; unsigned int tid; if (eaddr < get_tlb_eaddr(tlbe)) @@ -457,27 +507,55 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, return -1; } -static inline void kvmppc_e500_priv_setup(struct tlbe_priv *priv, - struct tlbe *gtlbe, - pfn_t pfn) +static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, + struct kvm_book3e_206_tlb_entry *gtlbe, + pfn_t pfn) { - priv->pfn = pfn; - priv->flags = E500_TLB_VALID; + ref->pfn = pfn; + ref->flags = E500_TLB_VALID; if (tlbe_is_writable(gtlbe)) - priv->flags |= E500_TLB_DIRTY; + ref->flags |= E500_TLB_DIRTY; } -static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv) +static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) { - if (priv->flags & E500_TLB_VALID) { - if (priv->flags & E500_TLB_DIRTY) - kvm_release_pfn_dirty(priv->pfn); + if (ref->flags & E500_TLB_VALID) { + if (ref->flags & E500_TLB_DIRTY) + kvm_release_pfn_dirty(ref->pfn); else - kvm_release_pfn_clean(priv->pfn); + kvm_release_pfn_clean(ref->pfn); + + ref->flags = 0; + } +} + +static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + int tlbsel = 0; + int i; + + for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { + struct tlbe_ref *ref = + &vcpu_e500->gtlb_priv[tlbsel][i].ref; + kvmppc_e500_ref_release(ref); + } +} + +static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + int stlbsel = 1; + int i; + + kvmppc_e500_id_table_reset_all(vcpu_e500); - priv->flags = 0; + for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { + struct tlbe_ref *ref = + &vcpu_e500->tlb_refs[stlbsel][i]; + kvmppc_e500_ref_release(ref); } + + clear_tlb_privs(vcpu_e500); } static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, @@ -488,59 +566,54 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, int tlbsel; /* since we only have two TLBs, only lower bit is used. */ - tlbsel = (vcpu_e500->mas4 >> 28) & 0x1; - victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0; - pidsel = (vcpu_e500->mas4 >> 16) & 0xf; - tsized = (vcpu_e500->mas4 >> 7) & 0x1f; + tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; + victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; + pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf; + tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; - vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) + vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); - vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) + vcpu->arch.shared->mas1 = MAS1_VALID | (as ? 
MAS1_TS : 0) | MAS1_TID(vcpu_e500->pid[pidsel]) | MAS1_TSIZE(tsized); - vcpu_e500->mas2 = (eaddr & MAS2_EPN) - | (vcpu_e500->mas4 & MAS2_ATTRIB_MASK); - vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; - vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1) + vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) + | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK); + vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; + vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1) | (get_cur_pid(vcpu) << 16) | (as ? MAS6_SAS : 0); - vcpu_e500->mas7 = 0; } -static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, - struct tlbe *gtlbe, int tsize, - struct tlbe_priv *priv, - u64 gvaddr, struct tlbe *stlbe) +/* TID must be supplied by the caller */ +static inline void kvmppc_e500_setup_stlbe( + struct kvmppc_vcpu_e500 *vcpu_e500, + struct kvm_book3e_206_tlb_entry *gtlbe, + int tsize, struct tlbe_ref *ref, u64 gvaddr, + struct kvm_book3e_206_tlb_entry *stlbe) { - pfn_t pfn = priv->pfn; - unsigned int stid; + pfn_t pfn = ref->pfn; - stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe), - get_tlb_tid(gtlbe), - get_cur_pr(&vcpu_e500->vcpu), 0); + BUG_ON(!(ref->flags & E500_TLB_VALID)); /* Force TS=1 IPROT=0 for all guest mappings. */ - stlbe->mas1 = MAS1_TSIZE(tsize) - | MAS1_TID(stid) | MAS1_TS | MAS1_VALID; + stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID; stlbe->mas2 = (gvaddr & MAS2_EPN) | e500_shadow_mas2_attrib(gtlbe->mas2, vcpu_e500->vcpu.arch.shared->msr & MSR_PR); - stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN) - | e500_shadow_mas3_attrib(gtlbe->mas3, + stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | e500_shadow_mas3_attrib(gtlbe->mas7_3, vcpu_e500->vcpu.arch.shared->msr & MSR_PR); - stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN; } - static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, - u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel, - struct tlbe *stlbe) + u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, + int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, + struct tlbe_ref *ref) { struct kvm_memory_slot *slot; unsigned long pfn, hva; int pfnmap = 0; int tsize = BOOK3E_PAGESZ_4K; - struct tlbe_priv *priv; /* * Translate guest physical to true physical, acquiring @@ -621,12 +694,31 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, pfn &= ~(tsize_pages - 1); break; } + } else if (vma && hva >= vma->vm_start && + (vma->vm_flags & VM_HUGETLB)) { + unsigned long psize = vma_kernel_pagesize(vma); + + tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> + MAS1_TSIZE_SHIFT; + + /* + * Take the largest page size that satisfies both host + * and guest mapping + */ + tsize = min(__ilog2(psize) - 10, tsize); + + /* + * e500 doesn't implement the lowest tsize bit, + * or 1K pages. + */ + tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); } up_read(&current->mm->mmap_sem); } if (likely(!pfnmap)) { + unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn); if (is_error_pfn(pfn)) { printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", @@ -634,45 +726,52 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, kvm_release_pfn_clean(pfn); return; } + + /* Align guest and physical address to page map boundaries */ + pfn &= ~(tsize_pages - 1); + gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); } - /* Drop old priv and setup new one. 
*/ - priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; - kvmppc_e500_priv_release(priv); - kvmppc_e500_priv_setup(priv, gtlbe, pfn); + /* Drop old ref and setup new one. */ + kvmppc_e500_ref_release(ref); + kvmppc_e500_ref_setup(ref, gtlbe, pfn); - kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, priv, gvaddr, stlbe); + kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe); } /* XXX only map the one-one case, for now use TLB0 */ -static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, - int esel, struct tlbe *stlbe) +static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, + int esel, + struct kvm_book3e_206_tlb_entry *stlbe) { - struct tlbe *gtlbe; + struct kvm_book3e_206_tlb_entry *gtlbe; + struct tlbe_ref *ref; - gtlbe = &vcpu_e500->gtlb_arch[0][esel]; + gtlbe = get_entry(vcpu_e500, 0, esel); + ref = &vcpu_e500->gtlb_priv[0][esel].ref; kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), get_tlb_raddr(gtlbe) >> PAGE_SHIFT, - gtlbe, 0, esel, stlbe); - - return esel; + gtlbe, 0, stlbe, ref); } /* Caller must ensure that the specified guest TLB entry is safe to insert into * the shadow TLB. */ /* XXX for both one-one and one-to-many , for now use TLB1 */ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, - u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe) + u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, + struct kvm_book3e_206_tlb_entry *stlbe) { + struct tlbe_ref *ref; unsigned int victim; - victim = vcpu_e500->gtlb_nv[1]++; + victim = vcpu_e500->host_tlb1_nv++; - if (unlikely(vcpu_e500->gtlb_nv[1] >= tlb1_max_shadow_size())) - vcpu_e500->gtlb_nv[1] = 0; + if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) + vcpu_e500->host_tlb1_nv = 0; - kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim, stlbe); + ref = &vcpu_e500->tlb_refs[1][victim]; + kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref); return victim; } @@ -689,7 +788,8 @@ static inline int kvmppc_e500_gtlbe_invalidate( struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int esel) { - struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel]; + struct kvm_book3e_206_tlb_entry *gtlbe = + get_entry(vcpu_e500, tlbsel, esel); if (unlikely(get_tlb_iprot(gtlbe))) return -1; @@ -704,10 +804,10 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value) int esel; if (value & MMUCSR0_TLB0FI) - for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++) + for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++) kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel); if (value & MMUCSR0_TLB1FI) - for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++) + for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++) kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); /* Invalidate all vcpu id mappings */ @@ -732,7 +832,8 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) if (ia) { /* invalidate all entries */ - for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++) + for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; + esel++) kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); } else { ea &= 0xfffff000; @@ -752,18 +853,17 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int tlbsel, esel; - struct tlbe *gtlbe; + struct kvm_book3e_206_tlb_entry *gtlbe; - tlbsel = get_tlb_tlbsel(vcpu_e500); - esel = get_tlb_esel(vcpu_e500, tlbsel); + tlbsel = get_tlb_tlbsel(vcpu); + esel = get_tlb_esel(vcpu, tlbsel); - gtlbe = 
&vcpu_e500->gtlb_arch[tlbsel][esel]; - vcpu_e500->mas0 &= ~MAS0_NV(~0); - vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); - vcpu_e500->mas1 = gtlbe->mas1; - vcpu_e500->mas2 = gtlbe->mas2; - vcpu_e500->mas3 = gtlbe->mas3; - vcpu_e500->mas7 = gtlbe->mas7; + gtlbe = get_entry(vcpu_e500, tlbsel, esel); + vcpu->arch.shared->mas0 &= ~MAS0_NV(~0); + vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); + vcpu->arch.shared->mas1 = gtlbe->mas1; + vcpu->arch.shared->mas2 = gtlbe->mas2; + vcpu->arch.shared->mas7_3 = gtlbe->mas7_3; return EMULATE_DONE; } @@ -771,10 +871,10 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu) int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - int as = !!get_cur_sas(vcpu_e500); - unsigned int pid = get_cur_spid(vcpu_e500); + int as = !!get_cur_sas(vcpu); + unsigned int pid = get_cur_spid(vcpu); int esel, tlbsel; - struct tlbe *gtlbe = NULL; + struct kvm_book3e_206_tlb_entry *gtlbe = NULL; gva_t ea; ea = kvmppc_get_gpr(vcpu, rb); @@ -782,70 +882,90 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb) for (tlbsel = 0; tlbsel < 2; tlbsel++) { esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); if (esel >= 0) { - gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel]; + gtlbe = get_entry(vcpu_e500, tlbsel, esel); break; } } if (gtlbe) { - vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel) + esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1; + + vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel) | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); - vcpu_e500->mas1 = gtlbe->mas1; - vcpu_e500->mas2 = gtlbe->mas2; - vcpu_e500->mas3 = gtlbe->mas3; - vcpu_e500->mas7 = gtlbe->mas7; + vcpu->arch.shared->mas1 = gtlbe->mas1; + vcpu->arch.shared->mas2 = gtlbe->mas2; + vcpu->arch.shared->mas7_3 = gtlbe->mas7_3; } else { int victim; /* since we only have two TLBs, only lower bit is used. */ - tlbsel = vcpu_e500->mas4 >> 28 & 0x1; - victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0; + tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1; + victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; - vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) + vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) + | MAS0_ESEL(victim) | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); - vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0) - | (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0)) - | (vcpu_e500->mas4 & MAS4_TSIZED(~0)); - vcpu_e500->mas2 &= MAS2_EPN; - vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK; - vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; - vcpu_e500->mas7 = 0; + vcpu->arch.shared->mas1 = + (vcpu->arch.shared->mas6 & MAS6_SPID0) + | (vcpu->arch.shared->mas6 & (MAS6_SAS ? 
MAS1_TS : 0)) + | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0)); + vcpu->arch.shared->mas2 &= MAS2_EPN; + vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 & + MAS2_ATTRIB_MASK; + vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | + MAS3_U2 | MAS3_U3; } kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); return EMULATE_DONE; } +/* sesel is for tlb1 only */ +static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, + struct kvm_book3e_206_tlb_entry *gtlbe, + struct kvm_book3e_206_tlb_entry *stlbe, + int stlbsel, int sesel) +{ + int stid; + + preempt_disable(); + stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe), + get_tlb_tid(gtlbe), + get_cur_pr(&vcpu_e500->vcpu), 0); + + stlbe->mas1 |= MAS1_TID(stid); + write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); + preempt_enable(); +} + int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - struct tlbe *gtlbe; + struct kvm_book3e_206_tlb_entry *gtlbe; int tlbsel, esel; - tlbsel = get_tlb_tlbsel(vcpu_e500); - esel = get_tlb_esel(vcpu_e500, tlbsel); + tlbsel = get_tlb_tlbsel(vcpu); + esel = get_tlb_esel(vcpu, tlbsel); - gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel]; + gtlbe = get_entry(vcpu_e500, tlbsel, esel); if (get_tlb_v(gtlbe)) - kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel); + inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); - gtlbe->mas1 = vcpu_e500->mas1; - gtlbe->mas2 = vcpu_e500->mas2; - gtlbe->mas3 = vcpu_e500->mas3; - gtlbe->mas7 = vcpu_e500->mas7; + gtlbe->mas1 = vcpu->arch.shared->mas1; + gtlbe->mas2 = vcpu->arch.shared->mas2; + gtlbe->mas7_3 = vcpu->arch.shared->mas7_3; - trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2, - gtlbe->mas3, gtlbe->mas7); + trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, + gtlbe->mas2, gtlbe->mas7_3); /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. 
*/ if (tlbe_is_host_safe(vcpu, gtlbe)) { - struct tlbe stlbe; + struct kvm_book3e_206_tlb_entry stlbe; int stlbsel, sesel; u64 eaddr; u64 raddr; - preempt_disable(); switch (tlbsel) { case 0: /* TLB0 */ @@ -853,7 +973,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K); stlbsel = 0; - sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); + kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); + sesel = 0; /* unused */ break; @@ -874,8 +995,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) default: BUG(); } - write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe); - preempt_enable(); + + write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); } kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); @@ -914,9 +1035,11 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index, gva_t eaddr) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); - struct tlbe *gtlbe = - &vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)]; - u64 pgmask = get_tlb_bytes(gtlbe) - 1; + struct kvm_book3e_206_tlb_entry *gtlbe; + u64 pgmask; + + gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index)); + pgmask = get_tlb_bytes(gtlbe) - 1; return get_tlb_raddr(gtlbe) | (eaddr & pgmask); } @@ -930,22 +1053,21 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct tlbe_priv *priv; - struct tlbe *gtlbe, stlbe; + struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; int tlbsel = tlbsel_of(index); int esel = esel_of(index); int stlbsel, sesel; - gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel]; + gtlbe = get_entry(vcpu_e500, tlbsel, esel); - preempt_disable(); switch (tlbsel) { case 0: stlbsel = 0; - sesel = esel; - priv = &vcpu_e500->gtlb_priv[stlbsel][sesel]; + sesel = 0; /* unused */ + priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K, - priv, eaddr, &stlbe); + &priv->ref, eaddr, &stlbe); break; case 1: { @@ -962,8 +1084,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, break; } - write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe); - preempt_enable(); + write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); } int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, @@ -993,85 +1114,279 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) { - struct tlbe *tlbe; + struct kvm_book3e_206_tlb_entry *tlbe; /* Insert large initial mapping for guest. */ - tlbe = &vcpu_e500->gtlb_arch[1][0]; + tlbe = get_entry(vcpu_e500, 1, 0); tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M); tlbe->mas2 = 0; - tlbe->mas3 = E500_TLB_SUPER_PERM_MASK; - tlbe->mas7 = 0; + tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK; /* 4K map for serial output. Used by kernel wrapper. 
*/ - tlbe = &vcpu_e500->gtlb_arch[1][1]; + tlbe = get_entry(vcpu_e500, 1, 1); tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K); tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G; - tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; - tlbe->mas7 = 0; + tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; +} + +static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) +{ + int i; + + clear_tlb_refs(vcpu_e500); + kfree(vcpu_e500->gtlb_priv[0]); + kfree(vcpu_e500->gtlb_priv[1]); + + if (vcpu_e500->shared_tlb_pages) { + vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch, + PAGE_SIZE))); + + for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) { + set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]); + put_page(vcpu_e500->shared_tlb_pages[i]); + } + + vcpu_e500->num_shared_tlb_pages = 0; + vcpu_e500->shared_tlb_pages = NULL; + } else { + kfree(vcpu_e500->gtlb_arch); + } + + vcpu_e500->gtlb_arch = NULL; +} + +int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, + struct kvm_config_tlb *cfg) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + struct kvm_book3e_206_tlb_params params; + char *virt; + struct page **pages; + struct tlbe_priv *privs[2] = {}; + size_t array_len; + u32 sets; + int num_pages, ret, i; + + if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV) + return -EINVAL; + + if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params, + sizeof(params))) + return -EFAULT; + + if (params.tlb_sizes[1] > 64) + return -EINVAL; + if (params.tlb_ways[1] != params.tlb_sizes[1]) + return -EINVAL; + if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0) + return -EINVAL; + if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0) + return -EINVAL; + + if (!is_power_of_2(params.tlb_ways[0])) + return -EINVAL; + + sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]); + if (!is_power_of_2(sets)) + return -EINVAL; + + array_len = params.tlb_sizes[0] + params.tlb_sizes[1]; + array_len *= sizeof(struct kvm_book3e_206_tlb_entry); + + if (cfg->array_len < array_len) + return -EINVAL; + + num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) - + cfg->array / PAGE_SIZE; + pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL); + if (!pages) + return -ENOMEM; + + ret = get_user_pages_fast(cfg->array, num_pages, 1, pages); + if (ret < 0) + goto err_pages; + + if (ret != num_pages) { + num_pages = ret; + ret = -EFAULT; + goto err_put_page; + } + + virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL); + if (!virt) + goto err_put_page; + + privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0], + GFP_KERNEL); + privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1], + GFP_KERNEL); + + if (!privs[0] || !privs[1]) + goto err_put_page; + + free_gtlb(vcpu_e500); + + vcpu_e500->gtlb_priv[0] = privs[0]; + vcpu_e500->gtlb_priv[1] = privs[1]; + + vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *) + (virt + (cfg->array & (PAGE_SIZE - 1))); + + vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0]; + vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1]; + + vcpu_e500->gtlb_offset[0] = 0; + vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0]; + + vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); + if (params.tlb_sizes[0] <= 2048) + vcpu_e500->tlb0cfg |= params.tlb_sizes[0]; + vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT; + + vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); + vcpu_e500->tlb1cfg |= params.tlb_sizes[1]; + vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT; 
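A minimal sketch of the geometry arithmetic enforced by kvm_vcpu_ioctl_config_tlb() above, written as a standalone user-space helper (illustrative only, not part of the patch; the entry layout is assumed from the kvm_book3e_206_tlb_entry definition this series introduces, and the helper name is hypothetical):

#include <stdint.h>
#include <stddef.h>

/* Assumed layout of struct kvm_book3e_206_tlb_entry (added by this series). */
struct book3e_206_tlb_entry {
    uint32_t mas8;
    uint32_t mas1;
    uint64_t mas2;
    uint64_t mas7_3;
};

/*
 * Core constraints from the checks above: TLB0 is sets x ways with both
 * powers of two, TLB1 is fully associative (ways == size, at most 64),
 * and the shared array holds every TLB0 entry followed by every TLB1
 * entry.  Returns the minimum array_len, or 0 for a geometry the ioctl
 * would reject.
 */
static size_t sw_tlb_array_bytes(const uint32_t sizes[4], const uint32_t ways[4])
{
    uint32_t sets;

    if (ways[0] == 0 || (ways[0] & (ways[0] - 1)))
        return 0;    /* TLB0 ways must be a power of two */
    sets = sizes[0] / ways[0];
    if (sets == 0 || (sets & (sets - 1)))
        return 0;    /* ... and so must the resulting set count */
    if (sizes[1] > 64 || ways[1] != sizes[1])
        return 0;    /* TLB1 is fully associative, at most 64 entries */

    return (size_t)(sizes[0] + sizes[1]) * sizeof(struct book3e_206_tlb_entry);
}

Under that assumed 24-byte entry layout, the legacy default geometry (256-entry TLB0 plus 16-entry TLB1) works out to 272 * 24 = 6528 bytes of shared array.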
+ + vcpu_e500->shared_tlb_pages = pages; + vcpu_e500->num_shared_tlb_pages = num_pages; + + vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0]; + vcpu_e500->gtlb_params[0].sets = sets; + + vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1]; + vcpu_e500->gtlb_params[1].sets = 1; + + return 0; + +err_put_page: + kfree(privs[0]); + kfree(privs[1]); + + for (i = 0; i < num_pages; i++) + put_page(pages[i]); + +err_pages: + kfree(pages); + return ret; +} + +int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, + struct kvm_dirty_tlb *dirty) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + + clear_tlb_refs(vcpu_e500); + return 0; } int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) { - tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF; - - vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE; - vcpu_e500->gtlb_arch[0] = - kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL); - if (vcpu_e500->gtlb_arch[0] == NULL) - goto err_out; - - vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE; - vcpu_e500->gtlb_arch[1] = - kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL); - if (vcpu_e500->gtlb_arch[1] == NULL) - goto err_out_guest0; - - vcpu_e500->gtlb_priv[0] = (struct tlbe_priv *) - kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB0_SIZE, GFP_KERNEL); - if (vcpu_e500->gtlb_priv[0] == NULL) - goto err_out_guest1; - vcpu_e500->gtlb_priv[1] = (struct tlbe_priv *) - kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB1_SIZE, GFP_KERNEL); - - if (vcpu_e500->gtlb_priv[1] == NULL) - goto err_out_priv0; + int entry_size = sizeof(struct kvm_book3e_206_tlb_entry); + int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE; + + host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY; + host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; + + /* + * This should never happen on real e500 hardware, but is + * architecturally possible -- e.g. in some weird nested + * virtualization case. 
+ */ + if (host_tlb_params[0].entries == 0 || + host_tlb_params[1].entries == 0) { + pr_err("%s: need to know host tlb size\n", __func__); + return -ENODEV; + } + + host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >> + TLBnCFG_ASSOC_SHIFT; + host_tlb_params[1].ways = host_tlb_params[1].entries; + + if (!is_power_of_2(host_tlb_params[0].entries) || + !is_power_of_2(host_tlb_params[0].ways) || + host_tlb_params[0].entries < host_tlb_params[0].ways || + host_tlb_params[0].ways == 0) { + pr_err("%s: bad tlb0 host config: %u entries %u ways\n", + __func__, host_tlb_params[0].entries, + host_tlb_params[0].ways); + return -ENODEV; + } + + host_tlb_params[0].sets = + host_tlb_params[0].entries / host_tlb_params[0].ways; + host_tlb_params[1].sets = 1; + + vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE; + vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE; + + vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM; + vcpu_e500->gtlb_params[0].sets = + KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM; + + vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE; + vcpu_e500->gtlb_params[1].sets = 1; + + vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL); + if (!vcpu_e500->gtlb_arch) + return -ENOMEM; + + vcpu_e500->gtlb_offset[0] = 0; + vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE; + + vcpu_e500->tlb_refs[0] = + kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries, + GFP_KERNEL); + if (!vcpu_e500->tlb_refs[0]) + goto err; + + vcpu_e500->tlb_refs[1] = + kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries, + GFP_KERNEL); + if (!vcpu_e500->tlb_refs[1]) + goto err; + + vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) * + vcpu_e500->gtlb_params[0].entries, + GFP_KERNEL); + if (!vcpu_e500->gtlb_priv[0]) + goto err; + + vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) * + vcpu_e500->gtlb_params[1].entries, + GFP_KERNEL); + if (!vcpu_e500->gtlb_priv[1]) + goto err; if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) - goto err_out_priv1; + goto err; /* Init TLB configuration register */ - vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL; - vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0]; - vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL; - vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1]; + vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & + ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); + vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries; + vcpu_e500->tlb0cfg |= + vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT; + + vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & + ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); + vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[1].entries; + vcpu_e500->tlb0cfg |= + vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT; return 0; -err_out_priv1: - kfree(vcpu_e500->gtlb_priv[1]); -err_out_priv0: - kfree(vcpu_e500->gtlb_priv[0]); -err_out_guest1: - kfree(vcpu_e500->gtlb_arch[1]); -err_out_guest0: - kfree(vcpu_e500->gtlb_arch[0]); -err_out: +err: + free_gtlb(vcpu_e500); + kfree(vcpu_e500->tlb_refs[0]); + kfree(vcpu_e500->tlb_refs[1]); return -1; } void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) { - int stlbsel, i; - - /* release all privs */ - for (stlbsel = 0; stlbsel < 2; stlbsel++) - for (i = 0; i < vcpu_e500->gtlb_size[stlbsel]; i++) { - struct tlbe_priv *priv = - &vcpu_e500->gtlb_priv[stlbsel][i]; - kvmppc_e500_priv_release(priv); - } - + free_gtlb(vcpu_e500); kvmppc_e500_id_table_free(vcpu_e500); - kfree(vcpu_e500->gtlb_arch[1]); - kfree(vcpu_e500->gtlb_arch[0]); + + kfree(vcpu_e500->tlb_refs[0]); + 
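A small standalone sketch of the guest-TLB0 indexing that the default geometry initialised above feeds into; it mirrors the tlb0_set_base()/get_tlb_esel() math earlier in this file (illustrative only, 4K pages assumed, helper name hypothetical):

/*
 * Flat index into the combined gtlb_arch[] array for a TLB0 entry:
 * the effective address selects the set, the MAS0 ESEL bits select
 * the way within that set.
 */
static inline unsigned int gtlb0_index(unsigned long eaddr,
                                       unsigned int esel_bit,
                                       unsigned int sets,
                                       unsigned int ways)
{
    unsigned int set_base = ((eaddr >> 12) & (sets - 1)) * ways; /* 4K pages */

    return set_base + (esel_bit & (ways - 1));
}

/*
 * Example with the legacy default (128 sets, 2 ways): eaddr 0x10003000
 * falls in set 3, so its two candidate slots are entries 6 and 7 of the
 * TLB0 portion of the array (gtlb_offset[0] == 0).
 */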
kfree(vcpu_e500->tlb_refs[1]); } diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h index 59b88e99a235..5c6d2d7bf058 100644 --- a/arch/powerpc/kvm/e500_tlb.h +++ b/arch/powerpc/kvm/e500_tlb.h @@ -20,13 +20,9 @@ #include <asm/tlb.h> #include <asm/kvm_e500.h> -#define KVM_E500_TLB0_WAY_SIZE_BIT 7 /* Fixed */ -#define KVM_E500_TLB0_WAY_SIZE (1UL << KVM_E500_TLB0_WAY_SIZE_BIT) -#define KVM_E500_TLB0_WAY_SIZE_MASK (KVM_E500_TLB0_WAY_SIZE - 1) - -#define KVM_E500_TLB0_WAY_NUM_BIT 1 /* No greater than 7 */ -#define KVM_E500_TLB0_WAY_NUM (1UL << KVM_E500_TLB0_WAY_NUM_BIT) -#define KVM_E500_TLB0_WAY_NUM_MASK (KVM_E500_TLB0_WAY_NUM - 1) +/* This geometry is the legacy default -- can be overridden by userspace */ +#define KVM_E500_TLB0_WAY_SIZE 128 +#define KVM_E500_TLB0_WAY_NUM 2 #define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM) #define KVM_E500_TLB1_SIZE 16 @@ -58,50 +54,54 @@ extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *); extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *); /* TLB helper functions */ -static inline unsigned int get_tlb_size(const struct tlbe *tlbe) +static inline unsigned int +get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe) { return (tlbe->mas1 >> 7) & 0x1f; } -static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe) +static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe) { return tlbe->mas2 & 0xfffff000; } -static inline u64 get_tlb_bytes(const struct tlbe *tlbe) +static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe) { unsigned int pgsize = get_tlb_size(tlbe); return 1ULL << 10 << pgsize; } -static inline gva_t get_tlb_end(const struct tlbe *tlbe) +static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe) { u64 bytes = get_tlb_bytes(tlbe); return get_tlb_eaddr(tlbe) + bytes - 1; } -static inline u64 get_tlb_raddr(const struct tlbe *tlbe) +static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe) { - u64 rpn = tlbe->mas7; - return (rpn << 32) | (tlbe->mas3 & 0xfffff000); + return tlbe->mas7_3 & ~0xfffULL; } -static inline unsigned int get_tlb_tid(const struct tlbe *tlbe) +static inline unsigned int +get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe) { return (tlbe->mas1 >> 16) & 0xff; } -static inline unsigned int get_tlb_ts(const struct tlbe *tlbe) +static inline unsigned int +get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe) { return (tlbe->mas1 >> 12) & 0x1; } -static inline unsigned int get_tlb_v(const struct tlbe *tlbe) +static inline unsigned int +get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe) { return (tlbe->mas1 >> 31) & 0x1; } -static inline unsigned int get_tlb_iprot(const struct tlbe *tlbe) +static inline unsigned int +get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe) { return (tlbe->mas1 >> 30) & 0x1; } @@ -121,59 +121,37 @@ static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu) return !!(vcpu->arch.shared->msr & MSR_PR); } -static inline unsigned int get_cur_spid( - const struct kvmppc_vcpu_e500 *vcpu_e500) +static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu) { - return (vcpu_e500->mas6 >> 16) & 0xff; + return (vcpu->arch.shared->mas6 >> 16) & 0xff; } -static inline unsigned int get_cur_sas( - const struct kvmppc_vcpu_e500 *vcpu_e500) +static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu) { - return vcpu_e500->mas6 & 0x1; + return vcpu->arch.shared->mas6 & 0x1; } -static inline unsigned int get_tlb_tlbsel( - const struct 
kvmppc_vcpu_e500 *vcpu_e500) +static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu) { /* * Manual says that tlbsel has 2 bits wide. * Since we only have two TLBs, only lower bit is used. */ - return (vcpu_e500->mas0 >> 28) & 0x1; -} - -static inline unsigned int get_tlb_nv_bit( - const struct kvmppc_vcpu_e500 *vcpu_e500) -{ - return vcpu_e500->mas0 & 0xfff; + return (vcpu->arch.shared->mas0 >> 28) & 0x1; } -static inline unsigned int get_tlb_esel_bit( - const struct kvmppc_vcpu_e500 *vcpu_e500) +static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu) { - return (vcpu_e500->mas0 >> 16) & 0xfff; + return vcpu->arch.shared->mas0 & 0xfff; } -static inline unsigned int get_tlb_esel( - const struct kvmppc_vcpu_e500 *vcpu_e500, - int tlbsel) +static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu) { - unsigned int esel = get_tlb_esel_bit(vcpu_e500); - - if (tlbsel == 0) { - esel &= KVM_E500_TLB0_WAY_NUM_MASK; - esel |= ((vcpu_e500->mas2 >> 12) & KVM_E500_TLB0_WAY_SIZE_MASK) - << KVM_E500_TLB0_WAY_NUM_BIT; - } else { - esel &= KVM_E500_TLB1_SIZE - 1; - } - - return esel; + return (vcpu->arch.shared->mas0 >> 16) & 0xfff; } static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, - const struct tlbe *tlbe) + const struct kvm_book3e_206_tlb_entry *tlbe) { gpa_t gpa; diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 141dce3c6810..968f40101883 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -13,6 +13,7 @@ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright IBM Corp. 2007 + * Copyright 2011 Freescale Semiconductor, Inc. * * Authors: Hollis Blanchard <hollisb@us.ibm.com> */ @@ -69,54 +70,55 @@ #define OP_STH 44 #define OP_STHU 45 -#ifdef CONFIG_PPC_BOOK3S -static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu) -{ - return 1; -} -#else -static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.tcr & TCR_DIE; -} -#endif - void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) { unsigned long dec_nsec; + unsigned long long dec_time; pr_debug("mtDEC: %x\n", vcpu->arch.dec); + hrtimer_try_to_cancel(&vcpu->arch.dec_timer); + #ifdef CONFIG_PPC_BOOK3S /* mtdec lowers the interrupt line when positive. */ kvmppc_core_dequeue_dec(vcpu); /* POWER4+ triggers a dec interrupt if the value is < 0 */ if (vcpu->arch.dec & 0x80000000) { - hrtimer_try_to_cancel(&vcpu->arch.dec_timer); kvmppc_core_queue_dec(vcpu); return; } #endif - if (kvmppc_dec_enabled(vcpu)) { - /* The decrementer ticks at the same rate as the timebase, so - * that's how we convert the guest DEC value to the number of - * host ticks. */ - - hrtimer_try_to_cancel(&vcpu->arch.dec_timer); - dec_nsec = vcpu->arch.dec; - dec_nsec *= 1000; - dec_nsec /= tb_ticks_per_usec; - hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), - HRTIMER_MODE_REL); - vcpu->arch.dec_jiffies = get_tb(); - } else { - hrtimer_try_to_cancel(&vcpu->arch.dec_timer); - } + +#ifdef CONFIG_BOOKE + /* On BOOKE, DEC = 0 is as good as decrementer not enabled */ + if (vcpu->arch.dec == 0) + return; +#endif + + /* + * The decrementer ticks at the same rate as the timebase, so + * that's how we convert the guest DEC value to the number of + * host ticks. 
+ */ + + dec_time = vcpu->arch.dec; + dec_time *= 1000; + do_div(dec_time, tb_ticks_per_usec); + dec_nsec = do_div(dec_time, NSEC_PER_SEC); + hrtimer_start(&vcpu->arch.dec_timer, + ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL); + vcpu->arch.dec_jiffies = get_tb(); } u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) { u64 jd = tb - vcpu->arch.dec_jiffies; + +#ifdef CONFIG_BOOKE + if (vcpu->arch.dec < jd) + return 0; +#endif + return vcpu->arch.dec - jd; } @@ -159,7 +161,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) case OP_TRAP_64: kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else - kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR); + kvmppc_core_queue_program(vcpu, + vcpu->arch.shared->esr | ESR_PTR); #endif advance = 0; break; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 607fbdf24b84..00d7e345b3fe 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -39,7 +39,8 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) { return !(v->arch.shared->msr & MSR_WE) || - !!(v->arch.pending_exceptions); + !!(v->arch.pending_exceptions) || + v->requests; } int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) @@ -66,7 +67,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) vcpu->arch.magic_page_pa = param1; vcpu->arch.magic_page_ea = param2; - r2 = KVM_MAGIC_FEAT_SR; + r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; r = HC_EV_SUCCESS; break; @@ -171,8 +172,11 @@ void kvm_arch_check_processor_compat(void *rtn) *(int *)rtn = kvmppc_core_check_processor_compat(); } -int kvm_arch_init_vm(struct kvm *kvm) +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { + if (type) + return -EINVAL; + return kvmppc_core_init_vm(kvm); } @@ -208,17 +212,22 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_PPC_BOOKE_SREGS: #else case KVM_CAP_PPC_SEGSTATE: + case KVM_CAP_PPC_HIOR: case KVM_CAP_PPC_PAPR: #endif case KVM_CAP_PPC_UNSET_IRQ: case KVM_CAP_PPC_IRQ_LEVEL: case KVM_CAP_ENABLE_CAP: + case KVM_CAP_ONE_REG: r = 1; break; #ifndef CONFIG_KVM_BOOK3S_64_HV case KVM_CAP_PPC_PAIRED_SINGLES: case KVM_CAP_PPC_OSI: case KVM_CAP_PPC_GET_PVINFO: +#ifdef CONFIG_KVM_E500 + case KVM_CAP_SW_TLB: +#endif r = 1; break; case KVM_CAP_COALESCED_MMIO: @@ -238,7 +247,26 @@ int kvm_dev_ioctl_check_extension(long ext) if (cpu_has_feature(CPU_FTR_ARCH_201)) r = 2; break; + case KVM_CAP_SYNC_MMU: + r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0; + break; #endif + case KVM_CAP_NR_VCPUS: + /* + * Recommending a number of CPUs is somewhat arbitrary; we + * return the number of present CPUs for -HV (since a host + * will have secondary threads "offline"), and for other KVM + * implementations just count online CPUs. 
+ */ +#ifdef CONFIG_KVM_BOOK3S_64_HV + r = num_present_cpus(); +#else + r = num_online_cpus(); +#endif + break; + case KVM_CAP_MAX_VCPUS: + r = KVM_MAX_VCPUS; + break; default: r = 0; break; @@ -253,6 +281,16 @@ long kvm_arch_dev_ioctl(struct file *filp, return -EINVAL; } +void kvm_arch_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ +} + +int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +{ + return 0; +} + int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, @@ -279,9 +317,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu; vcpu = kvmppc_core_vcpu_create(kvm, id); - vcpu->arch.wqp = &vcpu->wq; - if (!IS_ERR(vcpu)) + if (!IS_ERR(vcpu)) { + vcpu->arch.wqp = &vcpu->wq; kvmppc_create_vcpu_debugfs(vcpu, id); + } return vcpu; } @@ -305,18 +344,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) return kvmppc_core_pending_dec(vcpu); } -static void kvmppc_decrementer_func(unsigned long data) -{ - struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; - - kvmppc_core_queue_dec(vcpu); - - if (waitqueue_active(vcpu->arch.wqp)) { - wake_up_interruptible(vcpu->arch.wqp); - vcpu->stat.halt_wakeup++; - } -} - /* * low level hrtimer wake routine. Because this runs in hardirq context * we schedule a tasklet to do the real work. @@ -431,20 +458,20 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); - switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) { - case KVM_REG_GPR: + switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { + case KVM_MMIO_REG_GPR: kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); break; - case KVM_REG_FPR: - vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; + case KVM_MMIO_REG_FPR: + vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; #ifdef CONFIG_PPC_BOOK3S - case KVM_REG_QPR: - vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; + case KVM_MMIO_REG_QPR: + vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; - case KVM_REG_FQPR: - vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; - vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; + case KVM_MMIO_REG_FQPR: + vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; + vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; #endif default: @@ -553,8 +580,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) vcpu->arch.hcall_needed = 0; } - kvmppc_core_deliver_interrupts(vcpu); - r = kvmppc_vcpu_run(run, vcpu); if (vcpu->sigset_active) @@ -563,6 +588,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) return r; } +void kvm_vcpu_kick(struct kvm_vcpu *vcpu) +{ + int me; + int cpu = vcpu->cpu; + + me = get_cpu(); + if (waitqueue_active(vcpu->arch.wqp)) { + wake_up_interruptible(vcpu->arch.wqp); + vcpu->stat.halt_wakeup++; + } else if (cpu != me && cpu != -1) { + smp_send_reschedule(vcpu->cpu); + } + put_cpu(); +} + int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { if (irq->irq == KVM_INTERRUPT_UNSET) { @@ -571,13 +611,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) } kvmppc_core_queue_external(vcpu, irq); - - if (waitqueue_active(vcpu->arch.wqp)) { - wake_up_interruptible(vcpu->arch.wqp); - vcpu->stat.halt_wakeup++; - } else if (vcpu->cpu != -1) { - smp_send_reschedule(vcpu->cpu); - } + kvm_vcpu_kick(vcpu); return 0; } @@ -599,6 +633,19 @@ 
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, r = 0; vcpu->arch.papr_enabled = true; break; +#ifdef CONFIG_KVM_E500 + case KVM_CAP_SW_TLB: { + struct kvm_config_tlb cfg; + void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; + + r = -EFAULT; + if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) + break; + + r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); + break; + } +#endif default: r = -EINVAL; break; @@ -648,6 +695,32 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); break; } + + case KVM_SET_ONE_REG: + case KVM_GET_ONE_REG: + { + struct kvm_one_reg reg; + r = -EFAULT; + if (copy_from_user(&reg, argp, sizeof(reg))) + goto out; + if (ioctl == KVM_SET_ONE_REG) + r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg); + else + r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg); + break; + } + +#ifdef CONFIG_KVM_E500 + case KVM_DIRTY_TLB: { + struct kvm_dirty_tlb dirty; + r = -EFAULT; + if (copy_from_user(&dirty, argp, sizeof(dirty))) + goto out; + r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); + break; + } +#endif + default: r = -EINVAL; } @@ -656,6 +729,11 @@ out: return r; } +int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ + return VM_FAULT_SIGBUS; +} + static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) { u32 inst_lis = 0x3c000000; diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h index b135d3d397db..877186b7b1c3 100644 --- a/arch/powerpc/kvm/trace.h +++ b/arch/powerpc/kvm/trace.h @@ -118,11 +118,14 @@ TRACE_EVENT(kvm_book3s_exit, ), TP_fast_assign( + struct kvmppc_book3s_shadow_vcpu *svcpu; __entry->exit_nr = exit_nr; __entry->pc = kvmppc_get_pc(vcpu); __entry->dar = kvmppc_get_fault_dar(vcpu); __entry->msr = vcpu->arch.shared->msr; - __entry->srr1 = to_svcpu(vcpu)->shadow_srr1; + svcpu = svcpu_get(vcpu); + __entry->srr1 = svcpu->shadow_srr1; + svcpu_put(svcpu); ), TP_printk("exit=0x%x | pc=0x%lx | msr=0x%lx | dar=0x%lx | srr1=0x%lx", @@ -337,6 +340,63 @@ TRACE_EVENT(kvm_book3s_slbmte, #endif /* CONFIG_PPC_BOOK3S */ + +/************************************************************************* + * Book3E trace points * + *************************************************************************/ + +#ifdef CONFIG_BOOKE + +TRACE_EVENT(kvm_booke206_stlb_write, + TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3), + TP_ARGS(mas0, mas8, mas1, mas2, mas7_3), + + TP_STRUCT__entry( + __field( __u32, mas0 ) + __field( __u32, mas8 ) + __field( __u32, mas1 ) + __field( __u64, mas2 ) + __field( __u64, mas7_3 ) + ), + + TP_fast_assign( + __entry->mas0 = mas0; + __entry->mas8 = mas8; + __entry->mas1 = mas1; + __entry->mas2 = mas2; + __entry->mas7_3 = mas7_3; + ), + + TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx", + __entry->mas0, __entry->mas8, __entry->mas1, + __entry->mas2, __entry->mas7_3) +); + +TRACE_EVENT(kvm_booke206_gtlb_write, + TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3), + TP_ARGS(mas0, mas1, mas2, mas7_3), + + TP_STRUCT__entry( + __field( __u32, mas0 ) + __field( __u32, mas1 ) + __field( __u64, mas2 ) + __field( __u64, mas7_3 ) + ), + + TP_fast_assign( + __entry->mas0 = mas0; + __entry->mas1 = mas1; + __entry->mas2 = mas2; + __entry->mas7_3 = mas7_3; + ), + + TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx", + __entry->mas0, __entry->mas1, + __entry->mas2, __entry->mas7_3) +); + +#endif + #endif /* _TRACE_KVM_H */ /* This part must be outside protection */ diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c index 13b676c20d12..da22c84a8fed 100644 ---
a/arch/powerpc/lib/alloc.c +++ b/arch/powerpc/lib/alloc.c @@ -3,8 +3,8 @@ #include <linux/slab.h> #include <linux/bootmem.h> #include <linux/string.h> +#include <asm/setup.h> -#include <asm/system.h> void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) { diff --git a/arch/powerpc/lib/copyuser_power7_vmx.c b/arch/powerpc/lib/copyuser_power7_vmx.c index 6e1efadac48b..bf2654f2b68e 100644 --- a/arch/powerpc/lib/copyuser_power7_vmx.c +++ b/arch/powerpc/lib/copyuser_power7_vmx.c @@ -20,6 +20,7 @@ */ #include <linux/uaccess.h> #include <linux/hardirq.h> +#include <asm/switch_to.h> int enter_vmx_copy(void) { diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c index 388b95e1a009..2c9441ee6bb8 100644 --- a/arch/powerpc/mm/44x_mmu.c +++ b/arch/powerpc/mm/44x_mmu.c @@ -27,7 +27,6 @@ #include <linux/memblock.h> #include <asm/mmu.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/cacheflush.h> diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 19f2f9498b27..08ffcf52a856 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -38,10 +38,10 @@ #include <asm/pgtable.h> #include <asm/mmu.h> #include <asm/mmu_context.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/tlbflush.h> #include <asm/siginfo.h> +#include <asm/debug.h> #include <mm/mmu_decl.h> #include "icswx.h" diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 3e8c37a4e395..377e5cbedbbb 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -40,7 +40,6 @@ #include <asm/mmu_context.h> #include <asm/page.h> #include <asm/types.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/machdep.h> #include <asm/prom.h> diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 57c7465e656e..fb05b123218f 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -12,6 +12,7 @@ #include <linux/io.h> #include <linux/slab.h> #include <linux/hugetlb.h> +#include <linux/export.h> #include <linux/of_fdt.h> #include <linux/memblock.h> #include <linux/bootmem.h> @@ -103,6 +104,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift *shift = hugepd_shift(*hpdp); return hugepte_offset(hpdp, ea, pdshift); } +EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { @@ -310,7 +312,8 @@ void __init reserve_hugetlb_gpages(void) int i; strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE); - parse_args("hugetlb gpages", cmdline, NULL, 0, &do_gpage_early_setup); + parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0, + &do_gpage_early_setup); /* * Walk gpage list in reverse, allocating larger page sizes first. 
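The kvmppc_emulate_dec() hunk in arch/powerpc/kvm/emulate.c above rescales the guest DEC value from timebase ticks to nanoseconds (multiply by 1000, then divide by tb_ticks_per_usec) and splits the result into the seconds/nanoseconds pair that ktime_set() expects, using do_div() twice. Below is a minimal user-space sketch of that arithmetic only; the timebase rate is an assumed stand-in value and plain 64-bit division replaces the kernel's do_div() helper.

#include <stdint.h>
#include <stdio.h>

#define TB_TICKS_PER_USEC 512ULL	/* assumed timebase rate, ticks per microsecond */
#define NSEC_PER_SEC 1000000000ULL

/* DEC ticks -> nanoseconds -> (seconds, nanoseconds), as in kvmppc_emulate_dec(). */
static void dec_to_expiry(uint32_t dec, uint64_t *secs, uint64_t *nsecs)
{
	uint64_t dec_time = dec;

	dec_time *= 1000;			/* ticks * 1000 ... */
	dec_time /= TB_TICKS_PER_USEC;		/* ... / (ticks per usec) = nanoseconds */
	*secs = dec_time / NSEC_PER_SEC;	/* whole seconds for ktime_set() */
	*nsecs = dec_time % NSEC_PER_SEC;	/* leftover nanoseconds */
}

int main(void)
{
	uint64_t s, ns;

	dec_to_expiry(0x7fffffffu, &s, &ns);
	printf("hrtimer expiry: %llus + %lluns\n",
	       (unsigned long long)s, (unsigned long long)ns);
	return 0;
}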
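The arch/powerpc/kvm/powerpc.c hunk above also advertises KVM_CAP_ONE_REG and routes the KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls to kvm_vcpu_ioctl_get_one_reg()/kvm_vcpu_ioctl_set_one_reg(). From user space each call passes a struct kvm_one_reg carrying a register id and the address of a value buffer. The fragment below sketches the caller side under the assumption that a vcpu file descriptor already exists (KVM_CREATE_VCPU is not shown); the actual PPC register-id encodings live in the uapi headers and are not part of this hunk.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read one guest register through the ONE_REG interface. */
static int get_one_reg(int vcpu_fd, uint64_t id, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id = id,
		.addr = (uintptr_t)val,	/* kernel copies the register value here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

/* Writing works the same way, with the value filled in before the call. */
static int set_one_reg(int vcpu_fd, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id = id,
		.addr = (uintptr_t)&val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}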
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 6157be2a7049..01e2db97a210 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -45,7 +45,6 @@ #include <asm/btext.h> #include <asm/tlb.h> #include <asm/sections.h> -#include <asm/system.h> #include <asm/hugetlb.h> #include "mmu_decl.h" diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index e94b57fb79a0..620b7acd2fdf 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -61,7 +61,6 @@ #include <asm/mmzone.h> #include <asm/cputable.h> #include <asm/sections.h> -#include <asm/system.h> #include <asm/iommu.h> #include <asm/abs_addr.h> #include <asm/vdso.h> diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 3feefc3842a8..b6edbb3b4a54 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -24,11 +24,11 @@ #include <linux/node.h> #include <asm/sparsemem.h> #include <asm/prom.h> -#include <asm/system.h> #include <asm/smp.h> #include <asm/firmware.h> #include <asm/paca.h> #include <asm/hvcall.h> +#include <asm/setup.h> static int numa_enabled = 1; diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 0907f92ce309..6c856fb8c15b 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -33,6 +33,7 @@ #include <asm/pgalloc.h> #include <asm/fixmap.h> #include <asm/io.h> +#include <asm/setup.h> #include "mmu_decl.h" diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index ad36ede469cc..249a0631c4db 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -51,7 +51,6 @@ #include <asm/processor.h> #include <asm/cputable.h> #include <asm/sections.h> -#include <asm/system.h> #include <asm/abs_addr.h> #include <asm/firmware.h> diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c index 6f01624f317f..4f51025f5b00 100644 --- a/arch/powerpc/oprofile/common.c +++ b/arch/powerpc/oprofile/common.c @@ -18,7 +18,6 @@ #include <linux/smp.h> #include <linux/errno.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/pmc.h> #include <asm/cputable.h> #include <asm/oprofile_impl.h> diff --git a/arch/powerpc/oprofile/op_model_7450.c b/arch/powerpc/oprofile/op_model_7450.c index f8d36f940e88..ff617246d128 100644 --- a/arch/powerpc/oprofile/op_model_7450.c +++ b/arch/powerpc/oprofile/op_model_7450.c @@ -19,7 +19,6 @@ #include <linux/init.h> #include <linux/smp.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/page.h> diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c index cb515cff745c..b9589c19ccda 100644 --- a/arch/powerpc/oprofile/op_model_cell.c +++ b/arch/powerpc/oprofile/op_model_cell.c @@ -34,7 +34,6 @@ #include <asm/ptrace.h> #include <asm/reg.h> #include <asm/rtas.h> -#include <asm/system.h> #include <asm/cell-regs.h> #include "../platforms/cell/interrupt.h" diff --git a/arch/powerpc/oprofile/op_model_fsl_emb.c b/arch/powerpc/oprofile/op_model_fsl_emb.c index d4e6507277b5..ccc1daa33aed 100644 --- a/arch/powerpc/oprofile/op_model_fsl_emb.c +++ b/arch/powerpc/oprofile/op_model_fsl_emb.c @@ -17,7 +17,6 @@ #include <linux/init.h> #include <linux/smp.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/reg_fsl_emb.h> diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c index e6bec74be131..95ae77dec3f6 100644 --- 
a/arch/powerpc/oprofile/op_model_power4.c +++ b/arch/powerpc/oprofile/op_model_power4.c @@ -14,7 +14,6 @@ #include <linux/smp.h> #include <asm/firmware.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/rtas.h> diff --git a/arch/powerpc/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c index a20afe45d936..9b801b8c8c5a 100644 --- a/arch/powerpc/oprofile/op_model_rs64.c +++ b/arch/powerpc/oprofile/op_model_rs64.c @@ -11,7 +11,6 @@ #include <linux/init.h> #include <linux/smp.h> #include <asm/ptrace.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/oprofile_impl.h> diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index c2e27ede07ec..02aee03e713c 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -116,14 +116,45 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) *addrp = mfspr(SPRN_SDAR); } +static inline u32 perf_flags_from_msr(struct pt_regs *regs) +{ + if (regs->msr & MSR_PR) + return PERF_RECORD_MISC_USER; + if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV) + return PERF_RECORD_MISC_HYPERVISOR; + return PERF_RECORD_MISC_KERNEL; +} + static inline u32 perf_get_misc_flags(struct pt_regs *regs) { unsigned long mmcra = regs->dsisr; unsigned long sihv = MMCRA_SIHV; unsigned long sipr = MMCRA_SIPR; + /* Not a PMU interrupt: Make up flags from regs->msr */ if (TRAP(regs) != 0xf00) - return 0; /* not a PMU interrupt */ + return perf_flags_from_msr(regs); + + /* + * If we don't support continuous sampling and this + * is not a marked event, same deal + */ + if ((ppmu->flags & PPMU_NO_CONT_SAMPLING) && + !(mmcra & MMCRA_SAMPLE_ENABLE)) + return perf_flags_from_msr(regs); + + /* + * If we don't have flags in MMCRA, rather than using + * the MSR, we intuit the flags from the address in + * SIAR which should give slightly more reliable + * results + */ + if (ppmu->flags & PPMU_NO_SIPR) { + unsigned long siar = mfspr(SPRN_SIAR); + if (siar >= PAGE_OFFSET) + return PERF_RECORD_MISC_KERNEL; + return PERF_RECORD_MISC_USER; + } if (ppmu->flags & PPMU_ALT_SIPR) { sihv = POWER6_MMCRA_SIHV; @@ -1299,13 +1330,18 @@ unsigned long perf_misc_flags(struct pt_regs *regs) */ unsigned long perf_instruction_pointer(struct pt_regs *regs) { - unsigned long ip; + unsigned long mmcra = regs->dsisr; + /* Not a PMU interrupt */ if (TRAP(regs) != 0xf00) - return regs->nip; /* not a PMU interrupt */ + return regs->nip; + + /* Processor doesn't support sampling non marked events */ + if ((ppmu->flags & PPMU_NO_CONT_SAMPLING) && + !(mmcra & MMCRA_SAMPLE_ENABLE)) + return regs->nip; - ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs); - return ip; + return mfspr(SPRN_SIAR) + perf_ip_adjust(regs); } static bool pmc_overflow(unsigned long val) diff --git a/arch/powerpc/perf/power4-pmu.c b/arch/powerpc/perf/power4-pmu.c index b4f1dda4d089..9103a1de864d 100644 --- a/arch/powerpc/perf/power4-pmu.c +++ b/arch/powerpc/perf/power4-pmu.c @@ -607,6 +607,7 @@ static struct power_pmu power4_pmu = { .n_generic = ARRAY_SIZE(p4_generic_events), .generic_events = p4_generic_events, .cache_events = &power4_cache_events, + .flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING, }; static int __init init_power4_pmu(void) diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c index 111eb25bb0b6..20139ceeacf6 100644 --- a/arch/powerpc/perf/ppc970-pmu.c +++ b/arch/powerpc/perf/ppc970-pmu.c @@ -487,6 +487,7 @@ static 
struct power_pmu ppc970_pmu = { .n_generic = ARRAY_SIZE(ppc970_generic_events), .generic_events = ppc970_generic_events, .cache_events = &ppc970_cache_events, + .flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING, }; static int __init init_ppc970_pmu(void) diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c index eda0fc2a3914..870b70f5d1bd 100644 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c @@ -3,6 +3,7 @@ #include <asm/io.h> #include <asm/time.h> #include <asm/mpc52xx.h> +#include <asm/switch_to.h> /* defined in lite5200_sleep.S and only used here */ extern void lite5200_low_power(void __iomem *sram, void __iomem *mbar); diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c index d111b024eafd..fb94d10e5a4d 100644 --- a/arch/powerpc/platforms/82xx/pq2.c +++ b/arch/powerpc/platforms/82xx/pq2.c @@ -17,7 +17,6 @@ #include <asm/cpm2.h> #include <asm/io.h> #include <asm/pci-bridge.h> -#include <asm/system.h> #include <platforms/82xx/pq2.h> diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c index 65eb792a0d00..a266ba876863 100644 --- a/arch/powerpc/platforms/83xx/km83xx.c +++ b/arch/powerpc/platforms/83xx/km83xx.c @@ -27,7 +27,6 @@ #include <linux/of_platform.h> #include <linux/of_device.h> -#include <asm/system.h> #include <linux/atomic.h> #include <asm/time.h> #include <asm/io.h> diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c index e36bc611dd6e..d440435e055c 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c @@ -26,7 +26,6 @@ #include <linux/of_platform.h> #include <linux/of_device.h> -#include <asm/system.h> #include <linux/atomic.h> #include <asm/time.h> #include <asm/io.h> diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c index 39849dd1b5bb..a494fa57bdf9 100644 --- a/arch/powerpc/platforms/83xx/mpc834x_itx.c +++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c @@ -25,7 +25,6 @@ #include <linux/root_dev.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <linux/atomic.h> #include <asm/time.h> #include <asm/io.h> diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c index 5828d8e97c37..553e793a4a93 100644 --- a/arch/powerpc/platforms/83xx/mpc834x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c @@ -25,7 +25,6 @@ #include <linux/root_dev.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <linux/atomic.h> #include <asm/time.h> #include <asm/io.h> diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c index ad8e4bcd7d55..1b1f6c8a1a12 100644 --- a/arch/powerpc/platforms/83xx/mpc836x_mds.c +++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c @@ -33,7 +33,6 @@ #include <linux/of_platform.h> #include <linux/of_device.h> -#include <asm/system.h> #include <linux/atomic.h> #include <asm/time.h> #include <asm/io.h> diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c index 8a81d7640b1f..26cb3e934722 100644 --- a/arch/powerpc/platforms/83xx/sbc834x.c +++ b/arch/powerpc/platforms/83xx/sbc834x.c @@ -27,7 +27,6 @@ #include <linux/root_dev.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <linux/atomic.h> #include <asm/time.h> #include <asm/io.h> diff --git a/arch/powerpc/platforms/83xx/suspend.c 
b/arch/powerpc/platforms/83xx/suspend.c index edf66870d978..1a046715e461 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -27,6 +27,7 @@ #include <asm/io.h> #include <asm/time.h> #include <asm/mpc6xx.h> +#include <asm/switch_to.h> #include <sysdev/fsl_soc.h> diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c index df69e99e511c..dd3617c531d7 100644 --- a/arch/powerpc/platforms/85xx/corenet_ds.c +++ b/arch/powerpc/platforms/85xx/corenet_ds.c @@ -18,7 +18,6 @@ #include <linux/interrupt.h> #include <linux/memblock.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c index d50056f424f6..18014629416d 100644 --- a/arch/powerpc/platforms/85xx/ge_imp3a.c +++ b/arch/powerpc/platforms/85xx/ge_imp3a.c @@ -24,7 +24,6 @@ #include <linux/of_platform.h> #include <linux/memblock.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c index 60120e55da41..3dc1bda3ddc3 100644 --- a/arch/powerpc/platforms/85xx/ksi8560.c +++ b/arch/powerpc/platforms/85xx/ksi8560.c @@ -20,7 +20,6 @@ #include <linux/seq_file.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c index f58872688d8f..585bd22b1406 100644 --- a/arch/powerpc/platforms/85xx/mpc8536_ds.c +++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c @@ -19,7 +19,6 @@ #include <linux/of_platform.h> #include <linux/memblock.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c index d19f675cb369..29ee8fcd75a2 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c @@ -19,7 +19,6 @@ #include <linux/seq_file.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index ab5f0bf19454..11156fb53d83 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c @@ -27,7 +27,6 @@ #include <linux/fsl_devices.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/page.h> #include <linux/atomic.h> diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index 6e23e3e34bd9..1fd91e9e0ffb 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c @@ -22,7 +22,6 @@ #include <linux/of_platform.h> #include <linux/memblock.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index f33662b46b8d..3754ddc00af7 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c @@ -35,7 +35,6 @@ #include <linux/phy.h> #include <linux/memblock.h> -#include <asm/system.h> #include <linux/atomic.h> #include <asm/time.h> #include <asm/io.h> diff 
--git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c index db214cd4c822..9848f9e39853 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c @@ -18,7 +18,6 @@ #include <linux/interrupt.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c index d8bd6563d9ca..dbaf44354f0d 100644 --- a/arch/powerpc/platforms/85xx/p1010rdb.c +++ b/arch/powerpc/platforms/85xx/p1010rdb.c @@ -16,7 +16,6 @@ #include <linux/interrupt.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/p1023_rds.c b/arch/powerpc/platforms/85xx/p1023_rds.c index 6b07398e4369..2990e8b13dc9 100644 --- a/arch/powerpc/platforms/85xx/p1023_rds.c +++ b/arch/powerpc/platforms/85xx/p1023_rds.c @@ -22,7 +22,6 @@ #include <linux/of_platform.h> #include <linux/of_device.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/p2041_rdb.c b/arch/powerpc/platforms/85xx/p2041_rdb.c index eda6ed5683e1..6541fa2630c0 100644 --- a/arch/powerpc/platforms/85xx/p2041_rdb.c +++ b/arch/powerpc/platforms/85xx/p2041_rdb.c @@ -16,7 +16,6 @@ #include <linux/interrupt.h> #include <linux/phy.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/p3041_ds.c b/arch/powerpc/platforms/85xx/p3041_ds.c index 96d99a374dcf..f238efa75891 100644 --- a/arch/powerpc/platforms/85xx/p3041_ds.c +++ b/arch/powerpc/platforms/85xx/p3041_ds.c @@ -18,7 +18,6 @@ #include <linux/interrupt.h> #include <linux/phy.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/p4080_ds.c b/arch/powerpc/platforms/85xx/p4080_ds.c index d1b21d7663e3..c92417dc6574 100644 --- a/arch/powerpc/platforms/85xx/p4080_ds.c +++ b/arch/powerpc/platforms/85xx/p4080_ds.c @@ -17,7 +17,6 @@ #include <linux/delay.h> #include <linux/interrupt.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/p5020_ds.c b/arch/powerpc/platforms/85xx/p5020_ds.c index e8cba5004fd8..17bef15a85ed 100644 --- a/arch/powerpc/platforms/85xx/p5020_ds.c +++ b/arch/powerpc/platforms/85xx/p5020_ds.c @@ -18,7 +18,6 @@ #include <linux/interrupt.h> #include <linux/phy.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c index 1677b8a22677..cd3a66bdb54b 100644 --- a/arch/powerpc/platforms/85xx/sbc8548.c +++ b/arch/powerpc/platforms/85xx/sbc8548.c @@ -30,7 +30,6 @@ #include <linux/fsl_devices.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/page.h> #include <linux/atomic.h> diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c index 3c3bbcc27566..b1be632ede43 100644 --- a/arch/powerpc/platforms/85xx/sbc8560.c +++ b/arch/powerpc/platforms/85xx/sbc8560.c @@ -21,7 +21,6 @@ #include <linux/seq_file.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include 
<asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c index b71919217756..b9c6daa07b66 100644 --- a/arch/powerpc/platforms/85xx/socrates.c +++ b/arch/powerpc/platforms/85xx/socrates.c @@ -29,7 +29,6 @@ #include <linux/seq_file.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c index 27ca3a7b04ab..e0508002b086 100644 --- a/arch/powerpc/platforms/85xx/stx_gp3.c +++ b/arch/powerpc/platforms/85xx/stx_gp3.c @@ -28,7 +28,6 @@ #include <linux/seq_file.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c index d7504cefe016..4d786c25d3e5 100644 --- a/arch/powerpc/platforms/85xx/tqm85xx.c +++ b/arch/powerpc/platforms/85xx/tqm85xx.c @@ -26,7 +26,6 @@ #include <linux/seq_file.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c index 503c21596c63..41c687550ea7 100644 --- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c +++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c @@ -21,7 +21,6 @@ #include <linux/interrupt.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c index ed58b6cfd60c..1fca663f1b25 100644 --- a/arch/powerpc/platforms/86xx/gef_ppc9a.c +++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c @@ -24,7 +24,6 @@ #include <linux/seq_file.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c index 710db69bd523..14e0e576bcbd 100644 --- a/arch/powerpc/platforms/86xx/gef_sbc310.c +++ b/arch/powerpc/platforms/86xx/gef_sbc310.c @@ -24,7 +24,6 @@ #include <linux/seq_file.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c index 4a13d2f4ac20..1638f43599f0 100644 --- a/arch/powerpc/platforms/86xx/gef_sbc610.c +++ b/arch/powerpc/platforms/86xx/gef_sbc610.c @@ -24,7 +24,6 @@ #include <linux/seq_file.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c index 13fa9a6403e6..bbc615206c67 100644 --- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c +++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c @@ -25,7 +25,6 @@ #include <linux/seq_file.h> #include <linux/of.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c index 569262ca499a..3755e61d7ecf 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c @@ -21,7 +21,6 @@ #include 
<linux/of_platform.h> #include <linux/memblock.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c index 22cc3571ae19..9982f57c98b9 100644 --- a/arch/powerpc/platforms/86xx/pic.c +++ b/arch/powerpc/platforms/86xx/pic.c @@ -12,7 +12,6 @@ #include <linux/interrupt.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/mpic.h> #include <asm/i8259.h> diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c index 51c8f331b671..e7007d0d949e 100644 --- a/arch/powerpc/platforms/86xx/sbc8641d.c +++ b/arch/powerpc/platforms/86xx/sbc8641d.c @@ -21,7 +21,6 @@ #include <linux/seq_file.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c index caaec29796b7..866feff83c91 100644 --- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c +++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c @@ -19,7 +19,6 @@ #include <asm/io.h> #include <asm/machdep.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/8xx_immap.h> #include <asm/cpm1.h> diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c index 45ed6cdc1310..5d98398c2f5e 100644 --- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c +++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c @@ -32,7 +32,6 @@ #include <asm/machdep.h> #include <asm/page.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/mpc8xx.h> #include <asm/8xx_immap.h> diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c index 528e00ddef31..8d21ab70e06c 100644 --- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c +++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c @@ -35,7 +35,6 @@ #include <asm/machdep.h> #include <asm/page.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/mpc8xx.h> #include <asm/8xx_immap.h> diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c index 2516c1cf8467..943c9d39aa16 100644 --- a/arch/powerpc/platforms/cell/beat_htab.c +++ b/arch/powerpc/platforms/cell/beat_htab.c @@ -95,7 +95,6 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group, unsigned long lpar_rc; u64 hpte_v, hpte_r, slot; - /* same as iseries */ if (vflags & HPTE_V_SECONDARY) return -1; @@ -319,7 +318,6 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group, unsigned long lpar_rc; u64 hpte_v, hpte_r, slot; - /* same as iseries */ if (vflags & HPTE_V_SECONDARY) return -1; diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index 4a255cf8cd17..49a65e2dfc71 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c @@ -38,7 +38,6 @@ #include <asm/machdep.h> #include <asm/cputable.h> #include <asm/firmware.h> -#include <asm/system.h> #include <asm/rtas.h> #include <asm/cputhreads.h> diff --git a/arch/powerpc/platforms/embedded6xx/c2k.c b/arch/powerpc/platforms/embedded6xx/c2k.c index 8cab5731850f..ebd3963fdf91 100644 --- a/arch/powerpc/platforms/embedded6xx/c2k.c +++ b/arch/powerpc/platforms/embedded6xx/c2k.c @@ -23,7 +23,6 @@ #include <asm/machdep.h> #include <asm/prom.h> -#include <asm/system.h> #include <asm/time.h> #include <mm/mmu_decl.h> diff --git 
a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c index ab51b21b4bd7..8c305c7c8977 100644 --- a/arch/powerpc/platforms/embedded6xx/holly.c +++ b/arch/powerpc/platforms/embedded6xx/holly.c @@ -28,7 +28,6 @@ #include <linux/of_platform.h> #include <linux/module.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/prom.h> diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c index 74ccce36baed..beeaf4a173e1 100644 --- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c @@ -32,7 +32,6 @@ #include <linux/tty.h> #include <linux/serial_core.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/prom.h> diff --git a/arch/powerpc/platforms/embedded6xx/prpmc2800.c b/arch/powerpc/platforms/embedded6xx/prpmc2800.c index 670035f49a69..d455f08bea53 100644 --- a/arch/powerpc/platforms/embedded6xx/prpmc2800.c +++ b/arch/powerpc/platforms/embedded6xx/prpmc2800.c @@ -17,7 +17,6 @@ #include <asm/machdep.h> #include <asm/prom.h> -#include <asm/system.h> #include <asm/time.h> #include <mm/mmu_decl.h> diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c index e0ed3c71d69b..c458b60d14c4 100644 --- a/arch/powerpc/platforms/embedded6xx/storcenter.c +++ b/arch/powerpc/platforms/embedded6xx/storcenter.c @@ -16,7 +16,6 @@ #include <linux/initrd.h> #include <linux/of_platform.h> -#include <asm/system.h> #include <asm/time.h> #include <asm/prom.h> #include <asm/mpic.h> diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c index 8b0c2082a783..64fde058e545 100644 --- a/arch/powerpc/platforms/fsl_uli1575.c +++ b/arch/powerpc/platforms/fsl_uli1575.c @@ -15,7 +15,6 @@ #include <linux/interrupt.h> #include <linux/mc146818rtc.h> -#include <asm/system.h> #include <asm/pci-bridge.h> #define ULI_PIRQA 0x08 diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index 3b7545a51aa9..cb1b0b35a0c6 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c @@ -47,7 +47,6 @@ #include <asm/processor.h> #include <asm/sections.h> #include <asm/prom.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c index eac569dee27c..b4a369dac3a8 100644 --- a/arch/powerpc/platforms/maple/time.c +++ b/arch/powerpc/platforms/maple/time.c @@ -27,7 +27,6 @@ #include <asm/sections.h> #include <asm/prom.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/machdep.h> diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index e777ad471a48..2ed9212d7d59 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -32,13 +32,13 @@ #include <linux/gfp.h> #include <asm/prom.h> -#include <asm/system.h> #include <asm/iommu.h> #include <asm/machdep.h> #include <asm/mpic.h> #include <asm/smp.h> #include <asm/time.h> #include <asm/mmu.h> +#include <asm/debug.h> #include <pcmcia/ss.h> #include <pcmcia/cistpl.h> diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c index 84d7fd9bcc69..3e91ef538114 100644 --- a/arch/powerpc/platforms/powermac/bootx_init.c +++ 
b/arch/powerpc/platforms/powermac/bootx_init.c @@ -19,6 +19,7 @@ #include <asm/bootx.h> #include <asm/btext.h> #include <asm/io.h> +#include <asm/setup.h> #undef DEBUG #define SET_BOOT_BAT diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c index 1fc386a23f18..64171198535c 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_32.c +++ b/arch/powerpc/platforms/powermac/cpufreq_32.c @@ -33,9 +33,9 @@ #include <asm/sections.h> #include <asm/cputable.h> #include <asm/time.h> -#include <asm/system.h> #include <asm/mpic.h> #include <asm/keylargo.h> +#include <asm/switch_to.h> /* WARNING !!! This will cause calibrate_delay() to be called, * but this is an __init function ! So you MUST go edit diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index da18b26dcc6f..014d06e6d46b 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c @@ -23,7 +23,6 @@ #include <linux/spinlock.h> #include <asm/sections.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/nvram.h> diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 970ea1de4298..141f8899a633 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -57,7 +57,6 @@ #include <asm/reg.h> #include <asm/sections.h> #include <asm/prom.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/pci-bridge.h> diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index 11c9fce43b5b..8680bb69795d 100644 --- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c @@ -26,7 +26,6 @@ #include <asm/sections.h> #include <asm/prom.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/machdep.h> diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 17210c526c52..3ef46254c35b 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -25,7 +25,6 @@ #include <asm/machdep.h> #include <asm/cputable.h> #include <asm/firmware.h> -#include <asm/system.h> #include <asm/rtas.h> #include <asm/vdso_datapage.h> #include <asm/cputhreads.h> diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index 8bd6ba542691..de2aea421707 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c @@ -29,6 +29,7 @@ #include <asm/prom.h> #include <asm/udbg.h> #include <asm/lv1call.h> +#include <asm/setup.h> #include "platform.h" diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index 0e8656370063..a7648543c59e 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -25,10 +25,10 @@ #include <linux/debugfs.h> #include <linux/spinlock.h> #include <asm/smp.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/firmware.h> #include <asm/lppaca.h> +#include <asm/debug.h> #include "plpar_wrappers.h" diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 8011088392d3..309d38ef7322 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c @@ -984,7 +984,8 @@ int __exit eeh_ops_unregister(const char *name) */ void __init eeh_init(void) { - struct device_node *phb, *np; + struct pci_controller *hose, *tmp; + 
struct device_node *phb; int ret; /* call platform initialization function */ @@ -1000,19 +1001,9 @@ void __init eeh_init(void) raw_spin_lock_init(&confirm_error_lock); - np = of_find_node_by_path("/rtas"); - if (np == NULL) - return; - - /* Enable EEH for all adapters. Note that eeh requires buid's */ - for (phb = of_find_node_by_name(NULL, "pci"); phb; - phb = of_find_node_by_name(phb, "pci")) { - unsigned long buid; - - buid = get_phb_buid(phb); - if (buid == 0 || !of_node_to_eeh_dev(phb)) - continue; - + /* Enable EEH for all adapters */ + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + phb = hose->dn; traverse_pci_devices(phb, eeh_early_enable, NULL); } diff --git a/arch/powerpc/platforms/pseries/eeh_dev.c b/arch/powerpc/platforms/pseries/eeh_dev.c index f3aed7dcae95..c4507d095900 100644 --- a/arch/powerpc/platforms/pseries/eeh_dev.c +++ b/arch/powerpc/platforms/pseries/eeh_dev.c @@ -62,7 +62,7 @@ void * __devinit eeh_dev_init(struct device_node *dn, void *data) } /* Associate EEH device with OF node */ - dn->edev = edev; + PCI_DN(dn)->edev = edev; edev->dn = dn; edev->phb = phb; diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index c986d08d0807..64c97d8ac0c5 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -23,7 +23,6 @@ #include <linux/delay.h> #include <linux/sched.h> /* for idle_task_exit */ #include <linux/cpu.h> -#include <asm/system.h> #include <asm/prom.h> #include <asm/rtas.h> #include <asm/firmware.h> diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c index 1a709bc48ce1..ef9d9d84c7d5 100644 --- a/arch/powerpc/platforms/pseries/io_event_irq.c +++ b/arch/powerpc/platforms/pseries/io_event_irq.c @@ -63,73 +63,9 @@ EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list); static int ioei_check_exception_token; -/* pSeries event log format */ - -/* Two bytes ASCII section IDs */ -#define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H') -#define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H') -#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S') -#define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H') -#define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T') -#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S') -#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H') -#define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W') -#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P') -#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R') -#define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M') -#define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P') -#define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E') -#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I') -#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H') -#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D') - -/* Vendor specific Platform Event Log Format, Version 6, section header */ -struct pseries_elog_section { - uint16_t id; /* 0x00 2-byte ASCII section ID */ - uint16_t length; /* 0x02 Section length in bytes */ - uint8_t version; /* 0x04 Section version */ - uint8_t subtype; /* 0x05 Section subtype */ - uint16_t creator_component; /* 0x06 Creator component ID */ - uint8_t data[]; /* 0x08 Start of section data */ -}; - static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned; /** - * Find data portion of a specific section in RTAS extended event log. 
- * @elog: RTAS error/event log. - * @sect_id: secsion ID. - * - * Return: - * pointer to the section data of the specified section - * NULL if not found - */ -static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *elog, - uint16_t sect_id) -{ - struct rtas_ext_event_log_v6 *xelog = - (struct rtas_ext_event_log_v6 *) elog->buffer; - struct pseries_elog_section *sect; - unsigned char *p, *log_end; - - /* Check that we understand the format */ - if (elog->extended_log_length < sizeof(struct rtas_ext_event_log_v6) || - xelog->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG || - xelog->company_id != RTAS_V6EXT_COMPANY_ID_IBM) - return NULL; - - log_end = elog->buffer + elog->extended_log_length; - p = xelog->vendor_log; - while (p < log_end) { - sect = (struct pseries_elog_section *)p; - if (sect->id == sect_id) - return sect; - p += sect->length; - } - return NULL; -} - -/** * Find the data portion of an IO Event section from event log. * @elog: RTAS error/event log. * @@ -138,7 +74,7 @@ static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *el */ static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog) { - struct pseries_elog_section *sect; + struct pseries_errorlog *sect; /* We should only ever get called for io-event interrupts, but if * we do get called for another type then something went wrong so @@ -152,7 +88,7 @@ static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog) return NULL; } - sect = find_xelog_section(elog, PSERIES_ELOG_SECT_ID_IO_EVENT); + sect = get_pseries_errorlog(elog, PSERIES_ELOG_SECT_ID_IO_EVENT); if (unlikely(!sect)) { printk_once(KERN_WARNING "io_event_irq: RTAS extended event " "log does not contain an IO Event section. " diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index c442f2b1980f..0915b1ad66ce 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -809,8 +809,7 @@ machine_arch_initcall(pseries, find_existing_ddw_windows); static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, struct ddw_query_response *query) { - struct device_node *dn; - struct pci_dn *pcidn; + struct eeh_dev *edev; u32 cfg_addr; u64 buid; int ret; @@ -821,12 +820,12 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, * Retrieve them from the pci device, not the node with the * dma-window property */ - dn = pci_device_to_OF_node(dev); - pcidn = PCI_DN(dn); - cfg_addr = pcidn->eeh_config_addr; - if (pcidn->eeh_pe_config_addr) - cfg_addr = pcidn->eeh_pe_config_addr; - buid = pcidn->phb->buid; + edev = pci_dev_to_eeh_dev(dev); + cfg_addr = edev->config_addr; + if (edev->pe_config_addr) + cfg_addr = edev->pe_config_addr; + buid = edev->phb->buid; + ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, cfg_addr, BUID_HI(buid), BUID_LO(buid)); dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x" @@ -839,8 +838,7 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail, struct ddw_create_response *create, int page_shift, int window_shift) { - struct device_node *dn; - struct pci_dn *pcidn; + struct eeh_dev *edev; u32 cfg_addr; u64 buid; int ret; @@ -851,12 +849,11 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail, * Retrieve them from the pci device, not the node with the * dma-window property */ - dn = pci_device_to_OF_node(dev); - pcidn = PCI_DN(dn); - cfg_addr = pcidn->eeh_config_addr; - if (pcidn->eeh_pe_config_addr) - cfg_addr = pcidn->eeh_pe_config_addr; - buid = 
pcidn->phb->buid; + edev = pci_dev_to_eeh_dev(dev); + cfg_addr = edev->config_addr; + if (edev->pe_config_addr) + cfg_addr = edev->pe_config_addr; + buid = edev->phb->buid; do { /* extra outputs are LIOBN and dma-addr (hi, lo) */ diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c index a12e95af6933..41a34bc4a9a2 100644 --- a/arch/powerpc/platforms/pseries/processor_idle.c +++ b/arch/powerpc/platforms/pseries/processor_idle.c @@ -14,9 +14,9 @@ #include <asm/paca.h> #include <asm/reg.h> -#include <asm/system.h> #include <asm/machdep.h> #include <asm/firmware.h> +#include <asm/runlatch.h> #include "plpar_wrappers.h" #include "pseries.h" diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 086d2ae4e06a..c4dfccd3a3d9 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -16,37 +16,15 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/* Change Activity: - * 2001/09/21 : engebret : Created with minimal EPOW and HW exception support. - * End Change Activity - */ - -#include <linux/errno.h> -#include <linux/threads.h> -#include <linux/kernel_stat.h> -#include <linux/signal.h> #include <linux/sched.h> -#include <linux/ioport.h> #include <linux/interrupt.h> -#include <linux/timex.h> -#include <linux/init.h> -#include <linux/delay.h> #include <linux/irq.h> -#include <linux/random.h> -#include <linux/sysrq.h> -#include <linux/bitops.h> - -#include <asm/uaccess.h> -#include <asm/system.h> -#include <asm/io.h> -#include <asm/pgtable.h> -#include <asm/irq.h> -#include <asm/cache.h> -#include <asm/prom.h> -#include <asm/ptrace.h> +#include <linux/of.h> +#include <linux/fs.h> +#include <linux/reboot.h> + #include <asm/machdep.h> #include <asm/rtas.h> -#include <asm/udbg.h> #include <asm/firmware.h> #include "pseries.h" @@ -57,7 +35,6 @@ static DEFINE_SPINLOCK(ras_log_buf_lock); static char global_mce_data_buf[RTAS_ERROR_LOG_MAX]; static DEFINE_PER_CPU(__u64, mce_data_buf); -static int ras_get_sensor_state_token; static int ras_check_exception_token; #define EPOW_SENSOR_TOKEN 9 @@ -75,7 +52,6 @@ static int __init init_ras_IRQ(void) { struct device_node *np; - ras_get_sensor_state_token = rtas_token("get-sensor-state"); ras_check_exception_token = rtas_token("check-exception"); /* Internal Errors */ @@ -95,26 +71,126 @@ static int __init init_ras_IRQ(void) return 0; } -__initcall(init_ras_IRQ); +subsys_initcall(init_ras_IRQ); -/* - * Handle power subsystem events (EPOW). - * - * Presently we just log the event has occurred. This should be fixed - * to examine the type of power failure and take appropriate action where - * the time horizon permits something useful to be done. 
- */ +#define EPOW_SHUTDOWN_NORMAL 1 +#define EPOW_SHUTDOWN_ON_UPS 2 +#define EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS 3 +#define EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH 4 + +static void handle_system_shutdown(char event_modifier) +{ + switch (event_modifier) { + case EPOW_SHUTDOWN_NORMAL: + pr_emerg("Firmware initiated power off"); + orderly_poweroff(1); + break; + + case EPOW_SHUTDOWN_ON_UPS: + pr_emerg("Loss of power reported by firmware, system is " + "running on UPS/battery"); + break; + + case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS: + pr_emerg("Loss of system critical functions reported by " + "firmware"); + pr_emerg("Check RTAS error log for details"); + orderly_poweroff(1); + break; + + case EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH: + pr_emerg("Ambient temperature too high reported by firmware"); + pr_emerg("Check RTAS error log for details"); + orderly_poweroff(1); + break; + + default: + pr_err("Unknown power/cooling shutdown event (modifier %d)", + event_modifier); + } +} + +struct epow_errorlog { + unsigned char sensor_value; + unsigned char event_modifier; + unsigned char extended_modifier; + unsigned char reserved; + unsigned char platform_reason; +}; + +#define EPOW_RESET 0 +#define EPOW_WARN_COOLING 1 +#define EPOW_WARN_POWER 2 +#define EPOW_SYSTEM_SHUTDOWN 3 +#define EPOW_SYSTEM_HALT 4 +#define EPOW_MAIN_ENCLOSURE 5 +#define EPOW_POWER_OFF 7 + +void rtas_parse_epow_errlog(struct rtas_error_log *log) +{ + struct pseries_errorlog *pseries_log; + struct epow_errorlog *epow_log; + char action_code; + char modifier; + + pseries_log = get_pseries_errorlog(log, PSERIES_ELOG_SECT_ID_EPOW); + if (pseries_log == NULL) + return; + + epow_log = (struct epow_errorlog *)pseries_log->data; + action_code = epow_log->sensor_value & 0xF; /* bottom 4 bits */ + modifier = epow_log->event_modifier & 0xF; /* bottom 4 bits */ + + switch (action_code) { + case EPOW_RESET: + pr_err("Non critical power or cooling issue cleared"); + break; + + case EPOW_WARN_COOLING: + pr_err("Non critical cooling issue reported by firmware"); + pr_err("Check RTAS error log for details"); + break; + + case EPOW_WARN_POWER: + pr_err("Non critical power issue reported by firmware"); + pr_err("Check RTAS error log for details"); + break; + + case EPOW_SYSTEM_SHUTDOWN: + handle_system_shutdown(epow_log->event_modifier); + break; + + case EPOW_SYSTEM_HALT: + pr_emerg("Firmware initiated power off"); + orderly_poweroff(1); + break; + + case EPOW_MAIN_ENCLOSURE: + case EPOW_POWER_OFF: + pr_emerg("Critical power/cooling issue reported by firmware"); + pr_emerg("Check RTAS error log for details"); + pr_emerg("Immediate power off"); + emergency_sync(); + kernel_power_off(); + break; + + default: + pr_err("Unknown power/cooling event (action code %d)", + action_code); + } +} + +/* Handle environmental and power warning (EPOW) interrupts. 
*/ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id) { - int status = 0xdeadbeef; - int state = 0; + int status; + int state; int critical; - status = rtas_call(ras_get_sensor_state_token, 2, 2, &state, - EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX); + status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state); if (state > 3) - critical = 1; /* Time Critical */ + critical = 1; /* Time Critical */ else critical = 0; @@ -123,18 +199,14 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id) status = rtas_call(ras_check_exception_token, 6, 1, NULL, RTAS_VECTOR_EXTERNAL_INTERRUPT, virq_to_hw(irq), - RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, + RTAS_EPOW_WARNING, critical, __pa(&ras_log_buf), rtas_get_error_log_max()); - udbg_printf("EPOW <0x%lx 0x%x 0x%x>\n", - *((unsigned long *)&ras_log_buf), status, state); - printk(KERN_WARNING "EPOW <0x%lx 0x%x 0x%x>\n", - *((unsigned long *)&ras_log_buf), status, state); - - /* format and print the extended information */ log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0); + rtas_parse_epow_errlog((struct rtas_error_log *)ras_log_buf); + spin_unlock(&ras_log_buf_lock); return IRQ_HANDLED; } @@ -150,7 +222,7 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id) static irqreturn_t ras_error_interrupt(int irq, void *dev_id) { struct rtas_error_log *rtas_elog; - int status = 0xdeadbeef; + int status; int fatal; spin_lock(&ras_log_buf_lock); @@ -158,7 +230,7 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) status = rtas_call(ras_check_exception_token, 6, 1, NULL, RTAS_VECTOR_EXTERNAL_INTERRUPT, virq_to_hw(irq), - RTAS_INTERNAL_ERROR, 1 /*Time Critical */, + RTAS_INTERNAL_ERROR, 1 /* Time Critical */, __pa(&ras_log_buf), rtas_get_error_log_max()); @@ -173,24 +245,13 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal); if (fatal) { - udbg_printf("Fatal HW Error <0x%lx 0x%x>\n", - *((unsigned long *)&ras_log_buf), status); - printk(KERN_EMERG "Error: Fatal hardware error <0x%lx 0x%x>\n", - *((unsigned long *)&ras_log_buf), status); - -#ifndef DEBUG_RTAS_POWER_OFF - /* Don't actually power off when debugging so we can test - * without actually failing while injecting errors. - * Error data will not be logged to syslog. 
- */ - ppc_md.power_off(); -#endif + pr_emerg("Fatal hardware error reported by firmware"); + pr_emerg("Check RTAS error log for details"); + pr_emerg("Immediate power off"); + emergency_sync(); + kernel_power_off(); } else { - udbg_printf("Recoverable HW Error <0x%lx 0x%x>\n", - *((unsigned long *)&ras_log_buf), status); - printk(KERN_WARNING - "Warning: Recoverable hardware error <0x%lx 0x%x>\n", - *((unsigned long *)&ras_log_buf), status); + pr_err("Recoverable hardware error reported by firmware"); } spin_unlock(&ras_log_buf_lock); diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index eadba9521a35..e16bb8d48550 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -37,7 +37,6 @@ #include <asm/machdep.h> #include <asm/cputable.h> #include <asm/firmware.h> -#include <asm/system.h> #include <asm/rtas.h> #include <asm/pSeries_reconfig.h> #include <asm/mpic.h> diff --git a/arch/powerpc/platforms/wsp/chroma.c b/arch/powerpc/platforms/wsp/chroma.c index ca6fa26f6e63..8ef53bc2e70e 100644 --- a/arch/powerpc/platforms/wsp/chroma.c +++ b/arch/powerpc/platforms/wsp/chroma.c @@ -17,7 +17,6 @@ #include <linux/time.h> #include <asm/machdep.h> -#include <asm/system.h> #include <asm/udbg.h> #include "ics.h" diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c index 0c1ae06d0be1..508ec8282b96 100644 --- a/arch/powerpc/platforms/wsp/psr2.c +++ b/arch/powerpc/platforms/wsp/psr2.c @@ -17,7 +17,6 @@ #include <linux/time.h> #include <asm/machdep.h> -#include <asm/system.h> #include <asm/udbg.h> #include "ics.h" diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c index 763014cd1e62..1526551f9fe6 100644 --- a/arch/powerpc/platforms/wsp/wsp_pci.c +++ b/arch/powerpc/platforms/wsp/wsp_pci.c @@ -27,6 +27,7 @@ #include <asm/ppc-pci.h> #include <asm/iommu.h> #include <asm/io-workarounds.h> +#include <asm/debug.h> #include "wsp.h" #include "wsp_pci.h" diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c index bf6c7cc0a6af..4dd534194ae8 100644 --- a/arch/powerpc/sysdev/cpm_common.c +++ b/arch/powerpc/sysdev/cpm_common.c @@ -26,7 +26,6 @@ #include <asm/udbg.h> #include <asm/io.h> -#include <asm/system.h> #include <asm/rheap.h> #include <asm/cpm.h> diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index e8f385fbf549..c449dbd1c938 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -31,7 +31,6 @@ #include <linux/fs_enet_pd.h> #include <linux/fs_uart_pd.h> -#include <asm/system.h> #include <linux/atomic.h> #include <asm/io.h> #include <asm/irq.h> diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index 5287e95cec3a..0968b66b4cf9 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/bitmap.h> #include <asm/msi_bitmap.h> +#include <asm/setup.h> int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num) { diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c index 2370e1c63379..1fd0717ade02 100644 --- a/arch/powerpc/sysdev/tsi108_dev.c +++ b/arch/powerpc/sysdev/tsi108_dev.c @@ -22,7 +22,6 @@ #include <linux/of_net.h> #include <asm/tsi108.h> -#include <asm/system.h> #include <linux/atomic.h> #include <asm/io.h> #include <asm/irq.h> diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 68a9cbbab450..0f3ab06d2222 100644 --- 
a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -41,6 +41,7 @@ #include <asm/spu_priv1.h> #include <asm/setjmp.h> #include <asm/reg.h> +#include <asm/debug.h> #ifdef CONFIG_PPC64 #include <asm/hvcall.h> diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h index ffd1ac255f19..9178db6db0a5 100644 --- a/arch/s390/crypto/crypt_s390.h +++ b/arch/s390/crypto/crypt_s390.h @@ -17,6 +17,7 @@ #define _CRYPTO_ARCH_S390_CRYPT_S390_H #include <asm/errno.h> +#include <asm/facility.h> #define CRYPT_S390_OP_MASK 0xFF00 #define CRYPT_S390_FUNC_MASK 0x00FF diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 8517d2ae3b5c..748347baecb8 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -15,7 +15,7 @@ #include <linux/compiler.h> #include <linux/types.h> -#include <asm/system.h> +#include <asm/cmpxchg.h> #define ATOMIC_INIT(i) { (i) } diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h new file mode 100644 index 000000000000..451273ad4d34 --- /dev/null +++ b/arch/s390/include/asm/barrier.h @@ -0,0 +1,35 @@ +/* + * Copyright IBM Corp. 1999, 2009 + * + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +/* + * Force strict CPU ordering. + * And yes, this is required on UP too when we're talking + * to devices. + * + * This is very similar to the ppc eieio/sync instruction in that is + * does a checkpoint syncronisation & makes sure that + * all memory ops have completed wrt other CPU's ( see 7-15 POP DJB ). + */ + +#define eieio() asm volatile("bcr 15,0" : : : "memory") +#define SYNC_OTHER_CORES(x) eieio() +#define mb() eieio() +#define rmb() eieio() +#define wmb() eieio() +#define read_barrier_depends() do { } while(0) +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() + +#define set_mb(var, value) do { var = value; mb(); } while (0) + +#endif /* __ASM_BARRIER_H */ diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h new file mode 100644 index 000000000000..ecde9417d669 --- /dev/null +++ b/arch/s390/include/asm/ctl_reg.h @@ -0,0 +1,76 @@ +/* + * Copyright IBM Corp. 
1999, 2009 + * + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#ifndef __ASM_CTL_REG_H +#define __ASM_CTL_REG_H + +#ifdef __s390x__ + +#define __ctl_load(array, low, high) ({ \ + typedef struct { char _[sizeof(array)]; } addrtype; \ + asm volatile( \ + " lctlg %1,%2,%0\n" \ + : : "Q" (*(addrtype *)(&array)), \ + "i" (low), "i" (high)); \ + }) + +#define __ctl_store(array, low, high) ({ \ + typedef struct { char _[sizeof(array)]; } addrtype; \ + asm volatile( \ + " stctg %1,%2,%0\n" \ + : "=Q" (*(addrtype *)(&array)) \ + : "i" (low), "i" (high)); \ + }) + +#else /* __s390x__ */ + +#define __ctl_load(array, low, high) ({ \ + typedef struct { char _[sizeof(array)]; } addrtype; \ + asm volatile( \ + " lctl %1,%2,%0\n" \ + : : "Q" (*(addrtype *)(&array)), \ + "i" (low), "i" (high)); \ +}) + +#define __ctl_store(array, low, high) ({ \ + typedef struct { char _[sizeof(array)]; } addrtype; \ + asm volatile( \ + " stctl %1,%2,%0\n" \ + : "=Q" (*(addrtype *)(&array)) \ + : "i" (low), "i" (high)); \ + }) + +#endif /* __s390x__ */ + +#define __ctl_set_bit(cr, bit) ({ \ + unsigned long __dummy; \ + __ctl_store(__dummy, cr, cr); \ + __dummy |= 1UL << (bit); \ + __ctl_load(__dummy, cr, cr); \ +}) + +#define __ctl_clear_bit(cr, bit) ({ \ + unsigned long __dummy; \ + __ctl_store(__dummy, cr, cr); \ + __dummy &= ~(1UL << (bit)); \ + __ctl_load(__dummy, cr, cr); \ +}) + +#ifdef CONFIG_SMP + +extern void smp_ctl_set_bit(int cr, int bit); +extern void smp_ctl_clear_bit(int cr, int bit); +#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit) +#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit) + +#else + +#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit) +#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit) + +#endif /* CONFIG_SMP */ + +#endif /* __ASM_CTL_REG_H */ diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 547f1a6a35d4..c4ee39f7a4d6 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -129,7 +129,6 @@ typedef s390_fp_regs compat_elf_fpregset_t; typedef s390_compat_regs compat_elf_gregset_t; #include <linux/sched.h> /* for task_struct */ -#include <asm/system.h> /* for save_access_regs */ #include <asm/mmu_context.h> #include <asm/vdso.h> diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h new file mode 100644 index 000000000000..c4a93d6327fa --- /dev/null +++ b/arch/s390/include/asm/exec.h @@ -0,0 +1,12 @@ +/* + * Copyright IBM Corp. 1999, 2009 + * + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#ifndef __ASM_EXEC_H +#define __ASM_EXEC_H + +extern unsigned long arch_align_stack(unsigned long sp); + +#endif /* __ASM_EXEC_H */ diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h new file mode 100644 index 000000000000..1e5b27edc0c9 --- /dev/null +++ b/arch/s390/include/asm/facility.h @@ -0,0 +1,63 @@ +/* + * Copyright IBM Corp. 1999, 2009 + * + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#ifndef __ASM_FACILITY_H +#define __ASM_FACILITY_H + +#include <linux/string.h> +#include <linux/preempt.h> +#include <asm/lowcore.h> + +#define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */ + +/* + * The test_facility function uses the bit odering where the MSB is bit 0. + * That makes it easier to query facility bits with the bit number as + * documented in the Principles of Operation. 
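(A usage sketch for the facility test documented above; the facility number is one real example, but the caller shown is hypothetical and test_facility() itself is defined just below:)

	/* Facility bits are numbered as in the Principles of Operation,
	 * with the MSB of the first facility byte being bit 0. */
	if (test_facility(76))			/* message-security-assist ext. 3 */
		enable_msa3_ciphers();		/* hypothetical caller */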
+ */ +static inline int test_facility(unsigned long nr) +{ + unsigned char *ptr; + + if (nr >= MAX_FACILITY_BIT) + return 0; + ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3); + return (*ptr & (0x80 >> (nr & 7))) != 0; +} + +/** + * stfle - Store facility list extended + * @stfle_fac_list: array where facility list can be stored + * @size: size of passed in array in double words + */ +static inline void stfle(u64 *stfle_fac_list, int size) +{ + unsigned long nr; + + preempt_disable(); + S390_lowcore.stfl_fac_list = 0; + asm volatile( + " .insn s,0xb2b10000,0(0)\n" /* stfl */ + "0:\n" + EX_TABLE(0b, 0b) + : "=m" (S390_lowcore.stfl_fac_list)); + nr = 4; /* bytes stored by stfl */ + memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4); + if (S390_lowcore.stfl_fac_list & 0x01000000) { + /* More facility bits available with stfle */ + register unsigned long reg0 asm("0") = size - 1; + + asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */ + : "+d" (reg0) + : "a" (stfle_fac_list) + : "memory", "cc"); + nr = (reg0 + 1) * 8; /* # bytes stored by stfle */ + } + memset((char *) stfle_fac_list + nr, 0, size * 8 - nr); + preempt_enable(); +} + +#endif /* __ASM_FACILITY_H */ diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h index 82b32a100c7d..96076676e224 100644 --- a/arch/s390/include/asm/kvm.h +++ b/arch/s390/include/asm/kvm.h @@ -41,4 +41,15 @@ struct kvm_debug_exit_arch { struct kvm_guest_debug_arch { }; +#define KVM_SYNC_PREFIX (1UL << 0) +#define KVM_SYNC_GPRS (1UL << 1) +#define KVM_SYNC_ACRS (1UL << 2) +#define KVM_SYNC_CRS (1UL << 3) +/* definition of registers in kvm_run */ +struct kvm_sync_regs { + __u64 prefix; /* prefix register */ + __u64 gprs[16]; /* general purpose registers */ + __u32 acrs[16]; /* access registers */ + __u64 crs[16]; /* control registers */ +}; #endif diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index b0c235cb6ad5..7343872890a2 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -220,18 +220,17 @@ struct kvm_s390_float_interrupt { struct list_head list; atomic_t active; int next_rr_cpu; - unsigned long idle_mask [(64 + sizeof(long) - 1) / sizeof(long)]; - struct kvm_s390_local_interrupt *local_int[64]; + unsigned long idle_mask[(KVM_MAX_VCPUS + sizeof(long) - 1) + / sizeof(long)]; + struct kvm_s390_local_interrupt *local_int[KVM_MAX_VCPUS]; }; struct kvm_vcpu_arch { struct kvm_s390_sie_block *sie_block; - unsigned long guest_gprs[16]; s390_fp_regs host_fpregs; unsigned int host_acrs[NUM_ACRS]; s390_fp_regs guest_fpregs; - unsigned int guest_acrs[NUM_ACRS]; struct kvm_s390_local_interrupt local_int; struct hrtimer ckc_timer; struct tasklet_struct tasklet; @@ -246,6 +245,9 @@ struct kvm_vm_stat { u32 remote_tlb_flush; }; +struct kvm_arch_memory_slot { +}; + struct kvm_arch{ struct sca_block *sca; debug_info_t *dbf; @@ -253,5 +255,5 @@ struct kvm_arch{ struct gmap *gmap; }; -extern int sie64a(struct kvm_s390_sie_block *, unsigned long *); +extern int sie64a(struct kvm_s390_sie_block *, u64 *); #endif diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index 4506791adcd5..1c7d6ce328bf 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -21,4 +21,18 @@ typedef struct { .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \ .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list), +static inline int tprot(unsigned long addr) +{ + int rc = -EFAULT; + + asm volatile( + " tprot 0(%1),0\n" + "0: ipm %0\n" + 
" srl %0,28\n" + "1:\n" + EX_TABLE(0b,1b) + : "+d" (rc) : "a" (addr) : "cc"); + return rc; +} + #endif diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 5682f160ff82..5d09e405c54d 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -12,6 +12,7 @@ #include <asm/pgalloc.h> #include <asm/uaccess.h> #include <asm/tlbflush.h> +#include <asm/ctl_reg.h> #include <asm-generic/mm_hooks.h> static inline int init_new_context(struct task_struct *tsk, diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index d25843a6a915..d499b30ea487 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -14,6 +14,7 @@ #define __ASM_S390_PROCESSOR_H #include <linux/linkage.h> +#include <linux/irqflags.h> #include <asm/cpu.h> #include <asm/page.h> #include <asm/ptrace.h> @@ -156,6 +157,14 @@ unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr) #define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15]) +static inline unsigned short stap(void) +{ + unsigned short cpu_address; + + asm volatile("stap %0" : "=m" (cpu_address)); + return cpu_address; +} + /* * Give up the time slice of the virtual PU. */ @@ -304,6 +313,21 @@ static inline void __noreturn disabled_wait(unsigned long code) } /* + * Use to set psw mask except for the first byte which + * won't be changed by this function. + */ +static inline void +__set_psw_mask(unsigned long mask) +{ + __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8))); +} + +#define local_mcck_enable() \ + __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK) +#define local_mcck_disable() \ + __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT) + +/* * Basic Machine Check/Program Check Handler. */ diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 097183c70407..b21e46e5d4b8 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -140,6 +140,20 @@ extern char vmpoff_cmd[]; #define NSS_NAME_SIZE 8 extern char kernel_nss_name[]; +#ifdef CONFIG_PFAULT +extern int pfault_init(void); +extern void pfault_fini(void); +#else /* CONFIG_PFAULT */ +#define pfault_init() ({-1;}) +#define pfault_fini() do { } while (0) +#endif /* CONFIG_PFAULT */ + +extern void cmma_init(void); + +extern void (*_machine_restart)(char *command); +extern void (*_machine_halt)(void); +extern void (*_machine_power_off)(void); + #else /* __ASSEMBLY__ */ #ifndef __s390x__ diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index 797f78729680..c77c6de6f6c0 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -9,7 +9,7 @@ #ifdef CONFIG_SMP -#include <asm/system.h> +#include <asm/lowcore.h> #define raw_smp_processor_id() (S390_lowcore.cpu_nr) diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h new file mode 100644 index 000000000000..f223068b7822 --- /dev/null +++ b/arch/s390/include/asm/switch_to.h @@ -0,0 +1,100 @@ +/* + * Copyright IBM Corp. 
1999, 2009 + * + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#ifndef __ASM_SWITCH_TO_H +#define __ASM_SWITCH_TO_H + +#include <linux/thread_info.h> + +extern struct task_struct *__switch_to(void *, void *); +extern void update_per_regs(struct task_struct *task); + +static inline void save_fp_regs(s390_fp_regs *fpregs) +{ + asm volatile( + " std 0,%O0+8(%R0)\n" + " std 2,%O0+24(%R0)\n" + " std 4,%O0+40(%R0)\n" + " std 6,%O0+56(%R0)" + : "=Q" (*fpregs) : "Q" (*fpregs)); + if (!MACHINE_HAS_IEEE) + return; + asm volatile( + " stfpc %0\n" + " std 1,%O0+16(%R0)\n" + " std 3,%O0+32(%R0)\n" + " std 5,%O0+48(%R0)\n" + " std 7,%O0+64(%R0)\n" + " std 8,%O0+72(%R0)\n" + " std 9,%O0+80(%R0)\n" + " std 10,%O0+88(%R0)\n" + " std 11,%O0+96(%R0)\n" + " std 12,%O0+104(%R0)\n" + " std 13,%O0+112(%R0)\n" + " std 14,%O0+120(%R0)\n" + " std 15,%O0+128(%R0)\n" + : "=Q" (*fpregs) : "Q" (*fpregs)); +} + +static inline void restore_fp_regs(s390_fp_regs *fpregs) +{ + asm volatile( + " ld 0,%O0+8(%R0)\n" + " ld 2,%O0+24(%R0)\n" + " ld 4,%O0+40(%R0)\n" + " ld 6,%O0+56(%R0)" + : : "Q" (*fpregs)); + if (!MACHINE_HAS_IEEE) + return; + asm volatile( + " lfpc %0\n" + " ld 1,%O0+16(%R0)\n" + " ld 3,%O0+32(%R0)\n" + " ld 5,%O0+48(%R0)\n" + " ld 7,%O0+64(%R0)\n" + " ld 8,%O0+72(%R0)\n" + " ld 9,%O0+80(%R0)\n" + " ld 10,%O0+88(%R0)\n" + " ld 11,%O0+96(%R0)\n" + " ld 12,%O0+104(%R0)\n" + " ld 13,%O0+112(%R0)\n" + " ld 14,%O0+120(%R0)\n" + " ld 15,%O0+128(%R0)\n" + : : "Q" (*fpregs)); +} + +static inline void save_access_regs(unsigned int *acrs) +{ + asm volatile("stam 0,15,%0" : "=Q" (*acrs)); +} + +static inline void restore_access_regs(unsigned int *acrs) +{ + asm volatile("lam 0,15,%0" : : "Q" (*acrs)); +} + +#define switch_to(prev,next,last) do { \ + if (prev->mm) { \ + save_fp_regs(&prev->thread.fp_regs); \ + save_access_regs(&prev->thread.acrs[0]); \ + } \ + if (next->mm) { \ + restore_fp_regs(&next->thread.fp_regs); \ + restore_access_regs(&next->thread.acrs[0]); \ + update_per_regs(next); \ + } \ + prev = __switch_to(prev,next); \ +} while (0) + +extern void account_vtime(struct task_struct *, struct task_struct *); +extern void account_tick_vtime(struct task_struct *); + +#define finish_arch_switch(prev) do { \ + set_fs(current->thread.mm_segment); \ + account_vtime(prev, current); \ +} while (0) + +#endif /* __ASM_SWITCH_TO_H */ diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h deleted file mode 100644 index 2e0bb7f0f9b2..000000000000 --- a/arch/s390/include/asm/system.h +++ /dev/null @@ -1,315 +0,0 @@ -/* - * Copyright IBM Corp. 
1999, 2009 - * - * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> - */ - -#ifndef __ASM_SYSTEM_H -#define __ASM_SYSTEM_H - -#include <linux/preempt.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <asm/types.h> -#include <asm/ptrace.h> -#include <asm/setup.h> -#include <asm/processor.h> -#include <asm/lowcore.h> -#include <asm/cmpxchg.h> - -#ifdef __KERNEL__ - -struct task_struct; - -extern struct task_struct *__switch_to(void *, void *); -extern void update_per_regs(struct task_struct *task); - -static inline void save_fp_regs(s390_fp_regs *fpregs) -{ - asm volatile( - " std 0,%O0+8(%R0)\n" - " std 2,%O0+24(%R0)\n" - " std 4,%O0+40(%R0)\n" - " std 6,%O0+56(%R0)" - : "=Q" (*fpregs) : "Q" (*fpregs)); - if (!MACHINE_HAS_IEEE) - return; - asm volatile( - " stfpc %0\n" - " std 1,%O0+16(%R0)\n" - " std 3,%O0+32(%R0)\n" - " std 5,%O0+48(%R0)\n" - " std 7,%O0+64(%R0)\n" - " std 8,%O0+72(%R0)\n" - " std 9,%O0+80(%R0)\n" - " std 10,%O0+88(%R0)\n" - " std 11,%O0+96(%R0)\n" - " std 12,%O0+104(%R0)\n" - " std 13,%O0+112(%R0)\n" - " std 14,%O0+120(%R0)\n" - " std 15,%O0+128(%R0)\n" - : "=Q" (*fpregs) : "Q" (*fpregs)); -} - -static inline void restore_fp_regs(s390_fp_regs *fpregs) -{ - asm volatile( - " ld 0,%O0+8(%R0)\n" - " ld 2,%O0+24(%R0)\n" - " ld 4,%O0+40(%R0)\n" - " ld 6,%O0+56(%R0)" - : : "Q" (*fpregs)); - if (!MACHINE_HAS_IEEE) - return; - asm volatile( - " lfpc %0\n" - " ld 1,%O0+16(%R0)\n" - " ld 3,%O0+32(%R0)\n" - " ld 5,%O0+48(%R0)\n" - " ld 7,%O0+64(%R0)\n" - " ld 8,%O0+72(%R0)\n" - " ld 9,%O0+80(%R0)\n" - " ld 10,%O0+88(%R0)\n" - " ld 11,%O0+96(%R0)\n" - " ld 12,%O0+104(%R0)\n" - " ld 13,%O0+112(%R0)\n" - " ld 14,%O0+120(%R0)\n" - " ld 15,%O0+128(%R0)\n" - : : "Q" (*fpregs)); -} - -static inline void save_access_regs(unsigned int *acrs) -{ - asm volatile("stam 0,15,%0" : "=Q" (*acrs)); -} - -static inline void restore_access_regs(unsigned int *acrs) -{ - asm volatile("lam 0,15,%0" : : "Q" (*acrs)); -} - -#define switch_to(prev,next,last) do { \ - if (prev->mm) { \ - save_fp_regs(&prev->thread.fp_regs); \ - save_access_regs(&prev->thread.acrs[0]); \ - } \ - if (next->mm) { \ - restore_fp_regs(&next->thread.fp_regs); \ - restore_access_regs(&next->thread.acrs[0]); \ - update_per_regs(next); \ - } \ - prev = __switch_to(prev,next); \ -} while (0) - -extern void account_vtime(struct task_struct *, struct task_struct *); -extern void account_tick_vtime(struct task_struct *); - -#ifdef CONFIG_PFAULT -extern int pfault_init(void); -extern void pfault_fini(void); -#else /* CONFIG_PFAULT */ -#define pfault_init() ({-1;}) -#define pfault_fini() do { } while (0) -#endif /* CONFIG_PFAULT */ - -extern void cmma_init(void); -extern int memcpy_real(void *, void *, size_t); -extern void copy_to_absolute_zero(void *dest, void *src, size_t count); -extern int copy_to_user_real(void __user *dest, void *src, size_t count); -extern int copy_from_user_real(void *dest, void __user *src, size_t count); - -#define finish_arch_switch(prev) do { \ - set_fs(current->thread.mm_segment); \ - account_vtime(prev, current); \ -} while (0) - -#define nop() asm volatile("nop") - -/* - * Force strict CPU ordering. - * And yes, this is required on UP too when we're talking - * to devices. - * - * This is very similar to the ppc eieio/sync instruction in that is - * does a checkpoint syncronisation & makes sure that - * all memory ops have completed wrt other CPU's ( see 7-15 POP DJB ). 
- */ - -#define eieio() asm volatile("bcr 15,0" : : : "memory") -#define SYNC_OTHER_CORES(x) eieio() -#define mb() eieio() -#define rmb() eieio() -#define wmb() eieio() -#define read_barrier_depends() do { } while(0) -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() - - -#define set_mb(var, value) do { var = value; mb(); } while (0) - -#ifdef __s390x__ - -#define __ctl_load(array, low, high) ({ \ - typedef struct { char _[sizeof(array)]; } addrtype; \ - asm volatile( \ - " lctlg %1,%2,%0\n" \ - : : "Q" (*(addrtype *)(&array)), \ - "i" (low), "i" (high)); \ - }) - -#define __ctl_store(array, low, high) ({ \ - typedef struct { char _[sizeof(array)]; } addrtype; \ - asm volatile( \ - " stctg %1,%2,%0\n" \ - : "=Q" (*(addrtype *)(&array)) \ - : "i" (low), "i" (high)); \ - }) - -#else /* __s390x__ */ - -#define __ctl_load(array, low, high) ({ \ - typedef struct { char _[sizeof(array)]; } addrtype; \ - asm volatile( \ - " lctl %1,%2,%0\n" \ - : : "Q" (*(addrtype *)(&array)), \ - "i" (low), "i" (high)); \ -}) - -#define __ctl_store(array, low, high) ({ \ - typedef struct { char _[sizeof(array)]; } addrtype; \ - asm volatile( \ - " stctl %1,%2,%0\n" \ - : "=Q" (*(addrtype *)(&array)) \ - : "i" (low), "i" (high)); \ - }) - -#endif /* __s390x__ */ - -#define __ctl_set_bit(cr, bit) ({ \ - unsigned long __dummy; \ - __ctl_store(__dummy, cr, cr); \ - __dummy |= 1UL << (bit); \ - __ctl_load(__dummy, cr, cr); \ -}) - -#define __ctl_clear_bit(cr, bit) ({ \ - unsigned long __dummy; \ - __ctl_store(__dummy, cr, cr); \ - __dummy &= ~(1UL << (bit)); \ - __ctl_load(__dummy, cr, cr); \ -}) - -/* - * Use to set psw mask except for the first byte which - * won't be changed by this function. - */ -static inline void -__set_psw_mask(unsigned long mask) -{ - __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8))); -} - -#define local_mcck_enable() \ - __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK) -#define local_mcck_disable() \ - __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT) - -#ifdef CONFIG_SMP - -extern void smp_ctl_set_bit(int cr, int bit); -extern void smp_ctl_clear_bit(int cr, int bit); -#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit) -#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit) - -#else - -#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit) -#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit) - -#endif /* CONFIG_SMP */ - -#define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */ - -/* - * The test_facility function uses the bit odering where the MSB is bit 0. - * That makes it easier to query facility bits with the bit number as - * documented in the Principles of Operation. 
- */ -static inline int test_facility(unsigned long nr) -{ - unsigned char *ptr; - - if (nr >= MAX_FACILITY_BIT) - return 0; - ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3); - return (*ptr & (0x80 >> (nr & 7))) != 0; -} - -/** - * stfle - Store facility list extended - * @stfle_fac_list: array where facility list can be stored - * @size: size of passed in array in double words - */ -static inline void stfle(u64 *stfle_fac_list, int size) -{ - unsigned long nr; - - preempt_disable(); - S390_lowcore.stfl_fac_list = 0; - asm volatile( - " .insn s,0xb2b10000,0(0)\n" /* stfl */ - "0:\n" - EX_TABLE(0b, 0b) - : "=m" (S390_lowcore.stfl_fac_list)); - nr = 4; /* bytes stored by stfl */ - memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4); - if (S390_lowcore.stfl_fac_list & 0x01000000) { - /* More facility bits available with stfle */ - register unsigned long reg0 asm("0") = size - 1; - - asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */ - : "+d" (reg0) - : "a" (stfle_fac_list) - : "memory", "cc"); - nr = (reg0 + 1) * 8; /* # bytes stored by stfle */ - } - memset((char *) stfle_fac_list + nr, 0, size * 8 - nr); - preempt_enable(); -} - -static inline unsigned short stap(void) -{ - unsigned short cpu_address; - - asm volatile("stap %0" : "=m" (cpu_address)); - return cpu_address; -} - -extern void (*_machine_restart)(char *command); -extern void (*_machine_halt)(void); -extern void (*_machine_power_off)(void); - -extern unsigned long arch_align_stack(unsigned long sp); - -static inline int tprot(unsigned long addr) -{ - int rc = -EFAULT; - - asm volatile( - " tprot 0(%1),0\n" - "0: ipm %0\n" - " srl %0,28\n" - "1:\n" - EX_TABLE(0b,1b) - : "+d" (rc) : "a" (addr) : "cc"); - return rc; -} - -#endif /* __KERNEL__ */ - -#endif diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 2b23885e81e9..8f2cada4f7c9 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -16,6 +16,7 @@ */ #include <linux/sched.h> #include <linux/errno.h> +#include <asm/ctl_reg.h> #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -375,4 +376,9 @@ clear_user(void __user *to, unsigned long n) return n; } +extern int memcpy_real(void *, void *, size_t); +extern void copy_to_absolute_zero(void *dest, void *src, size_t count); +extern int copy_to_user_real(void __user *dest, void *src, size_t count); +extern int copy_from_user_real(void *dest, void __user *src, size_t count); + #endif /* __S390_UACCESS_H */ diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index ed8c913db79e..83e6edf5cf17 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -12,7 +12,6 @@ #include <asm/timer.h> #include <asm/vdso.h> #include <asm/pgtable.h> -#include <asm/system.h> /* * Make sure that the compiler is new enough. 
We want a compiler that diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 53a82c8d50e9..28040fd5e8a2 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -27,6 +27,7 @@ #include <asm/ucontext.h> #include <asm/uaccess.h> #include <asm/lowcore.h> +#include <asm/switch_to.h> #include "compat_linux.h" #include "compat_ptrace.h" #include "entry.h" diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index 3e8b8816f309..e3dd886e1b32 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c @@ -18,7 +18,6 @@ #include <linux/string.h> #include <asm/ebcdic.h> #include <asm/cpcmd.h> -#include <asm/system.h> #include <asm/io.h> static DEFINE_SPINLOCK(cpcmd_lock); diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index e2f847599c8e..3221c6fca8bb 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -24,7 +24,6 @@ #include <linux/kprobes.h> #include <linux/kdebug.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/atomic.h> diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 578eb4e6d157..9475e682727f 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -29,7 +29,7 @@ #include <asm/sysinfo.h> #include <asm/cpcmd.h> #include <asm/sclp.h> -#include <asm/system.h> +#include <asm/facility.h> #include "entry.h" /* diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c index 8431b92ca3ae..ac39e7a731fc 100644 --- a/arch/s390/kernel/lgr.c +++ b/arch/s390/kernel/lgr.c @@ -10,7 +10,6 @@ #include <linux/slab.h> #include <asm/sysinfo.h> #include <asm/ebcdic.h> -#include <asm/system.h> #include <asm/debug.h> #include <asm/ipl.h> diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 0f8cdf1268d0..bdad47d54478 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -19,7 +19,6 @@ #include <asm/setup.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> -#include <asm/system.h> #include <asm/smp.h> #include <asm/reset.h> #include <asm/ipl.h> diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c index bbe522672e06..e8d6c214d498 100644 --- a/arch/s390/kernel/os_info.c +++ b/arch/s390/kernel/os_info.c @@ -12,7 +12,6 @@ #include <linux/kernel.h> #include <asm/checksum.h> #include <asm/lowcore.h> -#include <asm/system.h> #include <asm/os_info.h> /* diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 3732e4c09cbe..60055cefdd04 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -23,13 +23,13 @@ #include <linux/kprobes.h> #include <linux/random.h> #include <linux/module.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/irq.h> #include <asm/timer.h> #include <asm/nmi.h> #include <asm/smp.h> +#include <asm/switch_to.h> #include "entry.h" asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 61f95489d70c..02f300fbf070 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -26,9 +26,9 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/unistd.h> +#include <asm/switch_to.h> #include "entry.h" #ifdef CONFIG_COMPAT diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 38e751278bf7..1581ea2e027a 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -50,7 
+50,6 @@ #include <asm/ipl.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/smp.h> #include <asm/mmu_context.h> #include <asm/cpcmd.h> diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index f29f5ef400e5..f7582b27f600 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -30,6 +30,7 @@ #include <asm/ucontext.h> #include <asm/uaccess.h> #include <asm/lowcore.h> +#include <asm/switch_to.h> #include "entry.h" #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index 47df775c844d..aa1494d0e380 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c @@ -9,7 +9,7 @@ #include <linux/pfn.h> #include <linux/suspend.h> #include <linux/mm.h> -#include <asm/system.h> +#include <asm/ctl_reg.h> /* * References to section boundaries diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index cd6ebe12c481..77cdf4234ebc 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -33,7 +33,6 @@ #include <linux/kprobes.h> #include <linux/bug.h> #include <linux/utsname.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/atomic.h> diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 9c80138206b0..ea5590fdca3b 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -25,12 +25,12 @@ #include <linux/compat.h> #include <asm/asm-offsets.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/mmu_context.h> #include <asm/sections.h> #include <asm/vdso.h> +#include <asm/facility.h> #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) extern char vdso32_start, vdso32_end; diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index a21634173a66..78eb9847008f 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig @@ -34,6 +34,15 @@ config KVM If unsure, say N. +config KVM_S390_UCONTROL + bool "Userspace controlled virtual machines" + depends on KVM + ---help--- + Allow CAP_SYS_ADMIN users to create KVM virtual machines that are + controlled by userspace. + + If unsure, say N. + # OK, it's a little counter-intuitive to do this, but it puts it neatly under # the virtualization menu. 
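(For orientation, a minimal userspace sketch of creating such a user-controlled VM; this is not part of the patch and omits error handling. It relies on the KVM_VM_S390_UCONTROL type flag from the KVM uapi header, which kvm_arch_init_vm() checks further down in this diff, and it requires CAP_SYS_ADMIN:)

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int create_ucontrol_vm(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);

		if (kvm < 0)
			return -1;
		/* type = 0 still creates an ordinary VM; the UCONTROL flag
		 * selects the userspace-controlled variant. */
		return ioctl(kvm, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
	}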
source drivers/vhost/Kconfig diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 8943e82cd4d9..a353f0ea45c2 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -20,8 +20,8 @@ static int diag_release_pages(struct kvm_vcpu *vcpu) unsigned long start, end; unsigned long prefix = vcpu->arch.sie_block->prefix; - start = vcpu->arch.guest_gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; - end = vcpu->arch.guest_gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; + start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; + end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; if (start & ~PAGE_MASK || end & ~PAGE_MASK || start > end || start < 2 * PAGE_SIZE) @@ -56,7 +56,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu) static int __diag_ipl_functions(struct kvm_vcpu *vcpu) { unsigned int reg = vcpu->arch.sie_block->ipa & 0xf; - unsigned long subcode = vcpu->arch.guest_gprs[reg] & 0xffff; + unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff; VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); switch (subcode) { diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 02434543eabb..361456577c6f 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -36,7 +36,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) useraddr = disp2; if (base2) - useraddr += vcpu->arch.guest_gprs[base2]; + useraddr += vcpu->run->s.regs.gprs[base2]; if (useraddr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -75,7 +75,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu) useraddr = disp2; if (base2) - useraddr += vcpu->arch.guest_gprs[base2]; + useraddr += vcpu->run->s.regs.gprs[base2]; if (useraddr & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -133,13 +133,6 @@ static int handle_stop(struct kvm_vcpu *vcpu) vcpu->stat.exit_stop_request++; spin_lock_bh(&vcpu->arch.local_int.lock); - if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) { - vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP; - rc = kvm_s390_vcpu_store_status(vcpu, - KVM_S390_STORE_STATUS_NOADDR); - if (rc >= 0) - rc = -EOPNOTSUPP; - } if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) { vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP; @@ -155,7 +148,18 @@ static int handle_stop(struct kvm_vcpu *vcpu) rc = -EOPNOTSUPP; } - spin_unlock_bh(&vcpu->arch.local_int.lock); + if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) { + vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP; + /* store status must be called unlocked. 
Since local_int.lock + * only protects local_int.* and not guest memory we can give + * up the lock here */ + spin_unlock_bh(&vcpu->arch.local_int.lock); + rc = kvm_s390_vcpu_store_status(vcpu, + KVM_S390_STORE_STATUS_NOADDR); + if (rc >= 0) + rc = -EOPNOTSUPP; + } else + spin_unlock_bh(&vcpu->arch.local_int.lock); return rc; } diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index f0647ce6da21..2d9f9a72bb81 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -236,8 +236,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", inti->prefix.address); vcpu->stat.deliver_prefix_signal++; - vcpu->arch.sie_block->prefix = inti->prefix.address; - vcpu->arch.sie_block->ihcpu = 0xffff; + kvm_s390_set_prefix(vcpu, inti->prefix.address); break; case KVM_S390_RESTART: diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d1c445732451..217ce44395a4 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -27,7 +27,7 @@ #include <asm/lowcore.h> #include <asm/pgtable.h> #include <asm/nmi.h> -#include <asm/system.h> +#include <asm/switch_to.h> #include "kvm-s390.h" #include "gaccess.h" @@ -129,6 +129,10 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_S390_PSW: case KVM_CAP_S390_GMAP: case KVM_CAP_SYNC_MMU: +#ifdef CONFIG_KVM_S390_UCONTROL + case KVM_CAP_S390_UCONTROL: +#endif + case KVM_CAP_SYNC_REGS: r = 1; break; default: @@ -171,11 +175,22 @@ long kvm_arch_vm_ioctl(struct file *filp, return r; } -int kvm_arch_init_vm(struct kvm *kvm) +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int rc; char debug_name[16]; + rc = -EINVAL; +#ifdef CONFIG_KVM_S390_UCONTROL + if (type & ~KVM_VM_S390_UCONTROL) + goto out_err; + if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN))) + goto out_err; +#else + if (type) + goto out_err; +#endif + rc = s390_enable_sie(); if (rc) goto out_err; @@ -198,10 +213,13 @@ int kvm_arch_init_vm(struct kvm *kvm) debug_register_view(kvm->arch.dbf, &debug_sprintf_view); VM_EVENT(kvm, 3, "%s", "vm created"); - kvm->arch.gmap = gmap_alloc(current->mm); - if (!kvm->arch.gmap) - goto out_nogmap; - + if (type & KVM_VM_S390_UCONTROL) { + kvm->arch.gmap = NULL; + } else { + kvm->arch.gmap = gmap_alloc(current->mm); + if (!kvm->arch.gmap) + goto out_nogmap; + } return 0; out_nogmap: debug_unregister(kvm->arch.dbf); @@ -214,11 +232,18 @@ out_err: void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { VCPU_EVENT(vcpu, 3, "%s", "free cpu"); - clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn); - if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda == - (__u64) vcpu->arch.sie_block) - vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0; + if (!kvm_is_ucontrol(vcpu->kvm)) { + clear_bit(63 - vcpu->vcpu_id, + (unsigned long *) &vcpu->kvm->arch.sca->mcn); + if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda == + (__u64) vcpu->arch.sie_block) + vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0; + } smp_mb(); + + if (kvm_is_ucontrol(vcpu->kvm)) + gmap_free(vcpu->arch.gmap); + free_page((unsigned long)(vcpu->arch.sie_block)); kvm_vcpu_uninit(vcpu); kfree(vcpu); @@ -249,13 +274,25 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_free_vcpus(kvm); free_page((unsigned long)(kvm->arch.sca)); debug_unregister(kvm->arch.dbf); - gmap_free(kvm->arch.gmap); + if (!kvm_is_ucontrol(kvm)) + gmap_free(kvm->arch.gmap); } /* Section: vcpu related */ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { + if (kvm_is_ucontrol(vcpu->kvm)) { + vcpu->arch.gmap = 
gmap_alloc(current->mm); + if (!vcpu->arch.gmap) + return -ENOMEM; + return 0; + } + vcpu->arch.gmap = vcpu->kvm->arch.gmap; + vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | + KVM_SYNC_GPRS | + KVM_SYNC_ACRS | + KVM_SYNC_CRS; return 0; } @@ -270,7 +307,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) save_access_regs(vcpu->arch.host_acrs); vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK; restore_fp_regs(&vcpu->arch.guest_fpregs); - restore_access_regs(vcpu->arch.guest_acrs); + restore_access_regs(vcpu->run->s.regs.acrs); gmap_enable(vcpu->arch.gmap); atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); } @@ -280,7 +317,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); gmap_disable(vcpu->arch.gmap); save_fp_regs(&vcpu->arch.guest_fpregs); - save_access_regs(vcpu->arch.guest_acrs); + save_access_regs(vcpu->run->s.regs.acrs); restore_fp_regs(&vcpu->arch.host_fpregs); restore_access_regs(vcpu->arch.host_acrs); } @@ -290,8 +327,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) /* this equals initial cpu reset in pop, but we don't switch to ESA */ vcpu->arch.sie_block->gpsw.mask = 0UL; vcpu->arch.sie_block->gpsw.addr = 0UL; - vcpu->arch.sie_block->prefix = 0UL; - vcpu->arch.sie_block->ihcpu = 0xffff; + kvm_s390_set_prefix(vcpu, 0); vcpu->arch.sie_block->cputm = 0UL; vcpu->arch.sie_block->ckc = 0UL; vcpu->arch.sie_block->todpr = 0; @@ -342,12 +378,19 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, goto out_free_cpu; vcpu->arch.sie_block->icpua = id; - BUG_ON(!kvm->arch.sca); - if (!kvm->arch.sca->cpu[id].sda) - kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block; - vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32); - vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; - set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); + if (!kvm_is_ucontrol(kvm)) { + if (!kvm->arch.sca) { + WARN_ON_ONCE(1); + goto out_free_cpu; + } + if (!kvm->arch.sca->cpu[id].sda) + kvm->arch.sca->cpu[id].sda = + (__u64) vcpu->arch.sie_block; + vcpu->arch.sie_block->scaoh = + (__u32)(((__u64)kvm->arch.sca) >> 32); + vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; + set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); + } spin_lock_init(&vcpu->arch.local_int.lock); INIT_LIST_HEAD(&vcpu->arch.local_int.list); @@ -388,29 +431,29 @@ static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { - memcpy(&vcpu->arch.guest_gprs, ®s->gprs, sizeof(regs->gprs)); + memcpy(&vcpu->run->s.regs.gprs, ®s->gprs, sizeof(regs->gprs)); return 0; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { - memcpy(®s->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs)); + memcpy(®s->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); return 0; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { - memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs)); + memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); - restore_access_regs(vcpu->arch.guest_acrs); + restore_access_regs(vcpu->run->s.regs.acrs); return 0; } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { - memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs)); + memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); 
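(Stepping back from the hunk above: with the KVM_SYNC_* flags advertised in kvm_valid_regs, userspace can exchange these registers through the mmap'ed kvm_run area instead of extra GET/SET ioctls. A rough sketch, assuming "run" is the mapped struct kvm_run of a vcpu fd; only the prefix write-back is shown, since its kvm_dirty_regs handling appears in the kvm_arch_vcpu_ioctl_run() hunk below:)

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static void sync_prefix_and_run(int vcpu_fd, struct kvm_run *run,
					__u64 new_prefix)
	{
		/* registers published by the kernel after the last KVM_RUN */
		if (run->kvm_valid_regs & KVM_SYNC_GPRS)
			printf("r2 = 0x%llx\n",
			       (unsigned long long) run->s.regs.gprs[2]);

		/* hand a new prefix back; picked up before the next SIE entry */
		run->s.regs.prefix = new_prefix;
		run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
		ioctl(vcpu_fd, KVM_RUN, 0);
	}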
memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); return 0; } @@ -418,7 +461,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); - vcpu->arch.guest_fpregs.fpc = fpu->fpc; + vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK; restore_fp_regs(&vcpu->arch.guest_fpregs); return 0; } @@ -467,9 +510,11 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, return -EINVAL; /* not implemented yet */ } -static void __vcpu_run(struct kvm_vcpu *vcpu) +static int __vcpu_run(struct kvm_vcpu *vcpu) { - memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16); + int rc; + + memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); if (need_resched()) schedule(); @@ -477,7 +522,8 @@ static void __vcpu_run(struct kvm_vcpu *vcpu) if (test_thread_flag(TIF_MCCK_PENDING)) s390_handle_mcck(); - kvm_s390_deliver_pending_interrupts(vcpu); + if (!kvm_is_ucontrol(vcpu->kvm)) + kvm_s390_deliver_pending_interrupts(vcpu); vcpu->arch.sie_block->icptcode = 0; local_irq_disable(); @@ -485,9 +531,15 @@ static void __vcpu_run(struct kvm_vcpu *vcpu) local_irq_enable(); VCPU_EVENT(vcpu, 6, "entering sie flags %x", atomic_read(&vcpu->arch.sie_block->cpuflags)); - if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) { - VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); + if (rc) { + if (kvm_is_ucontrol(vcpu->kvm)) { + rc = SIE_INTERCEPT_UCONTROL; + } else { + VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); + kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + rc = 0; + } } VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", vcpu->arch.sie_block->icptcode); @@ -495,7 +547,8 @@ static void __vcpu_run(struct kvm_vcpu *vcpu) kvm_guest_exit(); local_irq_enable(); - memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16); + memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); + return rc; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) @@ -516,6 +569,7 @@ rerun_vcpu: case KVM_EXIT_UNKNOWN: case KVM_EXIT_INTR: case KVM_EXIT_S390_RESET: + case KVM_EXIT_S390_UCONTROL: break; default: BUG(); @@ -523,12 +577,26 @@ rerun_vcpu: vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; + if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) { + kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX; + kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); + } + if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { + kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS; + memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); + kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); + } might_fault(); do { - __vcpu_run(vcpu); - rc = kvm_handle_sie_intercept(vcpu); + rc = __vcpu_run(vcpu); + if (rc) + break; + if (kvm_is_ucontrol(vcpu->kvm)) + rc = -EOPNOTSUPP; + else + rc = kvm_handle_sie_intercept(vcpu); } while (!signal_pending(current) && !rc); if (rc == SIE_INTERCEPT_RERUNVCPU) @@ -539,6 +607,16 @@ rerun_vcpu: rc = -EINTR; } +#ifdef CONFIG_KVM_S390_UCONTROL + if (rc == SIE_INTERCEPT_UCONTROL) { + kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL; + kvm_run->s390_ucontrol.trans_exc_code = + current->thread.gmap_addr; + kvm_run->s390_ucontrol.pgm_code = 0x10; + rc = 0; + } +#endif + if (rc == -EOPNOTSUPP) { /* intercept cannot be handled in-kernel, 
prepare kvm-run */ kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; @@ -556,6 +634,8 @@ rerun_vcpu: kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; + kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix; + memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); @@ -602,7 +682,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs), - vcpu->arch.guest_gprs, 128, prefix)) + vcpu->run->s.regs.gprs, 128, prefix)) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw), @@ -631,7 +711,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) return -EFAULT; if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs), - &vcpu->arch.guest_acrs, 64, prefix)) + &vcpu->run->s.regs.acrs, 64, prefix)) return -EFAULT; if (__guestcopy(vcpu, @@ -673,12 +753,77 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_S390_INITIAL_RESET: r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); break; +#ifdef CONFIG_KVM_S390_UCONTROL + case KVM_S390_UCAS_MAP: { + struct kvm_s390_ucas_mapping ucasmap; + + if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { + r = -EFAULT; + break; + } + + if (!kvm_is_ucontrol(vcpu->kvm)) { + r = -EINVAL; + break; + } + + r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, + ucasmap.vcpu_addr, ucasmap.length); + break; + } + case KVM_S390_UCAS_UNMAP: { + struct kvm_s390_ucas_mapping ucasmap; + + if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { + r = -EFAULT; + break; + } + + if (!kvm_is_ucontrol(vcpu->kvm)) { + r = -EINVAL; + break; + } + + r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, + ucasmap.length); + break; + } +#endif + case KVM_S390_VCPU_FAULT: { + r = gmap_fault(arg, vcpu->arch.gmap); + if (!IS_ERR_VALUE(r)) + r = 0; + break; + } default: - r = -EINVAL; + r = -ENOTTY; } return r; } +int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ +#ifdef CONFIG_KVM_S390_UCONTROL + if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) + && (kvm_is_ucontrol(vcpu->kvm))) { + vmf->page = virt_to_page(vcpu->arch.sie_block); + get_page(vmf->page); + return 0; + } +#endif + return VM_FAULT_SIGBUS; +} + +void kvm_arch_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ +} + +int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +{ + return 0; +} + /* Section: memory related */ int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 99b0b7597115..ff28f9d1c9eb 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -26,6 +26,7 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); /* negativ values are error codes, positive values for internal conditions */ #define SIE_INTERCEPT_RERUNVCPU (1<<0) +#define SIE_INTERCEPT_UCONTROL (1<<1) int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu); #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\ @@ -47,6 +48,23 @@ static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu) return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOP_INT; } +static inline int kvm_is_ucontrol(struct kvm *kvm) +{ +#ifdef CONFIG_KVM_S390_UCONTROL + if (kvm->arch.gmap) + return 0; + return 1; +#else + return 0; +#endif +} + +static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 
prefix) +{ + vcpu->arch.sie_block->prefix = prefix & 0x7fffe000u; + vcpu->arch.sie_block->ihcpu = 0xffff; +} + int kvm_s390_handle_wait(struct kvm_vcpu *vcpu); enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); void kvm_s390_tasklet(unsigned long parm); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index d02638959922..e5a45dbd26ac 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -33,7 +33,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) operand2 = disp2; if (base2) - operand2 += vcpu->arch.guest_gprs[base2]; + operand2 += vcpu->run->s.regs.gprs[base2]; /* must be word boundary */ if (operand2 & 3) { @@ -56,8 +56,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) goto out; } - vcpu->arch.sie_block->prefix = address; - vcpu->arch.sie_block->ihcpu = 0xffff; + kvm_s390_set_prefix(vcpu, address); VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); out: @@ -74,7 +73,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) vcpu->stat.instruction_stpx++; operand2 = disp2; if (base2) - operand2 += vcpu->arch.guest_gprs[base2]; + operand2 += vcpu->run->s.regs.gprs[base2]; /* must be word boundary */ if (operand2 & 3) { @@ -106,7 +105,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) vcpu->stat.instruction_stap++; useraddr = disp2; if (base2) - useraddr += vcpu->arch.guest_gprs[base2]; + useraddr += vcpu->run->s.regs.gprs[base2]; if (useraddr & 1) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -181,7 +180,7 @@ static int handle_stidp(struct kvm_vcpu *vcpu) vcpu->stat.instruction_stidp++; operand2 = disp2; if (base2) - operand2 += vcpu->arch.guest_gprs[base2]; + operand2 += vcpu->run->s.regs.gprs[base2]; if (operand2 & 7) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -232,9 +231,9 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) static int handle_stsi(struct kvm_vcpu *vcpu) { - int fc = (vcpu->arch.guest_gprs[0] & 0xf0000000) >> 28; - int sel1 = vcpu->arch.guest_gprs[0] & 0xff; - int sel2 = vcpu->arch.guest_gprs[1] & 0xffff; + int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; + int sel1 = vcpu->run->s.regs.gprs[0] & 0xff; + int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 operand2; @@ -245,14 +244,14 @@ static int handle_stsi(struct kvm_vcpu *vcpu) operand2 = disp2; if (base2) - operand2 += vcpu->arch.guest_gprs[base2]; + operand2 += vcpu->run->s.regs.gprs[base2]; if (operand2 & 0xfff && fc > 0) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); switch (fc) { case 0: - vcpu->arch.guest_gprs[0] = 3 << 28; + vcpu->run->s.regs.gprs[0] = 3 << 28; vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); return 0; case 1: /* same handling for 1 and 2 */ @@ -281,7 +280,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) } free_page(mem); vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); - vcpu->arch.guest_gprs[0] = 0; + vcpu->run->s.regs.gprs[0] = 0; return 0; out_mem: free_page(mem); @@ -333,8 +332,8 @@ static int handle_tprot(struct kvm_vcpu *vcpu) int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12; int disp2 = vcpu->arch.sie_block->ipb & 0x0fff; - u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0; - u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0; + u64 address1 = disp1 + base1 ? vcpu->run->s.regs.gprs[base1] : 0; + u64 address2 = disp2 + base2 ? 
vcpu->run->s.regs.gprs[base2] : 0; struct vm_area_struct *vma; unsigned long user_address; diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 0a7941d74bc6..0ad4cf238391 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -48,7 +48,7 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, - unsigned long *reg) + u64 *reg) { struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; int rc; @@ -160,12 +160,15 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) inti->type = KVM_S390_SIGP_STOP; spin_lock_bh(&li->lock); + if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) + goto out; list_add_tail(&inti->list, &li->list); atomic_set(&li->active, 1); atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); li->action_bits |= action; if (waitqueue_active(&li->wq)) wake_up_interruptible(&li->wq); +out: spin_unlock_bh(&li->lock); return 0; /* order accepted */ @@ -220,7 +223,7 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) } static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, - unsigned long *reg) + u64 *reg) { struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; struct kvm_s390_local_interrupt *li = NULL; @@ -278,7 +281,7 @@ out_fi: } static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, - unsigned long *reg) + u64 *reg) { int rc; struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; @@ -309,6 +312,34 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, return rc; } +static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr) +{ + int rc = 0; + struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; + struct kvm_s390_local_interrupt *li; + + if (cpu_addr >= KVM_MAX_VCPUS) + return 3; /* not operational */ + + spin_lock(&fi->lock); + li = fi->local_int[cpu_addr]; + if (li == NULL) { + rc = 3; /* not operational */ + goto out; + } + + spin_lock_bh(&li->lock); + if (li->action_bits & ACTION_STOP_ON_STOP) + rc = 2; /* busy */ + else + VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace", + cpu_addr); + spin_unlock_bh(&li->lock); +out: + spin_unlock(&fi->lock); + return rc; +} + int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) { int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; @@ -316,7 +347,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u32 parameter; - u16 cpu_addr = vcpu->arch.guest_gprs[r3]; + u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; u8 order_code; int rc; @@ -327,18 +358,18 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) order_code = disp2; if (base2) - order_code += vcpu->arch.guest_gprs[base2]; + order_code += vcpu->run->s.regs.gprs[base2]; if (r1 % 2) - parameter = vcpu->arch.guest_gprs[r1]; + parameter = vcpu->run->s.regs.gprs[r1]; else - parameter = vcpu->arch.guest_gprs[r1 + 1]; + parameter = vcpu->run->s.regs.gprs[r1 + 1]; switch (order_code) { case SIGP_SENSE: vcpu->stat.instruction_sigp_sense++; rc = __sigp_sense(vcpu, cpu_addr, - &vcpu->arch.guest_gprs[r1]); + &vcpu->run->s.regs.gprs[r1]); break; case SIGP_EXTERNAL_CALL: vcpu->stat.instruction_sigp_external_call++; @@ -354,7 +385,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) break; case SIGP_STOP_STORE_STATUS: vcpu->stat.instruction_sigp_stop++; - rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP); + rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP | + ACTION_STOP_ON_STOP); break; case SIGP_SET_ARCH: 
vcpu->stat.instruction_sigp_arch++; @@ -363,15 +395,18 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) case SIGP_SET_PREFIX: vcpu->stat.instruction_sigp_prefix++; rc = __sigp_set_prefix(vcpu, cpu_addr, parameter, - &vcpu->arch.guest_gprs[r1]); + &vcpu->run->s.regs.gprs[r1]); break; case SIGP_SENSE_RUNNING: vcpu->stat.instruction_sigp_sense_running++; rc = __sigp_sense_running(vcpu, cpu_addr, - &vcpu->arch.guest_gprs[r1]); + &vcpu->run->s.regs.gprs[r1]); break; case SIGP_RESTART: vcpu->stat.instruction_sigp_restart++; + rc = __sigp_restart(vcpu, cpu_addr); + if (rc == 2) /* busy */ + break; /* user space must know about restart */ default: return -EOPNOTSUPP; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index b17c42df61c9..46ef3fd0663b 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -32,10 +32,10 @@ #include <linux/uaccess.h> #include <linux/hugetlb.h> #include <asm/asm-offsets.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/mmu_context.h> +#include <asm/facility.h> #include "../kernel/entry.h" #ifndef CONFIG_64BIT diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 50236610de83..2bea0605856e 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -29,7 +29,6 @@ #include <linux/export.h> #include <linux/gfp.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> @@ -38,6 +37,7 @@ #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/sections.h> +#include <asm/ctl_reg.h> pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE))); diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index 1cb8427bedfb..7bb15fcca75e 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -12,7 +12,7 @@ #include <linux/types.h> #include <linux/errno.h> #include <linux/gfp.h> -#include <asm/system.h> +#include <asm/ctl_reg.h> /* * This function writes to kernel memory bypassing DAT and possible diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 51b0738e13d1..373adf69b01c 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -18,7 +18,6 @@ #include <linux/rcupdate.h> #include <linux/slab.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/tlb.h> diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index f097d516d8c5..c6646de07bf4 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c @@ -17,6 +17,8 @@ #include <linux/semaphore.h> #include <linux/oom.h> #include <linux/oprofile.h> + +#include <asm/facility.h> #include <asm/cpu_mf.h> #include <asm/irq.h> diff --git a/arch/score/include/asm/atomic.h b/arch/score/include/asm/atomic.h index 84eb8ddf9f3f..edf33dbded1e 100644 --- a/arch/score/include/asm/atomic.h +++ b/arch/score/include/asm/atomic.h @@ -1,6 +1,7 @@ #ifndef _ASM_SCORE_ATOMIC_H #define _ASM_SCORE_ATOMIC_H +#include <asm/cmpxchg.h> #include <asm-generic/atomic.h> #endif /* _ASM_SCORE_ATOMIC_H */ diff --git a/arch/score/include/asm/barrier.h b/arch/score/include/asm/barrier.h new file mode 100644 index 000000000000..0eacb6471e6d --- /dev/null +++ b/arch/score/include/asm/barrier.h @@ -0,0 +1,16 @@ +#ifndef _ASM_SCORE_BARRIER_H +#define _ASM_SCORE_BARRIER_H + +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() + +#define read_barrier_depends() do {} while (0) +#define 
smp_read_barrier_depends() do {} while (0) + +#define set_mb(var, value) do {var = value; wmb(); } while (0) + +#endif /* _ASM_SCORE_BARRIER_H */ diff --git a/arch/score/include/asm/bitops.h b/arch/score/include/asm/bitops.h index 2763b050fca8..a304096b1894 100644 --- a/arch/score/include/asm/bitops.h +++ b/arch/score/include/asm/bitops.h @@ -2,7 +2,6 @@ #define _ASM_SCORE_BITOPS_H #include <asm/byteorder.h> /* swab32 */ -#include <asm/system.h> /* save_flags */ /* * clear_bit() doesn't provide any barrier for the compiler. diff --git a/arch/score/include/asm/bug.h b/arch/score/include/asm/bug.h index bb76a330bcf1..fd7164af1f04 100644 --- a/arch/score/include/asm/bug.h +++ b/arch/score/include/asm/bug.h @@ -3,4 +3,15 @@ #include <asm-generic/bug.h> +struct pt_regs; +extern void __die(const char *, struct pt_regs *, const char *, + const char *, unsigned long) __attribute__((noreturn)); +extern void __die_if_kernel(const char *, struct pt_regs *, const char *, + const char *, unsigned long); + +#define die(msg, regs) \ + __die(msg, regs, __FILE__ ":", __func__, __LINE__) +#define die_if_kernel(msg, regs) \ + __die_if_kernel(msg, regs, __FILE__ ":", __func__, __LINE__) + #endif /* _ASM_SCORE_BUG_H */ diff --git a/arch/score/include/asm/cmpxchg.h b/arch/score/include/asm/cmpxchg.h new file mode 100644 index 000000000000..f384839c3ee5 --- /dev/null +++ b/arch/score/include/asm/cmpxchg.h @@ -0,0 +1,49 @@ +#ifndef _ASM_SCORE_CMPXCHG_H +#define _ASM_SCORE_CMPXCHG_H + +#include <linux/irqflags.h> + +struct __xchg_dummy { unsigned long a[100]; }; +#define __xg(x) ((struct __xchg_dummy *)(x)) + +static inline +unsigned long __xchg(volatile unsigned long *m, unsigned long val) +{ + unsigned long retval; + unsigned long flags; + + local_irq_save(flags); + retval = *m; + *m = val; + local_irq_restore(flags); + return retval; +} + +#define xchg(ptr, v) \ + ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \ + (unsigned long)(v))) + +static inline unsigned long __cmpxchg(volatile unsigned long *m, + unsigned long old, unsigned long new) +{ + unsigned long retval; + unsigned long flags; + + local_irq_save(flags); + retval = *m; + if (retval == old) + *m = new; + local_irq_restore(flags); + return retval; +} + +#define cmpxchg(ptr, o, n) \ + ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \ + (unsigned long)(o), \ + (unsigned long)(n))) + +#define __HAVE_ARCH_CMPXCHG 1 + +#include <asm-generic/cmpxchg-local.h> + +#endif /* _ASM_SCORE_CMPXCHG_H */ diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h new file mode 100644 index 000000000000..f9f3cd59c860 --- /dev/null +++ b/arch/score/include/asm/exec.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SCORE_EXEC_H +#define _ASM_SCORE_EXEC_H + +extern unsigned long arch_align_stack(unsigned long sp); + +#endif /* _ASM_SCORE_EXEC_H */ diff --git a/arch/score/include/asm/switch_to.h b/arch/score/include/asm/switch_to.h new file mode 100644 index 000000000000..031756b59ece --- /dev/null +++ b/arch/score/include/asm/switch_to.h @@ -0,0 +1,13 @@ +#ifndef _ASM_SCORE_SWITCH_TO_H +#define _ASM_SCORE_SWITCH_TO_H + +extern void *resume(void *last, void *next, void *next_ti); + +#define switch_to(prev, next, last) \ +do { \ + (last) = resume(prev, next, task_thread_info(next)); \ +} while (0) + +#define finish_arch_switch(prev) do {} while (0) + +#endif /* _ASM_SCORE_SWITCH_TO_H */ diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h deleted file mode 100644 index 589d5c7e171c..000000000000 --- a/arch/score/include/asm/system.h +++ 
/dev/null @@ -1,90 +0,0 @@ -#ifndef _ASM_SCORE_SYSTEM_H -#define _ASM_SCORE_SYSTEM_H - -#include <linux/types.h> -#include <linux/irqflags.h> - -struct pt_regs; -struct task_struct; - -extern void *resume(void *last, void *next, void *next_ti); - -#define switch_to(prev, next, last) \ -do { \ - (last) = resume(prev, next, task_thread_info(next)); \ -} while (0) - -#define finish_arch_switch(prev) do {} while (0) - -typedef void (*vi_handler_t)(void); -extern unsigned long arch_align_stack(unsigned long sp); - -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() - -#define read_barrier_depends() do {} while (0) -#define smp_read_barrier_depends() do {} while (0) - -#define set_mb(var, value) do {var = value; wmb(); } while (0) - -#define __HAVE_ARCH_CMPXCHG 1 - -#include <asm-generic/cmpxchg-local.h> - -#ifndef __ASSEMBLY__ - -struct __xchg_dummy { unsigned long a[100]; }; -#define __xg(x) ((struct __xchg_dummy *)(x)) - -static inline -unsigned long __xchg(volatile unsigned long *m, unsigned long val) -{ - unsigned long retval; - unsigned long flags; - - local_irq_save(flags); - retval = *m; - *m = val; - local_irq_restore(flags); - return retval; -} - -#define xchg(ptr, v) \ - ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr), \ - (unsigned long)(v))) - -static inline unsigned long __cmpxchg(volatile unsigned long *m, - unsigned long old, unsigned long new) -{ - unsigned long retval; - unsigned long flags; - - local_irq_save(flags); - retval = *m; - if (retval == old) - *m = new; - local_irq_restore(flags); - return retval; -} - -#define cmpxchg(ptr, o, n) \ - ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \ - (unsigned long)(o), \ - (unsigned long)(n))) - -extern void __die(const char *, struct pt_regs *, const char *, - const char *, unsigned long) __attribute__((noreturn)); -extern void __die_if_kernel(const char *, struct pt_regs *, const char *, - const char *, unsigned long); - -#define die(msg, regs) \ - __die(msg, regs, __FILE__ ":", __func__, __LINE__) -#define die_if_kernel(msg, regs) \ - __die_if_kernel(msg, regs, __FILE__ ":", __func__, __LINE__) - -#endif /* !__ASSEMBLY__ */ -#endif /* _ASM_SCORE_SYSTEM_H */ diff --git a/arch/sh/boards/mach-microdev/irq.c b/arch/sh/boards/mach-microdev/irq.c index 4fb00369f0e2..9a8aff339619 100644 --- a/arch/sh/boards/mach-microdev/irq.c +++ b/arch/sh/boards/mach-microdev/irq.c @@ -12,7 +12,6 @@ #include <linux/init.h> #include <linux/irq.h> #include <linux/interrupt.h> -#include <asm/system.h> #include <asm/io.h> #include <mach/microdev.h> diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h index 467d9415a32e..9f7c56609e53 100644 --- a/arch/sh/include/asm/atomic-irq.h +++ b/arch/sh/include/asm/atomic-irq.h @@ -1,6 +1,8 @@ #ifndef __ASM_SH_ATOMIC_IRQ_H #define __ASM_SH_ATOMIC_IRQ_H +#include <linux/irqflags.h> + /* * To get proper branch prediction for the main line, we must branch * forward to code at the end of this object's .text section, then diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index 63a27dbc952e..37f2f4a55231 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -9,7 +9,7 @@ #include <linux/compiler.h> #include <linux/types.h> -#include <asm/system.h> +#include <asm/cmpxchg.h> #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) diff --git a/arch/sh/include/asm/auxvec.h b/arch/sh/include/asm/auxvec.h index 483effd65e00..8bcc51af9367 100644 
--- a/arch/sh/include/asm/auxvec.h +++ b/arch/sh/include/asm/auxvec.h @@ -33,4 +33,6 @@ #define AT_L1D_CACHESHAPE 35 #define AT_L2_CACHESHAPE 36 +#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */ + #endif /* __ASM_SH_AUXVEC_H */ diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h new file mode 100644 index 000000000000..72c103dae300 --- /dev/null +++ b/arch/sh/include/asm/barrier.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima + * Copyright (C) 2002 Paul Mundt + */ +#ifndef __ASM_SH_BARRIER_H +#define __ASM_SH_BARRIER_H + +#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) +#include <asm/cache_insns.h> +#endif + +/* + * A brief note on ctrl_barrier(), the control register write barrier. + * + * Legacy SH cores typically require a sequence of 8 nops after + * modification of a control register in order for the changes to take + * effect. On newer cores (like the sh4a and sh5) this is accomplished + * with icbi. + * + * Also note that on sh4a in the icbi case we can forego a synco for the + * write barrier, as it's not necessary for control registers. + * + * Historically we have only done this type of barrier for the MMUCR, but + * it's also necessary for the CCR, so we make it generic here instead. + */ +#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) +#define mb() __asm__ __volatile__ ("synco": : :"memory") +#define rmb() mb() +#define wmb() __asm__ __volatile__ ("synco": : :"memory") +#define ctrl_barrier() __icbi(PAGE_OFFSET) +#define read_barrier_depends() do { } while(0) +#else +#define mb() __asm__ __volatile__ ("": : :"memory") +#define rmb() mb() +#define wmb() __asm__ __volatile__ ("": : :"memory") +#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop") +#define read_barrier_depends() do { } while(0) +#endif + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) +#endif + +#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) + +#endif /* __ASM_SH_BARRIER_H */ diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index 90fa3e48b4d6..ea8706d94f08 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -7,7 +7,6 @@ #error only <linux/bitops.h> can be included directly #endif -#include <asm/system.h> /* For __swab32 */ #include <asm/byteorder.h> diff --git a/arch/sh/include/asm/bl_bit.h b/arch/sh/include/asm/bl_bit.h new file mode 100644 index 000000000000..45e6b9fc37a0 --- /dev/null +++ b/arch/sh/include/asm/bl_bit.h @@ -0,0 +1,10 @@ +#ifndef __ASM_SH_BL_BIT_H +#define __ASM_SH_BL_BIT_H + +#ifdef CONFIG_SUPERH32 +# include "bl_bit_32.h" +#else +# include "bl_bit_64.h" +#endif + +#endif /* __ASM_SH_BL_BIT_H */ diff --git a/arch/sh/include/asm/bl_bit_32.h b/arch/sh/include/asm/bl_bit_32.h new file mode 100644 index 000000000000..fd21eee62149 --- /dev/null +++ b/arch/sh/include/asm/bl_bit_32.h @@ -0,0 +1,33 @@ +#ifndef __ASM_SH_BL_BIT_32_H +#define __ASM_SH_BL_BIT_32_H + +static inline void set_bl_bit(void) +{ + unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "or %2, %0\n\t" + "and %3, %0\n\t" + "ldc %0, sr\n\t" + : "=&r" (__dummy0), "=r" (__dummy1) + : "r" (0x10000000), "r" (0xffffff0f) + : "memory" + ); +} + +static inline void clear_bl_bit(void) +{ + 
unsigned long __dummy0, __dummy1; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "and %2, %0\n\t" + "ldc %0, sr\n\t" + : "=&r" (__dummy0), "=r" (__dummy1) + : "1" (~0x10000000) + : "memory" + ); +} + +#endif /* __ASM_SH_BL_BIT_32_H */ diff --git a/arch/sh/include/asm/bl_bit_64.h b/arch/sh/include/asm/bl_bit_64.h new file mode 100644 index 000000000000..6cc8711af435 --- /dev/null +++ b/arch/sh/include/asm/bl_bit_64.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * Copyright (C) 2004 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef __ASM_SH_BL_BIT_64_H +#define __ASM_SH_BL_BIT_64_H + +#include <asm/processor.h> + +#define SR_BL_LL 0x0000000010000000LL + +static inline void set_bl_bit(void) +{ + unsigned long long __dummy0, __dummy1 = SR_BL_LL; + + __asm__ __volatile__("getcon " __SR ", %0\n\t" + "or %0, %1, %0\n\t" + "putcon %0, " __SR "\n\t" + : "=&r" (__dummy0) + : "r" (__dummy1)); + +} + +static inline void clear_bl_bit(void) +{ + unsigned long long __dummy0, __dummy1 = ~SR_BL_LL; + + __asm__ __volatile__("getcon " __SR ", %0\n\t" + "and %0, %1, %0\n\t" + "putcon %0, " __SR "\n\t" + : "=&r" (__dummy0) + : "r" (__dummy1)); +} + +#endif /* __ASM_SH_BL_BIT_64_H */ diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h index 6323f864d111..2b87d86bfc41 100644 --- a/arch/sh/include/asm/bug.h +++ b/arch/sh/include/asm/bug.h @@ -1,6 +1,8 @@ #ifndef __ASM_SH_BUG_H #define __ASM_SH_BUG_H +#include <linux/linkage.h> + #define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */ #define BUGFLAG_UNWINDER (1 << 1) @@ -107,4 +109,7 @@ do { \ #include <asm-generic/bug.h> +struct pt_regs; +extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn)); + #endif /* __ASM_SH_BUG_H */ diff --git a/arch/sh/include/asm/cache_insns.h b/arch/sh/include/asm/cache_insns.h new file mode 100644 index 000000000000..d25fbe53090d --- /dev/null +++ b/arch/sh/include/asm/cache_insns.h @@ -0,0 +1,11 @@ +#ifndef __ASM_SH_CACHE_INSNS_H +#define __ASM_SH_CACHE_INSNS_H + + +#ifdef CONFIG_SUPERH32 +# include "cache_insns_32.h" +#else +# include "cache_insns_64.h" +#endif + +#endif /* __ASM_SH_CACHE_INSNS_H */ diff --git a/arch/sh/include/asm/cache_insns_32.h b/arch/sh/include/asm/cache_insns_32.h new file mode 100644 index 000000000000..b92fe5416092 --- /dev/null +++ b/arch/sh/include/asm/cache_insns_32.h @@ -0,0 +1,21 @@ +#ifndef __ASM_SH_CACHE_INSNS_32_H +#define __ASM_SH_CACHE_INSNS_32_H + +#include <linux/types.h> + +#if defined(CONFIG_CPU_SH4A) +#define __icbi(addr) __asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr)) +#else +#define __icbi(addr) mb() +#endif + +#define __ocbp(addr) __asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr)) +#define __ocbi(addr) __asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr)) +#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr)) + +static inline reg_size_t register_align(void *val) +{ + return (unsigned long)(signed long)val; +} + +#endif /* __ASM_SH_CACHE_INSNS_32_H */ diff --git a/arch/sh/include/asm/cache_insns_64.h b/arch/sh/include/asm/cache_insns_64.h new file mode 100644 index 000000000000..70b6357eaf1a --- /dev/null +++ b/arch/sh/include/asm/cache_insns_64.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * Copyright (C) 2004 Richard Curnow + * + * This file is 
subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef __ASM_SH_CACHE_INSNS_64_H +#define __ASM_SH_CACHE_INSNS_64_H + +#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr)) +#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr)) +#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr)) +#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr)) + +static inline reg_size_t register_align(void *val) +{ + return (unsigned long long)(signed long long)(signed long)val; +} + +#endif /* __ASM_SH_CACHE_INSNS_64_H */ diff --git a/arch/sh/include/asm/cmpxchg-irq.h b/arch/sh/include/asm/cmpxchg-irq.h index 43049ec0554b..bd11f630414a 100644 --- a/arch/sh/include/asm/cmpxchg-irq.h +++ b/arch/sh/include/asm/cmpxchg-irq.h @@ -1,6 +1,8 @@ #ifndef __ASM_SH_CMPXCHG_IRQ_H #define __ASM_SH_CMPXCHG_IRQ_H +#include <linux/irqflags.h> + static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) { unsigned long flags, retval; diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h new file mode 100644 index 000000000000..f6bd1406b897 --- /dev/null +++ b/arch/sh/include/asm/cmpxchg.h @@ -0,0 +1,70 @@ +#ifndef __ASM_SH_CMPXCHG_H +#define __ASM_SH_CMPXCHG_H + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc.. + */ + +#include <linux/compiler.h> +#include <linux/types.h> + +#if defined(CONFIG_GUSA_RB) +#include <asm/cmpxchg-grb.h> +#elif defined(CONFIG_CPU_SH4A) +#include <asm/cmpxchg-llsc.h> +#else +#include <asm/cmpxchg-irq.h> +#endif + +extern void __xchg_called_with_bad_pointer(void); + +#define __xchg(ptr, x, size) \ +({ \ + unsigned long __xchg__res; \ + volatile void *__xchg_ptr = (ptr); \ + switch (size) { \ + case 4: \ + __xchg__res = xchg_u32(__xchg_ptr, x); \ + break; \ + case 1: \ + __xchg__res = xchg_u8(__xchg_ptr, x); \ + break; \ + default: \ + __xchg_called_with_bad_pointer(); \ + __xchg__res = x; \ + break; \ + } \ + \ + __xchg__res; \ +}) + +#define xchg(ptr,x) \ + ((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr)))) + +/* This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid cmpxchg(). 
*/ +extern void __cmpxchg_called_with_bad_pointer(void); + +#define __HAVE_ARCH_CMPXCHG 1 + +static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, + unsigned long new, int size) +{ + switch (size) { + case 4: + return __cmpxchg_u32(ptr, old, new); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#define cmpxchg(ptr,o,n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ + }) + +#endif /* __ASM_SH_CMPXCHG_H */ diff --git a/arch/sh/include/asm/exec.h b/arch/sh/include/asm/exec.h new file mode 100644 index 000000000000..69486a9497f7 --- /dev/null +++ b/arch/sh/include/asm/exec.h @@ -0,0 +1,10 @@ +/* + * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima + * Copyright (C) 2002 Paul Mundt + */ +#ifndef __ASM_SH_EXEC_H +#define __ASM_SH_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* __ASM_SH_EXEC_H */ diff --git a/arch/sh/include/asm/futex-irq.h b/arch/sh/include/asm/futex-irq.h index 6cb9f193a95e..63d33129ea23 100644 --- a/arch/sh/include/asm/futex-irq.h +++ b/arch/sh/include/asm/futex-irq.h @@ -1,7 +1,6 @@ #ifndef __ASM_SH_FUTEX_IRQ_H #define __ASM_SH_FUTEX_IRQ_H -#include <asm/system.h> static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *oldval) diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 28c5aa58bb45..35fc8b077cb1 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -14,7 +14,6 @@ */ #include <linux/errno.h> #include <asm/cache.h> -#include <asm/system.h> #include <asm/addrspace.h> #include <asm/machvec.h> #include <asm/pgtable.h> diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index 9c7bdfcaebbd..a229c393826a 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -101,6 +101,10 @@ extern struct sh_cpuinfo cpu_data[]; #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") #define cpu_relax() barrier() +void default_idle(void); +void cpu_idle_wait(void); +void stop_this_cpu(void *); + /* Forward decl */ struct seq_operations; struct task_struct; @@ -161,6 +165,17 @@ int vsyscall_init(void); #define vsyscall_init() do { } while (0) #endif +/* + * SH-2A has both 16 and 32-bit opcodes, do lame encoding checks. 
+ */ +#ifdef CONFIG_CPU_SH2A +extern unsigned int instruction_size(unsigned int insn); +#elif defined(CONFIG_SUPERH32) +#define instruction_size(insn) (2) +#else +#define instruction_size(insn) (4) +#endif + #endif /* __ASSEMBLY__ */ #ifdef CONFIG_SUPERH32 diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h index 2d3679b2447f..c7b7e1ed194a 100644 --- a/arch/sh/include/asm/ptrace.h +++ b/arch/sh/include/asm/ptrace.h @@ -37,7 +37,6 @@ #include <linux/thread_info.h> #include <asm/addrspace.h> #include <asm/page.h> -#include <asm/system.h> #define user_mode(regs) (((regs)->sr & 0x40000000)==0) #define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15]) diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h index 01fa17a3d759..465a22df8fd0 100644 --- a/arch/sh/include/asm/setup.h +++ b/arch/sh/include/asm/setup.h @@ -20,6 +20,7 @@ void sh_mv_setup(void); void check_for_initrd(void); +void per_cpu_trap_init(void); #endif /* __KERNEL__ */ diff --git a/arch/sh/include/asm/switch_to.h b/arch/sh/include/asm/switch_to.h new file mode 100644 index 000000000000..62b1941813e3 --- /dev/null +++ b/arch/sh/include/asm/switch_to.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * Copyright (C) 2004 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef __ASM_SH_SWITCH_TO_H +#define __ASM_SH_SWITCH_TO_H + +#ifdef CONFIG_SUPERH32 +# include "switch_to_32.h" +#else +# include "switch_to_64.h" +#endif + +#endif /* __ASM_SH_SWITCH_TO_H */ diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/switch_to_32.h index a4ad1cd9bc4d..0c065513e7ac 100644 --- a/arch/sh/include/asm/system_32.h +++ b/arch/sh/include/asm/switch_to_32.h @@ -1,8 +1,5 @@ -#ifndef __ASM_SH_SYSTEM_32_H -#define __ASM_SH_SYSTEM_32_H - -#include <linux/types.h> -#include <asm/mmu.h> +#ifndef __ASM_SH_SWITCH_TO_32_H +#define __ASM_SH_SWITCH_TO_32_H #ifdef CONFIG_SH_DSP @@ -32,7 +29,6 @@ do { \ : : "r" (__ts2)); \ } while (0) - #define __save_dsp(tsk) \ do { \ register u32 *__ts2 __asm__ ("r2") = \ @@ -64,16 +60,6 @@ do { \ #define __restore_dsp(tsk) do { } while (0) #endif -#if defined(CONFIG_CPU_SH4A) -#define __icbi(addr) __asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr)) -#else -#define __icbi(addr) mb() -#endif - -#define __ocbp(addr) __asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr)) -#define __ocbi(addr) __asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr)) -#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr)) - struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next); @@ -145,92 +131,4 @@ do { \ __restore_dsp(prev); \ } while (0) -#ifdef CONFIG_CPU_HAS_SR_RB -#define lookup_exception_vector() \ -({ \ - unsigned long _vec; \ - \ - __asm__ __volatile__ ( \ - "stc r2_bank, %0\n\t" \ - : "=r" (_vec) \ - ); \ - \ - _vec; \ -}) -#else -#define lookup_exception_vector() \ -({ \ - unsigned long _vec; \ - __asm__ __volatile__ ( \ - "mov r4, %0\n\t" \ - : "=r" (_vec) \ - ); \ - \ - _vec; \ -}) -#endif - -static inline reg_size_t register_align(void *val) -{ - return (unsigned long)(signed long)val; -} - -int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, - struct mem_access *ma, int, unsigned long address); - -static inline void trigger_address_error(void) -{ - __asm__ __volatile__ ( - "ldc %0, sr\n\t" - 
"mov.l @%1, %0" - : - : "r" (0x10000000), "r" (0x80000001) - ); -} - -asmlinkage void do_address_error(struct pt_regs *regs, - unsigned long writeaccess, - unsigned long address); -asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7, - struct pt_regs __regs); -asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7, - struct pt_regs __regs); -asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7, - struct pt_regs __regs); -asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7, - struct pt_regs __regs); - -static inline void set_bl_bit(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "or %2, %0\n\t" - "and %3, %0\n\t" - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "r" (0x10000000), "r" (0xffffff0f) - : "memory" - ); -} - -static inline void clear_bl_bit(void) -{ - unsigned long __dummy0, __dummy1; - - __asm__ __volatile__ ( - "stc sr, %0\n\t" - "and %2, %0\n\t" - "ldc %0, sr\n\t" - : "=&r" (__dummy0), "=r" (__dummy1) - : "1" (~0x10000000) - : "memory" - ); -} - -#endif /* __ASM_SH_SYSTEM_32_H */ +#endif /* __ASM_SH_SWITCH_TO_32_H */ diff --git a/arch/sh/include/asm/switch_to_64.h b/arch/sh/include/asm/switch_to_64.h new file mode 100644 index 000000000000..ba3129d6bc21 --- /dev/null +++ b/arch/sh/include/asm/switch_to_64.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * Copyright (C) 2004 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef __ASM_SH_SWITCH_TO_64_H +#define __ASM_SH_SWITCH_TO_64_H + +struct thread_struct; +struct task_struct; + +/* + * switch_to() should switch tasks to task nr n, first + */ +struct task_struct *sh64_switch_to(struct task_struct *prev, + struct thread_struct *prev_thread, + struct task_struct *next, + struct thread_struct *next_thread); + +#define switch_to(prev,next,last) \ +do { \ + if (last_task_used_math != next) { \ + struct pt_regs *regs = next->thread.uregs; \ + if (regs) regs->sr |= SR_FD; \ + } \ + last = sh64_switch_to(prev, &prev->thread, next, \ + &next->thread); \ +} while (0) + + +#endif /* __ASM_SH_SWITCH_TO_64_H */ diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h deleted file mode 100644 index 10c8b1823a18..000000000000 --- a/arch/sh/include/asm/system.h +++ /dev/null @@ -1,184 +0,0 @@ -#ifndef __ASM_SH_SYSTEM_H -#define __ASM_SH_SYSTEM_H - -/* - * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima - * Copyright (C) 2002 Paul Mundt - */ - -#include <linux/irqflags.h> -#include <linux/compiler.h> -#include <linux/linkage.h> -#include <asm/types.h> -#include <asm/uncached.h> - -#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */ - -/* - * A brief note on ctrl_barrier(), the control register write barrier. - * - * Legacy SH cores typically require a sequence of 8 nops after - * modification of a control register in order for the changes to take - * effect. On newer cores (like the sh4a and sh5) this is accomplished - * with icbi. - * - * Also note that on sh4a in the icbi case we can forego a synco for the - * write barrier, as it's not necessary for control registers. 
- * - * Historically we have only done this type of barrier for the MMUCR, but - * it's also necessary for the CCR, so we make it generic here instead. - */ -#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) -#define mb() __asm__ __volatile__ ("synco": : :"memory") -#define rmb() mb() -#define wmb() __asm__ __volatile__ ("synco": : :"memory") -#define ctrl_barrier() __icbi(PAGE_OFFSET) -#define read_barrier_depends() do { } while(0) -#else -#define mb() __asm__ __volatile__ ("": : :"memory") -#define rmb() mb() -#define wmb() __asm__ __volatile__ ("": : :"memory") -#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop") -#define read_barrier_depends() do { } while(0) -#endif - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#endif - -#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) - -#ifdef CONFIG_GUSA_RB -#include <asm/cmpxchg-grb.h> -#elif defined(CONFIG_CPU_SH4A) -#include <asm/cmpxchg-llsc.h> -#else -#include <asm/cmpxchg-irq.h> -#endif - -extern void __xchg_called_with_bad_pointer(void); - -#define __xchg(ptr, x, size) \ -({ \ - unsigned long __xchg__res; \ - volatile void *__xchg_ptr = (ptr); \ - switch (size) { \ - case 4: \ - __xchg__res = xchg_u32(__xchg_ptr, x); \ - break; \ - case 1: \ - __xchg__res = xchg_u8(__xchg_ptr, x); \ - break; \ - default: \ - __xchg_called_with_bad_pointer(); \ - __xchg__res = x; \ - break; \ - } \ - \ - __xchg__res; \ -}) - -#define xchg(ptr,x) \ - ((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr)))) - -/* This function doesn't exist, so you'll get a linker error - * if something tries to do an invalid cmpxchg(). */ -extern void __cmpxchg_called_with_bad_pointer(void); - -#define __HAVE_ARCH_CMPXCHG 1 - -static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, - unsigned long new, int size) -{ - switch (size) { - case 4: - return __cmpxchg_u32(ptr, old, new); - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#define cmpxchg(ptr,o,n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ - }) - -struct pt_regs; - -extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn)); -void free_initmem(void); -void free_initrd_mem(unsigned long start, unsigned long end); - -extern void *set_exception_table_vec(unsigned int vec, void *handler); - -static inline void *set_exception_table_evt(unsigned int evt, void *handler) -{ - return set_exception_table_vec(evt >> 5, handler); -} - -/* - * SH-2A has both 16 and 32-bit opcodes, do lame encoding checks. 
- */ -#ifdef CONFIG_CPU_SH2A -extern unsigned int instruction_size(unsigned int insn); -#elif defined(CONFIG_SUPERH32) -#define instruction_size(insn) (2) -#else -#define instruction_size(insn) (4) -#endif - -void per_cpu_trap_init(void); -void default_idle(void); -void cpu_idle_wait(void); -void stop_this_cpu(void *); - -#ifdef CONFIG_SUPERH32 -#define BUILD_TRAP_HANDLER(name) \ -asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \ - unsigned long r6, unsigned long r7, \ - struct pt_regs __regs) - -#define TRAP_HANDLER_DECL \ - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); \ - unsigned int vec = regs->tra; \ - (void)vec; -#else -#define BUILD_TRAP_HANDLER(name) \ -asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs) -#define TRAP_HANDLER_DECL -#endif - -BUILD_TRAP_HANDLER(address_error); -BUILD_TRAP_HANDLER(debug); -BUILD_TRAP_HANDLER(bug); -BUILD_TRAP_HANDLER(breakpoint); -BUILD_TRAP_HANDLER(singlestep); -BUILD_TRAP_HANDLER(fpu_error); -BUILD_TRAP_HANDLER(fpu_state_restore); -BUILD_TRAP_HANDLER(nmi); - -#define arch_align_stack(x) (x) - -struct mem_access { - unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt); - unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt); -}; - -#ifdef CONFIG_SUPERH32 -# include "system_32.h" -#else -# include "system_64.h" -#endif - -#endif diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h deleted file mode 100644 index 8593bc8d1a4e..000000000000 --- a/arch/sh/include/asm/system_64.h +++ /dev/null @@ -1,79 +0,0 @@ -#ifndef __ASM_SH_SYSTEM_64_H -#define __ASM_SH_SYSTEM_64_H - -/* - * include/asm-sh/system_64.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * Copyright (C) 2003 Paul Mundt - * Copyright (C) 2004 Richard Curnow - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ -#include <cpu/registers.h> -#include <asm/processor.h> - -/* - * switch_to() should switch tasks to task nr n, first - */ -struct thread_struct; -struct task_struct *sh64_switch_to(struct task_struct *prev, - struct thread_struct *prev_thread, - struct task_struct *next, - struct thread_struct *next_thread); - -#define switch_to(prev,next,last) \ -do { \ - if (last_task_used_math != next) { \ - struct pt_regs *regs = next->thread.uregs; \ - if (regs) regs->sr |= SR_FD; \ - } \ - last = sh64_switch_to(prev, &prev->thread, next, \ - &next->thread); \ -} while (0) - -#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr)) -#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr)) -#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr)) -#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr)) - -static inline reg_size_t register_align(void *val) -{ - return (unsigned long long)(signed long long)(signed long)val; -} - -extern void phys_stext(void); - -static inline void trigger_address_error(void) -{ - phys_stext(); -} - -#define SR_BL_LL 0x0000000010000000LL - -static inline void set_bl_bit(void) -{ - unsigned long long __dummy0, __dummy1 = SR_BL_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "or %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); - -} - -static inline void clear_bl_bit(void) -{ - unsigned long long __dummy0, __dummy1 = ~SR_BL_LL; - - __asm__ __volatile__("getcon " __SR ", %0\n\t" - "and %0, %1, %0\n\t" - "putcon %0, " __SR "\n\t" - : "=&r" (__dummy0) - : "r" (__dummy1)); -} - -#endif /* __ASM_SH_SYSTEM_64_H */ diff --git a/arch/sh/include/asm/traps.h b/arch/sh/include/asm/traps.h new file mode 100644 index 000000000000..afd9df8d0641 --- /dev/null +++ b/arch/sh/include/asm/traps.h @@ -0,0 +1,21 @@ +#ifndef __ASM_SH_TRAPS_H +#define __ASM_SH_TRAPS_H + +#include <linux/compiler.h> + +#ifdef CONFIG_SUPERH32 +# include "traps_32.h" +#else +# include "traps_64.h" +#endif + +BUILD_TRAP_HANDLER(address_error); +BUILD_TRAP_HANDLER(debug); +BUILD_TRAP_HANDLER(bug); +BUILD_TRAP_HANDLER(breakpoint); +BUILD_TRAP_HANDLER(singlestep); +BUILD_TRAP_HANDLER(fpu_error); +BUILD_TRAP_HANDLER(fpu_state_restore); +BUILD_TRAP_HANDLER(nmi); + +#endif /* __ASM_SH_TRAPS_H */ diff --git a/arch/sh/include/asm/traps_32.h b/arch/sh/include/asm/traps_32.h new file mode 100644 index 000000000000..cfd55ff9dff2 --- /dev/null +++ b/arch/sh/include/asm/traps_32.h @@ -0,0 +1,68 @@ +#ifndef __ASM_SH_TRAPS_32_H +#define __ASM_SH_TRAPS_32_H + +#include <linux/types.h> +#include <asm/mmu.h> + +#ifdef CONFIG_CPU_HAS_SR_RB +#define lookup_exception_vector() \ +({ \ + unsigned long _vec; \ + \ + __asm__ __volatile__ ( \ + "stc r2_bank, %0\n\t" \ + : "=r" (_vec) \ + ); \ + \ + _vec; \ +}) +#else +#define lookup_exception_vector() \ +({ \ + unsigned long _vec; \ + __asm__ __volatile__ ( \ + "mov r4, %0\n\t" \ + : "=r" (_vec) \ + ); \ + \ + _vec; \ +}) +#endif + +static inline void trigger_address_error(void) +{ + __asm__ __volatile__ ( + "ldc %0, sr\n\t" + "mov.l @%1, %0" + : + : "r" (0x10000000), "r" (0x80000001) + ); +} + +asmlinkage void do_address_error(struct pt_regs *regs, + unsigned long writeaccess, + unsigned long address); +asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); +asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs 
__regs); +asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); +asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); + +#define BUILD_TRAP_HANDLER(name) \ +asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \ + unsigned long r6, unsigned long r7, \ + struct pt_regs __regs) + +#define TRAP_HANDLER_DECL \ + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); \ + unsigned int vec = regs->tra; \ + (void)vec; + +#endif /* __ASM_SH_TRAPS_32_H */ diff --git a/arch/sh/include/asm/traps_64.h b/arch/sh/include/asm/traps_64.h new file mode 100644 index 000000000000..c52d7f9a06c1 --- /dev/null +++ b/arch/sh/include/asm/traps_64.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * Copyright (C) 2004 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef __ASM_SH_TRAPS_64_H +#define __ASM_SH_TRAPS_64_H + +extern void phys_stext(void); + +static inline void trigger_address_error(void) +{ + phys_stext(); +} + +#define BUILD_TRAP_HANDLER(name) \ +asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs) +#define TRAP_HANDLER_DECL + +#endif /* __ASM_SH_TRAPS_64_H */ diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 075848f43b6a..050f221fa898 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -254,5 +254,19 @@ int fixup_exception(struct pt_regs *regs); unsigned long search_exception_table(unsigned long addr); const struct exception_table_entry *search_exception_tables(unsigned long addr); +extern void *set_exception_table_vec(unsigned int vec, void *handler); + +static inline void *set_exception_table_evt(unsigned int evt, void *handler) +{ + return set_exception_table_vec(evt >> 5, handler); +} + +struct mem_access { + unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt); + unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt); +}; + +int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, + struct mem_access *ma, int, unsigned long address); #endif /* __ASM_SH_UACCESS_H */ diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c index fac742e514ee..61a07dafcd46 100644 --- a/arch/sh/kernel/cpu/init.c +++ b/arch/sh/kernel/cpu/init.c @@ -18,13 +18,13 @@ #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/page.h> -#include <asm/system.h> #include <asm/cacheflush.h> #include <asm/cache.h> #include <asm/elf.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/sh_bios.h> +#include <asm/setup.h> #ifdef CONFIG_SH_FPU #define cpu_has_fpu 1 diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c index 39b6a24c159d..e7f1745bd121 100644 --- a/arch/sh/kernel/cpu/irq/imask.c +++ b/arch/sh/kernel/cpu/irq/imask.c @@ -19,7 +19,6 @@ #include <linux/cache.h> #include <linux/irq.h> #include <linux/bitmap.h> -#include <asm/system.h> #include <asm/irq.h> /* Bitmap of IRQ masked */ diff --git a/arch/sh/kernel/cpu/sh2a/opcode_helper.c b/arch/sh/kernel/cpu/sh2a/opcode_helper.c index 9704b7926d8b..72aa61c81e48 100644 --- a/arch/sh/kernel/cpu/sh2a/opcode_helper.c +++ b/arch/sh/kernel/cpu/sh2a/opcode_helper.c @@ -10,7 +10,6 @@ * for more details. 
*/ #include <linux/kernel.h> -#include <asm/system.h> /* * Instructions on SH are generally fixed at 16-bits, however, SH-2A diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c index 447482d7f65e..e74cd6c0f10d 100644 --- a/arch/sh/kernel/cpu/sh4/fpu.c +++ b/arch/sh/kernel/cpu/sh4/fpu.c @@ -15,7 +15,6 @@ #include <linux/io.h> #include <cpu/fpu.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/fpu.h> /* The PR (precision) bit in the FP Status Register must be clear when diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c index efae6ab3d54c..f9173766ec4b 100644 --- a/arch/sh/kernel/hw_breakpoint.c +++ b/arch/sh/kernel/hw_breakpoint.c @@ -22,6 +22,7 @@ #include <asm/hw_breakpoint.h> #include <asm/mmu_context.h> #include <asm/ptrace.h> +#include <asm/traps.h> /* * Stores the breakpoints currently in use on each breakpoint address diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 7e4892826563..64852ecc6881 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c @@ -18,9 +18,9 @@ #include <linux/smp.h> #include <linux/cpuidle.h> #include <asm/pgalloc.h> -#include <asm/system.h> #include <linux/atomic.h> #include <asm/smp.h> +#include <asm/bl_bit.h> void (*pm_idle)(void); diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c index 0f62f4672754..c0a9761f2f8a 100644 --- a/arch/sh/kernel/io_trapped.c +++ b/arch/sh/kernel/io_trapped.c @@ -15,7 +15,6 @@ #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/init.h> -#include <asm/system.h> #include <asm/mmu_context.h> #include <asm/uaccess.h> #include <asm/io.h> diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 7ec665178125..f72e3a951588 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -24,7 +24,6 @@ #include <linux/prefetch.h> #include <asm/uaccess.h> #include <asm/mmu_context.h> -#include <asm/system.h> #include <asm/fpu.h> #include <asm/syscalls.h> diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index cbd4e4bb9fc5..4264583eabac 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c @@ -30,6 +30,7 @@ #include <asm/pgtable.h> #include <asm/mmu_context.h> #include <asm/fpu.h> +#include <asm/switch_to.h> struct task_struct *last_task_used_math = NULL; diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index a3e651563763..9698671444e6 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -28,7 +28,6 @@ #include <linux/hw_breakpoint.h> #include <asm/uaccess.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/mmu_context.h> #include <asm/syscalls.h> diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c index 3d0080b5c976..bc81e07dc098 100644 --- a/arch/sh/kernel/ptrace_64.c +++ b/arch/sh/kernel/ptrace_64.c @@ -34,11 +34,11 @@ #include <asm/io.h> #include <asm/uaccess.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/mmu_context.h> #include <asm/syscalls.h> #include <asm/fpu.h> +#include <asm/traps.h> #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> diff --git a/arch/sh/kernel/reboot.c b/arch/sh/kernel/reboot.c index ca6a5ca64015..04afe5b20663 100644 --- a/arch/sh/kernel/reboot.c +++ b/arch/sh/kernel/reboot.c @@ -8,8 +8,8 @@ #endif #include <asm/addrspace.h> #include <asm/reboot.h> -#include <asm/system.h> #include <asm/tlbflush.h> +#include <asm/traps.h> void (*pm_power_off)(void); 
EXPORT_SYMBOL(pm_power_off); diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index a7a55ed43a59..0bc58866add1 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -25,7 +25,6 @@ #include <linux/freezer.h> #include <linux/io.h> #include <linux/tracehook.h> -#include <asm/system.h> #include <asm/ucontext.h> #include <asm/uaccess.h> #include <asm/pgtable.h> diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index f624174bf239..a17a14d32340 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -23,7 +23,6 @@ #include <linux/sched.h> #include <linux/atomic.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/mmu_context.h> #include <asm/smp.h> #include <asm/cacheflush.h> diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index 0830c2a9f712..a87e58a9e38f 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -7,7 +7,7 @@ #include <linux/uaccess.h> #include <linux/hardirq.h> #include <asm/unwinder.h> -#include <asm/system.h> +#include <asm/traps.h> #ifdef CONFIG_GENERIC_BUG static void handle_BUG(struct pt_regs *regs) diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 7bbef95c9d1b..a37175deb73f 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -27,10 +27,11 @@ #include <linux/sysfs.h> #include <linux/uaccess.h> #include <linux/perf_event.h> -#include <asm/system.h> #include <asm/alignment.h> #include <asm/fpu.h> #include <asm/kprobes.h> +#include <asm/traps.h> +#include <asm/bl_bit.h> #ifdef CONFIG_CPU_SH2 # define TRAP_RESERVED_INST 4 diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index cd3a40483299..6c0486094e48 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c @@ -25,7 +25,6 @@ #include <linux/sysctl.h> #include <linux/module.h> #include <linux/perf_event.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/atomic.h> diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c index 977195210653..b876780c1e1c 100644 --- a/arch/sh/math-emu/math.c +++ b/arch/sh/math-emu/math.c @@ -14,7 +14,6 @@ #include <linux/signal.h> #include <linux/perf_event.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/processor.h> #include <asm/io.h> diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 7bebd044f2a1..324eef93c900 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -17,9 +17,9 @@ #include <linux/kprobes.h> #include <linux/perf_event.h> #include <asm/io_trapped.h> -#include <asm/system.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> +#include <asm/traps.h> static inline int notify_page_fault(struct pt_regs *regs, int trap) { diff --git a/arch/sh/mm/fault_64.c b/arch/sh/mm/fault_64.c index 2b356cec2489..44a341029e7b 100644 --- a/arch/sh/mm/fault_64.c +++ b/arch/sh/mm/fault_64.c @@ -33,7 +33,6 @@ #include <linux/mm.h> #include <linux/smp.h> #include <linux/interrupt.h> -#include <asm/system.h> #include <asm/tlb.h> #include <asm/io.h> #include <asm/uaccess.h> diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c index cef402678f42..75a17f5bfa14 100644 --- a/arch/sh/mm/flush-sh4.c +++ b/arch/sh/mm/flush-sh4.c @@ -1,6 +1,7 @@ #include <linux/mm.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> +#include <asm/traps.h> /* * Write back the dirty D-caches, but not invalidate them. 
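[Note: the following sketch is editorial illustration, not part of the patch. The sh files above stop pulling in <asm/system.h> and instead include the new <asm/traps.h>, which now carries the BUILD_TRAP_HANDLER()/TRAP_HANDLER_DECL plumbing shown earlier. A minimal, hypothetical handler written against those macros looks like this:]

/* Illustrative sketch only -- not part of the patch above.  On SUPERH32
 * BUILD_TRAP_HANDLER() expands to the five-argument asmlinkage form from
 * traps_32.h and TRAP_HANDLER_DECL recovers regs and the trap vector;
 * on SUPERH64 it expands to the (vec, regs) form from traps_64.h. */
#include <asm/traps.h>

BUILD_TRAP_HANDLER(nmi)
{
	TRAP_HANDLER_DECL;

	/* regs and vec (regs->tra on 32-bit) are available here; a real
	 * handler would act on them. */
}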
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index fad52f1f6812..7160c9fd6fe3 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -25,7 +25,6 @@ #include <linux/vmalloc.h> #include <asm/cacheflush.h> #include <asm/sizes.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/page.h> diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c index b71db6af8060..4db21adfe5de 100644 --- a/arch/sh/mm/tlb-pteaex.c +++ b/arch/sh/mm/tlb-pteaex.c @@ -12,7 +12,6 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/io.h> -#include <asm/system.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c index 7a940dbfc2e9..6554fb439f0e 100644 --- a/arch/sh/mm/tlb-sh3.c +++ b/arch/sh/mm/tlb-sh3.c @@ -20,7 +20,6 @@ #include <linux/smp.h> #include <linux/interrupt.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c index cfdf7930d294..d42dd7e443d5 100644 --- a/arch/sh/mm/tlb-sh4.c +++ b/arch/sh/mm/tlb-sh4.c @@ -11,7 +11,6 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/io.h> -#include <asm/system.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index e3430e093d43..11c5a18f10ed 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c @@ -22,7 +22,6 @@ #include <linux/smp.h> #include <linux/perf_event.h> #include <linux/interrupt.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/tlb.h> #include <asm/uaccess.h> diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 9dd0a769fa18..905832aa9e9e 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -13,9 +13,9 @@ #include <linux/types.h> +#include <asm/cmpxchg.h> #include <asm-generic/atomic64.h> -#include <asm/system.h> #define ATOMIC_INIT(i) { (i) } diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index 9f421df46aec..ce35a1cf1a20 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -8,7 +8,7 @@ #define __ARCH_SPARC64_ATOMIC__ #include <linux/types.h> -#include <asm/system.h> +#include <asm/cmpxchg.h> #define ATOMIC_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) } @@ -85,7 +85,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) return c; } - #define atomic64_cmpxchg(v, o, n) \ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) diff --git a/arch/sparc/include/asm/auxio_32.h b/arch/sparc/include/asm/auxio_32.h index e03e088be95f..3a319775ae37 100644 --- a/arch/sparc/include/asm/auxio_32.h +++ b/arch/sparc/include/asm/auxio_32.h @@ -6,7 +6,6 @@ #ifndef _SPARC_AUXIO_H #define _SPARC_AUXIO_H -#include <asm/system.h> #include <asm/vaddrs.h> /* This register is an unsigned char in IO space. It does two things. 
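[Note: the following sketch is editorial illustration, not part of the patch. The sparc atomic headers above show the pattern used throughout the series: the catch-all <asm/system.h> include is replaced by the one header that actually provides what the file needs, here <asm/cmpxchg.h>. Callers are unaffected, as in this hypothetical example:]

/* Illustrative only -- hypothetical caller, not part of the patch.
 * atomic_cmpxchg() remains cmpxchg() on the counter; after this series
 * its definition reaches here via <asm/cmpxchg.h> (included from the
 * arch atomic header) instead of the old <asm/system.h>. */
#include <linux/atomic.h>

static atomic_t busy = ATOMIC_INIT(0);

static bool try_claim(void)
{
	/* true if we flipped 0 -> 1, i.e. the previous value was 0 */
	return atomic_cmpxchg(&busy, 0, 1) == 0;
}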
diff --git a/arch/sparc/include/asm/barrier.h b/arch/sparc/include/asm/barrier.h new file mode 100644 index 000000000000..b25f02a029e0 --- /dev/null +++ b/arch/sparc/include/asm/barrier.h @@ -0,0 +1,8 @@ +#ifndef ___ASM_SPARC_BARRIER_H +#define ___ASM_SPARC_BARRIER_H +#if defined(__sparc__) && defined(__arch64__) +#include <asm/barrier_64.h> +#else +#include <asm/barrier_32.h> +#endif +#endif diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h new file mode 100644 index 000000000000..c1b76654ee76 --- /dev/null +++ b/arch/sparc/include/asm/barrier_32.h @@ -0,0 +1,15 @@ +#ifndef __SPARC_BARRIER_H +#define __SPARC_BARRIER_H + +/* XXX Change this if we ever use a PSO mode kernel. */ +#define mb() __asm__ __volatile__ ("" : : : "memory") +#define rmb() mb() +#define wmb() mb() +#define read_barrier_depends() do { } while(0) +#define set_mb(__var, __value) do { __var = __value; mb(); } while(0) +#define smp_mb() __asm__ __volatile__("":::"memory") +#define smp_rmb() __asm__ __volatile__("":::"memory") +#define smp_wmb() __asm__ __volatile__("":::"memory") +#define smp_read_barrier_depends() do { } while(0) + +#endif /* !(__SPARC_BARRIER_H) */ diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h new file mode 100644 index 000000000000..95d45986f908 --- /dev/null +++ b/arch/sparc/include/asm/barrier_64.h @@ -0,0 +1,56 @@ +#ifndef __SPARC64_BARRIER_H +#define __SPARC64_BARRIER_H + +/* These are here in an effort to more fully work around Spitfire Errata + * #51. Essentially, if a memory barrier occurs soon after a mispredicted + * branch, the chip can stop executing instructions until a trap occurs. + * Therefore, if interrupts are disabled, the chip can hang forever. + * + * It used to be believed that the memory barrier had to be right in the + * delay slot, but a case has been traced recently wherein the memory barrier + * was one instruction after the branch delay slot and the chip still hung. + * The offending sequence was the following in sym_wakeup_done() of the + * sym53c8xx_2 driver: + * + * call sym_ccb_from_dsa, 0 + * movge %icc, 0, %l0 + * brz,pn %o0, .LL1303 + * mov %o0, %l2 + * membar #LoadLoad + * + * The branch has to be mispredicted for the bug to occur. Therefore, we put + * the memory barrier explicitly into a "branch always, predicted taken" + * delay slot to avoid the problem case. + */ +#define membar_safe(type) \ +do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ + " membar " type "\n" \ + "1:\n" \ + : : : "memory"); \ +} while (0) + +/* The kernel always executes in TSO memory model these days, + * and furthermore most sparc64 chips implement more stringent + * memory ordering than required by the specifications. 
+ */ +#define mb() membar_safe("#StoreLoad") +#define rmb() __asm__ __volatile__("":::"memory") +#define wmb() __asm__ __volatile__("":::"memory") + +#define read_barrier_depends() do { } while(0) +#define set_mb(__var, __value) \ + do { __var = __value; membar_safe("#StoreLoad"); } while(0) + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#else +#define smp_mb() __asm__ __volatile__("":::"memory") +#define smp_rmb() __asm__ __volatile__("":::"memory") +#define smp_wmb() __asm__ __volatile__("":::"memory") +#endif + +#define smp_read_barrier_depends() do { } while(0) + +#endif /* !(__SPARC64_BARRIER_H) */ diff --git a/arch/sparc/include/asm/bug.h b/arch/sparc/include/asm/bug.h index 8a59e5a8c217..6bd9f43cb5a5 100644 --- a/arch/sparc/include/asm/bug.h +++ b/arch/sparc/include/asm/bug.h @@ -19,4 +19,7 @@ extern void do_BUG(const char *file, int line); #include <asm-generic/bug.h> +struct pt_regs; +extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn)); + #endif diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h index 2e468773f250..68431b47a22a 100644 --- a/arch/sparc/include/asm/cacheflush_32.h +++ b/arch/sparc/include/asm/cacheflush_32.h @@ -83,4 +83,13 @@ extern void sparc_flush_page_to_ram(struct page *page); #define flush_cache_vmap(start, end) flush_cache_all() #define flush_cache_vunmap(start, end) flush_cache_all() +/* When a context switch happens we must flush all user windows so that + * the windows of the current process are flushed onto its stack. This + * way the windows are all clean for the next process and the stack + * frames are up to date. + */ +extern void flush_user_windows(void); +extern void kill_user_windows(void); +extern void flushw_all(void); + #endif /* _SPARC_CACHEFLUSH_H */ diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h index b95384033e89..2efea2ff88b7 100644 --- a/arch/sparc/include/asm/cacheflush_64.h +++ b/arch/sparc/include/asm/cacheflush_64.h @@ -9,6 +9,16 @@ /* Cache flush operations. */ + +#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory") +#define flushw_all() __asm__ __volatile__("flushw") + +extern void __flushw_user(void); +#define flushw_user() __flushw_user() + +#define flush_user_windows flushw_user +#define flush_register_windows flushw_all + /* These are the same regardless of whether this is an SMP kernel or not. */ #define flush_cache_mm(__mm) \ do { if ((__mm) == current->mm) flushw_user(); } while(0) diff --git a/arch/sparc/include/asm/cmpxchg.h b/arch/sparc/include/asm/cmpxchg.h new file mode 100644 index 000000000000..9355893efa52 --- /dev/null +++ b/arch/sparc/include/asm/cmpxchg.h @@ -0,0 +1,8 @@ +#ifndef ___ASM_SPARC_CMPXCHG_H +#define ___ASM_SPARC_CMPXCHG_H +#if defined(__sparc__) && defined(__arch64__) +#include <asm/cmpxchg_64.h> +#else +#include <asm/cmpxchg_32.h> +#endif +#endif diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h new file mode 100644 index 000000000000..c786b0a92b51 --- /dev/null +++ b/arch/sparc/include/asm/cmpxchg_32.h @@ -0,0 +1,112 @@ +/* 32-bit atomic xchg() and cmpxchg() definitions. + * + * Copyright (C) 1996 David S. 
Miller (davem@davemloft.net) + * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au) + * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org) + * + * Additions by Keith M Wesolowski (wesolows@foobazco.org) based + * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>. + */ + +#ifndef __ARCH_SPARC_CMPXCHG__ +#define __ARCH_SPARC_CMPXCHG__ + +#include <asm/btfixup.h> + +/* This has special calling conventions */ +#ifndef CONFIG_SMP +BTFIXUPDEF_CALL(void, ___xchg32, void) +#endif + +static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) +{ +#ifdef CONFIG_SMP + __asm__ __volatile__("swap [%2], %0" + : "=&r" (val) + : "0" (val), "r" (m) + : "memory"); + return val; +#else + register unsigned long *ptr asm("g1"); + register unsigned long ret asm("g2"); + + ptr = (unsigned long *) m; + ret = val; + + /* Note: this is magic and the nop there is + really needed. */ + __asm__ __volatile__( + "mov %%o7, %%g4\n\t" + "call ___f____xchg32\n\t" + " nop\n\t" + : "=&r" (ret) + : "0" (ret), "r" (ptr) + : "g3", "g4", "g7", "memory", "cc"); + + return ret; +#endif +} + +extern void __xchg_called_with_bad_pointer(void); + +static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size) +{ + switch (size) { + case 4: + return xchg_u32(ptr, x); + } + __xchg_called_with_bad_pointer(); + return x; +} + +#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +/* Emulate cmpxchg() the same way we emulate atomics, + * by hashing the object address and indexing into an array + * of spinlocks to get a bit of performance... + * + * See arch/sparc/lib/atomic32.c for implementation. + * + * Cribbed from <asm-parisc/atomic.h> + */ +#define __HAVE_ARCH_CMPXCHG 1 + +/* bug catcher for when unsupported size is used - won't link */ +extern void __cmpxchg_called_with_bad_pointer(void); +/* we only need to support cmpxchg of a u32 on sparc */ +extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_); + +/* don't worry...optimizer will get rid of most of this */ +static inline unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) +{ + switch (size) { + case 4: + return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_); + default: + __cmpxchg_called_with_bad_pointer(); + break; + } + return old; +} + +#define cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ +}) + +#include <asm-generic/cmpxchg-local.h> + +/* + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. + */ +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ + (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +#endif /* __ARCH_SPARC_CMPXCHG__ */ diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h new file mode 100644 index 000000000000..b30eb37294c5 --- /dev/null +++ b/arch/sparc/include/asm/cmpxchg_64.h @@ -0,0 +1,145 @@ +/* 64-bit atomic xchg() and cmpxchg() definitions. + * + * Copyright (C) 1996, 1997, 2000 David S. 
Miller (davem@redhat.com) + */ + +#ifndef __ARCH_SPARC64_CMPXCHG__ +#define __ARCH_SPARC64_CMPXCHG__ + +static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val) +{ + unsigned long tmp1, tmp2; + + __asm__ __volatile__( +" mov %0, %1\n" +"1: lduw [%4], %2\n" +" cas [%4], %2, %0\n" +" cmp %2, %0\n" +" bne,a,pn %%icc, 1b\n" +" mov %1, %0\n" + : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2) + : "0" (val), "r" (m) + : "cc", "memory"); + return val; +} + +static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val) +{ + unsigned long tmp1, tmp2; + + __asm__ __volatile__( +" mov %0, %1\n" +"1: ldx [%4], %2\n" +" casx [%4], %2, %0\n" +" cmp %2, %0\n" +" bne,a,pn %%xcc, 1b\n" +" mov %1, %0\n" + : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2) + : "0" (val), "r" (m) + : "cc", "memory"); + return val; +} + +#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +extern void __xchg_called_with_bad_pointer(void); + +static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, + int size) +{ + switch (size) { + case 4: + return xchg32(ptr, x); + case 8: + return xchg64(ptr, x); + } + __xchg_called_with_bad_pointer(); + return x; +} + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + */ + +#include <asm-generic/cmpxchg-local.h> + +#define __HAVE_ARCH_CMPXCHG 1 + +static inline unsigned long +__cmpxchg_u32(volatile int *m, int old, int new) +{ + __asm__ __volatile__("cas [%2], %3, %0" + : "=&r" (new) + : "0" (new), "r" (m), "r" (old) + : "memory"); + + return new; +} + +static inline unsigned long +__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) +{ + __asm__ __volatile__("casx [%2], %3, %0" + : "=&r" (new) + : "0" (new), "r" (m), "r" (old) + : "memory"); + + return new; +} + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid cmpxchg(). */ +extern void __cmpxchg_called_with_bad_pointer(void); + +static inline unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) +{ + switch (size) { + case 4: + return __cmpxchg_u32(ptr, old, new); + case 8: + return __cmpxchg_u64(ptr, old, new); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#define cmpxchg(ptr,o,n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ + }) + +/* + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. 
+ */ + +static inline unsigned long __cmpxchg_local(volatile void *ptr, + unsigned long old, + unsigned long new, int size) +{ + switch (size) { + case 4: + case 8: return __cmpxchg(ptr, old, new, size); + default: + return __cmpxchg_local_generic(ptr, old, new, size); + } + + return old; +} + +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ + (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg64_local(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg_local((ptr), (o), (n)); \ + }) + +#endif /* __ARCH_SPARC64_CMPXCHG__ */ diff --git a/arch/sparc/include/asm/cpu_type.h b/arch/sparc/include/asm/cpu_type.h new file mode 100644 index 000000000000..4ca184d95d82 --- /dev/null +++ b/arch/sparc/include/asm/cpu_type.h @@ -0,0 +1,34 @@ +#ifndef __ASM_CPU_TYPE_H +#define __ASM_CPU_TYPE_H + +/* + * Sparc (general) CPU types + */ +enum sparc_cpu { + sun4 = 0x00, + sun4c = 0x01, + sun4m = 0x02, + sun4d = 0x03, + sun4e = 0x04, + sun4u = 0x05, /* V8 ploos ploos */ + sun_unknown = 0x06, + ap1000 = 0x07, /* almost a sun4m */ + sparc_leon = 0x08, /* Leon SoC */ +}; + +#ifdef CONFIG_SPARC32 +extern enum sparc_cpu sparc_cpu_model; + +#define ARCH_SUN4C (sparc_cpu_model==sun4c) + +#define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */ + +#else + +#define sparc_cpu_model sun4u + +/* This cannot ever be a sun4c :) That's just history. */ +#define ARCH_SUN4C 0 +#endif + +#endif /* __ASM_CPU_TYPE_H */ diff --git a/arch/sparc/include/asm/exec.h b/arch/sparc/include/asm/exec.h new file mode 100644 index 000000000000..2e085881e0d1 --- /dev/null +++ b/arch/sparc/include/asm/exec.h @@ -0,0 +1,6 @@ +#ifndef __SPARC_EXEC_H +#define __SPARC_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* __SPARC_EXEC_H */ diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h index 7440915e86d8..698d9559fead 100644 --- a/arch/sparc/include/asm/floppy_32.h +++ b/arch/sparc/include/asm/floppy_32.h @@ -11,7 +11,6 @@ #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/idprom.h> #include <asm/machines.h> #include <asm/oplib.h> diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h index 444e7bea23bc..4e899b0dabf7 100644 --- a/arch/sparc/include/asm/futex_64.h +++ b/arch/sparc/include/asm/futex_64.h @@ -4,7 +4,6 @@ #include <linux/futex.h> #include <linux/uaccess.h> #include <asm/errno.h> -#include <asm/system.h> #define __futex_cas_op(insn, ret, oldval, uaddr, oparg) \ __asm__ __volatile__( \ diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h index 2006e5d359df..c1acbd891cbc 100644 --- a/arch/sparc/include/asm/io_32.h +++ b/arch/sparc/include/asm/io_32.h @@ -6,7 +6,6 @@ #include <linux/ioport.h> /* struct resource */ #include <asm/page.h> /* IO address mapping routines need this */ -#include <asm/system.h> #include <asm-generic/pci_iomap.h> #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h index 9481e5a6fa90..09b0b88aeb2a 100644 --- a/arch/sparc/include/asm/io_64.h +++ b/arch/sparc/include/asm/io_64.h @@ -6,7 +6,6 @@ #include <linux/types.h> #include <asm/page.h> /* IO address mapping routines need this */ -#include <asm/system.h> #include <asm/asi.h> #include <asm-generic/pci_iomap.h> diff --git a/arch/sparc/include/asm/irqflags_32.h b/arch/sparc/include/asm/irqflags_32.h index 14848909e0de..e414c06615c1 100644 --- a/arch/sparc/include/asm/irqflags_32.h +++ 
b/arch/sparc/include/asm/irqflags_32.h @@ -13,6 +13,7 @@ #ifndef __ASSEMBLY__ #include <linux/types.h> +#include <asm/psr.h> extern void arch_local_irq_restore(unsigned long); extern unsigned long arch_local_irq_save(void); diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index 666a73fef28d..a97fd085cebe 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -6,7 +6,6 @@ #ifndef __ASSEMBLY__ #include <linux/spinlock.h> -#include <asm/system.h> #include <asm/spitfire.h> #include <asm-generic/mm_hooks.h> diff --git a/arch/sparc/include/asm/ns87303.h b/arch/sparc/include/asm/ns87303.h index af755483e17d..6b947ee0f6aa 100644 --- a/arch/sparc/include/asm/ns87303.h +++ b/arch/sparc/include/asm/ns87303.h @@ -79,7 +79,6 @@ #include <linux/spinlock.h> -#include <asm/system.h> #include <asm/io.h> extern spinlock_t ns87303_lock; diff --git a/arch/sparc/include/asm/perfctr.h b/arch/sparc/include/asm/perfctr.h index 8d8720a8770d..3332d2cba6c1 100644 --- a/arch/sparc/include/asm/perfctr.h +++ b/arch/sparc/include/asm/perfctr.h @@ -168,6 +168,29 @@ struct vcounter_struct { unsigned long long vcnt1; }; +#else /* !(__KERNEL__) */ + +#ifndef CONFIG_SPARC32 + +/* Performance counter register access. */ +#define read_pcr(__p) __asm__ __volatile__("rd %%pcr, %0" : "=r" (__p)) +#define write_pcr(__p) __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p)) +#define read_pic(__p) __asm__ __volatile__("rd %%pic, %0" : "=r" (__p)) + +/* Blackbird errata workaround. See commentary in + * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt() + * for more information. + */ +#define write_pic(__p) \ + __asm__ __volatile__("ba,pt %%xcc, 99f\n\t" \ + " nop\n\t" \ + ".align 64\n" \ + "99:wr %0, 0x0, %%pic\n\t" \ + "rd %%pic, %%g0" : : "r" (__p)) +#define reset_pic() write_pic(0) + +#endif /* !CONFIG_SPARC32 */ + #endif /* !(__KERNEL__) */ #endif /* !(PERF_COUNTER_API) */ diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index a790cc657476..3d7101860e68 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h @@ -21,7 +21,7 @@ #include <asm/vac-ops.h> #include <asm/oplib.h> #include <asm/btfixup.h> -#include <asm/system.h> +#include <asm/cpu_type.h> struct vm_area_struct; diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 38ebb2c60137..6fa2f7980e6b 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -19,7 +19,6 @@ #include <asm/types.h> #include <asm/spitfire.h> #include <asm/asi.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/processor.h> diff --git a/arch/sparc/include/asm/processor.h b/arch/sparc/include/asm/processor.h index 9da9646bf6c6..2fe99e66e760 100644 --- a/arch/sparc/include/asm/processor.h +++ b/arch/sparc/include/asm/processor.h @@ -5,4 +5,7 @@ #else #include <asm/processor_32.h> #endif + +#define nop() __asm__ __volatile__ ("nop") + #endif diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h index 59fcebb8f440..e713db249931 100644 --- a/arch/sparc/include/asm/processor_64.h +++ b/arch/sparc/include/asm/processor_64.h @@ -18,6 +18,9 @@ #include <asm/ptrace.h> #include <asm/page.h> +/* Don't hold the runqueue lock over context switch */ +#define __ARCH_WANT_UNLOCKED_CTXSW + /* The sparc has no problems with write protection */ #define wp_works_ok 1 #define wp_works_ok__is_a_macro /* for versions in ksyms.c */ 
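
The new asm/cmpxchg.h, cmpxchg_32.h and cmpxchg_64.h headers above now carry the xchg()/cmpxchg() interface that sparc callers used to pick up indirectly through asm/system.h. A minimal sketch of the calling convention these macros expect follows; the function and variable names (counter_add, ctr, delta) are invented for illustration and do not appear in the patch.

	/* Sketch: atomically add 'delta' to *ctr with a cmpxchg() retry loop. */
	#include <asm/cmpxchg.h>

	static unsigned int counter_add(unsigned int *ctr, unsigned int delta)
	{
		unsigned int old, new;

		do {
			old = *ctr;          /* snapshot the current value     */
			new = old + delta;   /* value we would like to install */
			/* cmpxchg() returns the prior contents; retry if it moved. */
		} while (cmpxchg(ctr, old, new) != old);

		return new;
	}

On 32-bit sparc this expands to the spinlock-hashed emulation in arch/sparc/lib/atomic32.c, while on sparc64 it compiles down to the cas instruction shown in cmpxchg_64.h.
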
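The perfctr.h hunk above likewise re-homes the %pcr/%pic accessors (read_pcr, write_pcr, read_pic, write_pic, reset_pic) that used to live in the deleted system_64.h, which is why nmi.c, pcr.c and perf_event.c later in this patch start including asm/perfctr.h. A rough usage sketch, with a made-up helper name and assuming sparc64 kernel context:

	/* Sketch: latch and clear the performance instrumentation counter. */
	#include <asm/perfctr.h>

	static unsigned long sample_and_reset_pic(void)
	{
		unsigned long pic;

		read_pic(pic);   /* rd %pic into 'pic'                              */
		reset_pic();     /* write_pic(0), using the Blackbird-safe sequence */
		return pic;
	}
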
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h index c00c3b5c2806..ef8c7c068f53 100644 --- a/arch/sparc/include/asm/ptrace.h +++ b/arch/sparc/include/asm/ptrace.h @@ -98,6 +98,8 @@ struct sparc_trapf { */ #ifndef __ASSEMBLY__ +#include <linux/types.h> + struct pt_regs { unsigned long psr; unsigned long pc; @@ -163,7 +165,6 @@ struct sparc_stackf { #ifdef __KERNEL__ #include <linux/threads.h> -#include <asm/system.h> static inline int pt_regs_trap_type(struct pt_regs *regs) { @@ -240,8 +241,6 @@ extern unsigned long profile_pc(struct pt_regs *); #ifdef __KERNEL__ -#include <asm/system.h> - static inline bool pt_regs_is_syscall(struct pt_regs *regs) { return (regs->psr & PSR_SYSCALL); diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h index 64718ba26434..00497abec996 100644 --- a/arch/sparc/include/asm/setup.h +++ b/arch/sparc/include/asm/setup.h @@ -13,14 +13,30 @@ #ifdef __KERNEL__ +extern char reboot_command[]; + #ifdef CONFIG_SPARC32 /* The CPU that was used for booting * Only sun4d + leon may have boot_cpu_id != 0 */ extern unsigned char boot_cpu_id; extern unsigned char boot_cpu_id4; + +extern unsigned long empty_bad_page; +extern unsigned long empty_bad_page_table; +extern unsigned long empty_zero_page; + +extern int serial_console; +static inline int con_is_present(void) +{ + return serial_console ? 0 : 1; +} #endif +extern void sun_do_break(void); +extern int stop_a_enabled; +extern int scons_pwroff; + #endif /* __KERNEL__ */ #endif /* _SPARC_SETUP_H */ diff --git a/arch/sparc/include/asm/switch_to.h b/arch/sparc/include/asm/switch_to.h new file mode 100644 index 000000000000..2dc4fa5c6f8c --- /dev/null +++ b/arch/sparc/include/asm/switch_to.h @@ -0,0 +1,8 @@ +#ifndef ___ASM_SPARC_SWITCH_TO_H +#define ___ASM_SPARC_SWITCH_TO_H +#if defined(__sparc__) && defined(__arch64__) +#include <asm/switch_to_64.h> +#else +#include <asm/switch_to_32.h> +#endif +#endif diff --git a/arch/sparc/include/asm/switch_to_32.h b/arch/sparc/include/asm/switch_to_32.h new file mode 100644 index 000000000000..e32e82b76eed --- /dev/null +++ b/arch/sparc/include/asm/switch_to_32.h @@ -0,0 +1,106 @@ +#ifndef __SPARC_SWITCH_TO_H +#define __SPARC_SWITCH_TO_H + +#include <asm/smp.h> + +extern struct thread_info *current_set[NR_CPUS]; + +/* + * Flush windows so that the VM switch which follows + * would not pull the stack from under us. + * + * SWITCH_ENTER and SWITH_DO_LAZY_FPU do not work yet (e.g. SMP does not work) + * XXX WTF is the above comment? Found in late teen 2.4.x. 
+ */ +#ifdef CONFIG_SMP +#define SWITCH_ENTER(prv) \ + do { \ + if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \ + put_psr(get_psr() | PSR_EF); \ + fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \ + &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \ + clear_tsk_thread_flag(prv, TIF_USEDFPU); \ + (prv)->thread.kregs->psr &= ~PSR_EF; \ + } \ + } while(0) + +#define SWITCH_DO_LAZY_FPU(next) /* */ +#else +#define SWITCH_ENTER(prv) /* */ +#define SWITCH_DO_LAZY_FPU(nxt) \ + do { \ + if (last_task_used_math != (nxt)) \ + (nxt)->thread.kregs->psr&=~PSR_EF; \ + } while(0) +#endif + +#define prepare_arch_switch(next) do { \ + __asm__ __volatile__( \ + ".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \ + "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \ + "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \ + "save %sp, -0x40, %sp\n\t" \ + "restore; restore; restore; restore; restore; restore; restore"); \ +} while(0) + + /* Much care has gone into this code, do not touch it. + * + * We need to loadup regs l0/l1 for the newly forked child + * case because the trap return path relies on those registers + * holding certain values, gcc is told that they are clobbered. + * Gcc needs registers for 3 values in and 1 value out, so we + * clobber every non-fixed-usage register besides l2/l3/o4/o5. -DaveM + * + * Hey Dave, that do not touch sign is too much of an incentive + * - Anton & Pete + */ +#define switch_to(prev, next, last) do { \ + SWITCH_ENTER(prev); \ + SWITCH_DO_LAZY_FPU(next); \ + cpumask_set_cpu(smp_processor_id(), mm_cpumask(next->active_mm)); \ + __asm__ __volatile__( \ + "sethi %%hi(here - 0x8), %%o7\n\t" \ + "mov %%g6, %%g3\n\t" \ + "or %%o7, %%lo(here - 0x8), %%o7\n\t" \ + "rd %%psr, %%g4\n\t" \ + "std %%sp, [%%g6 + %4]\n\t" \ + "rd %%wim, %%g5\n\t" \ + "wr %%g4, 0x20, %%psr\n\t" \ + "nop\n\t" \ + "std %%g4, [%%g6 + %3]\n\t" \ + "ldd [%2 + %3], %%g4\n\t" \ + "mov %2, %%g6\n\t" \ + ".globl patchme_store_new_current\n" \ +"patchme_store_new_current:\n\t" \ + "st %2, [%1]\n\t" \ + "wr %%g4, 0x20, %%psr\n\t" \ + "nop\n\t" \ + "nop\n\t" \ + "nop\n\t" /* LEON needs all 3 nops: load to %sp depends on CWP. */ \ + "ldd [%%g6 + %4], %%sp\n\t" \ + "wr %%g5, 0x0, %%wim\n\t" \ + "ldd [%%sp + 0x00], %%l0\n\t" \ + "ldd [%%sp + 0x38], %%i6\n\t" \ + "wr %%g4, 0x0, %%psr\n\t" \ + "nop\n\t" \ + "nop\n\t" \ + "jmpl %%o7 + 0x8, %%g0\n\t" \ + " ld [%%g3 + %5], %0\n\t" \ + "here:\n" \ + : "=&r" (last) \ + : "r" (&(current_set[hard_smp_processor_id()])), \ + "r" (task_thread_info(next)), \ + "i" (TI_KPSR), \ + "i" (TI_KSP), \ + "i" (TI_TASK) \ + : "g1", "g2", "g3", "g4", "g5", "g7", \ + "l0", "l1", "l3", "l4", "l5", "l6", "l7", \ + "i0", "i1", "i2", "i3", "i4", "i5", \ + "o0", "o1", "o2", "o3", "o7"); \ + } while(0) + +extern void fpsave(unsigned long *fpregs, unsigned long *fsr, + void *fpqueue, unsigned long *fpqdepth); +extern void synchronize_user_stack(void); + +#endif /* __SPARC_SWITCH_TO_H */ diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h new file mode 100644 index 000000000000..7923c4a2be38 --- /dev/null +++ b/arch/sparc/include/asm/switch_to_64.h @@ -0,0 +1,72 @@ +#ifndef __SPARC64_SWITCH_TO_64_H +#define __SPARC64_SWITCH_TO_64_H + +#include <asm/visasm.h> + +#define prepare_arch_switch(next) \ +do { \ + flushw_all(); \ +} while (0) + + /* See what happens when you design the chip correctly? + * + * We tell gcc we clobber all non-fixed-usage registers except + * for l0/l1. 
It will use one for 'next' and the other to hold + * the output value of 'last'. 'next' is not referenced again + * past the invocation of switch_to in the scheduler, so we need + * not preserve it's value. Hairy, but it lets us remove 2 loads + * and 2 stores in this critical code path. -DaveM + */ +#define switch_to(prev, next, last) \ +do { flush_tlb_pending(); \ + save_and_clear_fpu(); \ + /* If you are tempted to conditionalize the following */ \ + /* so that ASI is only written if it changes, think again. */ \ + __asm__ __volatile__("wr %%g0, %0, %%asi" \ + : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\ + trap_block[current_thread_info()->cpu].thread = \ + task_thread_info(next); \ + __asm__ __volatile__( \ + "mov %%g4, %%g7\n\t" \ + "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \ + "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \ + "rdpr %%wstate, %%o5\n\t" \ + "stx %%o6, [%%g6 + %6]\n\t" \ + "stb %%o5, [%%g6 + %5]\n\t" \ + "rdpr %%cwp, %%o5\n\t" \ + "stb %%o5, [%%g6 + %8]\n\t" \ + "wrpr %%g0, 15, %%pil\n\t" \ + "mov %4, %%g6\n\t" \ + "ldub [%4 + %8], %%g1\n\t" \ + "wrpr %%g1, %%cwp\n\t" \ + "ldx [%%g6 + %6], %%o6\n\t" \ + "ldub [%%g6 + %5], %%o5\n\t" \ + "ldub [%%g6 + %7], %%o7\n\t" \ + "wrpr %%o5, 0x0, %%wstate\n\t" \ + "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ + "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ + "ldx [%%g6 + %9], %%g4\n\t" \ + "wrpr %%g0, 14, %%pil\n\t" \ + "brz,pt %%o7, switch_to_pc\n\t" \ + " mov %%g7, %0\n\t" \ + "sethi %%hi(ret_from_syscall), %%g1\n\t" \ + "jmpl %%g1 + %%lo(ret_from_syscall), %%g0\n\t" \ + " nop\n\t" \ + ".globl switch_to_pc\n\t" \ + "switch_to_pc:\n\t" \ + : "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \ + "=r" (__local_per_cpu_offset) \ + : "0" (task_thread_info(next)), \ + "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \ + "i" (TI_CWP), "i" (TI_TASK) \ + : "cc", \ + "g1", "g2", "g3", "g7", \ + "l1", "l2", "l3", "l4", "l5", "l6", "l7", \ + "i0", "i1", "i2", "i3", "i4", "i5", \ + "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \ +} while(0) + +extern void synchronize_user_stack(void); +extern void fault_in_user_windows(void); + +#endif /* __SPARC64_SWITCH_TO_64_H */ diff --git a/arch/sparc/include/asm/system.h b/arch/sparc/include/asm/system.h deleted file mode 100644 index 7944a7cfc996..000000000000 --- a/arch/sparc/include/asm/system.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef ___ASM_SPARC_SYSTEM_H -#define ___ASM_SPARC_SYSTEM_H -#if defined(__sparc__) && defined(__arch64__) -#include <asm/system_64.h> -#else -#include <asm/system_32.h> -#endif -#endif diff --git a/arch/sparc/include/asm/system_32.h b/arch/sparc/include/asm/system_32.h deleted file mode 100644 index aba16092a81b..000000000000 --- a/arch/sparc/include/asm/system_32.h +++ /dev/null @@ -1,284 +0,0 @@ -#ifndef __SPARC_SYSTEM_H -#define __SPARC_SYSTEM_H - -#include <linux/kernel.h> -#include <linux/threads.h> /* NR_CPUS */ -#include <linux/thread_info.h> - -#include <asm/page.h> -#include <asm/psr.h> -#include <asm/ptrace.h> -#include <asm/btfixup.h> -#include <asm/smp.h> - -#ifndef __ASSEMBLY__ - -#include <linux/irqflags.h> - -/* - * Sparc (general) CPU types - */ -enum sparc_cpu { - sun4 = 0x00, - sun4c = 0x01, - sun4m = 0x02, - sun4d = 0x03, - sun4e = 0x04, - sun4u = 0x05, /* V8 ploos ploos */ - sun_unknown = 0x06, - ap1000 = 0x07, /* almost a sun4m */ - sparc_leon = 0x08, /* Leon SoC */ -}; - -/* Really, userland should not be looking at any of this... 
*/ -#ifdef __KERNEL__ - -extern enum sparc_cpu sparc_cpu_model; - -#define ARCH_SUN4C (sparc_cpu_model==sun4c) - -#define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */ - -extern char reboot_command[]; - -extern struct thread_info *current_set[NR_CPUS]; - -extern unsigned long empty_bad_page; -extern unsigned long empty_bad_page_table; -extern unsigned long empty_zero_page; - -extern void sun_do_break(void); -extern int serial_console; -extern int stop_a_enabled; -extern int scons_pwroff; - -static inline int con_is_present(void) -{ - return serial_console ? 0 : 1; -} - -/* When a context switch happens we must flush all user windows so that - * the windows of the current process are flushed onto its stack. This - * way the windows are all clean for the next process and the stack - * frames are up to date. - */ -extern void flush_user_windows(void); -extern void kill_user_windows(void); -extern void synchronize_user_stack(void); -extern void fpsave(unsigned long *fpregs, unsigned long *fsr, - void *fpqueue, unsigned long *fpqdepth); - -#ifdef CONFIG_SMP -#define SWITCH_ENTER(prv) \ - do { \ - if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \ - put_psr(get_psr() | PSR_EF); \ - fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \ - &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \ - clear_tsk_thread_flag(prv, TIF_USEDFPU); \ - (prv)->thread.kregs->psr &= ~PSR_EF; \ - } \ - } while(0) - -#define SWITCH_DO_LAZY_FPU(next) /* */ -#else -#define SWITCH_ENTER(prv) /* */ -#define SWITCH_DO_LAZY_FPU(nxt) \ - do { \ - if (last_task_used_math != (nxt)) \ - (nxt)->thread.kregs->psr&=~PSR_EF; \ - } while(0) -#endif - -extern void flushw_all(void); - -/* - * Flush windows so that the VM switch which follows - * would not pull the stack from under us. - * - * SWITCH_ENTER and SWITH_DO_LAZY_FPU do not work yet (e.g. SMP does not work) - * XXX WTF is the above comment? Found in late teen 2.4.x. - */ -#define prepare_arch_switch(next) do { \ - __asm__ __volatile__( \ - ".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \ - "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \ - "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \ - "save %sp, -0x40, %sp\n\t" \ - "restore; restore; restore; restore; restore; restore; restore"); \ -} while(0) - - /* Much care has gone into this code, do not touch it. - * - * We need to loadup regs l0/l1 for the newly forked child - * case because the trap return path relies on those registers - * holding certain values, gcc is told that they are clobbered. - * Gcc needs registers for 3 values in and 1 value out, so we - * clobber every non-fixed-usage register besides l2/l3/o4/o5. -DaveM - * - * Hey Dave, that do not touch sign is too much of an incentive - * - Anton & Pete - */ -#define switch_to(prev, next, last) do { \ - SWITCH_ENTER(prev); \ - SWITCH_DO_LAZY_FPU(next); \ - cpumask_set_cpu(smp_processor_id(), mm_cpumask(next->active_mm)); \ - __asm__ __volatile__( \ - "sethi %%hi(here - 0x8), %%o7\n\t" \ - "mov %%g6, %%g3\n\t" \ - "or %%o7, %%lo(here - 0x8), %%o7\n\t" \ - "rd %%psr, %%g4\n\t" \ - "std %%sp, [%%g6 + %4]\n\t" \ - "rd %%wim, %%g5\n\t" \ - "wr %%g4, 0x20, %%psr\n\t" \ - "nop\n\t" \ - "std %%g4, [%%g6 + %3]\n\t" \ - "ldd [%2 + %3], %%g4\n\t" \ - "mov %2, %%g6\n\t" \ - ".globl patchme_store_new_current\n" \ -"patchme_store_new_current:\n\t" \ - "st %2, [%1]\n\t" \ - "wr %%g4, 0x20, %%psr\n\t" \ - "nop\n\t" \ - "nop\n\t" \ - "nop\n\t" /* LEON needs all 3 nops: load to %sp depends on CWP. 
*/ \ - "ldd [%%g6 + %4], %%sp\n\t" \ - "wr %%g5, 0x0, %%wim\n\t" \ - "ldd [%%sp + 0x00], %%l0\n\t" \ - "ldd [%%sp + 0x38], %%i6\n\t" \ - "wr %%g4, 0x0, %%psr\n\t" \ - "nop\n\t" \ - "nop\n\t" \ - "jmpl %%o7 + 0x8, %%g0\n\t" \ - " ld [%%g3 + %5], %0\n\t" \ - "here:\n" \ - : "=&r" (last) \ - : "r" (&(current_set[hard_smp_processor_id()])), \ - "r" (task_thread_info(next)), \ - "i" (TI_KPSR), \ - "i" (TI_KSP), \ - "i" (TI_TASK) \ - : "g1", "g2", "g3", "g4", "g5", "g7", \ - "l0", "l1", "l3", "l4", "l5", "l6", "l7", \ - "i0", "i1", "i2", "i3", "i4", "i5", \ - "o0", "o1", "o2", "o3", "o7"); \ - } while(0) - -/* XXX Change this if we ever use a PSO mode kernel. */ -#define mb() __asm__ __volatile__ ("" : : : "memory") -#define rmb() mb() -#define wmb() mb() -#define read_barrier_depends() do { } while(0) -#define set_mb(__var, __value) do { __var = __value; mb(); } while(0) -#define smp_mb() __asm__ __volatile__("":::"memory") -#define smp_rmb() __asm__ __volatile__("":::"memory") -#define smp_wmb() __asm__ __volatile__("":::"memory") -#define smp_read_barrier_depends() do { } while(0) - -#define nop() __asm__ __volatile__ ("nop") - -/* This has special calling conventions */ -#ifndef CONFIG_SMP -BTFIXUPDEF_CALL(void, ___xchg32, void) -#endif - -static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) -{ -#ifdef CONFIG_SMP - __asm__ __volatile__("swap [%2], %0" - : "=&r" (val) - : "0" (val), "r" (m) - : "memory"); - return val; -#else - register unsigned long *ptr asm("g1"); - register unsigned long ret asm("g2"); - - ptr = (unsigned long *) m; - ret = val; - - /* Note: this is magic and the nop there is - really needed. */ - __asm__ __volatile__( - "mov %%o7, %%g4\n\t" - "call ___f____xchg32\n\t" - " nop\n\t" - : "=&r" (ret) - : "0" (ret), "r" (ptr) - : "g3", "g4", "g7", "memory", "cc"); - - return ret; -#endif -} - -#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) - -extern void __xchg_called_with_bad_pointer(void); - -static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size) -{ - switch (size) { - case 4: - return xchg_u32(ptr, x); - } - __xchg_called_with_bad_pointer(); - return x; -} - -/* Emulate cmpxchg() the same way we emulate atomics, - * by hashing the object address and indexing into an array - * of spinlocks to get a bit of performance... - * - * See arch/sparc/lib/atomic32.c for implementation. - * - * Cribbed from <asm-parisc/atomic.h> - */ -#define __HAVE_ARCH_CMPXCHG 1 - -/* bug catcher for when unsupported size is used - won't link */ -extern void __cmpxchg_called_with_bad_pointer(void); -/* we only need to support cmpxchg of a u32 on sparc */ -extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_); - -/* don't worry...optimizer will get rid of most of this */ -static inline unsigned long -__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) -{ - switch (size) { - case 4: - return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_); - default: - __cmpxchg_called_with_bad_pointer(); - break; - } - return old; -} - -#define cmpxchg(ptr, o, n) \ -({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ -}) - -#include <asm-generic/cmpxchg-local.h> - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. 
- */ -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ - (unsigned long)(n), sizeof(*(ptr)))) -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - -extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn)); - -#endif /* __KERNEL__ */ - -#endif /* __ASSEMBLY__ */ - -#define arch_align_stack(x) (x) - -#endif /* !(__SPARC_SYSTEM_H) */ diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h deleted file mode 100644 index 10bcabce97b2..000000000000 --- a/arch/sparc/include/asm/system_64.h +++ /dev/null @@ -1,331 +0,0 @@ -#ifndef __SPARC64_SYSTEM_H -#define __SPARC64_SYSTEM_H - -#include <asm/ptrace.h> -#include <asm/processor.h> -#include <asm/visasm.h> - -#ifndef __ASSEMBLY__ - -#include <linux/irqflags.h> -#include <asm-generic/cmpxchg-local.h> - -/* - * Sparc (general) CPU types - */ -enum sparc_cpu { - sun4 = 0x00, - sun4c = 0x01, - sun4m = 0x02, - sun4d = 0x03, - sun4e = 0x04, - sun4u = 0x05, /* V8 ploos ploos */ - sun_unknown = 0x06, - ap1000 = 0x07, /* almost a sun4m */ -}; - -#define sparc_cpu_model sun4u - -/* This cannot ever be a sun4c :) That's just history. */ -#define ARCH_SUN4C 0 - -extern char reboot_command[]; - -/* These are here in an effort to more fully work around Spitfire Errata - * #51. Essentially, if a memory barrier occurs soon after a mispredicted - * branch, the chip can stop executing instructions until a trap occurs. - * Therefore, if interrupts are disabled, the chip can hang forever. - * - * It used to be believed that the memory barrier had to be right in the - * delay slot, but a case has been traced recently wherein the memory barrier - * was one instruction after the branch delay slot and the chip still hung. - * The offending sequence was the following in sym_wakeup_done() of the - * sym53c8xx_2 driver: - * - * call sym_ccb_from_dsa, 0 - * movge %icc, 0, %l0 - * brz,pn %o0, .LL1303 - * mov %o0, %l2 - * membar #LoadLoad - * - * The branch has to be mispredicted for the bug to occur. Therefore, we put - * the memory barrier explicitly into a "branch always, predicted taken" - * delay slot to avoid the problem case. - */ -#define membar_safe(type) \ -do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ - " membar " type "\n" \ - "1:\n" \ - : : : "memory"); \ -} while (0) - -/* The kernel always executes in TSO memory model these days, - * and furthermore most sparc64 chips implement more stringent - * memory ordering than required by the specifications. - */ -#define mb() membar_safe("#StoreLoad") -#define rmb() __asm__ __volatile__("":::"memory") -#define wmb() __asm__ __volatile__("":::"memory") - -#endif - -#define nop() __asm__ __volatile__ ("nop") - -#define read_barrier_depends() do { } while(0) -#define set_mb(__var, __value) \ - do { __var = __value; membar_safe("#StoreLoad"); } while(0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#else -#define smp_mb() __asm__ __volatile__("":::"memory") -#define smp_rmb() __asm__ __volatile__("":::"memory") -#define smp_wmb() __asm__ __volatile__("":::"memory") -#endif - -#define smp_read_barrier_depends() do { } while(0) - -#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory") - -#define flushw_all() __asm__ __volatile__("flushw") - -/* Performance counter register access. 
*/ -#define read_pcr(__p) __asm__ __volatile__("rd %%pcr, %0" : "=r" (__p)) -#define write_pcr(__p) __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p)) -#define read_pic(__p) __asm__ __volatile__("rd %%pic, %0" : "=r" (__p)) - -/* Blackbird errata workaround. See commentary in - * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt() - * for more information. - */ -#define write_pic(__p) \ - __asm__ __volatile__("ba,pt %%xcc, 99f\n\t" \ - " nop\n\t" \ - ".align 64\n" \ - "99:wr %0, 0x0, %%pic\n\t" \ - "rd %%pic, %%g0" : : "r" (__p)) -#define reset_pic() write_pic(0) - -#ifndef __ASSEMBLY__ - -extern void sun_do_break(void); -extern int stop_a_enabled; -extern int scons_pwroff; - -extern void fault_in_user_windows(void); -extern void synchronize_user_stack(void); - -extern void __flushw_user(void); -#define flushw_user() __flushw_user() - -#define flush_user_windows flushw_user -#define flush_register_windows flushw_all - -/* Don't hold the runqueue lock over context switch */ -#define __ARCH_WANT_UNLOCKED_CTXSW -#define prepare_arch_switch(next) \ -do { \ - flushw_all(); \ -} while (0) - - /* See what happens when you design the chip correctly? - * - * We tell gcc we clobber all non-fixed-usage registers except - * for l0/l1. It will use one for 'next' and the other to hold - * the output value of 'last'. 'next' is not referenced again - * past the invocation of switch_to in the scheduler, so we need - * not preserve it's value. Hairy, but it lets us remove 2 loads - * and 2 stores in this critical code path. -DaveM - */ -#define switch_to(prev, next, last) \ -do { flush_tlb_pending(); \ - save_and_clear_fpu(); \ - /* If you are tempted to conditionalize the following */ \ - /* so that ASI is only written if it changes, think again. */ \ - __asm__ __volatile__("wr %%g0, %0, %%asi" \ - : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\ - trap_block[current_thread_info()->cpu].thread = \ - task_thread_info(next); \ - __asm__ __volatile__( \ - "mov %%g4, %%g7\n\t" \ - "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \ - "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \ - "rdpr %%wstate, %%o5\n\t" \ - "stx %%o6, [%%g6 + %6]\n\t" \ - "stb %%o5, [%%g6 + %5]\n\t" \ - "rdpr %%cwp, %%o5\n\t" \ - "stb %%o5, [%%g6 + %8]\n\t" \ - "wrpr %%g0, 15, %%pil\n\t" \ - "mov %4, %%g6\n\t" \ - "ldub [%4 + %8], %%g1\n\t" \ - "wrpr %%g1, %%cwp\n\t" \ - "ldx [%%g6 + %6], %%o6\n\t" \ - "ldub [%%g6 + %5], %%o5\n\t" \ - "ldub [%%g6 + %7], %%o7\n\t" \ - "wrpr %%o5, 0x0, %%wstate\n\t" \ - "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ - "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ - "ldx [%%g6 + %9], %%g4\n\t" \ - "wrpr %%g0, 14, %%pil\n\t" \ - "brz,pt %%o7, switch_to_pc\n\t" \ - " mov %%g7, %0\n\t" \ - "sethi %%hi(ret_from_syscall), %%g1\n\t" \ - "jmpl %%g1 + %%lo(ret_from_syscall), %%g0\n\t" \ - " nop\n\t" \ - ".globl switch_to_pc\n\t" \ - "switch_to_pc:\n\t" \ - : "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \ - "=r" (__local_per_cpu_offset) \ - : "0" (task_thread_info(next)), \ - "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \ - "i" (TI_CWP), "i" (TI_TASK) \ - : "cc", \ - "g1", "g2", "g3", "g7", \ - "l1", "l2", "l3", "l4", "l5", "l6", "l7", \ - "i0", "i1", "i2", "i3", "i4", "i5", \ - "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \ -} while(0) - -static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val) -{ - unsigned long tmp1, tmp2; - - __asm__ __volatile__( -" mov %0, %1\n" -"1: lduw [%4], %2\n" -" cas [%4], %2, %0\n" -" cmp %2, %0\n" -" bne,a,pn %%icc, 1b\n" -" mov %1, 
%0\n" - : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2) - : "0" (val), "r" (m) - : "cc", "memory"); - return val; -} - -static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val) -{ - unsigned long tmp1, tmp2; - - __asm__ __volatile__( -" mov %0, %1\n" -"1: ldx [%4], %2\n" -" casx [%4], %2, %0\n" -" cmp %2, %0\n" -" bne,a,pn %%xcc, 1b\n" -" mov %1, %0\n" - : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2) - : "0" (val), "r" (m) - : "cc", "memory"); - return val; -} - -#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) - -extern void __xchg_called_with_bad_pointer(void); - -static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, - int size) -{ - switch (size) { - case 4: - return xchg32(ptr, x); - case 8: - return xchg64(ptr, x); - } - __xchg_called_with_bad_pointer(); - return x; -} - -extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn)); - -/* - * Atomic compare and exchange. Compare OLD with MEM, if identical, - * store NEW in MEM. Return the initial value in MEM. Success is - * indicated by comparing RETURN with OLD. - */ - -#define __HAVE_ARCH_CMPXCHG 1 - -static inline unsigned long -__cmpxchg_u32(volatile int *m, int old, int new) -{ - __asm__ __volatile__("cas [%2], %3, %0" - : "=&r" (new) - : "0" (new), "r" (m), "r" (old) - : "memory"); - - return new; -} - -static inline unsigned long -__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) -{ - __asm__ __volatile__("casx [%2], %3, %0" - : "=&r" (new) - : "0" (new), "r" (m), "r" (old) - : "memory"); - - return new; -} - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid cmpxchg(). */ -extern void __cmpxchg_called_with_bad_pointer(void); - -static inline unsigned long -__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) -{ - switch (size) { - case 4: - return __cmpxchg_u32(ptr, old, new); - case 8: - return __cmpxchg_u64(ptr, old, new); - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#define cmpxchg(ptr,o,n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ - }) - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. 
- */ - -static inline unsigned long __cmpxchg_local(volatile void *ptr, - unsigned long old, - unsigned long new, int size) -{ - switch (size) { - case 4: - case 8: return __cmpxchg(ptr, old, new, size); - default: - return __cmpxchg_local_generic(ptr, old, new, size); - } - - return old; -} - -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr)))) -#define cmpxchg64_local(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg_local((ptr), (o), (n)); \ - }) - -#endif /* !(__ASSEMBLY__) */ - -#define arch_align_stack(x) (x) - -#endif /* !(__SPARC64_SYSTEM_H) */ diff --git a/arch/sparc/include/asm/timer_32.h b/arch/sparc/include/asm/timer_32.h index 2ec030ef3810..1a91e11dd104 100644 --- a/arch/sparc/include/asm/timer_32.h +++ b/arch/sparc/include/asm/timer_32.h @@ -8,12 +8,13 @@ #ifndef _SPARC_TIMER_H #define _SPARC_TIMER_H -#include <asm/system.h> /* For SUN4M_NCPUS */ +#include <asm/cpu_type.h> /* For SUN4M_NCPUS */ #include <asm/btfixup.h> extern __volatile__ unsigned int *master_l10_counter; /* FIXME: Make do_[gs]ettimeofday btfixup calls */ +struct timespec; BTFIXUPDEF_CALL(int, bus_do_settimeofday, struct timespec *tv) #define bus_do_settimeofday(tv) BTFIXUP_CALL(bus_do_settimeofday)(tv) diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 3e1449f07798..a1091afb8831 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -11,7 +11,6 @@ #include <linux/string.h> #include <linux/thread_info.h> #include <asm/asi.h> -#include <asm/system.h> #include <asm/spitfire.h> #include <asm-generic/uaccess-unaligned.h> #endif diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h index 9d83d3bcb494..432afa838861 100644 --- a/arch/sparc/include/asm/vio.h +++ b/arch/sparc/include/asm/vio.h @@ -284,6 +284,7 @@ struct vio_dev { }; struct vio_driver { + const char *name; struct list_head node; const struct vio_device_id *id_table; int (*probe)(struct vio_dev *dev, const struct vio_device_id *id); @@ -371,7 +372,13 @@ do { if (vio->debug & VIO_DEBUG_##TYPE) \ vio->vdev->channel_id, ## a); \ } while (0) -extern int vio_register_driver(struct vio_driver *drv); +extern int __vio_register_driver(struct vio_driver *drv, struct module *owner, + const char *mod_name); +/* + * vio_register_driver must be a macro so that KBUILD_MODNAME can be expanded + */ +#define vio_register_driver(driver) \ + __vio_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) extern void vio_unregister_driver(struct vio_driver *drv); static inline struct vio_driver *to_vio_driver(struct device_driver *drv) diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c index f7ea8f032719..56d0f52c3e62 100644 --- a/arch/sparc/kernel/auxio_32.c +++ b/arch/sparc/kernel/auxio_32.c @@ -13,6 +13,7 @@ #include <asm/io.h> #include <asm/auxio.h> #include <asm/string.h> /* memset(), Linux has no bzero() */ +#include <asm/cpu_type.h> /* Probe and map in the Auxiliary I/O register */ diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c index 113c052c3043..6b2f56a6f8af 100644 --- a/arch/sparc/kernel/devices.c +++ b/arch/sparc/kernel/devices.c @@ -17,8 +17,8 @@ #include <asm/oplib.h> #include <asm/prom.h> #include <asm/smp.h> -#include <asm/system.h> #include <asm/cpudata.h> +#include <asm/cpu_type.h> extern void clock_stop_probe(void); /* tadpole.c */ extern void sun4c_probe_memerr_reg(void); diff --git a/arch/sparc/kernel/ds.c 
b/arch/sparc/kernel/ds.c index 381edcd5bc29..fea13c7b1aee 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -1244,10 +1244,7 @@ static struct vio_driver ds_driver = { .id_table = ds_match, .probe = ds_probe, .remove = ds_remove, - .driver = { - .name = "ds", - .owner = THIS_MODULE, - } + .name = "ds", }; static int __init ds_init(void) diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h index 42851122bbd9..5a021dd2f854 100644 --- a/arch/sparc/kernel/irq.h +++ b/arch/sparc/kernel/irq.h @@ -1,6 +1,7 @@ #include <linux/platform_device.h> #include <asm/btfixup.h> +#include <asm/cpu_type.h> struct irq_bucket { struct irq_bucket *next; diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index d45b710ea7e4..dff2c3d7d370 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -26,7 +26,6 @@ #include <asm/ptrace.h> #include <asm/processor.h> #include <linux/atomic.h> -#include <asm/system.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/iommu.h> diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c index 539243b236fa..2e424a576a36 100644 --- a/arch/sparc/kernel/kgdb_32.c +++ b/arch/sparc/kernel/kgdb_32.c @@ -9,6 +9,7 @@ #include <asm/kdebug.h> #include <asm/ptrace.h> #include <asm/irq.h> +#include <asm/cacheflush.h> extern unsigned long trapbase; diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c index e5519870c3d9..276359e1ff56 100644 --- a/arch/sparc/kernel/module.c +++ b/arch/sparc/kernel/module.c @@ -16,6 +16,7 @@ #include <asm/processor.h> #include <asm/spitfire.h> +#include <asm/cacheflush.h> #include "entry.h" diff --git a/arch/sparc/kernel/muldiv.c b/arch/sparc/kernel/muldiv.c index 6ce1021d487c..f7db516b07d8 100644 --- a/arch/sparc/kernel/muldiv.c +++ b/arch/sparc/kernel/muldiv.c @@ -14,7 +14,6 @@ #include <linux/mm.h> #include <asm/ptrace.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> #include "kernel.h" diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index c76fe0b5bd94..eb1c1f010a47 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -22,6 +22,7 @@ #include <asm/perf_event.h> #include <asm/ptrace.h> #include <asm/pcr.h> +#include <asm/perfctr.h> #include "kstack.h" diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index a24072a49270..0ce0dd2332aa 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -14,6 +14,7 @@ #include <asm/pcr.h> #include <asm/nmi.h> #include <asm/spitfire.h> +#include <asm/perfctr.h> /* This code is shared between various users of the performance * counters. 
Users will be oprofile, pseudo-NMI watchdog, and the diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 8e16a4a21582..28559ce5eeb5 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -25,6 +25,8 @@ #include <linux/atomic.h> #include <asm/nmi.h> #include <asm/pcr.h> +#include <asm/perfctr.h> +#include <asm/cacheflush.h> #include "kernel.h" #include "kstack.h" diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 935fdbcd88c2..efa07542e85f 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -28,7 +28,6 @@ #include <asm/auxio.h> #include <asm/oplib.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> @@ -38,6 +37,7 @@ #include <asm/elf.h> #include <asm/prom.h> #include <asm/unistd.h> +#include <asm/setup.h> /* * Power management idle function diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 06b5b5fc20c7..aff0c72fac09 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -32,7 +32,6 @@ #include <linux/nmi.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c index 27b9e93d0121..896ba7c5cd8e 100644 --- a/arch/sparc/kernel/ptrace_32.c +++ b/arch/sparc/kernel/ptrace_32.c @@ -23,8 +23,8 @@ #include <linux/tracehook.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/uaccess.h> +#include <asm/cacheflush.h> /* #define ALLOW_INIT_TRACING */ diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 9388844cd88c..6f97c0767995 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -29,7 +29,6 @@ #include <asm/asi.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/psrcompat.h> #include <asm/visasm.h> diff --git a/arch/sparc/kernel/reboot.c b/arch/sparc/kernel/reboot.c index 006a42dd2007..eba7d918162a 100644 --- a/arch/sparc/kernel/reboot.c +++ b/arch/sparc/kernel/reboot.c @@ -7,9 +7,9 @@ #include <linux/export.h> #include <linux/pm.h> -#include <asm/system.h> #include <asm/oplib.h> #include <asm/prom.h> +#include <asm/setup.h> /* sysctl - toggle power-off restriction for serial console * systems in machine_power_off() diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index ffb883ddd0f0..d444468b27f6 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -33,7 +33,6 @@ #include <linux/kdebug.h> #include <linux/export.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/oplib.h> @@ -46,6 +45,7 @@ #include <asm/machines.h> #include <asm/cpudata.h> #include <asm/setup.h> +#include <asm/cacheflush.h> #include "kernel.h" diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index a854a1c240ff..1414d16712b2 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -31,7 +31,6 @@ #include <linux/initrd.h> #include <linux/module.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/oplib.h> @@ -49,6 +48,7 @@ #include <asm/btext.h> #include <asm/elf.h> #include <asm/mdesc.h> +#include <asm/cacheflush.h> #ifdef CONFIG_IP_PNP #include <net/ipconfig.h> diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index c8f5b50db89c..948700fb9036 
100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -28,6 +28,7 @@ #include <asm/fpumacro.h> #include <asm/visasm.h> #include <asm/compat_signal.h> +#include <asm/switch_to.h> #include "sigutil.h" diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 7bb71b6fbd20..1e750e415d7a 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -25,6 +25,7 @@ #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/cacheflush.h> /* flush_sig_insns */ +#include <asm/switch_to.h> #include "sigutil.h" diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index d8a67e60be80..48b0f57b65f7 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -31,6 +31,8 @@ #include <asm/uctx.h> #include <asm/siginfo.h> #include <asm/visasm.h> +#include <asm/switch_to.h> +#include <asm/cacheflush.h> #include "entry.h" #include "systbls.h" diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c index 35c7897b009a..0f6eebe71e6c 100644 --- a/arch/sparc/kernel/sigutil_32.c +++ b/arch/sparc/kernel/sigutil_32.c @@ -7,6 +7,7 @@ #include <asm/sigcontext.h> #include <asm/fpumacro.h> #include <asm/ptrace.h> +#include <asm/switch_to.h> #include "sigutil.h" diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c index b19570d41a39..387834a9c56a 100644 --- a/arch/sparc/kernel/sigutil_64.c +++ b/arch/sparc/kernel/sigutil_64.c @@ -7,6 +7,7 @@ #include <asm/sigcontext.h> #include <asm/fpumacro.h> #include <asm/ptrace.h> +#include <asm/switch_to.h> #include "sigutil.h" diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c index 12ff09824cd9..9f5e24ddcc70 100644 --- a/arch/sparc/kernel/sparc_ksyms_64.c +++ b/arch/sparc/kernel/sparc_ksyms_64.c @@ -10,12 +10,12 @@ #include <linux/init.h> #include <linux/bitops.h> -#include <asm/system.h> #include <asm/cpudata.h> #include <asm/uaccess.h> #include <asm/spitfire.h> #include <asm/oplib.h> #include <asm/hypervisor.h> +#include <asm/cacheflush.h> struct poll { int fd; diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 1060e0672a4b..7d0c088e8aba 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c @@ -37,7 +37,6 @@ #include <asm/oplib.h> #include <asm/timex.h> #include <asm/timer.h> -#include <asm/system.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/idprom.h> diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c index 591f20ca9e48..d2de21333146 100644 --- a/arch/sparc/kernel/traps_32.c +++ b/arch/sparc/kernel/traps_32.c @@ -17,7 +17,6 @@ #include <linux/export.h> #include <asm/delay.h> -#include <asm/system.h> #include <asm/ptrace.h> #include <asm/oplib.h> #include <asm/page.h> diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 0cbdaa41cd1e..c72fdf55e1c1 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -22,7 +22,6 @@ #include <asm/smp.h> #include <asm/delay.h> -#include <asm/system.h> #include <asm/ptrace.h> #include <asm/oplib.h> #include <asm/page.h> @@ -41,6 +40,7 @@ #include <asm/head.h> #include <asm/prom.h> #include <asm/memctrl.h> +#include <asm/cacheflush.h> #include "entry.h" #include "kstack.h" diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index 4d043a1b2492..c0ec89786193 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ -12,7 +12,6 @@ #include <linux/mm.h> #include 
<asm/ptrace.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <linux/smp.h> #include <linux/perf_event.h> diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index 76e4ac1a13e1..dae85bc2eda5 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c @@ -16,7 +16,6 @@ #include <asm/ptrace.h> #include <asm/pstate.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <linux/smp.h> #include <linux/bitops.h> @@ -24,6 +23,7 @@ #include <linux/ratelimit.h> #include <linux/bitops.h> #include <asm/fpumacro.h> +#include <asm/cacheflush.h> enum direction { load, /* ld, ldd, ldh, ldsh */ diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index f67e28ef598c..5cffdc55f075 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -119,13 +119,17 @@ static struct bus_type vio_bus_type = { .remove = vio_device_remove, }; -int vio_register_driver(struct vio_driver *viodrv) +int __vio_register_driver(struct vio_driver *viodrv, struct module *owner, + const char *mod_name) { viodrv->driver.bus = &vio_bus_type; + viodrv->driver.name = viodrv->name; + viodrv->driver.owner = owner; + viodrv->driver.mod_name = mod_name; return driver_register(&viodrv->driver); } -EXPORT_SYMBOL(vio_register_driver); +EXPORT_SYMBOL(__vio_register_driver); void vio_unregister_driver(struct vio_driver *viodrv) { diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c index 73370674ccff..08e074b7eb6a 100644 --- a/arch/sparc/kernel/visemul.c +++ b/arch/sparc/kernel/visemul.c @@ -9,9 +9,9 @@ #include <asm/ptrace.h> #include <asm/pstate.h> -#include <asm/system.h> #include <asm/fpumacro.h> #include <asm/uaccess.h> +#include <asm/cacheflush.h> /* OPF field of various VIS instructions. 
*/ diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c index e575bd2fe381..2bbe2f28ad23 100644 --- a/arch/sparc/math-emu/math_64.c +++ b/arch/sparc/math-emu/math_64.c @@ -16,6 +16,7 @@ #include <asm/fpumacro.h> #include <asm/ptrace.h> #include <asm/uaccess.h> +#include <asm/cacheflush.h> #include "sfp-util_64.h" #include <math-emu/soft-fp.h> diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c index 8a7f81743c12..09d6af22db2d 100644 --- a/arch/sparc/mm/btfixup.c +++ b/arch/sparc/mm/btfixup.c @@ -12,7 +12,6 @@ #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/oplib.h> -#include <asm/system.h> #include <asm/cacheflush.h> #define BTFIXUP_OPTIMIZE_NOP diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 8023fd7e77b5..7705c6731e28 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -22,7 +22,6 @@ #include <linux/interrupt.h> #include <linux/kdebug.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/memreg.h> diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 7b00de61c5f1..c5f9021b1a01 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -27,7 +27,6 @@ #include <linux/gfp.h> #include <asm/sections.h> -#include <asm/system.h> #include <asm/vac-ops.h> #include <asm/page.h> #include <asm/pgtable.h> diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index b3f5e7dfea51..21faaeea85de 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -28,7 +28,6 @@ #include <linux/gfp.h> #include <asm/head.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h index 77d1b313e344..3e1ac8b96cae 100644 --- a/arch/sparc/mm/init_64.h +++ b/arch/sparc/mm/init_64.h @@ -36,8 +36,6 @@ extern unsigned long kern_locked_tte_data; extern void prom_world(int enter); -extern void free_initmem(void); - #ifdef CONFIG_SPARSEMEM_VMEMMAP #define VMEMMAP_CHUNK_SHIFT 22 #define VMEMMAP_CHUNK (1UL << VMEMMAP_CHUNK_SHIFT) diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c index 82ec8f666036..c5bf2a6c3858 100644 --- a/arch/sparc/mm/loadmmu.c +++ b/arch/sparc/mm/loadmmu.c @@ -11,7 +11,6 @@ #include <linux/mm.h> #include <linux/init.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 536412d8f416..c52add79b83d 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -6,7 +6,6 @@ #include <linux/kernel.h> #include <linux/preempt.h> #include <linux/slab.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/tlbflush.h> #include <asm/tlb.h> diff --git a/arch/sparc/prom/console_32.c b/arch/sparc/prom/console_32.c index a00f47b16c10..1cfb50f4cb9c 100644 --- a/arch/sparc/prom/console_32.c +++ b/arch/sparc/prom/console_32.c @@ -11,7 +11,6 @@ #include <linux/sched.h> #include <asm/openprom.h> #include <asm/oplib.h> -#include <asm/system.h> #include <linux/string.h> extern void restore_current(void); diff --git a/arch/sparc/prom/console_64.c b/arch/sparc/prom/console_64.c index 9de6c8cfe04a..f95edcc54fd5 100644 --- a/arch/sparc/prom/console_64.c +++ b/arch/sparc/prom/console_64.c @@ -10,7 +10,6 @@ #include <linux/sched.h> #include <asm/openprom.h> #include <asm/oplib.h> -#include <asm/system.h> #include <linux/string.h> static int __prom_console_write_buf(const char *buf, int len) diff --git a/arch/sparc/prom/misc_32.c 
b/arch/sparc/prom/misc_32.c index 677b6a10fbde..8dc0b6b271e8 100644 --- a/arch/sparc/prom/misc_32.c +++ b/arch/sparc/prom/misc_32.c @@ -13,7 +13,6 @@ #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/auxio.h> -#include <asm/system.h> extern void restore_current(void); diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c index e4f31d4d3715..f178b9dcc7b7 100644 --- a/arch/sparc/prom/misc_64.c +++ b/arch/sparc/prom/misc_64.c @@ -15,7 +15,6 @@ #include <asm/openprom.h> #include <asm/oplib.h> -#include <asm/system.h> #include <asm/ldc.h> static int prom_service_exists(const char *service_name) diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c index d9850c2b9bf2..04a4540509dd 100644 --- a/arch/sparc/prom/p1275.c +++ b/arch/sparc/prom/p1275.c @@ -13,7 +13,6 @@ #include <asm/openprom.h> #include <asm/oplib.h> -#include <asm/system.h> #include <asm/spitfire.h> #include <asm/pstate.h> #include <asm/ldc.h> diff --git a/arch/sparc/prom/ranges.c b/arch/sparc/prom/ranges.c index 0857aa9e839d..ad143c13bdc0 100644 --- a/arch/sparc/prom/ranges.c +++ b/arch/sparc/prom/ranges.c @@ -11,7 +11,6 @@ #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/types.h> -#include <asm/system.h> static struct linux_prom_ranges promlib_obio_ranges[PROMREG_MAX]; static int num_obio_ranges; diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h index 921dbeb8a70c..bb696da5d7cd 100644 --- a/arch/tile/include/asm/atomic.h +++ b/arch/tile/include/asm/atomic.h @@ -20,7 +20,7 @@ #ifndef __ASSEMBLY__ #include <linux/compiler.h> -#include <asm/system.h> +#include <linux/types.h> #define ATOMIC_INIT(i) { (i) } diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h index c03349e0ca9f..466dc4a39a4f 100644 --- a/arch/tile/include/asm/atomic_32.h +++ b/arch/tile/include/asm/atomic_32.h @@ -17,6 +17,7 @@ #ifndef _ASM_TILE_ATOMIC_32_H #define _ASM_TILE_ATOMIC_32_H +#include <asm/barrier.h> #include <arch/chip.h> #ifndef __ASSEMBLY__ diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h index 27fe667fddfe..f4500c688ffa 100644 --- a/arch/tile/include/asm/atomic_64.h +++ b/arch/tile/include/asm/atomic_64.h @@ -19,6 +19,7 @@ #ifndef __ASSEMBLY__ +#include <asm/barrier.h> #include <arch/spr_def.h> /* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */ diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/barrier.h index 23d1842f4839..990a217a0b72 100644 --- a/arch/tile/include/asm/system.h +++ b/arch/tile/include/asm/barrier.h @@ -12,20 +12,15 @@ * more details. */ -#ifndef _ASM_TILE_SYSTEM_H -#define _ASM_TILE_SYSTEM_H +#ifndef _ASM_TILE_BARRIER_H +#define _ASM_TILE_BARRIER_H #ifndef __ASSEMBLY__ #include <linux/types.h> -#include <linux/irqflags.h> - -/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */ -#include <asm/ptrace.h> - #include <arch/chip.h> -#include <arch/sim_def.h> #include <arch/spr_def.h> +#include <asm/timex.h> /* * read_barrier_depends - Flush all pending reads that subsequents reads @@ -78,17 +73,10 @@ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() * in cases like this where there are no data dependencies. 
*/ - #define read_barrier_depends() do { } while (0) #define __sync() __insn_mf() -#if CHIP_HAS_SPLIT_CYCLE() -#define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW) -#else -#define get_cycles_low() __insn_mfspr(SPR_CYCLE) /* just get all 64 bits */ -#endif - #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS() #include <hv/syscall_public.h> /* @@ -156,106 +144,5 @@ mb_incoherent(void) #define set_mb(var, value) \ do { var = value; mb(); } while (0) -/* - * Pause the DMA engine and static network before task switching. - */ -#define prepare_arch_switch(next) _prepare_arch_switch(next) -void _prepare_arch_switch(struct task_struct *next); - - -/* - * switch_to(n) should switch tasks to task nr n, first - * checking that n isn't the current task, in which case it does nothing. - * The number of callee-saved registers saved on the kernel stack - * is defined here for use in copy_thread() and must agree with __switch_to(). - */ -#endif /* !__ASSEMBLY__ */ -#define CALLEE_SAVED_FIRST_REG 30 -#define CALLEE_SAVED_REGS_COUNT 24 /* r30 to r52, plus an empty to align */ -#ifndef __ASSEMBLY__ -struct task_struct; -#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next))) -extern struct task_struct *_switch_to(struct task_struct *prev, - struct task_struct *next); - -/* Helper function for _switch_to(). */ -extern struct task_struct *__switch_to(struct task_struct *prev, - struct task_struct *next, - unsigned long new_system_save_k_0); - -/* Address that switched-away from tasks are at. */ -extern unsigned long get_switch_to_pc(void); - -/* - * On SMP systems, when the scheduler does migration-cost autodetection, - * it needs a way to flush as much of the CPU's caches as possible: - * - * TODO: fill this in! - */ -static inline void sched_cacheflush(void) -{ -} - -#define arch_align_stack(x) (x) - -/* - * Is the kernel doing fixups of unaligned accesses? If <0, no kernel - * intervention occurs and SIGBUS is delivered with no data address - * info. If 0, the kernel single-steps the instruction to discover - * the data address to provide with the SIGBUS. If 1, the kernel does - * a fixup. - */ -extern int unaligned_fixup; - -/* Is the kernel printing on each unaligned fixup? */ -extern int unaligned_printk; - -/* Number of unaligned fixups performed */ -extern unsigned int unaligned_fixup_count; - -/* Init-time routine to do tile-specific per-cpu setup. */ -void setup_cpu(int boot); - -/* User-level DMA management functions */ -void grant_dma_mpls(void); -void restrict_dma_mpls(void); - -#ifdef CONFIG_HARDWALL -/* User-level network management functions */ -void reset_network_state(void); -void grant_network_mpls(void); -void restrict_network_mpls(void); -int hardwall_deactivate(struct task_struct *task); - -/* Hook hardwall code into changes in affinity. */ -#define arch_set_cpus_allowed(p, new_mask) do { \ - if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \ - hardwall_deactivate(p); \ -} while (0) -#endif - -/* - * Kernel threads can check to see if they need to migrate their - * stack whenever they return from a context switch; for user - * threads, we defer until they are returning to user-space. 
- */ -#define finish_arch_switch(prev) do { \ - if (unlikely((prev)->state == TASK_DEAD)) \ - __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \ - ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \ - __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \ - (current->pid << _SIM_CONTROL_OPERATOR_BITS)); \ - if (current->mm == NULL && !kstack_hash && \ - current_thread_info()->homecache_cpu != smp_processor_id()) \ - homecache_migrate_kthread(); \ -} while (0) - -/* Support function for forking a new task. */ -void ret_from_fork(void); - -/* Called from ret_from_fork() when a new process starts up. */ -struct task_struct *sim_notify_fork(struct task_struct *prev); - #endif /* !__ASSEMBLY__ */ - -#endif /* _ASM_TILE_SYSTEM_H */ +#endif /* _ASM_TILE_BARRIER_H */ diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h index 571b118bfd9b..ddc4c1efde43 100644 --- a/arch/tile/include/asm/bitops_32.h +++ b/arch/tile/include/asm/bitops_32.h @@ -17,7 +17,6 @@ #include <linux/compiler.h> #include <linux/atomic.h> -#include <asm/system.h> /* Tile-specific routines to support <asm/bitops.h>. */ unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask); diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h index e9c8e381ee0e..58d021a9834f 100644 --- a/arch/tile/include/asm/bitops_64.h +++ b/arch/tile/include/asm/bitops_64.h @@ -17,7 +17,6 @@ #include <linux/compiler.h> #include <linux/atomic.h> -#include <asm/system.h> /* See <asm/bitops.h> for API comments. */ diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h index e925f4bb498f..0fc63c488edf 100644 --- a/arch/tile/include/asm/cacheflush.h +++ b/arch/tile/include/asm/cacheflush.h @@ -20,7 +20,6 @@ /* Keep includes the same across arches. */ #include <linux/mm.h> #include <linux/cache.h> -#include <asm/system.h> #include <arch/icache.h> /* Caches are physically-indexed and so don't need special treatment */ @@ -152,4 +151,14 @@ static inline void finv_buffer_local(void *buffer, size_t size) */ void finv_buffer_remote(void *buffer, size_t size, int hfh); +/* + * On SMP systems, when the scheduler does migration-cost autodetection, + * it needs a way to flush as much of the CPU's caches as possible: + * + * TODO: fill this in! + */ +static inline void sched_cacheflush(void) +{ +} + #endif /* _ASM_TILE_CACHEFLUSH_H */ diff --git a/arch/tile/include/asm/exec.h b/arch/tile/include/asm/exec.h new file mode 100644 index 000000000000..a714e1950867 --- /dev/null +++ b/arch/tile/include/asm/exec.h @@ -0,0 +1,20 @@ +/* + * Copyright 2010 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. 
+ */ + +#ifndef _ASM_TILE_EXEC_H +#define _ASM_TILE_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* _ASM_TILE_EXEC_H */ diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h index 1a20b7ef8ea2..67490910774d 100644 --- a/arch/tile/include/asm/pgtable.h +++ b/arch/tile/include/asm/pgtable.h @@ -29,7 +29,6 @@ #include <linux/spinlock.h> #include <asm/processor.h> #include <asm/fixmap.h> -#include <asm/system.h> struct mm_struct; struct vm_area_struct; diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h index 7caf0f36b030..e58613e0752f 100644 --- a/arch/tile/include/asm/setup.h +++ b/arch/tile/include/asm/setup.h @@ -31,6 +31,28 @@ void early_panic(const char *fmt, ...); void warn_early_printk(void); void __init disable_early_printk(void); +/* Init-time routine to do tile-specific per-cpu setup. */ +void setup_cpu(int boot); + +/* User-level DMA management functions */ +void grant_dma_mpls(void); +void restrict_dma_mpls(void); + +#ifdef CONFIG_HARDWALL +/* User-level network management functions */ +void reset_network_state(void); +void grant_network_mpls(void); +void restrict_network_mpls(void); +struct task_struct; +int hardwall_deactivate(struct task_struct *task); + +/* Hook hardwall code into changes in affinity. */ +#define arch_set_cpus_allowed(p, new_mask) do { \ + if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \ + hardwall_deactivate(p); \ +} while (0) +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_TILE_SETUP_H */ diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h index a5e4208d34f9..c0a77b38d39a 100644 --- a/arch/tile/include/asm/spinlock_32.h +++ b/arch/tile/include/asm/spinlock_32.h @@ -19,7 +19,6 @@ #include <linux/atomic.h> #include <asm/page.h> -#include <asm/system.h> #include <linux/compiler.h> /* diff --git a/arch/tile/include/asm/switch_to.h b/arch/tile/include/asm/switch_to.h new file mode 100644 index 000000000000..1d48c5fee8b7 --- /dev/null +++ b/arch/tile/include/asm/switch_to.h @@ -0,0 +1,76 @@ +/* + * Copyright 2010 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + */ + +#ifndef _ASM_TILE_SWITCH_TO_H +#define _ASM_TILE_SWITCH_TO_H + +#include <arch/sim_def.h> + +/* + * switch_to(n) should switch tasks to task nr n, first + * checking that n isn't the current task, in which case it does nothing. + * The number of callee-saved registers saved on the kernel stack + * is defined here for use in copy_thread() and must agree with __switch_to(). + */ +#define CALLEE_SAVED_FIRST_REG 30 +#define CALLEE_SAVED_REGS_COUNT 24 /* r30 to r52, plus an empty to align */ + +#ifndef __ASSEMBLY__ + +struct task_struct; + +/* + * Pause the DMA engine and static network before task switching. 
+ */ +#define prepare_arch_switch(next) _prepare_arch_switch(next) +void _prepare_arch_switch(struct task_struct *next); + +struct task_struct; +#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next))) +extern struct task_struct *_switch_to(struct task_struct *prev, + struct task_struct *next); + +/* Helper function for _switch_to(). */ +extern struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next, + unsigned long new_system_save_k_0); + +/* Address that switched-away from tasks are at. */ +extern unsigned long get_switch_to_pc(void); + +/* + * Kernel threads can check to see if they need to migrate their + * stack whenever they return from a context switch; for user + * threads, we defer until they are returning to user-space. + */ +#define finish_arch_switch(prev) do { \ + if (unlikely((prev)->state == TASK_DEAD)) \ + __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \ + ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \ + __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \ + (current->pid << _SIM_CONTROL_OPERATOR_BITS)); \ + if (current->mm == NULL && !kstack_hash && \ + current_thread_info()->homecache_cpu != smp_processor_id()) \ + homecache_migrate_kthread(); \ +} while (0) + +/* Support function for forking a new task. */ +void ret_from_fork(void); + +/* Called from ret_from_fork() when a new process starts up. */ +struct task_struct *sim_notify_fork(struct task_struct *prev); + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_TILE_SWITCH_TO_H */ diff --git a/arch/tile/include/asm/timex.h b/arch/tile/include/asm/timex.h index 29921f0b86da..dc987d53e2a9 100644 --- a/arch/tile/include/asm/timex.h +++ b/arch/tile/include/asm/timex.h @@ -29,11 +29,13 @@ typedef unsigned long long cycles_t; #if CHIP_HAS_SPLIT_CYCLE() cycles_t get_cycles(void); +#define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW) #else static inline cycles_t get_cycles(void) { return __insn_mfspr(SPR_CYCLE); } +#define get_cycles_low() __insn_mfspr(SPR_CYCLE) /* just get all 64 bits */ #endif cycles_t get_clock_rate(void); diff --git a/arch/tile/include/asm/unaligned.h b/arch/tile/include/asm/unaligned.h index 137e2de5b102..37dfbe598872 100644 --- a/arch/tile/include/asm/unaligned.h +++ b/arch/tile/include/asm/unaligned.h @@ -21,4 +21,19 @@ #define get_unaligned __get_unaligned_le #define put_unaligned __put_unaligned_le +/* + * Is the kernel doing fixups of unaligned accesses? If <0, no kernel + * intervention occurs and SIGBUS is delivered with no data address + * info. If 0, the kernel single-steps the instruction to discover + * the data address to provide with the SIGBUS. If 1, the kernel does + * a fixup. + */ +extern int unaligned_fixup; + +/* Is the kernel printing on each unaligned fixup? 
*/ +extern int unaligned_printk; + +/* Number of unaligned fixups performed */ +extern unsigned int unaligned_fixup_count; + #endif /* _ASM_TILE_UNALIGNED_H */ diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c index 493a0e66d916..afb9c9a0d887 100644 --- a/arch/tile/kernel/early_printk.c +++ b/arch/tile/kernel/early_printk.c @@ -16,6 +16,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> +#include <linux/irqflags.h> #include <asm/setup.h> #include <hv/hypervisor.h> diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c index 62d820833c68..7a9327046404 100644 --- a/arch/tile/kernel/proc.c +++ b/arch/tile/kernel/proc.c @@ -23,6 +23,7 @@ #include <linux/sysctl.h> #include <linux/hardirq.h> #include <linux/mman.h> +#include <asm/unaligned.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/sections.h> diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 6ae495ef2b99..30caecac94dc 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -27,16 +27,17 @@ #include <linux/kernel.h> #include <linux/tracehook.h> #include <linux/signal.h> -#include <asm/system.h> #include <asm/stack.h> #include <asm/homecache.h> #include <asm/syscalls.h> #include <asm/traps.h> +#include <asm/setup.h> #ifdef CONFIG_HARDWALL #include <asm/hardwall.h> #endif #include <arch/chip.h> #include <arch/abi.h> +#include <arch/sim_def.h> /* diff --git a/arch/tile/kernel/regs_32.S b/arch/tile/kernel/regs_32.S index caa13101c264..c12280c2d904 100644 --- a/arch/tile/kernel/regs_32.S +++ b/arch/tile/kernel/regs_32.S @@ -13,11 +13,11 @@ */ #include <linux/linkage.h> -#include <asm/system.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> #include <arch/spr_def.h> #include <asm/processor.h> +#include <asm/switch_to.h> /* * See <asm/system.h>; called with prev and next task_struct pointers. diff --git a/arch/tile/kernel/regs_64.S b/arch/tile/kernel/regs_64.S index f748c1e85285..0829fd01fa30 100644 --- a/arch/tile/kernel/regs_64.S +++ b/arch/tile/kernel/regs_64.S @@ -13,11 +13,11 @@ */ #include <linux/linkage.h> -#include <asm/system.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> #include <arch/spr_def.h> #include <asm/processor.h> +#include <asm/switch_to.h> /* * See <asm/system.h>; called with prev and next task_struct pointers. diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c index b7a879504086..bc1eb586e24d 100644 --- a/arch/tile/kernel/single_step.c +++ b/arch/tile/kernel/single_step.c @@ -25,6 +25,7 @@ #include <linux/types.h> #include <linux/err.h> #include <asm/cacheflush.h> +#include <asm/unaligned.h> #include <arch/abi.h> #include <arch/opcode.h> diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c index 4f47b8a356df..2bb6602a1ee7 100644 --- a/arch/tile/kernel/traps.c +++ b/arch/tile/kernel/traps.c @@ -21,6 +21,7 @@ #include <linux/ptrace.h> #include <asm/stack.h> #include <asm/traps.h> +#include <asm/setup.h> #include <arch/interrupts.h> #include <arch/spr_def.h> diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c index 1a00fb64fc88..758b6038c2b7 100644 --- a/arch/tile/mm/elf.c +++ b/arch/tile/mm/elf.c @@ -21,6 +21,7 @@ #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/sections.h> +#include <arch/sim_def.h> /* Notify a running simulator, if any, that an exec just occurred. 
*/ static void sim_notify_exec(const char *binary_name) diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index c1eaaa1fcc20..cba30e9547b4 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c @@ -35,7 +35,6 @@ #include <linux/syscalls.h> #include <linux/uaccess.h> -#include <asm/system.h> #include <asm/pgalloc.h> #include <asm/sections.h> #include <asm/traps.h> diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c index 7309988c9794..830c4908ea76 100644 --- a/arch/tile/mm/init.c +++ b/arch/tile/mm/init.c @@ -38,7 +38,6 @@ #include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/dma.h> diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c index de7d8e21e01d..87303693a072 100644 --- a/arch/tile/mm/pgtable.c +++ b/arch/tile/mm/pgtable.c @@ -27,7 +27,6 @@ #include <linux/vmalloc.h> #include <linux/smp.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/fixmap.h> diff --git a/arch/um/include/asm/fixmap.h b/arch/um/include/asm/fixmap.h index 69c0252345f1..21a423bae5e8 100644 --- a/arch/um/include/asm/fixmap.h +++ b/arch/um/include/asm/fixmap.h @@ -2,7 +2,6 @@ #define __UM_FIXMAP_H #include <asm/processor.h> -#include <asm/system.h> #include <asm/kmap_types.h> #include <asm/archparam.h> #include <asm/page.h> diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index ca113d6999c5..34b789b71115 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -3,7 +3,6 @@ include include/asm-generic/Kbuild.asm generic-y += atomic.h generic-y += auxvec.h generic-y += bitsperlong.h -generic-y += bug.h generic-y += bugs.h generic-y += cputime.h generic-y += current.h diff --git a/arch/unicore32/include/asm/barrier.h b/arch/unicore32/include/asm/barrier.h new file mode 100644 index 000000000000..a6620e5336b6 --- /dev/null +++ b/arch/unicore32/include/asm/barrier.h @@ -0,0 +1,28 @@ +/* + * Memory barrier implementations for PKUnity SoC and UniCore ISA + * + * Copyright (C) 2001-2012 GUAN Xue-tao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __UNICORE_BARRIER_H__ +#define __UNICORE_BARRIER_H__ + +#define isb() __asm__ __volatile__ ("" : : : "memory") +#define dsb() __asm__ __volatile__ ("" : : : "memory") +#define dmb() __asm__ __volatile__ ("" : : : "memory") + +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define read_barrier_depends() do { } while (0) +#define smp_read_barrier_depends() do { } while (0) + +#define set_mb(var, value) do { var = value; smp_mb(); } while (0) + +#endif /* __UNICORE_BARRIER_H__ */ diff --git a/arch/unicore32/include/asm/bug.h b/arch/unicore32/include/asm/bug.h new file mode 100644 index 000000000000..b1ff8cadb086 --- /dev/null +++ b/arch/unicore32/include/asm/bug.h @@ -0,0 +1,27 @@ +/* + * Bug handling for PKUnity SoC and UniCore ISA + * + * Copyright (C) 2001-2012 GUAN Xue-tao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __UNICORE_BUG_H__ +#define __UNICORE_BUG_H__ + +#include <asm-generic/bug.h> + +struct pt_regs; +struct siginfo; + +extern void die(const char *msg, struct pt_regs *regs, int err); +extern void uc32_notify_die(const char *str, struct pt_regs *regs, + struct siginfo *info, unsigned long err, unsigned long trap); + +extern asmlinkage void __backtrace(void); +extern asmlinkage void c_backtrace(unsigned long fp, int pmode); + +extern void __show_regs(struct pt_regs *); + +#endif /* __UNICORE_BUG_H__ */ diff --git a/arch/unicore32/include/asm/cmpxchg.h b/arch/unicore32/include/asm/cmpxchg.h new file mode 100644 index 000000000000..df4d5acfd19f --- /dev/null +++ b/arch/unicore32/include/asm/cmpxchg.h @@ -0,0 +1,61 @@ +/* + * Atomics xchg/cmpxchg for PKUnity SoC and UniCore ISA + * + * Copyright (C) 2001-2012 GUAN Xue-tao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __UNICORE_CMPXCHG_H__ +#define __UNICORE_CMPXCHG_H__ + +/* + * Generate a link failure on undefined symbol if the pointer points to a value + * of unsupported size. + */ +extern void __xchg_bad_pointer(void); + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, + int size) +{ + unsigned long ret; + + switch (size) { + case 1: + asm volatile("swapb %0, %1, [%2]" + : "=&r" (ret) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: + asm volatile("swapw %0, %1, [%2]" + : "=&r" (ret) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + default: + ret = __xchg_bad_pointer(); + } + + return ret; +} + +#define xchg(ptr, x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) + +#include <asm-generic/cmpxchg-local.h> + +/* + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. + */ +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \ + (unsigned long)(o), (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg64_local(ptr, o, n) \ + __cmpxchg64_local_generic((ptr), (o), (n)) + +#include <asm-generic/cmpxchg.h> + +#endif /* __UNICORE_CMPXCHG_H__ */ diff --git a/arch/unicore32/include/asm/exec.h b/arch/unicore32/include/asm/exec.h new file mode 100644 index 000000000000..06d1f0f57888 --- /dev/null +++ b/arch/unicore32/include/asm/exec.h @@ -0,0 +1,15 @@ +/* + * Process execution bits for PKUnity SoC and UniCore ISA + * + * Copyright (C) 2001-2012 GUAN Xue-tao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __UNICORE_EXEC_H__ +#define __UNICORE_EXEC_H__ + +#define arch_align_stack(x) (x) + +#endif /* __UNICORE_EXEC_H__ */ diff --git a/arch/unicore32/include/asm/hwdef-copro.h b/arch/unicore32/include/asm/hwdef-copro.h new file mode 100644 index 000000000000..a3292f039a68 --- /dev/null +++ b/arch/unicore32/include/asm/hwdef-copro.h @@ -0,0 +1,48 @@ +/* + * Co-processor register definitions for PKUnity SoC and UniCore ISA + * + * Copyright (C) 2001-2012 GUAN Xue-tao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __UNICORE_HWDEF_COPRO_H__ +#define __UNICORE_HWDEF_COPRO_H__ + +/* + * Control Register bits (CP#0 CR1) + */ +#define CR_M (1 << 0) /* MMU enable */ +#define CR_A (1 << 1) /* Alignment abort enable */ +#define CR_D (1 << 2) /* Dcache enable */ +#define CR_I (1 << 3) /* Icache enable */ +#define CR_B (1 << 4) /* Dcache write mechanism: write back */ +#define CR_T (1 << 5) /* Burst enable */ +#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ + +#ifndef __ASSEMBLY__ + +#define vectors_high() (cr_alignment & CR_V) + +extern unsigned long cr_no_alignment; /* defined in entry.S */ +extern unsigned long cr_alignment; /* defined in entry.S */ + +static inline unsigned int get_cr(void) +{ + unsigned int val; + asm("movc %0, p0.c1, #0" : "=r" (val) : : "cc"); + return val; +} + +static inline void set_cr(unsigned int val) +{ + asm volatile("movc p0.c1, %0, #0" : : "r" (val) : "cc"); + isb(); +} + +extern void adjust_cr(unsigned long mask, unsigned long set); + +#endif /* __ASSEMBLY__ */ + +#endif /* __UNICORE_HWDEF_COPRO_H__ */ diff --git a/arch/unicore32/include/asm/io.h b/arch/unicore32/include/asm/io.h index adddf6d64077..39decb6e6f57 100644 --- a/arch/unicore32/include/asm/io.h +++ b/arch/unicore32/include/asm/io.h @@ -16,7 +16,6 @@ #include <asm/byteorder.h> #include <asm/memory.h> -#include <asm/system.h> #define PCI_IOBASE PKUNITY_PCILIO_BASE #include <asm-generic/io.h> diff --git a/arch/unicore32/include/asm/switch_to.h b/arch/unicore32/include/asm/switch_to.h new file mode 100644 index 000000000000..39572d2bd692 --- /dev/null +++ b/arch/unicore32/include/asm/switch_to.h @@ -0,0 +1,30 @@ +/* + * Task switching for PKUnity SoC and UniCore ISA + * + * Copyright (C) 2001-2012 GUAN Xue-tao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __UNICORE_SWITCH_TO_H__ +#define __UNICORE_SWITCH_TO_H__ + +struct task_struct; +struct thread_info; + +/* + * switch_to(prev, next) should switch from task `prev' to `next' + * `prev' will never be the same as `next'. schedule() itself + * contains the memory barrier to tell GCC not to cache `current'. + */ +extern struct task_struct *__switch_to(struct task_struct *, + struct thread_info *, struct thread_info *); + +#define switch_to(prev, next, last) \ + do { \ + last = __switch_to(prev, task_thread_info(prev), \ + task_thread_info(next)); \ + } while (0) + +#endif /* __UNICORE_SWITCH_TO_H__ */ diff --git a/arch/unicore32/include/asm/system.h b/arch/unicore32/include/asm/system.h deleted file mode 100644 index 246b71c17fda..000000000000 --- a/arch/unicore32/include/asm/system.h +++ /dev/null @@ -1,161 +0,0 @@ -/* - * linux/arch/unicore32/include/asm/system.h - * - * Code specific to PKUnity SoC and UniCore ISA - * - * Copyright (C) 2001-2010 GUAN Xue-tao - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef __UNICORE_SYSTEM_H__ -#define __UNICORE_SYSTEM_H__ - -#ifdef __KERNEL__ - -/* - * CR1 bits (CP#0 CR1) - */ -#define CR_M (1 << 0) /* MMU enable */ -#define CR_A (1 << 1) /* Alignment abort enable */ -#define CR_D (1 << 2) /* Dcache enable */ -#define CR_I (1 << 3) /* Icache enable */ -#define CR_B (1 << 4) /* Dcache write mechanism: write back */ -#define CR_T (1 << 5) /* Burst enable */ -#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ - -#ifndef __ASSEMBLY__ - -#include <linux/linkage.h> -#include <linux/irqflags.h> - -struct thread_info; -struct task_struct; - -struct pt_regs; - -void die(const char *msg, struct pt_regs *regs, int err); - -struct siginfo; -void uc32_notify_die(const char *str, struct pt_regs *regs, - struct siginfo *info, unsigned long err, unsigned long trap); - -void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, - struct pt_regs *), - int sig, int code, const char *name); - -#define xchg(ptr, x) \ - ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) - -extern asmlinkage void __backtrace(void); -extern asmlinkage void c_backtrace(unsigned long fp, int pmode); - -struct mm_struct; -extern void show_pte(struct mm_struct *mm, unsigned long addr); -extern void __show_regs(struct pt_regs *); - -extern int cpu_architecture(void); -extern void cpu_init(void); - -#define vectors_high() (cr_alignment & CR_V) - -#define isb() __asm__ __volatile__ ("" : : : "memory") -#define dsb() __asm__ __volatile__ ("" : : : "memory") -#define dmb() __asm__ __volatile__ ("" : : : "memory") - -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define read_barrier_depends() do { } while (0) -#define smp_read_barrier_depends() do { } while (0) - -#define set_mb(var, value) do { var = value; smp_mb(); } while (0) -#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); - -extern unsigned long cr_no_alignment; /* defined in entry-unicore.S */ -extern unsigned long cr_alignment; /* defined in entry-unicore.S */ - -static inline unsigned int get_cr(void) -{ - unsigned int val; - asm("movc %0, p0.c1, #0" : "=r" (val) : : "cc"); - return val; -} - -static inline void set_cr(unsigned int val) -{ - asm volatile("movc p0.c1, %0, #0 @set CR" - : : "r" (val) : "cc"); - isb(); -} - -extern void adjust_cr(unsigned long mask, unsigned long set); - -/* - * switch_to(prev, next) should switch from task `prev' to `next' - * `prev' will never be the same as `next'. schedule() itself - * contains the memory barrier to tell GCC not to cache `current'. 
- */ -extern struct task_struct *__switch_to(struct task_struct *, - struct thread_info *, struct thread_info *); -extern void panic(const char *fmt, ...); - -#define switch_to(prev, next, last) \ -do { \ - last = __switch_to(prev, \ - task_thread_info(prev), task_thread_info(next)); \ -} while (0) - -static inline unsigned long -__xchg(unsigned long x, volatile void *ptr, int size) -{ - unsigned long ret; - - switch (size) { - case 1: - asm volatile("@ __xchg1\n" - " swapb %0, %1, [%2]" - : "=&r" (ret) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; - case 4: - asm volatile("@ __xchg4\n" - " swapw %0, %1, [%2]" - : "=&r" (ret) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; - default: - panic("xchg: bad data size: ptr 0x%p, size %d\n", - ptr, size); - } - - return ret; -} - -#include <asm-generic/cmpxchg-local.h> - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. - */ -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \ - (unsigned long)(o), (unsigned long)(n), sizeof(*(ptr)))) -#define cmpxchg64_local(ptr, o, n) \ - __cmpxchg64_local_generic((ptr), (o), (n)) - -#include <asm-generic/cmpxchg.h> - -#endif /* __ASSEMBLY__ */ - -#define arch_align_stack(x) (x) - -#endif /* __KERNEL__ */ - -#endif diff --git a/arch/unicore32/include/asm/uaccess.h b/arch/unicore32/include/asm/uaccess.h index 2acda503a6d9..897e11ad8124 100644 --- a/arch/unicore32/include/asm/uaccess.h +++ b/arch/unicore32/include/asm/uaccess.h @@ -16,7 +16,6 @@ #include <linux/errno.h> #include <asm/memory.h> -#include <asm/system.h> #define __copy_from_user __copy_from_user #define __copy_to_user __copy_to_user diff --git a/arch/unicore32/kernel/dma.c b/arch/unicore32/kernel/dma.c index ae441bc3122c..ed2d4d78d9c4 100644 --- a/arch/unicore32/kernel/dma.c +++ b/arch/unicore32/kernel/dma.c @@ -18,7 +18,6 @@ #include <linux/errno.h> #include <linux/io.h> -#include <asm/system.h> #include <asm/irq.h> #include <mach/hardware.h> #include <mach/dma.h> diff --git a/arch/unicore32/kernel/head.S b/arch/unicore32/kernel/head.S index 8caf322e110d..e8f0b98c02ee 100644 --- a/arch/unicore32/kernel/head.S +++ b/arch/unicore32/kernel/head.S @@ -17,7 +17,7 @@ #include <generated/asm-offsets.h> #include <asm/memory.h> #include <asm/thread_info.h> -#include <asm/system.h> +#include <asm/hwdef-copro.h> #include <asm/pgtable-hwdef.h> #if (PHYS_OFFSET & 0x003fffff) diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c index 7d0f0b7983a0..d75ef8b6cb56 100644 --- a/arch/unicore32/kernel/hibernate.c +++ b/arch/unicore32/kernel/hibernate.c @@ -15,7 +15,6 @@ #include <linux/suspend.h> #include <linux/bootmem.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> diff --git a/arch/unicore32/kernel/irq.c b/arch/unicore32/kernel/irq.c index d4efa7d679ff..0be5ccd7ccd2 100644 --- a/arch/unicore32/kernel/irq.c +++ b/arch/unicore32/kernel/irq.c @@ -26,7 +26,6 @@ #include <linux/syscore_ops.h> #include <linux/gpio.h> -#include <asm/system.h> #include <mach/hardware.h> #include "setup.h" diff --git a/arch/unicore32/kernel/ksyms.c b/arch/unicore32/kernel/ksyms.c index d98bd812cae1..d285d71cbe35 100644 --- a/arch/unicore32/kernel/ksyms.c +++ b/arch/unicore32/kernel/ksyms.c @@ -20,7 +20,6 @@ #include <linux/io.h> #include <asm/checksum.h> -#include <asm/system.h> #include "ksyms.h" diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index 432b4291f37b..b6f0458c3143 
100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c @@ -34,7 +34,6 @@ #include <asm/cacheflush.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/stacktrace.h> #include "setup.h" diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h index dcd1306eb5c6..f23955028a18 100644 --- a/arch/unicore32/kernel/setup.h +++ b/arch/unicore32/kernel/setup.h @@ -12,8 +12,11 @@ #ifndef __UNICORE_KERNEL_SETUP_H__ #define __UNICORE_KERNEL_SETUP_H__ +#include <asm/hwdef-copro.h> + extern void paging_init(void); extern void puv3_core_init(void); +extern void cpu_init(void); extern void puv3_ps2_init(void); extern void pci_puv3_preinit(void); diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index b9a26465e728..2054f0d4db13 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -26,7 +26,6 @@ #include <linux/unistd.h> #include <asm/cacheflush.h> -#include <asm/system.h> #include <asm/traps.h> #include "setup.h" diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c index 28f576d733ee..de7dc5fdd58b 100644 --- a/arch/unicore32/mm/alignment.c +++ b/arch/unicore32/mm/alignment.c @@ -24,6 +24,8 @@ #include <asm/tlbflush.h> #include <asm/unaligned.h> +#include "mm.h" + #define CODING_BITS(i) (i & 0xe0000120) #define LDST_P_BIT(i) (i & (1 << 28)) /* Preindex */ diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c index 283aa4b50b7a..2eeb9c04cab0 100644 --- a/arch/unicore32/mm/fault.c +++ b/arch/unicore32/mm/fault.c @@ -20,7 +20,6 @@ #include <linux/sched.h> #include <linux/io.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> diff --git a/arch/unicore32/mm/flush.c b/arch/unicore32/mm/flush.c index 93478cc8b26d..6d4c096ffa2a 100644 --- a/arch/unicore32/mm/flush.c +++ b/arch/unicore32/mm/flush.c @@ -14,7 +14,6 @@ #include <linux/pagemap.h> #include <asm/cacheflush.h> -#include <asm/system.h> #include <asm/tlbflush.h> void flush_cache_mm(struct mm_struct *mm) diff --git a/arch/unicore32/mm/mm.h b/arch/unicore32/mm/mm.h index 3296bca0f1f7..05c7f532eee2 100644 --- a/arch/unicore32/mm/mm.h +++ b/arch/unicore32/mm/mm.h @@ -9,6 +9,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#include <asm/hwdef-copro.h> + /* the upper-most page table pointer */ extern pmd_t *top_pmd; extern int sysctl_overcommit_memory; @@ -34,6 +36,9 @@ struct mem_type { const struct mem_type *get_mem_type(unsigned int type); extern void __flush_dcache_page(struct address_space *, struct page *); +extern void hook_fault_code(int nr, int (*fn) + (unsigned long, unsigned int, struct pt_regs *), + int sig, int code, const char *name); void __init bootmem_init(void); void uc32_mm_memblock_reserve(void); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 901952355969..3ad653de7100 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2125,6 +2125,13 @@ config NET5501 ---help--- This option enables system support for the Soekris Engineering net5501. +config GEOS + bool "Traverse Technologies GEOS System Support (LEDS, GPIO, etc)" + select GPIOLIB + depends on DMI + ---help--- + This option enables system support for the Traverse Technologies GEOS. 
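The new GEOS Kconfig entry above gates a small DMI-probed board-support file. Purely as a hedged illustration of that pattern — the identifiers and match strings below are invented for the sketch and are not taken from the actual arch/x86/platform code — a minimal DMI-gated init has this shape:

	#include <linux/dmi.h>
	#include <linux/errno.h>
	#include <linux/init.h>

	/* Hypothetical match table; real board support would use the vendor's DMI strings. */
	static const struct dmi_system_id board_dmi_table[] __initconst = {
		{
			.ident = "Example single-board computer",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
			},
		},
		{ }	/* terminating entry */
	};

	static int __init example_board_init(void)
	{
		/* Bail out unless the firmware identifies the expected board. */
		if (!dmi_check_system(board_dmi_table))
			return -ENODEV;

		/* Register the board's LED/GPIO platform devices here. */
		return 0;
	}
	device_initcall(example_board_init);

This is also why the option depends on DMI and selects GPIOLIB: the probe is firmware-identification only, and the devices it registers are plain GPIO consumers.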
+ endif # X86_32 config AMD_NB diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 4c2e59a420b9..d511d951a052 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -26,7 +26,6 @@ #include <linux/init.h> #include <linux/jiffies.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/cacheflush.h> diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index a9371c91718c..4b2caeefe1a2 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -11,7 +11,6 @@ #include <linux/atomic.h> #include <asm/fixmap.h> #include <asm/mpspec.h> -#include <asm/system.h> #include <asm/msr.h> #define ARCH_APICTIMER_STOPS_ON_C3 1 diff --git a/arch/x86/include/asm/auxvec.h b/arch/x86/include/asm/auxvec.h index 1316b4c35425..77203ac352de 100644 --- a/arch/x86/include/asm/auxvec.h +++ b/arch/x86/include/asm/auxvec.h @@ -9,4 +9,11 @@ #endif #define AT_SYSINFO_EHDR 33 +/* entries in ARCH_DLINFO: */ +#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64) +# define AT_VECTOR_SIZE_ARCH 2 +#else /* else it's non-compat x86-64 */ +# define AT_VECTOR_SIZE_ARCH 1 +#endif + #endif /* _ASM_X86_AUXVEC_H */ diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h new file mode 100644 index 000000000000..c6cd358a1eec --- /dev/null +++ b/arch/x86/include/asm/barrier.h @@ -0,0 +1,116 @@ +#ifndef _ASM_X86_BARRIER_H +#define _ASM_X86_BARRIER_H + +#include <asm/alternative.h> +#include <asm/nops.h> + +/* + * Force strict CPU ordering. + * And yes, this is required on UP too when we're talking + * to devices. + */ + +#ifdef CONFIG_X86_32 +/* + * Some non-Intel clones support out of order store. wmb() ceases to be a + * nop for these. + */ +#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) +#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) +#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) +#else +#define mb() asm volatile("mfence":::"memory") +#define rmb() asm volatile("lfence":::"memory") +#define wmb() asm volatile("sfence" ::: "memory") +#endif + +/** + * read_barrier_depends - Flush all pending reads that subsequents reads + * depend on. + * + * No data-dependent reads from memory-like regions are ever reordered + * over this barrier. All reads preceding this primitive are guaranteed + * to access memory (but not necessarily other CPUs' caches) before any + * reads following this primitive that depend on the data return by + * any of the preceding reads. This primitive is much lighter weight than + * rmb() on most CPUs, and is never heavier weight than is + * rmb(). + * + * These ordering constraints are respected by both the local CPU + * and the compiler. + * + * Ordering is not guaranteed by anything other than these primitives, + * not even by data dependencies. See the documentation for + * memory_barrier() for examples and URLs to more information. + * + * For example, the following code would force ordering (the initial + * value of "a" is zero, "b" is one, and "p" is "&a"): + * + * <programlisting> + * CPU 0 CPU 1 + * + * b = 2; + * memory_barrier(); + * p = &b; q = p; + * read_barrier_depends(); + * d = *q; + * </programlisting> + * + * because the read of "*q" depends on the read of "p" and these + * two reads are separated by a read_barrier_depends(). 
However, + * the following code, with the same initial values for "a" and "b": + * + * <programlisting> + * CPU 0 CPU 1 + * + * a = 2; + * memory_barrier(); + * b = 3; y = b; + * read_barrier_depends(); + * x = a; + * </programlisting> + * + * does not enforce ordering, since there is no data dependency between + * the read of "a" and the read of "b". Therefore, on some CPUs, such + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() + * in cases like this where there are no data dependencies. + **/ + +#define read_barrier_depends() do { } while (0) + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#ifdef CONFIG_X86_PPRO_FENCE +# define smp_rmb() rmb() +#else +# define smp_rmb() barrier() +#endif +#ifdef CONFIG_X86_OOSTORE +# define smp_wmb() wmb() +#else +# define smp_wmb() barrier() +#endif +#define smp_read_barrier_depends() read_barrier_depends() +#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while (0) +#define set_mb(var, value) do { var = value; barrier(); } while (0) +#endif + +/* + * Stop RDTSC speculation. This is needed when you need to use RDTSC + * (or get_cycles or vread that possibly accesses the TSC) in a defined + * code region. + * + * (Could use an alternative three way for this if there was one.) + */ +static __always_inline void rdtsc_barrier(void) +{ + alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); + alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); +} + +#endif /* _ASM_X86_BARRIER_H */ diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index f654d1bb17fb..11e1152222d0 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -36,4 +36,8 @@ do { \ #endif /* !CONFIG_BUG */ #include <asm-generic/bug.h> + + +extern void show_regs_common(void); + #endif /* _ASM_X86_BUG_H */ diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index 4e12668711e5..9863ee3747da 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -3,6 +3,7 @@ /* Caches aren't brain-dead on the intel. 
*/ #include <asm-generic/cacheflush.h> +#include <asm/special_insns.h> #ifdef CONFIG_X86_PAT /* diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 5f962df30d0f..f27f79abe021 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -84,7 +84,6 @@ extern unsigned int vdso_enabled; (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) #include <asm/processor.h> -#include <asm/system.h> #ifdef CONFIG_X86_32 #include <asm/desc.h> diff --git a/arch/x86/include/asm/exec.h b/arch/x86/include/asm/exec.h new file mode 100644 index 000000000000..54c2e1db274a --- /dev/null +++ b/arch/x86/include/asm/exec.h @@ -0,0 +1 @@ +/* define arch_align_stack() here */ diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h index d09bb03653f0..71ecbcba1a4e 100644 --- a/arch/x86/include/asm/futex.h +++ b/arch/x86/include/asm/futex.h @@ -9,7 +9,6 @@ #include <asm/asm.h> #include <asm/errno.h> #include <asm/processor.h> -#include <asm/system.h> #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ asm volatile("1:\t" insn "\n" \ diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index 7ce0798b1b26..257d9cca214f 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -14,7 +14,6 @@ #include <linux/sched.h> #include <linux/hardirq.h> -#include <asm/system.h> struct pt_regs; struct user_i387_struct; diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index 4d8dcbdfc120..e7d1c194d272 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h @@ -321,4 +321,8 @@ struct kvm_xcrs { __u64 padding[16]; }; +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + #endif /* _ASM_X86_KVM_H */ diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 7b9cfc4878af..c222e1a1b12a 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -176,6 +176,7 @@ struct x86_emulate_ops { void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt); ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr); int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val); + void (*set_rflags)(struct x86_emulate_ctxt *ctxt, ulong val); int (*cpl)(struct x86_emulate_ctxt *ctxt); int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest); int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); @@ -388,7 +389,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt); #define EMULATION_INTERCEPTED 2 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt); int emulator_task_switch(struct x86_emulate_ctxt *ctxt, - u16 tss_selector, int reason, + u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code); int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq); #endif /* _ASM_X86_KVM_X86_EMULATE_H */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 52d6640a5ca1..e216ba066e79 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -29,7 +29,7 @@ #include <asm/msr-index.h> #define KVM_MAX_VCPUS 254 -#define KVM_SOFT_MAX_VCPUS 64 +#define KVM_SOFT_MAX_VCPUS 160 #define KVM_MEMORY_SLOTS 32 /* memory slots that does not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 4 @@ -181,13 +181,6 @@ struct kvm_mmu_memory_cache { void *objects[KVM_NR_MEM_OBJS]; }; -#define NR_PTE_CHAIN_ENTRIES 5 - -struct kvm_pte_chain { - u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES]; - struct hlist_node link; -}; - /* * kvm_mmu_page_role, 
below, is defined as: * @@ -427,12 +420,16 @@ struct kvm_vcpu_arch { u64 last_guest_tsc; u64 last_kernel_ns; - u64 last_tsc_nsec; - u64 last_tsc_write; - u32 virtual_tsc_khz; + u64 last_host_tsc; + u64 tsc_offset_adjustment; + u64 this_tsc_nsec; + u64 this_tsc_write; + u8 this_tsc_generation; bool tsc_catchup; - u32 tsc_catchup_mult; - s8 tsc_catchup_shift; + bool tsc_always_catchup; + s8 virtual_tsc_shift; + u32 virtual_tsc_mult; + u32 virtual_tsc_khz; atomic_t nmi_queued; /* unprocessed asynchronous NMIs */ unsigned nmi_pending; /* NMI queued after currently running handler */ @@ -478,6 +475,21 @@ struct kvm_vcpu_arch { u32 id; bool send_user_only; } apf; + + /* OSVW MSRs (AMD only) */ + struct { + u64 length; + u64 status; + } osvw; +}; + +struct kvm_lpage_info { + unsigned long rmap_pde; + int write_count; +}; + +struct kvm_arch_memory_slot { + struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1]; }; struct kvm_arch { @@ -511,8 +523,12 @@ struct kvm_arch { s64 kvmclock_offset; raw_spinlock_t tsc_write_lock; u64 last_tsc_nsec; - u64 last_tsc_offset; u64 last_tsc_write; + u32 last_tsc_khz; + u64 cur_tsc_nsec; + u64 cur_tsc_write; + u64 cur_tsc_offset; + u8 cur_tsc_generation; struct kvm_xen_hvm_config xen_hvm_config; @@ -644,7 +660,7 @@ struct kvm_x86_ops { u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); int (*get_lpage_level)(void); bool (*rdtscp_supported)(void); - void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment); + void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host); void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); @@ -652,7 +668,7 @@ struct kvm_x86_ops { bool (*has_wbinvd_exit)(void); - void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz); + void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale); void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc); @@ -674,6 +690,17 @@ struct kvm_arch_async_pf { extern struct kvm_x86_ops *kvm_x86_ops; +static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, + s64 adjustment) +{ + kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false); +} + +static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) +{ + kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true); +} + int kvm_mmu_module_init(void); void kvm_mmu_module_exit(void); @@ -741,8 +768,8 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu); void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg); -int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason, - bool has_error_code, u32 error_code); +int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, + int reason, bool has_error_code, u32 error_code); int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h index 9cdae5d47e8f..c8bed0da434a 100644 --- a/arch/x86/include/asm/local.h +++ b/arch/x86/include/asm/local.h @@ -3,7 +3,6 @@ #include <linux/percpu.h> -#include <asm/system.h> #include <linux/atomic.h> #include <asm/asm.h> diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h index 0e8e85bb7c51..d354fb781c57 100644 --- a/arch/x86/include/asm/mc146818rtc.h +++ b/arch/x86/include/asm/mc146818rtc.h @@ -5,7 +5,6 @@ #define _ASM_X86_MC146818RTC_H 
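The kvm_host.h hunk above widens the adjust_tsc_offset op with a "host" flag and adds two static inline wrappers that pin it. As a hedged, self-contained model of that wrapper pattern — a userspace mock, not KVM code — the shape is:

	#include <stdbool.h>
	#include <stdio.h>

	struct vcpu { long long tsc_offset; };

	/* One ops hook takes the flag; two inlines fix it for callers. */
	struct ops {
		void (*adjust_tsc_offset)(struct vcpu *v, long long adj, bool host);
	};

	static void adjust(struct vcpu *v, long long adj, bool host)
	{
		v->tsc_offset += adj;
		printf("%s-initiated adjustment: %+lld\n", host ? "host" : "guest", adj);
	}

	static struct ops x86_ops = { .adjust_tsc_offset = adjust };

	static inline void adjust_tsc_offset_guest(struct vcpu *v, long long adj)
	{
		x86_ops.adjust_tsc_offset(v, adj, false);
	}

	static inline void adjust_tsc_offset_host(struct vcpu *v, long long adj)
	{
		x86_ops.adjust_tsc_offset(v, adj, true);
	}

	int main(void)
	{
		struct vcpu v = { 0 };
		adjust_tsc_offset_guest(&v, 1000);
		adjust_tsc_offset_host(&v, -250);
		printf("net offset: %lld\n", v.tsc_offset);
		return 0;
	}

The point of the split is only that callers state intent (guest-visible write vs. host-side compensation) without every call site passing a bare boolean.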
#include <asm/io.h> -#include <asm/system.h> #include <asm/processor.h> #include <linux/mc146818rtc.h> diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index bce688d54c12..e21fdd10479f 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -55,7 +55,6 @@ extern unsigned long init_memory_mapping(unsigned long start, unsigned long end); extern void initmem_init(void); -extern void free_initmem(void); #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index e8fb2c7a5f4f..2291895b1836 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -23,6 +23,7 @@ #define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16) #define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17) #define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18) +#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL (1ULL << 19) #define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20) #define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21) #define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 5533b30cac07..a19542c1685e 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -14,13 +14,13 @@ struct mm_struct; #include <asm/sigcontext.h> #include <asm/current.h> #include <asm/cpufeature.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/pgtable_types.h> #include <asm/percpu.h> #include <asm/msr.h> #include <asm/desc_defs.h> #include <asm/nops.h> +#include <asm/special_insns.h> #include <linux/personality.h> #include <linux/cpumask.h> @@ -29,6 +29,15 @@ struct mm_struct; #include <linux/math64.h> #include <linux/init.h> #include <linux/err.h> +#include <linux/irqflags.h> + +/* + * We handle most unaligned accesses in hardware. On the other hand + * unaligned DMA can be quite expensive on some Nehalem processors. + * + * Based on this we disable the IP header alignment in network drivers. + */ +#define NET_IP_ALIGN 0 #define HBP_NUM 4 /* @@ -959,4 +968,24 @@ extern bool cpu_has_amd_erratum(const int *); #define cpu_has_amd_erratum(x) (false) #endif /* CONFIG_CPU_SUP_AMD */ +#ifdef CONFIG_X86_32 +/* + * disable hlt during certain critical i/o operations + */ +#define HAVE_DISABLE_HLT +#endif + +void disable_hlt(void); +void enable_hlt(void); + +void cpu_idle_wait(void); + +extern unsigned long arch_align_stack(unsigned long sp); +extern void free_init_pages(char *what, unsigned long begin, unsigned long end); + +void default_idle(void); +bool set_pm_idle_to_default(void); + +void stop_this_cpu(void *dummy); + #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 5e641715c3fe..165466233ab0 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -212,7 +212,61 @@ #ifdef __KERNEL__ #ifndef __ASSEMBLY__ extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10]; -#endif -#endif + +/* + * Load a segment. Fall back on loading the zero + * segment if something goes wrong.. 
+ */ +#define loadsegment(seg, value) \ +do { \ + unsigned short __val = (value); \ + \ + asm volatile(" \n" \ + "1: movl %k0,%%" #seg " \n" \ + \ + ".section .fixup,\"ax\" \n" \ + "2: xorl %k0,%k0 \n" \ + " jmp 1b \n" \ + ".previous \n" \ + \ + _ASM_EXTABLE(1b, 2b) \ + \ + : "+r" (__val) : : "memory"); \ +} while (0) + +/* + * Save a segment register away + */ +#define savesegment(seg, value) \ + asm("mov %%" #seg ",%0":"=r" (value) : : "memory") + +/* + * x86_32 user gs accessors. + */ +#ifdef CONFIG_X86_32 +#ifdef CONFIG_X86_32_LAZY_GS +#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;}) +#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) +#define task_user_gs(tsk) ((tsk)->thread.gs) +#define lazy_save_gs(v) savesegment(gs, (v)) +#define lazy_load_gs(v) loadsegment(gs, (v)) +#else /* X86_32_LAZY_GS */ +#define get_user_gs(regs) (u16)((regs)->gs) +#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0) +#define task_user_gs(tsk) (task_pt_regs(tsk)->gs) +#define lazy_save_gs(v) do { } while (0) +#define lazy_load_gs(v) do { } while (0) +#endif /* X86_32_LAZY_GS */ +#endif /* X86_32 */ + +static inline unsigned long get_limit(unsigned long segment) +{ + unsigned long __limit; + asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); + return __limit + 1; +} + +#endif /* !__ASSEMBLY__ */ +#endif /* __KERNEL__ */ #endif /* _ASM_X86_SEGMENT_H */ diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h new file mode 100644 index 000000000000..41fc93a2e225 --- /dev/null +++ b/arch/x86/include/asm/special_insns.h @@ -0,0 +1,199 @@ +#ifndef _ASM_X86_SPECIAL_INSNS_H +#define _ASM_X86_SPECIAL_INSNS_H + + +#ifdef __KERNEL__ + +static inline void native_clts(void) +{ + asm volatile("clts"); +} + +/* + * Volatile isn't enough to prevent the compiler from reordering the + * read/write functions for the control registers and messing everything up. + * A memory clobber would solve the problem, but would prevent reordering of + * all loads stores around it, which can hurt performance. Solution is to + * use a variable and mimic reads and writes to it to enforce serialization + */ +static unsigned long __force_order; + +static inline unsigned long native_read_cr0(void) +{ + unsigned long val; + asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); + return val; +} + +static inline void native_write_cr0(unsigned long val) +{ + asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); +} + +static inline unsigned long native_read_cr2(void) +{ + unsigned long val; + asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); + return val; +} + +static inline void native_write_cr2(unsigned long val) +{ + asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); +} + +static inline unsigned long native_read_cr3(void) +{ + unsigned long val; + asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); + return val; +} + +static inline void native_write_cr3(unsigned long val) +{ + asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); +} + +static inline unsigned long native_read_cr4(void) +{ + unsigned long val; + asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); + return val; +} + +static inline unsigned long native_read_cr4_safe(void) +{ + unsigned long val; + /* This could fault if %cr4 does not exist. In x86_64, a cr4 always + * exists, so it will never fail. 
*/ +#ifdef CONFIG_X86_32 + asm volatile("1: mov %%cr4, %0\n" + "2:\n" + _ASM_EXTABLE(1b, 2b) + : "=r" (val), "=m" (__force_order) : "0" (0)); +#else + val = native_read_cr4(); +#endif + return val; +} + +static inline void native_write_cr4(unsigned long val) +{ + asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order)); +} + +#ifdef CONFIG_X86_64 +static inline unsigned long native_read_cr8(void) +{ + unsigned long cr8; + asm volatile("movq %%cr8,%0" : "=r" (cr8)); + return cr8; +} + +static inline void native_write_cr8(unsigned long val) +{ + asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); +} +#endif + +static inline void native_wbinvd(void) +{ + asm volatile("wbinvd": : :"memory"); +} + +extern void native_load_gs_index(unsigned); + +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#else + +static inline unsigned long read_cr0(void) +{ + return native_read_cr0(); +} + +static inline void write_cr0(unsigned long x) +{ + native_write_cr0(x); +} + +static inline unsigned long read_cr2(void) +{ + return native_read_cr2(); +} + +static inline void write_cr2(unsigned long x) +{ + native_write_cr2(x); +} + +static inline unsigned long read_cr3(void) +{ + return native_read_cr3(); +} + +static inline void write_cr3(unsigned long x) +{ + native_write_cr3(x); +} + +static inline unsigned long read_cr4(void) +{ + return native_read_cr4(); +} + +static inline unsigned long read_cr4_safe(void) +{ + return native_read_cr4_safe(); +} + +static inline void write_cr4(unsigned long x) +{ + native_write_cr4(x); +} + +static inline void wbinvd(void) +{ + native_wbinvd(); +} + +#ifdef CONFIG_X86_64 + +static inline unsigned long read_cr8(void) +{ + return native_read_cr8(); +} + +static inline void write_cr8(unsigned long x) +{ + native_write_cr8(x); +} + +static inline void load_gs_index(unsigned selector) +{ + native_load_gs_index(selector); +} + +#endif + +/* Clear the 'TS' bit */ +static inline void clts(void) +{ + native_clts(); +} + +#endif/* CONFIG_PARAVIRT */ + +#define stts() write_cr0(read_cr0() | X86_CR0_TS) + +static inline void clflush(volatile void *__p) +{ + asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); +} + +#define nop() asm volatile ("nop") + + +#endif /* __KERNEL__ */ + +#endif /* _ASM_X86_SPECIAL_INSNS_H */ diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 157517763565..b5d9533d2c38 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -38,7 +38,6 @@ #include <asm/tsc.h> #include <asm/processor.h> #include <asm/percpu.h> -#include <asm/system.h> #include <asm/desc.h> #include <linux/random.h> diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h new file mode 100644 index 000000000000..4ec45b3abba1 --- /dev/null +++ b/arch/x86/include/asm/switch_to.h @@ -0,0 +1,129 @@ +#ifndef _ASM_X86_SWITCH_TO_H +#define _ASM_X86_SWITCH_TO_H + +struct task_struct; /* one of the stranger aspects of C forward declarations */ +struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next); +struct tss_struct; +void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, + struct tss_struct *tss); + +#ifdef CONFIG_X86_32 + +#ifdef CONFIG_CC_STACKPROTECTOR +#define __switch_canary \ + "movl %P[task_canary](%[next]), %%ebx\n\t" \ + "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" +#define __switch_canary_oparam \ + , [stack_canary] "=m" (stack_canary.canary) +#define __switch_canary_iparam \ + , [task_canary] "i" 
(offsetof(struct task_struct, stack_canary)) +#else /* CC_STACKPROTECTOR */ +#define __switch_canary +#define __switch_canary_oparam +#define __switch_canary_iparam +#endif /* CC_STACKPROTECTOR */ + +/* + * Saving eflags is important. It switches not only IOPL between tasks, + * it also protects other tasks from NT leaking through sysenter etc. + */ +#define switch_to(prev, next, last) \ +do { \ + /* \ + * Context-switching clobbers all registers, so we clobber \ + * them explicitly, via unused output variables. \ + * (EAX and EBP is not listed because EBP is saved/restored \ + * explicitly for wchan access and EAX is the return value of \ + * __switch_to()) \ + */ \ + unsigned long ebx, ecx, edx, esi, edi; \ + \ + asm volatile("pushfl\n\t" /* save flags */ \ + "pushl %%ebp\n\t" /* save EBP */ \ + "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \ + "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ + "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ + "pushl %[next_ip]\n\t" /* restore EIP */ \ + __switch_canary \ + "jmp __switch_to\n" /* regparm call */ \ + "1:\t" \ + "popl %%ebp\n\t" /* restore EBP */ \ + "popfl\n" /* restore flags */ \ + \ + /* output parameters */ \ + : [prev_sp] "=m" (prev->thread.sp), \ + [prev_ip] "=m" (prev->thread.ip), \ + "=a" (last), \ + \ + /* clobbered output registers: */ \ + "=b" (ebx), "=c" (ecx), "=d" (edx), \ + "=S" (esi), "=D" (edi) \ + \ + __switch_canary_oparam \ + \ + /* input parameters: */ \ + : [next_sp] "m" (next->thread.sp), \ + [next_ip] "m" (next->thread.ip), \ + \ + /* regparm parameters for __switch_to(): */ \ + [prev] "a" (prev), \ + [next] "d" (next) \ + \ + __switch_canary_iparam \ + \ + : /* reloaded segment registers */ \ + "memory"); \ +} while (0) + +#else /* CONFIG_X86_32 */ + +/* frame pointer must be last for get_wchan */ +#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" +#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t" + +#define __EXTRA_CLOBBER \ + , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ + "r12", "r13", "r14", "r15" + +#ifdef CONFIG_CC_STACKPROTECTOR +#define __switch_canary \ + "movq %P[task_canary](%%rsi),%%r8\n\t" \ + "movq %%r8,"__percpu_arg([gs_canary])"\n\t" +#define __switch_canary_oparam \ + , [gs_canary] "=m" (irq_stack_union.stack_canary) +#define __switch_canary_iparam \ + , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) +#else /* CC_STACKPROTECTOR */ +#define __switch_canary +#define __switch_canary_oparam +#define __switch_canary_iparam +#endif /* CC_STACKPROTECTOR */ + +/* Save restore flags to clear handle leaking NT */ +#define switch_to(prev, next, last) \ + asm volatile(SAVE_CONTEXT \ + "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ + "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ + "call __switch_to\n\t" \ + "movq "__percpu_arg([current_task])",%%rsi\n\t" \ + __switch_canary \ + "movq %P[thread_info](%%rsi),%%r8\n\t" \ + "movq %%rax,%%rdi\n\t" \ + "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ + "jnz ret_from_fork\n\t" \ + RESTORE_CONTEXT \ + : "=a" (last) \ + __switch_canary_oparam \ + : [next] "S" (next), [prev] "D" (prev), \ + [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ + [ti_flags] "i" (offsetof(struct thread_info, flags)), \ + [_tif_fork] "i" (_TIF_FORK), \ + [thread_info] "i" (offsetof(struct task_struct, stack)), \ + [current_task] "m" (current_task) \ + __switch_canary_iparam \ + : "memory", "cc" __EXTRA_CLOBBER) + +#endif /* CONFIG_X86_32 */ + +#endif /* _ASM_X86_SWITCH_TO_H */ diff --git a/arch/x86/include/asm/system.h 
b/arch/x86/include/asm/system.h deleted file mode 100644 index 2d2f01ce6dcb..000000000000 --- a/arch/x86/include/asm/system.h +++ /dev/null @@ -1,523 +0,0 @@ -#ifndef _ASM_X86_SYSTEM_H -#define _ASM_X86_SYSTEM_H - -#include <asm/asm.h> -#include <asm/segment.h> -#include <asm/cpufeature.h> -#include <asm/cmpxchg.h> -#include <asm/nops.h> - -#include <linux/kernel.h> -#include <linux/irqflags.h> - -/* entries in ARCH_DLINFO: */ -#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64) -# define AT_VECTOR_SIZE_ARCH 2 -#else /* else it's non-compat x86-64 */ -# define AT_VECTOR_SIZE_ARCH 1 -#endif - -struct task_struct; /* one of the stranger aspects of C forward declarations */ -struct task_struct *__switch_to(struct task_struct *prev, - struct task_struct *next); -struct tss_struct; -void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, - struct tss_struct *tss); -extern void show_regs_common(void); - -#ifdef CONFIG_X86_32 - -#ifdef CONFIG_CC_STACKPROTECTOR -#define __switch_canary \ - "movl %P[task_canary](%[next]), %%ebx\n\t" \ - "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" -#define __switch_canary_oparam \ - , [stack_canary] "=m" (stack_canary.canary) -#define __switch_canary_iparam \ - , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) -#else /* CC_STACKPROTECTOR */ -#define __switch_canary -#define __switch_canary_oparam -#define __switch_canary_iparam -#endif /* CC_STACKPROTECTOR */ - -/* - * Saving eflags is important. It switches not only IOPL between tasks, - * it also protects other tasks from NT leaking through sysenter etc. - */ -#define switch_to(prev, next, last) \ -do { \ - /* \ - * Context-switching clobbers all registers, so we clobber \ - * them explicitly, via unused output variables. 
\ - * (EAX and EBP is not listed because EBP is saved/restored \ - * explicitly for wchan access and EAX is the return value of \ - * __switch_to()) \ - */ \ - unsigned long ebx, ecx, edx, esi, edi; \ - \ - asm volatile("pushfl\n\t" /* save flags */ \ - "pushl %%ebp\n\t" /* save EBP */ \ - "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \ - "movl %[next_sp],%%esp\n\t" /* restore ESP */ \ - "movl $1f,%[prev_ip]\n\t" /* save EIP */ \ - "pushl %[next_ip]\n\t" /* restore EIP */ \ - __switch_canary \ - "jmp __switch_to\n" /* regparm call */ \ - "1:\t" \ - "popl %%ebp\n\t" /* restore EBP */ \ - "popfl\n" /* restore flags */ \ - \ - /* output parameters */ \ - : [prev_sp] "=m" (prev->thread.sp), \ - [prev_ip] "=m" (prev->thread.ip), \ - "=a" (last), \ - \ - /* clobbered output registers: */ \ - "=b" (ebx), "=c" (ecx), "=d" (edx), \ - "=S" (esi), "=D" (edi) \ - \ - __switch_canary_oparam \ - \ - /* input parameters: */ \ - : [next_sp] "m" (next->thread.sp), \ - [next_ip] "m" (next->thread.ip), \ - \ - /* regparm parameters for __switch_to(): */ \ - [prev] "a" (prev), \ - [next] "d" (next) \ - \ - __switch_canary_iparam \ - \ - : /* reloaded segment registers */ \ - "memory"); \ -} while (0) - -/* - * disable hlt during certain critical i/o operations - */ -#define HAVE_DISABLE_HLT -#else - -/* frame pointer must be last for get_wchan */ -#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t" -#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t" - -#define __EXTRA_CLOBBER \ - , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ - "r12", "r13", "r14", "r15" - -#ifdef CONFIG_CC_STACKPROTECTOR -#define __switch_canary \ - "movq %P[task_canary](%%rsi),%%r8\n\t" \ - "movq %%r8,"__percpu_arg([gs_canary])"\n\t" -#define __switch_canary_oparam \ - , [gs_canary] "=m" (irq_stack_union.stack_canary) -#define __switch_canary_iparam \ - , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) -#else /* CC_STACKPROTECTOR */ -#define __switch_canary -#define __switch_canary_oparam -#define __switch_canary_iparam -#endif /* CC_STACKPROTECTOR */ - -/* Save restore flags to clear handle leaking NT */ -#define switch_to(prev, next, last) \ - asm volatile(SAVE_CONTEXT \ - "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ - "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \ - "call __switch_to\n\t" \ - "movq "__percpu_arg([current_task])",%%rsi\n\t" \ - __switch_canary \ - "movq %P[thread_info](%%rsi),%%r8\n\t" \ - "movq %%rax,%%rdi\n\t" \ - "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ - "jnz ret_from_fork\n\t" \ - RESTORE_CONTEXT \ - : "=a" (last) \ - __switch_canary_oparam \ - : [next] "S" (next), [prev] "D" (prev), \ - [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ - [ti_flags] "i" (offsetof(struct thread_info, flags)), \ - [_tif_fork] "i" (_TIF_FORK), \ - [thread_info] "i" (offsetof(struct task_struct, stack)), \ - [current_task] "m" (current_task) \ - __switch_canary_iparam \ - : "memory", "cc" __EXTRA_CLOBBER) -#endif - -#ifdef __KERNEL__ - -extern void native_load_gs_index(unsigned); - -/* - * Load a segment. Fall back on loading the zero - * segment if something goes wrong.. 
- */ -#define loadsegment(seg, value) \ -do { \ - unsigned short __val = (value); \ - \ - asm volatile(" \n" \ - "1: movl %k0,%%" #seg " \n" \ - \ - ".section .fixup,\"ax\" \n" \ - "2: xorl %k0,%k0 \n" \ - " jmp 1b \n" \ - ".previous \n" \ - \ - _ASM_EXTABLE(1b, 2b) \ - \ - : "+r" (__val) : : "memory"); \ -} while (0) - -/* - * Save a segment register away - */ -#define savesegment(seg, value) \ - asm("mov %%" #seg ",%0":"=r" (value) : : "memory") - -/* - * x86_32 user gs accessors. - */ -#ifdef CONFIG_X86_32 -#ifdef CONFIG_X86_32_LAZY_GS -#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;}) -#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) -#define task_user_gs(tsk) ((tsk)->thread.gs) -#define lazy_save_gs(v) savesegment(gs, (v)) -#define lazy_load_gs(v) loadsegment(gs, (v)) -#else /* X86_32_LAZY_GS */ -#define get_user_gs(regs) (u16)((regs)->gs) -#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0) -#define task_user_gs(tsk) (task_pt_regs(tsk)->gs) -#define lazy_save_gs(v) do { } while (0) -#define lazy_load_gs(v) do { } while (0) -#endif /* X86_32_LAZY_GS */ -#endif /* X86_32 */ - -static inline unsigned long get_limit(unsigned long segment) -{ - unsigned long __limit; - asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); - return __limit + 1; -} - -static inline void native_clts(void) -{ - asm volatile("clts"); -} - -/* - * Volatile isn't enough to prevent the compiler from reordering the - * read/write functions for the control registers and messing everything up. - * A memory clobber would solve the problem, but would prevent reordering of - * all loads stores around it, which can hurt performance. Solution is to - * use a variable and mimic reads and writes to it to enforce serialization - */ -static unsigned long __force_order; - -static inline unsigned long native_read_cr0(void) -{ - unsigned long val; - asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); - return val; -} - -static inline void native_write_cr0(unsigned long val) -{ - asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); -} - -static inline unsigned long native_read_cr2(void) -{ - unsigned long val; - asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); - return val; -} - -static inline void native_write_cr2(unsigned long val) -{ - asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); -} - -static inline unsigned long native_read_cr3(void) -{ - unsigned long val; - asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); - return val; -} - -static inline void native_write_cr3(unsigned long val) -{ - asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); -} - -static inline unsigned long native_read_cr4(void) -{ - unsigned long val; - asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); - return val; -} - -static inline unsigned long native_read_cr4_safe(void) -{ - unsigned long val; - /* This could fault if %cr4 does not exist. In x86_64, a cr4 always - * exists, so it will never fail. 
*/ -#ifdef CONFIG_X86_32 - asm volatile("1: mov %%cr4, %0\n" - "2:\n" - _ASM_EXTABLE(1b, 2b) - : "=r" (val), "=m" (__force_order) : "0" (0)); -#else - val = native_read_cr4(); -#endif - return val; -} - -static inline void native_write_cr4(unsigned long val) -{ - asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order)); -} - -#ifdef CONFIG_X86_64 -static inline unsigned long native_read_cr8(void) -{ - unsigned long cr8; - asm volatile("movq %%cr8,%0" : "=r" (cr8)); - return cr8; -} - -static inline void native_write_cr8(unsigned long val) -{ - asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); -} -#endif - -static inline void native_wbinvd(void) -{ - asm volatile("wbinvd": : :"memory"); -} - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else - -static inline unsigned long read_cr0(void) -{ - return native_read_cr0(); -} - -static inline void write_cr0(unsigned long x) -{ - native_write_cr0(x); -} - -static inline unsigned long read_cr2(void) -{ - return native_read_cr2(); -} - -static inline void write_cr2(unsigned long x) -{ - native_write_cr2(x); -} - -static inline unsigned long read_cr3(void) -{ - return native_read_cr3(); -} - -static inline void write_cr3(unsigned long x) -{ - native_write_cr3(x); -} - -static inline unsigned long read_cr4(void) -{ - return native_read_cr4(); -} - -static inline unsigned long read_cr4_safe(void) -{ - return native_read_cr4_safe(); -} - -static inline void write_cr4(unsigned long x) -{ - native_write_cr4(x); -} - -static inline void wbinvd(void) -{ - native_wbinvd(); -} - -#ifdef CONFIG_X86_64 - -static inline unsigned long read_cr8(void) -{ - return native_read_cr8(); -} - -static inline void write_cr8(unsigned long x) -{ - native_write_cr8(x); -} - -static inline void load_gs_index(unsigned selector) -{ - native_load_gs_index(selector); -} - -#endif - -/* Clear the 'TS' bit */ -static inline void clts(void) -{ - native_clts(); -} - -#endif/* CONFIG_PARAVIRT */ - -#define stts() write_cr0(read_cr0() | X86_CR0_TS) - -#endif /* __KERNEL__ */ - -static inline void clflush(volatile void *__p) -{ - asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); -} - -#define nop() asm volatile ("nop") - -void disable_hlt(void); -void enable_hlt(void); - -void cpu_idle_wait(void); - -extern unsigned long arch_align_stack(unsigned long sp); -extern void free_init_pages(char *what, unsigned long begin, unsigned long end); - -void default_idle(void); -bool set_pm_idle_to_default(void); - -void stop_this_cpu(void *dummy); - -/* - * Force strict CPU ordering. - * And yes, this is required on UP too when we're talking - * to devices. - */ -#ifdef CONFIG_X86_32 -/* - * Some non-Intel clones support out of order store. wmb() ceases to be a - * nop for these. - */ -#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) -#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) -#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) -#else -#define mb() asm volatile("mfence":::"memory") -#define rmb() asm volatile("lfence":::"memory") -#define wmb() asm volatile("sfence" ::: "memory") -#endif - -/** - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. 
All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * <programlisting> - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * </programlisting> - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). However, - * the following code, with the same initial values for "a" and "b": - * - * <programlisting> - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * </programlisting> - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. - **/ - -#define read_barrier_depends() do { } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#ifdef CONFIG_X86_PPRO_FENCE -# define smp_rmb() rmb() -#else -# define smp_rmb() barrier() -#endif -#ifdef CONFIG_X86_OOSTORE -# define smp_wmb() wmb() -#else -# define smp_wmb() barrier() -#endif -#define smp_read_barrier_depends() read_barrier_depends() -#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) -#define set_mb(var, value) do { var = value; barrier(); } while (0) -#endif - -/* - * Stop RDTSC speculation. This is needed when you need to use RDTSC - * (or get_cycles or vread that possibly accesses the TSC) in a defined - * code region. - * - * (Could use an alternative three way for this if there was one.) - */ -static __always_inline void rdtsc_barrier(void) -{ - alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); - alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); -} - -/* - * We handle most unaligned accesses in hardware. On the other hand - * unaligned DMA can be quite expensive on some Nehalem processors. - * - * Based on this we disable the IP header alignment in network drivers. 
- */ -#define NET_IP_ALIGN 0 -#endif /* _ASM_X86_SYSTEM_H */ diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 169be8938b96..c0e108e08079 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -5,7 +5,7 @@ #include <linux/sched.h> #include <asm/processor.h> -#include <asm/system.h> +#include <asm/special_insns.h> #ifdef CONFIG_PARAVIRT #include <asm/paravirt.h> diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 15d99153a96d..c91e8b9d588b 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -61,7 +61,7 @@ extern void check_tsc_sync_source(int cpu); extern void check_tsc_sync_target(void); extern int notsc_setup(char *); -extern void save_sched_clock_state(void); -extern void restore_sched_clock_state(void); +extern void tsc_save_sched_clock_state(void); +extern void tsc_restore_sched_clock_state(void); #endif /* _ASM_X86_TSC_H */ diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index e0f9aa16358b..5da71c27cc59 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -16,7 +16,6 @@ #define _ASM_X86_VIRTEX_H #include <asm/processor.h> -#include <asm/system.h> #include <asm/vmx.h> #include <asm/svm.h> diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 517d4767ffdd..baaca8defec8 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -145,9 +145,11 @@ struct x86_init_ops { /** * struct x86_cpuinit_ops - platform specific cpu hotplug setups * @setup_percpu_clockev: set up the per cpu clock event device + * @early_percpu_clock_init: early init of the per cpu clock event device */ struct x86_cpuinit_ops { void (*setup_percpu_clockev)(void); + void (*early_percpu_clock_init)(void); void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node); }; @@ -160,6 +162,8 @@ struct x86_cpuinit_ops { * @is_untracked_pat_range exclude from PAT logic * @nmi_init enable NMI on cpus * @i8042_detect pre-detect if i8042 controller exists + * @save_sched_clock_state: save state for sched_clock() on suspend + * @restore_sched_clock_state: restore state for sched_clock() on resume */ struct x86_platform_ops { unsigned long (*calibrate_tsc)(void); @@ -171,6 +175,8 @@ struct x86_platform_ops { void (*nmi_init)(void); unsigned char (*get_nmi_reason)(void); int (*i8042_detect)(void); + void (*save_sched_clock_state)(void); + void (*restore_sched_clock_state)(void); }; struct pci_dev; diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index f50e7fb2a201..d2b7f27781bc 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -14,6 +14,7 @@ #include <acpi/processor.h> #include <asm/acpi.h> #include <asm/mwait.h> +#include <asm/special_insns.h> /* * Initialize bm_flags based on the CPU cache properties diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 5d56931a15b3..459e78cbf61e 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -231,7 +231,6 @@ #include <linux/syscore_ops.h> #include <linux/i8253.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/desc.h> #include <asm/olpc.h> diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c index 5c0e6533d9bc..2d5454cd2c4f 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mcheck/p5.c @@ -9,7 +9,6 @@ #include <linux/smp.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/mce.h> #include <asm/msr.h> 
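The x86_init.h hunk above adds two new x86_platform_ops callbacks, save_sched_clock_state and restore_sched_clock_state, and the tsc.h hunk renames the old helpers to tsc_save_sched_clock_state()/tsc_restore_sched_clock_state(), which the x86_init.c hunk later in this patch installs as the defaults. As a minimal sketch of how a platform clock driver can override these hooks (the kvmclock.c hunk just below does exactly this), assuming a hypothetical "myclk" driver whose symbols are not part of this patch:

#include <linux/init.h>
#include <asm/x86_init.h>

/* Hypothetical example; the myclk_* symbols do not exist in the tree. */
static void myclk_save_sched_clock_state(void)
{
	/* A host-maintained clock has nothing to stash across suspend. */
}

static void myclk_restore_sched_clock_state(void)
{
	/* E.g. re-register the per-cpu clock area with the hypervisor here. */
}

void __init myclk_clock_init(void)
{
	x86_platform.save_sched_clock_state = myclk_save_sched_clock_state;
	x86_platform.restore_sched_clock_state = myclk_restore_sched_clock_state;
}

Keeping the sched_clock() suspend/resume handling behind x86_platform instead of hard-wiring the TSC helpers is what lets kvmclock simply re-register its clock on resume in the hunk that follows.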
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 67bb17a37a0a..47a1870279aa 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -25,7 +25,6 @@ #include <linux/cpu.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/apic.h> #include <asm/idle.h> #include <asm/mce.h> diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c index 54060f565974..2d7998fb628c 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mcheck/winchip.c @@ -8,7 +8,6 @@ #include <linux/init.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/mce.h> #include <asm/msr.h> diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 97b26356e9ee..75772ae6c65f 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -12,7 +12,6 @@ #include <asm/processor-flags.h> #include <asm/cpufeature.h> #include <asm/tlbflush.h> -#include <asm/system.h> #include <asm/mtrr.h> #include <asm/msr.h> #include <asm/pat.h> diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index a524353d93f2..39472dd2323f 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -43,7 +43,6 @@ #include <asm/processor.h> #include <asm/msr.h> -#include <asm/system.h> static struct class *cpuid_class; diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 610485223bdb..36d1853e91af 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -15,7 +15,6 @@ #include <linux/delay.h> #include <linux/atomic.h> -#include <asm/system.h> #include <asm/timer.h> #include <asm/hw_irq.h> #include <asm/pgtable.h> diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 43e2b1cff0a7..6d5fc8cfd5d6 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -16,7 +16,6 @@ #include <linux/delay.h> #include <linux/atomic.h> -#include <asm/system.h> #include <asm/timer.h> #include <asm/hw_irq.h> #include <asm/pgtable.h> diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index fdc37b3d0ce3..db6720edfdd0 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -46,7 +46,6 @@ #include <asm/debugreg.h> #include <asm/apicdef.h> -#include <asm/system.h> #include <asm/apic.h> #include <asm/nmi.h> diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 44842d756b29..f8492da65bfc 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -136,6 +136,15 @@ int kvm_register_clock(char *txt) return ret; } +static void kvm_save_sched_clock_state(void) +{ +} + +static void kvm_restore_sched_clock_state(void) +{ + kvm_register_clock("primary cpu clock, resume"); +} + #ifdef CONFIG_X86_LOCAL_APIC static void __cpuinit kvm_setup_secondary_clock(void) { @@ -144,8 +153,6 @@ static void __cpuinit kvm_setup_secondary_clock(void) * we shouldn't fail. 
*/ WARN_ON(kvm_register_clock("secondary cpu clock")); - /* ok, done with our trickery, call native */ - setup_secondary_APIC_clock(); } #endif @@ -194,9 +201,11 @@ void __init kvmclock_init(void) x86_platform.get_wallclock = kvm_get_wallclock; x86_platform.set_wallclock = kvm_set_wallclock; #ifdef CONFIG_X86_LOCAL_APIC - x86_cpuinit.setup_percpu_clockev = + x86_cpuinit.early_percpu_clock_init = kvm_setup_secondary_clock; #endif + x86_platform.save_sched_clock_state = kvm_save_sched_clock_state; + x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state; machine_ops.shutdown = kvm_shutdown; #ifdef CONFIG_KEXEC machine_ops.crash_shutdown = kvm_crash_shutdown; diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index ea697263b373..ebc987398923 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -15,7 +15,6 @@ #include <linux/vmalloc.h> #include <linux/uaccess.h> -#include <asm/system.h> #include <asm/ldt.h> #include <asm/desc.h> #include <asm/mmu_context.h> diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index a3fa43ba5d3b..5b19e4d78b00 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -23,7 +23,6 @@ #include <asm/apic.h> #include <asm/cpufeature.h> #include <asm/desc.h> -#include <asm/system.h> #include <asm/cacheflush.h> #include <asm/debugreg.h> diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c index 177183cbb6ae..7eb1e2b97827 100644 --- a/arch/x86/kernel/mca_32.c +++ b/arch/x86/kernel/mca_32.c @@ -43,7 +43,6 @@ #include <linux/mca.h> #include <linux/kprobes.h> #include <linux/slab.h> -#include <asm/system.h> #include <asm/io.h> #include <linux/proc_fs.h> #include <linux/mman.h> diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 925179f871de..f21fd94ac897 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -26,7 +26,6 @@ #include <linux/gfp.h> #include <linux/jump_label.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/pgtable.h> diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 96356762a51d..eb113693f043 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -40,7 +40,6 @@ #include <asm/processor.h> #include <asm/msr.h> -#include <asm/system.h> static struct class *msr_class; diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 9c57c02e54f6..ab137605e694 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -38,6 +38,7 @@ #include <asm/apic.h> #include <asm/tlbflush.h> #include <asm/timer.h> +#include <asm/special_insns.h> /* nop stub */ void _paravirt_nop(void) diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 726494b58345..6ac5782f4d6b 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -42,7 +42,6 @@ #include <asm/calgary.h> #include <asm/tce.h> #include <asm/pci-direct.h> -#include <asm/system.h> #include <asm/dma.h> #include <asm/rio.h> #include <asm/bios_ebda.h> diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 14baf78d5a1f..9b24f36eb55f 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -15,7 +15,6 @@ #include <trace/events/power.h> #include <linux/hw_breakpoint.h> #include <asm/cpu.h> -#include <asm/system.h> #include <asm/apic.h> #include <asm/syscalls.h> #include <asm/idle.h> diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 9d7d4842bfaf..aae4f4bbbe88 100644 --- 
a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -41,7 +41,6 @@ #include <linux/cpuidle.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/ldt.h> #include <asm/processor.h> #include <asm/i387.h> @@ -59,6 +58,7 @@ #include <asm/syscalls.h> #include <asm/debugreg.h> #include <asm/nmi.h> +#include <asm/switch_to.h> asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 292da13fc5aa..61270e8d428a 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -40,7 +40,6 @@ #include <linux/cpuidle.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/i387.h> #include <asm/fpu-internal.h> @@ -53,6 +52,7 @@ #include <asm/syscalls.h> #include <asm/debugreg.h> #include <asm/nmi.h> +#include <asm/switch_to.h> asmlinkage extern void ret_from_fork(void); diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 78f05e438be5..8a634c887652 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -24,7 +24,6 @@ #include <asm/uaccess.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/processor.h> #include <asm/i387.h> #include <asm/fpu-internal.h> diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index ab77aae4ad9b..1a2901562059 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -90,7 +90,6 @@ #include <asm/processor.h> #include <asm/bugs.h> -#include <asm/system.h> #include <asm/vsyscall.h> #include <asm/cpu.h> #include <asm/desc.h> diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index e578a79a3093..5104a2b685cf 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -255,6 +255,7 @@ notrace static void __cpuinit start_secondary(void *unused) * most necessary things. */ cpu_init(); + x86_cpuinit.early_percpu_clock_init(); preempt_disable(); smp_callin(); diff --git a/arch/x86/kernel/tce_64.c b/arch/x86/kernel/tce_64.c index 9e540fee7009..ab40954e113e 100644 --- a/arch/x86/kernel/tce_64.c +++ b/arch/x86/kernel/tce_64.c @@ -34,6 +34,7 @@ #include <asm/tce.h> #include <asm/calgary.h> #include <asm/proto.h> +#include <asm/cacheflush.h> /* flush a tce at 'tceaddr' to main memory */ static inline void flush_tce(void* tceaddr) diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index 6bb7b8579e70..73920e4c6dc5 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c @@ -6,7 +6,6 @@ #include <asm/uaccess.h> #include <asm/desc.h> -#include <asm/system.h> #include <asm/ldt.h> #include <asm/processor.h> #include <asm/proto.h> diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ec61d4c1b93b..860f126ca233 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -50,7 +50,6 @@ #include <asm/processor.h> #include <asm/debugreg.h> #include <linux/atomic.h> -#include <asm/system.h> #include <asm/traps.h> #include <asm/desc.h> #include <asm/i387.h> diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 183c5925a9fe..899a03f2d181 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -630,7 +630,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) static unsigned long long cyc2ns_suspend; -void save_sched_clock_state(void) +void tsc_save_sched_clock_state(void) { if (!sched_clock_stable) return; @@ -646,7 +646,7 @@ void save_sched_clock_state(void) * that sched_clock() continues from the point where it was left off during * suspend. 
*/ -void restore_sched_clock_state(void) +void tsc_restore_sched_clock_state(void) { unsigned long long offset; unsigned long flags; diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 947a06ccc673..e9f265fd79ae 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -91,6 +91,7 @@ struct x86_init_ops x86_init __initdata = { }; struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { + .early_percpu_clock_init = x86_init_noop, .setup_percpu_clockev = setup_secondary_APIC_clock, .fixup_cpu_id = x86_default_fixup_cpu_id, }; @@ -107,7 +108,9 @@ struct x86_platform_ops x86_platform = { .is_untracked_pat_range = is_ISA_range, .nmi_init = default_nmi_init, .get_nmi_reason = default_get_nmi_reason, - .i8042_detect = default_i8042_detect + .i8042_detect = default_i8042_detect, + .save_sched_clock_state = tsc_save_sched_clock_state, + .restore_sched_clock_state = tsc_restore_sched_clock_state, }; EXPORT_SYMBOL_GPL(x86_platform); diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 89b02bfaaca5..9fed5bedaad6 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -236,7 +236,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, const u32 kvm_supported_word6_x86_features = F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | - F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) | + F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) | 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM); /* cpuid 0xC0000001.edx */ diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 5b97e1797a6d..26d1fb437eb5 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -43,4 +43,12 @@ static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu) return best && (best->ebx & bit(X86_FEATURE_FSGSBASE)); } +static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *best; + + best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); + return best && (best->ecx & bit(X86_FEATURE_OSVW)); +} + #endif diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 0982507b962a..83756223f8aa 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -57,6 +57,7 @@ #define OpDS 23ull /* DS */ #define OpFS 24ull /* FS */ #define OpGS 25ull /* GS */ +#define OpMem8 26ull /* 8-bit zero extended memory operand */ #define OpBits 5 /* Width of operand field */ #define OpMask ((1ull << OpBits) - 1) @@ -101,6 +102,7 @@ #define SrcAcc (OpAcc << SrcShift) #define SrcImmU16 (OpImmU16 << SrcShift) #define SrcDX (OpDX << SrcShift) +#define SrcMem8 (OpMem8 << SrcShift) #define SrcMask (OpMask << SrcShift) #define BitOp (1<<11) #define MemAbs (1<<12) /* Memory operand is absolute displacement */ @@ -858,8 +860,7 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, } static void decode_register_operand(struct x86_emulate_ctxt *ctxt, - struct operand *op, - int inhibit_bytereg) + struct operand *op) { unsigned reg = ctxt->modrm_reg; int highbyte_regs = ctxt->rex_prefix == 0; @@ -876,7 +877,7 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt, } op->type = OP_REG; - if ((ctxt->d & ByteOp) && !inhibit_bytereg) { + if (ctxt->d & ByteOp) { op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs); op->bytes = 1; } else { @@ -1151,6 +1152,22 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, return 1; } +static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, + u16 index, struct desc_struct 
*desc) +{ + struct desc_ptr dt; + ulong addr; + + ctxt->ops->get_idt(ctxt, &dt); + + if (dt.size < index * 8 + 7) + return emulate_gp(ctxt, index << 3 | 0x2); + + addr = dt.address + index * 8; + return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, + &ctxt->exception); +} + static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_ptr *dt) { @@ -1227,6 +1244,8 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, seg_desc.type = 3; seg_desc.p = 1; seg_desc.s = 1; + if (ctxt->mode == X86EMUL_MODE_VM86) + seg_desc.dpl = 3; goto load; } @@ -1891,6 +1910,17 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, ss->p = 1; } +static bool vendor_intel(struct x86_emulate_ctxt *ctxt) +{ + u32 eax, ebx, ecx, edx; + + eax = ecx = 0; + return ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx) + && ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx + && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx + && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx; +} + static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) { struct x86_emulate_ops *ops = ctxt->ops; @@ -2007,6 +2037,14 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt) if (ctxt->mode == X86EMUL_MODE_REAL) return emulate_gp(ctxt, 0); + /* + * Not recognized on AMD in compat mode (but is recognized in legacy + * mode). + */ + if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) + && !vendor_intel(ctxt)) + return emulate_ud(ctxt); + /* XXX sysenter/sysexit have not been tested in 64bit mode. * Therefore, we inject an #UD. */ @@ -2306,6 +2344,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, return emulate_gp(ctxt, 0); ctxt->_eip = tss->eip; ctxt->eflags = tss->eflags | 2; + + /* General purpose registers */ ctxt->regs[VCPU_REGS_RAX] = tss->eax; ctxt->regs[VCPU_REGS_RCX] = tss->ecx; ctxt->regs[VCPU_REGS_RDX] = tss->edx; @@ -2328,6 +2368,24 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); /* + * If we're switching between Protected Mode and VM86, we need to make + * sure to update the mode before loading the segment descriptors so + * that the selectors are interpreted correctly. + * + * Need to get rflags to the vcpu struct immediately because it + * influences the CPL which is checked at least when loading the segment + * descriptors and when pushing an error code to the new kernel stack. + * + * TODO Introduce a separate ctxt->ops->set_cpl callback + */ + if (ctxt->eflags & X86_EFLAGS_VM) + ctxt->mode = X86EMUL_MODE_VM86; + else + ctxt->mode = X86EMUL_MODE_PROT32; + + ctxt->ops->set_rflags(ctxt, ctxt->eflags); + + /* * Now load segment descriptors. If fault happenes at this stage * it is handled in a context of new task */ @@ -2401,7 +2459,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, } static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, - u16 tss_selector, int reason, + u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { struct x86_emulate_ops *ops = ctxt->ops; @@ -2423,12 +2481,35 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, /* FIXME: check that next_tss_desc is tss */ - if (reason != TASK_SWITCH_IRET) { - if ((tss_selector & 3) > next_tss_desc.dpl || - ops->cpl(ctxt) > next_tss_desc.dpl) - return emulate_gp(ctxt, 0); + /* + * Check privileges. The three cases are task switch caused by... + * + * 1. jmp/call/int to task gate: Check against DPL of the task gate + * 2. 
Exception/IRQ/iret: No check is performed + * 3. jmp/call to TSS: Check agains DPL of the TSS + */ + if (reason == TASK_SWITCH_GATE) { + if (idt_index != -1) { + /* Software interrupts */ + struct desc_struct task_gate_desc; + int dpl; + + ret = read_interrupt_descriptor(ctxt, idt_index, + &task_gate_desc); + if (ret != X86EMUL_CONTINUE) + return ret; + + dpl = task_gate_desc.dpl; + if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) + return emulate_gp(ctxt, (idt_index << 3) | 0x2); + } + } else if (reason != TASK_SWITCH_IRET) { + int dpl = next_tss_desc.dpl; + if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) + return emulate_gp(ctxt, tss_selector); } + desc_limit = desc_limit_scaled(&next_tss_desc); if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || @@ -2481,7 +2562,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, } int emulator_task_switch(struct x86_emulate_ctxt *ctxt, - u16 tss_selector, int reason, + u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { int rc; @@ -2489,7 +2570,7 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt, ctxt->_eip = ctxt->eip; ctxt->dst.type = OP_NONE; - rc = emulator_do_task_switch(ctxt, tss_selector, reason, + rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, has_error_code, error_code); if (rc == X86EMUL_CONTINUE) @@ -3514,13 +3595,13 @@ static struct opcode twobyte_table[256] = { I(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr), I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg), I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg), - D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), + D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xB8 - 0xBF */ N, N, G(BitOp, group8), I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr), - D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), + D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xC0 - 0xCF */ D2bv(DstMem | SrcReg | ModRM | Lock), N, D(DstMem | SrcReg | ModRM | Mov), @@ -3602,9 +3683,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, switch (d) { case OpReg: - decode_register_operand(ctxt, op, - op == &ctxt->dst && - ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7)); + decode_register_operand(ctxt, op); break; case OpImmUByte: rc = decode_imm(ctxt, op, 1, false); @@ -3656,6 +3735,9 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, case OpImm: rc = decode_imm(ctxt, op, imm_size(ctxt), true); break; + case OpMem8: + ctxt->memop.bytes = 1; + goto mem_common; case OpMem16: ctxt->memop.bytes = 2; goto mem_common; diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index b6a73537e1ef..81cf4fa4a2be 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -307,6 +307,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val) if (val & 0x10) { s->init4 = val & 1; s->last_irr = 0; + s->irr &= s->elcr; s->imr = 0; s->priority_add = 0; s->special_mask = 0; diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 31bfc6927bc0..858432287ab6 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -433,7 +433,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, break; case APIC_DM_INIT: - if (level) { + if (!trig_mode || level) { result = 1; vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 
kvm_make_request(KVM_REQ_EVENT, vcpu); @@ -731,7 +731,7 @@ static void start_apic_timer(struct kvm_lapic *apic) u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline; u64 ns = 0; struct kvm_vcpu *vcpu = apic->vcpu; - unsigned long this_tsc_khz = vcpu_tsc_khz(vcpu); + unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz; unsigned long flags; if (unlikely(!tscdeadline || !this_tsc_khz)) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 224b02c3cda9..4cb164268846 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -688,9 +688,8 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, { unsigned long idx; - idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - - (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); - return &slot->lpage_info[level - 2][idx]; + idx = gfn_to_index(gfn, slot->base_gfn, level); + return &slot->arch.lpage_info[level - 2][idx]; } static void account_shadowed(struct kvm *kvm, gfn_t gfn) @@ -946,7 +945,7 @@ static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn) } } -static unsigned long *__gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level, +static unsigned long *__gfn_to_rmap(gfn_t gfn, int level, struct kvm_memory_slot *slot) { struct kvm_lpage_info *linfo; @@ -966,7 +965,7 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) struct kvm_memory_slot *slot; slot = gfn_to_memslot(kvm, gfn); - return __gfn_to_rmap(kvm, gfn, level, slot); + return __gfn_to_rmap(gfn, level, slot); } static bool rmap_can_add(struct kvm_vcpu *vcpu) @@ -988,7 +987,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) return pte_list_add(vcpu, spte, rmapp); } -static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte) +static u64 *rmap_next(unsigned long *rmapp, u64 *spte) { return pte_list_next(rmapp, spte); } @@ -1018,8 +1017,8 @@ int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn, u64 *spte; int i, write_protected = 0; - rmapp = __gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL, slot); - spte = rmap_next(kvm, rmapp, NULL); + rmapp = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot); + spte = rmap_next(rmapp, NULL); while (spte) { BUG_ON(!(*spte & PT_PRESENT_MASK)); rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte); @@ -1027,14 +1026,14 @@ int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn, mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK); write_protected = 1; } - spte = rmap_next(kvm, rmapp, spte); + spte = rmap_next(rmapp, spte); } /* check for huge page mappings */ for (i = PT_DIRECTORY_LEVEL; i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { - rmapp = __gfn_to_rmap(kvm, gfn, i, slot); - spte = rmap_next(kvm, rmapp, NULL); + rmapp = __gfn_to_rmap(gfn, i, slot); + spte = rmap_next(rmapp, NULL); while (spte) { BUG_ON(!(*spte & PT_PRESENT_MASK)); BUG_ON(!is_large_pte(*spte)); @@ -1045,7 +1044,7 @@ int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn, spte = NULL; write_protected = 1; } - spte = rmap_next(kvm, rmapp, spte); + spte = rmap_next(rmapp, spte); } } @@ -1066,7 +1065,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 *spte; int need_tlb_flush = 0; - while ((spte = rmap_next(kvm, rmapp, NULL))) { + while ((spte = rmap_next(rmapp, NULL))) { BUG_ON(!(*spte & PT_PRESENT_MASK)); rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte); drop_spte(kvm, spte); @@ -1085,14 +1084,14 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, WARN_ON(pte_huge(*ptep)); new_pfn = pte_pfn(*ptep); - spte = rmap_next(kvm, rmapp, NULL); + spte = rmap_next(rmapp, 
NULL); while (spte) { BUG_ON(!is_shadow_present_pte(*spte)); rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte); need_flush = 1; if (pte_write(*ptep)) { drop_spte(kvm, spte); - spte = rmap_next(kvm, rmapp, NULL); + spte = rmap_next(rmapp, NULL); } else { new_spte = *spte &~ (PT64_BASE_ADDR_MASK); new_spte |= (u64)new_pfn << PAGE_SHIFT; @@ -1102,7 +1101,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, new_spte &= ~shadow_accessed_mask; mmu_spte_clear_track_bits(spte); mmu_spte_set(spte, new_spte); - spte = rmap_next(kvm, rmapp, spte); + spte = rmap_next(rmapp, spte); } } if (need_flush) @@ -1176,7 +1175,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, if (!shadow_accessed_mask) return kvm_unmap_rmapp(kvm, rmapp, data); - spte = rmap_next(kvm, rmapp, NULL); + spte = rmap_next(rmapp, NULL); while (spte) { int _young; u64 _spte = *spte; @@ -1186,7 +1185,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, young = 1; clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte); } - spte = rmap_next(kvm, rmapp, spte); + spte = rmap_next(rmapp, spte); } return young; } @@ -1205,7 +1204,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, if (!shadow_accessed_mask) goto out; - spte = rmap_next(kvm, rmapp, NULL); + spte = rmap_next(rmapp, NULL); while (spte) { u64 _spte = *spte; BUG_ON(!(_spte & PT_PRESENT_MASK)); @@ -1214,7 +1213,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, young = 1; break; } - spte = rmap_next(kvm, rmapp, spte); + spte = rmap_next(rmapp, spte); } out: return young; @@ -1391,11 +1390,6 @@ struct kvm_mmu_pages { unsigned int nr; }; -#define for_each_unsync_children(bitmap, idx) \ - for (idx = find_first_bit(bitmap, 512); \ - idx < 512; \ - idx = find_next_bit(bitmap, 512, idx+1)) - static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, int idx) { @@ -1417,7 +1411,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp, { int i, ret, nr_unsync_leaf = 0; - for_each_unsync_children(sp->unsync_child_bitmap, i) { + for_each_set_bit(i, sp->unsync_child_bitmap, 512) { struct kvm_mmu_page *child; u64 ent = sp->spt[i]; @@ -1803,6 +1797,7 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) { if (is_large_pte(*sptep)) { drop_spte(vcpu->kvm, sptep); + --vcpu->kvm->stat.lpages; kvm_flush_remote_tlbs(vcpu->kvm); } } @@ -3190,15 +3185,14 @@ static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access, #undef PTTYPE static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, - struct kvm_mmu *context, - int level) + struct kvm_mmu *context) { int maxphyaddr = cpuid_maxphyaddr(vcpu); u64 exb_bit_rsvd = 0; if (!context->nx) exb_bit_rsvd = rsvd_bits(63, 63); - switch (level) { + switch (context->root_level) { case PT32_ROOT_LEVEL: /* no rsvd bits for 2 level 4K page table entries */ context->rsvd_bits_mask[0][1] = 0; @@ -3256,8 +3250,9 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level) { context->nx = is_nx(vcpu); + context->root_level = level; - reset_rsvds_bits_mask(vcpu, context, level); + reset_rsvds_bits_mask(vcpu, context); ASSERT(is_pae(vcpu)); context->new_cr3 = paging_new_cr3; @@ -3267,7 +3262,6 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, context->invlpg = paging64_invlpg; context->update_pte = paging64_update_pte; context->free = paging_free; - context->root_level = level; context->shadow_root_level = level; context->root_hpa = INVALID_PAGE; context->direct_map = false; @@ -3284,8 +3278,9 @@ 
static int paging32_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { context->nx = false; + context->root_level = PT32_ROOT_LEVEL; - reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL); + reset_rsvds_bits_mask(vcpu, context); context->new_cr3 = paging_new_cr3; context->page_fault = paging32_page_fault; @@ -3294,7 +3289,6 @@ static int paging32_init_context(struct kvm_vcpu *vcpu, context->sync_page = paging32_sync_page; context->invlpg = paging32_invlpg; context->update_pte = paging32_update_pte; - context->root_level = PT32_ROOT_LEVEL; context->shadow_root_level = PT32E_ROOT_LEVEL; context->root_hpa = INVALID_PAGE; context->direct_map = false; @@ -3325,7 +3319,6 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) context->get_cr3 = get_cr3; context->get_pdptr = kvm_pdptr_read; context->inject_page_fault = kvm_inject_page_fault; - context->nx = is_nx(vcpu); if (!is_paging(vcpu)) { context->nx = false; @@ -3333,19 +3326,19 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) context->root_level = 0; } else if (is_long_mode(vcpu)) { context->nx = is_nx(vcpu); - reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL); - context->gva_to_gpa = paging64_gva_to_gpa; context->root_level = PT64_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, context); + context->gva_to_gpa = paging64_gva_to_gpa; } else if (is_pae(vcpu)) { context->nx = is_nx(vcpu); - reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL); - context->gva_to_gpa = paging64_gva_to_gpa; context->root_level = PT32E_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, context); + context->gva_to_gpa = paging64_gva_to_gpa; } else { context->nx = false; - reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL); - context->gva_to_gpa = paging32_gva_to_gpa; context->root_level = PT32_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, context); + context->gva_to_gpa = paging32_gva_to_gpa; } return 0; @@ -3408,18 +3401,18 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu) g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested; } else if (is_long_mode(vcpu)) { g_context->nx = is_nx(vcpu); - reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL); g_context->root_level = PT64_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, g_context); g_context->gva_to_gpa = paging64_gva_to_gpa_nested; } else if (is_pae(vcpu)) { g_context->nx = is_nx(vcpu); - reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL); g_context->root_level = PT32E_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, g_context); g_context->gva_to_gpa = paging64_gva_to_gpa_nested; } else { g_context->nx = false; - reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL); g_context->root_level = PT32_ROOT_LEVEL; + reset_rsvds_bits_mask(vcpu, g_context); g_context->gva_to_gpa = paging32_gva_to_gpa_nested; } @@ -3555,7 +3548,7 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, * If we're seeing too many writes to a page, it may no longer be a page table, * or we may be forking, in which case it is better to unmap the page. 
*/ -static bool detect_write_flooding(struct kvm_mmu_page *sp, u64 *spte) +static bool detect_write_flooding(struct kvm_mmu_page *sp) { /* * Skip write-flooding detected for the sp whose level is 1, because @@ -3664,10 +3657,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, mask.cr0_wp = mask.cr4_pae = mask.nxe = 1; for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) { - spte = get_written_sptes(sp, gpa, &npte); - if (detect_write_misaligned(sp, gpa, bytes) || - detect_write_flooding(sp, spte)) { + detect_write_flooding(sp)) { zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); ++vcpu->kvm->stat.mmu_flooded; diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c index ea7b4fd34676..715da5a19a5b 100644 --- a/arch/x86/kvm/mmu_audit.c +++ b/arch/x86/kvm/mmu_audit.c @@ -200,13 +200,13 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp) slot = gfn_to_memslot(kvm, sp->gfn); rmapp = &slot->rmap[sp->gfn - slot->base_gfn]; - spte = rmap_next(kvm, rmapp, NULL); + spte = rmap_next(rmapp, NULL); while (spte) { if (is_writable_pte(*spte)) audit_printk(kvm, "shadow page has writable " "mappings: gfn %llx role %x\n", sp->gfn, sp->role.word); - spte = rmap_next(kvm, rmapp, spte); + spte = rmap_next(rmapp, spte); } } diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 7aad5446f393..a73f0c104813 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -33,10 +33,11 @@ static struct kvm_arch_event_perf_mapping { [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES }, [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, + [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES }, }; /* mapping between fixed pmc index and arch_events array */ -int fixed_pmc_events[] = {1, 0, 2}; +int fixed_pmc_events[] = {1, 0, 7}; static bool pmc_is_gp(struct kvm_pmc *pmc) { @@ -210,6 +211,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) unsigned config, type = PERF_TYPE_RAW; u8 event_select, unit_mask; + if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL) + printk_once("kvm pmu: pin control bit is ignored\n"); + pmc->eventsel = eventsel; stop_counter(pmc); @@ -220,7 +224,7 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT; unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; - if (!(event_select & (ARCH_PERFMON_EVENTSEL_EDGE | + if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE | ARCH_PERFMON_EVENTSEL_INV | ARCH_PERFMON_EVENTSEL_CMASK))) { config = find_arch_event(&pmc->vcpu->arch.pmu, event_select, @@ -413,7 +417,7 @@ int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data) struct kvm_pmc *counters; u64 ctr; - pmc &= (3u << 30) - 1; + pmc &= ~(3u << 30); if (!fixed && pmc >= pmu->nr_arch_gp_counters) return 1; if (fixed && pmc >= pmu->nr_arch_fixed_counters) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e385214711cb..e334389e1c75 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -111,6 +111,12 @@ struct nested_state { #define MSRPM_OFFSETS 16 static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; +/* + * Set osvw_len to higher value when updated Revision Guides + * are published and we know what the new status bits are + */ +static uint64_t osvw_len = 4, osvw_status; + struct vcpu_svm { struct kvm_vcpu vcpu; struct vmcb *vmcb; @@ -177,11 +183,13 @@ static bool npt_enabled = true; #else static bool npt_enabled; #endif -static int npt = 1; +/* allow nested paging 
(virtualized MMU) for all guests */ +static int npt = true; module_param(npt, int, S_IRUGO); -static int nested = 1; +/* allow nested virtualization in KVM/SVM */ +static int nested = true; module_param(nested, int, S_IRUGO); static void svm_flush_tlb(struct kvm_vcpu *vcpu); @@ -557,6 +565,27 @@ static void svm_init_erratum_383(void) erratum_383_found = true; } +static void svm_init_osvw(struct kvm_vcpu *vcpu) +{ + /* + * Guests should see errata 400 and 415 as fixed (assuming that + * HLT and IO instructions are intercepted). + */ + vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3; + vcpu->arch.osvw.status = osvw_status & ~(6ULL); + + /* + * By increasing VCPU's osvw.length to 3 we are telling the guest that + * all osvw.status bits inside that length, including bit 0 (which is + * reserved for erratum 298), are valid. However, if host processor's + * osvw_len is 0 then osvw_status[0] carries no information. We need to + * be conservative here and therefore we tell the guest that erratum 298 + * is present (because we really don't know). + */ + if (osvw_len == 0 && boot_cpu_data.x86 == 0x10) + vcpu->arch.osvw.status |= 1; +} + static int has_svm(void) { const char *msg; @@ -623,6 +652,36 @@ static int svm_hardware_enable(void *garbage) __get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT; } + + /* + * Get OSVW bits. + * + * Note that it is possible to have a system with mixed processor + * revisions and therefore different OSVW bits. If bits are not the same + * on different processors then choose the worst case (i.e. if erratum + * is present on one processor and not on another then assume that the + * erratum is present everywhere). + */ + if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) { + uint64_t len, status = 0; + int err; + + len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err); + if (!err) + status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS, + &err); + + if (err) + osvw_status = osvw_len = 0; + else { + if (len < osvw_len) + osvw_len = len; + osvw_status |= status; + osvw_status &= (1ULL << osvw_len) - 1; + } + } else + osvw_status = osvw_len = 0; + svm_init_erratum_383(); amd_pmu_enable_virt(); @@ -910,20 +969,25 @@ static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc) return _tsc; } -static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) +static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) { struct vcpu_svm *svm = to_svm(vcpu); u64 ratio; u64 khz; - /* TSC scaling supported? */ - if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) + /* Guest TSC same frequency as host TSC? */ + if (!scale) { + svm->tsc_ratio = TSC_RATIO_DEFAULT; return; + } - /* TSC-Scaling disabled or guest TSC same frequency as host TSC? */ - if (user_tsc_khz == 0) { - vcpu->arch.virtual_tsc_khz = 0; - svm->tsc_ratio = TSC_RATIO_DEFAULT; + /* TSC scaling supported? 
*/ + if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) { + if (user_tsc_khz > tsc_khz) { + vcpu->arch.tsc_catchup = 1; + vcpu->arch.tsc_always_catchup = 1; + } else + WARN(1, "user requested TSC rate below hardware speed\n"); return; } @@ -938,7 +1002,6 @@ static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) user_tsc_khz); return; } - vcpu->arch.virtual_tsc_khz = user_tsc_khz; svm->tsc_ratio = ratio; } @@ -958,10 +1021,14 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) mark_dirty(svm->vmcb, VMCB_INTERCEPTS); } -static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment) +static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host) { struct vcpu_svm *svm = to_svm(vcpu); + WARN_ON(adjustment < 0); + if (host) + adjustment = svm_scale_tsc(vcpu, adjustment); + svm->vmcb->control.tsc_offset += adjustment; if (is_guest_mode(vcpu)) svm->nested.hsave->control.tsc_offset += adjustment; @@ -1191,6 +1258,8 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) if (kvm_vcpu_is_bsp(&svm->vcpu)) svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; + svm_init_osvw(&svm->vcpu); + return &svm->vcpu; free_page4: @@ -1268,6 +1337,21 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu) wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); } +static void svm_update_cpl(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + int cpl; + + if (!is_protmode(vcpu)) + cpl = 0; + else if (svm->vmcb->save.rflags & X86_EFLAGS_VM) + cpl = 3; + else + cpl = svm->vmcb->save.cs.selector & 0x3; + + svm->vmcb->save.cpl = cpl; +} + static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) { return to_svm(vcpu)->vmcb->save.rflags; @@ -1275,7 +1359,11 @@ static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { + unsigned long old_rflags = to_svm(vcpu)->vmcb->save.rflags; + to_svm(vcpu)->vmcb->save.rflags = rflags; + if ((old_rflags ^ rflags) & X86_EFLAGS_VM) + svm_update_cpl(vcpu); } static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) @@ -1543,9 +1631,7 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; } if (seg == VCPU_SREG_CS) - svm->vmcb->save.cpl - = (svm->vmcb->save.cs.attrib - >> SVM_SELECTOR_DPL_SHIFT) & 3; + svm_update_cpl(vcpu); mark_dirty(svm->vmcb, VMCB_SEG); } @@ -2735,7 +2821,10 @@ static int task_switch_interception(struct vcpu_svm *svm) (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) skip_emulated_instruction(&svm->vcpu); - if (kvm_task_switch(&svm->vcpu, tss_selector, reason, + if (int_type != SVM_EXITINTINFO_TYPE_SOFT) + int_vec = -1; + + if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason, has_error_code, error_code) == EMULATE_FAIL) { svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR; svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 246490f643b6..280751c84724 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -70,9 +70,6 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO); static bool __read_mostly vmm_exclusive = 1; module_param(vmm_exclusive, bool, S_IRUGO); -static bool __read_mostly yield_on_hlt = 1; -module_param(yield_on_hlt, bool, S_IRUGO); - static bool __read_mostly fasteoi = 1; module_param(fasteoi, bool, S_IRUGO); @@ -1655,17 +1652,6 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) vmx_set_interrupt_shadow(vcpu, 0); } -static void 
vmx_clear_hlt(struct kvm_vcpu *vcpu) -{ - /* Ensure that we clear the HLT state in the VMCS. We don't need to - * explicitly skip the instruction because if the HLT state is set, then - * the instruction is already executing and RIP has already been - * advanced. */ - if (!yield_on_hlt && - vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT) - vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); -} - /* * KVM wants to inject page-faults which it got to the guest. This function * checks whether in a nested guest, we need to inject them to L1 or L2. @@ -1678,7 +1664,7 @@ static int nested_pf_handled(struct kvm_vcpu *vcpu) struct vmcs12 *vmcs12 = get_vmcs12(vcpu); /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */ - if (!(vmcs12->exception_bitmap & PF_VECTOR)) + if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR))) return 0; nested_vmx_vmexit(vcpu); @@ -1718,7 +1704,6 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, intr_info |= INTR_TYPE_HARD_EXCEPTION; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); - vmx_clear_hlt(vcpu); } static bool vmx_rdtscp_supported(void) @@ -1817,13 +1802,19 @@ u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu) } /* - * Empty call-back. Needs to be implemented when VMX enables the SET_TSC_KHZ - * ioctl. In this case the call-back should update internal vmx state to make - * the changes effective. + * Engage any workarounds for mis-matched TSC rates. Currently limited to + * software catchup for faster rates on slower CPUs. */ -static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) +static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) { - /* Nothing to do here */ + if (!scale) + return; + + if (user_tsc_khz > tsc_khz) { + vcpu->arch.tsc_catchup = 1; + vcpu->arch.tsc_always_catchup = 1; + } else + WARN(1, "user requested TSC rate below hardware speed\n"); } /* @@ -1850,7 +1841,7 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) } } -static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment) +static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host) { u64 offset = vmcs_read64(TSC_OFFSET); vmcs_write64(TSC_OFFSET, offset + adjustment); @@ -2219,6 +2210,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) msr = find_msr_entry(vmx, msr_index); if (msr) { msr->data = data; + if (msr - vmx->guest_msrs < vmx->save_nmsrs) + kvm_set_shared_msr(msr->index, msr->data, + msr->mask); break; } ret = kvm_set_msr_common(vcpu, msr_index, data); @@ -2399,7 +2393,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) &_pin_based_exec_control) < 0) return -EIO; - min = + min = CPU_BASED_HLT_EXITING | #ifdef CONFIG_X86_64 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | @@ -2414,9 +2408,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) CPU_BASED_INVLPG_EXITING | CPU_BASED_RDPMC_EXITING; - if (yield_on_hlt) - min |= CPU_BASED_HLT_EXITING; - opt = CPU_BASED_TPR_SHADOW | CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; @@ -4003,7 +3994,6 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu) } else intr |= INTR_TYPE_EXT_INTR; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); - vmx_clear_hlt(vcpu); } static void vmx_inject_nmi(struct kvm_vcpu *vcpu) @@ -4035,7 +4025,6 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu) } vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); - vmx_clear_hlt(vcpu); } static int vmx_nmi_allowed(struct 
kvm_vcpu *vcpu) @@ -4672,9 +4661,10 @@ static int handle_task_switch(struct kvm_vcpu *vcpu) bool has_error_code = false; u32 error_code = 0; u16 tss_selector; - int reason, type, idt_v; + int reason, type, idt_v, idt_index; idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); + idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); @@ -4712,8 +4702,9 @@ static int handle_task_switch(struct kvm_vcpu *vcpu) type != INTR_TYPE_NMI_INTR)) skip_emulated_instruction(vcpu); - if (kvm_task_switch(vcpu, tss_selector, reason, - has_error_code, error_code) == EMULATE_FAIL) { + if (kvm_task_switch(vcpu, tss_selector, + type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason, + has_error_code, error_code) == EMULATE_FAIL) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 54696b5f8443..4044ce0bf7c1 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -97,6 +97,10 @@ EXPORT_SYMBOL_GPL(kvm_has_tsc_control); u32 kvm_max_guest_tsc_khz; EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz); +/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */ +static u32 tsc_tolerance_ppm = 250; +module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); + #define KVM_NR_SHARED_MSRS 16 struct kvm_shared_msrs_global { @@ -969,50 +973,51 @@ static inline u64 get_kernel_ns(void) static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); unsigned long max_tsc_khz; -static inline int kvm_tsc_changes_freq(void) +static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) { - int cpu = get_cpu(); - int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && - cpufreq_quick_get(cpu) != 0; - put_cpu(); - return ret; + return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, + vcpu->arch.virtual_tsc_shift); } -u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu) +static u32 adjust_tsc_khz(u32 khz, s32 ppm) { - if (vcpu->arch.virtual_tsc_khz) - return vcpu->arch.virtual_tsc_khz; - else - return __this_cpu_read(cpu_tsc_khz); + u64 v = (u64)khz * (1000000 + ppm); + do_div(v, 1000000); + return v; } -static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) +static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz) { - u64 ret; - - WARN_ON(preemptible()); - if (kvm_tsc_changes_freq()) - printk_once(KERN_WARNING - "kvm: unreliable cycle conversion on adjustable rate TSC\n"); - ret = nsec * vcpu_tsc_khz(vcpu); - do_div(ret, USEC_PER_SEC); - return ret; -} + u32 thresh_lo, thresh_hi; + int use_scaling = 0; -static void kvm_init_tsc_catchup(struct kvm_vcpu *vcpu, u32 this_tsc_khz) -{ /* Compute a scale to convert nanoseconds in TSC cycles */ kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000, - &vcpu->arch.tsc_catchup_shift, - &vcpu->arch.tsc_catchup_mult); + &vcpu->arch.virtual_tsc_shift, + &vcpu->arch.virtual_tsc_mult); + vcpu->arch.virtual_tsc_khz = this_tsc_khz; + + /* + * Compute the variation in TSC rate which is acceptable + * within the range of tolerance and decide if the + * rate being applied is within that bounds of the hardware + * rate. If so, no scaling or compensation need be done. 
+ */ + thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); + thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); + if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) { + pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi); + use_scaling = 1; + } + kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling); } static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) { - u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec, - vcpu->arch.tsc_catchup_mult, - vcpu->arch.tsc_catchup_shift); - tsc += vcpu->arch.last_tsc_write; + u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, + vcpu->arch.virtual_tsc_mult, + vcpu->arch.virtual_tsc_shift); + tsc += vcpu->arch.this_tsc_write; return tsc; } @@ -1021,48 +1026,88 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data) struct kvm *kvm = vcpu->kvm; u64 offset, ns, elapsed; unsigned long flags; - s64 sdiff; + s64 usdiff; raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); ns = get_kernel_ns(); elapsed = ns - kvm->arch.last_tsc_nsec; - sdiff = data - kvm->arch.last_tsc_write; - if (sdiff < 0) - sdiff = -sdiff; + + /* n.b - signed multiplication and division required */ + usdiff = data - kvm->arch.last_tsc_write; +#ifdef CONFIG_X86_64 + usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; +#else + /* do_div() only does unsigned */ + asm("idivl %2; xor %%edx, %%edx" + : "=A"(usdiff) + : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz)); +#endif + do_div(elapsed, 1000); + usdiff -= elapsed; + if (usdiff < 0) + usdiff = -usdiff; /* - * Special case: close write to TSC within 5 seconds of - * another CPU is interpreted as an attempt to synchronize - * The 5 seconds is to accommodate host load / swapping as - * well as any reset of TSC during the boot process. - * - * In that case, for a reliable TSC, we can match TSC offsets, - * or make a best guest using elapsed value. - */ - if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) && - elapsed < 5ULL * NSEC_PER_SEC) { + * Special case: TSC write with a small delta (1 second) of virtual + * cycle time against real time is interpreted as an attempt to + * synchronize the CPU. + * + * For a reliable TSC, we can match TSC offsets, and for an unstable + * TSC, we add elapsed time in this computation. We could let the + * compensation code attempt to catch up if we fall behind, but + * it's better to try to match offsets from the beginning. + */ + if (usdiff < USEC_PER_SEC && + vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { if (!check_tsc_unstable()) { - offset = kvm->arch.last_tsc_offset; + offset = kvm->arch.cur_tsc_offset; pr_debug("kvm: matched tsc offset for %llu\n", data); } else { u64 delta = nsec_to_cycles(vcpu, elapsed); - offset += delta; + data += delta; + offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); pr_debug("kvm: adjusted tsc offset by %llu\n", delta); } - ns = kvm->arch.last_tsc_nsec; + } else { + /* + * We split periods of matched TSC writes into generations. + * For each generation, we track the original measured + * nanosecond time, offset, and write, so if TSCs are in + * sync, we can match exact offset, and if not, we can match + * exact software computation in compute_guest_tsc() + * + * These values are tracked in kvm->arch.cur_xxx variables. 
+ */ + kvm->arch.cur_tsc_generation++; + kvm->arch.cur_tsc_nsec = ns; + kvm->arch.cur_tsc_write = data; + kvm->arch.cur_tsc_offset = offset; + pr_debug("kvm: new tsc generation %u, clock %llu\n", + kvm->arch.cur_tsc_generation, data); } + + /* + * We also track the most recent recorded KHZ, write and time to + * allow the matching interval to be extended at each write. + */ kvm->arch.last_tsc_nsec = ns; kvm->arch.last_tsc_write = data; - kvm->arch.last_tsc_offset = offset; - kvm_x86_ops->write_tsc_offset(vcpu, offset); - raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); + kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; /* Reset of TSC must disable overshoot protection below */ vcpu->arch.hv_clock.tsc_timestamp = 0; - vcpu->arch.last_tsc_write = data; - vcpu->arch.last_tsc_nsec = ns; + vcpu->arch.last_guest_tsc = data; + + /* Keep track of which generation this VCPU has synchronized to */ + vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; + vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; + vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; + + kvm_x86_ops->write_tsc_offset(vcpu, offset); + raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); } + EXPORT_SYMBOL_GPL(kvm_write_tsc); static int kvm_guest_time_update(struct kvm_vcpu *v) @@ -1078,7 +1123,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) local_irq_save(flags); tsc_timestamp = kvm_x86_ops->read_l1_tsc(v); kernel_ns = get_kernel_ns(); - this_tsc_khz = vcpu_tsc_khz(v); + this_tsc_khz = __get_cpu_var(cpu_tsc_khz); if (unlikely(this_tsc_khz == 0)) { local_irq_restore(flags); kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); @@ -1098,7 +1143,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) if (vcpu->tsc_catchup) { u64 tsc = compute_guest_tsc(v, kernel_ns); if (tsc > tsc_timestamp) { - kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp); + adjust_tsc_offset_guest(v, tsc - tsc_timestamp); tsc_timestamp = tsc; } } @@ -1130,7 +1175,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) * observed by the guest and ensure the new system time is greater. 
*/ max_kernel_ns = 0; - if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) { + if (vcpu->hv_clock.tsc_timestamp) { max_kernel_ns = vcpu->last_guest_tsc - vcpu->hv_clock.tsc_timestamp; max_kernel_ns = pvclock_scale_delta(max_kernel_ns, @@ -1504,6 +1549,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) case MSR_K7_HWCR: data &= ~(u64)0x40; /* ignore flush filter disable */ data &= ~(u64)0x100; /* ignore ignne emulation enable */ + data &= ~(u64)0x8; /* ignore TLB cache disable */ if (data != 0) { pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", data); @@ -1676,6 +1722,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) */ pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; + case MSR_AMD64_OSVW_ID_LENGTH: + if (!guest_cpuid_has_osvw(vcpu)) + return 1; + vcpu->arch.osvw.length = data; + break; + case MSR_AMD64_OSVW_STATUS: + if (!guest_cpuid_has_osvw(vcpu)) + return 1; + vcpu->arch.osvw.status = data; + break; default: if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) return xen_hvm_config(vcpu, data); @@ -1960,6 +2016,16 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) */ data = 0xbe702111; break; + case MSR_AMD64_OSVW_ID_LENGTH: + if (!guest_cpuid_has_osvw(vcpu)) + return 1; + data = vcpu->arch.osvw.length; + break; + case MSR_AMD64_OSVW_STATUS: + if (!guest_cpuid_has_osvw(vcpu)) + return 1; + data = vcpu->arch.osvw.status; + break; default: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); @@ -2080,6 +2146,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_XSAVE: case KVM_CAP_ASYNC_PF: case KVM_CAP_GET_TSC_KHZ: + case KVM_CAP_PCI_2_3: r = 1; break; case KVM_CAP_COALESCED_MMIO: @@ -2214,19 +2281,23 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } kvm_x86_ops->vcpu_load(vcpu, cpu); - if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { - /* Make sure TSC doesn't go backwards */ - s64 tsc_delta; - u64 tsc; - tsc = kvm_x86_ops->read_l1_tsc(vcpu); - tsc_delta = !vcpu->arch.last_guest_tsc ? 0 : - tsc - vcpu->arch.last_guest_tsc; + /* Apply any externally detected TSC adjustments (due to suspend) */ + if (unlikely(vcpu->arch.tsc_offset_adjustment)) { + adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); + vcpu->arch.tsc_offset_adjustment = 0; + set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); + } + if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { + s64 tsc_delta = !vcpu->arch.last_host_tsc ? 
0 : + native_read_tsc() - vcpu->arch.last_host_tsc; if (tsc_delta < 0) mark_tsc_unstable("KVM discovered backwards TSC"); if (check_tsc_unstable()) { - kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta); + u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu, + vcpu->arch.last_guest_tsc); + kvm_x86_ops->write_tsc_offset(vcpu, offset); vcpu->arch.tsc_catchup = 1; } kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); @@ -2243,7 +2314,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); - vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu); + vcpu->arch.last_host_tsc = native_read_tsc(); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, @@ -2785,26 +2856,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp, u32 user_tsc_khz; r = -EINVAL; - if (!kvm_has_tsc_control) - break; - user_tsc_khz = (u32)arg; if (user_tsc_khz >= kvm_max_guest_tsc_khz) goto out; - kvm_x86_ops->set_tsc_khz(vcpu, user_tsc_khz); + if (user_tsc_khz == 0) + user_tsc_khz = tsc_khz; + + kvm_set_tsc_khz(vcpu, user_tsc_khz); r = 0; goto out; } case KVM_GET_TSC_KHZ: { - r = -EIO; - if (check_tsc_unstable()) - goto out; - - r = vcpu_tsc_khz(vcpu); - + r = vcpu->arch.virtual_tsc_khz; goto out; } default: @@ -2815,6 +2881,11 @@ out: return r; } +int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ + return VM_FAULT_SIGBUS; +} + static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) { int ret; @@ -2998,6 +3069,8 @@ static void write_protect_slot(struct kvm *kvm, unsigned long *dirty_bitmap, unsigned long nr_dirty_pages) { + spin_lock(&kvm->mmu_lock); + /* Not many dirty pages compared to # of shadow pages. */ if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) { unsigned long gfn_offset; @@ -3005,16 +3078,13 @@ static void write_protect_slot(struct kvm *kvm, for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) { unsigned long gfn = memslot->base_gfn + gfn_offset; - spin_lock(&kvm->mmu_lock); kvm_mmu_rmap_write_protect(kvm, gfn, memslot); - spin_unlock(&kvm->mmu_lock); } kvm_flush_remote_tlbs(kvm); - } else { - spin_lock(&kvm->mmu_lock); + } else kvm_mmu_slot_remove_write_access(kvm, memslot->id); - spin_unlock(&kvm->mmu_lock); - } + + spin_unlock(&kvm->mmu_lock); } /* @@ -3133,6 +3203,9 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EEXIST; if (kvm->arch.vpic) goto create_irqchip_unlock; + r = -EINVAL; + if (atomic_read(&kvm->online_vcpus)) + goto create_irqchip_unlock; r = -ENOMEM; vpic = kvm_create_pic(kvm); if (vpic) { @@ -4063,6 +4136,11 @@ static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) return res; } +static void emulator_set_rflags(struct x86_emulate_ctxt *ctxt, ulong val) +{ + kvm_set_rflags(emul_to_vcpu(ctxt), val); +} + static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) { return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); @@ -4244,6 +4322,7 @@ static struct x86_emulate_ops emulate_ops = { .set_idt = emulator_set_idt, .get_cr = emulator_get_cr, .set_cr = emulator_set_cr, + .set_rflags = emulator_set_rflags, .cpl = emulator_get_cpl, .get_dr = emulator_get_dr, .set_dr = emulator_set_dr, @@ -5288,6 +5367,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) profile_hit(KVM_PROFILING, (void *)rip); } + if (unlikely(vcpu->arch.tsc_always_catchup)) + kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); kvm_lapic_sync_from_vapic(vcpu); @@ -5587,15 +5668,15 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, return 0; } -int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason, 
- bool has_error_code, u32 error_code) +int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, + int reason, bool has_error_code, u32 error_code) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int ret; init_emulate_ctxt(vcpu); - ret = emulator_task_switch(ctxt, tss_selector, reason, + ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, has_error_code, error_code); if (ret) @@ -5928,13 +6009,88 @@ int kvm_arch_hardware_enable(void *garbage) struct kvm *kvm; struct kvm_vcpu *vcpu; int i; + int ret; + u64 local_tsc; + u64 max_tsc = 0; + bool stable, backwards_tsc = false; kvm_shared_msr_cpu_online(); - list_for_each_entry(kvm, &vm_list, vm_list) - kvm_for_each_vcpu(i, vcpu, kvm) - if (vcpu->cpu == smp_processor_id()) - kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); - return kvm_x86_ops->hardware_enable(garbage); + ret = kvm_x86_ops->hardware_enable(garbage); + if (ret != 0) + return ret; + + local_tsc = native_read_tsc(); + stable = !check_tsc_unstable(); + list_for_each_entry(kvm, &vm_list, vm_list) { + kvm_for_each_vcpu(i, vcpu, kvm) { + if (!stable && vcpu->cpu == smp_processor_id()) + set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); + if (stable && vcpu->arch.last_host_tsc > local_tsc) { + backwards_tsc = true; + if (vcpu->arch.last_host_tsc > max_tsc) + max_tsc = vcpu->arch.last_host_tsc; + } + } + } + + /* + * Sometimes, even reliable TSCs go backwards. This happens on + * platforms that reset TSC during suspend or hibernate actions, but + * maintain synchronization. We must compensate. Fortunately, we can + * detect that condition here, which happens early in CPU bringup, + * before any KVM threads can be running. Unfortunately, we can't + * bring the TSCs fully up to date with real time, as we aren't yet far + * enough into CPU bringup that we know how much real time has actually + * elapsed; our helper function, get_kernel_ns() will be using boot + * variables that haven't been updated yet. + * + * So we simply find the maximum observed TSC above, then record the + * adjustment to TSC in each VCPU. When the VCPU later gets loaded, + * the adjustment will be applied. Note that we accumulate + * adjustments, in case multiple suspend cycles happen before some VCPU + * gets a chance to run again. In the event that no KVM threads get a + * chance to run, we will miss the entire elapsed period, as we'll have + * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may + * lose cycle time. This isn't too big a deal, since the loss will be + * uniform across all VCPUs (not to mention the scenario is extremely + * unlikely). It is possible that a second hibernate recovery happens + * much faster than a first, causing the observed TSC here to be + * smaller; this would require additional padding adjustment, which is + * why we set last_host_tsc to the local tsc observed here. + * + * N.B. - this code below runs only on platforms with reliable TSC, + * as that is the only way backwards_tsc is set above. Also note + * that this runs for ALL vcpus, which is not a bug; all VCPUs should + * have the same delta_cyc adjustment applied if backwards_tsc + * is detected. Note further, this adjustment is only done once, + * as we reset last_host_tsc on all VCPUs to stop this from being + * called multiple times (one for each physical CPU bringup). + * + * Platforms with unreliable TSCs don't have to deal with this, they + * will be compensated by the logic in vcpu_load, which sets the TSC to + * catchup mode. 
This will catchup all VCPUs to real time, but cannot + * guarantee that they stay in perfect synchronization. + */ + if (backwards_tsc) { + u64 delta_cyc = max_tsc - local_tsc; + list_for_each_entry(kvm, &vm_list, vm_list) { + kvm_for_each_vcpu(i, vcpu, kvm) { + vcpu->arch.tsc_offset_adjustment += delta_cyc; + vcpu->arch.last_host_tsc = local_tsc; + } + + /* + * We have to disable TSC offset matching.. if you were + * booting a VM while issuing an S4 host suspend.... + * you may have some problem. Solving this issue is + * left as an exercise to the reader. + */ + kvm->arch.last_tsc_nsec = 0; + kvm->arch.last_tsc_write = 0; + } + + } + return 0; } void kvm_arch_hardware_disable(void *garbage) @@ -5958,6 +6114,11 @@ void kvm_arch_check_processor_compat(void *rtn) kvm_x86_ops->check_processor_compatibility(rtn); } +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) +{ + return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); +} + int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct page *page; @@ -5980,7 +6141,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) } vcpu->arch.pio_data = page_address(page); - kvm_init_tsc_catchup(vcpu, max_tsc_khz); + kvm_set_tsc_khz(vcpu, max_tsc_khz); r = kvm_mmu_create(vcpu); if (r < 0) @@ -6032,8 +6193,11 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) free_page((unsigned long)vcpu->arch.pio_data); } -int kvm_arch_init_vm(struct kvm *kvm) +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { + if (type) + return -EINVAL; + INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); @@ -6093,6 +6257,65 @@ void kvm_arch_destroy_vm(struct kvm *kvm) put_page(kvm->arch.ept_identity_pagetable); } +void kvm_arch_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ + int i; + + for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) { + if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) { + vfree(free->arch.lpage_info[i]); + free->arch.lpage_info[i] = NULL; + } + } +} + +int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +{ + int i; + + for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) { + unsigned long ugfn; + int lpages; + int level = i + 2; + + lpages = gfn_to_index(slot->base_gfn + npages - 1, + slot->base_gfn, level) + 1; + + slot->arch.lpage_info[i] = + vzalloc(lpages * sizeof(*slot->arch.lpage_info[i])); + if (!slot->arch.lpage_info[i]) + goto out_free; + + if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) + slot->arch.lpage_info[i][0].write_count = 1; + if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) + slot->arch.lpage_info[i][lpages - 1].write_count = 1; + ugfn = slot->userspace_addr >> PAGE_SHIFT; + /* + * If the gfn and userspace address are not aligned wrt each + * other, or if explicitly asked to, disable large page + * support for this slot + */ + if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) || + !kvm_largepages_enabled()) { + unsigned long j; + + for (j = 0; j < lpages; ++j) + slot->arch.lpage_info[i][j].write_count = 1; + } + } + + return 0; + +out_free: + for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) { + vfree(slot->arch.lpage_info[i]); + slot->arch.lpage_info[i] = NULL; + } + return -ENOMEM; +} + int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_memory_slot old, diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 6cabf6570d64..4f0cec7e4ffb 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -12,7 +12,6 @@ #include <asm/page_types.h> #include 
<asm/sections.h> #include <asm/setup.h> -#include <asm/system.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/proto.h> diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8663f6c47ccb..575d86f85ce4 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -35,7 +35,6 @@ #include <asm/asm.h> #include <asm/bios_ebda.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/dma.h> diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 436a0309db33..fc18be0f6f29 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -35,7 +35,6 @@ #include <asm/processor.h> #include <asm/bios_ebda.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index cac718499256..a69bcb8c7621 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -10,7 +10,6 @@ #include <linux/spinlock.h> #include <linux/module.h> -#include <asm/system.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/fixmap.h> diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts index e70be38ce039..ce874f872cc6 100644 --- a/arch/x86/platform/ce4100/falconfalls.dts +++ b/arch/x86/platform/ce4100/falconfalls.dts @@ -208,16 +208,19 @@ interrupts = <14 1>; }; - gpio@b,1 { + pcigpio: gpio@b,1 { + #gpio-cells = <2>; + #interrupt-cells = <2>; compatible = "pci8086,2e67.2", "pci8086,2e67", "pciclassff0000", "pciclassff00"; - #gpio-cells = <2>; reg = <0x15900 0x0 0x0 0x0 0x0>; interrupts = <15 1>; + interrupt-controller; gpio-controller; + intel,muxctl = <0>; }; i2c-controller@b,2 { diff --git a/arch/x86/platform/geode/Makefile b/arch/x86/platform/geode/Makefile index 246b788847ff..5b51194f4c8d 100644 --- a/arch/x86/platform/geode/Makefile +++ b/arch/x86/platform/geode/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_ALIX) += alix.o obj-$(CONFIG_NET5501) += net5501.o +obj-$(CONFIG_GEOS) += geos.o diff --git a/arch/x86/platform/geode/geos.c b/arch/x86/platform/geode/geos.c new file mode 100644 index 000000000000..c2e6d53558be --- /dev/null +++ b/arch/x86/platform/geode/geos.c @@ -0,0 +1,128 @@ +/* + * System Specific setup for Traverse Technologies GEOS. + * At the moment this means setup of GPIO control of LEDs. + * + * Copyright (C) 2008 Constantin Baranov <const@mimas.ru> + * Copyright (C) 2011 Ed Wildgoose <kernel@wildgooses.com> + * and Philip Prindeville <philipp@redfish-solutions.com> + * + * TODO: There are large similarities with leds-net5501.c + * by Alessandro Zummo <a.zummo@towertech.it> + * In the future leds-net5501.c should be migrated over to platform + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/string.h> +#include <linux/module.h> +#include <linux/leds.h> +#include <linux/platform_device.h> +#include <linux/gpio.h> +#include <linux/input.h> +#include <linux/gpio_keys.h> +#include <linux/dmi.h> + +#include <asm/geode.h> + +static struct gpio_keys_button geos_gpio_buttons[] = { + { + .code = KEY_RESTART, + .gpio = 3, + .active_low = 1, + .desc = "Reset button", + .type = EV_KEY, + .wakeup = 0, + .debounce_interval = 100, + .can_disable = 0, + } +}; +static struct gpio_keys_platform_data geos_buttons_data = { + .buttons = geos_gpio_buttons, + .nbuttons = ARRAY_SIZE(geos_gpio_buttons), + .poll_interval = 20, +}; + +static struct platform_device geos_buttons_dev = { + .name = "gpio-keys-polled", + .id = 1, + .dev = { + .platform_data = &geos_buttons_data, + } +}; + +static struct gpio_led geos_leds[] = { + { + .name = "geos:1", + .gpio = 6, + .default_trigger = "default-on", + .active_low = 1, + }, + { + .name = "geos:2", + .gpio = 25, + .default_trigger = "default-off", + .active_low = 1, + }, + { + .name = "geos:3", + .gpio = 27, + .default_trigger = "default-off", + .active_low = 1, + }, +}; + +static struct gpio_led_platform_data geos_leds_data = { + .num_leds = ARRAY_SIZE(geos_leds), + .leds = geos_leds, +}; + +static struct platform_device geos_leds_dev = { + .name = "leds-gpio", + .id = -1, + .dev.platform_data = &geos_leds_data, +}; + +static struct __initdata platform_device *geos_devs[] = { + &geos_buttons_dev, + &geos_leds_dev, +}; + +static void __init register_geos(void) +{ + /* Setup LED control through leds-gpio driver */ + platform_add_devices(geos_devs, ARRAY_SIZE(geos_devs)); +} + +static int __init geos_init(void) +{ + const char *vendor, *product; + + if (!is_geode()) + return 0; + + vendor = dmi_get_system_info(DMI_SYS_VENDOR); + if (!vendor || strcmp(vendor, "Traverse Technologies")) + return 0; + + product = dmi_get_system_info(DMI_PRODUCT_NAME); + if (!product || strcmp(product, "Geos")) + return 0; + + printk(KERN_INFO "%s: system is recognized as \"%s %s\"\n", + KBUILD_MODNAME, vendor, product); + + register_geos(); + + return 0; +} + +module_init(geos_init); + +MODULE_AUTHOR("Philip Prindeville <philipp@redfish-solutions.com>"); +MODULE_DESCRIPTION("Traverse Technologies Geos System Setup"); +MODULE_LICENSE("GPL"); diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 4889655ba784..47936830968c 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -115,7 +115,7 @@ static void __save_processor_state(struct saved_context *ctxt) void save_processor_state(void) { __save_processor_state(&saved_context); - save_sched_clock_state(); + x86_platform.save_sched_clock_state(); } #ifdef CONFIG_X86_32 EXPORT_SYMBOL(save_processor_state); @@ -231,8 +231,8 @@ static void __restore_processor_state(struct saved_context *ctxt) /* Needed by apm.c */ void restore_processor_state(void) { + x86_platform.restore_sched_clock_state(); __restore_processor_state(&saved_context); - restore_sched_clock_state(); } #ifdef CONFIG_X86_32 EXPORT_SYMBOL(restore_processor_state); diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c index 3769079874d8..74202c1910cd 100644 --- a/arch/x86/power/hibernate_32.c +++ b/arch/x86/power/hibernate_32.c @@ -10,7 +10,6 @@ #include <linux/suspend.h> #include <linux/bootmem.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmzone.h> diff --git a/arch/xtensa/include/asm/atomic.h 
b/arch/xtensa/include/asm/atomic.h index 23592eff67ad..b40989308775 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -18,7 +18,7 @@ #ifdef __KERNEL__ #include <asm/processor.h> -#include <asm/system.h> +#include <asm/cmpxchg.h> #define ATOMIC_INIT(i) { (i) } diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h new file mode 100644 index 000000000000..55707a8009d3 --- /dev/null +++ b/arch/xtensa/include/asm/barrier.h @@ -0,0 +1,29 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_SYSTEM_H +#define _XTENSA_SYSTEM_H + +#define smp_read_barrier_depends() do { } while(0) +#define read_barrier_depends() do { } while(0) + +#define mb() barrier() +#define rmb() mb() +#define wmb() mb() + +#ifdef CONFIG_SMP +#error smp_* not defined +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#endif + +#define set_mb(var, value) do { var = value; mb(); } while (0) + +#endif /* _XTENSA_SYSTEM_H */ diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h index 40aa7fe77f66..5270197ddd36 100644 --- a/arch/xtensa/include/asm/bitops.h +++ b/arch/xtensa/include/asm/bitops.h @@ -21,7 +21,6 @@ #include <asm/processor.h> #include <asm/byteorder.h> -#include <asm/system.h> #ifdef CONFIG_SMP # error SMP not supported on this architecture diff --git a/arch/xtensa/include/asm/system.h b/arch/xtensa/include/asm/cmpxchg.h index 1e7e09ab6cd7..e32149063d83 100644 --- a/arch/xtensa/include/asm/system.h +++ b/arch/xtensa/include/asm/cmpxchg.h @@ -1,5 +1,5 @@ /* - * include/asm-xtensa/system.h + * Atomic xchg and cmpxchg operations. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -8,44 +8,12 @@ * Copyright (C) 2001 - 2005 Tensilica Inc. */ -#ifndef _XTENSA_SYSTEM_H -#define _XTENSA_SYSTEM_H +#ifndef _XTENSA_CMPXCHG_H +#define _XTENSA_CMPXCHG_H -#include <linux/stringify.h> -#include <linux/irqflags.h> - -#include <asm/processor.h> - -#define smp_read_barrier_depends() do { } while(0) -#define read_barrier_depends() do { } while(0) - -#define mb() barrier() -#define rmb() mb() -#define wmb() mb() +#ifndef __ASSEMBLY__ -#ifdef CONFIG_SMP -#error smp_* not defined -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#endif - -#define set_mb(var, value) do { var = value; mb(); } while (0) - -#if !defined (__ASSEMBLY__) - -/* * switch_to(n) should switch tasks to task nr n, first - * checking that n isn't the current task, in which case it does nothing. 
- */ -extern void *_switch_to(void *last, void *next); - -#endif /* __ASSEMBLY__ */ - -#define switch_to(prev,next,last) \ -do { \ - (last) = _switch_to(prev, next); \ -} while(0) +#include <linux/stringify.h> /* * cmpxchg @@ -158,27 +126,6 @@ __xchg(unsigned long x, volatile void * ptr, int size) return x; } -extern void set_except_vector(int n, void *addr); - -static inline void spill_registers(void) -{ - unsigned int a0, ps; - - __asm__ __volatile__ ( - "movi a14," __stringify (PS_EXCM_BIT) " | 1\n\t" - "mov a12, a0\n\t" - "rsr a13," __stringify(SAR) "\n\t" - "xsr a14," __stringify(PS) "\n\t" - "movi a0, _spill_registers\n\t" - "rsync\n\t" - "callx0 a0\n\t" - "mov a0, a12\n\t" - "wsr a13," __stringify(SAR) "\n\t" - "wsr a14," __stringify(PS) "\n\t" - :: "a" (&a0), "a" (&ps) - : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory"); -} - -#define arch_align_stack(x) (x) +#endif /* __ASSEMBLY__ */ -#endif /* _XTENSA_SYSTEM_H */ +#endif /* _XTENSA_CMPXCHG_H */ diff --git a/arch/xtensa/include/asm/exec.h b/arch/xtensa/include/asm/exec.h new file mode 100644 index 000000000000..af949e28cb32 --- /dev/null +++ b/arch/xtensa/include/asm/exec.h @@ -0,0 +1,14 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_EXEC_H +#define _XTENSA_EXEC_H + +#define arch_align_stack(x) (x) + +#endif /* _XTENSA_EXEC_H */ diff --git a/arch/xtensa/include/asm/setup.h b/arch/xtensa/include/asm/setup.h index e3636520d8cc..9fa8ad979361 100644 --- a/arch/xtensa/include/asm/setup.h +++ b/arch/xtensa/include/asm/setup.h @@ -13,4 +13,6 @@ #define COMMAND_LINE_SIZE 256 +extern void set_except_vector(int n, void *addr); + #endif diff --git a/arch/xtensa/include/asm/switch_to.h b/arch/xtensa/include/asm/switch_to.h new file mode 100644 index 000000000000..6b73bf0eb1ff --- /dev/null +++ b/arch/xtensa/include/asm/switch_to.h @@ -0,0 +1,22 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2005 Tensilica Inc. + */ + +#ifndef _XTENSA_SWITCH_TO_H +#define _XTENSA_SWITCH_TO_H + +/* * switch_to(n) should switch tasks to task nr n, first + * checking that n isn't the current task, in which case it does nothing. 
+ */ +extern void *_switch_to(void *last, void *next); + +#define switch_to(prev,next,last) \ +do { \ + (last) = _switch_to(prev, next); \ +} while(0) + +#endif /* _XTENSA_SWITCH_TO_H */ diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 3fa526fd3c99..6e4bb3b791ab 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -17,7 +17,9 @@ #define _XTENSA_UACCESS_H #include <linux/errno.h> +#ifndef __ASSEMBLY__ #include <linux/prefetch.h> +#endif #include <asm/types.h> #define VERIFY_READ 0 diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 2c9004770c4e..6a2d6edf8f72 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -34,7 +34,6 @@ #include <asm/pgtable.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/platform.h> diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c index 2dff698ab02e..33eea4c16f12 100644 --- a/arch/xtensa/kernel/ptrace.c +++ b/arch/xtensa/kernel/ptrace.c @@ -24,7 +24,6 @@ #include <asm/pgtable.h> #include <asm/page.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/ptrace.h> #include <asm/elf.h> diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 1e5a034fe011..17e746f7be60 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -34,7 +34,6 @@ # include <linux/seq_file.h> #endif -#include <asm/system.h> #include <asm/bootparam.h> #include <asm/pgtable.h> #include <asm/processor.h> diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index e64efac3b9db..bc1e14cf9369 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -381,6 +381,25 @@ static __always_inline unsigned long *stack_pointer(struct task_struct *task) return sp; } +static inline void spill_registers(void) +{ + unsigned int a0, ps; + + __asm__ __volatile__ ( + "movi a14," __stringify (PS_EXCM_BIT) " | 1\n\t" + "mov a12, a0\n\t" + "rsr a13," __stringify(SAR) "\n\t" + "xsr a14," __stringify(PS) "\n\t" + "movi a0, _spill_registers\n\t" + "rsync\n\t" + "callx0 a0\n\t" + "mov a0, a12\n\t" + "wsr a13," __stringify(SAR) "\n\t" + "wsr a14," __stringify(PS) "\n\t" + :: "a" (&a0), "a" (&ps) + : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory"); +} + void show_trace(struct task_struct *task, unsigned long *sp) { unsigned long a0, a1, pc; diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index e367e3026436..b17885a0b508 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -19,7 +19,6 @@ #include <asm/cacheflush.h> #include <asm/hardirq.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/pgalloc.h> unsigned long asid_cache = ASID_USER_FIRST; diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index 239461d8ea88..e2700b21395b 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c @@ -18,7 +18,6 @@ #include <asm/processor.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> -#include <asm/system.h> #include <asm/cacheflush.h> |
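The xtensa hunks above finish the treewide removal of <asm/system.h> by splitting it into <asm/barrier.h>, <asm/cmpxchg.h>, <asm/switch_to.h> and <asm/exec.h>. As a rough sketch of what a caller looks like after this split, the fragment below shows a hypothetical file updated for the new layout; the file name and function are invented for illustration, and only the header names and the mb()/cmpxchg() helpers come from the patches above.

/* drivers/example/foo.c (hypothetical) - after the split, users include
 * only the specific headers they need instead of <asm/system.h>.
 */
#include <linux/kernel.h>
#include <asm/barrier.h>	/* mb()/rmb()/wmb() now live here */
#include <asm/cmpxchg.h>	/* xchg()/cmpxchg() now live here */

static int foo_claim_slot(int *owner, int self)
{
	/* atomically take the slot if it is still free */
	if (cmpxchg(owner, 0, self) != 0)
		return -1;	/* somebody else got there first */
	/* order the claim before any following device accesses */
	mb();
	return 0;
}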