Diffstat (limited to 'arch/mips/kernel/traps.c')
-rw-r--r-- | arch/mips/kernel/traps.c | 317
1 file changed, 145 insertions, 172 deletions
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 6379003f9d8d..fcae66752972 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -9,9 +9,10 @@
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2000, 01 MIPS Technologies, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki
+ * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
  */
 #include <linux/bug.h>
+#include <linux/compiler.h>
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/module.h>
@@ -104,7 +105,7 @@ static int __init set_raw_show_trace(char *str)
 __setup("raw_show_trace", set_raw_show_trace);
 #endif
 
-static void show_backtrace(struct task_struct *task, struct pt_regs *regs)
+static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 {
 	unsigned long sp = regs->regs[29];
 	unsigned long ra = regs->regs[31];
@@ -126,7 +127,8 @@ static void show_backtrace(struct task_struct *task, struct pt_regs *regs)
  * This routine abuses get_user()/put_user() to reference pointers
  * with at least a bit of error checking ...
  */
-static void show_stacktrace(struct task_struct *task, struct pt_regs *regs)
+static void show_stacktrace(struct task_struct *task,
+	const struct pt_regs *regs)
 {
 	const int field = 2 * sizeof(unsigned long);
 	long stackdata;
@@ -203,7 +205,7 @@ static void show_code(unsigned int __user *pc)
 	}
 }
 
-void show_regs(struct pt_regs *regs)
+static void __show_regs(const struct pt_regs *regs)
 {
 	const int field = 2 * sizeof(unsigned long);
 	unsigned int cause = regs->cp0_cause;
@@ -295,15 +297,24 @@ void show_regs(struct pt_regs *regs)
 	if (1 <= cause && cause <= 5)
 		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
 
-	printk("PrId  : %08x\n", read_c0_prid());
+	printk("PrId  : %08x (%s)\n", read_c0_prid(),
+	       cpu_name_string());
 }
 
-void show_registers(struct pt_regs *regs)
+/*
+ * FIXME: really the generic show_regs should take a const pointer argument.
+ */
+void show_regs(struct pt_regs *regs)
 {
-	show_regs(regs);
+	__show_regs((struct pt_regs *)regs);
+}
+
+void show_registers(const struct pt_regs *regs)
+{
+	__show_regs(regs);
 	print_modules();
 	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
-	       current->comm, current->pid, current_thread_info(), current);
+	       current->comm, task_pid_nr(current), current_thread_info(), current);
 	show_stacktrace(current, regs);
 	show_code((unsigned int __user *) regs->cp0_epc);
 	printk("\n");
@@ -311,7 +322,7 @@ void show_registers(struct pt_regs *regs)
 
 static DEFINE_SPINLOCK(die_lock);
 
-void __noreturn die(const char * str, struct pt_regs * regs)
+void __noreturn die(const char * str, const struct pt_regs * regs)
 {
 	static int die_counter;
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -400,7 +411,7 @@ asmlinkage void do_be(struct pt_regs *regs)
 }
 
 /*
- * ll/sc emulation
+ * ll/sc, rdhwr, sync emulation
  */
 
 #define OPCODE 0xfc000000
@@ -409,9 +420,11 @@ asmlinkage void do_be(struct pt_regs *regs)
 #define OFFSET 0x0000ffff
 #define LL     0xc0000000
 #define SC     0xe0000000
+#define SPEC0  0x00000000
 #define SPEC3  0x7c000000
 #define RD     0x0000f800
 #define FUNC   0x0000003f
+#define SYNC   0x0000000f
 #define RDHWR  0x0000003b
 
 /*
@@ -422,11 +435,10 @@ unsigned long ll_bit;
 
 static struct task_struct *ll_task = NULL;
 
-static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
+static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
 {
 	unsigned long value, __user *vaddr;
 	long offset;
-	int signal = 0;
 
 	/*
 	 * analyse the ll instruction that just caused a ri exception
@@ -441,14 +453,10 @@ static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
 	vaddr = (unsigned long __user *)
 	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 
-	if ((unsigned long)vaddr & 3) {
-		signal = SIGBUS;
-		goto sig;
-	}
-	if (get_user(value, vaddr)) {
-		signal = SIGSEGV;
-		goto sig;
-	}
+	if ((unsigned long)vaddr & 3)
+		return SIGBUS;
+	if (get_user(value, vaddr))
+		return SIGSEGV;
 
 	preempt_disable();
 
@@ -461,22 +469,16 @@ static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
 
 	preempt_enable();
 
-	compute_return_epc(regs);
-
 	regs->regs[(opcode & RT) >> 16] = value;
 
-	return;
-
-sig:
-	force_sig(signal, current);
+	return 0;
 }
 
-static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
+static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
 {
 	unsigned long __user *vaddr;
 	unsigned long reg;
 	long offset;
-	int signal = 0;
 
 	/*
 	 * analyse the sc instruction that just caused a ri exception
@@ -492,34 +494,25 @@ static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
 	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 	reg = (opcode & RT) >> 16;
 
-	if ((unsigned long)vaddr & 3) {
-		signal = SIGBUS;
-		goto sig;
-	}
+	if ((unsigned long)vaddr & 3)
+		return SIGBUS;
 
 	preempt_disable();
 
 	if (ll_bit == 0 || ll_task != current) {
-		compute_return_epc(regs);
 		regs->regs[reg] = 0;
 		preempt_enable();
-		return;
+		return 0;
 	}
 
 	preempt_enable();
 
-	if (put_user(regs->regs[reg], vaddr)) {
-		signal = SIGSEGV;
-		goto sig;
-	}
+	if (put_user(regs->regs[reg], vaddr))
+		return SIGSEGV;
 
-	compute_return_epc(regs);
 	regs->regs[reg] = 1;
 
-	return;
-
-sig:
-	force_sig(signal, current);
+	return 0;
 }
 
 /*
@@ -529,27 +522,14 @@ sig:
  * few processors such as NEC's VR4100 throw reserved instruction exceptions
  * instead, so we're doing the emulation thing in both exception handlers.
  */
-static inline int simulate_llsc(struct pt_regs *regs)
+static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 {
-	unsigned int opcode;
-
-	if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-		goto out_sigsegv;
-
-	if ((opcode & OPCODE) == LL) {
-		simulate_ll(regs, opcode);
-		return 0;
-	}
-	if ((opcode & OPCODE) == SC) {
-		simulate_sc(regs, opcode);
-		return 0;
-	}
-
-	return -EFAULT;			/* Strange things going on ... */
+	if ((opcode & OPCODE) == LL)
+		return simulate_ll(regs, opcode);
+	if ((opcode & OPCODE) == SC)
+		return simulate_sc(regs, opcode);
 
-out_sigsegv:
-	force_sig(SIGSEGV, current);
-	return -EFAULT;
+	return -1;			/* Must be something else ... */
 }
 
 /*
@@ -557,16 +537,9 @@ out_sigsegv:
  * registers not implemented in hardware. The only current use of this
  * is the thread area pointer.
  */
-static inline int simulate_rdhwr(struct pt_regs *regs)
+static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
 {
 	struct thread_info *ti = task_thread_info(current);
-	unsigned int opcode;
-
-	if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-		goto out_sigsegv;
-
-	if (unlikely(compute_return_epc(regs)))
-		return -EFAULT;
 
 	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 		int rd = (opcode & RD) >> 11;
@@ -576,16 +549,20 @@ static inline int simulate_rdhwr(struct pt_regs *regs)
 				regs->regs[rt] = ti->tp_value;
 				return 0;
 			default:
-				return -EFAULT;
+				return -1;
 		}
 	}
 
 	/* Not ours. */
-	return -EFAULT;
+	return -1;
+}
 
-out_sigsegv:
-	force_sig(SIGSEGV, current);
-	return -EFAULT;
+static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
+{
+	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC)
+		return 0;
+
+	return -1;			/* Must be something else ... */
 }
 
 asmlinkage void do_ov(struct pt_regs *regs)
@@ -627,7 +604,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		lose_fpu(1);
 
 		/* Run the emulator */
-		sig = fpu_emulator_cop1Handler (regs, &current->thread.fpu, 1);
+		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);
 
 		/*
 		 * We can't allow the emulated instruction to leave any of
@@ -757,16 +734,35 @@ out_sigsegv:
 
 asmlinkage void do_ri(struct pt_regs *regs)
 {
-	die_if_kernel("Reserved instruction in kernel code", regs);
+	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
+	unsigned long old_epc = regs->cp0_epc;
+	unsigned int opcode = 0;
+	int status = -1;
 
-	if (!cpu_has_llsc)
-		if (!simulate_llsc(regs))
-			return;
+	die_if_kernel("Reserved instruction in kernel code", regs);
 
-	if (!simulate_rdhwr(regs))
+	if (unlikely(compute_return_epc(regs) < 0))
 		return;
 
-	force_sig(SIGILL, current);
+	if (unlikely(get_user(opcode, epc) < 0))
+		status = SIGSEGV;
+
+	if (!cpu_has_llsc && status < 0)
+		status = simulate_llsc(regs, opcode);
+
+	if (status < 0)
+		status = simulate_rdhwr(regs, opcode);
+
+	if (status < 0)
+		status = simulate_sync(regs, opcode);
+
+	if (status < 0)
+		status = SIGILL;
+
+	if (unlikely(status > 0)) {
+		regs->cp0_epc = old_epc;	/* Undo skip-over. */
+		force_sig(status, current);
+	}
 }
 
 /*
@@ -798,7 +794,11 @@ static void mt_ase_fp_affinity(void)
 
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
+	unsigned int __user *epc;
+	unsigned long old_epc;
+	unsigned int opcode;
 	unsigned int cpid;
+	int status;
 
 	die_if_kernel("do_cpu invoked from kernel context!", regs);
 
@@ -806,14 +806,32 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 
 	switch (cpid) {
 	case 0:
-		if (!cpu_has_llsc)
-			if (!simulate_llsc(regs))
-				return;
+		epc = (unsigned int __user *)exception_epc(regs);
+		old_epc = regs->cp0_epc;
+		opcode = 0;
+		status = -1;
 
-		if (!simulate_rdhwr(regs))
+		if (unlikely(compute_return_epc(regs) < 0))
 			return;
 
-		break;
+		if (unlikely(get_user(opcode, epc) < 0))
+			status = SIGSEGV;
+
+		if (!cpu_has_llsc && status < 0)
+			status = simulate_llsc(regs, opcode);
+
+		if (status < 0)
+			status = simulate_rdhwr(regs, opcode);
+
+		if (status < 0)
+			status = SIGILL;
+
+		if (unlikely(status > 0)) {
+			regs->cp0_epc = old_epc;	/* Undo skip-over. */
+			force_sig(status, current);
+		}
+
+		return;
 
 	case 1:
 		if (used_math())	/* Using the FPU again. */
@@ -954,7 +972,7 @@ asmlinkage void do_reserved(struct pt_regs *regs)
  */
 static inline void parity_protection_init(void)
 {
-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_24K:
 	case CPU_34K:
 	case CPU_5KC:
@@ -1075,66 +1093,13 @@ void *set_except_vector(int n, void *addr)
 
 	exception_handlers[n] = handler;
 	if (n == 0 && cpu_has_divec) {
-		*(volatile u32 *)(ebase + 0x200) = 0x08000000 |
-		                                 (0x03ffffff & (handler >> 2));
+		*(u32 *)(ebase + 0x200) = 0x08000000 |
+					  (0x03ffffff & (handler >> 2));
 		flush_icache_range(ebase + 0x200, ebase + 0x204);
 	}
 	return (void *)old_handler;
 }
 
-#ifdef CONFIG_CPU_MIPSR2_SRS
-/*
- * MIPSR2 shadow register set allocation
- * FIXME: SMP...
- */
-
-static struct shadow_registers {
-	/*
-	 * Number of shadow register sets supported
-	 */
-	unsigned long sr_supported;
-	/*
-	 * Bitmap of allocated shadow registers
-	 */
-	unsigned long sr_allocated;
-} shadow_registers;
-
-static void mips_srs_init(void)
-{
-	shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
-	printk(KERN_INFO "%ld MIPSR2 register sets available\n",
-	       shadow_registers.sr_supported);
-	shadow_registers.sr_allocated = 1;	/* Set 0 used by kernel */
-}
-
-int mips_srs_max(void)
-{
-	return shadow_registers.sr_supported;
-}
-
-int mips_srs_alloc(void)
-{
-	struct shadow_registers *sr = &shadow_registers;
-	int set;
-
-again:
-	set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported);
-	if (set >= sr->sr_supported)
-		return -1;
-
-	if (test_and_set_bit(set, &sr->sr_allocated))
-		goto again;
-
-	return set;
-}
-
-void mips_srs_free(int set)
-{
-	struct shadow_registers *sr = &shadow_registers;
-
-	clear_bit(set, &sr->sr_allocated);
-}
-
 static asmlinkage void do_default_vi(void)
 {
 	show_regs(get_irq_regs());
@@ -1145,6 +1110,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
 {
 	unsigned long handler;
 	unsigned long old_handler = vi_handlers[n];
+	int srssets = current_cpu_data.srsets;
 	u32 *w;
 	unsigned char *b;
 
@@ -1160,16 +1126,16 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
 
 	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
 
-	if (srs >= mips_srs_max())
+	if (srs >= srssets)
 		panic("Shadow register set %d not supported", srs);
 
 	if (cpu_has_veic) {
 		if (board_bind_eic_interrupt)
-			board_bind_eic_interrupt (n, srs);
+			board_bind_eic_interrupt(n, srs);
 	} else if (cpu_has_vint) {
 		/* SRSMap is only defined if shadow sets are implemented */
-		if (mips_srs_max() > 1)
-			change_c0_srsmap (0xf << n*4, srs << n*4);
+		if (srssets > 1)
+			change_c0_srsmap(0xf << n*4, srs << n*4);
 	}
 
 	if (srs == 0) {
@@ -1198,10 +1164,10 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
 			 * Sigh... panicing won't help as the console
 			 * is probably not configured :(
 			 */
-			panic ("VECTORSPACING too small");
+			panic("VECTORSPACING too small");
 		}
 
-		memcpy (b, &except_vec_vi, handler_len);
+		memcpy(b, &except_vec_vi, handler_len);
 #ifdef CONFIG_MIPS_MT_SMTC
 		BUG_ON(n > 7);	/* Vector index %d exceeds SMTC maximum. */
 
@@ -1235,14 +1201,6 @@ void *set_vi_handler(int n, vi_handler_t addr)
 	return set_vi_srs_handler(n, addr, 0);
 }
 
-#else
-
-static inline void mips_srs_init(void)
-{
-}
-
-#endif /* CONFIG_CPU_MIPSR2_SRS */
-
 /*
  * This is used by native signal handling
  */
@@ -1318,6 +1276,17 @@ extern void cpu_cache_init(void);
 extern void tlb_init(void);
 extern void flush_tlb_handlers(void);
 
+/*
+ * Timer interrupt
+ */
+int cp0_compare_irq;
+
+/*
+ * Performance counter IRQ or -1 if shared with timer
+ */
+int cp0_perfcount_irq;
+EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
+
 void __init per_cpu_trap_init(void)
 {
 	unsigned int cpu = smp_processor_id();
@@ -1348,12 +1317,12 @@ void __init per_cpu_trap_init(void)
 #endif
 	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
 		status_set |= ST0_XX;
+	if (cpu_has_dsp)
+		status_set |= ST0_MX;
+
 	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
 			 status_set);
 
-	if (cpu_has_dsp)
-		set_c0_status(ST0_MX);
-
 #ifdef CONFIG_CPU_MIPSR2
 	if (cpu_has_mips_r2) {
 		unsigned int enable = 0x0000000f;
@@ -1370,9 +1339,9 @@ void __init per_cpu_trap_init(void)
 #endif /* CONFIG_MIPS_MT_SMTC */
 
 	if (cpu_has_veic || cpu_has_vint) {
-		write_c0_ebase (ebase);
+		write_c0_ebase(ebase);
 		/* Setting vector spacing enables EI/VI mode */
-		change_c0_intctl (0x3e0, VECTORSPACING);
+		change_c0_intctl(0x3e0, VECTORSPACING);
 	}
 	if (cpu_has_divec) {
 		if (cpu_has_mipsmt) {
@@ -1390,8 +1359,8 @@ void __init per_cpu_trap_init(void)
 	 * o read IntCtl.IPPCI to determine the performance counter interrupt
 	 */
 	if (cpu_has_mips_r2) {
-		cp0_compare_irq = (read_c0_intctl () >> 29) & 7;
-		cp0_perfcount_irq = (read_c0_intctl () >> 26) & 7;
+		cp0_compare_irq = (read_c0_intctl() >> 29) & 7;
+		cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7;
 		if (cp0_perfcount_irq == cp0_compare_irq)
 			cp0_perfcount_irq = -1;
 	} else {
@@ -1429,14 +1398,17 @@ void __init per_cpu_trap_init(void)
 }
 
 /* Install CPU exception handler */
-void __init set_handler (unsigned long offset, void *addr, unsigned long size)
+void __init set_handler(unsigned long offset, void *addr, unsigned long size)
 {
 	memcpy((void *)(ebase + offset), addr, size);
 	flush_icache_range(ebase + offset, ebase + offset + size);
 }
 
+static char panic_null_cerr[] __initdata =
+	"Trying to set NULL cache error exception handler";
+
 /* Install uncached CPU exception handler */
-void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size)
+void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size)
 {
 #ifdef CONFIG_32BIT
 	unsigned long uncached_ebase = KSEG1ADDR(ebase);
@@ -1445,6 +1417,9 @@ void __init set_uncached_handler (unsigned long offset, void *addr, unsigned lon
 	unsigned long uncached_ebase = TO_UNCAC(ebase);
 #endif
 
+	if (!addr)
+		panic(panic_null_cerr);
+
 	memcpy((void *)(uncached_ebase + offset), addr, size);
 }
 
@@ -1464,12 +1439,10 @@ void __init trap_init(void)
 	unsigned long i;
 
 	if (cpu_has_veic || cpu_has_vint)
-		ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64);
+		ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64);
 	else
 		ebase = CAC_BASE;
 
-	mips_srs_init();
-
 	per_cpu_trap_init();
 
 	/*
@@ -1490,7 +1463,7 @@ void __init trap_init(void)
 	 * destination.
 	 */
 	if (cpu_has_ejtag && board_ejtag_handler_setup)
-		board_ejtag_handler_setup ();
+		board_ejtag_handler_setup();
 
 	/*
 	 * Only some CPUs have the watch exceptions.
@@ -1543,8 +1516,8 @@ void __init trap_init(void)
 	set_except_vector(12, handle_ov);
 	set_except_vector(13, handle_tr);
 
-	if (current_cpu_data.cputype == CPU_R6000 ||
-	    current_cpu_data.cputype == CPU_R6000A) {
+	if (current_cpu_type() == CPU_R6000 ||
+	    current_cpu_type() == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and