author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-27 21:11:26 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-27 21:11:26 -0800 |
commit | 1b17366d695c8ab03f98d0155357e97a427e1dce (patch) | |
tree | d223c79cc33ca1d890d264a202a1dd9c29655039 /arch/powerpc/kernel/traps.c | |
parent | d12de1ef5eba3adb88f8e9dd81b6a60349466378 (diff) | |
parent | 7179ba52889bef7e5e23f72908270e1ab2b7fc6f (diff) |
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Ben Herrenschmidt:
"So here's my next branch for powerpc. A bit late as I was on vacation
last week. It's mostly the same stuff that was in next already, I
just added two patches today which are the wiring up of lockref for
powerpc, which for some reason fell through the cracks last time and
is trivial.
The highlights are, in addition to a bunch of bug fixes:
- Reworked Machine Check handling on kernels running without a
hypervisor (or acting as a hypervisor). Provides hooks to handle
some errors in real mode, such as TLB errors, SLB errors, etc.
(a sketch of the hook pattern follows this message)
- Support for retrieving memory error information from the service
processor on IBM servers running without a hypervisor and routing
it to the memory poison infrastructure.
- _PAGE_NUMA support on server processors
- 32-bit BookE relocatable kernel support
- FSL e6500 hardware tablewalk support
- A bunch of new/revived board support
- FSL e6500 deeper idle states and altivec powerdown support
You'll notice a generic mm change here; it has been acked by the
relevant authorities and is a pre-req for our _PAGE_NUMA support"
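The machine check rework in the first highlight is what adds the machine_check_early() dispatcher to traps.c in the diff below. As a rough illustration of the hook pattern, here is a minimal, self-contained sketch: the trimmed-down struct cpu_spec and the p8_machine_check_early() handler are hypothetical stand-ins, and only the dispatcher at the bottom mirrors code actually added by this merge.

```c
/* Sketch only: a heavily trimmed cpu_spec with an early, real-mode
 * machine check hook. The real struct has many more fields, and the
 * handler name below is hypothetical.
 */
struct pt_regs;				/* register state at the exception */

struct cpu_spec {
	/* Returns nonzero if the error was handled in real mode,
	 * e.g. a TLB or SLB error fixed up on the spot. */
	long (*machine_check_early)(struct pt_regs *regs);
	/* ... many other fields in the real kernel ... */
};

static long p8_machine_check_early(struct pt_regs *regs)	/* hypothetical */
{
	(void)regs;
	/* Fix up what can be fixed in real mode; no printk allowed here. */
	return 1;				/* handled */
}

static struct cpu_spec power8_spec = {
	.machine_check_early = p8_machine_check_early,
};

static struct cpu_spec *cur_cpu_spec = &power8_spec;

/* Same shape as the dispatcher added in the diff below: call the
 * CPU-specific hook if one is registered, and report whether it
 * handled the error. */
long machine_check_early(struct pt_regs *regs)
{
	long handled = 0;

	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
		handled = cur_cpu_spec->machine_check_early(regs);
	return handled;
}
```

The design point is that the generic trap entry stays tiny and real-mode safe; everything CPU-specific lives behind the function pointer, so platforms without an early handler simply leave the field NULL.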
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (121 commits)
powerpc: Implement arch_spin_is_locked() using arch_spin_value_unlocked()
powerpc: Add support for the optimised lockref implementation
powerpc/powernv: Call OPAL sync before kexec'ing
powerpc/eeh: Escalate error on non-existing PE
powerpc/eeh: Handle multiple EEH errors
powerpc: Fix transactional FP/VMX/VSX unavailable handlers
powerpc: Don't corrupt transactional state when using FP/VMX in kernel
powerpc: Reclaim two unused thread_info flag bits
powerpc: Fix races with irq_work
Move processing of MCE queued event out from syscall exit path.
pseries/cpuidle: Remove redundant call to ppc64_runlatch_off() in cpu idle routines
powerpc: Make add_system_ram_resources() __init
powerpc: add SATA_MV to ppc64_defconfig
powerpc/powernv: Increase candidate fw image size
powerpc: Add debug checks to catch invalid cpu-to-node mappings
powerpc: Fix the setup of CPU-to-Node mappings during CPU online
powerpc/iommu: Don't detach device without IOMMU group
powerpc/eeh: Hotplug improvement
powerpc/eeh: Call opal_pci_reinit() on powernv for restoring config space
powerpc/eeh: Add restore_config operation
...
Diffstat (limited to 'arch/powerpc/kernel/traps.c')
-rw-r--r-- | arch/powerpc/kernel/traps.c | 72 |
1 file changed, 54 insertions, 18 deletions
```diff
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 907a472f9a9e..33cd7a0b8e73 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -285,6 +285,21 @@ void system_reset_exception(struct pt_regs *regs)
 
 	/* What should we do here? We could issue a shutdown or hard reset. */
 }
+
+/*
+ * This function is called in real mode. Strictly no printk's please.
+ *
+ * regs->nip and regs->msr contain srr0 and srr1.
+ */
+long machine_check_early(struct pt_regs *regs)
+{
+	long handled = 0;
+
+	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
+		handled = cur_cpu_spec->machine_check_early(regs);
+	return handled;
+}
+
 #endif
 
 /*
@@ -1384,7 +1399,6 @@ void fp_unavailable_tm(struct pt_regs *regs)
 	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
 		 regs->nip, regs->msr);
 
-	tm_enable();
 
 	/* We can only have got here if the task started using FP after
 	 * beginning the transaction.  So, the transactional regs are just a
@@ -1393,8 +1407,7 @@ void fp_unavailable_tm(struct pt_regs *regs)
 	 * transaction, and probably retry but now with FP enabled.  So the
 	 * checkpointed FP registers need to be loaded.
 	 */
-	tm_reclaim(&current->thread, current->thread.regs->msr,
-		   TM_CAUSE_FAC_UNAV);
+	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
 	/* Reclaim didn't save out any FPRs to transact_fprs. */
 
 	/* Enable FP for the task: */
@@ -1403,11 +1416,19 @@ void fp_unavailable_tm(struct pt_regs *regs)
 	/* This loads and recheckpoints the FP registers from
 	 * thread.fpr[].  They will remain in registers after the
 	 * checkpoint so we don't need to reload them after.
+	 * If VMX is in use, the VRs now hold checkpointed values,
+	 * so we don't want to load the VRs from the thread_struct.
 	 */
-	tm_recheckpoint(&current->thread, regs->msr);
+	tm_recheckpoint(&current->thread, MSR_FP);
+
+	/* If VMX is in use, get the transactional values back */
+	if (regs->msr & MSR_VEC) {
+		do_load_up_transact_altivec(&current->thread);
+		/* At this point all the VSX state is loaded, so enable it */
+		regs->msr |= MSR_VSX;
+	}
 }
 
-#ifdef CONFIG_ALTIVEC
 void altivec_unavailable_tm(struct pt_regs *regs)
 {
 	/* See the comments in fp_unavailable_tm().  This function operates
@@ -1417,18 +1438,21 @@ void altivec_unavailable_tm(struct pt_regs *regs)
 	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
 		 "MSR=%lx\n",
 		 regs->nip, regs->msr);
-	tm_enable();
-	tm_reclaim(&current->thread, current->thread.regs->msr,
-		   TM_CAUSE_FAC_UNAV);
+	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
 	regs->msr |= MSR_VEC;
-	tm_recheckpoint(&current->thread, regs->msr);
+	tm_recheckpoint(&current->thread, MSR_VEC);
 	current->thread.used_vr = 1;
+
+	if (regs->msr & MSR_FP) {
+		do_load_up_transact_fpu(&current->thread);
+		regs->msr |= MSR_VSX;
+	}
 }
-#endif
 
-#ifdef CONFIG_VSX
 void vsx_unavailable_tm(struct pt_regs *regs)
 {
+	unsigned long orig_msr = regs->msr;
+
 	/* See the comments in fp_unavailable_tm().  This works similarly,
 	 * though we're loading both FP and VEC registers in here.
 	 *
@@ -1440,18 +1464,30 @@ void vsx_unavailable_tm(struct pt_regs *regs)
 		 "MSR=%lx\n",
 		 regs->nip, regs->msr);
 
-	tm_enable();
+	current->thread.used_vsr = 1;
+
+	/* If FP and VMX are already loaded, we have all the state we need */
+	if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
+		regs->msr |= MSR_VSX;
+		return;
+	}
+
 	/* This reclaims FP and/or VR regs if they're already enabled */
-	tm_reclaim(&current->thread, current->thread.regs->msr,
-		   TM_CAUSE_FAC_UNAV);
+	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
 
 	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
 		     MSR_VSX;
-	/* This loads & recheckpoints FP and VRs. */
-	tm_recheckpoint(&current->thread, regs->msr);
-	current->thread.used_vsr = 1;
+
+	/* This loads & recheckpoints FP and VRs; but we have
+	 * to be sure not to overwrite previously-valid state.
+	 */
+	tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);
+
+	if (orig_msr & MSR_FP)
+		do_load_up_transact_fpu(&current->thread);
+	if (orig_msr & MSR_VEC)
+		do_load_up_transact_altivec(&current->thread);
 }
-#endif
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 void performance_monitor_exception(struct pt_regs *regs)
```
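The subtlest change above is the mask passed to tm_recheckpoint() in vsx_unavailable_tm(): regs->msr & ~orig_msr restricts the recheckpoint to facilities that were *not* live when the trap was taken, so FP or VMX state that is already valid in the registers is not clobbered; facilities that *were* live instead get their transactional values reloaded explicitly. Here is a standalone demonstration of that mask arithmetic; the MSR_* bit positions match the kernel's powerpc definitions, but the program itself is only an illustration (it omits fpexc_mode, for instance):

```c
#include <stdio.h>

/* Bit positions as in the kernel's powerpc reg.h. */
#define MSR_FP	(1UL << 13)
#define MSR_VEC	(1UL << 25)
#define MSR_VSX	(1UL << 23)

int main(void)
{
	/* Suppose FP was already live in the transaction, VMX was not. */
	unsigned long orig_msr = MSR_FP;

	/* The handler enables everything before recheckpointing. */
	unsigned long msr = orig_msr | MSR_VEC | MSR_FP | MSR_VSX;

	/* Recheckpoint only what was NOT already live: those values
	 * must come from the thread_struct, not the live registers. */
	unsigned long to_recheckpoint = msr & ~orig_msr;

	printf("recheckpoint VEC? %s\n",
	       (to_recheckpoint & MSR_VEC) ? "yes" : "no");	/* yes */
	printf("recheckpoint FP?  %s\n",
	       (to_recheckpoint & MSR_FP) ? "yes" : "no");	/* no */

	/* Facilities that WERE live get their transactional values
	 * reloaded separately (do_load_up_transact_fpu() above). */
	if (orig_msr & MSR_FP)
		puts("reload transactional FP state");
	return 0;
}
```

This is also why the early-out added at the top of vsx_unavailable_tm() can skip the reclaim entirely when both FP and VMX are already loaded: nothing is left to recheckpoint, and only MSR_VSX needs to be set.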