From 8eb9803723a14fd12675641b953e4ccbd86187a8 Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Sun, 29 May 2016 22:03:50 +1000
Subject: powerpc: Avoid load hit store in __giveup_fpu() and __giveup_altivec()

In both __giveup_fpu() and __giveup_altivec() we make two modifications
to tsk->thread.regs->msr. gcc decides to do a read/modify/write of each
change, so we end up with a load hit store:

    ld      r9,264(r10)
    rldicl  r9,r9,50,1
    rotldi  r9,r9,14
    std     r9,264(r10)
    ...
    ld      r9,264(r10)
    rldicl  r9,r9,40,1
    rotldi  r9,r9,24
    std     r9,264(r10)

Fix this by using a temporary.

Signed-off-by: Anton Blanchard
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/process.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

(limited to 'arch/powerpc/kernel/process.c')

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e2f12cbcade9..a2dd3b1276ff 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -139,12 +139,16 @@ EXPORT_SYMBOL(__msr_check_and_clear);
 #ifdef CONFIG_PPC_FPU
 void __giveup_fpu(struct task_struct *tsk)
 {
+	unsigned long msr;
+
 	save_fpu(tsk);
-	tsk->thread.regs->msr &= ~MSR_FP;
+	msr = tsk->thread.regs->msr;
+	msr &= ~MSR_FP;
 #ifdef CONFIG_VSX
 	if (cpu_has_feature(CPU_FTR_VSX))
-		tsk->thread.regs->msr &= ~MSR_VSX;
+		msr &= ~MSR_VSX;
 #endif
+	tsk->thread.regs->msr = msr;
 }
 
 void giveup_fpu(struct task_struct *tsk)
@@ -219,12 +223,16 @@ static int restore_fp(struct task_struct *tsk) { return 0; }
 
 static void __giveup_altivec(struct task_struct *tsk)
 {
+	unsigned long msr;
+
 	save_altivec(tsk);
-	tsk->thread.regs->msr &= ~MSR_VEC;
+	msr = tsk->thread.regs->msr;
+	msr &= ~MSR_VEC;
 #ifdef CONFIG_VSX
 	if (cpu_has_feature(CPU_FTR_VSX))
-		tsk->thread.regs->msr &= ~MSR_VSX;
+		msr &= ~MSR_VSX;
 #endif
+	tsk->thread.regs->msr = msr;
 }
 
 void giveup_altivec(struct task_struct *tsk)
--
cgit v1.2.3
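A load hit store occurs when a load is issued while an earlier store to
the same address is still sitting in the store queue; the load has to
wait for the store to drain, or be rejected and replayed, which is
expensive on POWER. The fix generalizes to any code that does several
read-modify-write updates of the same memory location in close
succession. A minimal standalone sketch of the before/after shapes
follows; the MSR_* values and the has_vsx flag are illustrative
stand-ins, not the kernel's definitions:

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's MSR bit masks. */
    #define MSR_FP  (1UL << 13)
    #define MSR_VSX (1UL << 23)

    struct regs {
            unsigned long msr;
    };

    /*
     * Before: each "&=" is a separate load/modify/store of regs->msr.
     * In the kernel, the feature check between the two updates kept gcc
     * from combining them, so the second load hit the first store.
     */
    static void giveup_slow(struct regs *regs, int has_vsx)
    {
            regs->msr &= ~MSR_FP;
            if (has_vsx)
                    regs->msr &= ~MSR_VSX;
    }

    /*
     * After: one load into a local, both bit-clears done in a register,
     * one store at the end.
     */
    static void giveup_fast(struct regs *regs, int has_vsx)
    {
            unsigned long msr = regs->msr;

            msr &= ~MSR_FP;
            if (has_vsx)
                    msr &= ~MSR_VSX;
            regs->msr = msr;
    }

    int main(void)
    {
            struct regs r = { .msr = MSR_FP | MSR_VSX };

            giveup_slow(&r, 1);
            r.msr = MSR_FP | MSR_VSX;
            giveup_fast(&r, 1);
            printf("msr = %#lx\n", r.msr);
            return 0;
    }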
From 027dfac694fc27ef0273afb810d9b1f9da57d6e1 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Wed, 1 Jun 2016 16:34:37 +1000
Subject: powerpc: Various typo fixes

Signed-off-by: Andrea Gelmini
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/process.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/kernel/process.c')

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a2dd3b1276ff..c5c3ae2ef3c1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -802,7 +802,7 @@ static void tm_reclaim_thread(struct thread_struct *thr,
 	 * this state.
 	 * We do this using the current MSR, rather tracking it in
 	 * some specific thread_struct bit, as it has the additional
-	 * benifit of checking for a potential TM bad thing exception.
+	 * benefit of checking for a potential TM bad thing exception.
 	 */
 	if (!MSR_TM_SUSPENDED(mfmsr()))
 		return;
--
cgit v1.2.3

From b57bd2de8c6c9aa03f1b899edd6f5582cc8b5b08 Mon Sep 17 00:00:00 2001
From: Michael Neuling
Date: Thu, 9 Jun 2016 12:31:08 +1000
Subject: powerpc: Improve FSCR init and context switching

This fixes a few issues with FSCR init and switching.

In commit 152d523e6307 ("powerpc: Create context switch helpers
save_sprs() and restore_sprs()") we moved the setting of the FSCR
register from inside a CPU_FTR_ARCH_207S section to inside just a
CPU_FTR_DSCR section. Hence we are setting FSCR on POWER6/7 where the
FSCR doesn't exist. This is harmless but we shouldn't do it.

Also, we can simplify the FSCR context switch. We don't need to go
through the calculation involving dscr_inherit. We can just restore
what we saved last time.

We also set an initial value in INIT_THREAD, so that pid 1, which is
cloned from that, gets a sane value.

Based on a patch by Jack Miller.

Signed-off-by: Michael Neuling
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/process.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

(limited to 'arch/powerpc/kernel/process.c')

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index c5c3ae2ef3c1..6d0a831bc7d8 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1031,18 +1031,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
 #ifdef CONFIG_PPC_BOOK3S_64
 	if (cpu_has_feature(CPU_FTR_DSCR)) {
 		u64 dscr = get_paca()->dscr_default;
-		u64 fscr = old_thread->fscr & ~FSCR_DSCR;
-
-		if (new_thread->dscr_inherit) {
+		if (new_thread->dscr_inherit)
 			dscr = new_thread->dscr;
-			fscr |= FSCR_DSCR;
-		}
 
 		if (old_thread->dscr != dscr)
 			mtspr(SPRN_DSCR, dscr);
-
-		if (old_thread->fscr != fscr)
-			mtspr(SPRN_FSCR, fscr);
 	}
 
 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
@@ -1053,6 +1046,9 @@ static inline void restore_sprs(struct thread_struct *old_thread,
 		if (old_thread->ebbrr != new_thread->ebbrr)
 			mtspr(SPRN_EBBRR, new_thread->ebbrr);
 
+		if (old_thread->fscr != new_thread->fscr)
+			mtspr(SPRN_FSCR, new_thread->fscr);
+
 		if (old_thread->tar != new_thread->tar)
 			mtspr(SPRN_TAR, new_thread->tar);
 	}
--
cgit v1.2.3

From bd3ea317fddfd0f2044f94bed294b90c4bc8e69e Mon Sep 17 00:00:00 2001
From: Jack Miller
Date: Thu, 9 Jun 2016 12:31:09 +1000
Subject: powerpc: Load Monitor Register Support

This enables new registers, LMRR and LMSER, that can trigger an EBB in
userspace code when a monitored load (via the new ldmx instruction)
loads memory from a monitored space. This facility is controlled by a
new FSCR bit, LM.

This patch disables the FSCR LM control bit on task init and enables
that bit when a load-monitor facility-unavailable exception is taken
for using it. On context switch, this bit is then used to determine
whether the two relevant registers are saved and restored. This is
done lazily for performance reasons.

Signed-off-by: Jack Miller
Signed-off-by: Michael Neuling
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/process.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

(limited to 'arch/powerpc/kernel/process.c')

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 6d0a831bc7d8..ddceeb96e8fb 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1017,6 +1017,14 @@ static inline void save_sprs(struct thread_struct *t)
 		 */
 		t->tar = mfspr(SPRN_TAR);
 	}
+
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		/* Conditionally save Load Monitor registers, if enabled */
+		if (t->fscr & FSCR_LM) {
+			t->lmrr = mfspr(SPRN_LMRR);
+			t->lmser = mfspr(SPRN_LMSER);
+		}
+	}
 #endif
 }
 
@@ -1052,6 +1060,16 @@ static inline void restore_sprs(struct thread_struct *old_thread,
 		if (old_thread->tar != new_thread->tar)
 			mtspr(SPRN_TAR, new_thread->tar);
 	}
+
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		/* Conditionally restore Load Monitor registers, if enabled */
+		if (new_thread->fscr & FSCR_LM) {
+			if (old_thread->lmrr != new_thread->lmrr)
+				mtspr(SPRN_LMRR, new_thread->lmrr);
+			if (old_thread->lmser != new_thread->lmser)
+				mtspr(SPRN_LMSER, new_thread->lmser);
+		}
+	}
 #endif
 }
--
cgit v1.2.3
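The Load Monitor patch is also a tidy example of lazily context-switched
register state: the FSCR_LM bit doubles as an "ever used" flag, so tasks
that never take the facility-unavailable exception pay nothing on
context switch, and the old-versus-new comparison (the same
compare-before-mtspr idiom the FSCR patch above uses) skips the SPR
write when the register already holds the right value. A standalone
sketch of the idiom follows; the FSCR_LM value is illustrative, and
plain variables stand in for the mfspr()/mtspr() SPR accessors:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the FSCR Load Monitor enable bit. */
    #define FSCR_LM (1ULL << 11)

    struct thread {
            uint64_t fscr;
            uint64_t lmrr;
            uint64_t lmser;
    };

    /* Stand-ins for the CPU's SPRs; the kernel uses mfspr()/mtspr(). */
    static uint64_t hw_lmrr, hw_lmser;

    static void save_sprs(struct thread *t)
    {
            /* Save only if the task ever enabled the facility. */
            if (t->fscr & FSCR_LM) {
                    t->lmrr = hw_lmrr;
                    t->lmser = hw_lmser;
            }
    }

    static void restore_sprs(struct thread *old_thread,
                             struct thread *new_thread)
    {
            /*
             * Restore only for tasks that use the facility, and skip
             * the SPR write when the register already holds the
             * incoming task's value.
             */
            if (new_thread->fscr & FSCR_LM) {
                    if (old_thread->lmrr != new_thread->lmrr)
                            hw_lmrr = new_thread->lmrr;
                    if (old_thread->lmser != new_thread->lmser)
                            hw_lmser = new_thread->lmser;
            }
    }

    int main(void)
    {
            struct thread prev = { .fscr = FSCR_LM };
            struct thread next = { .fscr = FSCR_LM,
                                   .lmrr = 0x10000, .lmser = 1 };

            /* Context switch from prev to next. */
            save_sprs(&prev);
            restore_sprs(&prev, &next);
            printf("lmrr=%#jx lmser=%#jx\n",
                   (uintmax_t)hw_lmrr, (uintmax_t)hw_lmser);
            return 0;
    }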