author     Anton Blanchard <anton@samba.org>        2015-10-29 11:44:04 +1100
committer  Michael Ellerman <mpe@ellerman.id.au>    2015-12-01 13:52:25 +1100
commit     a0e72cf12b1a1f159b6822ed2e1e41893d996fc7 (patch)
tree       341c39ac34fad618accdb2a37aaba9af83a1a6d7
parent     1552cd703cf5a07caeb17ccd82f80e20a23b1707 (diff)
powerpc: Create msr_check_and_{set,clear}()
Create helper functions to set and clear MSR bits after first checking if they are already set. Grouping them will make it easy to avoid the MSR writes in a subsequent optimisation.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--  arch/powerpc/kernel/process.c  107
1 file changed, 52 insertions(+), 55 deletions(-)
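
The two helpers read the MSR once, compute the new value, and only perform the MSR write (the operation the later optimisation wants to avoid) when the value actually changes; the CONFIG_VSX special case keeps MSR_VSX in step with MSR_FP. Below is a minimal user-space sketch of that check-before-write pattern. It is not the kernel code: the global msr variable and the write counter are only stand-ins for mfmsr()/mtmsr_isync().

#include <stdio.h>

#define MSR_FP  (1UL << 13)              /* illustrative bit value */

static unsigned long msr;                /* stands in for the hardware MSR */
static int msr_writes;                   /* counts writes that were actually issued */

static void msr_check_and_set(unsigned long bits)
{
	unsigned long newmsr = msr | bits;

	if (newmsr != msr) {             /* only "write" when something changes */
		msr = newmsr;
		msr_writes++;
	}
}

static void msr_check_and_clear(unsigned long bits)
{
	unsigned long newmsr = msr & ~bits;

	if (newmsr != msr) {
		msr = newmsr;
		msr_writes++;
	}
}

int main(void)
{
	msr_check_and_set(MSR_FP);       /* 1st write: MSR_FP was clear */
	msr_check_and_set(MSR_FP);       /* skipped: MSR_FP already set */
	msr_check_and_clear(MSR_FP);     /* 2nd write: MSR_FP was set */
	printf("MSR writes performed: %d\n", msr_writes);   /* prints 2 */
	return 0;
}
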
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 0cb627662ded..5cdd35c0b026 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -87,23 +87,46 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-#ifdef CONFIG_PPC_FPU
-void giveup_fpu(struct task_struct *tsk)
+static void msr_check_and_set(unsigned long bits)
{
- u64 oldmsr = mfmsr();
- u64 newmsr;
+ unsigned long oldmsr = mfmsr();
+ unsigned long newmsr;
- check_if_tm_restore_required(tsk);
+ newmsr = oldmsr | bits;
- newmsr = oldmsr | MSR_FP;
#ifdef CONFIG_VSX
- if (cpu_has_feature(CPU_FTR_VSX))
+ if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
newmsr |= MSR_VSX;
#endif
+
if (oldmsr != newmsr)
mtmsr_isync(newmsr);
+}
+
+static void msr_check_and_clear(unsigned long bits)
+{
+ unsigned long oldmsr = mfmsr();
+ unsigned long newmsr;
+
+ newmsr = oldmsr & ~bits;
+
+#ifdef CONFIG_VSX
+ if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+ newmsr &= ~MSR_VSX;
+#endif
+ if (oldmsr != newmsr)
+ mtmsr_isync(newmsr);
+}
+
+#ifdef CONFIG_PPC_FPU
+void giveup_fpu(struct task_struct *tsk)
+{
+ check_if_tm_restore_required(tsk);
+
+ msr_check_and_set(MSR_FP);
__giveup_fpu(tsk);
+ msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);
@@ -144,30 +167,21 @@ void enable_kernel_fp(void)
{
WARN_ON(preemptible());
- if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
- giveup_fpu(current);
- } else {
- u64 oldmsr = mfmsr();
+ msr_check_and_set(MSR_FP);
- if (!(oldmsr & MSR_FP))
- mtmsr_isync(oldmsr | MSR_FP);
- }
+ if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
+ __giveup_fpu(current);
}
EXPORT_SYMBOL(enable_kernel_fp);
#ifdef CONFIG_ALTIVEC
void giveup_altivec(struct task_struct *tsk)
{
- u64 oldmsr = mfmsr();
- u64 newmsr;
-
check_if_tm_restore_required(tsk);
- newmsr = oldmsr | MSR_VEC;
- if (oldmsr != newmsr)
- mtmsr_isync(newmsr);
-
+ msr_check_and_set(MSR_VEC);
__giveup_altivec(tsk);
+ msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);
@@ -175,14 +189,10 @@ void enable_kernel_altivec(void)
{
WARN_ON(preemptible());
- if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
- giveup_altivec(current);
- } else {
- u64 oldmsr = mfmsr();
+ msr_check_and_set(MSR_VEC);
- if (!(oldmsr & MSR_VEC))
- mtmsr_isync(oldmsr | MSR_VEC);
- }
+ if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
+ __giveup_altivec(current);
}
EXPORT_SYMBOL(enable_kernel_altivec);
@@ -207,20 +217,15 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#ifdef CONFIG_VSX
void giveup_vsx(struct task_struct *tsk)
{
- u64 oldmsr = mfmsr();
- u64 newmsr;
-
check_if_tm_restore_required(tsk);
- newmsr = oldmsr | (MSR_FP|MSR_VEC|MSR_VSX);
- if (oldmsr != newmsr)
- mtmsr_isync(newmsr);
-
+ msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
if (tsk->thread.regs->msr & MSR_FP)
__giveup_fpu(tsk);
if (tsk->thread.regs->msr & MSR_VEC)
__giveup_altivec(tsk);
__giveup_vsx(tsk);
+ msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
EXPORT_SYMBOL(giveup_vsx);
@@ -228,13 +233,14 @@ void enable_kernel_vsx(void)
{
WARN_ON(preemptible());
- if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
- giveup_vsx(current);
- } else {
- u64 oldmsr = mfmsr();
+ msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
- if (!(oldmsr & MSR_VSX))
- mtmsr_isync(oldmsr | MSR_VSX);
+ if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+ if (current->thread.regs->msr & MSR_FP)
+ __giveup_fpu(current);
+ if (current->thread.regs->msr & MSR_VEC)
+ __giveup_altivec(current);
+ __giveup_vsx(current);
}
}
EXPORT_SYMBOL(enable_kernel_vsx);
@@ -256,16 +262,11 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
- u64 oldmsr = mfmsr();
- u64 newmsr;
-
check_if_tm_restore_required(tsk);
- newmsr = oldmsr | MSR_SPE;
- if (oldmsr != newmsr)
- mtmsr_isync(newmsr);
-
+ msr_check_and_set(MSR_SPE);
__giveup_spe(tsk);
+ msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);
@@ -273,14 +274,10 @@ void enable_kernel_spe(void)
{
WARN_ON(preemptible());
- if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
- giveup_spe(current);
- } else {
- u64 oldmsr = mfmsr();
+ msr_check_and_set(MSR_SPE);
- if (!(oldmsr & MSR_SPE))
- mtmsr_isync(oldmsr | MSR_SPE);
- }
+ if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+ __giveup_spe(current);
}
EXPORT_SYMBOL(enable_kernel_spe);
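
After this change, enable_kernel_fp() and friends unconditionally raise the relevant MSR bits via msr_check_and_set() and flush the current task's register state only when it actually owns the unit. A caller-side sketch of the usual convention follows; it is an assumption based on the WARN_ON(preemptible()) checks above, not part of this patch, and do_fp_work() is a hypothetical caller that only builds in a kernel context.

#include <linux/preempt.h>
#include <asm/switch_to.h>       /* enable_kernel_fp() on powerpc */

/* Hypothetical in-kernel user of floating point. */
static void do_fp_work(void)
{
	preempt_disable();       /* enable_kernel_fp() must not be preempted */
	enable_kernel_fp();
	/* ... use FP/VSX registers here ... */
	preempt_enable();
}
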