path: root/arch/powerpc/lib/xor_vmx.c
author		Anton Blanchard <anton@samba.org>	2015-10-29 11:44:05 +1100
committer	Michael Ellerman <mpe@ellerman.id.au>	2015-12-01 13:52:25 +1100
commit		dc4fbba11e4661a6a77a1f89ba32f9082e6395ff (patch)
tree		567037dc8e93063615e558eda1c80f8a82154749 /arch/powerpc/lib/xor_vmx.c
parent		a0e72cf12b1a1f159b6822ed2e1e41893d996fc7 (diff)
powerpc: Create disable_kernel_{fp,altivec,vsx,spe}()
The enable_kernel_*() functions leave the relevant MSR bits enabled until we exit the kernel sometime later. Create disable versions that wrap the kernel use of FP, Altivec, VSX or SPE.

While we don't want to disable them normally for performance reasons (MSR writes are slow), they will be used for a debug boot option that does this and catches bad uses in other areas of the kernel.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
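The change to this file is mechanical: each xor_altivec_*() routine already brackets its vector loop with preempt_disable()/enable_kernel_altivec(), and after this patch it also drops Altivec before re-enabling preemption. A minimal sketch of the resulting pattern (xor_two_bufs() and its elided loop are illustrative, not part of the patch):

	#include <linux/preempt.h>
	#include <asm/switch_to.h>	/* enable_kernel_altivec() and friends */

	/* Illustrative reduced form of the xor_altivec_*() routines below;
	 * the real loops XOR "vector unsigned int" chunks, elided here. */
	static void xor_two_bufs(unsigned long bytes, unsigned long *v1,
				 unsigned long *v2)
	{
		preempt_disable();		/* vector state is per CPU */
		enable_kernel_altivec();	/* set MSR_VEC for kernel use */

		/* ... vectorised v1 ^= v2 over 'bytes' bytes ... */

		disable_kernel_altivec();	/* new: clear MSR_VEC immediately */
		preempt_enable();
	}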
Diffstat (limited to 'arch/powerpc/lib/xor_vmx.c')
-rw-r--r--	arch/powerpc/lib/xor_vmx.c	4 ++++
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c
index e905f7c2ea7b..07f49f1568e5 100644
--- a/arch/powerpc/lib/xor_vmx.c
+++ b/arch/powerpc/lib/xor_vmx.c
@@ -74,6 +74,7 @@ void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
 		v2 += 4;
 	} while (--lines > 0);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_2);
@@ -102,6 +103,7 @@ void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
 		v3 += 4;
 	} while (--lines > 0);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_3);
@@ -135,6 +137,7 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
 		v4 += 4;
 	} while (--lines > 0);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_4);
@@ -172,6 +175,7 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
 		v5 += 4;
 	} while (--lines > 0);
 
+	disable_kernel_altivec();
 	preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_5);
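The disable_kernel_altivec() calls added above are defined elsewhere in this patch, not in this file. A plausible sketch, assuming it builds on the msr_check_and_clear() helper introduced by the parent commit (a0e72cf12b1a):

	/* Sketch only: the real definition is added to
	 * arch/powerpc/include/asm/switch_to.h, outside this diff. */
	static inline void disable_kernel_altivec(void)
	{
		msr_check_and_clear(MSR_VEC);	/* drop MSR_VEC now, not at kernel exit */
	}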