Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/emulated_ops.h |  4 ++++
-rw-r--r--  arch/powerpc/include/asm/paca.h         |  1 +
-rw-r--r--  arch/powerpc/include/asm/uaccess.h      | 17 +++++++++++++++++
3 files changed, 22 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index f00e10e2a335..651e1354498e 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -55,6 +55,10 @@ extern struct ppc_emulated {
 	struct ppc_emulated_entry mfdscr;
 	struct ppc_emulated_entry mtdscr;
 	struct ppc_emulated_entry lq_stq;
+	struct ppc_emulated_entry lxvw4x;
+	struct ppc_emulated_entry lxvh8x;
+	struct ppc_emulated_entry lxvd2x;
+	struct ppc_emulated_entry lxvb16x;
 #endif
 } ppc_emulated;
 
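The four new ppc_emulated_entry fields plug into the existing emulated-instruction accounting. A minimal sketch of how an emulation path would bump one of the new counters, assuming only the PPC_WARN_EMULATED() helper that asm/emulated_ops.h already provides; the regs pointer would come from the interrupt handler:

	#include <asm/emulated_ops.h>
	#include <asm/ptrace.h>

	static void account_lxvd2x(struct pt_regs *regs)
	{
		/* Bumps ppc_emulated.lxvd2x and may warn/trace, per sysctl */
		PPC_WARN_EMULATED(lxvd2x, regs);
	}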
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 04b60af027ae..1e06310ccc09 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -210,6 +210,7 @@ struct paca_struct {
 	 */
 	u16 in_mce;
 	u8 hmi_event_available;		/* HMI event is available */
+	u8 hmi_p9_special_emu;		/* HMI P9 special emulation */
 #endif
 
 	/* Stuff for accurate time accounting */
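A sketch of how the new PACA flag might be consumed on the interrupt exit path; this is not the handler from this series, and p9_special_emu_fixup() is a hypothetical stand-in for the real emulation routine. local_paca itself is provided by asm/paca.h:

	#include <asm/paca.h>
	#include <asm/ptrace.h>

	static void hmi_exit_check(struct pt_regs *regs)
	{
		if (local_paca->hmi_p9_special_emu) {
			/* Clear the flag first so the path cannot re-enter */
			local_paca->hmi_p9_special_emu = 0;
			p9_special_emu_fixup(regs);	/* hypothetical helper */
		}
	}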
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 9c0e60ca1666..e34f15e727d9 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -173,6 +173,23 @@ do { \
 
 extern long __get_user_bad(void);
 
+/*
+ * This does an atomic 128-bit (16-byte) aligned load from userspace.
+ * It is up to the caller to do enable_kernel_altivec() before calling!
+ */
+#define __get_user_atomic_128_aligned(kaddr, uaddr, err)	\
+	__asm__ __volatile__(				\
+		"1:	lvx  0,0,%1	# get user\n"	\
+		"	stvx 0,0,%2	# put kernel\n"	\
+		"2:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"3:	li %0,%3\n"			\
+		"	b 2b\n"				\
+		".previous\n"				\
+		EX_TABLE(1b, 3b)			\
+		: "=r" (err)				\
+		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))
+
 #define __get_user_asm(x, addr, err, op)		\
 	__asm__ __volatile__(				\
 		"1:	"op" %1,0(%2)	# get_user\n"	\