Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/cputable.h          | 126
-rw-r--r--  arch/powerpc/include/asm/dbell.h             |  30
-rw-r--r--  arch/powerpc/include/asm/debug.h             |  15
-rw-r--r--  arch/powerpc/include/asm/eeh.h               |   3
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h     | 138
-rw-r--r--  arch/powerpc/include/asm/firmware.h          |   3
-rw-r--r--  arch/powerpc/include/asm/hvcall.h            |   9
-rw-r--r--  arch/powerpc/include/asm/hw_breakpoint.h     |  35
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h    |   3
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h          |   1
-rw-r--r--  arch/powerpc/include/asm/machdep.h           |   4
-rw-r--r--  arch/powerpc/include/asm/mpc5121.h           |  17
-rw-r--r--  arch/powerpc/include/asm/paca.h              |  10
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h |  10
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h        |  13
-rw-r--r--  arch/powerpc/include/asm/ppc4xx_ocm.h        |  45
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h           | 110
-rw-r--r--  arch/powerpc/include/asm/processor.h         |  48
-rw-r--r--  arch/powerpc/include/asm/ps3.h               |   2
-rw-r--r--  arch/powerpc/include/asm/reg.h               |  43
-rw-r--r--  arch/powerpc/include/asm/sections.h          |   3
-rw-r--r--  arch/powerpc/include/asm/spinlock.h          |   2
-rw-r--r--  arch/powerpc/include/asm/tm.h                |  20
23 files changed, 579 insertions(+), 111 deletions(-)
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 76f81bd64f1d..fb3245e928ea 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -106,37 +106,37 @@ extern const char *powerpc_base_platform;
/* CPU kernel features */
/* Retain the 32b definitions all use bottom half of word */
-#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000000000000001)
-#define CPU_FTR_L2CR ASM_CONST(0x0000000000000002)
-#define CPU_FTR_SPEC7450 ASM_CONST(0x0000000000000004)
-#define CPU_FTR_ALTIVEC ASM_CONST(0x0000000000000008)
-#define CPU_FTR_TAU ASM_CONST(0x0000000000000010)
-#define CPU_FTR_CAN_DOZE ASM_CONST(0x0000000000000020)
-#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
-#define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080)
-#define CPU_FTR_601 ASM_CONST(0x0000000000000100)
-#define CPU_FTR_DBELL ASM_CONST(0x0000000000000200)
-#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
-#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
-#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
-#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
-#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
-#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000)
-#define CPU_FTR_476_DD2 ASM_CONST(0x0000000000010000)
-#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
-#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
-#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x0000000000080000)
-#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
-#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000)
-#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000)
-#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x0000000000800000)
-#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x0000000001000000)
-#define CPU_FTR_SPE ASM_CONST(0x0000000002000000)
-#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000)
-#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000)
-#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000)
-#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000)
-#define CPU_FTR_EMB_HV ASM_CONST(0x0000000040000000)
+#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x00000001)
+#define CPU_FTR_L2CR ASM_CONST(0x00000002)
+#define CPU_FTR_SPEC7450 ASM_CONST(0x00000004)
+#define CPU_FTR_ALTIVEC ASM_CONST(0x00000008)
+#define CPU_FTR_TAU ASM_CONST(0x00000010)
+#define CPU_FTR_CAN_DOZE ASM_CONST(0x00000020)
+#define CPU_FTR_USE_TB ASM_CONST(0x00000040)
+#define CPU_FTR_L2CSR ASM_CONST(0x00000080)
+#define CPU_FTR_601 ASM_CONST(0x00000100)
+#define CPU_FTR_DBELL ASM_CONST(0x00000200)
+#define CPU_FTR_CAN_NAP ASM_CONST(0x00000400)
+#define CPU_FTR_L3CR ASM_CONST(0x00000800)
+#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x00001000)
+#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x00002000)
+#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x00004000)
+#define CPU_FTR_NO_DPM ASM_CONST(0x00008000)
+#define CPU_FTR_476_DD2 ASM_CONST(0x00010000)
+#define CPU_FTR_NEED_COHERENT ASM_CONST(0x00020000)
+#define CPU_FTR_NO_BTIC ASM_CONST(0x00040000)
+#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x00080000)
+#define CPU_FTR_NODSISRALIGN ASM_CONST(0x00100000)
+#define CPU_FTR_PPC_LE ASM_CONST(0x00200000)
+#define CPU_FTR_REAL_LE ASM_CONST(0x00400000)
+#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x00800000)
+#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x01000000)
+#define CPU_FTR_SPE ASM_CONST(0x02000000)
+#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x04000000)
+#define CPU_FTR_LWSYNC ASM_CONST(0x08000000)
+#define CPU_FTR_NOEXECUTE ASM_CONST(0x10000000)
+#define CPU_FTR_INDEXED_DCR ASM_CONST(0x20000000)
+#define CPU_FTR_EMB_HV ASM_CONST(0x40000000)
/*
* Add the 64-bit processor unique features in the top half of the word;
@@ -148,29 +148,33 @@ extern const char *powerpc_base_platform;
#define LONG_ASM_CONST(x) 0
#endif
-#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000200000000)
-#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000400000000)
-#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000800000000)
-#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000)
-#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000)
-#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000)
-#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000)
-#define CPU_FTR_SMT LONG_ASM_CONST(0x0000010000000000)
-#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000)
-#define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000)
-#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000)
-#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000)
-#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000)
-#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000)
-#define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000)
-#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000)
-#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000)
-#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0100000000000000)
-#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000)
-#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000)
-#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000)
-#define CPU_FTR_ICSWX LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x2000000000000000)
+#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000100000000)
+#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000200000000)
+#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000400000000)
+#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000000800000000)
+#define CPU_FTR_IABR LONG_ASM_CONST(0x0000001000000000)
+#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000002000000000)
+#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000004000000000)
+#define CPU_FTR_SMT LONG_ASM_CONST(0x0000008000000000)
+#define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000010000000000)
+#define CPU_FTR_PURR LONG_ASM_CONST(0x0000020000000000)
+#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000040000000000)
+#define CPU_FTR_SPURR LONG_ASM_CONST(0x0000080000000000)
+#define CPU_FTR_DSCR LONG_ASM_CONST(0x0000100000000000)
+#define CPU_FTR_VSX LONG_ASM_CONST(0x0000200000000000)
+#define CPU_FTR_SAO LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000800000000000)
+#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0001000000000000)
+#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0002000000000000)
+#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0004000000000000)
+#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0008000000000000)
+#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0010000000000000)
+#define CPU_FTR_ICSWX LONG_ASM_CONST(0x0020000000000000)
+#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x0040000000000000)
+#define CPU_FTR_TM LONG_ASM_CONST(0x0080000000000000)
+#define CPU_FTR_BCTAR LONG_ASM_CONST(0x0100000000000000)
+#define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000)
+#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
#ifndef __ASSEMBLY__
@@ -216,6 +220,13 @@ extern const char *powerpc_base_platform;
#define PPC_FEATURE_HAS_EFP_DOUBLE_COMP 0
#endif
+/* We only set the TM feature if the kernel was compiled with TM support */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+#define CPU_FTR_TM_COMP CPU_FTR_TM
+#else
+#define CPU_FTR_TM_COMP 0
+#endif
+
/* We need to mark all pages as being coherent if we're SMP or we have a
* 74[45]x and an MPC107 host bridge. Also 83xx and PowerQUICC II
* require it for PCI "streaming/prefetch" to work properly.
@@ -400,7 +411,8 @@ extern const char *powerpc_base_platform;
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY)
+ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \
+ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR)
#define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -408,7 +420,9 @@ extern const char *powerpc_base_platform;
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY)
+ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | CPU_FTR_BCTAR | \
+ CPU_FTR_TM_COMP)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
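
The cputable.h hunks pack the 32-bit feature flags into the low word, renumber the 64-bit flags contiguously, and add CPU_FTR_TM, CPU_FTR_BCTAR, CPU_FTR_HAS_PPR and CPU_FTR_DAWR; CPU_FTR_TM_COMP collapses to 0 when CONFIG_PPC_TRANSACTIONAL_MEM is off, so the POWER8 mask only carries the TM bit on TM-enabled builds. As a rough illustration (not part of the patch), runtime code tests such bits with the existing cpu_has_feature() helper; the probe function below is hypothetical:

    #include <linux/printk.h>
    #include <asm/cputable.h>

    /* Hypothetical probe: report which of the new POWER8 facilities the
     * running CPU advertises via its feature mask. */
    static void report_new_power8_features(void)
    {
            if (cpu_has_feature(CPU_FTR_TM))        /* transactional memory */
                    pr_info("TM available\n");
            if (cpu_has_feature(CPU_FTR_DAWR))      /* data address watchpoint reg */
                    pr_info("DAWR available\n");
            if (cpu_has_feature(CPU_FTR_HAS_PPR))   /* SMT priority register */
                    pr_info("PPR save/restore in exception paths\n");
    }
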
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 607e4eeeb694..5fa6b20eba10 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -28,8 +28,36 @@ enum ppc_dbell {
PPC_G_DBELL = 2, /* guest doorbell */
PPC_G_DBELL_CRIT = 3, /* guest critical doorbell */
PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */
+ PPC_DBELL_SERVER = 5, /* doorbell on server */
};
+#ifdef CONFIG_PPC_BOOK3S
+
+#define PPC_DBELL_MSGTYPE PPC_DBELL_SERVER
+#define SPRN_DOORBELL_CPUTAG SPRN_TIR
+#define PPC_DBELL_TAG_MASK 0x7f
+
+static inline void _ppc_msgsnd(u32 msg)
+{
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ else
+ __asm__ __volatile__ (PPC_MSGSNDP(%0) : : "r" (msg));
+}
+
+#else /* CONFIG_PPC_BOOK3S */
+
+#define PPC_DBELL_MSGTYPE PPC_DBELL
+#define SPRN_DOORBELL_CPUTAG SPRN_PIR
+#define PPC_DBELL_TAG_MASK 0x3fff
+
+static inline void _ppc_msgsnd(u32 msg)
+{
+ __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+}
+
+#endif /* CONFIG_PPC_BOOK3S */
+
extern void doorbell_cause_ipi(int cpu, unsigned long data);
extern void doorbell_exception(struct pt_regs *regs);
extern void doorbell_setup_this_cpu(void);
@@ -39,7 +67,7 @@ static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
u32 msg = PPC_DBELL_TYPE(type) | (flags & PPC_DBELL_MSG_BRDCAST) |
(tag & 0x07ffffff);
- __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+ _ppc_msgsnd(msg);
}
#endif /* _ASM_POWERPC_DBELL_H */
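
With PPC_DBELL_SERVER and the _ppc_msgsnd() wrapper above, ppc_msgsnd() emits msgsnd in hypervisor mode and msgsndp otherwise on Book3S, while embedded keeps the old behaviour. A minimal sketch of a sender, assuming the destination thread's tag was read from SPRN_DOORBELL_CPUTAG beforehand (the function name is illustrative):

    #include <asm/dbell.h>

    /* Hypothetical: ring the doorbell of the hardware thread whose tag is
     * 'thread_tag'; PPC_DBELL_MSGTYPE and PPC_DBELL_TAG_MASK come from the
     * Book3S vs. embedded selection above. */
    static void example_send_doorbell(u32 thread_tag)
    {
            ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, thread_tag & PPC_DBELL_TAG_MASK);
    }
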
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index 32de2577bb6d..d2516308ed1e 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -4,6 +4,8 @@
#ifndef _ASM_POWERPC_DEBUG_H
#define _ASM_POWERPC_DEBUG_H
+#include <asm/hw_breakpoint.h>
+
struct pt_regs;
extern struct dentry *powerpc_debugfs_root;
@@ -15,7 +17,7 @@ extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
-extern int (*__debugger_dabr_match)(struct pt_regs *regs);
+extern int (*__debugger_break_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);
#define DEBUGGER_BOILERPLATE(__NAME) \
@@ -31,7 +33,7 @@ DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
-DEBUGGER_BOILERPLATE(debugger_dabr_match)
+DEBUGGER_BOILERPLATE(debugger_break_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)
#else
@@ -40,17 +42,18 @@ static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
-static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_break_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif
-extern int set_dabr(unsigned long dabr, unsigned long dabrx);
+int set_breakpoint(struct arch_hw_breakpoint *brk);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
extern void do_send_trap(struct pt_regs *regs, unsigned long address,
unsigned long error_code, int signal_code, int brkpt);
#else
-extern void do_dabr(struct pt_regs *regs, unsigned long address,
- unsigned long error_code);
+
+extern void do_break(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code);
#endif
#endif /* _ASM_POWERPC_DEBUG_H */
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index a8fb03e22770..a80e32b46c11 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -201,6 +201,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev);
void __init eeh_addr_cache_build(void);
void eeh_add_device_tree_early(struct device_node *);
void eeh_add_device_tree_late(struct pci_bus *);
+void eeh_add_sysfs_files(struct pci_bus *);
void eeh_remove_bus_device(struct pci_dev *, int);
/**
@@ -240,6 +241,8 @@ static inline void eeh_add_device_tree_early(struct device_node *dn) { }
static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
+static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
+
static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { }
static inline void eeh_lock(void) { }
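
eeh_add_sysfs_files() is added alongside eeh_add_device_tree_late(), with an empty stub when EEH is disabled. A sketch of the intended calling order from a PCI hotplug path (illustrative, not the patch's actual caller):

    #include <linux/pci.h>
    #include <asm/eeh.h>

    /* Hypothetical hotplug helper: finish EEH setup once the devices on the
     * bus have been added, then expose the per-device sysfs files. */
    static void example_eeh_finish_bus(struct pci_bus *bus)
    {
            eeh_add_device_tree_late(bus);
            eeh_add_sysfs_files(bus);
    }
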
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index ad708dda3ba3..05e6d2ee1db9 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -47,9 +47,10 @@
#define EX_R3 64
#define EX_LR 72
#define EX_CFAR 80
+#define EX_PPR 88 /* SMT thread status register (priority) */
#ifdef CONFIG_RELOCATABLE
-#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
ld r12,PACAKBASE(r13); /* get high part of &label */ \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label); \
@@ -60,13 +61,15 @@
blr;
#else
/* If not relocatable, we can jump directly -- and save messing with LR */
-#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
li r10,MSR_RI; \
mtmsrd r10,1; /* Set RI (EE=0) */ \
b label;
#endif
+#define EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+ __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
/*
* As EXCEPTION_PROLOG_PSERIES(), except we've already got relocation on
@@ -74,6 +77,7 @@
* case EXCEPTION_RELON_PROLOG_PSERIES_1 will be using lr.
*/
#define EXCEPTION_RELON_PROLOG_PSERIES(area, label, h, extra, vec) \
+ EXCEPTION_PROLOG_0(area); \
EXCEPTION_PROLOG_1(area, extra, vec); \
EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)
@@ -107,14 +111,59 @@
#define RESTORE_LR(reg, area)
#endif
-#define __EXCEPTION_PROLOG_1(area, extra, vec) \
+/*
+ * PPR save/restore macros used in exceptions_64s.S
+ * Used for P7 or later processors
+ */
+#define SAVE_PPR(area, ra, rb) \
+BEGIN_FTR_SECTION_NESTED(940) \
+ ld ra,PACACURRENT(r13); \
+ ld rb,area+EX_PPR(r13); /* Read PPR from paca */ \
+ std rb,TASKTHREADPPR(ra); \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
+
+#define RESTORE_PPR_PACA(area, ra) \
+BEGIN_FTR_SECTION_NESTED(941) \
+ ld ra,area+EX_PPR(r13); \
+ mtspr SPRN_PPR,ra; \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
+
+/*
+ * Increase the priority on systems where PPR save/restore is not
+ * implemented/supported.
+ */
+#define HMT_MEDIUM_PPR_DISCARD \
+BEGIN_FTR_SECTION_NESTED(942) \
+ HMT_MEDIUM; \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,0,942) /*non P7*/
+
+/*
+ * Get an SPR into a register if the CPU has the given feature
+ */
+#define OPT_GET_SPR(ra, spr, ftr) \
+BEGIN_FTR_SECTION_NESTED(943) \
+ mfspr ra,spr; \
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+/*
+ * Save a register to the PACA if the CPU has the given feature
+ */
+#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr) \
+BEGIN_FTR_SECTION_NESTED(943) \
+ std ra,offset(r13); \
+END_FTR_SECTION_NESTED(ftr,ftr,943)
+
+#define EXCEPTION_PROLOG_0(area) \
GET_PACA(r13); \
- std r9,area+EX_R9(r13); /* save r9 - r12 */ \
- std r10,area+EX_R10(r13); \
- BEGIN_FTR_SECTION_NESTED(66); \
- mfspr r10,SPRN_CFAR; \
- std r10,area+EX_CFAR(r13); \
- END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
+ std r9,area+EX_R9(r13); /* save r9 */ \
+ OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \
+ HMT_MEDIUM; \
+ std r10,area+EX_R10(r13); /* save r10 - r12 */ \
+ OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
+
+#define __EXCEPTION_PROLOG_1(area, extra, vec) \
+ OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
+ OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
SAVE_LR(r10, area); \
mfcr r9; \
extra(vec); \
@@ -139,6 +188,7 @@
__EXCEPTION_PROLOG_PSERIES_1(label, h)
#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \
+ EXCEPTION_PROLOG_0(area); \
EXCEPTION_PROLOG_1(area, extra, vec); \
EXCEPTION_PROLOG_PSERIES_1(label, h);
@@ -149,10 +199,14 @@
#define __KVM_HANDLER(area, h, n) \
do_kvm_##n: \
+ BEGIN_FTR_SECTION_NESTED(947) \
+ ld r10,area+EX_CFAR(r13); \
+ std r10,HSTATE_CFAR(r13); \
+ END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947); \
ld r10,area+EX_R10(r13); \
- stw r9,HSTATE_SCRATCH1(r13); \
+ stw r9,HSTATE_SCRATCH1(r13); \
ld r9,area+EX_R9(r13); \
- std r12,HSTATE_SCRATCH0(r13); \
+ std r12,HSTATE_SCRATCH0(r13); \
li r12,n; \
b kvmppc_interrupt
@@ -224,8 +278,10 @@ do_kvm_##n: \
std r10,0(r1); /* make stack chain pointer */ \
std r0,GPR0(r1); /* save r0 in stackframe */ \
std r10,GPR1(r1); /* save r1 in stackframe */ \
+ beq 4f; /* if from kernel mode */ \
ACCOUNT_CPU_USER_ENTRY(r9, r10); \
- std r2,GPR2(r1); /* save r2 in stackframe */ \
+ SAVE_PPR(area, r9, r10); \
+4: std r2,GPR2(r1); /* save r2 in stackframe */ \
SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
@@ -266,45 +322,74 @@ do_kvm_##n: \
. = loc; \
.globl label##_pSeries; \
label##_pSeries: \
- HMT_MEDIUM; \
+ HMT_MEDIUM_PPR_DISCARD; \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
EXC_STD, KVMTEST_PR, vec)
+/* Version of above for when we have to branch out-of-line */
+#define STD_EXCEPTION_PSERIES_OOL(vec, label) \
+ .globl label##_pSeries; \
+label##_pSeries: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD)
+
#define STD_EXCEPTION_HV(loc, vec, label) \
. = loc; \
.globl label##_hv; \
label##_hv: \
- HMT_MEDIUM; \
+ HMT_MEDIUM_PPR_DISCARD; \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
EXC_HV, KVMTEST, vec)
+/* Version of above for when we have to branch out-of-line */
+#define STD_EXCEPTION_HV_OOL(vec, label) \
+ .globl label##_hv; \
+label##_hv: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV)
+
#define STD_RELON_EXCEPTION_PSERIES(loc, vec, label) \
. = loc; \
.globl label##_relon_pSeries; \
label##_relon_pSeries: \
- HMT_MEDIUM; \
+ HMT_MEDIUM_PPR_DISCARD; \
/* No guest interrupts come through here */ \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
EXC_STD, KVMTEST_PR, vec)
+#define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label) \
+ .globl label##_relon_pSeries; \
+label##_relon_pSeries: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec); \
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD)
+
#define STD_RELON_EXCEPTION_HV(loc, vec, label) \
. = loc; \
.globl label##_relon_hv; \
label##_relon_hv: \
- HMT_MEDIUM; \
+ HMT_MEDIUM_PPR_DISCARD; \
/* No guest interrupts come through here */ \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
EXC_HV, KVMTEST, vec)
+#define STD_RELON_EXCEPTION_HV_OOL(vec, label) \
+ .globl label##_relon_hv; \
+label##_relon_hv: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec); \
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV)
+
/* This associates vector numbers with bits in paca->irq_happened */
#define SOFTEN_VALUE_0x500 PACA_IRQ_EE
#define SOFTEN_VALUE_0x502 PACA_IRQ_EE
#define SOFTEN_VALUE_0x900 PACA_IRQ_DEC
#define SOFTEN_VALUE_0x982 PACA_IRQ_DEC
+#define SOFTEN_VALUE_0xa00 PACA_IRQ_DBELL
+#define SOFTEN_VALUE_0xe80 PACA_IRQ_DBELL
+#define SOFTEN_VALUE_0xe82 PACA_IRQ_DBELL
#define __SOFTEN_TEST(h, vec) \
lbz r10,PACASOFTIRQEN(r13); \
@@ -329,10 +414,12 @@ label##_relon_hv: \
#define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec)
#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
- HMT_MEDIUM; \
+ HMT_MEDIUM_PPR_DISCARD; \
SET_SCRATCH0(r13); /* save r13 */ \
- __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
+ EXCEPTION_PROLOG_0(PACA_EXGEN); \
+ __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
EXCEPTION_PROLOG_PSERIES_1(label##_common, h);
+
#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
__MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)
@@ -350,9 +437,16 @@ label##_hv: \
_MASKABLE_EXCEPTION_PSERIES(vec, label, \
EXC_HV, SOFTEN_TEST_HV)
+#define MASKABLE_EXCEPTION_HV_OOL(vec, label) \
+ .globl label##_hv; \
+label##_hv: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
+
#define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
- HMT_MEDIUM; \
+ HMT_MEDIUM_PPR_DISCARD; \
SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_PROLOG_0(PACA_EXGEN); \
__EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, h);
#define _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
@@ -372,6 +466,12 @@ label##_relon_hv: \
_MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
EXC_HV, SOFTEN_NOTEST_HV)
+#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label) \
+ .globl label##_relon_hv; \
+label##_relon_hv: \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
+
/*
* Our exception common code can be passed various "additions"
* to specify the behaviour of interrupts, whether to kick the
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 973cc3be011b..097dee57a7a9 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -50,6 +50,7 @@
#define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000)
#define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000)
#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
+#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
#ifndef __ASSEMBLY__
@@ -64,7 +65,7 @@ enum {
FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
- FW_FEATURE_SET_MODE,
+ FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY,
FW_FEATURE_PSERIES_ALWAYS = 0,
FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
FW_FEATURE_POWERNV_ALWAYS = 0,
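
FW_FEATURE_BEST_ENERGY joins the pSeries "possible" mask. Call sites would normally gate on it with firmware_has_feature(), which already lives in this header; the init function below is only an illustration:

    #include <linux/errno.h>
    #include <asm/firmware.h>

    /* Hypothetical: bail out early unless the hypervisor advertises the
     * best-energy facility. */
    static int example_energy_init(void)
    {
            if (!firmware_has_feature(FW_FEATURE_BEST_ENERGY))
                    return -ENODEV;
            /* ... register the energy-management interface ... */
            return 0;
    }
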
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 0975e5c0bb19..4bc2c3dad6ad 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -395,6 +395,15 @@ static inline unsigned long cmo_get_page_size(void)
{
return CMO_PageSize;
}
+
+extern long pSeries_enable_reloc_on_exc(void);
+extern long pSeries_disable_reloc_on_exc(void);
+
+#else
+
+#define pSeries_enable_reloc_on_exc() do {} while (0)
+#define pSeries_disable_reloc_on_exc() do {} while (0)
+
#endif /* CONFIG_PPC_PSERIES */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 423424599dad..eb0f4ac75c4c 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -24,16 +24,30 @@
#define _PPC_BOOK3S_64_HW_BREAKPOINT_H
#ifdef __KERNEL__
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-
struct arch_hw_breakpoint {
unsigned long address;
- unsigned long dabrx;
- int type;
- u8 len; /* length of the target data symbol */
- bool extraneous_interrupt;
+ u16 type;
+ u16 len; /* length of the target data symbol */
};
+/* Note: Don't change the first 6 bits below as they are in the same order
+ * as the dabr and dabrx.
+ */
+#define HW_BRK_TYPE_READ 0x01
+#define HW_BRK_TYPE_WRITE 0x02
+#define HW_BRK_TYPE_TRANSLATE 0x04
+#define HW_BRK_TYPE_USER 0x08
+#define HW_BRK_TYPE_KERNEL 0x10
+#define HW_BRK_TYPE_HYP 0x20
+#define HW_BRK_TYPE_EXTRANEOUS_IRQ 0x80
+
+/* bits that overlap with the bottom 3 bits of the dabr */
+#define HW_BRK_TYPE_RDWR (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)
+#define HW_BRK_TYPE_DABR (HW_BRK_TYPE_RDWR | HW_BRK_TYPE_TRANSLATE)
+#define HW_BRK_TYPE_PRIV_ALL (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \
+ HW_BRK_TYPE_HYP)
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <linux/kdebug.h>
#include <asm/reg.h>
#include <asm/debug.h>
@@ -43,8 +57,6 @@ struct pmu;
struct perf_sample_data;
#define HW_BREAKPOINT_ALIGN 0x7
-/* Maximum permissible length of any HW Breakpoint */
-#define HW_BREAKPOINT_LEN 0x8
extern int hw_breakpoint_slots(int type);
extern int arch_bp_generic_fields(int type, int *gen_bp_type);
@@ -62,7 +74,12 @@ extern void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs);
static inline void hw_breakpoint_disable(void)
{
- set_dabr(0, 0);
+ struct arch_hw_breakpoint brk;
+
+ brk.address = 0;
+ brk.type = 0;
+ brk.len = 0;
+ set_breakpoint(&brk);
}
extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
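
The breakpoint rework replaces the raw dabr/dabrx pair with struct arch_hw_breakpoint plus the HW_BRK_TYPE_* flags, and hw_breakpoint_disable() now funnels through set_breakpoint(). A sketch of arming a translated, user-mode write watchpoint with the new types (a minimal illustration under these definitions, not code from the patch):

    #include <asm/hw_breakpoint.h>
    #include <asm/debug.h>

    /* Hypothetical: watch 8 bytes at 'addr' for user-mode writes. */
    static int example_arm_write_watchpoint(unsigned long addr)
    {
            struct arch_hw_breakpoint brk = {
                    .address = addr & ~(unsigned long)HW_BREAKPOINT_ALIGN,
                    .type    = HW_BRK_TYPE_WRITE | HW_BRK_TYPE_TRANSLATE |
                               HW_BRK_TYPE_USER,
                    .len     = 8,
            };

            return set_breakpoint(&brk);
    }
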
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 88609b23b775..cdc3d2717cc6 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -93,6 +93,9 @@ struct kvmppc_host_state {
u64 host_dscr;
u64 dec_expires;
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ u64 cfar;
+#endif
};
struct kvmppc_book3s_shadow_vcpu {
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index ca9bf459db6a..03d7beae89a0 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -440,6 +440,7 @@ struct kvm_vcpu_arch {
ulong uamor;
u32 ctrl;
ulong dabr;
+ ulong cfar;
#endif
u32 vrsave; /* also USPRG0 */
u32 mmucr;
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 19d9d96eb8d3..3d6b4100dac1 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -180,6 +180,10 @@ struct machdep_calls {
int (*set_dabr)(unsigned long dabr,
unsigned long dabrx);
+ /* Set DAWR for this platform, leave empty for default implementation */
+ int (*set_dawr)(unsigned long dawr,
+ unsigned long dawrx);
+
#ifdef CONFIG_PPC32 /* XXX for now */
/* A general init function, called by ppc_init in init/main.c.
May be NULL. */
diff --git a/arch/powerpc/include/asm/mpc5121.h b/arch/powerpc/include/asm/mpc5121.h
index 8c0ab2ca689c..885c040d6194 100644
--- a/arch/powerpc/include/asm/mpc5121.h
+++ b/arch/powerpc/include/asm/mpc5121.h
@@ -53,4 +53,21 @@ struct mpc512x_ccm {
u32 m4ccr; /* MSCAN4 CCR */
u8 res[0x98]; /* Reserved */
};
+
+/*
+ * LPC Module
+ */
+struct mpc512x_lpc {
+ u32 cs_cfg[8]; /* CS config */
+ u32 cs_ctrl; /* CS Control Register */
+ u32 cs_status; /* CS Status Register */
+ u32 burst_ctrl; /* CS Burst Control Register */
+ u32 deadcycle_ctrl; /* CS Deadcycle Control Register */
+ u32 holdcycle_ctrl; /* CS Holdcycle Control Register */
+ u32 alt; /* Address Latch Timing Register */
+};
+
+int mpc512x_cs_config(unsigned int cs, u32 val);
+int __init mpc5121_clk_init(void);
+
#endif /* __ASM_POWERPC_MPC5121_H__ */
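
The new mpc512x_lpc layout and mpc512x_cs_config() export let board code program the LocalPlus chip-select registers. A short usage sketch (the chip-select number and configuration value are placeholders that would normally come from the device tree or board data):

    #include <asm/mpc5121.h>

    /* Hypothetical board-setup fragment: program LPC chip select 1. */
    static int example_lpc_cs_setup(u32 board_cs_cfg)
    {
            return mpc512x_cs_config(1, board_cs_cfg);
    }
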
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index e9e7a6999bb8..77c91e74b612 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -93,9 +93,9 @@ struct paca_struct {
* Now, starting in cacheline 2, the exception save areas
*/
/* used for most interrupts/exceptions */
- u64 exgen[11] __attribute__((aligned(0x80)));
- u64 exmc[11]; /* used for machine checks */
- u64 exslb[11]; /* used for SLB/segment table misses
+ u64 exgen[12] __attribute__((aligned(0x80)));
+ u64 exmc[12]; /* used for machine checks */
+ u64 exslb[12]; /* used for SLB/segment table misses
* on the linear mapping */
/* SLB related definitions */
u16 vmalloc_sllp;
@@ -137,6 +137,9 @@ struct paca_struct {
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
u8 nap_state_lost; /* NV GPR values lost in power7_idle */
u64 sprg3; /* Saved user-visible sprg */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u64 tm_scratch; /* TM scratch area for reclaim */
+#endif
#ifdef CONFIG_PPC_POWERNV
/* Pointer to OPAL machine check event structure set by the
@@ -167,7 +170,6 @@ struct paca_struct {
};
extern struct paca_struct *paca;
-extern __initdata struct paca_struct boot_paca;
extern void initialise_paca(struct paca_struct *new_paca, int cpu);
extern void setup_paca(struct paca_struct *new_paca);
extern void allocate_pacas(void);
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 136bba62efa4..d0aec72722e9 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -47,11 +47,11 @@ struct power_pmu {
/*
* Values for power_pmu.flags
*/
-#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */
-#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */
-#define PPMU_NO_SIPR 4 /* no SIPR/HV in MMCRA at all */
-#define PPMU_NO_CONT_SAMPLING 8 /* no continuous sampling */
-#define PPMU_SIAR_VALID 16 /* Processor has SIAR Valid bit */
+#define PPMU_LIMITED_PMC5_6 0x00000001 /* PMC5/6 have limited function */
+#define PPMU_ALT_SIPR 0x00000002 /* uses alternate posn for SIPR/HV */
+#define PPMU_NO_SIPR 0x00000004 /* no SIPR/HV in MMCRA at all */
+#define PPMU_NO_CONT_SAMPLING 0x00000008 /* no continuous sampling */
+#define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */
/*
* Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 51fb00a20d7e..8752bc8e34a3 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -100,6 +100,7 @@
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
#define PPC_INST_MSGSND 0x7c00019c
+#define PPC_INST_MSGSNDP 0x7c00011c
#define PPC_INST_NOP 0x60000000
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
@@ -128,6 +129,9 @@
#define PPC_INST_TLBSRX_DOT 0x7c0006a5
#define PPC_INST_XXLOR 0xf0000510
#define PPC_INST_XVCPSGNDP 0xf0000780
+#define PPC_INST_TRECHKPT 0x7c0007dd
+#define PPC_INST_TRECLAIM 0x7c00075d
+#define PPC_INST_TABORT 0x7c00071d
#define PPC_INST_NAP 0x4c000364
#define PPC_INST_SLEEP 0x4c0003a4
@@ -227,6 +231,8 @@
___PPC_RB(b) | __PPC_EH(eh))
#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
___PPC_RB(b))
+#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \
+ ___PPC_RB(b))
#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
__PPC_RA(a) | __PPC_RS(s))
#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \
@@ -291,4 +297,11 @@
#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
+/* Transactional memory instructions */
+#define TRECHKPT stringify_in_c(.long PPC_INST_TRECHKPT)
+#define TRECLAIM(r) stringify_in_c(.long PPC_INST_TRECLAIM \
+ | __PPC_RA(r))
+#define TABORT(r) stringify_in_c(.long PPC_INST_TABORT \
+ | __PPC_RA(r))
+
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
diff --git a/arch/powerpc/include/asm/ppc4xx_ocm.h b/arch/powerpc/include/asm/ppc4xx_ocm.h
new file mode 100644
index 000000000000..6ce904605538
--- /dev/null
+++ b/arch/powerpc/include/asm/ppc4xx_ocm.h
@@ -0,0 +1,45 @@
+/*
+ * PowerPC 4xx OCM memory allocation support
+ *
+ * (C) Copyright 2009, Applied Micro Circuits Corporation
+ * Victor Gallardo (vgallardo@amcc.com)
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __ASM_POWERPC_PPC4XX_OCM_H__
+#define __ASM_POWERPC_PPC4XX_OCM_H__
+
+#define PPC4XX_OCM_NON_CACHED 0
+#define PPC4XX_OCM_CACHED 1
+
+#if defined(CONFIG_PPC4xx_OCM)
+
+void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align,
+ int flags, const char *owner);
+void ppc4xx_ocm_free(const void *virt);
+
+#else
+
+#define ppc4xx_ocm_alloc(phys, size, align, flags, owner) NULL
+#define ppc4xx_ocm_free(addr) ((void)0)
+
+#endif /* CONFIG_PPC4xx_OCM */
+
+#endif /* __ASM_POWERPC_PPC4XX_OCM_H__ */
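
ppc4xx_ocm.h gives 4xx drivers an allocator for on-chip memory; with CONFIG_PPC4xx_OCM disabled the calls degrade to a NULL return and a no-op. A small usage sketch (buffer size, alignment and owner string are arbitrary examples):

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <asm/ppc4xx_ocm.h>

    static void *ocm_buf;
    static phys_addr_t ocm_phys;

    /* Hypothetical driver fragment: grab 4KB of non-cached OCM. */
    static int example_ocm_setup(void)
    {
            ocm_buf = ppc4xx_ocm_alloc(&ocm_phys, 4096, 4096,
                                       PPC4XX_OCM_NON_CACHED, "example-driver");
            return ocm_buf ? 0 : -ENOMEM;
    }

    static void example_ocm_teardown(void)
    {
            ppc4xx_ocm_free(ocm_buf);
    }
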
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 2d0e1f5d8339..cea8496091ff 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -30,7 +30,6 @@
#define ACCOUNT_STOLEN_TIME
#else
#define ACCOUNT_CPU_USER_ENTRY(ra, rb) \
- beq 2f; /* if from kernel mode */ \
MFTB(ra); /* get timebase */ \
ld rb,PACA_STARTTIME_USER(r13); \
std ra,PACA_STARTTIME(r13); \
@@ -38,7 +37,6 @@
ld ra,PACA_USER_TIME(r13); \
add ra,ra,rb; /* add on to user time */ \
std ra,PACA_USER_TIME(r13); \
-2:
#define ACCOUNT_CPU_USER_EXIT(ra, rb) \
MFTB(ra); /* get timebase */ \
@@ -125,6 +123,89 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
+/* Save/restore FPRs, VRs and VSRs from their checkpointed backups in
+ * thread_struct:
+ */
+#define SAVE_FPR_TRANSACT(n, base) stfd n,THREAD_TRANSACT_FPR0+ \
+ 8*TS_FPRWIDTH*(n)(base)
+#define SAVE_2FPRS_TRANSACT(n, base) SAVE_FPR_TRANSACT(n, base); \
+ SAVE_FPR_TRANSACT(n+1, base)
+#define SAVE_4FPRS_TRANSACT(n, base) SAVE_2FPRS_TRANSACT(n, base); \
+ SAVE_2FPRS_TRANSACT(n+2, base)
+#define SAVE_8FPRS_TRANSACT(n, base) SAVE_4FPRS_TRANSACT(n, base); \
+ SAVE_4FPRS_TRANSACT(n+4, base)
+#define SAVE_16FPRS_TRANSACT(n, base) SAVE_8FPRS_TRANSACT(n, base); \
+ SAVE_8FPRS_TRANSACT(n+8, base)
+#define SAVE_32FPRS_TRANSACT(n, base) SAVE_16FPRS_TRANSACT(n, base); \
+ SAVE_16FPRS_TRANSACT(n+16, base)
+
+#define REST_FPR_TRANSACT(n, base) lfd n,THREAD_TRANSACT_FPR0+ \
+ 8*TS_FPRWIDTH*(n)(base)
+#define REST_2FPRS_TRANSACT(n, base) REST_FPR_TRANSACT(n, base); \
+ REST_FPR_TRANSACT(n+1, base)
+#define REST_4FPRS_TRANSACT(n, base) REST_2FPRS_TRANSACT(n, base); \
+ REST_2FPRS_TRANSACT(n+2, base)
+#define REST_8FPRS_TRANSACT(n, base) REST_4FPRS_TRANSACT(n, base); \
+ REST_4FPRS_TRANSACT(n+4, base)
+#define REST_16FPRS_TRANSACT(n, base) REST_8FPRS_TRANSACT(n, base); \
+ REST_8FPRS_TRANSACT(n+8, base)
+#define REST_32FPRS_TRANSACT(n, base) REST_16FPRS_TRANSACT(n, base); \
+ REST_16FPRS_TRANSACT(n+16, base)
+
+
+#define SAVE_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \
+ stvx n,b,base
+#define SAVE_2VRS_TRANSACT(n,b,base) SAVE_VR_TRANSACT(n,b,base); \
+ SAVE_VR_TRANSACT(n+1,b,base)
+#define SAVE_4VRS_TRANSACT(n,b,base) SAVE_2VRS_TRANSACT(n,b,base); \
+ SAVE_2VRS_TRANSACT(n+2,b,base)
+#define SAVE_8VRS_TRANSACT(n,b,base) SAVE_4VRS_TRANSACT(n,b,base); \
+ SAVE_4VRS_TRANSACT(n+4,b,base)
+#define SAVE_16VRS_TRANSACT(n,b,base) SAVE_8VRS_TRANSACT(n,b,base); \
+ SAVE_8VRS_TRANSACT(n+8,b,base)
+#define SAVE_32VRS_TRANSACT(n,b,base) SAVE_16VRS_TRANSACT(n,b,base); \
+ SAVE_16VRS_TRANSACT(n+16,b,base)
+
+#define REST_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \
+ lvx n,b,base
+#define REST_2VRS_TRANSACT(n,b,base) REST_VR_TRANSACT(n,b,base); \
+ REST_VR_TRANSACT(n+1,b,base)
+#define REST_4VRS_TRANSACT(n,b,base) REST_2VRS_TRANSACT(n,b,base); \
+ REST_2VRS_TRANSACT(n+2,b,base)
+#define REST_8VRS_TRANSACT(n,b,base) REST_4VRS_TRANSACT(n,b,base); \
+ REST_4VRS_TRANSACT(n+4,b,base)
+#define REST_16VRS_TRANSACT(n,b,base) REST_8VRS_TRANSACT(n,b,base); \
+ REST_8VRS_TRANSACT(n+8,b,base)
+#define REST_32VRS_TRANSACT(n,b,base) REST_16VRS_TRANSACT(n,b,base); \
+ REST_16VRS_TRANSACT(n+16,b,base)
+
+
+#define SAVE_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \
+ STXVD2X(n,R##base,R##b)
+#define SAVE_2VSRS_TRANSACT(n,b,base) SAVE_VSR_TRANSACT(n,b,base); \
+ SAVE_VSR_TRANSACT(n+1,b,base)
+#define SAVE_4VSRS_TRANSACT(n,b,base) SAVE_2VSRS_TRANSACT(n,b,base); \
+ SAVE_2VSRS_TRANSACT(n+2,b,base)
+#define SAVE_8VSRS_TRANSACT(n,b,base) SAVE_4VSRS_TRANSACT(n,b,base); \
+ SAVE_4VSRS_TRANSACT(n+4,b,base)
+#define SAVE_16VSRS_TRANSACT(n,b,base) SAVE_8VSRS_TRANSACT(n,b,base); \
+ SAVE_8VSRS_TRANSACT(n+8,b,base)
+#define SAVE_32VSRS_TRANSACT(n,b,base) SAVE_16VSRS_TRANSACT(n,b,base); \
+ SAVE_16VSRS_TRANSACT(n+16,b,base)
+
+#define REST_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \
+ LXVD2X(n,R##base,R##b)
+#define REST_2VSRS_TRANSACT(n,b,base) REST_VSR_TRANSACT(n,b,base); \
+ REST_VSR_TRANSACT(n+1,b,base)
+#define REST_4VSRS_TRANSACT(n,b,base) REST_2VSRS_TRANSACT(n,b,base); \
+ REST_2VSRS_TRANSACT(n+2,b,base)
+#define REST_8VSRS_TRANSACT(n,b,base) REST_4VSRS_TRANSACT(n,b,base); \
+ REST_4VSRS_TRANSACT(n+4,b,base)
+#define REST_16VSRS_TRANSACT(n,b,base) REST_8VSRS_TRANSACT(n,b,base); \
+ REST_8VSRS_TRANSACT(n+8,b,base)
+#define REST_32VSRS_TRANSACT(n,b,base) REST_16VSRS_TRANSACT(n,b,base); \
+ REST_16VSRS_TRANSACT(n+16,b,base)
+
/* Save the lower 32 VSRs in the thread VSR region */
#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,R##base,R##b)
#define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
@@ -391,6 +472,31 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
FTR_SECTION_ELSE_NESTED(848); \
mtocrf (FXM), RS; \
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
+
+/*
+ * PPR restore macros used in entry_64.S
+ * Used for P7 or later processors
+ */
+#define HMT_MEDIUM_LOW_HAS_PPR \
+BEGIN_FTR_SECTION_NESTED(944) \
+ HMT_MEDIUM_LOW; \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,944)
+
+#define SET_DEFAULT_THREAD_PPR(ra, rb) \
+BEGIN_FTR_SECTION_NESTED(945) \
+ lis ra,INIT_PPR@highest; /* default ppr=3 */ \
+ ld rb,PACACURRENT(r13); \
+ sldi ra,ra,32; /* bits 11-13 are used for ppr */ \
+ std ra,TASKTHREADPPR(rb); \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
+
+#define RESTORE_PPR(ra, rb) \
+BEGIN_FTR_SECTION_NESTED(946) \
+ ld ra,PACACURRENT(r13); \
+ ld rb,TASKTHREADPPR(ra); \
+ mtspr SPRN_PPR,rb; /* Restore PPR */ \
+END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
+
#endif
/*
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 87502046c0dc..7ff9eaa3ea6c 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -18,11 +18,22 @@
#define TS_FPRWIDTH 1
#endif
+#ifdef CONFIG_PPC64
+/* Default SMT priority is set to 3. Bits 11-13 are used to save the priority. */
+#define PPR_PRIORITY 3
+#ifdef __ASSEMBLY__
+#define INIT_PPR (PPR_PRIORITY << 50)
+#else
+#define INIT_PPR ((u64)PPR_PRIORITY << 50)
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PPC64 */
+
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <linux/cache.h>
#include <asm/ptrace.h>
#include <asm/types.h>
+#include <asm/hw_breakpoint.h>
/* We do _not_ want to define new machine types at all, those must die
* in favor of using the device-tree
@@ -141,6 +152,7 @@ typedef struct {
#define TS_FPROFFSET 0
#define TS_VSRLOWOFFSET 1
#define TS_FPR(i) fpr[i][TS_FPROFFSET]
+#define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET]
struct thread_struct {
unsigned long ksp; /* Kernel stack pointer */
@@ -215,8 +227,7 @@ struct thread_struct {
struct perf_event *last_hit_ubp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
- unsigned long dabr; /* Data address breakpoint register */
- unsigned long dabrx; /* ... extension */
+ struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
unsigned long trap_nr; /* last trap # on this thread */
#ifdef CONFIG_ALTIVEC
/* Complete AltiVec register set */
@@ -236,6 +247,34 @@ struct thread_struct {
unsigned long spefscr; /* SPE & eFP status */
int used_spe; /* set if process has used spe */
#endif /* CONFIG_SPE */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u64 tm_tfhar; /* Transaction fail handler addr */
+ u64 tm_texasr; /* Transaction exception & summary */
+ u64 tm_tfiar; /* Transaction fail instr address reg */
+ unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */
+ struct pt_regs ckpt_regs; /* Checkpointed registers */
+
+ /*
+ * Transactional FP and VSX 0-31 register set.
+ * NOTE: the sense of these is the opposite of the integer ckpt_regs!
+ *
+ * When a transaction is active/signalled/scheduled etc., *regs is the
+ * most recent set of/speculated GPRs with ckpt_regs being the older
+ * checkpointed regs to which we roll back if transaction aborts.
+ *
+ * However, fpr[] is the checkpointed 'base state' of FP regs, and
+ * transact_fpr[] is the new set of transactional values.
+ * VRs work the same way.
+ */
+ double transact_fpr[32][TS_FPRWIDTH];
+ struct {
+ unsigned int pad;
+ unsigned int val; /* Floating point status */
+ } transact_fpscr;
+ vector128 transact_vr[32] __attribute__((aligned(16)));
+ vector128 transact_vscr __attribute__((aligned(16)));
+ unsigned long transact_vrsave;
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
void* kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
@@ -245,6 +284,10 @@ struct thread_struct {
#ifdef CONFIG_PPC64
unsigned long dscr;
int dscr_inherit;
+ unsigned long ppr; /* used to save/restore SMT priority */
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ unsigned long tar;
#endif
};
@@ -278,6 +321,7 @@ struct thread_struct {
.fpr = {{0}}, \
.fpscr = { .val = 0, }, \
.fpexc_mode = 0, \
+ .ppr = INIT_PPR, \
}
#endif
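
processor.h now carries the default SMT priority (PPR_PRIORITY = 3, shifted so that it lands in PPR bits 11-13, i.e. bits 52:50 counting from the least-significant end), the checkpointed TM register backups, and the hw_brk field replacing dabr/dabrx. A quick sanity-check sketch of the INIT_PPR encoding (assumes a 64-bit, non-assembly build; the helper macro is not part of the patch):

    #include <linux/bug.h>
    #include <asm/processor.h>

    /* Extract the 3-bit priority field (PPR[11:13]) from a PPR image. */
    #define EXAMPLE_PPR_PRIO(v)     (((v) >> 50) & 0x7)

    static inline void example_check_init_ppr(void)
    {
            /* INIT_PPR = (u64)PPR_PRIORITY << 50, so the field reads back 3. */
            BUILD_BUG_ON(EXAMPLE_PPR_PRIO(INIT_PPR) != PPR_PRIORITY);
    }
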
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index 0e15db4d703b..678a7c1d9cb8 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -245,7 +245,7 @@ enum lv1_result {
static inline const char* ps3_result(int result)
{
-#if defined(DEBUG)
+#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT)
switch (result) {
case LV1_SUCCESS:
return "LV1_SUCCESS (0)";
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 3d5c9dc8917a..7035e608f3fa 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -29,6 +29,10 @@
#define MSR_SF_LG 63 /* Enable 64 bit mode */
#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
#define MSR_HV_LG 60 /* Hypervisor state */
+#define MSR_TS_T_LG 34 /* Trans Mem state: Transactional */
+#define MSR_TS_S_LG 33 /* Trans Mem state: Suspended */
+#define MSR_TS_LG 33 /* Trans Mem state (2 bits) */
+#define MSR_TM_LG 32 /* Trans Mem Available */
#define MSR_VEC_LG 25 /* Enable AltiVec */
#define MSR_VSX_LG 23 /* Enable VSX */
#define MSR_POW_LG 18 /* Enable Power Management */
@@ -98,6 +102,26 @@
#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
+#define MSR_TM __MASK(MSR_TM_LG) /* Transactional Mem Available */
+#define MSR_TS_N 0 /* Non-transactional */
+#define MSR_TS_S __MASK(MSR_TS_S_LG) /* Transaction Suspended */
+#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
+#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
+#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
+#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
+#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
+
+/* Reason codes describing kernel causes for transaction aborts. By
+ convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if
+ the failure is persistent.
+*/
+#define TM_CAUSE_RESCHED 0xfe
+#define TM_CAUSE_TLBI 0xfc
+#define TM_CAUSE_FAC_UNAV 0xfa
+#define TM_CAUSE_SYSCALL 0xf9 /* Persistent */
+#define TM_CAUSE_MISC 0xf6
+#define TM_CAUSE_SIGNAL 0xf4
+
#if defined(CONFIG_PPC_BOOK3S_64)
#define MSR_64BIT MSR_SF
@@ -193,6 +217,10 @@
#define SPRN_UAMOR 0x9d /* User Authority Mask Override Register */
#define SPRN_AMOR 0x15d /* Authority Mask Override Register */
#define SPRN_ACOP 0x1F /* Available Coprocessor Register */
+#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
+#define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */
+#define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */
+#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
#define CTRL_CT 0xc0000000 /* current thread */
@@ -200,10 +228,12 @@
#define CTRL_CT1 0x40000000 /* thread 1 */
#define CTRL_TE 0x00c00000 /* thread enable */
#define CTRL_RUNLATCH 0x1
+#define SPRN_DAWR 0xB4
+#define SPRN_DAWRX 0xBC
+#define DAWRX_USER (1UL << 0)
+#define DAWRX_KERNEL (1UL << 1)
+#define DAWRX_HYP (1UL << 2)
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
-#define DABR_TRANSLATION (1UL << 2)
-#define DABR_DATA_WRITE (1UL << 1)
-#define DABR_DATA_READ (1UL << 0)
#define SPRN_DABR2 0x13D /* e300 */
#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */
#define DABRX_USER (1UL << 0)
@@ -235,6 +265,9 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
+#define FSCR_TAR (1<<8) /* Enable Target Address Register */
+#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
#define LPCR_VPM1 (1ul << (63-1))
@@ -289,6 +322,7 @@
#define SPRN_DBAT6U 0x23C /* Data BAT 6 Upper Register */
#define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */
#define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */
+#define SPRN_PPR 0x380 /* SMT Thread status Register */
#define SPRN_DEC 0x016 /* Decrement Register */
#define SPRN_DER 0x095 /* Debug Enable Register */
@@ -483,6 +517,7 @@
#ifndef SPRN_PIR
#define SPRN_PIR 0x3FF /* Processor Identification Register */
#endif
+#define SPRN_TIR 0x1BE /* Thread Identification Register */
#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
@@ -763,7 +798,7 @@
* HV mode in which case it is HSPRG0
*
* 64-bit server:
- * - SPRG0 unused (reserved for HV on Power4)
+ * - SPRG0 scratch for TM recheckpoint/reclaim (reserved for HV on Power4)
* - SPRG2 scratch for exception vectors
* - SPRG3 CPU and NUMA node for VDSO getcpu (user visible)
* - HSPRG0 stores PACA in HV mode
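
reg.h adds the TM MSR bits, the MSR_TM_ACTIVE()/MSR_TM_TRANSACTIONAL()/MSR_TM_SUSPENDED() predicates, the TM_CAUSE_* abort codes, and the new DAWR/TAR/PPR/TIR SPR numbers. A sketch of how a trap path might classify the saved MSR using those predicates (illustrative; the function is not from the patch):

    #include <asm/reg.h>
    #include <asm/ptrace.h>

    /* Hypothetical: describe the transactional state recorded in regs->msr. */
    static const char *example_tm_state(struct pt_regs *regs)
    {
            if (MSR_TM_TRANSACTIONAL(regs->msr))
                    return "transactional";
            if (MSR_TM_SUSPENDED(regs->msr))
                    return "suspended";
            return "non-transactional";
    }
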
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index a0f358d4a00c..4ee06fe15de4 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -10,6 +10,9 @@
extern char __end_interrupts[];
+extern char __prom_init_toc_start[];
+extern char __prom_init_toc_end[];
+
static inline int in_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end)
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 7124fc06ad47..5b23f910ee57 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -96,7 +96,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
+#define SHARED_PROCESSOR (local_paca->lppaca_ptr->shared_proc)
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h
new file mode 100644
index 000000000000..4b4449abf3f8
--- /dev/null
+++ b/arch/powerpc/include/asm/tm.h
@@ -0,0 +1,20 @@
+/*
+ * Transactional memory support routines to reclaim and recheckpoint
+ * transactional process state.
+ *
+ * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
+ */
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+extern void do_load_up_transact_fpu(struct thread_struct *thread);
+extern void do_load_up_transact_altivec(struct thread_struct *thread);
+#endif
+
+extern void tm_enable(void);
+extern void tm_reclaim(struct thread_struct *thread,
+ unsigned long orig_msr, uint8_t cause);
+extern void tm_recheckpoint(struct thread_struct *thread,
+ unsigned long orig_msr);
+extern void tm_abort(uint8_t cause);
+extern void tm_save_sprs(struct thread_struct *thread);
+extern void tm_restore_sprs(struct thread_struct *thread);
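
tm.h declares the reclaim/recheckpoint entry points that the context-switch and signal code build on. A rough sketch of how the pieces fit together on switch-out of a thread with an active transaction (an illustration of the API only, assuming the simplest possible flow; the real callers in this series are more involved):

    #include <asm/tm.h>
    #include <asm/reg.h>
    #include <asm/processor.h>

    /* Hypothetical: park a thread's transactional state before switching away.
     * TM_CAUSE_RESCHED (from reg.h) is recorded as the abort reason. */
    static void example_tm_switch_out(struct thread_struct *thr, unsigned long msr)
    {
            if (!MSR_TM_ACTIVE(msr))
                    return;

            tm_enable();                            /* make the TM SPRs accessible */
            tm_reclaim(thr, msr, TM_CAUSE_RESCHED); /* pull state back into thr */
    }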