Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                     |  2
-rw-r--r--  arch/powerpc/Makefile                    |  6
-rw-r--r--  arch/powerpc/include/asm/atomic.h        | 48
-rw-r--r--  arch/powerpc/include/asm/bitops.h        | 12
-rw-r--r--  arch/powerpc/include/asm/futex.h         |  7
-rw-r--r--  arch/powerpc/include/asm/kvm.h           |  8
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h    |  2
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h     |  2
-rw-r--r--  arch/powerpc/include/asm/sections.h      |  2
-rw-r--r--  arch/powerpc/include/asm/synch.h         |  9
-rw-r--r--  arch/powerpc/kernel/entry_32.S           | 15
-rw-r--r--  arch/powerpc/kernel/jump_label.c         |  2
-rw-r--r--  arch/powerpc/kernel/kvm.c                |  1
-rw-r--r--  arch/powerpc/kernel/misc_32.S            |  2
-rw-r--r--  arch/powerpc/kernel/process.c            | 24
-rw-r--r--  arch/powerpc/kernel/prom_init.c          |  6
-rw-r--r--  arch/powerpc/kernel/setup_32.c           |  2
-rw-r--r--  arch/powerpc/kernel/setup_64.c           |  1
-rw-r--r--  arch/powerpc/kernel/signal_32.c          |  2
-rw-r--r--  arch/powerpc/kernel/traps.c              |  2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c             |  1
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c             | 14
-rw-r--r--  arch/powerpc/kvm/powerpc.c               |  1
-rw-r--r--  arch/powerpc/lib/feature-fixups.c        | 23
-rw-r--r--  arch/powerpc/platforms/Kconfig           |  2
-rw-r--r--  arch/powerpc/platforms/ps3/interrupt.c   | 23
-rw-r--r--  arch/powerpc/platforms/ps3/platform.h    |  1
-rw-r--r--  arch/powerpc/platforms/ps3/smp.c         | 62
28 files changed, 151 insertions, 131 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b177caa56d95..951e18f5335b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -345,7 +345,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
- depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !47x)) && EXPERIMENTAL
+ depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !PPC_47x)) && EXPERIMENTAL
help
kexec is a system call that implements the ability to shut down your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 57af16edc192..70ba0c0a1223 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -255,12 +255,6 @@ checkbin:
echo 'disable kernel modules' ; \
false ; \
fi
- @if ! /bin/echo dssall | $(AS) -many -o $(TOUT) >/dev/null 2>&1 ; then \
- echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build ' ; \
- echo 'correctly with old versions of binutils.' ; \
- echo '*** Please upgrade your binutils to 2.12.1 or newer' ; \
- false ; \
- fi
CLEAN_FILES += $(TOUT)
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index e2a4c26ad377..02e41b53488d 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -49,13 +49,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
int t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # atomic_add_return\n\
add %0,%1,%0\n"
PPC405_ERR77(0,%2)
" stwcx. %0,0,%2 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (a), "r" (&v->counter)
: "cc", "memory");
@@ -85,13 +85,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
int t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # atomic_sub_return\n\
subf %0,%1,%0\n"
PPC405_ERR77(0,%2)
" stwcx. %0,0,%2 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (a), "r" (&v->counter)
: "cc", "memory");
@@ -119,13 +119,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
int t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # atomic_inc_return\n\
addic %0,%0,1\n"
PPC405_ERR77(0,%1)
" stwcx. %0,0,%1 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (&v->counter)
: "cc", "xer", "memory");
@@ -163,13 +163,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
int t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # atomic_dec_return\n\
addic %0,%0,-1\n"
PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (&v->counter)
: "cc", "xer", "memory");
@@ -194,7 +194,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
int t;
__asm__ __volatile__ (
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # __atomic_add_unless\n\
cmpw 0,%0,%3 \n\
beq- 2f \n\
@@ -202,7 +202,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
PPC405_ERR77(0,%2)
" stwcx. %0,0,%1 \n\
bne- 1b \n"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
" subf %0,%2,%0 \n\
2:"
: "=&r" (t)
@@ -226,7 +226,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
int t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
cmpwi %0,1\n\
addi %0,%0,-1\n\
@@ -234,7 +234,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
"\n\
2:" : "=&b" (t)
: "r" (&v->counter)
@@ -285,12 +285,12 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
long t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 # atomic64_add_return\n\
add %0,%1,%0\n\
stdcx. %0,0,%2 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (a), "r" (&v->counter)
: "cc", "memory");
@@ -319,12 +319,12 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
long t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 # atomic64_sub_return\n\
subf %0,%1,%0\n\
stdcx. %0,0,%2 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (a), "r" (&v->counter)
: "cc", "memory");
@@ -351,12 +351,12 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
long t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_inc_return\n\
addic %0,%0,1\n\
stdcx. %0,0,%1 \n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (&v->counter)
: "cc", "xer", "memory");
@@ -393,12 +393,12 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
long t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_dec_return\n\
addic %0,%0,-1\n\
stdcx. %0,0,%1\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (&v->counter)
: "cc", "xer", "memory");
@@ -418,13 +418,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
long t;
__asm__ __volatile__(
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
addic. %0,%0,-1\n\
blt- 2f\n\
stdcx. %0,0,%1\n\
bne- 1b"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
"\n\
2:" : "=&r" (t)
: "r" (&v->counter)
@@ -450,14 +450,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
long t;
__asm__ __volatile__ (
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # __atomic_add_unless\n\
cmpd 0,%0,%3 \n\
beq- 2f \n\
add %0,%2,%0 \n"
" stdcx. %0,0,%1 \n\
bne- 1b \n"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
" subf %0,%2,%0 \n\
2:"
: "=&r" (t)
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index e137afcc10fa..efdc92618b38 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -124,14 +124,14 @@ static __inline__ unsigned long fn( \
return (old & mask); \
}
-DEFINE_TESTOP(test_and_set_bits, or, PPC_RELEASE_BARRIER,
- PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
+ PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_set_bits_lock, or, "",
PPC_ACQUIRE_BARRIER, 1)
-DEFINE_TESTOP(test_and_clear_bits, andc, PPC_RELEASE_BARRIER,
- PPC_ACQUIRE_BARRIER, 0)
-DEFINE_TESTOP(test_and_change_bits, xor, PPC_RELEASE_BARRIER,
- PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
+ PPC_ATOMIC_EXIT_BARRIER, 0)
+DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
+ PPC_ATOMIC_EXIT_BARRIER, 0)
static __inline__ int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index c94e4a3fe2ef..2a9cf845473b 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -11,12 +11,13 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \
- PPC_RELEASE_BARRIER \
+ PPC_ATOMIC_ENTRY_BARRIER \
"1: lwarx %0,0,%2\n" \
insn \
PPC405_ERR77(0, %2) \
"2: stwcx. %1,0,%2\n" \
"bne- 1b\n" \
+ PPC_ATOMIC_EXIT_BARRIER \
"li %1,0\n" \
"3: .section .fixup,\"ax\"\n" \
"4: li %1,%3\n" \
@@ -92,14 +93,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
__asm__ __volatile__ (
- PPC_RELEASE_BARRIER
+ PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
cmpw 0,%1,%4\n\
bne- 3f\n"
PPC405_ERR77(0,%3)
"2: stwcx. %5,0,%3\n\
bne- 1b\n"
- PPC_ACQUIRE_BARRIER
+ PPC_ATOMIC_EXIT_BARRIER
"3: .section .fixup,\"ax\"\n\
4: li %0,%6\n\
b 3b\n\
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index 08fe69edcd10..0ad432bc81d6 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -149,12 +149,6 @@ struct kvm_regs {
#define KVM_SREGS_E_UPDATE_DBSR (1 << 3)
/*
- * Book3S special bits to indicate contents in the struct by maintaining
- * backwards compatibility with older structs. If adding a new field,
- * please make sure to add a flag for that new field */
-#define KVM_SREGS_S_HIOR (1 << 0)
-
-/*
* In KVM_SET_SREGS, reserved/pad fields must be left untouched from a
* previous KVM_GET_REGS.
*
@@ -179,8 +173,6 @@ struct kvm_sregs {
__u64 ibat[8];
__u64 dbat[8];
} ppc32;
- __u64 flags; /* KVM_SREGS_S_ */
- __u64 hior;
} s;
struct {
union {
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index a384ffdf33de..d4df013ad779 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -90,8 +90,6 @@ struct kvmppc_vcpu_book3s {
#endif
int context_id[SID_CONTEXTS];
- bool hior_sregs; /* HIOR is set by SREGS, not PVR */
-
struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 28cdbd9f399c..03c48e819c8e 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -31,7 +31,7 @@
#define MSR_ MSR_ME | MSR_CE
#define MSR_KERNEL MSR_ | MSR_64BIT
-#define MSR_USER32 MSR_ | MSR_PR | MSR_EE | MSR_DE
+#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
#define MSR_USER64 MSR_USER32 | MSR_64BIT
#elif defined (CONFIG_40x)
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 6fbce725c710..a0f358d4a00c 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,7 +8,7 @@
#ifdef __powerpc64__
-extern char _end[];
+extern char __end_interrupts[];
static inline int in_kernel_text(unsigned long addr)
{
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index d7cab44643c5..e682a7143edb 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -13,6 +13,7 @@
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
void *fixup_end);
+extern void do_final_fixups(void);
static inline void eieio(void)
{
@@ -41,11 +42,15 @@ static inline void isync(void)
START_LWSYNC_SECTION(97); \
isync; \
MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
-#define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
-#define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
+#define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
+#define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
#else
#define PPC_ACQUIRE_BARRIER
#define PPC_RELEASE_BARRIER
+#define PPC_ATOMIC_ENTRY_BARRIER
+#define PPC_ATOMIC_EXIT_BARRIER
#endif
#endif /* __KERNEL__ */
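
With these definitions, on CONFIG_SMP the ll/sc loops in the atomic.h, bitops.h and futex.h hunks above are bracketed by lwsync on entry and sync on exit, while on UP builds all four macros expand to nothing. As a concrete example, atomic_add_return() from the first hunk now assembles to roughly the following on an SMP kernel (register numbers are illustrative compiler choices; PPC405_ERR77 expands to nothing except on the affected 405 parts):

            lwsync                  # PPC_ATOMIC_ENTRY_BARRIER
    1:      lwarx   r9,0,r10        # load-reserve v->counter
            add     r9,r3,r9        # t = a + v->counter
            stwcx.  r9,0,r10        # store-conditional
            bne-    1b              # lost the reservation, retry
            sync                    # PPC_ATOMIC_EXIT_BARRIER
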
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 56212bc0ab08..4f80cf1ce77b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -215,7 +215,22 @@ reenable_mmu: /* re-enable mmu so we can */
stw r9,8(r1)
stw r11,12(r1)
stw r3,ORIG_GPR3(r1)
+ /*
+ * The trace_hardirqs_off() call below uses CALLER_ADDR0 and CALLER_ADDR1.
+ * If we came from user mode there is only one stack frame on the stack,
+ * and accessing CALLER_ADDR1 will cause an oops, so create a dummy
+ * stack frame to keep trace_hardirqs_off() happy.
+ */
+ andi. r12,r12,MSR_PR
+ beq 11f
+ stwu r1,-16(r1)
+ bl trace_hardirqs_off
+ addi r1,r1,16
+ b 12f
+
+11:
bl trace_hardirqs_off
+12:
lwz r0,GPR0(r1)
lwz r3,ORIG_GPR3(r1)
lwz r4,GPR4(r1)
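
For reference on the comment above: CALLER_ADDR0 and CALLER_ADDR1 come from the generic ftrace header and are thin wrappers around __builtin_return_address(), roughly as follows (paraphrased for context, not part of this patch):

    /* include/linux/ftrace.h (approximate, for illustration) */
    #define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
    #define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))

__builtin_return_address(1) walks one frame up through the stack back-chain, and when the exception came from user space there is no second kernel frame to walk, hence the dummy 16-byte frame (stwu r1,-16(r1) ... addi r1,r1,16) pushed around the trace_hardirqs_off call.
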
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index 368d158d665d..a1ed8a8c7cb4 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -11,6 +11,7 @@
#include <linux/jump_label.h>
#include <asm/code-patching.h>
+#ifdef HAVE_JUMP_LABEL
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
@@ -21,3 +22,4 @@ void arch_jump_label_transform(struct jump_entry *entry,
else
patch_instruction(addr, PPC_INST_NOP);
}
+#endif
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 35f27646c4ff..2985338d0e10 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -132,7 +132,6 @@ static void kvm_patch_ins_b(u32 *inst, int addr)
/* On relocatable kernels, interrupt handlers and our code
can be in different regions, so we don't patch them */
- extern u32 __end_interrupts;
if ((ulong)inst < (ulong)&__end_interrupts)
return;
#endif
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index f7d760ab5ca1..7cd07b42ca1a 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -738,7 +738,7 @@ relocate_new_kernel:
mr r5, r31
li r0, 0
-#elif defined(CONFIG_44x) && !defined(CONFIG_47x)
+#elif defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
/*
* Code for setting up 1:1 mapping for PPC440x for KEXEC
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9054ca9ab4f9..6457574c0b2f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -486,28 +486,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
new_thread = &new->thread;
old_thread = &current->thread;
-#if defined(CONFIG_PPC_BOOK3E_64)
- /* XXX Current Book3E code doesn't deal with kernel side DBCR0,
- * we always hold the user values, so we set it now.
- *
- * However, we ensure the kernel MSR:DE is appropriately cleared too
- * to avoid spurrious single step exceptions in the kernel.
- *
- * This will have to change to merge with the ppc32 code at some point,
- * but I don't like much what ppc32 is doing today so there's some
- * thinking needed there
- */
- if ((new_thread->dbcr0 | old_thread->dbcr0) & DBCR0_IDM) {
- u32 dbcr0;
-
- mtmsr(mfmsr() & ~MSR_DE);
- isync();
- dbcr0 = mfspr(SPRN_DBCR0);
- dbcr0 = (dbcr0 & DBCR0_EDM) | new_thread->dbcr0;
- mtspr(SPRN_DBCR0, dbcr0);
- }
-#endif /* CONFIG_PPC64_BOOK3E */
-
#ifdef CONFIG_PPC64
/*
* Collect processor utilization data per process
@@ -657,7 +635,7 @@ void show_regs(struct pt_regs * regs)
if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
printk("CFAR: "REG"\n", regs->orig_gpr3);
if (trap == 0x300 || trap == 0x600)
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index b4fa66127495..cc584865b3df 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1579,10 +1579,8 @@ static void __init prom_instantiate_rtas(void)
return;
base = alloc_down(size, PAGE_SIZE, 0);
- if (base == 0) {
- prom_printf("RTAS allocation failed !\n");
- return;
- }
+ if (base == 0)
+ prom_panic("Could not allocate memory for RTAS\n");
rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
if (!IHANDLE_VALID(rtas_inst)) {
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index c1ce86357ecb..ac7610815113 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -107,6 +107,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
PTRRELOC(&__start___lwsync_fixup),
PTRRELOC(&__stop___lwsync_fixup));
+ do_final_fixups();
+
return KERNELBASE + offset;
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 1a9dea80a69b..fb9bb46e7e88 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -359,6 +359,7 @@ void __init setup_system(void)
&__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
do_lwsync_fixups(cur_cpu_spec->cpu_features,
&__start___lwsync_fixup, &__stop___lwsync_fixup);
+ do_final_fixups();
/*
* Unflatten the device-tree passed by prom_init or kexec
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 78b76dc54dfb..836a5a19eb2c 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -97,7 +97,7 @@ static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
compat_sigset_t cset;
switch (_NSIG_WORDS) {
- case 4: cset.sig[5] = set->sig[3] & 0xffffffffull;
+ case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
cset.sig[7] = set->sig[3] >> 32;
case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
cset.sig[5] = set->sig[2] >> 32;
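
The change above fixes an off-by-one in the 64-bit compat path: each 64-bit word of the native sigset is split into two 32-bit compat words, so native word 3 belongs in compat words 6 and 7, not 5 and 7. With the bug, the low half of word 3 was written to cset.sig[5] and then immediately overwritten by the case-3 fall-through, while cset.sig[6] was never set. The general rule the fall-through switch implements, written as a loop (illustrative only; the helper name is made up):

    /* Sketch of the mapping put_sigset_t() performs on 64-bit kernels. */
    static void sigset_to_compat_sketch(compat_sigset_t *cset, const sigset_t *set)
    {
            int i;

            for (i = 0; i < _NSIG_WORDS; i++) {
                    cset->sig[2 * i]     = set->sig[i] & 0xffffffffull;  /* low 32 bits  */
                    cset->sig[2 * i + 1] = set->sig[i] >> 32;            /* high 32 bits */
            }
    }
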
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4e5908264d1a..5459d148a0f6 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1298,14 +1298,12 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
if (user_mode(regs)) {
current->thread.dbcr0 &= ~DBCR0_IC;
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
current->thread.dbcr1))
regs->msr |= MSR_DE;
else
/* Make sure the IDM bit is off */
current->thread.dbcr0 &= ~DBCR0_IDM;
-#endif
}
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0cdbc07cec14..0cb137a9b038 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -44,6 +44,7 @@
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
+#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index bc4d50dec78b..3c791e1eb675 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -151,16 +151,14 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
#ifdef CONFIG_PPC_BOOK3S_64
if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
kvmppc_mmu_book3s_64_init(vcpu);
- if (!to_book3s(vcpu)->hior_sregs)
- to_book3s(vcpu)->hior = 0xfff00000;
+ to_book3s(vcpu)->hior = 0xfff00000;
to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
vcpu->arch.cpu_type = KVM_CPU_3S_64;
} else
#endif
{
kvmppc_mmu_book3s_32_init(vcpu);
- if (!to_book3s(vcpu)->hior_sregs)
- to_book3s(vcpu)->hior = 0;
+ to_book3s(vcpu)->hior = 0;
to_book3s(vcpu)->msr_mask = 0xffffffffULL;
vcpu->arch.cpu_type = KVM_CPU_3S_32;
}
@@ -797,9 +795,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
}
}
- if (sregs->u.s.flags & KVM_SREGS_S_HIOR)
- sregs->u.s.hior = to_book3s(vcpu)->hior;
-
return 0;
}
@@ -836,11 +831,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
/* Flush the MMU after messing with the segments */
kvmppc_mmu_pte_flush(vcpu, 0, 0);
- if (sregs->u.s.flags & KVM_SREGS_S_HIOR) {
- to_book3s(vcpu)->hior_sregs = true;
- to_book3s(vcpu)->hior = sregs->u.s.hior;
- }
-
return 0;
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index efbf9ad87203..607fbdf24b84 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -208,7 +208,6 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_PPC_BOOKE_SREGS:
#else
case KVM_CAP_PPC_SEGSTATE:
- case KVM_CAP_PPC_HIOR:
case KVM_CAP_PPC_PAPR:
#endif
case KVM_CAP_PPC_UNSET_IRQ:
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 0d08d0171392..7a8a7487cee8 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -18,6 +18,8 @@
#include <linux/init.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
+#include <asm/page.h>
+#include <asm/sections.h>
struct fixup_entry {
@@ -128,6 +130,27 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
}
}
+void do_final_fixups(void)
+{
+#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
+ int *src, *dest;
+ unsigned long length;
+
+ if (PHYSICAL_START == 0)
+ return;
+
+ src = (int *)(KERNELBASE + PHYSICAL_START);
+ dest = (int *)KERNELBASE;
+ length = (__end_interrupts - _stext) / sizeof(int);
+
+ while (length--) {
+ patch_instruction(dest, *src);
+ src++;
+ dest++;
+ }
+#endif
+}
+
#ifdef CONFIG_FTR_FIXUP_SELFTEST
#define check(x) \
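
do_final_fixups() only has work to do on a CONFIG_RELOCATABLE 64-bit kernel that is actually running at a non-zero physical offset: it copies everything from _stext up to __end_interrupts from the running image back down to the link address, one 32-bit instruction at a time via patch_instruction() so each copied word also gets the necessary cache maintenance. A worked example with made-up numbers (illustrative only):

    /* Suppose the relocatable kernel was loaded at physical 32 MB and the
     * low-level exception code spans 32 KB:
     *
     *     PHYSICAL_START            = 0x2000000
     *     __end_interrupts - _stext = 0x8000     -> 0x2000 instructions
     *
     * The loop then copies 0x2000 instructions from
     *     src  = KERNELBASE + 0x2000000    (the image we are running from)
     * down to
     *     dest = KERNELBASE                (the linear mapping of physical 0),
     * so that exceptions, which always vector into low physical memory,
     * land in code that matches the currently running kernel.
     */
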
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index e4588721ef34..3fe6d927ad70 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -347,7 +347,7 @@ config SIMPLE_GPIO
config MCU_MPC8349EMITX
bool "MPC8349E-mITX MCU driver"
- depends on I2C && PPC_83xx
+ depends on I2C=y && PPC_83xx
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 404bc52b7806..1d6f4f478fe2 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -88,6 +88,7 @@ struct ps3_private {
struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
u64 ppe_id;
u64 thread_id;
+ unsigned long ipi_mask;
};
static DEFINE_PER_CPU(struct ps3_private, ps3_private);
@@ -144,7 +145,11 @@ static void ps3_chip_unmask(struct irq_data *d)
static void ps3_chip_eoi(struct irq_data *d)
{
const struct ps3_private *pd = irq_data_get_irq_chip_data(d);
- lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
+
+ /* non-IPIs are EOIed here. */
+
+ if (!test_bit(63 - d->irq, &pd->ipi_mask))
+ lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
}
/**
@@ -691,6 +696,16 @@ void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
cpu, virq, pd->bmp.ipi_debug_brk_mask);
}
+void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
+{
+ struct ps3_private *pd = &per_cpu(ps3_private, cpu);
+
+ set_bit(63 - virq, &pd->ipi_mask);
+
+ DBG("%s:%d: cpu %u, virq %u, ipi_mask %lxh\n", __func__, __LINE__,
+ cpu, virq, pd->ipi_mask);
+}
+
static unsigned int ps3_get_irq(void)
{
struct ps3_private *pd = &__get_cpu_var(ps3_private);
@@ -720,6 +735,12 @@ static unsigned int ps3_get_irq(void)
BUG();
}
#endif
+
+ /* IPIs are EOIed here. */
+
+ if (test_bit(63 - plug, &pd->ipi_mask))
+ lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, plug);
+
return plug;
}
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
index 9a196a88eda7..1a633ed0fe98 100644
--- a/arch/powerpc/platforms/ps3/platform.h
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -43,6 +43,7 @@ void ps3_mm_shutdown(void);
void ps3_init_IRQ(void);
void ps3_shutdown_IRQ(int cpu);
void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
+void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq);
/* smp */
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 4c44794faac0..efc1cd8c034a 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -59,46 +59,49 @@ static void ps3_smp_message_pass(int cpu, int msg)
static int ps3_smp_probe(void)
{
- return 2;
-}
+ int cpu;
-static void __init ps3_smp_setup_cpu(int cpu)
-{
- int result;
- unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
- int i;
+ for (cpu = 0; cpu < 2; cpu++) {
+ int result;
+ unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
+ int i;
- DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
+ DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
- /*
- * Check assumptions on ps3_ipi_virqs[] indexing. If this
- * check fails, then a different mapping of PPC_MSG_
- * to index needs to be setup.
- */
+ /*
+ * Check assumptions on ps3_ipi_virqs[] indexing. If this
+ * check fails, then a different mapping of PPC_MSG_
+ * to index needs to be setup.
+ */
- BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
- BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
- BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2);
- BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
+ BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
+ BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
+ BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2);
+ BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
- for (i = 0; i < MSG_COUNT; i++) {
- result = ps3_event_receive_port_setup(cpu, &virqs[i]);
+ for (i = 0; i < MSG_COUNT; i++) {
+ result = ps3_event_receive_port_setup(cpu, &virqs[i]);
- if (result)
- continue;
+ if (result)
+ continue;
- DBG("%s:%d: (%d, %d) => virq %u\n",
- __func__, __LINE__, cpu, i, virqs[i]);
+ DBG("%s:%d: (%d, %d) => virq %u\n",
+ __func__, __LINE__, cpu, i, virqs[i]);
- result = smp_request_message_ipi(virqs[i], i);
+ result = smp_request_message_ipi(virqs[i], i);
- if (result)
- virqs[i] = NO_IRQ;
- }
+ if (result)
+ virqs[i] = NO_IRQ;
+ else
+ ps3_register_ipi_irq(cpu, virqs[i]);
+ }
- ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
+ ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
- DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
+ DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
+ }
+
+ return 2;
}
void ps3_smp_cleanup_cpu(int cpu)
@@ -121,7 +124,6 @@ static struct smp_ops_t ps3_smp_ops = {
.probe = ps3_smp_probe,
.message_pass = ps3_smp_message_pass,
.kick_cpu = smp_generic_kick_cpu,
- .setup_cpu = ps3_smp_setup_cpu,
};
void smp_init_ps3(void)