-rw-r--r--  arch/powerpc/kernel/entry_32.S           |  2 ++
-rw-r--r--  arch/powerpc/kernel/entry_64.S           | 13 +++++++++++++
-rw-r--r--  arch/powerpc/kernel/kgdb.c               |  5 +++--
-rw-r--r--  arch/powerpc/kernel/time.c               |  9 +++++++--
-rw-r--r--  arch/powerpc/oprofile/op_model_power4.c  |  2 +-
-rw-r--r--  arch/powerpc/platforms/pasemi/cpufreq.c  |  7 +++++++
6 files changed, 33 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index d22e73e4618b..e514de57a125 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -439,6 +439,8 @@ ret_from_fork:
ret_from_kernel_thread:
REST_NVGPRS(r1)
bl schedule_tail
+ li r3,0
+ stw r3,0(r1)
mtlr r14
mr r3,r15
PPC440EP_ERR42
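
The two added instructions store zero into the word at 0(r1), which in the PowerPC ELF ABI is the stack frame's back-chain pointer, so a freshly started kernel thread gets a properly terminated frame chain. A minimal sketch of how an unwinder depends on that terminator; the function name is illustrative, and real walkers (show_stack(), xmon) also validate each frame address before following it:

/* Hedged sketch: follow a PPC stack back chain until the zero
 * terminator that ret_from_kernel_thread now guarantees.
 */
static void walk_backchain_sketch(unsigned long sp)
{
	while (sp) {
		unsigned long *frame = (unsigned long *)sp;

		/* frame[0] holds the saved back-chain pointer; 0 ends the walk */
		sp = frame[0];
	}
}
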
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index b310a0573625..3d990d3bd8ba 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -664,6 +664,19 @@ resume_kernel:
ld r4,TI_FLAGS(r9)
andi. r0,r4,_TIF_NEED_RESCHED
bne 1b
+
+ /*
+ * arch_local_irq_restore() from preempt_schedule_irq above may
+ * re-enable hard interrupts, but we really should have interrupts
+ * disabled when we return from the interrupt, so that we don't get
+ * interrupted after loading SRR0/1.
+ */
+#ifdef CONFIG_PPC_BOOK3E
+ wrteei 0
+#else
+ ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+ mtmsrd r10,1 /* Update machine state */
+#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */
.globl fast_exc_return_irq
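
In C terms the new instructions hard-disable interrupts by clearing MSR[EE] before the exit path loads SRR0/SRR1: wrteei 0 on Book3E, an mtmsrd of the cached kernel MSR (PACAKMSR) elsewhere. A rough, illustrative equivalent is sketched below; the helper name is made up, MSR_EE comes from asm/reg.h, and unlike the patch it edits the live MSR rather than reloading the PACA copy:

/* Hedged sketch of the hard-disable step, not the kernel's own macro:
 * clear MSR[EE] so no external interrupt can arrive between here and
 * the rfid that consumes SRR0/SRR1.
 */
static inline void hard_disable_sketch(void)
{
#ifdef CONFIG_PPC_BOOK3E
	asm volatile("wrteei 0" : : : "memory");
#else
	unsigned long msr;

	asm volatile("mfmsr %0" : "=r" (msr));
	msr &= ~MSR_EE;		/* drop External interrupt Enable */
	asm volatile("mtmsrd %0,1" : : "r" (msr) : "memory");	/* L=1: only EE/RI change */
#endif
}
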
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index c470a40b29f5..a7bc7521c064 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -154,12 +154,12 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
static int kgdb_singlestep(struct pt_regs *regs)
{
struct thread_info *thread_info, *exception_thread_info;
- struct thread_info *backup_current_thread_info = \
- (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
+ struct thread_info *backup_current_thread_info;
if (user_mode(regs))
return 0;
+ backup_current_thread_info = (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
/*
* On Book E and perhaps other processors, singlestep is handled on
* the critical exception stack. This causes current_thread_info()
@@ -185,6 +185,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
/* Restore current_thread_info lastly. */
memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);
+ kfree(backup_current_thread_info);
return 1;
}
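
Taken together, the two hunks allocate the temporary thread_info copy only after the user-mode early return, and free it once current_thread_info() has been restored, closing the leak on both paths. A condensed sketch of the resulting flow (bodies elided; no allocation-failure check is added here because the original code has none):

/* Hedged, condensed view of kgdb_singlestep() after this patch. */
static int kgdb_singlestep_sketch(struct pt_regs *regs)
{
	struct thread_info *backup;

	if (user_mode(regs))
		return 0;		/* nothing allocated yet, nothing to free */

	backup = kmalloc(sizeof(*backup), GFP_KERNEL);

	/* ... back up current_thread_info(), let kgdb single-step ... */

	kfree(backup);			/* pairs with the kmalloc above */
	return 1;
}
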
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6f6b1cccc916..127361e093f4 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -494,10 +494,15 @@ void timer_interrupt(struct pt_regs * regs)
set_dec(DECREMENTER_MAX);
/* Some implementations of hotplug will get timer interrupts while
- * offline, just ignore these
+ * offline; just ignore these.  We also need to set
+ * decrementers_next_tb to the maximum so that __check_irq_replay
+ * doesn't replay the timer interrupt on return, otherwise we'll
+ * trap here infinitely :(
*/
- if (!cpu_online(smp_processor_id()))
+ if (!cpu_online(smp_processor_id())) {
+ *next_tb = ~(u64)0;
return;
+ }
/* Conditionally hard-enable interrupts now that the DEC has been
* bumped to its maximum value
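
Why ~(u64)0 is the right sentinel: the replay path considers a decrementer event pending roughly when the timebase has reached decrementers_next_tb, so pushing the per-CPU value to the maximum guarantees __check_irq_replay() finds nothing to replay on the way out of this interrupt. An illustrative comparison, with stand-in names rather than the kernel's exact code:

/* Hedged sketch of the pending-decrementer test the sentinel defeats. */
static inline int dec_event_pending_sketch(u64 now_tb, u64 next_tb)
{
	/* with next_tb == ~(u64)0 this can never be true, so an offline
	 * CPU is not re-entered by a replayed timer interrupt */
	return now_tb >= next_tb;
}
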
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 315f9495e9b2..f444b94935f5 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -52,7 +52,7 @@ static int power7_marked_instr_event(u64 mmcr1)
for (pmc = 0; pmc < 4; pmc++) {
psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK
<< (OPROFILE_MAX_PMC_NUM - pmc)
- * OPROFILE_MAX_PMC_NUM);
+ * OPROFILE_PMSEL_FIELD_WIDTH);
psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc)
* OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL;
unit = mmcr1 & (OPROFILE_PM_UNIT_MSK
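
The bug was a mask/shift mismatch: the mask for a given PMC must be built with the same per-field width (OPROFILE_PMSEL_FIELD_WIDTH) that the right shift two lines below uses, otherwise the wrong PMCSEL bits get tested. A small illustrative helper that keeps the two in lockstep; the constants are passed as parameters since only the pattern matters here:

/* Hedged sketch: extract the PMCSEL field for one pmc from MMCR1,
 * computing the shift once and using it for both mask and extraction,
 * which is exactly what the fix above restores.
 */
static inline u64 pmcsel_field_sketch(u64 mmcr1, int pmc, int max_pmc,
				      int field_width, u64 field_mask)
{
	int shift = (max_pmc - pmc) * field_width;

	return (mmcr1 & (field_mask << shift)) >> shift;
}
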
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index 95d00173029f..890f30e70f98 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -236,6 +236,13 @@ out:
static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
+ /*
+ * We don't support CPU hotplug. Don't unmap after the system
+ * has already made it to a running state.
+ */
+ if (system_state != SYSTEM_BOOTING)
+ return 0;
+
if (sdcasr_mapbase)
iounmap(sdcasr_mapbase);
if (sdcpwr_mapbase)
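
The added guard keys teardown off the global system_state: once the kernel has left SYSTEM_BOOTING, the exit hook becomes a no-op, so the SDC register mappings set up at probe time stay alive for the life of the running system (per the patch comment, CPU hotplug is not supported, so exit after boot is not a real teardown point). A minimal illustration of the pattern with a hypothetical mapping pointer:

static void __iomem *example_mapbase;	/* hypothetical, stands in for sdcasr/sdcpwr */

/* Hedged sketch of the "only tear down while still booting" guard. */
static int example_exit_sketch(void)
{
	if (system_state != SYSTEM_BOOTING)
		return 0;		/* running system: keep the mapping */

	/* boot-time failure path: safe to undo the probe-time ioremap */
	if (example_mapbase)
		iounmap(example_mapbase);

	return 0;
}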