| author | Arnd Bergmann <arnd@arndb.de> | 2008-02-29 15:16:48 +1100 |
|---|---|---|
| committer | Jeremy Kerr <jk@ozlabs.org> | 2008-02-29 15:16:48 +1100 |
| commit | fae9ca791507876c3ccaa8ab686b2ce42dc7a560 (patch) | |
| tree | a64e844ad1074a52be048ac5d82c5e53047f0abd /arch | |
| parent | 71791bee90dd29b292c7e55c1c00857578c912bd (diff) | |
[POWERPC] spufs: synchronize IRQ when disabling
There is a small race between the context save procedure
and SPU interrupt handling: we expect all interrupt
processing to have finished once the interrupts are disabled,
but a handler may still be running on another CPU.
The obvious fix is to call synchronize_irq() after disabling
the interrupts at the start of the context save procedure,
so that no interrupt handler accesses the SPU during an
ongoing save, or after it.
Thanks to Benjamin Herrenschmidt for pointing this out.
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Diffstat (limited to 'arch')
 arch/powerpc/platforms/cell/spufs/switch.c | 6 ++++++
 1 file changed, 6 insertions(+), 0 deletions(-)
```diff
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 6f5886c7b1f9..e9dc7a55d1b9 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -34,6 +34,7 @@
 
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -117,6 +118,8 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 	 * Write INT_MASK_class1 with value of 0.
 	 * Save INT_Mask_class2 in CSA.
 	 * Write INT_MASK_class2 with value of 0.
+	 * Synchronize all three interrupts to be sure
+	 * we no longer execute a handler on another CPU.
 	 */
 	spin_lock_irq(&spu->register_lock);
 	if (csa) {
@@ -129,6 +132,9 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 	spu_int_mask_set(spu, 2, 0ul);
 	eieio();
 	spin_unlock_irq(&spu->register_lock);
+	synchronize_irq(spu->irqs[0]);
+	synchronize_irq(spu->irqs[1]);
+	synchronize_irq(spu->irqs[2]);
 }
 
 static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
```
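For readers outside the SPU code, here is a minimal, hedged sketch of the general pattern the patch applies: masking an interrupt source only prevents new interrupts from being raised, so code that needs exclusive access must also wait for any handler already executing on another CPU. The device type, helper names, and IRQ count below are hypothetical stand-ins; only synchronize_irq() is the real kernel API used by the patch.

```c
#include <linux/hardirq.h>	/* declares synchronize_irq(), as in the patch */

#define NUM_DEV_IRQS 3		/* hypothetical: three interrupt classes, like the SPU */

struct my_dev {			/* hypothetical device, standing in for struct spu */
	unsigned int irqs[NUM_DEV_IRQS];
};

/* hypothetical helper: mask all interrupt sources at the device */
static void mask_device_interrupts(struct my_dev *dev)
{
	/* device-specific MMIO writes would go here */
}

static void quiesce_device_irqs(struct my_dev *dev)
{
	int i;

	/* Stop new interrupts from being raised... */
	mask_device_interrupts(dev);

	/*
	 * ...then wait for any handler that is already running on
	 * another CPU.  synchronize_irq() returns only once no handler
	 * for that IRQ line is still in flight.
	 */
	for (i = 0; i < NUM_DEV_IRQS; i++)
		synchronize_irq(dev->irqs[i]);

	/* From here on, no handler for these lines touches the device. */
}
```

Note how the patch calls synchronize_irq() only after dropping spu->register_lock: waiting for handlers while holding a lock they may themselves need (and with local interrupts disabled by spin_lock_irq()) could deadlock, so the wait happens after the unlock.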