Diffstat (limited to 'kernel/irq')
-rw-r--r--   kernel/irq/Makefile    |  3
-rw-r--r--   kernel/irq/manage.c    | 23
-rw-r--r--   kernel/irq/migration.c | 65
3 files changed, 84 insertions(+), 7 deletions(-)
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 49378738ff5e..2b33f852be3e 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,5 +1,4 @@
 
-obj-y := handle.o manage.o spurious.o
+obj-y := handle.o manage.o spurious.o migration.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
-
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 97d5559997d2..6edfcef291e8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -204,10 +204,14 @@ int setup_irq(unsigned int irq, struct irqaction * new)
 	p = &desc->action;
 	if ((old = *p) != NULL) {
 		/* Can't share interrupts unless both agree to */
-		if (!(old->flags & new->flags & SA_SHIRQ)) {
-			spin_unlock_irqrestore(&desc->lock,flags);
-			return -EBUSY;
-		}
+		if (!(old->flags & new->flags & SA_SHIRQ))
+			goto mismatch;
+
+#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
+		/* All handlers must agree on per-cpuness */
+		if ((old->flags & IRQ_PER_CPU) != (new->flags & IRQ_PER_CPU))
+			goto mismatch;
+#endif
 
 		/* add new interrupt at end of irq queue */
 		do {
@@ -218,7 +222,10 @@ int setup_irq(unsigned int irq, struct irqaction * new)
 	}
 
 	*p = new;
-
+#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
+	if (new->flags & SA_PERCPU_IRQ)
+		desc->status |= IRQ_PER_CPU;
+#endif
 	if (!shared) {
 		desc->depth = 0;
 		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
@@ -236,6 +243,12 @@ int setup_irq(unsigned int irq, struct irqaction * new)
 	register_handler_proc(irq, new);
 
 	return 0;
+
+mismatch:
+	spin_unlock_irqrestore(&desc->lock, flags);
+	printk(KERN_ERR "%s: irq handler mismatch\n", __FUNCTION__);
+	dump_stack();
+	return -EBUSY;
 }
 
 /**
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
new file mode 100644
index 000000000000..52a8655fa080
--- /dev/null
+++ b/kernel/irq/migration.c
@@ -0,0 +1,65 @@
+#include <linux/irq.h>
+
+#if defined(CONFIG_GENERIC_PENDING_IRQ)
+
+void set_pending_irq(unsigned int irq, cpumask_t mask)
+{
+	irq_desc_t *desc = irq_desc + irq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	desc->move_irq = 1;
+	pending_irq_cpumask[irq] = mask;
+	spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+void move_native_irq(int irq)
+{
+	cpumask_t tmp;
+	irq_desc_t *desc = irq_descp(irq);
+
+	if (likely(!desc->move_irq))
+		return;
+
+	/*
+	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
+	 */
+	if (CHECK_IRQ_PER_CPU(desc->status)) {
+		WARN_ON(1);
+		return;
+	}
+
+	desc->move_irq = 0;
+
+	if (likely(cpus_empty(pending_irq_cpumask[irq])))
+		return;
+
+	if (!desc->handler->set_affinity)
+		return;
+
+	assert_spin_locked(&desc->lock);
+
+	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
+
+	/*
+	 * If there was a valid mask to work with, do the
+	 * disable, re-program, enable sequence.
+	 * This is *not* particularly important for level-triggered
+	 * interrupts, but in an edge-trigger case we might be setting
+	 * the RTE while an active trigger is coming in. This could
+	 * cause some ioapics to malfunction.
+	 * Being paranoid, I guess!
+	 */
+	if (unlikely(!cpus_empty(tmp))) {
+		if (likely(!(desc->status & IRQ_DISABLED)))
+			desc->handler->disable(irq);
+
+		desc->handler->set_affinity(irq, tmp);
+
+		if (likely(!(desc->status & IRQ_DISABLED)))
+			desc->handler->enable(irq);
+	}
+	cpus_clear(pending_irq_cpumask[irq]);
+}
+
+#endif
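
The protocol in migration.c may be easier to see stripped of kernel plumbing: set_pending_irq() only records the request under desc->lock, and the actual route reprogramming is deferred until move_native_irq() runs at a safe point in the interrupt path, where a disable/set_affinity/enable sequence protects edge-triggered sources. Below is a minimal, hypothetical userspace model of that handshake, assuming a single IRQ and toy bitmasks; none of its names (fake_desc, request_move, apply_pending_move, fake_set_affinity) are kernel APIs, only the control flow mirrors the patch.

/*
 * Userspace model of the deferred-migration handshake above.
 * Everything here is invented for illustration; only the protocol
 * (record the request, apply it later at a safe point) mirrors the patch.
 */
#include <stdio.h>

typedef unsigned long cpumask_t;             /* toy mask: one bit per CPU */

struct fake_desc {
	int move_irq;                        /* a move has been requested */
	int disabled;                        /* stands in for IRQ_DISABLED */
	cpumask_t affinity;                  /* mask currently in "hardware" */
};

static cpumask_t pending_mask;               /* models pending_irq_cpumask[irq] */
static const cpumask_t online_map = 0x0f;    /* models cpu_online_map: CPUs 0-3 */

/* Stands in for desc->handler->set_affinity(): reprogram the route. */
static void fake_set_affinity(struct fake_desc *desc, cpumask_t mask)
{
	desc->affinity = mask;
	printf("route reprogrammed to mask 0x%lx\n", mask);
}

/* Requester side (like set_pending_irq): record, don't touch hardware. */
static void request_move(struct fake_desc *desc, cpumask_t mask)
{
	desc->move_irq = 1;
	pending_mask = mask;
}

/* Interrupt side (like move_native_irq): apply the move at a safe point. */
static void apply_pending_move(struct fake_desc *desc)
{
	cpumask_t tmp;
	int was_enabled;

	if (!desc->move_irq)
		return;
	desc->move_irq = 0;

	tmp = pending_mask & online_map;     /* cpus_and() with the online map */
	if (tmp != 0) {
		/* disable / reprogram / enable, as in the patch */
		was_enabled = !desc->disabled;
		if (was_enabled)
			desc->disabled = 1;
		fake_set_affinity(desc, tmp);
		if (was_enabled)
			desc->disabled = 0;
	}
	pending_mask = 0;                    /* cpus_clear() */
}

int main(void)
{
	struct fake_desc desc = { .affinity = 0x01 };

	request_move(&desc, 0x22);           /* asks for CPU 1 (online) + CPU 5 (offline) */
	apply_pending_move(&desc);           /* "next interrupt": affinity becomes 0x02 */
	printf("final affinity: 0x%lx\n", desc.affinity);
	return 0;
}

Deferring the hardware touch is the design point here: per the comment in the patch, writing the RTE while an edge-triggered interrupt is actively firing can make some ioapics malfunction, so the requested mask is parked in pending_irq_cpumask[] and replayed from the interrupt path, bracketed by disable()/enable() when the line is not already disabled.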