| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-11 16:32:21 +0900 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-11 16:32:21 +0900 |
| commit | 05ad391dbc2bd27b8d868cf9c3ec1b68a2126a16 | |
| tree | abfc59028a00a15428ab8b14abe8a9e99f69a9a3 /arch/arm64/kernel/irq.c | |
| parent | 8b5baa460b69c27389353eeff0dbe51dc695da60 | |
| parent | 67317c2689567c24d18e0dd43ab6d409fd42dc6e | |
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64
Pull ARM64 update from Catalin Marinas:
"Main features:
- Ticket-based spinlock implementation and lockless lockref support
- Big endian support
- CPU hotplug support, currently for PSCI (Power State Coordination
Interface) capable firmware
- Virtual address space extended to 42-bit in the 64K page
configuration (maximum VA space with 2 levels of page tables)
- Compat (AArch32) kuser helpers updated to ARMv8 (make use of
load-acquire/store-release instructions)
- Code cleanup, defconfig update and minor fixes"
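The ticket-based spinlock mentioned first in the list above is easiest to see in portable code. The actual arm64 implementation is inline assembly using exclusive load/store instructions (in arch/arm64/include/asm/spinlock.h) and is not reproduced here; the following is only an illustrative C11 sketch of the ticket idea, with the struct and function names invented for the example:

```c
/*
 * Illustrative only: a generic ticket lock in C11 atomics.  This is NOT
 * the arm64 code from this merge, just a sketch of the algorithm it uses.
 */
#include <stdatomic.h>

struct ticket_lock {
	atomic_uint next;   /* next ticket to hand out */
	atomic_uint owner;  /* ticket currently being served */
};

static void ticket_lock(struct ticket_lock *l)
{
	/* Take a ticket; fetch_add returns our place in the queue. */
	unsigned int me = atomic_fetch_add_explicit(&l->next, 1,
						    memory_order_relaxed);

	/* Spin until our number comes up; acquire orders the critical section. */
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
		;
}

static void ticket_unlock(struct ticket_lock *l)
{
	/* Only the holder writes owner here, so a plain increment is safe;
	 * the release store publishes the critical section's writes. */
	unsigned int cur = atomic_load_explicit(&l->owner,
						memory_order_relaxed);
	atomic_store_explicit(&l->owner, cur + 1, memory_order_release);
}
```

Fairness falls out of the ticket counter: CPUs take the lock strictly in the order they asked for it, which a plain test-and-set lock cannot guarantee.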
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64: (43 commits)
ARM64: /proc/interrupts: display IPIs of online CPUs only
arm64: locks: Remove CONFIG_GENERIC_LOCKBREAK
arm64: KVM: vgic: byteswap GICv2 access on world switch if BE
arm64: KVM: initialize HYP mode following the kernel endianness
arm64: compat: Clear the IT state independent of the 32-bit ARM or Thumb-2 mode
arm64: Use 42-bit address space with 64K pages
arm64: module: ensure instruction is little-endian before manipulation
arm64: defconfig: Enable CONFIG_PREEMPT by default
arm64: fix access to preempt_count from assembly code
arm64: move enabling of GIC before CPUs are set online
arm64: use generic RW_DATA_SECTION macro in linker script
arm64: Slightly improve the warning on CPU0 enable-method
ARM64: simplify cpu_read_bootcpu_ops using OF/DT helper
ARM64: DT: define ARM64 specific arch_match_cpu_phys_id
arm64: allow ioremap_cache() to use existing RAM mappings
arm64: update 32-bit kuser helpers to ARMv8
arm64: perf: fix event number mask
arm64: kconfig: allow CPU_BIG_ENDIAN to be selected
arm64: Fix the endianness of arch_spinlock_t
arm64: big-endian: write CPU holding pen address as LE
...
Diffstat (limited to 'arch/arm64/kernel/irq.c')
-rw-r--r-- arch/arm64/kernel/irq.c | 61
1 file changed, 61 insertions, 0 deletions
```diff
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index ecb3354292ed..473e5dbf8f39 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -81,3 +81,64 @@ void __init init_IRQ(void)
 	if (!handle_arch_irq)
 		panic("No interrupt controller found.");
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+static bool migrate_one_irq(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+	const struct cpumask *affinity = d->affinity;
+	struct irq_chip *c;
+	bool ret = false;
+
+	/*
+	 * If this is a per-CPU interrupt, or the affinity does not
+	 * include this CPU, then we have nothing to do.
+	 */
+	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+		return false;
+
+	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		affinity = cpu_online_mask;
+		ret = true;
+	}
+
+	c = irq_data_get_irq_chip(d);
+	if (!c->irq_set_affinity)
+		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+		cpumask_copy(d->affinity, affinity);
+
+	return ret;
+}
+
+/*
+ * The current CPU has been marked offline.  Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
+ */
+void migrate_irqs(void)
+{
+	unsigned int i;
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	for_each_irq_desc(i, desc) {
+		bool affinity_broken;
+
+		raw_spin_lock(&desc->lock);
+		affinity_broken = migrate_one_irq(desc);
+		raw_spin_unlock(&desc->lock);
+
+		if (affinity_broken)
+			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
+					    i, smp_processor_id());
+	}
+
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
```
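For context on where this lands: migrate_irqs() is intended to be called from the architecture's CPU-offline path, after the dying CPU has been cleared from cpu_online_mask. A rough sketch of such a caller, modelled on the 32-bit arm pattern rather than copied from this series:

```c
/*
 * Hypothetical sketch of the caller, modelled on 32-bit arm's
 * __cpu_disable(); not the literal arm64 hotplug patch from this merge.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Mark this CPU offline first, so migrate_one_irq() no longer
	 * sees it in cpu_online_mask ...
	 */
	set_cpu_online(cpu, false);

	/* ... then move every IRQ still targeting this CPU elsewhere. */
	migrate_irqs();

	return 0;
}
```

The ordering matters: set_cpu_online() must come first so that migrate_one_irq()'s cpumask_any_and(affinity, cpu_online_mask) check stops counting the dying CPU as a valid target and falls back to cpu_online_mask, which is when the "no longer affine" warning fires.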