| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-21 16:01:50 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-21 16:01:50 -0700 |
| commit | ff8ce5f67ddca709fe59e6173f89260f0fdc2b22 (patch) | |
| tree | 90d3ad380b290d251b54590be485b2ffb4528e5a /arch/arm/include/asm/mmu_context.h | |
| parent | 4f6ade91532b5b05ea28219b891f12a3cec528cd (diff) | |
| parent | 4ab1056766a4e49f6b9ef324313dd1583f8f8f4e (diff) | |
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull core ARM updates from Russell King:
"This is the bulk of the core ARM updates for this merge window.
Included in here is a different way to handle the VIVT cache flushing
on context switch, which should allow scheduler folk to remove a
special case in their core code.
We have architected timer support here, which is a set of timers
specified by the ARM architecture for future SoCs. So we should see
less variability in timer design going forward.
The last big thing here is my cleanup to the way we handle PCI across
ARM, fixing some oddities in platforms that hadn't realised there was
a way to deal with their private data already built into our PCI
backend.
I've also removed support for the ARMv3 architecture; it hasn't worked
properly for years so it seems pointless to keep it around."
* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (47 commits)
ARM: PCI: remove per-pci_hw list of buses
ARM: PCI: dove/kirkwood/mv78xx0: use sys->private_data
ARM: PCI: provide a default bus scan implementation
ARM: PCI: get rid of pci_std_swizzle()
ARM: PCI: versatile: fix PCI interrupt setup
ARM: PCI: integrator: use common PCI swizzle
ARM: 7416/1: LPAE: Remove unused L_PTE_(BUFFERABLE|CACHEABLE) macros
ARM: 7415/1: vfp: convert printk's to pr_*'s
ARM: decompressor: avoid speculative prefetch from non-RAM areas
ARM: Remove ARMv3 support from decompressor
ARM: 7413/1: move read_{boot,persistent}_clock to the architecture level
ARM: Remove support for ARMv3 ARM610 and ARM710 CPUs
ARM: 7363/1: DEBUG_LL: limit early mapping to the minimum
ARM: 7391/1: versatile: add some auxdata for device trees
ARM: 7389/2: plat-versatile: modernize FPGA IRQ controller
AMBA: get rid of last two uses of NO_IRQ
ARM: 7408/1: cacheflush: return error to userspace when flushing syscall fails
ARM: 7409/1: Do not call flush_cache_user_range with mmap_sem held
ARM: 7404/1: cmpxchg64: use atomic64 and local64 routines for cmpxchg64
ARM: 7347/1: SCU: use cpu_logical_map for per-CPU low power mode
...
Diffstat (limited to 'arch/arm/include/asm/mmu_context.h')
| -rw-r--r-- | arch/arm/include/asm/mmu_context.h | 104 |
|---|---|---|

1 file changed, 79 insertions, 25 deletions
```diff
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a0b3cac0547c..0306bc642c0d 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,45 +43,104 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
-#ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
+void cpu_set_reserved_ttbr0(void);
 
-static inline void check_context(struct mm_struct *mm)
+static inline void switch_new_context(struct mm_struct *mm)
 {
-	/*
-	 * This code is executed with interrupts enabled. Therefore,
-	 * mm->context.id cannot be updated to the latest ASID version
-	 * on a different CPU (and condition below not triggered)
-	 * without first getting an IPI to reset the context. The
-	 * alternative is to take a read_lock on mm->context.id_lock
-	 * (after changing its type to rwlock_t).
-	 */
-	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		__new_context(mm);
+	unsigned long flags;
+
+	__new_context(mm);
+
+	local_irq_save(flags);
+	cpu_switch_mm(mm->pgd, mm);
+	local_irq_restore(flags);
+}
 
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
+
+	/*
+	 * Required during context switch to avoid speculative page table
+	 * walking with the wrong TTBR.
+	 */
+	cpu_set_reserved_ttbr0();
+
+	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
+		/*
+		 * The ASID is from the current generation, just switch to the
+		 * new pgd. This condition is only true for calls from
+		 * context_switch() and interrupts are already disabled.
+		 */
+		cpu_switch_mm(mm->pgd, mm);
+	else if (irqs_disabled())
+		/*
+		 * Defer the new ASID allocation until after the context
+		 * switch critical region since __new_context() cannot be
+		 * called with interrupts disabled (it sends IPIs).
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		/*
+		 * That is a direct call to switch_mm() or activate_mm() with
+		 * interrupts enabled and a new context.
+		 */
+		switch_new_context(mm);
 }
 
 #define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
 
-#else
-
-static inline void check_context(struct mm_struct *mm)
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
 {
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
+		switch_new_context(current->mm);
+}
+
+#else	/* !CONFIG_CPU_HAS_ASID */
+
 #ifdef CONFIG_MMU
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
-#endif
+
+	if (irqs_disabled())
+		/*
+		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+		 * high interrupt latencies, defer the call and continue
+		 * running with the old mm. Since we only support UP systems
+		 * on non-ASID CPUs, the old mm will remain valid until the
+		 * finish_arch_post_lock_switch() call.
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		cpu_switch_mm(mm->pgd, mm);
 }
 
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+		struct mm_struct *mm = current->mm;
+		cpu_switch_mm(mm->pgd, mm);
+	}
+}
+
+#endif	/* CONFIG_MMU */
+
 #define init_new_context(tsk,mm)	0
 
-#endif
+#endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
 
@@ -119,12 +178,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
-#ifdef CONFIG_SMP
-		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
-		*crt_mm = next;
-#endif
-		check_context(next);
-		cpu_switch_mm(next->pgd, next);
+		check_and_switch_context(next, tsk);
 		if (cache_is_vivt())
 			cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	}
```
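The interesting pattern in this hunk is the deferred mm switch mentioned in the pull message: when check_and_switch_context() runs with interrupts disabled (i.e. from context_switch()), the expensive cpu_switch_mm() work (a VIVT cache flush, or an ASID allocation that sends IPIs) is postponed by setting TIF_SWITCH_MM, and finish_arch_post_lock_switch() completes it once the scheduler has dropped its locks and re-enabled interrupts. Below is a minimal user-space C sketch of that flag-deferral pattern; the `struct mm`, `irqs_off`, `tif_switch_mm`, and `current_mm` stand-ins are simplifications for illustration, not kernel API.

```c
/*
 * Minimal user-space model of the deferred-switch pattern in the diff
 * above. All stubs here are illustrative: in the kernel, irqs_off is
 * irqs_disabled(), tif_switch_mm is the TIF_SWITCH_MM thread flag, and
 * cpu_switch_mm() flushes caches / reprograms the page table base.
 */
#include <stdbool.h>
#include <stdio.h>

struct mm { const char *name; };

static bool irqs_off;         /* stand-in for irqs_disabled()   */
static bool tif_switch_mm;    /* stand-in for TIF_SWITCH_MM     */
static struct mm *current_mm; /* stand-in for current->mm       */

static void cpu_switch_mm(struct mm *mm)
{
	/* The expensive part we must not run with interrupts off. */
	printf("switched to %s\n", mm->name);
}

static void check_and_switch_context(struct mm *next)
{
	current_mm = next;
	if (irqs_off)
		tif_switch_mm = true; /* defer: too costly right now */
	else
		cpu_switch_mm(next);  /* safe to do the work directly */
}

static void finish_arch_post_lock_switch(void)
{
	/* Runs after the scheduler drops its locks, IRQs enabled. */
	if (tif_switch_mm) {
		tif_switch_mm = false;
		cpu_switch_mm(current_mm); /* complete the deferred switch */
	}
}

int main(void)
{
	struct mm a = { "mm A" }, b = { "mm B" };

	irqs_off = true;                /* inside context_switch()     */
	check_and_switch_context(&a);   /* deferred via the flag       */
	irqs_off = false;               /* scheduler re-enables IRQs   */
	finish_arch_post_lock_switch(); /* the switch happens here     */

	check_and_switch_context(&b);   /* direct path, IRQs enabled   */
	return 0;
}
```

Note also the fast path's generation check in the real code: mm->context.id keeps a generation number in the bits above the low ASID_BITS, so `(mm->context.id ^ cpu_last_asid) >> ASID_BITS` is zero exactly when the mm's ASID belongs to the current generation and can be reused without allocating a new one.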