Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--	arch/arm/mm/context.c	124
1 file changed, 110 insertions, 14 deletions
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a9e22e31eaa1..b0ee9ba3cfab 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -10,12 +10,17 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
 
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
 static DEFINE_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
@@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	mm->context.id = 0;
+	spin_lock_init(&mm->context.id_lock);
 }
 
+static void flush_context(void)
+{
+	/* set the reserved ASID before flushing the TLB */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+	isb();
+	local_flush_tlb_all();
+	if (icache_is_vivt_asid_tagged()) {
+		__flush_icache_all();
+		dsb();
+	}
+}
+
+#ifdef CONFIG_SMP
+
+static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	unsigned long flags;
+
+	/*
+	 * Locking needed for multi-threaded applications where the
+	 * same mm->context.id could be set from different CPUs during
+	 * the broadcast. This function is also called via IPI so the
+	 * mm->context.id_lock has to be IRQ-safe.
+	 */
+	spin_lock_irqsave(&mm->context.id_lock, flags);
+	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+		/*
+		 * Old version of ASID found. Set the new one and
+		 * reset mm_cpumask(mm).
+		 */
+		mm->context.id = asid;
+		cpumask_clear(mm_cpumask(mm));
+	}
+	spin_unlock_irqrestore(&mm->context.id_lock, flags);
+
+	/*
+	 * Set the mm_cpumask(mm) bit for the current CPU.
+	 */
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+
+/*
+ * Reset the ASID on the current CPU. This function call is broadcast
+ * from the CPU handling the ASID rollover and holding cpu_asid_lock.
+ */
+static void reset_context(void *info)
+{
+	unsigned int asid;
+	unsigned int cpu = smp_processor_id();
+	struct mm_struct *mm = per_cpu(current_mm, cpu);
+
+	/*
+	 * Check if a current_mm was set on this CPU as it might still
+	 * be in the early booting stages and using the reserved ASID.
+	 */
+	if (!mm)
+		return;
+
+	smp_rmb();
+	asid = cpu_last_asid + cpu + 1;
+
+	flush_context();
+	set_mm_context(mm, asid);
+
+	/* set the new ASID */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+	isb();
+}
+
+#else
+
+static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	mm->context.id = asid;
+	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
+}
+
+#endif
+
 void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 
 	spin_lock(&cpu_asid_lock);
+#ifdef CONFIG_SMP
+	/*
+	 * Check the ASID again, in case the change was broadcast from
+	 * another CPU before we acquired the lock.
+	 */
+	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		spin_unlock(&cpu_asid_lock);
+		return;
+	}
+#endif
+	/*
+	 * At this point, it is guaranteed that the current mm (with
+	 * an old ASID) isn't active on any other CPU since the ASIDs
+	 * are changed simultaneously via IPI.
+	 */
 	asid = ++cpu_last_asid;
 	if (asid == 0)
 		asid = cpu_last_asid = ASID_FIRST_VERSION;
@@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm)
 	 * to start a new version and flush the TLB.
 	 */
 	if (unlikely((asid & ~ASID_MASK) == 0)) {
-		asid = ++cpu_last_asid;
-		/* set the reserved ASID before flushing the TLB */
-		asm("mcr	p15, 0, %0, c13, c0, 1	@ set reserved context ID\n"
-		    :
-		    : "r" (0));
-		isb();
-		flush_tlb_all();
-		if (icache_is_vivt_asid_tagged()) {
-			__flush_icache_all();
-			dsb();
-		}
+		asid = cpu_last_asid + smp_processor_id() + 1;
+		flush_context();
+#ifdef CONFIG_SMP
+		smp_wmb();
+		smp_call_function(reset_context, NULL, 1);
+#endif
+		cpu_last_asid += NR_CPUS;
 	}
 
-	spin_unlock(&cpu_asid_lock);
-	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-	mm->context.id = asid;
+	set_mm_context(mm, asid);
+	spin_unlock(&cpu_asid_lock);
 }
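The patch keys everything off a generation check: the low ASID_BITS of mm->context.id are the hardware ASID and the upper bits act as a generation counter, so (mm->context.id ^ cpu_last_asid) >> ASID_BITS is non-zero exactly when the mm's ASID was handed out before the last rollover. The following is a minimal user-space sketch of that check, assuming the ARMv7 values ASID_BITS = 8 and ASID_FIRST_VERSION = 1 << ASID_BITS (the real definitions live in arch/arm/include/asm/mmu_context.h; asid_is_stale is a hypothetical helper name, not a kernel function):

#include <stdio.h>

#define ASID_BITS		8
#define ASID_FIRST_VERSION	(1 << ASID_BITS)

/* Non-zero iff context_id belongs to an older generation than last_asid. */
static unsigned int asid_is_stale(unsigned int context_id, unsigned int last_asid)
{
	return (context_id ^ last_asid) >> ASID_BITS;
}

int main(void)
{
	/* Generation 2, hardware ASID 5 was handed out most recently. */
	unsigned int cpu_last_asid = 2 * ASID_FIRST_VERSION + 5;

	/* Same generation: the mm's ASID is still valid. */
	printf("%u\n", asid_is_stale(2 * ASID_FIRST_VERSION + 3, cpu_last_asid));	/* 0 */

	/* Older generation: a rollover happened, the mm needs a new ASID. */
	printf("%u\n", asid_is_stale(1 * ASID_FIRST_VERSION + 3, cpu_last_asid));	/* 3 */
	return 0;
}

This is why both set_mm_context() and the re-check under cpu_asid_lock in __new_context() can use the same cheap XOR-and-shift test instead of comparing full generation fields.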
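The rollover numbering is also worth unpacking: when ++cpu_last_asid wraps the low ASID bits to 0, the lock holder takes cpu_last_asid + smp_processor_id() + 1, each IPI'd CPU takes cpu_last_asid + cpu + 1 against the same pre-bump value, and only then is cpu_last_asid advanced by NR_CPUS to reserve those slots. A sketch of the arithmetic, assuming a hypothetical NR_CPUS of 4 and the same ASID_BITS of 8 as above:

#include <stdio.h>

#define NR_CPUS			4
#define ASID_BITS		8
#define ASID_FIRST_VERSION	(1 << ASID_BITS)

int main(void)
{
	/* Rollover point: ++cpu_last_asid just wrapped the low bits to 0. */
	unsigned int cpu_last_asid = 3 * ASID_FIRST_VERSION;
	unsigned int cpu;

	/*
	 * Every CPU computes cpu + 1 above the pre-bump value, so each
	 * gets a distinct ASID (1..NR_CPUS) in the new generation and
	 * ASID 0 stays reserved for the flush window.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%u -> context.id 0x%x\n", cpu, cpu_last_asid + cpu + 1);

	cpu_last_asid += NR_CPUS;	/* reserve the slots just handed out */
	printf("cpu_last_asid now 0x%x\n", cpu_last_asid);
	return 0;
}

The smp_wmb()/smp_rmb() pair around the smp_call_function() broadcast orders this: the caller publishes the wrapped cpu_last_asid before the IPI, and reset_context() reads it only after the barrier, so all CPUs base their per-CPU offset on the same value.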
