Diffstat (limited to 'arch/ia64/mm/tlb.c')
 arch/ia64/mm/tlb.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 987fb754d6ad..c93e0f2b5fea 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -87,10 +87,15 @@ wrap_mmu_context (struct mm_struct *mm)
}
void
-ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
+ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long nbits)
{
static DEFINE_SPINLOCK(ptcg_lock);
+ if (mm != current->active_mm) {
+ flush_tlb_all();
+ return;
+ }
+
/* HW requires global serialization of ptc.ga. */
spin_lock(&ptcg_lock);
{
@@ -136,15 +141,12 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
unsigned long size = end - start;
unsigned long nbits;
+#ifndef CONFIG_SMP
if (mm != current->active_mm) {
- /* this does happen, but perhaps it's not worth optimizing for? */
-#ifdef CONFIG_SMP
- flush_tlb_all();
-#else
mm->context = 0;
-#endif
return;
}
+#endif
nbits = ia64_fls(size + 0xfff);
while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
@@ -154,7 +156,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
start &= ~((1UL << nbits) - 1);
# ifdef CONFIG_SMP
- platform_global_tlb_purge(start, end, nbits);
+ platform_global_tlb_purge(mm, start, end, nbits);
# else
do {
ia64_ptcl(start, (nbits<<2));
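
For context, the patch above threads the mm being flushed into ia64_global_tlb_purge() so that, on SMP, a flush aimed at an mm other than the currently active one degrades to a full TLB flush instead of a ranged ptc.ga purge. Below is a minimal standalone sketch of that control flow in plain C; it is not kernel code, and names such as my_mm, my_global_tlb_purge(), my_flush_tlb_all() and active_mm are illustrative stand-ins for struct mm_struct, ia64_global_tlb_purge(), flush_tlb_all() and current->active_mm.

    /*
     * Standalone sketch (user-space C, not kernel code) of the new
     * control flow: the global purge now receives the mm being flushed
     * and falls back to a full TLB flush when that mm is not the one
     * active on this CPU.
     */
    #include <stdio.h>

    struct my_mm { int id; };

    static struct my_mm *active_mm;   /* stand-in for current->active_mm */

    static void my_flush_tlb_all(void)
    {
            printf("full TLB flush (mm is not the active one)\n");
    }

    static void my_global_tlb_purge(struct my_mm *mm,
                                    unsigned long start, unsigned long end,
                                    unsigned long nbits)
    {
            /* Mirrors the early return added to ia64_global_tlb_purge(). */
            if (mm != active_mm) {
                    my_flush_tlb_all();
                    return;
            }
            printf("ranged purge: [%#lx, %#lx), stride 2^%lu\n",
                   start, end, nbits);
    }

    int main(void)
    {
            struct my_mm a = { 1 }, b = { 2 };

            active_mm = &a;
            my_global_tlb_purge(&a, 0x1000UL, 0x3000UL, 12UL); /* ranged purge */
            my_global_tlb_purge(&b, 0x1000UL, 0x3000UL, 12UL); /* full flush   */
            return 0;
    }

Compiled and run, the first call takes the ranged-purge path and the second falls back to the full flush, mirroring the new early return in the patched function.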