author      Martin Schwidefsky <schwidefsky@de.ibm.com>    2013-10-28 14:48:30 +0100
committer   Martin Schwidefsky <schwidefsky@de.ibm.com>    2013-11-04 13:51:47 +0100
commit      106078641f32a6a10d9759f809f809725695cb09 (patch)
tree        a65201fd31994348a067c63e820530ad15387476 /arch/s390/mm
parent      be39f1968e33ca641af120a2d659421ad2225dea (diff)
s390/mm,tlb: correct tlb flush on page table upgrade
The IDTE instruction used to flush TLB entries for a specific address space uses the address-space-control element (ASCE) to identify affected TLB entries. The upgrade of a page table adds a new top level page table which changes the ASCE. The TLB entries associated with the old ASCE need to be flushed and the ASCE for the address space needs to be replaced synchronously on all CPUs which currently use it. The concept of a lazy ASCE update with an exception handler is broken.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
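The core of the fix is the callback added in the pgtable.c hunk below: once the new top level page table is installed under mm->page_table_lock, every CPU that is currently running with this mm reloads the ASCE and flushes its local TLB. An annotated sketch of that pattern follows; the comments and the crst_table_publish() wrapper are illustrative additions, while update_mm(), __tlb_flush_local() and on_each_cpu() are the helpers the patch itself uses (the header names are assumptions).

/*
 * Sketch of the synchronous ASCE replacement added by this patch; see the
 * pgtable.c hunk below for the real context inside crst_table_upgrade().
 */
#include <linux/smp.h>           /* on_each_cpu() */
#include <linux/sched.h>         /* current, struct mm_struct */
#include <asm/mmu_context.h>     /* update_mm(): load the ASCE from mm->pgd (header name assumed) */
#include <asm/tlbflush.h>        /* __tlb_flush_local() (header name assumed) */

static void __crst_table_upgrade(void *arg)
{
        struct mm_struct *mm = arg;

        /* Only CPUs that currently run with this mm must reload the ASCE. */
        if (current->active_mm == mm)
                update_mm(mm, current);
        /* Drop the local TLB entries still tagged with the old ASCE. */
        __tlb_flush_local();
}

/* Illustrative wrapper: in the patch this call sits at the end of
 * crst_table_upgrade(), guarded by the new "flush" flag. */
static void crst_table_publish(struct mm_struct *mm)
{
        /* wait=0: the sending CPU runs the callback directly and does not
         * wait for the remote CPUs to finish their IPI handlers. */
        on_each_cpu(__crst_table_upgrade, mm, 0);
}

Replacing the ASCE from this cross-CPU callback instead of lazily from a fault handler is what allows do_asce_exception() to be removed in the fault.c hunk.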
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--   arch/s390/mm/fault.c     37
-rw-r--r--   arch/s390/mm/mmap.c      12
-rw-r--r--   arch/s390/mm/pgtable.c   18
3 files changed, 20 insertions(+), 47 deletions(-)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 8f29762671cf..d95265b2719f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -423,43 +423,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs)
do_fault_error(regs, fault);
}
-#ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long trans_exc_code;
-
- /*
- * The instruction that caused the program check has
- * been nullified. Don't signal single step via SIGTRAP.
- */
- clear_tsk_thread_flag(current, TIF_PER_TRAP);
-
- trans_exc_code = regs->int_parm_long;
- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
- goto no_context;
-
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
- up_read(&mm->mmap_sem);
-
- if (vma) {
- update_mm(mm, current);
- return;
- }
-
- /* User mode accesses just cause a SIGSEGV */
- if (user_mode(regs)) {
- do_sigsegv(regs, SEGV_MAPERR);
- return;
- }
-
-no_context:
- do_no_context(regs);
-}
-#endif
-
int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
{
struct pt_regs regs;
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 40023290ee5b..6bcb045d2bd2 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -101,18 +101,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
- int rc;
-
if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
return 0;
if (!(flags & MAP_FIXED))
addr = 0;
- if ((addr + len) >= TASK_SIZE) {
- rc = crst_table_upgrade(current->mm, 1UL << 53);
- if (rc)
- return rc;
- update_mm(current->mm, current);
- }
+ if ((addr + len) >= TASK_SIZE)
+ return crst_table_upgrade(current->mm, 1UL << 53);
return 0;
}
@@ -132,7 +126,6 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
- update_mm(mm, current);
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
}
return area;
@@ -155,7 +148,6 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
- update_mm(mm, current);
area = arch_get_unmapped_area_topdown(filp, addr, len,
pgoff, flags);
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index a9be08899b0c..0a2e5e086749 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -48,12 +48,23 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
}
#ifdef CONFIG_64BIT
+static void __crst_table_upgrade(void *arg)
+{
+ struct mm_struct *mm = arg;
+
+ if (current->active_mm == mm)
+ update_mm(mm, current);
+ __tlb_flush_local();
+}
+
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
unsigned long *table, *pgd;
unsigned long entry;
+ int flush;
BUG_ON(limit > (1UL << 53));
+ flush = 0;
repeat:
table = crst_table_alloc(mm);
if (!table)
@@ -79,12 +90,15 @@ repeat:
mm->pgd = (pgd_t *) table;
mm->task_size = mm->context.asce_limit;
table = NULL;
+ flush = 1;
}
spin_unlock_bh(&mm->page_table_lock);
if (table)
crst_table_free(mm, table);
if (mm->context.asce_limit < limit)
goto repeat;
+ if (flush)
+ on_each_cpu(__crst_table_upgrade, mm, 0);
return 0;
}
@@ -92,6 +106,8 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
pgd_t *pgd;
+ if (current->active_mm == mm)
+ __tlb_flush_mm(mm);
while (mm->context.asce_limit > limit) {
pgd = mm->pgd;
switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -114,6 +130,8 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
mm->task_size = mm->context.asce_limit;
crst_table_free(mm, (unsigned long *) pgd);
}
+ if (current->active_mm == mm)
+ update_mm(mm, current);
}
#endif