Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/Makefile       |  2
-rw-r--r--  arch/arm64/mm/cache.S        |  6
-rw-r--r--  arch/arm64/mm/dma-mapping.c  |  2
-rw-r--r--  arch/arm64/mm/fault.c        |  8
-rw-r--r--  arch/arm64/mm/mmu.c          | 67
-rw-r--r--  arch/arm64/mm/proc.S         |  2
-rw-r--r--  arch/arm64/mm/tlb.S          | 71
7 files changed, 40 insertions(+), 118 deletions(-)
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index b51d36401d83..3ecb56c624d3 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -1,5 +1,5 @@
obj-y := dma-mapping.o extable.o fault.o init.o \
cache.o copypage.o flush.o \
ioremap.o mmap.o pgd.o mmu.o \
- context.o tlb.o proc.o
+ context.o proc.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index fda756875fa6..23663837acff 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -31,7 +31,7 @@
* Corrupted registers: x0-x7, x9-x11
*/
__flush_dcache_all:
- dsb sy // ensure ordering with previous memory accesses
+ dmb sy // ensure ordering with previous memory accesses
mrs x0, clidr_el1 // read clidr
and x3, x0, #0x7000000 // extract loc from clidr
lsr x3, x3, #23 // left align loc bit field
@@ -128,7 +128,7 @@ USER(9f, dc cvau, x4 ) // clean D line to PoU
add x4, x4, x2
cmp x4, x1
b.lo 1b
- dsb sy
+ dsb ish
icache_line_size x2, x3
sub x3, x2, #1
@@ -139,7 +139,7 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
cmp x4, x1
b.lo 1b
9: // ignore any faulting cache operation
- dsb sy
+ dsb ish
isb
ret
ENDPROC(flush_icache_range)
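The cache.S hunks above relax completion barriers from full-system (dsb sy) to inner-shareable (dsb ish) scope, since the broadcast maintenance only has to complete with respect to the other CPUs; __flush_dcache_all only needs ordering against prior accesses before reading CLIDR_EL1, so a dmb suffices. A minimal C sketch of the three scopes, assuming the dmb(opt)/dsb(opt) macros from <asm/barrier.h> that paste the option into the instruction:

	#include <asm/barrier.h>

	/* Sketch only: what each barrier variant in the hunks above
	 * guarantees. */
	static void barrier_scopes(void)
	{
		dmb(sy);	/* ordering of prior accesses; no completion guarantee */
		dsb(ish);	/* completion, inner-shareable domain (the CPUs) */
		dsb(sy);	/* completion, full system (devices included) */
	}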
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index c851eb44dc50..4164c5ace9f8 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -115,7 +115,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
for (i = 0; i < (size >> PAGE_SHIFT); i++)
map[i] = page + i;
coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
- __get_dma_pgprot(attrs, pgprot_default, false));
+ __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
kfree(map);
if (!coherent_ptr)
goto no_map;
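With pgprot_default removed (see the mmu.c hunk below), the non-coherent DMA path has to name its memory type explicitly; PROT_NORMAL_NC selects Normal Non-Cacheable attributes for the remapped buffer. A minimal sketch of the idiom (the function name and parameters are illustrative):

	#include <linux/vmalloc.h>
	#include <asm/pgtable.h>

	/* Sketch: map a caller-provided page array non-cacheably, naming
	 * the memory type explicitly instead of using a global default. */
	static void *map_pages_noncacheable(struct page **pages,
					    unsigned int count)
	{
		return vmap(pages, count, VM_MAP, __pgprot(PROT_NORMAL_NC));
	}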
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c23751b06120..bcc965e2cce1 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -32,6 +32,7 @@
#include <asm/exception.h>
#include <asm/debug-monitors.h>
+#include <asm/esr.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -123,6 +124,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
}
tsk->thread.fault_address = addr;
+ tsk->thread.fault_code = esr;
si.si_signo = sig;
si.si_errno = 0;
si.si_code = code;
@@ -148,8 +150,6 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000
-#define ESR_WRITE (1 << 6)
-#define ESR_CM (1 << 8)
#define ESR_LNX_EXEC (1 << 24)
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
@@ -218,7 +218,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
if (esr & ESR_LNX_EXEC) {
vm_flags = VM_EXEC;
- } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+ } else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) {
vm_flags = VM_WRITE;
mm_flags |= FAULT_FLAG_WRITE;
}
@@ -525,7 +525,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
info.si_errno = 0;
info.si_code = inf->code;
info.si_addr = (void __user *)addr;
- arm64_notify_die("", regs, &info, esr);
+ arm64_notify_die("", regs, &info, 0);
return 0;
}
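fault.c now records the syndrome in thread.fault_code and picks up the shared ESR_EL1_* masks from <asm/esr.h> rather than local copies; per the removed defines, the write bit (WnR) is bit 6 and the cache-maintenance bit (CM) is bit 8. A hedged sketch of the resulting classification (the helper name is illustrative):

	#include <linux/mm.h>
	#include <asm/esr.h>

	#define ESR_LNX_EXEC	(1 << 24)  /* Linux-internal flag, as in fault.c */

	/* Sketch: which VMA permission the faulting access requires. */
	static unsigned long esr_to_vm_flags(unsigned int esr)
	{
		if (esr & ESR_LNX_EXEC)		/* instruction abort */
			return VM_EXEC;
		if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM))
			return VM_WRITE;	/* write that isn't a DC (cache) op */
		return VM_READ | VM_WRITE | VM_EXEC;	/* caller's default */
	}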
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 4a829a210bb6..c43f1dd19489 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,11 +43,6 @@
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
-pgprot_t pgprot_default;
-EXPORT_SYMBOL(pgprot_default);
-
-static pmdval_t prot_sect_kernel;
-
struct cachepolicy {
const char policy[16];
u64 mair;
@@ -122,33 +117,6 @@ static int __init early_cachepolicy(char *p)
}
early_param("cachepolicy", early_cachepolicy);
-/*
- * Adjust the PMD section entries according to the CPU in use.
- */
-void __init init_mem_pgprot(void)
-{
- pteval_t default_pgprot;
- int i;
-
- default_pgprot = PTE_ATTRINDX(MT_NORMAL);
- prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
-
-#ifdef CONFIG_SMP
- /*
- * Mark memory with the "shared" attribute for SMP systems
- */
- default_pgprot |= PTE_SHARED;
- prot_sect_kernel |= PMD_SECT_S;
-#endif
-
- for (i = 0; i < 16; i++) {
- unsigned long v = pgprot_val(protection_map[i]);
- protection_map[i] = __pgprot(v | default_pgprot);
- }
-
- pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
-}
-
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -196,11 +164,10 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
pgprot_t prot_pte;
if (map_io) {
- prot_sect = PMD_TYPE_SECT | PMD_SECT_AF |
- PMD_ATTRINDX(MT_DEVICE_nGnRE);
+ prot_sect = PROT_SECT_DEVICE_nGnRE;
prot_pte = __pgprot(PROT_DEVICE_nGnRE);
} else {
- prot_sect = prot_sect_kernel;
+ prot_sect = PROT_SECT_NORMAL_EXEC;
prot_pte = PAGE_KERNEL_EXEC;
}
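The open-coded section attributes give way to the named PROT_SECT_* constants from <asm/pgtable.h>. For orientation, a sketch of how such constants are typically composed (the SKETCH_ names are illustrative; the authoritative definitions live in pgtable.h):

	/* Sketch: a section (block) descriptor is the section type plus
	 * the access flag, with the memory type chosen by MAIR index. */
	#define SKETCH_PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
	#define SKETCH_PROT_SECT_DEVICE_nGnRE	\
		(SKETCH_PROT_SECT_DEFAULT | PMD_ATTRINDX(MT_DEVICE_nGnRE))
	#define SKETCH_PROT_SECT_NORMAL_EXEC	\
		(SKETCH_PROT_SECT_DEFAULT | PMD_ATTRINDX(MT_NORMAL))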
@@ -242,7 +209,30 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
do {
next = pud_addr_end(addr, end);
- alloc_init_pmd(pud, addr, next, phys, map_io);
+
+ /*
+ * For 4K granule only, attempt to put down a 1GB block
+ */
+ if (!map_io && (PAGE_SHIFT == 12) &&
+ ((addr | next | phys) & ~PUD_MASK) == 0) {
+ pud_t old_pud = *pud;
+ set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+
+ /*
+ * If we have an old value for a pud, it will
+ * be pointing to a pmd table that we no longer
+ * need (from swapper_pg_dir).
+ *
+ * Look up the old pmd table and free it.
+ */
+ if (!pud_none(old_pud)) {
+ phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+ memblock_free(table, PAGE_SIZE);
+ flush_tlb_all();
+ }
+ } else {
+ alloc_init_pmd(pud, addr, next, phys, map_io);
+ }
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
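The alignment test above works because, with a 4K granule, a pud maps 1GB (PUD_SHIFT == 30), so ~PUD_MASK keeps the low 30 bits; OR-ing addr, next and phys together and masking checks all three for 1GB alignment in a single comparison. A worked example:

	/* Worked example (4K granule): addr = 0xffffffc000000000,
	 * next = addr + SZ_1G, phys = 0x80000000 all have their low 30
	 * bits clear, so a 1GB block can be used. If phys were
	 * 0x80200000 (only 2MB aligned), the test fails and we fall
	 * back to alloc_init_pmd(). */
	static bool can_use_pud_block(unsigned long addr, unsigned long next,
				      phys_addr_t phys)
	{
		return ((addr | next | phys) & ~PUD_MASK) == 0;
	}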
@@ -399,6 +389,9 @@ int kern_addr_valid(unsigned long addr)
if (pud_none(*pud))
return 0;
+ if (pud_sect(*pud))
+ return pfn_valid(pud_pfn(*pud));
+
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return 0;
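kern_addr_valid() must now cope with the 1GB blocks installed above: a section-mapped pud has no pmd table beneath it, so validity reduces to whether the block's backing pfn is valid. A sketch of what the two new helpers plausibly test (SKETCH_ names are illustrative; the real definitions live in <asm/pgtable.h>):

	/* Sketch: distinguish a block descriptor from a table
	 * descriptor, and recover the pfn a block maps. */
	#define SKETCH_pud_sect(pud)	((pud_val(pud) & PUD_TYPE_MASK) == \
					 PUD_TYPE_SECT)
	#define SKETCH_pud_pfn(pud)	((pud_val(pud) & PUD_MASK & PHYS_MASK) \
					 >> PAGE_SHIFT)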
@@ -446,7 +439,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
if (!p)
return -ENOMEM;
- set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
+ set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
} else
vmemmap_verify((pte_t *)pmd, node, addr, next);
} while (addr = next, addr != end);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 9042aff5e9e3..7736779c9809 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -182,7 +182,7 @@ ENDPROC(cpu_do_switch_mm)
ENTRY(__cpu_setup)
ic iallu // I+BTB cache invalidate
tlbi vmalle1is // invalidate I + D TLBs
- dsb sy
+ dsb ish
mov x0, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD
diff --git a/arch/arm64/mm/tlb.S b/arch/arm64/mm/tlb.S
deleted file mode 100644
index 19da91e0cd27..000000000000
--- a/arch/arm64/mm/tlb.S
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Based on arch/arm/mm/tlb.S
- *
- * Copyright (C) 1997-2002 Russell King
- * Copyright (C) 2012 ARM Ltd.
- * Written by Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-#include <asm/page.h>
-#include <asm/tlbflush.h>
-#include "proc-macros.S"
-
-/*
- * __cpu_flush_user_tlb_range(start, end, vma)
- *
- * Invalidate a range of TLB entries in the specified address space.
- *
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
- * - vma - vma_struct describing address range
- */
-ENTRY(__cpu_flush_user_tlb_range)
- vma_vm_mm x3, x2 // get vma->vm_mm
- mmid w3, x3 // get vm_mm->context.id
- dsb sy
- lsr x0, x0, #12 // align address
- lsr x1, x1, #12
- bfi x0, x3, #48, #16 // start VA and ASID
- bfi x1, x3, #48, #16 // end VA and ASID
-1: tlbi vae1is, x0 // TLB invalidate by address and ASID
- add x0, x0, #1
- cmp x0, x1
- b.lo 1b
- dsb sy
- ret
-ENDPROC(__cpu_flush_user_tlb_range)
-
-/*
- * __cpu_flush_kern_tlb_range(start,end)
- *
- * Invalidate a range of kernel TLB entries.
- *
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
- */
-ENTRY(__cpu_flush_kern_tlb_range)
- dsb sy
- lsr x0, x0, #12 // align address
- lsr x1, x1, #12
-1: tlbi vaae1is, x0 // TLB invalidate by address
- add x0, x0, #1
- cmp x0, x1
- b.lo 1b
- dsb sy
- isb
- ret
-ENDPROC(__cpu_flush_kern_tlb_range)
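With tlb.S gone entirely, range invalidation of this form is simple enough to express as inline C around a tlbi loop. A hedged sketch of an equivalent of __cpu_flush_user_tlb_range() (illustrative, not necessarily the replacement that landed, which would live in <asm/tlbflush.h>; the ASID() accessor is assumed from <asm/mmu.h>, and the barriers are relaxed to inner-shareable scope in keeping with the rest of the series):

	#include <linux/mm_types.h>
	#include <asm/barrier.h>
	#include <asm/mmu.h>

	static inline void sketch_flush_tlb_range(struct vm_area_struct *vma,
						  unsigned long start,
						  unsigned long end)
	{
		unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
		unsigned long addr;

		start = asid | (start >> 12);	/* VA >> 12 plus ASID, as above */
		end = asid | (end >> 12);

		dsb(ishst);			/* complete prior PTE updates */
		for (addr = start; addr < end; addr++)
			asm("tlbi vae1is, %0" : : "r" (addr));
		dsb(ish);			/* wait for the broadcast invalidate */
	}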