Diffstat (limited to 'arch/arm/cpu')
-rw-r--r--   arch/arm/cpu/arm926ejs/cache.c        |   6
-rw-r--r--   arch/arm/cpu/armv7/cache_v7.c         |   6
-rw-r--r--   arch/arm/cpu/armv7/exception_level.c  |   8
-rw-r--r--   arch/arm/cpu/armv7/lowlevel_init.S    |   4
-rw-r--r--   arch/arm/cpu/armv7/start.S            |   4
-rw-r--r--   arch/arm/cpu/armv7m/cache.c           |   6
-rw-r--r--   arch/arm/cpu/armv8/cache_v8.c         | 119
-rw-r--r--   arch/arm/cpu/armv8/exception_level.c  |   8
-rw-r--r--   arch/arm/cpu/armv8/fel_utils.S        |   7
-rw-r--r--   arch/arm/cpu/armv8/spl_data.c         |  13
-rw-r--r--   arch/arm/cpu/armv8/u-boot-spl.lds     |   1
-rw-r--r--   arch/arm/cpu/armv8/u-boot.lds         |  59
-rw-r--r--   arch/arm/cpu/u-boot.lds               |  29
13 files changed, 190 insertions, 80 deletions
diff --git a/arch/arm/cpu/arm926ejs/cache.c b/arch/arm/cpu/arm926ejs/cache.c
index 5b87a3af91b..71b8ad0f71d 100644
--- a/arch/arm/cpu/arm926ejs/cache.c
+++ b/arch/arm/cpu/arm926ejs/cache.c
@@ -5,6 +5,7 @@
  */
 #include <cpu_func.h>
 #include <asm/cache.h>
+#include <linux/errno.h>
 #include <linux/types.h>
 
 #if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
@@ -88,3 +89,8 @@ void enable_caches(void)
 	dcache_enable();
 #endif
 }
+
+int __weak pgprot_set_attrs(phys_addr_t addr, size_t size, enum pgprot_attrs perm)
+{
+	return -ENOSYS;
+}
diff --git a/arch/arm/cpu/armv7/cache_v7.c b/arch/arm/cpu/armv7/cache_v7.c
index d11420d2fdd..371dc92cd46 100644
--- a/arch/arm/cpu/armv7/cache_v7.c
+++ b/arch/arm/cpu/armv7/cache_v7.c
@@ -6,6 +6,7 @@
  */
 #include <cpu_func.h>
 #include <asm/cache.h>
+#include <linux/errno.h>
 #include <linux/types.h>
 #include <asm/armv7.h>
 #include <asm/utils.h>
@@ -209,3 +210,8 @@ __weak void v7_outer_cache_flush_all(void) {}
 __weak void v7_outer_cache_inval_all(void) {}
 __weak void v7_outer_cache_flush_range(u32 start, u32 end) {}
 __weak void v7_outer_cache_inval_range(u32 start, u32 end) {}
+
+int __weak pgprot_set_attrs(phys_addr_t addr, size_t size, enum pgprot_attrs perm)
+{
+	return -ENOSYS;
+}
diff --git a/arch/arm/cpu/armv7/exception_level.c b/arch/arm/cpu/armv7/exception_level.c
index 7baade61b07..a55c158ce51 100644
--- a/arch/arm/cpu/armv7/exception_level.c
+++ b/arch/arm/cpu/armv7/exception_level.c
@@ -11,9 +11,9 @@
 #include <bootm.h>
 #include <cpu_func.h>
 #include <log.h>
+#include <setjmp.h>
 #include <asm/armv7.h>
 #include <asm/secure.h>
-#include <asm/setjmp.h>
 
 /**
  * entry_non_secure() - entry point when switching to non-secure mode
@@ -24,7 +24,7 @@
  *
  * @non_secure_jmp:	jump buffer for restoring stack and registers
  */
-static void entry_non_secure(struct jmp_buf_data *non_secure_jmp)
+static void entry_non_secure(jmp_buf non_secure_jmp)
 {
 	dcache_enable();
 	debug("Reached non-secure mode\n");
@@ -42,10 +42,10 @@ static void entry_non_secure(struct jmp_buf_data *non_secure_jmp)
 void switch_to_non_secure_mode(void)
 {
 	static bool is_nonsec;
-	struct jmp_buf_data non_secure_jmp;
+	jmp_buf non_secure_jmp;
 
 	if (armv7_boot_nonsec() && !is_nonsec) {
-		if (setjmp(&non_secure_jmp))
+		if (setjmp(non_secure_jmp))
 			return;
 		dcache_disable();	/* flush cache before switch to HYP */
 		armv7_init_nonsec();
diff --git a/arch/arm/cpu/armv7/lowlevel_init.S b/arch/arm/cpu/armv7/lowlevel_init.S
index a6c844b7e3d..72b7b7d082c 100644
--- a/arch/arm/cpu/armv7/lowlevel_init.S
+++ b/arch/arm/cpu/armv7/lowlevel_init.S
@@ -26,8 +26,8 @@ WEAK(lowlevel_init)
 	/*
 	 * Setup a temporary stack. Global data is not available yet.
 	 */
-#if defined(CONFIG_XPL_BUILD) && defined(CONFIG_SPL_STACK)
-	ldr	sp, =CONFIG_SPL_STACK
+#if CONFIG_IS_ENABLED(HAVE_INIT_STACK)
+	ldr	sp, =CONFIG_VAL(STACK)
 #else
 	ldr	sp, =SYS_INIT_SP_ADDR
 #endif
diff --git a/arch/arm/cpu/armv7/start.S b/arch/arm/cpu/armv7/start.S
index b63481b43ca..959251957de 100644
--- a/arch/arm/cpu/armv7/start.S
+++ b/arch/arm/cpu/armv7/start.S
@@ -279,8 +279,8 @@ ENTRY(cpu_init_cp15)
 	orr	r2, r4, r2	@ r2 has combined CPU variant + revision
 
 	/* Early stack for ERRATA that needs into call C code */
-#if defined(CONFIG_XPL_BUILD) && defined(CONFIG_SPL_STACK)
-	ldr	r0, =(CONFIG_SPL_STACK)
+#if CONFIG_IS_ENABLED(HAVE_INIT_STACK)
+	ldr	r0, =CONFIG_VAL(STACK)
 #else
 	ldr	r0, =(SYS_INIT_SP_ADDR)
 #endif
diff --git a/arch/arm/cpu/armv7m/cache.c b/arch/arm/cpu/armv7m/cache.c
index b6d08b7aad7..8e7db734055 100644
--- a/arch/arm/cpu/armv7m/cache.c
+++ b/arch/arm/cpu/armv7m/cache.c
@@ -11,6 +11,7 @@
 #include <asm/cache.h>
 #include <asm/io.h>
 #include <linux/bitops.h>
+#include <linux/errno.h>
 
 /* Cache maintenance operation registers */
 
@@ -370,3 +371,8 @@ void enable_caches(void)
 	dcache_enable();
 #endif
 }
+
+int __weak pgprot_set_attrs(phys_addr_t addr, size_t size, enum pgprot_attrs perm)
+{
+	return -ENOSYS;
+}
diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index 5d6953ffedd..1c1e33bec24 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -14,6 +14,7 @@
 #include <asm/global_data.h>
 #include <asm/system.h>
 #include <asm/armv8/mmu.h>
+#include <linux/errno.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -421,7 +422,7 @@ static int count_ranges(void)
 	return count;
 }
 
-#define ALL_ATTRS (3 << 8 | PMD_ATTRINDX_MASK)
+#define ALL_ATTRS (3 << 8 | PMD_ATTRMASK)
 #define PTE_IS_TABLE(pte, level) (pte_type(&(pte)) == PTE_TYPE_TABLE && (level) < 3)
 
 enum walker_state {
@@ -568,6 +569,24 @@ static void pretty_print_table_attrs(u64 pte)
 static void pretty_print_block_attrs(u64 pte)
 {
 	u64 attrs = pte & PMD_ATTRINDX_MASK;
+	u64 perm_attrs = pte & PMD_ATTRMASK;
+	char mem_attrs[16] = { 0 };
+	int cnt = 0;
+
+	if (perm_attrs & PTE_BLOCK_PXN)
+		cnt += snprintf(mem_attrs + cnt, sizeof(mem_attrs) - cnt, "PXN ");
+	if (perm_attrs & PTE_BLOCK_UXN) {
+		if (get_effective_el() == 1)
+			cnt += snprintf(mem_attrs + cnt, sizeof(mem_attrs) - cnt, "UXN ");
+		else
+			cnt += snprintf(mem_attrs + cnt, sizeof(mem_attrs) - cnt, "XN ");
+	}
+	if (perm_attrs & PTE_BLOCK_RO)
+		cnt += snprintf(mem_attrs + cnt, sizeof(mem_attrs) - cnt, "RO");
+	if (!mem_attrs[0])
+		snprintf(mem_attrs, sizeof(mem_attrs), "RWX ");
+
+	printf(" | %-10s", mem_attrs);
 
 	switch (attrs) {
 	case PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE):
@@ -613,6 +632,7 @@ static void print_pte(u64 pte, int level)
 {
 	if (PTE_IS_TABLE(pte, level)) {
 		printf(" %-5s", "Table");
+		printf(" %-12s", "|");
 		pretty_print_table_attrs(pte);
 	} else {
 		pretty_print_pte_type(pte);
@@ -642,9 +662,9 @@ static bool pagetable_print_entry(u64 start_attrs, u64 end, int va_bits, int level)
 	printf("%*s", indent * 2, "");
 	if (PTE_IS_TABLE(start_attrs, level))
-		printf("[%#011llx]%14s", _addr, "");
+		printf("[%#016llx]%19s", _addr, "");
 	else
-		printf("[%#011llx - %#011llx]", _addr, end);
+		printf("[%#016llx - %#016llx]", _addr, end);
 
 	printf("%*s | ", (3 - level) * 2, "");
 	print_pte(start_attrs, level);
@@ -952,61 +972,109 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
 	flush_dcache_range(real_start, real_start + real_size);
 }
 
-/*
- * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
- * The procecess is break-before-make. The target region will be marked as
- * invalid during the process of changing.
- */
-void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
+void mmu_change_region_attr_nobreak(phys_addr_t addr, size_t siz, u64 attrs)
 {
 	int level;
 	u64 r, size, start;
 
-	start = addr;
-	size = siz;
 	/*
 	 * Loop through the address range until we find a page granule that fits
-	 * our alignment constraints, then set it to "invalid".
+	 * our alignment constraints and set the new permissions
 	 */
+	start = addr;
+	size = siz;
 	while (size > 0) {
 		for (level = 1; level < 4; level++) {
-			/* Set PTE to fault */
-			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
-					   level);
+			/* Set PTE to new attributes */
+			r = set_one_region(start, size, attrs, true, level);
 			if (r) {
-				/* PTE successfully invalidated */
+				/* PTE successfully updated */
 				size -= r;
 				start += r;
 				break;
 			}
 		}
 	}
-
 	flush_dcache_range(gd->arch.tlb_addr, gd->arch.tlb_addr + gd->arch.tlb_size);
 	__asm_invalidate_tlb_all();
+}
+
+/*
+ * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
+ * The procecess is break-before-make. The target region will be marked as
+ * invalid during the process of changing.
+ */
+void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
+{
+	int level;
+	u64 r, size, start;
 
+	start = addr;
+	size = siz;
 	/*
 	 * Loop through the address range until we find a page granule that fits
-	 * our alignment constraints, then set it to the new cache attributes
+	 * our alignment constraints, then set it to "invalid".
 	 */
-	start = addr;
-	size = siz;
 	while (size > 0) {
 		for (level = 1; level < 4; level++) {
-			/* Set PTE to new attributes */
-			r = set_one_region(start, size, attrs, true, level);
+			/* Set PTE to fault */
+			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
+					   level);
 			if (r) {
-				/* PTE successfully updated */
+				/* PTE successfully invalidated */
 				size -= r;
 				start += r;
 				break;
 			}
 		}
 	}
+
 	flush_dcache_range(gd->arch.tlb_addr, gd->arch.tlb_addr + gd->arch.tlb_size);
 	__asm_invalidate_tlb_all();
+
+	mmu_change_region_attr_nobreak(addr, siz, attrs);
+}
+
+int pgprot_set_attrs(phys_addr_t addr, size_t size, enum pgprot_attrs perm)
+{
+	u64 attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_INNER_SHARE | PTE_TYPE_VALID;
+
+	switch (perm) {
+	case MMU_ATTR_RO:
+		/*
+		 * get_effective_el() will return 1 if
+		 * - Running in EL1 so we assume an EL1 translation regime
+		 *   with HCR_EL2.{NV, NV1} != {1,1}
+		 * - Running in EL2 with HCR_EL2.E2H = 1 so we assume an
+		 *   EL2&0 translation regime. Since we don't have accesses
+		 *   from EL0 we don't have to check HCR_EL2.TGE
+		 *
+		 * Both of these requires PXN to be set
+		 */
+		if (get_effective_el() == 1)
+			attrs |= PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_RO;
+		else
+			attrs |= PTE_BLOCK_UXN | PTE_BLOCK_RO;
+		break;
+	case MMU_ATTR_RX:
+		attrs |= PTE_BLOCK_RO;
+		break;
+	case MMU_ATTR_RW:
+		if (get_effective_el() == 1)
+			attrs |= PTE_BLOCK_PXN | PTE_BLOCK_UXN;
+		else
+			attrs |= PTE_BLOCK_UXN;
		break;
+	default:
+		log_err("Unknown attribute %d\n", perm);
+		return -EINVAL;
+	}
+
+	mmu_change_region_attr_nobreak(addr, size, attrs);
+
+	return 0;
 }
 
 #else	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */
@@ -1112,3 +1180,8 @@ void __weak enable_caches(void)
 	icache_enable();
 	dcache_enable();
 }
+
+void arch_dump_mem_attrs(void)
+{
+	dump_pagetable(gd->arch.tlb_addr, get_tcr(NULL, NULL));
+}
diff --git a/arch/arm/cpu/armv8/exception_level.c b/arch/arm/cpu/armv8/exception_level.c
index 85c78f55789..746737861e7 100644
--- a/arch/arm/cpu/armv8/exception_level.c
+++ b/arch/arm/cpu/armv8/exception_level.c
@@ -11,8 +11,8 @@
 #include <bootm.h>
 #include <cpu_func.h>
 #include <log.h>
+#include <setjmp.h>
 #include <asm/cache.h>
-#include <asm/setjmp.h>
 
 /**
  * entry_non_secure() - entry point when switching to non-secure mode
@@ -23,7 +23,7 @@
  *
 * @non_secure_jmp:	jump buffer for restoring stack and registers
 */
-static void entry_non_secure(struct jmp_buf_data *non_secure_jmp)
+static void entry_non_secure(jmp_buf non_secure_jmp)
 {
 	dcache_enable();
 	debug("Reached non-secure mode\n");
@@ -42,11 +42,11 @@ static void entry_non_secure(struct jmp_buf_data *non_secure_jmp)
  */
 void switch_to_non_secure_mode(void)
 {
-	struct jmp_buf_data non_secure_jmp;
+	jmp_buf non_secure_jmp;
 
 	/* On AArch64 we need to make sure we call our payload in < EL3 */
 	if (current_el() == 3) {
-		if (setjmp(&non_secure_jmp))
+		if (setjmp(non_secure_jmp))
 			return;
 
 		dcache_disable();	/* flush cache before switch to EL2 */
diff --git a/arch/arm/cpu/armv8/fel_utils.S b/arch/arm/cpu/armv8/fel_utils.S
index 939869b9ffa..f7707acdf1a 100644
--- a/arch/arm/cpu/armv8/fel_utils.S
+++ b/arch/arm/cpu/armv8/fel_utils.S
@@ -63,9 +63,12 @@ ENTRY(return_to_fel)
 1:	wfi
 	b	1b
 
+fel_stash_addr:			// must immediately precede back_in_32:
+	.word	0x00000000	// receives fel_stash addr, by AA64 code above
+
 /* AArch32 code to restore the state from fel_stash and return back to FEL. */
 back_in_32:
-	.word	0xe59f0028	// ldr   r0, [pc, #40]	; load fel_stash address
+	.word	0xe51f000c	// ldr   r0, [pc, #-12]	; load fel_stash address
 	.word	0xe5901008	// ldr   r1, [r0, #8]
 	.word	0xe129f001	// msr   CPSR_fc, r1
 	.word	0xf57ff06f	// isb
@@ -77,6 +80,4 @@ back_in_32:
 	.word	0xee011f10	// mcr   15, 0, r1, cr1, cr0, {0}  ; SCTLR
 	.word	0xf57ff06f	// isb
 	.word	0xe12fff1e	// bx    lr  ; return to FEL
-fel_stash_addr:
-	.word	0x00000000	// receives fel_stash addr, by AA64 code above
 ENDPROC(return_to_fel)
diff --git a/arch/arm/cpu/armv8/spl_data.c b/arch/arm/cpu/armv8/spl_data.c
index 259b49ff364..492353c93df 100644
--- a/arch/arm/cpu/armv8/spl_data.c
+++ b/arch/arm/cpu/armv8/spl_data.c
@@ -5,23 +5,28 @@
 
 #include <spl.h>
 
+char __data_start[0] __section(".__data_start");
 char __data_save_start[0] __section(".__data_save_start");
 char __data_save_end[0] __section(".__data_save_end");
 
 u32 cold_reboot_flag = 1;
 
+u32 __weak reset_flag(void)
+{
+	return 1;
+}
+
 void spl_save_restore_data(void)
 {
 	u32 data_size = __data_save_end - __data_save_start;
+	cold_reboot_flag = reset_flag();
 
 	if (cold_reboot_flag == 1) {
 		/* Save data section to data_save section */
-		memcpy(__data_save_start, __data_save_start - data_size,
-		       data_size);
+		memcpy(__data_save_start, __data_start, data_size);
 	} else {
 		/* Restore the data_save section to data section */
-		memcpy(__data_save_start - data_size, __data_save_start,
-		       data_size);
+		memcpy(__data_start, __data_save_start, data_size);
 	}
 
 	cold_reboot_flag++;
diff --git a/arch/arm/cpu/armv8/u-boot-spl.lds b/arch/arm/cpu/armv8/u-boot-spl.lds
index fed69644b55..c4f83ec9cfc 100644
--- a/arch/arm/cpu/armv8/u-boot-spl.lds
+++ b/arch/arm/cpu/armv8/u-boot-spl.lds
@@ -37,6 +37,7 @@ SECTIONS
 
 	.data : {
 		. = ALIGN(8);
+		*(.__data_start)
 		*(.data*)
 	} >.sram
diff --git a/arch/arm/cpu/armv8/u-boot.lds b/arch/arm/cpu/armv8/u-boot.lds
index 857f44412e0..f4ce98c82c8 100644
--- a/arch/arm/cpu/armv8/u-boot.lds
+++ b/arch/arm/cpu/armv8/u-boot.lds
@@ -36,9 +36,18 @@ SECTIONS
 		__efi_runtime_stop = .;
 	}
 
+#ifdef CONFIG_MMU_PGPROT
+	.text_rest ALIGN(CONSTANT(COMMONPAGESIZE)) :
+#else
 	.text_rest :
+#endif
 	{
+		__text_start = .;
 		*(.text*)
+#ifdef CONFIG_MMU_PGPROT
+		. = ALIGN(CONSTANT(COMMONPAGESIZE));
+#endif
+		__text_end = .;
 	}
 
 #ifdef CONFIG_ARMV8_PSCI
@@ -97,35 +106,43 @@ SECTIONS
 		LONG(0x1d1071c);	/* Must output something to reset LMA */
 	}
 #endif
-
-	. = ALIGN(8);
-	.rodata : { *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*))) }
-
-	. = ALIGN(8);
-	.data : {
-		*(.data*)
+	.efi_runtime_rel : {
+		__efi_runtime_rel_start = .;
+		*(.rel*.efi_runtime)
+		*(.rel*.efi_runtime.*)
+		__efi_runtime_rel_stop = .;
 	}
 
-	. = ALIGN(8);
-
-	. = .;
+#ifdef CONFIG_MMU_PGPROT
+	.rodata ALIGN(CONSTANT(COMMONPAGESIZE)): {
+#else
+	.rodata ALIGN(8) : {
+#endif
+		__start_rodata = .;
+		*(SORT_BY_ALIGNMENT(SORT_BY_NAME(.rodata*)))
+	}
 
-	. = ALIGN(8);
-	__u_boot_list : {
+	__u_boot_list ALIGN(8) : {
 		KEEP(*(SORT(__u_boot_list*)));
+#ifdef CONFIG_MMU_PGPROT
+		. = ALIGN(CONSTANT(COMMONPAGESIZE));
+#endif
+		__end_rodata = .;
 	}
 
-	.efi_runtime_rel : {
-		__efi_runtime_rel_start = .;
-		*(.rel*.efi_runtime)
-		*(.rel*.efi_runtime.*)
-		__efi_runtime_rel_stop = .;
+#ifdef CONFIG_MMU_PGPROT
+	.data ALIGN(CONSTANT(COMMONPAGESIZE)) : {
+#else
+	.data ALIGN(8) : {
+#endif
+		__start_data = .;
+		*(.data*)
 	}
 
 	. = ALIGN(8);
 
 	__image_copy_end = .;
 
-	.rela.dyn : {
+	.rela.dyn ALIGN(8) : {
 		__rel_dyn_start = .;
 		*(.rela*)
 		__rel_dyn_end = .;
@@ -136,11 +153,15 @@ SECTIONS
 	/*
 	 * arch/arm/lib/crt0_64.S assumes __bss_start - __bss_end % 8 == 0
 	 */
-	.bss ALIGN(8) : {
+	.bss ADDR(.rela.dyn) (OVERLAY) : {
 		__bss_start = .;
 		*(.bss*)
 		. = ALIGN(8);
 		__bss_end = .;
+#ifdef CONFIG_MMU_PGPROT
+		. = ALIGN(CONSTANT(COMMONPAGESIZE));
+#endif
+		__end_data = .;
 	}
 
 	/DISCARD/ : { *(.dynsym) }
diff --git a/arch/arm/cpu/u-boot.lds b/arch/arm/cpu/u-boot.lds
index 2f50087f57a..817e7a983ae 100644
--- a/arch/arm/cpu/u-boot.lds
+++ b/arch/arm/cpu/u-boot.lds
@@ -169,15 +169,6 @@ SECTIONS
 	_end = .;
 	_image_binary_end = .;
 
-	/*
-	 * Deprecated: this MMU section is used by pxa at present but
-	 * should not be used by new boards/CPUs.
-	 */
-	. = ALIGN(4096);
-	.mmutable : {
-		*(.mmutable)
-	}
-
 	/*
 	 * These sections occupy the same memory, but their lifetimes do
 	 * not overlap: U-Boot initializes .bss only after applying dynamic
@@ -190,14 +181,14 @@ SECTIONS
 		__bss_end = .;
 	}
 
-	.dynsym _image_binary_end : { *(.dynsym) }
-	.dynbss : { *(.dynbss) }
-	.dynstr : { *(.dynstr*) }
-	.dynamic : { *(.dynamic*) }
-	.plt : { *(.plt*) }
-	.interp : { *(.interp*) }
-	.gnu.hash : { *(.gnu.hash) }
-	.gnu : { *(.gnu*) }
-	.ARM.exidx : { *(.ARM.exidx*) }
-	.gnu.linkonce.armexidx : { *(.gnu.linkonce.armexidx.*) }
+	/DISCARD/ : { *(.dynsym) }
+	/DISCARD/ : { *(.dynbss) }
+	/DISCARD/ : { *(.dynstr*) }
+	/DISCARD/ : { *(.dynamic*) }
+	/DISCARD/ : { *(.plt*) }
+	/DISCARD/ : { *(.interp*) }
+	/DISCARD/ : { *(.gnu.hash) }
+	/DISCARD/ : { *(.gnu*) }
+	/DISCARD/ : { *(.ARM.exidx*) }
+	/DISCARD/ : { *(.gnu.linkonce.armexidx.*) }
 }
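
For context, a minimal caller sketch in C (not part of this patch) of how the pgprot_set_attrs() hook introduced above might be used after relocation. The __start_rodata/__end_rodata symbols come from the updated armv8 u-boot.lds; the function name, the call site, and the assumption that <cpu_func.h> provides the prototype are illustrative only:

	/* Hypothetical sketch, assuming the series above is applied:
	 * write-protect U-Boot's rodata once the final page tables are
	 * live. pgprot_set_attrs(), enum pgprot_attrs and MMU_ATTR_RO are
	 * introduced by this patch; on armv8 the call reaches
	 * pgprot_set_attrs() in cache_v8.c, which applies the attributes
	 * via mmu_change_region_attr_nobreak(), while the
	 * arm926ejs/armv7/armv7m weak stubs just return -ENOSYS.
	 */
	#include <cpu_func.h>		/* assumed home of the prototype */
	#include <linux/errno.h>

	extern char __start_rodata[], __end_rodata[];	/* from u-boot.lds */

	static int protect_rodata(void)
	{
		size_t size = __end_rodata - __start_rodata;
		int ret;

		/* With CONFIG_MMU_PGPROT the linker script pads .rodata to
		 * COMMONPAGESIZE, so start and size are page-granule
		 * aligned as the set_one_region() walk requires.
		 */
		ret = pgprot_set_attrs((phys_addr_t)(unsigned long)__start_rodata,
				       size, MMU_ATTR_RO);
		if (ret == -ENOSYS)
			return 0;	/* CPU without pgprot support */

		return ret;
	}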