author | Catalin Marinas <catalin.marinas@arm.com> | 2007-05-08 22:27:46 +0100
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2007-05-08 22:55:53 +0100
commit | bbe888864ec32435e93923c40b9d6ce2bb73844b (patch)
tree | db607482fe5898da9a599368d0bbe579a56b272c /arch/arm/mm
parent | 5b94f675f57e4ff16c8fda09088d7480a84dcd91 (diff)
[ARM] armv7: add support for ARMv7 cores.
This patch adds support for the ARMv7 cores.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/abort-ev7.S | 32 |
-rw-r--r-- | arch/arm/mm/cache-v7.S | 253 |
-rw-r--r-- | arch/arm/mm/proc-macros.S | 12 |
-rw-r--r-- | arch/arm/mm/proc-v7.S | 262 |
4 files changed, 559 insertions, 0 deletions
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
new file mode 100644
index 000000000000..eb90bce38e14
--- /dev/null
+++ b/arch/arm/mm/abort-ev7.S
@@ -0,0 +1,32 @@
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+/*
+ * Function: v7_early_abort
+ *
+ * Params  : r2 = address of aborted instruction
+ *         : r3 = saved SPSR
+ *
+ * Returns : r0 = address of abort
+ *         : r1 = FSR, bit 11 = write
+ *         : r2-r8 = corrupted
+ *         : r9 = preserved
+ *         : sp = pointer to registers
+ *
+ * Purpose : obtain information about current aborted instruction.
+ */
+	.align	5
+ENTRY(v7_early_abort)
+	/*
+	 * The effect of data aborts on the exclusive access monitor is
+	 * UNPREDICTABLE. Do a CLREX to clear the state.
+	 */
+	clrex
+
+	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
+	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
+
+	/*
+	 * V6 code adjusts the returned DFSR.
+	 * New designs should not need to patch up faults.
+	 */
+	mov	pc, lr
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
new file mode 100644
index 000000000000..35ffc4d95997
--- /dev/null
+++ b/arch/arm/mm/cache-v7.S
@@ -0,0 +1,253 @@
+/*
+ *  linux/arch/arm/mm/cache-v7.S
+ *
+ *  Copyright (C) 2001 Deep Blue Solutions Ltd.
+ *  Copyright (C) 2005 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This is the "shell" of the ARMv7 processor support.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+
+#include "proc-macros.S"
+
+/*
+ *	v7_flush_dcache_all()
+ *
+ *	Flush the whole D-cache.
+ *
+ *	Corrupted registers: r0-r5, r7, r9-r11
+ *
+ *	- mm	- mm_struct describing address space
+ */
+ENTRY(v7_flush_dcache_all)
+	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
+	ands	r3, r0, #0x7000000		@ extract loc from clidr
+	mov	r3, r3, lsr #23			@ left align loc bit field
+	beq	finished			@ if loc is 0, then no need to clean
+	mov	r10, #0				@ start clean at cache level 0
+loop1:
+	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
+	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
+	and	r1, r1, #7			@ mask of the bits for current cache only
+	cmp	r1, #2				@ see what cache we have at this level
+	blt	skip				@ skip if no cache, or just i-cache
+	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
+	isb					@ isb to sync the new cssr & csidr
+	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
+	and	r2, r1, #7			@ extract the length of the cache lines
+	add	r2, r2, #4			@ add 4 (line length offset)
+	ldr	r4, =0x3ff
+	ands	r4, r4, r1, lsr #3		@ find maximum number of the way size
+	clz	r5, r4				@ find bit position of way size increment
+	ldr	r7, =0x7fff
+	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
+loop2:
+	mov	r9, r4				@ create working copy of max way size
+loop3:
+	orr	r11, r10, r9, lsl r5		@ factor way and cache number into r11
+	orr	r11, r11, r7, lsl r2		@ factor index number into r11
+	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
+	subs	r9, r9, #1			@ decrement the way
+	bge	loop3
+	subs	r7, r7, #1			@ decrement the index
+	bge	loop2
+skip:
+	add	r10, r10, #2			@ increment cache number
+	cmp	r3, r10
+	bgt	loop1
+finished:
+	mov	r10, #0				@ switch back to cache level 0
+	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
+	isb
+	mov	pc, lr
+
+/*
+ *	v7_flush_kern_cache_all()
+ *
+ *	Flush the entire cache system.
+ *	The data cache flush is now achieved using atomic clean / invalidates
+ *	working outwards from L1 cache. This is done using Set/Way based cache
+ *	maintenance instructions.
+ *	The instruction cache can still be invalidated back to the point of
+ *	unification in a single instruction.
+ *
+ */
+ENTRY(v7_flush_kern_cache_all)
+	stmfd	sp!, {r4-r5, r7, r9-r11, lr}
+	bl	v7_flush_dcache_all
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
+	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}
+	mov	pc, lr
+
+/*
+ *	v7_flush_user_cache_all()
+ *
+ *	Flush all cache entries in a particular address space
+ *
+ *	- mm	- mm_struct describing address space
+ */
+ENTRY(v7_flush_user_cache_all)
+	/*FALLTHROUGH*/
+
+/*
+ *	v7_flush_user_cache_range(start, end, flags)
+ *
+ *	Flush a range of cache entries in the specified address space.
+ *
+ *	- start	- start address (may not be aligned)
+ *	- end	- end address (exclusive, may not be aligned)
+ *	- flags	- vm_area_struct flags describing address space
+ *
+ *	It is assumed that:
+ *	- we have a VIPT cache.
+ */
+ENTRY(v7_flush_user_cache_range)
+	mov	pc, lr
+
+/*
+ *	v7_coherent_kern_range(start,end)
+ *
+ *	Ensure that the I and D caches are coherent within specified
+ *	region.  This is typically used when code has been written to
+ *	a memory region, and will be executed.
+ *
+ *	- start	- virtual start address of region
+ *	- end	- virtual end address of region
+ *
+ *	It is assumed that:
+ *	- the Icache does not read data from the write buffer
+ */
+ENTRY(v7_coherent_kern_range)
+	/* FALLTHROUGH */
+
+/*
+ *	v7_coherent_user_range(start,end)
+ *
+ *	Ensure that the I and D caches are coherent within specified
+ *	region.  This is typically used when code has been written to
+ *	a memory region, and will be executed.
+ *
+ *	- start	- virtual start address of region
+ *	- end	- virtual end address of region
+ *
+ *	It is assumed that:
+ *	- the Icache does not read data from the write buffer
+ */
+ENTRY(v7_coherent_user_range)
+	dcache_line_size r2, r3
+	sub	r3, r2, #1
+	bic	r0, r0, r3
+1:	mcr	p15, 0, r0, c7, c11, 1		@ clean D line to the point of unification
+	dsb
+	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I line
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	1b
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
+	dsb
+	isb
+	mov	pc, lr
+
+/*
+ *	v7_flush_kern_dcache_page(kaddr)
+ *
+ *	Ensure that the data held in the page kaddr is written back
+ *	to the page in question.
+ *
+ *	- kaddr	- kernel address (guaranteed to be page aligned)
+ */
+ENTRY(v7_flush_kern_dcache_page)
+	dcache_line_size r2, r3
+	add	r1, r0, #PAGE_SZ
+1:
+	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	1b
+	dsb
+	mov	pc, lr
+
+/*
+ *	v7_dma_inv_range(start,end)
+ *
+ *	Invalidate the data cache within the specified region; we will
+ *	be performing a DMA operation in this region and we want to
+ *	purge old data in the cache.
+ *
+ *	- start	- virtual start address of region
+ *	- end	- virtual end address of region
+ */
+ENTRY(v7_dma_inv_range)
+	dcache_line_size r2, r3
+	sub	r3, r2, #1
+	tst	r0, r3
+	bic	r0, r0, r3
+	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
+
+	tst	r1, r3
+	bic	r1, r1, r3
+	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D / U line
+1:
+	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D / U line
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	1b
+	dsb
+	mov	pc, lr
+
+/*
+ *	v7_dma_clean_range(start,end)
+ *	- start	- virtual start address of region
+ *	- end	- virtual end address of region
+ */
+ENTRY(v7_dma_clean_range)
+	dcache_line_size r2, r3
+	sub	r3, r2, #1
+	bic	r0, r0, r3
+1:
+	mcr	p15, 0, r0, c7, c10, 1		@ clean D / U line
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	1b
+	dsb
+	mov	pc, lr
+
+/*
+ *	v7_dma_flush_range(start,end)
+ *	- start	- virtual start address of region
+ *	- end	- virtual end address of region
+ */
+ENTRY(v7_dma_flush_range)
+	dcache_line_size r2, r3
+	sub	r3, r2, #1
+	bic	r0, r0, r3
+1:
+	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	1b
+	dsb
+	mov	pc, lr
+
+	__INITDATA
+
+	.type	v7_cache_fns, #object
+ENTRY(v7_cache_fns)
+	.long	v7_flush_kern_cache_all
+	.long	v7_flush_user_cache_all
+	.long	v7_flush_user_cache_range
+	.long	v7_coherent_kern_range
+	.long	v7_coherent_user_range
+	.long	v7_flush_kern_dcache_page
+	.long	v7_dma_inv_range
+	.long	v7_dma_clean_range
+	.long	v7_dma_flush_range
+	.size	v7_cache_fns, . - v7_cache_fns
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 9e2c89eb2115..b13150052a76 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -59,3 +59,15 @@
 		.word	\ucset
 #endif
 	.endm
+
+/*
+ * dcache_line_size - get the cache line size from the CSIDR register
+ * (available on ARMv7+). It assumes that the CSSR register was configured
+ * to access the L1 data cache CSIDR.
+ */
+	.macro	dcache_line_size, reg, tmp
+	mrc	p15, 1, \tmp, c0, c0, 0		@ read CSIDR
+	and	\tmp, \tmp, #7			@ cache line size encoding
+	mov	\reg, #16			@ size offset
+	mov	\reg, \reg, lsl \tmp		@ actual cache line size
+	.endm
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
new file mode 100644
index 000000000000..dd823dd4a374
--- /dev/null
+++ b/arch/arm/mm/proc-v7.S
@@ -0,0 +1,262 @@
+/*
+ *  linux/arch/arm/mm/proc-v7.S
+ *
+ *  Copyright (C) 2001 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This is the "shell" of the ARMv7 processor support.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/elf.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable.h>
+
+#include "proc-macros.S"
+
+#define TTB_C		(1 << 0)
+#define TTB_S		(1 << 1)
+#define TTB_RGN_OC_WT	(2 << 3)
+#define TTB_RGN_OC_WB	(3 << 3)
+
+ENTRY(cpu_v7_proc_init)
+	mov	pc, lr
+
+ENTRY(cpu_v7_proc_fin)
+	mov	pc, lr
+
+/*
+ *	cpu_v7_reset(loc)
+ *
+ *	Perform a soft reset of the system.  Put the CPU into the
+ *	same state as it would be if it had been reset, and branch
+ *	to what would be the reset vector.
+ *
+ *	- loc	- location to jump to for soft reset
+ *
+ *	It is assumed that:
+ */
+	.align	5
+ENTRY(cpu_v7_reset)
+	mov	pc, r0
+
+/*
+ *	cpu_v7_do_idle()
+ *
+ *	Idle the processor (eg, wait for interrupt).
+ *
+ *	IRQs are already disabled.
+ */
+ENTRY(cpu_v7_do_idle)
+	.long	0xe320f003			@ ARM V7 WFI instruction
+	mov	pc, lr
+
+ENTRY(cpu_v7_dcache_clean_area)
+#ifndef TLB_CAN_READ_FROM_L1_CACHE
+	dcache_line_size r2, r3
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	add	r0, r0, r2
+	subs	r1, r1, r2
+	bhi	1b
+	dsb
+#endif
+	mov	pc, lr
+
+/*
+ *	cpu_v7_switch_mm(pgd_phys, tsk)
+ *
+ *	Set the translation table base pointer to be pgd_phys
+ *
+ *	- pgd_phys - physical address of new TTB
+ *
+ *	It is assumed that:
+ *	- we are not using split page tables
+ */
+ENTRY(cpu_v7_switch_mm)
+	mov	r2, #0
+	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	orr	r0, r0, #TTB_RGN_OC_WB		@ mark PTWs outer cacheable, WB
+	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
+	isb
+1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	isb
+	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
+	isb
+	mov	pc, lr
+
+/*
+ *	cpu_v7_set_pte_ext(ptep, pte)
+ *
+ *	Set a level 2 translation table entry.
+ *
+ *	- ptep	- pointer to level 2 translation table entry
+ *		  (hardware version is stored at -1024 bytes)
+ *	- pte	- PTE value to store
+ *	- ext	- value for extended PTE bits
+ *
+ *	Permissions:
+ *	  YUWD  APX AP1 AP0	SVC	User
+ *	  0xxx   0   0   0	no acc	no acc
+ *	  100x   1   0   1	r/o	no acc
+ *	  10x0   1   0   1	r/o	no acc
+ *	  1011   0   0   1	r/w	no acc
+ *	  110x   0   1   0	r/w	r/o
+ *	  11x0   0   1   0	r/w	r/o
+ *	  1111   0   1   1	r/w	r/w
+ */
+ENTRY(cpu_v7_set_pte_ext)
+	str	r1, [r0], #-2048		@ linux version
+
+	bic	r3, r1, #0x000003f0
+	bic	r3, r3, #0x00000003
+	orr	r3, r3, r2
+	orr	r3, r3, #PTE_EXT_AP0 | 2
+
+	tst	r1, #L_PTE_WRITE
+	tstne	r1, #L_PTE_DIRTY
+	orreq	r3, r3, #PTE_EXT_APX
+
+	tst	r1, #L_PTE_USER
+	orrne	r3, r3, #PTE_EXT_AP1
+	tstne	r3, #PTE_EXT_APX
+	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+
+	tst	r1, #L_PTE_YOUNG
+	biceq	r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK
+
+	tst	r1, #L_PTE_EXEC
+	orreq	r3, r3, #PTE_EXT_XN
+
+	tst	r1, #L_PTE_PRESENT
+	moveq	r3, #0
+
+	str	r3, [r0]
+	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
+	mov	pc, lr
+
+cpu_v7_name:
+	.ascii	"ARMv7 Processor"
+	.align
+
+	.section ".text.init", #alloc, #execinstr
+
+/*
+ *	__v7_setup
+ *
+ *	Initialise TLB, Caches, and MMU state ready to switch the MMU
+ *	on.  Return in r0 the new CP15 C1 control register setting.
+ *
+ *	We automatically detect if we have a Harvard cache, and use the
+ *	Harvard cache control instructions instead of the unified cache
+ *	control instructions.
+ *
+ *	This should be able to cover all ARMv7 cores.
+ *
+ *	It is assumed that:
+ *	- cache type register is implemented
+ */
+__v7_setup:
+	adr	r12, __v7_setup_stack		@ the local stack
+	stmia	r12, {r0-r5, r7, r9, r11, lr}
+	bl	v7_flush_dcache_all
+	ldmia	r12, {r0-r5, r7, r9, r11, lr}
+	mov	r10, #0
+#ifdef HARVARD_CACHE
+	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
+#endif
+	dsb
+	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
+	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
+	orr	r4, r4, #TTB_RGN_OC_WB		@ mark PTWs outer cacheable, WB
+	mcr	p15, 0, r4, c2, c0, 0		@ load TTB0
+	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
+	mov	r10, #0x1f			@ domains 0, 1 = manager
+	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
+#ifndef CONFIG_CPU_L2CACHE_DISABLE
+	@ L2 cache configuration in the L2 aux control register
+	mrc	p15, 1, r10, c9, c0, 2
+	bic	r10, r10, #(1 << 16)		@ L2 outer cache
+	mcr	p15, 1, r10, c9, c0, 2
+	@ L2 cache is enabled in the aux control register
+	mrc	p15, 0, r10, c1, c0, 1
+	orr	r10, r10, #2
+	mcr	p15, 0, r10, c1, c0, 1
+#endif
+	mrc	p15, 0, r0, c1, c0, 0		@ read control register
+	ldr	r10, cr1_clear			@ get mask for bits to clear
+	bic	r0, r0, r10			@ clear the bits
+	ldr	r10, cr1_set			@ get mask for bits to set
+	orr	r0, r0, r10			@ set them
+	mov	pc, lr				@ return to head.S:__ret
+
+	/*
+	 *         V X F   I D LR
+	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
+	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
+	 *         0 110       0011 1.00 .111 1101 < we want
+	 */
+	.type	cr1_clear, #object
+	.type	cr1_set, #object
+cr1_clear:
+	.word	0x0120c302
+cr1_set:
+	.word	0x00c0387d
+
+__v7_setup_stack:
+	.space	4 * 11				@ 11 registers
+
+	.type	v7_processor_functions, #object
+ENTRY(v7_processor_functions)
+	.word	v7_early_abort
+	.word	cpu_v7_proc_init
+	.word	cpu_v7_proc_fin
+	.word	cpu_v7_reset
+	.word	cpu_v7_do_idle
+	.word	cpu_v7_dcache_clean_area
+	.word	cpu_v7_switch_mm
+	.word	cpu_v7_set_pte_ext
+	.size	v7_processor_functions, . - v7_processor_functions
+
+	.type	cpu_arch_name, #object
+cpu_arch_name:
+	.asciz	"armv7"
+	.size	cpu_arch_name, . - cpu_arch_name
+
+	.type	cpu_elf_name, #object
+cpu_elf_name:
+	.asciz	"v7"
+	.size	cpu_elf_name, . - cpu_elf_name
+	.align
+
+	.section ".proc.info.init", #alloc, #execinstr
+
+	/*
+	 * Match any ARMv7 processor core.
+	 */
+	.type	__v7_proc_info, #object
+__v7_proc_info:
+	.long	0x000f0000			@ Required ID value
+	.long	0x000f0000			@ Mask for ID
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_BUFFERABLE | \
+		PMD_SECT_CACHEABLE | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_XN | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	b	__v7_setup
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	cpu_v7_name
+	.long	v7_processor_functions
+	.long	v6wbi_tlb_fns
+	.long	v6_user_fns
+	.long	v7_cache_fns
+	.size	__v7_proc_info, . - __v7_proc_info
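The set/way walk performed by v7_flush_dcache_all in the diff above is easier to follow in C. The sketch below is illustrative only and not part of the patch: the cp15_* helpers are hypothetical stand-ins for the MRC/MCR accesses to CLIDR, CSSELR/CCSIDR and the DCCISW clean-and-invalidate operation, and the example values model a single 32 KB, 4-way, 64-byte-line L1 D-cache.

/*
 * Illustrative model of the set/way loop in v7_flush_dcache_all.
 * Not part of the patch; cp15_* helpers are hypothetical stand-ins
 * for the real CP15 register accesses.
 */
#include <stdint.h>
#include <stdio.h>

/* Pretend hardware: one cache level, separate I and D caches (ctype = 3). */
static uint32_t cp15_read_clidr(void)
{
	return (1u << 24) | 3u;			/* LoC = 1, L1 = separate I/D */
}

/* Pretend L1 D-cache: 64-byte lines, 4 ways, 128 sets (32 KB). */
static uint32_t cp15_read_ccsidr(uint32_t csselr)
{
	(void)csselr;				/* only one level in this model */
	return (127u << 13) | (3u << 3) | 2u;
}

static unsigned long ops;

/* Stand-in for "mcr p15, 0, rX, c7, c14, 2" (clean & invalidate by set/way). */
static void cp15_dccisw(uint32_t setway)
{
	(void)setway;
	ops++;
}

static void model_flush_dcache_all(void)
{
	uint32_t clidr = cp15_read_clidr();
	unsigned loc = (clidr >> 24) & 0x7;		/* Level of Coherency */

	for (unsigned level = 0; level < loc; level++) {
		unsigned ctype = (clidr >> (3 * level)) & 0x7;
		if (ctype < 2)				/* no cache, or I-cache only */
			continue;

		uint32_t ccsidr = cp15_read_ccsidr(level << 1);	/* select the D/unified cache */
		unsigned line_shift = (ccsidr & 0x7) + 4;	/* log2(line size in bytes) */
		unsigned max_way = (ccsidr >> 3) & 0x3ff;	/* associativity - 1 */
		unsigned max_set = (ccsidr >> 13) & 0x7fff;	/* number of sets - 1 */
		/* Way field is left-aligned; shift amount is irrelevant when max_way is 0. */
		unsigned way_shift = max_way ? (unsigned)__builtin_clz(max_way) : 0;

		for (unsigned set = 0; set <= max_set; set++)
			for (unsigned way = 0; way <= max_way; way++)
				cp15_dccisw((way << way_shift) |
					    (set << line_shift) |
					    (level << 1));
	}
}

int main(void)
{
	model_flush_dcache_all();
	printf("issued %lu clean+invalidate operations\n", ops);	/* 128 sets * 4 ways = 512 */
	return 0;
}

The assembly performs the same walk with r10 holding twice the cache level, r7 counting sets and r9 counting ways; because every loop bound is derived from CLIDR and CCSIDR at run time, the routine adapts to whatever cache hierarchy an ARMv7 implementation reports.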