 arch/arm/cpu/armv8/cache.S         |  54
 arch/arm/cpu/armv8/cache_v8.c      | 453
 arch/arm/include/asm/armv8/mmu.h   |  68
 arch/arm/include/asm/global_data.h |   5
 arch/arm/include/asm/system.h      |  14
 include/configs/thunderx_88xx.h    |  14
 6 files changed, 500 insertions(+), 108 deletions(-)
diff --git a/arch/arm/cpu/armv8/cache.S b/arch/arm/cpu/armv8/cache.S
index ab8c08917ad..a9f4fec387f 100644
--- a/arch/arm/cpu/armv8/cache.S
+++ b/arch/arm/cpu/armv8/cache.S
@@ -10,6 +10,7 @@
 #include <asm-offsets.h>
 #include <config.h>
 #include <asm/macro.h>
+#include <asm/system.h>
 #include <linux/linkage.h>
 
 /*
@@ -160,3 +161,56 @@ ENTRY(__asm_flush_l3_cache)
 	ret
 ENDPROC(__asm_flush_l3_cache)
 	.weak	__asm_flush_l3_cache
+
+/*
+ * void __asm_switch_ttbr(ulong new_ttbr)
+ *
+ * Safely switches to a new page table.
+ */
+ENTRY(__asm_switch_ttbr)
+	/* x2 = SCTLR (alive throughout the function) */
+	switch_el x4, 3f, 2f, 1f
+3:	mrs	x2, sctlr_el3
+	b	0f
+2:	mrs	x2, sctlr_el2
+	b	0f
+1:	mrs	x2, sctlr_el1
+0:
+
+	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
+	movn	x1, #(CR_M | CR_C | CR_I)
+	and	x1, x2, x1
+	switch_el x4, 3f, 2f, 1f
+3:	msr	sctlr_el3, x1
+	b	0f
+2:	msr	sctlr_el2, x1
+	b	0f
+1:	msr	sctlr_el1, x1
+0:	isb
+
+	/* This call only clobbers x30 (lr) and x9 (unused) */
+	mov	x3, x30
+	bl	__asm_invalidate_tlb_all
+
+	/* From here on we're running safely with caches disabled */
+
+	/* Set TTBR to our first argument */
+	switch_el x4, 3f, 2f, 1f
+3:	msr	ttbr0_el3, x0
+	b	0f
+2:	msr	ttbr0_el2, x0
+	b	0f
+1:	msr	ttbr0_el1, x0
+0:	isb
+
+	/* Restore original SCTLR and thus enable caches again */
+	switch_el x4, 3f, 2f, 1f
+3:	msr	sctlr_el3, x2
+	b	0f
+2:	msr	sctlr_el2, x2
+	b	0f
+1:	msr	sctlr_el1, x2
+0:	isb
+
+	ret	x3
+ENDPROC(__asm_switch_ttbr)
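__asm_switch_ttbr() opens a short cache-off window (SCTLR with M/C/I cleared), invalidates the TLBs, loads the new TTBR0 for the current exception level and then restores the saved SCTLR. A minimal C-level usage sketch, not part of the patch: it assumes CONFIG_SYS_FULL_VA, uses only the gd->arch fields and the __asm_switch_ttbr() prototype introduced by the later hunks, and the helper name edit_live_page_tables() is made up for illustration.

#include <common.h>
#include <asm/system.h>

DECLARE_GLOBAL_DATA_PTR;

/* Hypothetical helper: run 'modify' while the CPU walks the spare tables */
static void edit_live_page_tables(void (*modify)(void))
{
	__asm_switch_ttbr(gd->arch.tlb_emerg);	/* hop onto the emergency tables */
	modify();				/* primary tables are now safe to rewrite */
	__asm_switch_ttbr(gd->arch.tlb_addr);	/* and hop back */
}

This is the same switch-edit-switch pattern that mmu_set_region_dcache_behaviour() below uses when it has to split live block mappings.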
diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index d92f2d1768f..73628c96113 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -2,6 +2,9 @@
  * (C) Copyright 2013
  * David Feng <fenghua@phytium.com.cn>
  *
+ * (C) Copyright 2016
+ * Alexander Graf <agraf@suse.de>
+ *
  * SPDX-License-Identifier:	GPL-2.0+
  */
 
@@ -13,31 +16,28 @@ DECLARE_GLOBAL_DATA_PTR;
 
 #ifndef CONFIG_SYS_DCACHE_OFF
 
-#ifdef CONFIG_SYS_FULL_VA
-static void set_ptl1_entry(u64 index, u64 ptl2_entry)
-{
-	u64 *pgd = (u64 *)gd->arch.tlb_addr;
-	u64 value;
-
-	value = ptl2_entry | PTL1_TYPE_TABLE;
-	pgd[index] = value;
-}
-
-static void set_ptl2_block(u64 ptl1, u64 bfn, u64 address, u64 memory_attrs)
-{
-	u64 *pmd = (u64 *)ptl1;
-	u64 value;
-
-	value = address | PTL2_TYPE_BLOCK | PTL2_BLOCK_AF;
-	value |= memory_attrs;
-	pmd[bfn] = value;
-}
+/*
+ * With 4k page granule, a virtual address is split into 4 lookup parts
+ * spanning 9 bits each:
+ *
+ *    _______________________________________________
+ *   |       |       |       |       |       |       |
+ *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
+ *   |_______|_______|_______|_______|_______|_______|
+ *     63-48   47-39   38-30   29-21   20-12   11-00
+ *
+ *             mask        page size
+ *
+ * Lv0: FF8000000000       --
+ * Lv1:   7FC0000000       1G
+ * Lv2:     3FE00000       2M
+ * Lv3:       1FF000       4K
+ * off:          FFF
+ */
 
+#ifdef CONFIG_SYS_FULL_VA
 static struct mm_region mem_map[] = CONFIG_SYS_MEM_MAP;
 
-#define PTL1_ENTRIES CONFIG_SYS_PTL1_ENTRIES
-#define PTL2_ENTRIES CONFIG_SYS_PTL2_ENTRIES
-
 static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
 {
 	u64 max_addr = 0;
@@ -79,8 +79,8 @@ static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
 	}
 
 	/* PTWs cacheable, inner/outer WBWA and inner shareable */
-	tcr |= TCR_TG0_64K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
-	tcr |= TCR_T0SZ(VA_BITS);
+	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
+	tcr |= TCR_T0SZ(va_bits);
 
 	if (pips)
 		*pips = ips;
@@ -90,39 +90,302 @@ static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
 	return tcr;
 }
 
-static void setup_pgtables(void)
+#define MAX_PTE_ENTRIES	512
+
+static int pte_type(u64 *pte)
+{
+	return *pte & PTE_TYPE_MASK;
+}
+
+/* Returns the LSB number for a PTE on level <level> */
+static int level2shift(int level)
+{
+	/* Page is 12 bits wide, every level translates 9 bits */
+	return (12 + 9 * (3 - level));
+}
+
+static u64 *find_pte(u64 addr, int level)
 {
-	int l1_e, l2_e;
-	unsigned long pmd = 0;
-	unsigned long address;
-
-	/* Setup the PMD pointers */
-	for (l1_e = 0; l1_e < CONFIG_SYS_MEM_MAP_SIZE; l1_e++) {
-		gd->arch.pmd_addr[l1_e] = gd->arch.tlb_addr +
-						PTL1_ENTRIES * sizeof(u64);
-		gd->arch.pmd_addr[l1_e] += PTL2_ENTRIES * sizeof(u64) * l1_e;
-		gd->arch.pmd_addr[l1_e] = ALIGN(gd->arch.pmd_addr[l1_e],
-						0x10000UL);
+	int start_level = 0;
+	u64 *pte;
+	u64 idx;
+	u64 va_bits;
+	int i;
+
+	debug("addr=%llx level=%d\n", addr, level);
+
+	get_tcr(0, NULL, &va_bits);
+	if (va_bits < 39)
+		start_level = 1;
+
+	if (level < start_level)
+		return NULL;
+
+	/* Walk through all page table levels to find our PTE */
+	pte = (u64*)gd->arch.tlb_addr;
+	for (i = start_level; i < 4; i++) {
+		idx = (addr >> level2shift(i)) & 0x1FF;
+		pte += idx;
+		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);
+
+		/* Found it */
+		if (i == level)
+			return pte;
+		/* PTE is no table (either invalid or block), can't traverse */
+		if (pte_type(pte) != PTE_TYPE_TABLE)
+			return NULL;
+		/* Off to the next level */
+		pte = (u64*)(*pte & 0x0000fffffffff000ULL);
 	}
 
-	/* Setup the page tables */
-	for (l1_e = 0; l1_e < PTL1_ENTRIES; l1_e++) {
-		if (mem_map[pmd].base ==
-			(uintptr_t)l1_e << PTL2_BITS) {
-			set_ptl1_entry(l1_e, gd->arch.pmd_addr[pmd]);
-
-			for (l2_e = 0; l2_e < PTL2_ENTRIES; l2_e++) {
-				address = mem_map[pmd].base
-					+ (uintptr_t)l2_e * BLOCK_SIZE;
-				set_ptl2_block(gd->arch.pmd_addr[pmd], l2_e,
-					       address, mem_map[pmd].attrs);
+	/* Should never reach here */
+	return NULL;
+}
+
+/* Returns and creates a new full table (512 entries) */
+static u64 *create_table(void)
+{
+	u64 *new_table = (u64*)gd->arch.tlb_fillptr;
+	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);
+
+	/* Allocate MAX_PTE_ENTRIES pte entries */
+	gd->arch.tlb_fillptr += pt_len;
+
+	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
+		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
+		      "Please increase the size in get_page_table_size()",
+			gd->arch.tlb_fillptr - gd->arch.tlb_addr,
+			gd->arch.tlb_size);
+
+	/* Mark all entries as invalid */
+	memset(new_table, 0, pt_len);
+
+	return new_table;
+}
+
+static void set_pte_table(u64 *pte, u64 *table)
+{
+	/* Point *pte to the new table */
+	debug("Setting %p to addr=%p\n", pte, table);
+	*pte = PTE_TYPE_TABLE | (ulong)table;
+}
+
+/* Add one mm_region map entry to the page tables */
+static void add_map(struct mm_region *map)
+{
+	u64 *pte;
+	u64 addr = map->base;
+	u64 size = map->size;
+	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
+	u64 blocksize;
+	int level;
+	u64 *new_table;
+
+	while (size) {
+		pte = find_pte(addr, 0);
+		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
+			debug("Creating table for addr 0x%llx\n", addr);
+			new_table = create_table();
+			set_pte_table(pte, new_table);
+		}
+
+		for (level = 1; level < 4; level++) {
+			pte = find_pte(addr, level);
+			blocksize = 1ULL << level2shift(level);
+			debug("Checking if pte fits for addr=%llx size=%llx "
+			      "blocksize=%llx\n", addr, size, blocksize);
+			if (size >= blocksize && !(addr & (blocksize - 1))) {
+				/* Page fits, create block PTE */
+				debug("Setting PTE %p to block addr=%llx\n",
+				      pte, addr);
+				*pte = addr | attrs;
+				addr += blocksize;
+				size -= blocksize;
+				break;
+			} else if ((pte_type(pte) == PTE_TYPE_FAULT)) {
+				/* Page doesn't fit, create subpages */
+				debug("Creating subtable for addr 0x%llx "
+				      "blksize=%llx\n", addr, blocksize);
+				new_table = create_table();
+				set_pte_table(pte, new_table);
 			}
+		}
+	}
+}
+
+/* Splits a block PTE into table with subpages spanning the old block */
+static void split_block(u64 *pte, int level)
+{
+	u64 old_pte = *pte;
+	u64 *new_table;
+	u64 i = 0;
+	/* level describes the parent level, we need the child ones */
+	int levelshift = level2shift(level + 1);
+
+	if (pte_type(pte) != PTE_TYPE_BLOCK)
+		panic("PTE %p (%llx) is not a block. Some driver code wants to "
+		      "modify dcache settings for a range not covered in "
+		      "mem_map.", pte, old_pte);
+
+	new_table = create_table();
+	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);
+
+	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
+		new_table[i] = old_pte | (i << levelshift);
+
+		/* Level 3 block PTEs have the table type */
+		if ((level + 1) == 3)
+			new_table[i] |= PTE_TYPE_TABLE;
+
+		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
+	}
+
+	/* Set the new table into effect */
+	set_pte_table(pte, new_table);
+}
+
+enum pte_type {
+	PTE_INVAL,
+	PTE_BLOCK,
+	PTE_LEVEL,
+};
 
-			pmd++;
-		} else {
-			set_ptl1_entry(l1_e, 0);
+/*
+ * This is a recursively called function to count the number of
+ * page tables we need to cover a particular PTE range. If you
+ * call this with level = -1 you basically get the full 48 bit
+ * coverage.
+ */
+static int count_required_pts(u64 addr, int level, u64 maxaddr)
+{
+	int levelshift = level2shift(level);
+	u64 levelsize = 1ULL << levelshift;
+	u64 levelmask = levelsize - 1;
+	u64 levelend = addr + levelsize;
+	int r = 0;
+	int i;
+	enum pte_type pte_type = PTE_INVAL;
+
+	for (i = 0; i < ARRAY_SIZE(mem_map); i++) {
+		struct mm_region *map = &mem_map[i];
+		u64 start = map->base;
+		u64 end = start + map->size;
+
+		/* Check if the PTE would overlap with the map */
+		if (max(addr, start) <= min(levelend, end)) {
+			start = max(addr, start);
+			end = min(levelend, end);
+
+			/* We need a sub-pt for this level */
+			if ((start & levelmask) || (end & levelmask)) {
+				pte_type = PTE_LEVEL;
+				break;
+			}
+
+			/* Lv0 can not do block PTEs, so do levels here too */
+			if (level <= 0) {
+				pte_type = PTE_LEVEL;
+				break;
+			}
+
+			/* PTE is active, but fits into a block */
+			pte_type = PTE_BLOCK;
 		}
 	}
+
+	/*
+	 * Block PTEs at this level are already covered by the parent page
+	 * table, so we only need to count sub page tables.
+	 */
+	if (pte_type == PTE_LEVEL) {
+		int sublevel = level + 1;
+		u64 sublevelsize = 1ULL << level2shift(sublevel);
+
+		/* Account for the new sub page table ... */
+		r = 1;
+
+		/* ... and for all child page tables that one might have */
+		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
+			r += count_required_pts(addr, sublevel, maxaddr);
+			addr += sublevelsize;
+
+			if (addr >= maxaddr) {
+				/*
+				 * We reached the end of address space, no need
+				 * to look any further.
+				 */
+				break;
+			}
+		}
+	}
+
+	return r;
+}
+
+/* Returns the estimated required size of all page tables */
+u64 get_page_table_size(void)
+{
+	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
+	u64 size = 0;
+	u64 va_bits;
+	int start_level = 0;
+
+	get_tcr(0, NULL, &va_bits);
+	if (va_bits < 39)
+		start_level = 1;
+
+	/* Account for all page tables we would need to cover our memory map */
+	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);
+
+	/*
+	 * We need to duplicate our page table once to have an emergency pt to
+	 * resort to when splitting page tables later on
+	 */
+	size *= 2;
+
+	/*
+	 * We may need to split page tables later on if dcache settings change,
+	 * so reserve up to 4 (random pick) page tables for that.
+	 */
+	size += one_pt * 4;
+
+	return size;
+}
+
+static void setup_pgtables(void)
+{
+	int i;
+
+	/*
+	 * Allocate the first level we're on with invalid entries.
+	 * If the starting level is 0 (va_bits >= 39), then this is our
+	 * Lv0 page table, otherwise it's the entry Lv1 page table.
+	 */
+	create_table();
+
+	/* Now add all MMU table entries one after another to the table */
+	for (i = 0; i < ARRAY_SIZE(mem_map); i++)
+		add_map(&mem_map[i]);
+
+	/* Create the same thing once more for our emergency page table */
+	create_table();
+}
+
+static void setup_all_pgtables(void)
+{
+	u64 tlb_addr = gd->arch.tlb_addr;
+
+	/* Reset the fill ptr */
+	gd->arch.tlb_fillptr = tlb_addr;
+
+	/* Create normal system page tables */
+	setup_pgtables();
+
+	/* Create emergency page tables */
+	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
+	setup_pgtables();
+	gd->arch.tlb_emerg = gd->arch.tlb_addr;
+	gd->arch.tlb_addr = tlb_addr;
 }
 
 #else
@@ -157,11 +420,9 @@ __weak void mmu_setup(void)
 	int el;
 
 #ifdef CONFIG_SYS_FULL_VA
-	unsigned long coreid = read_mpidr() & CONFIG_COREID_MASK;
-
-	/* Set up page tables only on BSP */
-	if (coreid == BSP_COREID)
-		setup_pgtables();
+	/* Set up page tables only once */
+	if (!gd->arch.tlb_fillptr)
+		setup_all_pgtables();
 
 	el = current_el();
 	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
@@ -311,6 +572,88 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
 	flush_dcache_range(start, end);
 	asm volatile("dsb sy");
 }
+#else
+static bool is_aligned(u64 addr, u64 size, u64 align)
+{
+	return !(addr & (align - 1)) && !(size & (align - 1));
+}
+
+static u64 set_one_region(u64 start, u64 size, u64 attrs, int level)
+{
+	int levelshift = level2shift(level);
+	u64 levelsize = 1ULL << levelshift;
+	u64 *pte = find_pte(start, level);
+
+	/* Can we just modify the current level block PTE? */
+	if (is_aligned(start, size, levelsize)) {
+		*pte &= ~PMD_ATTRINDX_MASK;
+		*pte |= attrs;
+		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);
+
+		return levelsize;
+	}
+
+	/* Unaligned or doesn't fit, maybe split block into table */
+	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);
+
+	/* Maybe we need to split the block into a table */
+	if (pte_type(pte) == PTE_TYPE_BLOCK)
+		split_block(pte, level);
+
+	/* And then double-check it became a table or already is one */
+	if (pte_type(pte) != PTE_TYPE_TABLE)
+		panic("PTE %p (%llx) for addr=%llx should be a table",
+		      pte, *pte, start);
+
+	/* Roll on to the next page table level */
+	return 0;
+}
+
+void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
+				     enum dcache_option option)
+{
+	u64 attrs = PMD_ATTRINDX(option);
+	u64 real_start = start;
+	u64 real_size = size;
+
+	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);
+
+	/*
+	 * We can not modify page tables that we're currently running on,
+	 * so we first need to switch to the "emergency" page tables where
+	 * we can safely modify our primary page tables and then switch back
+	 */
+	__asm_switch_ttbr(gd->arch.tlb_emerg);
+
+	/*
+	 * Loop through the address range until we find a page granule that
+	 * fits our alignment constraints, then set it to the new cache
+	 * attributes
+	 */
+	while (size > 0) {
+		int level;
+		u64 r;
+
+		for (level = 1; level < 4; level++) {
+			r = set_one_region(start, size, attrs, level);
+			if (r) {
+				/* PTE successfully replaced */
+				size -= r;
+				start += r;
+				break;
+			}
+		}
+
+	}
+
+	/* We're done modifying page tables, switch back to our primary ones */
+	__asm_switch_ttbr(gd->arch.tlb_addr);
+
+	/*
+	 * Make sure there's nothing stale in dcache for a region that might
+	 * have caches off now
+	 */
+	flush_dcache_range(real_start, real_start + real_size);
+}
 #endif
 
 #else	/* CONFIG_SYS_DCACHE_OFF */
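The find_pte() walk above leans entirely on the 9-bits-per-level split sketched in the comment block: level2shift() gives the LSB of each level's index field, and the index itself is the 9 bits above that shift. A small standalone sketch of the same arithmetic (host-compilable, not part of the patch; the example address is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Same formula as level2shift() in the patch: 12-bit pages, 9 bits per level */
static int level2shift(int level)
{
	return 12 + 9 * (3 - level);	/* Lv0=39, Lv1=30, Lv2=21, Lv3=12 */
}

int main(void)
{
	uint64_t addr = 0x0000840000123456ULL;	/* arbitrary 48-bit example address */
	int level;

	for (level = 0; level < 4; level++)
		printf("Lv%d: shift=%2d index=%llu\n", level, level2shift(level),
		       (unsigned long long)((addr >> level2shift(level)) & 0x1FF));
	return 0;
}

This is the quantity count_required_pts() and get_page_table_size() reason about when they estimate how many 512-entry tables a given mem_map needs.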
diff --git a/arch/arm/include/asm/armv8/mmu.h b/arch/arm/include/asm/armv8/mmu.h
index 0080ae6ff52..1c490dcd100 100644
--- a/arch/arm/include/asm/armv8/mmu.h
+++ b/arch/arm/include/asm/armv8/mmu.h
@@ -26,15 +26,9 @@
 #define VA_BITS			(42)	/* 42 bits virtual address */
 #else
 #define VA_BITS			CONFIG_SYS_VA_BITS
-#define PTL2_BITS		CONFIG_SYS_PTL2_BITS
+#define PTE_BLOCK_BITS		CONFIG_SYS_PTL2_BITS
 #endif
 
-/* PAGE_SHIFT determines the page size */
-#undef PAGE_SIZE
-#define PAGE_SHIFT		16
-#define PAGE_SIZE		(1 << PAGE_SHIFT)
-#define PAGE_MASK		(~(PAGE_SIZE-1))
-
 /*
  * block/section address mask and size definitions.
  */
@@ -42,10 +36,21 @@
 #define SECTION_SHIFT		29
 #define SECTION_SIZE		(UL(1) << SECTION_SHIFT)
 #define SECTION_MASK		(~(SECTION_SIZE-1))
+
+/* PAGE_SHIFT determines the page size */
+#undef PAGE_SIZE
+#define PAGE_SHIFT		16
+#define PAGE_SIZE		(1 << PAGE_SHIFT)
+#define PAGE_MASK		(~(PAGE_SIZE-1))
+
 #else
-#define BLOCK_SHIFT		CONFIG_SYS_BLOCK_SHIFT
-#define BLOCK_SIZE		(UL(1) << BLOCK_SHIFT)
-#define BLOCK_MASK		(~(BLOCK_SIZE-1))
+
+/* PAGE_SHIFT determines the page size */
+#undef PAGE_SIZE
+#define PAGE_SHIFT		12
+#define PAGE_SIZE		(1 << PAGE_SHIFT)
+#define PAGE_MASK		(~(PAGE_SIZE-1))
+
 #endif
 
 /***************************************************************/
@@ -71,39 +76,28 @@
  */
 
 #ifdef CONFIG_SYS_FULL_VA
-/*
- * Level 1 descriptor (PGD).
- */
-#define PTL1_TYPE_MASK		(3 << 0)
-#define PTL1_TYPE_TABLE		(3 << 0)
-
-#define PTL1_TABLE_PXN		(1UL << 59)
-#define PTL1_TABLE_XN		(1UL << 60)
-#define PTL1_TABLE_AP		(1UL << 61)
-#define PTL1_TABLE_NS		(1UL << 63)
-
-
-/*
- * Level 2 descriptor (PMD).
- */
+#define PTE_TYPE_MASK		(3 << 0)
+#define PTE_TYPE_FAULT		(0 << 0)
+#define PTE_TYPE_TABLE		(3 << 0)
+#define PTE_TYPE_BLOCK		(1 << 0)
 
-#define PTL2_TYPE_MASK		(3 << 0)
-#define PTL2_TYPE_FAULT		(0 << 0)
-#define PTL2_TYPE_TABLE		(3 << 0)
-#define PTL2_TYPE_BLOCK		(1 << 0)
+#define PTE_TABLE_PXN		(1UL << 59)
+#define PTE_TABLE_XN		(1UL << 60)
+#define PTE_TABLE_AP		(1UL << 61)
+#define PTE_TABLE_NS		(1UL << 63)
 
 /*
  * Block
  */
-#define PTL2_MEMTYPE(x)		((x) << 2)
-#define PTL2_BLOCK_NON_SHARE	(0 << 8)
-#define PTL2_BLOCK_OUTER_SHARE	(2 << 8)
-#define PTL2_BLOCK_INNER_SHARE	(3 << 8)
-#define PTL2_BLOCK_AF		(1 << 10)
-#define PTL2_BLOCK_NG		(1 << 11)
-#define PTL2_BLOCK_PXN		(UL(1) << 53)
-#define PTL2_BLOCK_UXN		(UL(1) << 54)
+#define PTE_BLOCK_MEMTYPE(x)	((x) << 2)
+#define PTE_BLOCK_NON_SHARE	(0 << 8)
+#define PTE_BLOCK_OUTER_SHARE	(2 << 8)
+#define PTE_BLOCK_INNER_SHARE	(3 << 8)
+#define PTE_BLOCK_AF		(1 << 10)
+#define PTE_BLOCK_NG		(1 << 11)
+#define PTE_BLOCK_PXN		(UL(1) << 53)
+#define PTE_BLOCK_UXN		(UL(1) << 54)
 
 #else
 
 /*
diff --git a/arch/arm/include/asm/global_data.h b/arch/arm/include/asm/global_data.h
index dcfa0985b5b..259daa100e5 100644
--- a/arch/arm/include/asm/global_data.h
+++ b/arch/arm/include/asm/global_data.h
@@ -38,10 +38,11 @@ struct arch_global_data {
 	unsigned long long timer_reset_value;
 #if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
 	unsigned long tlb_addr;
+	unsigned long tlb_size;
 #if defined(CONFIG_SYS_FULL_VA)
-	unsigned long pmd_addr[CONFIG_SYS_PTL1_ENTRIES];
+	unsigned long tlb_fillptr;
+	unsigned long tlb_emerg;
 #endif
-	unsigned long tlb_size;
 #endif
 
 #ifdef CONFIG_OMAP_COMMON
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 026e7ef83bb..9b1cbf2c431 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -17,18 +17,19 @@
 #define CR_WXN		(1 << 19)	/* Write Permision Imply XN	*/
 #define CR_EE		(1 << 25)	/* Exception (Big) Endian	*/
 
-#ifndef CONFIG_SYS_FULL_VA
-#define PGTABLE_SIZE	(0x10000)
-#else
-#define PGTABLE_SIZE	CONFIG_SYS_PGTABLE_SIZE
-#endif
-
 /* 2MB granularity */
 #define MMU_SECTION_SHIFT	21
 #define MMU_SECTION_SIZE	(1 << MMU_SECTION_SHIFT)
 
 #ifndef __ASSEMBLY__
 
+#ifndef CONFIG_SYS_FULL_VA
+#define PGTABLE_SIZE	(0x10000)
+#else
+u64 get_page_table_size(void);
+#define PGTABLE_SIZE	get_page_table_size()
+#endif
+
 enum dcache_option {
 	DCACHE_OFF = 0x3,
 };
@@ -97,6 +98,7 @@ void __asm_flush_dcache_range(u64 start, u64 end);
 void __asm_invalidate_tlb_all(void);
 void __asm_invalidate_icache_all(void);
 int __asm_flush_l3_cache(void);
+void __asm_switch_ttbr(u64 new_ttbr);
 
 void armv8_switch_to_el2(void);
 void armv8_switch_to_el1(void);
diff --git a/include/configs/thunderx_88xx.h b/include/configs/thunderx_88xx.h
index dba98ad422c..36b6ce8df28 100644
--- a/include/configs/thunderx_88xx.h
+++ b/include/configs/thunderx_88xx.h
@@ -22,21 +22,19 @@
 
 #define MEM_BASE			0x00500000
 
-#define CONFIG_COREID_MASK		0xffffff
-
 #define CONFIG_SYS_FULL_VA
 
 #define CONFIG_SYS_LOWMEM_BASE		MEM_BASE
 
 #define CONFIG_SYS_MEM_MAP		{{0x000000000000UL, 0x40000000000UL, \
-					  PTL2_MEMTYPE(MT_NORMAL) |	\
-					  PTL2_BLOCK_NON_SHARE},	\
+					  PTE_BLOCK_MEMTYPE(MT_NORMAL) |	\
+					  PTE_BLOCK_NON_SHARE},		\
 					 {0x800000000000UL, 0x40000000000UL, \
-					  PTL2_MEMTYPE(MT_DEVICE_NGNRNE) | \
-					  PTL2_BLOCK_NON_SHARE},	\
+					  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | \
+					  PTE_BLOCK_NON_SHARE},		\
 					 {0x840000000000UL, 0x40000000000UL, \
-					  PTL2_MEMTYPE(MT_DEVICE_NGNRNE) | \
-					  PTL2_BLOCK_NON_SHARE},	\
+					  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | \
+					  PTE_BLOCK_NON_SHARE},		\
 					 }
 
 #define CONFIG_SYS_MEM_MAP_SIZE		3
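The ThunderX hunk above shows the only config change a board needs: the memory map stays a list of {base, size, attrs} mm_region entries, only the attribute macros move from the PTL2_* to the PTE_BLOCK_* names. A hedged, hypothetical example for some other CONFIG_SYS_FULL_VA board (the base/size values and the two-entry layout are invented, not taken from this patch):

/* Hypothetical board config: 2 GiB of DRAM plus one device window */
#define CONFIG_SYS_MEM_MAP	{{0x000000000UL, 0x80000000UL,		\
				  PTE_BLOCK_MEMTYPE(MT_NORMAL) |	\
				  PTE_BLOCK_INNER_SHARE},		\
				 {0x080000000UL, 0x80000000UL,		\
				  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |	\
				  PTE_BLOCK_NON_SHARE},			\
				}
#define CONFIG_SYS_MEM_MAP_SIZE	2

With the new cache_v8.c code the table memory for such a map is no longer a fixed CONFIG_SYS_PGTABLE_SIZE; it is estimated at runtime by get_page_table_size() via PGTABLE_SIZE.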