Diffstat (limited to 'lib')
-rw-r--r-- | lib/compiler-rt/builtins/ctzdi2.c              |  29
-rw-r--r-- | lib/compiler-rt/compiler-rt.mk                 |   3
-rw-r--r-- | lib/psci/psci_on.c                             |  13
-rw-r--r-- | lib/psci/psci_suspend.c                        |  11
-rw-r--r-- | lib/stdlib/assert.c                            |   4
-rw-r--r-- | lib/xlat_tables/aarch32/xlat_tables.c          |  53
-rw-r--r-- | lib/xlat_tables/aarch64/xlat_tables.c          |  61
-rw-r--r-- | lib/xlat_tables/xlat_tables_private.h          |  39
-rw-r--r-- | lib/xlat_tables_v2/aarch32/xlat_tables_arch.c  |  79
-rw-r--r-- | lib/xlat_tables_v2/aarch32/xlat_tables_arch.h  |  72
-rw-r--r-- | lib/xlat_tables_v2/aarch64/xlat_tables_arch.c  | 130
-rw-r--r-- | lib/xlat_tables_v2/aarch64/xlat_tables_arch.h  |  85
-rw-r--r-- | lib/xlat_tables_v2/xlat_tables.mk              |   1
-rw-r--r-- | lib/xlat_tables_v2/xlat_tables_common.c        | 144
-rw-r--r-- | lib/xlat_tables_v2/xlat_tables_internal.c      | 170
-rw-r--r-- | lib/xlat_tables_v2/xlat_tables_private.h       | 119
16 files changed, 359 insertions(+), 654 deletions(-)
diff --git a/lib/compiler-rt/builtins/ctzdi2.c b/lib/compiler-rt/builtins/ctzdi2.c
new file mode 100644
index 00000000..db3c6fdc
--- /dev/null
+++ b/lib/compiler-rt/builtins/ctzdi2.c
@@ -0,0 +1,29 @@
+/* ===-- ctzdi2.c - Implement __ctzdi2 -------------------------------------===
+ *
+ *                     The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file implements __ctzdi2 for the compiler_rt library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#include "int_lib.h"
+
+/* Returns: the number of trailing 0-bits */
+
+/* Precondition: a != 0 */
+
+COMPILER_RT_ABI si_int
+__ctzdi2(di_int a)
+{
+    dwords x;
+    x.all = a;
+    const si_int f = -(x.s.low == 0);
+    return __builtin_ctz((x.s.high & f) | (x.s.low & ~f)) +
+           (f & ((si_int)(sizeof(si_int) * CHAR_BIT)));
+}
diff --git a/lib/compiler-rt/compiler-rt.mk b/lib/compiler-rt/compiler-rt.mk
index 3bdd3190..cb5ab31c 100644
--- a/lib/compiler-rt/compiler-rt.mk
+++ b/lib/compiler-rt/compiler-rt.mk
@@ -30,5 +30,6 @@
 ifeq (${ARCH},aarch32)
 COMPILER_RT_SRCS := lib/compiler-rt/builtins/arm/aeabi_uldivmod.S \
-    lib/compiler-rt/builtins/udivmoddi4.c
+    lib/compiler-rt/builtins/udivmoddi4.c \
+    lib/compiler-rt/builtins/ctzdi2.c
 endif
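The branchless word selection is the whole trick in __ctzdi2: f is all-ones exactly when the low 32-bit word is zero, so the expression picks whichever word holds the lowest set bit and adds 32 only in the high-word case. A standalone model using standard fixed-width types in place of compiler-rt's si_int/di_int (the model is illustrative, not part of the commit):

#include <assert.h>
#include <stdint.h>

/* Model of __ctzdi2: int32_t stands in for si_int, int64_t for di_int,
 * and the low/high split below for the dwords union. */
static int32_t ctzdi2_model(int64_t a)
{
    uint32_t low  = (uint32_t)a;          /* x.s.low  */
    uint32_t high = (uint32_t)((uint64_t)a >> 32); /* x.s.high */
    /* f is all-ones when the low word is zero, all-zeros otherwise. */
    int32_t f = -(low == 0);
    /* Pick the word holding the lowest set bit without branching, then
     * add 32 (sizeof(si_int) * CHAR_BIT) only if it came from high. */
    return __builtin_ctz((high & f) | (low & ~f)) + (f & 32);
}

int main(void)
{
    assert(ctzdi2_model(0x10) == 4);        /* bit in the low word  */
    assert(ctzdi2_model(1LL << 40) == 40);  /* bit in the high word */
    assert(ctzdi2_model((int64_t)(1ULL << 63)) == 63);
    return 0;
}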
diff --git a/lib/psci/psci_on.c b/lib/psci/psci_on.c
index 16b22c2a..d3d0e2ff 100644
--- a/lib/psci/psci_on.c
+++ b/lib/psci/psci_on.c
@@ -64,7 +64,20 @@ int psci_cpu_on_start(u_register_t target_cpu,
 	/*
 	 * Generic management: Ensure that the cpu is off to be
 	 * turned on.
+	 * Perform cache maintenance ahead of reading the target CPU state to
+	 * ensure that the data is not stale.
+	 * There is a theoretical edge case where the cache may contain stale
+	 * data for the target CPU - this can occur under the following
+	 * conditions:
+	 * - the target CPU is in another cluster from the current
+	 * - the target CPU was the last CPU to shutdown on its cluster
+	 * - the cluster was removed from coherency as part of the CPU shutdown
+	 *
+	 * In this case the cache maintenance that was performed as part of the
+	 * target CPU's shutdown was not seen by the current CPU's cluster, and
+	 * so the cache may contain stale data for the target CPU.
 	 */
+	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
 	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
 	if (rc != PSCI_E_SUCCESS)
 		goto exit;
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index 0d1589ee..40ecdeea 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -80,6 +80,17 @@ static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
 	if (psci_spd_pm && psci_spd_pm->svc_suspend)
 		psci_spd_pm->svc_suspend(max_off_lvl);
 
+#if !HW_ASSISTED_COHERENCY
+	/*
+	 * Plat. management: Allow the platform to perform any early
+	 * actions required to power down the CPU. This might be useful for
+	 * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
+	 * actions with data caches enabled.
+	 */
+	if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early)
+		psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
+#endif
+
 	/*
 	 * Store the re-entry information for the non-secure world.
 	 */
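The new pwr_domain_suspend_pwrdown_early hook is optional: a platform opts in simply by populating the pointer in its plat_psci_ops_t. A minimal sketch of platform code using it (all "fake_" names are made up for illustration):

#include <psci.h>

/*
 * Runs early in psci_suspend_to_pwrdown_start(), while the data cache is
 * still enabled, and only on HW_ASSISTED_COHERENCY = 0 builds.
 */
static void fake_pwr_domain_suspend_pwrdown_early(
				const psci_power_state_t *target_state)
{
	/* e.g. kick off a slow power-controller transaction ahead of the
	 * cache-disabled part of the powerdown sequence */
}

static const plat_psci_ops_t fake_plat_psci_ops = {
	.pwr_domain_suspend_pwrdown_early =
				fake_pwr_domain_suspend_pwrdown_early,
	/* ...mandatory handlers (pwr_domain_suspend, etc.) omitted... */
};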
diff --git a/lib/stdlib/assert.c b/lib/stdlib/assert.c
index 41f70703..97fab4b0 100644
--- a/lib/stdlib/assert.c
+++ b/lib/stdlib/assert.c
@@ -17,14 +17,14 @@
 #if PLAT_LOG_LEVEL_ASSERT >= LOG_LEVEL_VERBOSE
 void __assert(const char *file, unsigned int line, const char *assertion)
 {
-	tf_printf("ASSERT: %s <%d> : %s\n", file, line, assertion);
+	tf_printf("ASSERT: %s:%d:%s\n", file, line, assertion);
 	console_flush();
 	plat_panic_handler();
 }
 #elif PLAT_LOG_LEVEL_ASSERT >= LOG_LEVEL_INFO
 void __assert(const char *file, unsigned int line)
 {
-	tf_printf("ASSERT: %s <%d>\n", file, line);
+	tf_printf("ASSERT: %s:%d\n", file, line);
 	console_flush();
 	plat_panic_handler();
 }
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
index 9c156240..c7e34f20 100644
--- a/lib/xlat_tables/aarch32/xlat_tables.c
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -7,56 +7,17 @@
 #include <arch.h>
 #include <arch_helpers.h>
 #include <assert.h>
-#include <cassert.h>
 #include <platform_def.h>
 #include <utils.h>
+#include <xlat_tables_arch.h>
 #include <xlat_tables.h>
 #include "../xlat_tables_private.h"
 
-/*
- * Each platform can define the size of the virtual address space, which is
- * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus
- * the width of said address space. The value of TTBCR.TxSZ must be in the
- * range 0 to 7 [1], which means that the virtual address space width must be
- * in the range 32 to 25 bits.
- *
- * Here we calculate the initial lookup level from the value of
- * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 1 supports virtual
- * address spaces of widths 32 to 31 bits, and level 2 from 30 to 25. Wider or
- * narrower address spaces are not supported. As a result, level 3 cannot be
- * used as initial lookup level with 4 KB granularity [1].
- *
- * For example, for a 31-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
- * 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table
- * G4-5 in the ARM ARM, the initial lookup level for an address space like that
- * is 1.
- *
- * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
- * information:
- * [1] Section G4.6.5
- */
-
-#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))
-
-# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
-
-#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
-
-# define XLAT_TABLE_LEVEL_BASE	1
-# define NUM_BASE_LEVEL_ENTRIES	\
-		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
-
-#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))
-
-# define XLAT_TABLE_LEVEL_BASE	2
-# define NUM_BASE_LEVEL_ENTRIES	\
-		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
-
-#else
-
-# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
+#define XLAT_TABLE_LEVEL_BASE	\
+	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)
 
-#endif
+#define NUM_BASE_LEVEL_ENTRIES	\
+	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
 
 static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
 		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
@@ -127,13 +88,13 @@ void enable_mmu_secure(unsigned int flags)
 		ttbcr = TTBCR_EAE_BIT |
 			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
 			TTBCR_RGN0_INNER_NC |
-			(32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
+			(32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
 	} else {
 		/* Inner & outer WBWA & shareable. */
 		ttbcr = TTBCR_EAE_BIT |
 			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
 			TTBCR_RGN0_INNER_WBA |
-			(32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
+			(32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
 	}
 	ttbcr |= TTBCR_EPD1_BIT;
 	write_ttbcr(ttbcr);
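The __builtin_ctzl to __builtin_ctzll change matters on AArch32, where unsigned long is 32 bits: for a 4 GB address space, PLAT_VIRT_ADDR_SPACE_SIZE is 1ULL << 32, which truncates to 0 through the uintptr_t cast, and __builtin_ctzl(0) is undefined. A minimal illustration (this is presumably also why ctzdi2.c is added to the AArch32 compiler-rt sources above: the 64-bit builtin can lower to a __ctzdi2 libcall):

#include <stdio.h>

int main(void)
{
	unsigned long long va_space_size = 1ULL << 32;	/* 4 GB */

	/* Old code, on a 32-bit target: (uintptr_t)va_space_size == 0 and
	 * __builtin_ctzl(0) is undefined behaviour. */

	/* New code: the 64-bit builtin sees the whole value, giving
	 * TTBCR.T0SZ = 32 - 32 = 0, i.e. the full 32-bit input range. */
	printf("T0SZ = %d\n", (int)(32 - __builtin_ctzll(va_space_size)));
	return 0;
}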
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index 309cb9bd..2ddf8cba 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -8,66 +8,19 @@
 #include <arch_helpers.h>
 #include <assert.h>
 #include <bl_common.h>
-#include <cassert.h>
 #include <common_def.h>
 #include <platform_def.h>
 #include <sys/types.h>
 #include <utils.h>
 #include <xlat_tables.h>
+#include <xlat_tables_arch.h>
 #include "../xlat_tables_private.h"
 
-/*
- * Each platform can define the size of the virtual address space, which is
- * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the
- * width of said address space. The value of TCR.TxSZ must be in the range 16
- * to 39 [1], which means that the virtual address space width must be in the
- * range 48 to 25 bits.
- *
- * Here we calculate the initial lookup level from the value of
- * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 0 supports virtual
- * address spaces of widths 48 to 40 bits, level 1 from 39 to 31, and level 2
- * from 30 to 25. Wider or narrower address spaces are not supported. As a
- * result, level 3 cannot be used as initial lookup level with 4 KB
- * granularity. [2]
- *
- * For example, for a 35-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
- * 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
- * D4-11 in the ARM ARM, the initial lookup level for an address space like
- * that is 1.
- *
- * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
- * information:
- * [1] Page 1730: 'Input address size', 'For all translation stages'.
- * [2] Section D4.2.5
- */
-
-#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))
-
-# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
-
-#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)
-
-# define XLAT_TABLE_LEVEL_BASE	0
-# define NUM_BASE_LEVEL_ENTRIES	\
-		(PLAT_VIRT_ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)
-
-#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
-
-# define XLAT_TABLE_LEVEL_BASE	1
-# define NUM_BASE_LEVEL_ENTRIES	\
-		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
-
-#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (64 - TCR_TxSZ_MAX))
-
-# define XLAT_TABLE_LEVEL_BASE	2
-# define NUM_BASE_LEVEL_ENTRIES	\
-		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
-
-#else
-
-# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
+#define XLAT_TABLE_LEVEL_BASE	\
+	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)
 
-#endif
+#define NUM_BASE_LEVEL_ENTRIES	\
+	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
 
 static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
 		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
@@ -192,12 +145,12 @@ void init_xlat_tables(void)
 			/* Inner & outer non-cacheable non-shareable. */\
 			tcr = TCR_SH_NON_SHAREABLE |			\
 				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
-				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
+				(64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
 		} else {						\
 			/* Inner & outer WBWA & shareable. */		\
 			tcr = TCR_SH_INNER_SHAREABLE |			\
 				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
-				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
+				(64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));\
 		}							\
 		tcr |= _tcr_extra;					\
 		write_tcr_el##_el(tcr);					\
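GET_XLAT_TABLE_LEVEL_BASE and GET_NUM_BASE_LEVEL_ENTRIES now live in the shared xlat_tables_arch.h header, which is not part of this diff, so their definitions are not shown here. A standalone model of what the deleted #if ladder computed for AArch64 with 4 KB granules makes the numbers concrete (the helper below is illustrative only):

#include <assert.h>

/* Model of the deleted AArch64 ladder for 4 KB granules: the base lookup
 * level is the first level whose per-entry span fits the VA space.
 * One L0 entry spans 2^39 bytes, one L1 entry 2^30, one L2 entry 2^21. */
static int base_level(unsigned long long va_space_size)
{
	if (va_space_size > (1ULL << 39))
		return 0;
	if (va_space_size > (1ULL << 30))
		return 1;
	return 2;
}

int main(void)
{
	/* 35-bit space: starts at level 1 with 2^35 / 2^30 = 32 entries. */
	assert(base_level(1ULL << 35) == 1);
	assert(((1ULL << 35) >> 30) == 32);

	/* 48-bit space: starts at level 0 with 2^48 / 2^39 = 512 entries. */
	assert(base_level(1ULL << 48) == 0);
	assert(((1ULL << 48) >> 39) == 512);
	return 0;
}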
diff --git a/lib/xlat_tables/xlat_tables_private.h b/lib/xlat_tables/xlat_tables_private.h
index b5c3ac84..50d6bd59 100644
--- a/lib/xlat_tables/xlat_tables_private.h
+++ b/lib/xlat_tables/xlat_tables_private.h
@@ -9,7 +9,7 @@
 #include <cassert.h>
 #include <platform_def.h>
-#include <utils_def.h>
+#include <xlat_tables_arch.h>
 
 /*
  * If the platform hasn't defined a physical and a virtual address space size
@@ -28,41 +28,14 @@
 # endif
 #endif
 
-/* The virtual and physical address space sizes must be powers of two. */
-CASSERT(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE),
+CASSERT(CHECK_VIRT_ADDR_SPACE_SIZE(PLAT_VIRT_ADDR_SPACE_SIZE),
 	assert_valid_virt_addr_space_size);
-CASSERT(IS_POWER_OF_TWO(PLAT_PHY_ADDR_SPACE_SIZE),
-	assert_valid_phy_addr_space_size);
-
-/*
- * In AArch32 state, the MMU only supports 4KB page granularity, which means
- * that the first translation table level is either 1 or 2. Both of them are
- * allowed to have block and table descriptors. See section G4.5.6 of the
- * ARMv8-A Architecture Reference Manual (DDI 0487A.k) for more information.
- *
- * In AArch64 state, the MMU may support 4 KB, 16 KB and 64 KB page
- * granularity. For 4KB granularity, a level 0 table descriptor doesn't support
- * block translation. For 16KB, the same thing happens to levels 0 and 1. For
- * 64KB, same for level 1. See section D4.3.1 of the ARMv8-A Architecture
- * Reference Manual (DDI 0487A.k) for more information.
- *
- * The define below specifies the first table level that allows block
- * descriptors.
- */
-#ifdef AARCH32
-
-# define XLAT_BLOCK_LEVEL_MIN 1
-
-#else /* if AArch64 */
-
-# if PAGE_SIZE == (4*1024) /* 4KB */
-#  define XLAT_BLOCK_LEVEL_MIN 1
-# else /* 16KB or 64KB */
-#  define XLAT_BLOCK_LEVEL_MIN 2
-# endif
+CASSERT(CHECK_PHY_ADDR_SPACE_SIZE(PLAT_PHY_ADDR_SPACE_SIZE),
+	assert_valid_phy_addr_space_size);
 
-#endif /* AARCH32 */
+/* Alias to retain compatibility with the old #define name */
+#define XLAT_BLOCK_LEVEL_MIN MIN_LVL_BLOCK_DESC
 
 void print_mmap(void);
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index 40fd2d0b..be18552e 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -14,7 +14,7 @@
 #include "../xlat_tables_private.h"
 
 #if ENABLE_ASSERTIONS
-static unsigned long long xlat_arch_get_max_supported_pa(void)
+unsigned long long xlat_arch_get_max_supported_pa(void)
 {
 	/* Physical address space size for long descriptor format. */
 	return (1ull << 40) - 1ull;
@@ -81,24 +81,22 @@ uint64_t xlat_arch_get_xn_desc(int el __unused)
 	return UPPER_ATTRS(XN);
 }
 
-void init_xlat_tables_arch(unsigned long long max_pa)
-{
-	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
-	       xlat_arch_get_max_supported_pa());
-}
-
 /*******************************************************************************
- * Function for enabling the MMU in Secure PL1, assuming that the
- * page-tables have already been created.
+ * Function for enabling the MMU in Secure PL1, assuming that the page tables
+ * have already been created.
 ******************************************************************************/
-void enable_mmu_internal_secure(unsigned int flags, uint64_t *base_table)
-
+void enable_mmu_arch(unsigned int flags,
+		uint64_t *base_table,
+		unsigned long long max_pa,
+		uintptr_t max_va)
 {
 	u_register_t mair0, ttbcr, sctlr;
 	uint64_t ttbr0;
 
 	assert(IS_IN_SECURE());
-	assert((read_sctlr() & SCTLR_M_BIT) == 0);
+
+	sctlr = read_sctlr();
+	assert((sctlr & SCTLR_M_BIT) == 0);
 
 	/* Invalidate TLBs at the current exception level */
 	tlbiall();
@@ -109,29 +107,56 @@
 			ATTR_IWBWA_OWBWA_NTR_INDEX);
 	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
 			ATTR_NON_CACHEABLE_INDEX);
-	write_mair0(mair0);
 
 	/*
-	 * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
+	 * Configure the control register for stage 1 of the PL1&0 translation
+	 * regime.
+	 */
+
+	/* Use the Long-descriptor translation table format. */
+	ttbcr = TTBCR_EAE_BIT;
+
+	/*
+	 * Disable translation table walk for addresses that are translated
+	 * using TTBR1. Therefore, only TTBR0 is used.
+	 */
+	ttbcr |= TTBCR_EPD1_BIT;
+
+	/*
+	 * Limit the input address ranges and memory region sizes translated
+	 * using TTBR0 to the given virtual address space size, if smaller than
+	 * 32 bits.
+	 */
+	if (max_va != UINT32_MAX) {
+		uintptr_t virtual_addr_space_size = max_va + 1;
+		assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+		/*
+		 * __builtin_ctzll(0) is undefined but here we are guaranteed
+		 * that virtual_addr_space_size is in the range [1, UINT32_MAX].
+		 */
+		ttbcr |= 32 - __builtin_ctzll(virtual_addr_space_size);
+	}
+
+	/*
+	 * Set the cacheability and shareability attributes for memory
+	 * associated with translation table walks using TTBR0.
 	 */
 	if (flags & XLAT_TABLE_NC) {
 		/* Inner & outer non-cacheable non-shareable. */
-		ttbcr = TTBCR_EAE_BIT |
-			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
-			TTBCR_RGN0_INNER_NC |
-			(32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
+		ttbcr |= TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
+			TTBCR_RGN0_INNER_NC;
 	} else {
 		/* Inner & outer WBWA & shareable. */
-		ttbcr = TTBCR_EAE_BIT |
-			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
-			TTBCR_RGN0_INNER_WBA |
-			(32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
+		ttbcr |= TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
+			TTBCR_RGN0_INNER_WBA;
 	}
-	ttbcr |= TTBCR_EPD1_BIT;
-	write_ttbcr(ttbcr);
 
 	/* Set TTBR0 bits as well */
 	ttbr0 = (uint64_t)(uintptr_t) base_table;
+
+	/* Now program the relevant system registers */
+	write_mair0(mair0);
+	write_ttbcr(ttbcr);
 	write64_ttbr0(ttbr0);
 	write64_ttbr1(0);
@@ -144,7 +169,6 @@
 	dsbish();
 	isb();
 
-	sctlr = read_sctlr();
 	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
 
 	if (flags & DISABLE_DCACHE)
@@ -157,8 +181,3 @@
 	/* Ensure the MMU enable takes effect immediately */
 	isb();
 }
-
-void enable_mmu_arch(unsigned int flags, uint64_t *base_table)
-{
-	enable_mmu_internal_secure(flags, base_table);
-}
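On the AArch32 side, TTBCR.T0SZ is now derived from the context's max_va rather than the compile-time address-space size, and is only programmed when the space is smaller than the full 4 GB. A quick check of the arithmetic, with illustrative values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* max_va == UINT32_MAX: enable_mmu_arch() skips the T0SZ field,
	 * leaving it 0, i.e. TTBR0 translates the whole 32-bit range. */

	/* A 1 GB virtual address space: max_va = 2^30 - 1. */
	uint32_t max_va = (1U << 30) - 1;
	unsigned long long size = (unsigned long long)max_va + 1ULL;
	assert(32 - __builtin_ctzll(size) == 2);	/* TTBCR.T0SZ = 2 */
	return 0;
}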
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.h b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.h
deleted file mode 100644
index f75ab791..00000000
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef __XLAT_TABLES_ARCH_H__
-#define __XLAT_TABLES_ARCH_H__
-
-#include <arch.h>
-#include <platform_def.h>
-#include <xlat_tables_defs.h>
-#include "../xlat_tables_private.h"
-
-/*
- * In AArch32 state, the MMU only supports 4KB page granularity, which means
- * that the first translation table level is either 1 or 2. Both of them are
- * allowed to have block and table descriptors. See section G4.5.6 of the
- * ARMv8-A Architecture Reference Manual (DDI 0487A.k) for more information.
- *
- * The define below specifies the first table level that allows block
- * descriptors.
- */
-
-#define MIN_LVL_BLOCK_DESC 1
-
-/*
- * Each platform can define the size of the virtual address space, which is
- * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus
- * the width of said address space. The value of TTBCR.TxSZ must be in the
- * range 0 to 7 [1], which means that the virtual address space width must be
- * in the range 32 to 25 bits.
- *
- * Here we calculate the initial lookup level from the value of
- * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 1 supports virtual
- * address spaces of widths 32 to 31 bits, and level 2 from 30 to 25. Wider or
- * narrower address spaces are not supported. As a result, level 3 cannot be
- * used as initial lookup level with 4 KB granularity [1].
- *
- * For example, for a 31-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
- * 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table
- * G4-5 in the ARM ARM, the initial lookup level for an address space like that
- * is 1.
- *
- * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
- * information:
- * [1] Section G4.6.5
- */
-
-#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))
-
-# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
-
-#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
-
-# define XLAT_TABLE_LEVEL_BASE	1
-# define NUM_BASE_LEVEL_ENTRIES	\
-		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
-
-#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))
-
-# define XLAT_TABLE_LEVEL_BASE	2
-# define NUM_BASE_LEVEL_ENTRIES	\
-		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
-
-#else
-
-# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
-
-#endif
-
-#endif /* __XLAT_TABLES_ARCH_H__ */
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 14f6cd6a..61eac106 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -22,8 +22,6 @@
 # define IMAGE_EL	1
 #endif
 
-static unsigned long long tcr_ps_bits;
-
 static unsigned long long calc_physical_addr_size_bits(
 					unsigned long long max_addr)
 {
@@ -60,7 +58,7 @@
 	PARANGE_0101
 };
 
-static unsigned long long xlat_arch_get_max_supported_pa(void)
+unsigned long long xlat_arch_get_max_supported_pa(void)
 {
 	u_register_t pa_range = read_id_aa64mmfr0_el1() &
 						ID_AA64MMFR0_EL1_PARANGE_MASK;
@@ -146,73 +144,28 @@ uint64_t xlat_arch_get_xn_desc(int el)
 	}
 }
 
-void init_xlat_tables_arch(unsigned long long max_pa)
-{
-	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <=
-	       xlat_arch_get_max_supported_pa());
-
-	/*
-	 * If dynamic allocation of new regions is enabled the code can't make
-	 * assumptions about the max physical address because it could change
-	 * after adding new regions. If this functionality is disabled it is
-	 * safer to restrict the max physical address as much as possible.
-	 */
-#ifdef PLAT_XLAT_TABLES_DYNAMIC
-	tcr_ps_bits = calc_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);
-#else
-	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
-#endif
-}
-
 /*******************************************************************************
  * Macro generating the code for the function enabling the MMU in the given
  * exception level, assuming that the pagetables have already been created.
  *
  *   _el:		Exception level at which the function will run
- *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
- *			be OR'ed with the default TCR value.
  *   _tlbi_fct:	Function to invalidate the TLBs at the current
  *			exception level
 ******************************************************************************/
-#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
-	void enable_mmu_internal_el##_el(unsigned int flags,		\
-					 uint64_t *base_table)		\
+#define DEFINE_ENABLE_MMU_EL(_el, _tlbi_fct)				\
+	static void enable_mmu_internal_el##_el(int flags,		\
+						uint64_t mair,		\
+						uint64_t tcr,		\
+						uint64_t ttbr)		\
 	{								\
-		uint64_t mair, tcr, ttbr;				\
-		uint32_t sctlr;						\
-									\
-		assert(IS_IN_EL(_el));					\
-		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);	\
+		uint32_t sctlr = read_sctlr_el##_el();			\
+		assert((sctlr & SCTLR_M_BIT) == 0);			\
 									\
 		/* Invalidate TLBs at the current exception level */	\
 		_tlbi_fct();						\
 									\
-		/* Set attributes in the right indices of the MAIR */	\
-		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
-		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
-				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
-		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
-				ATTR_NON_CACHEABLE_INDEX);		\
 		write_mair_el##_el(mair);				\
-									\
-		/* Set TCR bits as well. */				\
-		/* Set T0SZ to (64 - width of virtual address space) */\
-		if (flags & XLAT_TABLE_NC) {				\
-			/* Inner & outer non-cacheable non-shareable. */\
-			tcr = TCR_SH_NON_SHAREABLE |			\
-				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
-				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
-		} else {						\
-			/* Inner & outer WBWA & shareable. */		\
-			tcr = TCR_SH_INNER_SHAREABLE |			\
-				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
-				(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
-		}							\
-		tcr |= _tcr_extra;					\
 		write_tcr_el##_el(tcr);					\
-									\
-		/* Set TTBR bits as well */				\
-		ttbr = (uint64_t) base_table;				\
 		write_ttbr0_el##_el(ttbr);				\
 									\
 		/* Ensure all translation table writes have drained */	\
@@ -222,9 +175,7 @@
 		dsbish();						\
 		isb();							\
 									\
-		sctlr = read_sctlr_el##_el();				\
 		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
-									\
 		if (flags & DISABLE_DCACHE)				\
 			sctlr &= ~SCTLR_C_BIT;				\
 		else							\
@@ -238,22 +189,69 @@
 
 /* Define EL1 and EL3 variants of the function enabling the MMU */
 #if IMAGE_EL == 1
-DEFINE_ENABLE_MMU_EL(1,
-		(tcr_ps_bits << TCR_EL1_IPS_SHIFT),
-		tlbivmalle1)
+DEFINE_ENABLE_MMU_EL(1, tlbivmalle1)
 #elif IMAGE_EL == 3
-DEFINE_ENABLE_MMU_EL(3,
-		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
-		tlbialle3)
+DEFINE_ENABLE_MMU_EL(3, tlbialle3)
 #endif
 
-void enable_mmu_arch(unsigned int flags, uint64_t *base_table)
+void enable_mmu_arch(unsigned int flags,
+		uint64_t *base_table,
+		unsigned long long max_pa,
+		uintptr_t max_va)
 {
+	uint64_t mair, ttbr, tcr;
+
+	/* Set attributes in the right indices of the MAIR. */
+	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
+	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
+	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);
+
+	ttbr = (uint64_t) base_table;
+
+	/*
+	 * Set TCR bits as well.
+	 */
+
+	/*
+	 * Limit the input address ranges and memory region sizes translated
+	 * using TTBR0 to the given virtual address space size.
+	 */
+	assert(max_va < UINTPTR_MAX);
+	uintptr_t virtual_addr_space_size = max_va + 1;
+	assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+	/*
+	 * __builtin_ctzll(0) is undefined but here we are guaranteed that
+	 * virtual_addr_space_size is in the range [1, UINTPTR_MAX].
+	 */
+	tcr = 64 - __builtin_ctzll(virtual_addr_space_size);
+
+	/*
+	 * Set the cacheability and shareability attributes for memory
+	 * associated with translation table walks.
+	 */
+	if (flags & XLAT_TABLE_NC) {
+		/* Inner & outer non-cacheable non-shareable. */
+		tcr |= TCR_SH_NON_SHAREABLE |
+			TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
+	} else {
+		/* Inner & outer WBWA & shareable. */
+		tcr |= TCR_SH_INNER_SHAREABLE |
+			TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
+	}
+
+	/*
+	 * It is safer to restrict the max physical address accessible by the
+	 * hardware as much as possible.
+	 */
+	unsigned long long tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
+
 #if IMAGE_EL == 1
 	assert(IS_IN_EL(1));
-	enable_mmu_internal_el1(flags, base_table);
+	tcr |= tcr_ps_bits << TCR_EL1_IPS_SHIFT;
+	enable_mmu_internal_el1(flags, mair, tcr, ttbr);
 #elif IMAGE_EL == 3
 	assert(IS_IN_EL(3));
-	enable_mmu_internal_el3(flags, base_table);
+	tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
+	enable_mmu_internal_el3(flags, mair, tcr, ttbr);
 #endif
 }
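The EL-specific functions are now trivial: every TCR field is computed once in enable_mmu_arch() from the runtime max_pa/max_va instead of from a global set at init time. A numeric sketch of the arithmetic under assumed inputs (a 36-bit VA space, all mapped PAs below 4 GB):

#include <assert.h>

int main(void)
{
	/* Assumed context state, for illustration only. */
	unsigned long long va_space_size = 1ULL << 36;	/* 36-bit VA space */

	/* T0SZ = 64 - log2(VA space size) = 64 - 36 = 28. */
	assert(64 - __builtin_ctzll(va_space_size) == 28);

	/* For max_pa < 4 GB, calc_physical_addr_size_bits() picks the
	 * smallest sufficient PARANGE encoding (the 32-bit one). In a build
	 * without dynamic regions, max_pa is the highest address actually
	 * mapped, so the IPS/PS field can be tighter than the compile-time
	 * worst case the deleted init_xlat_tables_arch() had to assume. */
	return 0;
}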
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.h b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.h
deleted file mode 100644
index caccb736..00000000
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef __XLAT_TABLES_ARCH_H__
-#define __XLAT_TABLES_ARCH_H__
-
-#include <arch.h>
-#include <platform_def.h>
-#include <xlat_tables_defs.h>
-#include "../xlat_tables_private.h"
-
-/*
- * In AArch64 state, the MMU may support 4 KB, 16 KB and 64 KB page
- * granularity. For 4KB granularity, a level 0 table descriptor doesn't support
- * block translation. For 16KB, the same thing happens to levels 0 and 1. For
- * 64KB, same for level 1. See section D4.3.1 of the ARMv8-A Architecture
- * Reference Manual (DDI 0487A.k) for more information.
- *
- * The define below specifies the first table level that allows block
- * descriptors.
- */
-
-#if PAGE_SIZE == (4*1024) /* 4KB */
-# define MIN_LVL_BLOCK_DESC 1
-#else /* 16KB or 64KB */
-# define MIN_LVL_BLOCK_DESC 2
-#endif
-
-/*
- * Each platform can define the size of the virtual address space, which is
- * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the
- * width of said address space. The value of TCR.TxSZ must be in the range 16
- * to 39 [1], which means that the virtual address space width must be in the
- * range 48 to 25 bits.
- *
- * Here we calculate the initial lookup level from the value of
- * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 0 supports virtual
- * address spaces of widths 48 to 40 bits, level 1 from 39 to 31, and level 2
- * from 30 to 25. Wider or narrower address spaces are not supported. As a
- * result, level 3 cannot be used as initial lookup level with 4 KB
- * granularity. [2]
- *
- * For example, for a 35-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
- * 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
- * D4-11 in the ARM ARM, the initial lookup level for an address space like
- * that is 1.
- *
- * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
- * information:
- * [1] Page 1730: 'Input address size', 'For all translation stages'.
- * [2] Section D4.2.5
- */
-
-#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))
-
-# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
-
-#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)
-
-# define XLAT_TABLE_LEVEL_BASE	0
-# define NUM_BASE_LEVEL_ENTRIES	\
-		(PLAT_VIRT_ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)
-
-#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
-
-# define XLAT_TABLE_LEVEL_BASE	1
-# define NUM_BASE_LEVEL_ENTRIES	\
-		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
-
-#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (64 - TCR_TxSZ_MAX))
-
-# define XLAT_TABLE_LEVEL_BASE	2
-# define NUM_BASE_LEVEL_ENTRIES	\
-		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
-
-#else
-
-# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
-
-#endif
-
-#endif /* __XLAT_TABLES_ARCH_H__ */
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
index 4f804341..b94ce5d0 100644
--- a/lib/xlat_tables_v2/xlat_tables.mk
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -6,5 +6,4 @@
 XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
     ${ARCH}/xlat_tables_arch.c \
-    xlat_tables_common.c \
     xlat_tables_internal.c)
diff --git a/lib/xlat_tables_v2/xlat_tables_common.c b/lib/xlat_tables_v2/xlat_tables_common.c
deleted file mode 100644
index f20bf93a..00000000
--- a/lib/xlat_tables_v2/xlat_tables_common.c
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <cassert.h>
-#include <common_def.h>
-#include <debug.h>
-#include <errno.h>
-#include <platform_def.h>
-#include <string.h>
-#include <types.h>
-#include <utils.h>
-#include <xlat_tables_v2.h>
-#ifdef AARCH32
-# include "aarch32/xlat_tables_arch.h"
-#else
-# include "aarch64/xlat_tables_arch.h"
-#endif
-#include "xlat_tables_private.h"
-
-/*
- * Private variables used by the TF
- */
-static mmap_region_t tf_mmap[MAX_MMAP_REGIONS + 1];
-
-static uint64_t tf_xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
-			__aligned(XLAT_TABLE_SIZE) __section("xlat_table");
-
-static uint64_t tf_base_xlat_table[NUM_BASE_LEVEL_ENTRIES]
-		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
-
-#if PLAT_XLAT_TABLES_DYNAMIC
-static int xlat_tables_mapped_regions[MAX_XLAT_TABLES];
-#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-
-xlat_ctx_t tf_xlat_ctx = {
-
-	.pa_max_address = PLAT_PHY_ADDR_SPACE_SIZE - 1,
-	.va_max_address = PLAT_VIRT_ADDR_SPACE_SIZE - 1,
-
-	.mmap = tf_mmap,
-	.mmap_num = MAX_MMAP_REGIONS,
-
-	.tables = tf_xlat_tables,
-	.tables_num = MAX_XLAT_TABLES,
-#if PLAT_XLAT_TABLES_DYNAMIC
-	.tables_mapped_regions = xlat_tables_mapped_regions,
-#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-
-	.base_table = tf_base_xlat_table,
-	.base_table_entries = NUM_BASE_LEVEL_ENTRIES,
-
-	.max_pa = 0,
-	.max_va = 0,
-
-	.next_table = 0,
-
-	.base_level = XLAT_TABLE_LEVEL_BASE,
-
-	.initialized = 0
-};
-
-void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
-			size_t size, mmap_attr_t attr)
-{
-	mmap_region_t mm = {
-		.base_va = base_va,
-		.base_pa = base_pa,
-		.size = size,
-		.attr = attr,
-	};
-	mmap_add_region_ctx(&tf_xlat_ctx, (mmap_region_t *)&mm);
-}
-
-void mmap_add(const mmap_region_t *mm)
-{
-	while (mm->size) {
-		mmap_add_region_ctx(&tf_xlat_ctx, (mmap_region_t *)mm);
-		mm++;
-	}
-}
-
-#if PLAT_XLAT_TABLES_DYNAMIC
-
-int mmap_add_dynamic_region(unsigned long long base_pa,
-			    uintptr_t base_va, size_t size, mmap_attr_t attr)
-{
-	mmap_region_t mm = {
-		.base_va = base_va,
-		.base_pa = base_pa,
-		.size = size,
-		.attr = attr,
-	};
-	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
-}
-
-int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
-{
-	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx, base_va, size);
-}
-
-#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-
-void init_xlat_tables(void)
-{
-	assert(!is_mmu_enabled());
-	assert(!tf_xlat_ctx.initialized);
-	print_mmap(tf_xlat_ctx.mmap);
-	tf_xlat_ctx.execute_never_mask =
-			xlat_arch_get_xn_desc(xlat_arch_current_el());
-	init_xlation_table(&tf_xlat_ctx);
-	xlat_tables_print(&tf_xlat_ctx);
-
-	assert(tf_xlat_ctx.max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
-	assert(tf_xlat_ctx.max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
-
-	init_xlat_tables_arch(tf_xlat_ctx.max_pa);
-}
-
-#ifdef AARCH32
-
-void enable_mmu_secure(unsigned int flags)
-{
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
-}
-
-#else
-
-void enable_mmu_el1(unsigned int flags)
-{
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
-}
-
-void enable_mmu_el3(unsigned int flags)
-{
-	enable_mmu_arch(flags, tf_xlat_ctx.base_table);
-}
-
-#endif /* AARCH32 */
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index f60d78c1..cd6e11c0 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -7,7 +7,6 @@
 #include <arch.h>
 #include <arch_helpers.h>
 #include <assert.h>
-#include <cassert.h>
 #include <common_def.h>
 #include <debug.h>
 #include <errno.h>
@@ -15,14 +14,37 @@
 #include <string.h>
 #include <types.h>
 #include <utils.h>
+#include <xlat_tables_arch.h>
+#include <xlat_tables_defs.h>
 #include <xlat_tables_v2.h>
-#ifdef AARCH32
-# include "aarch32/xlat_tables_arch.h"
-#else
-# include "aarch64/xlat_tables_arch.h"
-#endif
+
 #include "xlat_tables_private.h"
 
+/*
+ * Each platform can define the size of its physical and virtual address
+ * spaces. If the platform hasn't defined one or both of them, default to
+ * ADDR_SPACE_SIZE. The latter is deprecated, though.
+ */
+#if ERROR_DEPRECATED
+# ifdef ADDR_SPACE_SIZE
+#  error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
+# endif
+#elif defined(ADDR_SPACE_SIZE)
+# ifndef PLAT_PHY_ADDR_SPACE_SIZE
+#  define PLAT_PHY_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
+# endif
+# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
+#  define PLAT_VIRT_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
+# endif
+#endif
+
+/*
+ * Allocate and initialise the default translation context for the BL image
+ * currently executing.
+ */
+REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
+		PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
+
 #if PLAT_XLAT_TABLES_DYNAMIC
 
 /*
@@ -335,7 +357,7 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
  */
 static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
 		const int desc_type, const unsigned long long dest_pa,
-		const uintptr_t table_entry_base_va, const int level)
+		const uintptr_t table_entry_base_va, const unsigned int level)
 {
 	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
 	uintptr_t table_entry_end_va =
@@ -666,7 +688,7 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
 	return 0;
 }
 
-void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
+void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
 {
 	mmap_region_t *mm_cursor = ctx->mmap;
 	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
@@ -743,6 +765,34 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
 		ctx->max_va = end_va;
 }
 
+void mmap_add_region(unsigned long long base_pa,
+			uintptr_t base_va,
+			size_t size,
+			mmap_attr_t attr)
+{
+	mmap_region_t mm = {
+		.base_va = base_va,
+		.base_pa = base_pa,
+		.size = size,
+		.attr = attr,
+	};
+	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
+}
+
+
+void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
+{
+	while (mm->size) {
+		mmap_add_region_ctx(ctx, mm);
+		mm++;
+	}
+}
+
+void mmap_add(const mmap_region_t *mm)
+{
+	mmap_add_ctx(&tf_xlat_ctx, mm);
+}
+
 #if PLAT_XLAT_TABLES_DYNAMIC
 
 int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
@@ -839,6 +889,18 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
 	return 0;
 }
 
+int mmap_add_dynamic_region(unsigned long long base_pa,
+			    uintptr_t base_va, size_t size, mmap_attr_t attr)
+{
+	mmap_region_t mm = {
+		.base_va = base_va,
+		.base_pa = base_pa,
+		.size = size,
+		.attr = attr,
+	};
+	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
+}
+
 /*
  * Removes the region with given base Virtual Address and size from the given
  * context.
@@ -914,6 +976,12 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
 	return 0;
 }
 
+int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
+{
+	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
+					base_va, size);
+}
+
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
 
 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
@@ -1042,15 +1110,47 @@ static void xlat_tables_print_internal(const uintptr_t table_base_va,
 void xlat_tables_print(xlat_ctx_t *ctx)
 {
 #if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+	VERBOSE("Translation tables state:\n");
+	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
+	VERBOSE("  Max allowed VA:  %p\n", (void *) ctx->va_max_address);
+	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
+	VERBOSE("  Max mapped VA:   %p\n", (void *) ctx->max_va);
+
+	VERBOSE("  Initial lookup level: %i\n", ctx->base_level);
+	VERBOSE("  Entries @initial lookup level: %i\n",
+		ctx->base_table_entries);
+
+	int used_page_tables;
+#if PLAT_XLAT_TABLES_DYNAMIC
+	used_page_tables = 0;
+	for (unsigned int i = 0; i < ctx->tables_num; ++i) {
+		if (ctx->tables_mapped_regions[i] != 0)
+			++used_page_tables;
+	}
+#else
+	used_page_tables = ctx->next_table;
+#endif
+	VERBOSE("  Used %i sub-tables out of %i (spare: %i)\n",
+		used_page_tables, ctx->tables_num,
+		ctx->tables_num - used_page_tables);
+
 	xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries,
 				   ctx->base_level, ctx->execute_never_mask);
 #endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
 }
 
-void init_xlation_table(xlat_ctx_t *ctx)
+void init_xlat_tables_ctx(xlat_ctx_t *ctx)
 {
 	mmap_region_t *mm = ctx->mmap;
 
+	assert(!is_mmu_enabled());
+	assert(!ctx->initialized);
+
+	print_mmap(mm);
+
+	ctx->execute_never_mask =
+			xlat_arch_get_xn_desc(xlat_arch_current_el());
+
 	/* All tables must be zeroed before mapping any region. */
 
 	for (unsigned int i = 0; i < ctx->base_table_entries; i++)
@@ -1078,5 +1178,57 @@
 		mm++;
 	}
 
+	assert(ctx->pa_max_address <= xlat_arch_get_max_supported_pa());
+	assert(ctx->max_va <= ctx->va_max_address);
+	assert(ctx->max_pa <= ctx->pa_max_address);
+
 	ctx->initialized = 1;
+
+	xlat_tables_print(ctx);
+}
+
+void init_xlat_tables(void)
+{
+	init_xlat_tables_ctx(&tf_xlat_ctx);
+}
+
+/*
+ * If dynamic allocation of new regions is disabled then by the time we call
+ * the function enabling the MMU, we'll have registered all the memory regions
+ * to map for the system's lifetime. Therefore, at this point we know the
+ * maximum physical address that will ever be mapped.
+ *
+ * If dynamic allocation is enabled then we can't make any such assumption
+ * because the maximum physical address could get pushed while adding a new
+ * region. Therefore, in this case we have to assume that the whole address
+ * space size might be mapped.
+ */
+#ifdef PLAT_XLAT_TABLES_DYNAMIC
+#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
+#else
+#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
+#endif
+
+#ifdef AARCH32
+
+void enable_mmu_secure(unsigned int flags)
+{
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
 }
+
+#else
+
+void enable_mmu_el1(unsigned int flags)
+{
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
+}
+
+void enable_mmu_el3(unsigned int flags)
+{
+	enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+			tf_xlat_ctx.va_max_address);
+}
+
+#endif /* AARCH32 */
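With xlat_tables_common.c gone, the default context comes from REGISTER_XLAT_CONTEXT(tf, ...) and the unprefixed mmap_* functions above are thin wrappers over the _ctx variants, so call sites are unchanged. A minimal sketch of typical use from a BL image (the region address, size, and function name are made up for illustration):

#include <xlat_tables_v2.h>

#define FAKE_DEVICE_BASE	0x1c090000ULL	/* hypothetical */
#define FAKE_DEVICE_SIZE	0x00010000

void fake_bl_setup_xlat(void)
{
	/* Static regions go into the default 'tf' context declared by
	 * REGISTER_XLAT_CONTEXT() in this file. */
	mmap_add_region(FAKE_DEVICE_BASE, FAKE_DEVICE_BASE,
			FAKE_DEVICE_SIZE, MT_DEVICE | MT_RW | MT_SECURE);

	init_xlat_tables();

	/* The wrapper forwards the context's MAX_PHYS_ADDR and
	 * va_max_address to enable_mmu_arch(). */
	enable_mmu_el3(0);
}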
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 83e0b6ea..d352583c 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -7,99 +7,8 @@
 #ifndef __XLAT_TABLES_PRIVATE_H__
 #define __XLAT_TABLES_PRIVATE_H__
 
-#include <cassert.h>
 #include <platform_def.h>
-#include <utils_def.h>
-
-/*
- * If the platform hasn't defined a physical and a virtual address space size
- * default to ADDR_SPACE_SIZE.
- */
-#if ERROR_DEPRECATED
-# ifdef ADDR_SPACE_SIZE
-#  error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
-# endif
-#elif defined(ADDR_SPACE_SIZE)
-# ifndef PLAT_PHY_ADDR_SPACE_SIZE
-#  define PLAT_PHY_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
-# endif
-# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
-#  define PLAT_VIRT_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
-# endif
-#endif
-
-/* The virtual and physical address space sizes must be powers of two. */
-CASSERT(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE),
-	assert_valid_virt_addr_space_size);
-CASSERT(IS_POWER_OF_TWO(PLAT_PHY_ADDR_SPACE_SIZE),
-	assert_valid_phy_addr_space_size);
-
-/* Struct that holds all information about the translation tables. */
-typedef struct {
-
-	/*
-	 * Max allowed Virtual and Physical Addresses.
-	 */
-	unsigned long long pa_max_address;
-	uintptr_t va_max_address;
-
-	/*
-	 * Array of all memory regions stored in order of ascending end address
-	 * and ascending size to simplify the code that allows overlapping
-	 * regions. The list is terminated by the first entry with size == 0.
-	 * The max size of the list is stored in `mmap_num`. `mmap` points to an
-	 * array of mmap_num + 1 elements, so that there is space for the final
-	 * null entry.
-	 */
-	mmap_region_t *mmap;
-	unsigned int mmap_num;
-
-	/*
-	 * Array of finer-grain translation tables.
-	 * For example, if the initial lookup level is 1 then this array would
-	 * contain both level-2 and level-3 entries.
-	 */
-	uint64_t (*tables)[XLAT_TABLE_ENTRIES];
-	unsigned int tables_num;
-	/*
-	 * Keep track of how many regions are mapped in each table. The base
-	 * table can't be unmapped so it isn't needed to keep track of it.
-	 */
-#if PLAT_XLAT_TABLES_DYNAMIC
-	int *tables_mapped_regions;
-#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-
-	unsigned int next_table;
-
-	/*
-	 * Base translation table. It doesn't need to have the same amount of
-	 * entries as the ones used for other levels.
-	 */
-	uint64_t *base_table;
-	unsigned int base_table_entries;
-
-	/*
-	 * Max Physical and Virtual addresses currently in use by the
-	 * translation tables. These might get updated as we map/unmap memory
-	 * regions but they will never go beyond pa/va_max_address.
-	 */
-	unsigned long long max_pa;
-	uintptr_t max_va;
-
-	/* Level of the base translation table. */
-	unsigned int base_level;
-
-	/* Set to 1 when the translation tables are initialized. */
-	unsigned int initialized;
-
-	/*
-	 * Bit mask that has to be ORed to the rest of a translation table
-	 * descriptor in order to prohibit execution of code at the exception
-	 * level of this translation context.
-	 */
-	uint64_t execute_never_mask;
-
-} xlat_ctx_t;
+#include <xlat_tables_defs.h>
 
 #if PLAT_XLAT_TABLES_DYNAMIC
 /*
@@ -138,13 +47,6 @@ void xlat_arch_tlbi_va(uintptr_t va);
  */
 void xlat_arch_tlbi_va_sync(void);
 
-/* Add a dynamic region to the specified context. */
-int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
-
-/* Remove a dynamic region from the specified context. */
-int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
-		size_t size);
-
 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
 
 /* Print VA, PA, size and attributes of all regions in the mmap array. */
@@ -157,15 +59,6 @@ void print_mmap(mmap_region_t *const mmap);
 void xlat_tables_print(xlat_ctx_t *ctx);
 
 /*
- * Initialize the translation tables by mapping all regions added to the
- * specified context.
- */
-void init_xlation_table(xlat_ctx_t *ctx);
-
-/* Add a static region to the specified context. */
-void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm);
-
-/*
  * Architecture-specific initialization code.
  */
@@ -179,11 +72,15 @@ int xlat_arch_current_el(void);
  */
 uint64_t xlat_arch_get_xn_desc(int el);
 
-/* Execute architecture-specific translation table initialization code. */
-void init_xlat_tables_arch(unsigned long long max_pa);
+/*
+ * Return the maximum physical address supported by the hardware.
+ * This value depends on the execution state (AArch32/AArch64).
+ */
+unsigned long long xlat_arch_get_max_supported_pa(void);
 
 /* Enable MMU and configure it to use the specified translation tables. */
-void enable_mmu_arch(unsigned int flags, uint64_t *base_table);
+void enable_mmu_arch(unsigned int flags, uint64_t *base_table,
+		unsigned long long pa, uintptr_t max_va);
 
 /* Return 1 if the MMU of this Exception Level is enabled, 0 otherwise. */
 int is_mmu_enabled(void);