Diffstat (limited to 'lib')
 lib/cpus/aarch32/aem_generic.S        |  4
 lib/cpus/aarch32/cortex_a32.S         |  5
 lib/cpus/aarch32/cpu_helpers.S        | 59
 lib/cpus/aarch64/aem_generic.S        |  8
 lib/cpus/aarch64/cortex_a35.S         |  5
 lib/cpus/aarch64/cortex_a53.S         |  5
 lib/cpus/aarch64/cortex_a57.S         |  5
 lib/cpus/aarch64/cortex_a72.S         |  5
 lib/cpus/aarch64/cortex_a73.S         |  5
 lib/cpus/aarch64/cpu_helpers.S        | 44
 lib/cpus/aarch64/denver.S             |  5
 lib/psci/aarch32/psci_helpers.S       | 19
 lib/psci/aarch64/psci_helpers.S       | 20
 lib/psci/psci_off.c                   | 17
 lib/psci/psci_suspend.c               | 17
 lib/xlat_tables/aarch32/xlat_tables.c | 56
 lib/xlat_tables/aarch64/xlat_tables.c | 77
 lib/xlat_tables/xlat_tables_common.c  | 16
 lib/xlat_tables/xlat_tables_private.h | 55
 19 files changed, 278 insertions(+), 149 deletions(-)
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S
index 10ea4e47..3d6064c9 100644
--- a/lib/cpus/aarch32/aem_generic.S
+++ b/lib/cpus/aarch32/aem_generic.S
@@ -65,4 +65,6 @@ func aem_generic_cluster_pwr_dwn
endfunc aem_generic_cluster_pwr_dwn
/* cpu_ops for Base AEM FVP */
-declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+ aem_generic_core_pwr_dwn, \
+ aem_generic_cluster_pwr_dwn
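The macro now takes each handler explicitly, with CPU_NO_RESET_FUNC marking CPUs that need no reset hook. A rough C view of the cpu_ops entry it populates (field names are illustrative, inferred from the CPU_PWR_DWN_OPS offset arithmetic in cpu_helpers.S below):

    /* Sketch only; the real layout is defined by the cpu_macros headers. */
    typedef struct cpu_ops_sketch {
        unsigned long midr;            /* MIDR to match, e.g. BASE_AEM_MIDR */
        void (*reset_func)(void);      /* absent/NULL for CPU_NO_RESET_FUNC */
        void (*pwr_dwn_ops[CPU_MAX_PWR_DWN_OPS])(void);
                                       /* [0] = core, [1] = cluster */
    } cpu_ops_sketch_t;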
diff --git a/lib/cpus/aarch32/cortex_a32.S b/lib/cpus/aarch32/cortex_a32.S
index f2b85a31..f631c4cf 100644
--- a/lib/cpus/aarch32/cortex_a32.S
+++ b/lib/cpus/aarch32/cortex_a32.S
@@ -141,4 +141,7 @@ func cortex_a32_cluster_pwr_dwn
b cortex_a32_disable_smp
endfunc cortex_a32_cluster_pwr_dwn
-declare_cpu_ops cortex_a32, CORTEX_A32_MIDR
+declare_cpu_ops cortex_a32, CORTEX_A32_MIDR, \
+ cortex_a32_reset_func, \
+ cortex_a32_core_pwr_dwn, \
+ cortex_a32_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
index a4dfe5f2..900d158c 100644
--- a/lib/cpus/aarch32/cpu_helpers.S
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -70,50 +70,39 @@ endfunc reset_handler
#if IMAGE_BL32 /* The power down core and cluster is needed only in BL32 */
/*
- * The prepare core power down function for all platforms. After
- * the cpu_ops pointer is retrieved from cpu_data, the corresponding
- * pwr_dwn_core in the cpu_ops is invoked. Follows AAPCS.
+ * void prepare_cpu_pwr_dwn(unsigned int power_level)
+ *
+ * Prepare CPU power down function for all platforms. The function takes
+ * a domain level to be powered down as its parameter. After the cpu_ops
+ * pointer is retrieved from cpu_data, the handler for requested power
+ * level is called.
*/
- .globl prepare_core_pwr_dwn
-func prepare_core_pwr_dwn
- /* r12 is pushed to meet the 8 byte stack alignment requirement */
- push {r12, lr}
- bl _cpu_data
- pop {r12, lr}
-
- ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
-#if ASM_ASSERTION
- cmp r1, #0
- ASM_ASSERT(ne)
-#endif
-
- /* Get the cpu_ops core_pwr_dwn handler */
- ldr r0, [r1, #CPU_PWR_DWN_CORE]
- bx r0
-endfunc prepare_core_pwr_dwn
-
+ .globl prepare_cpu_pwr_dwn
+func prepare_cpu_pwr_dwn
/*
- * The prepare cluster power down function for all platforms. After
- * the cpu_ops pointer is retrieved from cpu_data, the corresponding
- * pwr_dwn_cluster in the cpu_ops is invoked. Follows AAPCS.
+ * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
+ * power down handler for the last power level
*/
- .globl prepare_cluster_pwr_dwn
-func prepare_cluster_pwr_dwn
- /* r12 is pushed to meet the 8 byte stack alignment requirement */
- push {r12, lr}
+ mov r2, #(CPU_MAX_PWR_DWN_OPS - 1)
+ cmp r0, r2
+ movhi r0, r2
+
+ push {r0, lr}
bl _cpu_data
- pop {r12, lr}
+ pop {r2, lr}
- ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR]
+ ldr r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
- cmp r1, #0
+ cmp r0, #0
ASM_ASSERT(ne)
#endif
- /* Get the cpu_ops cluster_pwr_dwn handler */
- ldr r0, [r1, #CPU_PWR_DWN_CLUSTER]
- bx r0
-endfunc prepare_cluster_pwr_dwn
+ /* Get the appropriate power down handler */
+ mov r1, #CPU_PWR_DWN_OPS
+ add r1, r1, r2, lsl #2
+ ldr r1, [r0, r1]
+ bx r1
+endfunc prepare_cpu_pwr_dwn
/*
* Initializes the cpu_ops_ptr if not already initialized
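In C terms, the unified routine clamps the requested level and indexes a per-CPU array of handlers. A minimal sketch, assuming a handler array at offset CPU_PWR_DWN_OPS inside cpu_ops (the accessor name is hypothetical); the assembly computes the same offset as CPU_PWR_DWN_OPS plus level * 4 on AArch32 (lsl #2), or level * 8 on AArch64 (lsl #3):

    void prepare_cpu_pwr_dwn_sketch(unsigned int power_level)
    {
        void (**pwr_dwn_ops)(void);
        void *ops = get_cpu_ops_ptr();  /* hypothetical: cpu_data lookup */

        /* Out-of-range levels fall back to the deepest handler. */
        if (power_level > CPU_MAX_PWR_DWN_OPS - 1U)
            power_level = CPU_MAX_PWR_DWN_OPS - 1U;

        pwr_dwn_ops = (void (**)(void))((char *)ops + CPU_PWR_DWN_OPS);
        pwr_dwn_ops[power_level]();     /* the asm tail-calls via bx/br */
    }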
diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S
index 0ab5253e..0cedd855 100644
--- a/lib/cpus/aarch64/aem_generic.S
+++ b/lib/cpus/aarch64/aem_generic.S
@@ -90,7 +90,11 @@ endfunc aem_generic_cpu_reg_dump
/* cpu_ops for Base AEM FVP */
-declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1
+declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \
+ aem_generic_core_pwr_dwn, \
+ aem_generic_cluster_pwr_dwn
/* cpu_ops for Foundation FVP */
-declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, 1
+declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, CPU_NO_RESET_FUNC, \
+ aem_generic_core_pwr_dwn, \
+ aem_generic_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a35.S b/lib/cpus/aarch64/cortex_a35.S
index ba29d6d4..c17c8f17 100644
--- a/lib/cpus/aarch64/cortex_a35.S
+++ b/lib/cpus/aarch64/cortex_a35.S
@@ -157,4 +157,7 @@ func cortex_a35_cpu_reg_dump
ret
endfunc cortex_a35_cpu_reg_dump
-declare_cpu_ops cortex_a35, CORTEX_A35_MIDR
+declare_cpu_ops cortex_a35, CORTEX_A35_MIDR, \
+ cortex_a35_reset_func, \
+ cortex_a35_core_pwr_dwn, \
+ cortex_a35_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S
index ed546e7e..06be9ce6 100644
--- a/lib/cpus/aarch64/cortex_a53.S
+++ b/lib/cpus/aarch64/cortex_a53.S
@@ -244,4 +244,7 @@ func cortex_a53_cpu_reg_dump
ret
endfunc cortex_a53_cpu_reg_dump
-declare_cpu_ops cortex_a53, CORTEX_A53_MIDR
+declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
+ cortex_a53_reset_func, \
+ cortex_a53_core_pwr_dwn, \
+ cortex_a53_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index d6b181d0..e531b1e3 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -488,4 +488,7 @@ func cortex_a57_cpu_reg_dump
endfunc cortex_a57_cpu_reg_dump
-declare_cpu_ops cortex_a57, CORTEX_A57_MIDR
+declare_cpu_ops cortex_a57, CORTEX_A57_MIDR, \
+ cortex_a57_reset_func, \
+ cortex_a57_core_pwr_dwn, \
+ cortex_a57_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 9f04fb72..fffc99f5 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -242,4 +242,7 @@ func cortex_a72_cpu_reg_dump
endfunc cortex_a72_cpu_reg_dump
-declare_cpu_ops cortex_a72, CORTEX_A72_MIDR
+declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
+ cortex_a72_reset_func, \
+ cortex_a72_core_pwr_dwn, \
+ cortex_a72_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index e1615dbd..49d5449a 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -153,4 +153,7 @@ func cortex_a73_cpu_reg_dump
ret
endfunc cortex_a73_cpu_reg_dump
-declare_cpu_ops cortex_a73, CORTEX_A73_MIDR
+declare_cpu_ops cortex_a73, CORTEX_A73_MIDR, \
+ cortex_a73_reset_func, \
+ cortex_a73_core_pwr_dwn, \
+ cortex_a73_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index dab933c7..ec7f1dde 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -74,31 +74,23 @@ endfunc reset_handler
#if IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */
/*
- * The prepare core power down function for all platforms. After
- * the cpu_ops pointer is retrieved from cpu_data, the corresponding
- * pwr_dwn_core in the cpu_ops is invoked.
+ * void prepare_cpu_pwr_dwn(unsigned int power_level)
+ *
+ * Prepare CPU power down function for all platforms. The function takes
+ * a domain level to be powered down as its parameter. After the cpu_ops
+ * pointer is retrieved from cpu_data, the handler for requested power
+ * level is called.
*/
- .globl prepare_core_pwr_dwn
-func prepare_core_pwr_dwn
- mrs x1, tpidr_el3
- ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
-#if ASM_ASSERTION
- cmp x0, #0
- ASM_ASSERT(ne)
-#endif
-
- /* Get the cpu_ops core_pwr_dwn handler */
- ldr x1, [x0, #CPU_PWR_DWN_CORE]
- br x1
-endfunc prepare_core_pwr_dwn
-
+ .globl prepare_cpu_pwr_dwn
+func prepare_cpu_pwr_dwn
/*
- * The prepare cluster power down function for all platforms. After
- * the cpu_ops pointer is retrieved from cpu_data, the corresponding
- * pwr_dwn_cluster in the cpu_ops is invoked.
+ * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
+ * power down handler for the last power level
*/
- .globl prepare_cluster_pwr_dwn
-func prepare_cluster_pwr_dwn
+ mov_imm x2, (CPU_MAX_PWR_DWN_OPS - 1)
+ cmp x0, x2
+ csel x2, x2, x0, hi
+
mrs x1, tpidr_el3
ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
@@ -106,10 +98,12 @@ func prepare_cluster_pwr_dwn
ASM_ASSERT(ne)
#endif
- /* Get the cpu_ops cluster_pwr_dwn handler */
- ldr x1, [x0, #CPU_PWR_DWN_CLUSTER]
+ /* Get the appropriate power down handler */
+ mov x1, #CPU_PWR_DWN_OPS
+ add x1, x1, x2, lsl #3
+ ldr x1, [x0, x1]
br x1
-endfunc prepare_cluster_pwr_dwn
+endfunc prepare_cpu_pwr_dwn
/*
diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S
index bce05737..0b61440d 100644
--- a/lib/cpus/aarch64/denver.S
+++ b/lib/cpus/aarch64/denver.S
@@ -163,4 +163,7 @@ func denver_cpu_reg_dump
ret
endfunc denver_cpu_reg_dump
-declare_cpu_ops denver, DENVER_1_0_MIDR
+declare_cpu_ops denver, DENVER_1_0_MIDR, \
+ denver_reset_func, \
+ denver_core_pwr_dwn, \
+ denver_cluster_pwr_dwn
diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S
index 5a41ff31..9f991dfe 100644
--- a/lib/psci/aarch32/psci_helpers.S
+++ b/lib/psci/aarch32/psci_helpers.S
@@ -65,22 +65,13 @@ func psci_do_pwrdown_cache_maintenance
bl do_stack_maintenance
/* ---------------------------------------------
- * Determine how many levels of cache will be
- * subject to cache maintenance. Power level
- * 0 implies that only the cpu is being powered
- * down. Only the L1 data cache needs to be
- * flushed to the PoU in this case. For a higher
- * power level we are assuming that a flush
- * of L1 data and L2 unified cache is enough.
- * This information should be provided by the
- * platform.
+ * Invoke CPU-specific power down operations for
+ * the appropriate level
* ---------------------------------------------
*/
- cmp r4, #PSCI_CPU_PWR_LVL
- pop {r4,lr}
-
- beq prepare_core_pwr_dwn
- b prepare_cluster_pwr_dwn
+ mov r0, r4
+ pop {r4, lr}
+ b prepare_cpu_pwr_dwn
endfunc psci_do_pwrdown_cache_maintenance
diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S
index eaa17c72..108f0687 100644
--- a/lib/psci/aarch64/psci_helpers.S
+++ b/lib/psci/aarch64/psci_helpers.S
@@ -59,24 +59,11 @@ func psci_do_pwrdown_cache_maintenance
stp x19, x20, [sp,#-16]!
/* ---------------------------------------------
- * Determine to how many levels of cache will be
- * subject to cache maintenance. Power level
- * 0 implies that only the cpu is being powered
- * down. Only the L1 data cache needs to be
- * flushed to the PoU in this case. For a higher
- * power level we are assuming that a flush
- * of L1 data and L2 unified cache is enough.
- * This information should be provided by the
- * platform.
+ * Invoke CPU-specific power down operations for
+ * the appropriate level
* ---------------------------------------------
*/
- cmp w0, #PSCI_CPU_PWR_LVL
- b.eq do_core_pwr_dwn
- bl prepare_cluster_pwr_dwn
- b do_stack_maintenance
-
-do_core_pwr_dwn:
- bl prepare_core_pwr_dwn
+ bl prepare_cpu_pwr_dwn
/* ---------------------------------------------
* Do stack maintenance by flushing the used
@@ -84,7 +71,6 @@ do_core_pwr_dwn:
* remainder.
* ---------------------------------------------
*/
-do_stack_maintenance:
bl plat_get_my_stack
/* ---------------------------------------------
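On AArch64 the do_core_pwr_dwn and do_stack_maintenance labels disappear entirely. The resulting control flow, sketched in C (the routine itself remains assembly; do_stack_flush() is a hypothetical stand-in for the plat_get_my_stack()-based stack maintenance):

    void psci_do_pwrdown_cache_maintenance_sketch(unsigned int power_level)
    {
        /* One call covers every level; no core-vs-cluster branch. */
        prepare_cpu_pwr_dwn(power_level);

        /* Execution now falls straight through to stack maintenance. */
        do_stack_flush();   /* hypothetical stand-in */
    }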
diff --git a/lib/psci/psci_off.c b/lib/psci/psci_off.c
index 1cc6ede3..897bf319 100644
--- a/lib/psci/psci_off.c
+++ b/lib/psci/psci_off.c
@@ -107,12 +107,29 @@ int psci_do_cpu_off(unsigned int end_pwrlvl)
psci_stats_update_pwr_down(end_pwrlvl, &state_info);
#endif
+#if ENABLE_RUNTIME_INSTRUMENTATION
+
+ /*
+ * Flush cache line so that even if CPU power down happens
+ * the timestamp update is reflected in memory.
+ */
+ PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+ RT_INSTR_ENTER_CFLUSH,
+ PMF_CACHE_MAINT);
+#endif
+
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*/
psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+ RT_INSTR_EXIT_CFLUSH,
+ PMF_NO_CACHE_MAINT);
+#endif
+
/*
* Plat. management: Perform platform specific actions to turn this
* cpu off e.g. exit cpu coherency, program the power controller etc.
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index 10d2481d..dc2ab774 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -109,6 +109,17 @@ static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
*/
cm_init_my_context(ep);
+#if ENABLE_RUNTIME_INSTRUMENTATION
+
+ /*
+ * Flush cache line so that even if CPU power down happens
+ * the timestamp update is reflected in memory.
+ */
+ PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+ RT_INSTR_ENTER_CFLUSH,
+ PMF_CACHE_MAINT);
+#endif
+
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches. Currently we assume that the power level correspond
@@ -117,6 +128,12 @@ static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
* and the cpu-ops power down to perform from the platform.
*/
psci_do_pwrdown_cache_maintenance(max_off_lvl);
+
+#if ENABLE_RUNTIME_INSTRUMENTATION
+ PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+ RT_INSTR_EXIT_CFLUSH,
+ PMF_NO_CACHE_MAINT);
+#endif
}
/*******************************************************************************
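Both PSCI call sites wrap the cache maintenance in the same pair of captures. Condensed below as an annotated restatement of the hunks above (using psci_off.c's argument), not new code:

    #if ENABLE_RUNTIME_INSTRUMENTATION
        /* Taken before the flush: PMF_CACHE_MAINT makes PMF flush the
         * timestamp's own cache line, so the value reaches memory even
         * if the core powers down without a normal writeback. */
        PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
                RT_INSTR_ENTER_CFLUSH,
                PMF_CACHE_MAINT);
    #endif

        psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));

    #if ENABLE_RUNTIME_INSTRUMENTATION
        /* Caches were just cleaned, so no extra maintenance is requested. */
        PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
                RT_INSTR_EXIT_CFLUSH,
                PMF_NO_CACHE_MAINT);
    #endif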
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
index d70a6ef5..e8408da8 100644
--- a/lib/xlat_tables/aarch32/xlat_tables.c
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -39,49 +39,60 @@
/*
* Each platform can define the size of the virtual address space, which is
- * defined in ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus the width
- * of said address space. The value of TTBCR.TxSZ must be in the range 0 to
- * 7 [1], which means that the virtual address space width must be in the range
- * 32 to 25 bits.
+ * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus
+ * the width of said address space. The value of TTBCR.TxSZ must be in the
+ * range 0 to 7 [1], which means that the virtual address space width must be
+ * in the range 32 to 25 bits.
*
- * Here we calculate the initial lookup level from the value of ADDR_SPACE_SIZE.
- * For a 4 KB page size, level 1 supports virtual address spaces of widths 32
- * to 31 bits, and level 2 from 30 to 25. Wider or narrower address spaces are
- * not supported. As a result, level 3 cannot be used as initial lookup level
- * with 4 KB granularity [1].
+ * Here we calculate the initial lookup level from the value of
+ * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 1 supports virtual
+ * address spaces of widths 32 to 31 bits, and level 2 from 30 to 25. Wider or
+ * narrower address spaces are not supported. As a result, level 3 cannot be
+ * used as initial lookup level with 4 KB granularity [1].
*
- * For example, for a 31-bit address space (i.e. ADDR_SPACE_SIZE == 1 << 31),
- * TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table G4-5 in
- * the ARM ARM, the initial lookup level for such an address space is 1.
+ * For example, for a 31-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
+ * 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table
+ * G4-5 in the ARM ARM, the initial lookup level for an address space like that
+ * is 1.
*
* See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
* information:
* [1] Section G4.6.5
*/
-#if ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))
+#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))
-# error "ADDR_SPACE_SIZE is too big."
+# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
-#elif ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
+#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
# define XLAT_TABLE_LEVEL_BASE 1
-# define NUM_BASE_LEVEL_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
+# define NUM_BASE_LEVEL_ENTRIES \
+ (PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
-#elif ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))
+#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))
# define XLAT_TABLE_LEVEL_BASE 2
-# define NUM_BASE_LEVEL_ENTRIES (ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
+# define NUM_BASE_LEVEL_ENTRIES \
+ (PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
#else
-# error "ADDR_SPACE_SIZE is too small."
+# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
#endif
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
+#if DEBUG
+static unsigned long long get_max_supported_pa(void)
+{
+ /* Physical address space size for long descriptor format. */
+ return (1ULL << 40) - 1ULL;
+}
+#endif
+
void init_xlat_tables(void)
{
unsigned long long max_pa;
@@ -89,7 +100,10 @@ void init_xlat_tables(void)
print_mmap();
init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
&max_va, &max_pa);
- assert(max_va < ADDR_SPACE_SIZE);
+
+ assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
+ assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
+ assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
}
/*******************************************************************************
@@ -122,7 +136,7 @@ void enable_mmu_secure(unsigned int flags)
ttbcr = TTBCR_EAE_BIT |
TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
TTBCR_RGN0_INNER_WBA |
- (32 - __builtin_ctzl((uintptr_t)ADDR_SPACE_SIZE));
+ (32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
ttbcr |= TTBCR_EPD1_BIT;
write_ttbcr(ttbcr);
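Tying the AArch32 rules together with the 31-bit example from the comment above, a hypothetical platform configuration and what it yields:

    /* Hypothetical platform_def.h values (AArch32) */
    #define PLAT_VIRT_ADDR_SPACE_SIZE  (1ULL << 31) /* TTBCR.TxSZ = 32 - 31 = 1 */
    #define PLAT_PHY_ADDR_SPACE_SIZE   (1ULL << 31)

    /* Resulting: XLAT_TABLE_LEVEL_BASE = 1 and
     * NUM_BASE_LEVEL_ENTRIES = (1ULL << 31) >> L1_XLAT_ADDRESS_SHIFT = 2,
     * i.e. a two-entry level 1 base table. */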
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index 5b639b7a..a168636b 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -31,28 +31,33 @@
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
+#include <bl_common.h>
#include <cassert.h>
+#include <common_def.h>
#include <platform_def.h>
+#include <sys/types.h>
#include <utils.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"
/*
* Each platform can define the size of the virtual address space, which is
- * defined in ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the width of
- * said address space. The value of TCR.TxSZ must be in the range 16 to 39 [1],
- * which means that the virtual address space width must be in the range 48 to
- * 25 bits.
+ * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TCR.TxSZ is calculated as 64 minus the
+ * width of said address space. The value of TCR.TxSZ must be in the range 16
+ * to 39 [1], which means that the virtual address space width must be in the
+ * range 48 to 25 bits.
*
- * Here we calculate the initial lookup level from the value of ADDR_SPACE_SIZE.
- * For a 4 KB page size, level 0 supports virtual address spaces of widths 48 to
- * 40 bits, level 1 from 39 to 31, and level 2 from 30 to 25. Wider or narrower
- * address spaces are not supported. As a result, level 3 cannot be used as
- * initial lookup level with 4 KB granularity. [2]
+ * Here we calculate the initial lookup level from the value of
+ * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 0 supports virtual
+ * address spaces of widths 48 to 40 bits, level 1 from 39 to 31, and level 2
+ * from 30 to 25. Wider or narrower address spaces are not supported. As a
+ * result, level 3 cannot be used as initial lookup level with 4 KB
+ * granularity. [2]
*
- * For example, for a 35-bit address space (i.e. ADDR_SPACE_SIZE == 1 << 35),
- * TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table D4-11 in
- * the ARM ARM, the initial lookup level for such an address space is 1.
+ * For example, for a 35-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
+ * 1 << 35), TCR.TxSZ will be programmed to (64 - 35) = 29. According to Table
+ * D4-11 in the ARM ARM, the initial lookup level for an address space like
+ * that is 1.
*
* See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
* information:
@@ -60,28 +65,31 @@
* [2] Section D4.2.5
*/
-#if ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))
+#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (64 - TCR_TxSZ_MIN))
-# error "ADDR_SPACE_SIZE is too big."
+# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."
-#elif ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)
+#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << L0_XLAT_ADDRESS_SHIFT)
# define XLAT_TABLE_LEVEL_BASE 0
-# define NUM_BASE_LEVEL_ENTRIES (ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)
+# define NUM_BASE_LEVEL_ENTRIES \
+ (PLAT_VIRT_ADDR_SPACE_SIZE >> L0_XLAT_ADDRESS_SHIFT)
-#elif ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
+#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)
# define XLAT_TABLE_LEVEL_BASE 1
-# define NUM_BASE_LEVEL_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
+# define NUM_BASE_LEVEL_ENTRIES \
+ (PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)
-#elif ADDR_SPACE_SIZE >= (1 << (64 - TCR_TxSZ_MAX))
+#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (64 - TCR_TxSZ_MAX))
# define XLAT_TABLE_LEVEL_BASE 2
-# define NUM_BASE_LEVEL_ENTRIES (ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
+# define NUM_BASE_LEVEL_ENTRIES \
+ (PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)
#else
-# error "ADDR_SPACE_SIZE is too small."
+# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."
#endif
@@ -119,6 +127,25 @@ static unsigned long long calc_physical_addr_size_bits(
return TCR_PS_BITS_4GB;
}
+#if DEBUG
+/* Physical Address ranges supported in the AArch64 Memory Model */
+static const unsigned int pa_range_bits_arr[] = {
+ PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
+ PARANGE_0101
+};
+
+static unsigned long long get_max_supported_pa(void)
+{
+ u_register_t pa_range = read_id_aa64mmfr0_el1() &
+ ID_AA64MMFR0_EL1_PARANGE_MASK;
+
+ /* All other values are reserved */
+ assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
+
+ return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
+}
+#endif
+
void init_xlat_tables(void)
{
unsigned long long max_pa;
@@ -126,8 +153,12 @@ void init_xlat_tables(void)
print_mmap();
init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
&max_va, &max_pa);
+
+ assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
+ assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
+ assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
+
tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
- assert(max_va < ADDR_SPACE_SIZE);
}
/*******************************************************************************
@@ -165,7 +196,7 @@ void init_xlat_tables(void)
/* Set T0SZ to (64 - width of virtual address space) */ \
tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | \
TCR_RGN_INNER_WBA | \
- (64 - __builtin_ctzl(ADDR_SPACE_SIZE)); \
+ (64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE));\
tcr |= _tcr_extra; \
write_tcr_el##_el(tcr); \
\
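Likewise for AArch64, matching the 35-bit example in the comment above (hypothetical platform values):

    /* Hypothetical platform_def.h values (AArch64) */
    #define PLAT_VIRT_ADDR_SPACE_SIZE  (1ULL << 35) /* TCR.TxSZ = 64 - 35 = 29 */
    #define PLAT_PHY_ADDR_SPACE_SIZE   (1ULL << 35)

    /* Resulting: XLAT_TABLE_LEVEL_BASE = 1 and
     * NUM_BASE_LEVEL_ENTRIES = (1ULL << 35) >> L1_XLAT_ADDRESS_SHIFT = 32;
     * PLAT_PHY_ADDR_SPACE_SIZE must also not exceed the PA range reported
     * by ID_AA64MMFR0_EL1, which get_max_supported_pa() checks in DEBUG. */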
diff --git a/lib/xlat_tables/xlat_tables_common.c b/lib/xlat_tables/xlat_tables_common.c
index ebbc9161..81c4dc68 100644
--- a/lib/xlat_tables/xlat_tables_common.c
+++ b/lib/xlat_tables/xlat_tables_common.c
@@ -32,12 +32,14 @@
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
+#include <common_def.h>
#include <debug.h>
#include <platform_def.h>
#include <string.h>
#include <types.h>
#include <utils.h>
#include <xlat_tables.h>
+#include "xlat_tables_private.h"
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
#define LVL0_SPACER ""
@@ -102,6 +104,11 @@ void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
assert(base_pa < end_pa); /* Check for overflows */
assert(base_va < end_va);
+ assert((base_va + (uintptr_t)size - (uintptr_t)1) <=
+ (PLAT_VIRT_ADDR_SPACE_SIZE - 1));
+ assert((base_pa + (unsigned long long)size - 1ULL) <=
+ (PLAT_PHY_ADDR_SPACE_SIZE - 1));
+
#if DEBUG
/* Check for PAs and VAs overlaps with all other regions */
@@ -198,6 +205,9 @@ static uint64_t mmap_desc(unsigned attr, unsigned long long addr_pa,
uint64_t desc;
int mem_type;
+ /* Make sure that the granularity is fine enough to map this address. */
+ assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);
+
desc = addr_pa;
/*
* There are different translation table descriptors for level 3 and the
@@ -343,7 +353,8 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
if (mm->base_va > base_va + level_size - 1) {
/* Next region is after this area. Nothing to map yet */
desc = INVALID_DESC;
- } else {
+ /* Make sure that the current level allows block descriptors */
+ } else if (level >= XLAT_BLOCK_LEVEL_MIN) {
/*
* Try to get attributes of this area. It will fail if
* there are partially overlapping regions. On success,
@@ -372,7 +383,8 @@ static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
*table++ = desc;
base_va += level_size;
- } while ((base_va & level_index_mask) && (base_va - 1 < ADDR_SPACE_SIZE - 1));
+ } while ((base_va & level_index_mask) &&
+ (base_va - 1 < PLAT_VIRT_ADDR_SPACE_SIZE - 1));
return mm;
}
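With the new range asserts, any mapped region must fit inside both configured address spaces. A hypothetical call that the DEBUG checks would vet:

    /* Hypothetical device region; values are illustrative only. */
    mmap_add_region(0x1B000000ULL,              /* base_pa */
                    0x1B000000,                 /* base_va */
                    0x00100000,                 /* size: 1 MiB */
                    MT_DEVICE | MT_RW | MT_SECURE);
    /* Asserts fire if base_va + size - 1 > PLAT_VIRT_ADDR_SPACE_SIZE - 1,
     * or the equivalent check on the physical side fails. */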
diff --git a/lib/xlat_tables/xlat_tables_private.h b/lib/xlat_tables/xlat_tables_private.h
index 159d071b..f0f656bd 100644
--- a/lib/xlat_tables/xlat_tables_private.h
+++ b/lib/xlat_tables/xlat_tables_private.h
@@ -32,10 +32,61 @@
#define __XLAT_TABLES_PRIVATE_H__
#include <cassert.h>
+#include <platform_def.h>
#include <utils.h>
-/* The virtual address space size must be a power of two. */
-CASSERT(IS_POWER_OF_TWO(ADDR_SPACE_SIZE), assert_valid_addr_space_size);
+/*
+ * If the platform hasn't defined a physical and a virtual address space size
+ * default to ADDR_SPACE_SIZE.
+ */
+#if ERROR_DEPRECATED
+# ifdef ADDR_SPACE_SIZE
+# error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
+# endif
+#elif defined(ADDR_SPACE_SIZE)
+# ifndef PLAT_PHY_ADDR_SPACE_SIZE
+# define PLAT_PHY_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
+# endif
+# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
+# define PLAT_VIRT_ADDR_SPACE_SIZE ADDR_SPACE_SIZE
+# endif
+#endif
+
+/* The virtual and physical address space sizes must be powers of two. */
+CASSERT(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE),
+ assert_valid_virt_addr_space_size);
+CASSERT(IS_POWER_OF_TWO(PLAT_PHY_ADDR_SPACE_SIZE),
+ assert_valid_phy_addr_space_size);
+
+/*
+ * In AArch32 state, the MMU only supports 4KB page granularity, which means
+ * that the first translation table level is either 1 or 2. Both of them are
+ * allowed to have block and table descriptors. See section G4.5.6 of the
+ * ARMv8-A Architecture Reference Manual (DDI 0487A.k) for more information.
+ *
+ * In AArch64 state, the MMU may support 4 KB, 16 KB and 64 KB page
+ * granularity. For 4KB granularity, a level 0 table descriptor doesn't support
+ * block translation. For 16KB, the same thing happens to levels 0 and 1. For
+ * 64KB, same for level 1. See section D4.3.1 of the ARMv8-A Architecture
+ * Reference Manual (DDI 0487A.k) for more information.
+ *
+ * The define below specifies the first table level that allows block
+ * descriptors.
+ */
+
+#ifdef AARCH32
+
+# define XLAT_BLOCK_LEVEL_MIN 1
+
+#else /* if AArch64 */
+
+# if PAGE_SIZE == (4*1024) /* 4KB */
+# define XLAT_BLOCK_LEVEL_MIN 1
+# else /* 16KB or 64KB */
+# define XLAT_BLOCK_LEVEL_MIN 2
+# endif
+
+#endif /* AARCH32 */
void print_mmap(void);
void init_xlation_table(uintptr_t base_va, uint64_t *table,
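The compatibility shim above means a legacy platform keeps building until ERROR_DEPRECATED is enabled. A hypothetical legacy definition and its effect:

    /* Legacy platform_def.h still defining only the old macro */
    #define ADDR_SPACE_SIZE     (1ULL << 32)

    /* With ERROR_DEPRECATED=0, the shim supplies:
     *   PLAT_PHY_ADDR_SPACE_SIZE  == ADDR_SPACE_SIZE
     *   PLAT_VIRT_ADDR_SPACE_SIZE == ADDR_SPACE_SIZE
     * With ERROR_DEPRECATED=1, the build stops at the #error above. */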