Diffstat (limited to 'lib')
-rw-r--r--  lib/aarch32/arm32_aeabi_divmod.c | 203
-rw-r--r--  lib/aarch32/arm32_aeabi_divmod_a32.S | 30
-rw-r--r--  lib/aarch64/misc_helpers.S | 30
-rw-r--r--  lib/cpus/aarch32/cortex_a12.S | 75
-rw-r--r--  lib/cpus/aarch32/cortex_a15.S | 81
-rw-r--r--  lib/cpus/aarch32/cortex_a17.S | 75
-rw-r--r--  lib/cpus/aarch32/cortex_a5.S | 75
-rw-r--r--  lib/cpus/aarch32/cortex_a53.S | 4
-rw-r--r--  lib/cpus/aarch32/cortex_a57.S | 4
-rw-r--r--  lib/cpus/aarch32/cortex_a7.S | 75
-rw-r--r--  lib/cpus/aarch32/cortex_a72.S | 4
-rw-r--r--  lib/cpus/aarch32/cortex_a9.S | 75
-rw-r--r--  lib/cpus/aarch64/cortex_a75.S | 29
-rw-r--r--  lib/el3_runtime/aarch32/context_mgmt.c | 19
-rw-r--r--  lib/el3_runtime/aarch64/context.S | 51
-rw-r--r--  lib/el3_runtime/aarch64/context_mgmt.c | 101
-rw-r--r--  lib/extensions/amu/aarch32/amu.c | 32
-rw-r--r--  lib/extensions/amu/aarch64/amu.c | 40
-rw-r--r--  lib/extensions/spe/spe.c | 85
-rw-r--r--  lib/extensions/sve/sve.c | 126
-rw-r--r--  lib/locks/exclusive/aarch32/spinlock.S | 12
-rw-r--r--  lib/psci/psci_common.c | 4
-rw-r--r--  lib/psci/psci_lib.mk | 1
-rw-r--r--  lib/psci/psci_main.c | 33
-rw-r--r--  lib/psci/psci_mem_protect.c | 38
-rw-r--r--  lib/psci/psci_on.c | 3
-rw-r--r--  lib/psci/psci_private.h | 9
-rw-r--r--  lib/psci/psci_setup.c | 9
-rw-r--r--  lib/psci/psci_suspend.c | 14
-rw-r--r--  lib/psci/psci_system_off.c | 30
-rw-r--r--  lib/utils/mem_region.c | 78
-rw-r--r--  lib/xlat_tables/aarch32/xlat_tables.c | 4
-rw-r--r--  lib/xlat_tables/aarch64/xlat_tables.c | 11
-rw-r--r--  lib/xlat_tables_v2/aarch32/xlat_tables_arch.c | 26
-rw-r--r--  lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h | 22
-rw-r--r--  lib/xlat_tables_v2/aarch64/xlat_tables_arch.c | 80
-rw-r--r--  lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h | 28
-rw-r--r--  lib/xlat_tables_v2/xlat_tables.mk | 2
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_internal.c | 561
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_private.h | 34
40 files changed, 2003 insertions, 210 deletions
diff --git a/lib/aarch32/arm32_aeabi_divmod.c b/lib/aarch32/arm32_aeabi_divmod.c
new file mode 100644
index 00000000..a8f2e742
--- /dev/null
+++ b/lib/aarch32/arm32_aeabi_divmod.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * From the ABI specifications:
+ * int __aeabi_idiv(int numerator, int denominator);
+ * unsigned __aeabi_uidiv(unsigned numerator, unsigned denominator);
+ *
+ * typedef struct { int quot; int rem; } idiv_return;
+ * typedef struct { unsigned quot; unsigned rem; } uidiv_return;
+ *
+ * __value_in_regs idiv_return __aeabi_idivmod(int numerator,
+ * int denominator);
+ * __value_in_regs uidiv_return __aeabi_uidivmod(unsigned numerator,
+ * unsigned denominator);
+ */
+
+/* struct qr - stores quotient/remainder to handle divmod EABI interfaces. */
+struct qr {
+ unsigned int q; /* computed quotient */
+ unsigned int r; /* computed remainder */
+ unsigned int q_n; /* specifies if quotient shall be negative */
+ unsigned int r_n; /* specifies if remainder shall be negative */
+};
+
+static void uint_div_qr(unsigned int numerator, unsigned int denominator,
+ struct qr *qr);
+
+/* returns in R0 and R1 by tail calling an asm function */
+unsigned int __aeabi_uidivmod(unsigned int numerator, unsigned int denominator);
+
+unsigned int __aeabi_uidiv(unsigned int numerator, unsigned int denominator);
+unsigned int __aeabi_uimod(unsigned int numerator, unsigned int denominator);
+
+/* returns in R0 and R1 by tail calling an asm function */
+signed int __aeabi_idivmod(signed int numerator, signed int denominator);
+
+signed int __aeabi_idiv(signed int numerator, signed int denominator);
+signed int __aeabi_imod(signed int numerator, signed int denominator);
+
+/*
+ * __ste_idivmod_ret_t __aeabi_idivmod(signed numerator, signed denominator)
+ * Numerator and Denominator are received in R0 and R1.
+ * Where __ste_idivmod_ret_t is returned in R0 and R1.
+ *
+ * __ste_uidivmod_ret_t __aeabi_uidivmod(unsigned numerator,
+ * unsigned denominator)
+ * Numerator and Denominator are received in R0 and R1.
+ * Where __ste_uidivmod_ret_t is returned in R0 and R1.
+ */
+#ifdef __GNUC__
+signed int ret_idivmod_values(signed int quotient, signed int remainder);
+unsigned int ret_uidivmod_values(unsigned int quotient, unsigned int remainder);
+#else
+#error "Compiler not supported"
+#endif
+
+static void division_qr(unsigned int n, unsigned int p, struct qr *qr)
+{
+ unsigned int i = 1, q = 0;
+
+ if (p == 0) {
+ qr->r = 0xFFFFFFFF; /* division by 0 */
+ return;
+ }
+
+ while ((p >> 31) == 0) {
+ i = i << 1; /* count the max division steps */
+ p = p << 1; /* increase p until it has maximum size */
+ }
+
+ while (i > 0) {
+ q = q << 1; /* write bit in q at index (size-1) */
+ if (n >= p) {
+ n -= p;
+ q++;
+ }
+ p = p >> 1; /* decrease p */
+ i = i >> 1; /* decrease remaining size in q */
+ }
+ qr->r = n;
+ qr->q = q;
+}
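As a quick illustration of the shift-and-subtract loop in division_qr(): for n = 7 and p = 3, the normalisation loop shifts p left 30 times until its top bit is set, so i ends up as 1 << 30. The second loop then runs 31 times, subtracting the shifted divisor whenever it fits, and finishes with qr->q = 2 and qr->r = 1, i.e. 7 / 3 = 2 remainder 1.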
+
+static void uint_div_qr(unsigned int numerator, unsigned int denominator,
+ struct qr *qr)
+{
+ division_qr(numerator, denominator, qr);
+
+ /* negate quotient and/or remainder according to requester */
+ if (qr->q_n)
+ qr->q = -qr->q;
+ if (qr->r_n)
+ qr->r = -qr->r;
+}
+
+unsigned int __aeabi_uidiv(unsigned int numerator, unsigned int denominator)
+{
+ struct qr qr = { .q_n = 0, .r_n = 0 };
+
+ uint_div_qr(numerator, denominator, &qr);
+
+ return qr.q;
+}
+
+unsigned int __aeabi_uimod(unsigned int numerator, unsigned int denominator)
+{
+ struct qr qr = { .q_n = 0, .r_n = 0 };
+
+ uint_div_qr(numerator, denominator, &qr);
+
+ return qr.r;
+}
+
+unsigned int __aeabi_uidivmod(unsigned int numerator, unsigned int denominator)
+{
+ struct qr qr = { .q_n = 0, .r_n = 0 };
+
+ uint_div_qr(numerator, denominator, &qr);
+
+ return ret_uidivmod_values(qr.q, qr.r);
+}
+
+signed int __aeabi_idiv(signed int numerator, signed int denominator)
+{
+ struct qr qr = { .q_n = 0, .r_n = 0 };
+
+ if (((numerator < 0) && (denominator > 0)) ||
+ ((numerator > 0) && (denominator < 0)))
+ qr.q_n = 1; /* quotient shall be negated */
+
+ if (numerator < 0) {
+ numerator = -numerator;
+ qr.r_n = 1; /* remainder shall be negated */
+ }
+
+ if (denominator < 0)
+ denominator = -denominator;
+
+ uint_div_qr(numerator, denominator, &qr);
+
+ return qr.q;
+}
+
+signed int __aeabi_imod(signed int numerator, signed int denominator)
+{
+ signed int s;
+ signed int i;
+ signed int j;
+ signed int h;
+ struct qr qr = { .q_n = 0, .r_n = 0 };
+
+ /* in case modulo of a power of 2 */
+ for (i = 0, j = 0, h = 0, s = denominator; (s != 0) || (h > 1); i++) {
+ if (s & 1) {
+ j = i;
+ h++;
+ }
+ s = s >> 1;
+ }
+ if (h == 1)
+ return numerator >> j;
+
+ if (((numerator < 0) && (denominator > 0)) ||
+ ((numerator > 0) && (denominator < 0)))
+ qr.q_n = 1; /* quotient shall be negated */
+
+ if (numerator < 0) {
+ numerator = -numerator;
+ qr.r_n = 1; /* remainder shall be negated */
+ }
+
+ if (denominator < 0)
+ denominator = -denominator;
+
+ uint_div_qr(numerator, denominator, &qr);
+
+ return qr.r;
+}
+
+signed int __aeabi_idivmod(signed int numerator, signed int denominator)
+{
+ struct qr qr = { .q_n = 0, .r_n = 0 };
+
+ if (((numerator < 0) && (denominator > 0)) ||
+ ((numerator > 0) && (denominator < 0)))
+ qr.q_n = 1; /* quotient shall be negated */
+
+ if (numerator < 0) {
+ numerator = -numerator;
+ qr.r_n = 1; /* remainder shall be negated */
+ }
+
+ if (denominator < 0)
+ denominator = -denominator;
+
+ uint_div_qr(numerator, denominator, &qr);
+
+ return ret_idivmod_values(qr.q, qr.r);
+}
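These helpers are normally reached only through compiler lowering of / and %, never by explicit calls. The sketch below is illustrative (the function name and the assumption of an AArch32 toolchain without hardware divide are not part of the patch); it shows the kind of C code that ends up invoking __aeabi_uidivmod, with the quotient returned in r0 and the remainder in r1.

unsigned int split_minutes(unsigned int total_seconds, unsigned int *seconds)
{
	*seconds = total_seconds % 60u;	/* typically lowered to __aeabi_uidivmod */
	return total_seconds / 60u;	/* quotient comes back in r0 */
}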
diff --git a/lib/aarch32/arm32_aeabi_divmod_a32.S b/lib/aarch32/arm32_aeabi_divmod_a32.S
new file mode 100644
index 00000000..6915dcd8
--- /dev/null
+++ b/lib/aarch32/arm32_aeabi_divmod_a32.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+/*
+ * EABI wrappers from the udivmod and idivmod functions
+ */
+
+ .globl ret_uidivmod_values
+ .globl ret_idivmod_values
+
+/*
+ * signed ret_idivmod_values(signed quot, signed rem);
+ * return quotient and remainder the EABI way (regs r0,r1)
+ */
+func ret_idivmod_values
+ bx lr
+endfunc ret_idivmod_values
+
+/*
+ * unsigned ret_uidivmod_values(unsigned quot, unsigned rem);
+ * return quotient and remainder the EABI way (regs r0,r1)
+ */
+func ret_uidivmod_values
+ bx lr
+endfunc ret_uidivmod_values
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index 78153bfb..9dfe46a2 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -18,7 +18,9 @@
.globl zeromem16
.globl memcpy16
+ .globl disable_mmu_el1
.globl disable_mmu_el3
+ .globl disable_mmu_icache_el1
.globl disable_mmu_icache_el3
#if SUPPORT_VFP
@@ -451,11 +453,11 @@ endfunc memcpy16
func disable_mmu_el3
mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
-do_disable_mmu:
+do_disable_mmu_el3:
mrs x0, sctlr_el3
bic x0, x0, x1
msr sctlr_el3, x0
- isb // ensure MMU is off
+ isb /* ensure MMU is off */
dsb sy
ret
endfunc disable_mmu_el3
@@ -463,10 +465,32 @@ endfunc disable_mmu_el3
func disable_mmu_icache_el3
mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
- b do_disable_mmu
+ b do_disable_mmu_el3
endfunc disable_mmu_icache_el3
/* ---------------------------------------------------------------------------
+ * Disable the MMU at EL1
+ * ---------------------------------------------------------------------------
+ */
+
+func disable_mmu_el1
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
+do_disable_mmu_el1:
+ mrs x0, sctlr_el1
+ bic x0, x0, x1
+ msr sctlr_el1, x0
+ isb /* ensure MMU is off */
+ dsb sy
+ ret
+endfunc disable_mmu_el1
+
+
+func disable_mmu_icache_el1
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+ b do_disable_mmu_el1
+endfunc disable_mmu_icache_el1
+
+/* ---------------------------------------------------------------------------
* Enable the use of VFP at EL3
* ---------------------------------------------------------------------------
*/
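The new EL1 variants mirror the existing EL3 helpers. A minimal usage sketch follows, assuming the usual void prototypes are exposed through arch_helpers.h (the header reference and the calling function are illustrative assumptions, not part of the patch):

#include <arch_helpers.h>	/* assumed to declare disable_mmu_icache_el1() */

/* Illustrative only: tear down the EL1 translation regime before handing
 * over to an image that expects the MMU and caches to be off. */
static void prepare_el1_handover(void)
{
	/* Clears SCTLR_EL1.M, .C and .I, then ISB/DSB as in the asm above. */
	disable_mmu_icache_el1();
}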
diff --git a/lib/cpus/aarch32/cortex_a12.S b/lib/cpus/aarch32/cortex_a12.S
new file mode 100644
index 00000000..73c97507
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a12.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a12.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a12_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A12_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a12_disable_smp
+
+func cortex_a12_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A12_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a12_enable_smp
+
+func cortex_a12_reset_func
+ b cortex_a12_enable_smp
+endfunc cortex_a12_reset_func
+
+func cortex_a12_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a12_disable_smp
+endfunc cortex_a12_core_pwr_dwn
+
+func cortex_a12_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a12_disable_smp
+endfunc cortex_a12_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a12, CORTEX_A12_MIDR, \
+ cortex_a12_reset_func, \
+ cortex_a12_core_pwr_dwn, \
+ cortex_a12_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a15.S b/lib/cpus/aarch32/cortex_a15.S
new file mode 100644
index 00000000..0d5a1165
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a15.S
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a15.h>
+#include <cpu_macros.S>
+
+/*
+ * Cortex-A15 supports LPAE and the Virtualization Extensions.
+ * It does not matter whether the configuration uses LPAE and VE or not,
+ * therefore we do not check ARCH_IS_ARMV7_WITH_LPAE/VE here.
+ */
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a15_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A15_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a15_disable_smp
+
+func cortex_a15_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A15_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a15_enable_smp
+
+func cortex_a15_reset_func
+ b cortex_a15_enable_smp
+endfunc cortex_a15_reset_func
+
+func cortex_a15_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a15_disable_smp
+endfunc cortex_a15_core_pwr_dwn
+
+func cortex_a15_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a15_disable_smp
+endfunc cortex_a15_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a15, CORTEX_A15_MIDR, \
+ cortex_a15_reset_func, \
+ cortex_a15_core_pwr_dwn, \
+ cortex_a15_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a17.S b/lib/cpus/aarch32/cortex_a17.S
new file mode 100644
index 00000000..316d4f05
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a17.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a17.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a17_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A17_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a17_disable_smp
+
+func cortex_a17_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A17_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a17_enable_smp
+
+func cortex_a17_reset_func
+ b cortex_a17_enable_smp
+endfunc cortex_a17_reset_func
+
+func cortex_a17_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a17_disable_smp
+endfunc cortex_a17_core_pwr_dwn
+
+func cortex_a17_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a17_disable_smp
+endfunc cortex_a17_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a17, CORTEX_A17_MIDR, \
+ cortex_a17_reset_func, \
+ cortex_a17_core_pwr_dwn, \
+ cortex_a17_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a5.S b/lib/cpus/aarch32/cortex_a5.S
new file mode 100644
index 00000000..c07c13ea
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a5.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a5.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a5_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A5_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a5_disable_smp
+
+func cortex_a5_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A5_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a5_enable_smp
+
+func cortex_a5_reset_func
+ b cortex_a5_enable_smp
+endfunc cortex_a5_reset_func
+
+func cortex_a5_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a5_disable_smp
+endfunc cortex_a5_core_pwr_dwn
+
+func cortex_a5_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a5_disable_smp
+endfunc cortex_a5_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a5, CORTEX_A5_MIDR, \
+ cortex_a5_reset_func, \
+ cortex_a5_core_pwr_dwn, \
+ cortex_a5_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a53.S b/lib/cpus/aarch32/cortex_a53.S
index 74cedc35..1647e169 100644
--- a/lib/cpus/aarch32/cortex_a53.S
+++ b/lib/cpus/aarch32/cortex_a53.S
@@ -174,7 +174,7 @@ func cortex_a53_core_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
@@ -204,7 +204,7 @@ func cortex_a53_cluster_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
index b5189e77..64a6d674 100644
--- a/lib/cpus/aarch32/cortex_a57.S
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -406,7 +406,7 @@ func cortex_a57_core_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
@@ -448,7 +448,7 @@ func cortex_a57_cluster_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
diff --git a/lib/cpus/aarch32/cortex_a7.S b/lib/cpus/aarch32/cortex_a7.S
new file mode 100644
index 00000000..0278d1fd
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a7.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a7.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a7_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A7_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a7_disable_smp
+
+func cortex_a7_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A7_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a7_enable_smp
+
+func cortex_a7_reset_func
+ b cortex_a7_enable_smp
+endfunc cortex_a7_reset_func
+
+func cortex_a7_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a7_disable_smp
+endfunc cortex_a7_core_pwr_dwn
+
+func cortex_a7_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a7_disable_smp
+endfunc cortex_a7_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a7, CORTEX_A7_MIDR, \
+ cortex_a7_reset_func, \
+ cortex_a7_core_pwr_dwn, \
+ cortex_a7_cluster_pwr_dwn
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
index 69cc2ea5..75505206 100644
--- a/lib/cpus/aarch32/cortex_a72.S
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -120,7 +120,7 @@ func cortex_a72_core_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
@@ -167,7 +167,7 @@ func cortex_a72_cluster_pwr_dwn
push {r12, lr}
/* Assert if cache is enabled */
-#if ASM_ASSERTION
+#if ENABLE_ASSERTIONS
ldcopr r0, SCTLR
tst r0, #SCTLR_C_BIT
ASM_ASSERT(eq)
diff --git a/lib/cpus/aarch32/cortex_a9.S b/lib/cpus/aarch32/cortex_a9.S
new file mode 100644
index 00000000..4f30f84a
--- /dev/null
+++ b/lib/cpus/aarch32/cortex_a9.S
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <cortex_a9.h>
+#include <cpu_macros.S>
+
+ .macro assert_cache_enabled
+#if ENABLE_ASSERTIONS
+ ldcopr r0, SCTLR
+ tst r0, #SCTLR_C_BIT
+ ASM_ASSERT(eq)
+#endif
+ .endm
+
+func cortex_a9_disable_smp
+ ldcopr r0, ACTLR
+ bic r0, #CORTEX_A9_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ dsb sy
+ bx lr
+endfunc cortex_a9_disable_smp
+
+func cortex_a9_enable_smp
+ ldcopr r0, ACTLR
+ orr r0, #CORTEX_A9_ACTLR_SMP_BIT
+ stcopr r0, ACTLR
+ isb
+ bx lr
+endfunc cortex_a9_enable_smp
+
+func cortex_a9_reset_func
+ b cortex_a9_enable_smp
+endfunc cortex_a9_reset_func
+
+func cortex_a9_core_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 cache */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a9_disable_smp
+endfunc cortex_a9_core_pwr_dwn
+
+func cortex_a9_cluster_pwr_dwn
+ push {r12, lr}
+
+ assert_cache_enabled
+
+ /* Flush L1 caches */
+ mov r0, #DC_OP_CISW
+ bl dcsw_op_level1
+
+ bl plat_disable_acp
+
+ /* Exit cluster coherency */
+ pop {r12, lr}
+ b cortex_a9_disable_smp
+endfunc cortex_a9_cluster_pwr_dwn
+
+declare_cpu_ops cortex_a9, CORTEX_A9_MIDR, \
+ cortex_a9_reset_func, \
+ cortex_a9_core_pwr_dwn, \
+ cortex_a9_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 1f4500cb..4cab9e4f 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -11,6 +11,33 @@
#include <plat_macros.S>
#include <cortex_a75.h>
+func cortex_a75_reset_func
+#if ENABLE_AMU
+ /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
+ mrs x0, actlr_el3
+ orr x0, x0, #CORTEX_A75_ACTLR_AMEN_BIT
+ msr actlr_el3, x0
+ isb
+
+ /* Make sure accesses from EL0/EL1 are not trapped to EL2 */
+ mrs x0, actlr_el2
+ orr x0, x0, #CORTEX_A75_ACTLR_AMEN_BIT
+ msr actlr_el2, x0
+ isb
+
+ /* Enable group0 counters */
+ mov x0, #CORTEX_A75_AMU_GROUP0_MASK
+ msr CPUAMCNTENSET_EL0, x0
+ isb
+
+ /* Enable group1 counters */
+ mov x0, #CORTEX_A75_AMU_GROUP1_MASK
+ msr CPUAMCNTENSET_EL0, x0
+ isb
+#endif
+ ret
+endfunc cortex_a75_reset_func
+
/* ---------------------------------------------
* HW will do the cache maintenance while powering down
* ---------------------------------------------
@@ -47,5 +74,5 @@ func cortex_a75_cpu_reg_dump
endfunc cortex_a75_cpu_reg_dump
declare_cpu_ops cortex_a75, CORTEX_A75_MIDR, \
- CPU_NO_RESET_FUNC, \
+ cortex_a75_reset_func, \
cortex_a75_core_pwr_dwn
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 3e7a5b73..76e440e3 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -4,6 +4,7 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <amu.h>
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
@@ -125,6 +126,20 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
}
/*******************************************************************************
+ * Enable architecture extensions on first entry to Non-secure world.
+ * When EL2 is implemented but unused, `el2_unused` is non-zero; otherwise
+ * it is zero.
+ ******************************************************************************/
+static void enable_extensions_nonsecure(int el2_unused)
+{
+#if IMAGE_BL32
+#if ENABLE_AMU
+ amu_enable(el2_unused);
+#endif
+#endif
+}
+
+/*******************************************************************************
* The following function initializes the cpu_context for a CPU specified by
* its `cpu_idx` for first use, and sets the initial entrypoint state as
* specified by the entry_point_info structure.
@@ -161,6 +176,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
{
uint32_t hsctlr, scr;
cpu_context_t *ctx = cm_get_context(security_state);
+ int el2_unused = 0;
assert(ctx);
@@ -185,6 +201,8 @@ void cm_prepare_el3_exit(uint32_t security_state)
isb();
} else if (read_id_pfr1() &
(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
+ el2_unused = 1;
+
/*
* Set the NS bit to access NS copies of certain banked
* registers
@@ -283,5 +301,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
write_scr(read_scr() & ~SCR_NS_BIT);
isb();
}
+ enable_extensions_nonsecure(el2_unused);
}
}
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 8a6c11b7..620ec16f 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -9,7 +9,6 @@
#include <context.h>
.global el1_sysregs_context_save
- .global el1_sysregs_context_save_post_ops
.global el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
.global fpregs_context_save
@@ -74,6 +73,9 @@ func el1_sysregs_context_save
mrs x9, vbar_el1
stp x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
+ mrs x10, pmcr_el0
+ str x10, [x0, #CTX_PMCR_EL0]
+
/* Save AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
mrs x11, spsr_abt
@@ -87,9 +89,6 @@ func el1_sysregs_context_save
mrs x15, dacr32_el2
mrs x16, ifsr32_el2
stp x15, x16, [x0, #CTX_DACR32_EL2]
-
- mrs x17, fpexc32_el2
- str x17, [x0, #CTX_FP_FPEXC32_EL2]
#endif
/* Save NS timer registers if the build has instructed so */
@@ -112,36 +111,6 @@ endfunc el1_sysregs_context_save
/* -----------------------------------------------------
* The following function strictly follows the AArch64
* PCS to use x9-x17 (temporary caller-saved registers)
- * to do post operations after saving the EL1 system
- * register context.
- * -----------------------------------------------------
- */
-func el1_sysregs_context_save_post_ops
-#if ENABLE_SPE_FOR_LOWER_ELS
- /* Detect if SPE is implemented */
- mrs x9, id_aa64dfr0_el1
- ubfx x9, x9, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
- cmp x9, #0x1
- b.ne 1f
-
- /*
- * Before switching from normal world to secure world
- * the profiling buffers need to be drained out to memory. This is
- * required to avoid an invalid memory access when TTBR is switched
- * for entry to SEL1.
- */
- .arch armv8.2-a+profile
- psb csync
- dsb nsh
- .arch armv8-a
-1:
-#endif
- ret
-endfunc el1_sysregs_context_save_post_ops
-
-/* -----------------------------------------------------
- * The following function strictly follows the AArch64
- * PCS to use x9-x17 (temporary caller-saved registers)
* to restore EL1 system register context. It assumes
* that 'x0' is pointing to a 'el1_sys_regs' structure
* from where the register context will be restored
@@ -193,6 +162,9 @@ func el1_sysregs_context_restore
msr contextidr_el1, x17
msr vbar_el1, x9
+ ldr x10, [x0, #CTX_PMCR_EL0]
+ msr pmcr_el0, x10
+
/* Restore AArch32 system registers if the build has instructed so */
#if CTX_INCLUDE_AARCH32_REGS
ldp x11, x12, [x0, #CTX_SPSR_ABT]
@@ -206,9 +178,6 @@ func el1_sysregs_context_restore
ldp x15, x16, [x0, #CTX_DACR32_EL2]
msr dacr32_el2, x15
msr ifsr32_el2, x16
-
- ldr x17, [x0, #CTX_FP_FPEXC32_EL2]
- msr fpexc32_el2, x17
#endif
/* Restore NS timer registers if the build has instructed so */
#if NS_TIMER_SWITCH
@@ -269,6 +238,10 @@ func fpregs_context_save
mrs x10, fpcr
str x10, [x0, #CTX_FP_FPCR]
+#if CTX_INCLUDE_AARCH32_REGS
+ mrs x11, fpexc32_el2
+ str x11, [x0, #CTX_FP_FPEXC32_EL2]
+#endif
ret
endfunc fpregs_context_save
@@ -312,6 +285,10 @@ func fpregs_context_restore
ldr x10, [x0, #CTX_FP_FPCR]
msr fpcr, x10
+#if CTX_INCLUDE_AARCH32_REGS
+ ldr x11, [x0, #CTX_FP_FPEXC32_EL2]
+ msr fpexc32_el2, x11
+#endif
/*
* No explicit ISB required here as ERET to
* switch to secure EL1 or non-secure world
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 3d26056a..c6c2249a 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -4,6 +4,7 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <amu.h>
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
@@ -13,8 +14,11 @@
#include <interrupt_mgmt.h>
#include <platform.h>
#include <platform_def.h>
+#include <pubsub_events.h>
#include <smcc_helpers.h>
+#include <spe.h>
#include <string.h>
+#include <sve.h>
#include <utils.h>
@@ -58,7 +62,7 @@ void cm_init(void)
static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
unsigned int security_state;
- uint32_t scr_el3;
+ uint32_t scr_el3, pmcr_el0;
el3_state_t *state;
gp_regs_t *gp_regs;
unsigned long sctlr_elx;
@@ -164,11 +168,35 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
/*
* Store the initialised SCTLR_EL1 value in the cpu_context - SCTLR_EL2
- * and other EL2 resgisters are set up by cm_preapre_ns_entry() as they
+ * and other EL2 registers are set up by cm_prepare_el3_exit() as they
* are not part of the stored cpu_context.
*/
write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
+ if (security_state == SECURE) {
+ /*
+ * Initialise PMCR_EL0 for secure context only, setting all
+ * fields rather than relying on hw. Some fields are
+ * architecturally UNKNOWN on reset.
+ *
+ * PMCR_EL0.LC: Set to one so that cycle counter overflow, that
+ * is recorded in PMOVSCLR_EL0[31], occurs on the increment
+ * that changes PMCCNTR_EL0[63] from 1 to 0.
+ *
+ * PMCR_EL0.DP: Set to one so that the cycle counter,
+ * PMCCNTR_EL0 does not count when event counting is prohibited.
+ *
+ * PMCR_EL0.X: Set to zero to disable export of events.
+ *
+ * PMCR_EL0.D: Set to zero so that, when enabled, PMCCNTR_EL0
+ * counts on every clock cycle.
+ */
+ pmcr_el0 = ((PMCR_EL0_RESET_VAL | PMCR_EL0_LC_BIT
+ | PMCR_EL0_DP_BIT)
+ & ~(PMCR_EL0_X_BIT | PMCR_EL0_D_BIT));
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_PMCR_EL0, pmcr_el0);
+ }
+
/* Populate EL3 state so that we've the right context before doing ERET */
state = get_el3state_ctx(ctx);
write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
@@ -184,6 +212,28 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
}
/*******************************************************************************
+ * Enable architecture extensions on first entry to Non-secure world.
+ * When EL2 is implemented but unused, `el2_unused` is non-zero; otherwise
+ * it is zero.
+ ******************************************************************************/
+static void enable_extensions_nonsecure(int el2_unused)
+{
+#if IMAGE_BL31
+#if ENABLE_SPE_FOR_LOWER_ELS
+ spe_enable(el2_unused);
+#endif
+
+#if ENABLE_AMU
+ amu_enable(el2_unused);
+#endif
+
+#if ENABLE_SVE_FOR_NS
+ sve_enable(el2_unused);
+#endif
+#endif
+}
+
+/*******************************************************************************
* The following function initializes the cpu_context for a CPU specified by
* its `cpu_idx` for first use, and sets the initial entrypoint state as
* specified by the entry_point_info structure.
@@ -220,6 +270,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
{
uint32_t sctlr_elx, scr_el3, mdcr_el2;
cpu_context_t *ctx = cm_get_context(security_state);
+ int el2_unused = 0;
assert(ctx);
@@ -233,6 +284,8 @@ void cm_prepare_el3_exit(uint32_t security_state)
sctlr_elx |= SCTLR_EL2_RES1;
write_sctlr_el2(sctlr_elx);
} else if (EL_IMPLEMENTED(2)) {
+ el2_unused = 1;
+
/*
* EL2 present but unused, need to disable safely.
* SCTLR_EL2 can be ignored in this case.
@@ -315,13 +368,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
* relying on hw. Some fields are architecturally
* UNKNOWN on reset.
*
- * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
- * profiling controls to EL2.
- *
- * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in non-secure
- * state. Accesses to profiling buffer controls at
- * non-secure EL1 are not trapped to EL2.
- *
* MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and
* EL1 System register accesses to the Debug ROM
* registers are not trapped to EL2.
@@ -358,22 +404,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
| MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT
| MDCR_EL2_TPMCR_BIT));
-#if ENABLE_SPE_FOR_LOWER_ELS
- uint64_t id_aa64dfr0_el1;
-
- /* Detect if SPE is implemented */
- id_aa64dfr0_el1 = read_id_aa64dfr0_el1() >>
- ID_AA64DFR0_PMS_SHIFT;
- if ((id_aa64dfr0_el1 & ID_AA64DFR0_PMS_MASK) == 1) {
- /*
- * Make sure traps to EL2 are not generated if
- * EL2 is implemented but not used.
- */
- mdcr_el2 &= ~MDCR_EL2_TPMS;
- mdcr_el2 |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
- }
-#endif
-
write_mdcr_el2(mdcr_el2);
/*
@@ -395,11 +425,11 @@ void cm_prepare_el3_exit(uint32_t security_state)
write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
~(CNTHP_CTL_ENABLE_BIT));
}
+ enable_extensions_nonsecure(el2_unused);
}
- el1_sysregs_context_restore(get_sysregs_ctx(ctx));
-
- cm_set_next_context(ctx);
+ cm_el1_sysregs_context_restore(security_state);
+ cm_set_next_eret_context(security_state);
}
/*******************************************************************************
@@ -415,7 +445,13 @@ void cm_el1_sysregs_context_save(uint32_t security_state)
assert(ctx);
el1_sysregs_context_save(get_sysregs_ctx(ctx));
- el1_sysregs_context_save_post_ops();
+
+#if IMAGE_BL31
+ if (security_state == SECURE)
+ PUBLISH_EVENT(cm_exited_secure_world);
+ else
+ PUBLISH_EVENT(cm_exited_normal_world);
+#endif
}
void cm_el1_sysregs_context_restore(uint32_t security_state)
@@ -426,6 +462,13 @@ void cm_el1_sysregs_context_restore(uint32_t security_state)
assert(ctx);
el1_sysregs_context_restore(get_sysregs_ctx(ctx));
+
+#if IMAGE_BL31
+ if (security_state == SECURE)
+ PUBLISH_EVENT(cm_entering_secure_world);
+ else
+ PUBLISH_EVENT(cm_entering_normal_world);
+#endif
}
/*******************************************************************************
diff --git a/lib/extensions/amu/aarch32/amu.c b/lib/extensions/amu/aarch32/amu.c
new file mode 100644
index 00000000..d450bd69
--- /dev/null
+++ b/lib/extensions/amu/aarch32/amu.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <amu.h>
+#include <arch.h>
+#include <arch_helpers.h>
+
+void amu_enable(int el2_unused)
+{
+ uint64_t features;
+
+ features = read_id_pfr0() >> ID_PFR0_AMU_SHIFT;
+ if ((features & ID_PFR0_AMU_MASK) == 1) {
+ if (el2_unused) {
+ uint64_t v;
+
+ /*
+ * Non-secure accesses from EL0 or EL1 to the Activity Monitor
+ * registers do not trap to EL2.
+ */
+ v = read_hcptr();
+ v &= ~TAM_BIT;
+ write_hcptr(v);
+ }
+
+ /* Enable group 0 counters */
+ write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
+ }
+}
diff --git a/lib/extensions/amu/aarch64/amu.c b/lib/extensions/amu/aarch64/amu.c
new file mode 100644
index 00000000..007b3494
--- /dev/null
+++ b/lib/extensions/amu/aarch64/amu.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <amu.h>
+#include <arch.h>
+#include <arch_helpers.h>
+
+void amu_enable(int el2_unused)
+{
+ uint64_t features;
+
+ features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
+ if ((features & ID_AA64PFR0_AMU_MASK) == 1) {
+ uint64_t v;
+
+ if (el2_unused) {
+ /*
+ * CPTR_EL2.TAM: Set to zero so any accesses to
+ * the Activity Monitor registers do not trap to EL2.
+ */
+ v = read_cptr_el2();
+ v &= ~CPTR_EL2_TAM_BIT;
+ write_cptr_el2(v);
+ }
+
+ /*
+ * CPTR_EL3.TAM: Set to zero so that any accesses to
+ * the Activity Monitor registers do not trap to EL3.
+ */
+ v = read_cptr_el3();
+ v &= ~TAM_BIT;
+ write_cptr_el3(v);
+
+ /* Enable group 0 counters */
+ write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
+ }
+}
diff --git a/lib/extensions/spe/spe.c b/lib/extensions/spe/spe.c
new file mode 100644
index 00000000..3b297f21
--- /dev/null
+++ b/lib/extensions/spe/spe.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <pubsub.h>
+
+/*
+ * The assembler does not yet understand the psb csync mnemonic
+ * so use the equivalent hint instruction.
+ */
+#define psb_csync() asm volatile("hint #17")
+
+void spe_enable(int el2_unused)
+{
+ uint64_t features;
+
+ features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+ if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+ uint64_t v;
+
+ if (el2_unused) {
+ /*
+ * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
+ * profiling controls to EL2.
+ *
+ * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in Non-secure
+ * state. Accesses to profiling buffer controls at
+ * Non-secure EL1 are not trapped to EL2.
+ */
+ v = read_mdcr_el2();
+ v &= ~MDCR_EL2_TPMS;
+ v |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
+ write_mdcr_el2(v);
+ }
+
+ /*
+ * MDCR_EL2.NSPB (ARM v8.2): SPE enabled in Non-secure state
+ * and disabled in secure state. Accesses to SPE registers at
+ * S-EL1 generate trap exceptions to EL3.
+ */
+ v = read_mdcr_el3();
+ v |= MDCR_NSPB(MDCR_NSPB_EL1);
+ write_mdcr_el3(v);
+ }
+}
+
+void spe_disable(void)
+{
+ uint64_t features;
+
+ features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+ if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+ uint64_t v;
+
+ /* Drain buffered data */
+ psb_csync();
+ dsbnsh();
+
+ /* Disable profiling buffer */
+ v = read_pmblimitr_el1();
+ v &= ~(1ULL << 0);
+ write_pmblimitr_el1(v);
+ isb();
+ }
+}
+
+static void *spe_drain_buffers_hook(const void *arg)
+{
+ uint64_t features;
+
+ features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+ if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+ /* Drain buffered data */
+ psb_csync();
+ dsbnsh();
+ }
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(cm_entering_secure_world, spe_drain_buffers_hook);
diff --git a/lib/extensions/sve/sve.c b/lib/extensions/sve/sve.c
new file mode 100644
index 00000000..14e51bd8
--- /dev/null
+++ b/lib/extensions/sve/sve.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <pubsub.h>
+#include <sve.h>
+
+static void *disable_sve_hook(const void *arg)
+{
+ uint64_t features;
+
+ features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
+ if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
+ uint64_t cptr;
+
+ /*
+ * Disable SVE, SIMD and FP access for the Secure world.
+ * As the SIMD/FP registers are part of the SVE Z-registers, any
+ * use of SIMD/FP functionality will corrupt the SVE registers.
+ * Therefore it is necessary to prevent use of SIMD/FP support
+ * in the Secure world as well as SVE functionality.
+ */
+ cptr = read_cptr_el3();
+ cptr = (cptr | TFP_BIT) & ~(CPTR_EZ_BIT);
+ write_cptr_el3(cptr);
+
+ /*
+ * No explicit ISB required here as ERET to switch to Secure
+ * world covers it
+ */
+ }
+ return 0;
+}
+
+static void *enable_sve_hook(const void *arg)
+{
+ uint64_t features;
+
+ features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
+ if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
+ uint64_t cptr;
+
+ /*
+ * Enable SVE, SIMD and FP access for the Non-secure world.
+ */
+ cptr = read_cptr_el3();
+ cptr = (cptr | CPTR_EZ_BIT) & ~(TFP_BIT);
+ write_cptr_el3(cptr);
+
+ /*
+ * No explicit ISB required here as ERET to switch to Non-secure
+ * world covers it
+ */
+ }
+ return 0;
+}
+
+void sve_enable(int el2_unused)
+{
+ uint64_t features;
+
+ features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT;
+ if ((features & ID_AA64PFR0_SVE_MASK) == 1) {
+ uint64_t cptr;
+#if CTX_INCLUDE_FPREGS
+ /*
+ * CTX_INCLUDE_FPREGS is not supported on SVE enabled systems.
+ */
+ assert(0);
+#endif
+ /*
+ * Update CPTR_EL3 to enable access to SVE functionality for the
+ * Non-secure world.
+ * NOTE - assumed that CPTR_EL3.TFP is set to allow access to
+ * the SIMD, floating-point and SVE support.
+ *
+ * CPTR_EL3.EZ: Set to 1 to enable access to SVE functionality
+ * in the Non-secure world.
+ */
+ cptr = read_cptr_el3();
+ cptr |= CPTR_EZ_BIT;
+ write_cptr_el3(cptr);
+
+ /*
+ * Need explicit ISB here to guarantee that update to ZCR_ELx
+ * and CPTR_EL2.TZ do not result in trap to EL3.
+ */
+ isb();
+
+ /*
+ * Ensure lower ELs have access to full vector length.
+ */
+ write_zcr_el3(ZCR_EL3_LEN_MASK);
+
+ if (el2_unused) {
+ /*
+ * Update CPTR_EL2 to enable access to SVE functionality
+ * for Non-secure world, EL2 and Non-secure EL1 and EL0.
+ * NOTE - assumed that CPTR_EL2.TFP is set to allow
+ * access to the SIMD, floating-point and SVE support.
+ *
+ * CPTR_EL2.TZ: Set to 0 to enable access to SVE support
+ * for EL2 and Non-secure EL1 and EL0.
+ */
+ cptr = read_cptr_el2();
+ cptr &= ~(CPTR_EL2_TZ_BIT);
+ write_cptr_el2(cptr);
+
+ /*
+ * Ensure lower ELs have access to full vector length.
+ */
+ write_zcr_el2(ZCR_EL2_LEN_MASK);
+ }
+ /*
+ * No explicit ISB required here as ERET to switch to
+ * Non-secure world covers it.
+ */
+ }
+}
+
+SUBSCRIBE_TO_EVENT(cm_exited_normal_world, disable_sve_hook);
+SUBSCRIBE_TO_EVENT(cm_entering_normal_world, enable_sve_hook);
diff --git a/lib/locks/exclusive/aarch32/spinlock.S b/lib/locks/exclusive/aarch32/spinlock.S
index bc77bc9c..9492cc08 100644
--- a/lib/locks/exclusive/aarch32/spinlock.S
+++ b/lib/locks/exclusive/aarch32/spinlock.S
@@ -9,6 +9,17 @@
.globl spin_lock
.globl spin_unlock
+#if ARM_ARCH_AT_LEAST(8, 0)
+/*
+ * According to the ARMv8-A Architecture Reference Manual, "when the global
+ * monitor for a PE changes from Exclusive Access state to Open Access state,
+ * an event is generated.". This applies to both AArch32 and AArch64 modes of
+ * ARMv8-A. As a result, no explicit SEV with unlock is required.
+ */
+#define COND_SEV()
+#else
+#define COND_SEV() sev
+#endif
func spin_lock
mov r2, #1
@@ -27,5 +38,6 @@ endfunc spin_lock
func spin_unlock
mov r1, #0
stl r1, [r0]
+ COND_SEV()
bx lr
endfunc spin_unlock
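For reference, a minimal caller sketch (the lock name and counter are illustrative; spinlock_t and the spin_lock/spin_unlock prototypes are assumed to come from TF-A's spinlock.h):

#include <spinlock.h>

static spinlock_t stats_lock;
static unsigned int wakeup_count;

void count_wakeup(void)
{
	spin_lock(&stats_lock);		/* exclusive load/store loop */
	wakeup_count++;
	spin_unlock(&stats_lock);	/* store-release; on ARMv8 the global
					 * monitor transition generates the
					 * wake-up event, so COND_SEV() above
					 * expands to nothing */
}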
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index 4502c24b..2220a745 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -767,12 +767,12 @@ void psci_warmboot_entrypoint(void)
psci_acquire_pwr_domain_locks(end_pwrlvl,
cpu_idx);
+ psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
+
#if ENABLE_PSCI_STAT
plat_psci_stat_accounting_stop(&state_info);
#endif
- psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
-
/*
* This CPU could be resuming from suspend or it could have just been
* turned on. To distinguish between these 2 cases, we examine the
diff --git a/lib/psci/psci_lib.mk b/lib/psci/psci_lib.mk
index 29080dbb..1d4aac4a 100644
--- a/lib/psci/psci_lib.mk
+++ b/lib/psci/psci_lib.mk
@@ -17,6 +17,7 @@ PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \
lib/psci/psci_main.c \
lib/psci/psci_setup.c \
lib/psci/psci_system_off.c \
+ lib/psci/psci_mem_protect.c \
lib/psci/${ARCH}/psci_helpers.S
ifeq (${ARCH}, aarch64)
diff --git a/lib/psci/psci_main.c b/lib/psci/psci_main.c
index 257479aa..8e41cf02 100644
--- a/lib/psci/psci_main.c
+++ b/lib/psci/psci_main.c
@@ -220,6 +220,23 @@ int psci_affinity_info(u_register_t target_affinity,
if (target_idx == -1)
return PSCI_E_INVALID_PARAMS;
+ /*
+ * Generic management:
+ * Perform cache maintenance ahead of reading the target CPU state to
+ * ensure that the data is not stale.
+ * There is a theoretical edge case where the cache may contain stale
+ * data for the target CPU - this can occur under the following
+ * conditions:
+ * - the target CPU is in another cluster from the current
+ * - the target CPU was the last CPU to shut down on its cluster
+ * - the cluster was removed from coherency as part of the CPU shutdown
+ *
+ * In this case the cache maintenance that was performed as part of the
+ * target CPU's shutdown was not seen by the current CPU's cluster. And
+ * so the cache may contain stale data for the target CPU.
+ */
+ flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+
return psci_get_aff_info_state_by_idx(target_idx);
}
@@ -408,6 +425,15 @@ u_register_t psci_smc_handler(uint32_t smc_fid,
case PSCI_STAT_COUNT_AARCH32:
return psci_stat_count(x1, x2);
#endif
+ case PSCI_MEM_PROTECT:
+ return psci_mem_protect(x1);
+
+ case PSCI_MEM_CHK_RANGE_AARCH32:
+ return psci_mem_chk_range(x1, x2);
+
+ case PSCI_SYSTEM_RESET2_AARCH32:
+ /* We should never return from psci_system_reset2() */
+ return psci_system_reset2(x1, x2);
default:
break;
@@ -445,6 +471,13 @@ u_register_t psci_smc_handler(uint32_t smc_fid,
return psci_stat_count(x1, x2);
#endif
+ case PSCI_MEM_CHK_RANGE_AARCH64:
+ return psci_mem_chk_range(x1, x2);
+
+ case PSCI_SYSTEM_RESET2_AARCH64:
+ /* We should never return from psci_system_reset2() */
+ return psci_system_reset2(x1, x2);
+
default:
break;
}
diff --git a/lib/psci/psci_mem_protect.c b/lib/psci/psci_mem_protect.c
new file mode 100644
index 00000000..fca84e90
--- /dev/null
+++ b/lib/psci/psci_mem_protect.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include <utils.h>
+#include "psci_private.h"
+
+int psci_mem_protect(unsigned int enable)
+{
+ int val;
+
+ assert(psci_plat_pm_ops->read_mem_protect);
+ assert(psci_plat_pm_ops->write_mem_protect);
+
+ if (psci_plat_pm_ops->read_mem_protect(&val) < 0)
+ return PSCI_E_NOT_SUPPORTED;
+ if (psci_plat_pm_ops->write_mem_protect(enable) < 0)
+ return PSCI_E_NOT_SUPPORTED;
+
+ return val != 0;
+}
+
+int psci_mem_chk_range(uintptr_t base, u_register_t length)
+{
+ int ret;
+
+ assert(psci_plat_pm_ops->mem_protect_chk);
+
+ if (length == 0 || check_uptr_overflow(base, length-1))
+ return PSCI_E_DENIED;
+
+ ret = psci_plat_pm_ops->mem_protect_chk(base, length);
+ return (ret < 0) ? PSCI_E_DENIED : PSCI_E_SUCCESS;
+}
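To connect these calls end to end, a platform has to populate the read_mem_protect, write_mem_protect and mem_protect_chk hooks asserted above. The sketch below is illustrative only: the non-volatile flag helpers, the DRAM macros and the region table are assumptions, not an existing platform port.

static mem_region_t plat_prot_regions[] = {
	{ .base = PLAT_DRAM_BASE, .nbytes = PLAT_DRAM_SIZE },	/* assumed macros */
};

static int plat_read_mem_protect(int *enabled)
{
	*enabled = plat_nv_flag_read();		/* hypothetical NV-storage helper */
	return 0;
}

static int plat_write_mem_protect(int enable)
{
	return plat_nv_flag_write(enable != 0);	/* hypothetical NV-storage helper */
}

static int plat_mem_protect_chk(uintptr_t base, u_register_t length)
{
	/* 0 when the range lies fully inside protectable DRAM, -1 otherwise */
	return mem_region_in_array_chk(plat_prot_regions,
				       ARRAY_SIZE(plat_prot_regions),
				       base, length);
}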
diff --git a/lib/psci/psci_on.c b/lib/psci/psci_on.c
index d3d0e2ff..53b044ec 100644
--- a/lib/psci/psci_on.c
+++ b/lib/psci/psci_on.c
@@ -11,6 +11,7 @@
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
+#include <pubsub_events.h>
#include <stddef.h>
#include "psci_private.h"
@@ -188,6 +189,8 @@ void psci_cpu_on_finish(unsigned int cpu_idx,
if (psci_spd_pm && psci_spd_pm->svc_on_finish)
psci_spd_pm->svc_on_finish(0);
+ PUBLISH_EVENT(psci_cpu_on_finish);
+
/* Populate the mpidr field within the cpu node array */
/* This needs to be done only once */
psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
diff --git a/lib/psci/psci_private.h b/lib/psci/psci_private.h
index da6a20fa..504fb9e4 100644
--- a/lib/psci/psci_private.h
+++ b/lib/psci/psci_private.h
@@ -89,7 +89,9 @@
define_psci_cap(PSCI_NODE_HW_STATE_AARCH64) | \
define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) | \
define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) | \
- define_psci_cap(PSCI_STAT_COUNT_AARCH64))
+ define_psci_cap(PSCI_STAT_COUNT_AARCH64) | \
+ define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64) | \
+ define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64))
/*
* Helper macros to get/set the fields of PSCI per-cpu data.
@@ -258,6 +260,7 @@ void psci_do_pwrup_cache_maintenance(void);
/* Private exported functions from psci_system_off.c */
void __dead2 psci_system_off(void);
void __dead2 psci_system_reset(void);
+int psci_system_reset2(uint32_t reset_type, u_register_t cookie);
/* Private exported functions from psci_stat.c */
void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
@@ -269,4 +272,8 @@ u_register_t psci_stat_residency(u_register_t target_cpu,
u_register_t psci_stat_count(u_register_t target_cpu,
unsigned int power_state);
+/* Private exported functions from psci_mem_protect.c */
+int psci_mem_protect(unsigned int enable);
+int psci_mem_chk_range(uintptr_t base, u_register_t length);
+
#endif /* __PSCI_PRIVATE_H__ */
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
index f70e34da..c00bd94a 100644
--- a/lib/psci/psci_setup.c
+++ b/lib/psci/psci_setup.c
@@ -243,6 +243,13 @@ int psci_setup(const psci_lib_args_t *lib_args)
psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);
if (psci_plat_pm_ops->get_node_hw_state)
psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64);
+ if (psci_plat_pm_ops->read_mem_protect &&
+ psci_plat_pm_ops->write_mem_protect)
+ psci_caps |= define_psci_cap(PSCI_MEM_PROTECT);
+ if (psci_plat_pm_ops->mem_protect_chk)
+ psci_caps |= define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64);
+ if (psci_plat_pm_ops->system_reset2)
+ psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64);
#if ENABLE_PSCI_STAT
psci_caps |= define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
@@ -259,8 +266,10 @@ int psci_setup(const psci_lib_args_t *lib_args)
******************************************************************************/
void psci_arch_setup(void)
{
+#if ARM_ARCH_MAJOR > 7 || defined(ARMV7_SUPPORTS_GENERIC_TIMER)
/* Program the counter frequency */
write_cntfrq_el0(plat_get_syscnt_freq2());
+#endif
/* Initialize the cpu_ops pointer. */
init_cpu_ops();
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index 40ecdeea..d9490672 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -37,6 +37,11 @@ static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
*/
psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
+#if ENABLE_PSCI_STAT
+ plat_psci_stat_accounting_stop(&state_info);
+ psci_stats_update_pwr_up(end_pwrlvl, &state_info);
+#endif
+
/*
* Plat. management: Allow the platform to do operations
* on waking up from retention.
@@ -236,10 +241,6 @@ exit:
PMF_NO_CACHE_MAINT);
#endif
-#if ENABLE_PSCI_STAT
- plat_psci_stat_accounting_start(state_info);
-#endif
-
/*
* We will reach here if only retention/standby states have been
* requested at multiple power levels. This means that the cpu
@@ -247,11 +248,6 @@ exit:
*/
wfi();
-#if ENABLE_PSCI_STAT
- plat_psci_stat_accounting_stop(state_info);
- psci_stats_update_pwr_up(end_pwrlvl, state_info);
-#endif
-
#if ENABLE_RUNTIME_INSTRUMENTATION
PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
RT_INSTR_EXIT_HW_LOW_PWR,
diff --git a/lib/psci/psci_system_off.c b/lib/psci/psci_system_off.c
index ef5d3d1d..13e9f4aa 100644
--- a/lib/psci/psci_system_off.c
+++ b/lib/psci/psci_system_off.c
@@ -49,3 +49,33 @@ void __dead2 psci_system_reset(void)
/* This function does not return. We should never get here */
}
+
+int psci_system_reset2(uint32_t reset_type, u_register_t cookie)
+{
+ int is_vendor;
+
+ psci_print_power_domain_map();
+
+ assert(psci_plat_pm_ops->system_reset2);
+
+ is_vendor = (reset_type >> PSCI_RESET2_TYPE_VENDOR_SHIFT) & 1;
+ if (!is_vendor) {
+ /*
+ * Only WARM_RESET is allowed for architectural type resets.
+ */
+ if (reset_type != PSCI_RESET2_SYSTEM_WARM_RESET)
+ return PSCI_E_INVALID_PARAMS;
+ if (psci_plat_pm_ops->write_mem_protect &&
+ psci_plat_pm_ops->write_mem_protect(0) < 0) {
+ return PSCI_E_NOT_SUPPORTED;
+ }
+ }
+
+ /* Notify the Secure Payload Dispatcher */
+ if (psci_spd_pm && psci_spd_pm->svc_system_reset) {
+ psci_spd_pm->svc_system_reset();
+ }
+ console_flush();
+
+ return psci_plat_pm_ops->system_reset2(is_vendor, reset_type, cookie);
+}
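As dispatched from psci_smc_handler above, the architectural path expects the vendor bit (bit 31) of reset_type to be clear. A minimal, illustrative call (the wrapper name is an assumption):

static int request_warm_reset(void)
{
	/* reset_type = PSCI_RESET2_SYSTEM_WARM_RESET (vendor bit clear);
	 * the cookie is unused for the architectural warm reset. */
	return psci_system_reset2(PSCI_RESET2_SYSTEM_WARM_RESET, 0);
}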
diff --git a/lib/utils/mem_region.c b/lib/utils/mem_region.c
new file mode 100644
index 00000000..31c6231f
--- /dev/null
+++ b/lib/utils/mem_region.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <utils.h>
+
+/*
+ * All the regions defined in mem_region_t must have the following properties
+ *
+ * - Any contiguous regions must be merged into a single entry.
+ * - The number of bytes of each region must be greater than zero.
+ * - The calculation of the highest address within the region (base + nbytes-1)
+ * doesn't produce an overflow.
+ *
+ * These conditions must be fulfilled by the caller and they aren't checked
+ * at runtime.
+ */
+
+/*
+ * zero_normalmem all the regions defined in tbl.
+ * It assumes that MMU is enabled and the memory is Normal memory.
+ * tbl must be a valid pointer to a memory mem_region_t array,
+ * nregions is the size of the array.
+ */
+void clear_mem_regions(mem_region_t *tbl, size_t nregions)
+{
+ size_t i;
+
+ assert(tbl);
+ assert(nregions > 0);
+
+ for (i = 0; i < nregions; i++) {
+ assert(tbl->nbytes > 0);
+ assert(!check_uptr_overflow(tbl->base, tbl->nbytes-1));
+ zero_normalmem((void *) (tbl->base), tbl->nbytes);
+ tbl++;
+ }
+}
+
+/*
+ * This function checks that the memory region [addr, addr + nbytes - 1] is
+ * totally covered by one of the regions defined in tbl.
+ * tbl must be a valid pointer to a mem_region_t array, nregions
+ * is the size of the array and the region described by addr and nbytes must
+ * not generate an overflow.
+ * Returns:
+ * -1 means that the region is not covered by any of the regions
+ * described in tbl.
+ * 0 the region [addr, addr + nbytes - 1] is covered by one of the regions
+ * in tbl
+ */
+int mem_region_in_array_chk(mem_region_t *tbl, size_t nregions,
+ uintptr_t addr, size_t nbytes)
+{
+ uintptr_t region_start, region_end, start, end;
+ size_t i;
+
+ assert(tbl);
+ assert(nbytes > 0);
+ assert(!check_uptr_overflow(addr, nbytes-1));
+
+ region_start = addr;
+ region_end = addr + (nbytes - 1);
+ for (i = 0; i < nregions; i++) {
+ assert(tbl->nbytes > 0);
+ assert(!check_uptr_overflow(tbl->base, tbl->nbytes-1));
+ start = tbl->base;
+ end = start + (tbl->nbytes - 1);
+ if (region_start >= start && region_end <= end)
+ return 0;
+ tbl++;
+ }
+
+ return -1;
+}
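A short usage sketch of the two helpers (the DRAM base and size values are placeholders, not a real memory map; the declarations are assumed to live in utils.h, which the file above includes):

#include <utils.h>

static mem_region_t dram_regions[] = {
	{ .base = 0x80000000UL, .nbytes = 0x40000000UL },	/* 1 GiB, placeholder */
};

static void wipe_ns_dram(void)
{
	/* Zero every region; requires the MMU on and Normal memory. */
	clear_mem_regions(dram_regions, ARRAY_SIZE(dram_regions));

	/* Returns 0: this 4 KiB page lies entirely inside the region above. */
	(void)mem_region_in_array_chk(dram_regions, ARRAY_SIZE(dram_regions),
				      0x80001000UL, 0x1000UL);
}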
diff --git a/lib/xlat_tables/aarch32/xlat_tables.c b/lib/xlat_tables/aarch32/xlat_tables.c
index c7e34f20..720d4461 100644
--- a/lib/xlat_tables/aarch32/xlat_tables.c
+++ b/lib/xlat_tables/aarch32/xlat_tables.c
@@ -13,6 +13,10 @@
#include <xlat_tables.h>
#include "../xlat_tables_private.h"
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
+#error ARMv7 target does not support LPAE MMU descriptors
+#endif
+
#define XLAT_TABLE_LEVEL_BASE \
GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index 2ddf8cba..eabc3df9 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -60,7 +60,10 @@ static unsigned long long calc_physical_addr_size_bits(
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
- PARANGE_0101
+ PARANGE_0101,
+#if ARM_ARCH_AT_LEAST(8, 2)
+ PARANGE_0110,
+#endif
};
static unsigned long long get_max_supported_pa(void)
@@ -182,7 +185,11 @@ void init_xlat_tables(void)
/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
- (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
+ /*
+ * TCR_EL1.EPD1: Disable translation table walk for addresses
+ * that are translated using TTBR1_EL1.
+ */
+ TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index e66b9275..fc7ca46a 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -14,6 +14,10 @@
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
+#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
+#error ARMv7 target does not support LPAE MMU descriptors
+#endif
+
#if ENABLE_ASSERTIONS
unsigned long long xlat_arch_get_max_supported_pa(void)
{
@@ -22,13 +26,11 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
}
#endif /* ENABLE_ASSERTIONS*/
-int is_mmu_enabled(void)
+int is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
{
return (read_sctlr() & SCTLR_M_BIT) != 0;
}
-#if PLAT_XLAT_TABLES_DYNAMIC
-
void xlat_arch_tlbi_va(uintptr_t va)
{
/*
@@ -40,6 +42,17 @@ void xlat_arch_tlbi_va(uintptr_t va)
tlbimvaais(TLBI_ADDR(va));
}
+void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime __unused)
+{
+ /*
+ * Ensure the translation table write has drained into memory before
+ * invalidating the TLB entry.
+ */
+ dsbishst();
+
+ tlbimvaais(TLBI_ADDR(va));
+}
+
void xlat_arch_tlbi_va_sync(void)
{
/* Invalidate all entries from branch predictors. */
@@ -66,8 +79,6 @@ void xlat_arch_tlbi_va_sync(void)
isb();
}
-#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-
int xlat_arch_current_el(void)
{
/*
@@ -77,11 +88,6 @@ int xlat_arch_current_el(void)
return 3;
}
-uint64_t xlat_arch_get_xn_desc(int el __unused)
-{
- return UPPER_ATTRS(XN);
-}
-
/*******************************************************************************
* Function for enabling the MMU in Secure PL1, assuming that the page tables
* have already been created.
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
new file mode 100644
index 00000000..509395d8
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch_private.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
+#define __XLAT_TABLES_ARCH_PRIVATE_H__
+
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+/*
+ * Return the execute-never mask that will prevent instruction fetch in the
+ * given translation regime.
+ */
+static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime __unused)
+{
+ return UPPER_ATTRS(XN);
+}
+
+#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 097e815c..aa5b9e54 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -10,21 +10,13 @@
#include <bl_common.h>
#include <cassert.h>
#include <common_def.h>
-#include <platform_def.h>
#include <sys/types.h>
#include <utils.h>
#include <utils_def.h>
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
-#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
-# define IMAGE_EL 3
-#else
-# define IMAGE_EL 1
-#endif
-
-static unsigned long long calc_physical_addr_size_bits(
- unsigned long long max_addr)
+unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
{
/* Physical address can't exceed 48 bits */
assert((max_addr & ADDR_MASK_48_TO_63) == 0);
@@ -56,7 +48,10 @@ static unsigned long long calc_physical_addr_size_bits(
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
- PARANGE_0101
+ PARANGE_0101,
+#if ARM_ARCH_AT_LEAST(8, 2)
+ PARANGE_0110,
+#endif
};
unsigned long long xlat_arch_get_max_supported_pa(void)
@@ -71,20 +66,31 @@ unsigned long long xlat_arch_get_max_supported_pa(void)
}
#endif /* ENABLE_ASSERTIONS*/
-int is_mmu_enabled(void)
+int is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
+{
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() >= 1);
+ return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
+ } else {
+ assert(ctx->xlat_regime == EL3_REGIME);
+ assert(xlat_arch_current_el() >= 3);
+ return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
+ }
+}
+
+
+void xlat_arch_tlbi_va(uintptr_t va)
{
#if IMAGE_EL == 1
assert(IS_IN_EL(1));
- return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
+ xlat_arch_tlbi_va_regime(va, EL1_EL0_REGIME);
#elif IMAGE_EL == 3
assert(IS_IN_EL(3));
- return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
+ xlat_arch_tlbi_va_regime(va, EL3_REGIME);
#endif
}
-#if PLAT_XLAT_TABLES_DYNAMIC
-
-void xlat_arch_tlbi_va(uintptr_t va)
+void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime)
{
/*
* Ensure the translation table write has drained into memory before
@@ -92,13 +98,21 @@ void xlat_arch_tlbi_va(uintptr_t va)
*/
dsbishst();
-#if IMAGE_EL == 1
- assert(IS_IN_EL(1));
- tlbivaae1is(TLBI_ADDR(va));
-#elif IMAGE_EL == 3
- assert(IS_IN_EL(3));
- tlbivae3is(TLBI_ADDR(va));
-#endif
+ /*
+ * This function only supports invalidation of TLB entries for the EL3
+ * and EL1&0 translation regimes.
+ *
+ * Also, it is architecturally UNDEFINED to invalidate TLBs of a higher
+ * exception level (see section D4.9.2 of the ARM ARM rev B.a).
+ */
+ if (xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() >= 1);
+ tlbivaae1is(TLBI_ADDR(va));
+ } else {
+ assert(xlat_regime == EL3_REGIME);
+ assert(xlat_arch_current_el() >= 3);
+ tlbivae3is(TLBI_ADDR(va));
+ }
}
void xlat_arch_tlbi_va_sync(void)
@@ -124,8 +138,6 @@ void xlat_arch_tlbi_va_sync(void)
isb();
}
-#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-
int xlat_arch_current_el(void)
{
int el = GET_EL(read_CurrentEl());
@@ -135,16 +147,6 @@ int xlat_arch_current_el(void)
return el;
}
-uint64_t xlat_arch_get_xn_desc(int el)
-{
- if (el == 3) {
- return UPPER_ATTRS(XN);
- } else {
- assert(el == 1);
- return UPPER_ATTRS(PXN);
- }
-}
-
/*******************************************************************************
* Macro generating the code for the function enabling the MMU in the given
* exception level, assuming that the pagetables have already been created.
@@ -252,11 +254,15 @@ void enable_mmu_arch(unsigned int flags,
* It is safer to restrict the max physical address accessible by the
* hardware as much as possible.
*/
- unsigned long long tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
+ unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);
#if IMAGE_EL == 1
assert(IS_IN_EL(1));
- tcr |= tcr_ps_bits << TCR_EL1_IPS_SHIFT;
+ /*
+ * TCR_EL1.EPD1: Disable translation table walk for addresses that are
+ * translated using TTBR1_EL1.
+ */
+ tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
enable_mmu_internal_el1(flags, mair, tcr, ttbr);
#elif IMAGE_EL == 3
assert(IS_IN_EL(3));
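For reference, pa_range_bits_arr[] is indexed by xlat_arch_get_max_supported_pa() with the PARange field of ID_AA64MMFR0_EL1; that function body sits outside this hunk, so the sketch below is only an assumption about how the array is consumed, using accessor and mask names in the style of this codebase:

/* Hedged sketch: how pa_range_bits_arr[] is typically indexed. */
static unsigned long long demo_max_supported_pa(void)
{
	/* ID_AA64MMFR0_EL1.PARange (accessor/mask names are assumptions). */
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
				ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* Values above the last known encoding are reserved. */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	/* Turn the encoded width into the highest addressable physical address. */
	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}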
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h b/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
new file mode 100644
index 00000000..d201590a
--- /dev/null
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch_private.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
+#define __XLAT_TABLES_ARCH_PRIVATE_H__
+
+#include <assert.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+/*
+ * Return the execute-never mask that will prevent instruction fetch at all ELs
+ * that are part of the given translation regime.
+ */
+static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime)
+{
+ if (regime == EL1_EL0_REGIME) {
+ return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
+ } else {
+ assert(regime == EL3_REGIME);
+ return UPPER_ATTRS(XN);
+ }
+}
+
+#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
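These two headers let the generic table-building code stay regime-agnostic: for the EL1&0 regime the mask carries both UXN and PXN, for EL3 a single XN bit. A minimal sketch of how the mask ends up in the upper attributes of a descriptor under construction:

/* Sketch: mark an in-flight block/page descriptor execute-never. */
static uint64_t demo_set_xn(uint64_t desc, xlat_regime_t regime)
{
	/* EL1&0 regime: UXN | PXN; EL3 regime: XN (see the headers above). */
	return desc | xlat_arch_regime_get_xn_desc(regime);
}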
diff --git a/lib/xlat_tables_v2/xlat_tables.mk b/lib/xlat_tables_v2/xlat_tables.mk
index b94ce5d0..06dd844a 100644
--- a/lib/xlat_tables_v2/xlat_tables.mk
+++ b/lib/xlat_tables_v2/xlat_tables.mk
@@ -7,3 +7,5 @@
XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
${ARCH}/xlat_tables_arch.c \
xlat_tables_internal.c)
+
+INCLUDES += -Ilib/xlat_tables_v2/${ARCH}
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index 47929906..0acfacbf 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -14,7 +14,7 @@
#include <string.h>
#include <types.h>
#include <utils.h>
-#include <xlat_tables_arch.h>
+#include <xlat_tables_arch_private.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>
@@ -112,9 +112,11 @@ static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-/* Returns a block/page table descriptor for the given level and attributes. */
-static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
- int level, uint64_t execute_never_mask)
+/*
+ * Returns a block/page table descriptor for the given level and attributes.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, mmap_attr_t attr,
+ unsigned long long addr_pa, int level)
{
uint64_t desc;
int mem_type;
@@ -133,9 +135,28 @@ static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
* Deduce other fields of the descriptor based on the MT_NS and MT_RW
* memory region attributes.
*/
+ desc |= LOWER_ATTRS(ACCESS_FLAG);
+
desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
- desc |= LOWER_ATTRS(ACCESS_FLAG);
+
+ /*
+	 * Do not allow unprivileged access when the mapping is for a privileged
+	 * EL. For translation regimes that do not have mappings for lower
+	 * exception levels, set AP[1] to AP_NO_ACCESS_UNPRIVILEGED.
+ */
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ if (attr & MT_USER) {
+ /* EL0 mapping requested, so we give User access */
+ desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
+ } else {
+ /* EL1 mapping requested, no User access granted */
+ desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
+ }
+ } else {
+ assert(ctx->xlat_regime == EL3_REGIME);
+ desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
+ }
/*
* Deduce shareability domain and executability of the memory region
@@ -156,7 +177,7 @@ static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
* fetch, which could be an issue if this memory region
* corresponds to a read-sensitive peripheral.
*/
- desc |= execute_never_mask;
+ desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
} else { /* Normal memory */
/*
@@ -171,10 +192,13 @@ static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
* translation table.
*
* For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
- * attribute to figure out the value of the XN bit.
+ * attribute to figure out the value of the XN bit. The actual
+ * XN bit(s) to set in the descriptor depends on the context's
+ * translation regime and the policy applied in
+ * xlat_arch_regime_get_xn_desc().
*/
if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
- desc |= execute_never_mask;
+ desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
}
if (mem_type == MT_MEMORY) {
@@ -314,7 +338,7 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
if (action == ACTION_WRITE_BLOCK_ENTRY) {
table_base[table_idx] = INVALID_DESC;
- xlat_arch_tlbi_va(table_idx_va);
+ xlat_arch_tlbi_va_regime(table_idx_va, ctx->xlat_regime);
} else if (action == ACTION_RECURSE_INTO_TABLE) {
@@ -330,7 +354,8 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
*/
if (xlat_table_is_empty(ctx, subtable)) {
table_base[table_idx] = INVALID_DESC;
- xlat_arch_tlbi_va(table_idx_va);
+ xlat_arch_tlbi_va_regime(table_idx_va,
+ ctx->xlat_regime);
}
} else {
@@ -417,7 +442,8 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
* descriptors. If not, create a table instead.
*/
if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
- (level < MIN_LVL_BLOCK_DESC))
+ (level < MIN_LVL_BLOCK_DESC) ||
+ (mm->granularity < XLAT_BLOCK_SIZE(level)))
return ACTION_CREATE_NEW_TABLE;
else
return ACTION_WRITE_BLOCK_ENTRY;
@@ -535,8 +561,7 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
if (action == ACTION_WRITE_BLOCK_ENTRY) {
table_base[table_idx] =
- xlat_desc(mm->attr, table_idx_pa, level,
- ctx->execute_never_mask);
+ xlat_desc(ctx, mm->attr, table_idx_pa, level);
} else if (action == ACTION_CREATE_NEW_TABLE) {
@@ -590,9 +615,10 @@ void print_mmap(mmap_region_t *const mmap)
mmap_region_t *mm = mmap;
while (mm->size) {
- tf_printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
+ tf_printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x",
(void *)mm->base_va, mm->base_pa,
mm->size, mm->attr);
+ tf_printf(" granularity:0x%zx\n", mm->granularity);
++mm;
};
tf_printf("\n");
@@ -608,11 +634,13 @@ void print_mmap(mmap_region_t *const mmap)
* ENOMEM: There is not enough memory in the mmap array.
* EPERM: Region overlaps another one in an invalid way.
*/
-static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
- uintptr_t base_va, size_t size,
- mmap_attr_t attr)
+static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
- mmap_region_t *mm = ctx->mmap;
+ unsigned long long base_pa = mm->base_pa;
+ uintptr_t base_va = mm->base_va;
+ size_t size = mm->size;
+ size_t granularity = mm->granularity;
+
unsigned long long end_pa = base_pa + size - 1;
uintptr_t end_va = base_va + size - 1;
@@ -620,6 +648,12 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
!IS_PAGE_ALIGNED(size))
return -EINVAL;
+ if ((granularity != XLAT_BLOCK_SIZE(1)) &&
+ (granularity != XLAT_BLOCK_SIZE(2)) &&
+ (granularity != XLAT_BLOCK_SIZE(3))) {
+ return -EINVAL;
+ }
+
/* Check for overflows */
if ((base_pa > end_pa) || (base_va > end_va))
return -ERANGE;
@@ -630,22 +664,27 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
return -ERANGE;
- /* Check that there is space in the mmap array */
+ /* Check that there is space in the ctx->mmap array */
if (ctx->mmap[ctx->mmap_num - 1].size != 0)
return -ENOMEM;
/* Check for PAs and VAs overlaps with all other regions */
- for (mm = ctx->mmap; mm->size; ++mm) {
+ for (mmap_region_t *mm_cursor = ctx->mmap;
+ mm_cursor->size; ++mm_cursor) {
- uintptr_t mm_end_va = mm->base_va + mm->size - 1;
+ uintptr_t mm_cursor_end_va = mm_cursor->base_va
+ + mm_cursor->size - 1;
/*
* Check if one of the regions is completely inside the other
* one.
*/
int fully_overlapped_va =
- ((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
- ((mm->base_va >= base_va) && (mm_end_va <= end_va));
+ ((base_va >= mm_cursor->base_va) &&
+ (end_va <= mm_cursor_end_va)) ||
+
+ ((mm_cursor->base_va >= base_va) &&
+ (mm_cursor_end_va <= end_va));
/*
* Full VA overlaps are only allowed if both regions are
@@ -656,13 +695,16 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
if (fully_overlapped_va) {
#if PLAT_XLAT_TABLES_DYNAMIC
- if ((attr & MT_DYNAMIC) || (mm->attr & MT_DYNAMIC))
+ if ((mm->attr & MT_DYNAMIC) ||
+ (mm_cursor->attr & MT_DYNAMIC))
return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
- if ((mm->base_va - mm->base_pa) != (base_va - base_pa))
+ if ((mm_cursor->base_va - mm_cursor->base_pa) !=
+ (base_va - base_pa))
return -EPERM;
- if ((base_va == mm->base_va) && (size == mm->size))
+ if ((base_va == mm_cursor->base_va) &&
+ (size == mm_cursor->size))
return -EPERM;
} else {
@@ -672,13 +714,15 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
* Partial overlaps are not allowed
*/
- unsigned long long mm_end_pa =
- mm->base_pa + mm->size - 1;
+ unsigned long long mm_cursor_end_pa =
+ mm_cursor->base_pa + mm_cursor->size - 1;
int separated_pa =
- (end_pa < mm->base_pa) || (base_pa > mm_end_pa);
+ (end_pa < mm_cursor->base_pa) ||
+ (base_pa > mm_cursor_end_pa);
int separated_va =
- (end_va < mm->base_va) || (base_va > mm_end_va);
+ (end_va < mm_cursor->base_va) ||
+ (base_va > mm_cursor_end_va);
if (!(separated_va && separated_pa))
return -EPERM;
@@ -703,8 +747,7 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
/* Static regions must be added before initializing the xlat tables. */
assert(!ctx->initialized);
- ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size,
- mm->attr);
+ ret = mmap_add_region_check(ctx, mm);
if (ret != 0) {
ERROR("mmap_add_region_check() failed. error %d\n", ret);
assert(0);
@@ -799,7 +842,10 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
if (!mm->size)
return 0;
- ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size, mm->attr | MT_DYNAMIC);
+ /* Now this region is a dynamic one */
+ mm->attr |= MT_DYNAMIC;
+
+ ret = mmap_add_region_check(ctx, mm);
if (ret != 0)
return ret;
@@ -808,14 +854,17 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
* static regions in mmap_add_region_ctx().
*/
- while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va && mm_cursor->size)
+ while ((mm_cursor->base_va + mm_cursor->size - 1)
+ < end_va && mm_cursor->size)
++mm_cursor;
- while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va) && (mm_cursor->size < mm->size))
+ while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
+ && (mm_cursor->size < mm->size))
++mm_cursor;
/* Make room for new region by moving other regions up by one place */
- memmove(mm_cursor + 1, mm_cursor, (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+ memmove(mm_cursor + 1, mm_cursor,
+ (uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
	 * Check we haven't lost the empty sentinel from the end of the array.
@@ -825,19 +874,20 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
assert(mm_last->size == 0);
*mm_cursor = *mm;
- mm_cursor->attr |= MT_DYNAMIC;
/*
* Update the translation tables if the xlat tables are initialized. If
* not, this region will be mapped when they are initialized.
*/
if (ctx->initialized) {
- uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor, 0, ctx->base_table,
- ctx->base_table_entries, ctx->base_level);
+ uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor,
+ 0, ctx->base_table, ctx->base_table_entries,
+ ctx->base_level);
/* Failed to map, remove mmap entry, unmap and return error. */
if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
- memmove(mm_cursor, mm_cursor + 1, (uintptr_t)mm_last - (uintptr_t)mm_cursor);
+ memmove(mm_cursor, mm_cursor + 1,
+ (uintptr_t)mm_last - (uintptr_t)mm_cursor);
/*
* Check if the mapping function actually managed to map
@@ -847,8 +897,8 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
return -ENOMEM;
/*
- * Something went wrong after mapping some table entries,
- * undo every change done up to this point.
+ * Something went wrong after mapping some table
+ * entries, undo every change done up to this point.
*/
mmap_region_t unmap_mm = {
.base_pa = 0,
@@ -972,9 +1022,10 @@ int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
/* Print the attributes of the specified block descriptor. */
-static void xlat_desc_print(uint64_t desc, uint64_t execute_never_mask)
+static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
{
int mem_type_index = ATTR_INDEX_GET(desc);
+ xlat_regime_t xlat_regime = ctx->xlat_regime;
if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
tf_printf("MEM");
@@ -985,9 +1036,49 @@ static void xlat_desc_print(uint64_t desc, uint64_t execute_never_mask)
tf_printf("DEV");
}
- tf_printf(LOWER_ATTRS(AP_RO) & desc ? "-RO" : "-RW");
+ const char *priv_str = "(PRIV)";
+ const char *user_str = "(USER)";
+
+ /*
+ * Showing Privileged vs Unprivileged only makes sense for EL1&0
+ * mappings
+ */
+ const char *ro_str = "-RO";
+ const char *rw_str = "-RW";
+ const char *no_access_str = "-NOACCESS";
+
+ if (xlat_regime == EL3_REGIME) {
+		/* For EL3, the AP[2] bit is all that matters */
+ tf_printf((desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str);
+ } else {
+ const char *ap_str = (desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str;
+ tf_printf(ap_str);
+ tf_printf(priv_str);
+ /*
+ * EL0 can only have the same permissions as EL1 or no
+ * permissions at all.
+ */
+ tf_printf((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
+ ? ap_str : no_access_str);
+ tf_printf(user_str);
+ }
+
+ const char *xn_str = "-XN";
+ const char *exec_str = "-EXEC";
+
+ if (xlat_regime == EL3_REGIME) {
+		/* For EL3, the XN bit is all that matters */
+ tf_printf(LOWER_ATTRS(XN) & desc ? xn_str : exec_str);
+ } else {
+ /* For EL0 and EL1, we need to know who has which rights */
+ tf_printf(LOWER_ATTRS(PXN) & desc ? xn_str : exec_str);
+ tf_printf(priv_str);
+
+ tf_printf(LOWER_ATTRS(UXN) & desc ? xn_str : exec_str);
+ tf_printf(user_str);
+ }
+
tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
- tf_printf(execute_never_mask & desc ? "-XN" : "-EXEC");
}
static const char * const level_spacers[] = {
@@ -1004,9 +1095,10 @@ static const char *invalid_descriptors_ommited =
* Recursive function that reads the translation tables passed as an argument
* and prints their status.
*/
-static void xlat_tables_print_internal(const uintptr_t table_base_va,
+static void xlat_tables_print_internal(xlat_ctx_t *ctx,
+ const uintptr_t table_base_va,
uint64_t *const table_base, const int table_entries,
- const unsigned int level, const uint64_t execute_never_mask)
+ const unsigned int level)
{
assert(level <= XLAT_TABLE_LEVEL_MAX);
@@ -1065,17 +1157,16 @@ static void xlat_tables_print_internal(const uintptr_t table_base_va,
uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
- xlat_tables_print_internal(table_idx_va,
+ xlat_tables_print_internal(ctx, table_idx_va,
(uint64_t *)addr_inner,
- XLAT_TABLE_ENTRIES, level+1,
- execute_never_mask);
+ XLAT_TABLE_ENTRIES, level + 1);
} else {
tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
level_spacers[level],
(void *)table_idx_va,
(unsigned long long)(desc & TABLE_ADDR_MASK),
level_size);
- xlat_desc_print(desc, execute_never_mask);
+ xlat_desc_print(ctx, desc);
tf_printf("\n");
}
}
@@ -1095,7 +1186,15 @@ static void xlat_tables_print_internal(const uintptr_t table_base_va,
void xlat_tables_print(xlat_ctx_t *ctx)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+ const char *xlat_regime_str;
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ xlat_regime_str = "1&0";
+ } else {
+ assert(ctx->xlat_regime == EL3_REGIME);
+ xlat_regime_str = "3";
+ }
VERBOSE("Translation tables state:\n");
+ VERBOSE(" Xlat regime: EL%s\n", xlat_regime_str);
VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address);
VERBOSE(" Max allowed VA: %p\n", (void *) ctx->va_max_address);
VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa);
@@ -1119,22 +1218,21 @@ void xlat_tables_print(xlat_ctx_t *ctx)
used_page_tables, ctx->tables_num,
ctx->tables_num - used_page_tables);
- xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries,
- ctx->base_level, ctx->execute_never_mask);
+ xlat_tables_print_internal(ctx, 0, ctx->base_table,
+ ctx->base_table_entries, ctx->base_level);
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
}
void init_xlat_tables_ctx(xlat_ctx_t *ctx)
{
- mmap_region_t *mm = ctx->mmap;
-
- assert(!is_mmu_enabled());
+ assert(ctx != NULL);
assert(!ctx->initialized);
+ assert(ctx->xlat_regime == EL3_REGIME || ctx->xlat_regime == EL1_EL0_REGIME);
+ assert(!is_mmu_enabled_ctx(ctx));
- print_mmap(mm);
+ mmap_region_t *mm = ctx->mmap;
- ctx->execute_never_mask =
- xlat_arch_get_xn_desc(xlat_arch_current_el());
+ print_mmap(mm);
/* All tables must be zeroed before mapping any region. */
@@ -1217,3 +1315,348 @@ void enable_mmu_el3(unsigned int flags)
}
#endif /* AARCH32 */
+
+/*
+ * Do a translation table walk to find the block or page descriptor that maps
+ * virtual_addr.
+ *
+ * On success, return the address of the descriptor within the translation
+ * table. Its lookup level is stored in '*out_level'.
+ * On error, return NULL.
+ *
+ * xlat_table_base
+ * Base address for the initial lookup level.
+ * xlat_table_base_entries
+ * Number of entries in the translation table for the initial lookup level.
+ * virt_addr_space_size
+ * Size in bytes of the virtual address space.
+ */
+static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
+ void *xlat_table_base,
+ int xlat_table_base_entries,
+ unsigned long long virt_addr_space_size,
+ int *out_level)
+{
+ unsigned int start_level;
+ uint64_t *table;
+ int entries;
+
+ VERBOSE("%s(%p)\n", __func__, (void *)virtual_addr);
+
+ start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
+ VERBOSE("Starting translation table walk from level %i\n", start_level);
+
+ table = xlat_table_base;
+ entries = xlat_table_base_entries;
+
+ for (unsigned int level = start_level;
+ level <= XLAT_TABLE_LEVEL_MAX;
+ ++level) {
+ int idx;
+ uint64_t desc;
+ uint64_t desc_type;
+
+ VERBOSE("Table address: %p\n", (void *)table);
+
+ idx = XLAT_TABLE_IDX(virtual_addr, level);
+ VERBOSE("Index into level %i table: %i\n", level, idx);
+ if (idx >= entries) {
+ VERBOSE("Invalid address\n");
+ return NULL;
+ }
+
+ desc = table[idx];
+ desc_type = desc & DESC_MASK;
+ VERBOSE("Descriptor at level %i: 0x%llx\n", level,
+ (unsigned long long)desc);
+
+ if (desc_type == INVALID_DESC) {
+ VERBOSE("Invalid entry (memory not mapped)\n");
+ return NULL;
+ }
+
+ if (level == XLAT_TABLE_LEVEL_MAX) {
+ /*
+ * There can't be table entries at the final lookup
+ * level.
+ */
+ assert(desc_type == PAGE_DESC);
+ VERBOSE("Descriptor mapping a memory page (size: 0x%llx)\n",
+ (unsigned long long)XLAT_BLOCK_SIZE(XLAT_TABLE_LEVEL_MAX));
+ *out_level = level;
+ return &table[idx];
+ }
+
+ if (desc_type == BLOCK_DESC) {
+ VERBOSE("Descriptor mapping a memory block (size: 0x%llx)\n",
+ (unsigned long long)XLAT_BLOCK_SIZE(level));
+ *out_level = level;
+ return &table[idx];
+ }
+
+ assert(desc_type == TABLE_DESC);
+ VERBOSE("Table descriptor, continuing xlat table walk...\n");
+ table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+ entries = XLAT_TABLE_ENTRIES;
+ }
+
+ /*
+ * This shouldn't be reached, the translation table walk should end at
+ * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
+ */
+ assert(0);
+
+ return NULL;
+}
+
+
+static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
+ mmap_attr_t *attributes, uint64_t **table_entry,
+ unsigned long long *addr_pa, int *table_level)
+{
+ uint64_t *entry;
+ uint64_t desc;
+ int level;
+ unsigned long long virt_addr_space_size;
+
+ /*
+ * Sanity-check arguments.
+ */
+ assert(ctx != NULL);
+ assert(ctx->initialized);
+ assert(ctx->xlat_regime == EL1_EL0_REGIME || ctx->xlat_regime == EL3_REGIME);
+
+ virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
+ assert(virt_addr_space_size > 0);
+
+ entry = find_xlat_table_entry(base_va,
+ ctx->base_table,
+ ctx->base_table_entries,
+ virt_addr_space_size,
+ &level);
+ if (entry == NULL) {
+ WARN("Address %p is not mapped.\n", (void *)base_va);
+ return -EINVAL;
+ }
+
+ if (addr_pa != NULL) {
+ *addr_pa = *entry & TABLE_ADDR_MASK;
+ }
+
+ if (table_entry != NULL) {
+ *table_entry = entry;
+ }
+
+ if (table_level != NULL) {
+ *table_level = level;
+ }
+
+ desc = *entry;
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+ VERBOSE("Attributes: ");
+ xlat_desc_print(ctx, desc);
+ tf_printf("\n");
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+ assert(attributes != NULL);
+ *attributes = 0;
+
+ int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+
+ if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+ *attributes |= MT_MEMORY;
+ } else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
+ *attributes |= MT_NON_CACHEABLE;
+ } else {
+ assert(attr_index == ATTR_DEVICE_INDEX);
+ *attributes |= MT_DEVICE;
+ }
+
+ int ap2_bit = (desc >> AP2_SHIFT) & 1;
+
+ if (ap2_bit == AP2_RW)
+ *attributes |= MT_RW;
+
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ int ap1_bit = (desc >> AP1_SHIFT) & 1;
+ if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
+ *attributes |= MT_USER;
+ }
+
+ int ns_bit = (desc >> NS_SHIFT) & 1;
+
+ if (ns_bit == 1)
+ *attributes |= MT_NS;
+
+ uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+
+ if ((desc & xn_mask) == xn_mask) {
+ *attributes |= MT_EXECUTE_NEVER;
+ } else {
+ assert((desc & xn_mask) == 0);
+ }
+
+ return 0;
+}
+
+
+int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
+ mmap_attr_t *attributes)
+{
+ return get_mem_attributes_internal(ctx, base_va, attributes,
+ NULL, NULL, NULL);
+}
+
+
+int change_mem_attributes(xlat_ctx_t *ctx,
+ uintptr_t base_va,
+ size_t size,
+ mmap_attr_t attr)
+{
+ /* Note: This implementation isn't optimized. */
+
+ assert(ctx != NULL);
+ assert(ctx->initialized);
+
+ unsigned long long virt_addr_space_size =
+ (unsigned long long)ctx->va_max_address + 1;
+ assert(virt_addr_space_size > 0);
+
+ if (!IS_PAGE_ALIGNED(base_va)) {
+ WARN("%s: Address %p is not aligned on a page boundary.\n",
+ __func__, (void *)base_va);
+ return -EINVAL;
+ }
+
+ if (size == 0) {
+ WARN("%s: Size is 0.\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((size % PAGE_SIZE) != 0) {
+		WARN("%s: Size 0x%zx is not a multiple of the page size.\n",
+ __func__, size);
+ return -EINVAL;
+ }
+
+ if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
+		WARN("%s: Cannot remap memory as both writable and executable.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ int pages_count = size / PAGE_SIZE;
+
+ VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
+ pages_count, (void *)base_va);
+
+ uintptr_t base_va_original = base_va;
+
+ /*
+ * Sanity checks.
+ */
+ for (int i = 0; i < pages_count; ++i) {
+ uint64_t *entry;
+ uint64_t desc;
+ int level;
+
+ entry = find_xlat_table_entry(base_va,
+ ctx->base_table,
+ ctx->base_table_entries,
+ virt_addr_space_size,
+ &level);
+ if (entry == NULL) {
+ WARN("Address %p is not mapped.\n", (void *)base_va);
+ return -EINVAL;
+ }
+
+ desc = *entry;
+
+ /*
+ * Check that all the required pages are mapped at page
+ * granularity.
+ */
+ if (((desc & DESC_MASK) != PAGE_DESC) ||
+ (level != XLAT_TABLE_LEVEL_MAX)) {
+ WARN("Address %p is not mapped at the right granularity.\n",
+ (void *)base_va);
+ WARN("Granularity is 0x%llx, should be 0x%x.\n",
+ (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /*
+ * If the region type is device, it shouldn't be executable.
+ */
+ int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+ if (attr_index == ATTR_DEVICE_INDEX) {
+ if ((attr & MT_EXECUTE_NEVER) == 0) {
+				WARN("Device memory at address %p cannot be made executable.\n",
+ (void *)base_va);
+ return -EINVAL;
+ }
+ }
+
+ base_va += PAGE_SIZE;
+ }
+
+ /* Restore original value. */
+ base_va = base_va_original;
+
+ VERBOSE("%s: All pages are mapped, now changing their attributes...\n",
+ __func__);
+
+ for (int i = 0; i < pages_count; ++i) {
+
+ mmap_attr_t old_attr, new_attr;
+ uint64_t *entry;
+ int level;
+ unsigned long long addr_pa;
+
+ get_mem_attributes_internal(ctx, base_va, &old_attr,
+ &entry, &addr_pa, &level);
+
+ VERBOSE("Old attributes: 0x%x\n", old_attr);
+
+ /*
+ * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
+ * MT_USER/MT_PRIVILEGED are taken into account. Any other
+ * information is ignored.
+ */
+
+ /* Clean the old attributes so that they can be rebuilt. */
+ new_attr = old_attr & ~(MT_RW|MT_EXECUTE_NEVER|MT_USER);
+
+ /*
+ * Update attributes, but filter out the ones this function
+ * isn't allowed to change.
+ */
+ new_attr |= attr & (MT_RW|MT_EXECUTE_NEVER|MT_USER);
+
+ VERBOSE("New attributes: 0x%x\n", new_attr);
+
+ /*
+ * The break-before-make sequence requires writing an invalid
+ * descriptor and making sure that the system sees the change
+ * before writing the new descriptor.
+ */
+ *entry = INVALID_DESC;
+
+ /* Invalidate any cached copy of this mapping in the TLBs. */
+ xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
+
+ /* Ensure completion of the invalidation. */
+ xlat_arch_tlbi_va_sync();
+
+ /* Write new descriptor */
+ *entry = xlat_desc(ctx, new_attr, addr_pa, level);
+
+ base_va += PAGE_SIZE;
+ }
+
+	/* Ensure that the last descriptor written is seen by the system. */
+ dsbish();
+
+ return 0;
+}
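A minimal usage sketch of the two new entry points above, for a caller that owns an initialized context (named ctx here) and wants to turn one already-mapped page into read-only, execute-never memory; the helper name and the page address are placeholders:

/* Sketch: query, then tighten, the attributes of a single mapped page. */
static int demo_harden_page(xlat_ctx_t *ctx, uintptr_t page_va)
{
	mmap_attr_t attr;
	int ret;

	ret = get_mem_attributes(ctx, page_va, &attr);
	if (ret != 0)
		return ret;

	/*
	 * Only MT_RW, MT_EXECUTE_NEVER and MT_USER are honoured by
	 * change_mem_attributes(); memory type and security state are kept.
	 */
	attr &= ~MT_RW;
	attr |= MT_EXECUTE_NEVER;

	return change_mem_attributes(ctx, page_va, PAGE_SIZE, attr);
}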
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index d352583c..79efbebb 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -34,12 +34,24 @@ typedef enum {
MT_DYNAMIC = 1 << MT_DYN_SHIFT
} mmap_priv_attr_t;
+#endif /* PLAT_XLAT_TABLES_DYNAMIC */
+
/*
- * Function used to invalidate all levels of the translation walk for a given
- * virtual address. It must be called for every translation table entry that is
- * modified.
+ * Invalidate all TLB entries that match the given virtual address. This
+ * operation applies to all PEs in the same Inner Shareable domain as the PE
+ * that executes this function. This function must be called for every
+ * translation table entry that is modified.
+ *
+ * xlat_arch_tlbi_va() applies the invalidation to the exception level of the
+ * current translation regime, whereas xlat_arch_tlbi_va_regime() applies it to
+ * the given translation regime.
+ *
+ * Note, however, that it is architecturally UNDEFINED to invalidate TLB entries
+ * pertaining to a higher exception level, e.g. invalidating EL3 entries from
+ * S-EL1.
*/
void xlat_arch_tlbi_va(uintptr_t va);
+void xlat_arch_tlbi_va_regime(uintptr_t va, xlat_regime_t xlat_regime);
/*
* This function has to be called at the end of any code that uses the function
@@ -47,8 +59,6 @@ void xlat_arch_tlbi_va(uintptr_t va);
*/
void xlat_arch_tlbi_va_sync(void);
-#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-
/* Print VA, PA, size and attributes of all regions in the mmap array. */
void print_mmap(mmap_region_t *const mmap);
@@ -66,13 +76,6 @@ void xlat_tables_print(xlat_ctx_t *ctx);
int xlat_arch_current_el(void);
/*
- * Returns the bit mask that has to be ORed to the rest of a translation table
- * descriptor so that execution of code is prohibited at the given Exception
- * Level.
- */
-uint64_t xlat_arch_get_xn_desc(int el);
-
-/*
* Return the maximum physical address supported by the hardware.
* This value depends on the execution state (AArch32/AArch64).
*/
@@ -82,7 +85,10 @@ unsigned long long xlat_arch_get_max_supported_pa(void);
void enable_mmu_arch(unsigned int flags, uint64_t *base_table,
unsigned long long pa, uintptr_t max_va);
-/* Return 1 if the MMU of this Exception Level is enabled, 0 otherwise. */
-int is_mmu_enabled(void);
+/*
+ * Return 1 if the MMU of the translation regime managed by the given xlat_ctx_t
+ * is enabled, 0 otherwise.
+ */
+int is_mmu_enabled_ctx(const xlat_ctx_t *ctx);
#endif /* __XLAT_TABLES_PRIVATE_H__ */
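Taken together, the per-regime TLBI hook declared above and the helpers in xlat_tables_internal.c implement the break-before-make sequence spelled out in change_mem_attributes(); a condensed sketch of that sequence for a single live entry (entry, va and new_desc are placeholders):

/* Sketch: break-before-make on one live translation table entry. */
static void demo_break_before_make(uint64_t *entry, uintptr_t va,
				   xlat_regime_t regime, uint64_t new_desc)
{
	*entry = INVALID_DESC;			/* break: retire the old mapping  */
	xlat_arch_tlbi_va_regime(va, regime);	/* invalidate cached translations */
	xlat_arch_tlbi_va_sync();		/* wait for the invalidation      */
	*entry = new_desc;			/* make: install the new mapping  */
	dsbish();				/* ensure the write is observable */
}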