Diffstat (limited to 'lib')
-rw-r--r--  lib/el3_runtime/aarch32/context_mgmt.c          15
-rw-r--r--  lib/el3_runtime/aarch64/context.S               31
-rw-r--r--  lib/el3_runtime/aarch64/context_mgmt.c          43
-rw-r--r--  lib/extensions/spe/spe.c                        85
-rw-r--r--  lib/xlat_tables/aarch64/xlat_tables.c            5
-rw-r--r--  lib/xlat_tables_v2/aarch64/xlat_tables_arch.c    5
6 files changed, 127 insertions, 57 deletions
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 3e7a5b73..a8672d6c 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -125,6 +125,17 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
}
/*******************************************************************************
+ * Enable architecture extensions on first entry to Non-secure world.
+ * When EL2 is implemented but unused, `el2_unused` is non-zero;
+ * otherwise it is zero.
+ ******************************************************************************/
+static void enable_extensions_nonsecure(int el2_unused)
+{
+#if IMAGE_BL32
+#endif
+}
+
+/*******************************************************************************
* The following function initializes the cpu_context for a CPU specified by
* its `cpu_idx` for first use, and sets the initial entrypoint state as
* specified by the entry_point_info structure.
@@ -161,6 +172,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
{
uint32_t hsctlr, scr;
cpu_context_t *ctx = cm_get_context(security_state);
+ int el2_unused = 0;
assert(ctx);
@@ -185,6 +197,8 @@ void cm_prepare_el3_exit(uint32_t security_state)
isb();
} else if (read_id_pfr1() &
(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
+ el2_unused = 1;
+
/*
* Set the NS bit to access NS copies of certain banked
* registers
@@ -283,5 +297,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
write_scr(read_scr() & ~SCR_NS_BIT);
isb();
}
+ enable_extensions_nonsecure(el2_unused);
}
}
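On AArch32 the new enable_extensions_nonsecure() hook is deliberately empty: BL32 has no architecture extensions to enable yet, but the el2_unused flag is already plumbed through so that later patches only have to add a call inside the IMAGE_BL32 guard. A minimal sketch of that intended pattern follows; the ENABLE_FOO build flag and foo_enable() helper are hypothetical and not part of this patch.

static void enable_extensions_nonsecure(int el2_unused)
{
#if IMAGE_BL32
#if ENABLE_FOO
	/* Hypothetical extension hook: forward the EL2-unused state */
	foo_enable(el2_unused);
#endif
#endif
}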
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 143da956..620ec16f 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -9,7 +9,6 @@
#include <context.h>
.global el1_sysregs_context_save
- .global el1_sysregs_context_save_post_ops
.global el1_sysregs_context_restore
#if CTX_INCLUDE_FPREGS
.global fpregs_context_save
@@ -112,36 +111,6 @@ endfunc el1_sysregs_context_save
/* -----------------------------------------------------
* The following function strictly follows the AArch64
* PCS to use x9-x17 (temporary caller-saved registers)
- * to do post operations after saving the EL1 system
- * register context.
- * -----------------------------------------------------
- */
-func el1_sysregs_context_save_post_ops
-#if ENABLE_SPE_FOR_LOWER_ELS
- /* Detect if SPE is implemented */
- mrs x9, id_aa64dfr0_el1
- ubfx x9, x9, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
- cmp x9, #0x1
- b.ne 1f
-
- /*
- * Before switching from normal world to secure world
- * the profiling buffers need to be drained out to memory. This is
- * required to avoid an invalid memory access when TTBR is switched
- * for entry to SEL1.
- */
- .arch armv8.2-a+profile
- psb csync
- dsb nsh
- .arch armv8-a
-1:
-#endif
- ret
-endfunc el1_sysregs_context_save_post_ops
-
-/* -----------------------------------------------------
- * The following function strictly follows the AArch64
- * PCS to use x9-x17 (temporary caller-saved registers)
* to restore EL1 system register context. It assumes
* that 'x0' is pointing to a 'el1_sys_regs' structure
* from where the register context will be restored
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index c8232df9..8f1523f0 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -15,6 +15,7 @@
#include <platform_def.h>
#include <pubsub_events.h>
#include <smcc_helpers.h>
+#include <spe.h>
#include <string.h>
#include <utils.h>
@@ -209,6 +210,20 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
}
/*******************************************************************************
+ * Enable architecture extensions on first entry to Non-secure world.
+ * When EL2 is implemented but unused, `el2_unused` is non-zero;
+ * otherwise it is zero.
+ ******************************************************************************/
+static void enable_extensions_nonsecure(int el2_unused)
+{
+#if IMAGE_BL31
+#if ENABLE_SPE_FOR_LOWER_ELS
+ spe_enable(el2_unused);
+#endif
+#endif
+}
+
+/*******************************************************************************
* The following function initializes the cpu_context for a CPU specified by
* its `cpu_idx` for first use, and sets the initial entrypoint state as
* specified by the entry_point_info structure.
@@ -245,6 +260,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
{
uint32_t sctlr_elx, scr_el3, mdcr_el2;
cpu_context_t *ctx = cm_get_context(security_state);
+ int el2_unused = 0;
assert(ctx);
@@ -258,6 +274,8 @@ void cm_prepare_el3_exit(uint32_t security_state)
sctlr_elx |= SCTLR_EL2_RES1;
write_sctlr_el2(sctlr_elx);
} else if (EL_IMPLEMENTED(2)) {
+ el2_unused = 1;
+
/*
* EL2 present but unused, need to disable safely.
* SCTLR_EL2 can be ignored in this case.
@@ -340,13 +358,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
* relying on hw. Some fields are architecturally
* UNKNOWN on reset.
*
- * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
- * profiling controls to EL2.
- *
- * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in non-secure
- * state. Accesses to profiling buffer controls at
- * non-secure EL1 are not trapped to EL2.
- *
* MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and
* EL1 System register accesses to the Debug ROM
* registers are not trapped to EL2.
@@ -383,22 +394,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
| MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT
| MDCR_EL2_TPMCR_BIT));
-#if ENABLE_SPE_FOR_LOWER_ELS
- uint64_t id_aa64dfr0_el1;
-
- /* Detect if SPE is implemented */
- id_aa64dfr0_el1 = read_id_aa64dfr0_el1() >>
- ID_AA64DFR0_PMS_SHIFT;
- if ((id_aa64dfr0_el1 & ID_AA64DFR0_PMS_MASK) == 1) {
- /*
- * Make sure traps to EL2 are not generated if
- * EL2 is implemented but not used.
- */
- mdcr_el2 &= ~MDCR_EL2_TPMS;
- mdcr_el2 |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
- }
-#endif
-
write_mdcr_el2(mdcr_el2);
/*
@@ -420,6 +415,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
~(CNTHP_CTL_ENABLE_BIT));
}
+ enable_extensions_nonsecure(el2_unused);
}
cm_el1_sysregs_context_restore(security_state);
@@ -439,7 +435,6 @@ void cm_el1_sysregs_context_save(uint32_t security_state)
assert(ctx);
el1_sysregs_context_save(get_sysregs_ctx(ctx));
- el1_sysregs_context_save_post_ops();
#if IMAGE_BL31
if (security_state == SECURE)
diff --git a/lib/extensions/spe/spe.c b/lib/extensions/spe/spe.c
new file mode 100644
index 00000000..3b297f21
--- /dev/null
+++ b/lib/extensions/spe/spe.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <pubsub.h>
+
+/*
+ * The assembler does not yet understand the psb csync mnemonic
+ * so use the equivalent hint instruction.
+ */
+#define psb_csync() asm volatile("hint #17")
+
+void spe_enable(int el2_unused)
+{
+ uint64_t features;
+
+ features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+ if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+ uint64_t v;
+
+ if (el2_unused) {
+ /*
+ * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
+ * profiling controls to EL2.
+ *
+ * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in Non-secure
+ * state. Accesses to profiling buffer controls at
+ * Non-secure EL1 are not trapped to EL2.
+ */
+ v = read_mdcr_el2();
+ v &= ~MDCR_EL2_TPMS;
+ v |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
+ write_mdcr_el2(v);
+ }
+
+ /*
+ * MDCR_EL2.NSPB (ARM v8.2): SPE enabled in Non-secure state
+ * and disabled in secure state. Accesses to SPE registers at
+ * S-EL1 generate trap exceptions to EL3.
+ */
+ v = read_mdcr_el3();
+ v |= MDCR_NSPB(MDCR_NSPB_EL1);
+ write_mdcr_el3(v);
+ }
+}
+
+void spe_disable(void)
+{
+ uint64_t features;
+
+ features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+ if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+ uint64_t v;
+
+ /* Drain buffered data */
+ psb_csync();
+ dsbnsh();
+
+ /* Disable the profiling buffer by clearing PMBLIMITR_EL1.E (bit 0) */
+ v = read_pmblimitr_el1();
+ v &= ~(1ULL << 0);
+ write_pmblimitr_el1(v);
+ isb();
+ }
+}
+
+static void *spe_drain_buffers_hook(const void *arg)
+{
+ uint64_t features;
+
+ features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+ if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+ /* Drain buffered data */
+ psb_csync();
+ dsbnsh();
+ }
+
+ return 0;
+}
+
+SUBSCRIBE_TO_EVENT(cm_entering_secure_world, spe_drain_buffers_hook);
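The SUBSCRIBE_TO_EVENT() registration ties the drain hook into TF-A's pubsub framework: spe_drain_buffers_hook() runs whenever the cm_entering_secure_world event is published, which replaces the per-save el1_sysregs_context_save_post_ops() call deleted above. A minimal sketch of the publisher side in cm_el1_sysregs_context_save(), assuming the existing PUBLISH_EVENT() macro from pubsub.h and the event declaration in pubsub_events.h (both already in the tree, not added by this patch):

#if IMAGE_BL31
	if (security_state == SECURE)
		/* Runs every registered subscriber, including spe_drain_buffers_hook() */
		PUBLISH_EVENT(cm_entering_secure_world);
#endif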
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index 28ae1f73..eabc3df9 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -60,7 +60,10 @@ static unsigned long long calc_physical_addr_size_bits(
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
- PARANGE_0101
+ PARANGE_0101,
+#if ARM_ARCH_AT_LEAST(8, 2)
+ PARANGE_0110,
+#endif
};
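PARANGE_0110 is the 52-bit physical address range introduced by ARMv8.2-LPA, which is why the new entry is only compiled in when ARM_ARCH_AT_LEAST(8, 2). As an illustration of how the table is consumed, mirroring get_max_supported_pa() below and using the existing arch.h definitions, the PARange field of ID_AA64MMFR0_EL1 indexes the array directly:

	/* PARange lives in ID_AA64MMFR0_EL1[3:0]; reserved values are rejected */
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
					ID_AA64MMFR0_EL1_PARANGE_MASK;
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
	unsigned long long max_pa = (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;

The same reasoning applies to the identical change in xlat_tables_v2 further below.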
static unsigned long long get_max_supported_pa(void)
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 43311079..aa5b9e54 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -48,7 +48,10 @@ unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
/* Physical Address ranges supported in the AArch64 Memory Model */
static const unsigned int pa_range_bits_arr[] = {
PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
- PARANGE_0101
+ PARANGE_0101,
+#if ARM_ARCH_AT_LEAST(8, 2)
+ PARANGE_0110,
+#endif
};
unsigned long long xlat_arch_get_max_supported_pa(void)