Diffstat (limited to 'lib')
-rw-r--r-- | lib/el3_runtime/aarch32/context_mgmt.c        | 15
-rw-r--r-- | lib/el3_runtime/aarch64/context.S             | 45
-rw-r--r-- | lib/el3_runtime/aarch64/context_mgmt.c        | 43
-rw-r--r-- | lib/extensions/spe/spe.c                      | 85
-rw-r--r-- | lib/psci/psci_common.c                        |  4
-rw-r--r-- | lib/psci/psci_main.c                          | 17
-rw-r--r-- | lib/psci/psci_suspend.c                       | 14
-rw-r--r-- | lib/xlat_tables/aarch64/xlat_tables.c         |  5
-rw-r--r-- | lib/xlat_tables_v2/aarch64/xlat_tables_arch.c | 10
9 files changed, 161 insertions, 77 deletions
diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c
index 3e7a5b73..a8672d6c 100644
--- a/lib/el3_runtime/aarch32/context_mgmt.c
+++ b/lib/el3_runtime/aarch32/context_mgmt.c
@@ -125,6 +125,17 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
 }
 
 /*******************************************************************************
+ * Enable architecture extensions on first entry to Non-secure world.
+ * When EL2 is implemented but unused, `el2_unused` is non-zero; otherwise
+ * it is zero.
+ ******************************************************************************/
+static void enable_extensions_nonsecure(int el2_unused)
+{
+#if IMAGE_BL32
+#endif
+}
+
+/*******************************************************************************
  * The following function initializes the cpu_context for a CPU specified by
  * its `cpu_idx` for first use, and sets the initial entrypoint state as
  * specified by the entry_point_info structure.
@@ -161,6 +172,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 {
 	uint32_t hsctlr, scr;
 	cpu_context_t *ctx = cm_get_context(security_state);
+	int el2_unused = 0;
 
 	assert(ctx);
 
@@ -185,6 +197,8 @@ void cm_prepare_el3_exit(uint32_t security_state)
 			isb();
 		} else if (read_id_pfr1() &
 			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
+			el2_unused = 1;
+
 			/*
 			 * Set the NS bit to access NS copies of certain banked
 			 * registers
@@ -283,5 +297,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
 			write_scr(read_scr() & ~SCR_NS_BIT);
 			isb();
 		}
+		enable_extensions_nonsecure(el2_unused);
 	}
 }
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index db16a9f0..620ec16f 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -9,7 +9,6 @@
 #include <context.h>
 
 	.global	el1_sysregs_context_save
-	.global	el1_sysregs_context_save_post_ops
 	.global	el1_sysregs_context_restore
 #if CTX_INCLUDE_FPREGS
 	.global	fpregs_context_save
@@ -90,9 +89,6 @@ func el1_sysregs_context_save
 	mrs	x15, dacr32_el2
 	mrs	x16, ifsr32_el2
 	stp	x15, x16, [x0, #CTX_DACR32_EL2]
-
-	mrs	x17, fpexc32_el2
-	str	x17, [x0, #CTX_FP_FPEXC32_EL2]
 #endif
 
 	/* Save NS timer registers if the build has instructed so */
@@ -115,36 +111,6 @@ endfunc el1_sysregs_context_save
 /* -----------------------------------------------------
  * The following function strictly follows the AArch64
  * PCS to use x9-x17 (temporary caller-saved registers)
- * to do post operations after saving the EL1 system
- * register context.
- * -----------------------------------------------------
- */
-func el1_sysregs_context_save_post_ops
-#if ENABLE_SPE_FOR_LOWER_ELS
-	/* Detect if SPE is implemented */
-	mrs	x9, id_aa64dfr0_el1
-	ubfx	x9, x9, #ID_AA64DFR0_PMS_SHIFT, #ID_AA64DFR0_PMS_LENGTH
-	cmp	x9, #0x1
-	b.ne	1f
-
-	/*
-	 * Before switching from normal world to secure world
-	 * the profiling buffers need to be drained out to memory. This is
-	 * required to avoid an invalid memory access when TTBR is switched
-	 * for entry to S-EL1.
-	 */
-	.arch	armv8.2-a+profile
-	psb	csync
-	dsb	nsh
-	.arch	armv8-a
-1:
-#endif
-	ret
-endfunc el1_sysregs_context_save_post_ops
-
-/* -----------------------------------------------------
- * The following function strictly follows the AArch64
- * PCS to use x9-x17 (temporary caller-saved registers)
  * to restore EL1 system register context.  It assumes
  * that 'x0' is pointing to a 'el1_sys_regs' structure
  * from where the register context will be restored
@@ -212,9 +178,6 @@ func el1_sysregs_context_restore
 	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
 	msr	dacr32_el2, x15
 	msr	ifsr32_el2, x16
-
-	ldr	x17, [x0, #CTX_FP_FPEXC32_EL2]
-	msr	fpexc32_el2, x17
 #endif
 	/* Restore NS timer registers if the build has instructed so */
 #if NS_TIMER_SWITCH
@@ -275,6 +238,10 @@ func fpregs_context_save
 
 	mrs	x10, fpcr
 	str	x10, [x0, #CTX_FP_FPCR]
 
+#if CTX_INCLUDE_AARCH32_REGS
+	mrs	x11, fpexc32_el2
+	str	x11, [x0, #CTX_FP_FPEXC32_EL2]
+#endif
 	ret
 endfunc fpregs_context_save
@@ -318,6 +285,10 @@ func fpregs_context_restore
 	ldr	x10, [x0, #CTX_FP_FPCR]
 	msr	fpcr, x10
 
+#if CTX_INCLUDE_AARCH32_REGS
+	ldr	x11, [x0, #CTX_FP_FPEXC32_EL2]
+	msr	fpexc32_el2, x11
+#endif
 	/*
 	 * No explicit ISB required here as ERET to
 	 * switch to secure EL1 or non-secure world
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index c8232df9..8f1523f0 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -15,6 +15,7 @@
 #include <platform_def.h>
 #include <pubsub_events.h>
 #include <smcc_helpers.h>
+#include <spe.h>
 #include <string.h>
 #include <utils.h>
 
@@ -209,6 +210,20 @@ static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t
 }
 
 /*******************************************************************************
+ * Enable architecture extensions on first entry to Non-secure world.
+ * When EL2 is implemented but unused, `el2_unused` is non-zero; otherwise
+ * it is zero.
+ ******************************************************************************/
+static void enable_extensions_nonsecure(int el2_unused)
+{
+#if IMAGE_BL31
+#if ENABLE_SPE_FOR_LOWER_ELS
+	spe_enable(el2_unused);
+#endif
+#endif
+}
+
+/*******************************************************************************
  * The following function initializes the cpu_context for a CPU specified by
  * its `cpu_idx` for first use, and sets the initial entrypoint state as
  * specified by the entry_point_info structure.
@@ -245,6 +260,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 {
 	uint32_t sctlr_elx, scr_el3, mdcr_el2;
 	cpu_context_t *ctx = cm_get_context(security_state);
+	int el2_unused = 0;
 
 	assert(ctx);
 
@@ -258,6 +274,8 @@ void cm_prepare_el3_exit(uint32_t security_state)
 			sctlr_elx |= SCTLR_EL2_RES1;
 			write_sctlr_el2(sctlr_elx);
 		} else if (EL_IMPLEMENTED(2)) {
+			el2_unused = 1;
+
 			/*
 			 * EL2 present but unused, need to disable safely.
 			 * SCTLR_EL2 can be ignored in this case.
@@ -340,13 +358,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
 			 * relying on hw. Some fields are architecturally
 			 * UNKNOWN on reset.
 			 *
-			 * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
-			 * profiling controls to EL2.
-			 *
-			 * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in non-secure
-			 * state. Accesses to profiling buffer controls at
-			 * non-secure EL1 are not trapped to EL2.
-			 *
			 * MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and
			 * EL1 System register accesses to the Debug ROM
			 * registers are not trapped to EL2.
@@ -383,22 +394,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
 				| MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT
 				| MDCR_EL2_TPMCR_BIT));
 
-#if ENABLE_SPE_FOR_LOWER_ELS
-		uint64_t id_aa64dfr0_el1;
-
-		/* Detect if SPE is implemented */
-		id_aa64dfr0_el1 = read_id_aa64dfr0_el1() >>
-			ID_AA64DFR0_PMS_SHIFT;
-		if ((id_aa64dfr0_el1 & ID_AA64DFR0_PMS_MASK) == 1) {
-			/*
-			 * Make sure traps to EL2 are not generated if
-			 * EL2 is implemented but not used.
-			 */
-			mdcr_el2 &= ~MDCR_EL2_TPMS;
-			mdcr_el2 |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
-		}
-#endif
-
 		write_mdcr_el2(mdcr_el2);
 
 		/*
@@ -420,6 +415,7 @@ void cm_prepare_el3_exit(uint32_t security_state)
 			write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
 						~(CNTHP_CTL_ENABLE_BIT));
 		}
+		enable_extensions_nonsecure(el2_unused);
 	}
 
 	cm_el1_sysregs_context_restore(security_state);
@@ -439,7 +435,6 @@ void cm_el1_sysregs_context_save(uint32_t security_state)
 	assert(ctx);
 
 	el1_sysregs_context_save(get_sysregs_ctx(ctx));
-	el1_sysregs_context_save_post_ops();
 
 #if IMAGE_BL31
 	if (security_state == SECURE)
diff --git a/lib/extensions/spe/spe.c b/lib/extensions/spe/spe.c
new file mode 100644
index 00000000..3b297f21
--- /dev/null
+++ b/lib/extensions/spe/spe.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <pubsub.h>
+
+/*
+ * The assembler does not yet understand the psb csync mnemonic
+ * so use the equivalent hint instruction.
+ */
+#define psb_csync()	asm volatile("hint #17")
+
+void spe_enable(int el2_unused)
+{
+	uint64_t features;
+
+	features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+	if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+		uint64_t v;
+
+		if (el2_unused) {
+			/*
+			 * MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
+			 * profiling controls to EL2.
+			 *
+			 * MDCR_EL2.E2PB (ARM v8.2): SPE enabled in Non-secure
+			 * state. Accesses to profiling buffer controls at
+			 * Non-secure EL1 are not trapped to EL2.
+			 */
+			v = read_mdcr_el2();
+			v &= ~MDCR_EL2_TPMS;
+			v |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
+			write_mdcr_el2(v);
+		}
+
+		/*
+		 * MDCR_EL3.NSPB (ARM v8.2): SPE enabled in Non-secure state
+		 * and disabled in secure state. Accesses to SPE registers at
+		 * S-EL1 generate trap exceptions to EL3.
+		 */
+		v = read_mdcr_el3();
+		v |= MDCR_NSPB(MDCR_NSPB_EL1);
+		write_mdcr_el3(v);
+	}
+}
+
+void spe_disable(void)
+{
+	uint64_t features;
+
+	features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+	if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+		uint64_t v;
+
+		/* Drain buffered data */
+		psb_csync();
+		dsbnsh();
+
+		/* Disable profiling buffer */
+		v = read_pmblimitr_el1();
+		v &= ~(1ULL << 0);
+		write_pmblimitr_el1(v);
+		isb();
+	}
+}
+
+static void *spe_drain_buffers_hook(const void *arg)
+{
+	uint64_t features;
+
+	features = read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT;
+	if ((features & ID_AA64DFR0_PMS_MASK) == 1) {
+		/* Drain buffered data */
+		psb_csync();
+		dsbnsh();
+	}
+
+	return 0;
+}
+
+SUBSCRIBE_TO_EVENT(cm_entering_secure_world, spe_drain_buffers_hook);
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index 4502c24b..2220a745 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -767,12 +767,12 @@ void psci_warmboot_entrypoint(void)
 	psci_acquire_pwr_domain_locks(end_pwrlvl,
 				      cpu_idx);
 
+	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
+
 #if ENABLE_PSCI_STAT
 	plat_psci_stat_accounting_stop(&state_info);
 #endif
 
-	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
-
 	/*
 	 * This CPU could be resuming from suspend or it could have just been
 	 * turned on. To distinguish between these 2 cases, we examine the
diff --git a/lib/psci/psci_main.c b/lib/psci/psci_main.c
index 4105e63b..8e41cf02 100644
--- a/lib/psci/psci_main.c
+++ b/lib/psci/psci_main.c
@@ -220,6 +220,23 @@ int psci_affinity_info(u_register_t target_affinity,
 	if (target_idx == -1)
 		return PSCI_E_INVALID_PARAMS;
 
+	/*
+	 * Generic management:
+	 * Perform cache maintenance ahead of reading the target CPU state to
+	 * ensure that the data is not stale.
+	 * There is a theoretical edge case where the cache may contain stale
+	 * data for the target CPU - this can occur under the following
+	 * conditions:
+	 * - the target CPU is in another cluster from the current
+	 * - the target CPU was the last CPU to shutdown on its cluster
+	 * - the cluster was removed from coherency as part of the CPU shutdown
+	 *
+	 * In this case the cache maintenance that was performed as part of the
+	 * target CPU's shutdown was not seen by the current CPU's cluster. And
+	 * so the cache may contain stale data for the target CPU.
+	 */
+	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+
 	return psci_get_aff_info_state_by_idx(target_idx);
 }
 
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
index 40ecdeea..d9490672 100644
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -37,6 +37,11 @@ static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
 	 */
 	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
 
+#if ENABLE_PSCI_STAT
+	plat_psci_stat_accounting_stop(&state_info);
+	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
+#endif
+
 	/*
 	 * Plat. management: Allow the platform to do operations
 	 * on waking up from retention.
@@ -236,10 +241,6 @@ exit:
 			    PMF_NO_CACHE_MAINT);
 #endif
 
-#if ENABLE_PSCI_STAT
-	plat_psci_stat_accounting_start(state_info);
-#endif
-
 	/*
 	 * We will reach here if only retention/standby states have been
 	 * requested at multiple power levels. This means that the cpu
@@ -247,11 +248,6 @@ exit:
 	 */
 	wfi();
 
-#if ENABLE_PSCI_STAT
-	plat_psci_stat_accounting_stop(state_info);
-	psci_stats_update_pwr_up(end_pwrlvl, state_info);
-#endif
-
 #if ENABLE_RUNTIME_INSTRUMENTATION
 	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
 		RT_INSTR_EXIT_HW_LOW_PWR,
diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c
index 28ae1f73..eabc3df9 100644
--- a/lib/xlat_tables/aarch64/xlat_tables.c
+++ b/lib/xlat_tables/aarch64/xlat_tables.c
@@ -60,7 +60,10 @@ static unsigned long long calc_physical_addr_size_bits(
 /* Physical Address ranges supported in the AArch64 Memory Model */
 static const unsigned int pa_range_bits_arr[] = {
 	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
-	PARANGE_0101
+	PARANGE_0101,
+#if ARM_ARCH_AT_LEAST(8, 2)
+	PARANGE_0110,
+#endif
 };
 
 static unsigned long long get_max_supported_pa(void)
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index eda38d34..aa5b9e54 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -16,8 +16,7 @@
 #include <xlat_tables_v2.h>
 #include "../xlat_tables_private.h"
 
-static unsigned long long calc_physical_addr_size_bits(
-		unsigned long long max_addr)
+unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
 {
 	/* Physical address can't exceed 48 bits */
 	assert((max_addr & ADDR_MASK_48_TO_63) == 0);
@@ -49,7 +48,10 @@ static unsigned long long calc_physical_addr_size_bits(
 /* Physical Address ranges supported in the AArch64 Memory Model */
 static const unsigned int pa_range_bits_arr[] = {
 	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
-	PARANGE_0101
+	PARANGE_0101,
+#if ARM_ARCH_AT_LEAST(8, 2)
+	PARANGE_0110,
+#endif
 };
 
 unsigned long long xlat_arch_get_max_supported_pa(void)
@@ -252,7 +254,7 @@ void enable_mmu_arch(unsigned int flags,
 	 * It is safer to restrict the max physical address accessible by the
 	 * hardware as much as possible.
 	 */
-	unsigned long long tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
+	unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);
 
 #if IMAGE_EL == 1
 	assert(IS_IN_EL(1));
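
A note on how SPE buffer draining still happens after this change: the explicit el1_sysregs_context_save_post_ops() call is removed, and lib/extensions/spe/spe.c instead subscribes spe_drain_buffers_hook() to the cm_entering_secure_world pubsub event. The sketch below shows how the publishing side presumably looks in cm_el1_sysregs_context_save(); the PUBLISH_EVENT() call is inferred from the #if IMAGE_BL31 / if (security_state == SECURE) context visible in that hunk and from the subscription in spe.c, so treat the exact body as an assumption rather than part of this patch. Keeping the SPE knowledge in lib/extensions/spe and driving it from enable_extensions_nonsecure() also keeps the generic context-switch code free of v8.2-specific MDCR_EL2 programming, which previously lived in cm_prepare_el3_exit().

#include <assert.h>
#include <context.h>
#include <context_mgmt.h>
#include <pubsub_events.h>

/*
 * Sketch only: how the cm_entering_secure_world event reaches the
 * spe_drain_buffers_hook() subscriber registered in spe.c. The exact
 * body of this function is not shown in the diff above.
 */
void cm_el1_sysregs_context_save(uint32_t security_state)
{
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx);

	el1_sysregs_context_save(get_sysregs_ctx(ctx));

#if IMAGE_BL31
	if (security_state == SECURE)
		/*
		 * Runs every handler registered with
		 * SUBSCRIBE_TO_EVENT(cm_entering_secure_world, ...),
		 * so the profiling buffer is drained before the TTBR
		 * switch into S-EL1.
		 */
		PUBLISH_EVENT(cm_entering_secure_world);
#endif
}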
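
The diffstat is limited to lib/, so the <spe.h> header that lib/el3_runtime/aarch64/context_mgmt.c now includes is not part of this view. Based only on the symbols this patch defines and calls, the header is assumed to declare something along these lines; the real include/lib/extensions/spe.h may differ.

#ifndef __SPE_H__
#define __SPE_H__

/* Assumed prototypes, matching the definitions in lib/extensions/spe/spe.c. */
void spe_enable(int el2_unused);	/* called from enable_extensions_nonsecure() */
void spe_disable(void);			/* for use on the power-down path */

#endif /* __SPE_H__ */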