From 770f853dcb47ad856c060ffc6fefd626ae40e52c Mon Sep 17 00:00:00 2001 From: Dimitris Papastamos Date: Tue, 29 May 2018 09:28:05 +0100 Subject: Merge pull request #1392 from dp-arm/dp/cve_2018_3639 Implement workaround for CVE-2018-3639 on Cortex A57/A72/A73 and A75 Conflicts: services/arm_arch_svc/arm_arch_svc_setup.c --- lib/cpus/aarch32/cortex_a57.S | 18 ++ lib/cpus/aarch32/cortex_a72.S | 19 ++ lib/cpus/aarch64/cortex_a57.S | 23 +- lib/cpus/aarch64/cortex_a72.S | 23 +- lib/cpus/aarch64/cortex_a73.S | 22 +- lib/cpus/aarch64/cortex_a75.S | 22 +- lib/cpus/aarch64/cpu_helpers.S | 32 +- lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S | 357 +++++++++++++++++++++ lib/cpus/aarch64/wa_cve_2017_5715_mmu.S | 148 +++++++++ lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S | 357 --------------------- lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S | 148 --------- lib/cpus/cpu-ops.mk | 15 + lib/el3_runtime/aarch64/context.S | 9 + 13 files changed, 676 insertions(+), 517 deletions(-) create mode 100644 lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S create mode 100644 lib/cpus/aarch64/wa_cve_2017_5715_mmu.S delete mode 100644 lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S delete mode 100644 lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S (limited to 'lib') diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S index f446bfff..dff86be7 100644 --- a/lib/cpus/aarch32/cortex_a57.S +++ b/lib/cpus/aarch32/cortex_a57.S @@ -337,6 +337,15 @@ func check_errata_cve_2017_5715 bx lr endfunc check_errata_cve_2017_5715 +func check_errata_cve_2018_3639 +#if WORKAROUND_CVE_2018_3639 + mov r0, #ERRATA_APPLIES +#else + mov r0, #ERRATA_MISSING +#endif + bx lr +endfunc check_errata_cve_2018_3639 + /* ------------------------------------------------- * The CPU Ops reset function for Cortex-A57. * Shall clobber: r0-r6 @@ -392,6 +401,14 @@ func cortex_a57_reset_func bl errata_a57_859972_wa #endif +#if WORKAROUND_CVE_2018_3639 + ldcopr16 r0, r1, CORTEX_A57_CPUACTLR + orr64_imm r0, r1, CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_STORE + stcopr16 r0, r1, CORTEX_A57_CPUACTLR + isb + dsb sy +#endif + /* --------------------------------------------- * Enable the SMP bit. * --------------------------------------------- @@ -525,6 +542,7 @@ func cortex_a57_errata_report report_errata ERRATA_A57_833471, cortex_a57, 833471 report_errata ERRATA_A57_859972, cortex_a57, 859972 report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715 + report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639 pop {r12, lr} bx lr diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S index 56e91f5c..3bc3388b 100644 --- a/lib/cpus/aarch32/cortex_a72.S +++ b/lib/cpus/aarch32/cortex_a72.S @@ -92,6 +92,15 @@ func check_errata_cve_2017_5715 bx lr endfunc check_errata_cve_2017_5715 +func check_errata_cve_2018_3639 +#if WORKAROUND_CVE_2018_3639 + mov r0, #ERRATA_APPLIES +#else + mov r0, #ERRATA_MISSING +#endif + bx lr +endfunc check_errata_cve_2018_3639 + /* ------------------------------------------------- * The CPU Ops reset function for Cortex-A72. * ------------------------------------------------- @@ -105,6 +114,15 @@ func cortex_a72_reset_func mov r0, r4 bl errata_a72_859971_wa #endif + +#if WORKAROUND_CVE_2018_3639 + ldcopr16 r0, r1, CORTEX_A72_CPUACTLR + orr64_imm r0, r1, CORTEX_A72_CPUACTLR_DIS_LOAD_PASS_STORE + stcopr16 r0, r1, CORTEX_A72_CPUACTLR + isb + dsb sy +#endif + /* --------------------------------------------- * Enable the SMP bit. 
* --------------------------------------------- @@ -241,6 +259,7 @@ func cortex_a72_errata_report */ report_errata ERRATA_A72_859971, cortex_a72, 859971 report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715 + report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639 pop {r12, lr} bx lr diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S index 4d072e11..07fadd15 100644 --- a/lib/cpus/aarch64/cortex_a57.S +++ b/lib/cpus/aarch64/cortex_a57.S @@ -337,6 +337,15 @@ func check_errata_cve_2017_5715 ret endfunc check_errata_cve_2017_5715 +func check_errata_cve_2018_3639 +#if WORKAROUND_CVE_2018_3639 + mov x0, #ERRATA_APPLIES +#else + mov x0, #ERRATA_MISSING +#endif + ret +endfunc check_errata_cve_2018_3639 + /* ------------------------------------------------- * The CPU Ops reset function for Cortex-A57. * Shall clobber: x0-x19 @@ -393,10 +402,18 @@ func cortex_a57_reset_func #endif #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715 - adr x0, workaround_mmu_runtime_exceptions + adr x0, wa_cve_2017_5715_mmu_vbar msr vbar_el3, x0 #endif +#if WORKAROUND_CVE_2018_3639 + mrs x0, CORTEX_A57_CPUACTLR_EL1 + orr x0, x0, #CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_STORE + msr CORTEX_A57_CPUACTLR_EL1, x0 + isb + dsb sy +#endif + /* --------------------------------------------- * Enable the SMP bit. * --------------------------------------------- @@ -528,6 +545,7 @@ func cortex_a57_errata_report report_errata ERRATA_A57_833471, cortex_a57, 833471 report_errata ERRATA_A57_859972, cortex_a57, 859972 report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715 + report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639 ldp x8, x30, [sp], #16 ret @@ -555,8 +573,9 @@ func cortex_a57_cpu_reg_dump ret endfunc cortex_a57_cpu_reg_dump -declare_cpu_ops_workaround_cve_2017_5715 cortex_a57, CORTEX_A57_MIDR, \ +declare_cpu_ops_wa cortex_a57, CORTEX_A57_MIDR, \ cortex_a57_reset_func, \ check_errata_cve_2017_5715, \ + CPU_NO_EXTRA2_FUNC, \ cortex_a57_core_pwr_dwn, \ cortex_a57_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S index 29fa77b9..bb9381d1 100644 --- a/lib/cpus/aarch64/cortex_a72.S +++ b/lib/cpus/aarch64/cortex_a72.S @@ -110,6 +110,15 @@ func check_errata_cve_2017_5715 ret endfunc check_errata_cve_2017_5715 +func check_errata_cve_2018_3639 +#if WORKAROUND_CVE_2018_3639 + mov x0, #ERRATA_APPLIES +#else + mov x0, #ERRATA_MISSING +#endif + ret +endfunc check_errata_cve_2018_3639 + /* ------------------------------------------------- * The CPU Ops reset function for Cortex-A72. * ------------------------------------------------- @@ -126,11 +135,19 @@ func cortex_a72_reset_func #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715 cpu_check_csv2 x0, 1f - adr x0, workaround_mmu_runtime_exceptions + adr x0, wa_cve_2017_5715_mmu_vbar msr vbar_el3, x0 1: #endif +#if WORKAROUND_CVE_2018_3639 + mrs x0, CORTEX_A72_CPUACTLR_EL1 + orr x0, x0, #CORTEX_A72_CPUACTLR_EL1_DIS_LOAD_PASS_STORE + msr CORTEX_A72_CPUACTLR_EL1, x0 + isb + dsb sy +#endif + /* --------------------------------------------- * Enable the SMP bit. 
* --------------------------------------------- @@ -265,6 +282,7 @@ func cortex_a72_errata_report */ report_errata ERRATA_A72_859971, cortex_a72, 859971 report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715 + report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639 ldp x8, x30, [sp], #16 ret @@ -292,8 +310,9 @@ func cortex_a72_cpu_reg_dump ret endfunc cortex_a72_cpu_reg_dump -declare_cpu_ops_workaround_cve_2017_5715 cortex_a72, CORTEX_A72_MIDR, \ +declare_cpu_ops_wa cortex_a72, CORTEX_A72_MIDR, \ cortex_a72_reset_func, \ check_errata_cve_2017_5715, \ + CPU_NO_EXTRA2_FUNC, \ cortex_a72_core_pwr_dwn, \ cortex_a72_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S index 0a961ea3..d595f128 100644 --- a/lib/cpus/aarch64/cortex_a73.S +++ b/lib/cpus/aarch64/cortex_a73.S @@ -38,11 +38,18 @@ endfunc cortex_a73_disable_smp func cortex_a73_reset_func #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715 cpu_check_csv2 x0, 1f - adr x0, workaround_bpiall_vbar0_runtime_exceptions + adr x0, wa_cve_2017_5715_bpiall_vbar msr vbar_el3, x0 1: #endif +#if WORKAROUND_CVE_2018_3639 + mrs x0, CORTEX_A73_IMP_DEF_REG1 + orr x0, x0, #CORTEX_A73_IMP_DEF_REG1_DISABLE_LOAD_PASS_STORE + msr CORTEX_A73_IMP_DEF_REG1, x0 + isb +#endif + /* --------------------------------------------- * Enable the SMP bit. * Clobbers : x0 @@ -129,6 +136,15 @@ func check_errata_cve_2017_5715 ret endfunc check_errata_cve_2017_5715 +func check_errata_cve_2018_3639 +#if WORKAROUND_CVE_2018_3639 + mov x0, #ERRATA_APPLIES +#else + mov x0, #ERRATA_MISSING +#endif + ret +endfunc check_errata_cve_2018_3639 + #if REPORT_ERRATA /* * Errata printing function for Cortex A75. Must follow AAPCS. @@ -144,6 +160,7 @@ func cortex_a73_errata_report * checking functions of each errata. */ report_errata WORKAROUND_CVE_2017_5715, cortex_a73, cve_2017_5715 + report_errata WORKAROUND_CVE_2018_3639, cortex_a73, cve_2018_3639 ldp x8, x30, [sp], #16 ret @@ -170,8 +187,9 @@ func cortex_a73_cpu_reg_dump ret endfunc cortex_a73_cpu_reg_dump -declare_cpu_ops_workaround_cve_2017_5715 cortex_a73, CORTEX_A73_MIDR, \ +declare_cpu_ops_wa cortex_a73, CORTEX_A73_MIDR, \ cortex_a73_reset_func, \ check_errata_cve_2017_5715, \ + CPU_NO_EXTRA2_FUNC, \ cortex_a73_core_pwr_dwn, \ cortex_a73_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S index 288f5afe..20ec32ce 100644 --- a/lib/cpus/aarch64/cortex_a75.S +++ b/lib/cpus/aarch64/cortex_a75.S @@ -13,11 +13,18 @@ func cortex_a75_reset_func #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715 cpu_check_csv2 x0, 1f - adr x0, workaround_bpiall_vbar0_runtime_exceptions + adr x0, wa_cve_2017_5715_bpiall_vbar msr vbar_el3, x0 1: #endif +#if WORKAROUND_CVE_2018_3639 + mrs x0, CORTEX_A75_CPUACTLR_EL1 + orr x0, x0, #CORTEX_A75_CPUACTLR_EL1_DISABLE_LOAD_PASS_STORE + msr CORTEX_A75_CPUACTLR_EL1, x0 + isb +#endif + #if ENABLE_AMU /* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */ mrs x0, actlr_el3 @@ -57,6 +64,15 @@ func check_errata_cve_2017_5715 ret endfunc check_errata_cve_2017_5715 +func check_errata_cve_2018_3639 +#if WORKAROUND_CVE_2018_3639 + mov x0, #ERRATA_APPLIES +#else + mov x0, #ERRATA_MISSING +#endif + ret +endfunc check_errata_cve_2018_3639 + /* --------------------------------------------- * HW will do the cache maintenance while powering down * --------------------------------------------- @@ -88,6 +104,7 @@ func cortex_a75_errata_report * checking functions of each errata. 
*/ report_errata WORKAROUND_CVE_2017_5715, cortex_a75, cve_2017_5715 + report_errata WORKAROUND_CVE_2018_3639, cortex_a75, cve_2018_3639 ldp x8, x30, [sp], #16 ret @@ -113,7 +130,8 @@ func cortex_a75_cpu_reg_dump ret endfunc cortex_a75_cpu_reg_dump -declare_cpu_ops_workaround_cve_2017_5715 cortex_a75, CORTEX_A75_MIDR, \ +declare_cpu_ops_wa cortex_a75, CORTEX_A75_MIDR, \ cortex_a75_reset_func, \ check_errata_cve_2017_5715, \ + CPU_NO_EXTRA2_FUNC, \ cortex_a75_core_pwr_dwn diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S index 5a9226d8..652cfe63 100644 --- a/lib/cpus/aarch64/cpu_helpers.S +++ b/lib/cpus/aarch64/cpu_helpers.S @@ -281,7 +281,7 @@ endfunc print_errata_status #endif /* - * int check_workaround_cve_2017_5715(void); + * int check_wa_cve_2017_5715(void); * * This function returns: * - ERRATA_APPLIES when firmware mitigation is required. @@ -292,8 +292,8 @@ endfunc print_errata_status * NOTE: Must be called only after cpu_ops have been initialized * in per-CPU data. */ - .globl check_workaround_cve_2017_5715 -func check_workaround_cve_2017_5715 + .globl check_wa_cve_2017_5715 +func check_wa_cve_2017_5715 mrs x0, tpidr_el3 #if ENABLE_ASSERTIONS cmp x0, #0 @@ -311,4 +311,28 @@ func check_workaround_cve_2017_5715 1: mov x0, #ERRATA_NOT_APPLIES ret -endfunc check_workaround_cve_2017_5715 +endfunc check_wa_cve_2017_5715 + +/* + * void *wa_cve_2018_3639_get_disable_ptr(void); + * + * Returns a function pointer which is used to disable mitigation + * for CVE-2018-3639. + * The function pointer is only returned on cores that employ + * dynamic mitigation. If the core uses static mitigation or is + * unaffected by CVE-2018-3639 this function returns NULL. + * + * NOTE: Must be called only after cpu_ops have been initialized + * in per-CPU data. + */ + .globl wa_cve_2018_3639_get_disable_ptr +func wa_cve_2018_3639_get_disable_ptr + mrs x0, tpidr_el3 +#if ENABLE_ASSERTIONS + cmp x0, #0 + ASM_ASSERT(ne) +#endif + ldr x0, [x0, #CPU_DATA_CPU_OPS_PTR] + ldr x0, [x0, #CPU_EXTRA2_FUNC] + ret +endfunc wa_cve_2018_3639_get_disable_ptr diff --git a/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S new file mode 100644 index 00000000..84371551 --- /dev/null +++ b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S @@ -0,0 +1,357 @@ +/* + * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include + + .globl wa_cve_2017_5715_bpiall_vbar + +#define EMIT_BPIALL 0xee070fd5 +#define EMIT_SMC 0xe1600070 +#define ESR_EL3_A64_SMC0 0x5e000000 + + .macro apply_cve_2017_5715_wa _from_vector + /* + * Save register state to enable a call to AArch32 S-EL1 and return + * Identify the original calling vector in w2 (==_from_vector) + * Use w3-w6 for additional register state preservation while in S-EL1 + */ + + /* Save GP regs */ + stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] + stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] + stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] + stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] + stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] + stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] + stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] + stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] + stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] + stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] + stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20] + stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22] + stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24] + stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26] + stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28] + + /* Identify the original exception vector */ + mov w2, \_from_vector + + /* Preserve 32-bit system registers in GP registers through the workaround */ + mrs x3, esr_el3 + mrs x4, spsr_el3 + mrs x5, scr_el3 + mrs x6, sctlr_el1 + + /* + * Preserve LR and ELR_EL3 registers in the GP regs context. + * Temporarily use the CTX_GPREG_SP_EL0 slot to preserve ELR_EL3 + * through the workaround. This is OK because at this point the + * current state for this context's SP_EL0 is in the live system + * register, which is unmodified by the workaround. + */ + mrs x7, elr_el3 + stp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] + + /* + * Load system registers for entry to S-EL1. + */ + + /* Mask all interrupts and set AArch32 Supervisor mode */ + movz w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK) + + /* Switch EL3 exception vectors while the workaround is executing. */ + adr x9, wa_cve_2017_5715_bpiall_ret_vbar + + /* Setup SCTLR_EL1 with MMU off and I$ on */ + ldr x10, stub_sel1_sctlr + + /* Land at the S-EL1 workaround stub */ + adr x11, aarch32_stub + + /* + * Setting SCR_EL3 to all zeroes means that the NS, RW + * and SMD bits are configured as expected. + */ + msr scr_el3, xzr + msr spsr_el3, x8 + msr vbar_el3, x9 + msr sctlr_el1, x10 + msr elr_el3, x11 + + eret + .endm + + /* --------------------------------------------------------------------- + * This vector table is used at runtime to enter the workaround at + * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions. If the workaround + * is not enabled, the existing runtime exception vector table is used. + * --------------------------------------------------------------------- + */ +vector_base wa_cve_2017_5715_bpiall_vbar + + /* --------------------------------------------------------------------- + * Current EL with SP_EL0 : 0x0 - 0x200 + * --------------------------------------------------------------------- + */ +vector_entry bpiall_sync_exception_sp_el0 + b sync_exception_sp_el0 + nop /* to force 8 byte alignment for the following stub */ + + /* + * Since each vector table entry is 128 bytes, we can store the + * stub context in the unused space to minimize memory footprint. 
+ */ +stub_sel1_sctlr: + .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT + +aarch32_stub: + .word EMIT_BPIALL + .word EMIT_SMC + + check_vector_size bpiall_sync_exception_sp_el0 + +vector_entry bpiall_irq_sp_el0 + b irq_sp_el0 + check_vector_size bpiall_irq_sp_el0 + +vector_entry bpiall_fiq_sp_el0 + b fiq_sp_el0 + check_vector_size bpiall_fiq_sp_el0 + +vector_entry bpiall_serror_sp_el0 + b serror_sp_el0 + check_vector_size bpiall_serror_sp_el0 + + /* --------------------------------------------------------------------- + * Current EL with SP_ELx: 0x200 - 0x400 + * --------------------------------------------------------------------- + */ +vector_entry bpiall_sync_exception_sp_elx + b sync_exception_sp_elx + check_vector_size bpiall_sync_exception_sp_elx + +vector_entry bpiall_irq_sp_elx + b irq_sp_elx + check_vector_size bpiall_irq_sp_elx + +vector_entry bpiall_fiq_sp_elx + b fiq_sp_elx + check_vector_size bpiall_fiq_sp_elx + +vector_entry bpiall_serror_sp_elx + b serror_sp_elx + check_vector_size bpiall_serror_sp_elx + + /* --------------------------------------------------------------------- + * Lower EL using AArch64 : 0x400 - 0x600 + * --------------------------------------------------------------------- + */ +vector_entry bpiall_sync_exception_aarch64 + apply_cve_2017_5715_wa 1 + check_vector_size bpiall_sync_exception_aarch64 + +vector_entry bpiall_irq_aarch64 + apply_cve_2017_5715_wa 2 + check_vector_size bpiall_irq_aarch64 + +vector_entry bpiall_fiq_aarch64 + apply_cve_2017_5715_wa 4 + check_vector_size bpiall_fiq_aarch64 + +vector_entry bpiall_serror_aarch64 + apply_cve_2017_5715_wa 8 + check_vector_size bpiall_serror_aarch64 + + /* --------------------------------------------------------------------- + * Lower EL using AArch32 : 0x600 - 0x800 + * --------------------------------------------------------------------- + */ +vector_entry bpiall_sync_exception_aarch32 + apply_cve_2017_5715_wa 1 + check_vector_size bpiall_sync_exception_aarch32 + +vector_entry bpiall_irq_aarch32 + apply_cve_2017_5715_wa 2 + check_vector_size bpiall_irq_aarch32 + +vector_entry bpiall_fiq_aarch32 + apply_cve_2017_5715_wa 4 + check_vector_size bpiall_fiq_aarch32 + +vector_entry bpiall_serror_aarch32 + apply_cve_2017_5715_wa 8 + check_vector_size bpiall_serror_aarch32 + + /* --------------------------------------------------------------------- + * This vector table is used while the workaround is executing. It + * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError + * workaround stubs to enter EL3 from S-EL1. It restores the previous + * EL3 state before proceeding with the normal runtime exception vector. 
+ * --------------------------------------------------------------------- + */ +vector_base wa_cve_2017_5715_bpiall_ret_vbar + + /* --------------------------------------------------------------------- + * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED) + * --------------------------------------------------------------------- + */ +vector_entry bpiall_ret_sync_exception_sp_el0 + b report_unhandled_exception + check_vector_size bpiall_ret_sync_exception_sp_el0 + +vector_entry bpiall_ret_irq_sp_el0 + b report_unhandled_interrupt + check_vector_size bpiall_ret_irq_sp_el0 + +vector_entry bpiall_ret_fiq_sp_el0 + b report_unhandled_interrupt + check_vector_size bpiall_ret_fiq_sp_el0 + +vector_entry bpiall_ret_serror_sp_el0 + b report_unhandled_exception + check_vector_size bpiall_ret_serror_sp_el0 + + /* --------------------------------------------------------------------- + * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED) + * --------------------------------------------------------------------- + */ +vector_entry bpiall_ret_sync_exception_sp_elx + b report_unhandled_exception + check_vector_size bpiall_ret_sync_exception_sp_elx + +vector_entry bpiall_ret_irq_sp_elx + b report_unhandled_interrupt + check_vector_size bpiall_ret_irq_sp_elx + +vector_entry bpiall_ret_fiq_sp_elx + b report_unhandled_interrupt + check_vector_size bpiall_ret_fiq_sp_elx + +vector_entry bpiall_ret_serror_sp_elx + b report_unhandled_exception + check_vector_size bpiall_ret_serror_sp_elx + + /* --------------------------------------------------------------------- + * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED) + * --------------------------------------------------------------------- + */ +vector_entry bpiall_ret_sync_exception_aarch64 + b report_unhandled_exception + check_vector_size bpiall_ret_sync_exception_aarch64 + +vector_entry bpiall_ret_irq_aarch64 + b report_unhandled_interrupt + check_vector_size bpiall_ret_irq_aarch64 + +vector_entry bpiall_ret_fiq_aarch64 + b report_unhandled_interrupt + check_vector_size bpiall_ret_fiq_aarch64 + +vector_entry bpiall_ret_serror_aarch64 + b report_unhandled_exception + check_vector_size bpiall_ret_serror_aarch64 + + /* --------------------------------------------------------------------- + * Lower EL using AArch32 : 0x600 - 0x800 + * --------------------------------------------------------------------- + */ +vector_entry bpiall_ret_sync_exception_aarch32 + /* + * w2 indicates which SEL1 stub was run and thus which original vector was used + * w3-w6 contain saved system register state (esr_el3 in w3) + * Restore LR and ELR_EL3 register state from the GP regs context + */ + ldp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] + + /* Apply the restored system register state */ + msr esr_el3, x3 + msr spsr_el3, x4 + msr scr_el3, x5 + msr sctlr_el1, x6 + msr elr_el3, x7 + + /* + * Workaround is complete, so swap VBAR_EL3 to point + * to workaround entry table in preparation for subsequent + * Sync/IRQ/FIQ/SError exceptions. + */ + adr x0, wa_cve_2017_5715_bpiall_vbar + msr vbar_el3, x0 + + /* + * Restore all GP regs except x2 and x3 (esr). The value in x2 + * indicates the type of the original exception. 
+ */ + ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] + ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] + ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] + ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] + ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] + ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] + ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] + ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] + ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] + ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20] + ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22] + ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24] + ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26] + ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28] + + /* Fast path Sync exceptions. Static predictor will fall through. */ + tbz w2, #0, workaround_not_sync + + /* + * Check if SMC is coming from A64 state on #0 + * with W0 = SMCCC_ARCH_WORKAROUND_1 + * + * This sequence evaluates as: + * (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE) + * allowing use of a single branch operation + */ + orr w2, wzr, #SMCCC_ARCH_WORKAROUND_1 + cmp w0, w2 + mov_imm w2, ESR_EL3_A64_SMC0 + ccmp w3, w2, #0, eq + /* Static predictor will predict a fall through */ + bne 1f + eret +1: + ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] + b sync_exception_aarch64 + check_vector_size bpiall_ret_sync_exception_aarch32 + +vector_entry bpiall_ret_irq_aarch32 + b report_unhandled_interrupt + + /* + * Post-workaround fan-out for non-sync exceptions + */ +workaround_not_sync: + tbnz w2, #3, bpiall_ret_serror + tbnz w2, #2, bpiall_ret_fiq + /* IRQ */ + ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] + b irq_aarch64 + +bpiall_ret_fiq: + ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] + b fiq_aarch64 + +bpiall_ret_serror: + ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] + b serror_aarch64 + check_vector_size bpiall_ret_irq_aarch32 + +vector_entry bpiall_ret_fiq_aarch32 + b report_unhandled_interrupt + check_vector_size bpiall_ret_fiq_aarch32 + +vector_entry bpiall_ret_serror_aarch32 + b report_unhandled_exception + check_vector_size bpiall_ret_serror_aarch32 diff --git a/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S new file mode 100644 index 00000000..039e373c --- /dev/null +++ b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include + + .globl wa_cve_2017_5715_mmu_vbar + +#define ESR_EL3_A64_SMC0 0x5e000000 + +vector_base wa_cve_2017_5715_mmu_vbar + + .macro apply_cve_2017_5715_wa _is_sync_exception + stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] + mrs x1, sctlr_el3 + /* Disable MMU */ + bic x1, x1, #SCTLR_M_BIT + msr sctlr_el3, x1 + isb + /* Enable MMU */ + orr x1, x1, #SCTLR_M_BIT + msr sctlr_el3, x1 + /* + * Defer ISB to avoid synchronizing twice in case we hit + * the workaround SMC call which will implicitly synchronize + * because of the ERET instruction. + */ + + /* + * Ensure SMC is coming from A64 state on #0 + * with W0 = SMCCC_ARCH_WORKAROUND_1 + * + * This sequence evaluates as: + * (W0==SMCCC_ARCH_WORKAROUND_1) ? 
(ESR_EL3==SMC#0) : (NE) + * allowing use of a single branch operation + */ + .if \_is_sync_exception + orr w1, wzr, #SMCCC_ARCH_WORKAROUND_1 + cmp w0, w1 + mrs x0, esr_el3 + mov_imm w1, ESR_EL3_A64_SMC0 + ccmp w0, w1, #0, eq + /* Static predictor will predict a fall through */ + bne 1f + eret +1: + .endif + + /* + * Synchronize now to enable the MMU. This is required + * to ensure the load pair below reads the data stored earlier. + */ + isb + ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] + .endm + + /* --------------------------------------------------------------------- + * Current EL with SP_EL0 : 0x0 - 0x200 + * --------------------------------------------------------------------- + */ +vector_entry mmu_sync_exception_sp_el0 + b sync_exception_sp_el0 + check_vector_size mmu_sync_exception_sp_el0 + +vector_entry mmu_irq_sp_el0 + b irq_sp_el0 + check_vector_size mmu_irq_sp_el0 + +vector_entry mmu_fiq_sp_el0 + b fiq_sp_el0 + check_vector_size mmu_fiq_sp_el0 + +vector_entry mmu_serror_sp_el0 + b serror_sp_el0 + check_vector_size mmu_serror_sp_el0 + + /* --------------------------------------------------------------------- + * Current EL with SP_ELx: 0x200 - 0x400 + * --------------------------------------------------------------------- + */ +vector_entry mmu_sync_exception_sp_elx + b sync_exception_sp_elx + check_vector_size mmu_sync_exception_sp_elx + +vector_entry mmu_irq_sp_elx + b irq_sp_elx + check_vector_size mmu_irq_sp_elx + +vector_entry mmu_fiq_sp_elx + b fiq_sp_elx + check_vector_size mmu_fiq_sp_elx + +vector_entry mmu_serror_sp_elx + b serror_sp_elx + check_vector_size mmu_serror_sp_elx + + /* --------------------------------------------------------------------- + * Lower EL using AArch64 : 0x400 - 0x600 + * --------------------------------------------------------------------- + */ +vector_entry mmu_sync_exception_aarch64 + apply_cve_2017_5715_wa _is_sync_exception=1 + b sync_exception_aarch64 + check_vector_size mmu_sync_exception_aarch64 + +vector_entry mmu_irq_aarch64 + apply_cve_2017_5715_wa _is_sync_exception=0 + b irq_aarch64 + check_vector_size mmu_irq_aarch64 + +vector_entry mmu_fiq_aarch64 + apply_cve_2017_5715_wa _is_sync_exception=0 + b fiq_aarch64 + check_vector_size mmu_fiq_aarch64 + +vector_entry mmu_serror_aarch64 + apply_cve_2017_5715_wa _is_sync_exception=0 + b serror_aarch64 + check_vector_size mmu_serror_aarch64 + + /* --------------------------------------------------------------------- + * Lower EL using AArch32 : 0x600 - 0x800 + * --------------------------------------------------------------------- + */ +vector_entry mmu_sync_exception_aarch32 + apply_cve_2017_5715_wa _is_sync_exception=1 + b sync_exception_aarch32 + check_vector_size mmu_sync_exception_aarch32 + +vector_entry mmu_irq_aarch32 + apply_cve_2017_5715_wa _is_sync_exception=0 + b irq_aarch32 + check_vector_size mmu_irq_aarch32 + +vector_entry mmu_fiq_aarch32 + apply_cve_2017_5715_wa _is_sync_exception=0 + b fiq_aarch32 + check_vector_size mmu_fiq_aarch32 + +vector_entry mmu_serror_aarch32 + apply_cve_2017_5715_wa _is_sync_exception=0 + b serror_aarch32 + check_vector_size mmu_serror_aarch32 diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S deleted file mode 100644 index cd824973..00000000 --- a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S +++ /dev/null @@ -1,357 +0,0 @@ -/* - * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. 
- * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#include -#include -#include -#include - - .globl workaround_bpiall_vbar0_runtime_exceptions - -#define EMIT_BPIALL 0xee070fd5 -#define EMIT_SMC 0xe1600070 -#define ESR_EL3_A64_SMC0 0x5e000000 - - .macro enter_workaround _from_vector - /* - * Save register state to enable a call to AArch32 S-EL1 and return - * Identify the original calling vector in w2 (==_from_vector) - * Use w3-w6 for additional register state preservation while in S-EL1 - */ - - /* Save GP regs */ - stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] - stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] - stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] - stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] - stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] - stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] - stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] - stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] - stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] - stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] - stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20] - stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22] - stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24] - stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26] - stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28] - - /* Identify the original exception vector */ - mov w2, \_from_vector - - /* Preserve 32-bit system registers in GP registers through the workaround */ - mrs x3, esr_el3 - mrs x4, spsr_el3 - mrs x5, scr_el3 - mrs x6, sctlr_el1 - - /* - * Preserve LR and ELR_EL3 registers in the GP regs context. - * Temporarily use the CTX_GPREG_SP_EL0 slot to preserve ELR_EL3 - * through the workaround. This is OK because at this point the - * current state for this context's SP_EL0 is in the live system - * register, which is unmodified by the workaround. - */ - mrs x7, elr_el3 - stp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] - - /* - * Load system registers for entry to S-EL1. - */ - - /* Mask all interrupts and set AArch32 Supervisor mode */ - movz w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK) - - /* Switch EL3 exception vectors while the workaround is executing. */ - adr x9, workaround_bpiall_vbar1_runtime_exceptions - - /* Setup SCTLR_EL1 with MMU off and I$ on */ - ldr x10, stub_sel1_sctlr - - /* Land at the S-EL1 workaround stub */ - adr x11, aarch32_stub - - /* - * Setting SCR_EL3 to all zeroes means that the NS, RW - * and SMD bits are configured as expected. - */ - msr scr_el3, xzr - msr spsr_el3, x8 - msr vbar_el3, x9 - msr sctlr_el1, x10 - msr elr_el3, x11 - - eret - .endm - - /* --------------------------------------------------------------------- - * This vector table is used at runtime to enter the workaround at - * AArch32 S-EL1 for Sync/IRQ/FIQ/SError exceptions. If the workaround - * is not enabled, the existing runtime exception vector table is used. 
- * --------------------------------------------------------------------- - */ -vector_base workaround_bpiall_vbar0_runtime_exceptions - - /* --------------------------------------------------------------------- - * Current EL with SP_EL0 : 0x0 - 0x200 - * --------------------------------------------------------------------- - */ -vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0 - b sync_exception_sp_el0 - nop /* to force 8 byte alignment for the following stub */ - - /* - * Since each vector table entry is 128 bytes, we can store the - * stub context in the unused space to minimize memory footprint. - */ -stub_sel1_sctlr: - .quad SCTLR_AARCH32_EL1_RES1 | SCTLR_I_BIT - -aarch32_stub: - .word EMIT_BPIALL - .word EMIT_SMC - - check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0 - -vector_entry workaround_bpiall_vbar0_irq_sp_el0 - b irq_sp_el0 - check_vector_size workaround_bpiall_vbar0_irq_sp_el0 - -vector_entry workaround_bpiall_vbar0_fiq_sp_el0 - b fiq_sp_el0 - check_vector_size workaround_bpiall_vbar0_fiq_sp_el0 - -vector_entry workaround_bpiall_vbar0_serror_sp_el0 - b serror_sp_el0 - check_vector_size workaround_bpiall_vbar0_serror_sp_el0 - - /* --------------------------------------------------------------------- - * Current EL with SP_ELx: 0x200 - 0x400 - * --------------------------------------------------------------------- - */ -vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx - b sync_exception_sp_elx - check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx - -vector_entry workaround_bpiall_vbar0_irq_sp_elx - b irq_sp_elx - check_vector_size workaround_bpiall_vbar0_irq_sp_elx - -vector_entry workaround_bpiall_vbar0_fiq_sp_elx - b fiq_sp_elx - check_vector_size workaround_bpiall_vbar0_fiq_sp_elx - -vector_entry workaround_bpiall_vbar0_serror_sp_elx - b serror_sp_elx - check_vector_size workaround_bpiall_vbar0_serror_sp_elx - - /* --------------------------------------------------------------------- - * Lower EL using AArch64 : 0x400 - 0x600 - * --------------------------------------------------------------------- - */ -vector_entry workaround_bpiall_vbar0_sync_exception_aarch64 - enter_workaround 1 - check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64 - -vector_entry workaround_bpiall_vbar0_irq_aarch64 - enter_workaround 2 - check_vector_size workaround_bpiall_vbar0_irq_aarch64 - -vector_entry workaround_bpiall_vbar0_fiq_aarch64 - enter_workaround 4 - check_vector_size workaround_bpiall_vbar0_fiq_aarch64 - -vector_entry workaround_bpiall_vbar0_serror_aarch64 - enter_workaround 8 - check_vector_size workaround_bpiall_vbar0_serror_aarch64 - - /* --------------------------------------------------------------------- - * Lower EL using AArch32 : 0x600 - 0x800 - * --------------------------------------------------------------------- - */ -vector_entry workaround_bpiall_vbar0_sync_exception_aarch32 - enter_workaround 1 - check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32 - -vector_entry workaround_bpiall_vbar0_irq_aarch32 - enter_workaround 2 - check_vector_size workaround_bpiall_vbar0_irq_aarch32 - -vector_entry workaround_bpiall_vbar0_fiq_aarch32 - enter_workaround 4 - check_vector_size workaround_bpiall_vbar0_fiq_aarch32 - -vector_entry workaround_bpiall_vbar0_serror_aarch32 - enter_workaround 8 - check_vector_size workaround_bpiall_vbar0_serror_aarch32 - - /* --------------------------------------------------------------------- - * This vector table is used while the workaround is executing. 
It - * installs a simple SMC handler to allow the Sync/IRQ/FIQ/SError - * workaround stubs to enter EL3 from S-EL1. It restores the previous - * EL3 state before proceeding with the normal runtime exception vector. - * --------------------------------------------------------------------- - */ -vector_base workaround_bpiall_vbar1_runtime_exceptions - - /* --------------------------------------------------------------------- - * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED) - * --------------------------------------------------------------------- - */ -vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0 - b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0 - -vector_entry workaround_bpiall_vbar1_irq_sp_el0 - b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_irq_sp_el0 - -vector_entry workaround_bpiall_vbar1_fiq_sp_el0 - b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_fiq_sp_el0 - -vector_entry workaround_bpiall_vbar1_serror_sp_el0 - b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_serror_sp_el0 - - /* --------------------------------------------------------------------- - * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED) - * --------------------------------------------------------------------- - */ -vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx - b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx - -vector_entry workaround_bpiall_vbar1_irq_sp_elx - b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_irq_sp_elx - -vector_entry workaround_bpiall_vbar1_fiq_sp_elx - b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_fiq_sp_elx - -vector_entry workaround_bpiall_vbar1_serror_sp_elx - b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_serror_sp_elx - - /* --------------------------------------------------------------------- - * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED) - * --------------------------------------------------------------------- - */ -vector_entry workaround_bpiall_vbar1_sync_exception_aarch64 - b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64 - -vector_entry workaround_bpiall_vbar1_irq_aarch64 - b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_irq_aarch64 - -vector_entry workaround_bpiall_vbar1_fiq_aarch64 - b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_fiq_aarch64 - -vector_entry workaround_bpiall_vbar1_serror_aarch64 - b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_serror_aarch64 - - /* --------------------------------------------------------------------- - * Lower EL using AArch32 : 0x600 - 0x800 - * --------------------------------------------------------------------- - */ -vector_entry workaround_bpiall_vbar1_sync_exception_aarch32 - /* - * w2 indicates which SEL1 stub was run and thus which original vector was used - * w3-w6 contain saved system register state (esr_el3 in w3) - * Restore LR and ELR_EL3 register state from the GP regs context - */ - ldp x30, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] - - /* Apply the restored system register state */ - msr esr_el3, x3 - msr spsr_el3, x4 - msr scr_el3, x5 - msr sctlr_el1, x6 - msr elr_el3, x7 - - /* - * Workaround is complete, so swap VBAR_EL3 to point - * to workaround entry table in preparation for subsequent - * Sync/IRQ/FIQ/SError exceptions. 
- */ - adr x0, workaround_bpiall_vbar0_runtime_exceptions - msr vbar_el3, x0 - - /* - * Restore all GP regs except x2 and x3 (esr). The value in x2 - * indicates the type of the original exception. - */ - ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] - ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] - ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] - ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] - ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] - ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] - ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] - ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] - ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] - ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20] - ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22] - ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24] - ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26] - ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28] - - /* Fast path Sync exceptions. Static predictor will fall through. */ - tbz w2, #0, workaround_not_sync - - /* - * Check if SMC is coming from A64 state on #0 - * with W0 = SMCCC_ARCH_WORKAROUND_1 - * - * This sequence evaluates as: - * (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE) - * allowing use of a single branch operation - */ - orr w2, wzr, #SMCCC_ARCH_WORKAROUND_1 - cmp w0, w2 - mov_imm w2, ESR_EL3_A64_SMC0 - ccmp w3, w2, #0, eq - /* Static predictor will predict a fall through */ - bne 1f - eret -1: - ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] - b sync_exception_aarch64 - check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32 - -vector_entry workaround_bpiall_vbar1_irq_aarch32 - b report_unhandled_interrupt - - /* - * Post-workaround fan-out for non-sync exceptions - */ -workaround_not_sync: - tbnz w2, #3, workaround_bpiall_vbar1_serror - tbnz w2, #2, workaround_bpiall_vbar1_fiq - /* IRQ */ - ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] - b irq_aarch64 - -workaround_bpiall_vbar1_fiq: - ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] - b fiq_aarch64 - -workaround_bpiall_vbar1_serror: - ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] - b serror_aarch64 - check_vector_size workaround_bpiall_vbar1_irq_aarch32 - -vector_entry workaround_bpiall_vbar1_fiq_aarch32 - b report_unhandled_interrupt - check_vector_size workaround_bpiall_vbar1_fiq_aarch32 - -vector_entry workaround_bpiall_vbar1_serror_aarch32 - b report_unhandled_exception - check_vector_size workaround_bpiall_vbar1_serror_aarch32 diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S deleted file mode 100644 index b24b620c..00000000 --- a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#include -#include -#include -#include - - .globl workaround_mmu_runtime_exceptions - -#define ESR_EL3_A64_SMC0 0x5e000000 - -vector_base workaround_mmu_runtime_exceptions - - .macro apply_workaround _is_sync_exception - stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] - mrs x1, sctlr_el3 - /* Disable MMU */ - bic x1, x1, #SCTLR_M_BIT - msr sctlr_el3, x1 - isb - /* Enable MMU */ - orr x1, x1, #SCTLR_M_BIT - msr sctlr_el3, x1 - /* - * Defer ISB to avoid synchronizing twice in case we hit - * the workaround SMC call which will implicitly synchronize - * because of the ERET instruction. 
- */ - - /* - * Ensure SMC is coming from A64 state on #0 - * with W0 = SMCCC_ARCH_WORKAROUND_1 - * - * This sequence evaluates as: - * (W0==SMCCC_ARCH_WORKAROUND_1) ? (ESR_EL3==SMC#0) : (NE) - * allowing use of a single branch operation - */ - .if \_is_sync_exception - orr w1, wzr, #SMCCC_ARCH_WORKAROUND_1 - cmp w0, w1 - mrs x0, esr_el3 - mov_imm w1, ESR_EL3_A64_SMC0 - ccmp w0, w1, #0, eq - /* Static predictor will predict a fall through */ - bne 1f - eret -1: - .endif - - /* - * Synchronize now to enable the MMU. This is required - * to ensure the load pair below reads the data stored earlier. - */ - isb - ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] - .endm - - /* --------------------------------------------------------------------- - * Current EL with SP_EL0 : 0x0 - 0x200 - * --------------------------------------------------------------------- - */ -vector_entry workaround_mmu_sync_exception_sp_el0 - b sync_exception_sp_el0 - check_vector_size workaround_mmu_sync_exception_sp_el0 - -vector_entry workaround_mmu_irq_sp_el0 - b irq_sp_el0 - check_vector_size workaround_mmu_irq_sp_el0 - -vector_entry workaround_mmu_fiq_sp_el0 - b fiq_sp_el0 - check_vector_size workaround_mmu_fiq_sp_el0 - -vector_entry workaround_mmu_serror_sp_el0 - b serror_sp_el0 - check_vector_size workaround_mmu_serror_sp_el0 - - /* --------------------------------------------------------------------- - * Current EL with SP_ELx: 0x200 - 0x400 - * --------------------------------------------------------------------- - */ -vector_entry workaround_mmu_sync_exception_sp_elx - b sync_exception_sp_elx - check_vector_size workaround_mmu_sync_exception_sp_elx - -vector_entry workaround_mmu_irq_sp_elx - b irq_sp_elx - check_vector_size workaround_mmu_irq_sp_elx - -vector_entry workaround_mmu_fiq_sp_elx - b fiq_sp_elx - check_vector_size workaround_mmu_fiq_sp_elx - -vector_entry workaround_mmu_serror_sp_elx - b serror_sp_elx - check_vector_size workaround_mmu_serror_sp_elx - - /* --------------------------------------------------------------------- - * Lower EL using AArch64 : 0x400 - 0x600 - * --------------------------------------------------------------------- - */ -vector_entry workaround_mmu_sync_exception_aarch64 - apply_workaround _is_sync_exception=1 - b sync_exception_aarch64 - check_vector_size workaround_mmu_sync_exception_aarch64 - -vector_entry workaround_mmu_irq_aarch64 - apply_workaround _is_sync_exception=0 - b irq_aarch64 - check_vector_size workaround_mmu_irq_aarch64 - -vector_entry workaround_mmu_fiq_aarch64 - apply_workaround _is_sync_exception=0 - b fiq_aarch64 - check_vector_size workaround_mmu_fiq_aarch64 - -vector_entry workaround_mmu_serror_aarch64 - apply_workaround _is_sync_exception=0 - b serror_aarch64 - check_vector_size workaround_mmu_serror_aarch64 - - /* --------------------------------------------------------------------- - * Lower EL using AArch32 : 0x600 - 0x800 - * --------------------------------------------------------------------- - */ -vector_entry workaround_mmu_sync_exception_aarch32 - apply_workaround _is_sync_exception=1 - b sync_exception_aarch32 - check_vector_size workaround_mmu_sync_exception_aarch32 - -vector_entry workaround_mmu_irq_aarch32 - apply_workaround _is_sync_exception=0 - b irq_aarch32 - check_vector_size workaround_mmu_irq_aarch32 - -vector_entry workaround_mmu_fiq_aarch32 - apply_workaround _is_sync_exception=0 - b fiq_aarch32 - check_vector_size workaround_mmu_fiq_aarch32 - -vector_entry workaround_mmu_serror_aarch32 - apply_workaround _is_sync_exception=0 - 
b serror_aarch32 - check_vector_size workaround_mmu_serror_aarch32 diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk index 3ba8c1fc..434c13ea 100644 --- a/lib/cpus/cpu-ops.mk +++ b/lib/cpus/cpu-ops.mk @@ -17,6 +17,8 @@ A53_DISABLE_NON_TEMPORAL_HINT ?=1 A57_DISABLE_NON_TEMPORAL_HINT ?=1 WORKAROUND_CVE_2017_5715 ?=1 +WORKAROUND_CVE_2018_3639 ?=1 +DYNAMIC_WORKAROUND_CVE_2018_3639 ?=0 # Process SKIP_A57_L1_FLUSH_PWR_DWN flag $(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN)) @@ -34,6 +36,19 @@ $(eval $(call add_define,A57_DISABLE_NON_TEMPORAL_HINT)) $(eval $(call assert_boolean,WORKAROUND_CVE_2017_5715)) $(eval $(call add_define,WORKAROUND_CVE_2017_5715)) +# Process WORKAROUND_CVE_2018_3639 flag +$(eval $(call assert_boolean,WORKAROUND_CVE_2018_3639)) +$(eval $(call add_define,WORKAROUND_CVE_2018_3639)) + +$(eval $(call assert_boolean,DYNAMIC_WORKAROUND_CVE_2018_3639)) +$(eval $(call add_define,DYNAMIC_WORKAROUND_CVE_2018_3639)) + +ifneq (${DYNAMIC_WORKAROUND_CVE_2018_3639},0) + ifeq (${WORKAROUND_CVE_2018_3639},0) + $(error "Error: WORKAROUND_CVE_2018_3639 must be 1 if DYNAMIC_WORKAROUND_CVE_2018_3639 is 1") + endif +endif + # CPU Errata Build flags. # These should be enabled by the platform if the erratum workaround needs to be # applied. diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S index 620ec16f..caaf413f 100644 --- a/lib/el3_runtime/aarch64/context.S +++ b/lib/el3_runtime/aarch64/context.S @@ -384,6 +384,15 @@ func el3_exit msr spsr_el3, x16 msr elr_el3, x17 +#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639 + /* Restore mitigation state as it was on entry to EL3 */ + ldr x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE] + cmp x17, xzr + beq 1f + blr x17 +#endif + +1: /* Restore saved general purpose registers and return */ b restore_gp_registers_eret endfunc el3_exit
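
Editor's note on usage: the accessor added to lib/cpus/aarch64/cpu_helpers.S is intended to be consumed from BL31 C code. The sketch below is illustrative only; the prototype of wa_cve_2018_3639_get_disable_ptr() is taken from the comment block in this patch, while the wrapper function and its name are hypothetical and not part of the change. Note also that cpu-ops.mk requires WORKAROUND_CVE_2018_3639=1 whenever DYNAMIC_WORKAROUND_CVE_2018_3639=1 is set.

/*
 * Illustrative sketch only (not part of this patch): shows how BL31 C
 * code might use the accessor exported by cpu_helpers.S to decide
 * whether the current core relies on the dynamic CVE-2018-3639
 * mitigation. The wrapper name below is hypothetical.
 */
#include <stddef.h>

/* Prototype as documented in lib/cpus/aarch64/cpu_helpers.S */
void *wa_cve_2018_3639_get_disable_ptr(void);

static int cve_2018_3639_mitigation_is_dynamic(void)
{
	/*
	 * A non-NULL pointer means the core employs dynamic mitigation;
	 * NULL means the core is either mitigated statically at reset
	 * (as the Cortex-A57/A72/A73/A75 reset functions in this patch
	 * do) or is not affected by CVE-2018-3639.
	 */
	return wa_cve_2018_3639_get_disable_ptr() != NULL;
}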