author     Dimitris Papastamos <dimitris.papastamos@arm.com>	2018-05-29 09:28:05 +0100
committer  Anson Huang <Anson.Huang@nxp.com>	2018-06-21 13:29:21 +0800
commit     770f853dcb47ad856c060ffc6fefd626ae40e52c
tree       89e973cc90a4341ebdec51f4e26805fa1a81319e /lib
parent     959392911781f3bd4263a01d703097998d0aaa0f
Merge pull request #1392 from dp-arm/dp/cve_2018_3639
Implement workaround for CVE-2018-3639 on Cortex A57/A72/A73 and A75
Conflicts:
	services/arm_arch_svc/arm_arch_svc_setup.c
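CVE-2018-3639 (Spectre variant 4, speculative store bypass) is mitigated here in two ways. Statically, each affected core's reset function sets an implementation-defined "disable load-pass-store" bit in its auxiliary control register, so the mitigation is always on. Optionally, with DYNAMIC_WORKAROUND_CVE_2018_3639=1, EL3 keeps per-CPU state so the mitigation can be switched off on return to a lower EL that has requested that. The new cpu_helpers.S routine wa_cve_2018_3639_get_disable_ptr exposes the per-CPU disable hook; a minimal C sketch of a caller is below (the prototype comes from the comment in the patch, while the wrapper name and the void (*)(void) signature of the returned pointer are illustrative assumptions):

    #include <stddef.h>

    /* Prototype as documented in the cpu_helpers.S hunk further down. */
    void *wa_cve_2018_3639_get_disable_ptr(void);

    /*
     * Hypothetical caller: run the per-CPU "disable mitigation" hook on
     * cores that use dynamic mitigation.  NULL means the core is either
     * unaffected or relies on the static (always-on) mitigation.
     */
    static void maybe_disable_cve_2018_3639_mitigation(void)
    {
        void (*disable_fn)(void) = (void (*)(void))wa_cve_2018_3639_get_disable_ptr();

        if (disable_fn != NULL)
            disable_fn();
    }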
Diffstat (limited to 'lib')
 lib/cpus/aarch32/cortex_a57.S              |  18
 lib/cpus/aarch32/cortex_a72.S              |  19
 lib/cpus/aarch64/cortex_a57.S              |  23
 lib/cpus/aarch64/cortex_a72.S              |  23
 lib/cpus/aarch64/cortex_a73.S              |  22
 lib/cpus/aarch64/cortex_a75.S              |  22
 lib/cpus/aarch64/cpu_helpers.S             |  32
 lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S | 164   (renamed from lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S)
 lib/cpus/aarch64/wa_cve_2017_5715_mmu.S    |  86   (renamed from lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S)
 lib/cpus/cpu-ops.mk                        |  15
 lib/el3_runtime/aarch64/context.S          |   9
11 files changed, 296 insertions(+), 137 deletions(-)
diff --git a/lib/cpus/aarch32/cortex_a57.S b/lib/cpus/aarch32/cortex_a57.S
index f446bfff..dff86be7 100644
--- a/lib/cpus/aarch32/cortex_a57.S
+++ b/lib/cpus/aarch32/cortex_a57.S
@@ -337,6 +337,15 @@ func check_errata_cve_2017_5715
 	bx	lr
 endfunc check_errata_cve_2017_5715
 
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+	mov	r0, #ERRATA_APPLIES
+#else
+	mov	r0, #ERRATA_MISSING
+#endif
+	bx	lr
+endfunc check_errata_cve_2018_3639
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A57.
 	 * Shall clobber: r0-r6
@@ -392,6 +401,14 @@ func cortex_a57_reset_func
 	bl	errata_a57_859972_wa
 #endif
 
+#if WORKAROUND_CVE_2018_3639
+	ldcopr16	r0, r1, CORTEX_A57_CPUACTLR
+	orr64_imm	r0, r1, CORTEX_A57_CPUACTLR_DIS_LOAD_PASS_STORE
+	stcopr16	r0, r1, CORTEX_A57_CPUACTLR
+	isb
+	dsb	sy
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * ---------------------------------------------
@@ -525,6 +542,7 @@ func cortex_a57_errata_report
 	report_errata ERRATA_A57_833471, cortex_a57, 833471
 	report_errata ERRATA_A57_859972, cortex_a57, 859972
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
+	report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
 
 	pop	{r12, lr}
 	bx	lr
diff --git a/lib/cpus/aarch32/cortex_a72.S b/lib/cpus/aarch32/cortex_a72.S
index 56e91f5c..3bc3388b 100644
--- a/lib/cpus/aarch32/cortex_a72.S
+++ b/lib/cpus/aarch32/cortex_a72.S
@@ -92,6 +92,15 @@ func check_errata_cve_2017_5715
 	bx	lr
 endfunc check_errata_cve_2017_5715
 
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+	mov	r0, #ERRATA_APPLIES
+#else
+	mov	r0, #ERRATA_MISSING
+#endif
+	bx	lr
+endfunc check_errata_cve_2018_3639
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A72.
 	 * -------------------------------------------------
@@ -105,6 +114,15 @@ func cortex_a72_reset_func
 	mov	r0, r4
 	bl	errata_a72_859971_wa
 #endif
+
+#if WORKAROUND_CVE_2018_3639
+	ldcopr16	r0, r1, CORTEX_A72_CPUACTLR
+	orr64_imm	r0, r1, CORTEX_A72_CPUACTLR_DIS_LOAD_PASS_STORE
+	stcopr16	r0, r1, CORTEX_A72_CPUACTLR
+	isb
+	dsb	sy
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * ---------------------------------------------
@@ -241,6 +259,7 @@ func cortex_a72_errata_report
 	 */
 	report_errata ERRATA_A72_859971, cortex_a72, 859971
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
+	report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
 
 	pop	{r12, lr}
 	bx	lr
diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S
index 4d072e11..07fadd15 100644
--- a/lib/cpus/aarch64/cortex_a57.S
+++ b/lib/cpus/aarch64/cortex_a57.S
@@ -337,6 +337,15 @@ func check_errata_cve_2017_5715
 	ret
 endfunc check_errata_cve_2017_5715
 
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2018_3639
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A57.
 	 * Shall clobber: x0-x19
@@ -393,10 +402,18 @@ func cortex_a57_reset_func
 #endif
 
 #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
-	adr	x0, workaround_mmu_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_mmu_vbar
 	msr	vbar_el3, x0
 #endif
 
+#if WORKAROUND_CVE_2018_3639
+	mrs	x0, CORTEX_A57_CPUACTLR_EL1
+	orr	x0, x0, #CORTEX_A57_CPUACTLR_EL1_DIS_LOAD_PASS_STORE
+	msr	CORTEX_A57_CPUACTLR_EL1, x0
+	isb
+	dsb	sy
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * ---------------------------------------------
@@ -528,6 +545,7 @@ func cortex_a57_errata_report
 	report_errata ERRATA_A57_833471, cortex_a57, 833471
 	report_errata ERRATA_A57_859972, cortex_a57, 859972
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a57, cve_2017_5715
+	report_errata WORKAROUND_CVE_2018_3639, cortex_a57, cve_2018_3639
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -555,8 +573,9 @@ func cortex_a57_cpu_reg_dump
 	ret
 endfunc cortex_a57_cpu_reg_dump
 
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a57, CORTEX_A57_MIDR, \
+declare_cpu_ops_wa cortex_a57, CORTEX_A57_MIDR, \
 	cortex_a57_reset_func, \
 	check_errata_cve_2017_5715, \
+	CPU_NO_EXTRA2_FUNC, \
 	cortex_a57_core_pwr_dwn, \
 	cortex_a57_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S
index 29fa77b9..bb9381d1 100644
--- a/lib/cpus/aarch64/cortex_a72.S
+++ b/lib/cpus/aarch64/cortex_a72.S
@@ -110,6 +110,15 @@ func check_errata_cve_2017_5715
 	ret
 endfunc check_errata_cve_2017_5715
 
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2018_3639
+
 	/* -------------------------------------------------
 	 * The CPU Ops reset function for Cortex-A72.
 	 * -------------------------------------------------
@@ -126,11 +135,19 @@ func cortex_a72_reset_func
 
 #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
 	cpu_check_csv2	x0, 1f
-	adr	x0, workaround_mmu_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_mmu_vbar
 	msr	vbar_el3, x0
 1:
 #endif
 
+#if WORKAROUND_CVE_2018_3639
+	mrs	x0, CORTEX_A72_CPUACTLR_EL1
+	orr	x0, x0, #CORTEX_A72_CPUACTLR_EL1_DIS_LOAD_PASS_STORE
+	msr	CORTEX_A72_CPUACTLR_EL1, x0
+	isb
+	dsb	sy
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * ---------------------------------------------
@@ -265,6 +282,7 @@ func cortex_a72_errata_report
 	 */
 	report_errata ERRATA_A72_859971, cortex_a72, 859971
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a72, cve_2017_5715
+	report_errata WORKAROUND_CVE_2018_3639, cortex_a72, cve_2018_3639
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -292,8 +310,9 @@ func cortex_a72_cpu_reg_dump
 	ret
 endfunc cortex_a72_cpu_reg_dump
 
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a72, CORTEX_A72_MIDR, \
+declare_cpu_ops_wa cortex_a72, CORTEX_A72_MIDR, \
 	cortex_a72_reset_func, \
 	check_errata_cve_2017_5715, \
+	CPU_NO_EXTRA2_FUNC, \
 	cortex_a72_core_pwr_dwn, \
 	cortex_a72_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S
index 0a961ea3..d595f128 100644
--- a/lib/cpus/aarch64/cortex_a73.S
+++ b/lib/cpus/aarch64/cortex_a73.S
@@ -38,11 +38,18 @@ endfunc cortex_a73_disable_smp
 func cortex_a73_reset_func
 #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
 	cpu_check_csv2	x0, 1f
-	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_bpiall_vbar
 	msr	vbar_el3, x0
 1:
 #endif
 
+#if WORKAROUND_CVE_2018_3639
+	mrs	x0, CORTEX_A73_IMP_DEF_REG1
+	orr	x0, x0, #CORTEX_A73_IMP_DEF_REG1_DISABLE_LOAD_PASS_STORE
+	msr	CORTEX_A73_IMP_DEF_REG1, x0
+	isb
+#endif
+
 	/* ---------------------------------------------
 	 * Enable the SMP bit.
 	 * Clobbers : x0
@@ -129,6 +136,15 @@ func check_errata_cve_2017_5715
 	ret
 endfunc check_errata_cve_2017_5715
 
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2018_3639
+
 #if REPORT_ERRATA
 /*
  * Errata printing function for Cortex A75. Must follow AAPCS.
@@ -144,6 +160,7 @@ func cortex_a73_errata_report
 	 * checking functions of each errata.
 	 */
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a73, cve_2017_5715
+	report_errata WORKAROUND_CVE_2018_3639, cortex_a73, cve_2018_3639
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -170,8 +187,9 @@ func cortex_a73_cpu_reg_dump
 	ret
 endfunc cortex_a73_cpu_reg_dump
 
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a73, CORTEX_A73_MIDR, \
+declare_cpu_ops_wa cortex_a73, CORTEX_A73_MIDR, \
 	cortex_a73_reset_func, \
 	check_errata_cve_2017_5715, \
+	CPU_NO_EXTRA2_FUNC, \
 	cortex_a73_core_pwr_dwn, \
 	cortex_a73_cluster_pwr_dwn
diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S
index 288f5afe..20ec32ce 100644
--- a/lib/cpus/aarch64/cortex_a75.S
+++ b/lib/cpus/aarch64/cortex_a75.S
@@ -13,11 +13,18 @@ func cortex_a75_reset_func
 #if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
 	cpu_check_csv2	x0, 1f
-	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_bpiall_vbar
 	msr	vbar_el3, x0
 1:
 #endif
 
+#if WORKAROUND_CVE_2018_3639
+	mrs	x0, CORTEX_A75_CPUACTLR_EL1
+	orr	x0, x0, #CORTEX_A75_CPUACTLR_EL1_DISABLE_LOAD_PASS_STORE
+	msr	CORTEX_A75_CPUACTLR_EL1, x0
+	isb
+#endif
+
 #if ENABLE_AMU
 	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
 	mrs	x0, actlr_el3
@@ -57,6 +64,15 @@ func check_errata_cve_2017_5715
 	ret
 endfunc check_errata_cve_2017_5715
 
+func check_errata_cve_2018_3639
+#if WORKAROUND_CVE_2018_3639
+	mov	x0, #ERRATA_APPLIES
+#else
+	mov	x0, #ERRATA_MISSING
+#endif
+	ret
+endfunc check_errata_cve_2018_3639
+
 	/* ---------------------------------------------
 	 * HW will do the cache maintenance while powering down
 	 * ---------------------------------------------
@@ -88,6 +104,7 @@ func cortex_a75_errata_report
 	 * checking functions of each errata.
 	 */
 	report_errata WORKAROUND_CVE_2017_5715, cortex_a75, cve_2017_5715
+	report_errata WORKAROUND_CVE_2018_3639, cortex_a75, cve_2018_3639
 
 	ldp	x8, x30, [sp], #16
 	ret
@@ -113,7 +130,8 @@ func cortex_a75_cpu_reg_dump
 	ret
 endfunc cortex_a75_cpu_reg_dump
 
-declare_cpu_ops_workaround_cve_2017_5715 cortex_a75, CORTEX_A75_MIDR, \
+declare_cpu_ops_wa cortex_a75, CORTEX_A75_MIDR, \
 	cortex_a75_reset_func, \
 	check_errata_cve_2017_5715, \
+	CPU_NO_EXTRA2_FUNC, \
 	cortex_a75_core_pwr_dwn
diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S
index 5a9226d8..652cfe63 100644
--- a/lib/cpus/aarch64/cpu_helpers.S
+++ b/lib/cpus/aarch64/cpu_helpers.S
@@ -281,7 +281,7 @@ endfunc print_errata_status
 #endif
 
 /*
- * int check_workaround_cve_2017_5715(void);
+ * int check_wa_cve_2017_5715(void);
  *
  * This function returns:
  *  - ERRATA_APPLIES when firmware mitigation is required.
@@ -292,8 +292,8 @@ endfunc print_errata_status
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
-	.globl	check_workaround_cve_2017_5715
-func check_workaround_cve_2017_5715
+	.globl	check_wa_cve_2017_5715
+func check_wa_cve_2017_5715
 	mrs	x0, tpidr_el3
 #if ENABLE_ASSERTIONS
 	cmp	x0, #0
@@ -311,4 +311,28 @@ func check_wa_cve_2017_5715
 1:
 	mov	x0, #ERRATA_NOT_APPLIES
 	ret
-endfunc check_workaround_cve_2017_5715
+endfunc check_wa_cve_2017_5715
+
+/*
+ * void *wa_cve_2018_3639_get_disable_ptr(void);
+ *
+ * Returns a function pointer which is used to disable mitigation
+ * for CVE-2018-3639.
+ * The function pointer is only returned on cores that employ
+ * dynamic mitigation.  If the core uses static mitigation or is
+ * unaffected by CVE-2018-3639 this function returns NULL.
+ *
+ * NOTE: Must be called only after cpu_ops have been initialized
+ *       in per-CPU data.
+ */
+	.globl	wa_cve_2018_3639_get_disable_ptr
+func wa_cve_2018_3639_get_disable_ptr
+	mrs	x0, tpidr_el3
+#if ENABLE_ASSERTIONS
+	cmp	x0, #0
+	ASM_ASSERT(ne)
+#endif
+	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
+	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
+	ret
+endfunc wa_cve_2018_3639_get_disable_ptr
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
index cd824973..84371551 100644
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_bpiall.S
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_bpiall.S
@@ -9,13 +9,13 @@
 #include <asm_macros.S>
 #include <context.h>
 
-	.globl	workaround_bpiall_vbar0_runtime_exceptions
+	.globl	wa_cve_2017_5715_bpiall_vbar
 
 #define EMIT_BPIALL		0xee070fd5
 #define EMIT_SMC		0xe1600070
 #define ESR_EL3_A64_SMC0	0x5e000000
 
-	.macro	enter_workaround _from_vector
+	.macro	apply_cve_2017_5715_wa _from_vector
 	/*
 	 * Save register state to enable a call to AArch32 S-EL1 and return
 	 * Identify the original calling vector in w2 (==_from_vector)
@@ -66,7 +66,7 @@
 	movz	w8, SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE, SPSR_AIF_MASK)
 
 	/* Switch EL3 exception vectors while the workaround is executing. */
-	adr	x9, workaround_bpiall_vbar1_runtime_exceptions
+	adr	x9, wa_cve_2017_5715_bpiall_ret_vbar
 
 	/* Setup SCTLR_EL1 with MMU off and I$ on */
 	ldr	x10, stub_sel1_sctlr
@@ -93,13 +93,13 @@
 	 * is not enabled, the existing runtime exception vector table is used.
 	 * ---------------------------------------------------------------------
 	 */
-vector_base workaround_bpiall_vbar0_runtime_exceptions
+vector_base wa_cve_2017_5715_bpiall_vbar
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_EL0 : 0x0 - 0x200
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar0_sync_exception_sp_el0
+vector_entry bpiall_sync_exception_sp_el0
 	b	sync_exception_sp_el0
 	nop	/* to force 8 byte alignment for the following stub */
 
@@ -114,79 +114,79 @@ aarch32_stub:
 	.word	EMIT_BPIALL
 	.word	EMIT_SMC
 
-	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_el0
+	check_vector_size bpiall_sync_exception_sp_el0
 
-vector_entry workaround_bpiall_vbar0_irq_sp_el0
+vector_entry bpiall_irq_sp_el0
 	b	irq_sp_el0
-	check_vector_size workaround_bpiall_vbar0_irq_sp_el0
+	check_vector_size bpiall_irq_sp_el0
 
-vector_entry workaround_bpiall_vbar0_fiq_sp_el0
+vector_entry bpiall_fiq_sp_el0
 	b	fiq_sp_el0
-	check_vector_size workaround_bpiall_vbar0_fiq_sp_el0
+	check_vector_size bpiall_fiq_sp_el0
 
-vector_entry workaround_bpiall_vbar0_serror_sp_el0
+vector_entry bpiall_serror_sp_el0
 	b	serror_sp_el0
-	check_vector_size workaround_bpiall_vbar0_serror_sp_el0
+	check_vector_size bpiall_serror_sp_el0
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_ELx: 0x200 - 0x400
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar0_sync_exception_sp_elx
+vector_entry bpiall_sync_exception_sp_elx
 	b	sync_exception_sp_elx
-	check_vector_size workaround_bpiall_vbar0_sync_exception_sp_elx
+	check_vector_size bpiall_sync_exception_sp_elx
 
-vector_entry workaround_bpiall_vbar0_irq_sp_elx
+vector_entry bpiall_irq_sp_elx
 	b	irq_sp_elx
-	check_vector_size workaround_bpiall_vbar0_irq_sp_elx
+	check_vector_size bpiall_irq_sp_elx
 
-vector_entry workaround_bpiall_vbar0_fiq_sp_elx
+vector_entry bpiall_fiq_sp_elx
 	b	fiq_sp_elx
-	check_vector_size workaround_bpiall_vbar0_fiq_sp_elx
+	check_vector_size bpiall_fiq_sp_elx
 
-vector_entry workaround_bpiall_vbar0_serror_sp_elx
+vector_entry bpiall_serror_sp_elx
 	b	serror_sp_elx
-	check_vector_size workaround_bpiall_vbar0_serror_sp_elx
+	check_vector_size bpiall_serror_sp_elx
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch64 : 0x400 - 0x600
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar0_sync_exception_aarch64
-	enter_workaround 1
-	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch64
+vector_entry bpiall_sync_exception_aarch64
+	apply_cve_2017_5715_wa 1
+	check_vector_size bpiall_sync_exception_aarch64
 
-vector_entry workaround_bpiall_vbar0_irq_aarch64
-	enter_workaround 2
-	check_vector_size workaround_bpiall_vbar0_irq_aarch64
+vector_entry bpiall_irq_aarch64
+	apply_cve_2017_5715_wa 2
+	check_vector_size bpiall_irq_aarch64
 
-vector_entry workaround_bpiall_vbar0_fiq_aarch64
-	enter_workaround 4
-	check_vector_size workaround_bpiall_vbar0_fiq_aarch64
+vector_entry bpiall_fiq_aarch64
+	apply_cve_2017_5715_wa 4
+	check_vector_size bpiall_fiq_aarch64
 
-vector_entry workaround_bpiall_vbar0_serror_aarch64
-	enter_workaround 8
-	check_vector_size workaround_bpiall_vbar0_serror_aarch64
+vector_entry bpiall_serror_aarch64
+	apply_cve_2017_5715_wa 8
+	check_vector_size bpiall_serror_aarch64
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch32 : 0x600 - 0x800
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar0_sync_exception_aarch32
-	enter_workaround 1
-	check_vector_size workaround_bpiall_vbar0_sync_exception_aarch32
+vector_entry bpiall_sync_exception_aarch32
+	apply_cve_2017_5715_wa 1
+	check_vector_size bpiall_sync_exception_aarch32
 
-vector_entry workaround_bpiall_vbar0_irq_aarch32
-	enter_workaround 2
-	check_vector_size workaround_bpiall_vbar0_irq_aarch32
+vector_entry bpiall_irq_aarch32
+	apply_cve_2017_5715_wa 2
+	check_vector_size bpiall_irq_aarch32
 
-vector_entry workaround_bpiall_vbar0_fiq_aarch32
-	enter_workaround 4
-	check_vector_size workaround_bpiall_vbar0_fiq_aarch32
+vector_entry bpiall_fiq_aarch32
+	apply_cve_2017_5715_wa 4
+	check_vector_size bpiall_fiq_aarch32
 
-vector_entry workaround_bpiall_vbar0_serror_aarch32
-	enter_workaround 8
-	check_vector_size workaround_bpiall_vbar0_serror_aarch32
+vector_entry bpiall_serror_aarch32
+	apply_cve_2017_5715_wa 8
+	check_vector_size bpiall_serror_aarch32
 
 /* ---------------------------------------------------------------------
  * This vector table is used while the workaround is executing.  It
@@ -195,73 +195,73 @@ vector_entry workaround_bpiall_vbar0_serror_aarch32
  * EL3 state before proceeding with the normal runtime exception vector.
  * ---------------------------------------------------------------------
  */
-vector_base workaround_bpiall_vbar1_runtime_exceptions
+vector_base wa_cve_2017_5715_bpiall_ret_vbar
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_EL0 : 0x0 - 0x200 (UNUSED)
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar1_sync_exception_sp_el0
+vector_entry bpiall_ret_sync_exception_sp_el0
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_sync_exception_sp_el0
+	check_vector_size bpiall_ret_sync_exception_sp_el0
 
-vector_entry workaround_bpiall_vbar1_irq_sp_el0
+vector_entry bpiall_ret_irq_sp_el0
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_irq_sp_el0
+	check_vector_size bpiall_ret_irq_sp_el0
 
-vector_entry workaround_bpiall_vbar1_fiq_sp_el0
+vector_entry bpiall_ret_fiq_sp_el0
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_fiq_sp_el0
+	check_vector_size bpiall_ret_fiq_sp_el0
 
-vector_entry workaround_bpiall_vbar1_serror_sp_el0
+vector_entry bpiall_ret_serror_sp_el0
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_serror_sp_el0
+	check_vector_size bpiall_ret_serror_sp_el0
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_ELx: 0x200 - 0x400 (UNUSED)
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar1_sync_exception_sp_elx
+vector_entry bpiall_ret_sync_exception_sp_elx
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_sync_exception_sp_elx
+	check_vector_size bpiall_ret_sync_exception_sp_elx
 
-vector_entry workaround_bpiall_vbar1_irq_sp_elx
+vector_entry bpiall_ret_irq_sp_elx
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_irq_sp_elx
+	check_vector_size bpiall_ret_irq_sp_elx
 
-vector_entry workaround_bpiall_vbar1_fiq_sp_elx
+vector_entry bpiall_ret_fiq_sp_elx
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_fiq_sp_elx
+	check_vector_size bpiall_ret_fiq_sp_elx
 
-vector_entry workaround_bpiall_vbar1_serror_sp_elx
+vector_entry bpiall_ret_serror_sp_elx
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_serror_sp_elx
+	check_vector_size bpiall_ret_serror_sp_elx
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch64 : 0x400 - 0x600 (UNUSED)
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar1_sync_exception_aarch64
+vector_entry bpiall_ret_sync_exception_aarch64
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch64
+	check_vector_size bpiall_ret_sync_exception_aarch64
 
-vector_entry workaround_bpiall_vbar1_irq_aarch64
+vector_entry bpiall_ret_irq_aarch64
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_irq_aarch64
+	check_vector_size bpiall_ret_irq_aarch64
 
-vector_entry workaround_bpiall_vbar1_fiq_aarch64
+vector_entry bpiall_ret_fiq_aarch64
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_fiq_aarch64
+	check_vector_size bpiall_ret_fiq_aarch64
 
-vector_entry workaround_bpiall_vbar1_serror_aarch64
+vector_entry bpiall_ret_serror_aarch64
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_serror_aarch64
+	check_vector_size bpiall_ret_serror_aarch64
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch32 : 0x600 - 0x800
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
+vector_entry bpiall_ret_sync_exception_aarch32
 	/*
 	 * w2 indicates which SEL1 stub was run and thus which original vector was used
 	 * w3-w6 contain saved system register state (esr_el3 in w3)
@@ -281,7 +281,7 @@ vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
 	 * to workaround entry table in preparation for subsequent
 	 * Sync/IRQ/FIQ/SError exceptions.
 	 */
-	adr	x0, workaround_bpiall_vbar0_runtime_exceptions
+	adr	x0, wa_cve_2017_5715_bpiall_vbar
 	msr	vbar_el3, x0
 
 	/*
@@ -324,34 +324,34 @@ vector_entry workaround_bpiall_vbar1_sync_exception_aarch32
 1:
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	sync_exception_aarch64
-	check_vector_size workaround_bpiall_vbar1_sync_exception_aarch32
+	check_vector_size bpiall_ret_sync_exception_aarch32
 
-vector_entry workaround_bpiall_vbar1_irq_aarch32
+vector_entry bpiall_ret_irq_aarch32
 	b	report_unhandled_interrupt
 
 	/*
 	 * Post-workaround fan-out for non-sync exceptions
 	 */
 workaround_not_sync:
-	tbnz	w2, #3, workaround_bpiall_vbar1_serror
-	tbnz	w2, #2, workaround_bpiall_vbar1_fiq
+	tbnz	w2, #3, bpiall_ret_serror
+	tbnz	w2, #2, bpiall_ret_fiq
 	/* IRQ */
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	irq_aarch64
 
-workaround_bpiall_vbar1_fiq:
+bpiall_ret_fiq:
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	fiq_aarch64
 
-workaround_bpiall_vbar1_serror:
+bpiall_ret_serror:
 	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
 	b	serror_aarch64
-	check_vector_size workaround_bpiall_vbar1_irq_aarch32
+	check_vector_size bpiall_ret_irq_aarch32
 
-vector_entry workaround_bpiall_vbar1_fiq_aarch32
+vector_entry bpiall_ret_fiq_aarch32
 	b	report_unhandled_interrupt
-	check_vector_size workaround_bpiall_vbar1_fiq_aarch32
+	check_vector_size bpiall_ret_fiq_aarch32
 
-vector_entry workaround_bpiall_vbar1_serror_aarch32
+vector_entry bpiall_ret_serror_aarch32
 	b	report_unhandled_exception
-	check_vector_size workaround_bpiall_vbar1_serror_aarch32
+	check_vector_size bpiall_ret_serror_aarch32
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
index b24b620c..039e373c 100644
--- a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+++ b/lib/cpus/aarch64/wa_cve_2017_5715_mmu.S
@@ -9,13 +9,13 @@
 #include <asm_macros.S>
 #include <context.h>
 
-	.globl	workaround_mmu_runtime_exceptions
+	.globl	wa_cve_2017_5715_mmu_vbar
 
 #define ESR_EL3_A64_SMC0	0x5e000000
 
-vector_base workaround_mmu_runtime_exceptions
+vector_base wa_cve_2017_5715_mmu_vbar
 
-	.macro	apply_workaround _is_sync_exception
+	.macro	apply_cve_2017_5715_wa _is_sync_exception
 	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
 	mrs	x1, sctlr_el3
 	/* Disable MMU */
@@ -63,86 +63,86 @@ vector_base workaround_mmu_runtime_exceptions
 	 * Current EL with SP_EL0 : 0x0 - 0x200
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_mmu_sync_exception_sp_el0
+vector_entry mmu_sync_exception_sp_el0
 	b	sync_exception_sp_el0
-	check_vector_size workaround_mmu_sync_exception_sp_el0
+	check_vector_size mmu_sync_exception_sp_el0
 
-vector_entry workaround_mmu_irq_sp_el0
+vector_entry mmu_irq_sp_el0
 	b	irq_sp_el0
-	check_vector_size workaround_mmu_irq_sp_el0
+	check_vector_size mmu_irq_sp_el0
 
-vector_entry workaround_mmu_fiq_sp_el0
+vector_entry mmu_fiq_sp_el0
 	b	fiq_sp_el0
-	check_vector_size workaround_mmu_fiq_sp_el0
+	check_vector_size mmu_fiq_sp_el0
 
-vector_entry workaround_mmu_serror_sp_el0
+vector_entry mmu_serror_sp_el0
 	b	serror_sp_el0
-	check_vector_size workaround_mmu_serror_sp_el0
+	check_vector_size mmu_serror_sp_el0
 
 	/* ---------------------------------------------------------------------
 	 * Current EL with SP_ELx: 0x200 - 0x400
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_mmu_sync_exception_sp_elx
+vector_entry mmu_sync_exception_sp_elx
 	b	sync_exception_sp_elx
-	check_vector_size workaround_mmu_sync_exception_sp_elx
+	check_vector_size mmu_sync_exception_sp_elx
 
-vector_entry workaround_mmu_irq_sp_elx
+vector_entry mmu_irq_sp_elx
 	b	irq_sp_elx
-	check_vector_size workaround_mmu_irq_sp_elx
+	check_vector_size mmu_irq_sp_elx
 
-vector_entry workaround_mmu_fiq_sp_elx
+vector_entry mmu_fiq_sp_elx
 	b	fiq_sp_elx
-	check_vector_size workaround_mmu_fiq_sp_elx
+	check_vector_size mmu_fiq_sp_elx
 
-vector_entry workaround_mmu_serror_sp_elx
+vector_entry mmu_serror_sp_elx
 	b	serror_sp_elx
-	check_vector_size workaround_mmu_serror_sp_elx
+	check_vector_size mmu_serror_sp_elx
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch64 : 0x400 - 0x600
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_mmu_sync_exception_aarch64
-	apply_workaround _is_sync_exception=1
+vector_entry mmu_sync_exception_aarch64
+	apply_cve_2017_5715_wa _is_sync_exception=1
 	b	sync_exception_aarch64
-	check_vector_size workaround_mmu_sync_exception_aarch64
+	check_vector_size mmu_sync_exception_aarch64
 
-vector_entry workaround_mmu_irq_aarch64
-	apply_workaround _is_sync_exception=0
+vector_entry mmu_irq_aarch64
+	apply_cve_2017_5715_wa _is_sync_exception=0
 	b	irq_aarch64
-	check_vector_size workaround_mmu_irq_aarch64
+	check_vector_size mmu_irq_aarch64
 
-vector_entry workaround_mmu_fiq_aarch64
-	apply_workaround _is_sync_exception=0
+vector_entry mmu_fiq_aarch64
+	apply_cve_2017_5715_wa _is_sync_exception=0
 	b	fiq_aarch64
-	check_vector_size workaround_mmu_fiq_aarch64
+	check_vector_size mmu_fiq_aarch64
 
-vector_entry workaround_mmu_serror_aarch64
-	apply_workaround _is_sync_exception=0
+vector_entry mmu_serror_aarch64
+	apply_cve_2017_5715_wa _is_sync_exception=0
 	b	serror_aarch64
-	check_vector_size workaround_mmu_serror_aarch64
+	check_vector_size mmu_serror_aarch64
 
 	/* ---------------------------------------------------------------------
 	 * Lower EL using AArch32 : 0x600 - 0x800
 	 * ---------------------------------------------------------------------
 	 */
-vector_entry workaround_mmu_sync_exception_aarch32
-	apply_workaround _is_sync_exception=1
+vector_entry mmu_sync_exception_aarch32
+	apply_cve_2017_5715_wa _is_sync_exception=1
 	b	sync_exception_aarch32
-	check_vector_size workaround_mmu_sync_exception_aarch32
+	check_vector_size mmu_sync_exception_aarch32
 
-vector_entry workaround_mmu_irq_aarch32
-	apply_workaround _is_sync_exception=0
+vector_entry mmu_irq_aarch32
+	apply_cve_2017_5715_wa _is_sync_exception=0
 	b	irq_aarch32
-	check_vector_size workaround_mmu_irq_aarch32
+	check_vector_size mmu_irq_aarch32
 
-vector_entry workaround_mmu_fiq_aarch32
-	apply_workaround _is_sync_exception=0
+vector_entry mmu_fiq_aarch32
+	apply_cve_2017_5715_wa _is_sync_exception=0
 	b	fiq_aarch32
-	check_vector_size workaround_mmu_fiq_aarch32
+	check_vector_size mmu_fiq_aarch32
 
-vector_entry workaround_mmu_serror_aarch32
-	apply_workaround _is_sync_exception=0
+vector_entry mmu_serror_aarch32
+	apply_cve_2017_5715_wa _is_sync_exception=0
 	b	serror_aarch32
-	check_vector_size workaround_mmu_serror_aarch32
+	check_vector_size mmu_serror_aarch32
diff --git a/lib/cpus/cpu-ops.mk b/lib/cpus/cpu-ops.mk
index 3ba8c1fc..434c13ea 100644
--- a/lib/cpus/cpu-ops.mk
+++ b/lib/cpus/cpu-ops.mk
@@ -17,6 +17,8 @@ A53_DISABLE_NON_TEMPORAL_HINT	?=1
 A57_DISABLE_NON_TEMPORAL_HINT	?=1
 
 WORKAROUND_CVE_2017_5715	?=1
+WORKAROUND_CVE_2018_3639	?=1
+DYNAMIC_WORKAROUND_CVE_2018_3639	?=0
 
 # Process SKIP_A57_L1_FLUSH_PWR_DWN flag
 $(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN))
@@ -34,6 +36,19 @@ $(eval $(call add_define,A57_DISABLE_NON_TEMPORAL_HINT))
 $(eval $(call assert_boolean,WORKAROUND_CVE_2017_5715))
 $(eval $(call add_define,WORKAROUND_CVE_2017_5715))
 
+# Process WORKAROUND_CVE_2018_3639 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2018_3639))
+$(eval $(call add_define,WORKAROUND_CVE_2018_3639))
+
+$(eval $(call assert_boolean,DYNAMIC_WORKAROUND_CVE_2018_3639))
+$(eval $(call add_define,DYNAMIC_WORKAROUND_CVE_2018_3639))
+
+ifneq (${DYNAMIC_WORKAROUND_CVE_2018_3639},0)
+    ifeq (${WORKAROUND_CVE_2018_3639},0)
+        $(error "Error: WORKAROUND_CVE_2018_3639 must be 1 if DYNAMIC_WORKAROUND_CVE_2018_3639 is 1")
+    endif
+endif
+
 # CPU Errata Build flags.
 # These should be enabled by the platform if the erratum workaround needs to be
 # applied.
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 620ec16f..caaf413f 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -384,6 +384,15 @@ func el3_exit
 	msr	spsr_el3, x16
 	msr	elr_el3, x17
 
+#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
+	/* Restore mitigation state as it was on entry to EL3 */
+	ldr	x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
+	cmp	x17, xzr
+	beq	1f
+	blr	x17
+#endif
+
+1:
 	/* Restore saved general purpose registers and return */
 	b	restore_gp_registers_eret
 endfunc el3_exit
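The el3_exit hunk above restores the mitigation to the state it had on entry to EL3: it loads a per-context disable-function pointer and, if non-zero, calls it via blr x17 just before returning to the lower EL. A hypothetical C view of that context slot follows (a sketch only; the struct and field names are invented for illustration, and only the assembler offsets CTX_CVE_2018_3639_OFFSET and CTX_CVE_2018_3639_DISABLE appear in the patch):

    #include <stdint.h>

    /*
     * Illustrative C shape of the per-world context state read by
     * el3_exit.  A non-zero disable_fptr is branched to so the
     * mitigation is turned off again for a lower EL that requested it
     * disabled; zero means the mitigation is left as it is.
     */
    typedef struct cve_2018_3639 {
        uint64_t disable_fptr;   /* hypothetical field name */
    } cve_2018_3639_t;

Note also the guard added in cpu-ops.mk: DYNAMIC_WORKAROUND_CVE_2018_3639=1 is rejected unless WORKAROUND_CVE_2018_3639=1, since the dynamic path builds on top of the base workaround.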