author	Tom Rini <trini@konsulko.com>	2022-03-02 13:59:33 -0500
committer	Tom Rini <trini@konsulko.com>	2022-03-02 13:59:33 -0500
commit	2dfdba4a5a48de33a3cedc908c183b86b6ff7bd5 (patch)
tree	da9d57e285034143ed1e9cd4dfc30577792275ad /arch/arm/cpu/armv8/start.S
parent	f861ffa660dc47c4017c220b94d10a64439d46a7 (diff)
parent	5ff4857d3569710c0f1ce1848f1e7486e3a4cfbe (diff)
Merge branch '2022-03-02-armv8-fixes-and-cleanups' into next
To quote the author:
I was looking into the arm64 boot code lately and stumbled upon some
issues. Nishanth also brought back memories of a lengthy debug session,
which was caused by U-Boot keeping SErrors masked. As the resulting
patches are all somewhat related, I gathered this series here to address
those problems.
Patches 1 to 3 address exception handling issues, with the SError
enablement being the most prominent fix here.
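
For background on that fix: AArch64 masks each exception class with one
bit in the PSTATE DAIF field (D = debug, A = SError/abort, I = IRQ,
F = FIQ), and the 4-bit immediate of the daifset/daifclr system
instructions uses the same layout, so #0x4 targets the SError mask
alone. A minimal sketch of unmasking and re-masking SErrors,
illustrative rather than code from the series:

	/* Unmask SErrors only: bit 2 ('A') of the DAIFClr immediate.
	 * Immediate layout for daifclr/daifset: D=8, A=4, I=2, F=1. */
	msr	daifclr, #0x4	/* pending SErrors can now be taken */

	/* To mask them again around a window where aborts are expected: */
	msr	daifset, #0x4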
Patch 4 cleans up asm/io.h. This was on the list before[1], but was
somehow lost when it was superseded by a shorter version of itself.
Patches 5 and 6 clean up some unnecessarily complicated AArch64 assembly
code.
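
As an illustration of that kind of cleanup (visible in the start.S
hunks of the diff below, where branch_if_master drops its second
scratch register): a primary-CPU check can be written with a single
scratch register by testing the MPIDR_EL1 affinity bits directly. This
is a sketch, not the literal macro from the series; the #0xffff mask
covering Aff0/Aff1 is an assumption.

	/* Sketch: branch to \master_label on the primary CPU using one
	 * scratch register. The #0xffff (Aff0/Aff1) mask is illustrative. */
	.macro	branch_if_master, xreg, master_label
	mrs	\xreg, mpidr_el1	/* core/cluster affinity IDs */
	tst	\xreg, #0xffff		/* Aff0 and Aff1 both zero? */
	b.eq	\master_label		/* yes: primary CPU */
	.endm

With the second register argument gone, call sites shrink from
"branch_if_master x0, x1, master_cpu" to "branch_if_master x0,
master_cpu", which is exactly the shape of the hunks below.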
Diffstat (limited to 'arch/arm/cpu/armv8/start.S')
-rw-r--r--	arch/arm/cpu/armv8/start.S	10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/arch/arm/cpu/armv8/start.S b/arch/arm/cpu/armv8/start.S
index 91b00a46cce..6a6a4f86502 100644
--- a/arch/arm/cpu/armv8/start.S
+++ b/arch/arm/cpu/armv8/start.S
@@ -126,6 +126,8 @@ pie_fixup_done:
 	b	0f
 2:	mrs	x1, hcr_el2
 	tbnz	x1, #34, 1f	/* HCR_EL2.E2H */
+	orr	x1, x1, #HCR_EL2_AMO_EL2	/* Route SErrors to EL2 */
+	msr	hcr_el2, x1
 	set_vbar vbar_el2, x0
 	mov	x0, #0x33ff
 	msr	cptr_el2, x0	/* Enable FP/SIMD */
@@ -134,6 +136,7 @@ pie_fixup_done:
 	mov	x0, #3 << 20
 	msr	cpacr_el1, x0	/* Enable FP/SIMD */
 0:
+	msr	daifclr, #0x4	/* Unmask SError interrupts */
 
 #ifdef COUNTER_FREQUENCY
 	branch_if_not_highest_el x0, 4f
@@ -172,11 +175,11 @@ pie_fixup_done:
 	bl	lowlevel_init
 
 #if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
-	branch_if_master x0, x1, master_cpu
+	branch_if_master x0, master_cpu
 	b	spin_table_secondary_jump
 	/* never return */
 #elif defined(CONFIG_ARMV8_MULTIENTRY)
-	branch_if_master x0, x1, master_cpu
+	branch_if_master x0, master_cpu
 
 	/*
 	 * Slave CPUs
@@ -189,6 +192,7 @@ slave_cpu:
 	br	x0			/* branch to the given address */
 #endif /* CONFIG_ARMV8_MULTIENTRY */
 master_cpu:
+	msr	SPSel, #1		/* make sure we use SP_ELx */
 	bl	_main
 
 /*-----------------------------------------------------------------------*/
@@ -301,7 +305,7 @@ WEAK(lowlevel_init)
 #endif
 
 #ifdef CONFIG_ARMV8_MULTIENTRY
-	branch_if_master x0, x1, 2f
+	branch_if_master x0, 2f
 
 	/*
 	 * Slave should wait for master clearing spin table.
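
For reference on the first hunk: in the Armv8-A architecture,
HCR_EL2.AMO is bit 5 of HCR_EL2 (next to FMO, bit 3, and IMO, bit 4),
and setting it routes physical SError interrupts to EL2. The
HCR_EL2_AMO_EL2 constant used above therefore amounts to something like
the sketch below; the header where U-Boot actually defines it is not
part of this diff.

	/* Sketch only: architectural bit position of HCR_EL2.AMO.
	 * The defining header is not shown in this diff. */
	#define HCR_EL2_AMO_EL2	(1 << 5)	/* route SErrors to EL2 */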