author		James Morse <james.morse@arm.com>	2022-03-15 18:24:12 +0000
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-03-19 13:40:15 +0100
commit		26129ea2953b6b2c59d89e472e5465e77e3b8c7c (patch)
tree		52c1a5648b366cdbc5fddffc587837df6ecce947 /arch/arm64/kvm
parent		1b735c8dc1fb8392f62c056d6972a7b9ae9a1f9e (diff)
KVM: arm64: Add templates for BHB mitigation sequences
KVM writes the Spectre-v2 mitigation template at the beginning of each vector when a CPU requires a specific sequence to run. Because the template is copied, it cannot be modified by the alternatives at runtime.

Add templates for calling ARCH_WORKAROUND_3 and one for each value of K in the branchy loop. Instead of adding dummy functions for 'fn', which would disable the Spectre-v2 mitigation, add template_start to indicate that a template (and which one) is in use. Finally, add a copy of install_bp_hardening_cb() that is able to install these.

Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
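The templates added below are bounded by exported start/end labels so an installer can copy the chosen sequence byte-for-byte into a vector slot; once copied, runtime alternative patching can no longer reach it. The following is a minimal sketch of that copy step, assuming a hypothetical helper name and slot address; only the boundary symbols come from this patch, the rest is illustrative.

    #include <linux/string.h>
    #include <asm/cacheflush.h>

    extern char __spectre_bhb_loop_k8_start[], __spectre_bhb_loop_k8_end[];

    /* Sketch only: install one template into a vector slot (names are illustrative). */
    static void copy_bhb_template_to_slot(void *slot_va)
    {
    	size_t len = __spectre_bhb_loop_k8_end - __spectre_bhb_loop_k8_start;

    	/* The template is copied, so later alternative patching cannot modify it. */
    	memcpy(slot_va, __spectre_bhb_loop_k8_start, len);

    	/* The copy must also be made visible to the instruction side. */
    	flush_icache_range((unsigned long)slot_va, (unsigned long)slot_va + len);
    }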
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S | 54
1 file changed, 54 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index f36aad0f207b..2ad750208e33 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -347,4 +347,58 @@ ENTRY(__smccc_workaround_1_smc_start)
ldp x0, x1, [sp, #(8 * 2)]
add sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
+
+ENTRY(__smccc_workaround_3_smc_start)
+ esb
+ sub sp, sp, #(8 * 4)
+ stp x2, x3, [sp, #(8 * 0)]
+ stp x0, x1, [sp, #(8 * 2)]
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
+ smc #0
+ ldp x2, x3, [sp, #(8 * 0)]
+ ldp x0, x1, [sp, #(8 * 2)]
+ add sp, sp, #(8 * 4)
+ENTRY(__smccc_workaround_3_smc_end)
+
+ENTRY(__spectre_bhb_loop_k8_start)
+ esb
+ sub sp, sp, #(8 * 2)
+ stp x0, x1, [sp, #(8 * 0)]
+ mov x0, #8
+2: b . + 4
+ subs x0, x0, #1
+ b.ne 2b
+ dsb nsh
+ isb
+ ldp x0, x1, [sp, #(8 * 0)]
+ add sp, sp, #(8 * 2)
+ENTRY(__spectre_bhb_loop_k8_end)
+
+ENTRY(__spectre_bhb_loop_k24_start)
+ esb
+ sub sp, sp, #(8 * 2)
+ stp x0, x1, [sp, #(8 * 0)]
+ mov x0, #24
+2: b . + 4
+ subs x0, x0, #1
+ b.ne 2b
+ dsb nsh
+ isb
+ ldp x0, x1, [sp, #(8 * 0)]
+ add sp, sp, #(8 * 2)
+ENTRY(__spectre_bhb_loop_k24_end)
+
+ENTRY(__spectre_bhb_loop_k32_start)
+ esb
+ sub sp, sp, #(8 * 2)
+ stp x0, x1, [sp, #(8 * 0)]
+ mov x0, #32
+2: b . + 4
+ subs x0, x0, #1
+ b.ne 2b
+ dsb nsh
+ isb
+ ldp x0, x1, [sp, #(8 * 0)]
+ add sp, sp, #(8 * 2)
+ENTRY(__spectre_bhb_loop_k32_end)
#endif
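
Each loop template above differs only in the iteration count loaded into x0 (8, 24 or 32): the loop of taken branches overwrites the branch history, and the dsb nsh / isb pair synchronises before execution continues. Below is a rough sketch of how an installer might select a template from the exported boundary symbols; the helper and the way K is obtained are assumptions for illustration, not part of this patch.

    #include <linux/stddef.h>	/* NULL */

    /* Sketch only: map an iteration count K onto one of the exported templates. */
    static void pick_bhb_loop_template(unsigned int k,
    				       const char **start, const char **end)
    {
    	extern char __spectre_bhb_loop_k8_start[],  __spectre_bhb_loop_k8_end[];
    	extern char __spectre_bhb_loop_k24_start[], __spectre_bhb_loop_k24_end[];
    	extern char __spectre_bhb_loop_k32_start[], __spectre_bhb_loop_k32_end[];

    	switch (k) {
    	case 8:
    		*start = __spectre_bhb_loop_k8_start;
    		*end   = __spectre_bhb_loop_k8_end;
    		break;
    	case 24:
    		*start = __spectre_bhb_loop_k24_start;
    		*end   = __spectre_bhb_loop_k24_end;
    		break;
    	case 32:
    		*start = __spectre_bhb_loop_k32_start;
    		*end   = __spectre_bhb_loop_k32_end;
    		break;
    	default:
    		*start = *end = NULL;	/* no loop template for this K */
    		break;
    	}
    }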