author		James Morse <james.morse@arm.com>	2022-03-15 18:24:12 +0000
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-03-19 13:40:15 +0100
commit		26129ea2953b6b2c59d89e472e5465e77e3b8c7c (patch)
tree		52c1a5648b366cdbc5fddffc587837df6ecce947 /arch/arm64/include
parent		1b735c8dc1fb8392f62c056d6972a7b9ae9a1f9e (diff)
KVM: arm64: Add templates for BHB mitigation sequences
KVM writes the Spectre-v2 mitigation template at the beginning of each vector when a CPU requires a specific sequence to run. Because the template is copied, it cannot be modified by the alternatives at runtime.

Add templates for calling ARCH_WORKAROUND_3 and one for each value of K in the branchy loop. Instead of adding dummy functions for 'fn', which would disable the Spectre-v2 mitigation, add template_start to indicate that a template (and which one) is in use. Finally, add a copy of install_bp_hardening_cb() that is able to install these.

Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
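The new installer itself lands in arch/arm64/kernel/cpu_errata.c, which falls outside this include-only diffstat. A rough sketch of its shape, assuming the helper is named kvm_setup_bhb_slot() and reuses the bp_lock/__copy_hyp_vect_bpi() machinery of install_bp_hardening_cb() (names not visible in this diff are illustrative, not confirmed by it):

	/*
	 * Sketch: copy a BHB mitigation template into a hyp vector slot,
	 * reusing the slot if another CPU already installed this template.
	 */
	static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
	{
		int cpu, slot = -1;

		raw_spin_lock(&bp_lock);
		for_each_possible_cpu(cpu) {
			/* template_start identifies which sequence fills a slot */
			if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
				slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
				break;
			}
		}

		if (slot == -1) {
			/* No CPU uses this template yet: claim and fill a new slot. */
			slot = atomic_inc_return(&arm64_el2_vector_last_slot);
			BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
			__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_start + SZ_2K);
		}

		__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
		__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
		raw_spin_unlock(&bp_lock);
	}

Recording template_start (rather than installing a dummy 'fn') is what lets kvm_get_hyp_vector() below select the copied slot without disturbing the Spectre-v2 callback logic.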
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--	arch/arm64/include/asm/cpucaps.h	3
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h	6
-rw-r--r--	arch/arm64/include/asm/mmu.h	6
3 files changed, 12 insertions, 3 deletions
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 1dc3c762fdcb..4ffa86149d28 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -55,7 +55,8 @@
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM	45
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM	46
 #define ARM64_WORKAROUND_1542419		47
+#define ARM64_SPECTRE_BHB			48
 
-#define ARM64_NCAPS				48
+#define ARM64_NCAPS				49
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index befe37d4bc0e..78d110667c0c 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -478,7 +478,8 @@ static inline void *kvm_get_hyp_vector(void)
 	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
 	int slot = -1;
 
-	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
+	if ((cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) ||
+	     cpus_have_const_cap(ARM64_SPECTRE_BHB)) && data->template_start) {
 		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
 		slot = data->hyp_vectors_slot;
 	}
@@ -507,7 +508,8 @@ static inline int kvm_map_vectors(void)
 	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
 	 *  HBP +  HEL2 -> use hardened vectors and use exec mapping
 	 */
-	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
+	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) ||
+	    cpus_have_const_cap(ARM64_SPECTRE_BHB)) {
 		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
 		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
 	}
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 353450011e3d..1b9e49fb0e1b 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -82,6 +82,12 @@ typedef void (*bp_hardening_cb_t)(void);
 struct bp_hardening_data {
 	int			hyp_vectors_slot;
 	bp_hardening_cb_t	fn;
+
+	/*
+	 * template_start is only used by the BHB mitigation to identify the
+	 * hyp_vectors_slot sequence.
+	 */
+	const char *template_start;
 };
 
 #if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\