Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 26 +++++++++++++++++++++-----
-rw-r--r--  arch/x86/kernel/process.c  | 22 ++++++++++++++++++++++
2 files changed, 43 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 46d01fd4c3c8..4f0957600076 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -32,7 +32,7 @@ static void __init ssb_select_mitigation(void);
* Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
* writes to SPEC_CTRL contain whatever reserved bits have been set.
*/
-static u64 __ro_after_init x86_spec_ctrl_base;
+u64 __ro_after_init x86_spec_ctrl_base;
/*
* The vendor and possibly platform specific bits which can be modified in
@@ -139,25 +139,41 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
u64 x86_spec_ctrl_get_default(void)
{
- return x86_spec_ctrl_base;
+ u64 msrval = x86_spec_ctrl_base;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+ return msrval;
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
{
+ u64 host = x86_spec_ctrl_base;
+
if (!boot_cpu_has(X86_FEATURE_IBRS))
return;
- if (x86_spec_ctrl_base != guest_spec_ctrl)
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
{
+ u64 host = x86_spec_ctrl_base;
+
if (!boot_cpu_has(X86_FEATURE_IBRS))
return;
- if (x86_spec_ctrl_base != guest_spec_ctrl)
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+
+ if (host != guest_spec_ctrl)
+ wrmsrl(MSR_IA32_SPEC_CTRL, host);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
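The bugs.c side of this patch folds a per-task thread-info flag into the value written to MSR_IA32_SPEC_CTRL. The rds_tif_to_spec_ctrl() helper it calls is introduced elsewhere in the same series, in arch/x86/include/asm/spec-ctrl.h, which this diff (limited to arch/x86/kernel) does not show; it is essentially a shift of the task's TIF_RDS bit down to the RDS bit of the MSR. A minimal standalone sketch of that translation follows, with the bit positions written out as assumptions rather than quoted from the header:

/*
 * Sketch of the TIF -> SPEC_CTRL translation used above. The bit
 * positions are illustrative assumptions (RDS is bit 2 of the MSR in
 * this era; the TIF position is a stand-in), not quoted kernel values.
 */
#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_RDS_SHIFT	2		/* RDS bit in MSR_IA32_SPEC_CTRL */
#define TIF_RDS			5		/* assumed thread-info flag bit  */
#define _TIF_RDS		(1UL << TIF_RDS)

/* Shift the per-task TIF_RDS bit down into the SPEC_CTRL RDS position. */
static inline uint64_t rds_tif_to_spec_ctrl(uint64_t tifn)
{
	return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
}

int main(void)
{
	uint64_t base = 0x0;		/* stand-in for x86_spec_ctrl_base */
	uint64_t tif  = _TIF_RDS;	/* current task has RDS forced on  */

	/* Mirrors the Intel path of x86_spec_ctrl_get_default() above. */
	printf("msrval = %#llx\n",
	       (unsigned long long)(base | rds_tif_to_spec_ctrl(tif)));
	return 0;
}

The guest entry/exit helpers then reduce to the same pattern: both sides compute host = base | rds_tif_to_spec_ctrl(flags) and issue the wrmsrl() only when the host and guest values actually differ, since the MSR write is the expensive part.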
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index b7e3822238ad..9c48e18d4aeb 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -33,6 +33,7 @@
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
+#include <asm/spec-ctrl.h>
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -202,6 +203,24 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
}
}
+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+{
+ u64 msr;
+
+ if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
+ msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
+ } else {
+ msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+ }
+}
+
+void speculative_store_bypass_update(void)
+{
+ __speculative_store_bypass_update(current_thread_info()->flags);
+}
+
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
struct tss_struct *tss)
{
@@ -230,6 +249,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
if ((tifp ^ tifn) & _TIF_NOTSC)
cr4_toggle_bits(X86_CR4_TSD);
+
+ if ((tifp ^ tifn) & _TIF_RDS)
+ __speculative_store_bypass_update(tifn);
}
/*
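The process.c hunk wires the update into the context switch: tifp ^ tifn has _TIF_RDS set exactly when the previous and next task disagree on the flag, so the MSR write happens only on a transition. A small standalone sketch of that filtering, where the flag position and MSR value are illustrative assumptions and the real wrmsrl() is replaced by a counting stub:

/*
 * Standalone sketch of the (tifp ^ tifn) & _TIF_RDS test above: the XOR
 * has the bit set only when the two tasks disagree, so redundant MSR
 * writes are skipped.
 */
#include <stdint.h>
#include <stdio.h>

#define TIF_RDS		5	/* assumed flag bit, as in the sketch above */
#define _TIF_RDS	(1UL << TIF_RDS)

static int msr_writes;		/* how many times the stub "MSR" was hit */

static void wrmsrl_stub(uint64_t val)
{
	msr_writes++;
	printf("wrmsrl(..., %#llx)\n", (unsigned long long)val);
}

/* Same shape as the __switch_to_xtra() hunk: update only on a flag change. */
static void switch_to_xtra(uint64_t tifp, uint64_t tifn)
{
	if ((tifp ^ tifn) & _TIF_RDS)
		wrmsrl_stub((tifn & _TIF_RDS) ? 0x4 : 0x0);
}

int main(void)
{
	switch_to_xtra(0, _TIF_RDS);		/* flag turned on: MSR written */
	switch_to_xtra(_TIF_RDS, _TIF_RDS);	/* unchanged: write skipped    */
	switch_to_xtra(_TIF_RDS, 0);		/* flag turned off: written    */
	printf("%d MSR writes across 3 switches\n", msr_writes);
	return 0;
}

On the AMD path the same TIF bit is instead translated by rds_tif_to_amd_ls_cfg() into a vendor-specific bit of MSR_AMD64_LS_CFG, as the X86_FEATURE_AMD_RDS branch above shows, but the transition test is identical.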