Diffstat (limited to 'arch/x86/include')

 arch/x86/include/asm/apic.h           |  12
 arch/x86/include/asm/asm.h            |  19
 arch/x86/include/asm/debugreg.h       |   4
 arch/x86/include/asm/fred.h           |   1
 arch/x86/include/asm/fsgsbase.h       |   4
 arch/x86/include/asm/kvm_host.h       |   3
 arch/x86/include/asm/microcode.h      |   4
 arch/x86/include/asm/mshyperv.h       |   3
 arch/x86/include/asm/msr-index.h      |  12
 arch/x86/include/asm/msr.h            | 213
 arch/x86/include/asm/paravirt.h       |  59
 arch/x86/include/asm/paravirt_types.h |  10
 arch/x86/include/asm/resctrl.h        |   2
 arch/x86/include/asm/sev-internal.h   |   2
 arch/x86/include/asm/spec-ctrl.h      |   2
 arch/x86/include/asm/suspend_32.h     |   1
 arch/x86/include/asm/suspend_64.h     |   1
 arch/x86/include/asm/switch_to.h      |   4
 arch/x86/include/asm/tsc.h            |  55

 19 files changed, 209 insertions(+), 202 deletions(-)
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index c903d358405d..68e10e30fe9b 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -120,7 +120,7 @@ static inline bool apic_is_x2apic_enabled(void)
{
u64 msr;
- if (rdmsrl_safe(MSR_IA32_APICBASE, &msr))
+ if (rdmsrq_safe(MSR_IA32_APICBASE, &msr))
return false;
return msr & X2APIC_ENABLE;
}
@@ -209,12 +209,12 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
reg == APIC_LVR)
return;
- wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
+ wrmsrq(APIC_BASE_MSR + (reg >> 4), v);
}
static inline void native_apic_msr_eoi(void)
{
- __wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+ native_wrmsrq(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK);
}
static inline u32 native_apic_msr_read(u32 reg)
@@ -224,20 +224,20 @@ static inline u32 native_apic_msr_read(u32 reg)
if (reg == APIC_DFR)
return -1;
- rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
+ rdmsrq(APIC_BASE_MSR + (reg >> 4), msr);
return (u32)msr;
}
static inline void native_x2apic_icr_write(u32 low, u32 id)
{
- wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
+ wrmsrq(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}
static inline u64 native_x2apic_icr_read(void)
{
unsigned long val;
- rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
+ rdmsrq(APIC_BASE_MSR + (APIC_ICR >> 4), val);
return val;
}
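
In x2APIC mode each APIC register is exposed as an MSR at APIC_BASE_MSR + (offset >> 4), so the conversions above are mechanical renames over that mapping. A minimal sketch (not part of the patch) of a read through the same mapping, assuming the usual APIC_ID offset from <asm/apicdef.h>:

static inline u32 example_x2apic_id(void)
{
	u64 msr;

	/* APIC_ID is register offset 0x20, i.e. MSR 0x802 in x2APIC mode. */
	rdmsrq(APIC_BASE_MSR + (APIC_ID >> 4), msr);

	return (u32)msr;
}
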
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index eef0771512de..f963848024a5 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -238,5 +238,24 @@ register unsigned long current_stack_pointer asm(_ASM_SP);
#define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_TYPE(from, to, EX_TYPE_FAULT)
+/*
+ * Both i386 and x86_64 return 64-bit values in edx:eax for certain
+ * instructions, but GCC's "A" constraint has different meanings.
+ * For i386, "A" means exactly edx:eax, while for x86_64 it
+ * means rax *or* rdx.
+ *
+ * These helpers wrap those semantic differences and, on 64-bit, save
+ * one instruction clearing the high half of 'low':
+ */
+#ifdef CONFIG_X86_64
+# define EAX_EDX_DECLARE_ARGS(val, low, high) unsigned long low, high
+# define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32)
+# define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
+#else
+# define EAX_EDX_DECLARE_ARGS(val, low, high) u64 val
+# define EAX_EDX_VAL(val, low, high) (val)
+# define EAX_EDX_RET(val, low, high) "=A" (val)
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_X86_ASM_H */
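
These helpers are the old DECLARE_ARGS() family from msr.h, renamed with an EAX_EDX_ prefix and moved here (the removal is visible in the msr.h hunk below); the use pattern is unchanged. A minimal sketch, shaped like the __rdmsr() in msr.h:

static __always_inline u64 example_read_edx_eax(u32 msr)
{
	EAX_EDX_DECLARE_ARGS(val, low, high);

	/*
	 * On 64-bit: two unsigned longs, combined by EAX_EDX_VAL().
	 * On 32-bit: the "A" constraint places the u64 in edx:eax directly.
	 */
	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}
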
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 719d95f1ab5e..363110e6b2e3 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -169,7 +169,7 @@ static inline unsigned long get_debugctlmsr(void)
if (boot_cpu_data.x86 < 6)
return 0;
#endif
- rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+ rdmsrq(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
return debugctlmsr;
}
@@ -180,7 +180,7 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr)
if (boot_cpu_data.x86 < 6)
return;
#endif
- wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+ wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
#endif /* _ASM_X86_DEBUGREG_H */
diff --git a/arch/x86/include/asm/fred.h b/arch/x86/include/asm/fred.h
index 2a29e5216881..12b34d5b2953 100644
--- a/arch/x86/include/asm/fred.h
+++ b/arch/x86/include/asm/fred.h
@@ -9,6 +9,7 @@
#include <linux/const.h>
#include <asm/asm.h>
+#include <asm/msr.h>
#include <asm/trapnr.h>
/*
diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h
index 02f239569b93..ab2547f97c2c 100644
--- a/arch/x86/include/asm/fsgsbase.h
+++ b/arch/x86/include/asm/fsgsbase.h
@@ -60,7 +60,7 @@ static inline unsigned long x86_fsbase_read_cpu(void)
if (boot_cpu_has(X86_FEATURE_FSGSBASE))
fsbase = rdfsbase();
else
- rdmsrl(MSR_FS_BASE, fsbase);
+ rdmsrq(MSR_FS_BASE, fsbase);
return fsbase;
}
@@ -70,7 +70,7 @@ static inline void x86_fsbase_write_cpu(unsigned long fsbase)
if (boot_cpu_has(X86_FEATURE_FSGSBASE))
wrfsbase(fsbase);
else
- wrmsrl(MSR_FS_BASE, fsbase);
+ wrmsrq(MSR_FS_BASE, fsbase);
}
extern unsigned long x86_gsbase_read_cpu_inactive(void);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7bc174a1f1cb..9c971f846108 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -34,6 +34,7 @@
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
+#include <asm/msr.h>
#include <asm/asm.h>
#include <asm/irq_remapping.h>
#include <asm/kvm_page_track.h>
@@ -2278,7 +2279,7 @@ static inline unsigned long read_msr(unsigned long msr)
{
u64 value;
- rdmsrl(msr, value);
+ rdmsrq(msr, value);
return value;
}
#endif
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index be7cddc414e4..8b41f26f003b 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H
+#include <asm/msr.h>
+
struct cpu_signature {
unsigned int sig;
unsigned int pf;
@@ -63,7 +65,7 @@ static inline u32 intel_get_microcode_revision(void)
{
u32 rev, dummy;
- native_wrmsrl(MSR_IA32_UCODE_REV, 0);
+ native_wrmsrq(MSR_IA32_UCODE_REV, 0);
/* As documented in the SDM: Do a CPUID 1 here */
native_cpuid_eax(1);
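
The hunk shows only the first half of the SDM-documented sequence for reading the microcode revision: write 0 to IA32_BIOS_SIGN_ID (MSR_IA32_UCODE_REV), then execute CPUID(1) as a serializing step. A sketch of the whole sequence for context, assuming the read-back half outside the hunk is unchanged apart from the rename:

static inline u32 example_get_microcode_revision(void)
{
	u32 rev, dummy;

	native_wrmsrq(MSR_IA32_UCODE_REV, 0);

	/* As documented in the SDM: Do a CPUID 1 here */
	native_cpuid_eax(1);

	/* The revision is reported in the high 32 bits of the MSR. */
	native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);

	return rev;
}
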
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index bab5ccfc60a7..778444310cfb 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
+#include <asm/msr.h>
#include <hyperv/hvhdk.h>
/*
@@ -304,7 +305,7 @@ void hv_set_non_nested_msr(unsigned int reg, u64 value);
static __always_inline u64 hv_raw_get_msr(unsigned int reg)
{
- return __rdmsr(reg);
+ return native_rdmsrq(reg);
}
#else /* CONFIG_HYPERV */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index e6134ef2263d..e0d60805b869 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -525,7 +525,7 @@
#define MSR_HWP_CAPABILITIES 0x00000771
#define MSR_HWP_REQUEST_PKG 0x00000772
#define MSR_HWP_INTERRUPT 0x00000773
-#define MSR_HWP_REQUEST 0x00000774
+#define MSR_HWP_REQUEST 0x00000774
#define MSR_HWP_STATUS 0x00000777
/* CPUID.6.EAX */
@@ -542,16 +542,16 @@
#define HWP_LOWEST_PERF(x) (((x) >> 24) & 0xff)
/* IA32_HWP_REQUEST */
-#define HWP_MIN_PERF(x) (x & 0xff)
-#define HWP_MAX_PERF(x) ((x & 0xff) << 8)
+#define HWP_MIN_PERF(x) (x & 0xff)
+#define HWP_MAX_PERF(x) ((x & 0xff) << 8)
#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16)
-#define HWP_ENERGY_PERF_PREFERENCE(x) (((unsigned long long) x & 0xff) << 24)
+#define HWP_ENERGY_PERF_PREFERENCE(x) (((u64)x & 0xff) << 24)
#define HWP_EPP_PERFORMANCE 0x00
#define HWP_EPP_BALANCE_PERFORMANCE 0x80
#define HWP_EPP_BALANCE_POWERSAVE 0xC0
#define HWP_EPP_POWERSAVE 0xFF
-#define HWP_ACTIVITY_WINDOW(x) ((unsigned long long)(x & 0xff3) << 32)
-#define HWP_PACKAGE_CONTROL(x) ((unsigned long long)(x & 0x1) << 42)
+#define HWP_ACTIVITY_WINDOW(x) ((u64)(x & 0xff3) << 32)
+#define HWP_PACKAGE_CONTROL(x) ((u64)(x & 0x1) << 42)
/* IA32_HWP_STATUS */
#define HWP_GUARANTEED_CHANGE(x) (x & 0x1)
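
The switch from 'unsigned long long' to u64 is cosmetic, but the casts themselves matter: HWP_ACTIVITY_WINDOW() and HWP_PACKAGE_CONTROL() shift past bit 31, and HWP_ENERGY_PERF_PREFERENCE() can set bit 31, which overflows a plain int. A hedged sketch composing an IA32_HWP_REQUEST value (the perf levels 8 and 32 are made-up example numbers):

	u64 req = HWP_MIN_PERF(8) |
		  HWP_MAX_PERF(32) |
		  HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_BALANCE_PERFORMANCE);

	/* Without the u64 casts, the << 32 shifts would overflow an int. */
	wrmsrq(MSR_HWP_REQUEST, req);
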
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 9397a319d165..a9ce56fc8785 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -12,6 +12,7 @@
#include <uapi/asm/msr.h>
#include <asm/shared/msr.h>
+#include <linux/types.h>
#include <linux/percpu.h>
struct msr_info {
@@ -37,23 +38,6 @@ struct saved_msrs {
};
/*
- * both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A"
- * constraint has different meanings. For i386, "A" means exactly
- * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
- * it means rax *or* rdx.
- */
-#ifdef CONFIG_X86_64
-/* Using 64-bit values saves one instruction clearing the high half of low */
-#define DECLARE_ARGS(val, low, high) unsigned long low, high
-#define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32)
-#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
-#else
-#define DECLARE_ARGS(val, low, high) unsigned long long val
-#define EAX_EDX_VAL(val, low, high) (val)
-#define EAX_EDX_RET(val, low, high) "=A" (val)
-#endif
-
-/*
* Be very careful with includes. This header is prone to include loops.
*/
#include <asm/atomic.h>
@@ -63,13 +47,13 @@ struct saved_msrs {
DECLARE_TRACEPOINT(read_msr);
DECLARE_TRACEPOINT(write_msr);
DECLARE_TRACEPOINT(rdpmc);
-extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
-extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
-extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
+extern void do_trace_write_msr(u32 msr, u64 val, int failed);
+extern void do_trace_read_msr(u32 msr, u64 val, int failed);
+extern void do_trace_rdpmc(u32 msr, u64 val, int failed);
#else
-static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
-static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
-static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
+static inline void do_trace_write_msr(u32 msr, u64 val, int failed) {}
+static inline void do_trace_read_msr(u32 msr, u64 val, int failed) {}
+static inline void do_trace_rdpmc(u32 msr, u64 val, int failed) {}
#endif
/*
@@ -79,9 +63,9 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
* think of extending them - you will be slapped with a stinking trout or a frozen
* shark will reach you, wherever you are! You've been warned.
*/
-static __always_inline unsigned long long __rdmsr(unsigned int msr)
+static __always_inline u64 __rdmsr(u32 msr)
{
- DECLARE_ARGS(val, low, high);
+ EAX_EDX_DECLARE_ARGS(val, low, high);
asm volatile("1: rdmsr\n"
"2:\n"
@@ -91,12 +75,12 @@ static __always_inline unsigned long long __rdmsr(unsigned int msr)
return EAX_EDX_VAL(val, low, high);
}
-static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
+static __always_inline void __wrmsrq(u32 msr, u64 val)
{
asm volatile("1: wrmsr\n"
"2:\n"
_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
- : : "c" (msr), "a"(low), "d" (high) : "memory");
+ : : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)) : "memory");
}
#define native_rdmsr(msr, val1, val2) \
@@ -106,16 +90,20 @@ do { \
(void)((val2) = (u32)(__val >> 32)); \
} while (0)
+static __always_inline u64 native_rdmsrq(u32 msr)
+{
+ return __rdmsr(msr);
+}
+
#define native_wrmsr(msr, low, high) \
- __wrmsr(msr, low, high)
+ __wrmsrq((msr), (u64)(high) << 32 | (low))
-#define native_wrmsrl(msr, val) \
- __wrmsr((msr), (u32)((u64)(val)), \
- (u32)((u64)(val) >> 32))
+#define native_wrmsrq(msr, val) \
+ __wrmsrq((msr), (val))
-static inline unsigned long long native_read_msr(unsigned int msr)
+static inline u64 native_read_msr(u32 msr)
{
- unsigned long long val;
+ u64 val;
val = __rdmsr(msr);
@@ -125,34 +113,35 @@ static inline unsigned long long native_read_msr(unsigned int msr)
return val;
}
-static inline unsigned long long native_read_msr_safe(unsigned int msr,
- int *err)
+static inline int native_read_msr_safe(u32 msr, u64 *p)
{
- DECLARE_ARGS(val, low, high);
+ int err;
+ EAX_EDX_DECLARE_ARGS(val, low, high);
asm volatile("1: rdmsr ; xor %[err],%[err]\n"
"2:\n\t"
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
- : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
+ : [err] "=r" (err), EAX_EDX_RET(val, low, high)
: "c" (msr));
if (tracepoint_enabled(read_msr))
- do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
- return EAX_EDX_VAL(val, low, high);
+ do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), err);
+
+ *p = EAX_EDX_VAL(val, low, high);
+
+ return err;
}
/* Can be uninlined because referenced by paravirt */
-static inline void notrace
-native_write_msr(unsigned int msr, u32 low, u32 high)
+static inline void notrace native_write_msr(u32 msr, u64 val)
{
- __wrmsr(msr, low, high);
+ native_wrmsrq(msr, val);
if (tracepoint_enabled(write_msr))
- do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
+ do_trace_write_msr(msr, val, 0);
}
/* Can be uninlined because referenced by paravirt */
-static inline int notrace
-native_write_msr_safe(unsigned int msr, u32 low, u32 high)
+static inline int notrace native_write_msr_safe(u32 msr, u64 val)
{
int err;
@@ -160,73 +149,19 @@ native_write_msr_safe(unsigned int msr, u32 low, u32 high)
"2:\n\t"
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
: [err] "=a" (err)
- : "c" (msr), "0" (low), "d" (high)
+ : "c" (msr), "0" ((u32)val), "d" ((u32)(val >> 32))
: "memory");
if (tracepoint_enabled(write_msr))
- do_trace_write_msr(msr, ((u64)high << 32 | low), err);
+ do_trace_write_msr(msr, val, err);
return err;
}
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);
-/**
- * rdtsc() - returns the current TSC without ordering constraints
- *
- * rdtsc() returns the result of RDTSC as a 64-bit integer. The
- * only ordering constraint it supplies is the ordering implied by
- * "asm volatile": it will put the RDTSC in the place you expect. The
- * CPU can and will speculatively execute that RDTSC, though, so the
- * results can be non-monotonic if compared on different CPUs.
- */
-static __always_inline unsigned long long rdtsc(void)
+static inline u64 native_read_pmc(int counter)
{
- DECLARE_ARGS(val, low, high);
-
- asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
-
- return EAX_EDX_VAL(val, low, high);
-}
-
-/**
- * rdtsc_ordered() - read the current TSC in program order
- *
- * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
- * It is ordered like a load to a global in-memory counter. It should
- * be impossible to observe non-monotonic rdtsc_unordered() behavior
- * across multiple CPUs as long as the TSC is synced.
- */
-static __always_inline unsigned long long rdtsc_ordered(void)
-{
- DECLARE_ARGS(val, low, high);
-
- /*
- * The RDTSC instruction is not ordered relative to memory
- * access. The Intel SDM and the AMD APM are both vague on this
- * point, but empirically an RDTSC instruction can be
- * speculatively executed before prior loads. An RDTSC
- * immediately after an appropriate barrier appears to be
- * ordered as a normal load, that is, it provides the same
- * ordering guarantees as reading from a global memory location
- * that some other imaginary CPU is updating continuously with a
- * time stamp.
- *
- * Thus, use the preferred barrier on the respective CPU, aiming for
- * RDTSCP as the default.
- */
- asm volatile(ALTERNATIVE_2("rdtsc",
- "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
- "rdtscp", X86_FEATURE_RDTSCP)
- : EAX_EDX_RET(val, low, high)
- /* RDTSCP clobbers ECX with MSR_TSC_AUX. */
- :: "ecx");
-
- return EAX_EDX_VAL(val, low, high);
-}
-
-static inline unsigned long long native_read_pmc(int counter)
-{
- DECLARE_ARGS(val, low, high);
+ EAX_EDX_DECLARE_ARGS(val, low, high);
asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
if (tracepoint_enabled(rdpmc))
@@ -251,51 +186,44 @@ do { \
(void)((high) = (u32)(__val >> 32)); \
} while (0)
-static inline void wrmsr(unsigned int msr, u32 low, u32 high)
+static inline void wrmsr(u32 msr, u32 low, u32 high)
{
- native_write_msr(msr, low, high);
+ native_write_msr(msr, (u64)high << 32 | low);
}
-#define rdmsrl(msr, val) \
+#define rdmsrq(msr, val) \
((val) = native_read_msr((msr)))
-static inline void wrmsrl(unsigned int msr, u64 val)
+static inline void wrmsrq(u32 msr, u64 val)
{
- native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
+ native_write_msr(msr, val);
}
/* wrmsr with exception handling */
-static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
+static inline int wrmsrq_safe(u32 msr, u64 val)
{
- return native_write_msr_safe(msr, low, high);
+ return native_write_msr_safe(msr, val);
}
/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high) \
({ \
- int __err; \
- u64 __val = native_read_msr_safe((msr), &__err); \
+ u64 __val; \
+ int __err = native_read_msr_safe((msr), &__val); \
(*low) = (u32)__val; \
(*high) = (u32)(__val >> 32); \
__err; \
})
-static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
+static inline int rdmsrq_safe(u32 msr, u64 *p)
{
- int err;
-
- *p = native_read_msr_safe(msr, &err);
- return err;
+ return native_read_msr_safe(msr, p);
}
-#define rdpmc(counter, low, high) \
-do { \
- u64 _l = native_read_pmc((counter)); \
- (low) = (u32)_l; \
- (high) = (u32)(_l >> 32); \
-} while (0)
-
-#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
+static __always_inline u64 rdpmc(int counter)
+{
+ return native_read_pmc(counter);
+}
#endif /* !CONFIG_PARAVIRT_XXL */
@@ -315,11 +243,11 @@ static __always_inline void wrmsrns(u32 msr, u64 val)
}
/*
- * 64-bit version of wrmsr_safe():
+ * Dual u32 version of wrmsrq_safe():
*/
-static inline int wrmsrl_safe(u32 msr, u64 val)
+static inline int wrmsr_safe(u32 msr, u32 low, u32 high)
{
- return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
+ return wrmsrq_safe(msr, (u64)high << 32 | low);
}
struct msr __percpu *msrs_alloc(void);
@@ -330,14 +258,14 @@ int msr_clear_bit(u32 msr, u8 bit);
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
-int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
+int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
-int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
+int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else /* CONFIG_SMP */
@@ -351,14 +279,14 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
wrmsr(msr_no, l, h);
return 0;
}
-static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+static inline int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
- rdmsrl(msr_no, *q);
+ rdmsrq(msr_no, *q);
return 0;
}
-static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+static inline int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
- wrmsrl(msr_no, q);
+ wrmsrq(msr_no, q);
return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
@@ -380,13 +308,13 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
return wrmsr_safe(msr_no, l, h);
}
-static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+static inline int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
- return rdmsrl_safe(msr_no, q);
+ return rdmsrq_safe(msr_no, q);
}
-static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+static inline int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
- return wrmsrl_safe(msr_no, q);
+ return wrmsrq_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
@@ -397,5 +325,10 @@ static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
return wrmsr_safe_regs(regs);
}
#endif /* CONFIG_SMP */
+
+/* Compatibility wrappers: */
+#define rdmsrl(msr, val) rdmsrq(msr, val)
+#define wrmsrl(msr, val) wrmsrq(msr, val)
+
#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_MSR_H */
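
The core interface change in this file: the safe read now follows the usual kernel convention of returning 0 or -EIO and passing the value out through a pointer, instead of returning the value and passing the error out. A minimal caller sketch (example_probe() is a hypothetical name, not from the patch):

static int example_probe(void)
{
	u64 val;
	int err;

	/* New style: value via pointer, 0 or -EIO as the return value. */
	err = rdmsrq_safe(MSR_IA32_APICBASE, &val);
	if (err)
		return err;

	/*
	 * Unchecked accessors keep the old rdmsrl()/wrmsrl() shape, now
	 * spelled with a 'q' suffix; the compatibility wrappers above keep
	 * unconverted callers building.
	 */
	wrmsrq(MSR_IA32_APICBASE, val);

	return 0;
}
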
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index c4c23190925c..03f680d1057a 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -175,26 +175,24 @@ static inline void __write_cr4(unsigned long x)
PVOP_VCALL1(cpu.write_cr4, x);
}
-static inline u64 paravirt_read_msr(unsigned msr)
+static inline u64 paravirt_read_msr(u32 msr)
{
return PVOP_CALL1(u64, cpu.read_msr, msr);
}
-static inline void paravirt_write_msr(unsigned msr,
- unsigned low, unsigned high)
+static inline void paravirt_write_msr(u32 msr, u64 val)
{
- PVOP_VCALL3(cpu.write_msr, msr, low, high);
+ PVOP_VCALL2(cpu.write_msr, msr, val);
}
-static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
+static inline int paravirt_read_msr_safe(u32 msr, u64 *val)
{
- return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
+ return PVOP_CALL2(int, cpu.read_msr_safe, msr, val);
}
-static inline int paravirt_write_msr_safe(unsigned msr,
- unsigned low, unsigned high)
+static inline int paravirt_write_msr_safe(u32 msr, u64 val)
{
- return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
+ return PVOP_CALL2(int, cpu.write_msr_safe, msr, val);
}
#define rdmsr(msr, val1, val2) \
@@ -204,55 +202,46 @@ do { \
val2 = _l >> 32; \
} while (0)
-#define wrmsr(msr, val1, val2) \
-do { \
- paravirt_write_msr(msr, val1, val2); \
-} while (0)
+static __always_inline void wrmsr(u32 msr, u32 low, u32 high)
+{
+ paravirt_write_msr(msr, (u64)high << 32 | low);
+}
-#define rdmsrl(msr, val) \
+#define rdmsrq(msr, val) \
do { \
val = paravirt_read_msr(msr); \
} while (0)
-static inline void wrmsrl(unsigned msr, u64 val)
+static inline void wrmsrq(u32 msr, u64 val)
{
- wrmsr(msr, (u32)val, (u32)(val>>32));
+ paravirt_write_msr(msr, val);
}
-#define wrmsr_safe(msr, a, b) paravirt_write_msr_safe(msr, a, b)
+static inline int wrmsrq_safe(u32 msr, u64 val)
+{
+ return paravirt_write_msr_safe(msr, val);
+}
/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b) \
({ \
- int _err; \
- u64 _l = paravirt_read_msr_safe(msr, &_err); \
+ u64 _l; \
+ int _err = paravirt_read_msr_safe((msr), &_l); \
(*a) = (u32)_l; \
- (*b) = _l >> 32; \
+ (*b) = (u32)(_l >> 32); \
_err; \
})
-static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+static __always_inline int rdmsrq_safe(u32 msr, u64 *p)
{
- int err;
-
- *p = paravirt_read_msr_safe(msr, &err);
- return err;
+ return paravirt_read_msr_safe(msr, p);
}
-static inline unsigned long long paravirt_read_pmc(int counter)
+static __always_inline u64 rdpmc(int counter)
{
return PVOP_CALL1(u64, cpu.read_pmc, counter);
}
-#define rdpmc(counter, low, high) \
-do { \
- u64 _l = paravirt_read_pmc(counter); \
- low = (u32)_l; \
- high = _l >> 32; \
-} while (0)
-
-#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
-
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 631c306ce1ff..b08b9d3122d6 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -91,15 +91,15 @@ struct pv_cpu_ops {
unsigned int *ecx, unsigned int *edx);
/* Unsafe MSR operations. These will warn or panic on failure. */
- u64 (*read_msr)(unsigned int msr);
- void (*write_msr)(unsigned int msr, unsigned low, unsigned high);
+ u64 (*read_msr)(u32 msr);
+ void (*write_msr)(u32 msr, u64 val);
/*
* Safe MSR operations.
- * read sets err to 0 or -EIO. write returns 0 or -EIO.
+ * Returns 0 or -EIO.
*/
- u64 (*read_msr_safe)(unsigned int msr, int *err);
- int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);
+ int (*read_msr_safe)(u32 msr, u64 *val);
+ int (*write_msr_safe)(u32 msr, u64 val);
u64 (*read_pmc)(int counter);
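
With both write paths taking a plain u64 and both safe paths returning an int error, the native accessors from msr.h now match these prototypes exactly. A sketch of how a backend slots in, using designated initializers (illustrative only, not the patch's actual wiring):

static const struct pv_cpu_ops example_msr_ops = {
	.read_msr	= native_read_msr,	 /* u64 (*)(u32) */
	.write_msr	= native_write_msr,	 /* void (*)(u32, u64) */
	.read_msr_safe	= native_read_msr_safe,	 /* int (*)(u32, u64 *) */
	.write_msr_safe	= native_write_msr_safe, /* int (*)(u32, u64) */
};
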
diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
index 011bf67a1866..bd6afe805cf6 100644
--- a/arch/x86/include/asm/resctrl.h
+++ b/arch/x86/include/asm/resctrl.h
@@ -9,6 +9,8 @@
#include <linux/resctrl_types.h>
#include <linux/sched.h>
+#include <asm/msr.h>
+
/*
* This value can never be a valid CLOSID, and is used when mapping a
* (closid, rmid) pair to an index and back. On x86 only the RMID is
diff --git a/arch/x86/include/asm/sev-internal.h b/arch/x86/include/asm/sev-internal.h
index b7232081f8f7..3dfd306d1c9e 100644
--- a/arch/x86/include/asm/sev-internal.h
+++ b/arch/x86/include/asm/sev-internal.h
@@ -84,7 +84,7 @@ int svsm_perform_call_protocol(struct svsm_call *call);
static inline u64 sev_es_rd_ghcb_msr(void)
{
- return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
+ return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB);
}
static __always_inline void sev_es_wr_ghcb_msr(u64 val)
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index 658b690b2ccb..00b7e0398210 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -84,7 +84,7 @@ static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
static __always_inline void __update_spec_ctrl(u64 val)
{
__this_cpu_write(x86_spec_ctrl_current, val);
- native_wrmsrl(MSR_IA32_SPEC_CTRL, val);
+ native_wrmsrq(MSR_IA32_SPEC_CTRL, val);
}
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index d8416b3bf832..e8e5aab06255 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -9,6 +9,7 @@
#include <asm/desc.h>
#include <asm/fpu/api.h>
+#include <asm/msr.h>
/* image of the saved processor state */
struct saved_context {
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 54df06687d83..b512f9665f78 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -9,6 +9,7 @@
#include <asm/desc.h>
#include <asm/fpu/api.h>
+#include <asm/msr.h>
/*
* Image of the saved processor state, used by the low level ACPI suspend to
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 75248546403d..499b1c15cc8b 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -52,6 +52,8 @@ do { \
} while (0)
#ifdef CONFIG_X86_32
+#include <asm/msr.h>
+
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
/* Only happens when SEP is enabled, no need to test "SEP"arately: */
@@ -59,7 +61,7 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
return;
this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
- wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+ wrmsrq(MSR_IA32_SYSENTER_CS, thread->sysenter_cs);
}
#endif
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 94408a784c8e..4f7f09f50552 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -5,10 +5,65 @@
#ifndef _ASM_X86_TSC_H
#define _ASM_X86_TSC_H
+#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
+/**
+ * rdtsc() - returns the current TSC without ordering constraints
+ *
+ * rdtsc() returns the result of RDTSC as a 64-bit integer. The
+ * only ordering constraint it supplies is the ordering implied by
+ * "asm volatile": it will put the RDTSC in the place you expect. The
+ * CPU can and will speculatively execute that RDTSC, though, so the
+ * results can be non-monotonic if compared on different CPUs.
+ */
+static __always_inline u64 rdtsc(void)
+{
+ EAX_EDX_DECLARE_ARGS(val, low, high);
+
+ asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
+
+ return EAX_EDX_VAL(val, low, high);
+}
+
+/**
+ * rdtsc_ordered() - read the current TSC in program order
+ *
+ * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
+ * It is ordered like a load to a global in-memory counter. It should
+ * be impossible to observe non-monotonic rdtsc() behavior
+ * across multiple CPUs as long as the TSC is synced.
+ */
+static __always_inline u64 rdtsc_ordered(void)
+{
+ EAX_EDX_DECLARE_ARGS(val, low, high);
+
+ /*
+ * The RDTSC instruction is not ordered relative to memory
+ * access. The Intel SDM and the AMD APM are both vague on this
+ * point, but empirically an RDTSC instruction can be
+ * speculatively executed before prior loads. An RDTSC
+ * immediately after an appropriate barrier appears to be
+ * ordered as a normal load, that is, it provides the same
+ * ordering guarantees as reading from a global memory location
+ * that some other imaginary CPU is updating continuously with a
+ * time stamp.
+ *
+ * Thus, use the preferred barrier on the respective CPU, aiming for
+ * RDTSCP as the default.
+ */
+ asm volatile(ALTERNATIVE_2("rdtsc",
+ "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
+ "rdtscp", X86_FEATURE_RDTSCP)
+ : EAX_EDX_RET(val, low, high)
+ /* RDTSCP clobbers ECX with MSR_TSC_AUX. */
+ :: "ecx");
+
+ return EAX_EDX_VAL(val, low, high);
+}
+
/*
* Standard way to access the cycle counter.
*/
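
Since rdtsc() gives no ordering guarantees beyond "asm volatile", timing code should use the ordered variant moved in above. A minimal sketch, assuming the measured region must not be reordered around the reads (example_measure_cycles() is a hypothetical helper):

static inline u64 example_measure_cycles(void (*fn)(void))
{
	u64 start, end;

	start = rdtsc_ordered();	/* ordered like a load */
	fn();
	end = rdtsc_ordered();		/* not speculated ahead of fn() */

	return end - start;
}
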