Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/smp.h            23
-rw-r--r--  arch/x86/include/asm/special_insns.h  29
2 files changed, 48 insertions, 4 deletions
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 0c1c68039d6f..22bfebe6776d 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -112,7 +112,10 @@ void __noreturn hlt_play_dead(void);
 void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
-int wbinvd_on_all_cpus(void);
+void wbinvd_on_all_cpus(void);
+void wbinvd_on_cpus_mask(struct cpumask *cpus);
+void wbnoinvd_on_all_cpus(void);
+void wbnoinvd_on_cpus_mask(struct cpumask *cpus);
 
 void smp_kick_mwait_play_dead(void);
 void __noreturn mwait_play_dead(unsigned int eax_hint);
@@ -148,10 +151,24 @@ static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
 
 #else /* !CONFIG_SMP */
 #define wbinvd_on_cpu(cpu)		wbinvd()
-static inline int wbinvd_on_all_cpus(void)
+static inline void wbinvd_on_all_cpus(void)
 {
 	wbinvd();
-	return 0;
+}
+
+static inline void wbinvd_on_cpus_mask(struct cpumask *cpus)
+{
+	wbinvd();
+}
+
+static inline void wbnoinvd_on_all_cpus(void)
+{
+	wbnoinvd();
+}
+
+static inline void wbnoinvd_on_cpus_mask(struct cpumask *cpus)
+{
+	wbnoinvd();
 }
 
 static inline struct cpumask *cpu_llc_shared_mask(int cpu)
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index ecda17efa042..fde2bd7af19e 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -104,9 +104,36 @@ static inline void wrpkru(u32 pkru)
 }
 #endif
 
+/*
+ * Write back all modified lines in all levels of cache associated with this
+ * logical processor to main memory, and then invalidate all caches.  Depending
+ * on the micro-architecture, WBINVD (and WBNOINVD below) may or may not affect
+ * lower level caches associated with another logical processor that shares any
+ * level of this processor's cache hierarchy.
+ */
 static __always_inline void wbinvd(void)
 {
-	asm volatile("wbinvd": : :"memory");
+	asm volatile("wbinvd" : : : "memory");
+}
+
+/* Instruction encoding provided for binutils backwards compatibility. */
+#define ASM_WBNOINVD _ASM_BYTES(0xf3,0x0f,0x09)
+
+/*
+ * Write back all modified lines in all levels of cache associated with this
+ * logical processor to main memory, but do NOT explicitly invalidate caches,
+ * i.e. leave all/most cache lines in the hierarchy in non-modified state.
+ */
+static __always_inline void wbnoinvd(void)
+{
+	/*
+	 * Explicitly encode WBINVD if X86_FEATURE_WBNOINVD is unavailable even
+	 * though WBNOINVD is backwards compatible (it's simply WBINVD with an
+	 * ignored REP prefix), to guarantee that WBNOINVD isn't used if it
+	 * needs to be avoided for any reason.  For all supported usage in the
+	 * kernel, WBINVD is functionally a superset of WBNOINVD.
+	 */
+	alternative("wbinvd", ASM_WBNOINVD, X86_FEATURE_WBNOINVD);
 }
 
 static inline unsigned long __read_cr4(void)
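
The diff above only covers the header declarations (the diffstat is limited to arch/x86/include/asm), so the CONFIG_SMP implementations of the new helpers are not shown here. The following is a minimal sketch, not the actual patch, of how such helpers could be backed by the generic cross-CPU call API in <linux/smp.h>; the __wbinvd/__wbnoinvd callback names are illustrative placeholders.

/*
 * Sketch only: illustrates how the helpers declared in <asm/smp.h> could be
 * implemented on SMP builds using on_each_cpu()/on_each_cpu_mask().
 */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <asm/smp.h>
#include <asm/special_insns.h>

/* IPI callbacks: run on each targeted CPU with interrupts disabled. */
static void __wbinvd(void *dummy)
{
	wbinvd();
}

static void __wbnoinvd(void *dummy)
{
	wbnoinvd();
}

void wbinvd_on_all_cpus(void)
{
	/* Write back and invalidate caches on every online CPU; wait for completion. */
	on_each_cpu(__wbinvd, NULL, 1);
}

void wbinvd_on_cpus_mask(struct cpumask *cpus)
{
	/* Same, but restricted to the CPUs in @cpus. */
	on_each_cpu_mask(cpus, __wbinvd, NULL, 1);
}

void wbnoinvd_on_all_cpus(void)
{
	/* Write back without an explicit invalidate on every online CPU. */
	on_each_cpu(__wbnoinvd, NULL, 1);
}

void wbnoinvd_on_cpus_mask(struct cpumask *cpus)
{
	on_each_cpu_mask(cpus, __wbnoinvd, NULL, 1);
}

A caller that only needs modified lines written back can use the wbnoinvd variants to avoid discarding clean cache contents; on CPUs without X86_FEATURE_WBNOINVD the alternative() in wbnoinvd() transparently falls back to a full WBINVD, which is functionally a superset for the supported in-kernel uses.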
