path: root/arch/x86/include/asm/spec-ctrl.h
blob: 45ef00ad51050250729a43f4cc106d23c2b72d81 (plain)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SPECCTRL_H_
#define _ASM_X86_SPECCTRL_H_

#include <linux/thread_info.h>
#include <asm/nospec-branch.h>

/*
 * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
 * the guest has, while on VMEXIT we restore the host view. This
 * would be easier if SPEC_CTRL were architecturally maskable or
 * shadowable for guests, but this is not (currently) the case.
 * Both functions take the guest's view of the SPEC_CTRL MSR as
 * a parameter.
 */
extern void x86_spec_ctrl_set_guest(u64);
extern void x86_spec_ctrl_restore_host(u64);
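
/*
 * Illustrative sketch (an assumption, not taken from this header): a
 * hypervisor run loop is expected to bracket guest entry with these
 * helpers, roughly:
 *
 *	x86_spec_ctrl_set_guest(guest_spec_ctrl);
 *	... VMENTER/VMRUN ...
 *	x86_spec_ctrl_restore_host(guest_spec_ctrl);
 *
 * where guest_spec_ctrl stands for the guest's SPEC_CTRL MSR value.
 */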

/* AMD-specific Speculative Store Bypass MSR data */
extern u64 x86_amd_ls_cfg_base;
extern u64 x86_amd_ls_cfg_rds_mask;

/* The Intel SPEC_CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

/*
 * Convert the TIF_RDS bit in the thread flags into the corresponding
 * SPEC_CTRL_RDS bit of the SPEC_CTRL MSR value.
 */
static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
{
	/* The right shift below relies on TIF_RDS being >= SPEC_CTRL_RDS_SHIFT. */
	BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
	return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
}

/*
 * Convert the TIF_RDS bit in the thread flags into the AMD LS_CFG MSR
 * mask used to enable Reduced Data Speculation, or 0 if the bit is clear.
 */
static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
{
	return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
}
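
/*
 * Illustrative sketch (an assumption, not part of this header): the
 * conversion helpers above are meant to be OR'ed into the cached MSR
 * base values before writing the result to the corresponding MSR, e.g.
 *
 *	wrmsrl(MSR_IA32_SPEC_CTRL,
 *	       x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn));
 *
 * or, on AMD parts that use LS_CFG:
 *
 *	wrmsrl(MSR_AMD64_LS_CFG,
 *	       x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn));
 */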

extern void speculative_store_bypass_update(void);

#endif /* _ASM_X86_SPECCTRL_H_ */