author    Marc Zyngier <maz@kernel.org>  2026-02-05 09:17:48 +0000
committer Marc Zyngier <maz@kernel.org>  2026-02-05 09:17:48 +0000
commit    1df3f01ebfb9e7d00fd10711fe9e98b25e01e59a (patch)
tree      eb0dac26a6ac9ef707665fb2b90133ef0776cf6b
parent    3ef5ba663a33aa3da901117acd3555ca582341f0 (diff)
parent    edba407843340c4b66134fce6c54a007c1ac83a2 (diff)
Merge branch kvm-arm64/resx into kvmarm-master/next
* kvm-arm64/resx:
  : .
  : Add infrastructure to deal with the full gamut of RESx bits
  : for NV. As a result, it is now possible to have the expected
  : semantics for some bits such as SCTLR_EL2.SPAN.
  : .
  KVM: arm64: Add debugfs file dumping computed RESx values
  KVM: arm64: Add sanitisation to SCTLR_EL2
  KVM: arm64: Remove all traces of HCR_EL2.MIOCNCE
  KVM: arm64: Remove all traces of FEAT_TME
  KVM: arm64: Simplify handling of full register invalid constraint
  KVM: arm64: Get rid of FIXED_VALUE altogether
  KVM: arm64: Simplify handling of HCR_EL2.E2H RESx
  KVM: arm64: Move RESx into individual register descriptors
  KVM: arm64: Add RES1_WHEN_E2Hx constraints as configuration flags
  KVM: arm64: Add REQUIRES_E2H1 constraint as configuration flags
  KVM: arm64: Simplify FIXED_VALUE handling
  KVM: arm64: Convert HCR_EL2.RW to AS_RES1
  KVM: arm64: Correctly handle SCTLR_EL1 RES1 bits for unsupported features
  KVM: arm64: Allow RES1 bits to be inferred from configuration
  KVM: arm64: Inherit RESx bits from FGT register descriptors
  KVM: arm64: Extend unified RESx handling to runtime sanitisation
  KVM: arm64: Introduce data structure tracking both RES0 and RES1 bits
  KVM: arm64: Introduce standalone FGU computing primitive
  KVM: arm64: Remove duplicate configuration for SCTLR_EL1.{EE,E0E}
  arm64: Convert SCTLR_EL2 to sysreg infrastructure

Signed-off-by: Marc Zyngier <maz@kernel.org>
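At its core, the series replaces per-register ad-hoc RES0/RES1 handling with a single struct resx carried per sanitised register. A minimal sketch of the sanitisation semantics with simplified types; sanitise_reg() is a hypothetical name for illustration, the in-tree equivalent being kvm_vcpu_apply_reg_masks() in the nested.c hunk below:

#include <stdint.h>

/* Mirrors the struct resx added to kvm_host.h in this merge. */
struct resx {
	uint64_t res0;	/* bits forced to 0 for this VM's configuration */
	uint64_t res1;	/* bits forced to 1 for this VM's configuration */
};

/* Hypothetical helper; kvm_vcpu_apply_reg_masks() applies the same masks. */
static inline uint64_t sanitise_reg(uint64_t v, struct resx rx)
{
	v &= ~rx.res0;	/* clear the RES0 bits */
	v |= rx.res1;	/* set the RES1 bits */
	return v;
}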
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 38
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 7
-rw-r--r--  arch/arm64/kvm/config.c | 427
-rw-r--r--  arch/arm64/kvm/emulate-nested.c | 10
-rw-r--r--  arch/arm64/kvm/nested.c | 151
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 68
-rw-r--r--  arch/arm64/tools/sysreg | 82
-rw-r--r--  tools/arch/arm64/include/asm/sysreg.h | 6
-rw-r--r--  tools/perf/Documentation/perf-arm-spe.txt | 1
-rw-r--r--  tools/testing/selftests/kvm/arm64/set_id_regs.c | 1
10 files changed, 478 insertions, 313 deletions
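The bulk of the config.c rework drops the FIXED_VALUE/fval callbacks in favour of declarative flags (FORCE_RESx, AS_RES1, REQUIRES_E2H1, RES1_WHEN_E2H0/1). A condensed, self-contained sketch of the classification that compute_resx_bits() performs for one bits-to-feature entry; the flag values are taken from the patch, while classify() and feat_supported are hypothetical simplifications of the real matching code:

#include <stdbool.h>
#include <stdint.h>

#define FORCE_RESx	(1UL << 2)	/* unconditional RESx */
#define AS_RES1		(1UL << 4)	/* RES1 when not supported */
#define REQUIRES_E2H1	(1UL << 5)	/* HCR_EL2.E2H RES1 is a pre-condition */
#define RES1_WHEN_E2H0	(1UL << 6)	/* RES1 when E2H=0 and not supported */
#define RES1_WHEN_E2H1	(1UL << 7)	/* RES1 when E2H=1 and not supported */

struct resx { uint64_t res0, res1; };

/* One bits-to-feature entry; feat_supported stands in for the idreg or
 * callback matching done by the real code. */
static void classify(struct resx *rx, uint64_t bits, unsigned long flags,
		     bool feat_supported, bool e2h0)
{
	bool match = (flags & FORCE_RESx) ? false : feat_supported;

	if (flags & REQUIRES_E2H1)
		match &= !e2h0;

	if (match)
		return;		/* supported bits are neither RES0 nor RES1 */

	if ((flags & AS_RES1) ||
	    (e2h0 && (flags & RES1_WHEN_E2H0)) ||
	    (!e2h0 && (flags & RES1_WHEN_E2H1)))
		rx->res1 |= bits;	/* unsupported, reads as 1 */
	else
		rx->res0 |= bits;	/* unsupported, reads as 0 */
}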
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e364acfb6c9a..26c1c3faedc3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -492,7 +492,6 @@ enum vcpu_sysreg {
DBGVCR32_EL2, /* Debug Vector Catch Register */
/* EL2 registers */
- SCTLR_EL2, /* System Control Register (EL2) */
ACTLR_EL2, /* Auxiliary Control Register (EL2) */
CPTR_EL2, /* Architectural Feature Trap Register (EL2) */
HACR_EL2, /* Hypervisor Auxiliary Control Register */
@@ -523,6 +522,7 @@ enum vcpu_sysreg {
/* Anything from this can be RES0/RES1 sanitised */
MARKER(__SANITISED_REG_START__),
+ SCTLR_EL2, /* System Control Register (EL2) */
TCR2_EL2, /* Extended Translation Control Register (EL2) */
SCTLR2_EL2, /* System Control Register 2 (EL2) */
MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */
@@ -623,13 +623,39 @@ enum vcpu_sysreg {
NR_SYS_REGS /* Nothing after this line! */
};
+struct resx {
+ u64 res0;
+ u64 res1;
+};
+
struct kvm_sysreg_masks {
- struct {
- u64 res0;
- u64 res1;
- } mask[NR_SYS_REGS - __SANITISED_REG_START__];
+ struct resx mask[NR_SYS_REGS - __SANITISED_REG_START__];
};
+static inline struct resx __kvm_get_sysreg_resx(struct kvm_arch *arch,
+ enum vcpu_sysreg sr)
+{
+ struct kvm_sysreg_masks *masks;
+
+ masks = arch->sysreg_masks;
+ if (likely(masks &&
+ sr >= __SANITISED_REG_START__ && sr < NR_SYS_REGS))
+ return masks->mask[sr - __SANITISED_REG_START__];
+
+ return (struct resx){};
+}
+
+#define kvm_get_sysreg_resx(k, sr) __kvm_get_sysreg_resx(&(k)->arch, (sr))
+
+static inline void __kvm_set_sysreg_resx(struct kvm_arch *arch,
+ enum vcpu_sysreg sr, struct resx resx)
+{
+ arch->sysreg_masks->mask[sr - __SANITISED_REG_START__] = resx;
+}
+
+#define kvm_set_sysreg_resx(k, sr, resx) \
+ __kvm_set_sysreg_resx(&(k)->arch, (sr), (resx))
+
struct fgt_masks {
const char *str;
u64 mask;
@@ -1604,7 +1630,7 @@ static inline bool kvm_arch_has_irq_bypass(void)
}
void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt);
-void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1);
+struct resx get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg);
void check_feature_map(void);
void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 9faf503429c6..f4436ecc630c 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -504,7 +504,6 @@
#define SYS_VPIDR_EL2 sys_reg(3, 4, 0, 0, 0)
#define SYS_VMPIDR_EL2 sys_reg(3, 4, 0, 0, 5)
-#define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0)
#define SYS_ACTLR_EL2 sys_reg(3, 4, 1, 0, 1)
#define SYS_SCTLR2_EL2 sys_reg(3, 4, 1, 0, 3)
#define SYS_HCR_EL2 sys_reg(3, 4, 1, 1, 0)
@@ -836,12 +835,6 @@
#define SCTLR_ELx_A (BIT(1))
#define SCTLR_ELx_M (BIT(0))
-/* SCTLR_EL2 specific flags. */
-#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \
- (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \
- (BIT(29)))
-
-#define SCTLR_EL2_BT (BIT(36))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
#else
diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
index 9c04f895d376..d9f553cbf9df 100644
--- a/arch/arm64/kvm/config.c
+++ b/arch/arm64/kvm/config.c
@@ -22,8 +22,12 @@ struct reg_bits_to_feat_map {
#define NEVER_FGU BIT(0) /* Can trap, but never UNDEF */
#define CALL_FUNC BIT(1) /* Needs to evaluate tons of crap */
-#define FIXED_VALUE BIT(2) /* RAZ/WI or RAO/WI in KVM */
+#define FORCE_RESx BIT(2) /* Unconditional RESx */
#define MASKS_POINTER BIT(3) /* Pointer to fgt_masks struct instead of bits */
+#define AS_RES1 BIT(4) /* RES1 when not supported */
+#define REQUIRES_E2H1 BIT(5) /* Add HCR_EL2.E2H RES1 as a pre-condition */
+#define RES1_WHEN_E2H0 BIT(6) /* RES1 when E2H=0 and not supported */
+#define RES1_WHEN_E2H1 BIT(7) /* RES1 when E2H=1 and not supported */
unsigned long flags;
@@ -36,7 +40,6 @@ struct reg_bits_to_feat_map {
s8 lo_lim;
};
bool (*match)(struct kvm *);
- bool (*fval)(struct kvm *, u64 *);
};
};
@@ -69,18 +72,17 @@ struct reg_feat_map_desc {
.lo_lim = id ##_## fld ##_## lim \
}
-#define __NEEDS_FEAT_2(m, f, w, fun, dummy) \
+#define __NEEDS_FEAT_1(m, f, w, fun) \
{ \
.w = (m), \
.flags = (f) | CALL_FUNC, \
- .fval = (fun), \
+ .match = (fun), \
}
-#define __NEEDS_FEAT_1(m, f, w, fun) \
+#define __NEEDS_FEAT_0(m, f, w, ...) \
{ \
.w = (m), \
- .flags = (f) | CALL_FUNC, \
- .match = (fun), \
+ .flags = (f), \
}
#define __NEEDS_FEAT_FLAG(m, f, w, ...) \
@@ -89,9 +91,6 @@ struct reg_feat_map_desc {
#define NEEDS_FEAT_FLAG(m, f, ...) \
__NEEDS_FEAT_FLAG(m, f, bits, __VA_ARGS__)
-#define NEEDS_FEAT_FIXED(m, ...) \
- __NEEDS_FEAT_FLAG(m, FIXED_VALUE, bits, __VA_ARGS__, 0)
-
#define NEEDS_FEAT_MASKS(p, ...) \
__NEEDS_FEAT_FLAG(p, MASKS_POINTER, masks, __VA_ARGS__)
@@ -101,10 +100,14 @@ struct reg_feat_map_desc {
*/
#define NEEDS_FEAT(m, ...) NEEDS_FEAT_FLAG(m, 0, __VA_ARGS__)
+/* Declare fixed RESx bits */
+#define FORCE_RES0(m) NEEDS_FEAT_FLAG(m, FORCE_RESx)
+#define FORCE_RES1(m) NEEDS_FEAT_FLAG(m, FORCE_RESx | AS_RES1)
+
/*
- * Declare the dependency between a non-FGT register, a set of
- * feature, and the set of individual bits it contains. This generates
- * a struct reg_feat_map_desc.
+ * Declare the dependency between a non-FGT register, a set of features,
+ * and the set of individual bits it contains. This generates a struct
+ * reg_feat_map_desc.
*/
#define DECLARE_FEAT_MAP(n, r, m, f) \
struct reg_feat_map_desc n = { \
@@ -184,7 +187,6 @@ struct reg_feat_map_desc {
#define FEAT_RME ID_AA64PFR0_EL1, RME, IMP
#define FEAT_MPAM ID_AA64PFR0_EL1, MPAM, 1
#define FEAT_S2FWB ID_AA64MMFR2_EL1, FWB, IMP
-#define FEAT_TME ID_AA64ISAR0_EL1, TME, IMP
#define FEAT_TWED ID_AA64MMFR1_EL1, TWED, IMP
#define FEAT_E2H0 ID_AA64MMFR4_EL1, E2H0, IMP
#define FEAT_SRMASK ID_AA64MMFR4_EL1, SRMASK, IMP
@@ -310,21 +312,6 @@ static bool feat_trbe_mpam(struct kvm *kvm)
(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_MPAM));
}
-static bool feat_asid2_e2h1(struct kvm *kvm)
-{
- return kvm_has_feat(kvm, FEAT_ASID2) && !kvm_has_feat(kvm, FEAT_E2H0);
-}
-
-static bool feat_d128_e2h1(struct kvm *kvm)
-{
- return kvm_has_feat(kvm, FEAT_D128) && !kvm_has_feat(kvm, FEAT_E2H0);
-}
-
-static bool feat_mec_e2h1(struct kvm *kvm)
-{
- return kvm_has_feat(kvm, FEAT_MEC) && !kvm_has_feat(kvm, FEAT_E2H0);
-}
-
static bool feat_ebep_pmuv3_ss(struct kvm *kvm)
{
return kvm_has_feat(kvm, FEAT_EBEP) || kvm_has_feat(kvm, FEAT_PMUv3_SS);
@@ -388,31 +375,6 @@ static bool feat_vmid16(struct kvm *kvm)
return kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16);
}
-static bool compute_hcr_rw(struct kvm *kvm, u64 *bits)
-{
- /* This is purely academic: AArch32 and NV are mutually exclusive */
- if (bits) {
- if (kvm_has_feat(kvm, FEAT_AA32EL1))
- *bits &= ~HCR_EL2_RW;
- else
- *bits |= HCR_EL2_RW;
- }
-
- return true;
-}
-
-static bool compute_hcr_e2h(struct kvm *kvm, u64 *bits)
-{
- if (bits) {
- if (kvm_has_feat(kvm, FEAT_E2H0))
- *bits &= ~HCR_EL2_E2H;
- else
- *bits |= HCR_EL2_E2H;
- }
-
- return true;
-}
-
static const struct reg_bits_to_feat_map hfgrtr_feat_map[] = {
NEEDS_FEAT(HFGRTR_EL2_nAMAIR2_EL1 |
HFGRTR_EL2_nMAIR2_EL1,
@@ -966,7 +928,7 @@ static const DECLARE_FEAT_MAP(hcrx_desc, __HCRX_EL2,
static const struct reg_bits_to_feat_map hcr_feat_map[] = {
NEEDS_FEAT(HCR_EL2_TID0, FEAT_AA32EL0),
- NEEDS_FEAT_FIXED(HCR_EL2_RW, compute_hcr_rw),
+ NEEDS_FEAT_FLAG(HCR_EL2_RW, AS_RES1, FEAT_AA32EL1),
NEEDS_FEAT(HCR_EL2_HCD, not_feat_aa64el3),
NEEDS_FEAT(HCR_EL2_AMO |
HCR_EL2_BSU |
@@ -976,7 +938,6 @@ static const struct reg_bits_to_feat_map hcr_feat_map[] = {
HCR_EL2_FMO |
HCR_EL2_ID |
HCR_EL2_IMO |
- HCR_EL2_MIOCNCE |
HCR_EL2_PTW |
HCR_EL2_SWIO |
HCR_EL2_TACR |
@@ -1028,11 +989,12 @@ static const struct reg_bits_to_feat_map hcr_feat_map[] = {
NEEDS_FEAT(HCR_EL2_FIEN, feat_rasv1p1),
NEEDS_FEAT(HCR_EL2_GPF, FEAT_RME),
NEEDS_FEAT(HCR_EL2_FWB, FEAT_S2FWB),
- NEEDS_FEAT(HCR_EL2_TME, FEAT_TME),
NEEDS_FEAT(HCR_EL2_TWEDEL |
HCR_EL2_TWEDEn,
FEAT_TWED),
- NEEDS_FEAT_FIXED(HCR_EL2_E2H, compute_hcr_e2h),
+ NEEDS_FEAT_FLAG(HCR_EL2_E2H, RES1_WHEN_E2H1 | FORCE_RESx),
+ FORCE_RES0(HCR_EL2_RES0),
+ FORCE_RES1(HCR_EL2_RES1),
};
static const DECLARE_FEAT_MAP(hcr_desc, HCR_EL2,
@@ -1053,21 +1015,23 @@ static const struct reg_bits_to_feat_map sctlr2_feat_map[] = {
SCTLR2_EL1_CPTM |
SCTLR2_EL1_CPTM0,
FEAT_CPA2),
+ FORCE_RES0(SCTLR2_EL1_RES0),
+ FORCE_RES1(SCTLR2_EL1_RES1),
};
static const DECLARE_FEAT_MAP(sctlr2_desc, SCTLR2_EL1,
sctlr2_feat_map, FEAT_SCTLR2);
static const struct reg_bits_to_feat_map tcr2_el2_feat_map[] = {
- NEEDS_FEAT(TCR2_EL2_FNG1 |
- TCR2_EL2_FNG0 |
- TCR2_EL2_A2,
- feat_asid2_e2h1),
- NEEDS_FEAT(TCR2_EL2_DisCH1 |
- TCR2_EL2_DisCH0 |
- TCR2_EL2_D128,
- feat_d128_e2h1),
- NEEDS_FEAT(TCR2_EL2_AMEC1, feat_mec_e2h1),
+ NEEDS_FEAT_FLAG(TCR2_EL2_FNG1 |
+ TCR2_EL2_FNG0 |
+ TCR2_EL2_A2,
+ REQUIRES_E2H1, FEAT_ASID2),
+ NEEDS_FEAT_FLAG(TCR2_EL2_DisCH1 |
+ TCR2_EL2_DisCH0 |
+ TCR2_EL2_D128,
+ REQUIRES_E2H1, FEAT_D128),
+ NEEDS_FEAT_FLAG(TCR2_EL2_AMEC1, REQUIRES_E2H1, FEAT_MEC),
NEEDS_FEAT(TCR2_EL2_AMEC0, FEAT_MEC),
NEEDS_FEAT(TCR2_EL2_HAFT, FEAT_HAFT),
NEEDS_FEAT(TCR2_EL2_PTTWI |
@@ -1078,33 +1042,36 @@ static const struct reg_bits_to_feat_map tcr2_el2_feat_map[] = {
TCR2_EL2_E0POE,
FEAT_S1POE),
NEEDS_FEAT(TCR2_EL2_PIE, FEAT_S1PIE),
+ FORCE_RES0(TCR2_EL2_RES0),
+ FORCE_RES1(TCR2_EL2_RES1),
};
static const DECLARE_FEAT_MAP(tcr2_el2_desc, TCR2_EL2,
tcr2_el2_feat_map, FEAT_TCR2);
static const struct reg_bits_to_feat_map sctlr_el1_feat_map[] = {
- NEEDS_FEAT(SCTLR_EL1_CP15BEN |
- SCTLR_EL1_ITD |
- SCTLR_EL1_SED,
- FEAT_AA32EL0),
+ NEEDS_FEAT(SCTLR_EL1_CP15BEN, FEAT_AA32EL0),
+ NEEDS_FEAT_FLAG(SCTLR_EL1_ITD |
+ SCTLR_EL1_SED,
+ AS_RES1, FEAT_AA32EL0),
NEEDS_FEAT(SCTLR_EL1_BT0 |
SCTLR_EL1_BT1,
FEAT_BTI),
NEEDS_FEAT(SCTLR_EL1_CMOW, FEAT_CMOW),
- NEEDS_FEAT(SCTLR_EL1_TSCXT, feat_csv2_2_csv2_1p2),
- NEEDS_FEAT(SCTLR_EL1_EIS |
- SCTLR_EL1_EOS,
- FEAT_ExS),
+ NEEDS_FEAT_FLAG(SCTLR_EL1_TSCXT,
+ AS_RES1, feat_csv2_2_csv2_1p2),
+ NEEDS_FEAT_FLAG(SCTLR_EL1_EIS |
+ SCTLR_EL1_EOS,
+ AS_RES1, FEAT_ExS),
NEEDS_FEAT(SCTLR_EL1_EnFPM, FEAT_FPMR),
NEEDS_FEAT(SCTLR_EL1_IESB, FEAT_IESB),
NEEDS_FEAT(SCTLR_EL1_EnALS, FEAT_LS64),
NEEDS_FEAT(SCTLR_EL1_EnAS0, FEAT_LS64_ACCDATA),
NEEDS_FEAT(SCTLR_EL1_EnASR, FEAT_LS64_V),
NEEDS_FEAT(SCTLR_EL1_nAA, FEAT_LSE2),
- NEEDS_FEAT(SCTLR_EL1_LSMAOE |
- SCTLR_EL1_nTLSMD,
- FEAT_LSMAOC),
+ NEEDS_FEAT_FLAG(SCTLR_EL1_LSMAOE |
+ SCTLR_EL1_nTLSMD,
+ AS_RES1, FEAT_LSMAOC),
NEEDS_FEAT(SCTLR_EL1_EE, FEAT_MixedEnd),
NEEDS_FEAT(SCTLR_EL1_E0E, feat_mixedendel0),
NEEDS_FEAT(SCTLR_EL1_MSCEn, FEAT_MOPS),
@@ -1120,7 +1087,8 @@ static const struct reg_bits_to_feat_map sctlr_el1_feat_map[] = {
NEEDS_FEAT(SCTLR_EL1_NMI |
SCTLR_EL1_SPINTMASK,
FEAT_NMI),
- NEEDS_FEAT(SCTLR_EL1_SPAN, FEAT_PAN),
+ NEEDS_FEAT_FLAG(SCTLR_EL1_SPAN,
+ AS_RES1, FEAT_PAN),
NEEDS_FEAT(SCTLR_EL1_EPAN, FEAT_PAN3),
NEEDS_FEAT(SCTLR_EL1_EnDA |
SCTLR_EL1_EnDB |
@@ -1131,17 +1099,10 @@ static const struct reg_bits_to_feat_map sctlr_el1_feat_map[] = {
NEEDS_FEAT(SCTLR_EL1_EnRCTX, FEAT_SPECRES),
NEEDS_FEAT(SCTLR_EL1_DSSBS, FEAT_SSBS),
NEEDS_FEAT(SCTLR_EL1_TIDCP, FEAT_TIDCP1),
- NEEDS_FEAT(SCTLR_EL1_TME0 |
- SCTLR_EL1_TME |
- SCTLR_EL1_TMT0 |
- SCTLR_EL1_TMT,
- FEAT_TME),
NEEDS_FEAT(SCTLR_EL1_TWEDEL |
SCTLR_EL1_TWEDEn,
FEAT_TWED),
NEEDS_FEAT(SCTLR_EL1_UCI |
- SCTLR_EL1_EE |
- SCTLR_EL1_E0E |
SCTLR_EL1_WXN |
SCTLR_EL1_nTWE |
SCTLR_EL1_nTWI |
@@ -1155,11 +1116,91 @@ static const struct reg_bits_to_feat_map sctlr_el1_feat_map[] = {
SCTLR_EL1_A |
SCTLR_EL1_M,
FEAT_AA64EL1),
+ FORCE_RES0(SCTLR_EL1_RES0),
+ FORCE_RES1(SCTLR_EL1_RES1),
};
static const DECLARE_FEAT_MAP(sctlr_el1_desc, SCTLR_EL1,
sctlr_el1_feat_map, FEAT_AA64EL1);
+static const struct reg_bits_to_feat_map sctlr_el2_feat_map[] = {
+ NEEDS_FEAT_FLAG(SCTLR_EL2_CP15BEN,
+ RES1_WHEN_E2H0 | REQUIRES_E2H1,
+ FEAT_AA32EL0),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_ITD |
+ SCTLR_EL2_SED,
+ RES1_WHEN_E2H1 | REQUIRES_E2H1,
+ FEAT_AA32EL0),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_BT0, REQUIRES_E2H1, FEAT_BTI),
+ NEEDS_FEAT(SCTLR_EL2_BT, FEAT_BTI),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_CMOW, REQUIRES_E2H1, FEAT_CMOW),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_TSCXT,
+ RES1_WHEN_E2H1 | REQUIRES_E2H1,
+ feat_csv2_2_csv2_1p2),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_EIS |
+ SCTLR_EL2_EOS,
+ AS_RES1, FEAT_ExS),
+ NEEDS_FEAT(SCTLR_EL2_EnFPM, FEAT_FPMR),
+ NEEDS_FEAT(SCTLR_EL2_IESB, FEAT_IESB),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_EnALS, REQUIRES_E2H1, FEAT_LS64),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_EnAS0, REQUIRES_E2H1, FEAT_LS64_ACCDATA),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_EnASR, REQUIRES_E2H1, FEAT_LS64_V),
+ NEEDS_FEAT(SCTLR_EL2_nAA, FEAT_LSE2),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_LSMAOE |
+ SCTLR_EL2_nTLSMD,
+ AS_RES1 | REQUIRES_E2H1, FEAT_LSMAOC),
+ NEEDS_FEAT(SCTLR_EL2_EE, FEAT_MixedEnd),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_E0E, REQUIRES_E2H1, feat_mixedendel0),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_MSCEn, REQUIRES_E2H1, FEAT_MOPS),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_ATA0 |
+ SCTLR_EL2_TCF0,
+ REQUIRES_E2H1, FEAT_MTE2),
+ NEEDS_FEAT(SCTLR_EL2_ATA |
+ SCTLR_EL2_TCF,
+ FEAT_MTE2),
+ NEEDS_FEAT(SCTLR_EL2_ITFSB, feat_mte_async),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_TCSO0, REQUIRES_E2H1, FEAT_MTE_STORE_ONLY),
+ NEEDS_FEAT(SCTLR_EL2_TCSO,
+ FEAT_MTE_STORE_ONLY),
+ NEEDS_FEAT(SCTLR_EL2_NMI |
+ SCTLR_EL2_SPINTMASK,
+ FEAT_NMI),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_SPAN, AS_RES1 | REQUIRES_E2H1, FEAT_PAN),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_EPAN, REQUIRES_E2H1, FEAT_PAN3),
+ NEEDS_FEAT(SCTLR_EL2_EnDA |
+ SCTLR_EL2_EnDB |
+ SCTLR_EL2_EnIA |
+ SCTLR_EL2_EnIB,
+ feat_pauth),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_EnTP2, REQUIRES_E2H1, FEAT_SME),
+ NEEDS_FEAT(SCTLR_EL2_EnRCTX, FEAT_SPECRES),
+ NEEDS_FEAT(SCTLR_EL2_DSSBS, FEAT_SSBS),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_TIDCP, REQUIRES_E2H1, FEAT_TIDCP1),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_TWEDEL |
+ SCTLR_EL2_TWEDEn,
+ REQUIRES_E2H1, FEAT_TWED),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_nTWE |
+ SCTLR_EL2_nTWI,
+ AS_RES1 | REQUIRES_E2H1, FEAT_AA64EL2),
+ NEEDS_FEAT_FLAG(SCTLR_EL2_UCI |
+ SCTLR_EL2_UCT |
+ SCTLR_EL2_DZE |
+ SCTLR_EL2_SA0,
+ REQUIRES_E2H1, FEAT_AA64EL2),
+ NEEDS_FEAT(SCTLR_EL2_WXN |
+ SCTLR_EL2_I |
+ SCTLR_EL2_SA |
+ SCTLR_EL2_C |
+ SCTLR_EL2_A |
+ SCTLR_EL2_M,
+ FEAT_AA64EL2),
+ FORCE_RES0(SCTLR_EL2_RES0),
+ FORCE_RES1(SCTLR_EL2_RES1),
+};
+
+static const DECLARE_FEAT_MAP(sctlr_el2_desc, SCTLR_EL2,
+ sctlr_el2_feat_map, FEAT_AA64EL2);
+
static const struct reg_bits_to_feat_map mdcr_el2_feat_map[] = {
NEEDS_FEAT(MDCR_EL2_EBWE, FEAT_Debugv8p9),
NEEDS_FEAT(MDCR_EL2_TDOSA, FEAT_DoubleLock),
@@ -1189,6 +1230,8 @@ static const struct reg_bits_to_feat_map mdcr_el2_feat_map[] = {
MDCR_EL2_TDE |
MDCR_EL2_TDRA,
FEAT_AA64EL1),
+ FORCE_RES0(MDCR_EL2_RES0),
+ FORCE_RES1(MDCR_EL2_RES1),
};
static const DECLARE_FEAT_MAP(mdcr_el2_desc, MDCR_EL2,
@@ -1227,6 +1270,8 @@ static const struct reg_bits_to_feat_map vtcr_el2_feat_map[] = {
VTCR_EL2_SL0 |
VTCR_EL2_T0SZ,
FEAT_AA64EL1),
+ FORCE_RES0(VTCR_EL2_RES0),
+ FORCE_RES1(VTCR_EL2_RES1),
};
static const DECLARE_FEAT_MAP(vtcr_el2_desc, VTCR_EL2,
@@ -1237,8 +1282,14 @@ static void __init check_feat_map(const struct reg_bits_to_feat_map *map,
{
u64 mask = 0;
+ /*
+ * Don't account for FORCE_RESx that are architectural, and
+ * therefore part of the resx parameter. Other FORCE_RESx bits
+ * are implementation choices, and therefore accounted for.
+ */
for (int i = 0; i < map_size; i++)
- mask |= map[i].bits;
+ if (!((map[i].flags & FORCE_RESx) && (map[i].bits & resx)))
+ mask |= map[i].bits;
if (mask != ~resx)
kvm_err("Undefined %s behaviour, bits %016llx\n",
@@ -1274,6 +1325,7 @@ void __init check_feature_map(void)
check_reg_desc(&sctlr2_desc);
check_reg_desc(&tcr2_el2_desc);
check_reg_desc(&sctlr_el1_desc);
+ check_reg_desc(&sctlr_el2_desc);
check_reg_desc(&mdcr_el2_desc);
check_reg_desc(&vtcr_el2_desc);
}
@@ -1292,14 +1344,14 @@ static bool idreg_feat_match(struct kvm *kvm, const struct reg_bits_to_feat_map
}
}
-static u64 __compute_fixed_bits(struct kvm *kvm,
- const struct reg_bits_to_feat_map *map,
- int map_size,
- u64 *fixed_bits,
- unsigned long require,
- unsigned long exclude)
+static struct resx compute_resx_bits(struct kvm *kvm,
+ const struct reg_bits_to_feat_map *map,
+ int map_size,
+ unsigned long require,
+ unsigned long exclude)
{
- u64 val = 0;
+ bool e2h0 = kvm_has_feat(kvm, FEAT_E2H0);
+ struct resx resx = {};
for (int i = 0; i < map_size; i++) {
bool match;
@@ -1310,60 +1362,72 @@ static u64 __compute_fixed_bits(struct kvm *kvm,
if (map[i].flags & exclude)
continue;
- if (map[i].flags & CALL_FUNC)
- match = (map[i].flags & FIXED_VALUE) ?
- map[i].fval(kvm, fixed_bits) :
- map[i].match(kvm);
+ if (map[i].flags & FORCE_RESx)
+ match = false;
+ else if (map[i].flags & CALL_FUNC)
+ match = map[i].match(kvm);
else
match = idreg_feat_match(kvm, &map[i]);
- if (!match || (map[i].flags & FIXED_VALUE))
- val |= reg_feat_map_bits(&map[i]);
- }
+ if (map[i].flags & REQUIRES_E2H1)
+ match &= !e2h0;
- return val;
-}
+ if (!match) {
+ u64 bits = reg_feat_map_bits(&map[i]);
-static u64 compute_res0_bits(struct kvm *kvm,
- const struct reg_bits_to_feat_map *map,
- int map_size,
- unsigned long require,
- unsigned long exclude)
-{
- return __compute_fixed_bits(kvm, map, map_size, NULL,
- require, exclude | FIXED_VALUE);
-}
+ if ((map[i].flags & AS_RES1) ||
+ (e2h0 && (map[i].flags & RES1_WHEN_E2H0)) ||
+ (!e2h0 && (map[i].flags & RES1_WHEN_E2H1)))
+ resx.res1 |= bits;
+ else
+ resx.res0 |= bits;
+ }
+ }
-static u64 compute_reg_res0_bits(struct kvm *kvm,
- const struct reg_feat_map_desc *r,
- unsigned long require, unsigned long exclude)
+ return resx;
+}
+static struct resx compute_reg_resx_bits(struct kvm *kvm,
+ const struct reg_feat_map_desc *r,
+ unsigned long require,
+ unsigned long exclude)
{
- u64 res0;
+ struct resx resx;
- res0 = compute_res0_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz,
+ resx = compute_resx_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz,
require, exclude);
+ if (r->feat_map.flags & MASKS_POINTER) {
+ resx.res0 |= r->feat_map.masks->res0;
+ resx.res1 |= r->feat_map.masks->res1;
+ }
+
/*
- * If computing FGUs, don't take RES0 or register existence
- * into account -- we're not computing bits for the register
- * itself.
+ * If the register itself was not valid, all the non-RESx bits are
+ * now considered RES0 (this matches the behaviour of registers such
+ * as SCTLR2 and TCR2). Weed out any potential (though unlikely)
+ * overlap with RES1 bits coming from the previous computation.
*/
- if (!(exclude & NEVER_FGU)) {
- res0 |= compute_res0_bits(kvm, &r->feat_map, 1, require, exclude);
- res0 |= ~reg_feat_map_bits(&r->feat_map);
- }
+ resx.res0 |= compute_resx_bits(kvm, &r->feat_map, 1, require, exclude).res0;
+ resx.res1 &= ~resx.res0;
- return res0;
+ return resx;
}
-static u64 compute_reg_fixed_bits(struct kvm *kvm,
- const struct reg_feat_map_desc *r,
- u64 *fixed_bits, unsigned long require,
- unsigned long exclude)
+static u64 compute_fgu_bits(struct kvm *kvm, const struct reg_feat_map_desc *r)
{
- return __compute_fixed_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz,
- fixed_bits, require | FIXED_VALUE, exclude);
+ struct resx resx;
+
+ /*
+ * If computing FGUs, we collect the unsupported feature bits as
+ * RESx bits, but don't take the actual RESx bits or register
+ * existence into account -- we're not computing bits for the
+ * register itself.
+ */
+ resx = compute_resx_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz,
+ 0, NEVER_FGU);
+
+ return resx.res0 | resx.res1;
}
void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt)
@@ -1372,40 +1436,29 @@ void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt)
switch (fgt) {
case HFGRTR_GROUP:
- val |= compute_reg_res0_bits(kvm, &hfgrtr_desc,
- 0, NEVER_FGU);
- val |= compute_reg_res0_bits(kvm, &hfgwtr_desc,
- 0, NEVER_FGU);
+ val |= compute_fgu_bits(kvm, &hfgrtr_desc);
+ val |= compute_fgu_bits(kvm, &hfgwtr_desc);
break;
case HFGITR_GROUP:
- val |= compute_reg_res0_bits(kvm, &hfgitr_desc,
- 0, NEVER_FGU);
+ val |= compute_fgu_bits(kvm, &hfgitr_desc);
break;
case HDFGRTR_GROUP:
- val |= compute_reg_res0_bits(kvm, &hdfgrtr_desc,
- 0, NEVER_FGU);
- val |= compute_reg_res0_bits(kvm, &hdfgwtr_desc,
- 0, NEVER_FGU);
+ val |= compute_fgu_bits(kvm, &hdfgrtr_desc);
+ val |= compute_fgu_bits(kvm, &hdfgwtr_desc);
break;
case HAFGRTR_GROUP:
- val |= compute_reg_res0_bits(kvm, &hafgrtr_desc,
- 0, NEVER_FGU);
+ val |= compute_fgu_bits(kvm, &hafgrtr_desc);
break;
case HFGRTR2_GROUP:
- val |= compute_reg_res0_bits(kvm, &hfgrtr2_desc,
- 0, NEVER_FGU);
- val |= compute_reg_res0_bits(kvm, &hfgwtr2_desc,
- 0, NEVER_FGU);
+ val |= compute_fgu_bits(kvm, &hfgrtr2_desc);
+ val |= compute_fgu_bits(kvm, &hfgwtr2_desc);
break;
case HFGITR2_GROUP:
- val |= compute_reg_res0_bits(kvm, &hfgitr2_desc,
- 0, NEVER_FGU);
+ val |= compute_fgu_bits(kvm, &hfgitr2_desc);
break;
case HDFGRTR2_GROUP:
- val |= compute_reg_res0_bits(kvm, &hdfgrtr2_desc,
- 0, NEVER_FGU);
- val |= compute_reg_res0_bits(kvm, &hdfgwtr2_desc,
- 0, NEVER_FGU);
+ val |= compute_fgu_bits(kvm, &hdfgrtr2_desc);
+ val |= compute_fgu_bits(kvm, &hdfgwtr2_desc);
break;
default:
BUG();
@@ -1414,91 +1467,77 @@ void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt)
kvm->arch.fgu[fgt] = val;
}
-void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1)
+struct resx get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg)
{
- u64 fixed = 0, mask;
+ struct resx resx;
switch (reg) {
case HFGRTR_EL2:
- *res0 = compute_reg_res0_bits(kvm, &hfgrtr_desc, 0, 0);
- *res1 = HFGRTR_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &hfgrtr_desc, 0, 0);
break;
case HFGWTR_EL2:
- *res0 = compute_reg_res0_bits(kvm, &hfgwtr_desc, 0, 0);
- *res1 = HFGWTR_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &hfgwtr_desc, 0, 0);
break;
case HFGITR_EL2:
- *res0 = compute_reg_res0_bits(kvm, &hfgitr_desc, 0, 0);
- *res1 = HFGITR_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &hfgitr_desc, 0, 0);
break;
case HDFGRTR_EL2:
- *res0 = compute_reg_res0_bits(kvm, &hdfgrtr_desc, 0, 0);
- *res1 = HDFGRTR_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &hdfgrtr_desc, 0, 0);
break;
case HDFGWTR_EL2:
- *res0 = compute_reg_res0_bits(kvm, &hdfgwtr_desc, 0, 0);
- *res1 = HDFGWTR_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &hdfgwtr_desc, 0, 0);
break;
case HAFGRTR_EL2:
- *res0 = compute_reg_res0_bits(kvm, &hafgrtr_desc, 0, 0);
- *res1 = HAFGRTR_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &hafgrtr_desc, 0, 0);
break;
case HFGRTR2_EL2:
- *res0 = compute_reg_res0_bits(kvm, &hfgrtr2_desc, 0, 0);
- *res1 = HFGRTR2_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &hfgrtr2_desc, 0, 0);
break;
case HFGWTR2_EL2:
- *res0 = compute_reg_res0_bits(kvm, &hfgwtr2_desc, 0, 0);
- *res1 = HFGWTR2_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &hfgwtr2_desc, 0, 0);
break;
case HFGITR2_EL2:
- *res0 = compute_reg_res0_bits(kvm, &hfgitr2_desc, 0, 0);
- *res1 = HFGITR2_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &hfgitr2_desc, 0, 0);
break;
case HDFGRTR2_EL2:
- *res0 = compute_reg_res0_bits(kvm, &hdfgrtr2_desc, 0, 0);
- *res1 = HDFGRTR2_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &hdfgrtr2_desc, 0, 0);
break;
case HDFGWTR2_EL2:
- *res0 = compute_reg_res0_bits(kvm, &hdfgwtr2_desc, 0, 0);
- *res1 = HDFGWTR2_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &hdfgwtr2_desc, 0, 0);
break;
case HCRX_EL2:
- *res0 = compute_reg_res0_bits(kvm, &hcrx_desc, 0, 0);
- *res1 = __HCRX_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &hcrx_desc, 0, 0);
+ resx.res1 |= __HCRX_EL2_RES1;
break;
case HCR_EL2:
- mask = compute_reg_fixed_bits(kvm, &hcr_desc, &fixed, 0, 0);
- *res0 = compute_reg_res0_bits(kvm, &hcr_desc, 0, 0);
- *res0 |= (mask & ~fixed);
- *res1 = HCR_EL2_RES1 | (mask & fixed);
+ resx = compute_reg_resx_bits(kvm, &hcr_desc, 0, 0);
break;
case SCTLR2_EL1:
case SCTLR2_EL2:
- *res0 = compute_reg_res0_bits(kvm, &sctlr2_desc, 0, 0);
- *res1 = SCTLR2_EL1_RES1;
+ resx = compute_reg_resx_bits(kvm, &sctlr2_desc, 0, 0);
break;
case TCR2_EL2:
- *res0 = compute_reg_res0_bits(kvm, &tcr2_el2_desc, 0, 0);
- *res1 = TCR2_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &tcr2_el2_desc, 0, 0);
break;
case SCTLR_EL1:
- *res0 = compute_reg_res0_bits(kvm, &sctlr_el1_desc, 0, 0);
- *res1 = SCTLR_EL1_RES1;
+ resx = compute_reg_resx_bits(kvm, &sctlr_el1_desc, 0, 0);
+ break;
+ case SCTLR_EL2:
+ resx = compute_reg_resx_bits(kvm, &sctlr_el2_desc, 0, 0);
break;
case MDCR_EL2:
- *res0 = compute_reg_res0_bits(kvm, &mdcr_el2_desc, 0, 0);
- *res1 = MDCR_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &mdcr_el2_desc, 0, 0);
break;
case VTCR_EL2:
- *res0 = compute_reg_res0_bits(kvm, &vtcr_el2_desc, 0, 0);
- *res1 = VTCR_EL2_RES1;
+ resx = compute_reg_resx_bits(kvm, &vtcr_el2_desc, 0, 0);
break;
default:
WARN_ON_ONCE(1);
- *res0 = *res1 = 0;
+ resx = (typeof(resx)){};
break;
}
+
+ return resx;
}
static __always_inline struct fgt_masks *__fgt_reg_to_masks(enum vcpu_sysreg reg)
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 0af83a48d20e..bcc553307171 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -2435,15 +2435,7 @@ static enum trap_behaviour compute_trap_behaviour(struct kvm_vcpu *vcpu,
static u64 kvm_get_sysreg_res0(struct kvm *kvm, enum vcpu_sysreg sr)
{
- struct kvm_sysreg_masks *masks;
-
- /* Only handle the VNCR-backed regs for now */
- if (sr < __VNCR_START__)
- return 0;
-
- masks = kvm->arch.sysreg_masks;
-
- return masks->mask[sr - __SANITISED_REG_START__].res0;
+ return kvm_get_sysreg_resx(kvm, sr).res0;
}
static bool check_fgt_bit(struct kvm_vcpu *vcpu, enum vcpu_sysreg sr,
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 486eba72bb02..ed710228484f 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -1505,11 +1505,6 @@ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
u64 orig_val = val;
switch (reg) {
- case SYS_ID_AA64ISAR0_EL1:
- /* Support everything but TME */
- val &= ~ID_AA64ISAR0_EL1_TME;
- break;
-
case SYS_ID_AA64ISAR1_EL1:
/* Support everything but LS64 and Spec Invalidation */
val &= ~(ID_AA64ISAR1_EL1_LS64 |
@@ -1669,36 +1664,28 @@ u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
enum vcpu_sysreg sr, u64 v)
{
- struct kvm_sysreg_masks *masks;
+ struct resx resx;
- masks = vcpu->kvm->arch.sysreg_masks;
-
- if (masks) {
- sr -= __SANITISED_REG_START__;
-
- v &= ~masks->mask[sr].res0;
- v |= masks->mask[sr].res1;
- }
+ resx = kvm_get_sysreg_resx(vcpu->kvm, sr);
+ v &= ~resx.res0;
+ v |= resx.res1;
return v;
}
-static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
+static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, struct resx resx)
{
- int i = sr - __SANITISED_REG_START__;
-
BUILD_BUG_ON(!__builtin_constant_p(sr));
BUILD_BUG_ON(sr < __SANITISED_REG_START__);
BUILD_BUG_ON(sr >= NR_SYS_REGS);
- kvm->arch.sysreg_masks->mask[i].res0 = res0;
- kvm->arch.sysreg_masks->mask[i].res1 = res1;
+ kvm_set_sysreg_resx(kvm, sr, resx);
}
int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- u64 res0, res1;
+ struct resx resx;
lockdep_assert_held(&kvm->arch.config_lock);
@@ -1711,110 +1698,116 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
return -ENOMEM;
/* VTTBR_EL2 */
- res0 = res1 = 0;
+ resx = (typeof(resx)){};
if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
- res0 |= GENMASK(63, 56);
+ resx.res0 |= GENMASK(63, 56);
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, CnP, IMP))
- res0 |= VTTBR_CNP_BIT;
- set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);
+ resx.res0 |= VTTBR_CNP_BIT;
+ set_sysreg_masks(kvm, VTTBR_EL2, resx);
/* VTCR_EL2 */
- get_reg_fixed_bits(kvm, VTCR_EL2, &res0, &res1);
- set_sysreg_masks(kvm, VTCR_EL2, res0, res1);
+ resx = get_reg_fixed_bits(kvm, VTCR_EL2);
+ set_sysreg_masks(kvm, VTCR_EL2, resx);
/* VMPIDR_EL2 */
- res0 = GENMASK(63, 40) | GENMASK(30, 24);
- res1 = BIT(31);
- set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
+ resx.res0 = GENMASK(63, 40) | GENMASK(30, 24);
+ resx.res1 = BIT(31);
+ set_sysreg_masks(kvm, VMPIDR_EL2, resx);
/* HCR_EL2 */
- get_reg_fixed_bits(kvm, HCR_EL2, &res0, &res1);
- set_sysreg_masks(kvm, HCR_EL2, res0, res1);
+ resx = get_reg_fixed_bits(kvm, HCR_EL2);
+ set_sysreg_masks(kvm, HCR_EL2, resx);
/* HCRX_EL2 */
- get_reg_fixed_bits(kvm, HCRX_EL2, &res0, &res1);
- set_sysreg_masks(kvm, HCRX_EL2, res0, res1);
+ resx = get_reg_fixed_bits(kvm, HCRX_EL2);
+ set_sysreg_masks(kvm, HCRX_EL2, resx);
/* HFG[RW]TR_EL2 */
- get_reg_fixed_bits(kvm, HFGRTR_EL2, &res0, &res1);
- set_sysreg_masks(kvm, HFGRTR_EL2, res0, res1);
- get_reg_fixed_bits(kvm, HFGWTR_EL2, &res0, &res1);
- set_sysreg_masks(kvm, HFGWTR_EL2, res0, res1);
+ resx = get_reg_fixed_bits(kvm, HFGRTR_EL2);
+ set_sysreg_masks(kvm, HFGRTR_EL2, resx);
+ resx = get_reg_fixed_bits(kvm, HFGWTR_EL2);
+ set_sysreg_masks(kvm, HFGWTR_EL2, resx);
/* HDFG[RW]TR_EL2 */
- get_reg_fixed_bits(kvm, HDFGRTR_EL2, &res0, &res1);
- set_sysreg_masks(kvm, HDFGRTR_EL2, res0, res1);
- get_reg_fixed_bits(kvm, HDFGWTR_EL2, &res0, &res1);
- set_sysreg_masks(kvm, HDFGWTR_EL2, res0, res1);
+ resx = get_reg_fixed_bits(kvm, HDFGRTR_EL2);
+ set_sysreg_masks(kvm, HDFGRTR_EL2, resx);
+ resx = get_reg_fixed_bits(kvm, HDFGWTR_EL2);
+ set_sysreg_masks(kvm, HDFGWTR_EL2, resx);
/* HFGITR_EL2 */
- get_reg_fixed_bits(kvm, HFGITR_EL2, &res0, &res1);
- set_sysreg_masks(kvm, HFGITR_EL2, res0, res1);
+ resx = get_reg_fixed_bits(kvm, HFGITR_EL2);
+ set_sysreg_masks(kvm, HFGITR_EL2, resx);
/* HAFGRTR_EL2 - not a lot to see here */
- get_reg_fixed_bits(kvm, HAFGRTR_EL2, &res0, &res1);
- set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1);
+ resx = get_reg_fixed_bits(kvm, HAFGRTR_EL2);
+ set_sysreg_masks(kvm, HAFGRTR_EL2, resx);
/* HFG[RW]TR2_EL2 */
- get_reg_fixed_bits(kvm, HFGRTR2_EL2, &res0, &res1);
- set_sysreg_masks(kvm, HFGRTR2_EL2, res0, res1);
- get_reg_fixed_bits(kvm, HFGWTR2_EL2, &res0, &res1);
- set_sysreg_masks(kvm, HFGWTR2_EL2, res0, res1);
+ resx = get_reg_fixed_bits(kvm, HFGRTR2_EL2);
+ set_sysreg_masks(kvm, HFGRTR2_EL2, resx);
+ resx = get_reg_fixed_bits(kvm, HFGWTR2_EL2);
+ set_sysreg_masks(kvm, HFGWTR2_EL2, resx);
/* HDFG[RW]TR2_EL2 */
- get_reg_fixed_bits(kvm, HDFGRTR2_EL2, &res0, &res1);
- set_sysreg_masks(kvm, HDFGRTR2_EL2, res0, res1);
- get_reg_fixed_bits(kvm, HDFGWTR2_EL2, &res0, &res1);
- set_sysreg_masks(kvm, HDFGWTR2_EL2, res0, res1);
+ resx = get_reg_fixed_bits(kvm, HDFGRTR2_EL2);
+ set_sysreg_masks(kvm, HDFGRTR2_EL2, resx);
+ resx = get_reg_fixed_bits(kvm, HDFGWTR2_EL2);
+ set_sysreg_masks(kvm, HDFGWTR2_EL2, resx);
/* HFGITR2_EL2 */
- get_reg_fixed_bits(kvm, HFGITR2_EL2, &res0, &res1);
- set_sysreg_masks(kvm, HFGITR2_EL2, res0, res1);
+ resx = get_reg_fixed_bits(kvm, HFGITR2_EL2);
+ set_sysreg_masks(kvm, HFGITR2_EL2, resx);
/* TCR2_EL2 */
- get_reg_fixed_bits(kvm, TCR2_EL2, &res0, &res1);
- set_sysreg_masks(kvm, TCR2_EL2, res0, res1);
+ resx = get_reg_fixed_bits(kvm, TCR2_EL2);
+ set_sysreg_masks(kvm, TCR2_EL2, resx);
/* SCTLR_EL1 */
- get_reg_fixed_bits(kvm, SCTLR_EL1, &res0, &res1);
- set_sysreg_masks(kvm, SCTLR_EL1, res0, res1);
+ resx = get_reg_fixed_bits(kvm, SCTLR_EL1);
+ set_sysreg_masks(kvm, SCTLR_EL1, resx);
+
+ /* SCTLR_EL2 */
+ resx = get_reg_fixed_bits(kvm, SCTLR_EL2);
+ set_sysreg_masks(kvm, SCTLR_EL2, resx);
/* SCTLR2_ELx */
- get_reg_fixed_bits(kvm, SCTLR2_EL1, &res0, &res1);
- set_sysreg_masks(kvm, SCTLR2_EL1, res0, res1);
- get_reg_fixed_bits(kvm, SCTLR2_EL2, &res0, &res1);
- set_sysreg_masks(kvm, SCTLR2_EL2, res0, res1);
+ resx = get_reg_fixed_bits(kvm, SCTLR2_EL1);
+ set_sysreg_masks(kvm, SCTLR2_EL1, resx);
+ resx = get_reg_fixed_bits(kvm, SCTLR2_EL2);
+ set_sysreg_masks(kvm, SCTLR2_EL2, resx);
/* MDCR_EL2 */
- get_reg_fixed_bits(kvm, MDCR_EL2, &res0, &res1);
- set_sysreg_masks(kvm, MDCR_EL2, res0, res1);
+ resx = get_reg_fixed_bits(kvm, MDCR_EL2);
+ set_sysreg_masks(kvm, MDCR_EL2, resx);
/* CNTHCTL_EL2 */
- res0 = GENMASK(63, 20);
- res1 = 0;
+ resx.res0 = GENMASK(63, 20);
+ resx.res1 = 0;
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RME, IMP))
- res0 |= CNTHCTL_CNTPMASK | CNTHCTL_CNTVMASK;
+ resx.res0 |= CNTHCTL_CNTPMASK | CNTHCTL_CNTVMASK;
if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, CNTPOFF)) {
- res0 |= CNTHCTL_ECV;
+ resx.res0 |= CNTHCTL_ECV;
if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, IMP))
- res0 |= (CNTHCTL_EL1TVT | CNTHCTL_EL1TVCT |
- CNTHCTL_EL1NVPCT | CNTHCTL_EL1NVVCT);
+ resx.res0 |= (CNTHCTL_EL1TVT | CNTHCTL_EL1TVCT |
+ CNTHCTL_EL1NVPCT | CNTHCTL_EL1NVVCT);
}
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP))
- res0 |= GENMASK(11, 8);
- set_sysreg_masks(kvm, CNTHCTL_EL2, res0, res1);
+ resx.res0 |= GENMASK(11, 8);
+ set_sysreg_masks(kvm, CNTHCTL_EL2, resx);
/* ICH_HCR_EL2 */
- res0 = ICH_HCR_EL2_RES0;
- res1 = ICH_HCR_EL2_RES1;
+ resx.res0 = ICH_HCR_EL2_RES0;
+ resx.res1 = ICH_HCR_EL2_RES1;
if (!(kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_EL2_TDS))
- res0 |= ICH_HCR_EL2_TDIR;
+ resx.res0 |= ICH_HCR_EL2_TDIR;
/* No GICv4 is presented to the guest */
- res0 |= ICH_HCR_EL2_DVIM | ICH_HCR_EL2_vSGIEOICount;
- set_sysreg_masks(kvm, ICH_HCR_EL2, res0, res1);
+ resx.res0 |= ICH_HCR_EL2_DVIM | ICH_HCR_EL2_vSGIEOICount;
+ set_sysreg_masks(kvm, ICH_HCR_EL2, resx);
/* VNCR_EL2 */
- set_sysreg_masks(kvm, VNCR_EL2, VNCR_EL2_RES0, VNCR_EL2_RES1);
+ resx.res0 = VNCR_EL2_RES0;
+ resx.res1 = VNCR_EL2_RES1;
+ set_sysreg_masks(kvm, VNCR_EL2, resx);
out:
for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 95c467c090ba..a7cd0badc20c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -5056,10 +5056,78 @@ static const struct seq_operations idregs_debug_sops = {
DEFINE_SEQ_ATTRIBUTE(idregs_debug);
+static const struct sys_reg_desc *sr_resx_find(struct kvm *kvm, loff_t pos)
+{
+ unsigned long i, sr_idx = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
+ const struct sys_reg_desc *r = &sys_reg_descs[i];
+
+ if (r->reg < __SANITISED_REG_START__)
+ continue;
+
+ if (sr_idx++ == pos)
+ return r;
+ }
+
+ return NULL;
+}
+
+static void *sr_resx_start(struct seq_file *s, loff_t *pos)
+{
+ struct kvm *kvm = s->private;
+
+ if (!kvm->arch.sysreg_masks)
+ return NULL;
+
+ return (void *)sr_resx_find(kvm, *pos);
+}
+
+static void *sr_resx_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct kvm *kvm = s->private;
+
+ (*pos)++;
+
+ return (void *)sr_resx_find(kvm, *pos);
+}
+
+static void sr_resx_stop(struct seq_file *s, void *v)
+{
+}
+
+static int sr_resx_show(struct seq_file *s, void *v)
+{
+ const struct sys_reg_desc *desc = v;
+ struct kvm *kvm = s->private;
+ struct resx resx;
+
+ if (!desc)
+ return 0;
+
+ resx = kvm_get_sysreg_resx(kvm, desc->reg);
+
+ seq_printf(s, "%20s:\tRES0:%016llx\tRES1:%016llx\n",
+ desc->name, resx.res0, resx.res1);
+
+ return 0;
+}
+
+static const struct seq_operations sr_resx_sops = {
+ .start = sr_resx_start,
+ .next = sr_resx_next,
+ .stop = sr_resx_stop,
+ .show = sr_resx_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(sr_resx);
+
void kvm_sys_regs_create_debugfs(struct kvm *kvm)
{
debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
&idregs_debug_fops);
+ debugfs_create_file("resx", 0444, kvm->debugfs_dentry, kvm,
+ &sr_resx_fops);
}
static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 87585591b931..9d1c21108057 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -1856,10 +1856,7 @@ UnsignedEnum 31:28 RDM
0b0000 NI
0b0001 IMP
EndEnum
-UnsignedEnum 27:24 TME
- 0b0000 NI
- 0b0001 IMP
-EndEnum
+Res0 27:24
UnsignedEnum 23:20 ATOMIC
0b0000 NI
0b0010 IMP
@@ -2433,10 +2430,7 @@ Field 57 EPAN
Field 56 EnALS
Field 55 EnAS0
Field 54 EnASR
-Field 53 TME
-Field 52 TME0
-Field 51 TMT
-Field 50 TMT0
+Res0 53:50
Field 49:46 TWEDEL
Field 45 TWEDEn
Field 44 DSSBS
@@ -3750,6 +3744,75 @@ UnsignedEnum 2:0 F8S1
EndEnum
EndSysreg
+Sysreg SCTLR_EL2 3 4 1 0 0
+Field 63 TIDCP
+Field 62 SPINTMASK
+Field 61 NMI
+Field 60 EnTP2
+Field 59 TCSO
+Field 58 TCSO0
+Field 57 EPAN
+Field 56 EnALS
+Field 55 EnAS0
+Field 54 EnASR
+Res0 53:50
+Field 49:46 TWEDEL
+Field 45 TWEDEn
+Field 44 DSSBS
+Field 43 ATA
+Field 42 ATA0
+Enum 41:40 TCF
+ 0b00 NONE
+ 0b01 SYNC
+ 0b10 ASYNC
+ 0b11 ASYMM
+EndEnum
+Enum 39:38 TCF0
+ 0b00 NONE
+ 0b01 SYNC
+ 0b10 ASYNC
+ 0b11 ASYMM
+EndEnum
+Field 37 ITFSB
+Field 36 BT
+Field 35 BT0
+Field 34 EnFPM
+Field 33 MSCEn
+Field 32 CMOW
+Field 31 EnIA
+Field 30 EnIB
+Field 29 LSMAOE
+Field 28 nTLSMD
+Field 27 EnDA
+Field 26 UCI
+Field 25 EE
+Field 24 E0E
+Field 23 SPAN
+Field 22 EIS
+Field 21 IESB
+Field 20 TSCXT
+Field 19 WXN
+Field 18 nTWE
+Res0 17
+Field 16 nTWI
+Field 15 UCT
+Field 14 DZE
+Field 13 EnDB
+Field 12 I
+Field 11 EOS
+Field 10 EnRCTX
+Res0 9
+Field 8 SED
+Field 7 ITD
+Field 6 nAA
+Field 5 CP15BEN
+Field 4 SA0
+Field 3 SA
+Field 2 C
+Field 1 A
+Field 0 M
+EndSysreg
+
Sysreg HCR_EL2 3 4 1 1 0
Field 63:60 TWEDEL
Field 59 TWEDEn
@@ -3772,8 +3835,7 @@ Field 43 NV1
Field 42 NV
Field 41 API
Field 40 APK
-Field 39 TME
-Field 38 MIOCNCE
+Res0 39:38
Field 37 TEA
Field 36 TERR
Field 35 TLOR
diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h
index 178b7322bf04..f75efe98e9df 100644
--- a/tools/arch/arm64/include/asm/sysreg.h
+++ b/tools/arch/arm64/include/asm/sysreg.h
@@ -847,12 +847,6 @@
#define SCTLR_ELx_A (BIT(1))
#define SCTLR_ELx_M (BIT(0))
-/* SCTLR_EL2 specific flags. */
-#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \
- (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \
- (BIT(29)))
-
-#define SCTLR_EL2_BT (BIT(36))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
#else
diff --git a/tools/perf/Documentation/perf-arm-spe.txt b/tools/perf/Documentation/perf-arm-spe.txt
index 8b02e5b983fa..201a82bec0de 100644
--- a/tools/perf/Documentation/perf-arm-spe.txt
+++ b/tools/perf/Documentation/perf-arm-spe.txt
@@ -176,7 +176,6 @@ and inv_event_filter are:
bit 10 - Remote access (FEAT_SPEv1p4)
bit 11 - Misaligned access (FEAT_SPEv1p1)
bit 12-15 - IMPLEMENTATION DEFINED events (when implemented)
- bit 16 - Transaction (FEAT_TME)
bit 17 - Partial or empty SME or SVE predicate (FEAT_SPEv1p1)
bit 18 - Empty SME or SVE predicate (FEAT_SPEv1p1)
bit 19 - L2D access (FEAT_SPEv1p4)
diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c
index c4815d365816..73de5be58bab 100644
--- a/tools/testing/selftests/kvm/arm64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c
@@ -91,7 +91,6 @@ static const struct reg_ftr_bits ftr_id_aa64isar0_el1[] = {
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM3, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA3, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RDM, 0),
- REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TME, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, ATOMIC, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, CRC32, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA2, 0),