author		Marc Zyngier <maz@kernel.org>	2025-08-25 13:13:56 +0100
committer	Marc Zyngier <maz@kernel.org>	2025-09-20 11:05:14 +0100
commit		50f77dc87f133b09db44a5bbfdd64b1ca83a8d8e (patch)
tree		3880d00b8dd0358c381af00b9f006df4c9d5fef1
parent		b8e625167a321138f83b1f6c99cf25d1290cb04e (diff)
KVM: arm64: Populate level on S1PTW SEA injection
Our fault injection mechanism is mildly primitive, and doesn't really
implement the architecture when it comes to reporting the level of a
failing S1 PTW (we blindly report a SEA outside of a PTW).

Now that we can walk the S1 page tables and look for a particular IPA
in the descriptors, it is pretty easy to improve the SEA injection
code.

Note that we only do it for AArch64 guests, and that 32bit guests are
left to their own devices (oddly enough, I don't fancy writing a 32bit
PTW...).

Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
-rw-r--r--	arch/arm64/kvm/inject_fault.c	27
1 file changed, 25 insertions(+), 2 deletions(-)
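For context on what "populating the level" changes in the reported syndrome, here is a
minimal user-space sketch (not kernel code) of the fault status codes involved. The
ESR_ELx_FSC_* definitions below are assumed to mirror arch/arm64/include/asm/esr.h and
the Arm ARM's DFSC encodings (0b0101LL for a SEA on a translation table walk at level LL,
0b010011 for level -1, 0b010000 for a SEA outside a walk):

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's definitions in asm/esr.h. */
#define ESR_ELx_FSC_EXTABT	0x10		/* SEA, not on a table walk */
#define ESR_ELx_FSC_SEA_TTW(n)	(0x14 + (n))	/* SEA on a walk at level n (-1..3) */

int main(void)
{
	for (int level = -1; level <= 3; level++)
		printf("S1PTW SEA, level %2d -> FSC 0x%02x\n",
		       level, ESR_ELx_FSC_SEA_TTW(level));

	/* What the old code reported unconditionally. */
	printf("SEA outside a walk   -> FSC 0x%02x\n", ESR_ELx_FSC_EXTABT);
	return 0;
}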
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 6745f38b64f9..dfcd66c65517 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -106,7 +106,30 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
-	u64 esr = 0;
+	u64 esr = 0, fsc;
+	int level;
+
+	/*
+	 * If injecting an abort from a failed S1PTW, rewalk the S1 PTs to
+	 * find the failing level. If we can't find it, assume the error was
+	 * transient and restart without changing the state.
+	 */
+	if (kvm_vcpu_abt_iss1tw(vcpu)) {
+		u64 hpfar = kvm_vcpu_get_fault_ipa(vcpu);
+		int ret;
+
+		if (hpfar == INVALID_GPA)
+			return;
+
+		ret = __kvm_find_s1_desc_level(vcpu, addr, hpfar, &level);
+		if (ret)
+			return;
+
+		WARN_ON_ONCE(level < -1 || level > 3);
+		fsc = ESR_ELx_FSC_SEA_TTW(level);
+	} else {
+		fsc = ESR_ELx_FSC_EXTABT;
+	}
 
 	/* This delight is brought to you by FEAT_DoubleFault2. */
 	if (effective_sctlr2_ease(vcpu))
@@ -133,7 +156,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 	if (!is_iabt)
 		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
 
-	esr |= ESR_ELx_FSC_EXTABT;
+	esr |= fsc;
 
 	vcpu_write_sys_reg(vcpu, addr, exception_far_elx(vcpu));
 	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
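And a rough sketch of how the hunk above combines the exception class with the new FSC
for an injected data abort, e.g. one taken on a failed stage-1 walk at level 2. The
ESR_ELx_* values are assumed to match asm/esr.h, and the IL and FAR handling that
inject_abt64() also performs is left out:

#include <stdio.h>
#include <stdint.h>

/* Assumed to match arch/arm64/include/asm/esr.h. */
#define ESR_ELx_EC_DABT_LOW	0x24
#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_FSC_SEA_TTW(n)	(0x14 + (n))

int main(void)
{
	uint64_t esr = 0;

	/* Data abort from a lower EL, as in the !is_iabt path. */
	esr |= (uint64_t)ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
	/* SEA on a level-2 stage-1 table walk instead of a bare SEA. */
	esr |= ESR_ELx_FSC_SEA_TTW(2);

	printf("injected ESR = 0x%llx\n", (unsigned long long)esr);	/* 0x90000016 */
	return 0;
}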