summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    Marc Zyngier <maz@kernel.org> 2026-03-15 15:45:22 +0000
committer Marc Zyngier <maz@kernel.org> 2026-03-28 11:29:42 +0000
commit    e9550374d13a4bfd0b8a711733f5d423c2e56b96 (patch)
tree      50eeb85342aaa8ef1bae811980c98d005049be23
parent    fb9888fdfada0b5ad977f08f7550432a08aacbb1 (diff)
KVM: arm64: Convert gmem_abort() to struct kvm_s2_fault_desc
Having fully converted user_mem_abort() to kvm_s2_fault_desc and co, convert gmem_abort() to it as well. The change is obviously much simpler.

Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
-rw-r--r--arch/arm64/kvm/mmu.c43
1 file changed, 20 insertions, 23 deletions
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 7863e428920a..03e1f389339c 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1572,34 +1572,32 @@ struct kvm_s2_fault_desc {
unsigned long hva;
};
-static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- struct kvm_s2_trans *nested,
- struct kvm_memory_slot *memslot, bool is_perm)
+static int gmem_abort(const struct kvm_s2_fault_desc *s2fd)
{
bool write_fault, exec_fault;
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
- struct kvm_pgtable *pgt = vcpu->arch.hw_mmu->pgt;
+ struct kvm_pgtable *pgt = s2fd->vcpu->arch.hw_mmu->pgt;
unsigned long mmu_seq;
struct page *page;
- struct kvm *kvm = vcpu->kvm;
+ struct kvm *kvm = s2fd->vcpu->kvm;
void *memcache;
kvm_pfn_t pfn;
gfn_t gfn;
int ret;
- memcache = get_mmu_memcache(vcpu);
- ret = topup_mmu_memcache(vcpu, memcache);
+ memcache = get_mmu_memcache(s2fd->vcpu);
+ ret = topup_mmu_memcache(s2fd->vcpu, memcache);
if (ret)
return ret;
- if (nested)
- gfn = kvm_s2_trans_output(nested) >> PAGE_SHIFT;
+ if (s2fd->nested)
+ gfn = kvm_s2_trans_output(s2fd->nested) >> PAGE_SHIFT;
else
- gfn = fault_ipa >> PAGE_SHIFT;
+ gfn = s2fd->fault_ipa >> PAGE_SHIFT;
- write_fault = kvm_is_write_fault(vcpu);
- exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
+ write_fault = kvm_is_write_fault(s2fd->vcpu);
+ exec_fault = kvm_vcpu_trap_is_exec_fault(s2fd->vcpu);
VM_WARN_ON_ONCE(write_fault && exec_fault);
@@ -1607,24 +1605,24 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
/* Pairs with the smp_wmb() in kvm_mmu_invalidate_end(). */
smp_rmb();
- ret = kvm_gmem_get_pfn(kvm, memslot, gfn, &pfn, &page, NULL);
+ ret = kvm_gmem_get_pfn(kvm, s2fd->memslot, gfn, &pfn, &page, NULL);
if (ret) {
- kvm_prepare_memory_fault_exit(vcpu, fault_ipa, PAGE_SIZE,
+ kvm_prepare_memory_fault_exit(s2fd->vcpu, s2fd->fault_ipa, PAGE_SIZE,
write_fault, exec_fault, false);
return ret;
}
- if (!(memslot->flags & KVM_MEM_READONLY))
+ if (!(s2fd->memslot->flags & KVM_MEM_READONLY))
prot |= KVM_PGTABLE_PROT_W;
- if (nested)
- prot = adjust_nested_fault_perms(nested, prot);
+ if (s2fd->nested)
+ prot = adjust_nested_fault_perms(s2fd->nested, prot);
if (exec_fault || cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
prot |= KVM_PGTABLE_PROT_X;
- if (nested)
- prot = adjust_nested_exec_perms(kvm, nested, prot);
+ if (s2fd->nested)
+ prot = adjust_nested_exec_perms(kvm, s2fd->nested, prot);
kvm_fault_lock(kvm);
if (mmu_invalidate_retry(kvm, mmu_seq)) {
@@ -1632,7 +1630,7 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
goto out_unlock;
}
- ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, fault_ipa, PAGE_SIZE,
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, s2fd->fault_ipa, PAGE_SIZE,
__pfn_to_phys(pfn), prot,
memcache, flags);
@@ -1641,7 +1639,7 @@ out_unlock:
kvm_fault_unlock(kvm);
if ((prot & KVM_PGTABLE_PROT_W) && !ret)
- mark_page_dirty_in_slot(kvm, memslot, gfn);
+ mark_page_dirty_in_slot(kvm, s2fd->memslot, gfn);
return ret != -EAGAIN ? ret : 0;
}
@@ -2299,8 +2297,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
};
if (kvm_slot_has_gmem(memslot))
- ret = gmem_abort(vcpu, fault_ipa, nested, memslot,
- esr_fsc_is_permission_fault(esr));
+ ret = gmem_abort(&s2fd);
else
ret = user_mem_abort(&s2fd);