| author | Sean Christopherson <seanjc@google.com> | 2025-05-22 18:17:53 -0700 |
|---|---|---|
| committer | Sean Christopherson <seanjc@google.com> | 2025-06-24 13:29:31 -0700 |
| commit | ffe9d7966d0190a7f6db4dcacda0c8a12084ca09 | |
| tree | 7e03b4c2d545c09c9c38d7ba5f359dba7c65bfcd /arch | |
| parent | c126b46e6fa87eb27e08e2120a732ec988f20eb2 | |
KVM: x86/mmu: Locally cache whether a PFN is host MMIO when making a SPTE
When making a SPTE, cache whether or not the target PFN is host MMIO in
order to avoid multiple rounds of the slow path of kvm_is_mmio_pfn();
hitting pat_pfn_immune_to_uc_mtrr() in particular can be problematic. KVM
currently avoids multiple calls by virtue of the two users being mutually
exclusive (.get_mt_mask() is Intel-only, shadow_me_value is AMD-only), but
that won't hold true if/when KVM needs to detect host MMIO mappings for
other reasons, e.g. for mitigating the MMIO Stale Data vulnerability.
No functional change intended.
Tested-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Link: https://lore.kernel.org/r/20250523011756.3243624-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Diffstat (limited to 'arch')
| -rw-r--r-- | arch/x86/kvm/mmu/spte.c | 22 |
1 file changed, 18 insertions, 4 deletions
```diff
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index f262c380f40e..3f16c91aa042 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -104,7 +104,7 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
 	return spte;
 }
 
-static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
+static bool __kvm_is_mmio_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
 		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
@@ -125,6 +125,19 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 			     E820_TYPE_RAM);
 }
 
+static bool kvm_is_mmio_pfn(kvm_pfn_t pfn, int *is_host_mmio)
+{
+	/*
+	 * Determining if a PFN is host MMIO is relatively expensive.  Cache
+	 * the result locally (in the sole caller) to avoid doing the full
+	 * query multiple times when creating a single SPTE.
+	 */
+	if (*is_host_mmio < 0)
+		*is_host_mmio = __kvm_is_mmio_pfn(pfn);
+
+	return *is_host_mmio;
+}
+
 /*
  * Returns true if the SPTE needs to be updated atomically due to having bits
  * that may be changed without holding mmu_lock, and for which KVM must not
@@ -162,6 +175,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 {
 	int level = sp->role.level;
 	u64 spte = SPTE_MMU_PRESENT_MASK;
+	int is_host_mmio = -1;
 	bool wrprot = false;
 
 	/*
@@ -210,14 +224,14 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		spte |= PT_PAGE_SIZE_MASK;
 
 	if (kvm_x86_ops.get_mt_mask)
-		spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn));
-
+		spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
+						  kvm_is_mmio_pfn(pfn, &is_host_mmio));
 	if (host_writable)
 		spte |= shadow_host_writable_mask;
 	else
 		pte_access &= ~ACC_WRITE_MASK;
 
-	if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
+	if (shadow_me_value && !kvm_is_mmio_pfn(pfn, &is_host_mmio))
 		spte |= shadow_me_value;
 
 	spte |= (u64)pfn << PAGE_SHIFT;
```
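The change boils down to lazy, per-call memoization of one expensive predicate. Below is a minimal standalone sketch of that pattern, not KVM code: `expensive_is_host_mmio()` stands in for the real `__kvm_is_mmio_pfn()` slow path, and the function names, PFN value, and threshold are invented for illustration.

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for __kvm_is_mmio_pfn(): imagine pat_pfn_immune_to_uc_mtrr()
 * and friends hiding behind this call.  The threshold is made up purely
 * for demonstration.
 */
static bool expensive_is_host_mmio(unsigned long pfn)
{
	printf("slow path evaluated for pfn 0x%lx\n", pfn);
	return pfn >= 0x100000;
}

/*
 * Mirrors the patch's wrapper: *is_host_mmio is a tri-state cache,
 * where -1 means "not yet computed" and 0/1 is the cached result.
 */
static bool cached_is_host_mmio(unsigned long pfn, int *is_host_mmio)
{
	if (*is_host_mmio < 0)
		*is_host_mmio = expensive_is_host_mmio(pfn);

	return *is_host_mmio;
}

int main(void)
{
	/* One cache slot per make_spte()-like invocation. */
	int is_host_mmio = -1;

	/* The slow path runs once; the second query hits the cache. */
	printf("first:  %d\n", cached_is_host_mmio(0x123456, &is_host_mmio));
	printf("second: %d\n", cached_is_host_mmio(0x123456, &is_host_mmio));
	return 0;
}
```

Using an `int` rather than a `bool` for the cache is what makes the sentinel work: -1 flags "unknown", so the wrapper can distinguish "never asked" from a cached false. Keeping the cache in a local variable in make_spte() also sidesteps invalidation concerns, since the cached value lives no longer than the single SPTE being built.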
