authorFuad Tabba <tabba@google.com>2026-02-02 15:22:53 +0000
committerMarc Zyngier <maz@kernel.org>2026-02-02 15:44:39 +0000
commit0c4762e26879acc101790269382f230f22fd6905 (patch)
treec6a4990e9a7e0751958e51602fd62a0b6977c88c
parent82a32eacbacc6f7e372f98999e5ee1ee0dd7462d (diff)
KVM: arm64: nv: Avoid NV stage-2 code when NV is not supported
The NV stage-2 manipulation functions kvm_nested_s2_unmap(),
kvm_nested_s2_wp(), and others are called for any stage-2 manipulation,
regardless of whether nested virtualization is supported or enabled for
the VM.

For protected KVM (pKVM), struct kvm_pgtable uses the pkvm_mappings
member of the union. This member aliases ia_bits, which is used by the
non-protected NV code paths. Attempting to read pgt->ia_bits in these
functions results in treating protected mapping pointers or state values
as bit-shift amounts, triggering a UBSAN shift-out-of-bounds error:

  UBSAN: shift-out-of-bounds in arch/arm64/kvm/nested.c:1127:34
  shift exponent 174565952 is too large for 64-bit type 'unsigned long'
  Call trace:
   __ubsan_handle_shift_out_of_bounds+0x28c/0x2c0
   kvm_nested_s2_unmap+0x228/0x248
   kvm_arch_flush_shadow_memslot+0x98/0xc0
   kvm_set_memslot+0x248/0xce0

Since pKVM and NV are mutually exclusive, prevent entry into these NV
handling functions if the VM has not allocated any nested MMUs (i.e.,
kvm->arch.nested_mmus_size is 0).

Fixes: 7270cc9157f47 ("KVM: arm64: nv: Handle VNCR_EL2 invalidation from MMU notifiers")
Suggested-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://patch.msgid.link/20260202152310.113467-1-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
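[Editor's illustration, not part of the patch: a minimal userspace C sketch of the union aliasing described above. The struct is a hypothetical simplification that reuses the field names from the commit message; it is not the real struct kvm_pgtable layout.]

/* Hypothetical, simplified stand-in for the union described in the
 * commit message: the pointer member used by pKVM overlaps the integer
 * member used by the non-protected NV paths. */
#include <stdint.h>
#include <stdio.h>

struct pgtable_sketch {
	union {
		uint32_t ia_bits;	/* read by non-protected NV code */
		void *pkvm_mappings;	/* populated for protected (pKVM) guests */
	};
};

int main(void)
{
	struct pgtable_sketch pgt;

	/* A pKVM guest fills in the pointer member of the union... */
	pgt.pkvm_mappings = &pgt;

	/*
	 * ...so reading ia_bits reinterprets the low bits of that pointer
	 * as an integer. Using the result as a shift amount, e.g.
	 * 1UL << pgt.ia_bits, is exactly the kind of shift-out-of-bounds
	 * that UBSAN reported in kvm_nested_s2_unmap().
	 */
	printf("bogus shift exponent: %u\n", (unsigned)pgt.ia_bits);
	return 0;
}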
-rw-r--r--	arch/arm64/kvm/nested.c	12
1 file changed, 12 insertions, 0 deletions
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index cdeeb8f09e72..d03e9b71bf6c 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -1101,6 +1101,9 @@ void kvm_nested_s2_wp(struct kvm *kvm)
 	lockdep_assert_held_write(&kvm->mmu_lock);
 
+	if (!kvm->arch.nested_mmus_size)
+		return;
+
 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
@@ -1117,6 +1120,9 @@ void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
 	lockdep_assert_held_write(&kvm->mmu_lock);
 
+	if (!kvm->arch.nested_mmus_size)
+		return;
+
 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
@@ -1133,6 +1139,9 @@ void kvm_nested_s2_flush(struct kvm *kvm)
 	lockdep_assert_held_write(&kvm->mmu_lock);
 
+	if (!kvm->arch.nested_mmus_size)
+		return;
+
 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
@@ -1145,6 +1154,9 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
 	int i;
 
+	if (!kvm->arch.nested_mmus_size)
+		return;
+
 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];