| author | Marc Zyngier <maz@kernel.org> | 2026-02-05 09:17:58 +0000 |
|---|---|---|
| committer | Marc Zyngier <maz@kernel.org> | 2026-02-05 09:17:58 +0000 |
| commit | 6316366129d2885fae07c2774f4b7ae0a45fb55d (patch) | |
| tree | 86f2aeb0a8c1ee8627c4a0965e58d1ef1de792e0 | |
| parent | 1df3f01ebfb9e7d00fd10711fe9e98b25e01e59a (diff) | |
| parent | 0c4762e26879acc101790269382f230f22fd6905 (diff) | |
Merge branch kvm-arm64/misc-6.20 into kvmarm-master/next
* kvm-arm64/misc-6.20:
: .
: Misc KVM/arm64 changes for 6.20
:
: - Trivial FPSIMD cleanups
:
: - Calculate the hyp VA size only once, avoiding potential mapping issues
:   when the number of VA bits is smaller than expected
:
: - Silence sparse warning for the HYP stack base
:
: - Fix error checking when handling FFA_VERSION
:
: - Add missing trap configuration for DBGWCR15_EL1
:
: - Don't try to deal with nested S2 when NV isn't enabled for a guest
:
: - Various spelling fixes
: .
KVM: arm64: nv: Avoid NV stage-2 code when NV is not supported
KVM: arm64: Fix various comments
KVM: arm64: nv: Add trap config for DBGWCR<15>_EL1
KVM: arm64: Fix error checking for FFA_VERSION
KVM: arm64: Fix missing <asm/stacktrace/nvhe.h> include
KVM: arm64: Calculate hyp VA size only once
KVM: arm64: Remove ISB after writing FPEXC32_EL2
KVM: arm64: Shuffle KVM_HOST_DATA_FLAG_* indices
KVM: arm64: Fix comment in fpsimd_lazy_switch_to_host()
Signed-off-by: Marc Zyngier <maz@kernel.org>
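
The FFA_VERSION fix in the ffa.c hunks below is a signedness issue: `res.a0` is an unsigned 64-bit register value on arm64, while `FFA_RET_NOT_SUPPORTED` is a negative 32-bit error code, so without a cast the comparison promotes -1 to a 64-bit all-ones value and never matches a 32-bit error code returned by firmware. A minimal standalone sketch (plain C, not kernel code; the -1 error value and the zero-extended firmware return are assumptions used only for illustration):

```c
/*
 * Standalone illustration (not kernel code) of the FFA_VERSION check.
 * Assumptions: FFA_RET_NOT_SUPPORTED is the 32-bit error code -1, the
 * SMCCC result register a0 is an unsigned long (64-bit on arm64), and
 * the firmware leaves the upper 32 bits of the register zero.
 */
#include <stdint.h>
#include <stdio.h>

#define FFA_RET_NOT_SUPPORTED	(-1)

int main(void)
{
	/* Suppose firmware reports "not supported" as a 32-bit -1. */
	unsigned long a0 = 0xffffffffUL;

	/*
	 * Broken check: -1 is converted to 0xffffffffffffffff for the
	 * comparison, which never equals the zero-extended 0xffffffff.
	 */
	printf("direct compare: %d\n", a0 == FFA_RET_NOT_SUPPORTED);

	/* Fixed check: truncate to 32 bits first, then compare as signed. */
	printf("(s32) compare:  %d\n", (int32_t)a0 == FFA_RET_NOT_SUPPORTED);

	return 0;
}
```

The `(s32)` cast in the patch does the same thing: it reduces the register to its low 32 bits so the error-code comparison is done at the width the firmware actually uses.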
| -rw-r--r-- | arch/arm64/include/asm/kvm_host.h | 12 |
| -rw-r--r-- | arch/arm64/include/asm/kvm_mmu.h | 3 |
| -rw-r--r-- | arch/arm64/kvm/arm.c | 5 |
| -rw-r--r-- | arch/arm64/kvm/emulate-nested.c | 1 |
| -rw-r--r-- | arch/arm64/kvm/hyp/include/hyp/switch.h | 6 |
| -rw-r--r-- | arch/arm64/kvm/hyp/nvhe/ffa.c | 4 |
| -rw-r--r-- | arch/arm64/kvm/hyp/vhe/sysreg-sr.c | 2 |
| -rw-r--r-- | arch/arm64/kvm/mmu.c | 28 |
| -rw-r--r-- | arch/arm64/kvm/nested.c | 12 |
| -rw-r--r-- | arch/arm64/kvm/va_layout.c | 33 |
| -rw-r--r-- | arch/arm64/kvm/vgic/vgic-v3-nested.c | 2 |
11 files changed, 62 insertions, 46 deletions
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 26c1c3faedc3..5d5a3bbdb95e 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -201,7 +201,7 @@ struct kvm_s2_mmu {
	 * host to parse the guest S2.
	 * This either contains:
	 * - the virtual VTTBR programmed by the guest hypervisor with
-	 *   CnP cleared
+	 *   CnP cleared
	 * - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
	 *
	 * We also cache the full VTCR which gets used for TLB invalidation,
@@ -734,11 +734,11 @@ struct cpu_sve_state {
 struct kvm_host_data {
 #define KVM_HOST_DATA_FLAG_HAS_SPE			0
 #define KVM_HOST_DATA_FLAG_HAS_TRBE			1
-#define KVM_HOST_DATA_FLAG_TRBE_ENABLED			4
-#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED	5
-#define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT		6
-#define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED		7
-#define KVM_HOST_DATA_FLAG_HAS_BRBE			8
+#define KVM_HOST_DATA_FLAG_TRBE_ENABLED			2
+#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED	3
+#define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT		4
+#define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED		5
+#define KVM_HOST_DATA_FLAG_HAS_BRBE			6
 	unsigned long flags;

 	struct kvm_cpu_context host_ctxt;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 2dc5e6e742bb..d968aca0461a 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -103,6 +103,7 @@ alternative_cb_end
 void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
 void kvm_compute_layout(void);
+u32 kvm_hyp_va_bits(void);
 void kvm_apply_hyp_relocations(void);

 #define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)
@@ -185,7 +186,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 phys_addr_t kvm_mmu_get_httbr(void);
 phys_addr_t kvm_get_idmap_vector(void);
-int __init kvm_mmu_init(u32 *hyp_va_bits);
+int __init kvm_mmu_init(u32 hyp_va_bits);

 static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a3729196b91c..0f40d3da0550 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -40,6 +40,7 @@
 #include <asm/kvm_pkvm.h>
 #include <asm/kvm_ptrauth.h>
 #include <asm/sections.h>
+#include <asm/stacktrace/nvhe.h>

 #include <kvm/arm_hypercalls.h>
 #include <kvm/arm_pmu.h>
@@ -2623,7 +2624,7 @@ static void pkvm_hyp_init_ptrauth(void)
 /* Inits Hyp-mode on all online CPUs */
 static int __init init_hyp_mode(void)
 {
-	u32 hyp_va_bits;
+	u32 hyp_va_bits = kvm_hyp_va_bits();
 	int cpu;
 	int err = -ENOMEM;

@@ -2637,7 +2638,7 @@ static int __init init_hyp_mode(void)
	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
-	err = kvm_mmu_init(&hyp_va_bits);
+	err = kvm_mmu_init(hyp_va_bits);
	if (err)
		goto out_err;
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index bcc553307171..22d497554c94 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -1174,6 +1174,7 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
	SR_TRAP(SYS_DBGWCRn_EL1(12),	CGT_MDCR_TDE_TDA),
	SR_TRAP(SYS_DBGWCRn_EL1(13),	CGT_MDCR_TDE_TDA),
	SR_TRAP(SYS_DBGWCRn_EL1(14),	CGT_MDCR_TDE_TDA),
+	SR_TRAP(SYS_DBGWCRn_EL1(15),	CGT_MDCR_TDE_TDA),
	SR_TRAP(SYS_DBGCLAIMSET_EL1,	CGT_MDCR_TDE_TDA),
	SR_TRAP(SYS_DBGCLAIMCLR_EL1,	CGT_MDCR_TDE_TDA),
	SR_TRAP(SYS_DBGAUTHSTATUS_EL1,	CGT_MDCR_TDE_TDA),
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index afecbdd3c1e9..2597e8bda867 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -59,10 +59,8 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
-	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
+	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd())
		write_sysreg(1 << 30, fpexc32_el2);
-		isb();
-	}
 }

 static inline void __activate_cptr_traps_nvhe(struct kvm_vcpu *vcpu)
@@ -495,7 +493,7 @@ static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
	/*
	 * When the guest owns the FP regs, we know that guest+hyp traps for
	 * any FPSIMD/SVE/SME features exposed to the guest have been disabled
-	 * by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
+	 * by either __activate_cptr_traps() or kvm_hyp_handle_fpsimd()
	 * prior to __guest_entry(). As __guest_entry() guarantees a context
	 * synchronization event, we don't need an ISB here to avoid taking
	 * traps for anything that was exposed to the guest.
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index f731cc4c3f28..94161ea1cd60 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -792,7 +792,7 @@ static void do_ffa_version(struct arm_smccc_1_2_regs *res,
			.a0 = FFA_VERSION,
			.a1 = ffa_req_version,
		}, res);
-		if (res->a0 == FFA_RET_NOT_SUPPORTED)
+		if ((s32)res->a0 == FFA_RET_NOT_SUPPORTED)
			goto unlock;

		hyp_ffa_version = ffa_req_version;
@@ -943,7 +943,7 @@ int hyp_ffa_init(void *pages)
		.a0 = FFA_VERSION,
		.a1 = FFA_VERSION_1_2,
	}, &res);
-	if (res.a0 == FFA_RET_NOT_SUPPORTED)
+	if ((s32)res.a0 == FFA_RET_NOT_SUPPORTED)
		return 0;

	/*
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index f28c6cf4fe1b..b254d442e54e 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -205,7 +205,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)

	/*
	 * When running a normal EL1 guest, we only load a new vcpu
-	 * after a context switch, which imvolves a DSB, so all
+	 * after a context switch, which involves a DSB, so all
	 * speculative EL1&0 walks will have already completed.
	 * If running NV, the vcpu may transition between vEL1 and
	 * vEL2 without a context switch, so make sure we complete
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index e613ec5adc7a..8c5d259810b2 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -2294,11 +2294,9 @@ static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
	.virt_to_phys		= kvm_host_pa,
 };

-int __init kvm_mmu_init(u32 *hyp_va_bits)
+int __init kvm_mmu_init(u32 hyp_va_bits)
 {
	int err;
-	u32 idmap_bits;
-	u32 kernel_bits;

	hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
	hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
@@ -2312,25 +2310,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
	 */
	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);

-	/*
-	 * The ID map is always configured for 48 bits of translation, which
-	 * may be fewer than the number of VA bits used by the regular kernel
-	 * stage 1, when VA_BITS=52.
-	 *
-	 * At EL2, there is only one TTBR register, and we can't switch between
-	 * translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
-	 * line: we need to use the extended range with *both* our translation
-	 * tables.
-	 *
-	 * So use the maximum of the idmap VA bits and the regular kernel stage
-	 * 1 VA bits to assure that the hypervisor can both ID map its code page
-	 * and map any kernel memory.
-	 */
-	idmap_bits = IDMAP_VA_BITS;
-	kernel_bits = vabits_actual;
-	*hyp_va_bits = max(idmap_bits, kernel_bits);
-
-	kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
+	kvm_debug("Using %u-bit virtual addresses at EL2\n", hyp_va_bits);
	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
	kvm_debug("HYP VA range: %lx:%lx\n",
		  kern_hyp_va(PAGE_OFFSET),
@@ -2355,7 +2335,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
		goto out;
	}

-	err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
+	err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits, &kvm_hyp_mm_ops);
	if (err)
		goto out_free_pgtable;

@@ -2364,7 +2344,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
		goto out_destroy_pgtable;

	io_map_base = hyp_idmap_start;
-	__hyp_va_bits = *hyp_va_bits;
+	__hyp_va_bits = hyp_va_bits;

	return 0;

 out_destroy_pgtable:
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index ed710228484f..eeea5e692370 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -1101,6 +1101,9 @@ void kvm_nested_s2_wp(struct kvm *kvm)

	lockdep_assert_held_write(&kvm->mmu_lock);

+	if (!kvm->arch.nested_mmus_size)
+		return;
+
	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];

@@ -1117,6 +1120,9 @@ void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)

	lockdep_assert_held_write(&kvm->mmu_lock);

+	if (!kvm->arch.nested_mmus_size)
+		return;
+
	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];

@@ -1133,6 +1139,9 @@ void kvm_nested_s2_flush(struct kvm *kvm)

	lockdep_assert_held_write(&kvm->mmu_lock);

+	if (!kvm->arch.nested_mmus_size)
+		return;
+
	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];

@@ -1145,6 +1154,9 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
	int i;

+	if (!kvm->arch.nested_mmus_size)
+		return;
+
	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 91b22a014610..2346f9435a71 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -47,8 +47,30 @@ static void init_hyp_physvirt_offset(void)
 }

 /*
+ * Calculate the actual VA size used by the hypervisor
+ */
+__init u32 kvm_hyp_va_bits(void)
+{
+	/*
+	 * The ID map is always configured for 48 bits of translation, which may
+	 * be different from the number of VA bits used by the regular kernel
+	 * stage 1.
+	 *
+	 * At EL2, there is only one TTBR register, and we can't switch between
+	 * translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
+	 * line: we need to use the extended range with *both* our translation
+	 * tables.
+	 *
+	 * So use the maximum of the idmap VA bits and the regular kernel stage
+	 * 1 VA bits as the hypervisor VA size to assure that the hypervisor can
+	 * both ID map its code page and map any kernel memory.
+	 */
+	return max(IDMAP_VA_BITS, vabits_actual);
+}
+
+/*
  * We want to generate a hyp VA with the following format (with V ==
- * vabits_actual):
+ * hypervisor VA bits):
  *
  *  63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
  *  ---------------------------------------------------------
@@ -61,10 +83,11 @@ __init void kvm_compute_layout(void)
 {
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;
+	u32 hyp_va_bits = kvm_hyp_va_bits();

	/* Where is my RAM region? */
-	hyp_va_msb = idmap_addr & BIT(vabits_actual - 1);
-	hyp_va_msb ^= BIT(vabits_actual - 1);
+	hyp_va_msb = idmap_addr & BIT(hyp_va_bits - 1);
+	hyp_va_msb ^= BIT(hyp_va_bits - 1);

	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
			(u64)(high_memory - 1));
@@ -72,9 +95,9 @@ __init void kvm_compute_layout(void)
	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
	tag_val = hyp_va_msb;

-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (hyp_va_bits - 1)) {
		/* We have some free bits to insert a random tag. */
-		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
+		tag_val |= get_random_long() & GENMASK_ULL(hyp_va_bits - 2, tag_lsb);
	}
	tag_val >>= tag_lsb;
diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c
index c9e35ec67117..5c69fa615823 100644
--- a/arch/arm64/kvm/vgic/vgic-v3-nested.c
+++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c
@@ -57,7 +57,7 @@ static int lr_map_idx_to_shadow_idx(struct shadow_if *shadow_if, int idx)
 * as the L1 guest is in charge of provisioning the interrupts via its own
 * view of the ICH_LR*_EL2 registers, which conveniently live in the VNCR
 * page. This means that the flow described above does work (there is no
- * state to rebuild in the L0 hypervisor), and that most things happed on L2
+ * state to rebuild in the L0 hypervisor), and that most things happen on L2
 * load/put:
 *
 * - on L2 load: move the in-memory L1 vGIC configuration into a shadow,
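
The mmu.c and va_layout.c hunks above move the hyp VA-size reasoning into a single helper, kvm_hyp_va_bits(), so that kvm_compute_layout() and kvm_mmu_init() agree on one value; previously kvm_compute_layout() keyed off vabits_actual while kvm_mmu_init() computed max(IDMAP_VA_BITS, vabits_actual), which could disagree when the kernel VA size is smaller than the idmap's 48 bits. A standalone sketch of the calculation (illustrative only; the example VA configurations are assumptions, not taken from the patch):

```c
/*
 * Standalone sketch (not kernel code) of the hyp VA size calculation now
 * done once in kvm_hyp_va_bits(): the ID map is always set up for 48-bit
 * translation and EL2 has a single TTBR, so the hypervisor must use the
 * larger of the idmap VA size and the kernel's stage-1 VA size.
 */
#include <stdio.h>

#define IDMAP_VA_BITS	48	/* per the comment in the patch above */

static unsigned int hyp_va_bits(unsigned int vabits_actual)
{
	/* mirrors the max() in kvm_hyp_va_bits() */
	return vabits_actual > IDMAP_VA_BITS ? vabits_actual : IDMAP_VA_BITS;
}

int main(void)
{
	/* e.g. a 39-bit kernel VA configuration: the 48-bit idmap dominates */
	printf("vabits_actual=39 -> hyp uses %u-bit VAs\n", hyp_va_bits(39));
	/* e.g. a 52-bit kernel VA configuration: the kernel side dominates */
	printf("vabits_actual=52 -> hyp uses %u-bit VAs\n", hyp_va_bits(52));
	return 0;
}
```

The 39-bit case is the "number of VA bits is smaller than expected" situation called out in the merge description: the layout code must then size the hyp VA space for 48 bits rather than for the kernel's smaller stage-1 range.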

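As a side note on the kvm_host.h hunk: the KVM_HOST_DATA_FLAG_* constants are bit indices into the single flags word, so compacting 4..8 down to 2..6 only changes which bit each flag occupies; nothing hard-codes the raw numbers. A minimal standalone sketch of that pattern (plain shifts stand in for the kernel's bitops-based helpers, which are not shown in this diff):

```c
/*
 * Standalone illustration (not kernel code): each flag macro names a bit
 * position in one unsigned long, so renumbering is safe as long as every
 * reader and writer goes through the symbolic name.
 */
#include <stdio.h>

#define FLAG_HAS_SPE		0
#define FLAG_HAS_TRBE		1
#define FLAG_TRBE_ENABLED	2	/* was 4 before the renumbering */

static unsigned long flags;

static void set_flag(int bit)
{
	flags |= 1UL << bit;
}

static int test_flag(int bit)
{
	return !!(flags & (1UL << bit));
}

int main(void)
{
	set_flag(FLAG_TRBE_ENABLED);
	printf("TRBE enabled: %d, flags word: %#lx\n",
	       test_flag(FLAG_TRBE_ENABLED), flags);
	return 0;
}
```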