From 17a2c62fbf1ea46f3ae32c601350488d2b6df730 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 14 Mar 2025 19:44:02 -0700
Subject: KVM: nVMX: Check MSR load/store list counts during VM-Enter consistency checks

Explicitly verify the MSR load/store list counts are below the advertised
limit as part of the initial consistency checks on the lists, so that code
that consumes the count doesn't need to worry about extreme edge cases.
Enforcing the limit during the initial checks fixes a flaw on 32-bit KVM
where a sufficiently high @count could lead to overflow:

  arch/x86/kvm/vmx/nested.c:834 nested_vmx_check_msr_switch()
  warn: potential user controlled sizeof overflow 'addr + count * 16'
  '0-u64max + 16-68719476720'

  arch/x86/kvm/vmx/nested.c
      827 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
      828                                        u32 count, u64 addr)
      829 {
      830         if (count == 0)
      831                 return 0;
      832
      833         if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
  --> 834             !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
                                                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

While the SDM doesn't explicitly state an illegal count results in VM-Fail,
the SDM states that exceeding the limit may result in undefined behavior.
I.e. the SDM gives hardware, and thus KVM, carte blanche to do literally
anything in response to a count that exceeds the "recommended" limit.

  If the limit is exceeded, undefined processor behavior may result
  (including a machine check during the VMX transition).

KVM already enforces the limit when processing the MSRs, i.e. already
signals a late VM-Exit Consistency Check for VM-Enter, and generates a
VMX Abort for VM-Exit.  I.e. explicitly checking the limits simply means
KVM will signal VM-Fail instead of VM-Exit or VMX Abort.

Reported-by: Dan Carpenter
Closes: https://lore.kernel.org/all/44961459-2759-4164-b604-f6bd43da8ce9@stanley.mountain
Link: https://lore.kernel.org/r/20250315024402.2363098-1-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/nested.c | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index e073e3008b16..fd2bf3262e11 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -824,12 +824,30 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
+				       vmx->nested.msrs.misc_high);
+
+	return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
+}
+
 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
 				       u32 count, u64 addr)
 {
 	if (count == 0)
 		return 0;
 
+	/*
+	 * Exceeding the limit results in architecturally _undefined_ behavior,
+	 * i.e. KVM is allowed to do literally anything in response to a bad
+	 * limit.  Immediately generate a consistency check so that code that
+	 * consumes the count doesn't need to worry about extreme edge cases.
+	 */
+	if (count > nested_vmx_max_atomic_switch_msrs(vcpu))
+		return -EINVAL;
+
 	if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
 	    !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
 		return -EINVAL;
@@ -940,15 +958,6 @@ static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
-				       vmx->nested.msrs.misc_high);
-
-	return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
-}
-
 /*
  * Load guest's/host's msr at nested entry/exit.
  * return 0 for success, entry index for failure.
 */
@@ -965,7 +974,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
 
 	for (i = 0; i < count; i++) {
-		if (unlikely(i >= max_msr_list_size))
+		if (WARN_ON_ONCE(i >= max_msr_list_size))
 			goto fail;
 
 		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
@@ -1053,7 +1062,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
 
 	for (i = 0; i < count; i++) {
-		if (unlikely(i >= max_msr_list_size))
+		if (WARN_ON_ONCE(i >= max_msr_list_size))
			return -EINVAL;
 
 		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
--
cgit v1.2.3
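For context on where the limit enforced above comes from: bits 27:25 of the
IA32_VMX_MISC MSR report a value N, and 512 * (N + 1) is the recommended
maximum number of entries in a single MSR load/store list;
nested_vmx_max_atomic_switch_msrs() computes exactly that from the vCPU's
virtualized VMX_MISC value (VMX_MISC_MSR_LIST_MULTIPLIER is 512).  A minimal
standalone sketch of the calculation, illustrative only and not KVM code
(the helper name is made up):

	#include <stdint.h>

	/* Recommended max MSR list entries: 512 * (N + 1), N = VMX_MISC[27:25]. */
	static uint32_t max_atomic_switch_msrs(uint64_t vmx_misc)
	{
		uint32_t n = (vmx_misc >> 25) & 0x7;

		return (n + 1) * 512;
	}

On current hardware N is typically 0, i.e. the recommended maximum is 512
entries per list.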
From 798b9b1cb0e55c10fe761f42de5e28ecec8ef1f4 Mon Sep 17 00:00:00 2001
From: Uros Bizjak
Date: Mon, 14 Apr 2025 10:10:51 +0200
Subject: KVM: VMX: Use LEAVE in vmx_do_interrupt_irqoff()

Micro-optimize vmx_do_interrupt_irqoff() by substituting the
MOV %RBP,%RSP; POP %RBP instruction sequence with the equivalent
LEAVE instruction.  The GCC compiler does this by default for generic
tuning and for all modern processors:

  DEF_TUNE (X86_TUNE_USE_LEAVE, "use_leave",
	    m_386 | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE | m_ZHAOXIN
	    | m_TREMONT | m_CORE_HYBRID | m_CORE_ATOM | m_GENERIC)

The new code also saves a couple of bytes, from:

  27:	48 89 ec             	mov    %rbp,%rsp
  2a:	5d                   	pop    %rbp

to:

  27:	c9                   	leave

No functional change intended.

Signed-off-by: Uros Bizjak
Cc: Sean Christopherson
Cc: Paolo Bonzini
Cc: Vitaly Kuznetsov
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: "H. Peter Anvin"
Link: https://lore.kernel.org/r/20250414081131.97374-2-ubizjak@gmail.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/vmenter.S | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index f6986dee6f8c..0a6cf5bff2aa 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -59,8 +59,7 @@
 	 * without the explicit restore, thinks the stack is getting walloped.
 	 * Using an unwind hint is problematic due to x86-64's dynamic alignment.
 	 */
-	mov %_ASM_BP, %_ASM_SP
-	pop %_ASM_BP
+	leave
 	RET
 .endm
--
cgit v1.2.3
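For reference, LEAVE is architecturally equivalent to the two instructions it
replaces; a generic frame prologue/epilogue pair looks like this (illustrative
assembly, not taken from the patch):

	push %rbp		# prologue: save the caller's frame pointer
	mov  %rsp, %rbp		#           establish the new frame
	# ... function body ...
	leave			# epilogue: mov %rbp,%rsp + pop %rbp in one insn
	ret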
From a0ee1d5faff135e28810f29e0f06328c66f89852 Mon Sep 17 00:00:00 2001
From: Chao Gao
Date: Mon, 24 Mar 2025 22:08:48 +0800
Subject: KVM: VMX: Flush shadow VMCS on emergency reboot

Ensure the shadow VMCS cache is evicted during an emergency reboot to
prevent potential memory corruption if the cache is evicted after reboot.

This issue was identified through code inspection, as __loaded_vmcs_clear()
flushes both the normal VMCS and the shadow VMCS.

Avoid checking the "launched" state during an emergency reboot, unlike the
behavior in __loaded_vmcs_clear().  This is important because reboot NMIs
can interfere with operations like copy_shadow_to_vmcs12(), where shadow
VMCSes are loaded directly using VMPTRLD.  In such cases, if NMIs occur
right after the VMCS load, the shadow VMCSes will be active but the
"launched" state may not be set.

Fixes: 16f5b9034b69 ("KVM: nVMX: Copy processor-specific shadow-vmcs to VMCS12")
Cc: stable@vger.kernel.org
Signed-off-by: Chao Gao
Reviewed-by: Kai Huang
Link: https://lore.kernel.org/r/20250324140849.2099723-1-chao.gao@intel.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/vmx.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index ef2d7208dd20..848c4963bdb8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -770,8 +770,11 @@ void vmx_emergency_disable_virtualization_cpu(void)
 		return;
 
 	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
-			    loaded_vmcss_on_cpu_link)
+			    loaded_vmcss_on_cpu_link) {
 		vmcs_clear(v->vmcs);
+		if (v->shadow_vmcs)
+			vmcs_clear(v->shadow_vmcs);
+	}
 
 	kvm_cpu_vmxoff();
 }
--
cgit v1.2.3
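A simplified view of the window described in the changelog above (illustrative
sketch only, not the exact KVM code):

	static void copy_shadow_to_vmcs12_sketch(struct vcpu_vmx *vmx)
	{
		/* VMPTRLD: the shadow VMCS becomes active on this CPU ... */
		vmcs_load(vmx->loaded_vmcs->shadow_vmcs);

		/*
		 * ... so an emergency-reboot NMI that lands here must flush it,
		 * even though loaded_vmcs->launched was never set for it.
		 */

		/* copy the shadowed fields into vmcs12 via VMREAD */

		/* restore the ordinary VMCS as the current VMCS */
		vmcs_load(vmx->loaded_vmcs->vmcs);
	}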
From f2d7993314a31bc3acd3cb224f9866e90c5c96d8 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 2 May 2025 13:34:21 -0700
Subject: KVM: x86: Revert kvm_x86_ops.mem_enc_ioctl() back to an OPTIONAL hook

Restore KVM's handling of a NULL kvm_x86_ops.mem_enc_ioctl, as the hook is
NULL on SVM when CONFIG_KVM_AMD_SEV=n, and TDX will soon follow suit.

  ------------[ cut here ]------------
  WARNING: CPU: 0 PID: 1 at arch/x86/include/asm/kvm-x86-ops.h:130 kvm_x86_vendor_init+0x178b/0x18e0
  Modules linked in:
  CPU: 0 UID: 0 PID: 1 Comm: swapper/0 Not tainted 6.15.0-rc2-dc1aead1a985-sink-vm #2 NONE
  Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
  RIP: 0010:kvm_x86_vendor_init+0x178b/0x18e0
  Call Trace:
   svm_init+0x2e/0x60
   do_one_initcall+0x56/0x290
   kernel_init_freeable+0x192/0x1e0
   kernel_init+0x16/0x130
   ret_from_fork+0x30/0x50
   ret_from_fork_asm+0x1a/0x30
  ---[ end trace 0000000000000000 ]---

Opportunistically drop the superfluous curly braces.

Link: https://lore.kernel.org/all/20250318-vverma7-cleanup_x86_ops-v2-4-701e82d6b779@intel.com
Fixes: b2aaf38ced69 ("KVM: TDX: Add place holder for TDX VM specific mem_enc_op ioctl")
Link: https://lore.kernel.org/r/20250502203421.865686-1-seanjc@google.com
Signed-off-by: Sean Christopherson
---
 arch/x86/include/asm/kvm-x86-ops.h | 2 +-
 arch/x86/kvm/x86.c                 | 7 +++++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 79406bf07a1c..8d50e3e0a19b 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -127,7 +127,7 @@ KVM_X86_OP(leave_smm)
 KVM_X86_OP(enable_smi_window)
 #endif
 KVM_X86_OP_OPTIONAL(dev_get_attr)
-KVM_X86_OP(mem_enc_ioctl)
+KVM_X86_OP_OPTIONAL(mem_enc_ioctl)
 KVM_X86_OP_OPTIONAL(vcpu_mem_enc_ioctl)
 KVM_X86_OP_OPTIONAL(mem_enc_register_region)
 KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f6ce044b090a..247d54de101a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7324,10 +7324,13 @@ set_pit2_out:
 		r = READ_ONCE(kvm->arch.default_tsc_khz);
 		goto out;
 	}
-	case KVM_MEMORY_ENCRYPT_OP: {
+	case KVM_MEMORY_ENCRYPT_OP:
+		r = -ENOTTY;
+		if (!kvm_x86_ops.mem_enc_ioctl)
+			goto out;
+
 		r = kvm_x86_call(mem_enc_ioctl)(kvm, argp);
 		break;
-	}
 	case KVM_MEMORY_ENCRYPT_REG_REGION: {
 		struct kvm_enc_region region;
--
cgit v1.2.3
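The WARN in the splat comes from KVM's x86_ops instantiation at vendor module
init: hooks declared with KVM_X86_OP() are required to be non-NULL, while
KVM_X86_OP_OPTIONAL() hooks may be NULL and must be guarded at the call site.
Roughly, the mechanism looks like this (simplified sketch of the idea, not the
verbatim kernel code):

	/* arch/x86/kvm/x86.c, conceptually: */
	#define __KVM_X86_OP(func) \
		static_call_update(kvm_x86_##func, kvm_x86_ops.func);
	#define KVM_X86_OP(func) \
		{ WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func) }
	#define KVM_X86_OP_OPTIONAL __KVM_X86_OP

Hence marking mem_enc_ioctl optional, together with the explicit NULL check
before kvm_x86_call(mem_enc_ioctl)() in the ioctl handler above, restores the
CONFIG_KVM_AMD_SEV=n case.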
From 84ad4d834ce98542ad2910cecb409ba8fcfffa4b Mon Sep 17 00:00:00 2001
From: Vishal Verma
Date: Tue, 18 Mar 2025 00:35:07 -0600
Subject: KVM: VMX: Move vt_apicv_pre_state_restore() to posted_intr.c and tweak name

In preparation for a cleanup of the kvm_x86_ops struct for TDX, all vt_*
functions are expected to act as glue functions that route to either tdx_*
or vmx_* based on the VM type.  Specifically, the pattern is:

  vt_abc:
	if (is_td())
		return tdx_abc();
	return vmx_abc();

But vt_apicv_pre_state_restore() does not follow this pattern.  To
facilitate that cleanup, rename and move vt_apicv_pre_state_restore() into
posted_intr.c.

Opportunistically turn vcpu_to_pi_desc() back into a static function, as
the only reason it was exposed outside of posted_intr.c was for
vt_apicv_pre_state_restore().

No functional change intended.

Suggested-by: Sean Christopherson
Link: https://lore.kernel.org/kvm/Z6v9yjWLNTU6X90d@google.com/
Cc: Sean Christopherson
Cc: Rick Edgecombe
Reviewed-by: Binbin Wu
Signed-off-by: Vishal Verma
Link: https://lore.kernel.org/r/20250318-vverma7-cleanup_x86_ops-v2-2-701e82d6b779@intel.com
[sean: apply Chao's suggestions, massage shortlog]
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/main.c        | 10 +---------
 arch/x86/kvm/vmx/posted_intr.c | 10 +++++++++-
 arch/x86/kvm/vmx/posted_intr.h |  3 +--
 3 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 94d5d907d37b..f56bcad2e698 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -315,14 +315,6 @@ static void vt_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
 	return vmx_set_virtual_apic_mode(vcpu);
 }
 
-static void vt_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
-{
-	struct pi_desc *pi = vcpu_to_pi_desc(vcpu);
-
-	pi_clear_on(pi);
-	memset(pi->pir, 0, sizeof(pi->pir));
-}
-
 static void vt_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
 {
 	if (is_td_vcpu(vcpu))
@@ -983,7 +975,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.set_apic_access_page_addr = vt_set_apic_access_page_addr,
 	.refresh_apicv_exec_ctrl = vt_refresh_apicv_exec_ctrl,
 	.load_eoi_exitmap = vt_load_eoi_exitmap,
-	.apicv_pre_state_restore = vt_apicv_pre_state_restore,
+	.apicv_pre_state_restore = pi_apicv_pre_state_restore,
 	.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
 	.hwapic_isr_update = vt_hwapic_isr_update,
 	.sync_pir_to_irr = vt_sync_pir_to_irr,
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index 99d1d599ff8c..d31272691b9b 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -34,7 +34,7 @@ static DEFINE_PER_CPU(raw_spinlock_t, wakeup_vcpus_on_cpu_lock);
 
 #define PI_LOCK_SCHED_OUT SINGLE_DEPTH_NESTING
 
-struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
+static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
 {
 	return &(to_vt(vcpu)->pi_desc);
 }
@@ -264,6 +264,14 @@ void __init pi_init_cpu(int cpu)
 	raw_spin_lock_init(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu));
 }
 
+void pi_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
+{
+	struct pi_desc *pi = vcpu_to_pi_desc(vcpu);
+
+	pi_clear_on(pi);
+	memset(pi->pir, 0, sizeof(pi->pir));
+}
+
 bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
 {
 	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h
index 68605ca7ef68..25e8d5529f68 100644
--- a/arch/x86/kvm/vmx/posted_intr.h
+++ b/arch/x86/kvm/vmx/posted_intr.h
@@ -5,12 +5,11 @@
 #include
 #include
 
-struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu);
-
 void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
 void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu);
 void pi_wakeup_handler(void);
 void __init pi_init_cpu(int cpu);
+void pi_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
 bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);
 int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
 		       uint32_t guest_irq, bool set);
--
cgit v1.2.3
From 1a81d9d5a1da862ccc49cedec1df603aafa7c600 Mon Sep 17 00:00:00 2001
From: Vishal Verma
Date: Tue, 18 Mar 2025 00:35:08 -0600
Subject: KVM: VMX: Define a VMX glue macro for kvm_complete_insn_gp()

Define kvm_complete_insn_gp() as vmx_complete_emulated_msr() and use the
glue wrapper in vt_complete_emulated_msr() so that VT's
.complete_emulated_msr() implementation follows the soon-to-be-standard
pattern of:

  vt_abc:
	if (is_td())
		return tdx_abc();
	return vmx_abc();

This will allow generating such wrappers via a macro, which in turn will
make it trivially easy to skip the wrappers entirely when KVM_INTEL_TDX=n.

Suggested-by: Sean Christopherson
Link: https://lore.kernel.org/kvm/Z6v9yjWLNTU6X90d@google.com/
Cc: Sean Christopherson
Cc: Rick Edgecombe
Signed-off-by: Vishal Verma
Link: https://lore.kernel.org/r/20250318-vverma7-cleanup_x86_ops-v2-3-701e82d6b779@intel.com
[sean: massage shortlog+changelog]
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/main.c    | 2 +-
 arch/x86/kvm/vmx/x86_ops.h | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index f56bcad2e698..05e0c7c1b66d 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -240,7 +240,7 @@ static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
 	if (is_td_vcpu(vcpu))
 		return tdx_complete_emulated_msr(vcpu, err);
 
-	return kvm_complete_insn_gp(vcpu, err);
+	return vmx_complete_emulated_msr(vcpu, err);
 }
 
 #ifdef CONFIG_KVM_SMM
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 6bf8be570b2e..d3b58496ffd0 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -57,6 +57,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
 int vmx_get_feature_msr(u32 msr, u64 *data);
 int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+#define vmx_complete_emulated_msr kvm_complete_insn_gp
 u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg);
 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
--
cgit v1.2.3
From 907092bf7cbddee4381729f23a33780d84b1bb7c Mon Sep 17 00:00:00 2001
From: Vishal Verma
Date: Tue, 18 Mar 2025 00:35:09 -0600
Subject: KVM: VMX: Clean up and macrofy x86_ops

Eliminate a lot of stub definitions by using macros to define the TDX vs
non-TDX versions of various x86_ops.  Moving the x86_ops wrappers under
CONFIG_KVM_INTEL_TDX also allows nearly all of vmx/main.c to go under a
single #ifdef, eliminating trampolines in the generated code, and almost
all of the stubs.  For example, with CONFIG_KVM_INTEL_TDX=n, before this
cleanup, vt_refresh_apicv_exec_ctrl() would produce:

  0000000000036490 <vt_refresh_apicv_exec_ctrl>:
   36490:	f3 0f 1e fa          	endbr64
   36494:	e8 00 00 00 00       	call   36499
  			36495: R_X86_64_PLT32	__fentry__-0x4
   36499:	e9 00 00 00 00       	jmp    3649e
  			3649a: R_X86_64_PLT32	vmx_refresh_apicv_exec_ctrl-0x4
   3649e:	66 90                	xchg   %ax,%ax

After this patch, this is completely eliminated.

Based on a patch by Sean Christopherson
Link: https://lore.kernel.org/kvm/Z6v9yjWLNTU6X90d@google.com/
Cc: Sean Christopherson
Cc: Rick Edgecombe
Signed-off-by: Vishal Verma
Link: https://lore.kernel.org/r/20250318-vverma7-cleanup_x86_ops-v2-4-701e82d6b779@intel.com
Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/vmx/main.c    | 196 +++++++++++++++++++++++----------------------
 arch/x86/kvm/vmx/x86_ops.h |  65 ---------------
 2 files changed, 101 insertions(+), 160 deletions(-)

diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 05e0c7c1b66d..d1e02e567b57 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -12,7 +12,6 @@
 
 #ifdef CONFIG_KVM_INTEL_TDX
 static_assert(offsetof(struct vcpu_vmx, vt) == offsetof(struct vcpu_tdx, vt));
-#endif
 
 static void vt_disable_virtualization_cpu(void)
 {
@@ -880,6 +879,13 @@ static int vt_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
 	return 0;
 }
 
+#define vt_op(name) vt_##name
+#define vt_op_tdx_only(name) vt_##name
+#else /* CONFIG_KVM_INTEL_TDX */
+#define vt_op(name) vmx_##name
+#define vt_op_tdx_only(name) NULL
+#endif /* CONFIG_KVM_INTEL_TDX */
+
 #define VMX_REQUIRED_APICV_INHIBITS \
 	(BIT(APICV_INHIBIT_REASON_DISABLED) | \
 	 BIT(APICV_INHIBIT_REASON_ABSENT) | \
@@ -897,113 +903,113 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.hardware_unsetup = vmx_hardware_unsetup,
 
 	.enable_virtualization_cpu = vmx_enable_virtualization_cpu,
-	.disable_virtualization_cpu = vt_disable_virtualization_cpu,
+	.disable_virtualization_cpu = vt_op(disable_virtualization_cpu),
 	.emergency_disable_virtualization_cpu = vmx_emergency_disable_virtualization_cpu,
 
-	.has_emulated_msr = vt_has_emulated_msr,
+	.has_emulated_msr = vt_op(has_emulated_msr),
 
 	.vm_size = sizeof(struct kvm_vmx),
-	.vm_init = vt_vm_init,
-	.vm_pre_destroy = vt_vm_pre_destroy,
-	.vm_destroy = vt_vm_destroy,
+	.vm_init = vt_op(vm_init),
+	.vm_destroy = vt_op(vm_destroy),
+	.vm_pre_destroy = vt_op_tdx_only(vm_pre_destroy),
 
-	.vcpu_precreate = vt_vcpu_precreate,
-	.vcpu_create = vt_vcpu_create,
-	.vcpu_free = vt_vcpu_free,
-	.vcpu_reset = vt_vcpu_reset,
+	.vcpu_precreate = vt_op(vcpu_precreate),
+	.vcpu_create = vt_op(vcpu_create),
+	.vcpu_free = vt_op(vcpu_free),
+	.vcpu_reset = vt_op(vcpu_reset),
 
-	.prepare_switch_to_guest = vt_prepare_switch_to_guest,
-	.vcpu_load = vt_vcpu_load,
-	.vcpu_put = vt_vcpu_put,
+	.prepare_switch_to_guest = vt_op(prepare_switch_to_guest),
+	.vcpu_load = vt_op(vcpu_load),
+	.vcpu_put = vt_op(vcpu_put),
 
-	.update_exception_bitmap = vt_update_exception_bitmap,
+	.update_exception_bitmap = vt_op(update_exception_bitmap),
 	.get_feature_msr = vmx_get_feature_msr,
-	.get_msr = vt_get_msr,
-	.set_msr = vt_set_msr,
-
-	.get_segment_base = vt_get_segment_base,
-	.get_segment = vt_get_segment,
-	.set_segment = vt_set_segment,
-	.get_cpl = vt_get_cpl,
-	.get_cpl_no_cache = vt_get_cpl_no_cache,
-	.get_cs_db_l_bits = vt_get_cs_db_l_bits,
-	.is_valid_cr0 = vt_is_valid_cr0,
-	.set_cr0 = vt_set_cr0,
-	.is_valid_cr4 = vt_is_valid_cr4,
-	.set_cr4 = vt_set_cr4,
-	.set_efer = vt_set_efer,
-	.get_idt = vt_get_idt,
-	.set_idt = vt_set_idt,
-	.get_gdt = vt_get_gdt,
-	.set_gdt = vt_set_gdt,
-	.set_dr6 = vt_set_dr6,
-	.set_dr7 = vt_set_dr7,
-	.sync_dirty_debug_regs = vt_sync_dirty_debug_regs,
-	.cache_reg = vt_cache_reg,
-	.get_rflags = vt_get_rflags,
-	.set_rflags = vt_set_rflags,
-	.get_if_flag = vt_get_if_flag,
-
-	.flush_tlb_all = vt_flush_tlb_all,
-	.flush_tlb_current = vt_flush_tlb_current,
-	.flush_tlb_gva = vt_flush_tlb_gva,
-	.flush_tlb_guest = vt_flush_tlb_guest,
-
-	.vcpu_pre_run = vt_vcpu_pre_run,
-	.vcpu_run = vt_vcpu_run,
-	.handle_exit = vt_handle_exit,
+	.get_msr = vt_op(get_msr),
+	.set_msr = vt_op(set_msr),
+
+	.get_segment_base = vt_op(get_segment_base),
+	.get_segment = vt_op(get_segment),
+	.set_segment = vt_op(set_segment),
+	.get_cpl = vt_op(get_cpl),
+	.get_cpl_no_cache = vt_op(get_cpl_no_cache),
+	.get_cs_db_l_bits = vt_op(get_cs_db_l_bits),
+	.is_valid_cr0 = vt_op(is_valid_cr0),
+	.set_cr0 = vt_op(set_cr0),
+	.is_valid_cr4 = vt_op(is_valid_cr4),
+	.set_cr4 = vt_op(set_cr4),
+	.set_efer = vt_op(set_efer),
+	.get_idt = vt_op(get_idt),
+	.set_idt = vt_op(set_idt),
+	.get_gdt = vt_op(get_gdt),
+	.set_gdt = vt_op(set_gdt),
+	.set_dr6 = vt_op(set_dr6),
+	.set_dr7 = vt_op(set_dr7),
+	.sync_dirty_debug_regs = vt_op(sync_dirty_debug_regs),
+	.cache_reg = vt_op(cache_reg),
+	.get_rflags = vt_op(get_rflags),
+	.set_rflags = vt_op(set_rflags),
+	.get_if_flag = vt_op(get_if_flag),
+
+	.flush_tlb_all = vt_op(flush_tlb_all),
+	.flush_tlb_current = vt_op(flush_tlb_current),
+	.flush_tlb_gva = vt_op(flush_tlb_gva),
+	.flush_tlb_guest = vt_op(flush_tlb_guest),
+
+	.vcpu_pre_run = vt_op(vcpu_pre_run),
+	.vcpu_run = vt_op(vcpu_run),
+	.handle_exit = vt_op(handle_exit),
 	.skip_emulated_instruction = vmx_skip_emulated_instruction,
 	.update_emulated_instruction = vmx_update_emulated_instruction,
-	.set_interrupt_shadow = vt_set_interrupt_shadow,
-	.get_interrupt_shadow = vt_get_interrupt_shadow,
-	.patch_hypercall = vt_patch_hypercall,
-	.inject_irq = vt_inject_irq,
-	.inject_nmi = vt_inject_nmi,
-	.inject_exception = vt_inject_exception,
-	.cancel_injection = vt_cancel_injection,
-	.interrupt_allowed = vt_interrupt_allowed,
-	.nmi_allowed = vt_nmi_allowed,
-	.get_nmi_mask = vt_get_nmi_mask,
-	.set_nmi_mask = vt_set_nmi_mask,
-	.enable_nmi_window = vt_enable_nmi_window,
-	.enable_irq_window = vt_enable_irq_window,
-	.update_cr8_intercept = vt_update_cr8_intercept,
+	.set_interrupt_shadow = vt_op(set_interrupt_shadow),
+	.get_interrupt_shadow = vt_op(get_interrupt_shadow),
+	.patch_hypercall = vt_op(patch_hypercall),
+	.inject_irq = vt_op(inject_irq),
+	.inject_nmi = vt_op(inject_nmi),
+	.inject_exception = vt_op(inject_exception),
+	.cancel_injection = vt_op(cancel_injection),
+	.interrupt_allowed = vt_op(interrupt_allowed),
+	.nmi_allowed = vt_op(nmi_allowed),
+	.get_nmi_mask = vt_op(get_nmi_mask),
+	.set_nmi_mask = vt_op(set_nmi_mask),
+	.enable_nmi_window = vt_op(enable_nmi_window),
+	.enable_irq_window = vt_op(enable_irq_window),
+	.update_cr8_intercept = vt_op(update_cr8_intercept),
 
 	.x2apic_icr_is_split = false,
-	.set_virtual_apic_mode = vt_set_virtual_apic_mode,
-	.set_apic_access_page_addr = vt_set_apic_access_page_addr,
-	.refresh_apicv_exec_ctrl = vt_refresh_apicv_exec_ctrl,
-	.load_eoi_exitmap = vt_load_eoi_exitmap,
+	.set_virtual_apic_mode = vt_op(set_virtual_apic_mode),
+	.set_apic_access_page_addr = vt_op(set_apic_access_page_addr),
+	.refresh_apicv_exec_ctrl = vt_op(refresh_apicv_exec_ctrl),
+	.load_eoi_exitmap = vt_op(load_eoi_exitmap),
 	.apicv_pre_state_restore = pi_apicv_pre_state_restore,
 	.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
-	.hwapic_isr_update = vt_hwapic_isr_update,
-	.sync_pir_to_irr = vt_sync_pir_to_irr,
-	.deliver_interrupt = vt_deliver_interrupt,
+	.hwapic_isr_update = vt_op(hwapic_isr_update),
+	.sync_pir_to_irr = vt_op(sync_pir_to_irr),
+	.deliver_interrupt = vt_op(deliver_interrupt),
 	.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
 
-	.set_tss_addr = vt_set_tss_addr,
-	.set_identity_map_addr = vt_set_identity_map_addr,
+	.set_tss_addr = vt_op(set_tss_addr),
+	.set_identity_map_addr = vt_op(set_identity_map_addr),
 	.get_mt_mask = vmx_get_mt_mask,
 
-	.get_exit_info = vt_get_exit_info,
-	.get_entry_info = vt_get_entry_info,
+	.get_exit_info = vt_op(get_exit_info),
+	.get_entry_info = vt_op(get_entry_info),
 
-	.vcpu_after_set_cpuid = vt_vcpu_after_set_cpuid,
+	.vcpu_after_set_cpuid = vt_op(vcpu_after_set_cpuid),
 
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
-	.get_l2_tsc_offset = vt_get_l2_tsc_offset,
-	.get_l2_tsc_multiplier = vt_get_l2_tsc_multiplier,
-	.write_tsc_offset = vt_write_tsc_offset,
-	.write_tsc_multiplier = vt_write_tsc_multiplier,
+	.get_l2_tsc_offset = vt_op(get_l2_tsc_offset),
+	.get_l2_tsc_multiplier = vt_op(get_l2_tsc_multiplier),
+	.write_tsc_offset = vt_op(write_tsc_offset),
+	.write_tsc_multiplier = vt_op(write_tsc_multiplier),
 
-	.load_mmu_pgd = vt_load_mmu_pgd,
+	.load_mmu_pgd = vt_op(load_mmu_pgd),
 
 	.check_intercept = vmx_check_intercept,
 	.handle_exit_irqoff = vmx_handle_exit_irqoff,
 
-	.update_cpu_dirty_logging = vt_update_cpu_dirty_logging,
+	.update_cpu_dirty_logging = vt_op(update_cpu_dirty_logging),
 
 	.nested_ops = &vmx_nested_ops,
 
@@ -1011,38 +1017,38 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.pi_start_assignment = vmx_pi_start_assignment,
 
 #ifdef CONFIG_X86_64
-	.set_hv_timer = vt_set_hv_timer,
-	.cancel_hv_timer = vt_cancel_hv_timer,
+	.set_hv_timer = vt_op(set_hv_timer),
+	.cancel_hv_timer = vt_op(cancel_hv_timer),
 #endif
 
-	.setup_mce = vt_setup_mce,
+	.setup_mce = vt_op(setup_mce),
 
 #ifdef CONFIG_KVM_SMM
-	.smi_allowed = vt_smi_allowed,
-	.enter_smm = vt_enter_smm,
-	.leave_smm = vt_leave_smm,
-	.enable_smi_window = vt_enable_smi_window,
+	.smi_allowed = vt_op(smi_allowed),
+	.enter_smm = vt_op(enter_smm),
+	.leave_smm = vt_op(leave_smm),
+	.enable_smi_window = vt_op(enable_smi_window),
 #endif
 
-	.check_emulate_instruction = vt_check_emulate_instruction,
-	.apic_init_signal_blocked = vt_apic_init_signal_blocked,
+	.check_emulate_instruction = vt_op(check_emulate_instruction),
+	.apic_init_signal_blocked = vt_op(apic_init_signal_blocked),
 	.migrate_timers = vmx_migrate_timers,
 
-	.msr_filter_changed = vt_msr_filter_changed,
-	.complete_emulated_msr = vt_complete_emulated_msr,
+	.msr_filter_changed = vt_op(msr_filter_changed),
+	.complete_emulated_msr = vt_op(complete_emulated_msr),
 
 	.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
 
 	.get_untagged_addr = vmx_get_untagged_addr,
 
-	.mem_enc_ioctl = vt_mem_enc_ioctl,
-	.vcpu_mem_enc_ioctl = vt_vcpu_mem_enc_ioctl,
+	.mem_enc_ioctl = vt_op_tdx_only(mem_enc_ioctl),
+	.vcpu_mem_enc_ioctl = vt_op_tdx_only(vcpu_mem_enc_ioctl),
 
-	.private_max_mapping_level = vt_gmem_private_max_mapping_level
+	.private_max_mapping_level = vt_op_tdx_only(gmem_private_max_mapping_level)
 };
 
 struct kvm_x86_init_ops vt_init_ops __initdata = {
-	.hardware_setup = vt_hardware_setup,
+	.hardware_setup = vt_op(hardware_setup),
 	.handle_intel_pt_intr = NULL,
 
 	.runtime_ops = &vt_x86_ops,
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index d3b58496ffd0..b4596f651232 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -164,71 +164,6 @@ void tdx_flush_tlb_current(struct kvm_vcpu *vcpu);
 void tdx_flush_tlb_all(struct kvm_vcpu *vcpu);
 void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
 int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
-#else
-static inline void tdx_disable_virtualization_cpu(void) {}
-static inline int tdx_vm_init(struct kvm *kvm) { return -EOPNOTSUPP; }
-static inline void tdx_mmu_release_hkid(struct kvm *kvm) {}
-static inline void tdx_vm_destroy(struct kvm *kvm) {}
-static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
-
-static inline int tdx_vcpu_create(struct kvm_vcpu *vcpu) { return -EOPNOTSUPP; }
-static inline void tdx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) {}
-static inline void tdx_vcpu_free(struct kvm_vcpu *vcpu) {}
-static inline void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) {}
-static inline int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu) { return -EOPNOTSUPP; }
-static inline fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
-{
-	return EXIT_FASTPATH_NONE;
-}
-static inline void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
-static inline void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
-static inline bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu) { return false; }
-static inline int tdx_handle_exit(struct kvm_vcpu *vcpu,
-				  enum exit_fastpath_completion fastpath) { return 0; }
-
-static inline void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
-					 int trig_mode, int vector) {}
-static inline void tdx_inject_nmi(struct kvm_vcpu *vcpu) {}
-static inline void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, u64 *info1,
-				     u64 *info2, u32 *intr_info, u32 *error_code) {}
-static inline bool tdx_has_emulated_msr(u32 index) { return false; }
-static inline int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }
-static inline int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }
-
-static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }
-
-static inline int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
-					    enum pg_level level,
-					    void *private_spt)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
-					    enum pg_level level,
-					    void *private_spt)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
-					    enum pg_level level,
-					    kvm_pfn_t pfn)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
-					       enum pg_level level,
-					       kvm_pfn_t pfn)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void tdx_flush_tlb_current(struct kvm_vcpu *vcpu) {}
-static inline void tdx_flush_tlb_all(struct kvm_vcpu *vcpu) {}
-static inline void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level) {}
-static inline int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn) { return 0; }
 #endif
 
 #endif /* __KVM_X86_VMX_X86_OPS_H */
--
cgit v1.2.3
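To recap the mechanism introduced by the last patch: vt_op() and
vt_op_tdx_only() resolve each kvm_x86_ops entry at compile time, so a
CONFIG_KVM_INTEL_TDX=n build binds the VMX functions (or NULL) directly, with
no vt_* trampolines and no tdx_* stubs.  Illustrative expansion of two entries
(annotations only, not code from the patch):

	/* CONFIG_KVM_INTEL_TDX=y */
	.vcpu_create   = vt_op(vcpu_create),		/* -> vt_vcpu_create (glue)  */
	.mem_enc_ioctl = vt_op_tdx_only(mem_enc_ioctl),	/* -> vt_mem_enc_ioctl       */

	/* CONFIG_KVM_INTEL_TDX=n */
	.vcpu_create   = vt_op(vcpu_create),		/* -> vmx_vcpu_create         */
	.mem_enc_ioctl = vt_op_tdx_only(mem_enc_ioctl),	/* -> NULL, hence the earlier
							 *    OPTIONAL-hook revert    */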