From 6308630bd3dbb6a8a883c4c571ce5e5a759a8a0e Mon Sep 17 00:00:00 2001
From: Andrey Smetanin
Date: Tue, 10 Nov 2015 15:36:32 +0300
Subject: kvm/x86: split ioapic-handled and EOI exit bitmaps

The function to determine if the vector is handled by ioapic used to
rely on the fact that only ioapic-handled vectors were set up to cause
vmexits when virtual apic was in use.

We're going to break this assumption when introducing Hyper-V
synthetic interrupts: they may need to cause vmexits too.

To achieve that, introduce a new bitmap dedicated specifically for
ioapic-handled vectors, and populate EOI exit bitmap from it for now.

Signed-off-by: Andrey Smetanin
Reviewed-by: Roman Kagan
Signed-off-by: Denis V. Lunev
CC: Gleb Natapov
CC: Paolo Bonzini
CC: Roman Kagan
CC: Denis V. Lunev
CC: qemu-devel@nongnu.org
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/svm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86/kvm/svm.c')

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 83a1c643f9a5..ebb76e8a91e1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3564,7 +3564,7 @@ static int svm_cpu_uses_apicv(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu)
+static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
 	return;
 }
-- 
cgit v1.2.3
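For context, a rough sketch of the arch-common caller side that this signature change serves (it is not part of this diff; the helper name vcpu_scan_ioapic, the ioapic_handled_vectors field name and the exact bitmap sizes are assumptions based on the surrounding KVM code): the ioapic scan fills the new dedicated bitmap, and the EOI exit bitmap handed to load_eoi_exitmap() is, for now, simply derived from it.

/* Illustrative sketch only, not the verbatim upstream code. */
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
	u64 eoi_exit_bitmap[4];		/* 256 vectors / 64 bits per u64 */

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return;

	/* Collect ioapic-handled vectors into the new dedicated bitmap. */
	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
	kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);

	/* For now the EOI exit bitmap is a plain copy of it. */
	bitmap_copy((unsigned long *)eoi_exit_bitmap,
		    vcpu->arch.ioapic_handled_vectors, 256);
	kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}

Once Hyper-V synthetic interrupts land, additional vectors can be OR-ed into eoi_exit_bitmap here without being mistaken for ioapic-handled ones.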
From d62caabb41f33d96333f9ef15e09cd26e1c12760 Mon Sep 17 00:00:00 2001
From: Andrey Smetanin
Date: Tue, 10 Nov 2015 15:36:33 +0300
Subject: kvm/x86: per-vcpu apicv deactivation support

The decision on whether to use hardware APIC virtualization used to be
taken globally, based on the availability of the feature in the CPU
and the value of a module parameter.

However, under certain circumstances we want to control it on a
per-vcpu basis. In particular, when the userspace activates HyperV
synthetic interrupt controller (SynIC), APICv has to be disabled as
it's incompatible with SynIC auto-EOI behavior.

To achieve that, introduce 'apicv_active' flag on struct kvm_vcpu_arch,
and kvm_vcpu_deactivate_apicv() function to turn APICv off. The flag is
initialized based on the module parameter and CPU capability, and
consulted whenever an APICv-specific action is performed.

Signed-off-by: Andrey Smetanin
Reviewed-by: Roman Kagan
Signed-off-by: Denis V. Lunev
CC: Gleb Natapov
CC: Paolo Bonzini
CC: Roman Kagan
CC: Denis V. Lunev
CC: qemu-devel@nongnu.org
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/svm.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

(limited to 'arch/x86/kvm/svm.c')

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ebb76e8a91e1..2401fc88905b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3559,9 +3559,13 @@ static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	return;
 }
 
-static int svm_cpu_uses_apicv(struct kvm_vcpu *vcpu)
+static bool svm_get_enable_apicv(void)
+{
+	return false;
+}
+
+static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
-	return 0;
 }
 
 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
@@ -4328,7 +4332,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
 	.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
-	.cpu_uses_apicv = svm_cpu_uses_apicv,
+	.get_enable_apicv = svm_get_enable_apicv,
+	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
 	.sync_pir_to_irr = svm_sync_pir_to_irr,
-- 
cgit v1.2.3
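A sketch of the arch-common side these hooks plug into (not shown in this diff; kvm_vcpu_deactivate_apicv() and apicv_active are named in the commit message, while the kvm_vcpu_apicv_active() helper and the exact bodies are assumptions): deactivation clears the per-vcpu flag and asks the vendor code to refresh its execution controls, and APICv-specific paths then test the flag instead of the global setting.

/* Illustrative sketch only; the real definitions live outside svm.c. */
static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic && vcpu->arch.apicv_active;
}

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
{
	/* Flip the per-vcpu flag, then let the vendor code react. */
	vcpu->arch.apicv_active = false;
	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
}

On SVM both new callbacks are trivial because svm_get_enable_apicv() always returns false, so apicv_active never becomes true on AMD and there is nothing to refresh.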
From 46896c73c1a4dde527c3a3cc43379deeb41985a1 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Thu, 12 Nov 2015 14:49:16 +0100
Subject: KVM: svm: add support for RDTSCP

RDTSCP was never supported for AMD CPUs, which nobody noticed because
Linux does not use it. But exactly the fact that Linux does not use it
makes the implementation very simple; we can freely trash MSR_TSC_AUX
while running the guest.

Cc: Joerg Roedel
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/svm.c | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

(limited to 'arch/x86/kvm/svm.c')

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2401fc88905b..af342150558a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -86,6 +86,7 @@ static const u32 host_save_user_msrs[] = {
 	MSR_FS_BASE,
 #endif
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
+	MSR_TSC_AUX,
 };
 
 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
@@ -135,6 +136,7 @@ struct vcpu_svm {
 	uint64_t asid_generation;
 	uint64_t sysenter_esp;
 	uint64_t sysenter_eip;
+	uint64_t tsc_aux;
 
 	u64 next_rip;
 
@@ -1238,6 +1240,9 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
 		}
 	}
+	/* This assumes that the kernel never uses MSR_TSC_AUX */
+	if (static_cpu_has(X86_FEATURE_RDTSCP))
+		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
 }
 
 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
@@ -3024,6 +3029,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_SYSENTER_ESP:
 		msr_info->data = svm->sysenter_esp;
 		break;
+	case MSR_TSC_AUX:
+		if (!boot_cpu_has(X86_FEATURE_RDTSCP))
+			return 1;
+		msr_info->data = svm->tsc_aux;
+		break;
 	/*
 	 * Nobody will change the following 5 values in the VMCB so we can
 	 * safely return them on rdmsr. They will always be 0 until LBRV is
@@ -3145,6 +3155,18 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		svm->sysenter_esp = data;
 		svm->vmcb->save.sysenter_esp = data;
 		break;
+	case MSR_TSC_AUX:
+		if (!boot_cpu_has(X86_FEATURE_RDTSCP))
+			return 1;
+
+		/*
+		 * This is rare, so we update the MSR here instead of using
+		 * direct_access_msrs. Doing that would require a rdmsr in
+		 * svm_vcpu_put.
+		 */
+		svm->tsc_aux = data;
+		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
+		break;
 	case MSR_IA32_DEBUGCTLMSR:
 		if (!boot_cpu_has(X86_FEATURE_LBRV)) {
 			vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
@@ -4041,7 +4063,7 @@ static int svm_get_lpage_level(void)
 
 static bool svm_rdtscp_supported(void)
 {
-	return false;
+	return boot_cpu_has(X86_FEATURE_RDTSCP);
 }
 
 static bool svm_invpcid_supported(void)
-- 
cgit v1.2.3
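Why adding MSR_TSC_AUX to host_save_user_msrs is enough: the host values of everything in that array are saved when the vcpu is loaded and written back when it is put, so the guest value written in the hunks above may freely "trash" the MSR in between. A sketch of that pre-existing save/restore loop in svm.c (it is not part of this diff, and the host_user_msrs field name is an assumption; only the overall shape is intended to be accurate):

/* Illustrative sketch of the existing host MSR save/restore in svm.c. */
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	/* ... */
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
	/* ... the guest MSR_TSC_AUX is then loaded, as in the hunk above */
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	/* ... */
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}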