author     Anthony Liguori <aliguori@us.ibm.com>    2007-04-23 09:17:21 -0500
committer  Avi Kivity <avi@qumranet.com>            2007-05-03 10:52:31 +0300
commit     7807fa6ca5af2e5660a0eb3cd90276ca0c5bdfc8
tree       8f97af3538acc40a6a72af29f31351d8f0f69296  /drivers/kvm
parent     4c690a1e8667a84b61a6114a4ad293681f32cb11
KVM: Lazy FPU support for SVM
Avoid saving and restoring the guest FPU state on every exit. This
shaves ~100 cycles off the guest/host switch.
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
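
[Editor's note] As a quick orientation for the patch below, here is a minimal,
self-contained C sketch of the lazy-FPU idea it implements: the vcpu drops the
guest FPU state across a context switch, arms the #NM intercept and sets CR0.TS,
and only reloads the state when the guest actually touches the FPU. The struct
and function names here are hypothetical user-space stand-ins, not the kernel
code; the real changes live in kvm.h and svm.c below.

/*
 * Minimal sketch (hypothetical names, user-space model) of lazy FPU
 * switching: defer the guest FPU reload until the guest traps with #NM.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_model {
	bool fpu_active;     /* is the guest's FPU state currently loaded? */
	bool nm_intercepted; /* stand-in for the VMCB #NM exception intercept */
	bool cr0_ts;         /* stand-in for CR0.TS as seen by the guest */
};

/* On a context switch (cf. svm_set_cr3 in the patch): arm the trap instead
 * of eagerly keeping the guest FPU state resident. */
static void fpu_deactivate(struct vcpu_model *v)
{
	if (v->fpu_active) {
		v->nm_intercepted = true;
		v->cr0_ts = true;
		v->fpu_active = false;
	}
}

/* First guest FPU use raises #NM (cf. nm_interception): reload the state,
 * clear TS and drop the intercept so later FPU use costs nothing extra. */
static void fpu_activate(struct vcpu_model *v)
{
	v->nm_intercepted = false;
	v->cr0_ts = false;
	v->fpu_active = true;
	puts("guest FPU state reloaded on first use");
}

/* Run loop: fx_save/fx_restore only when the guest really owns the FPU. */
static void vcpu_run_once(struct vcpu_model *v, bool guest_touches_fpu)
{
	if (v->fpu_active)
		puts("fx_save(host); fx_restore(guest)");
	/* ... enter the guest here ... */
	if (guest_touches_fpu && v->nm_intercepted)
		fpu_activate(v);
	if (v->fpu_active)
		puts("fx_save(guest); fx_restore(host)");
}

int main(void)
{
	struct vcpu_model v = { .fpu_active = true };

	fpu_deactivate(&v);        /* context switch: defer the FPU work */
	vcpu_run_once(&v, false);  /* guest skips the FPU: no save/restore at all */
	vcpu_run_once(&v, true);   /* guest uses the FPU: one on-demand reload */
	return 0;
}

The actual patch keeps the same invariant with vcpu->fpu_active, the
NM_VECTOR bit in intercept_exceptions and CR0.TS in the VMCB.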
Diffstat (limited to 'drivers/kvm')
-rw-r--r--	drivers/kvm/kvm.h	 2
-rw-r--r--	drivers/kvm/svm.c	35
2 files changed, 33 insertions(+), 4 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index d1a90c5d76ce..61ff085df7e6 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -63,6 +63,7 @@
 #define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)
 
 #define DE_VECTOR 0
+#define NM_VECTOR 7
 #define DF_VECTOR 8
 #define TS_VECTOR 10
 #define NP_VECTOR 11
@@ -301,6 +302,7 @@ struct kvm_vcpu {
 	char fx_buf[FX_BUF_SIZE];
 	char *host_fx_image;
 	char *guest_fx_image;
+	int fpu_active;
 
 	int mmio_needed;
 	int mmio_read_completed;
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 644efc5381ad..2a7a0390bfb1 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -587,6 +587,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
 	init_vmcb(vcpu->svm->vmcb);
 
 	fx_init(vcpu);
+	vcpu->fpu_active = 1;
 	vcpu->apic_base = 0xfee00000 |
 			/*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
 			MSR_IA32_APICBASE_ENABLE;
@@ -756,6 +757,11 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		}
 	}
 #endif
+	if ((vcpu->cr0 & CR0_TS_MASK) && !(cr0 & CR0_TS_MASK)) {
+		vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+		vcpu->fpu_active = 1;
+	}
+
 	vcpu->cr0 = cr0;
 	cr0 |= CR0_PG_MASK | CR0_WP_MASK;
 	cr0 &= ~(CR0_CD_MASK | CR0_NW_MASK);
@@ -928,6 +934,16 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 0;
 }
 
+static int nm_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	vcpu->svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
+	if (!(vcpu->cr0 & CR0_TS_MASK))
+		vcpu->svm->vmcb->save.cr0 &= ~CR0_TS_MASK;
+	vcpu->fpu_active = 1;
+
+	return 1;
+}
+
 static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	/*
@@ -1292,6 +1308,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
 	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
+	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
 	[SVM_EXIT_INTR]				= nop_on_interception,
 	[SVM_EXIT_NMI]				= nop_on_interception,
 	[SVM_EXIT_SMI]				= nop_on_interception,
@@ -1481,8 +1498,10 @@ again:
 		load_db_regs(vcpu->svm->db_regs);
 	}
 
-	fx_save(vcpu->host_fx_image);
-	fx_restore(vcpu->guest_fx_image);
+	if (vcpu->fpu_active) {
+		fx_save(vcpu->host_fx_image);
+		fx_restore(vcpu->guest_fx_image);
+	}
 
 	asm volatile (
 #ifdef CONFIG_X86_64
@@ -1593,8 +1612,10 @@ again:
 #endif
 		: "cc", "memory" );
 
-	fx_save(vcpu->guest_fx_image);
-	fx_restore(vcpu->host_fx_image);
+	if (vcpu->fpu_active) {
+		fx_save(vcpu->guest_fx_image);
+		fx_restore(vcpu->host_fx_image);
+	}
 
 	if ((vcpu->svm->vmcb->save.dr7 & 0xff))
 		load_db_regs(vcpu->svm->host_db_regs);
@@ -1664,6 +1685,12 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 {
 	vcpu->svm->vmcb->save.cr3 = root;
 	force_new_asid(vcpu);
+
+	if (vcpu->fpu_active) {
+		vcpu->svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
+		vcpu->svm->vmcb->save.cr0 |= CR0_TS_MASK;
+		vcpu->fpu_active = 0;
+	}
 }
 
 static void svm_inject_page_fault(struct kvm_vcpu *vcpu,