author     Avi Kivity <avi@redhat.com>    2011-04-24 12:25:50 +0300
committer  Avi Kivity <avi@redhat.com>    2011-05-22 08:39:46 -0400
commit     781e0743af3c5ba356d55bc60df59f2dded1e938
tree       549ba079bf78ce53ee93852bd1ae36f810082bc0 /arch/x86/kvm
parent     62aaa2f05abd59598f132e6ebad86318291b5be0
KVM: MMU: Add unlikely() annotations to walk_addr_generic()
walk_addr_generic() is a hot path and is also hard for the CPU to
predict: some of its parameters (fetch_fault in particular) vary wildly
from invocation to invocation.
Add unlikely() annotations where appropriate: all walk failures are
considered unlikely, as are the cases where we have to set the accessed
or dirty bit, since those are slow paths both in KVM and on real
processors.
Signed-off-by: Avi Kivity <avi@redhat.com>
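For readers unfamiliar with the annotation: in the kernel, likely() and
unlikely() expand to GCC's __builtin_expect() (include/linux/compiler.h),
which tells the compiler which way a branch usually goes so it can keep
the expected case as straight-line code and move the cold block out of
line. A minimal self-contained sketch of the idiom as used in this patch;
the PRESENT_BIT constant and check_pte() helper are made up for
illustration:

#include <stdbool.h>

/* The kernel's definitions; the !! normalizes any non-zero value
 * to exactly 1 before it reaches __builtin_expect(). */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

#define PRESENT_BIT 1UL /* x86 PTE present bit */

/* Hypothetical walker step: returns false on a walk failure.
 * Failures are rare, so the early-out is marked as the cold path. */
static bool check_pte(unsigned long pte, bool *present)
{
        if (unlikely(!(pte & PRESENT_BIT))) {
                *present = false;   /* cold: not-present fault */
                return false;
        }
        return true;                /* hot: continue the walk */
}

Compiling a file like this with gcc -O2 -S, with and without the hint,
shows the effect: GCC typically emits the unlikely() block after the
function's fall-through path instead of inline.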
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--   arch/x86/kvm/paging_tmpl.h | 28
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a32a1c809149..652d56c081f7 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -172,49 +172,51 @@ walk:
 		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
 					      PFERR_USER_MASK|PFERR_WRITE_MASK);
-		if (real_gfn == UNMAPPED_GVA) {
+		if (unlikely(real_gfn == UNMAPPED_GVA)) {
 			present = false;
 			break;
 		}
 		real_gfn = gpa_to_gfn(real_gfn);
 
 		host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
-		if (kvm_is_error_hva(host_addr)) {
+		if (unlikely(kvm_is_error_hva(host_addr))) {
 			present = false;
 			break;
 		}
 
 		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
-		if (get_user(pte, ptep_user)) {
+		if (unlikely(get_user(pte, ptep_user))) {
 			present = false;
 			break;
 		}
 
 		trace_kvm_mmu_paging_element(pte, walker->level);
 
-		if (!is_present_gpte(pte)) {
+		if (unlikely(!is_present_gpte(pte))) {
 			present = false;
 			break;
 		}
 
-		if (is_rsvd_bits_set(&vcpu->arch.mmu, pte, walker->level)) {
+		if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
+					      walker->level))) {
 			rsvd_fault = true;
 			break;
 		}
 
-		if (write_fault && !is_writable_pte(pte))
-			if (user_fault || is_write_protection(vcpu))
-				eperm = true;
+		if (unlikely(write_fault && !is_writable_pte(pte)
+			     && (user_fault || is_write_protection(vcpu))))
+			eperm = true;
 
-		if (user_fault && !(pte & PT_USER_MASK))
+		if (unlikely(user_fault && !(pte & PT_USER_MASK)))
 			eperm = true;
 
 #if PTTYPE == 64
-		if (fetch_fault && (pte & PT64_NX_MASK))
+		if (unlikely(fetch_fault && (pte & PT64_NX_MASK)))
 			eperm = true;
 #endif
 
-		if (!eperm && !rsvd_fault && !(pte & PT_ACCESSED_MASK)) {
+		if (!eperm && !rsvd_fault
+		    && unlikely(!(pte & PT_ACCESSED_MASK))) {
 			int ret;
 			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
 						       sizeof(pte));
@@ -270,10 +272,10 @@ walk:
 		--walker->level;
 	}
 
-	if (!present || eperm || rsvd_fault)
+	if (unlikely(!present || eperm || rsvd_fault))
 		goto error;
 
-	if (write_fault && !is_dirty_gpte(pte)) {
+	if (write_fault && unlikely(!is_dirty_gpte(pte))) {
 		int ret;
 		trace_kvm_mmu_set_dirty_bit(table_gfn, index,
 					    sizeof(pte));
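A side note on the accessed-bit hunk (my reading, not stated in the
commit message): the hint wraps only the clause with a real statistical
bias. eperm and rsvd_fault are ordinary locals computed just above,
while the accessed bit is almost always already set on a PTE the walker
revisits, so only that test is annotated. The same idiom in isolation,
with illustrative names (ACCESSED_BIT and set_accessed() are not from
the patch):

#include <stdbool.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

#define ACCESSED_BIT (1UL << 5) /* x86 PTE accessed bit */

/* Hypothetical slow path: set the accessed bit in place
 * (a guest-memory write-back in the real walker). */
static void set_accessed(unsigned long *pte)
{
        *pte |= ACCESSED_BIT;
}

void maybe_mark_accessed(unsigned long *pte, bool eperm, bool rsvd_fault)
{
        /* Only the biased clause carries the hint; short-circuit
         * evaluation still skips it when a fault is already pending,
         * and the hint marks the whole guarded block as cold. */
        if (!eperm && !rsvd_fault && unlikely(!(*pte & ACCESSED_BIT)))
                set_accessed(pte);
}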