author    Michael S. Tsirkin <mst@redhat.com>  2013-11-04 22:36:25 +0200
committer Gleb Natapov <gleb@redhat.com>       2013-11-06 09:32:31 +0200
commit    01b71917b55d28c09ade9fb8c683cf0d2aad1858 (patch)
tree      5ec0842876cd80c754c486dac1a810850d98ecd6 /arch/x86/kvm
parent    ce332f662deb545c8a4f3f58debcca26bb2e44b0 (diff)
kvm: optimize out smp_mb after srcu_read_unlock
I noticed that srcu_read_lock/unlock both have a memory barrier, so just by
moving srcu_read_unlock earlier we can get rid of one call to smp_mb(), using
smp_mb__after_srcu_read_unlock instead.

Unsurprisingly, the gain is small but measurable using the unit test
microbenchmark:

before: vmcall in the ballpark of 1410 cycles
after:  vmcall in the ballpark of 1360 cycles

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
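The helper this patch relies on was introduced by the parent commit; a minimal
sketch of how such a helper can be shaped is shown below, assuming (as the
message states) that srcu_read_unlock() already executes a full smp_mb(), so
the "barrier" after it has nothing left to do:

	/*
	 * Sketch: because srcu_read_unlock() already implies a full memory
	 * barrier, a barrier placed immediately after it can be a no-op that
	 * merely documents the ordering the caller depends on.
	 */
	static inline void smp_mb__after_srcu_read_unlock(void)
	{
		/* srcu_read_unlock() provides the smp_mb(); nothing to do. */
	}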
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/x86.c  |  7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 07c127fc2064..21ef1ba184ae 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5966,10 +5966,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->mode = IN_GUEST_MODE;
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+
/* We should set ->mode before check ->requests,
* see the comment in make_all_cpus_request.
*/
- smp_mb();
+ smp_mb__after_srcu_read_unlock();
local_irq_disable();
@@ -5979,12 +5981,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
smp_wmb();
local_irq_enable();
preempt_enable();
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = 1;
goto cancel_injection;
}
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);
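Read together, the two hunks produce roughly the flow sketched below. This is
a simplified illustration of the resulting ordering in vcpu_enter_guest(), not
the full function: the ->mode store still happens before the ->requests check,
with the SRCU unlock's implicit smp_mb() providing that ordering instead of an
explicit smp_mb().

	vcpu->mode = IN_GUEST_MODE;

	/*
	 * srcu_read_unlock() executes a full smp_mb(), so the ->mode store
	 * above is ordered before the ->requests check below.
	 */
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	smp_mb__after_srcu_read_unlock();	/* no-op; documents the ordering */

	local_irq_disable();

	if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
	    /* ... other bail-out conditions elided ... */) {
		/* bail-out path: re-take the SRCU read lock before returning */
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = 1;
		goto cancel_injection;
	}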