author	Avi Kivity <avi@qumranet.com>	2007-04-30 17:05:38 +0300
committer	Avi Kivity <avi@qumranet.com>	2007-07-16 12:05:38 +0300
commit	a25f7e1f8c1ff68213a63dada9d5e32dc1a0f587 (patch)
tree	f1c08d4c6f6736561a42dd4d9462fb97db9c75c3 /drivers/kvm
parent	05e0c8c344dd356b42e81bdf0d47d2b884bf49b5 (diff)
KVM: Reduce misfirings of the fork detector
The KVM MMU tries to detect forks by looking for repeated writes to a page table. If it sees a fork, it unshadows the page table so the page table copying can proceed at native speed instead of being emulated.

However, the detector also triggered on simple demand paging access patterns: a linear walk of memory would of course cause repeated writes to the same page table page, causing it to be unshadowed prematurely.

Fix by resetting the fork detector if we detect a demand fault.

Signed-off-by: Avi Kivity <avi@qumranet.com>
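For context, the heuristic works roughly as in the stand-alone sketch below. This is illustrative only, not the actual drivers/kvm/mmu.c code: the last_pt_write_gfn field and the threshold of three writes are assumptions; only last_pt_write_count appears in this patch. Each emulated write to a shadowed guest page table bumps a per-vcpu counter when it hits the same guest frame as the previous write; once the counter crosses the threshold, the page is assumed to be undergoing wholesale copying (as in fork()) and is unshadowed. The reset added by this patch clears the counter on demand faults so that a linear touch of fresh memory no longer trips the detector.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;

/* Minimal stand-in for the relevant per-vcpu fields. */
struct fork_detector {
	gfn_t last_pt_write_gfn;   /* guest frame of the last emulated pte write */
	int   last_pt_write_count; /* consecutive writes to that frame */
};

/*
 * Called for each emulated write to a shadowed guest page table.
 * Returns true once the write pattern looks like fork() copying the
 * whole table, at which point the page would be unshadowed.
 */
static bool pt_write_looks_like_fork(struct fork_detector *d, gfn_t gfn)
{
	if (gfn == d->last_pt_write_gfn) {
		++d->last_pt_write_count;
	} else {
		d->last_pt_write_gfn = gfn;
		d->last_pt_write_count = 1;
	}
	return d->last_pt_write_count >= 3; /* threshold is an assumption */
}

/*
 * What this patch adds: a demand fault is not part of a page table
 * copy, so clear the counter instead of letting a linear walk of
 * memory accumulate "repeated writes" and misfire the detector.
 */
static void demand_fault_reset(struct fork_detector *d)
{
	d->last_pt_write_count = 0;
}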
Diffstat (limited to 'drivers/kvm')
-rw-r--r--	drivers/kvm/paging_tmpl.h | 4
1 file changed, 4 insertions, 0 deletions
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 73ffbffb1097..bc64cceec039 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -421,6 +421,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
pgprintk("%s: guest page fault\n", __FUNCTION__);
inject_page_fault(vcpu, addr, walker.error_code);
FNAME(release_walker)(&walker);
+ vcpu->last_pt_write_count = 0; /* reset fork detector */
return 0;
}
@@ -442,6 +443,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
FNAME(release_walker)(&walker);
+ if (!write_pt)
+ vcpu->last_pt_write_count = 0; /* reset fork detector */
+
/*
* mmio: emulate if accessible, otherwise its a guest fault.
*/