author    | Gleb Natapov <gleb@redhat.com> | 2011-02-01 13:21:47 +0200
committer | Avi Kivity <avi@redhat.com>    | 2011-04-06 13:15:55 +0300
commit    | 0857b9e95c1af8bfe84630ef6747b9d4d61de4c6 (patch)
tree      | df3892f624910d2a6b210e30549a6a82a79e5474 /virt/kvm/kvm_main.c
parent    | 9e02fb963352c5ad075d80dd3e852fbee9585575 (diff)
KVM: Enable async page fault processing
If an asynchronous hva_to_pfn() is requested, call GUP with FOLL_NOWAIT to
avoid sleeping on IO. The check for hwpoison is done in the same call;
otherwise check_user_page_hwpoison() would call GUP again and put the
vcpu to sleep.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
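As context for the change below, here is a minimal sketch of how an asynchronous caller might classify the result of the FOLL_NOWAIT lookup. Only get_user_page_nowait() and the -EHWPOISON convention come from this patch; the wrapper name try_async_gup() and the surrounding error handling are illustrative assumptions, not code from the kernel tree.

/*
 * Illustrative sketch (not part of the patch): classify the outcome of
 * a nowait GUP done on behalf of an async page fault. try_async_gup()
 * is a hypothetical helper; get_user_page_nowait() is the function
 * added by the diff below.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static int try_async_gup(unsigned long addr, int write, struct page **page)
{
	int npages;

	down_read(&current->mm->mmap_sem);
	/* FOLL_NOWAIT inside: never sleeps waiting for IO */
	npages = get_user_page_nowait(current, current->mm, addr,
				      write, page);
	up_read(&current->mm->mmap_sem);

	if (npages == 1)
		return 0;		/* page resident, *page pinned */
	if (npages == -EHWPOISON)
		return -EHWPOISON;	/* poisoned page, take hwpoison path */
	/* not resident and would have blocked: complete via async PF */
	return -EAGAIN;
}

Because FOLL_HWPOISON is passed in the same call, the poison case is reported directly as -EHWPOISON instead of requiring a second, possibly sleeping, check_user_page_hwpoison() pass.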
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r-- | virt/kvm/kvm_main.c | 23
1 file changed, 21 insertions, 2 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 556e3efe5325..6330653480e4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1037,6 +1037,17 @@ static pfn_t get_fault_pfn(void)
 	return fault_pfn;
 }
 
+int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
+	unsigned long start, int write, struct page **page)
+{
+	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
+
+	if (write)
+		flags |= FOLL_WRITE;
+
+	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
+}
+
 static inline int check_user_page_hwpoison(unsigned long addr)
 {
 	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
@@ -1070,7 +1081,14 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
 		if (writable)
 			*writable = write_fault;
 
-		npages = get_user_pages_fast(addr, 1, write_fault, page);
+		if (async) {
+			down_read(&current->mm->mmap_sem);
+			npages = get_user_page_nowait(current, current->mm,
+						     addr, write_fault, page);
+			up_read(&current->mm->mmap_sem);
+		} else
+			npages = get_user_pages_fast(addr, 1, write_fault,
+						     page);
 
 		/* map read fault as writable if possible */
 		if (unlikely(!write_fault) && npages == 1) {
@@ -1093,7 +1111,8 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
 			return get_fault_pfn();
 
 		down_read(&current->mm->mmap_sem);
-		if (check_user_page_hwpoison(addr)) {
+		if (npages == -EHWPOISON ||
+			(!async && check_user_page_hwpoison(addr))) {
 			up_read(&current->mm->mmap_sem);
 			get_page(hwpoison_page);
 			return page_to_pfn(hwpoison_page);