| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-24 09:07:03 -0700 |
|-----------|------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-24 09:07:03 -0700 |
| commit    | 5fabc487c96819dd12ddb9414835d170fd9cd6d5 (patch) | |
| tree      | 01532d492e5074b0d3add29bf92ebf9a9d161e9e /virt/kvm/kvm_main.c | |
| parent    | c61264f98c1a974ee6f545f61a4ab33b141d6bda (diff) | |
| parent    | 3f68b0318bbbd61bf08478ab99a149f0d9e5156e (diff) | |
Merge branch 'kvm-updates/3.1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/3.1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (143 commits)
KVM: IOMMU: Disable device assignment without interrupt remapping
KVM: MMU: trace mmio page fault
KVM: MMU: mmio page fault support
KVM: MMU: reorganize struct kvm_shadow_walk_iterator
KVM: MMU: lockless walking shadow page table
KVM: MMU: do not need atomicly to set/clear spte
KVM: MMU: introduce the rules to modify shadow page table
KVM: MMU: abstract some functions to handle fault pfn
KVM: MMU: filter out the mmio pfn from the fault pfn
KVM: MMU: remove bypass_guest_pf
KVM: MMU: split kvm_mmu_free_page
KVM: MMU: count used shadow pages on prepareing path
KVM: MMU: rename 'pt_write' to 'emulate'
KVM: MMU: cleanup for FNAME(fetch)
KVM: MMU: optimize to handle dirty bit
KVM: MMU: cache mmio info on page fault path
KVM: x86: introduce vcpu_mmio_gva_to_gpa to cleanup the code
KVM: MMU: do not update slot bitmap if spte is nonpresent
KVM: MMU: fix walking shadow page table
KVM guest: KVM Steal time registration
...
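The MMU entries above (mmio page fault support, filtering mmio pfns out of fault pfns) rest on a distinction that the kvm_main.c diff below makes explicit: a pfn that hit no memslot is an mmio candidate, while a hwpoisoned page or the global fault page is a genuine error. Below is a minimal sketch of how a fault path might branch on the two new helpers, is_noslot_pfn() and is_invalid_pfn(); the handle_*() functions are hypothetical stand-ins for the arch MMU code, not part of this series:

```c
#include <linux/kvm_host.h>

/* Hypothetical helpers standing in for the arch-specific MMU code. */
static int handle_mmio_fault(struct kvm_vcpu *vcpu, gva_t gva);
static int handle_ram_fault(struct kvm_vcpu *vcpu, gva_t gva, pfn_t pfn);

static int classify_fault_pfn(struct kvm_vcpu *vcpu, gva_t gva, pfn_t pfn)
{
	/* The gfn hit no memslot: candidate for mmio emulation. */
	if (is_noslot_pfn(pfn))
		return handle_mmio_fault(vcpu, gva);

	/* Hwpoisoned page or the global fault page: a real error. */
	if (is_invalid_pfn(pfn))
		return -EFAULT;

	/* Otherwise the gfn is backed by ordinary guest RAM. */
	return handle_ram_fault(vcpu, gva, pfn);
}
```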
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r-- | virt/kvm/kvm_main.c | 110 |
1 file changed, 100 insertions, 10 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 96ebc0679415..aefdda390f5e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -84,6 +84,10 @@ struct dentry *kvm_debugfs_dir;
 
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 			   unsigned long arg);
+#ifdef CONFIG_COMPAT
+static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
+				  unsigned long arg);
+#endif
 static int hardware_enable_all(void);
 static void hardware_disable_all(void);
 
@@ -97,8 +101,8 @@ static bool largepages_enabled = true;
 static struct page *hwpoison_page;
 static pfn_t hwpoison_pfn;
 
-static struct page *fault_page;
-static pfn_t fault_pfn;
+struct page *fault_page;
+pfn_t fault_pfn;
 
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
@@ -827,6 +831,13 @@ skip_lpage:
 
 	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
 
+	/*
+	 * If the new memory slot is created, we need to clear all
+	 * mmio sptes.
+	 */
+	if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
+		kvm_arch_flush_shadow(kvm);
+
 	kvm_free_physmem_slot(&old, &new);
 	kfree(old_memslots);
 
@@ -927,6 +938,18 @@ int is_fault_pfn(pfn_t pfn)
 }
 EXPORT_SYMBOL_GPL(is_fault_pfn);
 
+int is_noslot_pfn(pfn_t pfn)
+{
+	return pfn == bad_pfn;
+}
+EXPORT_SYMBOL_GPL(is_noslot_pfn);
+
+int is_invalid_pfn(pfn_t pfn)
+{
+	return pfn == hwpoison_pfn || pfn == fault_pfn;
+}
+EXPORT_SYMBOL_GPL(is_invalid_pfn);
+
 static inline unsigned long bad_hva(void)
 {
 	return PAGE_OFFSET;
@@ -1345,7 +1368,7 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
 	addr = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
-	r = copy_to_user((void __user *)addr + offset, data, len);
+	r = __copy_to_user((void __user *)addr + offset, data, len);
 	if (r)
 		return -EFAULT;
 	mark_page_dirty(kvm, gfn);
@@ -1405,7 +1428,7 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
 
-	r = copy_to_user((void __user *)ghc->hva, data, len);
+	r = __copy_to_user((void __user *)ghc->hva, data, len);
 	if (r)
 		return -EFAULT;
 	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
@@ -1414,6 +1437,26 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 }
 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
 
+int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			   void *data, unsigned long len)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	int r;
+
+	if (slots->generation != ghc->generation)
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+
+	if (kvm_is_error_hva(ghc->hva))
+		return -EFAULT;
+
+	r = __copy_from_user(data, (void __user *)ghc->hva, len);
+	if (r)
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
+
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {
 	return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
@@ -1586,7 +1629,9 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
 static struct file_operations kvm_vcpu_fops = {
 	.release        = kvm_vcpu_release,
 	.unlocked_ioctl = kvm_vcpu_ioctl,
-	.compat_ioctl   = kvm_vcpu_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = kvm_vcpu_compat_ioctl,
+#endif
 	.mmap           = kvm_vcpu_mmap,
 	.llseek		= noop_llseek,
 };
@@ -1615,18 +1660,18 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 
 	r = kvm_arch_vcpu_setup(vcpu);
 	if (r)
-		return r;
+		goto vcpu_destroy;
 
 	mutex_lock(&kvm->lock);
 	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
 		r = -EINVAL;
-		goto vcpu_destroy;
+		goto unlock_vcpu_destroy;
 	}
 
 	kvm_for_each_vcpu(r, v, kvm)
 		if (v->vcpu_id == id) {
 			r = -EEXIST;
-			goto vcpu_destroy;
+			goto unlock_vcpu_destroy;
 		}
 
 	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
@@ -1636,7 +1681,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 	r = create_vcpu_fd(vcpu);
 	if (r < 0) {
 		kvm_put_kvm(kvm);
-		goto vcpu_destroy;
+		goto unlock_vcpu_destroy;
 	}
 
 	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
@@ -1650,8 +1695,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 	mutex_unlock(&kvm->lock);
 	return r;
 
-vcpu_destroy:
+unlock_vcpu_destroy:
 	mutex_unlock(&kvm->lock);
+vcpu_destroy:
 	kvm_arch_vcpu_destroy(vcpu);
 	return r;
 }
@@ -1874,6 +1920,50 @@ out:
 	return r;
 }
 
+#ifdef CONFIG_COMPAT
+static long kvm_vcpu_compat_ioctl(struct file *filp,
+				  unsigned int ioctl, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+	void __user *argp = compat_ptr(arg);
+	int r;
+
+	if (vcpu->kvm->mm != current->mm)
+		return -EIO;
+
+	switch (ioctl) {
+	case KVM_SET_SIGNAL_MASK: {
+		struct kvm_signal_mask __user *sigmask_arg = argp;
+		struct kvm_signal_mask kvm_sigmask;
+		compat_sigset_t csigset;
+		sigset_t sigset;
+
+		if (argp) {
+			r = -EFAULT;
+			if (copy_from_user(&kvm_sigmask, argp,
+					   sizeof kvm_sigmask))
+				goto out;
+			r = -EINVAL;
+			if (kvm_sigmask.len != sizeof csigset)
+				goto out;
+			r = -EFAULT;
+			if (copy_from_user(&csigset, sigmask_arg->sigset,
+					   sizeof csigset))
+				goto out;
+		}
+		sigset_from_compat(&sigset, &csigset);
+		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+		break;
+	}
+	default:
+		r = kvm_vcpu_ioctl(filp, ioctl, arg);
+	}
+
+out:
+	return r;
+}
+#endif
+
 static long kvm_vm_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
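The new kvm_read_guest_cached() mirrors the existing kvm_write_guest_cached(): a gfn_to_hva_cache resolves gpa to hva once, and later accesses revalidate only when the memslot generation changes, so the hot path avoids a full memslot lookup. Below is a minimal usage sketch of that pattern; struct my_record and the my_feature_*() names are illustrative placeholders, not code from this series:

```c
#include <linux/kvm_host.h>

/* Illustrative guest-memory record layout; not a KVM structure. */
struct my_record {
	u64 seq;
	u64 value;
};

/*
 * One-time setup: resolve gpa -> hva and stash the current memslot
 * generation in the cache.
 */
static int my_feature_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   gpa_t gpa)
{
	return kvm_gfn_to_hva_cache_init(kvm, ghc, gpa);
}

/*
 * Hot path: kvm_read_guest_cached() re-resolves the hva only if the
 * memslot generation changed since the cache was primed.
 */
static int my_feature_poll(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   struct my_record *rec)
{
	return kvm_read_guest_cached(kvm, ghc, rec, sizeof(*rec));
}
```

A per-vcpu record touched on frequent exits is far cheaper through a primed cache than through kvm_read_guest()/kvm_write_guest(), which walk the memslots on every call; the steal-time record registered elsewhere in this series uses the same cached access path on the write side.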