path: root/virt/kvm/iommu.c
author		Alex Williamson <alex.williamson@redhat.com>	2012-04-27 16:54:08 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2012-05-07 08:56:35 -0700
commit		a674bcab9066a2b2541d8276f5e9ff86f50ce13e (patch)
tree		9af94ff5807b7ea08528c7a0763b67aa612519cb	/virt/kvm/iommu.c
parent		9239fabf848397ec26356b5f267c787840ba4bb7 (diff)
KVM: unmap pages from the iommu when slots are removed
commit 32f6daad4651a748a58a3ab6da0611862175722f upstream.

We've been adding new mappings, but not destroying old mappings. This can lead to a page leak as pages are pinned using get_user_pages, but only unpinned with put_page if they still exist in the memslots list on vm shutdown. A memslot that is destroyed while an iommu domain is enabled for the guest will therefore result in an elevated page reference count that is never cleared.

Additionally, without this fix, the iommu is only programmed with the first translation for a gpa. This can result in peer-to-peer errors if a mapping is destroyed and replaced by a new mapping at the same gpa as the iommu will still be pointing to the original, pinned memory address.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
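The new kvm_iommu_unmap_pages() helper only has an effect once the memslot-removal path calls it; that caller lives outside virt/kvm/iommu.c and is not shown in the diff below. As a rough, illustrative sketch only (the wrapper name example_remove_memslot and the exact npages check are assumptions for illustration, not taken from this patch), the intended call pattern is:

	/*
	 * Illustrative only: how a slot-removal path is expected to use
	 * the helper added by this patch. Not part of the iommu.c diff.
	 */
	static void example_remove_memslot(struct kvm *kvm,
					   struct kvm_memory_slot *old,
					   unsigned long npages)
	{
		/*
		 * npages == 0 means the slot is being deleted: tear down
		 * its iommu mappings now, so the pages pinned by
		 * kvm_iommu_map_pages() are released with put_page()
		 * instead of leaking, and so the iommu stops translating
		 * the old gpa range to the stale pinned memory.
		 */
		if (!npages)
			kvm_iommu_unmap_pages(kvm, old);
	}

The helper itself appears in the first hunk below: it simply forwards the slot's base_gfn and npages to kvm_iommu_put_pages(), which performs the actual iommu unmap and put_page.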
Diffstat (limited to 'virt/kvm/iommu.c')
-rw-r--r--	virt/kvm/iommu.c	12
1 files changed, 8 insertions, 4 deletions
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 62a9caf0563c..fb0f6e469bb4 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -285,6 +285,11 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
}
}
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
+}
+
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
int i, idx;
@@ -293,10 +298,9 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
- for (i = 0; i < slots->nmemslots; i++) {
- kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
- slots->memslots[i].npages);
- }
+ for (i = 0; i < slots->nmemslots; i++)
+ kvm_iommu_unmap_pages(kvm, &slots->memslots[i]);
+
srcu_read_unlock(&kvm->srcu, idx);
return 0;