author     Jan Kiszka <jan.kiszka@siemens.com>	2011-12-02 18:35:24 +0100
committer  Avi Kivity <avi@redhat.com>	2011-12-27 11:22:15 +0200
commit     3d56cbdf359c953f8bfcab68aa5cf766e4480799
tree       64c81ba2658afe8b91b2e8609f5c9eadc701cc27	arch/x86/kvm/mmu.c
parent     086c9855019935854311b477b33498a6ea357ef6
KVM: MMU: Drop unused return value of kvm_mmu_remove_some_alloc_mmu_pages
freed_pages is never evaluated, so remove it, along with the return code
that kvm_mmu_remove_some_alloc_mmu_pages so far delivered to its only user.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
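
For context, a minimal standalone sketch of the same cleanup pattern (hypothetical names, not the kernel code): a helper whose return value no caller ever reads can return void, and the caller drops the dead assignment, exactly as the diff below does for kvm_mmu_remove_some_alloc_mmu_pages.

#include <stdio.h>

/*
 * Before the cleanup, the helper reported how many entries it zapped,
 * but its only caller stored that count in a variable it never read.
 * After the cleanup, the helper returns void and the dead local is gone.
 */
static void zap_oldest_page(int *pages, int *used)
{
	/* pretend to free the most recently tracked page */
	if (*used > 0)
		pages[--(*used)] = 0;
}

int main(void)
{
	int pages[4] = { 1, 1, 1, 1 };
	int used = 4;

	/* previously: freed = zap_oldest_page(pages, &used); with 'freed' unused */
	zap_oldest_page(pages, &used);
	printf("pages still in use: %d\n", used);
	return 0;
}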
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	12
1 file changed, 6 insertions, 6 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7a8e99c6dc81..2a2a9b40db19 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3890,14 +3890,14 @@ restart:
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
-					       struct list_head *invalid_list)
+static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
+						struct list_head *invalid_list)
 {
 	struct kvm_mmu_page *page;
 
 	page = container_of(kvm->arch.active_mmu_pages.prev,
 			    struct kvm_mmu_page, link);
-	return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
+	kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
 }
 
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
@@ -3912,15 +3912,15 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 	raw_spin_lock(&kvm_lock);
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		int idx, freed_pages;
+		int idx;
 		LIST_HEAD(invalid_list);
 
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
 		if (!kvm_freed && nr_to_scan > 0 &&
 		    kvm->arch.n_used_mmu_pages > 0) {
-			freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
-							  &invalid_list);
+			kvm_mmu_remove_some_alloc_mmu_pages(kvm,
+							    &invalid_list);
 			kvm_freed = kvm;
 		}
 		nr_to_scan--;