author	Hillf Danton <dhillf@gmail.com>	2012-03-21 16:34:03 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 17:54:57 -0700
commit	9e81130b7ce23050335b1197bb51743517b5b9d0 (patch)
tree	6a359131ad2c36ca9e50f0c96a17ef755ad7e168
parent	fcf4d8212a8f38334679e82ff14532b908b4b451 (diff)
mm: hugetlb: bail out unmapping after serving reference page
When unmapping a given VM range, we could bail out if a reference page is
supplied and is unmapped, which is a minor optimization.

Signed-off-by: Hillf Danton <dhillf@gmail.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
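For context, a minimal stand-alone C model of the control flow (not the kernel source; struct page, unmap_range() and the printf here are illustrative stand-ins for the real unmapping work): when a reference page is supplied, the loop already skips every other page in the range, so once that page has been handled the remaining iterations do nothing and the scan can stop early.

/*
 * Stand-alone model of the control flow (not kernel code): with a
 * reference page supplied, only that page is unmapped, so the scan
 * can stop as soon as it has been served.
 */
#include <stdio.h>
#include <stddef.h>

struct page { int id; };

static void unmap_range(struct page *pages, size_t n, struct page *ref_page)
{
	for (size_t i = 0; i < n; i++) {
		struct page *page = &pages[i];

		/* With a reference page, every other page is skipped. */
		if (ref_page && page != ref_page)
			continue;

		printf("unmapping page %d\n", page->id);

		/* The patch: nothing else can match, so stop scanning. */
		if (ref_page)
			break;
	}
}

int main(void)
{
	struct page pages[4] = { {0}, {1}, {2}, {3} };

	unmap_range(pages, 4, NULL);       /* unmap the whole range */
	unmap_range(pages, 4, &pages[2]);  /* unmap only page 2, then bail */
	return 0;
}

In the kernel function the same observation justifies the added break: with ref_page set, at most one pte in the range can be unmapped, so continuing the walk after serving it is wasted work.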
-rw-r--r--	mm/hugetlb.c	4
1 files changed, 4 insertions, 0 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index afe3e1ff919b..62f9fada4d6d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2280,6 +2280,10 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		if (pte_dirty(pte))
 			set_page_dirty(page);
 		list_add(&page->lru, &page_list);
+
+		/* Bail out after unmapping reference page if supplied */
+		if (ref_page)
+			break;
 	}
 	flush_tlb_range(vma, start, end);
 	spin_unlock(&mm->page_table_lock);