From 096a7cf44712ab531101bb4689f75f7fcd9b9f18 Mon Sep 17 00:00:00 2001
From: Ying Han
Date: Tue, 29 May 2012 15:06:25 -0700
Subject: mm: rename is_mlocked_vma() to mlocked_vma_newpage()

Andrew pointed out that is_mlocked_vma() is misnamed: a function with a
name like that would be expected to return bool and have no side effects.

Since it is called on the fault path for a new page, rename it in this
patch.

Signed-off-by: Ying Han
Reviewed-by: Rik van Riel
Acked-by: KOSAKI Motohiro
Acked-by: KAMEZAWA Hiroyuki
Reviewed-by: Minchan Kim
[akpm@linux-foundation.org: s/mlock_vma_newpage/mlocked_vma_newpage/, per Minchan]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/internal.h | 5 +++--
 mm/vmscan.c   | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index aee4761cf9a9..8b0fc8da8028 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -164,7 +164,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
  * to determine if it's being mapped into a LOCKED vma.
  * If so, mark page as mlocked.
  */
-static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
+static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
+				      struct page *page)
 {
 	VM_BUG_ON(PageLRU(page));
 
@@ -222,7 +223,7 @@ extern unsigned long vma_address(struct page *page,
 				 struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
+static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8fffc65a84de..44f04364a304 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3321,7 +3321,7 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 	if (mapping_unevictable(page_mapping(page)))
 		return 0;
 
-	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
+	if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
 		return 0;
 
 	return 1;
--
cgit v1.2.3
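
For context, here is a minimal user-space sketch (not kernel code; the struct and
function names below are hypothetical) of the naming convention the changelog
invokes: a helper named is_*() reads as a pure predicate, whereas a helper like
mlocked_vma_newpage(), which both tests the vma and marks the page, has a side
effect and so deserves a non-predicate name.

/*
 * Illustrative sketch only.  "struct vma" and "struct page" here are
 * stand-ins for the kernel's vm_area_struct and struct page.
 */
#include <stdbool.h>
#include <stdio.h>

struct vma  { bool locked; };	/* stands in for vm_flags & VM_LOCKED */
struct page { bool mlocked; };	/* stands in for PageMlocked() state */

/* Pure predicate: bool result, no side effects. */
static bool is_locked_vma(const struct vma *vma)
{
	return vma->locked;
}

/*
 * Test-and-mark helper: if the vma is locked, mark the new page.
 * Because it modifies the page, a predicate-style is_*() name
 * would be misleading.
 */
static int mark_newpage_if_locked(struct vma *vma, struct page *page)
{
	if (!is_locked_vma(vma))
		return 0;
	page->mlocked = true;	/* the side effect */
	return 1;
}

int main(void)
{
	struct vma vma = { .locked = true };
	struct page page = { .mlocked = false };

	printf("marked=%d mlocked=%d\n",
	       mark_newpage_if_locked(&vma, &page), page.mlocked);
	return 0;
}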