Diffstat (limited to 'mm')
-rw-r--r--	mm/rmap.c	 7
-rw-r--r--	mm/swap.c	17
-rw-r--r--	mm/vmscan.c	 5
3 files changed, 13 insertions, 16 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 6280da86b5d6..e22ceeb6e5ec 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1093,9 +1093,10 @@ void page_add_new_anon_rmap(struct page *page,
 	else
 		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
 	__page_set_anon_rmap(page, vma, address, 1);
-	if (!mlocked_vma_newpage(vma, page))
-		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
-	else
+	if (!mlocked_vma_newpage(vma, page)) {
+		SetPageActive(page);
+		lru_cache_add(page);
+	} else
 		add_page_to_unevictable_list(page);
 }
 
diff --git a/mm/swap.c b/mm/swap.c
index 6a9d0c43924a..4a1d0d2c52fa 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -494,15 +494,10 @@ EXPORT_SYMBOL(mark_page_accessed);
  * pagevec is drained. This gives a chance for the caller of __lru_cache_add()
  * have the page added to the active list using mark_page_accessed().
  */
-void __lru_cache_add(struct page *page, enum lru_list lru)
+void __lru_cache_add(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 
-	if (is_active_lru(lru))
-		SetPageActive(page);
-	else
-		ClearPageActive(page);
-
 	page_cache_get(page);
 	if (!pagevec_space(pvec))
 		__pagevec_lru_add(pvec);
@@ -512,11 +507,10 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
 EXPORT_SYMBOL(__lru_cache_add);
 
 /**
- * lru_cache_add_lru - add a page to a page list
+ * lru_cache_add - add a page to a page list
  * @page: the page to be added to the LRU.
- * @lru: the LRU list to which the page is added.
  */
-void lru_cache_add_lru(struct page *page, enum lru_list lru)
+void lru_cache_add(struct page *page)
 {
 	if (PageActive(page)) {
 		VM_BUG_ON(PageUnevictable(page));
@@ -525,7 +519,7 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
 	}
 
 	VM_BUG_ON(PageLRU(page));
-	__lru_cache_add(page, lru);
+	__lru_cache_add(page);
 }
 
 /**
@@ -745,6 +739,9 @@ void release_pages(struct page **pages, int nr, int cold)
 			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		}
 
+		/* Clear Active bit in case of parallel mark_page_accessed */
+		ClearPageActive(page);
+
 		list_add(&page->lru, &pages_to_free);
 	}
 	if (zone)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c85794399848..99b3ac7771ad 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -546,7 +546,6 @@ int remove_mapping(struct address_space *mapping, struct page *page)
 void putback_lru_page(struct page *page)
 {
 	int lru;
-	int active = !!TestClearPageActive(page);
 	int was_unevictable = PageUnevictable(page);
 
 	VM_BUG_ON(PageLRU(page));
@@ -561,8 +560,8 @@ redo:
 		 * unevictable page on [in]active list.
 		 * We know how to handle that.
 		 */
-		lru = active + page_lru_base_type(page);
-		lru_cache_add_lru(page, lru);
+		lru = page_lru_base_type(page);
+		lru_cache_add(page);
 	} else {
 		/*
 		 * Put unevictable pages directly on zone's unevictable
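A note on the calling convention this patch establishes (illustration only, not part of the commit): since `lru_cache_add()` no longer takes an `lru` argument, a caller that wants a page to start on the active list sets `PG_active` before queueing it, exactly as the `mm/rmap.c` hunk above does. The sketch below models that before/after pattern in plain user-space C with toy stand-ins for `struct page`, `SetPageActive()`, and `lru_cache_add()`; none of these definitions are the real kernel ones.

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct page; 'active' models the PG_active flag. */
struct page {
	bool active;
};

static void SetPageActive(struct page *page)
{
	page->active = true;
}

/*
 * Models the post-patch API: the target LRU list is no longer an
 * argument; it is derived from page state when the pagevec drains.
 */
static void lru_cache_add(struct page *page)
{
	printf("page queued for the %s LRU\n",
	       page->active ? "active" : "inactive");
}

int main(void)
{
	struct page page = { .active = false };

	/*
	 * Pre-patch convention (removed above):
	 *	lru_cache_add_lru(&page, LRU_ACTIVE_ANON);
	 *
	 * Post-patch convention, as in page_add_new_anon_rmap():
	 * mark the page active first, then add it.
	 */
	SetPageActive(&page);
	lru_cache_add(&page);
	return 0;
}
```

Carrying the active state in the page flag rather than in a parameter is what lets `__lru_cache_add()` drop its `SetPageActive()`/`ClearPageActive()` branch, and it is why `release_pages()` gains an explicit `ClearPageActive()`: per the new comment, a parallel `mark_page_accessed()` could otherwise leave `PG_active` set on a page headed for freeing.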