| field | value | date |
|---|---|---|
| author | Johannes Weiner <jweiner@redhat.com> | 2012-01-12 17:18:15 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-12 20:13:05 -0800 |
| commit | 925b7673cce39116ce61e7a06683a4a0dad1e72a (patch) | |
| tree | 66c134db836e531e196ee3dfc23c124ff74ac827 /include/linux/mm_inline.h | |
| parent | 6290df545814990ca2663baf6e894669132d5f73 (diff) | |
mm: make per-memcg LRU lists exclusive
Now that all code that operated on the global per-zone LRU lists has been
converted to operate on per-memory cgroup LRU lists instead, there is no
reason to keep the double-LRU scheme around any longer.
The pc->lru member (the list head in struct page_cgroup) is removed, and
page->lru is linked directly to the per-memory cgroup LRU lists; this removes
two pointers from a descriptor that exists for every page frame in the system.
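For orientation, here is a simplified sketch of the data-structure change
(abbreviated field lists in kernel context, not the verbatim definitions from
this tree):

```c
/* Before: the double-LRU scheme kept a second linkage per page in the
 * per-page memcg descriptor, so every page frame paid for two list heads. */
struct page_cgroup {
	unsigned long flags;
	struct mem_cgroup *mem_cgroup;
	struct list_head lru;	/* removed by this patch: a list_head is
				 * two pointers, hence the per-page saving */
};

/* After: page->lru is the single linkage, threaded directly onto the
 * lruvec (introduced by the parent commit) of the owning memcg. */
struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
};
```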
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Ying Han <yinghan@google.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/mm_inline.h')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/linux/mm_inline.h | 21 |

1 file changed, 8 insertions(+), 13 deletions(-)
```diff
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index e6a7ffe16d31..4e3478e71926 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -22,26 +22,21 @@ static inline int page_is_file_cache(struct page *page)
 }
 
 static inline void
-__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
-		       struct list_head *head)
-{
-	list_add(&page->lru, head);
-	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
-	mem_cgroup_add_lru_list(page, l);
-}
-
-static inline void
 add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
-	__add_page_to_lru_list(zone, page, l, &zone->lruvec.lists[l]);
+	struct lruvec *lruvec;
+
+	lruvec = mem_cgroup_lru_add_list(zone, page, l);
+	list_add(&page->lru, &lruvec->lists[l]);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
 }
 
 static inline void
 del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
+	mem_cgroup_lru_del_list(page, l);
 	list_del(&page->lru);
 	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
-	mem_cgroup_del_lru_list(page, l);
 }
 
 /**
@@ -64,7 +59,6 @@ del_page_from_lru(struct zone *zone, struct page *page)
 {
 	enum lru_list l;
 
-	list_del(&page->lru);
 	if (PageUnevictable(page)) {
 		__ClearPageUnevictable(page);
 		l = LRU_UNEVICTABLE;
@@ -75,8 +69,9 @@ del_page_from_lru(struct zone *zone, struct page *page)
 			l += LRU_ACTIVE;
 		}
 	}
+	mem_cgroup_lru_del_list(page, l);
+	list_del(&page->lru);
 	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
-	mem_cgroup_del_lru_list(page, l);
 }
```
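As a usage sketch: callers keep the same per-zone locking discipline of this
kernel era, and the memcg hook now hands back the lruvec so the page is linked
exactly once. This is illustrative caller code, not part of the patch;
example_putback is a hypothetical helper:

```c
/* Hypothetical caller: link a page that is currently on no LRU list. */
static void example_putback(struct zone *zone, struct page *page)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lru_lock, flags);
	/* mem_cgroup_lru_add_list() selects the memcg's (or root's) lruvec
	 * and returns it; add_page_to_lru_list() then does one list_add(). */
	add_page_to_lru_list(zone, page, LRU_INACTIVE_ANON);
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}
```

Note the reordering on the removal side: mem_cgroup_lru_del_list() now runs
before list_del(), i.e. the memcg hook sees the page while it is still linked
into the (now exclusive) lruvec.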