| author | Ingo Molnar <mingo@kernel.org> | 2014-05-22 10:28:56 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2014-05-22 10:28:56 +0200 |
| commit | 65c2ce70046c779974af8b5dfc25a0df489089b5 (patch) | |
| tree | b16f152eb62b71cf5a1edc51da865b357c989922 /mm/vmscan.c | |
| parent | 842514849a616e9b61acad65771c7afe01e651f9 (diff) | |
| parent | 4b660a7f5c8099d88d1a43d8ae138965112592c7 (diff) | |
Merge tag 'v3.15-rc6' into sched/core, to pick up the latest fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/vmscan.c')
| -rw-r--r-- | mm/vmscan.c | 18 |
1 file changed, 18 insertions, 0 deletions
```diff
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3f56c8deb3c0..32c661d66a45 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1916,6 +1916,24 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 		get_lru_size(lruvec, LRU_INACTIVE_FILE);
 
 	/*
+	 * Prevent the reclaimer from falling into the cache trap: as
+	 * cache pages start out inactive, every cache fault will tip
+	 * the scan balance towards the file LRU.  And as the file LRU
+	 * shrinks, so does the window for rotation from references.
+	 * This means we have a runaway feedback loop where a tiny
+	 * thrashing file LRU becomes infinitely more attractive than
+	 * anon pages.  Try to detect this based on file LRU size.
+	 */
+	if (global_reclaim(sc)) {
+		unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
+
+		if (unlikely(file + free <= high_wmark_pages(zone))) {
+			scan_balance = SCAN_ANON;
+			goto out;
+		}
+	}
+
+	/*
 	 * There is enough inactive page cache, do not reclaim
 	 * anything from the anonymous working set right now.
 	 */
```
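The added check boils down to a single threshold comparison: if the reclaimable file cache plus free memory can no longer cover the zone's high watermark, reclaiming file pages alone cannot restore the zone, so the scan is forced onto anonymous pages. The following is a minimal userspace sketch of that arithmetic, not the kernel code itself: the helper name `should_force_anon_scan` and the page counts in `main()` are made up for illustration, and the kernel helpers (`global_reclaim()`, `zone_page_state()`, `high_wmark_pages()`) are replaced by plain `unsigned long` inputs.

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the condition added in get_scan_count(): when the file LRU
 * plus free memory no longer reaches the zone's high watermark, the
 * real code sets scan_balance = SCAN_ANON and skips further balancing.
 * All inputs here are plain page counts; the kernel types are omitted.
 */
static bool should_force_anon_scan(unsigned long file_pages,
                                   unsigned long free_pages,
                                   unsigned long high_wmark)
{
        return file_pages + free_pages <= high_wmark;
}

int main(void)
{
        /* Hypothetical zone: tiny thrashing file LRU, little free memory. */
        unsigned long file = 512, free = 1024, high_wmark = 4096;

        if (should_force_anon_scan(file, free, high_wmark))
                printf("file+free (%lu) <= high watermark (%lu): scan anon\n",
                       file + free, high_wmark);
        else
                printf("enough file cache, keep balancing file vs. anon\n");

        return 0;
}
```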
