Diffstat (limited to 'mm')
-rw-r--r-- | mm/vmscan.c | 53
1 file changed, 45 insertions, 8 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 692807f2228b..4649929401f8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1083,6 +1083,48 @@ static int too_many_isolated(struct zone *zone, int file,
 }
 
 /*
+ * Returns true if the caller should wait to clean dirty/writeback pages.
+ *
+ * If we are direct reclaiming for contiguous pages and we do not reclaim
+ * everything in the list, try again and wait for writeback IO to complete.
+ * This will stall high-order allocations noticeably. Only do that when we
+ * really need to free the pages under high memory pressure.
+ */
+static inline bool should_reclaim_stall(unsigned long nr_taken,
+                                        unsigned long nr_freed,
+                                        int priority,
+                                        int lumpy_reclaim,
+                                        struct scan_control *sc)
+{
+        int lumpy_stall_priority;
+
+        /* kswapd should not stall on sync IO */
+        if (current_is_kswapd())
+                return false;
+
+        /* Only stall on lumpy reclaim */
+        if (!lumpy_reclaim)
+                return false;
+
+        /* If we have reclaimed everything on the isolated list, no stall */
+        if (nr_freed == nr_taken)
+                return false;
+
+        /*
+         * For high-order allocations, there are two stall thresholds.
+         * High-cost allocations stall immediately whereas lower
+         * order allocations such as stacks require the scanning
+         * priority to be much higher before stalling.
+         */
+        if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+                lumpy_stall_priority = DEF_PRIORITY;
+        else
+                lumpy_stall_priority = DEF_PRIORITY / 3;
+
+        return priority <= lumpy_stall_priority;
+}
+
+/*
  * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
  * of reclaimed pages
  */
@@ -1176,14 +1218,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 nr_scanned += nr_scan;
                 nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
 
-                /*
-                 * If we are direct reclaiming for contiguous pages and we do
-                 * not reclaim everything in the list, try again and wait
-                 * for IO to complete. This will stall high-order allocations
-                 * but that should be acceptable to the caller
-                 */
-                if (nr_freed < nr_taken && !current_is_kswapd() &&
-                    lumpy_reclaim) {
+                /* Check if we should synchronously wait for writeback */
+                if (should_reclaim_stall(nr_taken, nr_freed, priority,
+                                        lumpy_reclaim, sc)) {
                         congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                         /*
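
As an aside for readers tracing the new heuristic, the sketch below mirrors the decision logic of should_reclaim_stall() as a standalone userspace C program. The constants DEF_PRIORITY (12) and PAGE_ALLOC_COSTLY_ORDER (3) are assumed to match the kernel headers of this era, and the is_kswapd and order parameters stand in for current_is_kswapd() and sc->order; this is an illustration of the stall thresholds, not the kernel code itself.

/*
 * Standalone illustration of the should_reclaim_stall() heuristic.
 * DEF_PRIORITY and PAGE_ALLOC_COSTLY_ORDER are assumed kernel values;
 * is_kswapd and order replace current_is_kswapd() and sc->order so the
 * logic can run in userspace.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY            12      /* assumed kernel value */
#define PAGE_ALLOC_COSTLY_ORDER 3       /* assumed kernel value */

static bool should_reclaim_stall(unsigned long nr_taken,
                                 unsigned long nr_freed,
                                 int priority, bool lumpy_reclaim,
                                 int order, bool is_kswapd)
{
        int lumpy_stall_priority;

        if (is_kswapd)                  /* kswapd never stalls on sync IO */
                return false;
        if (!lumpy_reclaim)             /* only lumpy reclaim stalls */
                return false;
        if (nr_freed == nr_taken)       /* everything was reclaimed, no stall */
                return false;

        /* costly orders stall at any priority, cheap orders only when desperate */
        if (order > PAGE_ALLOC_COSTLY_ORDER)
                lumpy_stall_priority = DEF_PRIORITY;
        else
                lumpy_stall_priority = DEF_PRIORITY / 3;

        return priority <= lumpy_stall_priority;
}

int main(void)
{
        /* order-9 (hugepage-sized) allocation: stalls as soon as pages are left over */
        printf("order 9, priority 12: %d\n",
               should_reclaim_stall(32, 10, 12, true, 9, false));
        /* order-1 (kernel stack) allocation: no stall until priority falls to 4 */
        printf("order 1, priority 12: %d\n",
               should_reclaim_stall(32, 10, 12, true, 1, false));
        printf("order 1, priority 4:  %d\n",
               should_reclaim_stall(32, 10, 4, true, 1, false));
        return 0;
}

Since reclaim priority counts down from DEF_PRIORITY toward zero as pressure builds, a low-order allocation such as a kernel stack only stalls once priority has dropped to DEF_PRIORITY / 3 or below, while an allocation above PAGE_ALLOC_COSTLY_ORDER stalls on its first pass that leaves pages unreclaimed.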