author     J. Bruce Fields <bfields@citi.umich.edu>  2009-11-23 12:34:58 -0500
committer  J. Bruce Fields <bfields@citi.umich.edu>  2009-11-23 12:34:58 -0500
commit     9b8b317d58084b9a44f6f33b355c4278d9f841fb (patch)
tree       e0df89800bf4301c4017db3cdf04a2056ec1a852 /mm/vmscan.c
parent     78c210efdefe07131f91ed512a3308b15bb14e2f (diff)
parent     648f4e3e50c4793d9dbf9a09afa193631f76fa26 (diff)
Merge commit 'v2.6.32-rc8' into HEAD
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  14
1 file changed, 12 insertions, 2 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 64e438898832..777af57fd8c8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -544,6 +544,16 @@ redo:
*/
lru = LRU_UNEVICTABLE;
add_page_to_unevictable_list(page);
+ /*
+ * When racing with an mlock clearing (page is
+ * unlocked), make sure that if the other thread does
+ * not observe our setting of PG_lru and fails
+ * isolation, we see PG_mlocked cleared below and move
+ * the page back to the evictable list.
+ *
+ * The other side is TestClearPageMlocked().
+ */
+ smp_mb();
}
/*
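The comment added in the hunk above describes a store/load pairing: the putback side sets PG_lru, issues a full barrier, then re-reads PG_mlocked, while the munlock side clears PG_mlocked with an atomic read-modify-write (which implies a full barrier in the kernel) and then tries to isolate the page from the LRU. Below is a minimal user-space sketch of that pattern, assuming C11 atomics and pthreads in place of the kernel's page flags, smp_mb() and TestClearPageMlocked(); all names here are illustrative stand-ins, not kernel APIs.

/*
 * Sketch of the barrier pairing: with a full fence between the store
 * and the load on each side, at least one side must observe the
 * other's store, so the page cannot be left stranded unevictable.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool pg_lru;      /* stand-in for PG_lru */
static atomic_bool pg_mlocked;  /* stand-in for PG_mlocked */
static atomic_int  noticed;     /* how many sides saw the other's update */

/* putback side: set "PG_lru", full fence, re-check "PG_mlocked" */
static void *putback_side(void *unused)
{
	(void)unused;
	atomic_store_explicit(&pg_lru, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);        /* models smp_mb() */
	if (!atomic_load_explicit(&pg_mlocked, memory_order_relaxed))
		atomic_fetch_add(&noticed, 1);  /* would move page back to evictable list */
	return NULL;
}

/* munlock side: clear "PG_mlocked", then check whether "PG_lru" is visible */
static void *munlock_side(void *unused)
{
	(void)unused;
	atomic_exchange_explicit(&pg_mlocked, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* barrier implied by the kernel RMW */
	if (atomic_load_explicit(&pg_lru, memory_order_relaxed))
		atomic_fetch_add(&noticed, 1);  /* would isolate and rescue the page */
	return NULL;
}

int main(void)
{
	for (int i = 0; i < 100000; i++) {
		pthread_t a, b;

		atomic_store(&pg_lru, false);
		atomic_store(&pg_mlocked, true);
		atomic_store(&noticed, 0);

		pthread_create(&a, NULL, putback_side, NULL);
		pthread_create(&b, NULL, munlock_side, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);

		if (atomic_load(&noticed) == 0) {
			printf("iteration %d: page would be stranded unevictable\n", i);
			return 1;
		}
	}
	printf("ok: at least one side always saw the other's update\n");
	return 0;
}

Without one of the two fences this degenerates into the classic store-buffering outcome, where both threads read the stale flag and the mlock-cleared page stays on the unevictable list.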
@@ -1088,7 +1098,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
int lumpy_reclaim = 0;
while (unlikely(too_many_isolated(zone, file, sc))) {
- congestion_wait(WRITE, HZ/10);
+ congestion_wait(BLK_RW_ASYNC, HZ/10);
/* We are about to die and free our memory. Return now. */
if (fatal_signal_pending(current))
@@ -1356,7 +1366,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
* IO, plus JVM can create lots of anon VM_EXEC pages,
* so we ignore them here.
*/
- if ((vm_flags & VM_EXEC) && !PageAnon(page)) {
+ if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
list_add(&page->lru, &l_active);
continue;
}
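The last hunk narrows the VM_EXEC protection in shrink_active_list() from "any non-anonymous page" to "file-cache pages only". The sketch below illustrates the behavioural difference, assuming page_is_file_cache() is essentially !PageSwapBacked() in this kernel; the struct and helper names are simplified stand-ins, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the relevant page flags. */
struct fake_page {
	bool anon;        /* PageAnon()       */
	bool swap_backed; /* PageSwapBacked() */
};

/* Old filter: keep any executable page that is not anonymous. */
static bool protect_old(const struct fake_page *p, bool vm_exec)
{
	return vm_exec && !p->anon;
}

/* New filter: keep only executable file-cache pages. */
static bool protect_new(const struct fake_page *p, bool vm_exec)
{
	return vm_exec && !p->swap_backed;
}

int main(void)
{
	/* An executable shmem/tmpfs page: not anonymous, but swap-backed. */
	struct fake_page shmem_text = { .anon = false, .swap_backed = true };

	printf("old filter protects shmem text: %d\n", protect_old(&shmem_text, true));
	printf("new filter protects shmem text: %d\n", protect_new(&shmem_text, true));
	return 0;
}

The case that changes is exactly the shmem/tmpfs-backed executable mapping: the old test kept it on the active list, while the new test lets it age normally, matching the comment's concern about runtimes creating large numbers of such VM_EXEC pages.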