| author | Felipe Balbi <balbi@ti.com> | 2013-12-23 11:22:46 -0600 |
|---|---|---|
| committer | Felipe Balbi <balbi@ti.com> | 2013-12-23 11:22:46 -0600 |
| commit | e90b8417af0d01cf8c64da6937c914c89ccf6dc1 (patch) | |
| tree | cbc5e3b975b2efbb786e12b91714f8c3c3979316 /mm/page_alloc.c | |
| parent | 845c071b7853c0046693022f4e95c9cdd043e2db (diff) | |
| parent | 413541dd66d51f791a0b169d9b9014e4f56be13c (diff) | |
Merge tag 'v3.13-rc5' into next
Linux 3.13-rc5
* tag 'v3.13-rc5': (231 commits)
Linux 3.13-rc5
aio: clean up and fix aio_setup_ring page mapping
aio/migratepages: make aio migrate pages sane
aio: fix kioctx leak introduced by "aio: Fix a trinity splat"
Don't set the INITRD_COMPRESS environment variable automatically
mm: fix build of split ptlock code
pstore: Don't allow high traffic options on fragile devices
mm: do not allocate page->ptl dynamically, if spinlock_t fits to long
mm: page_alloc: revert NUMA aspect of fair allocation policy
Revert "mm: page_alloc: exclude unreclaimable allocations from zone fairness policy"
mm: Fix NULL pointer dereference in madvise(MADV_WILLNEED) support
qla2xxx: Fix scsi_host leak on qlt_lport_register callback failure
target: Remove extra percpu_ref_init
arm64: ptrace: avoid using HW_BREAKPOINT_EMPTY for disabled events
ARC: Allow conditional multiple inclusion of uapi/asm/unistd.h
target/file: Update hw_max_sectors based on current block_size
iser-target: Move INIT_WORK setup into isert_create_device_ib_res
iscsi-target: Fix incorrect np->np_thread NULL assignment
mm/hugetlb: check for pte NULL pointer in __page_check_address()
fix build with make 3.80
...
Conflicts:
drivers/usb/phy/Kconfig
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 19 |
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 580a5f075ed0..5248fe070aa4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1816,7 +1816,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
 
 static bool zone_local(struct zone *local_zone, struct zone *zone)
 {
-	return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE;
+	return local_zone->node == zone->node;
 }
 
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
@@ -1913,18 +1913,17 @@ zonelist_scan:
		 * page was allocated in should have no effect on the
		 * time the page has in memory before being reclaimed.
		 *
-		 * When zone_reclaim_mode is enabled, try to stay in
-		 * local zones in the fastpath. If that fails, the
-		 * slowpath is entered, which will do another pass
-		 * starting with the local zones, but ultimately fall
-		 * back to remote zones that do not partake in the
-		 * fairness round-robin cycle of this zonelist.
+		 * Try to stay in local zones in the fastpath. If
+		 * that fails, the slowpath is entered, which will do
+		 * another pass starting with the local zones, but
+		 * ultimately fall back to remote zones that do not
+		 * partake in the fairness round-robin cycle of this
+		 * zonelist.
		 */
		if (alloc_flags & ALLOC_WMARK_LOW) {
			if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
				continue;
-			if (zone_reclaim_mode &&
-			    !zone_local(preferred_zone, zone))
+			if (!zone_local(preferred_zone, zone))
				continue;
		}
		/*
@@ -2390,7 +2389,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
		 * thrash fairness information for zones that are not
		 * actually part of this zonelist's round-robin cycle.
		 */
-		if (zone_reclaim_mode && !zone_local(preferred_zone, zone))
+		if (!zone_local(preferred_zone, zone))
			continue;
		mod_zone_page_state(zone, NR_ALLOC_BATCH,
				    high_wmark_pages(zone) -
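The net effect of the hunks above is that the fair-allocation fastpath no longer consults zone_reclaim_mode, and zone_local() now simply means "same NUMA node as the preferred zone". The following is a minimal user-space C sketch of that post-merge fastpath filter; the struct fields, helper names, and batch values are simplified stand-ins chosen for illustration, not the real mm/page_alloc.c definitions.

```c
/*
 * Sketch only: mimics the ALLOC_WMARK_LOW fastpath check as it reads
 * after this merge. A zone is skipped when its fairness batch is
 * exhausted, or when it is not on the preferred zone's node.
 */
#include <stdbool.h>
#include <stdio.h>

struct zone {
	int node;          /* NUMA node the zone belongs to */
	long alloc_batch;  /* stand-in for the NR_ALLOC_BATCH counter */
};

/* After the merge: "local" means "same node as the preferred zone". */
static bool zone_local(const struct zone *preferred, const struct zone *z)
{
	return preferred->node == z->node;
}

/* Fastpath filter: skip exhausted or remote zones. */
static bool fastpath_skips(const struct zone *preferred, const struct zone *z)
{
	if (z->alloc_batch <= 0)
		return true;   /* fairness batch used up */
	if (!zone_local(preferred, z))
		return true;   /* remote zones are left to the slowpath */
	return false;
}

int main(void)
{
	struct zone preferred = { .node = 0, .alloc_batch = 32 };
	struct zone remote    = { .node = 1, .alloc_batch = 32 };

	printf("skip preferred zone: %d\n", fastpath_skips(&preferred, &preferred)); /* 0 */
	printf("skip remote zone:    %d\n", fastpath_skips(&preferred, &remote));    /* 1 */
	return 0;
}
```

Run as-is, the sketch reports that the preferred zone passes the fastpath while the remote zone is deferred to the slowpath, mirroring the round-robin fairness behaviour described in the updated comment block.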