author	David Rientjes <rientjes@google.com>	2012-10-08 16:33:24 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 16:22:56 +0900
commit	957f822a0ab95e88b146638bad6209bbc315bedd (patch)
tree	2e1336ddc1c574f54d582c6b74dcc1d1230482f8
parent	a0c5e813f087dffc0d9b173d2e7d3328b1482fd5 (diff)
mm, numa: reclaim from all nodes within reclaim distance
RECLAIM_DISTANCE represents the distance between nodes at which it is deemed too costly to allocate from; it's preferred to try to reclaim from a local zone before falling back to allocating on a remote node at such a distance.

To do this, zone_reclaim_mode is set if the distance between any two nodes on the system is greater than this distance.  This, however, ends up causing the page allocator to reclaim from every zone regardless of its affinity.

What we really want is to reclaim only from zones that are closer than RECLAIM_DISTANCE.  This patch adds a nodemask to each node that represents the set of nodes that are within this distance.  During the zone iteration, if the local node's bit is set in the nodemask of the zone's node, then reclaim is attempted; otherwise, the zone is skipped.

[akpm@linux-foundation.org: fix CONFIG_NUMA=n build]
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
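To make the new machinery concrete before the diff: the sketch below is a hypothetical userspace model, not kernel code.  The 4-node distance table is invented, a plain unsigned int stands in for nodemask_t, and RECLAIM_DISTANCE uses the kernel's usual default of 30; only the two loops mirror the init_zone_allows_reclaim()/zone_allows_reclaim() pair added by this patch.

#include <stdio.h>

#define NR_NODES		4
#define RECLAIM_DISTANCE	30

/* Invented SLIT-style distances: 10 = self, 20 = near, 40 = far. */
static const int dist[NR_NODES][NR_NODES] = {
	{ 10, 20, 40, 40 },
	{ 20, 10, 40, 40 },
	{ 40, 40, 10, 20 },
	{ 40, 40, 20, 10 },
};

int main(void)
{
	unsigned int reclaim_nodes[NR_NODES] = { 0 };	/* stand-in for nodemask_t */
	int nid, i;

	/*
	 * Mirrors init_zone_allows_reclaim(): for each node, set the bit
	 * of every node that lies within RECLAIM_DISTANCE of it.
	 */
	for (nid = 0; nid < NR_NODES; nid++)
		for (i = 0; i < NR_NODES; i++)
			if (dist[nid][i] <= RECLAIM_DISTANCE)
				reclaim_nodes[nid] |= 1u << i;

	/*
	 * Mirrors zone_allows_reclaim(): a zone on node i may be reclaimed
	 * on behalf of an allocation preferring node nid only if nid's bit
	 * is set in node i's mask.
	 */
	for (nid = 0; nid < NR_NODES; nid++) {
		printf("allocations preferring node %d may reclaim from:", nid);
		for (i = 0; i < NR_NODES; i++)
			if (reclaim_nodes[i] & (1u << nid))
				printf(" node %d", i);
		printf("\n");
	}
	return 0;
}

With this table, nodes {0,1} and {2,3} form two groups within RECLAIM_DISTANCE of each other, so an allocation preferring node 0 attempts reclaim in zones on nodes 0 and 1 but falls through to allocating from nodes 2 and 3 without reclaiming there.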
-rw-r--r--	include/linux/mmzone.h |  1 +
-rw-r--r--	mm/page_alloc.c        | 41 ++++++++++++++++++++++++++++++-----------
2 files changed, 31 insertions(+), 11 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d240efa8f846..a5578871d033 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -709,6 +709,7 @@ typedef struct pglist_data {
 	unsigned long node_spanned_pages; /* total size of physical page
 					     range, including holes */
 	int node_id;
+	nodemask_t reclaim_nodes;	/* Nodes allowed to reclaim from */
 	wait_queue_head_t kswapd_wait;
 	wait_queue_head_t pfmemalloc_wait;
 	struct task_struct *kswapd;	/* Protected by lock_memory_hotplug() */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dbb53866c3aa..9b8e6243a524 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1799,6 +1799,22 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
 }
 
+static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
+{
+	return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
+}
+
+static void __paginginit init_zone_allows_reclaim(int nid)
+{
+	int i;
+
+	for_each_online_node(i)
+		if (node_distance(nid, i) <= RECLAIM_DISTANCE) {
+			node_set(i, NODE_DATA(nid)->reclaim_nodes);
+			zone_reclaim_mode = 1;
+		}
+}
+
 #else	/* CONFIG_NUMA */
 
 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
@@ -1819,6 +1835,15 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
 static void zlc_clear_zones_full(struct zonelist *zonelist)
 {
 }
+
+static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
+{
+	return true;
+}
+
+static inline void init_zone_allows_reclaim(int nid)
+{
+}
 #endif	/* CONFIG_NUMA */
 
 /*
@@ -1903,7 +1928,8 @@ zonelist_scan:
 			did_zlc_setup = 1;
 		}
 
-		if (zone_reclaim_mode == 0)
+		if (zone_reclaim_mode == 0 ||
+		    !zone_allows_reclaim(preferred_zone, zone))
 			goto this_zone_full;
 
 		/*
@@ -3364,21 +3390,13 @@ static void build_zonelists(pg_data_t *pgdat)
 	j = 0;
 
 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
-		int distance = node_distance(local_node, node);
-
-		/*
-		 * If another node is sufficiently far away then it is better
-		 * to reclaim pages in a zone before going off node.
-		 */
-		if (distance > RECLAIM_DISTANCE)
-			zone_reclaim_mode = 1;
-
 		/*
 		 * We don't want to pressure a particular node.
 		 * So adding penalty to the first node in same
 		 * distance group to make it round-robin.
 		 */
-		if (distance != node_distance(local_node, prev_node))
+		if (node_distance(local_node, node) !=
+		    node_distance(local_node, prev_node))
 			node_load[node] = load;
 
 		prev_node = node;
@@ -4552,6 +4570,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 	pgdat->node_id = nid;
 	pgdat->node_start_pfn = node_start_pfn;
+	init_zone_allows_reclaim(nid);
 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
 	alloc_node_mem_map(pgdat);