author     Andrew Morton <akpm@osdl.org>            2006-06-23 02:03:47 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-06-23 07:42:52 -0700
commit     bd1e22b8e0a90f9a91e4c27db14ca15773659bf7 (patch)
tree       8d32bdc39977af9dd3ba577b1fa34c0106b7f18e /mm
parent     e0a42726794f71336ff4b26084d453dd597471ce (diff)
[PATCH] initialise total_memory() earlier
Initialise total_memory earlier in boot.  Because if for some reason we run
page reclaim early in boot, we don't want total_memory to be zero when we use
it as a divisor.

And rename total_memory to vm_total_pages to avoid naming clashes with
architectures.

Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Martin Bligh <mbligh@google.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
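To see the hazard this fixes in isolation: page reclaim divides by the total
page count, so if reclaim were to run early in boot while that count is still
zero, the division would fault.  The following is an illustrative standalone
C sketch, not kernel code; the variable names mirror the patch, but the
simplified mapped_ratio() helper, the main() driver and the sample page
counts are assumptions made purely for illustration.

/* Illustrative userspace sketch of the divide-by-zero hazard; not kernel
 * code.  Names mirror the patch; the helper, driver and sample values are
 * assumptions.
 */
#include <stdio.h>

long vm_total_pages;	/* stays 0 until explicitly initialised */

/* Simplified stand-in for the calculation done in shrink_active_list(). */
static long mapped_ratio(long nr_mapped)
{
	/* If reclaim ran before vm_total_pages was set, this would divide
	 * by zero.  Moving the initialisation into build_all_zonelists(),
	 * which runs earlier in boot than the kswapd_init() initcall,
	 * closes that window.
	 */
	return (nr_mapped * 100) / vm_total_pages;
}

int main(void)
{
	vm_total_pages = 262144;	/* e.g. 1 GiB of 4 KiB pages */
	printf("mapped_ratio = %ld%%\n", mapped_ratio(65536));
	return 0;
}

With the sample values above the sketch prints "mapped_ratio = 25%"; had
vm_total_pages been left at zero, the same expression would trap instead.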
Diffstat (limited to 'mm')
-rw-r--r--   mm/page_alloc.c   6
-rw-r--r--   mm/vmscan.c       5
2 files changed, 5 insertions, 6 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5af33186a25f..71a0b2a23f5b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1725,9 +1725,9 @@ void __meminit build_all_zonelists(void)
 		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
 		/* cpuset refresh routine should be here */
 	}
-
-	printk("Built %i zonelists\n", num_online_nodes());
-
+	vm_total_pages = nr_free_pagecache_pages();
+	printk("Built %i zonelists.  Total pages: %ld\n",
+			num_online_nodes(), vm_total_pages);
 }
 
 /*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 71a02e295037..72babac71dea 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -110,7 +110,7 @@ struct shrinker {
  * From 0 .. 100.  Higher means more swappy.
  */
 int vm_swappiness = 60;
-static long total_memory;
+long vm_total_pages;	/* The total number of pages which the VM controls */
 
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
@@ -743,7 +743,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		 * how much memory
 		 * is mapped.
 		 */
-		mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+		mapped_ratio = (sc->nr_mapped * 100) / vm_total_pages;
 
 		/*
 		 * Now decide how much we really want to unmap some pages.  The
@@ -1482,7 +1482,6 @@ static int __init kswapd_init(void)
 		pgdat->kswapd = find_task_by_pid(pid);
 		read_unlock(&tasklist_lock);
 	}
-	total_memory = nr_free_pagecache_pages();
 	hotcpu_notifier(cpu_callback, 0);
 	return 0;
 }