author    Mel Gorman <mgorman@techsingularity.net>    2016-07-28 15:46:20 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-07-28 16:07:41 -0700
commit    11fb998986a72aa7e997d96d63d52582a01228c5 (patch)
tree      fd9db095081c4fe2212db7de2757bfdf4645dc04 /drivers/base
parent    4b9d0fab7166c9323f06d708518a35cf3a90426c (diff)
mm: move most file-based accounting to the node
There are now a number of accounting oddities, such as mapped file pages being accounted for on the node while the total number of file pages is accounted on the zone. This can be coped with to some extent, but it's confusing, so this patch moves the relevant file-based accounting to the node. Due to throttling logic in the page allocator for reliable OOM detection, it is still necessary to track dirty and writeback pages on a per-zone basis.

[mgorman@techsingularity.net: fix NR_ZONE_WRITE_PENDING accounting]
Link: http://lkml.kernel.org/r/1468404004-5085-5-git-send-email-mgorman@techsingularity.net
Link: http://lkml.kernel.org/r/1467970510-21195-20-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
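The mechanical change in this file is swapping sum_zone_node_page_state(nid, ...) for node_page_state(pgdat, ...) on every counter that now lives on the node. As a rough illustration only, the minimal userspace sketch below (not kernel code; the struct and counter names merely mimic the kernel's, and the page counts are made up) shows why the old helper must walk every zone of a node while the new one is a single read:

/*
 * Illustrative sketch of per-zone vs. per-node accounting.
 * "zone", "pglist_data" and "nr_file_pages" here only echo the
 * kernel's names; this is plain userspace C, not kernel code.
 */
#include <stdio.h>

#define MAX_ZONES 3

struct zone {
	unsigned long nr_file_pages;	/* old scheme: one counter per zone */
};

struct pglist_data {
	struct zone zones[MAX_ZONES];
	unsigned long nr_file_pages;	/* new scheme: one counter per node */
};

/* Old style: like sum_zone_node_page_state(), sum across the node's zones. */
static unsigned long sum_zone_file_pages(struct pglist_data *pgdat)
{
	unsigned long sum = 0;

	for (int i = 0; i < MAX_ZONES; i++)
		sum += pgdat->zones[i].nr_file_pages;
	return sum;
}

/* New style: like node_page_state(), a single per-node read. */
static unsigned long node_file_pages(struct pglist_data *pgdat)
{
	return pgdat->nr_file_pages;
}

int main(void)
{
	struct pglist_data node0 = {
		.zones = { { 100 }, { 250 }, { 50 } },
		.nr_file_pages = 400,	/* kept equal to the zone sum */
	};

	printf("summed over zones: %lu\n", sum_zone_file_pages(&node0));
	printf("single node read:  %lu\n", node_file_pages(&node0));
	return 0;
}

Both calls report the same total; the point of the patch is that for counters tracked on the node, the per-zone walk is unnecessary and the per-zone breakdown no longer exists.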
Diffstat (limited to 'drivers/base')
-rw-r--r--    drivers/base/node.c    16
1 file changed, 8 insertions(+), 8 deletions(-)
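The hunk below changes node_read_meminfo(), which generates the per-node sysfs file /sys/devices/system/node/nodeN/meminfo. To observe the fields whose backing counters move, a small userspace check like this hedged sketch can print them (the sysfs path and "Node N <field>:" layout are the kernel ABI this function emits, but the parsing here is illustrative and assumes a NUMA-aware kernel with node0 present):

/* Print node 0's Dirty/Writeback/FilePages meminfo lines, i.e. the
 * output produced by the node_read_meminfo() hunk below. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/node/node0/meminfo", "r");
	char line[256];

	if (!f) {
		perror("open node0 meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "Dirty") || strstr(line, "Writeback") ||
		    strstr(line, "FilePages"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}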
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 6cd9ff43ee22..264cc214c4df 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -118,28 +118,28 @@ static ssize_t node_read_meminfo(struct device *dev,
"Node %d ShmemPmdMapped: %8lu kB\n"
#endif
,
- nid, K(sum_zone_node_page_state(nid, NR_FILE_DIRTY)),
- nid, K(sum_zone_node_page_state(nid, NR_WRITEBACK)),
- nid, K(sum_zone_node_page_state(nid, NR_FILE_PAGES)),
+ nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
+ nid, K(node_page_state(pgdat, NR_WRITEBACK)),
+ nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
nid, K(i.sharedram),
nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK) *
THREAD_SIZE / 1024,
nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
- nid, K(sum_zone_node_page_state(nid, NR_UNSTABLE_NFS)),
+ nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
- nid, K(sum_zone_node_page_state(nid, NR_WRITEBACK_TEMP)),
+ nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE) +
sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
- nid, K(sum_zone_node_page_state(nid, NR_ANON_THPS) *
+ nid, K(node_page_state(pgdat, NR_ANON_THPS) *
HPAGE_PMD_NR),
- nid, K(sum_zone_node_page_state(nid, NR_SHMEM_THPS) *
+ nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
HPAGE_PMD_NR),
- nid, K(sum_zone_node_page_state(nid, NR_SHMEM_PMDMAPPED) *
+ nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
HPAGE_PMD_NR));
#else
nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));