Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  26
-rw-r--r--  mm/oom_kill.c     4
-rw-r--r--  mm/sparse.c      10
-rw-r--r--  mm/vmstat.c       1
4 files changed, 34 insertions(+), 7 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9b648bd63451..2e0bfc93484b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -533,6 +533,9 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct mem_cgroup_per_zone *mz;
+ if (mem_cgroup_subsys.disabled)
+ return 0;
+
/*
* Should page_cgroup's go to their own slab?
* One could optimize the performance of the charging routine
@@ -665,6 +668,9 @@ void mem_cgroup_uncharge_page(struct page *page)
struct mem_cgroup_per_zone *mz;
unsigned long flags;
+ if (mem_cgroup_subsys.disabled)
+ return;
+
/*
* Check if our page_cgroup is valid
*/
@@ -705,6 +711,9 @@ int mem_cgroup_prepare_migration(struct page *page)
{
struct page_cgroup *pc;
+ if (mem_cgroup_subsys.disabled)
+ return 0;
+
lock_page_cgroup(page);
pc = page_get_page_cgroup(page);
if (pc)
@@ -803,6 +812,9 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
int ret = -EBUSY;
int node, zid;
+ if (mem_cgroup_subsys.disabled)
+ return 0;
+
css_get(&mem->css);
/*
* page reclaim code (kswapd etc..) will move pages between
@@ -966,7 +978,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
struct mem_cgroup_per_node *pn;
struct mem_cgroup_per_zone *mz;
- int zone;
+ int zone, tmp = node;
/*
* This routine is called against possible nodes.
* But it's BUG to call kmalloc() against offline node.
@@ -975,10 +987,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
* never be onlined. It's better to use memory hotplug callback
* function.
*/
- if (node_state(node, N_HIGH_MEMORY))
- pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
- else
- pn = kmalloc(sizeof(*pn), GFP_KERNEL);
+ if (!node_state(node, N_NORMAL_MEMORY))
+ tmp = -1;
+ pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
if (!pn)
return 1;
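Read in isolation, the allocation change above says: if the target node has no normal (kmalloc-able) memory, hand kmalloc_node() the node id -1, meaning "any node", rather than keeping a separate kmalloc() branch. A minimal in-kernel sketch of that fallback; alloc_pernode_sketch() is purely illustrative and not the real alloc_mem_cgroup_per_zone_info():

/* Sketch only: the "-1 means any node" fallback used in the hunk above. */
#include <linux/slab.h>
#include <linux/nodemask.h>

static void *alloc_pernode_sketch(size_t size, int node)
{
	int tmp = node;

	/*
	 * Do not point kmalloc_node() at a node without normal memory
	 * (memoryless or highmem-only); fall back to "any node" instead.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;

	return kmalloc_node(size, GFP_KERNEL, tmp);
}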
@@ -1053,6 +1064,8 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
static int mem_cgroup_populate(struct cgroup_subsys *ss,
struct cgroup *cont)
{
+ if (mem_cgroup_subsys.disabled)
+ return 0;
return cgroup_add_files(cont, ss, mem_cgroup_files,
ARRAY_SIZE(mem_cgroup_files));
}
@@ -1065,6 +1078,9 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct mm_struct *mm;
struct mem_cgroup *mem, *old_mem;
+ if (mem_cgroup_subsys.disabled)
+ return;
+
mm = get_task_mm(p);
if (mm == NULL)
return;
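Taken together, the memcontrol.c hunks above add the same guard to every externally reachable entry point of the memory controller: if the subsystem was disabled at boot (e.g. via the cgroup_disable= parameter), charge-style functions return 0 immediately and void ones simply return, so a disabled controller skips the per-page charging work entirely. A minimal sketch of the guard; mem_cgroup_charge_sketch() is illustrative, while mem_cgroup_subsys stands for the cgroup_subsys instance this file actually declares:

/* Sketch of the early-return guard added to each entry point above. */
#include <linux/cgroup.h>

extern struct cgroup_subsys mem_cgroup_subsys;

static int mem_cgroup_charge_sketch(void)
{
	/* Memory controller disabled: report success without charging. */
	if (mem_cgroup_subsys.disabled)
		return 0;

	/* ... the normal charging path runs only when enabled ... */
	return 0;
}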
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f255eda693b0..beb592fe9389 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -423,7 +423,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
struct task_struct *p;
cgroup_lock();
- rcu_read_lock();
+ read_lock(&tasklist_lock);
retry:
p = select_bad_process(&points, mem);
if (PTR_ERR(p) == -1UL)
@@ -436,7 +436,7 @@ retry:
"Memory cgroup out of memory"))
goto retry;
out:
- rcu_read_unlock();
+ read_unlock(&tasklist_lock);
cgroup_unlock();
}
#endif
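In the oom_kill.c change, the memcg OOM path now walks the task list under read_lock(&tasklist_lock), mirroring the locking used by the global OOM killer, instead of relying on rcu_read_lock() alone. A stripped-down sketch of that locking shape, where pick_victim() is a hypothetical stand-in for select_bad_process():

/* Sketch of the tasklist_lock read-side section used above. */
#include <linux/sched.h>
#include <linux/spinlock.h>

static struct task_struct *pick_victim(void)
{
	/* hypothetical stand-in for select_bad_process(&points, mem) */
	return NULL;
}

static void oom_walk_sketch(void)
{
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = pick_victim();
	if (p) {
		/* ... report and signal the chosen task ... */
	}
	read_unlock(&tasklist_lock);
}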
diff --git a/mm/sparse.c b/mm/sparse.c
index f6a43c09c322..98d6b39c3472 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -149,8 +149,18 @@ static inline int sparse_early_nid(struct mem_section *section)
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
+ unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
unsigned long pfn;
+ /*
+ * Sanity checks - do not allow an architecture to pass
+ * in larger pfns than the maximum scope of sparsemem:
+ */
+ if (start >= max_arch_pfn)
+ return;
+ if (end >= max_arch_pfn)
+ end = max_arch_pfn;
+
start &= PAGE_SECTION_MASK;
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
unsigned long section = pfn_to_section_nr(pfn);
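The sparse.c hunk clamps the registered range against the largest pfn sparsemem can describe, 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT): a range that starts beyond that limit is ignored, and one that merely ends beyond it is truncated. The same clamp in isolation, with example values standing in for the architecture-defined MAX_PHYSMEM_BITS and PAGE_SHIFT:

/* Sketch of the pfn clamp above; the two constants are examples only. */
#define EXAMPLE_MAX_PHYSMEM_BITS	44
#define EXAMPLE_PAGE_SHIFT		12

static int clamp_pfn_range(unsigned long *start, unsigned long *end)
{
	unsigned long max_arch_pfn =
		1UL << (EXAMPLE_MAX_PHYSMEM_BITS - EXAMPLE_PAGE_SHIFT);

	if (*start >= max_arch_pfn)
		return 0;		/* whole range is out of scope */
	if (*end >= max_arch_pfn)
		*end = max_arch_pfn;	/* keep only what sparsemem can map */
	return 1;
}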
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 422d960ffcd8..7c7286e9506d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -388,6 +388,7 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
"Reclaimable",
"Movable",
"Reserve",
+ "Isolate",
};
static void *frag_start(struct seq_file *m, loff_t *pos)
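Finally, the vmstat.c hunk keeps migratetype_names[] in step with the migrate types it labels: the array is declared with MIGRATE_TYPES entries, so once an isolate type exists it needs a matching string or the last slot stays NULL when /proc/pagetypeinfo prints it. A small sketch of that invariant, using an illustrative enum rather than the kernel's real one in mmzone.h, plus a compile-time check that the table covers every type:

/* Sketch: one name per migrate type, enforced at compile time. */
enum example_migratetype {
	EX_MIGRATE_UNMOVABLE,
	EX_MIGRATE_RECLAIMABLE,
	EX_MIGRATE_MOVABLE,
	EX_MIGRATE_RESERVE,
	EX_MIGRATE_ISOLATE,
	EX_MIGRATE_TYPES	/* number of types, also the expected name count */
};

/*
 * Unlike the kernel array, this one is left unsized so the check
 * below counts the initializers.
 */
static const char * const example_migratetype_names[] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
	"Isolate",
};

/* Fails to compile if the table and the enum ever drift apart. */
typedef char example_names_cover_all_types[
	(sizeof(example_migratetype_names) /
	 sizeof(example_migratetype_names[0]) == EX_MIGRATE_TYPES) ? 1 : -1];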