Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c   |  2
-rw-r--r--   mm/memory.c    |  3
-rw-r--r--   mm/mempolicy.c |  5
-rw-r--r--   mm/migrate.c   |  3
-rw-r--r--   mm/mmap.c      | 11
-rw-r--r--   mm/mprotect.c  |  6
-rw-r--r--   mm/oom_kill.c  | 15
7 files changed, 18 insertions, 27 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 920366399eed..5209e47b7fe3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -841,7 +841,7 @@ static void shrink_readahead_size_eio(struct file *filp,
 /**
  * do_generic_mapping_read - generic file read routine
  * @mapping: address_space to be read
- * @_ra: file's readahead state
+ * @ra: file's readahead state
  * @filp: the file to read
  * @ppos: current file position
  * @desc: read_descriptor
diff --git a/mm/memory.c b/mm/memory.c
index bd16dcaeefb8..142683df8755 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -259,9 +259,6 @@ void free_pgd_range(struct mmu_gather **tlb,
 			continue;
 		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
 	} while (pgd++, addr = next, addr != end);
-
-	if (!(*tlb)->fullmm)
-		flush_tlb_pgtables((*tlb)->mm, start, end);
 }
 
 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 568152ae6caf..c1592a94582f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -78,6 +78,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/module.h>
+#include <linux/nsproxy.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/compat.h>
@@ -940,7 +941,7 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 
 	/* Find the mm_struct */
 	read_lock(&tasklist_lock);
-	task = pid ? find_task_by_pid(pid) : current;
+	task = pid ? find_task_by_vpid(pid) : current;
 	if (!task) {
 		read_unlock(&tasklist_lock);
 		return -ESRCH;
@@ -1388,7 +1389,6 @@ EXPORT_SYMBOL(alloc_pages_current);
  * keeps mempolicies cpuset relative after its cpuset moves.  See
  * further kernel/cpuset.c update_nodemask().
  */
-void *cpuset_being_rebound;
 
 /* Slow path of a mempolicy copy */
 struct mempolicy *__mpol_copy(struct mempolicy *old)
@@ -2019,4 +2019,3 @@ out:
 	m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
 	return 0;
 }
-
diff --git a/mm/migrate.c b/mm/migrate.c
index 06d0877a66ef..4d6ee03db946 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -19,6 +19,7 @@
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/mm_inline.h>
+#include <linux/nsproxy.h>
 #include <linux/pagevec.h>
 #include <linux/rmap.h>
 #include <linux/topology.h>
@@ -924,7 +925,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
 
 	/* Find the mm_struct */
 	read_lock(&tasklist_lock);
-	task = pid ? find_task_by_pid(pid) : current;
+	task = pid ? find_task_by_vpid(pid) : current;
 	if (!task) {
 		read_unlock(&tasklist_lock);
 		return -ESRCH;
diff --git a/mm/mmap.c b/mm/mmap.c
index 4275e81e25ba..7a30c4988231 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1048,8 +1048,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
 
 	/* The open routine did something to the protections already? */
 	if (pgprot_val(vma->vm_page_prot) !=
-	    pgprot_val(protection_map[vm_flags &
-		(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+	    pgprot_val(vm_get_page_prot(vm_flags)))
 		return 0;
 
 	/* Specialty mapping? */
@@ -1130,8 +1129,7 @@ munmap_back:
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 	vma->vm_flags = vm_flags;
-	vma->vm_page_prot = protection_map[vm_flags &
-				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+	vma->vm_page_prot = vm_get_page_prot(vm_flags);
 	vma->vm_pgoff = pgoff;
 
 	if (file) {
@@ -2002,8 +2000,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	vma->vm_end = addr + len;
 	vma->vm_pgoff = pgoff;
 	vma->vm_flags = flags;
-	vma->vm_page_prot = protection_map[flags &
-				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+	vma->vm_page_prot = vm_get_page_prot(flags);
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
 	mm->total_vm += len >> PAGE_SHIFT;
@@ -2209,7 +2206,7 @@ int install_special_mapping(struct mm_struct *mm,
 	vma->vm_end = addr + len;
 
 	vma->vm_flags = vm_flags | mm->def_flags;
-	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
 	vma->vm_ops = &special_mapping_vmops;
 	vma->vm_private_data = pages;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 1d4d69790e59..55227845abbe 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -192,11 +192,9 @@ success:
 	 * held in write mode.
 	 */
 	vma->vm_flags = newflags;
-	vma->vm_page_prot = protection_map[newflags &
-		(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+	vma->vm_page_prot = vm_get_page_prot(newflags);
 	if (vma_wants_writenotify(vma)) {
-		vma->vm_page_prot = protection_map[newflags &
-			(VM_READ|VM_WRITE|VM_EXEC)];
+		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
 		dirty_accountable = 1;
 	}
 
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a64decb5b13f..824cade07827 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -212,7 +212,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
 		if (!p->mm)
 			continue;
 		/* skip the init task */
-		if (is_init(p))
+		if (is_global_init(p))
 			continue;
 
 		/*
@@ -265,7 +265,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
  */
 static void __oom_kill_task(struct task_struct *p, int verbose)
 {
-	if (is_init(p)) {
+	if (is_global_init(p)) {
 		WARN_ON(1);
 		printk(KERN_WARNING "tried to kill init!\n");
 		return;
@@ -278,7 +278,8 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
 	}
 
 	if (verbose)
-		printk(KERN_ERR "Killed process %d (%s)\n", p->pid, p->comm);
+		printk(KERN_ERR "Killed process %d (%s)\n",
+				task_pid_nr(p), p->comm);
 
 	/*
 	 * We give our sacrificial lamb high priority and access to
@@ -326,7 +327,7 @@ static int oom_kill_task(struct task_struct *p)
 	 * to memory reserves though, otherwise we might deplete all memory.
 	 */
 	do_each_thread(g, q) {
-		if (q->mm == mm && q->tgid != p->tgid)
+		if (q->mm == mm && !same_thread_group(q, p))
 			force_sig(SIGKILL, q);
 	} while_each_thread(g, q);
 
@@ -337,7 +338,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 			    unsigned long points, const char *message)
 {
 	struct task_struct *c;
-	struct list_head *tsk;
 
 	if (printk_ratelimit()) {
 		printk(KERN_WARNING "%s invoked oom-killer: "
@@ -357,11 +357,10 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	}
 
 	printk(KERN_ERR "%s: kill process %d (%s) score %li or a child\n",
-					message, p->pid, p->comm, points);
+				message, task_pid_nr(p), p->comm, points);
 
 	/* Try to kill a child first */
-	list_for_each(tsk, &p->children) {
-		c = list_entry(tsk, struct task_struct, sibling);
+	list_for_each_entry(c, &p->children, sibling) {
 		if (c->mm == p->mm)
 			continue;
 		if (!oom_kill_task(c))
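
Reference note: every open-coded protection_map[] lookup in the mmap.c and mprotect.c hunks above collapses into a call to vm_get_page_prot(). The helper is not part of this diff; the sketch below shows it roughly as it was defined in mm/mmap.c of the same era, for context only, so treat the exact text as an approximation.

/* Sketch, for reference only -- not part of the diff above. */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	/* Index the global protection_map[] by the four flag bits that
	 * determine the page protection. */
	return protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);

Because the helper keeps VM_SHARED in the mask, the write-notify branch in mprotect.c passes newflags & ~VM_SHARED, which reproduces the old protection_map[newflags & (VM_READ|VM_WRITE|VM_EXEC)] lookup: shared writable mappings get a non-writable protection so the first write still faults and can be dirty-accounted.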