| field | value | date |
|---|---|---|
| author | Al Viro <viro@zeniv.linux.org.uk> | 2010-01-14 11:39:39 -0800 |
| committer | Greg Kroah-Hartman <gregkh@suse.de> | 2010-01-18 10:19:11 -0800 |
| commit | 1f51eb3a881359e97dc2c228e55c83fba598e349 | |
| tree | d408fd227f329d2df0007b7677d27b64375b1ccb /mm | |
| parent | c3a8e0eaafb438d5a7e6ca55c09b921291fcc1b1 | |
untangle the do_mremap() mess
This backports the following upstream commits, all as one patch:
54f5de709984bae0d31d823ff03de755f9dcac54
ecc1a8993751de4e82eb18640d631dae1f626bd6
1a0ef85f84feb13f07b604fcf5b90ef7c2b5c82f
f106af4e90eadd76cfc0b5325f659619e08fb762
097eed103862f9c6a97f2e415e21d1134017b135
935874141df839c706cd6cdc438e85eb69d1525e
0ec62d290912bb4b989be7563851bc364ec73b56
c4caa778157dbbf04116f0ac2111e389b5cd7a29
2ea1d13f64efdf49319e86c87d9ba38c30902782
570dcf2c15463842e384eb597a87c1e39bead99b
564b3bffc619dcbdd160de597b0547a7017ea010
0067bd8a55862ac9dd212bd1c4f6f5bff1ca1301
f8b7256096a20436f6d0926747e3ac3d64c81d24
8c7b49b3ecd48923eb64ff57e07a1cdb74782970
9206de95b1ea68357996ec02be5db0638a0de2c1
2c6a10161d0b5fc047b5bd81b03693b9af99fab5
05d72faa6d13c9d857478a5d35c85db9adada685
bb52d6694002b9d632bb355f64daa045c6293a4e
e77414e0aad6a1b063ba5e5750c582c75327ea6a
aa65607373a4daf2010e8c3867b6317619f3c1a3
Backport done by Greg Kroah-Hartman. Only minor tweaks were needed.
Cc: David S. Miller <davem@davemloft.net>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
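For orientation before the diff: the heart of the series is that do_mremap()'s MREMAP_FIXED branch moves into a new mremap_to() helper, with the validation that used to be open-coded now shared via vma_to_resize(). From userspace, that is the path taken by the five-argument form of mremap(2). A minimal sketch of such a call (illustrative only, not part of the patch; assumes glibc's Linux-specific mremap() under _GNU_SOURCE):

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 4096, new_len = 8192;

	void *old = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED)
		return 1;

	/* Reserve a destination range; mremap_to() will munmap whatever
	 * occupies it before moving the mapping there. */
	void *dst = mmap(NULL, new_len, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (dst == MAP_FAILED)
		return 1;

	/* MREMAP_FIXED requires MREMAP_MAYMOVE; in the patched kernel this
	 * whole case is delegated to mremap_to(). */
	void *p = mremap(old, old_len, new_len,
			 MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	if (p == MAP_FAILED)
		perror("mremap");	/* EINVAL for overlap or unaligned new_addr */
	else
		munmap(p, new_len);
	return 0;
}
```

Note that mremap_to() keeps the old behaviour of unmapping the destination first (the do_munmap(mm, new_addr, new_len) call in the diff), so pre-reserving the target with PROT_NONE, as above, remains a conventional way to choose a fixed address.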
Diffstat (limited to 'mm')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/mmap.c | 42 |
| -rw-r--r-- | mm/mremap.c | 241 |
| -rw-r--r-- | mm/util.c | 44 |

3 files changed, 211 insertions, 116 deletions
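The mm/util.c portion adds the mmap_pgoff() syscall wrapper and moves the MAP_HUGETLB special case out of do_mmap_pgoff(); one visible consequence is that MAP_HUGETLB combined with a file-backed (non-anonymous) mapping is rejected with -EINVAL before fget() is even called. A small userspace sketch of both cases (illustrative only; the fallback MAP_HUGETLB value is the x86 one, and the anonymous case assumes huge pages have been reserved):

```c
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000	/* x86 value; older headers may lack it */
#endif

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* assumes 2 MiB huge pages */

	/* Anonymous hugetlb mapping: the wrapper services this via
	 * hugetlb_file_setup(), exactly as do_mmap_pgoff() used to. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED)
		perror("anonymous hugetlb mmap");	/* e.g. ENOMEM if none reserved */
	else
		munmap(p, len);

	/* File-backed MAP_HUGETLB: now fails up front with EINVAL. */
	void *q = mmap(NULL, len, PROT_READ,
		       MAP_PRIVATE | MAP_HUGETLB, 0 /* any fd */, 0);
	if (q == MAP_FAILED)
		perror("file-backed hugetlb mmap");	/* expect "Invalid argument" */
	return 0;
}
```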
```diff
diff --git a/mm/mmap.c b/mm/mmap.c
index 73f5e4b64010..ae197468b352 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -932,13 +932,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	if (!(flags & MAP_FIXED))
 		addr = round_hint_to_min(addr);
 
-	error = arch_mmap_check(addr, len, flags);
-	if (error)
-		return error;
-
 	/* Careful about overflows.. */
 	len = PAGE_ALIGN(len);
-	if (!len || len > TASK_SIZE)
+	if (!len)
 		return -ENOMEM;
 
 	/* offset overflow? */
@@ -949,24 +945,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	if (mm->map_count > sysctl_max_map_count)
 		return -ENOMEM;
 
-	if (flags & MAP_HUGETLB) {
-		struct user_struct *user = NULL;
-		if (file)
-			return -EINVAL;
-
-		/*
-		 * VM_NORESERVE is used because the reservations will be
-		 * taken when vm_ops->mmap() is called
-		 * A dummy user value is used because we are not locking
-		 * memory so no accounting is necessary
-		 */
-		len = ALIGN(len, huge_page_size(&default_hstate));
-		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
-						&user, HUGETLB_ANONHUGE_INODE);
-		if (IS_ERR(file))
-			return PTR_ERR(file);
-	}
-
 	/* Obtain the address to map to. we verify (or select) it and ensure
 	 * that it represents a valid section of the address space.
 	 */
@@ -1459,6 +1437,14 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 	unsigned long (*get_area)(struct file *, unsigned long,
 				  unsigned long, unsigned long, unsigned long);
 
+	unsigned long error = arch_mmap_check(addr, len, flags);
+	if (error)
+		return error;
+
+	/* Careful about overflows.. */
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
 	get_area = current->mm->get_unmapped_area;
 	if (file && file->f_op && file->f_op->get_unmapped_area)
 		get_area = file->f_op->get_unmapped_area;
@@ -2003,20 +1989,14 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if (!len)
 		return addr;
 
-	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
-		return -EINVAL;
-
-	if (is_hugepage_only_range(mm, addr, len))
-		return -EINVAL;
-
 	error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
 	if (error)
 		return error;
 
 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
-	error = arch_mmap_check(addr, len, flags);
-	if (error)
+	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+	if (error & ~PAGE_MASK)
 		return error;
 
 	/*
diff --git a/mm/mremap.c b/mm/mremap.c
index 97bff2547719..845190898d59 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -261,6 +261,137 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	return new_addr;
 }
 
+static struct vm_area_struct *vma_to_resize(unsigned long addr,
+	unsigned long old_len, unsigned long new_len, unsigned long *p)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma = find_vma(mm, addr);
+
+	if (!vma || vma->vm_start > addr)
+		goto Efault;
+
+	if (is_vm_hugetlb_page(vma))
+		goto Einval;
+
+	/* We can't remap across vm area boundaries */
+	if (old_len > vma->vm_end - addr)
+		goto Efault;
+
+	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
+		if (new_len > old_len)
+			goto Efault;
+	}
+
+	if (vma->vm_flags & VM_LOCKED) {
+		unsigned long locked, lock_limit;
+		locked = mm->locked_vm << PAGE_SHIFT;
+		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+		locked += new_len - old_len;
+		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+			goto Eagain;
+	}
+
+	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
+		goto Enomem;
+
+	if (vma->vm_flags & VM_ACCOUNT) {
+		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
+		if (security_vm_enough_memory(charged))
+			goto Efault;
+		*p = charged;
+	}
+
+	return vma;
+
+Efault:	/* very odd choice for most of the cases, but... */
+	return ERR_PTR(-EFAULT);
+Einval:
+	return ERR_PTR(-EINVAL);
+Enomem:
+	return ERR_PTR(-ENOMEM);
+Eagain:
+	return ERR_PTR(-EAGAIN);
+}
+
+static unsigned long mremap_to(unsigned long addr,
+	unsigned long old_len, unsigned long new_addr,
+	unsigned long new_len)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long ret = -EINVAL;
+	unsigned long charged = 0;
+	unsigned long map_flags;
+
+	if (new_addr & ~PAGE_MASK)
+		goto out;
+
+	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
+		goto out;
+
+	/* Check if the location we're moving into overlaps the
+	 * old location at all, and fail if it does.
+	 */
+	if ((new_addr <= addr) && (new_addr+new_len) > addr)
+		goto out;
+
+	if ((addr <= new_addr) && (addr+old_len) > new_addr)
+		goto out;
+
+	ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+	if (ret)
+		goto out;
+
+	ret = do_munmap(mm, new_addr, new_len);
+	if (ret)
+		goto out;
+
+	if (old_len >= new_len) {
+		ret = do_munmap(mm, addr+new_len, old_len - new_len);
+		if (ret && old_len != new_len)
+			goto out;
+		old_len = new_len;
+	}
+
+	vma = vma_to_resize(addr, old_len, new_len, &charged);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto out;
+	}
+
+	map_flags = MAP_FIXED;
+	if (vma->vm_flags & VM_MAYSHARE)
+		map_flags |= MAP_SHARED;
+
+	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
+				((addr - vma->vm_start) >> PAGE_SHIFT),
+				map_flags);
+	if (ret & ~PAGE_MASK)
+		goto out1;
+
+	ret = move_vma(vma, addr, old_len, new_len, new_addr);
+	if (!(ret & ~PAGE_MASK))
+		goto out;
+out1:
+	vm_unacct_memory(charged);
+
+out:
+	return ret;
+}
+
+static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
+{
+	unsigned long end = vma->vm_end + delta;
+	if (end < vma->vm_end) /* overflow */
+		return 0;
+	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
+		return 0;
+	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
+			      0, MAP_FIXED) & ~PAGE_MASK)
+		return 0;
+	return 1;
+}
+
 /*
  * Expand (or shrink) an existing mapping, potentially moving it at the
  * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
@@ -294,32 +425,10 @@ unsigned long do_mremap(unsigned long addr,
 	if (!new_len)
 		goto out;
 
-	/* new_addr is only valid if MREMAP_FIXED is specified */
 	if (flags & MREMAP_FIXED) {
-		if (new_addr & ~PAGE_MASK)
-			goto out;
-		if (!(flags & MREMAP_MAYMOVE))
-			goto out;
-
-		if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
-			goto out;
-
-		/* Check if the location we're moving into overlaps the
-		 * old location at all, and fail if it does.
-		 */
-		if ((new_addr <= addr) && (new_addr+new_len) > addr)
-			goto out;
-
-		if ((addr <= new_addr) && (addr+old_len) > new_addr)
-			goto out;
-
-		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
-		if (ret)
-			goto out;
-
-		ret = do_munmap(mm, new_addr, new_len);
-		if (ret)
-			goto out;
+		if (flags & MREMAP_MAYMOVE)
+			ret = mremap_to(addr, old_len, new_addr, new_len);
+		goto out;
 	}
 
 	/*
@@ -332,60 +441,23 @@ unsigned long do_mremap(unsigned long addr,
 		if (ret && old_len != new_len)
 			goto out;
 		ret = addr;
-		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
-			goto out;
-		old_len = new_len;
+		goto out;
 	}
 
 	/*
-	 * Ok, we need to grow.. or relocate.
+	 * Ok, we need to grow..
 	 */
-	ret = -EFAULT;
-	vma = find_vma(mm, addr);
-	if (!vma || vma->vm_start > addr)
-		goto out;
-	if (is_vm_hugetlb_page(vma)) {
-		ret = -EINVAL;
-		goto out;
-	}
-	/* We can't remap across vm area boundaries */
-	if (old_len > vma->vm_end - addr)
-		goto out;
-	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
-		if (new_len > old_len)
-			goto out;
-	}
-	if (vma->vm_flags & VM_LOCKED) {
-		unsigned long locked, lock_limit;
-		locked = mm->locked_vm << PAGE_SHIFT;
-		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
-		locked += new_len - old_len;
-		ret = -EAGAIN;
-		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
-			goto out;
-	}
-	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
-		ret = -ENOMEM;
+	vma = vma_to_resize(addr, old_len, new_len, &charged);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto out;
 	}
 
-	if (vma->vm_flags & VM_ACCOUNT) {
-		charged = (new_len - old_len) >> PAGE_SHIFT;
-		if (security_vm_enough_memory(charged))
-			goto out_nc;
-	}
-
 	/* old_len exactly to the end of the area..
-	 * And we're not relocating the area.
 	 */
-	if (old_len == vma->vm_end - addr &&
-	    !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
-	    (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
-		unsigned long max_addr = TASK_SIZE;
-		if (vma->vm_next)
-			max_addr = vma->vm_next->vm_start;
+	if (old_len == vma->vm_end - addr) {
 		/* can we just expand the current mapping? */
-		if (max_addr - addr >= new_len) {
+		if (vma_expandable(vma, new_len - old_len)) {
 			int pages = (new_len - old_len) >> PAGE_SHIFT;
 
 			vma_adjust(vma, vma->vm_start,
@@ -409,28 +481,27 @@ unsigned long do_mremap(unsigned long addr,
 	 */
 	ret = -ENOMEM;
 	if (flags & MREMAP_MAYMOVE) {
-		if (!(flags & MREMAP_FIXED)) {
-			unsigned long map_flags = 0;
-			if (vma->vm_flags & VM_MAYSHARE)
-				map_flags |= MAP_SHARED;
-
-			new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
-						vma->vm_pgoff, map_flags);
-			if (new_addr & ~PAGE_MASK) {
-				ret = new_addr;
-				goto out;
-			}
-
-			ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
-			if (ret)
-				goto out;
+		unsigned long map_flags = 0;
+		if (vma->vm_flags & VM_MAYSHARE)
+			map_flags |= MAP_SHARED;
+
+		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
+					vma->vm_pgoff +
+					((addr - vma->vm_start) >> PAGE_SHIFT),
+					map_flags);
+		if (new_addr & ~PAGE_MASK) {
+			ret = new_addr;
+			goto out;
 		}
+
+		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+		if (ret)
+			goto out;
 		ret = move_vma(vma, addr, old_len, new_len, new_addr);
 	}
 out:
 	if (ret & ~PAGE_MASK)
 		vm_unacct_memory(charged);
-out_nc:
 	return ret;
 }
diff --git a/mm/util.c b/mm/util.c
index 7c35ad95f927..b377ce430803 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,6 +4,10 @@
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/sched.h>
+#include <linux/hugetlb.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
 #include <asm/uaccess.h>
 
 #define CREATE_TRACE_POINTS
@@ -268,6 +272,46 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
+SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
+		unsigned long, prot, unsigned long, flags,
+		unsigned long, fd, unsigned long, pgoff)
+{
+	struct file * file = NULL;
+	unsigned long retval = -EBADF;
+
+	if (!(flags & MAP_ANONYMOUS)) {
+		if (unlikely(flags & MAP_HUGETLB))
+			return -EINVAL;
+		file = fget(fd);
+		if (!file)
+			goto out;
+	} else if (flags & MAP_HUGETLB) {
+		struct user_struct *user = NULL;
+		/*
+		 * VM_NORESERVE is used because the reservations will be
+		 * taken when vm_ops->mmap() is called
+		 * A dummy user value is used because we are not locking
+		 * memory so no accounting is necessary
+		 */
+		len = ALIGN(len, huge_page_size(&default_hstate));
+		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
+						&user, HUGETLB_ANONHUGE_INODE);
+		if (IS_ERR(file))
+			return PTR_ERR(file);
+	}
+
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+	down_write(&current->mm->mmap_sem);
+	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+	up_write(&current->mm->mmap_sem);
+
+	if (file)
+		fput(file);
+out:
+	return retval;
+}
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
```
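A recurring idiom in the new code, used in do_brk(), mremap_to(), and do_mremap() above, is testing `ret & ~PAGE_MASK`: helpers such as get_unmapped_area() and move_vma() fold success and failure into one unsigned long, where success is a page-aligned address and failure is a small negative errno, so any set bits below the page boundary mark an error. A standalone sketch of the convention (illustrative; pick_area() is a hypothetical stand-in for get_unmapped_area(), and a 4 KiB page size is assumed):

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Hypothetical stand-in for get_unmapped_area(): returns either a
 * page-aligned address or a negative errno in the same unsigned long. */
static unsigned long pick_area(int fail)
{
	if (fail)
		return (unsigned long)-12;	/* -ENOMEM */
	return 0x7f0000000000UL;		/* page-aligned address */
}

int main(void)
{
	for (int fail = 0; fail <= 1; fail++) {
		unsigned long ret = pick_area(fail);
		/* A page-aligned address has no bits below the page size;
		 * a negative errno sets nearly all of them. */
		if (ret & ~PAGE_MASK)
			printf("error: %ld\n", (long)ret);
		else
			printf("address: %#lx\n", ret);
	}
	return 0;
}
```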