author     James Morris <jmorris@namei.org>  2011-05-19 18:51:57 +1000
committer  James Morris <jmorris@namei.org>  2011-05-19 18:51:57 +1000
commit     12a5a2621b1ee14d32beca35304d7c6076a58815 (patch)
tree       213e13f99de690b3c4a510f504393b63ada626bd /mm/memory.c
parent     e77dc3460fa59be5759e9327ad882868eee9d61b (diff)
parent     61c4f2c81c61f73549928dfd9f3e8f26aa36a8cf (diff)
Merge branch 'master' into next
Conflicts:
include/linux/capability.h
Manually resolve merge conflict w/ thanks to Stephen Rothwell.
Signed-off-by: James Morris <jmorris@namei.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  21
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index ce22a250926f..61e66f026563 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1359,7 +1359,7 @@ split_fallthrough:
 		 */
 		mark_page_accessed(page);
 	}
-	if (flags & FOLL_MLOCK) {
+	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * The preliminary mapping check is mainly to avoid the
 		 * pointless overhead of lock_page on the ZERO_PAGE
@@ -1412,9 +1412,8 @@ no_page_table:
 
 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	return (vma->vm_flags & VM_GROWSDOWN) &&
-		(vma->vm_start == addr) &&
-		!vma_stack_continue(vma->vm_prev, addr);
+	return stack_guard_page_start(vma, addr) ||
+	       stack_guard_page_end(vma, addr+PAGE_SIZE);
 }
 
 /**
@@ -1551,13 +1550,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			continue;
 		}
 
-		/*
-		 * If we don't actually want the page itself,
-		 * and it's the stack guard page, just skip it.
-		 */
-		if (!pages && stack_guard_page(vma, start))
-			goto next_page;
-
 		do {
 			struct page *page;
 			unsigned int foll_flags = gup_flags;
@@ -1574,6 +1566,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				int ret;
 				unsigned int fault_flags = 0;
 
+				/* For mlock, just skip the stack guard page. */
+				if (foll_flags & FOLL_MLOCK) {
+					if (stack_guard_page(vma, start))
+						goto next_page;
+				}
 				if (foll_flags & FOLL_WRITE)
 					fault_flags |= FAULT_FLAG_WRITE;
 				if (nonblocking)
@@ -3396,7 +3393,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * run pte_offset_map on the pmd, if an huge pmd could
 	 * materialize from under us from a different thread.
 	 */
-	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
+	if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
 		return VM_FAULT_OOM;
 	/* if an huge pmd materialized from under us just retry later */
 	if (unlikely(pmd_trans_huge(*pmd)))
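
A note on the stack_guard_page() rewrite in the second hunk: the old one-sided VM_GROWSDOWN test becomes a pair of start/end checks, so grows-up stacks (VM_GROWSUP architectures such as parisc) get a guard page as well. The sketch below is a self-contained userspace model of that logic, not kernel code: "struct vma" and the flag values are simplified stand-ins for struct vm_area_struct and its vm_flags, and the helper bodies follow the shape of the include/linux/mm.h helpers of this era.

/*
 * Userspace model of the guard-page check -- a sketch, not kernel code.
 * Types and flag values are illustrative stand-ins.
 */
#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE    4096UL
#define VM_GROWSDOWN 0x1UL
#define VM_GROWSUP   0x2UL

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
	struct vma *vm_prev, *vm_next;	/* neighbours in address order */
};

/* Is "prev" a grows-down stack ending exactly at addr (a continuation)? */
static int vma_growsdown(struct vma *prev, unsigned long addr)
{
	return prev && prev->vm_end == addr && (prev->vm_flags & VM_GROWSDOWN);
}

/* Is "next" a grows-up stack starting exactly at addr (a continuation)? */
static int vma_growsup(struct vma *next, unsigned long addr)
{
	return next && next->vm_start == addr && (next->vm_flags & VM_GROWSUP);
}

/* First page of a grows-down stack is the guard page... */
static int stack_guard_page_start(struct vma *vma, unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		vma->vm_start == addr &&
		!vma_growsdown(vma->vm_prev, addr);
}

/* ...and the last page of a grows-up stack likewise. */
static int stack_guard_page_end(struct vma *vma, unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		vma->vm_end == addr &&
		!vma_growsup(vma->vm_next, addr);
}

static int stack_guard_page(struct vma *vma, unsigned long addr)
{
	return stack_guard_page_start(vma, addr) ||
	       stack_guard_page_end(vma, addr + PAGE_SIZE);
}

int main(void)
{
	/* A grows-down stack VMA covering [0x7f0000, 0x800000). */
	struct vma stack = { 0x7f0000, 0x800000, VM_GROWSDOWN, NULL, NULL };

	assert(stack_guard_page(&stack, 0x7f0000));	/* lowest page: guard */
	assert(!stack_guard_page(&stack, 0x7f1000));	/* interior page: not */
	return 0;
}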
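The final hunk has handle_mm_fault() test pmd_none(*pmd) before calling __pte_alloc(), so a page-table page is only allocated when the pmd is genuinely empty; if a huge pmd has materialized from another thread, the subsequent pmd_trans_huge() check sees it and the fault is retried. The underlying idiom is an unlocked emptiness check in the caller paired with a locked re-check in the allocator (__pte_alloc re-checks pmd_none under the page-table lock and frees the new table if it lost the race). Below is a generic userspace sketch of that idiom, folding both checks into one function; every name in it is illustrative, none of it is kernel API.

/*
 * Sketch of the check-then-allocate idiom behind the pmd_none() change:
 * cheap unlocked test first, expensive allocation second, re-check under
 * the lock because another thread may have raced ahead. Not kernel code.
 */
#include <pthread.h>
#include <stdlib.h>

struct table { int entries[512]; };

static struct table *slot;			/* plays the role of *pmd */
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;

static int slot_alloc(void)
{
	/* Cheap unlocked check first -- like the added pmd_none(*pmd). */
	if (slot != NULL)
		return 0;

	struct table *new = calloc(1, sizeof(*new));
	if (!new)
		return -1;			/* like VM_FAULT_OOM */

	pthread_mutex_lock(&slot_lock);
	if (slot == NULL)			/* re-check under the lock */
		slot = new;
	else
		free(new);			/* lost the race: discard */
	pthread_mutex_unlock(&slot_lock);
	return 0;
}

static void *worker(void *arg)
{
	(void)arg;
	slot_alloc();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return slot == NULL;	/* 0 on success: exactly one table installed */
}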