Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- | mm/mmap.c | 21
1 file changed, 10 insertions, 11 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index 06a6076c92e5..bb50cacc3ea5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1133,7 +1133,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
  * by another page fault trying to merge _that_. But that's ok: if it
  * is being set up, that automatically means that it will be a singleton
  * acceptable for merging, so we can do all of this optimistically. But
- * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
+ * we do that READ_ONCE() to make sure that we never re-load the pointer.
  *
  * IOW: that the "list_is_singular()" test on the anon_vma_chain only
  * matters for the 'stable anon_vma' case (ie the thing we want to avoid
@@ -1147,7 +1147,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
 {
 	if (anon_vma_compatible(a, b)) {
-		struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
+		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
 
 		if (anon_vma && list_is_singular(&old->anon_vma_chain))
 			return anon_vma;
@@ -1551,11 +1551,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 
 	/* Clear old maps */
 	error = -ENOMEM;
-munmap_back:
-	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
+	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
+			      &rb_parent)) {
 		if (do_munmap(mm, addr, len))
 			return -ENOMEM;
-		goto munmap_back;
 	}
 
 	/*
@@ -1571,7 +1570,8 @@ munmap_back:
 	/*
 	 * Can we just expand an old mapping?
 	 */
-	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff,
+			NULL);
 	if (vma)
 		goto out;
 
@@ -2100,7 +2100,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 	actual_size = size;
 	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
 		actual_size -= PAGE_SIZE;
-	if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -2108,7 +2108,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 		unsigned long locked;
 		unsigned long limit;
 		locked = mm->locked_vm + grow;
-		limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
+		limit = READ_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
 		limit >>= PAGE_SHIFT;
 		if (locked > limit && !capable(CAP_IPC_LOCK))
 			return -ENOMEM;
@@ -2739,11 +2739,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
 	/*
 	 * Clear old maps.  this also does some error checking for us
 	 */
- munmap_back:
-	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
+	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
+			      &rb_parent)) {
 		if (do_munmap(mm, addr, len))
 			return -ENOMEM;
-		goto munmap_back;
 	}
 
 	/* Check against address space limits *after* clearing old maps... */
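
The substance of the diff is twofold: the ACCESS_ONCE() reads become READ_ONCE(), and the two "munmap_back:" retry loops in mmap_region() and do_brk() are rewritten as while loops with identical behaviour (keep calling do_munmap() until find_vma_links() reports the range is clear). Below is a minimal userspace sketch of what the READ_ONCE() pattern buys in reusable_anon_vma(): the field is loaded exactly once, and every later check works on that single snapshot even if another thread stores to it concurrently. The macro READ_ONCE_SKETCH(), the stub structs and reusable() are illustrative stand-ins, not the kernel's definitions; the real READ_ONCE() lives in include/linux/compiler.h and handles more cases.

/*
 * Userspace sketch (not kernel code) of the single-load idea behind
 * READ_ONCE(): a volatile access forces the compiler to emit exactly
 * one read of a field that another thread may be writing.
 */
#include <stdio.h>

#define READ_ONCE_SKETCH(x)	(*(const volatile __typeof__(x) *)&(x))

struct anon_vma_stub {			/* hypothetical stand-in type */
	int id;
};

struct old_vma {
	struct anon_vma_stub *anon_vma;	/* may be set by a concurrent fault */
};

static struct anon_vma_stub *reusable(struct old_vma *old)
{
	/* One load; the NULL check and the return use the same snapshot. */
	struct anon_vma_stub *av = READ_ONCE_SKETCH(old->anon_vma);

	if (av)
		return av;	/* the compiler cannot re-read old->anon_vma */
	return NULL;
}

int main(void)
{
	struct anon_vma_stub stub = { .id = 1 };
	struct old_vma old = { .anon_vma = &stub };
	struct anon_vma_stub *av = reusable(&old);

	printf("reused anon_vma id: %d\n", av ? av->id : -1);
	return 0;
}

The while-loop halves of the diff are purely structural: "retry until the old mappings are gone" reads the same either way, but the loop form avoids a backwards goto and drops one line per call site.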