author | Varun Wadekar <vwadekar@nvidia.com> | 2012-06-11 11:29:44 +0530
---|---|---
committer | Varun Wadekar <vwadekar@nvidia.com> | 2012-06-11 11:29:44 +0530
commit | e6dca42c2c2ad0f06c6704c73a03e5516dc14b13 (patch) |
tree | 6ce2385fd736a6fc9a02ace4f5f139a6c8cfbaaf /mm |
parent | 462ba8b0bca7e376722158f1f1a094ba1c0bd1ad (diff) |
parent | 1f5547c7f183363eabe07e5d202a49f2e94e995a (diff) |
Merge commit 'v3.4.2' into android-tegra-nv-3.4
Conflicts:
drivers/i2c/busses/i2c-tegra.c
drivers/usb/gadget/fsl_udc_core.c
Change-Id: Ibfc3a8edc3665b832ddc94f89fc17b556629d104
Signed-off-by: Varun Wadekar <vwadekar@nvidia.com>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/hugetlb.c | 29
-rw-r--r-- | mm/mempolicy.c | 41
-rw-r--r-- | mm/slub.c | 9
-rw-r--r-- | mm/vmscan.c | 2
4 files changed, 47 insertions, 34 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ae8f708e3d75..263e17703b31 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2157,6 +2157,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 		kref_get(&reservations->refs);
 }
 
+static void resv_map_put(struct vm_area_struct *vma)
+{
+	struct resv_map *reservations = vma_resv_map(vma);
+
+	if (!reservations)
+		return;
+	kref_put(&reservations->refs, resv_map_release);
+}
+
 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 {
 	struct hstate *h = hstate_vma(vma);
@@ -2173,7 +2182,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 		reserve = (end - start) -
 			region_count(&reservations->regions, start, end);
 
-		kref_put(&reservations->refs, resv_map_release);
+		resv_map_put(vma);
 
 		if (reserve) {
 			hugetlb_acct_memory(h, -reserve);
@@ -2990,12 +2999,16 @@ int hugetlb_reserve_pages(struct inode *inode,
 		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 	}
 
-	if (chg < 0)
-		return chg;
+	if (chg < 0) {
+		ret = chg;
+		goto out_err;
+	}
 
 	/* There must be enough pages in the subpool for the mapping */
-	if (hugepage_subpool_get_pages(spool, chg))
-		return -ENOSPC;
+	if (hugepage_subpool_get_pages(spool, chg)) {
+		ret = -ENOSPC;
+		goto out_err;
+	}
 
 	/*
 	 * Check enough hugepages are available for the reservation.
@@ -3004,7 +3017,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	ret = hugetlb_acct_memory(h, chg);
 	if (ret < 0) {
 		hugepage_subpool_put_pages(spool, chg);
-		return ret;
+		goto out_err;
 	}
 
 	/*
@@ -3021,6 +3034,10 @@ int hugetlb_reserve_pages(struct inode *inode,
 	if (!vma || vma->vm_flags & VM_MAYSHARE)
 		region_add(&inode->i_mapping->private_list, from, to);
 	return 0;
+out_err:
+	if (vma)
+		resv_map_put(vma);
+	return ret;
 }
 
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b19569137529..bf5b485ecd31 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -607,27 +607,6 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 	return first;
 }
 
-/* Apply policy to a single VMA */
-static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
-{
-	int err = 0;
-	struct mempolicy *old = vma->vm_policy;
-
-	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
-		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
-		 vma->vm_ops, vma->vm_file,
-		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
-
-	if (vma->vm_ops && vma->vm_ops->set_policy)
-		err = vma->vm_ops->set_policy(vma, new);
-	if (!err) {
-		mpol_get(new);
-		vma->vm_policy = new;
-		mpol_put(old);
-	}
-	return err;
-}
-
 /* Step 2: apply policy to a range and do splits. */
 static int mbind_range(struct mm_struct *mm, unsigned long start,
 		       unsigned long end, struct mempolicy *new_pol)
@@ -676,9 +655,23 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 			if (err)
 				goto out;
 		}
-		err = policy_vma(vma, new_pol);
-		if (err)
-			goto out;
+
+		/*
+		 * Apply policy to a single VMA. The reference counting of
+		 * policy for vma_policy linkages has already been handled by
+		 * vma_merge and split_vma as necessary. If this is a shared
+		 * policy then ->set_policy will increment the reference count
+		 * for an sp node.
+		 */
+		pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
+			vma->vm_start, vma->vm_end, vma->vm_pgoff,
+			vma->vm_ops, vma->vm_file,
+			vma->vm_ops ? vma->vm_ops->set_policy : NULL);
+		if (vma->vm_ops && vma->vm_ops->set_policy) {
+			err = vma->vm_ops->set_policy(vma, new_pol);
+			if (err)
+				goto out;
+		}
 	}
 
  out:
diff --git a/mm/slub.c b/mm/slub.c
index 80848cd3901c..71de9b5685fa 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1514,15 +1514,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
 		freelist = page->freelist;
 		counters = page->counters;
 		new.counters = counters;
-		if (mode)
+		if (mode) {
 			new.inuse = page->objects;
+			new.freelist = NULL;
+		} else {
+			new.freelist = freelist;
+		}
 
 		VM_BUG_ON(new.frozen);
 		new.frozen = 1;
 
 	} while (!__cmpxchg_double_slab(s, page,
 			freelist, counters,
-			NULL, new.counters,
+			new.freelist, new.counters,
 			"lock and freeze"));
 
 	remove_partial(n, page);
@@ -1564,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s,
 			object = t;
 			available = page->objects - page->inuse;
 		} else {
-			page->freelist = t;
 			available = put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 33dc256033b5..0932dc27e19d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -722,7 +722,7 @@ static enum page_references page_check_references(struct page *page,
 		return PAGEREF_RECLAIM;
 
 	if (referenced_ptes) {
-		if (PageAnon(page))
+		if (PageSwapBacked(page))
 			return PAGEREF_ACTIVATE;
 		/*
 		 * All mapped pages start out with page table