Diffstat (limited to 'mm')
-rw-r--r-- | mm/huge_memory.c | 20
-rw-r--r-- | mm/hugetlb.c | 5
-rw-r--r-- | mm/khugepaged.c | 4
-rw-r--r-- | mm/memblock.c | 2
-rw-r--r-- | mm/mmap.c | 25
-rw-r--r-- | mm/mmu_gather.c | 2
-rw-r--r-- | mm/page_alloc.c | 19
-rw-r--r-- | mm/shmem.c | 6
-rw-r--r-- | mm/slab.c | 4
-rw-r--r-- | mm/slab_common.c | 6
-rw-r--r-- | mm/sparse.c | 16
-rw-r--r-- | mm/swap.c | 3
-rw-r--r-- | mm/vmscan.c | 22 |
13 files changed, 88 insertions, 46 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5da55b38b1b7..e84a10b0d310 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2144,23 +2144,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	 */
 	old_pmd = pmdp_invalidate(vma, haddr, pmd);
 
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 	pmd_migration = is_pmd_migration_entry(old_pmd);
-	if (pmd_migration) {
+	if (unlikely(pmd_migration)) {
 		swp_entry_t entry;
 
 		entry = pmd_to_swp_entry(old_pmd);
 		page = pfn_to_page(swp_offset(entry));
-	} else
-#endif
+		write = is_write_migration_entry(entry);
+		young = false;
+		soft_dirty = pmd_swp_soft_dirty(old_pmd);
+	} else {
 		page = pmd_page(old_pmd);
+		if (pmd_dirty(old_pmd))
+			SetPageDirty(page);
+		write = pmd_write(old_pmd);
+		young = pmd_young(old_pmd);
+		soft_dirty = pmd_soft_dirty(old_pmd);
+	}
 	VM_BUG_ON_PAGE(!page_count(page), page);
 	page_ref_add(page, HPAGE_PMD_NR - 1);
-	if (pmd_dirty(old_pmd))
-		SetPageDirty(page);
-	write = pmd_write(old_pmd);
-	young = pmd_young(old_pmd);
-	soft_dirty = pmd_soft_dirty(old_pmd);
 
 	/*
 	 * Withdraw the table only after we mark the pmd entry invalid.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 705a3e9cc910..a80832487981 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1248,10 +1248,11 @@ void free_huge_page(struct page *page)
 		(struct hugepage_subpool *)page_private(page);
 	bool restore_reserve;
 
-	set_page_private(page, 0);
-	page->mapping = NULL;
 	VM_BUG_ON_PAGE(page_count(page), page);
 	VM_BUG_ON_PAGE(page_mapcount(page), page);
+
+	set_page_private(page, 0);
+	page->mapping = NULL;
 	restore_reserve = PagePrivate(page);
 	ClearPagePrivate(page);
 
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 8e2ff195ecb3..43ce2f4d2551 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1225,7 +1225,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 {
 	struct mm_struct *mm = mm_slot->mm;
 
-	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+	lockdep_assert_held(&khugepaged_mm_lock);
 
 	if (khugepaged_test_exit(mm)) {
 		/* free mm_slot */
@@ -1653,7 +1653,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	int progress = 0;
 
 	VM_BUG_ON(!pages);
-	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+	lockdep_assert_held(&khugepaged_mm_lock);
 
 	if (khugepaged_scan.mm_slot)
 		mm_slot = khugepaged_scan.mm_slot;
diff --git a/mm/memblock.c b/mm/memblock.c
index 9a2d5ae81ae1..81ae63ca78d0 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1727,7 +1727,7 @@ static int __init_memblock memblock_search(struct memblock_type *type, phys_addr
 	return -1;
 }
 
-bool __init memblock_is_reserved(phys_addr_t addr)
+bool __init_memblock memblock_is_reserved(phys_addr_t addr)
 {
 	return memblock_search(&memblock.reserved, addr) != -1;
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index 6c04292e16a7..7bb64381e77c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2066,6 +2066,15 @@ found_highest:
 	return gap_end;
 }
 
+
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr)	(TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base) (base)
+#endif
+
 /* Get an address range which is currently unmapped.
  * For shmat() with addr=0.
  *
@@ -2085,8 +2094,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *prev;
 	struct vm_unmapped_area_info info;
+	const unsigned long mmap_end = arch_get_mmap_end(addr);
 
-	if (len > TASK_SIZE - mmap_min_addr)
+	if (len > mmap_end - mmap_min_addr)
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED)
@@ -2095,7 +2105,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma_prev(mm, addr, &prev);
-		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
 		    (!vma || addr + len <= vm_start_gap(vma)) &&
 		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
@@ -2104,7 +2114,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.flags = 0;
 	info.length = len;
 	info.low_limit = mm->mmap_base;
-	info.high_limit = TASK_SIZE;
+	info.high_limit = mmap_end;
 	info.align_mask = 0;
 	return vm_unmapped_area(&info);
 }
@@ -2124,9 +2134,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
+	const unsigned long mmap_end = arch_get_mmap_end(addr);
 
 	/* requested length too big for entire address space */
-	if (len > TASK_SIZE - mmap_min_addr)
+	if (len > mmap_end - mmap_min_addr)
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED)
@@ -2136,7 +2147,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma_prev(mm, addr, &prev);
-		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
 		    (!vma || addr + len <= vm_start_gap(vma)) &&
 		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
@@ -2145,7 +2156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
 	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
-	info.high_limit = mm->mmap_base;
+	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
 	info.align_mask = 0;
 	addr = vm_unmapped_area(&info);
 
@@ -2159,7 +2170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		VM_BUG_ON(addr != -ENOMEM);
 		info.flags = 0;
 		info.low_limit = TASK_UNMAPPED_BASE;
-		info.high_limit = TASK_SIZE;
+		info.high_limit = mmap_end;
 		addr = vm_unmapped_area(&info);
 	}
 
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 2a9fbc4a37d5..f2f03c655807 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -199,7 +199,7 @@ void tlb_table_flush(struct mmu_gather *tlb)
 
 	if (*batch) {
 		tlb_table_invalidate(tlb);
-		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+		call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
 		*batch = NULL;
 	}
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2ec9cc407216..e95b5b7c9c3d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5542,6 +5542,18 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			cond_resched();
 		}
 	}
+#ifdef CONFIG_SPARSEMEM
+	/*
+	 * If the zone does not span the rest of the section then
+	 * we should at least initialize those pages. Otherwise we
+	 * could blow up on a poisoned page in some paths which depend
+	 * on full sections being initialized (e.g. memory hotplug).
+	 */
+	while (end_pfn % PAGES_PER_SECTION) {
+		__init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
+		end_pfn++;
+	}
+#endif
 }
 
 #ifdef CONFIG_ZONE_DEVICE
@@ -7802,11 +7814,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		 * handle each tail page individually in migration.
 		 */
 		if (PageHuge(page)) {
+			struct page *head = compound_head(page);
+			unsigned int skip_pages;
 
-			if (!hugepage_migration_supported(page_hstate(page)))
+			if (!hugepage_migration_supported(page_hstate(head)))
 				goto unmovable;
 
-			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
+			skip_pages = (1 << compound_order(head)) - (page - head);
+			iter += skip_pages - 1;
 			continue;
 		}
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 921f80488bb3..375f3ac19bb8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -661,9 +661,7 @@ static int shmem_free_swap(struct address_space *mapping,
 {
 	void *old;
 
-	xa_lock_irq(&mapping->i_pages);
-	old = __xa_cmpxchg(&mapping->i_pages, index, radswap, NULL, 0);
-	xa_unlock_irq(&mapping->i_pages);
+	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
 	if (old != radswap)
 		return -ENOENT;
 	free_swap_and_cache(radix_to_swp_entry(radswap));
@@ -760,7 +758,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
 			break;
 		index = indices[pvec.nr - 1] + 1;
 		pagevec_remove_exceptionals(&pvec);
-		check_move_unevictable_pages(pvec.pages, pvec.nr);
+		check_move_unevictable_pages(&pvec);
 		pagevec_release(&pvec);
 		cond_resched();
 	}
diff --git a/mm/slab.c b/mm/slab.c
index 2a5654bb3b3f..3abb9feb3818 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -962,10 +962,10 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
 	 * To protect lockless access to n->shared during irq disabled context.
 	 * If n->shared isn't NULL in irq disabled context, accessing to it is
 	 * guaranteed to be valid until irq is re-enabled, because it will be
-	 * freed after synchronize_sched().
+	 * freed after synchronize_rcu().
 	 */
 	if (old_shared && force_change)
-		synchronize_sched();
+		synchronize_rcu();
 
 fail:
 	kfree(old_shared);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7eb8dc136c1c..9c11e8a937d2 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -724,7 +724,7 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
 	css_get(&s->memcg_params.memcg->css);
 
 	s->memcg_params.deact_fn = deact_fn;
-	call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
+	call_rcu(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
 }
 
 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
@@ -839,11 +839,11 @@ static void flush_memcg_workqueue(struct kmem_cache *s)
 	mutex_unlock(&slab_mutex);
 
 	/*
-	 * SLUB deactivates the kmem_caches through call_rcu_sched. Make
+	 * SLUB deactivates the kmem_caches through call_rcu. Make
 	 * sure all registered rcu callbacks have been invoked.
 	 */
 	if (IS_ENABLED(CONFIG_SLUB))
-		rcu_barrier_sched();
+		rcu_barrier();
 
 	/*
 	 * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
diff --git a/mm/sparse.c b/mm/sparse.c
index 33307fc05c4d..3abc8cc50201 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -240,6 +240,22 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
 }
 
 /*
+ * Mark all memblocks as present using memory_present(). This is a
+ * convienence function that is useful for a number of arches
+ * to mark all of the systems memory as present during initialization.
+ */
+void __init memblocks_present(void)
+{
+	struct memblock_region *reg;
+
+	for_each_memblock(memory, reg) {
+		memory_present(memblock_get_region_node(reg),
+			       memblock_region_memory_base_pfn(reg),
+			       memblock_region_memory_end_pfn(reg));
+	}
+}
+
+/*
  * Subtle, we encode the real pfn into the mem_map such that
  * the identity pfn - section_mem_map will return the actual
  * physical page frame number.
diff --git a/mm/swap.c b/mm/swap.c
index aa483719922e..5d786019eab9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -823,8 +823,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 	VM_BUG_ON_PAGE(!PageHead(page), page);
 	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
 	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
-	VM_BUG_ON(NR_CPUS != 1 &&
-		  !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));
+	lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
 
 	if (!list)
 		SetPageLRU(page_tail);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 62ac0c488624..24ab1f7394ab 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -46,6 +46,7 @@
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
 #include <linux/oom.h>
+#include <linux/pagevec.h>
 #include <linux/prefetch.h>
 #include <linux/printk.h>
 #include <linux/dax.h>
@@ -4182,17 +4183,16 @@ int page_evictable(struct page *page)
 	return ret;
 }
 
-#ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
- * @pages: array of pages to check
- * @nr_pages: number of pages to check
+ * check_move_unevictable_pages - check pages for evictability and move to
+ * appropriate zone lru list
+ * @pvec: pagevec with lru pages to check
  *
- * Checks pages for evictability and moves them to the appropriate lru list.
- *
- * This function is only used for SysV IPC SHM_UNLOCK.
+ * Checks pages for evictability, if an evictable page is in the unevictable
+ * lru list, moves it to the appropriate evictable lru list. This function
+ * should be only used for lru pages.
  */
-void check_move_unevictable_pages(struct page **pages, int nr_pages)
+void check_move_unevictable_pages(struct pagevec *pvec)
 {
 	struct lruvec *lruvec;
 	struct pglist_data *pgdat = NULL;
@@ -4200,8 +4200,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 	int pgrescued = 0;
 	int i;
 
-	for (i = 0; i < nr_pages; i++) {
-		struct page *page = pages[i];
+	for (i = 0; i < pvec->nr; i++) {
+		struct page *page = pvec->pages[i];
 		struct pglist_data *pagepgdat = page_pgdat(page);
 
 		pgscanned++;
@@ -4233,4 +4233,4 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 		spin_unlock_irq(&pgdat->lru_lock);
 	}
 }
-#endif /* CONFIG_SHMEM */
+EXPORT_SYMBOL_GPL(check_move_unevictable_pages);