Diffstat (limited to 'mm')
-rw-r--r--  mm/cma.c             2
-rw-r--r--  mm/filemap.c        11
-rw-r--r--  mm/kasan/report.c    2
-rw-r--r--  mm/memblock.c        9
-rw-r--r--  mm/memcontrol.c      3
-rw-r--r--  mm/memory-failure.c  6
-rw-r--r--  mm/memory.c          4
-rw-r--r--  mm/memory_hotplug.c 10
-rw-r--r--  mm/migrate.c        14
-rw-r--r--  mm/page_alloc.c      2
-rw-r--r--  mm/readahead.c       3
-rw-r--r--  mm/shmem.c          28
-rw-r--r--  mm/util.c            4
-rw-r--r--  mm/vmalloc.c        26
-rw-r--r--  mm/vmpressure.c      8
15 files changed, 103 insertions, 29 deletions
diff --git a/mm/cma.c b/mm/cma.c
index a972c3440c40..26967c70e9c7 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -503,7 +503,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
*/
if (page) {
for (i = 0; i < count; i++)
- page_kasan_tag_reset(page + i);
+ page_kasan_tag_reset(nth_page(page, i));
}
if (ret && !no_warn) {
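
Note on the cma.c change: with SPARSEMEM and without VMEMMAP, the struct page array is only contiguous within a memory section, so "page + i" can walk off the end of one section's array instead of reaching the page at pfn + i; nth_page() goes through the pfn instead. A minimal userspace model of the difference (mock types, not kernel code):

    #include <stdio.h>

    struct page { unsigned long pfn; };

    /* Hypothetical layout: two sections whose struct page arrays are
     * not adjacent in memory. */
    static struct page section0[4] = { {0}, {1}, {2}, {3} };
    static struct page section1[4] = { {4}, {5}, {6}, {7} };

    static struct page *pfn_to_page(unsigned long pfn)
    {
            return pfn < 4 ? &section0[pfn] : &section1[pfn - 4];
    }

    static unsigned long page_to_pfn(struct page *page)
    {
            return page->pfn;
    }

    /* nth_page()-style helper: correct across section boundaries. */
    static struct page *nth_page(struct page *page, unsigned long n)
    {
            return pfn_to_page(page_to_pfn(page) + n);
    }

    int main(void)
    {
            struct page *base = pfn_to_page(2);
            /* base + 3 would step past section0[]; going via the pfn
             * lands on pfn 5 as intended. */
            printf("pfn: %lu\n", page_to_pfn(nth_page(base, 3)));
            return 0;
    }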
diff --git a/mm/filemap.c b/mm/filemap.c
index 81e28722edfa..b76deb24aeea 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2649,6 +2649,15 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
/*
+ * Pairs with a barrier in
+ * block_write_end()->mark_buffer_dirty() or other page
+ * dirtying routines like iomap_write_end() to ensure
+ * changes to page contents are visible before we see
+ * increased inode size.
+ */
+ smp_rmb();
+
+ /*
* Once we start copying data, we don't want to be touching any
* cachelines that might be contended:
*/
@@ -3209,7 +3218,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
}
}
- if (pmd_none(*vmf->pmd)) {
+ if (pmd_none(*vmf->pmd) && vmf->prealloc_pte) {
vmf->ptl = pmd_lock(mm, vmf->pmd);
if (likely(pmd_none(*vmf->pmd))) {
mm_inc_nr_ptes(mm);
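
Note on the filemap.c barrier: the writer dirties page contents and then updates i_size, with a write-side barrier in the dirtying path; the reader samples i_size and then copies page contents, so it needs the paired smp_rmb() to avoid seeing the enlarged size with stale data. A miniature of the pattern, using C11 atomics in place of the kernel barriers (the two sides race in the kernel; they are called sequentially here only to keep the sketch runnable):

    #include <stdatomic.h>
    #include <stdio.h>

    static char page_contents[64];
    static atomic_long i_size;

    /* Writer: dirty the page, then publish the larger size.
     * (kernel: mark_buffer_dirty()/iomap_write_end() provide the
     * write-side ordering before the i_size update) */
    static void writer(void)
    {
            page_contents[0] = 'x';
            atomic_store_explicit(&i_size, 1, memory_order_release);
    }

    /* Reader: sample the size, then read contents.
     * (kernel: the added smp_rmb() orders these two reads) */
    static void reader(void)
    {
            long isize = atomic_load_explicit(&i_size, memory_order_acquire);
            if (isize > 0)
                    printf("saw byte '%c' within i_size\n", page_contents[0]);
    }

    int main(void)
    {
            writer();
            reader();
            return 0;
    }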
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 887af873733b..8786cdfeaa91 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -457,7 +457,7 @@ bool kasan_report(unsigned long addr, size_t size, bool is_write,
return ret;
}
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
* With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
* canonical half of the address space) cause out-of-bounds shadow memory reads
diff --git a/mm/memblock.c b/mm/memblock.c
index 2b7397781c99..2f2094b16416 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -661,6 +661,7 @@ repeat:
* @base: base address of the new region
* @size: size of the new region
* @nid: nid of the new region
+ * @flags: flags of the new region
*
* Add new memblock region [@base, @base + @size) to the "memory"
* type. See memblock_add_range() description for mode details
@@ -669,14 +670,14 @@ repeat:
* 0 on success, -errno on failure.
*/
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
- int nid)
+ int nid, enum memblock_flags flags)
{
phys_addr_t end = base + size - 1;
- memblock_dbg("%s: [%pa-%pa] nid=%d %pS\n", __func__,
- &base, &end, nid, (void *)_RET_IP_);
+ memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
+ &base, &end, nid, flags, (void *)_RET_IP_);
- return memblock_add_range(&memblock.memory, base, size, nid, 0);
+ return memblock_add_range(&memblock.memory, base, size, nid, flags);
}
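
Note on the memblock.c signature change: existing callers keep their behaviour by passing MEMBLOCK_NONE (as the memory_hotplug.c hunk below does), while a caller can now tag a range at add time. An illustrative caller, kernel context rather than a standalone program; MEMBLOCK_HOTPLUG is an existing memblock flag:

    /* Plain addition, same semantics as the old 3-argument call: */
    ret = memblock_add_node(start, size, nid, MEMBLOCK_NONE);

    /* A range known to be hot(un)pluggable can now say so up front: */
    ret = memblock_add_node(start, size, nid, MEMBLOCK_HOTPLUG);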
/**
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b68b2fe639fd..6f969ba0d688 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2822,7 +2822,8 @@ retry:
* Moreover, it should not come from DMA buffer and is not readily
* reclaimable. So those GFP bits should be masked off.
*/
-#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
+#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
+ __GFP_ACCOUNT | __GFP_NOFAIL)
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
gfp_t gfp, bool new_page)
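
Note on the memcontrol.c mask: the objcg vector allocation derives its gfp from the caller's and strips flags that must not apply to that metadata allocation; the patch adds __GFP_NOFAIL to the strip list. A self-contained sketch of the masking arithmetic (the flag values here are illustrative, not the kernel's):

    #include <stdio.h>

    typedef unsigned int gfp_t;

    /* Illustrative bit values; the real ones live in the gfp headers. */
    #define __GFP_DMA         0x01u
    #define __GFP_RECLAIMABLE 0x10u
    #define __GFP_ACCOUNT     0x400000u
    #define __GFP_NOFAIL      0x8000u

    #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
                               __GFP_ACCOUNT | __GFP_NOFAIL)

    int main(void)
    {
            gfp_t caller_gfp = __GFP_ACCOUNT | __GFP_NOFAIL;
            gfp_t objcgs_gfp = caller_gfp & ~OBJCGS_CLEAR_MASK;

            /* __GFP_NOFAIL no longer leaks into the metadata allocation. */
            printf("caller=%#x objcgs=%#x\n", caller_gfp, objcgs_gfp);
            return 0;
    }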
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index bcd71d8736be..1f23baa98ee9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1331,7 +1331,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
* This check implies we don't kill processes if their pages
* are in the swap cache early. Those are always late kills.
*/
- if (!page_mapped(hpage))
+ if (!page_mapped(p))
return true;
if (PageKsm(p)) {
@@ -1397,10 +1397,10 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
}
}
- unmap_success = !page_mapped(hpage);
+ unmap_success = !page_mapped(p);
if (!unmap_success)
pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
- pfn, page_mapcount(hpage));
+ pfn, page_mapcount(p));
/*
* try_to_unmap() might put mlocked page in lru cache, so call
diff --git a/mm/memory.c b/mm/memory.c
index 1bb01b12db53..6044d9a4bcd6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3451,8 +3451,8 @@ EXPORT_SYMBOL_GPL(unmap_mapping_pages);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows)
{
- pgoff_t hba = holebegin >> PAGE_SHIFT;
- pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
+ pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* Check for overflow. */
if (sizeof(holelen) > sizeof(hlen)) {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 81f2a97c886c..2d8e9fb4ce0b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1384,8 +1384,11 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
mem_hotplug_begin();
- if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
- memblock_add_node(start, size, nid);
+ if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
+ ret = memblock_add_node(start, size, nid, MEMBLOCK_NONE);
+ if (ret)
+ goto error_mem_hotplug_end;
+ }
ret = __try_online_node(nid, false);
if (ret < 0)
@@ -1458,6 +1461,7 @@ error:
rollback_node_hotadd(nid);
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
memblock_remove(start, size);
+error_mem_hotplug_end:
mem_hotplug_done();
return ret;
}
@@ -1677,7 +1681,7 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
*/
if (HPageMigratable(head))
goto found;
- skip = compound_nr(head) - (page - head);
+ skip = compound_nr(head) - (pfn - page_to_pfn(head));
pfn += skip - 1;
}
return -ENOENT;
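
Note on the scan_movable_pages() hunk: like the cma.c change above, it avoids struct page pointer arithmetic across possibly non-contiguous page arrays; the offset of pfn inside the compound page is computed from pfns rather than from "page - head". The arithmetic in isolation (values are hypothetical):

    #include <stdio.h>

    int main(void)
    {
            /* A 512-page compound page headed at pfn 1000, with the
             * scan currently at pfn 1300. */
            unsigned long head_pfn = 1000, pfn = 1300, compound_nr = 512;

            unsigned long skip = compound_nr - (pfn - head_pfn);
            pfn += skip - 1;        /* the loop's pfn++ then lands just
                                     * past the compound page */

            printf("next pfn scanned: %lu\n", pfn + 1);  /* 1512 */
            return 0;
    }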
diff --git a/mm/migrate.c b/mm/migrate.c
index dd50b1cc089e..c7d5566623ad 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1788,6 +1788,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
const int __user *nodes,
int __user *status, int flags)
{
+ compat_uptr_t __user *compat_pages = (void __user *)pages;
int current_node = NUMA_NO_NODE;
LIST_HEAD(pagelist);
int start, i;
@@ -1801,8 +1802,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
int node;
err = -EFAULT;
- if (get_user(p, pages + i))
- goto out_flush;
+ if (in_compat_syscall()) {
+ compat_uptr_t cp;
+
+ if (get_user(cp, compat_pages + i))
+ goto out_flush;
+
+ p = compat_ptr(cp);
+ } else {
+ if (get_user(p, pages + i))
+ goto out_flush;
+ }
if (get_user(node, nodes + i))
goto out_flush;
addr = (unsigned long)untagged_addr(p);
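
Note on the migrate.c change: for a 32-bit task, the pages array holds 32-bit user pointers, so reading it with a 64-bit get_user() strides 8 bytes per entry and fuses neighbouring entries into one bogus pointer; the fix reads a compat_uptr_t and widens it with compat_ptr(). A userspace illustration of the misread (little-endian assumed for the printed value):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef uint32_t compat_uptr_t;  /* 32-bit user pointer */

    int main(void)
    {
            compat_uptr_t user_array[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
            uint64_t wrong;

            /* Old behaviour: "entry 1" read at byte offset 1 * 8, which
             * actually spans entries 2 and 3 of the 32-bit array. */
            memcpy(&wrong, (const char *)user_array + 1 * 8, sizeof(wrong));
            printf("misread entry 1: %#llx\n", (unsigned long long)wrong);

            /* compat_ptr()-style: correct 32-bit read, then widen. */
            void *p = (void *)(uintptr_t)user_array[1];
            printf("correct entry 1: %p\n", p);
            return 0;
    }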
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f320ee2bd34a..ec3333a1f7fe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -9481,6 +9481,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
next_page = page;
current_buddy = page + size;
}
+ page = next_page;
if (set_page_guard(zone, current_buddy, high, migratetype))
continue;
@@ -9488,7 +9489,6 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
if (current_buddy != target) {
add_to_free_list(current_buddy, zone, high, migratetype);
set_buddy_order(current_buddy, high);
- page = next_page;
}
}
}
diff --git a/mm/readahead.c b/mm/readahead.c
index 41b75d76d36e..a8f7e4e550f4 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -627,7 +627,8 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
*/
ret = -EINVAL;
if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
- !S_ISREG(file_inode(f.file)->i_mode))
+ (!S_ISREG(file_inode(f.file)->i_mode) &&
+ !S_ISBLK(file_inode(f.file)->i_mode)))
goto out;
ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
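
Note on the readahead.c change: the old check rejected anything that is not a regular file, even though block devices also have a page cache; the relaxed check admits both. The predicate on its own, using the standard mode macros:

    #include <sys/stat.h>
    #include <stdio.h>

    /* Mirrors the updated mode check in ksys_readahead(). */
    static int readahead_mode_ok(mode_t mode)
    {
            return S_ISREG(mode) || S_ISBLK(mode);
    }

    int main(void)
    {
            struct stat st;

            /* Illustrative path; any block device node would do. */
            if (stat("/dev/sda", &st) == 0)
                    printf("allowed: %d\n", readahead_mode_ok(st.st_mode));
            return 0;
    }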
diff --git a/mm/shmem.c b/mm/shmem.c
index 41efb92c4e38..663fb117cd87 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3394,6 +3394,8 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
unsigned long long size;
char *rest;
int opt;
+ kuid_t kuid;
+ kgid_t kgid;
opt = fs_parse(fc, shmem_fs_parameters, param, &result);
if (opt < 0)
@@ -3429,14 +3431,32 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
ctx->mode = result.uint_32 & 07777;
break;
case Opt_uid:
- ctx->uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(ctx->uid))
+ kuid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(kuid))
goto bad_value;
+
+ /*
+ * The requested uid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kuid_has_mapping(fc->user_ns, kuid))
+ goto bad_value;
+
+ ctx->uid = kuid;
break;
case Opt_gid:
- ctx->gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(ctx->gid))
+ kgid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(kgid))
goto bad_value;
+
+ /*
+ * The requested gid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kgid_has_mapping(fc->user_ns, kgid))
+ goto bad_value;
+
+ ctx->gid = kgid;
break;
case Opt_huge:
ctx->huge = result.uint_32;
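
Note on the shmem.c change: a uid or gid can be valid in the mounter's namespace yet have no representation in the filesystem's idmapping, and the added kuid_has_mapping()/kgid_has_mapping() checks reject that at parse time instead of storing an unusable id. A userspace model of the check (types and mappings are hypothetical; a real user namespace holds a table of such windows):

    #include <stdbool.h>
    #include <stdio.h>

    /* Model a user namespace as one contiguous window of kernel uids. */
    struct user_ns { unsigned first_kuid, count; };

    static bool has_mapping(const struct user_ns *ns, unsigned kuid)
    {
            return kuid >= ns->first_kuid &&
                   kuid < ns->first_kuid + ns->count;
    }

    int main(void)
    {
            struct user_ns current_ns = { 0,      65536 };
            struct user_ns fs_ns      = { 100000, 65536 };  /* shifted */
            unsigned kuid = 500;    /* valid, maps fine in current_ns */

            printf("valid in mounter's ns:  %d\n", has_mapping(&current_ns, kuid));
            printf("representable in fs ns: %d\n", has_mapping(&fs_ns, kuid));
            return 0;
    }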
diff --git a/mm/util.c b/mm/util.c
index ea04979f131e..e89ef15085de 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -1110,7 +1110,9 @@ void mem_dump_obj(void *object)
if (vmalloc_dump_obj(object))
return;
- if (virt_addr_valid(object))
+ if (is_vmalloc_addr(object))
+ type = "vmalloc memory";
+ else if (virt_addr_valid(object))
type = "non-slab/vmalloc memory";
else if (object == NULL)
type = "NULL pointer";
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c14481c3a3cd..3f8a868f8c84 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3825,14 +3825,32 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
#ifdef CONFIG_PRINTK
bool vmalloc_dump_obj(void *object)
{
- struct vm_struct *vm;
void *objp = (void *)PAGE_ALIGN((unsigned long)object);
+ const void *caller;
+ struct vm_struct *vm;
+ struct vmap_area *va;
+ unsigned long addr;
+ unsigned int nr_pages;
+
+ if (!spin_trylock(&vmap_area_lock))
+ return false;
+ va = __find_vmap_area((unsigned long)objp);
+ if (!va) {
+ spin_unlock(&vmap_area_lock);
+ return false;
+ }
- vm = find_vm_area(objp);
- if (!vm)
+ vm = va->vm;
+ if (!vm) {
+ spin_unlock(&vmap_area_lock);
return false;
+ }
+ addr = (unsigned long)vm->addr;
+ caller = vm->caller;
+ nr_pages = vm->nr_pages;
+ spin_unlock(&vmap_area_lock);
pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
- vm->nr_pages, (unsigned long)vm->addr, vm->caller);
+ nr_pages, addr, caller);
return true;
}
#endif
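
Note on the vmalloc.c rework: mem_dump_obj() can run from contexts where spinning on vmap_area_lock is unsafe, so the function now trylocks, snapshots the three fields it reports, unlocks, and only then prints. The shape of that pattern as a standalone program, with a pthread spinlock standing in for vmap_area_lock (build with -lpthread):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct region { unsigned nr_pages; unsigned long addr; };

    static pthread_spinlock_t lock;
    static struct region table = { 4, 0xffffc90000001000UL };

    static bool dump_region(void)
    {
            unsigned nr_pages;
            unsigned long addr;

            if (pthread_spin_trylock(&lock))
                    return false;       /* contended: give up, don't spin */

            nr_pages = table.nr_pages;  /* snapshot under the lock */
            addr = table.addr;
            pthread_spin_unlock(&lock);

            /* Printing happens with the lock already dropped. */
            printf("%u-page region starting at %#lx\n", nr_pages, addr);
            return true;
    }

    int main(void)
    {
            pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
            return dump_region() ? 0 : 1;
    }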
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 76518e4166dc..383e0463c025 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -244,6 +244,14 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
if (mem_cgroup_disabled())
return;
+ /*
+ * The in-kernel users only care about the reclaim efficiency
+ * for this @memcg rather than the whole subtree, and there
+ * isn't and won't be any in-kernel user in a legacy cgroup.
+ */
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !tree)
+ return;
+
vmpr = memcg_to_vmpressure(memcg);
/*