diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-18 20:50:32 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-18 20:50:32 -0800 |
| commit | eeccf287a2a517954b57cf9d733b3cf5d47afa34 (patch) | |
| tree | 46b5cd55d8da25cbc9aa96b38470506958851005 /fs | |
| parent | 956b9cbd7f156c8672dac94a00de3c6a0939c692 (diff) | |
| parent | ac1ea219590c09572ed5992dc233bbf7bb70fef9 (diff) | |
Merge tag 'mm-stable-2026-02-18-19-48' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull more MM updates from Andrew Morton:
- "mm/vmscan: fix demotion targets checks in reclaim/demotion" fixes a
  couple of issues in the demotion code - pages were failing demotion
  and were finding themselves demoted into disallowed nodes (Bing Jiao)
- "Remove XA_ZERO from error recovery of dup_mmap()" fixes a rare
  maple tree race and performs a number of cleanups (Liam Howlett)
- "mm: add bitmap VMA flag helpers and convert all mmap_prepare to use
them" implements a lot of cleanups following on from the conversion
of the VMA flags into a bitmap (Lorenzo Stoakes)
- "support batch checking of references and unmapping for large folios"
implements batching to greatly improve the performance of reclaiming
clean file-backed large folios (Baolin Wang)
- "selftests/mm: add memory failure selftests" does as claimed (Miaohe
Lin)
* tag 'mm-stable-2026-02-18-19-48' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (36 commits)
mm/page_alloc: clear page->private in free_pages_prepare()
selftests/mm: add memory failure dirty pagecache test
selftests/mm: add memory failure clean pagecache test
selftests/mm: add memory failure anonymous page test
mm: rmap: support batched unmapping for file large folios
arm64: mm: implement the architecture-specific clear_flush_young_ptes()
arm64: mm: support batch clearing of the young flag for large folios
arm64: mm: factor out the address and ptep alignment into a new helper
mm: rmap: support batched checks of the references for large folios
tools/testing/vma: add VMA userland tests for VMA flag functions
tools/testing/vma: separate out vma_internal.h into logical headers
tools/testing/vma: separate VMA userland tests into separate files
mm: make vm_area_desc utilise vma_flags_t only
mm: update all remaining mmap_prepare users to use vma_flags_t
mm: update shmem_[kernel]_file_*() functions to use vma_flags_t
mm: update secretmem to use VMA flags on mmap_prepare
mm: update hugetlbfs to use VMA flags on mmap_prepare
mm: add basic VMA flag operation helper functions
tools: bitmap: add missing bitmap_[subset(), andnot()]
mm: add mk_vma_flags() bitmap flag macro helper
...
Diffstat (limited to 'fs')
| -rw-r--r-- | fs/aio.c | 2 | ||||
| -rw-r--r-- | fs/erofs/data.c | 5 | ||||
| -rw-r--r-- | fs/ext4/file.c | 4 | ||||
| -rw-r--r-- | fs/hugetlbfs/inode.c | 14 | ||||
| -rw-r--r-- | fs/ntfs3/file.c | 2 | ||||
| -rw-r--r-- | fs/orangefs/file.c | 4 | ||||
| -rw-r--r-- | fs/ramfs/file-nommu.c | 2 | ||||
| -rw-r--r-- | fs/resctrl/pseudo_lock.c | 2 | ||||
| -rw-r--r-- | fs/romfs/mmap-nommu.c | 2 | ||||
| -rw-r--r-- | fs/xfs/scrub/xfile.c | 3 | ||||
| -rw-r--r-- | fs/xfs/xfs_buf_mem.c | 2 | ||||
| -rw-r--r-- | fs/xfs/xfs_file.c | 4 | ||||
| -rw-r--r-- | fs/zonefs/file.c | 3 |
13 files changed, 26 insertions, 23 deletions
@@ -394,7 +394,7 @@ static const struct vm_operations_struct aio_ring_vm_ops = { static int aio_ring_mmap_prepare(struct vm_area_desc *desc) { - desc->vm_flags |= VM_DONTEXPAND; + vma_desc_set_flags(desc, VMA_DONTEXPAND_BIT); desc->vm_ops = &aio_ring_vm_ops; return 0; } diff --git a/fs/erofs/data.c b/fs/erofs/data.c index a28084ef796b..f79ee80627d9 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -473,11 +473,12 @@ static int erofs_file_mmap_prepare(struct vm_area_desc *desc) if (!IS_DAX(file_inode(desc->file))) return generic_file_readonly_mmap_prepare(desc); - if ((desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE)) + if (vma_desc_test_flags(desc, VMA_SHARED_BIT) && + vma_desc_test_flags(desc, VMA_MAYWRITE_BIT)) return -EINVAL; desc->vm_ops = &erofs_dax_vm_ops; - desc->vm_flags |= VM_HUGEPAGE; + vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT); return 0; } #else diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 4320ebff74f3..f1dc5ce791a7 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -818,13 +818,13 @@ static int ext4_file_mmap_prepare(struct vm_area_desc *desc) * We don't support synchronous mappings for non-DAX files and * for DAX files if underneath dax_device is not synchronous. 
*/ - if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file), dax_dev)) + if (!daxdev_mapping_supported(desc, file_inode(file), dax_dev)) return -EOPNOTSUPP; file_accessed(file); if (IS_DAX(file_inode(file))) { desc->vm_ops = &ext4_dax_vm_ops; - desc->vm_flags |= VM_HUGEPAGE; + vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT); } else { desc->vm_ops = &ext4_file_vm_ops; } diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 3b4c152c5c73..95a5b23b4808 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -109,7 +109,7 @@ static int hugetlbfs_file_mmap_prepare(struct vm_area_desc *desc) loff_t len, vma_len; int ret; struct hstate *h = hstate_file(file); - vm_flags_t vm_flags; + vma_flags_t vma_flags; /* * vma address alignment (but not the pgoff alignment) has @@ -119,7 +119,7 @@ static int hugetlbfs_file_mmap_prepare(struct vm_area_desc *desc) * way when do_mmap unwinds (may be important on powerpc * and ia64). */ - desc->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; + vma_desc_set_flags(desc, VMA_HUGETLB_BIT, VMA_DONTEXPAND_BIT); desc->vm_ops = &hugetlb_vm_ops; /* @@ -148,23 +148,23 @@ static int hugetlbfs_file_mmap_prepare(struct vm_area_desc *desc) ret = -ENOMEM; - vm_flags = desc->vm_flags; + vma_flags = desc->vma_flags; /* * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip * reserving here. Note: only for SHM hugetlbfs file, the inode * flag S_PRIVATE is set. 
*/ if (inode->i_flags & S_PRIVATE) - vm_flags |= VM_NORESERVE; + vma_flags_set(&vma_flags, VMA_NORESERVE_BIT); if (hugetlb_reserve_pages(inode, desc->pgoff >> huge_page_order(h), len >> huge_page_shift(h), desc, - vm_flags) < 0) + vma_flags) < 0) goto out; ret = 0; - if ((desc->vm_flags & VM_WRITE) && inode->i_size < len) + if (vma_desc_test_flags(desc, VMA_WRITE_BIT) && inode->i_size < len) i_size_write(inode, len); out: inode_unlock(inode); @@ -1527,7 +1527,7 @@ static int get_hstate_idx(int page_size_log) * otherwise hugetlb_reserve_pages reserves one less hugepages than intended. */ struct file *hugetlb_file_setup(const char *name, size_t size, - vm_flags_t acctflag, int creat_flags, + vma_flags_t acctflag, int creat_flags, int page_size_log) { struct inode *inode; diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c index ae8c47cac406..f53037e0ecb6 100644 --- a/fs/ntfs3/file.c +++ b/fs/ntfs3/file.c @@ -276,7 +276,7 @@ static int ntfs_file_mmap_prepare(struct vm_area_desc *desc) struct file *file = desc->file; struct inode *inode = file_inode(file); struct ntfs_inode *ni = ntfs_i(inode); - bool rw = desc->vm_flags & VM_WRITE; + const bool rw = vma_desc_test_flags(desc, VMA_WRITE_BIT); int err; /* Avoid any operation if inode is bad. 
*/ diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c index afd610a3fc68..42591252e239 100644 --- a/fs/orangefs/file.c +++ b/fs/orangefs/file.c @@ -411,8 +411,8 @@ static int orangefs_file_mmap_prepare(struct vm_area_desc *desc) "orangefs_file_mmap: called on %pD\n", file); /* set the sequential readahead hint */ - desc->vm_flags |= VM_SEQ_READ; - desc->vm_flags &= ~VM_RAND_READ; + vma_desc_set_flags(desc, VMA_SEQ_READ_BIT); + vma_desc_clear_flags(desc, VMA_RAND_READ_BIT); file_accessed(file); desc->vm_ops = &orangefs_file_vm_ops; diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 77b8ca2757e0..0f8e838ece07 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -264,7 +264,7 @@ out: */ static int ramfs_nommu_mmap_prepare(struct vm_area_desc *desc) { - if (!is_nommu_shared_mapping(desc->vm_flags)) + if (!is_nommu_shared_vma_flags(&desc->vma_flags)) return -ENOSYS; file_accessed(desc->file); diff --git a/fs/resctrl/pseudo_lock.c b/fs/resctrl/pseudo_lock.c index 0bfc13c5b96d..e81d71abfe54 100644 --- a/fs/resctrl/pseudo_lock.c +++ b/fs/resctrl/pseudo_lock.c @@ -1044,7 +1044,7 @@ static int pseudo_lock_dev_mmap_prepare(struct vm_area_desc *desc) * Ensure changes are carried directly to the memory being mapped, * do not allow copy-on-write mapping. */ - if (!(desc->vm_flags & VM_SHARED)) { + if (!vma_desc_test_flags(desc, VMA_SHARED_BIT)) { mutex_unlock(&rdtgroup_mutex); return -EINVAL; } diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c index 4b77c6dc4418..7c3a1a7fecee 100644 --- a/fs/romfs/mmap-nommu.c +++ b/fs/romfs/mmap-nommu.c @@ -63,7 +63,7 @@ static unsigned long romfs_get_unmapped_area(struct file *file, */ static int romfs_mmap_prepare(struct vm_area_desc *desc) { - return is_nommu_shared_mapping(desc->vm_flags) ? 0 : -ENOSYS; + return is_nommu_shared_vma_flags(&desc->vma_flags) ? 
0 : -ENOSYS; } static unsigned romfs_mmap_capabilities(struct file *file) diff --git a/fs/xfs/scrub/xfile.c b/fs/xfs/scrub/xfile.c index 2998c9b62f4b..bee0662fbdb6 100644 --- a/fs/xfs/scrub/xfile.c +++ b/fs/xfs/scrub/xfile.c @@ -61,7 +61,8 @@ xfile_create( if (!xf) return -ENOMEM; - xf->file = shmem_kernel_file_setup(description, isize, VM_NORESERVE); + xf->file = shmem_kernel_file_setup(description, isize, + mk_vma_flags(VMA_NORESERVE_BIT)); if (IS_ERR(xf->file)) { error = PTR_ERR(xf->file); goto out_xfile; diff --git a/fs/xfs/xfs_buf_mem.c b/fs/xfs/xfs_buf_mem.c index 0106da0a9f44..f1f23623e4a4 100644 --- a/fs/xfs/xfs_buf_mem.c +++ b/fs/xfs/xfs_buf_mem.c @@ -62,7 +62,7 @@ xmbuf_alloc( if (!btp) return -ENOMEM; - file = shmem_kernel_file_setup(descr, 0, 0); + file = shmem_kernel_file_setup(descr, 0, EMPTY_VMA_FLAGS); if (IS_ERR(file)) { error = PTR_ERR(file); goto out_free_btp; diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 43d088a3bceb..6246f34df9fd 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -2010,14 +2010,14 @@ xfs_file_mmap_prepare( * We don't support synchronous mappings for non-DAX files and * for DAX files if underneath dax_device is not synchronous. */ - if (!daxdev_mapping_supported(desc->vm_flags, file_inode(file), + if (!daxdev_mapping_supported(desc, file_inode(file), target->bt_daxdev)) return -EOPNOTSUPP; file_accessed(file); desc->vm_ops = &xfs_file_vm_ops; if (IS_DAX(inode)) - desc->vm_flags |= VM_HUGEPAGE; + vma_desc_set_flags(desc, VMA_HUGEPAGE_BIT); return 0; } diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c index c1e5e30e90a0..8a7161fc49e5 100644 --- a/fs/zonefs/file.c +++ b/fs/zonefs/file.c @@ -333,7 +333,8 @@ static int zonefs_file_mmap_prepare(struct vm_area_desc *desc) * ordering between msync() and page cache writeback. 
*/ if (zonefs_inode_is_seq(file_inode(file)) && - (desc->vm_flags & VM_SHARED) && (desc->vm_flags & VM_MAYWRITE)) + vma_desc_test_flags(desc, VMA_SHARED_BIT) && + vma_desc_test_flags(desc, VMA_MAYWRITE_BIT)) return -EINVAL; file_accessed(file); |
