author    Andrew Morton <akpm@linux-foundation.org>  2024-11-05 16:53:31 -0800
committer Andrew Morton <akpm@linux-foundation.org>  2024-11-05 16:53:31 -0800
commit    48901e9d625232e2b10e5a5abb98fa48250f4a8f (patch)
tree      5ede389e5538fe04df42fcf4835d8954b5419939 /include
parent    59b723cd2adbac2a34fc8e12c74ae26ae45bf230 (diff)
parent    5de195060b2e251a835f622759550e6202167641 (diff)
Merge branch 'mm-hotfixes-stable' into mm-stable.

Pick these into mm-stable:

5de195060b2e mm: resolve faulty mmap_region() error path behaviour
5baf8b037deb mm: refactor arch_calc_vm_flag_bits() and arm64 MTE handling
0fb4a7ad270b mm: refactor map_deny_write_exec()
4080ef1579b2 mm: unconditionally close VMAs on error
3dd6ed34ce1f mm: avoid unsafe VMA hook invocation when error arises on mmap hook
f8f931bba0f9 mm/thp: fix deferred split unqueue naming and locking
e66f3185fa04 mm/thp: fix deferred split queue not partially_mapped

to get a clean merge of these from mm-unstable into mm-stable:

Subject: memcg-v1: fully deprecate move_charge_at_immigrate
Subject: memcg-v1: remove charge move code
Subject: memcg-v1: no need for memcg locking for dirty tracking
Subject: memcg-v1: no need for memcg locking for writeback tracking
Subject: memcg-v1: no need for memcg locking for MGLRU
Subject: memcg-v1: remove memcg move locking code
Subject: tools: testing: add additional vma_internal.h stubs
Subject: mm: isolate mmap internal logic to mm/vma.c
Subject: mm: refactor __mmap_region()
Subject: mm: remove unnecessary reset state logic on merge new VMA
Subject: mm/vma: the pgoff is correct if can_merge_right
Subject: mm: defer second attempt at merge on mmap()
Subject: memcg: workingset: remove folio_memcg_rcu usage
Diffstat (limited to 'include')
-rw-r--r--  include/linux/mman.h  28
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/include/linux/mman.h b/include/linux/mman.h
index bcb201ab7a41..a842783ffa62 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -2,6 +2,7 @@
 #ifndef _LINUX_MMAN_H
 #define _LINUX_MMAN_H
 
+#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/percpu_counter.h>
 
@@ -94,7 +95,7 @@ static inline void vm_unacct_memory(long pages)
 #endif
 
 #ifndef arch_calc_vm_flag_bits
-#define arch_calc_vm_flag_bits(flags) 0
+#define arch_calc_vm_flag_bits(file, flags) 0
 #endif
 
 #ifndef arch_validate_prot
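
The point of threading the file through is that an architecture override can now base its decision on the backing object rather than on the mmap flags alone. A minimal sketch of what such an override might look like (hypothetical: VM_ARCH_FLAG is a placeholder bit and the anonymous/shmem test merely illustrates gating on the backing store; this is not the actual arm64 MTE code):

/*
 * Hypothetical arch override. With the file in hand, the architecture
 * can restrict VM_ARCH_FLAG (placeholder name) to mappings whose
 * backing store it trusts, here anonymous memory or shmem.
 * shmem_mapping() is the existing helper from <linux/shmem_fs.h>.
 */
#define arch_calc_vm_flag_bits(file, flags)				\
	((((flags) & MAP_ANONYMOUS) ||					\
	  ((file) && shmem_mapping((file)->f_mapping))) ?		\
	 VM_ARCH_FLAG : 0)
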
@@ -151,13 +152,13 @@ calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
  * Combine the mmap "flags" argument into "vm_flags" used internally.
  */
 static inline unsigned long
-calc_vm_flag_bits(unsigned long flags)
+calc_vm_flag_bits(struct file *file, unsigned long flags)
 {
 	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
 	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
 	       _calc_vm_trans(flags, MAP_SYNC,       VM_SYNC      ) |
 	       _calc_vm_trans(flags, MAP_STACK,      VM_NOHUGEPAGE) |
-	       arch_calc_vm_flag_bits(flags);
+	       arch_calc_vm_flag_bits(file, flags);
 }
 
 unsigned long vm_commit_limit(void);
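
Each _calc_vm_trans() call remaps one MAP_* bit onto its VM_* counterpart, so the function is pure bit translation plus whatever the arch hook contributes. The mmap path then ORs the result into the final VMA flags, roughly like this (paraphrased from mm/mmap.c:do_mmap(), not an exact quote):

	/* do_mmap() combines protection bits, flag bits, and defaults: */
	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) |
		    mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
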
@@ -188,16 +189,31 @@ static inline bool arch_memory_deny_write_exec_supported(void)
  *
  * d) mmap(PROT_READ | PROT_EXEC)
  *    mmap(PROT_READ | PROT_EXEC | PROT_BTI)
+ *
+ * This is only applicable if the user has set the Memory-Deny-Write-Execute
+ * (MDWE) protection mask for the current process.
+ *
+ * @old specifies the VMA flags the VMA originally possessed, and @new the ones
+ * we propose to set.
+ *
+ * Return: false if proposed change is OK, true if not ok and should be denied.
  */
-static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags)
+static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
 {
+	/* If MDWE is disabled, we have nothing to deny. */
 	if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
 		return false;
 
-	if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE))
+	/* If the new VMA is not executable, we have nothing to deny. */
+	if (!(new & VM_EXEC))
+		return false;
+
+	/* Under MDWE we do not accept newly writably executable VMAs... */
+	if (new & VM_WRITE)
 		return true;
 
-	if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))
+	/* ...nor previously non-executable VMAs becoming executable. */
+	if (!(old & VM_EXEC))
 		return true;
 
 	return false;
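
The behaviour this enforces is visible from userspace once MDWE is switched on with prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, ...). A small standalone demo (assumes Linux 6.3+; the fallback #defines cover older UAPI headers):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_MDWE
#define PR_SET_MDWE			65
#define PR_MDWE_REFUSE_EXEC_GAIN	(1UL << 0)
#endif

int main(void)
{
	/* Enable MDWE for this process; it cannot be cleared again. */
	if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0L, 0L, 0L))
		perror("PR_SET_MDWE");

	/* Denied: a writable and executable mapping (EACCES). */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	printf("rwx mmap:       %s\n",
	       p == MAP_FAILED ? strerror(errno) : "ok");

	/* Denied: gaining PROT_EXEC on a previously non-executable VMA. */
	void *q = mmap(NULL, 4096, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (q != MAP_FAILED) {
		int r = mprotect(q, 4096, PROT_READ | PROT_EXEC);
		printf("r->rx mprotect: %s\n", r ? strerror(errno) : "ok");
	}

	/* Allowed (case d above): executable from the start, never writable. */
	void *x = mmap(NULL, 4096, PROT_READ | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	printf("rx mmap:        %s\n",
	       x == MAP_FAILED ? strerror(errno) : "ok");
	return 0;
}
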