From 24139c07f413ef4b555482c758343d71392a19bc Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Sat, 22 Apr 2023 22:54:18 +0200
Subject: mm/ksm: unmerge and clear VM_MERGEABLE when setting PR_SET_MEMORY_MERGE=0

Patch series "mm/ksm: improve PR_SET_MEMORY_MERGE=0 handling and cleanup
disabling KSM", v2.

(1) Make PR_SET_MEMORY_MERGE=0 unmerge pages like setting
MADV_UNMERGEABLE does, (2) add a selftest for it and (3) factor out
disabling of KSM from s390/gmap code.

This patch (of 3):

Let's unmerge any KSM pages when setting PR_SET_MEMORY_MERGE=0, and
clear the VM_MERGEABLE flag from all VMAs -- just like KSM would.
Of course, only do that if we previously set PR_SET_MEMORY_MERGE=1.

Link: https://lkml.kernel.org/r/20230422205420.30372-1-david@redhat.com
Link: https://lkml.kernel.org/r/20230422205420.30372-2-david@redhat.com
Signed-off-by: David Hildenbrand
Acked-by: Stefan Roesch
Cc: Christian Borntraeger
Cc: Claudio Imbrenda
Cc: Heiko Carstens
Cc: Janosch Frank
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Rik van Riel
Cc: Shuah Khan
Cc: Sven Schnelle
Cc: Vasily Gorbik
Signed-off-by: Andrew Morton
---
 include/linux/ksm.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include')

diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 7a9b76fb6c3f..429efa6ff4ae 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -21,6 +21,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 
 void ksm_add_vma(struct vm_area_struct *vma);
 int ksm_enable_merge_any(struct mm_struct *mm);
+int ksm_disable_merge_any(struct mm_struct *mm);
 int __ksm_enter(struct mm_struct *mm);
 void __ksm_exit(struct mm_struct *mm);
 
--
cgit v1.2.3
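
From userspace, the behavior this patch changes is driven via prctl().
A minimal sketch of that flow, assuming Linux 6.4+ headers that define
PR_SET_MEMORY_MERGE and with error handling reduced to perror():

    /* Hypothetical demo program: toggle process-wide KSM merging. */
    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>        /* PR_SET_MEMORY_MERGE, Linux 6.4+ */

    int main(void)
    {
            /* Opt every compatible VMA of this process into KSM merging. */
            if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
                    perror("PR_SET_MEMORY_MERGE=1");

            /* ... KSM may now merge duplicate pages behind our back ... */

            /*
             * With this patch, turning merging back off also unmerges
             * any already-merged KSM pages and clears VM_MERGEABLE on
             * all VMAs, matching MADV_UNMERGEABLE semantics.
             */
            if (prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0))
                    perror("PR_SET_MEMORY_MERGE=0");
            return 0;
    }
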
From 2c281f54f556e1f3266c8cb104adf9eea7a7b742 Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Sat, 22 Apr 2023 23:01:56 +0200
Subject: mm/ksm: move disabling KSM from s390/gmap code to KSM code

Let's factor out actual disabling of KSM.  The existing
"mm->def_flags &= ~VM_MERGEABLE;" was essentially a NOP and can be
dropped, because def_flags should never include VM_MERGEABLE.  Note
that we don't currently prevent re-enabling KSM.

This should now be faster in case KSM was never enabled, because we
only conditionally iterate all VMAs.  Further, it certainly looks
cleaner.

Link: https://lkml.kernel.org/r/20230422210156.33630-1-david@redhat.com
Signed-off-by: David Hildenbrand
Acked-by: Janosch Frank
Acked-by: Stefan Roesch
Cc: Christian Borntraeger
Cc: Claudio Imbrenda
Cc: Heiko Carstens
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Rik van Riel
Cc: Shuah Khan
Cc: Sven Schnelle
Cc: Vasily Gorbik
Signed-off-by: Andrew Morton
---
 include/linux/ksm.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include')

diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 429efa6ff4ae..899a314bc487 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -22,6 +22,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 void ksm_add_vma(struct vm_area_struct *vma);
 int ksm_enable_merge_any(struct mm_struct *mm);
 int ksm_disable_merge_any(struct mm_struct *mm);
+int ksm_disable(struct mm_struct *mm);
 int __ksm_enter(struct mm_struct *mm);
 void __ksm_exit(struct mm_struct *mm);
 
@@ -80,6 +81,11 @@ static inline void ksm_add_vma(struct vm_area_struct *vma)
 {
 }
 
+static inline int ksm_disable(struct mm_struct *mm)
+{
+	return 0;
+}
+
 static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
 	return 0;
--
cgit v1.2.3
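
The out-of-line ksm_disable() lives in mm/ksm.c and is therefore not
visible in this include/-limited diff.  A sketch reconstructed from the
commit message; the ksm_del_vmas() helper name and the exact flag
checks are assumptions, not taken from the hunks above:

    /* mm/ksm.c (reconstruction): disable KSM for a whole mm. */
    int ksm_disable(struct mm_struct *mm)
    {
            mmap_assert_write_locked(mm);

            /* Fast path: KSM was never enabled, no VMA iteration needed. */
            if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
                    return 0;
            /* Enabled via prctl(PR_SET_MEMORY_MERGE, 1): undo it. */
            if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
                    return ksm_disable_merge_any(mm);
            /* Enabled per-VMA via MADV_MERGEABLE: unmerge those VMAs. */
            return ksm_del_vmas(mm);
    }

This is what makes the never-enabled case fast: both flag tests fail
before any VMA is touched, and the s390/gmap caller shrinks to a single
ksm_disable(mm) call under the mmap write lock.
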
From 245f0922689364b21163af4937a05ea0ba576fae Mon Sep 17 00:00:00 2001
From: Kefeng Wang
Date: Mon, 17 Apr 2023 12:53:23 +0800
Subject: mm: hwpoison: coredump: support recovery from dump_user_range()

dump_user_range() is used to copy user pages to a coredump file, but if
a hardware memory error occurs during the copy, which is called from
__kernel_write_iter() in dump_user_range(), the kernel crashes:

  CPU: 112 PID: 7014 Comm: mca-recover Not tainted 6.3.0-rc2 #425
  pc : __memcpy+0x110/0x260
  lr : _copy_from_iter+0x3bc/0x4c8
  ...
  Call trace:
   __memcpy+0x110/0x260
   copy_page_from_iter+0xcc/0x130
   pipe_write+0x164/0x6d8
   __kernel_write_iter+0x9c/0x210
   dump_user_range+0xc8/0x1d8
   elf_core_dump+0x308/0x368
   do_coredump+0x2e8/0xa40
   get_signal+0x59c/0x788
   do_signal+0x118/0x1f8
   do_notify_resume+0xf0/0x280
   el0_da+0x130/0x138
   el0t_64_sync_handler+0x68/0xc0
   el0t_64_sync+0x188/0x190

Generally, a file's '->write_iter' implementation will use
copy_page_from_iter() and copy_page_from_iter_atomic().  Change
memcpy() to copy_mc_to_kernel() in both of them to handle a #MC during
the source read; this stops coredump processing and kills the task
instead of causing a kernel panic.  However, the source address may not
always be a user address, so introduce a new copy_mc flag in struct
iov_iter{} to indicate that the iter can do a safe memory copy, along
with helpers to set/check the flag.  For now it is only used in
coredump's dump_user_range(), but it could be extended to other
scenarios to fix similar issues.

Link: https://lkml.kernel.org/r/20230417045323.11054-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang
Cc: Alexander Viro
Cc: Christian Brauner
Cc: Miaohe Lin
Cc: Naoya Horiguchi
Cc: Tong Tiangen
Cc: Jens Axboe
Signed-off-by: Andrew Morton
---
 include/linux/uio.h | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

(limited to 'include')

diff --git a/include/linux/uio.h b/include/linux/uio.h
index 3d386849a758..044c1d8c230c 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -42,6 +42,7 @@ struct iov_iter_state {
 
 struct iov_iter {
 	u8 iter_type;
+	bool copy_mc;
 	bool nofault;
 	bool data_source;
 	bool user_backed;
@@ -256,8 +257,22 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
 
 #ifdef CONFIG_ARCH_HAS_COPY_MC
 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
+static inline void iov_iter_set_copy_mc(struct iov_iter *i)
+{
+	i->copy_mc = true;
+}
+
+static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
+{
+	return i->copy_mc;
+}
 #else
 #define _copy_mc_to_iter _copy_to_iter
+static inline void iov_iter_set_copy_mc(struct iov_iter *i) { }
+static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
+{
+	return false;
+}
 #endif
 
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
@@ -380,6 +395,7 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
 	WARN_ON(direction & ~(READ | WRITE));
 	*i = (struct iov_iter) {
 		.iter_type = ITER_UBUF,
+		.copy_mc = false,
 		.user_backed = true,
 		.data_source = direction,
 		.ubuf = buf,
--
cgit v1.2.3
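
The coredump-side hookup is likewise outside this include/-limited
diff.  A greatly simplified sketch of how fs/coredump.c can use the new
helper; the function shape and the coredump_params bookkeeping are
illustrative, not the exact upstream code:

    /*
     * Mark the iter before writing a user page so the low-level copy
     * dispatches to copy_mc_to_kernel(); a hardware #MC then surfaces
     * as a short write that aborts the dump instead of a panic.
     */
    static int dump_emit_page(struct coredump_params *cprm, struct page *page)
    {
            struct bio_vec bvec;
            struct iov_iter iter;

            bvec_set_page(&bvec, page, PAGE_SIZE, 0);
            iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
            iov_iter_set_copy_mc(&iter);    /* new helper from this patch */
            if (__kernel_write_iter(cprm->file, &iter, &cprm->pos) != PAGE_SIZE)
                    return 0;               /* short write: stop coredump processing */
            cprm->written += PAGE_SIZE;
            return 1;
    }

On !CONFIG_ARCH_HAS_COPY_MC kernels the helpers compile away:
iov_iter_set_copy_mc() is a no-op and iov_iter_is_copy_mc() is
constant-false, so the plain memcpy() path is unchanged.
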