From ebdf8321eeeb623aed60f7ed16f7445363230118 Mon Sep 17 00:00:00 2001
From: Alistair Popple
Date: Fri, 4 Sep 2020 16:35:58 -0700
Subject: mm/migrate: fixup setting UFFD_WP flag
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Commit f45ec5ff16a75 ("userfaultfd: wp: support swap and page migration")
introduced support for tracking the uffd wp bit during page migration.
However the non-swap PTE variant was used to set the flag for zone device
private pages which are a type of swap page.  This leads to corruption of
the swap offset if the original PTE has the uffd_wp flag set.

Fixes: f45ec5ff16a75 ("userfaultfd: wp: support swap and page migration")
Signed-off-by: Alistair Popple
Signed-off-by: Andrew Morton
Reviewed-by: Peter Xu
Cc: Jérôme Glisse
Cc: John Hubbard
Cc: Ralph Campbell
Link: https://lkml.kernel.org/r/20200825064232.10023-1-alistair@popple.id.au
Signed-off-by: Linus Torvalds
---
 mm/migrate.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/migrate.c')

diff --git a/mm/migrate.c b/mm/migrate.c
index 34a842a8eb6a..ddb64253fe3e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -251,7 +251,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 				entry = make_device_private_entry(new, pte_write(pte));
 				pte = swp_entry_to_pte(entry);
 				if (pte_swp_uffd_wp(*pvmw.pte))
-					pte = pte_mkuffd_wp(pte);
+					pte = pte_swp_mkuffd_wp(pte);
 			}
 		}

--
cgit v1.2.3


From ad7df764b7e1c7dc64e016da7ada2e3e1bb90700 Mon Sep 17 00:00:00 2001
From: Alistair Popple
Date: Fri, 4 Sep 2020 16:36:01 -0700
Subject: mm/rmap: fixup copying of soft dirty and uffd ptes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

During memory migration a pte is temporarily replaced with a migration
swap pte.  Some pte bits from the existing mapping such as the soft-dirty
and uffd write-protect bits are preserved by copying these to the
temporary migration swap pte.

However these bits are not stored at the same location for swap and
non-swap ptes.  Therefore testing these bits requires using the
appropriate helper function for the given pte type.

Unfortunately several code locations were found where the wrong helper
function is being used to test soft_dirty and uffd_wp bits which leads to
them getting incorrectly set or cleared during page-migration.

Fix these by using the correct tests based on pte type.
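
To make the failure mode concrete, here is a small standalone userspace
sketch.  The bit positions and field layout below are invented for
illustration and do not match any real architecture's pte format; the
point is only that a flag bit defined for present ptes can fall inside
the packed swap offset field of a swap pte, so setting it with the
non-swap helper silently changes the offset:

	/*
	 * Illustration only: a hypothetical layout, not kernel code.
	 * The swap pte packs its offset starting at bit 9; the
	 * present-pte uffd-wp flag is imagined at bit 10, inside that
	 * field, while the swap-pte variant sits at bit 2, below it.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define SWP_OFFSET_SHIFT	9
	#define PRESENT_UFFD_WP		(1ULL << 10)	/* wrong bit for swap ptes */
	#define SWP_UFFD_WP		(1ULL << 2)	/* right bit for swap ptes */

	int main(void)
	{
		uint64_t swp_pte = 5ULL << SWP_OFFSET_SHIFT;	/* swap offset 5 */
		uint64_t bad  = swp_pte | PRESENT_UFFD_WP;	/* wrong helper */
		uint64_t good = swp_pte | SWP_UFFD_WP;		/* right helper */

		/* prints 7: the offset has been corrupted */
		printf("wrong helper: offset %llu\n",
		       (unsigned long long)(bad >> SWP_OFFSET_SHIFT));
		/* prints 5: the offset is intact */
		printf("right helper: offset %llu\n",
		       (unsigned long long)(good >> SWP_OFFSET_SHIFT));
		return 0;
	}
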
Fixes: a5430dda8a3a ("mm/migrate: support un-addressable ZONE_DEVICE page in migration")
Fixes: 8c3328f1f36a ("mm/migrate: migrate_vma() unmap page from vma while collecting pages")
Fixes: f45ec5ff16a7 ("userfaultfd: wp: support swap and page migration")
Signed-off-by: Alistair Popple
Signed-off-by: Andrew Morton
Reviewed-by: Peter Xu
Cc: Jérôme Glisse
Cc: John Hubbard
Cc: Ralph Campbell
Cc: Alistair Popple
Cc:
Link: https://lkml.kernel.org/r/20200825064232.10023-2-alistair@popple.id.au
Signed-off-by: Linus Torvalds
---
 mm/migrate.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

(limited to 'mm/migrate.c')

diff --git a/mm/migrate.c b/mm/migrate.c
index ddb64253fe3e..12f63806d0ac 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2427,10 +2427,17 @@ again:
 			entry = make_migration_entry(page, mpfn &
 						     MIGRATE_PFN_WRITE);
 			swp_pte = swp_entry_to_pte(entry);
-			if (pte_soft_dirty(pte))
-				swp_pte = pte_swp_mksoft_dirty(swp_pte);
-			if (pte_uffd_wp(pte))
-				swp_pte = pte_swp_mkuffd_wp(swp_pte);
+			if (pte_present(pte)) {
+				if (pte_soft_dirty(pte))
+					swp_pte = pte_swp_mksoft_dirty(swp_pte);
+				if (pte_uffd_wp(pte))
+					swp_pte = pte_swp_mkuffd_wp(swp_pte);
+			} else {
+				if (pte_swp_soft_dirty(pte))
+					swp_pte = pte_swp_mksoft_dirty(swp_pte);
+				if (pte_swp_uffd_wp(pte))
+					swp_pte = pte_swp_mkuffd_wp(swp_pte);
+			}
 			set_pte_at(mm, addr, ptep, swp_pte);

 			/*
--
cgit v1.2.3


From 6128763fc3244d1b4868e5f0aa401f7f987b5c4d Mon Sep 17 00:00:00 2001
From: Ralph Campbell
Date: Fri, 4 Sep 2020 16:36:04 -0700
Subject: mm/migrate: remove unnecessary is_zone_device_page() check

Patch series "mm/migrate: preserve soft dirty in remove_migration_pte()".

I happened to notice this from code inspection after seeing Alistair
Popple's patch ("mm/rmap: Fixup copying of soft dirty and uffd ptes").

This patch (of 2):

The check for is_zone_device_page() and is_device_private_page() is
unnecessary since the latter is sufficient to determine if the page is a
device private page.  Simplify the code for easier reading.
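
For reference, is_device_private_page() already performs the zone-device
test internally; its shape at the time was roughly the following (a
sketch from memory with the config guards elided, not the exact source):

	static inline bool is_device_private_page(const struct page *page)
	{
		/* a device private page is a zone device page by definition */
		return is_zone_device_page(page) &&
			page->pgmap->type == MEMORY_DEVICE_PRIVATE;
	}

so wrapping the call in an extra is_zone_device_page() check cannot
change the outcome.
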
Signed-off-by: Ralph Campbell
Signed-off-by: Andrew Morton
Reviewed-by: Christoph Hellwig
Cc: Jerome Glisse
Cc: Alistair Popple
Cc: Christoph Hellwig
Cc: Jason Gunthorpe
Cc: Bharata B Rao
Link: https://lkml.kernel.org/r/20200831212222.22409-1-rcampbell@nvidia.com
Link: https://lkml.kernel.org/r/20200831212222.22409-2-rcampbell@nvidia.com
Signed-off-by: Linus Torvalds
---
 mm/migrate.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

(limited to 'mm/migrate.c')

diff --git a/mm/migrate.c b/mm/migrate.c
index 12f63806d0ac..1d791d420725 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -246,13 +246,11 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 		else if (pte_swp_uffd_wp(*pvmw.pte))
 			pte = pte_mkuffd_wp(pte);

-		if (unlikely(is_zone_device_page(new))) {
-			if (is_device_private_page(new)) {
-				entry = make_device_private_entry(new, pte_write(pte));
-				pte = swp_entry_to_pte(entry);
-				if (pte_swp_uffd_wp(*pvmw.pte))
-					pte = pte_swp_mkuffd_wp(pte);
-			}
+		if (unlikely(is_device_private_page(new))) {
+			entry = make_device_private_entry(new, pte_write(pte));
+			pte = swp_entry_to_pte(entry);
+			if (pte_swp_uffd_wp(*pvmw.pte))
+				pte = pte_swp_mkuffd_wp(pte);
 		}

 #ifdef CONFIG_HUGETLB_PAGE
--
cgit v1.2.3


From 3d321bf82c4be8e33261754a5775bc65fc5d2184 Mon Sep 17 00:00:00 2001
From: Ralph Campbell
Date: Fri, 4 Sep 2020 16:36:07 -0700
Subject: mm/migrate: preserve soft dirty in remove_migration_pte()

The code to remove a migration PTE and replace it with a device private
PTE was not copying the soft dirty bit from the migration entry.  This
could lead to page contents not being marked dirty when faulting the page
back from device private memory.

Signed-off-by: Ralph Campbell
Signed-off-by: Andrew Morton
Reviewed-by: Christoph Hellwig
Cc: Jerome Glisse
Cc: Alistair Popple
Cc: Christoph Hellwig
Cc: Jason Gunthorpe
Cc: Bharata B Rao
Link: https://lkml.kernel.org/r/20200831212222.22409-3-rcampbell@nvidia.com
Signed-off-by: Linus Torvalds
---
 mm/migrate.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'mm/migrate.c')

diff --git a/mm/migrate.c b/mm/migrate.c
index 1d791d420725..941b89383cf3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -249,6 +249,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 		if (unlikely(is_device_private_page(new))) {
 			entry = make_device_private_entry(new, pte_write(pte));
 			pte = swp_entry_to_pte(entry);
+			if (pte_swp_soft_dirty(*pvmw.pte))
+				pte = pte_swp_mksoft_dirty(pte);
 			if (pte_swp_uffd_wp(*pvmw.pte))
 				pte = pte_swp_mkuffd_wp(pte);
 		}
--
cgit v1.2.3


From a333e3e73b6648d3bd3ef6b971a59a6363bfcfc5 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Fri, 18 Sep 2020 21:20:06 -0700
Subject: mm: migration of hugetlbfs page skip memcg

hugetlbfs pages do not participate in memcg: so although they do find
most of migrate_page_states() useful, it would be better if they did not
call into mem_cgroup_migrate() - where Qian Cai reported that LTP's
move_pages12 triggers the warning in Alex Shi's prospective commit
"mm/memcg: warning on !memcg after readahead page charged".
Signed-off-by: Hugh Dickins
Signed-off-by: Andrew Morton
Reviewed-by: Shakeel Butt
Acked-by: Johannes Weiner
Cc: Alex Shi
Cc: Michal Hocko
Cc: Mike Kravetz
Cc: Qian Cai
Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2008301359460.5954@eggly.anvils
Signed-off-by: Linus Torvalds
---
 mm/migrate.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'mm/migrate.c')

diff --git a/mm/migrate.c b/mm/migrate.c
index 941b89383cf3..aecb1433cf3c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -668,7 +668,8 @@ void migrate_page_states(struct page *newpage, struct page *page)

 	copy_page_owner(page, newpage);

-	mem_cgroup_migrate(page, newpage);
+	if (!PageHuge(page))
+		mem_cgroup_migrate(page, newpage);
 }
 EXPORT_SYMBOL(migrate_page_states);

--
cgit v1.2.3