author		Cyrill Gorcunov <gorcunov@gmail.com>		2013-08-27 12:37:18 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-08-27 09:36:17 -0700
commit		6dec97dc92946eb479e6ebb54a61f8226cceefec (patch)
tree		7065d6d5f27b19ce4773460d50c1502610848f64 /mm
parent		9b506833210fdaea2cd00e39368805e28f49492d (diff)
mm: move_ptes -- Set soft dirty bit depending on pte type
Dave reported corrupted swap entries
| [ 4588.541886] swap_free: Unused swap offset entry 00002d15
| [ 4588.541952] BUG: Bad page map in process trinity-kid12 pte:005a2a80 pmd:22c01f067
and Hugh pointed out that move_ptes sets the _PAGE_SOFT_DIRTY bit regardless
of what kind of entry the pte holds. The catch is that when we carry
soft-dirty status in swap entries we must use _PAGE_SWP_SOFT_DIRTY
instead, because that is the only bit in the pte we can use for our own
needs without intersecting with the bits owned by the swap entry type/offset.
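
For context on what "soft dirty" means outside the kernel: userspace trackers
(CRIU being the main consumer) read bit 55 of each 64-bit /proc/pid/pagemap
entry to see whether a page was written since the bits were last cleared by
writing "4" to /proc/pid/clear_refs. A minimal sketch of such a check, assuming
a kernel built with CONFIG_MEM_SOFT_DIRTY (the helper name page_soft_dirty is
illustrative only, not a kernel or libc API):

#include <fcntl.h>
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

/* Return 1 if the page containing addr is soft-dirty, 0 if not, -1 on error. */
int page_soft_dirty(void *addr)
{
	uint64_t entry;
	long psize = sysconf(_SC_PAGESIZE);
	off_t off = ((uintptr_t)addr / psize) * sizeof(entry);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry)) {
		close(fd);
		return -1;
	}
	close(fd);
	return (int)((entry >> 55) & 1);	/* bit 55: pte is soft-dirty */
}

The bug meant that for a region whose ptes had already been converted to swap
entries, mremap() would write _PAGE_SOFT_DIRTY into bits that actually encode
the swap type/offset, producing the "Unused swap offset entry" corruption Dave hit.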
Reported-and-tested-by: Dave Jones <davej@redhat.com>
Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Pavel Emelyanov <xemul@parallels.com>
Analyzed-by: Hugh Dickins <hughd@google.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/mremap.c	21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/mm/mremap.c b/mm/mremap.c
index 457d34ef3bf2..0843feb66f3d 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -15,6 +15,7 @@
 #include <linux/swap.h>
 #include <linux/capability.h>
 #include <linux/fs.h>
+#include <linux/swapops.h>
 #include <linux/highmem.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
@@ -69,6 +70,23 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 	return pmd;
 }
 
+static pte_t move_soft_dirty_pte(pte_t pte)
+{
+	/*
+	 * Set soft dirty bit so we can notice
+	 * in userspace the ptes were moved.
+	 */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+	if (pte_present(pte))
+		pte = pte_mksoft_dirty(pte);
+	else if (is_swap_pte(pte))
+		pte = pte_swp_mksoft_dirty(pte);
+	else if (pte_file(pte))
+		pte = pte_file_mksoft_dirty(pte);
+#endif
+	return pte;
+}
+
 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		unsigned long old_addr, unsigned long old_end,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
@@ -126,7 +144,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 			continue;
 		pte = ptep_get_and_clear(mm, old_addr, old_pte);
 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
-		set_pte_at(mm, new_addr, new_pte, pte_mksoft_dirty(pte));
+		pte = move_soft_dirty_pte(pte);
+		set_pte_at(mm, new_addr, new_pte, pte);
 	}
 
 	arch_leave_lazy_mmu_mode();
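
A quick way to exercise the path touched by this patch from userspace, as a
sketch under the same assumptions as the earlier snippet (error handling
trimmed; page_soft_dirty is the illustrative helper defined above): clear
soft-dirty tracking, dirty a page, then mremap() it and check that the moved
mapping still reports soft-dirty.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int page_soft_dirty(void *addr);	/* helper from the sketch above */

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, psize, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Reset soft-dirty tracking for this process. */
	int fd = open("/proc/self/clear_refs", O_WRONLY);
	if (fd >= 0) {
		write(fd, "4", 1);
		close(fd);
	}

	p[0] = 1;	/* make the page soft-dirty again */

	/* Move the mapping; the moved pte should still read back as soft-dirty. */
	char *q = mremap(p, psize, psize, MREMAP_MAYMOVE);
	if (q == MAP_FAILED)
		return 1;

	printf("soft-dirty after mremap: %d\n", page_soft_dirty(q));
	return 0;
}

Note that this only covers the pte_present() case, which already worked before
the fix. Reproducing the corruption Dave reported needs the page to have been
pushed out to swap before the mremap(), so the move path operates on a swap
pte; that is harder to arrange deterministically and typically shows up under
memory pressure, as in the trinity run quoted in the commit message.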