author     Hugh Dickins <hugh.dickins@tiscali.co.uk>  2009-12-14 17:59:31 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-15 08:53:20 -0800
commit     e9995ef978a7d5296fe04a9a2c5ca6e66d8bb4e5 (patch)
tree       df4324273856e06b8277b7e4a0fa9289eb8e6385 /include/linux
parent     407f9c8b0889ced1dbe2f9157e4e60c61329d5c9 (diff)
ksm: rmap_walk to remove_migration_ptes
A side-effect of making ksm pages swappable is that they have to be placed on the LRUs: which then exposes them to isolate_lru_page() and hence to page migration.

Add rmap_walk() for remove_migration_ptes() to use: rmap_walk_anon() and rmap_walk_file() in rmap.c, but rmap_walk_ksm() in ksm.c. Perhaps some consolidation with existing code is possible, but don't attempt that yet (try_to_unmap needs to handle nonlinears, but migration pte removal does not).

rmap_walk() is sadly less general than it appears: rmap_walk_anon(), like remove_anon_migration_ptes() which it replaces, avoids calling page_lock_anon_vma(), because that includes a page_mapped() test which fails when all migration ptes are in place. That was valid when NUMA page migration was introduced (holding mmap_sem provided the missing guarantee that anon_vma's slab had not already been destroyed), but I believe not valid in the memory hotremove case added since.

For now do the same as before, and consider the best way to fix that unlikely race later on. When fixed, we can probably use rmap_walk() on hwpoisoned ksm pages too: for now, they remain among hwpoison's various exceptions (its PageKsm test comes before the page is locked, but its page_lock_anon_vma fails safely if an anon page gets upgraded).

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Chris Wright <chrisw@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
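The dispatch that ties the three walkers together lives in mm/rmap.c and so does not appear in the include/linux diffstat below. A minimal sketch of that dispatch, assuming PageKsm()/PageAnon() plus the static rmap_walk_anon()/rmap_walk_file() helpers this patch describes; not the verbatim patch text:

/*
 * Sketch of the mm/rmap.c dispatch (not part of this include/linux diff):
 * a locked page is routed to the walker that understands its rmap.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	VM_BUG_ON(!PageLocked(page));

	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rmap_one, arg);	/* in ksm.c */
	else if (PageAnon(page))
		return rmap_walk_anon(page, rmap_one, arg);
	else
		return rmap_walk_file(page, rmap_one, arg);
}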
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/ksm.h   13
-rw-r--r--  include/linux/rmap.h   6
2 files changed, 19 insertions, 0 deletions
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 157d83dbaef8..bed5f16ba827 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -88,6 +88,9 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
int page_referenced_ksm(struct page *page,
struct mem_cgroup *memcg, unsigned long *vm_flags);
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg);
+void ksm_migrate_page(struct page *newpage, struct page *oldpage);
#else /* !CONFIG_KSM */
@@ -127,6 +130,16 @@ static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
return 0;
}
+
+static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ return 0;
+}
+
+static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+}
#endif /* !CONFIG_KSM */
#endif /* __LINUX_KSM_H */
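The second new hook, ksm_migrate_page(), gives page migration a way to repoint KSM's stable tree at the new page once the copy is done. A rough sketch of the ksm.c side, assuming the stable_node/page_stable_node() internals that back KSM's stable tree (none of which appear in this header diff):

/*
 * Sketch of the ksm.c implementation (assumes KSM's stable_node and
 * page_stable_node() internals): when migration replaces a KSM page,
 * make the stable tree node point at the new copy.
 */
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
	struct stable_node *stable_node;

	VM_BUG_ON(!PageLocked(oldpage));
	VM_BUG_ON(!PageLocked(newpage));

	stable_node = page_stable_node(newpage);
	if (stable_node)
		stable_node->page = newpage;	/* was pointing at oldpage */
}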
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 980094a527ee..b019ae64e2ab 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -164,6 +164,12 @@ struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+/*
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg);
+
#else /* !CONFIG_MMU */
#define anon_vma_init() do {} while (0)
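As the new comment in rmap.h says, the immediate user is migrate.c. A sketch of how remove_migration_ptes() can now sit on top of rmap_walk(), assuming remove_migration_pte() is the existing per-vma callback matching the rmap_one signature:

/*
 * Sketch of the migrate.c caller: remove_migration_pte() is assumed to
 * turn each migration entry left behind for @old back into a real pte
 * mapping @new, one vma at a time, as rmap_walk() visits them.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}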