path: root/include/linux/rmap.h
author		Paul Mundt <lethal@linux-sh.org>	2011-01-13 15:06:28 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2011-01-13 15:06:28 +0900
commit		f43dc23d5ea91fca257be02138a255f02d98e806 (patch)
tree		b29722f6e965316e90ac97abf79923ced250dc21 /include/linux/rmap.h
parent		f8e53553f452dcbf67cb89c8cba63a1cd6eb4cc0 (diff)
parent		4162cf64973df51fc885825bc9ca4d055891c49f (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6 into common/serial-rework
Conflicts:
	arch/sh/kernel/cpu/sh2/setup-sh7619.c
	arch/sh/kernel/cpu/sh2a/setup-mxg.c
	arch/sh/kernel/cpu/sh2a/setup-sh7201.c
	arch/sh/kernel/cpu/sh2a/setup-sh7203.c
	arch/sh/kernel/cpu/sh2a/setup-sh7206.c
	arch/sh/kernel/cpu/sh3/setup-sh7705.c
	arch/sh/kernel/cpu/sh3/setup-sh770x.c
	arch/sh/kernel/cpu/sh3/setup-sh7710.c
	arch/sh/kernel/cpu/sh3/setup-sh7720.c
	arch/sh/kernel/cpu/sh4/setup-sh4-202.c
	arch/sh/kernel/cpu/sh4/setup-sh7750.c
	arch/sh/kernel/cpu/sh4/setup-sh7760.c
	arch/sh/kernel/cpu/sh4a/setup-sh7343.c
	arch/sh/kernel/cpu/sh4a/setup-sh7366.c
	arch/sh/kernel/cpu/sh4a/setup-sh7722.c
	arch/sh/kernel/cpu/sh4a/setup-sh7723.c
	arch/sh/kernel/cpu/sh4a/setup-sh7724.c
	arch/sh/kernel/cpu/sh4a/setup-sh7763.c
	arch/sh/kernel/cpu/sh4a/setup-sh7770.c
	arch/sh/kernel/cpu/sh4a/setup-sh7780.c
	arch/sh/kernel/cpu/sh4a/setup-sh7785.c
	arch/sh/kernel/cpu/sh4a/setup-sh7786.c
	arch/sh/kernel/cpu/sh4a/setup-shx3.c
	arch/sh/kernel/cpu/sh5/setup-sh5.c
	drivers/serial/sh-sci.c
	drivers/serial/sh-sci.h
	include/linux/serial_sci.h
Diffstat (limited to 'include/linux/rmap.h')
-rw-r--r--	include/linux/rmap.h	191
1 file changed, 175 insertions(+), 16 deletions(-)
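
The hunks below bring include/linux/rmap.h up to the merged mainline state: struct anon_vma gains a root pointer and an optional external_refcount, the new struct anon_vma_chain links each VMA to every anon_vma it can map pages from (and each anon_vma back to its VMAs), try_to_unmap() switches from an ignore_refs int to the enum ttu_flags mode word, and locking moves to anon_vma->root->lock. As a rough, editor-added sketch (not part of the commit) of how the new reverse-map linkage is walked, the following kernel-style C counts the VMAs that may map an anonymous page. walk_page_vmas() is a hypothetical helper; page_lock_anon_vma(), page_unlock_anon_vma(), page_mapped_in_vma(), and struct anon_vma_chain with its same_anon_vma list all appear in the diff itself.

/*
 * Editor's illustrative sketch -- not part of this commit.
 * Walks the anon_vma_chain list introduced below to count the VMAs
 * that may map an anonymous page.  Assumes kernel context of this era;
 * walk_page_vmas() is a hypothetical example function.
 */
#include <linux/mm.h>
#include <linux/rmap.h>

static int walk_page_vmas(struct page *page)
{
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int nr_vmas = 0;

	/* Takes anon_vma->root->lock; NULL if the page has no anon_vma. */
	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return 0;

	/* anon_vma->head chains the anon_vma_chains of all associated VMAs. */
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		if (page_mapped_in_vma(page, avc->vma))
			nr_vmas++;
	}

	page_unlock_anon_vma(anon_vma);
	return nr_vmas;
}

The same list walk is what mm/rmap.c itself uses on the anonymous-page paths of rmap_walk() and try_to_unmap().
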
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 216d024f830d..bb83c0da2071 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -25,7 +25,20 @@
* pointing to this anon_vma once its vma list is empty.
*/
struct anon_vma {
+ struct anon_vma *root; /* Root of this anon_vma tree */
spinlock_t lock; /* Serialize access to vma list */
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+
+ /*
+ * The external_refcount is taken by either KSM or page migration
+ * to take a reference to an anon_vma when there is no
+ * guarantee that the vma of page tables will exist for
+ * the duration of the operation. A caller that takes
+ * the reference is responsible for clearing up the
+ * anon_vma if they are the last user on release
+ */
+ atomic_t external_refcount;
+#endif
/*
* NOTE: the LSB of the head.next is set by
* mm_take_all_locks() _after_ taking the above lock. So the
@@ -34,23 +47,96 @@ struct anon_vma {
* is serialized by a system wide lock only visible to
* mm_take_all_locks() (mm_all_locks_mutex).
*/
- struct list_head head; /* List of private "related" vmas */
+ struct list_head head; /* Chain of private "related" vmas */
+};
+
+/*
+ * The copy-on-write semantics of fork mean that an anon_vma
+ * can become associated with multiple processes. Furthermore,
+ * each child process will have its own anon_vma, where new
+ * pages for that process are instantiated.
+ *
+ * This structure allows us to find the anon_vmas associated
+ * with a VMA, or the VMAs associated with an anon_vma.
+ * The "same_vma" list contains the anon_vma_chains linking
+ * all the anon_vmas associated with this VMA.
+ * The "same_anon_vma" list contains the anon_vma_chains
+ * which link all the VMAs associated with this anon_vma.
+ */
+struct anon_vma_chain {
+ struct vm_area_struct *vma;
+ struct anon_vma *anon_vma;
+ struct list_head same_vma; /* locked by mmap_sem & page_table_lock */
+ struct list_head same_anon_vma; /* locked by anon_vma->lock */
};
#ifdef CONFIG_MMU
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
+{
+ atomic_set(&anon_vma->external_refcount, 0);
+}
+
+static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
+{
+ return atomic_read(&anon_vma->external_refcount);
+}
+
+static inline void get_anon_vma(struct anon_vma *anon_vma)
+{
+ atomic_inc(&anon_vma->external_refcount);
+}
+
+void drop_anon_vma(struct anon_vma *);
+#else
+static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
+{
+}
+
+static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
+{
+ return 0;
+}
+
+static inline void get_anon_vma(struct anon_vma *anon_vma)
+{
+}
-static inline void anon_vma_lock(struct vm_area_struct *vma)
+static inline void drop_anon_vma(struct anon_vma *anon_vma)
+{
+}
+#endif /* CONFIG_KSM */
+
+static inline struct anon_vma *page_anon_vma(struct page *page)
+{
+ if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
+ PAGE_MAPPING_ANON)
+ return NULL;
+ return page_rmapping(page);
+}
+
+static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
if (anon_vma)
- spin_lock(&anon_vma->lock);
+ spin_lock(&anon_vma->root->lock);
}
-static inline void anon_vma_unlock(struct vm_area_struct *vma)
+static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
if (anon_vma)
- spin_unlock(&anon_vma->lock);
+ spin_unlock(&anon_vma->root->lock);
+}
+
+static inline void anon_vma_lock(struct anon_vma *anon_vma)
+{
+ spin_lock(&anon_vma->root->lock);
+}
+
+static inline void anon_vma_unlock(struct anon_vma *anon_vma)
+{
+ spin_unlock(&anon_vma->root->lock);
}
/*
@@ -58,41 +144,81 @@ static inline void anon_vma_unlock(struct vm_area_struct *vma)
*/
void anon_vma_init(void); /* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
-void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
-void anon_vma_unlink(struct vm_area_struct *);
-void anon_vma_link(struct vm_area_struct *);
+void unlink_anon_vmas(struct vm_area_struct *);
+int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
+int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
+void anon_vma_free(struct anon_vma *);
+
+static inline void anon_vma_merge(struct vm_area_struct *vma,
+ struct vm_area_struct *next)
+{
+ VM_BUG_ON(vma->anon_vma != next->anon_vma);
+ unlink_anon_vmas(next);
+}
/*
* rmap interfaces called when adding or removing pte of page
*/
+void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
+ unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);
-#ifdef CONFIG_DEBUG_VM
-void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
-#else
-static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
+ unsigned long);
+void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
+ unsigned long);
+
+static inline void page_dup_rmap(struct page *page)
{
atomic_inc(&page->_mapcount);
}
-#endif
/*
* Called from mm/vmscan.c to handle paging out
*/
int page_referenced(struct page *, int is_locked,
struct mem_cgroup *cnt, unsigned long *vm_flags);
-int try_to_unmap(struct page *, int ignore_refs);
+int page_referenced_one(struct page *, struct vm_area_struct *,
+ unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
+
+enum ttu_flags {
+ TTU_UNMAP = 0, /* unmap mode */
+ TTU_MIGRATION = 1, /* migration mode */
+ TTU_MUNLOCK = 2, /* munlock mode */
+ TTU_ACTION_MASK = 0xff,
+
+ TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
+ TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
+ TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+};
+#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
+
+int try_to_unmap(struct page *, enum ttu_flags flags);
+int try_to_unmap_one(struct page *, struct vm_area_struct *,
+ unsigned long address, enum ttu_flags flags);
/*
* Called from mm/filemap_xip.c to unmap empty zero page
*/
-pte_t *page_check_address(struct page *, struct mm_struct *,
+pte_t *__page_check_address(struct page *, struct mm_struct *,
unsigned long, spinlock_t **, int);
+static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+ unsigned long address,
+ spinlock_t **ptlp, int sync)
+{
+ pte_t *ptep;
+
+ __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
+ ptlp, sync));
+ return ptep;
+}
+
/*
* Used by swapoff to help locate where page is expected in vma.
*/
@@ -112,13 +238,46 @@ int page_mkclean(struct page *);
*/
int try_to_munlock(struct page *);
+/*
+ * Called by memory-failure.c to kill processes.
+ */
+struct anon_vma *__page_lock_anon_vma(struct page *page);
+
+static inline struct anon_vma *page_lock_anon_vma(struct page *page)
+{
+ struct anon_vma *anon_vma;
+
+ __cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));
+
+ /* (void) is needed to make gcc happy */
+ (void) __cond_lock(&anon_vma->root->lock, anon_vma);
+
+ return anon_vma;
+}
+
+void page_unlock_anon_vma(struct anon_vma *anon_vma);
+int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+
+/*
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg);
+
#else /* !CONFIG_MMU */
#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0)
-#define page_referenced(page, locked, cnt, flags) TestClearPageReferenced(page)
+static inline int page_referenced(struct page *page, int is_locked,
+ struct mem_cgroup *cnt,
+ unsigned long *vm_flags)
+{
+ *vm_flags = 0;
+ return 0;
+}
+
#define try_to_unmap(page, refs) SWAP_FAIL
static inline int page_mkclean(struct page *page)