path: root/include/linux/rmap.h
Diffstat (limited to 'include/linux/rmap.h')
-rw-r--r--  include/linux/rmap.h  74
1 file changed, 18 insertions, 56 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index e9fd04ca1e51..2148b122779b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -7,7 +7,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/memcontrol.h>
/*
@@ -26,19 +26,16 @@
*/
struct anon_vma {
struct anon_vma *root; /* Root of this anon_vma tree */
- spinlock_t lock; /* Serialize access to vma list */
-#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
-
+ struct mutex mutex; /* Serialize access to vma list */
/*
- * The external_refcount is taken by either KSM or page migration
- * to take a reference to an anon_vma when there is no
+ * The refcount is taken on an anon_vma when there is no
* guarantee that the vma of page tables will exist for
* the duration of the operation. A caller that takes
* the reference is responsible for clearing up the
* anon_vma if they are the last user on release
*/
- atomic_t external_refcount;
-#endif
+ atomic_t refcount;
+
/*
* NOTE: the LSB of the head.next is set by
* mm_take_all_locks() _after_ taking the above lock. So the
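With this hunk the anon_vma list lock becomes a sleeping mutex and the reference count is kept unconditionally instead of only under CONFIG_KSM/CONFIG_MIGRATION. Below is a minimal sketch of how the new fields would be set up, modeled on the slab constructor in mm/rmap.c; the helper name is illustrative and is not part of this header.

	#include <linux/mutex.h>
	#include <linux/rmap.h>

	/* Illustrative init of the new fields; the real initialization is the
	 * anon_vma slab constructor in mm/rmap.c, not part of this header. */
	static void anon_vma_init_sketch(struct anon_vma *anon_vma)
	{
		mutex_init(&anon_vma->mutex);		/* was spin_lock_init() on the old lock */
		atomic_set(&anon_vma->refcount, 0);	/* now present in every config */
		INIT_LIST_HEAD(&anon_vma->head);
	}

Keeping the count unconditional is what lets the header drop the #ifdef stub helpers removed in the next hunk.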
@@ -67,46 +64,23 @@ struct anon_vma_chain {
struct vm_area_struct *vma;
struct anon_vma *anon_vma;
struct list_head same_vma; /* locked by mmap_sem & page_table_lock */
- struct list_head same_anon_vma; /* locked by anon_vma->lock */
+ struct list_head same_anon_vma; /* locked by anon_vma->mutex */
};
#ifdef CONFIG_MMU
-#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
-static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
-{
- atomic_set(&anon_vma->external_refcount, 0);
-}
-
-static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
-{
- return atomic_read(&anon_vma->external_refcount);
-}
-
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
- atomic_inc(&anon_vma->external_refcount);
+ atomic_inc(&anon_vma->refcount);
}
-void drop_anon_vma(struct anon_vma *);
-#else
-static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
-{
-}
+void __put_anon_vma(struct anon_vma *anon_vma);
-static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
+static inline void put_anon_vma(struct anon_vma *anon_vma)
{
- return 0;
+ if (atomic_dec_and_test(&anon_vma->refcount))
+ __put_anon_vma(anon_vma);
}
-static inline void get_anon_vma(struct anon_vma *anon_vma)
-{
-}
-
-static inline void drop_anon_vma(struct anon_vma *anon_vma)
-{
-}
-#endif /* CONFIG_KSM */
-
static inline struct anon_vma *page_anon_vma(struct page *page)
{
if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
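The #ifdef'd external_refcount helpers and drop_anon_vma() are folded into a single pair: get_anon_vma() takes a reference and put_anon_vma() drops it, calling the out-of-line __put_anon_vma() to free the structure when the last reference goes away. A sketch of the idiom, assuming a hypothetical caller (real users are e.g. KSM and page migration):

	#include <linux/rmap.h>

	/* Illustrative caller only. */
	static void pin_anon_vma_sketch(struct anon_vma *anon_vma)
	{
		get_anon_vma(anon_vma);		/* atomic_inc(&anon_vma->refcount) */

		/* ... work during which the anon_vma must not be freed ... */

		put_anon_vma(anon_vma);		/* last reference ends up in __put_anon_vma() */
	}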
@@ -119,24 +93,24 @@ static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
if (anon_vma)
- spin_lock(&anon_vma->root->lock);
+ mutex_lock(&anon_vma->root->mutex);
}
static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
if (anon_vma)
- spin_unlock(&anon_vma->root->lock);
+ mutex_unlock(&anon_vma->root->mutex);
}
static inline void anon_vma_lock(struct anon_vma *anon_vma)
{
- spin_lock(&anon_vma->root->lock);
+ mutex_lock(&anon_vma->root->mutex);
}
static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
- spin_unlock(&anon_vma->root->lock);
+ mutex_unlock(&anon_vma->root->mutex);
}
/*
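All four locking helpers now funnel into the root anon_vma's mutex, so the anon_vma list can be traversed in contexts that may sleep. A sketch of a list walk under the new lock; the walker function below is hypothetical, not something added by this patch:

	#include <linux/rmap.h>

	/* Illustrative walker. */
	static void walk_anon_vma_sketch(struct anon_vma *anon_vma)
	{
		struct anon_vma_chain *avc;

		anon_vma_lock(anon_vma);	/* mutex_lock(&anon_vma->root->mutex) */
		list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
			/* inspect avc->vma; sleeping is now permitted here */
		}
		anon_vma_unlock(anon_vma);
	}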
@@ -148,7 +122,6 @@ void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
-void anon_vma_free(struct anon_vma *);
static inline void anon_vma_merge(struct vm_area_struct *vma,
struct vm_area_struct *next)
@@ -157,6 +130,8 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
unlink_anon_vmas(next);
}
+struct anon_vma *page_get_anon_vma(struct page *page);
+
/*
* rmap interfaces called when adding or removing pte of page
*/
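page_get_anon_vma() is the newly declared helper that pins a page's anon_vma by reference count (looked up under RCU in mm/rmap.c) rather than by taking the mutex. A sketch of how a caller such as page migration might use it; the wrapper shown is illustrative:

	#include <linux/rmap.h>

	/* Illustrative wrapper; the work in the middle is a placeholder. */
	static void migrate_mappings_sketch(struct page *page)
	{
		struct anon_vma *anon_vma = page_get_anon_vma(page);

		if (!anon_vma)
			return;		/* not anon, or already fully unmapped */

		/* ... operate on the page's mappings while the anon_vma is pinned ... */

		put_anon_vma(anon_vma);	/* drop the reference taken by page_get_anon_vma() */
	}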
@@ -243,20 +218,7 @@ int try_to_munlock(struct page *);
/*
* Called by memory-failure.c to kill processes.
*/
-struct anon_vma *__page_lock_anon_vma(struct page *page);
-
-static inline struct anon_vma *page_lock_anon_vma(struct page *page)
-{
- struct anon_vma *anon_vma;
-
- __cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));
-
- /* (void) is needed to make gcc happy */
- (void) __cond_lock(&anon_vma->root->lock, anon_vma);
-
- return anon_vma;
-}
-
+struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
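page_lock_anon_vma() loses its inline sparse-annotation wrapper and becomes an ordinary out-of-line declaration; callers still pair it with page_unlock_anon_vma(). A sketch of that pairing; the walker is illustrative, while real users include try_to_unmap() and the memory-failure code mentioned above:

	#include <linux/rmap.h>

	/* Illustrative reverse-map walk over an anonymous page. */
	static void anon_rmap_walk_sketch(struct page *page)
	{
		struct anon_vma *anon_vma = page_lock_anon_vma(page);
		struct anon_vma_chain *avc;

		if (!anon_vma)
			return;		/* nothing to walk */

		list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
			/* e.g. test page_mapped_in_vma(page, avc->vma) */
		}

		page_unlock_anon_vma(anon_vma);
	}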