Diffstat (limited to 'mm/swap.c')
-rw-r--r-- | mm/swap.c | 33
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index a91caf754d9b..4c30605c6a69 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,6 +31,7 @@
 #include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
+#include <linux/locallock.h>
 
 #include "internal.h"
 
@@ -41,6 +42,9 @@ static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 
+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
+static DEFINE_LOCAL_IRQ_LOCK(swap_lock);
+
 /*
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs.  But it gets used by networking.
@@ -267,11 +271,11 @@ void rotate_reclaimable_page(struct page *page)
 		unsigned long flags;
 
 		page_cache_get(page);
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pvec = &__get_cpu_var(lru_rotate_pvecs);
 		if (!pagevec_add(pvec, page))
 			pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 }
 
@@ -327,12 +331,13 @@ static void activate_page_drain(int cpu)
 void activate_page(struct page *page)
 {
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+		struct pagevec *pvec = &get_locked_var(swap_lock,
+						       activate_page_pvecs);
 
 		page_cache_get(page);
 		if (!pagevec_add(pvec, page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
+		put_locked_var(swap_lock, activate_page_pvecs);
 	}
 }
 
@@ -373,12 +378,12 @@ EXPORT_SYMBOL(mark_page_accessed);
 
 void __lru_cache_add(struct page *page, enum lru_list lru)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
+	struct pagevec *pvec = &get_locked_var(swap_lock, lru_add_pvecs)[lru];
 
 	page_cache_get(page);
 	if (!pagevec_add(pvec, page))
 		____pagevec_lru_add(pvec, lru);
-	put_cpu_var(lru_add_pvecs);
+	put_locked_var(swap_lock, lru_add_pvecs);
 }
 EXPORT_SYMBOL(__lru_cache_add);
 
@@ -512,9 +517,9 @@ static void drain_cpu_pagevecs(int cpu)
 		unsigned long flags;
 
 		/* No harm done if a racing interrupt already did this */
-		local_irq_save(flags);
+		local_lock_irqsave(rotate_lock, flags);
 		pagevec_move_tail(pvec);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(rotate_lock, flags);
 	}
 
 	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
@@ -542,18 +547,19 @@ void deactivate_page(struct page *page)
 		return;
 
 	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+		struct pagevec *pvec = &get_locked_var(swap_lock,
+						       lru_deactivate_pvecs);
 
 		if (!pagevec_add(pvec, page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
-		put_cpu_var(lru_deactivate_pvecs);
+		put_locked_var(swap_lock, lru_deactivate_pvecs);
 	}
 }
 
 void lru_add_drain(void)
 {
-	drain_cpu_pagevecs(get_cpu());
-	put_cpu();
+	drain_cpu_pagevecs(local_lock_cpu(swap_lock));
+	local_unlock_cpu(swap_lock);
 }
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
@@ -783,6 +789,9 @@ void __init swap_setup(void)
 {
 	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
 
+	local_irq_lock_init(rotate_lock);
+	local_irq_lock_init(swap_lock);
+
#ifdef CONFIG_SWAP
 	bdi_init(swapper_space.backing_dev_info);
 #endif
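
The conversion relies on the -rt patchset's local locks from <linux/locallock.h>. The sketch below is a reduced, illustrative model of what those primitives boil down to in each build configuration; it is not the real implementation, which also keeps a nesting count, tracks the lock owner, stores the saved flags in the lock object, and provides DEFINE_LOCAL_IRQ_LOCK()/local_irq_lock_init() to declare and initialize the per-CPU lock variables (all omitted here).

/*
 * Illustrative sketch only -- a reduced model of the -rt patchset's
 * local-lock primitives, not the actual <linux/locallock.h>.
 */
#ifndef CONFIG_PREEMPT_RT_FULL
/*
 * !RT: every primitive collapses to exactly the code it replaces,
 * so this patch is a functional no-op for non-RT builds.
 */
# define local_lock_irqsave(lvar, flags)	local_irq_save(flags)
# define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)
# define get_locked_var(lvar, var)		get_cpu_var(var)
# define put_locked_var(lvar, var)		put_cpu_var(var)
# define local_lock_cpu(lvar)			get_cpu()
# define local_unlock_cpu(lvar)			put_cpu()
#else
/*
 * RT: each local lock is a per-CPU lock object.  spin_lock() maps to
 * an rtmutex on RT, so the pagevec critical sections stay preemptible
 * and interrupts are never hard-disabled; only other users of the
 * same local lock are excluded.
 */
struct local_irq_lock {
	spinlock_t lock;
};
# define local_lock_irqsave(lvar, flags)			\
	do { (void)(flags); spin_lock(this_cpu_ptr(&(lvar).lock)); } while (0)
# define local_unlock_irqrestore(lvar, flags)			\
	do { (void)(flags); spin_unlock(this_cpu_ptr(&(lvar).lock)); } while (0)
# define get_locked_var(lvar, var)				\
	(*({ spin_lock(this_cpu_ptr(&(lvar).lock)); this_cpu_ptr(&(var)); }))
# define put_locked_var(lvar, var)				\
	spin_unlock(this_cpu_ptr(&(lvar).lock))
# define local_lock_cpu(lvar)					\
	({ spin_lock(this_cpu_ptr(&(lvar).lock)); smp_processor_id(); })
# define local_unlock_cpu(lvar)					\
	spin_unlock(this_cpu_ptr(&(lvar).lock))
#endif

Note the deliberate split into two locks: rotate_reclaimable_page() can be called from the writeback-completion path (hence the *_irqsave variants), so lru_rotate_pvecs gets its own rotate_lock, which drain_cpu_pagevecs() also takes when flushing that pagevec; the add, activate and deactivate paths all serialize on swap_lock instead, so on RT rotation never contends with them on the same per-CPU lock.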