 include/linux/page_cgroup.h | 28 ++++++++++++++++++++++++++++
 mm/page_cgroup.c            |  1 +
 2 files changed, 29 insertions(+), 0 deletions(-)
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 961ecc7d30bc..2927c084ff50 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -30,6 +30,10 @@ enum {
  */
 struct page_cgroup {
 	unsigned long flags;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	spinlock_t pcg_lock;
+	spinlock_t pcm_lock;
+#endif
 	struct mem_cgroup *mem_cgroup;
 	struct list_head lru;		/* per cgroup LRU list */
 };
@@ -96,30 +100,54 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
 	 * Don't take this lock in IRQ context.
 	 * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
 	 */
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_lock(PCG_LOCK, &pc->flags);
+#else
+	spin_lock(&pc->pcg_lock);
+#endif
 }
 
 static inline void unlock_page_cgroup(struct page_cgroup *pc)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_unlock(PCG_LOCK, &pc->flags);
+#else
+	spin_unlock(&pc->pcg_lock);
+#endif
 }
 
 static inline void move_lock_page_cgroup(struct page_cgroup *pc,
 	unsigned long *flags)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	/*
 	 * We know updates to pc->flags of page cache's stats are from both of
 	 * usual context or IRQ context. Disable IRQ to avoid deadlock.
 	 */
 	local_irq_save(*flags);
 	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
+#else
+	spin_lock_irqsave(&pc->pcm_lock, *flags);
+#endif
 }
 
 static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
 	unsigned long *flags)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
 	local_irq_restore(*flags);
+#else
+	spin_unlock_irqrestore(&pc->pcm_lock, *flags);
+#endif
+}
+
+static inline void page_cgroup_lock_init(struct page_cgroup *pc)
+{
+#ifdef CONFIG_PREEMPT_RT_BASE
+	spin_lock_init(&pc->pcg_lock);
+	spin_lock_init(&pc->pcm_lock);
+#endif
 }
 
 #ifdef CONFIG_SPARSEMEM
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 2d123f94a8df..2e0d18d8e432 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -17,6 +17,7 @@ static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id)
 	set_page_cgroup_array_id(pc, id);
 	pc->mem_cgroup = NULL;
 	INIT_LIST_HEAD(&pc->lru);
+	page_cgroup_lock_init(pc);
 }
 
 static unsigned long total_usage;