author    | Ingo Molnar <mingo@elte.hu>           | 2009-07-03 08:30:37 -0500
committer | Clark Williams <williams@redhat.com>  | 2012-04-04 09:14:17 -0500
commit    | 995004b43e63402aaf251ca68f2c84d816594729
tree      | d3c946ce0cfb772b5e8efce6858d72af50cfda85
parent    | 577dd3c958134e586ceb606f16de0e200d06948b
mm: Prepare decoupling the page fault disabling logic
Add a pagefault_disabled variable to task_struct to allow decoupling
the pagefault-disabled logic from the preempt count.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | include/linux/sched.h   |  1
-rw-r--r-- | include/linux/uaccess.h | 33
-rw-r--r-- | kernel/fork.c           |  1
-rw-r--r-- | mm/memory.c             | 29

4 files changed, 34 insertions, 30 deletions
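
For readers skimming the diff below: pagefault_disable()/pagefault_enable() bracket user-space accesses that must not sleep, typically together with the *_inatomic() copy helpers, so that a fault is resolved via the exception fixup table instead of the full fault path. A minimal illustrative sketch of that usage pattern follows; peek_user_word() is a made-up example function, not code from this patch:

```c
#include <linux/errno.h>
#include <linux/uaccess.h>

/*
 * Illustrative only -- not part of this patch. Read one word from
 * user space in a context where the fault handler must not sleep;
 * a fault makes the copy fail instead of blocking.
 */
static int peek_user_word(const unsigned long __user *uaddr, unsigned long *val)
{
	int ret;

	pagefault_disable();	/* after this patch: also bumps current->pagefault_disabled */
	ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}
```

Before this patch, callers like the one above communicate "page faults are disabled" to the fault handler purely through the raised preempt count; the per-task counter added here records that state explicitly.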
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ed2b9f95c8ff..bce86f9cf6b7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1435,6 +1435,7 @@ struct task_struct {
 	/* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
 #endif
+	int pagefault_disabled;
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int irq_events;
 	unsigned long hardirq_enable_ip;
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5ca0951e1855..9414a1b48f5c 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,37 +6,10 @@
 
 /*
  * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
- *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * it will not take any MM locks and go straight to the fixup table.
  */
-static inline void pagefault_disable(void)
-{
-	inc_preempt_count();
-	/*
-	 * make sure to have issued the store before a pagefault
-	 * can hit.
-	 */
-	barrier();
-}
-
-static inline void pagefault_enable(void)
-{
-	/*
-	 * make sure to issue those last loads/stores before enabling
-	 * the pagefault handler again.
-	 */
-	barrier();
-	dec_preempt_count();
-	/*
-	 * make sure we do..
-	 */
-	barrier();
-	preempt_check_resched();
-}
+extern void pagefault_disable(void);
+extern void pagefault_enable(void);
 
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
diff --git a/kernel/fork.c b/kernel/fork.c
index d8aa0c7ffa53..a17bc75049c1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1196,6 +1196,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->hardirq_context = 0;
 	p->softirq_context = 0;
 #endif
+	p->pagefault_disabled = 0;
 #ifdef CONFIG_LOCKDEP
 	p->lockdep_depth = 0; /* no locks held yet */
 	p->curr_chain_key = 0;
diff --git a/mm/memory.c b/mm/memory.c
index 829d43735402..52e3f6502b50 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3436,6 +3436,35 @@ unlock:
 	return 0;
 }
 
+void pagefault_disable(void)
+{
+	inc_preempt_count();
+	current->pagefault_disabled++;
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+EXPORT_SYMBOL_GPL(pagefault_disable);
+
+void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	current->pagefault_disabled--;
+	dec_preempt_count();
+	/*
+	 * make sure we do..
+	 */
+	barrier();
+	preempt_check_resched();
+}
+EXPORT_SYMBOL_GPL(pagefault_enable);
+
 /*
  * By the time we get here, we already hold the mm semaphore
  */
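
With the per-task counter in place, fault-handling code can in principle tell "page faults are disabled" apart from "preemption is disabled for some other reason" without consulting the preempt count. The sketch below shows the kind of test that becomes possible; the helper name fault_context_cannot_sleep() is hypothetical and is not introduced by this patch:

```c
#include <linux/sched.h>
#include <linux/types.h>

/*
 * Sketch only -- NOT added by this patch. Illustrates the check a
 * fault path could make once the pagefault-disabled state is tracked
 * per task instead of being inferred from the preempt count.
 */
static inline bool fault_context_cannot_sleep(void)
{
	return current->pagefault_disabled != 0;
}
```

An architecture's fault handler could use such a check, rather than in_atomic(), to decide when to skip sleeping locks and go straight to the fixup table; that is the decoupling the changelog describes this patch as preparing.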