author		Thomas Gleixner <tglx@linutronix.de>	2009-07-25 18:56:56 +0200
committer	Ingo Molnar <mingo@elte.hu>	2011-09-13 11:11:55 +0200
commit		ee30a7b2fc072f139dac44826860d2c1f422137c (patch)
tree		4f11173879b25cc31822cb0a8d9dbd48395e74bc /kernel
parent		07354eb1a74d1e1ece29f8bafe0b46e8c77a95ef (diff)
locking, sched: Annotate thread_group_cputimer as raw
The thread_group_cputimer lock can be taken in atomic context and
therefore cannot be preempted on -rt - annotate it.

In mainline this change documents the low-level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse
checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
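For context, the pattern this commit applies is a straight substitution: the lock field becomes raw_spinlock_t and every lock/unlock call moves to the raw_ variant. Below is a minimal sketch, assuming the struct thread_group_cputimer layout of include/linux/sched.h from this era; sample_group_cputime() is a hypothetical helper, and the real declaration change itself is outside the 'kernel'-filtered diffstat shown here.

	/*
	 * Illustrative sketch of the raw-spinlock annotation. On
	 * PREEMPT_RT an ordinary spinlock_t becomes a sleeping lock,
	 * so a lock taken in atomic context must be a raw_spinlock_t,
	 * which always busy-waits. Lockdep and Sparse treat both
	 * variants the same way.
	 */
	struct thread_group_cputimer {
		struct task_cputime cputime;
		int running;
		raw_spinlock_t lock;	/* was: spinlock_t lock; */
	};

	/* Hypothetical helper, mirroring the locking pattern in the diff. */
	static void sample_group_cputime(struct thread_group_cputimer *cputimer,
					 struct task_cputime *times)
	{
		unsigned long flags;

		/* raw_ lock ops pair with raw_spinlock_t */
		raw_spin_lock_irqsave(&cputimer->lock, flags);
		*times = cputimer->cputime;
		raw_spin_unlock_irqrestore(&cputimer->lock, flags);
	}

The design point: on PREEMPT_RT, spinlock_t is converted to a sleeping, rtmutex-based lock, while raw_spinlock_t keeps spinning, which is why a lock that must be taken in atomic context has to be raw.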
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/posix-cpu-timers.c	12
-rw-r--r--	kernel/sched_stats.h	12
2 files changed, 12 insertions(+), 12 deletions(-)
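A lock converted to raw also needs its initializer switched to the raw_ API. Those declaration/initializer updates are filtered out of this 'kernel'-limited diffstat; as a hedged sketch, not part of this diff:

	/* Dynamic init moves to the raw_ variant. */
	raw_spin_lock_init(&sig->cputimer.lock);	/* was: spin_lock_init() */

	/* Static initializers switch the same way:
	 * .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock)
	 */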
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 58f405b581e7..41440cca55a2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -274,7 +274,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 	struct task_cputime sum;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cputimer->lock, flags);
+	raw_spin_lock_irqsave(&cputimer->lock, flags);
 	if (!cputimer->running) {
 		cputimer->running = 1;
 		/*
@@ -287,7 +287,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 		update_gt_cputime(&cputimer->cputime, &sum);
 	}
 	*times = cputimer->cputime;
-	spin_unlock_irqrestore(&cputimer->lock, flags);
+	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
 }
 
 /*
@@ -997,9 +997,9 @@ static void stop_process_timers(struct signal_struct *sig)
 	struct thread_group_cputimer *cputimer = &sig->cputimer;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cputimer->lock, flags);
+	raw_spin_lock_irqsave(&cputimer->lock, flags);
 	cputimer->running = 0;
-	spin_unlock_irqrestore(&cputimer->lock, flags);
+	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
 }
 
 static u32 onecputick;
@@ -1289,9 +1289,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	if (sig->cputimer.running) {
 		struct task_cputime group_sample;
 
-		spin_lock(&sig->cputimer.lock);
+		raw_spin_lock(&sig->cputimer.lock);
 		group_sample = sig->cputimer.cputime;
-		spin_unlock(&sig->cputimer.lock);
+		raw_spin_unlock(&sig->cputimer.lock);
 
 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
 			return 1;
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 331e01bcd026..87f9e36ea56e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -282,10 +282,10 @@ static inline void account_group_user_time(struct task_struct *tsk,
 	if (!cputimer->running)
 		return;
 
-	spin_lock(&cputimer->lock);
+	raw_spin_lock(&cputimer->lock);
 	cputimer->cputime.utime =
 		cputime_add(cputimer->cputime.utime, cputime);
-	spin_unlock(&cputimer->lock);
+	raw_spin_unlock(&cputimer->lock);
 }
 
 /**
@@ -306,10 +306,10 @@ static inline void account_group_system_time(struct task_struct *tsk,
 	if (!cputimer->running)
 		return;
 
-	spin_lock(&cputimer->lock);
+	raw_spin_lock(&cputimer->lock);
 	cputimer->cputime.stime =
 		cputime_add(cputimer->cputime.stime, cputime);
-	spin_unlock(&cputimer->lock);
+	raw_spin_unlock(&cputimer->lock);
 }
 
 /**
@@ -330,7 +330,7 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
 	if (!cputimer->running)
 		return;
 
-	spin_lock(&cputimer->lock);
+	raw_spin_lock(&cputimer->lock);
 	cputimer->cputime.sum_exec_runtime += ns;
-	spin_unlock(&cputimer->lock);
+	raw_spin_unlock(&cputimer->lock);
 }