author    Ingo Molnar <mingo@elte.hu>  2009-02-13 09:44:22 +0100
committer Ingo Molnar <mingo@elte.hu>  2009-02-13 09:44:22 +0100
commit    f8a6b2b9cee298a9663cbe38ce1eb5240987cb62 (patch)
tree      b356490269c9e77d164dcc1477792b882fbb8bdb /include/linux/sched.h
parent    ba1511bf7fbda452138e4096bf10d5a382710f4f (diff)
parent    071a0bc2ceace31266836801510879407a3701fa (diff)
Merge branch 'linus' into x86/apic
Conflicts:
	arch/x86/kernel/acpi/boot.c
	arch/x86/mm/fault.c
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 63
1 file changed, 34 insertions(+), 29 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2225c207801c..f0a50b20e8a0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -453,23 +453,33 @@ struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
- spinlock_t lock;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp stime
#define virt_exp utime
#define sched_exp sum_exec_runtime
+#define INIT_CPUTIME \
+ (struct task_cputime) { \
+ .utime = cputime_zero, \
+ .stime = cputime_zero, \
+ .sum_exec_runtime = 0, \
+ }
+
/**
- * struct thread_group_cputime - thread group interval timer counts
- * @totals: thread group interval timers; substructure for
- * uniprocessor kernel, per-cpu for SMP kernel.
+ * struct thread_group_cputimer - thread group interval timer counts
+ * @cputime: thread group interval timers.
+ * @running: non-zero when there are timers running and
+ * @cputime receives updates.
+ * @lock: lock for fields in this struct.
*
* This structure contains the version of task_cputime, above, that is
- * used for thread group CPU clock calculations.
+ * used for thread group CPU timer calculations.
*/
-struct thread_group_cputime {
- struct task_cputime totals;
+struct thread_group_cputimer {
+ struct task_cputime cputime;
+ int running;
+ spinlock_t lock;
};
/*
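
(Illustrative sketch, not part of this diff: with the fields introduced above,
a reader of the per-signal timer would be expected to snapshot the accumulated
times under cputimer.lock, roughly as below. The helper name is hypothetical;
only the struct fields come from this change.)

static void snapshot_group_timer(struct signal_struct *sig,
				 struct task_cputime *times)
{
	unsigned long flags;

	/* cputimer.lock guards cputime and running (see struct above). */
	spin_lock_irqsave(&sig->cputimer.lock, flags);
	*times = sig->cputimer.cputime;
	spin_unlock_irqrestore(&sig->cputimer.lock, flags);
}
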
@@ -518,10 +528,10 @@ struct signal_struct {
cputime_t it_prof_incr, it_virt_incr;
/*
- * Thread group totals for process CPU clocks.
- * See thread_group_cputime(), et al, for details.
+ * Thread group totals for process CPU timers.
+ * See thread_group_cputimer(), et al, for details.
*/
- struct thread_group_cputime cputime;
+ struct thread_group_cputimer cputimer;
/* Earliest-expiration cache. */
struct task_cputime cputime_expires;
@@ -558,7 +568,7 @@ struct signal_struct {
* Live threads maintain their own counters and add to these
* in __exit_signal, except for the group leader.
*/
- cputime_t cutime, cstime;
+ cputime_t utime, stime, cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -567,6 +577,14 @@ struct signal_struct {
struct task_io_accounting ioac;
/*
+ * Cumulative ns of scheduled CPU time of dead threads in the
+ * group, not including a zombie group leader. (This only differs
+ * from jiffies_to_ns(utime + stime) if sched_clock uses something
+ * other than jiffies.)
+ */
+ unsigned long long sum_sched_runtime;
+
+ /*
* We don't bother to synchronize most readers of this at all,
* because there is no reader checking a limit that actually needs
* to get both rlim_cur and rlim_max atomically, and either one
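
(Illustrative only: the comments added above say the new group totals are fed
from dying threads in __exit_signal(), with sum_sched_runtime counting
sched_clock() nanoseconds rather than jiffies-based cputime. A minimal sketch
of that folding, using the field names from this diff; the function itself is
a simplified stand-in, not the real __exit_signal().)

static void fold_dead_thread_times(struct signal_struct *sig,
				   struct task_struct *tsk)
{
	/* Per-thread cputime_t totals go into the group's utime/stime... */
	sig->utime = cputime_add(sig->utime, tsk->utime);
	sig->stime = cputime_add(sig->stime, tsk->stime);
	/*
	 * ...while sum_sched_runtime accumulates scheduler-clock ns, which
	 * only equals jiffies_to_ns(utime + stime) when sched_clock is
	 * jiffies based, as the comment above notes.
	 */
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
}
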
@@ -2194,27 +2212,14 @@ static inline int spin_needbreak(spinlock_t *lock)
/*
* Thread group CPU time accounting.
*/
-
-static inline
-void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
-{
- struct task_cputime *totals = &tsk->signal->cputime.totals;
- unsigned long flags;
-
- spin_lock_irqsave(&totals->lock, flags);
- *times = *totals;
- spin_unlock_irqrestore(&totals->lock, flags);
-}
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
static inline void thread_group_cputime_init(struct signal_struct *sig)
{
- sig->cputime.totals = (struct task_cputime){
- .utime = cputime_zero,
- .stime = cputime_zero,
- .sum_exec_runtime = 0,
- };
-
- spin_lock_init(&sig->cputime.totals.lock);
+ sig->cputimer.cputime = INIT_CPUTIME;
+ spin_lock_init(&sig->cputimer.lock);
+ sig->cputimer.running = 0;
}
static inline void thread_group_cputime_free(struct signal_struct *sig)
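
(For context, a hedged sketch of how the reworked initializer might be used
when a signal_struct is being set up; the call site below is hypothetical, and
only thread_group_cputime_init() and the fields it touches come from this diff.)

static void init_group_accounting(struct signal_struct *sig)
{
	/*
	 * INIT_CPUTIME zeroes utime, stime and sum_exec_runtime; the init
	 * helper also clears running and initializes the spinlock.
	 */
	thread_group_cputime_init(sig);

	/* The dead-thread totals added to signal_struct start at zero too. */
	sig->utime = sig->stime = cputime_zero;
	sig->sum_sched_runtime = 0;
}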