author | Peter Zijlstra <peterz@infradead.org> | 2009-02-26 21:40:16 +0100 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-02-26 21:56:07 +0100 |
commit | 8325d9c09dedf45476f4d6261d1b6a72e4a7453f (patch) | |
tree | 5b7f6f3b125aec59ec6a60d22130844a3addc8e9 /kernel/sched_clock.c | |
parent | 83ce400928680a6c8123d492684b27857f5a2d95 (diff) | |
sched_clock: cleanups
- remove superfluous checks in __update_sched_clock()
- skip sched_clock_tick() for sched_clock_stable
- reinstate the simple !HAVE_UNSTABLE_SCHED_CLOCK code to please the bloatwatch
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
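For readers skimming the change, the two behavioural points in the message boil down to an early return in sched_clock_tick() when the clock is stable, and a trivial sched_clock_cpu() when CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is off. The userspace sketch below models that control flow only, under stated assumptions; raw_sched_clock(), resync_with_gtod() and the model_* names are hypothetical stand-ins, not kernel code.

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel state touched by this patch; the initial
 * values are illustrative assumptions, not kernel defaults. */
static int sched_clock_stable = 1;	/* set when the hardware clock is trusted */
static int sched_clock_running = 1;

/* Hypothetical stub for the architecture's raw clock. */
static uint64_t raw_sched_clock(void)
{
	static uint64_t ns;
	return ns += 1000;
}

/* Stub for the per-cpu GTOD resync work, elided here. */
static void resync_with_gtod(void)
{
	printf("resync\n");
}

/* Mirrors the new early return: a stable clock never pays for the
 * per-tick resync. */
static void model_sched_clock_tick(void)
{
	if (sched_clock_stable)
		return;
	if (!sched_clock_running)
		return;
	resync_with_gtod();
}

/* Mirrors the reinstated !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK path:
 * sched_clock_cpu() collapses to the raw clock. */
static uint64_t model_sched_clock_cpu(int cpu)
{
	(void)cpu;
	if (!sched_clock_running)
		return 0;
	return raw_sched_clock();
}

int main(void)
{
	model_sched_clock_tick();	/* no resync: the clock is stable */
	printf("%llu\n", (unsigned long long)model_sched_clock_cpu(0));
	return 0;
}
```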
Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r-- | kernel/sched_clock.c | 31 |
1 file changed, 20 insertions, 11 deletions
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index a755d023805a..390f33234bd0 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -44,9 +44,6 @@ static __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 __read_mostly int sched_clock_stable;
-#else
-static const int sched_clock_stable = 1;
-#endif
 
 struct sched_clock_data {
 	/*
@@ -115,14 +112,9 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	s64 delta = now - scd->tick_raw;
 	u64 clock, min_clock, max_clock;
 
-	WARN_ON_ONCE(!irqs_disabled());
-
 	if (unlikely(delta < 0))
 		delta = 0;
 
-	if (unlikely(!sched_clock_running))
-		return 0ull;
-
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
 	 *		      max(scd->tick_gtod, scd->clock),
@@ -201,18 +193,20 @@ u64 sched_clock_cpu(int cpu)
 	return clock;
 }
 
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-
 void sched_clock_tick(void)
 {
-	struct sched_clock_data *scd = this_scd();
+	struct sched_clock_data *scd;
 	u64 now, now_gtod;
 
+	if (sched_clock_stable)
+		return;
+
 	if (unlikely(!sched_clock_running))
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
 
+	scd = this_scd();
 	now_gtod = ktime_to_ns(ktime_get());
 	now = sched_clock();
 
@@ -245,6 +239,21 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+
+void sched_clock_init(void)
+{
+	sched_clock_running = 1;
+}
+
+u64 sched_clock_cpu(int cpu)
+{
+	if (unlikely(!sched_clock_running))
+		return 0;
+
+	return sched_clock();
+}
+
 #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
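The clamp comment kept in the __update_sched_clock() context above is the invariant the per-cpu clock maintains: advance by the raw delta, but never fall behind max(tick_gtod, clock) and never run more than one tick ahead of tick_gtod. The following is a minimal userspace model of that single step; scd_model, clamp_u64() and the TICK_NSEC value are assumptions for illustration, not the kernel's definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative value only; the kernel derives TICK_NSEC from HZ. */
#define TICK_NSEC 1000000ULL	/* 1 ms, assuming HZ=1000 */

/* Hypothetical stand-in for struct sched_clock_data. */
struct scd_model {
	uint64_t tick_raw;	/* raw clock value at the last tick */
	uint64_t tick_gtod;	/* GTOD time at the last tick */
	uint64_t clock;		/* last clamped clock value handed out */
};

static uint64_t clamp_u64(uint64_t v, uint64_t lo, uint64_t hi)
{
	if (v < lo)
		return lo;
	if (v > hi)
		return hi;
	return v;
}

/* Models the clamp in __update_sched_clock(): advance by the raw delta,
 * bounded below by max(tick_gtod, clock) and above by tick_gtod + TICK_NSEC. */
static uint64_t update_clock(struct scd_model *scd, uint64_t now_raw)
{
	int64_t delta = (int64_t)(now_raw - scd->tick_raw);
	uint64_t min_clock, max_clock;

	if (delta < 0)
		delta = 0;

	min_clock = scd->tick_gtod > scd->clock ? scd->tick_gtod : scd->clock;
	max_clock = scd->tick_gtod + TICK_NSEC;

	scd->clock = clamp_u64(scd->tick_gtod + (uint64_t)delta,
			       min_clock, max_clock);
	return scd->clock;
}

int main(void)
{
	struct scd_model scd = { .tick_raw = 100, .tick_gtod = 1000, .clock = 1000 };

	/* A raw delta far larger than one tick is clamped to tick_gtod + TICK_NSEC. */
	printf("%llu\n", (unsigned long long)update_clock(&scd, 100 + 5 * TICK_NSEC));
	return 0;
}
```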