Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/ntp.c           | 16
-rw-r--r--  kernel/time/tick-common.c   |  4
-rw-r--r--  kernel/time/tick-internal.h |  2
-rw-r--r--  kernel/time/tick-sched.c    | 17
-rw-r--r--  kernel/time/timekeeping.c   | 31
5 files changed, 32 insertions, 38 deletions
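
The patch below converts every write-side user of xtime_lock to the raw_ seqlock variants (and the lock itself to raw_seqlock_t), so the timekeeping writers stay non-preemptible once PREEMPT_RT turns ordinary spinlocks into sleeping locks. The point of a seqlock is that readers never block: they sample a sequence counter, copy the data, and retry if the counter was odd or changed. A writer that could be preempted mid-update would therefore leave every reader busy-waiting. The stand-alone sketch below models only that retry protocol; it is user-space illustration code with invented names (seq, shared_time, write_time, read_time), not the kernel implementation.

/*
 * Minimal user-space sketch of the seqcount retry protocol that
 * xtime_lock relies on.  Illustrative only: the names are invented
 * for this example and this is not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;                    /* even: no write in progress */
static unsigned long long shared_time[2];  /* stand-in for the xtime payload */

/* Writer: readers spin for as long as the count stays odd, so the
 * writer must not be preempted between the two increments. */
static void write_time(unsigned long long sec, unsigned long long nsec)
{
        atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);  /* -> odd */
        shared_time[0] = sec;
        shared_time[1] = nsec;
        atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);  /* -> even */
}

/* Reader: lock-free, retries until it sees a stable, even count. */
static void read_time(unsigned long long *sec, unsigned long long *nsec)
{
        unsigned int start;

        do {
                while ((start = atomic_load_explicit(&seq,
                                memory_order_acquire)) & 1)
                        ;  /* writer in progress: busy-wait */
                *sec = shared_time[0];
                *nsec = shared_time[1];
        } while (atomic_load_explicit(&seq, memory_order_acquire) != start);
}

int main(void)
{
        unsigned long long s, ns;

        write_time(1234, 567);
        read_time(&s, &ns);
        printf("%llu.%09llu\n", s, ns);
        return 0;
}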
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index f6117a4c7cb8..b510ba954fe1 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -358,7 +358,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 {
         enum hrtimer_restart res = HRTIMER_NORESTART;

-        write_seqlock(&xtime_lock);
+        raw_write_seqlock(&xtime_lock);

         switch (time_state) {
         case TIME_OK:
@@ -388,7 +388,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
                 break;
         }

-        write_sequnlock(&xtime_lock);
+        raw_write_sequnlock(&xtime_lock);

         return res;
 }
@@ -663,7 +663,7 @@ int do_adjtimex(struct timex *txc)

         getnstimeofday(&ts);

-        write_seqlock_irq(&xtime_lock);
+        raw_write_seqlock_irq(&xtime_lock);

         if (txc->modes & ADJ_ADJTIME) {
                 long save_adjust = time_adjust;
@@ -705,7 +705,7 @@ int do_adjtimex(struct timex *txc)
         /* fill PPS status fields */
         pps_fill_timex(txc);

-        write_sequnlock_irq(&xtime_lock);
+        raw_write_sequnlock_irq(&xtime_lock);

         txc->time.tv_sec = ts.tv_sec;
         txc->time.tv_usec = ts.tv_nsec;
@@ -903,7 +903,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)

         pts_norm = pps_normalize_ts(*phase_ts);

-        write_seqlock_irqsave(&xtime_lock, flags);
+        raw_write_seqlock_irqsave(&xtime_lock, flags);

         /* clear the error bits, they will be set again if needed */
         time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
@@ -916,7 +916,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
          * just start the frequency interval */
         if (unlikely(pps_fbase.tv_sec == 0)) {
                 pps_fbase = *raw_ts;
-                write_sequnlock_irqrestore(&xtime_lock, flags);
+                raw_write_sequnlock_irqrestore(&xtime_lock, flags);
                 return;
         }

@@ -931,7 +931,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
                 time_status |= STA_PPSJITTER;
                 /* restart the frequency calibration interval */
                 pps_fbase = *raw_ts;
-                write_sequnlock_irqrestore(&xtime_lock, flags);
+                raw_write_sequnlock_irqrestore(&xtime_lock, flags);
                 pr_err("hardpps: PPSJITTER: bad pulse\n");
                 return;
         }
@@ -948,7 +948,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)

         hardpps_update_phase(pts_norm.nsec);

-        write_sequnlock_irqrestore(&xtime_lock, flags);
+        raw_write_sequnlock_irqrestore(&xtime_lock, flags);
 }
 EXPORT_SYMBOL(hardpps);

diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index da6c9ecad4e4..c443a8fb87dc 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -63,13 +63,13 @@ int tick_is_oneshot_available(void)
 static void tick_periodic(int cpu)
 {
         if (tick_do_timer_cpu == cpu) {
-                write_seqlock(&xtime_lock);
+                raw_write_seqlock(&xtime_lock);

                 /* Keep track of the next tick event */
                 tick_next_period = ktime_add(tick_next_period, tick_period);

                 do_timer(1);
-                write_sequnlock(&xtime_lock);
+                raw_write_sequnlock(&xtime_lock);
         }

         update_process_times(user_mode(get_irq_regs()));
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 4e265b901fed..ea6b7ae2a893 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -141,4 +141,4 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
 #endif

 extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
+extern raw_seqlock_t xtime_lock;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 40420644d0ba..57315c5780be 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -56,7 +56,7 @@ static void tick_do_update_jiffies64(ktime_t now)
                 return;

         /* Reevalute with xtime_lock held */
-        write_seqlock(&xtime_lock);
+        raw_write_seqlock(&xtime_lock);

         delta = ktime_sub(now, last_jiffies_update);
         if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(ktime_t now)
                 /* Keep the tick_next_period variable up to date */
                 tick_next_period = ktime_add(last_jiffies_update, tick_period);
         }
-        write_sequnlock(&xtime_lock);
+        raw_write_sequnlock(&xtime_lock);
 }

 /*
@@ -89,12 +89,12 @@ static ktime_t tick_init_jiffy_update(void)
 {
         ktime_t period;

-        write_seqlock(&xtime_lock);
+        raw_write_seqlock(&xtime_lock);
         /* Did we start the jiffies update yet ? */
         if (last_jiffies_update.tv64 == 0)
                 last_jiffies_update = tick_next_period;
         period = last_jiffies_update;
-        write_sequnlock(&xtime_lock);
+        raw_write_sequnlock(&xtime_lock);
         return period;
 }

@@ -332,13 +332,7 @@ void tick_nohz_stop_sched_tick(int inidle)
                 goto end;

         if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
-                static int ratelimit;
-
-                if (ratelimit < 10) {
-                        printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-                               (unsigned int) local_softirq_pending());
-                        ratelimit++;
-                }
+                softirq_check_pending_idle();
                 goto end;
         }

@@ -798,6 +792,7 @@ void tick_setup_sched_timer(void)
          * Emulate tick processing via per-CPU hrtimers:
          */
         hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+        ts->sched_timer.irqsafe = 1;
         ts->sched_timer.function = tick_sched_timer;

         /* Get the next period (per cpu) */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 237841378c03..22fd69e211eb 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -139,8 +139,7 @@ static inline s64 timekeeping_get_ns_raw(void)
  * This read-write spinlock protects us from races in SMP while
  * playing with xtime.
  */
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
+__cacheline_aligned_in_smp DEFINE_RAW_SEQLOCK(xtime_lock);

 /*
  * The current time
@@ -365,7 +364,7 @@ int do_settimeofday(const struct timespec *tv)
         if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                 return -EINVAL;

-        write_seqlock_irqsave(&xtime_lock, flags);
+        raw_write_seqlock_irqsave(&xtime_lock, flags);

         timekeeping_forward_now();

@@ -381,7 +380,7 @@ int do_settimeofday(const struct timespec *tv)
         update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
                         timekeeper.mult);

-        write_sequnlock_irqrestore(&xtime_lock, flags);
+        raw_write_sequnlock_irqrestore(&xtime_lock, flags);

         /* signal hrtimers about time change */
         clock_was_set();
@@ -405,7 +404,7 @@ int timekeeping_inject_offset(struct timespec *ts)
         if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                 return -EINVAL;

-        write_seqlock_irqsave(&xtime_lock, flags);
+        raw_write_seqlock_irqsave(&xtime_lock, flags);

         timekeeping_forward_now();

@@ -418,7 +417,7 @@ int timekeeping_inject_offset(struct timespec *ts)
         update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
                         timekeeper.mult);

-        write_sequnlock_irqrestore(&xtime_lock, flags);
+        raw_write_sequnlock_irqrestore(&xtime_lock, flags);

         /* signal hrtimers about time change */
         clock_was_set();
@@ -572,7 +571,7 @@ void __init timekeeping_init(void)
         read_persistent_clock(&now);
         read_boot_clock(&boot);

-        write_seqlock_irqsave(&xtime_lock, flags);
+        raw_write_seqlock_irqsave(&xtime_lock, flags);

         ntp_init();

@@ -593,7 +592,7 @@ void __init timekeeping_init(void)
                         -boot.tv_sec, -boot.tv_nsec);
         total_sleep_time.tv_sec = 0;
         total_sleep_time.tv_nsec = 0;
-        write_sequnlock_irqrestore(&xtime_lock, flags);
+        raw_write_sequnlock_irqrestore(&xtime_lock, flags);
 }

 /* time in seconds when suspend began */
@@ -640,7 +639,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
         if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
                 return;

-        write_seqlock_irqsave(&xtime_lock, flags);
+        raw_write_seqlock_irqsave(&xtime_lock, flags);
         timekeeping_forward_now();

         __timekeeping_inject_sleeptime(delta);
@@ -650,7 +649,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
         update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
                         timekeeper.mult);

-        write_sequnlock_irqrestore(&xtime_lock, flags);
+        raw_write_sequnlock_irqrestore(&xtime_lock, flags);

         /* signal hrtimers about time change */
         clock_was_set();
@@ -673,7 +672,7 @@ static void timekeeping_resume(void)

         clocksource_resume();

-        write_seqlock_irqsave(&xtime_lock, flags);
+        raw_write_seqlock_irqsave(&xtime_lock, flags);

         if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
                 ts = timespec_sub(ts, timekeeping_suspend_time);
@@ -683,7 +682,7 @@ static void timekeeping_resume(void)
         timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
         timekeeper.ntp_error = 0;
         timekeeping_suspended = 0;
-        write_sequnlock_irqrestore(&xtime_lock, flags);
+        raw_write_sequnlock_irqrestore(&xtime_lock, flags);

         touch_softlockup_watchdog();

@@ -701,7 +700,7 @@ static int timekeeping_suspend(void)

         read_persistent_clock(&timekeeping_suspend_time);

-        write_seqlock_irqsave(&xtime_lock, flags);
+        raw_write_seqlock_irqsave(&xtime_lock, flags);
         timekeeping_forward_now();
         timekeeping_suspended = 1;

@@ -724,7 +723,7 @@ static int timekeeping_suspend(void)
                 timekeeping_suspend_time =
                         timespec_add(timekeeping_suspend_time, delta_delta);
         }
-        write_sequnlock_irqrestore(&xtime_lock, flags);
+        raw_write_sequnlock_irqrestore(&xtime_lock, flags);

         clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
         clocksource_suspend();
@@ -1239,7 +1238,7 @@ ktime_t ktime_get_monotonic_offset(void)
  */
 void xtime_update(unsigned long ticks)
 {
-        write_seqlock(&xtime_lock);
+        raw_write_seqlock(&xtime_lock);
         do_timer(ticks);
-        write_sequnlock(&xtime_lock);
+        raw_write_sequnlock(&xtime_lock);
 }
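
The raw_seqlock_t type and the raw_write_seqlock*() helpers used throughout this patch come from the RT patch queue rather than the mainline tree the diff applies to. A plausible minimal shape for them, sketched here as an assumption from the mainline seqcount_t and raw_spinlock_t primitives (not the RT patch's actual code), pairs the usual sequence counter with a raw spinlock, which never becomes a sleeping lock on PREEMPT_RT:

/*
 * Hypothetical sketch of a raw seqlock: a seqcount_t serialized by a
 * raw_spinlock_t so the write side stays non-preemptible on PREEMPT_RT.
 * Illustration of the idea only; not the RT patch's implementation.
 */
#include <linux/seqlock.h>
#include <linux/spinlock.h>

typedef struct {
        seqcount_t seqcount;
        raw_spinlock_t lock;
} raw_seqlock_t;

static inline void raw_write_seqlock(raw_seqlock_t *sl)
{
        raw_spin_lock(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void raw_write_sequnlock(raw_seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        raw_spin_unlock(&sl->lock);
}

/* The _irq/_irqsave/_irqrestore variants seen in the diff would follow the
 * same pattern around raw_spin_lock_irq()/raw_spin_lock_irqsave(). */

With a shape like this, callers such as tick_periodic() or xtime_update() keep their structure; only the lock and unlock calls change, which is exactly what the hunks above do.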