Diffstat (limited to 'kernel/time/timekeeping.c')
-rw-r--r-- | kernel/time/timekeeping.c | 338
1 files changed, 184 insertions, 154 deletions
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 0c6358186401..403c2a092830 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -25,6 +25,8 @@ struct timekeeper {
 	/* Current clocksource used for timekeeping. */
 	struct clocksource *clock;
+	/* NTP adjusted clock multiplier */
+	u32	mult;
 	/* The shift value of the current clocksource. */
 	int	shift;
 
@@ -45,12 +47,47 @@ struct timekeeper {
 	/* Shift conversion between clock shifted nano seconds and
 	 * ntp shifted nano seconds. */
 	int	ntp_error_shift;
-	/* NTP adjusted clock multiplier */
-	u32	mult;
+
+	/* The current time */
+	struct timespec xtime;
+	/*
+	 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
+	 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
+	 * at zero at system boot time, so wall_to_monotonic will be negative,
+	 * however, we will ALWAYS keep the tv_nsec part positive so we can use
+	 * the usual normalization.
+	 *
+	 * wall_to_monotonic is moved after resume from suspend for the
+	 * monotonic time not to jump. We need to add total_sleep_time to
+	 * wall_to_monotonic to get the real boot based time offset.
+	 *
+	 * - wall_to_monotonic is no longer the boot time, getboottime must be
+	 * used instead.
+	 */
+	struct timespec wall_to_monotonic;
+	/* time spent in suspend */
+	struct timespec total_sleep_time;
+	/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
+	struct timespec raw_time;
+
+	/* Seqlock for all timekeeper values */
+	seqlock_t lock;
 };
 
 static struct timekeeper timekeeper;
 
+/*
+ * This read-write spinlock protects us from races in SMP while
+ * playing with xtime.
+ */
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
+
+
+/* flag for if timekeeping is suspended */
+int __read_mostly timekeeping_suspended;
+
+
+
 /**
  * timekeeper_setup_internals - Set up internals to use clocksource clock.
  *
@@ -135,47 +172,28 @@ static inline s64 timekeeping_get_ns_raw(void)
 	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
 }
 
-/*
- * This read-write spinlock protects us from races in SMP while
- * playing with xtime.
- */
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
-
-/*
- * The current time
- * wall_to_monotonic is what we need to add to xtime (or xtime corrected
- * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
- * at zero at system boot time, so wall_to_monotonic will be negative,
- * however, we will ALWAYS keep the tv_nsec part positive so we can use
- * the usual normalization.
- *
- * wall_to_monotonic is moved after resume from suspend for the monotonic
- * time not to jump. We need to add total_sleep_time to wall_to_monotonic
- * to get the real boot based time offset.
- *
- * - wall_to_monotonic is no longer the boot time, getboottime must be
- * used instead.
- */
-static struct timespec xtime __attribute__ ((aligned (16)));
-static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
-static struct timespec total_sleep_time;
-
-/*
- * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
- */
-static struct timespec raw_time;
+/* must hold write on timekeeper.lock */
+static void timekeeping_update(bool clearntp)
+{
+	if (clearntp) {
+		timekeeper.ntp_error = 0;
+		ntp_clear();
+	}
+	update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
+			 timekeeper.clock, timekeeper.mult);
+}
 
-/* flag for if timekeeping is suspended */
-int __read_mostly timekeeping_suspended;
 
-/* must hold xtime_lock */
 void timekeeping_leap_insert(int leapsecond)
 {
-	xtime.tv_sec += leapsecond;
-	wall_to_monotonic.tv_sec -= leapsecond;
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-			timekeeper.mult);
+	unsigned long flags;
+
+	write_seqlock_irqsave(&timekeeper.lock, flags);
+	timekeeper.xtime.tv_sec += leapsecond;
+	timekeeper.wall_to_monotonic.tv_sec -= leapsecond;
+	timekeeping_update(false);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+
 }
 
 /**
@@ -202,10 +220,10 @@ static void timekeeping_forward_now(void)
 
 	/* If arch requires, add in gettimeoffset() */
 	nsec += arch_gettimeoffset();
-	timespec_add_ns(&xtime, nsec);
+	timespec_add_ns(&timekeeper.xtime, nsec);
 
 	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
-	timespec_add_ns(&raw_time, nsec);
+	timespec_add_ns(&timekeeper.raw_time, nsec);
 }
 
 /**
@@ -222,15 +240,15 @@ void getnstimeofday(struct timespec *ts)
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
-		*ts = xtime;
+		*ts = timekeeper.xtime;
 		nsecs = timekeeping_get_ns();
 
 		/* If arch requires, add in gettimeoffset() */
 		nsecs += arch_gettimeoffset();
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	timespec_add_ns(ts, nsecs);
 }
@@ -245,14 +263,16 @@ ktime_t ktime_get(void)
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
-		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
+		seq = read_seqbegin(&timekeeper.lock);
+		secs = timekeeper.xtime.tv_sec +
+				timekeeper.wall_to_monotonic.tv_sec;
+		nsecs = timekeeper.xtime.tv_nsec +
+				timekeeper.wall_to_monotonic.tv_nsec;
 		nsecs += timekeeping_get_ns();
 		/* If arch requires, add in gettimeoffset() */
 		nsecs += arch_gettimeoffset();
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 	/*
 	 * Use ktime_set/ktime_add_ns to create a proper ktime on
 	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -278,14 +298,14 @@ void ktime_get_ts(struct timespec *ts)
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		*ts = xtime;
-		tomono = wall_to_monotonic;
+		seq = read_seqbegin(&timekeeper.lock);
+		*ts = timekeeper.xtime;
+		tomono = timekeeper.wall_to_monotonic;
 		nsecs = timekeeping_get_ns();
 
 		/* If arch requires, add in gettimeoffset() */
 		nsecs += arch_gettimeoffset();
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
 				ts->tv_nsec + tomono.tv_nsec + nsecs);
@@ -313,10 +333,10 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
 	do {
 		u32 arch_offset;
 
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
-		*ts_raw = raw_time;
-		*ts_real = xtime;
+		*ts_raw = timekeeper.raw_time;
+		*ts_real = timekeeper.xtime;
 
 		nsecs_raw = timekeeping_get_ns_raw();
 		nsecs_real = timekeeping_get_ns();
@@ -326,7 +346,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
 		nsecs_raw += arch_offset;
 		nsecs_real += arch_offset;
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	timespec_add_ns(ts_raw, nsecs_raw);
 	timespec_add_ns(ts_real, nsecs_real);
@@ -365,23 +385,19 @@ int do_settimeofday(const struct timespec *tv)
 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 
 	timekeeping_forward_now();
 
-	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
-	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
-	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
-
-	xtime = *tv;
+	ts_delta.tv_sec = tv->tv_sec - timekeeper.xtime.tv_sec;
+	ts_delta.tv_nsec = tv->tv_nsec - timekeeper.xtime.tv_nsec;
+	timekeeper.wall_to_monotonic =
+			timespec_sub(timekeeper.wall_to_monotonic, ts_delta);
 
-	timekeeper.ntp_error = 0;
-	ntp_clear();
-
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-			timekeeper.mult);
+	timekeeper.xtime = *tv;
+	timekeeping_update(true);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();
@@ -405,20 +421,17 @@ int timekeeping_inject_offset(struct timespec *ts)
 	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 
 	timekeeping_forward_now();
 
-	xtime = timespec_add(xtime, *ts);
-	wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
-
-	timekeeper.ntp_error = 0;
-	ntp_clear();
+	timekeeper.xtime = timespec_add(timekeeper.xtime, *ts);
+	timekeeper.wall_to_monotonic =
+				timespec_sub(timekeeper.wall_to_monotonic, *ts);
 
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-			timekeeper.mult);
+	timekeeping_update(true);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();
@@ -490,11 +503,11 @@ void getrawmonotonic(struct timespec *ts)
 	s64 nsecs;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 		nsecs = timekeeping_get_ns_raw();
-		*ts = raw_time;
+		*ts = timekeeper.raw_time;
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	timespec_add_ns(ts, nsecs);
 }
@@ -510,24 +523,30 @@ int timekeeping_valid_for_hres(void)
 	int ret;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
 		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	return ret;
 }
 
 /**
  * timekeeping_max_deferment - Returns max time the clocksource can be deferred
- *
- * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
- * ensure that the clocksource does not change!
  */
 u64 timekeeping_max_deferment(void)
 {
-	return timekeeper.clock->max_idle_ns;
+	unsigned long seq;
+	u64 ret;
+
+	do {
+		seq = read_seqbegin(&timekeeper.lock);
+
+		ret = timekeeper.clock->max_idle_ns;
+
+	} while (read_seqretry(&timekeeper.lock, seq));
+
+	return ret;
 }
 
 /**
@@ -572,28 +591,29 @@ void __init timekeeping_init(void)
 	read_persistent_clock(&now);
 	read_boot_clock(&boot);
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	seqlock_init(&timekeeper.lock);
 
 	ntp_init();
 
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 	clock = clocksource_default_clock();
 	if (clock->enable)
 		clock->enable(clock);
 	timekeeper_setup_internals(clock);
 
-	xtime.tv_sec = now.tv_sec;
-	xtime.tv_nsec = now.tv_nsec;
-	raw_time.tv_sec = 0;
-	raw_time.tv_nsec = 0;
+	timekeeper.xtime.tv_sec = now.tv_sec;
+	timekeeper.xtime.tv_nsec = now.tv_nsec;
+	timekeeper.raw_time.tv_sec = 0;
+	timekeeper.raw_time.tv_nsec = 0;
 	if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
-		boot.tv_sec = xtime.tv_sec;
-		boot.tv_nsec = xtime.tv_nsec;
+		boot.tv_sec = timekeeper.xtime.tv_sec;
+		boot.tv_nsec = timekeeper.xtime.tv_nsec;
 	}
-	set_normalized_timespec(&wall_to_monotonic,
+	set_normalized_timespec(&timekeeper.wall_to_monotonic,
 				-boot.tv_sec, -boot.tv_nsec);
-	total_sleep_time.tv_sec = 0;
-	total_sleep_time.tv_nsec = 0;
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	timekeeper.total_sleep_time.tv_sec = 0;
+	timekeeper.total_sleep_time.tv_nsec = 0;
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 }
 
 /* time in seconds when suspend began */
@@ -614,9 +634,11 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
 		return;
 	}
 
-	xtime = timespec_add(xtime, *delta);
-	wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
-	total_sleep_time = timespec_add(total_sleep_time, *delta);
+	timekeeper.xtime = timespec_add(timekeeper.xtime, *delta);
+	timekeeper.wall_to_monotonic =
+			timespec_sub(timekeeper.wall_to_monotonic, *delta);
+	timekeeper.total_sleep_time = timespec_add(
+				timekeeper.total_sleep_time, *delta);
 }
 
 
@@ -640,17 +662,15 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
 	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
 		return;
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
+
 	timekeeping_forward_now();
 
 	__timekeeping_inject_sleeptime(delta);
 
-	timekeeper.ntp_error = 0;
-	ntp_clear();
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-			timekeeper.mult);
+	timekeeping_update(true);
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	/* signal hrtimers about time change */
 	clock_was_set();
@@ -673,7 +693,7 @@ static void timekeeping_resume(void)
 
 	clocksource_resume();
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 
 	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
 		ts = timespec_sub(ts, timekeeping_suspend_time);
@@ -683,7 +703,7 @@ static void timekeeping_resume(void)
 	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
 	timekeeper.ntp_error = 0;
 	timekeeping_suspended = 0;
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	touch_softlockup_watchdog();
 
@@ -701,7 +721,7 @@ static int timekeeping_suspend(void)
 
 	read_persistent_clock(&timekeeping_suspend_time);
 
-	write_seqlock_irqsave(&xtime_lock, flags);
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 	timekeeping_forward_now();
 	timekeeping_suspended = 1;
 
@@ -711,7 +731,7 @@ static int timekeeping_suspend(void)
 	 * try to compensate so the difference in system time
 	 * and persistent_clock time stays close to constant.
 	 */
-	delta = timespec_sub(xtime, timekeeping_suspend_time);
+	delta = timespec_sub(timekeeper.xtime, timekeeping_suspend_time);
 	delta_delta = timespec_sub(delta, old_delta);
 	if (abs(delta_delta.tv_sec) >= 2) {
 		/*
@@ -724,7 +744,7 @@ static int timekeeping_suspend(void)
 		timekeeping_suspend_time =
 			timespec_add(timekeeping_suspend_time, delta_delta);
 	}
-	write_sequnlock_irqrestore(&xtime_lock, flags);
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
 
 	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
 	clocksource_suspend();
@@ -775,7 +795,7 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
 	 * Now calculate the error in (1 << look_ahead) ticks, but first
 	 * remove the single look ahead already included in the error.
 	 */
-	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
+	tick_error = ntp_tick_length() >> (timekeeper.ntp_error_shift + 1);
 	tick_error -= timekeeper.xtime_interval >> 1;
 	error = ((error - tick_error) >> look_ahead) + tick_error;
 
@@ -943,22 +963,22 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
 	while (timekeeper.xtime_nsec >= nsecps) {
 		timekeeper.xtime_nsec -= nsecps;
-		xtime.tv_sec++;
+		timekeeper.xtime.tv_sec++;
 		second_overflow();
 	}
 
 	/* Accumulate raw time */
 	raw_nsecs = timekeeper.raw_interval << shift;
-	raw_nsecs += raw_time.tv_nsec;
+	raw_nsecs += timekeeper.raw_time.tv_nsec;
 	if (raw_nsecs >= NSEC_PER_SEC) {
 		u64 raw_secs = raw_nsecs;
 		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-		raw_time.tv_sec += raw_secs;
+		timekeeper.raw_time.tv_sec += raw_secs;
 	}
-	raw_time.tv_nsec = raw_nsecs;
+	timekeeper.raw_time.tv_nsec = raw_nsecs;
 
 	/* Accumulate error between NTP and clock interval */
-	timekeeper.ntp_error += tick_length << shift;
+	timekeeper.ntp_error += ntp_tick_length() << shift;
 	timekeeper.ntp_error -= (timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
 				(timekeeper.ntp_error_shift + shift);
 
@@ -970,17 +990,19 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 /**
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
- * Called from the timer interrupt, must hold a write on xtime_lock.
  */
 static void update_wall_time(void)
 {
 	struct clocksource *clock;
 	cycle_t offset;
 	int shift = 0, maxshift;
+	unsigned long flags;
+
+	write_seqlock_irqsave(&timekeeper.lock, flags);
 
 	/* Make sure we're fully resumed: */
 	if (unlikely(timekeeping_suspended))
-		return;
+		goto out;
 
 	clock = timekeeper.clock;
 
@@ -989,7 +1011,8 @@ static void update_wall_time(void)
 #else
 	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #endif
-	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
+	timekeeper.xtime_nsec = (s64)timekeeper.xtime.tv_nsec <<
+						timekeeper.shift;
 
 	/*
 	 * With NO_HZ we may have to accumulate many cycle_intervals
@@ -1002,7 +1025,7 @@ static void update_wall_time(void)
 	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
 	shift = max(0, shift);
 	/* Bound shift to one less then what overflows tick_length */
-	maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
+	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
 	shift = min(shift, maxshift);
 	while (offset >= timekeeper.cycle_interval) {
 		offset = logarithmic_accumulation(offset, shift);
@@ -1040,8 +1063,10 @@ static void update_wall_time(void)
 	 * Store full nanoseconds into xtime after rounding it up and
 	 * add the remainder to the error difference.
 	 */
-	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
-	timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
+	timekeeper.xtime.tv_nsec = ((s64)timekeeper.xtime_nsec >>
+						timekeeper.shift) + 1;
+	timekeeper.xtime_nsec -= (s64)timekeeper.xtime.tv_nsec <<
+						timekeeper.shift;
 	timekeeper.ntp_error += timekeeper.xtime_nsec <<
 				timekeeper.ntp_error_shift;
 
@@ -1049,15 +1074,17 @@ static void update_wall_time(void)
 	 * Finally, make sure that after the rounding
 	 * xtime.tv_nsec isn't larger then NSEC_PER_SEC
 	 */
-	if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
-		xtime.tv_nsec -= NSEC_PER_SEC;
-		xtime.tv_sec++;
+	if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) {
+		timekeeper.xtime.tv_nsec -= NSEC_PER_SEC;
+		timekeeper.xtime.tv_sec++;
 		second_overflow();
 	}
 
-	/* check to see if there is a new clocksource to use */
-	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-			timekeeper.mult);
+	timekeeping_update(false);
+
+out:
+	write_sequnlock_irqrestore(&timekeeper.lock, flags);
+
 }
 
 /**
@@ -1074,8 +1101,10 @@ static void update_wall_time(void)
 void getboottime(struct timespec *ts)
 {
 	struct timespec boottime = {
-		.tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec,
-		.tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec
+		.tv_sec = timekeeper.wall_to_monotonic.tv_sec +
+				timekeeper.total_sleep_time.tv_sec,
+		.tv_nsec = timekeeper.wall_to_monotonic.tv_nsec +
+				timekeeper.total_sleep_time.tv_nsec
 	};
 
 	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
@@ -1101,13 +1130,13 @@ void get_monotonic_boottime(struct timespec *ts)
 	WARN_ON(timekeeping_suspended);
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		*ts = xtime;
-		tomono = wall_to_monotonic;
-		sleep = total_sleep_time;
+		seq = read_seqbegin(&timekeeper.lock);
+		*ts = timekeeper.xtime;
+		tomono = timekeeper.wall_to_monotonic;
+		sleep = timekeeper.total_sleep_time;
 		nsecs = timekeeping_get_ns();
 
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
 			ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
@@ -1137,19 +1166,19 @@ EXPORT_SYMBOL_GPL(ktime_get_boottime);
  */
 void monotonic_to_bootbased(struct timespec *ts)
 {
-	*ts = timespec_add(*ts, total_sleep_time);
+	*ts = timespec_add(*ts, timekeeper.total_sleep_time);
 }
 EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
-	return xtime.tv_sec;
+	return timekeeper.xtime.tv_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-	return xtime;
+	return timekeeper.xtime;
 }
 
 struct timespec current_kernel_time(void)
@@ -1158,10 +1187,10 @@ struct timespec current_kernel_time(void)
 	unsigned long seq;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
-		now = xtime;
-	} while (read_seqretry(&xtime_lock, seq));
+		now = timekeeper.xtime;
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	return now;
 }
@@ -1173,11 +1202,11 @@ struct timespec get_monotonic_coarse(void)
 	unsigned long seq;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&timekeeper.lock);
 
-		now = xtime;
-		mono = wall_to_monotonic;
-	} while (read_seqretry(&xtime_lock, seq));
+		now = timekeeper.xtime;
+		mono = timekeeper.wall_to_monotonic;
+	} while (read_seqretry(&timekeeper.lock, seq));
 
 	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
 				now.tv_nsec + mono.tv_nsec);
@@ -1209,11 +1238,11 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
 	unsigned long seq;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		*xtim = xtime;
-		*wtom = wall_to_monotonic;
-		*sleep = total_sleep_time;
-	} while (read_seqretry(&xtime_lock, seq));
+		seq = read_seqbegin(&timekeeper.lock);
+		*xtim = timekeeper.xtime;
+		*wtom = timekeeper.wall_to_monotonic;
+		*sleep = timekeeper.total_sleep_time;
+	} while (read_seqretry(&timekeeper.lock, seq));
 }
 
 /**
@@ -1225,9 +1254,10 @@ ktime_t ktime_get_monotonic_offset(void)
 	struct timespec wtom;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
-		wtom = wall_to_monotonic;
-	} while (read_seqretry(&xtime_lock, seq));
+		seq = read_seqbegin(&timekeeper.lock);
+		wtom = timekeeper.wall_to_monotonic;
+	} while (read_seqretry(&timekeeper.lock, seq));
+
 	return timespec_to_ktime(wtom);
 }
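Every accessor converted above follows the same seqlock retry pattern against timekeeper.lock: readers snapshot the fields inside a read_seqbegin()/read_seqretry() loop, writers take the lock with interrupts disabled and bump the sequence count. A minimal, self-contained sketch of that pattern (hypothetical "struct demo" and function names, not the kernel's real timekeeper) could look like this:

	#include <linux/seqlock.h>
	#include <linux/time.h>

	struct demo {
		seqlock_t	lock;	/* protects the fields below */
		struct timespec	xtime;	/* current wall time */
	};

	static struct demo demo;

	/* Writer side: exclusive, IRQ-safe, bumps the sequence count. */
	static void demo_set_time(const struct timespec *ts)
	{
		unsigned long flags;

		write_seqlock_irqsave(&demo.lock, flags);
		demo.xtime = *ts;
		write_sequnlock_irqrestore(&demo.lock, flags);
	}

	/* Reader side: retry the snapshot if a writer raced with us. */
	static void demo_get_time(struct timespec *ts)
	{
		unsigned long seq;

		do {
			seq = read_seqbegin(&demo.lock);
			*ts = demo.xtime;
		} while (read_seqretry(&demo.lock, seq));
	}

Readers never block the writer; they simply retry the snapshot when the sequence count changed. That is what lets update_wall_time() take timekeeper.lock itself in this patch instead of relying on callers holding xtime_lock.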