| author | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-10 16:41:59 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2026-02-10 16:41:59 -0800 |
| commit | 353a7e8a69058591c3ec40028063af798b698559 | |
| tree | 3b211dd18afa821ae5fd32b225ca57a798b70e85 /kernel/time | |
| parent | 48295ab42dae91b6903221adf1c316f08ffa5e09 | |
| parent | 24989330fb99189cf9ec4accef6ac81c7b1c31f7 | |
Merge tag 'timers-core-2026-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer core updates from Thomas Gleixner:
- Inline timecounter_cyc2time(), which is now used in the networking
  hotpath; inlining it significantly improves performance there (see the
  sketch after this list for what the inlined helper looks like).
- Optimize the tick dependency check for the case where the tick_stop
  tracepoint is disabled. This speeds up the tick management code, which
  is a hotpath on transitions into and out of idle (a sketch of the
  tracepoint_enabled() pattern follows the diff at the end).
- The usual cleanups and improvements
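
The diffstat below is limited to kernel/time, so only the removal of timecounter_cyc2time() from timecounter.c is visible here; the inlined replacement lives in the timecounter header, which is not part of this view. As a rough sketch only (the exact location and form in include/linux/timecounter.h are assumptions, not shown in this pull), the inlined helper could look like this:

```c
/*
 * Sketch of an inlined timecounter_cyc2time(), mirroring the code removed
 * from kernel/time/timecounter.c in the diff below. The header placement
 * and exact form are assumptions for illustration.
 */
static inline u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
                                      u64 cycles, u64 mask, u64 frac)
{
        /* Like cyclecounter_cyc2ns(), but for a time before the last update */
        return ((cycles * cc->mult) - frac) >> cc->shift;
}

static inline u64 timecounter_cyc2time(const struct timecounter *tc,
                                       u64 cycle_tstamp)
{
        u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
        u64 nsec = tc->nsec, frac = tc->frac;

        /*
         * Instead of always treating cycle_tstamp as newer than
         * tc->cycle_last, detect when it is too far in the future and
         * treat it as an old timestamp instead.
         */
        if (delta > tc->cc->mask / 2) {
                delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
                nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);
        } else {
                nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);
        }

        return nsec;
}
```

Turning this into a static inline removes a cross-object call (plus the EXPORT_SYMBOL_GPL indirection) from every timestamp conversion, which, per the commit message, is where the networking hotpath win comes from.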
* tag 'timers-core-2026-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
time/kunit: Document handling of negative years of is_leap()
tick/nohz: Optimize check_tick_dependency() with early return
time/sched_clock: Use ACCESS_PRIVATE() to evaluate hrtimer::function
hrtimer: Drop _tv64() helpers
hrtimer: Remove public definition of HIGH_RES_NSEC
hrtimer: Remove unused resolution constants
time/timecounter: Inline timecounter_cyc2time()
Diffstat (limited to 'kernel/time')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | kernel/time/hrtimer.c | 14 |
| -rw-r--r-- | kernel/time/sched_clock.c | 2 |
| -rw-r--r-- | kernel/time/tick-sched.c | 3 |
| -rw-r--r-- | kernel/time/time_test.c | 4 |
| -rw-r--r-- | kernel/time/timecounter.c | 35 |
5 files changed, 18 insertions, 40 deletions
```diff
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 59d22f1bd0a8..860af7a58428 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -50,6 +50,14 @@
 #include "tick-internal.h"
 
 /*
+ * The resolution of the clocks. The resolution value is returned in
+ * the clock_getres() system call to give application programmers an
+ * idea of the (in)accuracy of timers. Timer values are rounded up to
+ * this resolution values.
+ */
+#define HIGH_RES_NSEC 1
+
+/*
  * Masks for selecting the soft and hard context timers from
  * cpu_base->active
  */
@@ -806,7 +814,7 @@ static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
 	struct hrtimer_clock_base *base = timer->base;
 	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 
-	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
+	WARN_ON_ONCE(hrtimer_get_expires(timer) < 0);
 
 	/*
 	 * CLOCK_REALTIME timer might be requested with an absolute
@@ -1053,7 +1061,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 
 		orun = ktime_divns(delta, incr);
 		hrtimer_add_expires_ns(timer, incr * orun);
-		if (hrtimer_get_expires_tv64(timer) > now)
+		if (hrtimer_get_expires(timer) > now)
 			return orun;
 		/*
 		 * This (and the ktime_add() below) is the
@@ -1835,7 +1843,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
 		 * are right-of a not yet expired timer, because that
 		 * timer will have to trigger a wakeup anyway.
 		 */
-		if (basenow < hrtimer_get_softexpires_tv64(timer))
+		if (basenow < hrtimer_get_softexpires(timer))
 			break;
 
 		__run_hrtimer(cpu_base, base, timer, &basenow, flags);
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index f39111830ca3..f3aaef695b8c 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -215,7 +215,7 @@ void sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
 
 	update_clock_read_data(&rd);
 
-	if (sched_clock_timer.function != NULL) {
+	if (ACCESS_PRIVATE(&sched_clock_timer, function) != NULL) {
 		/* update timeout for clock wrap */
 		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
 			      HRTIMER_MODE_REL_HARD);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 72e39c793117..f7907fadd63f 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -344,6 +344,9 @@ static bool check_tick_dependency(atomic_t *dep)
 {
 	int val = atomic_read(dep);
 
+	if (likely(!tracepoint_enabled(tick_stop)))
+		return val;
+
 	if (val & TICK_DEP_MASK_POSIX_TIMER) {
 		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
 		return true;
diff --git a/kernel/time/time_test.c b/kernel/time/time_test.c
index 2889763165e5..1b99180da288 100644
--- a/kernel/time/time_test.c
+++ b/kernel/time/time_test.c
@@ -4,7 +4,9 @@
 #include <linux/time.h>
 
 /*
- * Traditional implementation of leap year evaluation.
+ * Traditional implementation of leap year evaluation, but note that long
+ * is a signed type and the tests do cover negative year values. So this
+ * can't use the is_leap_year() helper from rtc.h.
  */
 static bool is_leap(long year)
 {
diff --git a/kernel/time/timecounter.c b/kernel/time/timecounter.c
index 3d2a354cfe1c..2e64dbb6302d 100644
--- a/kernel/time/timecounter.c
+++ b/kernel/time/timecounter.c
@@ -62,38 +62,3 @@ u64 timecounter_read(struct timecounter *tc)
 }
 EXPORT_SYMBOL_GPL(timecounter_read);
 
-/*
- * This is like cyclecounter_cyc2ns(), but it is used for computing a
- * time previous to the time stored in the cycle counter.
- */
-static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
-			       u64 cycles, u64 mask, u64 frac)
-{
-	u64 ns = (u64) cycles;
-
-	ns = ((ns * cc->mult) - frac) >> cc->shift;
-
-	return ns;
-}
-
-u64 timecounter_cyc2time(const struct timecounter *tc,
-			 u64 cycle_tstamp)
-{
-	u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
-	u64 nsec = tc->nsec, frac = tc->frac;
-
-	/*
-	 * Instead of always treating cycle_tstamp as more recent
-	 * than tc->cycle_last, detect when it is too far in the
-	 * future and treat it as old time stamp instead.
-	 */
-	if (delta > tc->cc->mask / 2) {
-		delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
-		nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);
-	} else {
-		nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);
-	}
-
-	return nsec;
-}
-EXPORT_SYMBOL_GPL(timecounter_cyc2time);
```
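
The tick-sched.c hunk above keys its early return on tracepoint_enabled(), which tests the tracepoint's static key, so with the tick_stop tracepoint disabled the per-bit reporting chain collapses to a patched-out branch plus a boolean check. Below is a minimal sketch of the same pattern with a hypothetical tracepoint and dependency bits (sample_dep_report and SAMPLE_DEP_* are made up for illustration and are not part of this merge):

```c
#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/tracepoint-defs.h>

/*
 * Hypothetical tracepoint: DECLARE_TRACEPOINT() only declares the symbol
 * that tracepoint_enabled() needs in order to test the static key; the
 * event itself would be defined in a trace header elsewhere.
 */
DECLARE_TRACEPOINT(sample_dep_report);

/* Hypothetical dependency bits, for illustration only. */
#define SAMPLE_DEP_A	BIT(0)
#define SAMPLE_DEP_B	BIT(1)

static bool sample_has_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	/*
	 * Fast path: with the tracepoint disabled, its static key turns
	 * this test into a patched-out branch and only the boolean answer
	 * matters, so there is no need to identify which bit is set.
	 */
	if (likely(!tracepoint_enabled(sample_dep_report)))
		return val != 0;

	/* Slow path: check each bit so the reason could be traced. */
	if (val & SAMPLE_DEP_A) {
		/* trace_sample_dep_report(SAMPLE_DEP_A) would go here */
		return true;
	}
	if (val & SAMPLE_DEP_B) {
		/* trace_sample_dep_report(SAMPLE_DEP_B) would go here */
		return true;
	}

	return false;
}
```

The trade-off is that when tracing is enabled the per-bit walk still runs, so the trace event can report exactly which dependency kept the tick alive, preserving the behaviour tracing users relied on before the optimization.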
