summaryrefslogtreecommitdiff
path: root/kernel/time
diff options
context:
space:
mode:
author	Thomas Gleixner <tglx@kernel.org>	2026-02-24 17:36:59 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2026-02-27 16:40:09 +0100
commit	f2e388a019e4cf83a15883a3d1f1384298e9a6aa (patch)
tree	7d6e0ec5ae4552bdedd904df487dfe17a30d144d /kernel/time
parent	513e744a0a4a70ebdb155611b897e9ed4d83831c (diff)
hrtimer: Reduce trace noise in hrtimer_start()
hrtimer_start() when invoked with an already armed timer traces like:

  <comm>-.. [032] d.h2. 5.002263: hrtimer_cancel: hrtimer= ....
  <comm>-.. [032] d.h1. 5.002263: hrtimer_start: hrtimer= ....

Which is incorrect as the timer doesn't get canceled. Just the expiry
time changes. The internal dequeue operation which is required for that
is not really interesting for trace analysis. But it makes it tedious to
keep real cancellations and the above case apart.

Remove the cancel tracing in hrtimer_start() and add a 'was_armed'
indicator to the hrtimer start tracepoint, which clearly indicates what
the state of the hrtimer is when hrtimer_start() is invoked:

  <comm>-.. [032] d.h1. 6.200103: hrtimer_start: hrtimer= .... was_armed=0
  <comm>-.. [032] d.h1. 6.200558: hrtimer_start: hrtimer= .... was_armed=1

Fixes: c6a2a1770245 ("hrtimer: Add tracepoint for hrtimers")
Signed-off-by: Thomas Gleixner <tglx@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260224163430.208491877@kernel.org
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/hrtimer.c	| 43
1 file changed, 20 insertions(+), 23 deletions(-)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index fa63e0b20455..6e4ac8dea312 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -529,17 +529,10 @@ static inline void debug_setup_on_stack(struct hrtimer *timer, clockid_t clockid
trace_hrtimer_setup(timer, clockid, mode);
}
-static inline void debug_activate(struct hrtimer *timer,
- enum hrtimer_mode mode)
+static inline void debug_activate(struct hrtimer *timer, enum hrtimer_mode mode, bool was_armed)
{
debug_hrtimer_activate(timer, mode);
- trace_hrtimer_start(timer, mode);
-}
-
-static inline void debug_deactivate(struct hrtimer *timer)
-{
- debug_hrtimer_deactivate(timer);
- trace_hrtimer_cancel(timer);
+ trace_hrtimer_start(timer, mode, was_armed);
}
static struct hrtimer_clock_base *
@@ -1137,9 +1130,9 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
* Returns true when the new timer is the leftmost timer in the tree.
*/
static bool enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
- enum hrtimer_mode mode)
+ enum hrtimer_mode mode, bool was_armed)
{
- debug_activate(timer, mode);
+ debug_activate(timer, mode, was_armed);
WARN_ON_ONCE(!base->cpu_base->online);
base->cpu_base->active_bases |= 1 << base->index;
@@ -1199,6 +1192,8 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
if (state & HRTIMER_STATE_ENQUEUED) {
bool reprogram;
+ debug_hrtimer_deactivate(timer);
+
/*
* Remove the timer and force reprogramming when high
* resolution mode is active and the timer is on the current
@@ -1207,7 +1202,6 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
* reprogramming happens in the interrupt handler. This is a
* rare case and less expensive than a smp call.
*/
- debug_deactivate(timer);
reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
/*
@@ -1274,15 +1268,15 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
{
struct hrtimer_cpu_base *this_cpu_base = this_cpu_ptr(&hrtimer_bases);
struct hrtimer_clock_base *new_base;
- bool force_local, first;
+ bool force_local, first, was_armed;
/*
* If the timer is on the local cpu base and is the first expiring
* timer then this might end up reprogramming the hardware twice
- * (on removal and on enqueue). To avoid that by prevent the
- * reprogram on removal, keep the timer local to the current CPU
- * and enforce reprogramming after it is queued no matter whether
- * it is the new first expiring timer again or not.
+ * (on removal and on enqueue). To avoid that prevent the reprogram
+ * on removal, keep the timer local to the current CPU and enforce
+ * reprogramming after it is queued no matter whether it is the new
+ * first expiring timer again or not.
*/
force_local = base->cpu_base == this_cpu_base;
force_local &= base->cpu_base->next_timer == timer;
@@ -1304,7 +1298,7 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
* avoids programming the underlying clock event twice (once at
* removal and once after enqueue).
*/
- remove_hrtimer(timer, base, true, force_local);
+ was_armed = remove_hrtimer(timer, base, true, force_local);
if (mode & HRTIMER_MODE_REL)
tim = ktime_add_safe(tim, __hrtimer_cb_get_time(base->clockid));
@@ -1321,7 +1315,7 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
new_base = base;
}
- first = enqueue_hrtimer(timer, new_base, mode);
+ first = enqueue_hrtimer(timer, new_base, mode, was_armed);
/*
* If the hrtimer interrupt is running, then it will reevaluate the
@@ -1439,8 +1433,11 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
base = lock_hrtimer_base(timer, &flags);
- if (!hrtimer_callback_running(timer))
+ if (!hrtimer_callback_running(timer)) {
ret = remove_hrtimer(timer, base, false, false);
+ if (ret)
+ trace_hrtimer_cancel(timer);
+ }
unlock_hrtimer_base(timer, &flags);
@@ -1877,7 +1874,7 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
*/
if (restart != HRTIMER_NORESTART &&
!(timer->state & HRTIMER_STATE_ENQUEUED))
- enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
+ enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS, false);
/*
* Separate the ->running assignment from the ->state assignment.
@@ -2356,7 +2353,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
while ((node = timerqueue_getnext(&old_base->active))) {
timer = container_of(node, struct hrtimer, node);
BUG_ON(hrtimer_callback_running(timer));
- debug_deactivate(timer);
+ debug_hrtimer_deactivate(timer);
/*
* Mark it as ENQUEUED not INACTIVE otherwise the
@@ -2373,7 +2370,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
* sort out already expired timers and reprogram the
* event device.
*/
- enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
+ enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS, true);
}
}