summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-05-08 20:03:39 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2026-05-08 20:03:39 -0700
commit6e1e5a33e809105f44401b6a962a2c63e360f561 (patch)
tree6a249a35c63f47ce9e66862d9c30f3a4b9ae1f4f
parent7f0023215262221ca08d56be2203e8a4770be033 (diff)
parentbd3c45dd01283ada23b0a388c578dcf5600deb8a (diff)
Merge tag 'timers-urgent-2026-05-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer fix from Ingo Molnar: "Fix CPU hotplug activation race in the timer migration code, by Frederic Weisbecker" * tag 'timers-urgent-2026-05-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: timers/migration: Fix another hotplug activation race
-rw-r--r--kernel/time/timer_migration.c40
1 file changed, 29 insertions, 11 deletions
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 155eeaea4113..1d0d3a4058d5 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -1860,19 +1860,37 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node,
* child to the new parents. So tmigr_active_up() activates the
* new parents while walking up from the old root to the new.
*
- * * It is ensured that @start is active, as this setup path is
- * executed in hotplug prepare callback. This is executed by an
- * already connected and !idle CPU. Even if all other CPUs go idle,
- * the CPU executing the setup will be responsible up to current top
- * level group. And the next time it goes inactive, it will release
- * the new childmask and parent to subsequent walkers through this
- * @child. Therefore propagate active state unconditionally.
+ * * It is ensured that @start is active, (or on the way to be activated
+ * by another CPU that woke up before the current one) as this setup path
+ * is executed in hotplug prepare callback. This is executed by an already
+ * connected and !idle CPU in the hierarchy.
+ *
+ * * The below RmW atomic operation ensures that:
+ *
+ * 1) If the old root has been completely activated, the latest state is
+ * acquired (the below implicit acquire pairs with the implicit release
+ * from cmpxchg() in tmigr_active_up()).
+ *
+ * 2) If the old root is still on the way to be activated, the lagging behind
+ * CPU performing the activation will acquire the links up to the new root.
+ * (The below implicit release pairs with the implicit acquire from cmpxchg()
+ * in tmigr_active_up()).
+ *
+ * 3) Every subsequent CPU below the old root will acquire the new links while
+ * walking through the old root (The below implicit release pairs with the
+ * implicit acquire from cmpxchg() in either tmigr_active_up() or
+ * tmigr_inactive_up()).
*/
- state.state = atomic_read(&start->migr_state);
- WARN_ON_ONCE(!state.active);
+ state.state = atomic_fetch_or(0, &start->migr_state);
WARN_ON_ONCE(!start->parent);
- data.childmask = start->groupmask;
- __walk_groups_from(tmigr_active_up, &data, start, start->parent);
+ /*
+ * If the state of the old root is inactive, another CPU is on its way to activate
+ * it and propagate to the new root.
+ */
+ if (state.active) {
+ data.childmask = start->groupmask;
+ __walk_groups_from(tmigr_active_up, &data, start, start->parent);
+ }
}
/* Root update */