From 88a4df117ad66100d0f870aa02032dfb9cb29179 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 29 Apr 2025 08:55:01 +0200
Subject: genirq/cpuhotplug: Convert to lock guards

Convert all lock/unlock pairs to guards and tidy up the code.

No functional change.

Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/all/20250429065420.560083665@linutronix.de
---
 kernel/irq/cpuhotplug.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

(limited to 'kernel/irq/cpuhotplug.c')

diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 15a7654eff68..7bd4c2a5cef4 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -177,9 +177,8 @@ void irq_migrate_all_off_this_cpu(void)
 		bool affinity_broken;

 		desc = irq_to_desc(irq);
-		raw_spin_lock(&desc->lock);
-		affinity_broken = migrate_one_irq(desc);
-		raw_spin_unlock(&desc->lock);
+		scoped_guard(raw_spinlock, &desc->lock)
+			affinity_broken = migrate_one_irq(desc);

 		if (affinity_broken) {
 			pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
@@ -244,9 +243,8 @@ int irq_affinity_online_cpu(unsigned int cpu)
 	irq_lock_sparse();
 	for_each_active_irq(irq) {
 		desc = irq_to_desc(irq);
-		raw_spin_lock_irq(&desc->lock);
-		irq_restore_affinity_of_irq(desc, cpu);
-		raw_spin_unlock_irq(&desc->lock);
+		scoped_guard(raw_spinlock, &desc->lock)
+			irq_restore_affinity_of_irq(desc, cpu);
 	}
 	irq_unlock_sparse();
--
cgit v1.2.3

From c855506257063f444044d0a85a2e9ad9ab1c7ecd Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Thu, 8 May 2025 12:05:38 +0200
Subject: genirq/cpuhotplug: Fix up lock guards conversion brainf..t

The lock guard conversion converted raw_spin_lock_irq() to
scoped_guard(raw_spinlock), which is obviously bogus and makes lockdep
mightily unhappy.

Note to self: Copy and pasta without using brain is a patently bad idea.

Fixes: 88a4df117ad6 ("genirq/cpuhotplug: Convert to lock guards")
Reported-by: Borislav Petkov
Signed-off-by: Thomas Gleixner
Tested-by: Borislav Petkov
---
 kernel/irq/cpuhotplug.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/irq/cpuhotplug.c')

diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 7bd4c2a5cef4..e77ca6db5e11 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -243,7 +243,7 @@ int irq_affinity_online_cpu(unsigned int cpu)
 	irq_lock_sparse();
 	for_each_active_irq(irq) {
 		desc = irq_to_desc(irq);
-		scoped_guard(raw_spinlock, &desc->lock)
+		scoped_guard(raw_spinlock_irq, &desc->lock)
 			irq_restore_affinity_of_irq(desc, cpu);
 	}
 	irq_unlock_sparse();
--
cgit v1.2.3

From 788019eb559fd0b365f501467ceafce540e377cc Mon Sep 17 00:00:00 2001
From: Brian Norris
Date: Wed, 14 May 2025 13:13:16 -0700
Subject: genirq: Retain disable depth for managed interrupts across CPU hotplug

Affinity-managed interrupts can be shut down and restarted during CPU
hotunplug/plug. Thereby the interrupt may be left in an unexpected state.
Specifically:

  1. Interrupt is affine to CPU N
  2. disable_irq() -> depth is 1
  3. CPU N goes offline
  4. irq_shutdown() -> depth is set to 1 (again)
  5. CPU N goes online
  6. irq_startup() -> depth is set to 0
     (BUG! The driver expects that the interrupt is still disabled)
  7. enable_irq() -> depth underflow / unbalanced enable_irq() warning

This is only a problem for managed interrupts and CPU hotplug; all other
cases, like request()/free()/request(), truly need to reset a possibly
stale disable depth value.
Provide a startup function which takes the disable depth into account, and
invoke it for the managed interrupts in the CPU hotplug path.

This requires changing irq_shutdown() to do a depth increment instead of
setting it to 1, which allows retaining the disable depth, but is harmless
for the other code paths using irq_startup(), which will still reset the
disable depth unconditionally to keep the original correct behaviour.

A kunit test will be added separately to cover some of these aspects.

[ tglx: Massaged changelog ]

Suggested-by: Thomas Gleixner
Signed-off-by: Brian Norris
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/all/20250514201353.3481400-2-briannorris@chromium.org
---
 kernel/irq/cpuhotplug.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/irq/cpuhotplug.c')

diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index e77ca6db5e11..f07529ae4895 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -218,7 +218,7 @@ static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
 		return;

 	if (irqd_is_managed_and_shutdown(data))
-		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
+		irq_startup_managed(desc);

 	/*
 	 * If the interrupt can only be directed to a single target
--
cgit v1.2.3
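
Editorial aside, not part of the commits above: the first two patches rely on
the scoped_guard() helpers built on the kernel's cleanup.h infrastructure. As
a rough sketch of the pattern (do_work() is a hypothetical placeholder, not a
function from these patches), an open-coded lock/unlock pair and its
guard-based equivalent look roughly like this:

	/*
	 * Illustrative sketch only, not taken from the patches.
	 */
	static void locked_work_open_coded(struct irq_desc *desc)
	{
		raw_spin_lock(&desc->lock);
		do_work(desc);			/* hypothetical helper */
		raw_spin_unlock(&desc->lock);
	}

	static void locked_work_guarded(struct irq_desc *desc)
	{
		/* The lock is dropped automatically when the scope is left. */
		scoped_guard(raw_spinlock, &desc->lock)
			do_work(desc);		/* hypothetical helper */
	}

The guard class encodes the locking variant: raw_spinlock pairs with
raw_spin_lock()/raw_spin_unlock(), while raw_spinlock_irq pairs with
raw_spin_lock_irq()/raw_spin_unlock_irq(). Picking the wrong class for a
caller that needs interrupts disabled is exactly what the second patch fixes.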
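
Editorial aside, not part of the commits above: a toy model of the
disable-depth accounting the third changelog describes, assuming the stated
semantics (shutdown on hotunplug increments the depth rather than forcing it
to 1; the managed startup path decrements it and only restarts the interrupt
once no disables remain). It illustrates the idea only and is not the
kernel's actual irq_shutdown()/irq_startup_managed() implementation.

	/*
	 * Toy model of the described behaviour -- not kernel code.
	 * depth == 0 means no outstanding disables.
	 */
	struct model_irq {
		unsigned int depth;
		bool started;
	};

	static void model_disable(struct model_irq *m)	/* disable_irq() */
	{
		m->depth++;
	}

	static void model_enable(struct model_irq *m)	/* enable_irq() */
	{
		if (m->depth && --m->depth == 0)
			m->started = true;
	}

	/* Hotunplug: keep any existing disable nesting instead of forcing depth to 1. */
	static void model_shutdown_on_hotunplug(struct model_irq *m)
	{
		m->depth++;
		m->started = false;
	}

	/* Hotplug: undo only the shutdown increment; restart only if nothing else disabled it. */
	static void model_startup_managed(struct model_irq *m)
	{
		if (--m->depth == 0)
			m->started = true;
	}

With this model the scenario from the changelog becomes: disable (depth 1),
hotunplug (depth 2, stopped), hotplug (depth 1, still stopped), enable
(depth 0, running) -- no depth underflow and no unexpected early start.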