author     Linus Torvalds <torvalds@linux-foundation.org>  2025-05-27 07:46:58 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-05-27 07:46:58 -0700
commit     60c1d948f79dc6626bf2fe4f2d2fba51e18a1e04 (patch)
tree       73fa84c06b4717fc6848a4eadd7ccd13719c645e /kernel/irq/proc.c
parent     0c1494015fea0935fbf6cd9d99c008fcbe1e4165 (diff)
parent     a510bb87da72aa8d1504b0e4b343cfe013ee8a89 (diff)
Merge tag 'irq-core-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq core updates from Thomas Gleixner:
 "Updates for the generic interrupt subsystem core code:

   - Address a long standing subtle problem in the CPU hotplug code for
     affinity-managed interrupts.

     Affinity-managed interrupts are shut down by the core code when the
     last CPU in the affinity set goes offline and started up again when
     the first CPU in the affinity set becomes online again.

     This unfortunately does not take into account whether an interrupt
     has been disabled before the last CPU goes offline and starts up
     the interrupt unconditionally when the first CPU becomes online
     again.

     That's obviously not what drivers expect.

     Address this by preserving the disabled state for affinity-managed
     interrupts across these CPU hotplug operations. All non-managed
     interrupts are not affected by this because startup/shutdown is
     coupled to request/free_irq(), which obviously has to reset state.

   - Support three-cell scheme interrupts to allow GPIO drivers to
     specify interrupts from an already existing scheme

   - Switch the interrupt subsystem core to lock guards. This gets rid
     of quite some copy & pasta boilerplate code all over the place.

   - The usual small cleanups and improvements all over the place"

* tag 'irq-core-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (59 commits)
  genirq/irqdesc: Remove double locking in hwirq_show()
  genirq: Retain disable depth for managed interrupts across CPU hotplug
  genirq: Bump the size of the local variable for sprintf()
  genirq/manage: Use the correct lock guard in irq_set_irq_wake()
  genirq: Consistently use '%u' format specifier for unsigned int variables
  genirq: Ensure flags in lock guard is consistently initialized
  genirq: Fix inverted condition in handle_nested_irq()
  genirq/cpuhotplug: Fix up lock guards conversion brainf..t
  genirq: Use scoped_guard() to shut clang up
  genirq: Remove unused remove_percpu_irq()
  genirq: Remove irq_[get|put]_desc*()
  genirq/manage: Rework irq_set_irqchip_state()
  genirq/manage: Rework irq_get_irqchip_state()
  genirq/manage: Rework teardown_percpu_nmi()
  genirq/manage: Rework prepare_percpu_nmi()
  genirq/manage: Rework disable_percpu_irq()
  genirq/manage: Rework irq_percpu_is_enabled()
  genirq/manage: Rework enable_percpu_irq()
  genirq/manage: Rework irq_set_parent()
  genirq/manage: Rework can_request_irq()
  ...
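The lock-guard switch called out above is what most of the kernel/irq/proc.c hunks below do. As a rough sketch of the pattern (the demo_* names here are made up for illustration; guard(), scoped_guard() and the raw_spinlock_irq guard class are the ones the diff itself uses), an open-coded critical section and its guard-based equivalent look something like this:

/* Illustrative sketch only: demo_lock and demo_state are hypothetical. */
#include <linux/spinlock.h>
#include <linux/cleanup.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static int demo_state;

/* Open-coded locking: explicit flags variable and an unlock on every path. */
static int demo_read_old(void)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&demo_lock, flags);
	ret = demo_state;
	raw_spin_unlock_irqrestore(&demo_lock, flags);
	return ret;
}

/*
 * guard() drops the lock automatically when the scope is left, which is
 * what removes the goto/unlock boilerplate in the hunks below.
 */
static int demo_read_new(void)
{
	guard(raw_spinlock_irq)(&demo_lock);
	return demo_state;
}

The conversion also drops the irqsave/irqrestore variants: these /proc handlers run in process context with interrupts enabled, so the plain raw_spinlock_irq guard, which disables and re-enables interrupts around the critical section, is sufficient, and the on-stack flags variables go away.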
Diffstat (limited to 'kernel/irq/proc.c')
-rw-r--r--  kernel/irq/proc.c | 67
1 file changed, 25 insertions(+), 42 deletions(-)
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 8e29809de38d..29c2404e743b 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -81,20 +81,18 @@ static int show_irq_affinity(int type, struct seq_file *m)
 static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	unsigned long flags;
 	cpumask_var_t mask;
 
 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 		return -ENOMEM;
 
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	if (desc->affinity_hint)
-		cpumask_copy(mask, desc->affinity_hint);
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
+	scoped_guard(raw_spinlock_irq, &desc->lock) {
+		if (desc->affinity_hint)
+			cpumask_copy(mask, desc->affinity_hint);
+	}
 
 	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
 	free_cpumask_var(mask);
-
 	return 0;
 }
 
@@ -295,32 +293,26 @@ static int irq_spurious_proc_show(struct seq_file *m, void *v)
 
 #define MAX_NAMELEN 128
 
-static int name_unique(unsigned int irq, struct irqaction *new_action)
+static bool name_unique(unsigned int irq, struct irqaction *new_action)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
-	unsigned long flags;
-	int ret = 1;
 
-	raw_spin_lock_irqsave(&desc->lock, flags);
+	guard(raw_spinlock_irq)(&desc->lock);
 	for_each_action_of_desc(desc, action) {
 		if ((action != new_action) && action->name &&
-				!strcmp(new_action->name, action->name)) {
-			ret = 0;
-			break;
-		}
+		    !strcmp(new_action->name, action->name))
+			return false;
 	}
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	return ret;
+	return true;
 }
 
 void register_handler_proc(unsigned int irq, struct irqaction *action)
 {
-	char name [MAX_NAMELEN];
+	char name[MAX_NAMELEN];
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (!desc->dir || action->dir || !action->name ||
-	    !name_unique(irq, action))
+	if (!desc->dir || action->dir || !action->name || !name_unique(irq, action))
 		return;
 
 	snprintf(name, MAX_NAMELEN, "%s", action->name);
@@ -347,17 +339,16 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 	 * added, not when the descriptor is created, so multiple
 	 * tasks might try to register at the same time.
 	 */
-	mutex_lock(&register_lock);
+	guard(mutex)(&register_lock);
 
 	if (desc->dir)
-		goto out_unlock;
-
-	sprintf(name, "%d", irq);
+		return;
 
 	/* create /proc/irq/1234 */
+	sprintf(name, "%u", irq);
 	desc->dir = proc_mkdir(name, root_irq_dir);
 	if (!desc->dir)
-		goto out_unlock;
+		return;
 
 #ifdef CONFIG_SMP
 	umode_t umode = S_IRUGO;
@@ -366,31 +357,27 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 		umode |= S_IWUSR;
 
 	/* create /proc/irq/<irq>/smp_affinity */
-	proc_create_data("smp_affinity", umode, desc->dir,
-			 &irq_affinity_proc_ops, irqp);
+	proc_create_data("smp_affinity", umode, desc->dir, &irq_affinity_proc_ops, irqp);
 
 	/* create /proc/irq/<irq>/affinity_hint */
 	proc_create_single_data("affinity_hint", 0444, desc->dir,
-			irq_affinity_hint_proc_show, irqp);
+				irq_affinity_hint_proc_show, irqp);
 
 	/* create /proc/irq/<irq>/smp_affinity_list */
 	proc_create_data("smp_affinity_list", umode, desc->dir,
 			 &irq_affinity_list_proc_ops, irqp);
 
-	proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
-			irqp);
+	proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show, irqp);
 # ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
 	proc_create_single_data("effective_affinity", 0444, desc->dir,
-			irq_effective_aff_proc_show, irqp);
+				irq_effective_aff_proc_show, irqp);
 	proc_create_single_data("effective_affinity_list", 0444, desc->dir,
-			irq_effective_aff_list_proc_show, irqp);
+				irq_effective_aff_list_proc_show, irqp);
 # endif
 #endif
 	proc_create_single_data("spurious", 0444, desc->dir,
-			irq_spurious_proc_show, (void *)(long)irq);
+				irq_spurious_proc_show, (void *)(long)irq);
 
-out_unlock:
-	mutex_unlock(&register_lock);
 }
 
 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
@@ -468,7 +455,6 @@ int show_interrupts(struct seq_file *p, void *v)
 	int i = *(loff_t *) v, j;
 	struct irqaction *action;
 	struct irq_desc *desc;
-	unsigned long flags;
 
 	if (i > ACTUAL_NR_IRQS)
 		return 0;
@@ -487,13 +473,13 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_putc(p, '\n');
 	}
 
-	rcu_read_lock();
+	guard(rcu)();
 	desc = irq_to_desc(i);
 	if (!desc || irq_settings_is_hidden(desc))
-		goto outsparse;
+		return 0;
 
 	if (!desc->action || irq_desc_is_chained(desc) || !desc->kstat_irqs)
-		goto outsparse;
+		return 0;
 
 	seq_printf(p, "%*d:", prec, i);
 	for_each_online_cpu(j) {
@@ -503,7 +489,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 	seq_putc(p, ' ');
 
-	raw_spin_lock_irqsave(&desc->lock, flags);
+	guard(raw_spinlock_irq)(&desc->lock);
 	if (desc->irq_data.chip) {
 		if (desc->irq_data.chip->irq_print_chip)
 			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
@@ -532,9 +518,6 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	seq_putc(p, '\n');
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-outsparse:
-	rcu_read_unlock();
 	return 0;
 }
 #endif
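The guard()/scoped_guard() helpers this patch leans on are built on the compiler's cleanup attribute: a variable carries a destructor-like function that runs automatically when it goes out of scope, which is why the out_unlock/outsparse labels and the explicit unlock calls above can disappear. A minimal userspace analogue of that mechanism (a sketch only, not the kernel's include/linux/cleanup.h implementation; the demo_* and DEMO_GUARD names are invented) would be:

/* Standalone sketch of a cleanup-attribute lock guard (GCC/Clang). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Runs automatically when a variable declared with __cleanup__ leaves scope. */
static void demo_unlock(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

#define DEMO_GUARD(mutexp) \
	pthread_mutex_t *__guard __attribute__((__cleanup__(demo_unlock))) = (mutexp); \
	pthread_mutex_lock(__guard)

static int counter;

int demo_increment(void)
{
	DEMO_GUARD(&demo_mutex);
	return ++counter;	/* unlock runs after the return value is computed */
}

int main(void)
{
	printf("%d\n", demo_increment());
	return 0;
}

Building it with gcc -pthread is enough to try it out; the point is only that the unlock is tied to scope exit, so every early return in functions like show_interrupts() above releases the lock without a goto.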