Diffstat (limited to 'kernel/irq_work.c')
-rw-r--r-- | kernel/irq_work.c | 76
1 file changed, 45 insertions, 31 deletions
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 7b41d9aa3e9b..2940622da5b3 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -57,73 +57,87 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
-/*
- * Enqueue the irq_work @work on @cpu unless it's already pending
- * somewhere.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue_on(struct irq_work *work, int cpu)
+/* Enqueue on current CPU, work must already be claimed and preempt disabled */
+static void __irq_work_queue_local(struct irq_work *work, struct llist_head *list)
 {
-	struct llist_head *list;
+	bool empty;
 
-	/* All work should have been flushed before going offline */
-	WARN_ON_ONCE(cpu_is_offline(cpu));
+	empty = llist_add(&work->llnode, list);
 
-#ifdef CONFIG_SMP
+	if (empty &&
+	    (!(work->flags & IRQ_WORK_LAZY) ||
+	     tick_nohz_tick_stopped()))
+		arch_irq_work_raise();
+}
 
-	/* Arch remote IPI send/receive backend aren't NMI safe */
-	WARN_ON_ONCE(in_nmi());
+/* Enqueue the irq work @work on the current CPU */
+bool irq_work_queue(struct irq_work *work)
+{
+	struct llist_head *list;
 
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
+	/* Queue the entry and raise the IPI if needed. */
+	preempt_disable();
 	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
-		list = &per_cpu(lazy_list, cpu);
+		list = this_cpu_ptr(&lazy_list);
 	else
-		list = &per_cpu(raised_list, cpu);
-
-	if (llist_add(&work->llnode, list))
-		arch_send_call_function_single_ipi(cpu);
+		list = this_cpu_ptr(&raised_list);
 
-#else /* #ifdef CONFIG_SMP */
-	irq_work_queue(work);
-#endif /* #else #ifdef CONFIG_SMP */
+	__irq_work_queue_local(work, list);
+	preempt_enable();
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/* Enqueue the irq work @work on the current CPU */
-bool irq_work_queue(struct irq_work *work)
+/*
+ * Enqueue the irq_work @work on @cpu unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue_on(struct irq_work *work, int cpu)
 {
+#ifndef CONFIG_SMP
+	return irq_work_queue(work);
+
+#else /* CONFIG_SMP: */
 	struct llist_head *list;
 	bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
 
+	/* All work should have been flushed before going offline */
+	WARN_ON_ONCE(cpu_is_offline(cpu));
+
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
-	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 
 	lazy_work = work->flags & IRQ_WORK_LAZY;
 
 	if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
-		list = this_cpu_ptr(&lazy_list);
+		list = &per_cpu(lazy_list, cpu);
 	else
-		list = this_cpu_ptr(&raised_list);
+		list = &per_cpu(raised_list, cpu);
 
-	if (llist_add(&work->llnode, list)) {
-		if (!lazy_work || tick_nohz_tick_stopped())
-			arch_irq_work_raise();
+	if (cpu != smp_processor_id()) {
+		/* Arch remote IPI send/receive backend aren't NMI safe */
+		WARN_ON_ONCE(in_nmi());
+		if (llist_add(&work->llnode, list))
+			arch_send_call_function_single_ipi(cpu);
+	} else {
+		__irq_work_queue_local(work, list);
 	}
-
 	preempt_enable();
 
 	return true;
+#endif /* CONFIG_SMP */
 }
-EXPORT_SYMBOL_GPL(irq_work_queue);
+
 
 bool irq_work_needs_cpu(void)
 {
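
For context, a minimal caller-side sketch of how the irq_work interface touched by this patch is typically used; the names my_irq_work, my_work_func and my_queue_examples are hypothetical and not part of the change. The patch only reshuffles the queueing paths: irq_work_queue() now raises the local interrupt through the new __irq_work_queue_local() helper, while irq_work_queue_on() keeps the call-function IPI for a remote CPU, so callers are unaffected.

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Hypothetical callback: runs from the irq_work interrupt, or from the
 * tick when the work was flagged IRQ_WORK_LAZY. */
static void my_work_func(struct irq_work *work)
{
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work my_irq_work;

static void my_queue_examples(void)
{
	init_irq_work(&my_irq_work, my_work_func);

	/* Local CPU: takes the __irq_work_queue_local() path added above. */
	irq_work_queue(&my_irq_work);

	/* Remote CPU 1: still signalled via arch_send_call_function_single_ipi(). */
	irq_work_queue_on(&my_irq_work, 1);
}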