From aefb2f2e619b6c334bcb31de830aa00ba0b11129 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Tue, 21 Nov 2023 08:07:32 -0800 Subject: x86/bugs: Rename CONFIG_RETPOLINE => CONFIG_MITIGATION_RETPOLINE Step 5/10 of the namespace unification of CPU mitigations related Kconfig options. [ mingo: Converted a few more uses in comments/messages as well. ] Suggested-by: Josh Poimboeuf Signed-off-by: Breno Leitao Signed-off-by: Ingo Molnar Reviewed-by: Ariel Miculas Acked-by: Josh Poimboeuf Cc: Linus Torvalds Link: https://lore.kernel.org/r/20231121160740.1249350-6-leitao@debian.org --- kernel/trace/ring_buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 9286f88fcd32..9cb69332921d 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1156,7 +1156,7 @@ static inline u64 rb_time_stamp(struct trace_buffer *buffer) u64 ts; /* Skip retpolines :-( */ - if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local)) + if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local)) ts = trace_clock_local(); else ts = buffer->clock(); -- cgit v1.2.3 From 31c89007285d365aa36f71d8fb0701581c770a27 Mon Sep 17 00:00:00 2001 From: Audra Mitchell Date: Mon, 15 Jan 2024 12:08:22 -0500 Subject: workqueue.c: Increase workqueue name length Currently we limit the size of the workqueue name to 24 characters due to commit ecf6881ff349 ("workqueue: make workqueue->name[] fixed len"). Increase the size to 32 characters and print a warning when a requested name exceeds that limit. Signed-off-by: Audra Mitchell Signed-off-by: Tejun Heo --- kernel/workqueue.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 76e60faed892..8d9dec14b9bb 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -108,7 +108,7 @@ enum { RESCUER_NICE_LEVEL = MIN_NICE, HIGHPRI_NICE_LEVEL = MIN_NICE, - WQ_NAME_LEN = 24, + WQ_NAME_LEN = 32, }; /* @@ -4666,6 +4666,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, va_list args; struct workqueue_struct *wq; struct pool_workqueue *pwq; + int len; /* * Unbound && max_active == 1 used to imply ordered, which is no longer @@ -4692,9 +4693,12 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, } va_start(args, max_active); - vsnprintf(wq->name, sizeof(wq->name), fmt, args); + len = vsnprintf(wq->name, sizeof(wq->name), fmt, args); va_end(args); + if (len >= WQ_NAME_LEN) + pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n", wq->name); + max_active = max_active ?: WQ_DFL_ACTIVE; max_active = wq_clamp_max_active(max_active, flags, wq->name); -- cgit v1.2.3 From 85f0ab43f9de62a4b9c1b503b07f1c33e5a6d2ab Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Tue, 16 Jan 2024 17:19:27 +0100 Subject: kernel/workqueue: Bind rescuer to unbound cpumask for WQ_UNBOUND At creation time, the rescuers of unbound workqueues currently use cpu_possible_mask as their affinity, but this can be too wide when the workqueue's unbound cpumask has been set to a subset of cpu_possible_mask. Make new rescuers use their associated workqueue's unbound cpumask from the start.
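[ Editorial illustration: the mismatch fixed here arises for WQ_MEM_RECLAIM workqueues created while the unbound cpumask is restricted. A minimal, hypothetical sketch — the workqueue name and CPU range are illustrative, not taken from this patch:

	/*
	 * Assume the kernel was booted with workqueue.unbound_cpus=0-3, so
	 * the unbound cpumask is a strict subset of cpu_possible_mask.
	 * WQ_MEM_RECLAIM makes alloc_workqueue() spawn a rescuer kthread.
	 */
	struct workqueue_struct *wq;

	wq = alloc_workqueue("demo_unbound", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);

	/*
	 * Previously the rescuer was bound to cpu_possible_mask and could
	 * land on any CPU; with this patch, init_rescuer() binds it to
	 * wq->unbound_attrs->cpumask (CPUs 0-3 here) from the start.
	 */
]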
Signed-off-by: Juri Lelli Signed-off-by: Tejun Heo --- kernel/workqueue.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8d9dec14b9bb..ed442cefea7c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4652,7 +4652,10 @@ static int init_rescuer(struct workqueue_struct *wq) } wq->rescuer = rescuer; - kthread_bind_mask(rescuer->task, cpu_possible_mask); + if (wq->flags & WQ_UNBOUND) + kthread_bind_mask(rescuer->task, wq->unbound_attrs->cpumask); + else + kthread_bind_mask(rescuer->task, cpu_possible_mask); wake_up_process(rescuer->task); return 0; -- cgit v1.2.3 From 1a65a6d17cbc58e1aeffb2be962acce49efbef9c Mon Sep 17 00:00:00 2001 From: Xuewen Yan Date: Wed, 10 Jan 2024 11:27:24 +0800 Subject: workqueue: Add rcu lock check at the end of work item execution Currently the workqueue only checks the atomic and locking states after work execution ends. However, a work item may return without releasing an RCU read lock it acquired with rcu_read_lock(). The result is an RCU stall, but the stall warning cannot dump the offending work function because the work item has already finished. To quickly discover work items that fail to call rcu_read_unlock() after rcu_read_lock(), add an RCU lock check. Use rcu_preempt_depth() to check the work item's RCU status. Normally this value is 0; if it is greater than 0, the work item is still holding an RCU read lock, so print error info along with the work function. tj: Reworded the description for clarity. Minor formatting tweak. Signed-off-by: Xuewen Yan Reviewed-by: Lai Jiangshan Reviewed-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ed442cefea7c..aec3efbaaf93 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2640,11 +2640,12 @@ __acquires(&pool->lock) lock_map_release(&lockdep_map); lock_map_release(&pwq->wq->lockdep_map); - if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { - pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" + if (unlikely(in_atomic() || lockdep_depth(current) > 0 || + rcu_preempt_depth() > 0)) { + pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d/%d\n" " last function: %ps\n", - current->comm, preempt_count(), task_pid_nr(current), - worker->current_func); + current->comm, preempt_count(), rcu_preempt_depth(), + task_pid_nr(current), worker->current_func); debug_show_held_locks(current); dump_stack(); } -- cgit v1.2.3 From 7bd20b6b87183db2ebf789bcf9d0aa6d06a0defb Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Fri, 19 Jan 2024 12:54:39 -0300 Subject: workqueue: mark power efficient workqueue as unbound if nohz_full enabled A customer using nohz_full has experienced the following interruption: oslat-1004510 [018] timer_cancel: timer=0xffff90a7ca663cf8 oslat-1004510 [018] timer_expire_entry: timer=0xffff90a7ca663cf8 function=delayed_work_timer_fn now=4709188240 baseclk=4709188240 oslat-1004510 [018] workqueue_queue_work: work struct=0xffff90a7ca663cd8 function=fb_flashcursor workqueue=events_power_efficient req_cpu=8192 cpu=18 oslat-1004510 [018] workqueue_activate_work: work struct 0xffff90a7ca663cd8 oslat-1004510 [018] sched_wakeup: kworker/18:1:326 [120] CPU:018 oslat-1004510 [018] timer_expire_exit: timer=0xffff90a7ca663cf8 oslat-1004510 [018] irq_work_entry: vector=246 oslat-1004510 [018] irq_work_exit: vector=246 oslat-1004510 [018] tick_stop: success=0
dependency=SCHED oslat-1004510 [018] hrtimer_start: hrtimer=0xffff90a70009cb00 function=tick_sched_timer/0x0 ... oslat-1004510 [018] softirq_exit: vec=1 [action=TIMER] oslat-1004510 [018] softirq_entry: vec=7 [action=SCHED] oslat-1004510 [018] softirq_exit: vec=7 [action=SCHED] oslat-1004510 [018] tick_stop: success=0 dependency=SCHED oslat-1004510 [018] sched_switch: oslat:1004510 [120] R ==> kworker/18:1:326 [120] kworker/18:1-326 [018] workqueue_execute_start: work struct 0xffff90a7ca663cd8: function fb_flashcursor kworker/18:1-326 [018] workqueue_queue_work: work struct=0xffff9078f119eed0 function=drm_fb_helper_damage_work workqueue=events req_cpu=8192 cpu=18 kworker/18:1-326 [018] workqueue_activate_work: work struct 0xffff9078f119eed0 kworker/18:1-326 [018] timer_start: timer=0xffff90a7ca663cf8 function=delayed_work_timer_fn ... Set wq_power_efficient to true when nohz_full is enabled. This makes the power-efficient workqueues unbound, which allows their work items to be moved to housekeeping (HK) CPUs. Signed-off-by: Marcelo Tosatti Signed-off-by: Tejun Heo --- kernel/workqueue.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index aec3efbaaf93..8ca65665efe9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -6638,6 +6638,13 @@ void __init workqueue_init_early(void) wq_update_pod_attrs_buf = alloc_workqueue_attrs(); BUG_ON(!wq_update_pod_attrs_buf); + /* + * If nohz_full is enabled, set power efficient workqueue as unbound. + * This allows workqueue items to be moved to HK CPUs. + */ + if (housekeeping_enabled(HK_TYPE_TICK)) + wq_power_efficient = true; + /* initialize WQ_AFFN_SYSTEM pods */ pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL); pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL); -- cgit v1.2.3 From 22653244a9fed06f2f864b44808a85bf5c4e3ef2 Mon Sep 17 00:00:00 2001 From: Dawei Li Date: Mon, 22 Jan 2024 16:57:16 +0800 Subject: genirq: Deduplicate interrupt descriptor initialization alloc_desc() and early_irq_init() contain duplicated code to initialize interrupt descriptors. Replace that with a helper function.
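[ Editorial note: condensed, the new shared helper in the diff below boils down to an allocate-and-roll-back sequence used by both call sites. A simplified view — the locking, waitqueue, kobject and RCU setup are elided:

	static int init_desc(struct irq_desc *desc, int irq, int node,
			     unsigned int flags,
			     const struct cpumask *affinity,
			     struct module *owner)
	{
		desc->kstat_irqs = alloc_percpu(unsigned int);
		if (!desc->kstat_irqs)
			return -ENOMEM;

		if (alloc_masks(desc, node)) {
			/* roll back the first allocation on failure */
			free_percpu(desc->kstat_irqs);
			return -ENOMEM;
		}

		/* ... locks, defaults, irqd flags, resend init ... */
		desc_set_defaults(irq, desc, node, affinity, owner);
		return 0;
	}
]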
Suggested-by: Marc Zyngier Signed-off-by: Dawei Li Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240122085716.2999875-6-dawei.li@shingroup.cn --- kernel/irq/irqdesc.c | 112 +++++++++++++++++++++++++++++---------------------- 1 file changed, 64 insertions(+), 48 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 371eb1711d34..4c6b32318ce3 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -92,11 +92,23 @@ static void desc_smp_init(struct irq_desc *desc, int node, #endif } +static void free_masks(struct irq_desc *desc) +{ +#ifdef CONFIG_GENERIC_PENDING_IRQ + free_cpumask_var(desc->pending_mask); +#endif + free_cpumask_var(desc->irq_common_data.affinity); +#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK + free_cpumask_var(desc->irq_common_data.effective_affinity); +#endif +} + #else static inline int alloc_masks(struct irq_desc *desc, int node) { return 0; } static inline void desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } +static inline void free_masks(struct irq_desc *desc) { } #endif static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, @@ -165,6 +177,39 @@ static void delete_irq_desc(unsigned int irq) mas_erase(&mas); } +#ifdef CONFIG_SPARSE_IRQ +static const struct kobj_type irq_kobj_type; +#endif + +static int init_desc(struct irq_desc *desc, int irq, int node, + unsigned int flags, + const struct cpumask *affinity, + struct module *owner) +{ + desc->kstat_irqs = alloc_percpu(unsigned int); + if (!desc->kstat_irqs) + return -ENOMEM; + + if (alloc_masks(desc, node)) { + free_percpu(desc->kstat_irqs); + return -ENOMEM; + } + + raw_spin_lock_init(&desc->lock); + lockdep_set_class(&desc->lock, &irq_desc_lock_class); + mutex_init(&desc->request_mutex); + init_waitqueue_head(&desc->wait_for_threads); + desc_set_defaults(irq, desc, node, affinity, owner); + irqd_set(&desc->irq_data, flags); + irq_resend_init(desc); +#ifdef CONFIG_SPARSE_IRQ + kobject_init(&desc->kobj, &irq_kobj_type); + init_rcu_head(&desc->rcu); +#endif + + return 0; +} + #ifdef CONFIG_SPARSE_IRQ static void irq_kobj_release(struct kobject *kobj); @@ -384,21 +429,6 @@ struct irq_desc *irq_to_desc(unsigned int irq) EXPORT_SYMBOL_GPL(irq_to_desc); #endif -#ifdef CONFIG_SMP -static void free_masks(struct irq_desc *desc) -{ -#ifdef CONFIG_GENERIC_PENDING_IRQ - free_cpumask_var(desc->pending_mask); -#endif - free_cpumask_var(desc->irq_common_data.affinity); -#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK - free_cpumask_var(desc->irq_common_data.effective_affinity); -#endif -} -#else -static inline void free_masks(struct irq_desc *desc) { } -#endif - void irq_lock_sparse(void) { mutex_lock(&sparse_irq_lock); @@ -414,36 +444,19 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags, struct module *owner) { struct irq_desc *desc; + int ret; desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node); if (!desc) return NULL; - /* allocate based on nr_cpu_ids */ - desc->kstat_irqs = alloc_percpu(unsigned int); - if (!desc->kstat_irqs) - goto err_desc; - - if (alloc_masks(desc, node)) - goto err_kstat; - raw_spin_lock_init(&desc->lock); - lockdep_set_class(&desc->lock, &irq_desc_lock_class); - mutex_init(&desc->request_mutex); - init_rcu_head(&desc->rcu); - init_waitqueue_head(&desc->wait_for_threads); - - desc_set_defaults(irq, desc, node, affinity, owner); - irqd_set(&desc->irq_data, flags); - kobject_init(&desc->kobj, &irq_kobj_type); - irq_resend_init(desc); + ret = init_desc(desc, 
irq, node, flags, affinity, owner); + if (unlikely(ret)) { + kfree(desc); + return NULL; + } return desc; - -err_kstat: - free_percpu(desc->kstat_irqs); -err_desc: - kfree(desc); - return NULL; } static void irq_kobj_release(struct kobject *kobj) @@ -583,26 +596,29 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { int __init early_irq_init(void) { int count, i, node = first_online_node; - struct irq_desc *desc; + int ret; init_irq_default_affinity(); printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS); - desc = irq_desc; count = ARRAY_SIZE(irq_desc); for (i = 0; i < count; i++) { - desc[i].kstat_irqs = alloc_percpu(unsigned int); - alloc_masks(&desc[i], node); - raw_spin_lock_init(&desc[i].lock); - lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); - mutex_init(&desc[i].request_mutex); - init_waitqueue_head(&desc[i].wait_for_threads); - desc_set_defaults(i, &desc[i], node, NULL, NULL); - irq_resend_init(&desc[i]); + ret = init_desc(irq_desc + i, i, node, 0, NULL, NULL); + if (unlikely(ret)) + goto __free_desc_res; } + return arch_early_irq_init(); + +__free_desc_res: + while (--i >= 0) { + free_masks(irq_desc + i); + free_percpu(irq_desc[i].kstat_irqs); + } + + return ret; } struct irq_desc *irq_to_desc(unsigned int irq) -- cgit v1.2.3 From 8318d6a6362f5903edb4c904a8dd447e59be4ad1 Mon Sep 17 00:00:00 2001 From: Audra Mitchell Date: Thu, 25 Jan 2024 14:05:32 -0500 Subject: workqueue: Shorten events_freezable_power_efficient name Since we have set WQ_NAME_LEN to 32, shorten the name of events_freezable_power_efficient so that it does not trip the name length warning when the workqueue is created. Signed-off-by: Audra Mitchell Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8ca65665efe9..ee6aa1b897e0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -6706,7 +6706,7 @@ void __init workqueue_init_early(void) WQ_FREEZABLE, 0); system_power_efficient_wq = alloc_workqueue("events_power_efficient", WQ_POWER_EFFICIENT, 0); - system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", + system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_pwr_efficient", WQ_FREEZABLE | WQ_POWER_EFFICIENT, 0); BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || -- cgit v1.2.3 From 3832f390423302e373a1818f6cf8cb29ebf3a195 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 22 Jan 2024 13:42:41 +0100 Subject: genirq/irq_sim: Remove unused field from struct irq_sim_irq_ctx The irqnum field is unused. Remove it. Signed-off-by: Bartosz Golaszewski Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240122124243.44002-3-brgl@bgdev.pl --- kernel/irq/irq_sim.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c index dd76323ea3fd..f5ebb3ba6f9a 100644 --- a/kernel/irq/irq_sim.c +++ b/kernel/irq/irq_sim.c @@ -19,7 +19,6 @@ struct irq_sim_work_ctx { }; struct irq_sim_irq_ctx { - int irqnum; bool enabled; struct irq_sim_work_ctx *work_ctx; }; -- cgit v1.2.3 From 8dab7fd47e53865d37fce73c67bac97b41d5d64a Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 22 Jan 2024 13:42:42 +0100 Subject: genirq/irq_sim: Order headers alphabetically For better readability and easier maintenance, keep headers in alphabetical order.
Signed-off-by: Bartosz Golaszewski Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240122124243.44002-4-brgl@bgdev.pl --- kernel/irq/irq_sim.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c index f5ebb3ba6f9a..b0d50b48dbd1 100644 --- a/kernel/irq/irq_sim.c +++ b/kernel/irq/irq_sim.c @@ -4,10 +4,10 @@ * Copyright (C) 2020 Bartosz Golaszewski */ +#include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irq_sim.h> #include <linux/irq_work.h> -#include <linux/interrupt.h> #include <linux/slab.h> struct irq_sim_work_ctx { -- cgit v1.2.3 From ef7e585bf48013baabc00de1a15753dd7b626a2d Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 13 Jan 2024 19:06:15 -0800 Subject: cpu/hotplug: Delete an extraneous kernel-doc description struct cpuhp_cpu_state has an extraneous kernel-doc comment for @cpu. There is no struct member by that name, so remove the comment to prevent the kernel-doc warning: kernel/cpu.c:85: warning: Excess struct member 'cpu' description in 'cpuhp_cpu_state' Signed-off-by: Randy Dunlap Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240114030615.30441-1-rdunlap@infradead.org --- kernel/cpu.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index e6ec3ba4950b..f05937581f26 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -54,7 +54,6 @@ * @rollback: Perform a rollback * @single: Single callback invocation * @bringup: Single callback bringup or teardown selector - * @cpu: CPU number * @node: Remote CPU node; for multi-instance, do a * single entry callback for install/remove * @last: For multi-instance rollback, remember how far we got -- cgit v1.2.3 From effe6d278e06f85289b6ada0402a6d16ebc149a5 Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Tue, 16 Jan 2024 12:51:51 +0800 Subject: kernel/cpu: Convert snprintf() to sysfs_emit() Per filesystems/sysfs.rst, show() should only use sysfs_emit() or sysfs_emit_at() when formatting the value to be returned to user space. coccinelle complains that there are still a couple of functions that use snprintf(). Convert them to sysfs_emit(). No functional change intended. Signed-off-by: Li Zhijian Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240116045151.3940401-40-lizhijian@fujitsu.com --- kernel/cpu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index f05937581f26..ad7d0b00bce9 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -3004,7 +3004,7 @@ static ssize_t control_show(struct device *dev, return sysfs_emit(buf, "%d\n", cpu_smt_num_threads); #endif - return snprintf(buf, PAGE_SIZE - 2, "%s\n", state); + return sysfs_emit(buf, "%s\n", state); } static ssize_t control_store(struct device *dev, struct device_attribute *attr, @@ -3017,7 +3017,7 @@ static DEVICE_ATTR_RW(control); static ssize_t active_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active()); + return sysfs_emit(buf, "%d\n", sched_smt_active()); } static DEVICE_ATTR_RO(active); -- cgit v1.2.3 From 6a229b0e2ff6143b65ba4ef42bd71e29ffc2c16d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 26 Jan 2024 11:55:46 -1000 Subject: workqueue: Drop unnecessary kick_pool() in create_worker() After creating a new worker, create_worker() is calling kick_pool() to wake up the new worker task. However, as kick_pool() doesn't do anything if there is no work pending, it also calls wake_up_process() explicitly. There's no reason to call kick_pool() at all.
wake_up_process() is enough by itself. Drop the unnecessary kick_pool() call. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ee6aa1b897e0..b6b690a17f7c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2217,12 +2217,11 @@ static struct worker *create_worker(struct worker_pool *pool) worker->pool->nr_workers++; worker_enter_idle(worker); - kick_pool(pool); /* * @worker is waiting on a completion in kthread() and will trigger hung - * check if not woken up soon. As kick_pool() might not have waken it - * up, wake it up explicitly once more. + * check if not woken up soon. As kick_pool() is noop if @pool is empty, + * wake it up explicitly. */ wake_up_process(worker->task); -- cgit v1.2.3 From e563d0a7cdc1890ff36bb177b5c8c2854d881e4d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 26 Jan 2024 11:55:50 -1000 Subject: workqueue: Break up enum definitions and give names to the types workqueue is collecting different sorts of enums into a single unnamed enum type which can increase confusion around enum width. Also, unnamed enums can't be accessed from BPF. Let's break up enum definitions according to their purposes and give them type names. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b6b690a17f7c..45d0a784ba4f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -56,7 +56,7 @@ #include "workqueue_internal.h" -enum { +enum worker_pool_flags { /* * worker_pool flags * @@ -75,7 +75,9 @@ enum { */ POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */ POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ +}; +enum worker_flags { /* worker flags */ WORKER_DIE = 1 << 1, /* die die die */ WORKER_IDLE = 1 << 2, /* is idle */ @@ -86,7 +88,9 @@ enum { WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE | WORKER_UNBOUND | WORKER_REBOUND, +}; +enum wq_internal_consts { NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */ UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */ -- cgit v1.2.3 From aafd753555c0ecb9c7ce11ff14429a34c8c0a14b Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 22 Jan 2024 13:42:43 +0100 Subject: genirq/irq_sim: Shrink code by using helpers Use the new __free() mechanism to remove all gotos and simplify the error paths. 
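[ Editorial note: for readers new to the mechanism — __free(), from <linux/cleanup.h>, attaches a cleanup function that runs automatically when the variable goes out of scope, and no_free_ptr() disarms it when ownership is handed off. A minimal, hypothetical sketch; struct foo and setup_foo() are invented for illustration:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static struct foo *make_foo(void)
	{
		struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;

		if (setup_foo(f))	/* any early return frees f automatically */
			return NULL;

		return no_free_ptr(f);	/* success: disarm cleanup and hand off */
	}
]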
Signed-off-by: Bartosz Golaszewski Signed-off-by: Ingo Molnar Cc: Thomas Gleixner Cc: Nathan Chancellor Link: https://lore.kernel.org/r/20240122124243.44002-5-brgl@bgdev.pl --- kernel/irq/irq_sim.c | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c index b0d50b48dbd1..38d6ae651ac7 100644 --- a/kernel/irq/irq_sim.c +++ b/kernel/irq/irq_sim.c @@ -4,6 +4,7 @@ * Copyright (C) 2020 Bartosz Golaszewski */ +#include <linux/cleanup.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irq_sim.h> @@ -163,33 +164,27 @@ static const struct irq_domain_ops irq_sim_domain_ops = { struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode, unsigned int num_irqs) { - struct irq_sim_work_ctx *work_ctx; + struct irq_sim_work_ctx *work_ctx __free(kfree) = + kmalloc(sizeof(*work_ctx), GFP_KERNEL); - work_ctx = kmalloc(sizeof(*work_ctx), GFP_KERNEL); if (!work_ctx) - goto err_out; + return ERR_PTR(-ENOMEM); - work_ctx->pending = bitmap_zalloc(num_irqs, GFP_KERNEL); - if (!work_ctx->pending) - goto err_free_work_ctx; + unsigned long *pending __free(bitmap) = bitmap_zalloc(num_irqs, GFP_KERNEL); + if (!pending) + return ERR_PTR(-ENOMEM); work_ctx->domain = irq_domain_create_linear(fwnode, num_irqs, &irq_sim_domain_ops, work_ctx); if (!work_ctx->domain) - goto err_free_bitmap; + return ERR_PTR(-ENOMEM); work_ctx->irq_count = num_irqs; work_ctx->work = IRQ_WORK_INIT_HARD(irq_sim_handle_irq); + work_ctx->pending = no_free_ptr(pending); - return work_ctx->domain; - -err_free_bitmap: - bitmap_free(work_ctx->pending); -err_free_work_ctx: - kfree(work_ctx); -err_out: - return ERR_PTR(-ENOMEM); + return no_free_ptr(work_ctx)->domain; } EXPORT_SYMBOL_GPL(irq_domain_create_sim); -- cgit v1.2.3 From a045a272d887575da17ad86d6573e82871b50c27 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: workqueue: Move pwq->max_active to wq->max_active max_active is a workqueue-wide setting and the configured value is stored in wq->saved_max_active; however, the effective value was stored in pwq->max_active. While this is harmless, it makes the max_active update process more complicated and gets in the way of the planned max_active semantic updates for unbound workqueues. This patch moves pwq->max_active to wq->max_active. This simplifies the code and makes freezing and noop max_active updates cheaper too. No user-visible behavior change is intended. As wq->max_active is updated while holding wq mutex but read without any locking, it now uses WRITE/READ_ONCE(). A new locking rule WO is added for it. v2: wq->max_active now uses WRITE/READ_ONCE() as suggested by Lai. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 133 ++++++++++++++++++++++++++--------------------------- 1 file changed, 66 insertions(+), 67 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 45d0a784ba4f..a23a35dbcd74 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -147,6 +147,9 @@ enum wq_internal_consts { * * WR: wq->mutex protected for writes. RCU protected for reads. * + * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read + * with READ_ONCE() without locking. + * * MD: wq_mayday_lock protected. * * WD: Used internally by the watchdog. @@ -254,7 +257,6 @@ struct pool_workqueue { * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
*/ int nr_active; /* L: nr of active works */ - int max_active; /* L: max active works */ struct list_head inactive_works; /* L: inactive works */ struct list_head pwqs_node; /* WR: node on wq->pwqs */ struct list_head mayday_node; /* MD: node on wq->maydays */ @@ -302,7 +304,8 @@ struct workqueue_struct { struct worker *rescuer; /* MD: rescue worker */ int nr_drainers; /* WQ: drain in progress */ - int saved_max_active; /* WQ: saved pwq max_active */ + int max_active; /* WO: max active works */ + int saved_max_active; /* WQ: saved max_active */ struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */ struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */ @@ -1496,7 +1499,7 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_ pwq->nr_active--; if (!list_empty(&pwq->inactive_works)) { /* one down, submit an inactive one */ - if (pwq->nr_active < pwq->max_active) + if (pwq->nr_active < READ_ONCE(pwq->wq->max_active)) pwq_activate_first_inactive(pwq); } } @@ -1797,7 +1800,13 @@ retry: pwq->nr_in_flight[pwq->work_color]++; work_flags = work_color_to_flags(pwq->work_color); - if (likely(pwq->nr_active < pwq->max_active)) { + /* + * Limit the number of concurrently active work items to max_active. + * @work must also queue behind existing inactive work items to maintain + * ordering when max_active changes. See wq_adjust_max_active(). + */ + if (list_empty(&pwq->inactive_works) && + pwq->nr_active < READ_ONCE(pwq->wq->max_active)) { if (list_empty(&pool->worklist)) pool->watchdog_ts = jiffies; @@ -4146,50 +4155,6 @@ static void pwq_release_workfn(struct kthread_work *work) } } -/** - * pwq_adjust_max_active - update a pwq's max_active to the current setting - * @pwq: target pool_workqueue - * - * If @pwq isn't freezing, set @pwq->max_active to the associated - * workqueue's saved_max_active and activate inactive work items - * accordingly. If @pwq is freezing, clear @pwq->max_active to zero. - */ -static void pwq_adjust_max_active(struct pool_workqueue *pwq) -{ - struct workqueue_struct *wq = pwq->wq; - bool freezable = wq->flags & WQ_FREEZABLE; - unsigned long flags; - - /* for @wq->saved_max_active */ - lockdep_assert_held(&wq->mutex); - - /* fast exit for non-freezable wqs */ - if (!freezable && pwq->max_active == wq->saved_max_active) - return; - - /* this function can be called during early boot w/ irq disabled */ - raw_spin_lock_irqsave(&pwq->pool->lock, flags); - - /* - * During [un]freezing, the caller is responsible for ensuring that - * this function is called at least once after @workqueue_freezing - * is updated and visible. 
- */ - if (!freezable || !workqueue_freezing) { - pwq->max_active = wq->saved_max_active; - - while (!list_empty(&pwq->inactive_works) && - pwq->nr_active < pwq->max_active) - pwq_activate_first_inactive(pwq); - - kick_pool(pwq->pool); - } else { - pwq->max_active = 0; - } - - raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); -} - /* initialize newly allocated @pwq which is associated with @wq and @pool */ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, struct worker_pool *pool) @@ -4222,9 +4187,6 @@ static void link_pwq(struct pool_workqueue *pwq) /* set the matching work_color */ pwq->work_color = wq->work_color; - /* sync max_active to the current setting */ - pwq_adjust_max_active(pwq); - /* link in @pwq */ list_add_rcu(&pwq->pwqs_node, &wq->pwqs); } @@ -4665,6 +4627,52 @@ static int init_rescuer(struct workqueue_struct *wq) return 0; } +/** + * wq_adjust_max_active - update a wq's max_active to the current setting + * @wq: target workqueue + * + * If @wq isn't freezing, set @wq->max_active to the saved_max_active and + * activate inactive work items accordingly. If @wq is freezing, clear + * @wq->max_active to zero. + */ +static void wq_adjust_max_active(struct workqueue_struct *wq) +{ + struct pool_workqueue *pwq; + + lockdep_assert_held(&wq->mutex); + + if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) { + WRITE_ONCE(wq->max_active, 0); + return; + } + + if (wq->max_active == wq->saved_max_active) + return; + + /* + * Update @wq->max_active and then kick inactive work items if more + * active work items are allowed. This doesn't break work item ordering + * because new work items are always queued behind existing inactive + * work items if there are any. + */ + WRITE_ONCE(wq->max_active, wq->saved_max_active); + + for_each_pwq(pwq, wq) { + unsigned long flags; + + /* this function can be called during early boot w/ irq disabled */ + raw_spin_lock_irqsave(&pwq->pool->lock, flags); + + while (!list_empty(&pwq->inactive_works) && + pwq->nr_active < wq->max_active) + pwq_activate_first_inactive(pwq); + + kick_pool(pwq->pool); + + raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); + } +} + __printf(1, 4) struct workqueue_struct *alloc_workqueue(const char *fmt, unsigned int flags, @@ -4672,7 +4680,6 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, { va_list args; struct workqueue_struct *wq; - struct pool_workqueue *pwq; int len; /* @@ -4711,6 +4718,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, /* init wq */ wq->flags = flags; + wq->max_active = max_active; wq->saved_max_active = max_active; mutex_init(&wq->mutex); atomic_set(&wq->nr_pwqs_to_flush, 0); @@ -4739,8 +4747,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, mutex_lock(&wq_pool_mutex); mutex_lock(&wq->mutex); - for_each_pwq(pwq, wq) - pwq_adjust_max_active(pwq); + wq_adjust_max_active(wq); mutex_unlock(&wq->mutex); list_add_tail_rcu(&wq->list, &workqueues); @@ -4878,8 +4885,6 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); */ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) { - struct pool_workqueue *pwq; - /* disallow meddling with max_active for ordered workqueues */ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) return; @@ -4890,9 +4895,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) wq->flags &= ~__WQ_ORDERED; wq->saved_max_active = max_active; - - for_each_pwq(pwq, wq) - pwq_adjust_max_active(pwq); + wq_adjust_max_active(wq); mutex_unlock(&wq->mutex); } @@ -5139,8 +5142,8 @@ static void 
show_pwq(struct pool_workqueue *pwq) pr_info(" pwq %d:", pool->id); pr_cont_pool_info(pool); - pr_cont(" active=%d/%d refcnt=%d%s\n", - pwq->nr_active, pwq->max_active, pwq->refcnt, + pr_cont(" active=%d refcnt=%d%s\n", + pwq->nr_active, pwq->refcnt, !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); hash_for_each(pool->busy_hash, bkt, worker, hentry) { @@ -5688,7 +5691,6 @@ EXPORT_SYMBOL_GPL(work_on_cpu_safe_key); void freeze_workqueues_begin(void) { struct workqueue_struct *wq; - struct pool_workqueue *pwq; mutex_lock(&wq_pool_mutex); @@ -5697,8 +5699,7 @@ void freeze_workqueues_begin(void) list_for_each_entry(wq, &workqueues, list) { mutex_lock(&wq->mutex); - for_each_pwq(pwq, wq) - pwq_adjust_max_active(pwq); + wq_adjust_max_active(wq); mutex_unlock(&wq->mutex); } @@ -5763,7 +5764,6 @@ out_unlock: void thaw_workqueues(void) { struct workqueue_struct *wq; - struct pool_workqueue *pwq; mutex_lock(&wq_pool_mutex); @@ -5775,8 +5775,7 @@ void thaw_workqueues(void) /* restore max_active and repopulate worklist */ list_for_each_entry(wq, &workqueues, list) { mutex_lock(&wq->mutex); - for_each_pwq(pwq, wq) - pwq_adjust_max_active(pwq); + wq_adjust_max_active(wq); mutex_unlock(&wq->mutex); } -- cgit v1.2.3 From afa87ce85379e2d93863fce595afdb5771a84004 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: workqueue: Factor out pwq_is_empty() "!pwq->nr_active && list_empty(&pwq->inactive_works)" test is repeated multiple times. Let's factor it out into pwq_is_empty(). Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a23a35dbcd74..171d0e6d29f6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1460,6 +1460,11 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq) } } +static bool pwq_is_empty(struct pool_workqueue *pwq) +{ + return !pwq->nr_active && list_empty(&pwq->inactive_works); +} + static void pwq_activate_inactive_work(struct work_struct *work) { struct pool_workqueue *pwq = get_work_pwq(work); @@ -3329,7 +3334,7 @@ reflush: bool drained; raw_spin_lock_irq(&pwq->pool->lock); - drained = !pwq->nr_active && list_empty(&pwq->inactive_works); + drained = pwq_is_empty(pwq); raw_spin_unlock_irq(&pwq->pool->lock); if (drained) @@ -4779,7 +4784,7 @@ static bool pwq_busy(struct pool_workqueue *pwq) if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) return true; - if (pwq->nr_active || !list_empty(&pwq->inactive_works)) + if (!pwq_is_empty(pwq)) return true; return false; @@ -5217,7 +5222,7 @@ void show_one_workqueue(struct workqueue_struct *wq) unsigned long flags; for_each_pwq(pwq, wq) { - if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { + if (!pwq_is_empty(pwq)) { idle = false; break; } @@ -5229,7 +5234,7 @@ void show_one_workqueue(struct workqueue_struct *wq) for_each_pwq(pwq, wq) { raw_spin_lock_irqsave(&pwq->pool->lock, flags); - if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { + if (!pwq_is_empty(pwq)) { /* * Defer printing to avoid deadlocks in console * drivers that queue work while holding locks -- cgit v1.2.3 From 4c6380305d21e36581b451f7337a36c93b64e050 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: workqueue: Replace pwq_activate_inactive_work() with [__]pwq_activate_work() To prepare for unbound nr_active handling improvements, move work activation part of pwq_activate_inactive_work() into 
__pwq_activate_work() and add pwq_activate_work() which tests WORK_STRUCT_INACTIVE and updates nr_active. pwq_activate_first_inactive() and try_to_grab_pending() are updated to use pwq_activate_work(). The latter conversion is functionally identical. For the former, this conversion adds an unnecessary WORK_STRUCT_INACTIVE testing. This is temporary and will be removed by the next patch. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 171d0e6d29f6..403f3a14166d 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1465,16 +1465,36 @@ static bool pwq_is_empty(struct pool_workqueue *pwq) return !pwq->nr_active && list_empty(&pwq->inactive_works); } -static void pwq_activate_inactive_work(struct work_struct *work) +static void __pwq_activate_work(struct pool_workqueue *pwq, + struct work_struct *work) { - struct pool_workqueue *pwq = get_work_pwq(work); - trace_workqueue_activate_work(work); if (list_empty(&pwq->pool->worklist)) pwq->pool->watchdog_ts = jiffies; move_linked_works(work, &pwq->pool->worklist, NULL); __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work)); +} + +/** + * pwq_activate_work - Activate a work item if inactive + * @pwq: pool_workqueue @work belongs to + * @work: work item to activate + * + * Returns %true if activated. %false if already active. + */ +static bool pwq_activate_work(struct pool_workqueue *pwq, + struct work_struct *work) +{ + struct worker_pool *pool = pwq->pool; + + lockdep_assert_held(&pool->lock); + + if (!(*work_data_bits(work) & WORK_STRUCT_INACTIVE)) + return false; + pwq->nr_active++; + __pwq_activate_work(pwq, work); + return true; } static void pwq_activate_first_inactive(struct pool_workqueue *pwq) @@ -1482,7 +1502,7 @@ static void pwq_activate_first_inactive(struct pool_workqueue *pwq) struct work_struct *work = list_first_entry(&pwq->inactive_works, struct work_struct, entry); - pwq_activate_inactive_work(work); + pwq_activate_work(pwq, work); } /** @@ -1620,8 +1640,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, * management later on and cause stall. Make sure the work * item is activated before grabbing. */ - if (*work_data_bits(work) & WORK_STRUCT_INACTIVE) - pwq_activate_inactive_work(work); + pwq_activate_work(pwq, work); list_del_init(&work->entry); pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); -- cgit v1.2.3 From 1c270b79ce0b8290f146255ea9057243f6dd3c17 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: workqueue: Move nr_active handling into helpers __queue_work(), pwq_dec_nr_in_flight() and wq_adjust_max_active() were open-coding nr_active handling, which is fine given that the operations are trivial. However, the planned unbound nr_active update will make them more complicated, so let's move them into helpers. - pwq_tryinc_nr_active() is added. It increments nr_active if under max_active limit and return a boolean indicating whether inc was successful. Note that the function is structured to accommodate future changes. __queue_work() is updated to use the new helper. - pwq_activate_first_inactive() is updated to use pwq_tryinc_nr_active() and thus no longer assumes that nr_active is under max_active and returns a boolean to indicate whether a work item has been activated. - wq_adjust_max_active() no longer tests directly whether a work item can be activated. 
Instead, it's updated to use the return value of pwq_activate_first_inactive() to tell whether a work item has been activated. - nr_active decrement and activating the first inactive work item is factored into pwq_dec_nr_active(). v3: - WARN_ON_ONCE(!WORK_STRUCT_INACTIVE) added to __pwq_activate_work() as now we're calling the function unconditionally from pwq_activate_first_inactive(). v2: - wq->max_active now uses WRITE/READ_ONCE() as suggested by Lai. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 86 ++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 67 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 403f3a14166d..b3d818d46e99 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1468,11 +1468,14 @@ static bool pwq_is_empty(struct pool_workqueue *pwq) static void __pwq_activate_work(struct pool_workqueue *pwq, struct work_struct *work) { + unsigned long *wdb = work_data_bits(work); + + WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE)); trace_workqueue_activate_work(work); if (list_empty(&pwq->pool->worklist)) pwq->pool->watchdog_ts = jiffies; move_linked_works(work, &pwq->pool->worklist, NULL); - __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work)); + __clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb); } /** @@ -1497,12 +1500,66 @@ static bool pwq_activate_work(struct pool_workqueue *pwq, return true; } -static void pwq_activate_first_inactive(struct pool_workqueue *pwq) +/** + * pwq_tryinc_nr_active - Try to increment nr_active for a pwq + * @pwq: pool_workqueue of interest + * + * Try to increment nr_active for @pwq. Returns %true if an nr_active count is + * successfully obtained. %false otherwise. + */ +static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq) +{ + struct workqueue_struct *wq = pwq->wq; + struct worker_pool *pool = pwq->pool; + bool obtained; + + lockdep_assert_held(&pool->lock); + + obtained = pwq->nr_active < READ_ONCE(wq->max_active); + + if (obtained) + pwq->nr_active++; + return obtained; +} + +/** + * pwq_activate_first_inactive - Activate the first inactive work item on a pwq + * @pwq: pool_workqueue of interest + * + * Activate the first inactive work item of @pwq if available and allowed by + * max_active limit. + * + * Returns %true if an inactive work item has been activated. %false if no + * inactive work item is found or max_active limit is reached. + */ +static bool pwq_activate_first_inactive(struct pool_workqueue *pwq) +{ + struct work_struct *work = + list_first_entry_or_null(&pwq->inactive_works, + struct work_struct, entry); + + if (work && pwq_tryinc_nr_active(pwq)) { + __pwq_activate_work(pwq, work); + return true; + } else { + return false; + } +} + +/** + * pwq_dec_nr_active - Retire an active count + * @pwq: pool_workqueue of interest + * + * Decrement @pwq's nr_active and try to activate the first inactive work item. 
+ */ +static void pwq_dec_nr_active(struct pool_workqueue *pwq) { - struct work_struct *work = list_first_entry(&pwq->inactive_works, - struct work_struct, entry); + struct worker_pool *pool = pwq->pool; - pwq_activate_work(pwq, work); + lockdep_assert_held(&pool->lock); + + pwq->nr_active--; + pwq_activate_first_inactive(pwq); } /** @@ -1520,14 +1577,8 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_ { int color = get_work_color(work_data); - if (!(work_data & WORK_STRUCT_INACTIVE)) { - pwq->nr_active--; - if (!list_empty(&pwq->inactive_works)) { - /* one down, submit an inactive one */ - if (pwq->nr_active < READ_ONCE(pwq->wq->max_active)) - pwq_activate_first_inactive(pwq); - } - } + if (!(work_data & WORK_STRUCT_INACTIVE)) + pwq_dec_nr_active(pwq); pwq->nr_in_flight[color]--; @@ -1829,13 +1880,11 @@ retry: * @work must also queue behind existing inactive work items to maintain * ordering when max_active changes. See wq_adjust_max_active(). */ - if (list_empty(&pwq->inactive_works) && - pwq->nr_active < READ_ONCE(pwq->wq->max_active)) { + if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq)) { if (list_empty(&pool->worklist)) pool->watchdog_ts = jiffies; trace_workqueue_activate_work(work); - pwq->nr_active++; insert_work(pwq, work, &pool->worklist, work_flags); kick_pool(pool); } else { @@ -4687,9 +4736,8 @@ static void wq_adjust_max_active(struct workqueue_struct *wq) /* this function can be called during early boot w/ irq disabled */ raw_spin_lock_irqsave(&pwq->pool->lock, flags); - while (!list_empty(&pwq->inactive_works) && - pwq->nr_active < wq->max_active) - pwq_activate_first_inactive(pwq); + while (pwq_activate_first_inactive(pwq)) + ; kick_pool(pwq->pool); -- cgit v1.2.3 From c5404d4e6df6faba1007544b5f4e62c7c14416dd Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: workqueue: Make wq_adjust_max_active() round-robin pwqs while activating wq_adjust_max_active() needs to activate work items after max_active is increased. Previously, it did that by visiting each pwq once activating all that could be activated. While this makes sense with per-pwq nr_active, nr_active will be shared across multiple pwqs for unbound wqs. Then, we'd want to round-robin through pwqs to be fairer. In preparation, this patch makes wq_adjust_max_active() round-robin pwqs while activating. While the activation ordering changes, this shouldn't cause user-noticeable behavior changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b3d818d46e99..489f70846ac9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4710,7 +4710,7 @@ static int init_rescuer(struct workqueue_struct *wq) */ static void wq_adjust_max_active(struct workqueue_struct *wq) { - struct pool_workqueue *pwq; + bool activated; lockdep_assert_held(&wq->mutex); @@ -4730,19 +4730,26 @@ static void wq_adjust_max_active(struct workqueue_struct *wq) */ WRITE_ONCE(wq->max_active, wq->saved_max_active); - for_each_pwq(pwq, wq) { - unsigned long flags; - - /* this function can be called during early boot w/ irq disabled */ - raw_spin_lock_irqsave(&pwq->pool->lock, flags); - - while (pwq_activate_first_inactive(pwq)) - ; + /* + * Round-robin through pwq's activating the first inactive work item + * until max_active is filled. 
+ */ + do { + struct pool_workqueue *pwq; - kick_pool(pwq->pool); + activated = false; + for_each_pwq(pwq, wq) { + unsigned long flags; - raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); - } + /* can be called during early boot w/ irq disabled */ + raw_spin_lock_irqsave(&pwq->pool->lock, flags); + if (pwq_activate_first_inactive(pwq)) { + activated = true; + kick_pool(pwq->pool); + } + raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); + } + } while (activated); } __printf(1, 4) -- cgit v1.2.3 From 9f66cff212bb3c1cd25996aaa0dfd0c9e9d8baab Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: workqueue: RCU protect wq->dfl_pwq and implement accessors for it wq->cpu_pwq is RCU protected but wq->dfl_pwq isn't. This is okay because currently wq->dfl_pwq is only accessed to install it into wq->cpu_pwq, which doesn't require RCU access. However, we want to be able to access wq->dfl_pwq under RCU in the future to access its __pod_cpumask, and the code can be made easier to read by making the two pwq fields behave in the same way. - Make wq->dfl_pwq RCU protected. - Add unbound_pwq_slot() and unbound_pwq() which can access both ->dfl_pwq and ->cpu_pwq. The former returns the double pointer that can be used to access and update the pwqs. The latter performs a locking check and dereferences the double pointer. - pwq accesses and updates are converted to use unbound_pwq[_slot](). Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 64 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 40 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 489f70846ac9..1579b8c9a579 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -308,7 +308,7 @@ struct workqueue_struct { int saved_max_active; /* WQ: saved max_active */ struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */ - struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */ + struct pool_workqueue __rcu *dfl_pwq; /* PW: only for unbound wqs */ #ifdef CONFIG_SYSFS struct wq_device *wq_dev; /* I: for sysfs interface */ @@ -639,6 +639,23 @@ static int worker_pool_assign_id(struct worker_pool *pool) return ret; } +static struct pool_workqueue __rcu ** +unbound_pwq_slot(struct workqueue_struct *wq, int cpu) +{ + if (cpu >= 0) + return per_cpu_ptr(wq->cpu_pwq, cpu); + else + return &wq->dfl_pwq; +} + +/* @cpu < 0 for dfl_pwq */ +static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu) +{ + return rcu_dereference_check(*unbound_pwq_slot(wq, cpu), + lockdep_is_held(&wq_pool_mutex) || + lockdep_is_held(&wq->mutex)); +} + static unsigned int work_color_to_flags(int color) { return color << WORK_STRUCT_COLOR_SHIFT; @@ -4328,10 +4345,11 @@ static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu, "possible intersect\n"); } -/* install @pwq into @wq's cpu_pwq and return the old pwq */ +/* install @pwq into @wq and return the old pwq, @cpu < 0 for dfl_pwq */ static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq, int cpu, struct pool_workqueue *pwq) { + struct pool_workqueue __rcu **slot = unbound_pwq_slot(wq, cpu); struct pool_workqueue *old_pwq; lockdep_assert_held(&wq_pool_mutex); @@ -4340,8 +4358,8 @@ static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq, /* link_pwq() can handle duplicate calls */ link_pwq(pwq); - old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu)); -
rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq); + old_pwq = rcu_access_pointer(*slot); + rcu_assign_pointer(*slot, pwq); return old_pwq; } @@ -4441,14 +4459,11 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); - /* save the previous pwq and install the new one */ + /* save the previous pwqs and install the new ones */ for_each_possible_cpu(cpu) ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu, ctx->pwq_tbl[cpu]); - - /* @dfl_pwq might not have been used, ensure it's linked */ - link_pwq(ctx->dfl_pwq); - swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); + ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq); mutex_unlock(&ctx->wq->mutex); } @@ -4558,9 +4573,7 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu, /* nothing to do if the target cpumask matches the current pwq */ wq_calc_pod_cpumask(target_attrs, cpu, off_cpu); - pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu), - lockdep_is_held(&wq_pool_mutex)); - if (wqattrs_equal(target_attrs, pwq->pool->attrs)) + if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs)) return; /* create a new pwq */ @@ -4578,10 +4591,11 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu, use_dfl_pwq: mutex_lock(&wq->mutex); - raw_spin_lock_irq(&wq->dfl_pwq->pool->lock); - get_pwq(wq->dfl_pwq); - raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock); - old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq); + pwq = unbound_pwq(wq, -1); + raw_spin_lock_irq(&pwq->pool->lock); + get_pwq(pwq); + raw_spin_unlock_irq(&pwq->pool->lock); + old_pwq = install_unbound_pwq(wq, cpu, pwq); out_unlock: mutex_unlock(&wq->mutex); put_pwq_unlocked(old_pwq); @@ -4619,10 +4633,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq) cpus_read_lock(); if (wq->flags & __WQ_ORDERED) { + struct pool_workqueue *dfl_pwq; + ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); /* there should only be single pwq for ordering guarantee */ - WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || - wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), + dfl_pwq = rcu_access_pointer(wq->dfl_pwq); + WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node || + wq->pwqs.prev != &dfl_pwq->pwqs_node), "ordering guarantee broken for workqueue %s\n", wq->name); } else { ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); @@ -4856,7 +4873,7 @@ static bool pwq_busy(struct pool_workqueue *pwq) if (pwq->nr_in_flight[i]) return true; - if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) + if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1)) return true; if (!pwq_is_empty(pwq)) return true; @@ -4940,13 +4957,12 @@ void destroy_workqueue(struct workqueue_struct *wq) rcu_read_lock(); for_each_possible_cpu(cpu) { - pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu)); - RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL); - put_pwq_unlocked(pwq); + put_pwq_unlocked(unbound_pwq(wq, cpu)); + RCU_INIT_POINTER(*unbound_pwq_slot(wq, cpu), NULL); } - put_pwq_unlocked(wq->dfl_pwq); - wq->dfl_pwq = NULL; + put_pwq_unlocked(unbound_pwq(wq, -1)); + RCU_INIT_POINTER(*unbound_pwq_slot(wq, -1), NULL); rcu_read_unlock(); } -- cgit v1.2.3 From dd6c3c5441263723305a9c52c5ccc899a4653000 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: workqueue: Move pwq_dec_nr_in_flight() to the end of work item handling The planned shared nr_active handling for unbound workqueues will make pwq_dec_nr_active() sometimes drop the pool 
lock temporarily to acquire other pool locks, which is necessary as retirement of an nr_active count from one pool may need to kick off an inactive work item in another pool. This patch moves the pwq_dec_nr_in_flight() call in try_to_grab_pending() to the end of work item handling so that work item state changes stay atomic. process_one_work(), which is the other user of pwq_dec_nr_in_flight(), already calls it at the end of work item handling. Comments are added to both call sites and pwq_dec_nr_in_flight(). This shouldn't cause any behavior changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1579b8c9a579..b5aba0e5a699 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1587,6 +1587,11 @@ static void pwq_dec_nr_active(struct pool_workqueue *pwq) * A work either has completed or is removed from pending queue, * decrement nr_in_flight of its pwq and handle workqueue flushing. * + * NOTE: + * For unbound workqueues, this function may temporarily drop @pwq->pool->lock + * and thus should be called after all other state updates for the in-flight + * work item is complete. + * * CONTEXT: * raw_spin_lock_irq(pool->lock). */ @@ -1711,11 +1716,13 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, pwq_activate_work(pwq, work); list_del_init(&work->entry); - pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); /* work->data points to pwq iff queued, point to pool */ set_work_pool_and_keep_pending(work, pool->id); + /* must be the last step, see the function comment */ + pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); + raw_spin_unlock(&pool->lock); rcu_read_unlock(); return 1; @@ -2780,6 +2787,8 @@ __acquires(&pool->lock) worker->current_func = NULL; worker->current_pwq = NULL; worker->current_color = INT_MAX; + + /* must be the last step, see the function comment */ + pwq_dec_nr_in_flight(pwq, work_data); } -- cgit v1.2.3 From 91ccc6e7233bb10a9c176aa4cc70d6f432a441a5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:24 -1000 Subject: workqueue: Introduce struct wq_node_nr_active Currently, for both percpu and unbound workqueues, max_active applies per-cpu, which is a recent change for unbound workqueues. The change for unbound workqueues was a significant departure from the previous behavior of per-node application. It made some use cases create an undesirable number of concurrent work items and left no good way of fixing them. To address the problem, workqueue is implementing a NUMA node segmented global nr_active mechanism, which will be explained further in the next patch. As a preparation, this patch introduces struct wq_node_nr_active. It's a data structure allocated for each workqueue and NUMA node pair and currently only tracks the workqueue's number of active work items on the node. This is split out from the next patch to make it easier to understand and review. Note that there is an extra wq_node_nr_active allocated for the invalid node nr_node_ids which is used to track nr_active for pools which don't have an associated NUMA node such as the default fallback system-wide pool. This doesn't cause any behavior changes visible to userland yet. The next patch will expand to implement the control mechanism on top. v4: - Fixed out-of-bound access when freeing per-cpu workqueues. v3: - Use flexible array for wq->node_nr_active as suggested by Lai.
v2: - wq->max_active now uses WRITE/READ_ONCE() as suggested by Lai. - Lai pointed out that pwq_tryinc_nr_active() incorrectly dropped pwq->max_active check. Restored. As the next patch replaces the max_active enforcement mechanism, this doesn't change the end result. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 142 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 135 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b5aba0e5a699..8d465478adb9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -284,6 +284,16 @@ struct wq_flusher { struct wq_device; +/* + * Unlike in a per-cpu workqueue where max_active limits its concurrency level + * on each CPU, in an unbound workqueue, max_active applies to the whole system. + * As sharing a single nr_active across multiple sockets can be very expensive, + * the counting and enforcement is per NUMA node. + */ +struct wq_node_nr_active { + atomic_t nr; /* per-node nr_active count */ +}; + /* * The externally visible workqueue. It relays the issued work items to * the appropriate worker_pool through its pool_workqueues. @@ -330,6 +340,7 @@ struct workqueue_struct { /* hot fields used during command issue, aligned to cacheline */ unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */ struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */ + struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */ }; static struct kmem_cache *pwq_cache; @@ -1425,6 +1436,31 @@ work_func_t wq_worker_last_func(struct task_struct *task) return worker->last_func; } +/** + * wq_node_nr_active - Determine wq_node_nr_active to use + * @wq: workqueue of interest + * @node: NUMA node, can be %NUMA_NO_NODE + * + * Determine wq_node_nr_active to use for @wq on @node. Returns: + * + * - %NULL for per-cpu workqueues as they don't need to use shared nr_active. + * + * - node_nr_active[nr_node_ids] if @node is %NUMA_NO_NODE. + * + * - Otherwise, node_nr_active[@node]. 
+ */ +static struct wq_node_nr_active *wq_node_nr_active(struct workqueue_struct *wq, + int node) +{ + if (!(wq->flags & WQ_UNBOUND)) + return NULL; + + if (node == NUMA_NO_NODE) + node = nr_node_ids; + + return wq->node_nr_active[node]; +} + /** * get_pwq - get an extra reference on the specified pool_workqueue * @pwq: pool_workqueue to get @@ -1506,12 +1542,17 @@ static bool pwq_activate_work(struct pool_workqueue *pwq, struct work_struct *work) { struct worker_pool *pool = pwq->pool; + struct wq_node_nr_active *nna; lockdep_assert_held(&pool->lock); if (!(*work_data_bits(work) & WORK_STRUCT_INACTIVE)) return false; + nna = wq_node_nr_active(pwq->wq, pool->node); + if (nna) + atomic_inc(&nna->nr); + pwq->nr_active++; __pwq_activate_work(pwq, work); return true; @@ -1528,14 +1569,18 @@ static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq) { struct workqueue_struct *wq = pwq->wq; struct worker_pool *pool = pwq->pool; + struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node); bool obtained; lockdep_assert_held(&pool->lock); obtained = pwq->nr_active < READ_ONCE(wq->max_active); - if (obtained) + if (obtained) { pwq->nr_active++; + if (nna) + atomic_inc(&nna->nr); + } return obtained; } @@ -1572,10 +1617,26 @@ static bool pwq_activate_first_inactive(struct pool_workqueue *pwq) static void pwq_dec_nr_active(struct pool_workqueue *pwq) { struct worker_pool *pool = pwq->pool; + struct wq_node_nr_active *nna = wq_node_nr_active(pwq->wq, pool->node); lockdep_assert_held(&pool->lock); + /* + * @pwq->nr_active should be decremented for both percpu and unbound + * workqueues. + */ pwq->nr_active--; + + /* + * For a percpu workqueue, it's simple. Just need to kick the first + * inactive work item on @pwq itself. + */ + if (!nna) { + pwq_activate_first_inactive(pwq); + return; + } + + atomic_dec(&nna->nr); pwq_activate_first_inactive(pwq); } @@ -4039,11 +4100,63 @@ static void wq_free_lockdep(struct workqueue_struct *wq) } #endif +static void free_node_nr_active(struct wq_node_nr_active **nna_ar) +{ + int node; + + for_each_node(node) { + kfree(nna_ar[node]); + nna_ar[node] = NULL; + } + + kfree(nna_ar[nr_node_ids]); + nna_ar[nr_node_ids] = NULL; +} + +static void init_node_nr_active(struct wq_node_nr_active *nna) +{ + atomic_set(&nna->nr, 0); +} + +/* + * Each node's nr_active counter will be accessed mostly from its own node and + * should be allocated in the node. 
+ */ +static int alloc_node_nr_active(struct wq_node_nr_active **nna_ar) +{ + struct wq_node_nr_active *nna; + int node; + + for_each_node(node) { + nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, node); + if (!nna) + goto err_free; + init_node_nr_active(nna); + nna_ar[node] = nna; + } + + /* [nr_node_ids] is used as the fallback */ + nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, NUMA_NO_NODE); + if (!nna) + goto err_free; + init_node_nr_active(nna); + nna_ar[nr_node_ids] = nna; + + return 0; + +err_free: + free_node_nr_active(nna_ar); + return -ENOMEM; +} + static void rcu_free_wq(struct rcu_head *rcu) { struct workqueue_struct *wq = container_of(rcu, struct workqueue_struct, rcu); + if (wq->flags & WQ_UNBOUND) + free_node_nr_active(wq->node_nr_active); + wq_free_lockdep(wq); free_percpu(wq->cpu_pwq); free_workqueue_attrs(wq->unbound_attrs); @@ -4785,7 +4898,8 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, { va_list args; struct workqueue_struct *wq; - int len; + size_t wq_size; + int name_len; /* * Unbound && max_active == 1 used to imply ordered, which is no longer @@ -4801,7 +4915,12 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, flags |= WQ_UNBOUND; /* allocate wq and format name */ - wq = kzalloc(sizeof(*wq), GFP_KERNEL); + if (flags & WQ_UNBOUND) + wq_size = struct_size(wq, node_nr_active, nr_node_ids + 1); + else + wq_size = sizeof(*wq); + + wq = kzalloc(wq_size, GFP_KERNEL); if (!wq) return NULL; @@ -4812,11 +4931,12 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, } va_start(args, max_active); - len = vsnprintf(wq->name, sizeof(wq->name), fmt, args); + name_len = vsnprintf(wq->name, sizeof(wq->name), fmt, args); va_end(args); - if (len >= WQ_NAME_LEN) - pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n", wq->name); + if (name_len >= WQ_NAME_LEN) + pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n", + wq->name); max_active = max_active ?: WQ_DFL_ACTIVE; max_active = wq_clamp_max_active(max_active, flags, wq->name); @@ -4835,8 +4955,13 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, wq_init_lockdep(wq); INIT_LIST_HEAD(&wq->list); + if (flags & WQ_UNBOUND) { + if (alloc_node_nr_active(wq->node_nr_active) < 0) + goto err_unreg_lockdep; + } + if (alloc_and_link_pwqs(wq) < 0) - goto err_unreg_lockdep; + goto err_free_node_nr_active; if (wq_online && init_rescuer(wq) < 0) goto err_destroy; @@ -4861,6 +4986,9 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, return wq; +err_free_node_nr_active: + if (wq->flags & WQ_UNBOUND) + free_node_nr_active(wq->node_nr_active); err_unreg_lockdep: wq_unregister_lockdep(wq); wq_free_lockdep(wq); -- cgit v1.2.3 From 5797b1c18919cd9c289ded7954383e499f729ce0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 29 Jan 2024 08:11:25 -1000 Subject: workqueue: Implement system-wide nr_active enforcement for unbound workqueues A pool_workqueue (pwq) represents the connection between a workqueue and a worker_pool. One of the roles that a pwq plays is enforcement of the max_active concurrency limit. Before 636b927eba5b ("workqueue: Make unbound workqueues to use per-cpu pool_workqueues"), there was one pwq per CPU for per-cpu workqueues and one per NUMA node for unbound workqueues, which was a natural result of per-cpu workqueues being served by per-cpu pools and unbound by per-NUMA pools. In terms of max_active enforcement, this was, while not perfect, workable. For per-cpu workqueues, it was fine.
For unbound workqueues, it wasn't great in that NUMA machines would get a max_active multiplied by the number of nodes, but it didn't cause huge problems because NUMA machines are relatively rare and the node count is usually pretty low. However, cache layouts are more complex now and sharing a worker pool across a whole node didn't really work well for unbound workqueues. Thus, a series of commits culminating in 8639ecebc9b1 ("workqueue: Implement non-strict affinity scope for unbound workqueues") implemented a more flexible affinity mechanism for unbound workqueues which enables using e.g. last-level-cache aligned pools. In the process, 636b927eba5b ("workqueue: Make unbound workqueues to use per-cpu pool_workqueues") made unbound workqueues use per-cpu pwqs like per-cpu workqueues. While the change was necessary to enable more flexible affinity scopes, this came with the side effect of blowing up the effective max_active for unbound workqueues. Before, the effective max_active for unbound workqueues was multiplied by the number of nodes. After, by the number of CPUs. 636b927eba5b ("workqueue: Make unbound workqueues to use per-cpu pool_workqueues") claims that this should generally be okay. It is okay for users which self-regulate their concurrency level, which are the vast majority; however, there are enough use cases which actually depend on max_active to prevent the level of concurrency from going bonkers, including several IO handling workqueues that can issue a work item for each in-flight IO. With targeted benchmarks, the misbehavior can easily be exposed as reported in http://lkml.kernel.org/r/dbu6wiwu3sdhmhikb2w6lns7b27gbobfavhjj57kwi2quafgwl@htjcc5oikcr3. Unfortunately, there is no way to express what these use cases need using per-cpu max_active. A CPU may issue most of the in-flight IOs, so we don't want to set max_active too low, but as soon as we increase max_active a bit, we can end up with an unreasonable number of in-flight work items when many CPUs issue IOs at the same time, i.e. the lowest acceptable max_active is higher than the highest acceptable max_active. Ideally, max_active for an unbound workqueue should be system-wide so that the users can regulate the total level of concurrency regardless of node and cache layout. The reasons workqueue hasn't implemented that yet are: - Once max_active enforcement is decoupled from pool boundaries, chaining execution after a work item finishes requires inter-pool operations which would involve lock dancing, which is nasty. - Sharing a single nr_active count across the whole system can be pretty expensive on NUMA machines. - Per-pwq enforcement had been more or less okay while we were using per-node pools. It looks like we no longer can avoid decoupling max_active enforcement from pool boundaries. This patch implements a system-wide nr_active mechanism with the following design characteristics: - To avoid sharing a single counter across multiple nodes, the configured max_active is split across nodes according to the proportion of each workqueue's online effective CPUs per node. e.g. a node with twice as many online effective CPUs will get twice the portion of max_active. - Workqueue used to be able to process a chain of interdependent work items which is as long as max_active. We can't do this anymore as max_active is distributed across the nodes. Instead, a new parameter min_active is introduced which determines the minimum level of concurrency within a node regardless of how max_active distribution comes out to be.
It is set to the smaller of max_active and WQ_DFL_MIN_ACTIVE, which is 8. This can lead to a higher effective max_active than configured and also deadlocks if a workqueue was depending on being able to handle chains of interdependent work items that are longer than 8. I believe these should be fine given that the number of CPUs in each NUMA node is usually higher than 8 and a work item chain longer than 8 is pretty unlikely. However, if these assumptions turn out to be wrong, we'll need to add an interface to adjust min_active. - Each unbound wq has an array of struct wq_node_nr_active which tracks per-node nr_active. When its pwq wants to run a work item, it has to obtain the matching node's nr_active. If over the node's max_active, the pwq is queued on wq_node_nr_active->pending_pwqs. As work items finish, the completion path round-robins the pending pwqs, activating the first inactive work item of each, which involves some pool lock dancing and kicking other pools. It's not the simplest code but doesn't look too bad. v4: - wq_adjust_max_active() updated to invoke wq_update_node_max_active(). - wq_adjust_max_active() is now protected by wq->mutex instead of wq_pool_mutex. v3: - wq_node_max_active() used to calculate per-node max_active on the fly based on system-wide CPU online states. Lai pointed out that this can lead to skewed distributions for workqueues with restricted cpumasks. Update the max_active distribution to use per-workqueue effective online CPU counts instead of system-wide and cache the calculation results in node_nr_active->max. v2: - wq->min/max_active now uses WRITE/READ_ONCE() as suggested by Lai. Signed-off-by: Tejun Heo Reported-by: Naohiro Aota Link: http://lkml.kernel.org/r/dbu6wiwu3sdhmhikb2w6lns7b27gbobfavhjj57kwi2quafgwl@htjcc5oikcr3 Fixes: 636b927eba5b ("workqueue: Make unbound workqueues to use per-cpu pool_workqueues") Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 341 ++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 309 insertions(+), 32 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8d465478adb9..903be39bd2d1 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -126,6 +126,9 @@ enum wq_internal_consts { * * L: pool->lock protected. Access with pool->lock held. * + * LN: pool->lock and wq_node_nr_active->lock protected for writes. Either for + * reads. + * * K: Only modified by worker while holding pool->lock. Can be safely read by * self, while holding pool->lock or from IRQ context if %current is the * kworker. @@ -247,17 +250,18 @@ struct pool_workqueue { * pwq->inactive_works instead of pool->worklist and marked with * WORK_STRUCT_INACTIVE. * - * All work items marked with WORK_STRUCT_INACTIVE do not participate - * in pwq->nr_active and all work items in pwq->inactive_works are - * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE - * work items are in pwq->inactive_works. Some of them are ready to - * run in pool->worklist or worker->scheduled. Those work itmes are - * only struct wq_barrier which is used for flush_work() and should - * not participate in pwq->nr_active. For non-barrier work item, it - * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works. + * All work items marked with WORK_STRUCT_INACTIVE do not participate in + * nr_active and all work items in pwq->inactive_works are marked with + * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are + * in pwq->inactive_works.
Some of them are ready to run in + pool->worklist or worker->scheduled. Those work items are only struct + wq_barrier which is used for flush_work() and should not participate + in nr_active. For non-barrier work item, it is marked with + WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works. */ int nr_active; /* L: nr of active works */ struct list_head inactive_works; /* L: inactive works */ + struct list_head pending_node; /* LN: node on wq_node_nr_active->pending_pwqs */ struct list_head pwqs_node; /* WR: node on wq->pwqs */ struct list_head mayday_node; /* MD: node on wq->maydays */ @@ -289,9 +293,19 @@ struct wq_device; * on each CPU, in an unbound workqueue, max_active applies to the whole system. * As sharing a single nr_active across multiple sockets can be very expensive, * the counting and enforcement is per NUMA node. + * + * The following struct is used to enforce per-node max_active. When a pwq wants + * to start executing a work item, it should increment ->nr using + * tryinc_node_nr_active(). If acquisition fails due to ->nr already being over + * ->max, the pwq is queued on ->pending_pwqs. As in-flight work items finish + * and decrement ->nr, node_activate_pending_pwq() activates the pending pwqs in + * round-robin order. */ struct wq_node_nr_active { - atomic_t nr; /* per-node nr_active count */ + int max; /* per-node max_active */ + atomic_t nr; /* per-node nr_active */ + raw_spinlock_t lock; /* nests inside pool locks */ + struct list_head pending_pwqs; /* LN: pwqs with inactive works */ }; /* @@ -314,8 +328,12 @@ struct workqueue_struct { struct worker *rescuer; /* MD: rescue worker */ int nr_drainers; /* WQ: drain in progress */ + + /* See alloc_workqueue() function comment for info on min/max_active */ int max_active; /* WO: max active works */ + int min_active; /* WO: min active works */ int saved_max_active; /* WQ: saved max_active */ + int saved_min_active; /* WQ: saved min_active */ struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */ struct pool_workqueue __rcu *dfl_pwq; /* PW: only for unbound wqs */ @@ -667,6 +685,19 @@ static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu) lockdep_is_held(&wq->mutex)); } +/** + * unbound_effective_cpumask - effective cpumask of an unbound workqueue + * @wq: workqueue of interest + * + * @wq->unbound_attrs->cpumask contains the cpumask requested by the user which + * is masked with wq_unbound_cpumask to determine the effective cpumask. The + * default pwq is always mapped to the pool with the current effective cpumask. + */ +static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq) +{ + return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask; +} + static unsigned int work_color_to_flags(int color) { return color << WORK_STRUCT_COLOR_SHIFT; } @@ -1461,6 +1492,46 @@ static struct wq_node_nr_active *wq_node_nr_active(struct workqueue_struct *wq, return wq->node_nr_active[node]; } +/** + * wq_update_node_max_active - Update per-node max_actives to use + * @wq: workqueue to update + * @off_cpu: CPU that's going down, -1 if a CPU is not going down + * + * Update @wq->node_nr_active[]->max. @wq must be unbound. max_active is + * distributed among nodes according to the proportions of numbers of online + * cpus. The result is always between @wq->min_active and max_active.
+ */ +static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu) +{ + struct cpumask *effective = unbound_effective_cpumask(wq); + int min_active = READ_ONCE(wq->min_active); + int max_active = READ_ONCE(wq->max_active); + int total_cpus, node; + + lockdep_assert_held(&wq->mutex); + + if (!cpumask_test_cpu(off_cpu, effective)) + off_cpu = -1; + + total_cpus = cpumask_weight_and(effective, cpu_online_mask); + if (off_cpu >= 0) + total_cpus--; + + for_each_node(node) { + int node_cpus; + + node_cpus = cpumask_weight_and(effective, cpumask_of_node(node)); + if (off_cpu >= 0 && cpu_to_node(off_cpu) == node) + node_cpus--; + + wq_node_nr_active(wq, node)->max = + clamp(DIV_ROUND_UP(max_active * node_cpus, total_cpus), + min_active, max_active); + } + + wq_node_nr_active(wq, NUMA_NO_NODE)->max = min_active; +} + /** * get_pwq - get an extra reference on the specified pool_workqueue * @pwq: pool_workqueue to get @@ -1558,35 +1629,98 @@ static bool pwq_activate_work(struct pool_workqueue *pwq, return true; } +static bool tryinc_node_nr_active(struct wq_node_nr_active *nna) +{ + int max = READ_ONCE(nna->max); + + while (true) { + int old, tmp; + + old = atomic_read(&nna->nr); + if (old >= max) + return false; + tmp = atomic_cmpxchg_relaxed(&nna->nr, old, old + 1); + if (tmp == old) + return true; + } +} + /** * pwq_tryinc_nr_active - Try to increment nr_active for a pwq * @pwq: pool_workqueue of interest + * @fill: max_active may have increased, try to increase concurrency level * * Try to increment nr_active for @pwq. Returns %true if an nr_active count is * successfully obtained. %false otherwise. */ -static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq) +static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill) { struct workqueue_struct *wq = pwq->wq; struct worker_pool *pool = pwq->pool; struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node); - bool obtained; + bool obtained = false; lockdep_assert_held(&pool->lock); - obtained = pwq->nr_active < READ_ONCE(wq->max_active); + if (!nna) { + /* per-cpu workqueue, pwq->nr_active is sufficient */ + obtained = pwq->nr_active < READ_ONCE(wq->max_active); + goto out; + } + + /* + * Unbound workqueue uses per-node shared nr_active $nna. If @pwq is + * already waiting on $nna, pwq_dec_nr_active() will maintain the + * concurrency level. Don't jump the line. + * + * We need to ignore the pending test after max_active has increased as + * pwq_dec_nr_active() can only maintain the concurrency level but not + * increase it. This is indicated by @fill. + */ + if (!list_empty(&pwq->pending_node) && likely(!fill)) + goto out; + + obtained = tryinc_node_nr_active(nna); + if (obtained) + goto out; + + /* + * Lockless acquisition failed. Lock, add ourself to $nna->pending_pwqs + * and try again. The smp_mb() is paired with the implied memory barrier + * of atomic_dec_return() in pwq_dec_nr_active() to ensure that either + * we see the decremented $nna->nr or they see non-empty + * $nna->pending_pwqs. + */ + raw_spin_lock(&nna->lock); + + if (list_empty(&pwq->pending_node)) + list_add_tail(&pwq->pending_node, &nna->pending_pwqs); + else if (likely(!fill)) + goto out_unlock; + + smp_mb(); + + obtained = tryinc_node_nr_active(nna); - if (obtained) { + /* + * If @fill, @pwq might have already been pending. Being spuriously + * pending in cold paths doesn't affect anything. Let's leave it be. 
+ */ + if (obtained && likely(!fill)) + list_del_init(&pwq->pending_node); + +out_unlock: + raw_spin_unlock(&nna->lock); +out: + if (obtained) pwq->nr_active++; - if (nna) - atomic_inc(&nna->nr); - } return obtained; } /** * pwq_activate_first_inactive - Activate the first inactive work item on a pwq * @pwq: pool_workqueue of interest + * @fill: max_active may have increased, try to increase concurrency level * * Activate the first inactive work item of @pwq if available and allowed by * max_active limit. @@ -1594,13 +1728,13 @@ static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq) * Returns %true if an inactive work item has been activated. %false if no * inactive work item is found or max_active limit is reached. */ -static bool pwq_activate_first_inactive(struct pool_workqueue *pwq) +static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill) { struct work_struct *work = list_first_entry_or_null(&pwq->inactive_works, struct work_struct, entry); - if (work && pwq_tryinc_nr_active(pwq)) { + if (work && pwq_tryinc_nr_active(pwq, fill)) { __pwq_activate_work(pwq, work); return true; } else { @@ -1608,11 +1742,93 @@ static bool pwq_activate_first_inactive(struct pool_workqueue *pwq) } } +/** + * node_activate_pending_pwq - Activate a pending pwq on a wq_node_nr_active + * @nna: wq_node_nr_active to activate a pending pwq for + * @caller_pool: worker_pool the caller is locking + * + * Activate a pwq in @nna->pending_pwqs. Called with @caller_pool locked. + * @caller_pool may be unlocked and relocked to lock other worker_pools. + */ +static void node_activate_pending_pwq(struct wq_node_nr_active *nna, + struct worker_pool *caller_pool) +{ + struct worker_pool *locked_pool = caller_pool; + struct pool_workqueue *pwq; + struct work_struct *work; + + lockdep_assert_held(&caller_pool->lock); + + raw_spin_lock(&nna->lock); +retry: + pwq = list_first_entry_or_null(&nna->pending_pwqs, + struct pool_workqueue, pending_node); + if (!pwq) + goto out_unlock; + + /* + * If @pwq is for a different pool than @locked_pool, we need to lock + * @pwq->pool->lock. Let's trylock first. If unsuccessful, do the unlock + * / lock dance. For that, we also need to release @nna->lock as it's + * nested inside pool locks. + */ + if (pwq->pool != locked_pool) { + raw_spin_unlock(&locked_pool->lock); + locked_pool = pwq->pool; + if (!raw_spin_trylock(&locked_pool->lock)) { + raw_spin_unlock(&nna->lock); + raw_spin_lock(&locked_pool->lock); + raw_spin_lock(&nna->lock); + goto retry; + } + } + + /* + * $pwq may not have any inactive work items due to e.g. cancellations. + * Drop it from pending_pwqs and see if there's another one. + */ + work = list_first_entry_or_null(&pwq->inactive_works, + struct work_struct, entry); + if (!work) { + list_del_init(&pwq->pending_node); + goto retry; + } + + /* + * Acquire an nr_active count and activate the inactive work item. If + * $pwq still has inactive work items, rotate it to the end of the + * pending_pwqs so that we round-robin through them. This means that + * inactive work items are not activated in queueing order which is fine + * given that there has never been any ordering across different pwqs. 
+ */ + if (likely(tryinc_node_nr_active(nna))) { + pwq->nr_active++; + __pwq_activate_work(pwq, work); + + if (list_empty(&pwq->inactive_works)) + list_del_init(&pwq->pending_node); + else + list_move_tail(&pwq->pending_node, &nna->pending_pwqs); + + /* if activating a foreign pool, make sure it's running */ + if (pwq->pool != caller_pool) + kick_pool(pwq->pool); + } + +out_unlock: + raw_spin_unlock(&nna->lock); + if (locked_pool != caller_pool) { + raw_spin_unlock(&locked_pool->lock); + raw_spin_lock(&caller_pool->lock); + } +} + /** * pwq_dec_nr_active - Retire an active count * @pwq: pool_workqueue of interest * * Decrement @pwq's nr_active and try to activate the first inactive work item. + * For unbound workqueues, this function may temporarily drop @pwq->pool->lock. */ static void pwq_dec_nr_active(struct pool_workqueue *pwq) { @@ -1632,12 +1848,29 @@ static void pwq_dec_nr_active(struct pool_workqueue *pwq) * inactive work item on @pwq itself. */ if (!nna) { - pwq_activate_first_inactive(pwq); + pwq_activate_first_inactive(pwq, false); return; } - atomic_dec(&nna->nr); - pwq_activate_first_inactive(pwq); + /* + * If @pwq is for an unbound workqueue, it's more complicated because + * multiple pwqs and pools may be sharing the nr_active count. When a + * pwq needs to wait for an nr_active count, it puts itself on + * $nna->pending_pwqs. The following atomic_dec_return()'s implied + * memory barrier is paired with smp_mb() in pwq_tryinc_nr_active() to + * guarantee that either we see non-empty pending_pwqs or they see + * decremented $nna->nr. + * + * $nna->max may change as CPUs come online/offline and @pwq->wq's + * max_active gets updated. However, it is guaranteed to be equal to or + * larger than @pwq->wq->min_active which is above zero unless freezing. + * This maintains the forward progress guarantee. + */ + if (atomic_dec_return(&nna->nr) >= READ_ONCE(nna->max)) + return; + + if (!list_empty(&nna->pending_pwqs)) + node_activate_pending_pwq(nna, pool); } /** @@ -1965,7 +2198,7 @@ retry: * @work must also queue behind existing inactive work items to maintain * ordering when max_active changes. See wq_adjust_max_active(). */ - if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq)) { + if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) { if (list_empty(&pool->worklist)) pool->watchdog_ts = jiffies; @@ -3200,7 +3433,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq, barr->task = current; - /* The barrier work item does not participate in pwq->nr_active. */ + /* The barrier work item does not participate in nr_active. 
*/ work_flags |= WORK_STRUCT_INACTIVE; /* @@ -4116,6 +4349,8 @@ static void free_node_nr_active(struct wq_node_nr_active **nna_ar) static void init_node_nr_active(struct wq_node_nr_active *nna) { atomic_set(&nna->nr, 0); + raw_spin_lock_init(&nna->lock); + INIT_LIST_HEAD(&nna->pending_pwqs); } /* @@ -4355,6 +4590,15 @@ static void pwq_release_workfn(struct kthread_work *work) mutex_unlock(&wq_pool_mutex); } + if (!list_empty(&pwq->pending_node)) { + struct wq_node_nr_active *nna = + wq_node_nr_active(pwq->wq, pwq->pool->node); + + raw_spin_lock_irq(&nna->lock); + list_del_init(&pwq->pending_node); + raw_spin_unlock_irq(&nna->lock); + } + call_rcu(&pwq->rcu, rcu_free_pwq); /* @@ -4380,6 +4624,7 @@ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, pwq->flush_color = -1; pwq->refcnt = 1; INIT_LIST_HEAD(&pwq->inactive_works); + INIT_LIST_HEAD(&pwq->pending_node); INIT_LIST_HEAD(&pwq->pwqs_node); INIT_LIST_HEAD(&pwq->mayday_node); kthread_init_work(&pwq->release_work, pwq_release_workfn); @@ -4587,6 +4832,9 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) ctx->pwq_tbl[cpu]); ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq); + /* update node_nr_active->max */ + wq_update_node_max_active(ctx->wq, -1); + mutex_unlock(&ctx->wq->mutex); } @@ -4850,24 +5098,35 @@ static int init_rescuer(struct workqueue_struct *wq) static void wq_adjust_max_active(struct workqueue_struct *wq) { bool activated; + int new_max, new_min; lockdep_assert_held(&wq->mutex); if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) { - WRITE_ONCE(wq->max_active, 0); - return; + new_max = 0; + new_min = 0; + } else { + new_max = wq->saved_max_active; + new_min = wq->saved_min_active; } - if (wq->max_active == wq->saved_max_active) + if (wq->max_active == new_max && wq->min_active == new_min) return; /* - * Update @wq->max_active and then kick inactive work items if more + * Update @wq->max/min_active and then kick inactive work items if more * active work items are allowed. This doesn't break work item ordering * because new work items are always queued behind existing inactive * work items if there are any. */ - WRITE_ONCE(wq->max_active, wq->saved_max_active); + WRITE_ONCE(wq->max_active, new_max); + WRITE_ONCE(wq->min_active, new_min); + + if (wq->flags & WQ_UNBOUND) + wq_update_node_max_active(wq, -1); + + if (new_max == 0) + return; /* * Round-robin through pwq's activating the first inactive work item @@ -4882,7 +5141,7 @@ static void wq_adjust_max_active(struct workqueue_struct *wq) /* can be called during early boot w/ irq disabled */ raw_spin_lock_irqsave(&pwq->pool->lock, flags); - if (pwq_activate_first_inactive(pwq)) { + if (pwq_activate_first_inactive(pwq, true)) { activated = true; kick_pool(pwq->pool); } @@ -4944,7 +5203,9 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, /* init wq */ wq->flags = flags; wq->max_active = max_active; - wq->saved_max_active = max_active; + wq->min_active = min(max_active, WQ_DFL_MIN_ACTIVE); + wq->saved_max_active = wq->max_active; + wq->saved_min_active = wq->min_active; mutex_init(&wq->mutex); atomic_set(&wq->nr_pwqs_to_flush, 0); INIT_LIST_HEAD(&wq->pwqs); @@ -5110,7 +5371,8 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); * @wq: target workqueue * @max_active: new max_active value. * - * Set max_active of @wq to @max_active. + * Set max_active of @wq to @max_active. See the alloc_workqueue() function + * comment. * * CONTEXT: * Don't call from IRQ context. 
@@ -5127,6 +5389,9 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) wq->flags &= ~__WQ_ORDERED; wq->saved_max_active = max_active; + if (wq->flags & WQ_UNBOUND) + wq->saved_min_active = min(wq->saved_min_active, max_active); + wq_adjust_max_active(wq); mutex_unlock(&wq->mutex); @@ -5808,6 +6073,10 @@ int workqueue_online_cpu(unsigned int cpu) for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) wq_update_pod(wq, tcpu, cpu, true); + + mutex_lock(&wq->mutex); + wq_update_node_max_active(wq, -1); + mutex_unlock(&wq->mutex); } } @@ -5836,6 +6105,10 @@ int workqueue_offline_cpu(unsigned int cpu) for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) wq_update_pod(wq, tcpu, cpu, false); + + mutex_lock(&wq->mutex); + wq_update_node_max_active(wq, cpu); + mutex_unlock(&wq->mutex); } } mutex_unlock(&wq_pool_mutex); @@ -7127,8 +7400,12 @@ void __init workqueue_init_topology(void) * combinations to apply per-pod sharing. */ list_for_each_entry(wq, &workqueues, list) { - for_each_online_cpu(cpu) { + for_each_online_cpu(cpu) wq_update_pod(wq, cpu, cpu, true); + if (wq->flags & WQ_UNBOUND) { + mutex_lock(&wq->mutex); + wq_update_node_max_active(wq, -1); + mutex_unlock(&wq->mutex); } } -- cgit v1.2.3 From aae17ebb53cd3da37f5dfbde937acd091eb4340c Mon Sep 17 00:00:00 2001 From: Leonardo Bras Date: Mon, 29 Jan 2024 22:00:46 -0300 Subject: workqueue: Avoid using isolated cpus' timers on queue_delayed_work When __queue_delayed_work() is called, it chooses a cpu for handling the timer interrupt. As of today, it will pick either the cpu passed as parameter or the last cpu used for this. This is not good if a system does use CPU isolation, because it can take away some valuable cpu time to: 1 - deal with the timer interrupt, 2 - schedule-out the desired task, 3 - queue work on a random workqueue, and 4 - schedule the desired task back to the cpu. So to fix this, during __queue_delayed_work(), if cpu isolation is in place, pick a random non-isolated cpu to handle the timer interrupt. As an optimization, if the current cpu is not isolated, use it instead of looking for another candidate. Signed-off-by: Leonardo Bras Signed-off-by: Tejun Heo --- kernel/workqueue.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 903be39bd2d1..9221a4c57ae1 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2362,10 +2362,18 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, dwork->cpu = cpu; timer->expires = jiffies + delay; - if (unlikely(cpu != WORK_CPU_UNBOUND)) + if (housekeeping_enabled(HK_TYPE_TIMER)) { + /* If the current cpu is a housekeeping cpu, use it. */ + cpu = smp_processor_id(); + if (!housekeeping_test_cpu(cpu, HK_TYPE_TIMER)) + cpu = housekeeping_any_cpu(HK_TYPE_TIMER); add_timer_on(timer, cpu); - else - add_timer(timer); + } else { + if (likely(cpu == WORK_CPU_UNBOUND)) + add_timer(timer); + else + add_timer_on(timer, cpu); + } } /** -- cgit v1.2.3 From 15930da42f8981dc42c19038042947b475b19f47 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 30 Jan 2024 18:55:55 -1000 Subject: workqueue: Don't call cpumask_test_cpu() with -1 CPU in wq_update_node_max_active() For wq_update_node_max_active(), @off_cpu of -1 indicates that no CPU is going down. The function was incorrectly calling cpumask_test_cpu() with -1 CPU leading to oopses like the following on some archs: Unable to handle kernel paging request at virtual address ffff0002100296e0 .. 
pc : wq_update_node_max_active+0x50/0x1fc lr : wq_update_node_max_active+0x1f0/0x1fc ... Call trace: wq_update_node_max_active+0x50/0x1fc apply_wqattrs_commit+0xf0/0x114 apply_workqueue_attrs_locked+0x58/0xa0 alloc_workqueue+0x5ac/0x774 workqueue_init_early+0x460/0x540 start_kernel+0x258/0x684 __primary_switched+0xb8/0xc0 Code: 9100a273 35000d01 53067f00 d0016dc1 (f8607a60) ---[ end trace 0000000000000000 ]--- Kernel panic - not syncing: Attempted to kill the idle task! ---[ end Kernel panic - not syncing: Attempted to kill the idle task! ]--- Fix it. Signed-off-by: Tejun Heo Reported-by: Marek Szyprowski Reported-by: Nathan Chancellor Tested-by: Nathan Chancellor Link: http://lkml.kernel.org/r/91eacde0-df99-4d5c-a980-91046f66e612@samsung.com Fixes: 5797b1c18919 ("workqueue: Implement system-wide nr_active enforcement for unbound workqueues") --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 9221a4c57ae1..31c1373505d8 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1510,7 +1510,7 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu) lockdep_assert_held(&wq->mutex); - if (!cpumask_test_cpu(off_cpu, effective)) + if (off_cpu >= 0 && !cpumask_test_cpu(off_cpu, effective)) off_cpu = -1; total_cpus = cpumask_weight_and(effective, cpu_online_mask); -- cgit v1.2.3 From c5f8cd6c62ce02205ced15e9a998103f21ec5455 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 30 Jan 2024 19:06:43 -1000 Subject: workqueue: Avoid premature init of wq->node_nr_active[].max System workqueues are allocated early during boot from workqueue_init_early(). While allocating unbound workqueues, wq_update_node_max_active() is invoked from apply_workqueue_attrs() and accesses NUMA topology to initialize wq->node_nr_active[].max. However, topology information may not be set up at this point. wq_update_node_max_active() is explicitly invoked from workqueue_init_topology() later when topology information is known to be available. This doesn't seem to crash anything but it's doing useless work with dubious data. Let's skip the premature and duplicate node_max_active updates by initializing the field to WQ_DFL_MIN_ACTIVE on allocation and making wq_update_node_max_active() noop until workqueue_init_topology(). 
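The boot-time ordering this relies on can be pictured with a minimal standalone C sketch. Everything below is illustrative rather than kernel code: only WQ_DFL_MIN_ACTIVE is taken from the patch, and the arithmetic merely stands in for the real per-node max_active distribution. Calls made before the topology flag is set keep the allocation-time default; the explicit call made once topology is known performs the first real update.

    #include <stdbool.h>
    #include <stdio.h>

    #define WQ_DFL_MIN_ACTIVE 8

    static bool topo_initialized;               /* set once topology is known */
    static int node_max = WQ_DFL_MIN_ACTIVE;    /* default from allocation time */

    static void update_node_max(int node_cpus, int total_cpus, int max_active)
    {
            if (!topo_initialized)
                    return;                     /* too early, keep the default */
            node_max = max_active * node_cpus / total_cpus;
    }

    int main(void)
    {
            update_node_max(4, 16, 64);         /* early-boot call: skipped */
            printf("before topology: %d\n", node_max);   /* prints 8 */
            topo_initialized = true;            /* workqueue_init_topology() point */
            update_node_max(4, 16, 64);         /* real distribution */
            printf("after topology: %d\n", node_max);    /* prints 16 */
            return 0;
    }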
Signed-off-by: Tejun Heo --- kernel/workqueue.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 31c1373505d8..ffb625db9771 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -386,6 +386,8 @@ static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = { [WQ_AFFN_SYSTEM] = "system", }; +static bool wq_topo_initialized __read_mostly = false; + /* * Per-cpu work items which run for longer than the following threshold are * automatically considered CPU intensive and excluded from concurrency @@ -1510,6 +1512,9 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu) lockdep_assert_held(&wq->mutex); + if (!wq_topo_initialized) + return; + if (off_cpu >= 0 && !cpumask_test_cpu(off_cpu, effective)) off_cpu = -1; @@ -4356,6 +4361,7 @@ static void free_node_nr_active(struct wq_node_nr_active **nna_ar) static void init_node_nr_active(struct wq_node_nr_active *nna) { + nna->max = WQ_DFL_MIN_ACTIVE; atomic_set(&nna->nr, 0); raw_spin_lock_init(&nna->lock); INIT_LIST_HEAD(&nna->pending_pwqs); @@ -7400,6 +7406,8 @@ void __init workqueue_init_topology(void) init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache); init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa); + wq_topo_initialized = true; + mutex_lock(&wq_pool_mutex); /* -- cgit v1.2.3 From b639585e71e63008373d3a9fd060b87315fe7ea8 Mon Sep 17 00:00:00 2001 From: Wang Jinchao Date: Wed, 31 Jan 2024 10:54:41 +0800 Subject: fork: Using clone_flags for legacy clone check In the current implementation of clone(), there is a line that initializes `u64 clone_flags = args->flags` at the top. This means that there is no longer a need to use args->flags for the legacy clone check.
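For context on the check itself: with legacy clone(), a pidfd requested via CLONE_PIDFD is returned through the parent_tid pointer, so combining CLONE_PIDFD with CLONE_PARENT_SETTID on the same address is ambiguous and gets rejected. A self-contained sketch of that logic follows (names carry an X prefix because they are hypothetical; the flag values mirror the uapi ones, but treat the whole block as an illustration, not the kernel's code):

    #include <stdint.h>

    #define XCLONE_PIDFD          0x00001000u  /* mirrors CLONE_PIDFD */
    #define XCLONE_PARENT_SETTID  0x00100000u  /* mirrors CLONE_PARENT_SETTID */

    struct xclone_args {
            uint64_t flags;
            uint64_t pidfd;        /* out: where the pidfd is stored */
            uint64_t parent_tid;   /* out: where the child tid is stored */
    };

    /* Returns 0 if the combination is usable, -1 for the ambiguous legacy case. */
    static int check_legacy_clone(const struct xclone_args *args)
    {
            uint64_t clone_flags = args->flags;   /* read once, use everywhere */

            if ((clone_flags & XCLONE_PIDFD) &&
                (clone_flags & XCLONE_PARENT_SETTID) &&
                args->pidfd == args->parent_tid)
                    return -1;    /* legacy clone(): both outputs share one slot */
            return 0;
    }

    int main(void)
    {
            struct xclone_args args = {
                    .flags = XCLONE_PIDFD | XCLONE_PARENT_SETTID,
                    .pidfd = 0x7000, .parent_tid = 0x7000,   /* same slot */
            };
            return check_legacy_clone(&args) ? 0 : 1;   /* expect rejection */
    }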
Signed-off-by: Wang Jinchao Link: https://lore.kernel.org/r/202401311054+0800-wangjinchao@xfusion.com Signed-off-by: Christian Brauner --- kernel/fork.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 47ff3b35352e..95647c66309f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2875,8 +2875,8 @@ pid_t kernel_clone(struct kernel_clone_args *args) * here has the advantage that we don't need to have a separate helper * to check for legacy clone(). */ - if ((args->flags & CLONE_PIDFD) && - (args->flags & CLONE_PARENT_SETTID) && + if ((clone_flags & CLONE_PIDFD) && + (clone_flags & CLONE_PARENT_SETTID) && (args->pidfd == args->parent_tid)) return -EINVAL; -- cgit v1.2.3 From cdefbf2324ceda662e2667aa2f44e8b9de3d780f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 25 Jan 2024 17:17:34 +0100 Subject: pidfd: cleanup the usage of __pidfd_prepare's flags - make pidfd_create() static. - Don't pass O_RDWR | O_CLOEXEC to __pidfd_prepare() in copy_process(); __pidfd_prepare() adds these flags unconditionally. - Kill the flags check in __pidfd_prepare(). sys_pidfd_open() checks the flags itself; all other users of pidfd_prepare() pass flags = 0. If we need a sanity check for those other in-kernel users then WARN_ON_ONCE(flags & ~PIDFD_NONBLOCK) makes more sense. - Don't pass O_RDWR to get_unused_fd_flags(); it ignores everything except O_CLOEXEC. - Don't pass O_CLOEXEC to anon_inode_getfile(); it ignores everything except O_ACCMODE | O_NONBLOCK. Signed-off-by: Oleg Nesterov Link: https://lore.kernel.org/r/20240125161734.GA778@redhat.com Signed-off-by: Christian Brauner --- kernel/fork.c | 9 +++------ kernel/pid.c | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 95647c66309f..726a92043531 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2130,15 +2130,12 @@ static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **re int pidfd; struct file *pidfd_file; - if (flags & ~(O_NONBLOCK | O_RDWR | O_CLOEXEC)) - return -EINVAL; - - pidfd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); + pidfd = get_unused_fd_flags(O_CLOEXEC); if (pidfd < 0) return pidfd; pidfd_file = anon_inode_getfile("[pidfd]", &pidfd_fops, pid, - flags | O_RDWR | O_CLOEXEC); + flags | O_RDWR); if (IS_ERR(pidfd_file)) { put_unused_fd(pidfd); return PTR_ERR(pidfd_file); @@ -2524,7 +2521,7 @@ __latent_entropy struct task_struct *copy_process( */ if (clone_flags & CLONE_PIDFD) { /* Note that no task has been attached to @pid yet. */ - retval = __pidfd_prepare(pid, O_RDWR | O_CLOEXEC, &pidfile); + retval = __pidfd_prepare(pid, 0, &pidfile); if (retval < 0) goto bad_fork_free_pid; pidfd = retval; diff --git a/kernel/pid.c b/kernel/pid.c index b52b10865454..c7a3e359f8f5 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -595,7 +595,7 @@ struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags) * Return: On success, a cloexec pidfd is returned. * On error, a negative errno number will be returned.
*/ -int pidfd_create(struct pid *pid, unsigned int flags) +static int pidfd_create(struct pid *pid, unsigned int flags) { int pidfd; struct file *pidfd_file; -- cgit v1.2.3 From 21e25205d7f9b6d7d3807546dd12ea93844b7c8e Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 27 Jan 2024 14:24:07 +0100 Subject: pidfd: don't do_notify_pidfd() if !thread_group_empty() do_notify_pidfd() makes no sense until the whole thread group exits; change do_notify_parent() to check thread_group_empty(). This avoids the unnecessary do_notify_pidfd() when tsk is not a leader, or it exits before other threads, or it has a ptraced EXIT_ZOMBIE sub-thread. Signed-off-by: Oleg Nesterov Link: https://lore.kernel.org/r/20240127132407.GA29136@redhat.com Reviewed-by: Tycho Andersen Signed-off-by: Christian Brauner --- kernel/signal.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index c9c57d053ce4..9561a3962ca6 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2050,9 +2050,11 @@ bool do_notify_parent(struct task_struct *tsk, int sig) WARN_ON_ONCE(!tsk->ptrace && (tsk->group_leader != tsk || !thread_group_empty(tsk))); - - /* Wake up all pidfd waiters */ - do_notify_pidfd(tsk); + /* + * tsk is a group leader and has no threads, wake up the pidfd waiters. + */ + if (thread_group_empty(tsk)) + do_notify_pidfd(tsk); if (sig != SIGCHLD) { /* -- cgit v1.2.3 From 64bef697d33b75fc06c5789b3f8108680271529f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 31 Jan 2024 14:26:02 +0100 Subject: pidfd: implement PIDFD_THREAD flag for pidfd_open() With this flag: - pidfd_open() doesn't require that the target task must be a thread-group leader - pidfd_poll() succeeds when the task exits and becomes a zombie (iow, passes exit_notify()), even if it is a leader and thread-group is not empty. This means that the behaviour of pidfd_poll(PIDFD_THREAD, pid-of-group-leader) is not well defined if it races with exec() from its sub-thread; pidfd_poll() can succeed or not depending on whether pidfd_task_exited() is called before or after exchange_tids(). Perhaps we can improve this behaviour later; pidfd_poll() can probably take sig->group_exec_task into account. But this doesn't really differ from the case when the leader exits before other threads (so pidfd_poll() succeeds) and then another thread execs and pidfd_poll() will block again. thread_group_exited() is no longer used; perhaps it can die. Co-developed-by: Tycho Andersen Signed-off-by: Oleg Nesterov Link: https://lore.kernel.org/r/20240131132602.GA23641@redhat.com Tested-by: Tycho Andersen Reviewed-by: Tycho Andersen Signed-off-by: Christian Brauner --- kernel/exit.c | 7 +++++++ kernel/fork.c | 38 +++++++++++++++++++++++++++++++------- kernel/pid.c | 14 +++----------- kernel/signal.c | 6 ++++-- 4 files changed, 45 insertions(+), 20 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 3988a02efaef..c038d10dfb38 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -739,6 +739,13 @@ static void exit_notify(struct task_struct *tsk, int group_dead) kill_orphaned_pgrp(tsk->group_leader, NULL); tsk->exit_state = EXIT_ZOMBIE; + /* + * sub-thread or delay_group_leader(), wake up the + * PIDFD_THREAD waiters.
+ */ + if (!thread_group_empty(tsk)) + do_notify_pidfd(tsk); + if (unlikely(tsk->ptrace)) { int sig = thread_group_leader(tsk) && thread_group_empty(tsk) && diff --git a/kernel/fork.c b/kernel/fork.c index 726a92043531..1a9b91055916 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -101,6 +101,7 @@ #include #include #include +#include #include #include @@ -2050,6 +2051,8 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f) seq_put_decimal_ll(m, "Pid:\t", nr); + /* TODO: report PIDFD_THREAD */ + #ifdef CONFIG_PID_NS seq_put_decimal_ll(m, "\nNSpid:\t", nr); if (nr > 0) { @@ -2068,22 +2071,35 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f) } #endif +static bool pidfd_task_exited(struct pid *pid, bool thread) +{ + struct task_struct *task; + bool exited; + + rcu_read_lock(); + task = pid_task(pid, PIDTYPE_PID); + exited = !task || + (READ_ONCE(task->exit_state) && (thread || thread_group_empty(task))); + rcu_read_unlock(); + + return exited; +} + /* * Poll support for process exit notification. */ static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts) { struct pid *pid = file->private_data; + bool thread = file->f_flags & PIDFD_THREAD; __poll_t poll_flags = 0; poll_wait(file, &pid->wait_pidfd, pts); - /* - * Inform pollers only when the whole thread group exits. - * If the thread group leader exits before all other threads in the - * group, then poll(2) should block, similar to the wait(2) family. + * Depending on PIDFD_THREAD, inform pollers when the thread + * or the whole thread-group exits. */ - if (thread_group_exited(pid)) + if (pidfd_task_exited(pid, thread)) poll_flags = EPOLLIN | EPOLLRDNORM; return poll_flags; @@ -2141,6 +2157,11 @@ static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **re return PTR_ERR(pidfd_file); } get_pid(pid); /* held by pidfd_file now */ + /* + * anon_inode_getfile() ignores everything outside of the + * O_ACCMODE | O_NONBLOCK mask, set PIDFD_THREAD manually. + */ + pidfd_file->f_flags |= (flags & PIDFD_THREAD); *ret = pidfd_file; return pidfd; } @@ -2154,7 +2175,8 @@ static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **re * Allocate a new file that stashes @pid and reserve a new pidfd number in the * caller's file descriptor table. The pidfd is reserved but not installed yet. * - * The helper verifies that @pid is used as a thread group leader. + * The helper verifies that @pid is still in use, without PIDFD_THREAD the + * task identified by @pid must be a thread-group leader. * * If this function returns successfully the caller is responsible to either * call fd_install() passing the returned pidfd and pidfd file as arguments in @@ -2173,7 +2195,9 @@ static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **re */ int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) { - if (!pid || !pid_has_task(pid, PIDTYPE_TGID)) + bool thread = flags & PIDFD_THREAD; + + if (!pid || !pid_has_task(pid, thread ? PIDTYPE_PID : PIDTYPE_TGID)) return -EINVAL; return __pidfd_prepare(pid, flags, ret); diff --git a/kernel/pid.c b/kernel/pid.c index c7a3e359f8f5..e11144466828 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -552,11 +552,6 @@ struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags) * Return the task associated with @pidfd. The function takes a reference on * the returned task. The caller is responsible for releasing that reference. * - * Currently, the process identified by @pidfd is always a thread-group leader. 
- * This restriction currently exists for all aspects of pidfds including pidfd - * creation (CLONE_PIDFD cannot be used with CLONE_THREAD) and pidfd polling - * (only supports thread group leaders). - * * Return: On success, the task_struct associated with the pidfd. * On error, a negative errno number will be returned. */ @@ -615,11 +610,8 @@ static int pidfd_create(struct pid *pid, unsigned int flags) * @flags: flags to pass * * This creates a new pid file descriptor with the O_CLOEXEC flag set for - * the process identified by @pid. Currently, the process identified by - * @pid must be a thread-group leader. This restriction currently exists - * for all aspects of pidfds including pidfd creation (CLONE_PIDFD cannot - * be used with CLONE_THREAD) and pidfd polling (only supports thread group - * leaders). + * the task identified by @pid. Without PIDFD_THREAD flag the target task + * must be a thread-group leader. * * Return: On success, a cloexec pidfd is returned. * On error, a negative errno number will be returned. @@ -629,7 +621,7 @@ SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags) int fd; struct pid *p; - if (flags & ~PIDFD_NONBLOCK) + if (flags & ~(PIDFD_NONBLOCK | PIDFD_THREAD)) return -EINVAL; if (pid <= 0) diff --git a/kernel/signal.c b/kernel/signal.c index 9561a3962ca6..9b40109f0c56 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2019,7 +2019,7 @@ ret: return ret; } -static void do_notify_pidfd(struct task_struct *task) +void do_notify_pidfd(struct task_struct *task) { struct pid *pid; @@ -2051,7 +2051,8 @@ bool do_notify_parent(struct task_struct *tsk, int sig) WARN_ON_ONCE(!tsk->ptrace && (tsk->group_leader != tsk || !thread_group_empty(tsk))); /* - * tsk is a group leader and has no threads, wake up the pidfd waiters. + * tsk is a group leader and has no threads, wake up the + * non-PIDFD_THREAD waiters. */ if (thread_group_empty(tsk)) do_notify_pidfd(tsk); @@ -3926,6 +3927,7 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, prepare_kill_siginfo(sig, &kinfo); } + /* TODO: respect PIDFD_THREAD */ ret = kill_pid_info(sig, &kinfo, pid); err: -- cgit v1.2.3 From 43f0df54c96fa5abcab9df8649c1e52119bf0238 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 2 Feb 2024 14:12:26 +0100 Subject: pidfd_poll: report POLLHUP when pid_task() == NULL Add another wake_up_all(wait_pidfd) into __change_pid() and change pidfd_poll() to include EPOLLHUP if task == NULL. This allows waiting until the target process/thread is reaped. TODO: change do_notify_pidfd() to use the keyed wakeups. Signed-off-by: Oleg Nesterov Link: https://lore.kernel.org/r/20240202131226.GA26018@redhat.com Signed-off-by: Christian Brauner --- kernel/fork.c | 22 +++++++--------------- kernel/pid.c | 5 +++++ 2 files changed, 12 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 1a9b91055916..aa08193d124f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2071,20 +2071,6 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f) } #endif -static bool pidfd_task_exited(struct pid *pid, bool thread) -{ - struct task_struct *task; - bool exited; - - rcu_read_lock(); - task = pid_task(pid, PIDTYPE_PID); - exited = !task || - (READ_ONCE(task->exit_state) && (thread || thread_group_empty(task))); - rcu_read_unlock(); - - return exited; -} - /* * Poll support for process exit notification.
*/ @@ -2092,6 +2078,7 @@ static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts) { struct pid *pid = file->private_data; bool thread = file->f_flags & PIDFD_THREAD; + struct task_struct *task; __poll_t poll_flags = 0; poll_wait(file, &pid->wait_pidfd, pts); @@ -2099,8 +2086,13 @@ static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts) * Depending on PIDFD_THREAD, inform pollers when the thread * or the whole thread-group exits. */ - if (pidfd_task_exited(pid, thread)) + rcu_read_lock(); + task = pid_task(pid, PIDTYPE_PID); + if (!task) + poll_flags = EPOLLIN | EPOLLRDNORM | EPOLLHUP; + else if (task->exit_state && (thread || thread_group_empty(task))) poll_flags = EPOLLIN | EPOLLRDNORM; + rcu_read_unlock(); return poll_flags; } diff --git a/kernel/pid.c b/kernel/pid.c index e11144466828..62461c7c82b8 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -349,6 +349,11 @@ static void __change_pid(struct task_struct *task, enum pid_type type, hlist_del_rcu(&task->pid_links[type]); *pid_ptr = new; + if (type == PIDTYPE_PID) { + WARN_ON_ONCE(pid_has_task(pid, PIDTYPE_PID)); + wake_up_all(&pid->wait_pidfd); + } + for (tmp = PIDTYPE_MAX; --tmp >= 0; ) if (pid_has_task(pid, tmp)) return; -- cgit v1.2.3 From a1c6d5439fbddd06aad3ddbb7f12df0b98354070 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 2 Feb 2024 14:12:55 +0100 Subject: pid: kill the obsolete PIDTYPE_PID code in transfer_pid() transfer_pid() must never be called with type == PIDTYPE_PID; new_leader->thread_pid should be changed by exchange_tids(). Signed-off-by: Oleg Nesterov Link: https://lore.kernel.org/r/20240202131255.GA26025@redhat.com Signed-off-by: Christian Brauner --- kernel/pid.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/pid.c b/kernel/pid.c index 62461c7c82b8..de0bf2f8d18b 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -396,8 +396,7 @@ void exchange_tids(struct task_struct *left, struct task_struct *right) void transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type type) { - if (type == PIDTYPE_PID) - new->thread_pid = old->thread_pid; + WARN_ON_ONCE(type == PIDTYPE_PID); hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]); } -- cgit v1.2.3 From c70e1779b73a39f7648b26bdc835304c60100ce3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 4 Feb 2024 11:14:21 -1000 Subject: workqueue: Fix pwq->nr_in_flight corruption in try_to_grab_pending() dd6c3c544126 ("workqueue: Move pwq_dec_nr_in_flight() to the end of work item handling") relocated pwq_dec_nr_in_flight() after set_work_pool_and_keep_pending(). However, the latter destroys information contained in work->data that's needed by pwq_dec_nr_in_flight() including the flush color. With flush color destroyed, flush_workqueue() can stall easily when mixed with cancel_work*() usages. This is easily triggered by running xfstests generic/001 test on xfs: INFO: task umount:6305 blocked for more than 122 seconds. ...
task:umount state:D stack:13008 pid:6305 tgid:6305 ppid:6301 flags:0x00004000 Call Trace: __schedule+0x2f6/0xa20 schedule+0x36/0xb0 schedule_timeout+0x20b/0x280 wait_for_completion+0x8a/0x140 __flush_workqueue+0x11a/0x3b0 xfs_inodegc_flush+0x24/0xf0 xfs_unmountfs+0x14/0x180 xfs_fs_put_super+0x3d/0x90 generic_shutdown_super+0x7c/0x160 kill_block_super+0x1b/0x40 xfs_kill_sb+0x12/0x30 deactivate_locked_super+0x35/0x90 deactivate_super+0x42/0x50 cleanup_mnt+0x109/0x170 __cleanup_mnt+0x12/0x20 task_work_run+0x60/0x90 syscall_exit_to_user_mode+0x146/0x150 do_syscall_64+0x5d/0x110 entry_SYSCALL_64_after_hwframe+0x6c/0x74 Fix it by stashing work_data before calling set_work_pool_and_keep_pending() and using the stashed value for pwq_dec_nr_in_flight(). Signed-off-by: Tejun Heo Reported-by: Chandan Babu R Link: http://lkml.kernel.org/r/87o7cxeehy.fsf@debian-BULLSEYE-live-builder-AMD64 Fixes: dd6c3c544126 ("workqueue: Move pwq_dec_nr_in_flight() to the end of work item handling") --- kernel/workqueue.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ffb625db9771..55c9816506b0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1999,6 +1999,8 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, */ pwq = get_work_pwq(work); if (pwq && pwq->pool == pool) { + unsigned long work_data; + debug_work_deactivate(work); /* @@ -2016,11 +2018,15 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, list_del_init(&work->entry); - /* work->data points to pwq iff queued, point to pool */ + /* + * work->data points to pwq iff queued. Let's point to pool. As + * this destroys work->data needed by the next step, stash it. + */ + work_data = *work_data_bits(work); set_work_pool_and_keep_pending(work, pool->id); /* must be the last step, see the function comment */ - pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); + pwq_dec_nr_in_flight(pwq, work_data); raw_spin_unlock(&pool->lock); rcu_read_unlock(); -- cgit v1.2.3 From d412ace11144aa2bf692c7cf9778351efc15c827 Mon Sep 17 00:00:00 2001 From: "Ricardo B. Marliere" Date: Sun, 4 Feb 2024 10:47:05 -0300 Subject: workqueue: make wq_subsys const Now that the driver core can properly handle constant struct bus_type, move the wq_subsys variable to be a constant structure as well, placing it into read-only memory which can not be modified at runtime. Cc: Greg Kroah-Hartman Suggested-and-reviewed-by: Greg Kroah-Hartman Signed-off-by: Ricardo B. Marliere Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 55c9816506b0..695f6f5ad038 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -6692,7 +6692,7 @@ static struct device_attribute wq_sysfs_unbound_attrs[] = { __ATTR_NULL, }; -static struct bus_type wq_subsys = { +static const struct bus_type wq_subsys = { .name = "workqueue", .dev_groups = wq_sysfs_groups, }; -- cgit v1.2.3 From c35aea39d1e106f61fd2130f0d32a3bac8bd4570 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 4 Feb 2024 11:28:06 -1000 Subject: workqueue: Update lock debugging code These changes are in preparation of BH workqueue which will execute work items from BH context. - Update lock and RCU depth checks in process_one_work() so that it remembers and checks against the starting depths and prints out the depth changes. 
- Factor out lockdep annotations in the flush paths into touch_{wq|work}_lockdep_map(). The work->lockdep_map touching is moved from __flush_work() to its callee - start_flush_work(). This brings it closer to the wq counterpart and will allow testing the associated wq's flags which will be needed to support BH workqueues. This is not expected to cause any functional changes. Signed-off-by: Tejun Heo Tested-by: Allen Pais --- kernel/workqueue.c | 51 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 695f6f5ad038..ac42c005b00b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2965,6 +2965,7 @@ __acquires(&pool->lock) struct pool_workqueue *pwq = get_work_pwq(work); struct worker_pool *pool = worker->pool; unsigned long work_data; + int lockdep_start_depth, rcu_start_depth; #ifdef CONFIG_LOCKDEP /* * It is permissible to free the struct work_struct from @@ -3027,6 +3028,8 @@ __acquires(&pool->lock) pwq->stats[PWQ_STAT_STARTED]++; raw_spin_unlock_irq(&pool->lock); + rcu_start_depth = rcu_preempt_depth(); + lockdep_start_depth = lockdep_depth(current); lock_map_acquire(&pwq->wq->lockdep_map); lock_map_acquire(&lockdep_map); /* @@ -3062,12 +3065,15 @@ __acquires(&pool->lock) lock_map_release(&lockdep_map); lock_map_release(&pwq->wq->lockdep_map); - if (unlikely(in_atomic() || lockdep_depth(current) > 0 || - rcu_preempt_depth() > 0)) { - pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d/%d\n" - " last function: %ps\n", - current->comm, preempt_count(), rcu_preempt_depth(), - task_pid_nr(current), worker->current_func); + if (unlikely((worker->task && in_atomic()) || + lockdep_depth(current) != lockdep_start_depth || + rcu_preempt_depth() != rcu_start_depth)) { + pr_err("BUG: workqueue leaked atomic, lock or RCU: %s[%d]\n" + " preempt=0x%08x lock=%d->%d RCU=%d->%d workfn=%ps\n", + current->comm, task_pid_nr(current), preempt_count(), + lockdep_start_depth, lockdep_depth(current), + rcu_start_depth, rcu_preempt_depth(), + worker->current_func); debug_show_held_locks(current); dump_stack(); } @@ -3549,6 +3555,19 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, return wait; } +static void touch_wq_lockdep_map(struct workqueue_struct *wq) +{ + lock_map_acquire(&wq->lockdep_map); + lock_map_release(&wq->lockdep_map); +} + +static void touch_work_lockdep_map(struct work_struct *work, + struct workqueue_struct *wq) +{ + lock_map_acquire(&work->lockdep_map); + lock_map_release(&work->lockdep_map); +} + /** * __flush_workqueue - ensure that any scheduled work has run to completion. 
* @wq: workqueue to flush @@ -3568,8 +3587,7 @@ void __flush_workqueue(struct workqueue_struct *wq) if (WARN_ON(!wq_online)) return; - lock_map_acquire(&wq->lockdep_map); - lock_map_release(&wq->lockdep_map); + touch_wq_lockdep_map(wq); mutex_lock(&wq->mutex); @@ -3768,6 +3786,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, struct worker *worker = NULL; struct worker_pool *pool; struct pool_workqueue *pwq; + struct workqueue_struct *wq; might_sleep(); @@ -3791,11 +3810,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, pwq = worker->current_pwq; } - check_flush_dependency(pwq->wq, work); + wq = pwq->wq; + check_flush_dependency(wq, work); insert_wq_barrier(pwq, barr, work, worker); raw_spin_unlock_irq(&pool->lock); + touch_work_lockdep_map(work, wq); + /* * Force a lock recursion deadlock when using flush_work() inside a * single-threaded or rescuer equipped workqueue. @@ -3805,11 +3827,9 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, * workqueues the deadlock happens when the rescuer stalls, blocking * forward progress. */ - if (!from_cancel && - (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { - lock_map_acquire(&pwq->wq->lockdep_map); - lock_map_release(&pwq->wq->lockdep_map); - } + if (!from_cancel && (wq->saved_max_active == 1 || wq->rescuer)) + touch_wq_lockdep_map(wq); + rcu_read_unlock(); return true; already_gone: @@ -3828,9 +3848,6 @@ static bool __flush_work(struct work_struct *work, bool from_cancel) if (WARN_ON(!work->func)) return false; - lock_map_acquire(&work->lockdep_map); - lock_map_release(&work->lockdep_map); - if (start_flush_work(work, &barr, from_cancel)) { wait_for_completion(&barr.done); destroy_work_on_stack(&barr.work); -- cgit v1.2.3 From 2fcdb1b44491e08f5334a92c50e8f362e0d46f91 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 4 Feb 2024 11:28:06 -1000 Subject: workqueue: Factor out init_cpu_worker_pool() Factor out init_cpu_worker_pool() from workqueue_init_early(). This is pure reorganization in preparation of BH workqueue support. 
Signed-off-by: Tejun Heo Tested-by: Allen Pais --- kernel/workqueue.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ac42c005b00b..767971a29c7a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -7147,6 +7147,22 @@ static void __init restrict_unbound_cpumask(const char *name, const struct cpuma cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask); } +static void __init init_cpu_worker_pool(struct worker_pool *pool, int cpu, int nice) +{ + BUG_ON(init_worker_pool(pool)); + pool->cpu = cpu; + cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); + cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); + pool->attrs->nice = nice; + pool->attrs->affn_strict = true; + pool->node = cpu_to_node(cpu); + + /* alloc pool ID */ + mutex_lock(&wq_pool_mutex); + BUG_ON(worker_pool_assign_id(pool)); + mutex_unlock(&wq_pool_mutex); +} + /** * workqueue_init_early - early init for workqueue subsystem * @@ -7207,20 +7223,8 @@ void __init workqueue_init_early(void) struct worker_pool *pool; i = 0; - for_each_cpu_worker_pool(pool, cpu) { - BUG_ON(init_worker_pool(pool)); - pool->cpu = cpu; - cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); - cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); - pool->attrs->nice = std_nice[i++]; - pool->attrs->affn_strict = true; - pool->node = cpu_to_node(cpu); - - /* alloc pool ID */ - mutex_lock(&wq_pool_mutex); - BUG_ON(worker_pool_assign_id(pool)); - mutex_unlock(&wq_pool_mutex); - } + for_each_cpu_worker_pool(pool, cpu) + init_cpu_worker_pool(pool, cpu, std_nice[i++]); } /* create default unbound and ordered wq attrs */ -- cgit v1.2.3 From 4cb1ef64609f9b0254184b2947824f4b46ccab22 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 4 Feb 2024 11:28:06 -1000 Subject: workqueue: Implement BH workqueues to eventually replace tasklets The only generic interface to execute asynchronously in the BH context is tasklet; however, it's marked deprecated and has some design flaws, such as the execution code accessing the tasklet item after execution is complete (which can lead to subtle use-after-free bugs in certain usage scenarios) and less-developed flush and cancel mechanisms. This patch implements BH workqueues, which share the same semantics and features as regular workqueues but execute their work items in the softirq context. As there is always only one BH execution context per CPU, none of the concurrency management mechanisms applies and a BH workqueue can be thought of as a convenience wrapper around softirq. Except for the inability to sleep while executing and lack of max_active adjustments, BH workqueues and work items should behave the same as regular workqueues and work items. Currently, the execution is hooked to tasklet[_hi]. However, the goal is to convert all tasklet users over to BH workqueues. Once the conversion is complete, tasklet can be removed and BH workqueues can directly take over the tasklet softirqs. system_bh[_highpri]_wq are added. As queue-wide flushing doesn't exist in tasklet, all existing tasklet users should be able to use the system BH workqueues without creating their own workqueues. v3: - Add missing interrupt.h include. v2: - Instead of using tasklets, hook directly into the tasklet softirq action functions - tasklet[_hi]_action(). This is slightly cheaper and closer to the eventual code structure we want to arrive at. Suggested by Lai.
- Lai also pointed out several places which need NULL worker->task handling or can use clarification. Updated. Signed-off-by: Tejun Heo Suggested-by: Linus Torvalds Link: http://lkml.kernel.org/r/CAHk-=wjDW53w4-YcSmgKC5RruiRLHmJ1sXeYdp_ZgVoBw=5byA@mail.gmail.com Tested-by: Allen Pais Reviewed-by: Lai Jiangshan --- kernel/softirq.c | 3 + kernel/workqueue.c | 291 +++++++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 241 insertions(+), 53 deletions(-) (limited to 'kernel') diff --git a/kernel/softirq.c b/kernel/softirq.c index 210cf5f8d92c..547d282548a8 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -802,11 +803,13 @@ static void tasklet_action_common(struct softirq_action *a, static __latent_entropy void tasklet_action(struct softirq_action *a) { + workqueue_softirq_action(false); tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ); } static __latent_entropy void tasklet_hi_action(struct softirq_action *a) { + workqueue_softirq_action(true); tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ); } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 767971a29c7a..78b4b992e1a3 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -72,8 +73,12 @@ enum worker_pool_flags { * Note that DISASSOCIATED should be flipped only while holding * wq_pool_attach_mutex to avoid changing binding state while * worker_attach_to_pool() is in progress. + * + * As there can only be one concurrent BH execution context per CPU, a + * BH pool is per-CPU and always DISASSOCIATED. */ - POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */ + POOL_BH = 1 << 0, /* is a BH pool */ + POOL_MANAGER_ACTIVE = 1 << 1, /* being managed */ POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ }; @@ -115,6 +120,14 @@ enum wq_internal_consts { WQ_NAME_LEN = 32, }; +/* + * We don't want to trap softirq for too long. See MAX_SOFTIRQ_TIME and + * MAX_SOFTIRQ_RESTART in kernel/softirq.c. These are macros because + * msecs_to_jiffies() can't be an initializer. + */ +#define BH_WORKER_JIFFIES msecs_to_jiffies(2) +#define BH_WORKER_RESTARTS 10 + /* * Structure fields follow one of the following exclusion rules. 
* @@ -443,8 +456,13 @@ static bool wq_debug_force_rr_cpu = false; #endif module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644); +/* the BH worker pools */ +static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], + bh_worker_pools); + /* the per-cpu worker pools */ -static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools); +static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], + cpu_worker_pools); static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */ @@ -478,6 +496,10 @@ struct workqueue_struct *system_power_efficient_wq __ro_after_init; EXPORT_SYMBOL_GPL(system_power_efficient_wq); struct workqueue_struct *system_freezable_power_efficient_wq __ro_after_init; EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); +struct workqueue_struct *system_bh_wq; +EXPORT_SYMBOL_GPL(system_bh_wq); +struct workqueue_struct *system_bh_highpri_wq; +EXPORT_SYMBOL_GPL(system_bh_highpri_wq); static int worker_thread(void *__worker); static void workqueue_sysfs_unregister(struct workqueue_struct *wq); @@ -498,6 +520,11 @@ static void show_one_worker_pool(struct worker_pool *pool); !lockdep_is_held(&wq_pool_mutex), \ "RCU, wq->mutex or wq_pool_mutex should be held") +#define for_each_bh_worker_pool(pool, cpu) \ + for ((pool) = &per_cpu(bh_worker_pools, cpu)[0]; \ + (pool) < &per_cpu(bh_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \ + (pool)++) + #define for_each_cpu_worker_pool(pool, cpu) \ for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \ @@ -1186,6 +1213,14 @@ static bool kick_pool(struct worker_pool *pool) if (!need_more_worker(pool) || !worker) return false; + if (pool->flags & POOL_BH) { + if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) + raise_softirq_irqoff(HI_SOFTIRQ); + else + raise_softirq_irqoff(TASKLET_SOFTIRQ); + return true; + } + p = worker->task; #ifdef CONFIG_SMP @@ -1668,7 +1703,7 @@ static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill) lockdep_assert_held(&pool->lock); if (!nna) { - /* per-cpu workqueue, pwq->nr_active is sufficient */ + /* BH or per-cpu workqueue, pwq->nr_active is sufficient */ obtained = pwq->nr_active < READ_ONCE(wq->max_active); goto out; } @@ -2523,19 +2558,21 @@ static cpumask_t *pool_allowed_cpus(struct worker_pool *pool) * cpu-[un]hotplugs. */ static void worker_attach_to_pool(struct worker *worker, - struct worker_pool *pool) + struct worker_pool *pool) { mutex_lock(&wq_pool_attach_mutex); /* - * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains - * stable across this function. See the comments above the flag - * definition for details. + * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains stable + * across this function. See the comments above the flag definition for + * details. BH workers are, while per-CPU, always DISASSOCIATED. 
*/ - if (pool->flags & POOL_DISASSOCIATED) + if (pool->flags & POOL_DISASSOCIATED) { worker->flags |= WORKER_UNBOUND; - else + } else { + WARN_ON_ONCE(pool->flags & POOL_BH); kthread_set_per_cpu(worker->task, pool->cpu); + } if (worker->rescue_wq) set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool)); @@ -2559,6 +2596,9 @@ static void worker_detach_from_pool(struct worker *worker) struct worker_pool *pool = worker->pool; struct completion *detach_completion = NULL; + /* there is one permanent BH worker per CPU which should never detach */ + WARN_ON_ONCE(pool->flags & POOL_BH); + mutex_lock(&wq_pool_attach_mutex); kthread_set_per_cpu(worker->task, -1); @@ -2610,27 +2650,29 @@ static struct worker *create_worker(struct worker_pool *pool) worker->id = id; - if (pool->cpu >= 0) - snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, - pool->attrs->nice < 0 ? "H" : ""); - else - snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); - - worker->task = kthread_create_on_node(worker_thread, worker, pool->node, - "kworker/%s", id_buf); - if (IS_ERR(worker->task)) { - if (PTR_ERR(worker->task) == -EINTR) { - pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n", - id_buf); - } else { - pr_err_once("workqueue: Failed to create a worker thread: %pe", - worker->task); + if (!(pool->flags & POOL_BH)) { + if (pool->cpu >= 0) + snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, + pool->attrs->nice < 0 ? "H" : ""); + else + snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); + + worker->task = kthread_create_on_node(worker_thread, worker, + pool->node, "kworker/%s", id_buf); + if (IS_ERR(worker->task)) { + if (PTR_ERR(worker->task) == -EINTR) { + pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n", + id_buf); + } else { + pr_err_once("workqueue: Failed to create a worker thread: %pe", + worker->task); + } + goto fail; } - goto fail; - } - set_user_nice(worker->task, pool->attrs->nice); - kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); + set_user_nice(worker->task, pool->attrs->nice); + kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); + } /* successful, attach the worker to the pool */ worker_attach_to_pool(worker, pool); @@ -2646,7 +2688,8 @@ static struct worker *create_worker(struct worker_pool *pool) * check if not woken up soon. As kick_pool() is noop if @pool is empty, * wake it up explicitly. */ - wake_up_process(worker->task); + if (worker->task) + wake_up_process(worker->task); raw_spin_unlock_irq(&pool->lock); @@ -2988,7 +3031,8 @@ __acquires(&pool->lock) worker->current_work = work; worker->current_func = work->func; worker->current_pwq = pwq; - worker->current_at = worker->task->se.sum_exec_runtime; + if (worker->task) + worker->current_at = worker->task->se.sum_exec_runtime; work_data = *work_data_bits(work); worker->current_color = get_work_color(work_data); @@ -3086,7 +3130,8 @@ __acquires(&pool->lock) * stop_machine. At the same time, report a quiescent RCU state so * the same condition doesn't freeze RCU. */ - cond_resched(); + if (worker->task) + cond_resched(); raw_spin_lock_irq(&pool->lock); @@ -3369,6 +3414,61 @@ repeat: goto repeat; } +static void bh_worker(struct worker *worker) +{ + struct worker_pool *pool = worker->pool; + int nr_restarts = BH_WORKER_RESTARTS; + unsigned long end = jiffies + BH_WORKER_JIFFIES; + + raw_spin_lock_irq(&pool->lock); + worker_leave_idle(worker); + + /* + * This function follows the structure of worker_thread(). See there for + * explanations on each step. 
+ */ + if (!need_more_worker(pool)) + goto done; + + WARN_ON_ONCE(!list_empty(&worker->scheduled)); + worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); + + do { + struct work_struct *work = + list_first_entry(&pool->worklist, + struct work_struct, entry); + + if (assign_work(work, worker, NULL)) + process_scheduled_works(worker); + } while (keep_working(pool) && + --nr_restarts && time_before(jiffies, end)); + + worker_set_flags(worker, WORKER_PREP); +done: + worker_enter_idle(worker); + kick_pool(pool); + raw_spin_unlock_irq(&pool->lock); +} + +/* + * TODO: Convert all tasklet users to workqueue and use softirq directly. + * + * This is currently called from tasklet[_hi]action() and thus is also called + * whenever there are tasklets to run. Let's do an early exit if there's nothing + * queued. Once conversion from tasklet is complete, the need_more_worker() test + * can be dropped. + * + * After full conversion, we'll add worker->softirq_action, directly use the + * softirq action and obtain the worker pointer from the softirq_action pointer. + */ +void workqueue_softirq_action(bool highpri) +{ + struct worker_pool *pool = + &per_cpu(bh_worker_pools, smp_processor_id())[highpri]; + if (need_more_worker(pool)) + bh_worker(list_first_entry(&pool->workers, struct worker, node)); +} + /** * check_flush_dependency - check for flush dependency sanity * @target_wq: workqueue being flushed @@ -3441,6 +3541,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq, struct wq_barrier *barr, struct work_struct *target, struct worker *worker) { + static __maybe_unused struct lock_class_key bh_key, thr_key; unsigned int work_flags = 0; unsigned int work_color; struct list_head *head; @@ -3450,8 +3551,13 @@ static void insert_wq_barrier(struct pool_workqueue *pwq, * as we know for sure that this will not trigger any of the * checks and call back into the fixup functions where we * might deadlock. + * + * BH and threaded workqueues need separate lockdep keys to avoid + * spuriously triggering "inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} + * usage". */ - INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); + INIT_WORK_ONSTACK_KEY(&barr->work, wq_barrier_func, + (pwq->wq->flags & WQ_BH) ? 
&bh_key : &thr_key); __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); init_completion_map(&barr->done, &target->lockdep_map); @@ -3557,15 +3663,31 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, static void touch_wq_lockdep_map(struct workqueue_struct *wq) { +#ifdef CONFIG_LOCKDEP + if (wq->flags & WQ_BH) + local_bh_disable(); + lock_map_acquire(&wq->lockdep_map); lock_map_release(&wq->lockdep_map); + + if (wq->flags & WQ_BH) + local_bh_enable(); +#endif } static void touch_work_lockdep_map(struct work_struct *work, struct workqueue_struct *wq) { +#ifdef CONFIG_LOCKDEP + if (wq->flags & WQ_BH) + local_bh_disable(); + lock_map_acquire(&work->lockdep_map); lock_map_release(&work->lockdep_map); + + if (wq->flags & WQ_BH) + local_bh_enable(); +#endif } /** @@ -5019,10 +5141,17 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq) if (!(wq->flags & WQ_UNBOUND)) { for_each_possible_cpu(cpu) { - struct pool_workqueue **pwq_p = - per_cpu_ptr(wq->cpu_pwq, cpu); - struct worker_pool *pool = - &(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]); + struct pool_workqueue **pwq_p; + struct worker_pool __percpu *pools; + struct worker_pool *pool; + + if (wq->flags & WQ_BH) + pools = bh_worker_pools; + else + pools = cpu_worker_pools; + + pool = &(per_cpu_ptr(pools, cpu)[highpri]); + pwq_p = per_cpu_ptr(wq->cpu_pwq, cpu); *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); @@ -5197,6 +5326,13 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, size_t wq_size; int name_len; + if (flags & WQ_BH) { + if (WARN_ON_ONCE(flags & ~__WQ_BH_ALLOWS)) + return NULL; + if (WARN_ON_ONCE(max_active)) + return NULL; + } + /* * Unbound && max_active == 1 used to imply ordered, which is no longer * the case on many machines due to per-pod pools. While @@ -5234,8 +5370,16 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n", wq->name); - max_active = max_active ?: WQ_DFL_ACTIVE; - max_active = wq_clamp_max_active(max_active, flags, wq->name); + if (flags & WQ_BH) { + /* + * BH workqueues always share a single execution context per CPU + * and don't impose any max_active limit. + */ + max_active = INT_MAX; + } else { + max_active = max_active ?: WQ_DFL_ACTIVE; + max_active = wq_clamp_max_active(max_active, flags, wq->name); + } /* init wq */ wq->flags = flags; @@ -5416,6 +5560,9 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); */ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) { + /* max_active doesn't mean anything for BH workqueues */ + if (WARN_ON(wq->flags & WQ_BH)) + return; /* disallow meddling with max_active for ordered workqueues */ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) return; @@ -5617,7 +5764,24 @@ static void pr_cont_pool_info(struct worker_pool *pool) pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); if (pool->node != NUMA_NO_NODE) pr_cont(" node=%d", pool->node); - pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); + pr_cont(" flags=0x%x", pool->flags); + if (pool->flags & POOL_BH) + pr_cont(" bh%s", + pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); + else + pr_cont(" nice=%d", pool->attrs->nice); +} + +static void pr_cont_worker_id(struct worker *worker) +{ + struct worker_pool *pool = worker->pool; + + if (pool->flags & WQ_BH) + pr_cont("bh%s", + pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); + else + pr_cont("%d%s", task_pid_nr(worker->task), + worker->rescue_wq ? 
"(RESCUER)" : ""); } struct pr_cont_work_struct { @@ -5694,10 +5858,9 @@ static void show_pwq(struct pool_workqueue *pwq) if (worker->current_pwq != pwq) continue; - pr_cont("%s %d%s:%ps", comma ? "," : "", - task_pid_nr(worker->task), - worker->rescue_wq ? "(RESCUER)" : "", - worker->current_func); + pr_cont(" %s", comma ? "," : ""); + pr_cont_worker_id(worker); + pr_cont(":%ps", worker->current_func); list_for_each_entry(work, &worker->scheduled, entry) pr_cont_work(false, work, &pcws); pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); @@ -5816,8 +5979,8 @@ static void show_one_worker_pool(struct worker_pool *pool) pr_cont(" manager: %d", task_pid_nr(pool->manager->task)); list_for_each_entry(worker, &pool->idle_list, entry) { - pr_cont(" %s%d", first ? "idle: " : "", - task_pid_nr(worker->task)); + pr_cont(" %s", first ? "idle: " : ""); + pr_cont_worker_id(worker); first = false; } pr_cont("\n"); @@ -6090,13 +6253,15 @@ int workqueue_online_cpu(unsigned int cpu) mutex_lock(&wq_pool_mutex); for_each_pool(pool, pi) { - mutex_lock(&wq_pool_attach_mutex); + /* BH pools aren't affected by hotplug */ + if (pool->flags & POOL_BH) + continue; + mutex_lock(&wq_pool_attach_mutex); if (pool->cpu == cpu) rebind_workers(pool); else if (pool->cpu < 0) restore_unbound_workers_cpumask(pool, cpu); - mutex_unlock(&wq_pool_attach_mutex); } @@ -7053,7 +7218,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused) /* did we stall? */ if (time_after(now, ts + thresh)) { lockup_detected = true; - if (pool->cpu >= 0) { + if (pool->cpu >= 0 && !(pool->flags & POOL_BH)) { pool->cpu_stall = true; cpu_pool_stall = true; } @@ -7218,10 +7383,16 @@ void __init workqueue_init_early(void) pt->pod_node[0] = NUMA_NO_NODE; pt->cpu_pod[0] = 0; - /* initialize CPU pools */ + /* initialize BH and CPU pools */ for_each_possible_cpu(cpu) { struct worker_pool *pool; + i = 0; + for_each_bh_worker_pool(pool, cpu) { + init_cpu_worker_pool(pool, cpu, std_nice[i++]); + pool->flags |= POOL_BH; + } + i = 0; for_each_cpu_worker_pool(pool, cpu) init_cpu_worker_pool(pool, cpu, std_nice[i++]); @@ -7257,10 +7428,14 @@ void __init workqueue_init_early(void) system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_pwr_efficient", WQ_FREEZABLE | WQ_POWER_EFFICIENT, 0); + system_bh_wq = alloc_workqueue("events_bh", WQ_BH, 0); + system_bh_highpri_wq = alloc_workqueue("events_bh_highpri", + WQ_BH | WQ_HIGHPRI, 0); BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || !system_unbound_wq || !system_freezable_wq || !system_power_efficient_wq || - !system_freezable_power_efficient_wq); + !system_freezable_power_efficient_wq || + !system_bh_wq || !system_bh_highpri_wq); } static void __init wq_cpu_intensive_thresh_init(void) @@ -7326,9 +7501,10 @@ void __init workqueue_init(void) * up. Also, create a rescuer for workqueues that requested it. */ for_each_possible_cpu(cpu) { - for_each_cpu_worker_pool(pool, cpu) { + for_each_bh_worker_pool(pool, cpu) + pool->node = cpu_to_node(cpu); + for_each_cpu_worker_pool(pool, cpu) pool->node = cpu_to_node(cpu); - } } list_for_each_entry(wq, &workqueues, list) { @@ -7339,7 +7515,16 @@ void __init workqueue_init(void) mutex_unlock(&wq_pool_mutex); - /* create the initial workers */ + /* + * Create the initial workers. A BH pool has one pseudo worker that + * represents the shared BH execution context and thus doesn't get + * affected by hotplug events. Create the BH pseudo workers for all + * possible CPUs here. 
+ */ + for_each_possible_cpu(cpu) + for_each_bh_worker_pool(pool, cpu) + BUG_ON(!create_worker(pool)); + for_each_online_cpu(cpu) { for_each_cpu_worker_pool(pool, cpu) { pool->flags &= ~POOL_DISASSOCIATED; -- cgit v1.2.3 From 06b23f92af87a84d70881b2ecaa72e00f7838264 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 16 Jan 2024 09:18:39 -0700 Subject: block: update cached timestamp post schedule/preemption Mark the task as having a cached timestamp when one is assigned, so we can efficiently check whether it needs updating after being scheduled back in. This covers both the actual schedule-out case, which would've flushed the plug, and the preemption case, which doesn't touch the plugged requests (for many reasons, one of them being that we'd need to have preemption disabled around plug state manipulation). Reviewed-by: Johannes Thumshirn Signed-off-by: Jens Axboe --- kernel/sched/core.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9116bcc90346..083f2258182d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6787,10 +6787,12 @@ static inline void sched_submit_work(struct task_struct *tsk) static void sched_update_worker(struct task_struct *tsk) { - if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { + if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) { + if (tsk->flags & PF_BLOCK_TS) + blk_plug_invalidate_ts(tsk); if (tsk->flags & PF_WQ_WORKER) wq_worker_running(tsk); - else + else if (tsk->flags & PF_IO_WORKER) io_wq_worker_running(tsk); } } -- cgit v1.2.3 From 4f19b8e01e2fb6c97d4307abb7bde4d34a1e601e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 5 Feb 2024 07:18:08 -1000 Subject: Revert "workqueue: make wq_subsys const" This reverts commit d412ace11144aa2bf692c7cf9778351efc15c827. This leads to build failures as it depends on a driver-core commit 32f78abe59c7 ("driver core: bus: constantify subsys_register() calls"). Let's drop it from the wq tree and route it through the driver-core tree. Signed-off-by: Tejun Heo Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202402051505.kM9Rr3CJ-lkp@intel.com/ --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 78b4b992e1a3..524a7fff52af 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -6874,7 +6874,7 @@ static struct device_attribute wq_sysfs_unbound_attrs[] = { __ATTR_NULL, }; -static const struct bus_type wq_subsys = { +static struct bus_type wq_subsys = { .name = "workqueue", .dev_groups = wq_sysfs_groups, }; -- cgit v1.2.3 From 96068b6030391082bf0cd97af525d731afa5ad63 Mon Sep 17 00:00:00 2001 From: Wang Jinchao Date: Mon, 5 Feb 2024 08:31:52 +0800 Subject: workqueue: fix a typo in comment The comment should say "three-staged", fix it. Signed-off-by: Wang Jinchao Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 524a7fff52af..d7fdb631ecf7 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -7604,7 +7604,7 @@ static bool __init cpus_share_numa(int cpu0, int cpu1) /** * workqueue_init_topology - initialize CPU pods for unbound workqueues * - * This is the third step of there-staged workqueue subsystem initialization and + * This is the third step of three-staged workqueue subsystem initialization and * invoked after SMP and topology information are fully initialized.
It * initializes the unbound CPU pods accordingly. */ -- cgit v1.2.3 From 8eb17dc1a6b5db7e89681f59285242af8d182f95 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Sat, 3 Feb 2024 10:43:30 -0500 Subject: workqueue: Skip __WQ_DESTROYING workqueues when updating global unbound cpumask Skip updating workqueues with __WQ_DESTROYING bit set when updating global unbound cpumask to avoid unnecessary work and other complications. Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index d7fdb631ecf7..68c48489eab3 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -6501,7 +6501,7 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask) lockdep_assert_held(&wq_pool_mutex); list_for_each_entry(wq, &workqueues, list) { - if (!(wq->flags & WQ_UNBOUND)) + if (!(wq->flags & WQ_UNBOUND) || (wq->flags & __WQ_DESTROYING)) continue; /* creating multiple pwqs breaks ordering guarantee */ -- cgit v1.2.3 From 7245d24f874d781cf3f1530e6d24e1e0eba4269a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 4 Feb 2024 11:34:28 -1000 Subject: backtracetest: Convert from tasklet to BH workqueue The only generic interface to execute asynchronously in the BH context is tasklet; however, it's marked deprecated and has some design flaws. To replace tasklets, BH workqueue support was recently added. A BH workqueue behaves similarly to regular workqueues except that the queued work items are executed in the BH context. This patch converts backtracetest from tasklet to BH workqueue. - Replace "irq" with "bh" in names and message to better reflect what's happening. - Replace completion usage with a flush_work() call. 
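The general shape of such a tasklet-to-BH-workqueue conversion, as a rough sketch (the foo_* names are hypothetical and not part of this patch):

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    /* Before: a deprecated tasklet. */
    static void foo_tasklet_fn(unsigned long data)
    {
            /* runs in BH context */
    }
    static DECLARE_TASKLET_OLD(foo_tasklet, foo_tasklet_fn);
    /* fired with tasklet_schedule(&foo_tasklet) */

    /* After: a plain work item queued on the system BH workqueue. */
    static void foo_workfn(struct work_struct *work)
    {
            /* still runs in BH (softirq) context; must not sleep */
    }
    static DECLARE_WORK(foo_work, foo_workfn);

    static void foo_trigger_and_wait(void)
    {
            queue_work(system_bh_wq, &foo_work);    /* was tasklet_schedule() */
            flush_work(&foo_work);                  /* replaces the completion */
    }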
Signed-off-by: Tejun Heo Cc: Arjan van de Ven --- kernel/backtracetest.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/backtracetest.c b/kernel/backtracetest.c index 370217dd7e39..a4181234232b 100644 --- a/kernel/backtracetest.c +++ b/kernel/backtracetest.c @@ -21,24 +21,20 @@ static void backtrace_test_normal(void) dump_stack(); } -static DECLARE_COMPLETION(backtrace_work); - -static void backtrace_test_irq_callback(unsigned long data) +static void backtrace_test_bh_workfn(struct work_struct *work) { dump_stack(); - complete(&backtrace_work); } -static DECLARE_TASKLET_OLD(backtrace_tasklet, &backtrace_test_irq_callback); +static DECLARE_WORK(backtrace_bh_work, &backtrace_test_bh_workfn); -static void backtrace_test_irq(void) +static void backtrace_test_bh(void) { - pr_info("Testing a backtrace from irq context.\n"); + pr_info("Testing a backtrace from BH context.\n"); pr_info("The following trace is a kernel self test and not a bug!\n"); - init_completion(&backtrace_work); - tasklet_schedule(&backtrace_tasklet); - wait_for_completion(&backtrace_work); + queue_work(system_bh_wq, &backtrace_bh_work); + flush_work(&backtrace_bh_work); } #ifdef CONFIG_STACKTRACE @@ -65,7 +61,7 @@ static int backtrace_regression_test(void) pr_info("====[ backtrace testing ]===========\n"); backtrace_test_normal(); - backtrace_test_irq(); + backtrace_test_bh(); backtrace_test_saved(); pr_info("====[ end of backtrace testing ]====\n"); -- cgit v1.2.3 From 3bc1e711c26bff01d41ad71145ecb8dcb4412576 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 5 Feb 2024 14:19:10 -1000 Subject: workqueue: Don't implicitly make UNBOUND workqueues w/ @max_active==1 ordered 5c0338c68706 ("workqueue: restore WQ_UNBOUND/max_active==1 to be ordered") automatically promoted UNBOUND workqueues w/ @max_active==1 to ordered workqueues because UNBOUND workqueues w/ @max_active==1 used to be the way to create ordered workqueues and the new NUMA support broke it. These problems can be subtle and the fact that they can only trigger on NUMA machines made them even more difficult to debug. However, overloading the UNBOUND allocation interface this way creates other issues. It's difficult to tell whether a given workqueue actually needs to be ordered, and users that legitimately want a min concurrency level wq unexpectedly get an ordered one instead. With planned UNBOUND workqueue updates to improve execution locality and more prevalence of chiplet designs which can benefit from such improvements, this isn't a state we want to be in forever. There aren't that many UNBOUND w/ @max_active==1 users in the tree and the preceding patches audited them all and converted them to alloc_ordered_workqueue() as appropriate. This patch removes the implicit promotion of UNBOUND w/ @max_active==1 workqueues to ordered ones. v2: v1 patch incorrectly dropped !list_empty(&wq->pwqs) condition in apply_workqueue_attrs_locked() which spuriously triggers WARNING and fails workqueue creation. Fix it.
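To make the new rule concrete, a brief sketch (hypothetical names, not part of this patch); ordering now has to be requested explicitly rather than being inferred from max_active:

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static int foo_init(void)
    {
            /* Strict one-at-a-time FIFO execution: say so explicitly. */
            struct workqueue_struct *ordered_wq =
                    alloc_ordered_workqueue("foo_ordered", 0);

            /* An unbound workqueue that merely has max_active == 1;
             * this no longer makes it ordered. */
            struct workqueue_struct *plain_wq =
                    alloc_workqueue("foo_unbound", WQ_UNBOUND, 1);

            return (ordered_wq && plain_wq) ? 0 : -ENOMEM;
    }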
Signed-off-by: Tejun Heo Reported-by: kernel test robot Link: https://lore.kernel.org/oe-lkp/202304251050.45a5df1f-oliver.sang@intel.com --- kernel/workqueue.c | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 68c48489eab3..ecc775843bfa 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -5007,12 +5007,8 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, return -EINVAL; /* creating multiple pwqs breaks ordering guarantee */ - if (!list_empty(&wq->pwqs)) { - if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) - return -EINVAL; - - wq->flags &= ~__WQ_ORDERED; - } + if (!list_empty(&wq->pwqs) && WARN_ON(wq->flags & __WQ_ORDERED)) + return -EINVAL; ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); if (IS_ERR(ctx)) @@ -5333,15 +5329,6 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, return NULL; } - /* - * Unbound && max_active == 1 used to imply ordered, which is no longer - * the case on many machines due to per-pod pools. While - * alloc_ordered_workqueue() is the right way to create an ordered - * workqueue, keep the previous behavior to avoid subtle breakages. - */ - if ((flags & WQ_UNBOUND) && max_active == 1) - flags |= __WQ_ORDERED; - /* see the comment above the definition of WQ_POWER_EFFICIENT */ if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) flags |= WQ_UNBOUND; @@ -5564,14 +5551,13 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) if (WARN_ON(wq->flags & WQ_BH)) return; /* disallow meddling with max_active for ordered workqueues */ - if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) + if (WARN_ON(wq->flags & __WQ_ORDERED)) return; max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); mutex_lock(&wq->mutex); - wq->flags &= ~__WQ_ORDERED; wq->saved_max_active = max_active; if (wq->flags & WQ_UNBOUND) wq->saved_min_active = min(wq->saved_min_active, max_active); @@ -7028,7 +7014,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) * attributes breaks ordering guarantee. Disallow exposing ordered * workqueues. */ - if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) + if (WARN_ON(wq->flags & __WQ_ORDERED)) return -EINVAL; wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); -- cgit v1.2.3 From 9ed52108f6478a6a805c0c15a3f70aabba07247e Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 5 Feb 2024 15:13:48 +0100 Subject: pidfd: change do_notify_pidfd() to use __wake_up(poll_to_key(EPOLLIN)) rather than wake_up_all(). This way do_notify_pidfd() won't wakeup the POLLHUP-only waiters which wait for pid_task() == NULL. TODO: - as Christian pointed out, this asks for the new wake_up_all_poll() helper, it can already have other users. - we can probably discriminate the PIDFD_THREAD and non-PIDFD_THREAD waiters, but this needs more work. 
See https://lore.kernel.org/all/20240205140848.GA15853@redhat.com/ Signed-off-by: Oleg Nesterov Link: https://lore.kernel.org/r/20240205141348.GA16539@redhat.com Reviewed-by: Tycho Andersen Signed-off-by: Christian Brauner --- kernel/signal.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index 9b40109f0c56..c3fac06937e2 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2021,11 +2021,12 @@ ret: void do_notify_pidfd(struct task_struct *task) { - struct pid *pid; + struct pid *pid = task_pid(task); WARN_ON(task->exit_state == 0); - pid = task_pid(task); - wake_up_all(&pid->wait_pidfd); + + __wake_up(&pid->wait_pidfd, TASK_NORMAL, 0, + poll_to_key(EPOLLIN | EPOLLRDNORM)); } /* -- cgit v1.2.3 From e2e8a142fbd988d658ccb3da1d6f4b26a39de0fd Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 5 Feb 2024 18:43:47 +0100 Subject: pidfd: exit: kill the no longer used thread_group_exited() It was used by pidfd_poll() but now it has no callers. If it finally finds a modular user we can revert this change, but note that the comment above this helper and the changelog in 38fd525a4c61 ("exit: Factor thread_group_exited out of pidfd_poll") are not accurate, thread_group_exited() won't return true if all other threads have passed exit_notify() and are zombies, it returns true only when all other threads are completely gone. Not to mention that it can only work if the task identified by @pid is a thread-group leader. Signed-off-by: Oleg Nesterov Link: https://lore.kernel.org/r/20240205174347.GA31461@redhat.com Reviewed-by: Tycho Andersen Signed-off-by: Christian Brauner --- kernel/exit.c | 24 ------------------------ 1 file changed, 24 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index c038d10dfb38..0e2f5dec91fb 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1900,30 +1900,6 @@ Efault: } #endif -/** - * thread_group_exited - check that a thread group has exited - * @pid: tgid of thread group to be checked. - * - * Test if the thread group represented by tgid has exited (all - * threads are zombies, dead or completely gone). - * - * Return: true if the thread group has exited. false otherwise. - */ -bool thread_group_exited(struct pid *pid) -{ - struct task_struct *task; - bool exited; - - rcu_read_lock(); - task = pid_task(pid, PIDTYPE_PID); - exited = !task || - (READ_ONCE(task->exit_state) && thread_group_empty(task)); - rcu_read_unlock(); - - return exited; -} -EXPORT_SYMBOL(thread_group_exited); - /* * This needs to be __function_aligned as GCC implicitly makes any * implementation of abort() cold and drops alignment specified by -- cgit v1.2.3 From 83b290c9e3b5d95891f43a4aeadf6071cbff25d3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 5 Feb 2024 15:55:32 +0100 Subject: pidfd: clone: allow CLONE_THREAD | CLONE_PIDFD together copy_process() just needs to pass PIDFD_THREAD to __pidfd_prepare() if clone_flags & CLONE_THREAD. We can also add another CLONE_ flag (or perhaps reuse CLONE_DETACHED) to enforce PIDFD_THREAD without CLONE_THREAD. 
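For context, a rough userspace sketch of what this change permits (illustrative only: it assumes a kernel with this series and headers exposing SYS_clone3, and omits error handling; raw clone3() thread creation is delicate because the child runs on the new stack and must not return into or touch the parent's frame, so real code should do what pthread_create() does):

    #define _GNU_SOURCE
    #include <linux/sched.h>        /* struct clone_args, CLONE_* */
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <poll.h>
    #include <stdint.h>
    #include <stdio.h>

    static char thread_stack[64 * 1024];    /* demo-sized thread stack */

    int main(void)
    {
            int pidfd = -1;
            struct clone_args args = {
                    .flags          = CLONE_PIDFD | CLONE_THREAD | CLONE_VM |
                                      CLONE_SIGHAND | CLONE_FS | CLONE_FILES,
                    .pidfd          = (uintptr_t)&pidfd,    /* fd written here */
                    .stack          = (uintptr_t)thread_stack,
                    .stack_size     = sizeof(thread_stack),
                    .exit_signal    = 0,    /* mandatory with CLONE_THREAD */
            };
            struct pollfd pfd;

            /* New thread: exit immediately, touching nothing on the old stack. */
            if (syscall(SYS_clone3, &args, sizeof(args)) == 0)
                    syscall(SYS_exit, 0);

            /* The pidfd carries PIDFD_THREAD semantics: poll() reports the
             * exit of this one thread, not of the whole thread group. */
            pfd.fd = pidfd;
            pfd.events = POLLIN;
            poll(&pfd, 1, -1);
            printf("thread exited\n");
            return 0;
    }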
Originally-from: Tycho Andersen Signed-off-by: Oleg Nesterov Link: https://lore.kernel.org/r/20240205145532.GA28823@redhat.com Reviewed-by: Tycho Andersen Signed-off-by: Christian Brauner --- kernel/fork.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index aa08193d124f..4b6d994505ca 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2311,9 +2311,8 @@ __latent_entropy struct task_struct *copy_process( /* * - CLONE_DETACHED is blocked so that we can potentially * reuse it later for CLONE_PIDFD. - * - CLONE_THREAD is blocked until someone really needs it. */ - if (clone_flags & (CLONE_DETACHED | CLONE_THREAD)) + if (clone_flags & CLONE_DETACHED) return ERR_PTR(-EINVAL); } @@ -2536,8 +2535,10 @@ __latent_entropy struct task_struct *copy_process( * if the fd table isn't shared). */ if (clone_flags & CLONE_PIDFD) { + int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0; + /* Note that no task has been attached to @pid yet. */ - retval = __pidfd_prepare(pid, 0, &pidfile); + retval = __pidfd_prepare(pid, flags, &pidfile); if (retval < 0) goto bad_fork_free_pid; pidfd = retval; -- cgit v1.2.3 From 0c9bd6bc4bb2ecfe8ce12e9a77ccd762dabe18b4 Mon Sep 17 00:00:00 2001 From: Tycho Andersen Date: Wed, 7 Feb 2024 10:19:29 +0100 Subject: pidfd: getfd should always report ESRCH if a task is exiting We can get EBADF from pidfd_getfd() if a task is currently exiting, which might be confusing. Let's check PF_EXITING, and just report ESRCH if so. I chose PF_EXITING, because it is set in exit_signals(), which is called before exit_files(). Since ->exit_status is mostly set after exit_files() in exit_notify(), using that still leaves a window open for the race. Reviewed-by: Oleg Nesterov Signed-off-by: Tycho Andersen Link: https://lore.kernel.org/r/20240206192357.81942-1-tycho@tycho.pizza Signed-off-by: Christian Brauner --- kernel/pid.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/pid.c b/kernel/pid.c index de0bf2f8d18b..c1d940fbd314 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -678,7 +678,26 @@ static struct file *__pidfd_fget(struct task_struct *task, int fd) up_read(&task->signal->exec_update_lock); - return file ?: ERR_PTR(-EBADF); + if (!file) { + /* + * It is possible that the target thread is exiting; it can be + * either: + * 1. before exit_signals(), which gives a real fd + * 2. before exit_files() takes the task_lock() gives a real fd + * 3. after exit_files() releases task_lock(), ->files is NULL; + * this has PF_EXITING, since it was set in exit_signals(), + * __pidfd_fget() returns EBADF. + * In case 3 we get EBADF, but that really means ESRCH, since + * the task is currently exiting and has freed its files + * struct, so we fix it up. + */ + if (task->flags & PF_EXITING) + file = ERR_PTR(-ESRCH); + else + file = ERR_PTR(-EBADF); + } + + return file; } static int pidfd_getfd(struct pid *pid, int fd) -- cgit v1.2.3 From 2bc7fc24f9a85cb6b7335354b2733615727689f6 Mon Sep 17 00:00:00 2001 From: "Ricardo B. Marliere" Date: Sun, 4 Feb 2024 10:40:15 -0300 Subject: clocksource: Make clocksource_subsys const Now that the driver core can properly handle constant struct bus_type, move the clocksource_subsys variable to be a constant structure as well, placing it into read-only memory which can not be modified at runtime. Suggested-by: Greg Kroah-Hartman Signed-off-by: Ricardo B. 
Marliere Signed-off-by: Thomas Gleixner Reviewed-by: Greg Kroah-Hartman Acked-by: John Stultz Link: https://lore.kernel.org/r/20240204-bus_cleanup-time-v1-1-207ec18e24b8@marliere.net --- kernel/time/clocksource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 3052b1f1168e..4ef06651ad07 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -1468,7 +1468,7 @@ static struct attribute *clocksource_attrs[] = { }; ATTRIBUTE_GROUPS(clocksource); -static struct bus_type clocksource_subsys = { +static const struct bus_type clocksource_subsys = { .name = "clocksource", .dev_name = "clocksource", }; -- cgit v1.2.3 From 49f1ff50d49fb8b40bc0271177de8092226396e9 Mon Sep 17 00:00:00 2001 From: "Ricardo B. Marliere" Date: Sun, 4 Feb 2024 10:40:16 -0300 Subject: clockevents: Make clockevents_subsys const Now that the driver core can properly handle constant struct bus_type, move the clockevents_subsys variable to be a constant structure as well, placing it into read-only memory which can not be modified at runtime. Suggested-by: Greg Kroah-Hartman Signed-off-by: Ricardo B. Marliere Signed-off-by: Thomas Gleixner Reviewed-by: Greg Kroah-Hartman Link: https://lore.kernel.org/r/20240204-bus_cleanup-time-v1-2-207ec18e24b8@marliere.net --- kernel/time/clockevents.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 960143b183cd..a7ca458cdd9c 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -659,7 +659,7 @@ void tick_cleanup_dead_cpu(int cpu) #endif #ifdef CONFIG_SYSFS -static struct bus_type clockevents_subsys = { +static const struct bus_type clockevents_subsys = { .name = "clockevents", .dev_name = "clockevent", }; -- cgit v1.2.3 From 4b7f521229ef4eee06848427d865954e6e0e3675 Mon Sep 17 00:00:00 2001 From: Peter Hilber Date: Thu, 1 Feb 2024 02:04:51 +0100 Subject: timekeeping: Evaluate system_counterval_t.cs_id instead of .cs Clocksource pointers can be problematic to obtain for drivers which are not clocksource drivers themselves. In particular, the RFC virtio_rtc driver [1] would require a new helper function to obtain a pointer to the ARM Generic Timer clocksource. The ptp_kvm driver also required a similar workaround. Address this by evaluating the clocksource ID, rather than the clocksource pointer, of struct system_counterval_t. By this, setting the clocksource pointer becomes unneeded, and get_device_system_crosststamp() callers will no longer need to supply clocksource pointers. All relevant clocksource drivers provide the ID, so this change is not changing the behaviour. 
[1] https://lore.kernel.org/lkml/20231218073849.35294-1-peter.hilber@opensynergy.com/ Signed-off-by: Peter Hilber Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240201010453.2212371-7-peter.hilber@opensynergy.com --- kernel/time/timekeeping.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 266d02809dbb..0ff065c5d25b 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1232,11 +1232,12 @@ int get_device_system_crosststamp(int (*get_time_fn) return ret; /* - * Verify that the clocksource associated with the captured - * system counter value is the same as the currently installed - * timekeeper clocksource + * Verify that the clocksource ID associated with the captured + * system counter value is the same as for the currently + * installed timekeeper clocksource */ - if (tk->tkr_mono.clock != system_counterval.cs) + if (system_counterval.cs_id == CSID_GENERIC || + tk->tkr_mono.clock->id != system_counterval.cs_id) return -ENODEV; cycles = system_counterval.cycles; -- cgit v1.2.3 From 26fb7e3dda4c16e2cfe2164a1e7315a9386602db Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 8 Feb 2024 11:10:11 -0500 Subject: workqueue: Link pwq's into wq->pwqs from oldest to newest Add a new pwq at the tail of wq->pwqs so that pwq iteration will go from the oldest pwq to the newest. This ordering will facilitate the inclusion of ordered workqueues in a wq_unbound_cpumask update. Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index cf514ba0dfc3..fa7bd3b34f52 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4804,7 +4804,7 @@ static void link_pwq(struct pool_workqueue *pwq) pwq->work_color = wq->work_color; /* link in @pwq */ - list_add_rcu(&pwq->pwqs_node, &wq->pwqs); + list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs); } /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ -- cgit v1.2.3 From 4c065dbce1e8639546ef3612acffb062dd084cfe Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 8 Feb 2024 14:12:20 -0500 Subject: workqueue: Enable unbound cpumask update on ordered workqueues Ordered workqueues do not currently follow changes made to the global unbound cpumask because per-pool workqueue changes may break the ordering guarantee. IOW, a work function in an ordered workqueue may run on an isolated CPU. This patch enables ordered workqueues to follow changes made to the global unbound cpumask by temporarily plugging (suspending) the newly allocated pool_workqueue so that it does not execute newly queued work items until the old pwq has been properly drained. For ordered workqueues, there should only be one pwq that is unplugged; the rest should be plugged. This enables ordered workqueues to follow the unbound cpumask changes like other unbound workqueues at the expense of some delay in execution of work functions during the transition period.
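The guarantee being preserved can be pictured with a small sketch (hypothetical names, not part of this patch):

    #include <linux/workqueue.h>

    static void foo_workfn(struct work_struct *work) { }
    static DECLARE_WORK(w1, foo_workfn);
    static DECLARE_WORK(w2, foo_workfn);
    static DECLARE_WORK(w3, foo_workfn);

    static void foo_demo(struct workqueue_struct *ordered_wq)
    {
            queue_work(ordered_wq, &w1);    /* lands on the old pwq */
            queue_work(ordered_wq, &w2);    /* lands on the old pwq */
            /* wq_unbound_cpumask is updated here: the replacement dfl_pwq
             * is created in the plugged state... */
            queue_work(ordered_wq, &w3);    /* lands on the new, plugged pwq */
            /* ...so w3 cannot start until w1 and w2 have finished and the
             * old pwq is drained; FIFO order w1 -> w2 -> w3 still holds. */
    }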
Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 59 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index fa7bd3b34f52..da124859a691 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -255,6 +255,7 @@ struct pool_workqueue { int refcnt; /* L: reference count */ int nr_in_flight[WORK_NR_COLORS]; /* L: nr of in_flight works */ + bool plugged; /* L: execution suspended */ /* * nr_active management and WORK_STRUCT_INACTIVE: @@ -1708,6 +1709,9 @@ static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill) goto out; } + if (unlikely(pwq->plugged)) + return false; + /* * Unbound workqueue uses per-node shared nr_active $nna. If @pwq is * already waiting on $nna, pwq_dec_nr_active() will maintain the @@ -1782,6 +1786,43 @@ static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill) } } +/** + * unplug_oldest_pwq - restart an oldest plugged pool_workqueue + * @wq: workqueue_struct to be restarted + * + * pwq's are linked into wq->pwqs with the oldest first. For ordered + * workqueues, only the oldest pwq is unplugged, the others are plugged to + * suspend execution until the oldest one is drained. When this happens, the + * next oldest one (first plugged pwq in iteration) will be unplugged to + * restart work item execution to ensure proper work item ordering. + * + * dfl_pwq --------------+ [P] - plugged + * | + * v + * pwqs -> A -> B [P] -> C [P] (newest) + * | | | + * 1 3 5 + * | | | + * 2 4 6 + */ +static void unplug_oldest_pwq(struct workqueue_struct *wq) +{ + struct pool_workqueue *pwq; + + lockdep_assert_held(&wq->mutex); + + /* Caller should make sure that pwqs isn't empty before calling */ + pwq = list_first_entry_or_null(&wq->pwqs, struct pool_workqueue, + pwqs_node); + raw_spin_lock_irq(&pwq->pool->lock); + if (pwq->plugged) { + pwq->plugged = false; + if (pwq_activate_first_inactive(pwq, true)) + kick_pool(pwq->pool); + } + raw_spin_unlock_irq(&pwq->pool->lock); +} + /** * node_activate_pending_pwq - Activate a pending pwq on a wq_node_nr_active * @nna: wq_node_nr_active to activate a pending pwq for @@ -4740,6 +4781,13 @@ static void pwq_release_workfn(struct kthread_work *work) mutex_lock(&wq->mutex); list_del_rcu(&pwq->pwqs_node); is_last = list_empty(&wq->pwqs); + + /* + * For ordered workqueue with a plugged dfl_pwq, restart it now. + */ + if (!is_last && (wq->flags & __WQ_ORDERED)) + unplug_oldest_pwq(wq); + mutex_unlock(&wq->mutex); } @@ -4966,6 +5014,15 @@ apply_wqattrs_prepare(struct workqueue_struct *wq, cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); ctx->attrs = new_attrs; + /* + * For initialized ordered workqueues, there should only be one pwq + * (dfl_pwq). Set the plugged flag of ctx->dfl_pwq to suspend execution + * of newly queued work items until execution of older work items in + * the old pwq's have completed. 
+ */ + if ((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)) + ctx->dfl_pwq->plugged = true; + ctx->wq = wq; return ctx; @@ -5006,10 +5063,6 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, if (WARN_ON(!(wq->flags & WQ_UNBOUND))) return -EINVAL; - /* creating multiple pwqs breaks ordering guarantee */ - if (!list_empty(&wq->pwqs) && WARN_ON(wq->flags & __WQ_ORDERED)) - return -EINVAL; - ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -6489,9 +6542,6 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask) list_for_each_entry(wq, &workqueues, list) { if (!(wq->flags & WQ_UNBOUND) || (wq->flags & __WQ_DESTROYING)) continue; - /* creating multiple pwqs breaks ordering guarantee */ - if (wq->flags & __WQ_ORDERED) - continue; ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask); if (IS_ERR(ctx)) { @@ -7006,9 +7056,8 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) int ret; /* - * Adjusting max_active or creating new pwqs by applying - * attributes breaks ordering guarantee. Disallow exposing ordered - * workqueues. + * Adjusting max_active breaks ordering guarantee. Disallow exposing + * ordered workqueues. */ if (WARN_ON(wq->flags & __WQ_ORDERED)) return -EINVAL; -- cgit v1.2.3 From d64f2fa064f8866802e23c8ec95d9d1f601480ee Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Thu, 8 Feb 2024 11:10:13 -0500 Subject: kernel/workqueue: Let rescuers follow unbound wq cpumask changes When workqueue cpumask changes are committed the associated rescuer (if one exists) affinity is not touched and this might be a problem down the line for isolated setups. Make sure rescuers affinity is updated every time a workqueue cpumask changes, so that rescuers can't break isolation. [longman: set_cpus_allowed_ptr() will block until the designated task is enqueued on an allowed CPU, no wake_up_process() needed. Also use the unbound_effective_cpumask() helper as suggested by Tejun.] Signed-off-by: Juri Lelli Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index da124859a691..a24c7cfb80b4 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -5051,6 +5051,11 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) /* update node_nr_active->max */ wq_update_node_max_active(ctx->wq, -1); + /* rescuer needs to respect wq cpumask changes */ + if (ctx->wq->rescuer) + set_cpus_allowed_ptr(ctx->wq->rescuer->task, + unbound_effective_cpumask(ctx->wq)); + mutex_unlock(&ctx->wq->mutex); } -- cgit v1.2.3 From 49584bb8ddbe8bcfc276c2d7dd3c8890f45f5970 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 8 Feb 2024 11:10:14 -0500 Subject: workqueue: Bind unbound workqueue rescuer to wq_unbound_cpumask Commit 85f0ab43f9de ("kernel/workqueue: Bind rescuer to unbound cpumask for WQ_UNBOUND") modified init_rescuer() to bind rescuer of an unbound workqueue to the cpumask in wq->unbound_attrs. However unbound_attrs->cpumask's of all workqueues are initialized to cpu_possible_mask and will only be changed if it has the WQ_SYSFS flag to expose a cpumask sysfs file to be written by users. So this patch doesn't achieve what it is intended to do. 
If an unbound workqueue is created after wq_unbound_cpumask is modified and there is no more unbound cpumask update after that, the unbound rescuer will be bound to all CPUs unless the workqueue is created with the WQ_SYSFS flag and a user explicitly modified its cpumask sysfs file. Fix this problem by binding directly to wq_unbound_cpumask in init_rescuer(). Fixes: 85f0ab43f9de ("kernel/workqueue: Bind rescuer to unbound cpumask for WQ_UNBOUND") Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a24c7cfb80b4..cd2c6edc5c66 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -5299,7 +5299,7 @@ static int init_rescuer(struct workqueue_struct *wq) wq->rescuer = rescuer; if (wq->flags & WQ_UNBOUND) - kthread_bind_mask(rescuer->task, wq->unbound_attrs->cpumask); + kthread_bind_mask(rescuer->task, wq_unbound_cpumask); else kthread_bind_mask(rescuer->task, cpu_possible_mask); wake_up_process(rescuer->task); -- cgit v1.2.3 From 516d3dc99f4f2ab856d879696cd3a5d7f6db7796 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Fri, 9 Feb 2024 12:06:11 -0500 Subject: workqueue: Fix kernel-doc comment of unplug_oldest_pwq() Fix the kernel-doc comment of the unplug_oldest_pwq() function to enable proper processing and formatting of the embedded ASCII diagram. Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/workqueue.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index cd2c6edc5c66..ddcdeb7b9f26 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1787,14 +1787,12 @@ static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill) } /** - * unplug_oldest_pwq - restart an oldest plugged pool_workqueue - * @wq: workqueue_struct to be restarted + * unplug_oldest_pwq - unplug the oldest pool_workqueue + * @wq: workqueue_struct where its oldest pwq is to be unplugged * - * pwq's are linked into wq->pwqs with the oldest first. For ordered - * workqueues, only the oldest pwq is unplugged, the others are plugged to - * suspend execution until the oldest one is drained. When this happens, the - * next oldest one (first plugged pwq in iteration) will be unplugged to - * restart work item execution to ensure proper work item ordering. + * This function should only be called for ordered workqueues where only the + * oldest pwq is unplugged, the others are plugged to suspend execution to + * ensure proper work item ordering:: * * dfl_pwq --------------+ [P] - plugged * | @@ -1804,6 +1802,11 @@ static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill) * 1 3 5 * | | | * 2 4 6 + * + * When the oldest pwq is drained and removed, this function should be called + * to unplug the next oldest one to start its work item execution. Note that + * pwq's are linked into wq->pwqs with the oldest first, so the first one in + * the list is the oldest. */ static void unplug_oldest_pwq(struct workqueue_struct *wq) { -- cgit v1.2.3 From 8f172181f24bb5df7675225d9b5b66d059613f50 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 8 Feb 2024 14:11:56 -1000 Subject: workqueue: Implement workqueue_set_min_active() Since 5797b1c18919 ("workqueue: Implement system-wide nr_active enforcement for unbound workqueues"), unbound workqueues have separate min_active which sets the number of interdependent work items that can be handled. 
This value is currently initialized to WQ_DFL_MIN_ACTIVE which is 8. This isn't high enough for some users, let's add an interface to adjust the setting. Signed-off-by: Tejun Heo --- kernel/workqueue.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ddcdeb7b9f26..4950bfc2cdcc 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -5629,6 +5629,33 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) } EXPORT_SYMBOL_GPL(workqueue_set_max_active); +/** + * workqueue_set_min_active - adjust min_active of an unbound workqueue + * @wq: target unbound workqueue + * @min_active: new min_active value + * + * Set min_active of an unbound workqueue. Unlike other types of workqueues, an + * unbound workqueue is not guaranteed to be able to process max_active + * interdependent work items. Instead, an unbound workqueue is guaranteed to be + * able to process min_active number of interdependent work items which is + * %WQ_DFL_MIN_ACTIVE by default. + * + * Use this function to adjust the min_active value between 0 and the current + * max_active. + */ +void workqueue_set_min_active(struct workqueue_struct *wq, int min_active) +{ + /* min_active is only meaningful for non-ordered unbound workqueues */ + if (WARN_ON((wq->flags & (WQ_BH | WQ_UNBOUND | __WQ_ORDERED)) != + WQ_UNBOUND)) + return; + + mutex_lock(&wq->mutex); + wq->saved_min_active = clamp(min_active, 0, wq->saved_max_active); + wq_adjust_max_active(wq); + mutex_unlock(&wq->mutex); +} + /** * current_work - retrieve %current task's work struct * -- cgit v1.2.3 From bf52b1ac6ab41a060511d56d0f2da12f3a2486db Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 8 Feb 2024 14:14:16 -1000 Subject: async: Use a dedicated unbound workqueue with raised min_active Async can schedule a number of interdependent work items. However, since 5797b1c18919 ("workqueue: Implement system-wide nr_active enforcement for unbound workqueues"), unbound workqueues have separate min_active which sets the number of interdependent work items that can be handled. This default value is 8 which isn't sufficient for async and can lead to stalls during resume from suspend in some cases. Let's use a dedicated unbound workqueue with raised min_active. Link: http://lkml.kernel.org/r/708a65cc-79ec-44a6-8454-a93d0f3114c3@samsung.com Reported-by: Marek Szyprowski Cc: Rafael J. 
Wysocki Tested-by: Marek Szyprowski Signed-off-by: Tejun Heo --- kernel/async.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/async.c b/kernel/async.c index 97f224a5257b..4c3e6a44595f 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -64,6 +64,7 @@ static async_cookie_t next_cookie = 1; static LIST_HEAD(async_global_pending); /* pending from all registered doms */ static ASYNC_DOMAIN(async_dfl_domain); static DEFINE_SPINLOCK(async_lock); +static struct workqueue_struct *async_wq; struct async_entry { struct list_head domain_list; @@ -174,7 +175,7 @@ static async_cookie_t __async_schedule_node_domain(async_func_t func, spin_unlock_irqrestore(&async_lock, flags); /* schedule for execution */ - queue_work_node(node, system_unbound_wq, &entry->work); + queue_work_node(node, async_wq, &entry->work); return newcookie; } @@ -345,3 +346,17 @@ bool current_is_async(void) return worker && worker->current_func == async_run_entry_fn; } EXPORT_SYMBOL_GPL(current_is_async); + +void __init async_init(void) +{ + /* + * Async can schedule a number of interdependent work items. However, + * unbound workqueues can handle only upto min_active interdependent + * work items. The default min_active of 8 isn't sufficient for async + * and can lead to stalls. Let's use a dedicated workqueue with raised + * min_active. + */ + async_wq = alloc_workqueue("async", WQ_UNBOUND, 0); + BUG_ON(!async_wq); + workqueue_set_min_active(async_wq, WQ_DFL_ACTIVE); +} -- cgit v1.2.3 From c044a9502649a66bf5c5e1a584cb82b2c538ae25 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 9 Feb 2024 14:06:20 +0100 Subject: signal: fill in si_code in prepare_kill_siginfo() So that do_tkill() can use this helper too. This also simplifies the next patch. TODO: perhaps we can kill prepare_kill_siginfo() and change the callers to use SEND_SIG_NOINFO, but this needs some changes in __send_signal_locked() and TP_STORE_SIGINFO(). Reviewed-by: Tycho Andersen Signed-off-by: Oleg Nesterov Link: https://lore.kernel.org/r/20240209130620.GA8039@redhat.com Signed-off-by: Christian Brauner --- kernel/signal.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index c3fac06937e2..1450689302d9 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3793,12 +3793,13 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese, #endif #endif -static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info) +static void prepare_kill_siginfo(int sig, struct kernel_siginfo *info, + enum pid_type type) { clear_siginfo(info); info->si_signo = sig; info->si_errno = 0; - info->si_code = SI_USER; + info->si_code = (type == PIDTYPE_PID) ? 
SI_TKILL : SI_USER; info->si_pid = task_tgid_vnr(current); info->si_uid = from_kuid_munged(current_user_ns(), current_uid()); } @@ -3812,7 +3813,7 @@ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) { struct kernel_siginfo info; - prepare_kill_siginfo(sig, &info); + prepare_kill_siginfo(sig, &info, PIDTYPE_TGID); return kill_something_info(sig, &info, pid); } @@ -3925,7 +3926,7 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) goto err; } else { - prepare_kill_siginfo(sig, &kinfo); + prepare_kill_siginfo(sig, &kinfo, PIDTYPE_TGID); } /* TODO: respect PIDFD_THREAD */ @@ -3970,12 +3971,7 @@ static int do_tkill(pid_t tgid, pid_t pid, int sig) { struct kernel_siginfo info; - clear_siginfo(&info); - info.si_signo = sig; - info.si_errno = 0; - info.si_code = SI_TKILL; - info.si_pid = task_tgid_vnr(current); - info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); + prepare_kill_siginfo(sig, &info, PIDTYPE_PID); return do_send_specific(tgid, pid, sig, &info); } -- cgit v1.2.3 From 81b9d8ac0640b285a3c369cd41a85f6c240d3a60 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 9 Feb 2024 14:06:50 +0100 Subject: pidfd: change pidfd_send_signal() to respect PIDFD_THREAD Turn kill_pid_info() into kill_pid_info_type(), this allows to pass any pid_type to group_send_sig_info(), despite its name it should work fine even if type = PIDTYPE_PID. Change pidfd_send_signal() to use PIDTYPE_PID or PIDTYPE_TGID depending on PIDFD_THREAD. While at it kill another TODO comment in pidfd_show_fdinfo(). As Christian expains fdinfo reports f_flags, userspace can already detect PIDFD_THREAD. Reviewed-by: Tycho Andersen Signed-off-by: Oleg Nesterov Link: https://lore.kernel.org/r/20240209130650.GA8048@redhat.com Signed-off-by: Christian Brauner --- kernel/fork.c | 2 -- kernel/signal.c | 39 +++++++++++++++++++++++---------------- 2 files changed, 23 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 4b6d994505ca..3f22ec90c5c6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2051,8 +2051,6 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f) seq_put_decimal_ll(m, "Pid:\t", nr); - /* TODO: report PIDFD_THREAD */ - #ifdef CONFIG_PID_NS seq_put_decimal_ll(m, "\nNSpid:\t", nr); if (nr > 0) { diff --git a/kernel/signal.c b/kernel/signal.c index 1450689302d9..8b8169623850 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -47,6 +47,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -1436,7 +1437,8 @@ void lockdep_assert_task_sighand_held(struct task_struct *task) #endif /* - * send signal info to all the members of a group + * send signal info to all the members of a thread group or to the + * individual thread if type == PIDTYPE_PID. 
*/ int group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type) @@ -1478,7 +1480,8 @@ int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp) return ret; } -int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) +static int kill_pid_info_type(int sig, struct kernel_siginfo *info, + struct pid *pid, enum pid_type type) { int error = -ESRCH; struct task_struct *p; @@ -1487,11 +1490,10 @@ int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) rcu_read_lock(); p = pid_task(pid, PIDTYPE_PID); if (p) - error = group_send_sig_info(sig, info, p, PIDTYPE_TGID); + error = group_send_sig_info(sig, info, p, type); rcu_read_unlock(); if (likely(!p || error != -ESRCH)) return error; - /* * The task was unhashed in between, try again. If it * is dead, pid_task() will return NULL, if we race with @@ -1500,6 +1502,11 @@ int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) } } +int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) +{ + return kill_pid_info_type(sig, info, pid, PIDTYPE_TGID); +} + static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid) { int error; @@ -3873,14 +3880,10 @@ static struct pid *pidfd_to_pid(const struct file *file) * @info: signal info * @flags: future flags * - * The syscall currently only signals via PIDTYPE_PID which covers - * kill(, . It does not signal threads or process - * groups. - * In order to extend the syscall to threads and process groups the @flags - * argument should be used. In essence, the @flags argument will determine - * what is signaled and not the file descriptor itself. Put in other words, - * grouping is a property of the flags argument not a property of the file - * descriptor. + * Send the signal to the thread group or to the individual thread depending + * on PIDFD_THREAD. + * In the future extension to @flags may be used to override the default scope + * of @pidfd. * * Return: 0 on success, negative errno on failure */ @@ -3891,6 +3894,7 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, struct fd f; struct pid *pid; kernel_siginfo_t kinfo; + enum pid_type type; /* Enforce flags be set to 0 until we add an extension. */ if (flags) @@ -3911,6 +3915,11 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, if (!access_pidfd_pidns(pid)) goto err; + if (f.file->f_flags & PIDFD_THREAD) + type = PIDTYPE_PID; + else + type = PIDTYPE_TGID; + if (info) { ret = copy_siginfo_from_user_any(&kinfo, info); if (unlikely(ret)) @@ -3926,12 +3935,10 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) goto err; } else { - prepare_kill_siginfo(sig, &kinfo, PIDTYPE_TGID); + prepare_kill_siginfo(sig, &kinfo, type); } - /* TODO: respect PIDFD_THREAD */ - ret = kill_pid_info(sig, &kinfo, pid); - + ret = kill_pid_info_type(sig, &kinfo, pid, type); err: fdput(f); return ret; -- cgit v1.2.3 From ca16265aaf9d357035000833636dcddbfafacac3 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 15 Nov 2023 14:11:27 -0500 Subject: rcu/nocb: Remove needless LOAD-ACQUIRE The LOAD-ACQUIRE access performed on rdp->nocb_cb_sleep advertizes ordering callback execution against grace period completion. However this is contradicted by the following: * This LOAD-ACQUIRE doesn't pair with anything. The only counterpart barrier that can be found is the smp_mb() placed after callbacks advancing in nocb_gp_wait(). 
However the barrier is placed _after_ ->nocb_cb_sleep write. * Callbacks can be concurrently advanced between the LOAD-ACQUIRE on ->nocb_cb_sleep and the call to rcu_segcblist_extract_done_cbs() in rcu_do_batch(), making any ordering based on ->nocb_cb_sleep broken. * Both rcu_segcblist_extract_done_cbs() and rcu_advance_cbs() are called under the nocb_lock, the latter hereby providing already the desired ACQUIRE semantics. Therefore it is safe to access ->nocb_cb_sleep with a simple compiler barrier. Signed-off-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/tree_nocb.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 4efbf7333d4e..785946834c6b 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -933,8 +933,7 @@ static void nocb_cb_wait(struct rcu_data *rdp) swait_event_interruptible_exclusive(rdp->nocb_cb_wq, nocb_cb_wait_cond(rdp)); - // VVV Ensure CB invocation follows _sleep test. - if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^ + if (READ_ONCE(rdp->nocb_cb_sleep)) { WARN_ON(signal_pending(current)); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); } -- cgit v1.2.3 From 1e8e6951a5774c8dd9d1f14af9c5b7d66130d96f Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 15 Nov 2023 14:11:28 -0500 Subject: rcu/nocb: Remove needless full barrier after callback advancing A full barrier is issued from nocb_gp_wait() upon callbacks advancing to order grace period completion with callbacks execution. However these two events are already ordered by the smp_mb__after_unlock_lock() barrier within the call to raw_spin_lock_rcu_node() that is necessary for callbacks advancing to happen. The following litmus test shows the kind of guarantee that this barrier provides: C smp_mb__after_unlock_lock {} // rcu_gp_cleanup() P0(spinlock_t *rnp_lock, int *gpnum) { // Grace period cleanup increase gp sequence number spin_lock(rnp_lock); WRITE_ONCE(*gpnum, 1); spin_unlock(rnp_lock); } // nocb_gp_wait() P1(spinlock_t *rnp_lock, spinlock_t *nocb_lock, int *gpnum, int *cb_ready) { int r1; // Call rcu_advance_cbs() from nocb_gp_wait() spin_lock(nocb_lock); spin_lock(rnp_lock); smp_mb__after_unlock_lock(); r1 = READ_ONCE(*gpnum); WRITE_ONCE(*cb_ready, 1); spin_unlock(rnp_lock); spin_unlock(nocb_lock); } // nocb_cb_wait() P2(spinlock_t *nocb_lock, int *cb_ready, int *cb_executed) { int r2; // rcu_do_batch() -> rcu_segcblist_extract_done_cbs() spin_lock(nocb_lock); r2 = READ_ONCE(*cb_ready); spin_unlock(nocb_lock); // Actual callback execution WRITE_ONCE(*cb_executed, 1); } P3(int *cb_executed, int *gpnum) { int r3; WRITE_ONCE(*cb_executed, 2); smp_mb(); r3 = READ_ONCE(*gpnum); } exists (1:r1=1 /\ 2:r2=1 /\ cb_executed=2 /\ 3:r3=0) (* Bad outcome. *) Here the bad outcome only occurs if the smp_mb__after_unlock_lock() is removed. This barrier orders the grace period completion against callbacks advancing and even later callbacks invocation, thanks to the opportunistic propagation via the ->nocb_lock to nocb_cb_wait(). Therefore the smp_mb() placed after callbacks advancing can be safely removed. Signed-off-by: Frederic Weisbecker Reviewed-by: Paul E. 
McKenney Signed-off-by: Boqun Feng --- kernel/rcu/tree.c | 6 ++++++ kernel/rcu/tree_nocb.h | 1 - 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b2bccfd37c38..d540d210e5c7 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2145,6 +2145,12 @@ static void rcu_do_batch(struct rcu_data *rdp) * Extract the list of ready callbacks, disabling IRQs to prevent * races with call_rcu() from interrupt handlers. Leave the * callback counts, as rcu_barrier() needs to be conservative. + * + * Callbacks execution is fully ordered against preceding grace period + * completion (materialized by rnp->gp_seq update) thanks to the + * smp_mb__after_unlock_lock() upon node locking required for callbacks + * advancing. In NOCB mode this ordering is then further relayed through + * the nocb locking that protects both callbacks advancing and extraction. */ rcu_nocb_lock_irqsave(rdp, flags); WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 785946834c6b..b2c3145c4c13 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -779,7 +779,6 @@ static void nocb_gp_wait(struct rcu_data *my_rdp) if (rcu_segcblist_ready_cbs(&rdp->cblist)) { needwake = rdp->nocb_cb_sleep; WRITE_ONCE(rdp->nocb_cb_sleep, false); - smp_mb(); /* CB invocation -after- GP end. */ } else { needwake = false; } -- cgit v1.2.3 From b913c3fe685e0aec80130975b0f330fd709ff324 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 9 Jan 2024 23:24:00 +0100 Subject: rcu/nocb: Make IRQs disablement symmetric Currently IRQs are disabled on call_rcu() and then depending on the context: * If the CPU is in nocb mode: - If the callback is enqueued in the bypass list, IRQs are re-enabled implictly by rcu_nocb_try_bypass() - If the callback is enqueued in the normal list, IRQs are re-enabled implicitly by __call_rcu_nocb_wake() * If the CPU is NOT in nocb mode, IRQs are reenabled explicitly from call_rcu() This makes the code a bit hard to follow, especially as it interleaves with nocb locking. To make the IRQ flags coverage clearer and also in order to prepare for moving all the nocb enqueue code to its own function, always re-enable the IRQ flags explicitly from call_rcu(). Reviewed-by: Neeraj Upadhyay (AMD) Signed-off-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/tree.c | 9 ++++++--- kernel/rcu/tree_nocb.h | 20 +++++++++----------- 2 files changed, 15 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index d540d210e5c7..a402dc4e9a9c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2735,8 +2735,10 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in) } check_cb_ovld(rdp); - if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) + if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) { + local_irq_restore(flags); return; // Enqueued onto ->nocb_bypass, so just leave. + } // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock. 
rcu_segcblist_enqueue(&rdp->cblist, head); if (__is_kvfree_rcu_offset((unsigned long)func)) @@ -2754,8 +2756,8 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in) __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ } else { __call_rcu_core(rdp, head, flags); - local_irq_restore(flags); } + local_irq_restore(flags); } #ifdef CONFIG_RCU_LAZY @@ -4646,8 +4648,9 @@ void rcutree_migrate_callbacks(int cpu) __call_rcu_nocb_wake(my_rdp, true, flags); } else { rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */ - raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags); + raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */ } + local_irq_restore(flags); if (needwake) rcu_gp_kthread_wake(); lockdep_assert_irqs_enabled(); diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index b2c3145c4c13..1d5c03c5c702 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -532,9 +532,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, // 2. Both of these conditions are met: // a. The bypass list previously had only lazy CBs, and: // b. The new CB is non-lazy. - if (ncbs && (!bypass_is_lazy || lazy)) { - local_irq_restore(flags); - } else { + if (!ncbs || (bypass_is_lazy && !lazy)) { // No-CBs GP kthread might be indefinitely asleep, if so, wake. rcu_nocb_lock(rdp); // Rare during call_rcu() flood. if (!rcu_segcblist_pend_cbs(&rdp->cblist)) { @@ -544,7 +542,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, } else { trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQnoWake")); - rcu_nocb_unlock_irqrestore(rdp, flags); + rcu_nocb_unlock(rdp); } } return true; // Callback already enqueued. @@ -570,7 +568,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, // If we are being polled or there is no kthread, just leave. t = READ_ONCE(rdp->nocb_gp_kthread); if (rcu_nocb_poll || !t) { - rcu_nocb_unlock_irqrestore(rdp, flags); + rcu_nocb_unlock(rdp); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNotPoll")); return; @@ -583,17 +581,17 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, rdp->qlen_last_fqs_check = len; // Only lazy CBs in bypass list if (lazy_len && bypass_len == lazy_len) { - rcu_nocb_unlock_irqrestore(rdp, flags); + rcu_nocb_unlock(rdp); wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY, TPS("WakeLazy")); } else if (!irqs_disabled_flags(flags)) { /* ... if queue was empty ... 
*/ - rcu_nocb_unlock_irqrestore(rdp, flags); + rcu_nocb_unlock(rdp); wake_nocb_gp(rdp, false); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeEmpty")); } else { - rcu_nocb_unlock_irqrestore(rdp, flags); + rcu_nocb_unlock(rdp); wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE, TPS("WakeEmptyIsDeferred")); } @@ -611,15 +609,15 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, if ((rdp->nocb_cb_sleep || !rcu_segcblist_ready_cbs(&rdp->cblist)) && !timer_pending(&rdp->nocb_timer)) { - rcu_nocb_unlock_irqrestore(rdp, flags); + rcu_nocb_unlock(rdp); wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE, TPS("WakeOvfIsDeferred")); } else { - rcu_nocb_unlock_irqrestore(rdp, flags); + rcu_nocb_unlock(rdp); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); } } else { - rcu_nocb_unlock_irqrestore(rdp, flags); + rcu_nocb_unlock(rdp); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); } } -- cgit v1.2.3 From afd4e6964745ed98b74cacdcce21d73280a0a253 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 9 Jan 2024 23:24:01 +0100 Subject: rcu/nocb: Re-arrange call_rcu() NOCB specific code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently the call_rcu() function interleaves NOCB and !NOCB enqueue code in a complicated way such that: * The bypass enqueue code may or may not have enqueued and may or may not have locked the ->nocb_lock. Everything that follows is in a Schrödinger locking state for the unwary reviewer's eyes. * The was_alldone is always set but only used in NOCB related code. * The NOCB wake up is distantly related to the locking hopefully performed by the bypass enqueue code that did not enqueue on the bypass list. Unconfuse the whole and gather NOCB and !NOCB specific enqueue code to their own functions. Signed-off-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/tree.c | 44 ++++++++++++++++++++------------------------ kernel/rcu/tree.h | 9 ++++----- kernel/rcu/tree_nocb.h | 18 +++++++++++++++--- 3 files changed, 39 insertions(+), 32 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index a402dc4e9a9c..cc0e169e299a 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2597,12 +2597,26 @@ static int __init rcu_spawn_core_kthreads(void) return 0; } +static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func) +{ + rcu_segcblist_enqueue(&rdp->cblist, head); + if (__is_kvfree_rcu_offset((unsigned long)func)) + trace_rcu_kvfree_callback(rcu_state.name, head, + (unsigned long)func, + rcu_segcblist_n_cbs(&rdp->cblist)); + else + trace_rcu_callback(rcu_state.name, head, + rcu_segcblist_n_cbs(&rdp->cblist)); + trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); +} + /* * Handle any core-RCU processing required by a call_rcu() invocation. */ -static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, - unsigned long flags) +static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, + rcu_callback_t func, unsigned long flags) { + rcutree_enqueue(rdp, head, func); /* * If called from an extended quiescent state, invoke the RCU * core in order to force a re-evaluation of RCU's idleness. @@ -2698,7 +2712,6 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in) unsigned long flags; bool lazy; struct rcu_data *rdp; - bool was_alldone; /* Misaligned rcu_head! 
*/ WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); @@ -2735,28 +2748,11 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in) } check_cb_ovld(rdp); - if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) { - local_irq_restore(flags); - return; // Enqueued onto ->nocb_bypass, so just leave. - } - // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock. - rcu_segcblist_enqueue(&rdp->cblist, head); - if (__is_kvfree_rcu_offset((unsigned long)func)) - trace_rcu_kvfree_callback(rcu_state.name, head, - (unsigned long)func, - rcu_segcblist_n_cbs(&rdp->cblist)); - else - trace_rcu_callback(rcu_state.name, head, - rcu_segcblist_n_cbs(&rdp->cblist)); - - trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); - /* Go handle any RCU core processing required. */ - if (unlikely(rcu_rdp_is_offloaded(rdp))) { - __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ - } else { - __call_rcu_core(rdp, head, flags); - } + if (unlikely(rcu_rdp_is_offloaded(rdp))) + call_rcu_nocb(rdp, head, func, flags, lazy); + else + call_rcu_core(rdp, head, func, flags); local_irq_restore(flags); } diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index e9821a8422db..bf478da89a8f 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -467,11 +467,10 @@ static void rcu_init_one_nocb(struct rcu_node *rnp); static bool wake_nocb_gp(struct rcu_data *rdp, bool force); static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, unsigned long j, bool lazy); -static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, - bool *was_alldone, unsigned long flags, - bool lazy); -static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty, - unsigned long flags); +static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head, + rcu_callback_t func, unsigned long flags, bool lazy); +static void __maybe_unused __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty, + unsigned long flags); static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level); static bool do_nocb_deferred_wakeup(struct rcu_data *rdp); static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp); diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 1d5c03c5c702..9e8052ba14b9 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -622,6 +622,18 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, } } +static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head, + rcu_callback_t func, unsigned long flags, bool lazy) +{ + bool was_alldone; + + if (!rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) { + /* Not enqueued on bypass but locked, do regular enqueue */ + rcutree_enqueue(rdp, head, func); + __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ + } +} + static int nocb_gp_toggle_rdp(struct rcu_data *rdp, bool *wake_state) { @@ -1764,10 +1776,10 @@ static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, return true; } -static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, - bool *was_alldone, unsigned long flags, bool lazy) +static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head, + rcu_callback_t func, unsigned long flags, bool lazy) { - return false; + WARN_ON_ONCE(1); /* Should be dead code! 
*/ } static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty, -- cgit v1.2.3 From dda98810b552fc6bf650f4270edeebdc2f28bd3f Mon Sep 17 00:00:00 2001 From: Zqiang Date: Wed, 10 Jan 2024 16:11:28 +0800 Subject: rcu/nocb: Fix WARN_ON_ONCE() in the rcu_nocb_bypass_lock() For the kernels built with CONFIG_RCU_NOCB_CPU_DEFAULT_ALL=y and CONFIG_RCU_LAZY=y, the following scenarios will trigger WARN_ON_ONCE() in the rcu_nocb_bypass_lock() and rcu_nocb_wait_contended() functions: CPU2 CPU11 kthread rcu_nocb_cb_kthread ksys_write rcu_do_batch vfs_write rcu_torture_timer_cb proc_sys_write __kmem_cache_free proc_sys_call_handler kmemleak_free drop_caches_sysctl_handler delete_object_full drop_slab __delete_object shrink_slab put_object lazy_rcu_shrink_scan call_rcu rcu_nocb_flush_bypass __call_rcu_commn rcu_nocb_bypass_lock raw_spin_trylock(&rdp->nocb_bypass_lock) fail atomic_inc(&rdp->nocb_lock_contended); rcu_nocb_wait_contended WARN_ON_ONCE(smp_processor_id() != rdp->cpu); WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)) | |_ _ _ _ _ _ _ _ _ _same rdp and rdp->cpu != 11_ _ _ _ _ _ _ _ _ __| Reproduce this bug with "echo 3 > /proc/sys/vm/drop_caches". This commit therefore uses rcu_nocb_try_flush_bypass() instead of rcu_nocb_flush_bypass() in lazy_rcu_shrink_scan(). If the nocb_bypass queue is being flushed, then rcu_nocb_try_flush_bypass will return directly. Signed-off-by: Zqiang Reviewed-by: Joel Fernandes (Google) Reviewed-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/tree_nocb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 9e8052ba14b9..ffa69a5e18f4 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -1391,7 +1391,7 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) rcu_nocb_unlock_irqrestore(rdp, flags); continue; } - WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false)); + rcu_nocb_try_flush_bypass(rdp, jiffies); rcu_nocb_unlock_irqrestore(rdp, flags); wake_nocb_gp(rdp, false); sc->nr_to_scan -= _count; -- cgit v1.2.3 From f3c4c00784b5f7499d9cb6d31b661370c9a1ce7f Mon Sep 17 00:00:00 2001 From: Zqiang Date: Wed, 17 Jan 2024 18:26:16 +0800 Subject: rcu/nocb: Check rdp_gp->nocb_timer in __call_rcu_nocb_wake() Currently, only rdp_gp->nocb_timer is used, for nocb_timer of no-rdp_gp structure, the timer_pending() is always return false, this commit therefore need to check rdp_gp->nocb_timer in __call_rcu_nocb_wake(). Signed-off-by: Zqiang Reviewed-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/tree_nocb.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index ffa69a5e18f4..f124d4d45ce6 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -564,6 +564,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, long lazy_len; long len; struct task_struct *t; + struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; // If we are being polled or there is no kthread, just leave. t = READ_ONCE(rdp->nocb_gp_kthread); @@ -608,7 +609,7 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, smp_mb(); /* Enqueue before timer_pending(). 
*/ if ((rdp->nocb_cb_sleep || !rcu_segcblist_ready_cbs(&rdp->cblist)) && - !timer_pending(&rdp->nocb_timer)) { + !timer_pending(&rdp_gp->nocb_timer)) { rcu_nocb_unlock(rdp); wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE, TPS("WakeOvfIsDeferred")); -- cgit v1.2.3 From a7e4074dccd282f494d542150ef6235b3270b0a2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 19 Dec 2023 00:19:16 +0100 Subject: rcu/exp: Remove full barrier upon main thread wakeup When an expedited grace period is ending, care must be taken so that all the quiescent states propagated up to the root are correctly ordered against the wake up of the main expedited grace period workqueue. This ordering is already carried through the root rnp locking augmented by an smp_mb__after_unlock_lock() barrier. Therefore the explicit smp_mb() placed before the wake up is not needed and can be removed. Signed-off-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/tree_exp.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 2ac440bc7e10..014ddf672165 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -198,10 +198,9 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp, } if (rnp->parent == NULL) { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - if (wake) { - smp_mb(); /* EGP done before wake_up(). */ + if (wake) swake_up_one_online(&rcu_state.expedited_wq); - } + break; } mask = rnp->grpmask; -- cgit v1.2.3 From a636c5e6f8fc34be520277e69c7c6ee1d4fc1d17 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 12 Jan 2024 16:46:15 +0100 Subject: rcu/exp: Fix RCU expedited parallel grace period kworker allocation failure recovery Under CONFIG_RCU_EXP_KTHREAD=y, the nodes initialization for expedited grace periods is queued to a kworker. However if the allocation of that kworker failed, the nodes initialization is performed synchronously by the caller instead. Now the check for kworker initialization failure relies on the kworker pointer to be NULL while its value might actually encapsulate an allocation failure error. Make sure to handle this case. Reviewed-by: Kalesh Singh Fixes: 9621fbee44df ("rcu: Move expedited grace period (GP) work to RT kthread_worker") Signed-off-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/tree.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b2bccfd37c38..38c86f2c040b 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -4749,6 +4749,7 @@ static void __init rcu_start_exp_gp_kworkers(void) rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name); if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) { pr_err("Failed to create %s!\n", par_gp_kworker_name); + rcu_exp_par_gp_kworker = NULL; kthread_destroy_worker(rcu_exp_gp_kworker); return; } -- cgit v1.2.3 From e7539ffc9a770f36bacedcf0fbfb4bf2f244f4a5 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 12 Jan 2024 16:46:16 +0100 Subject: rcu/exp: Handle RCU expedited grace period kworker allocation failure Just like is done for the kworker performing nodes initialization, gracefully handle the possible allocation failure of the RCU expedited grace period main kworker. While at it perform a rename of the related checking functions to better reflect the expedited specifics. 
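The pitfall that both this patch and the previous one guard against can be distilled into a short sketch (hedged: example_kworker and the helpers are invented names, not the patched functions): kthread_create_worker() reports failure through an ERR_PTR(), not through NULL, so a pointer that is later tested with a plain NULL check must be normalized on the error path.

	#include <linux/err.h>
	#include <linux/kthread.h>

	static struct kthread_worker *example_kworker;

	static void __init example_start_kworker(void)
	{
		example_kworker = kthread_create_worker(0, "example_kworker");
		if (IS_ERR_OR_NULL(example_kworker)) {
			/*
			 * On failure the pointer holds an encoded errno,
			 * so a later "if (!example_kworker)" test would
			 * mistake it for a live worker. Reset it so that
			 * NULL is the single "worker unavailable" state.
			 */
			example_kworker = NULL;
			return;
		}
	}

	static bool example_kworker_started(void)
	{
		return !!READ_ONCE(example_kworker);
	}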
Reviewed-by: Kalesh Singh Fixes: 9621fbee44df ("rcu: Move expedited grace period (GP) work to RT kthread_worker") Signed-off-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/tree.c | 2 ++ kernel/rcu/tree_exp.h | 25 +++++++++++++++++++------ 2 files changed, 21 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 38c86f2c040b..f2c10d351b59 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -4743,6 +4743,7 @@ static void __init rcu_start_exp_gp_kworkers(void) rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name); if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) { pr_err("Failed to create %s!\n", gp_kworker_name); + rcu_exp_gp_kworker = NULL; return; } @@ -4751,6 +4752,7 @@ static void __init rcu_start_exp_gp_kworkers(void) pr_err("Failed to create %s!\n", par_gp_kworker_name); rcu_exp_par_gp_kworker = NULL; kthread_destroy_worker(rcu_exp_gp_kworker); + rcu_exp_gp_kworker = NULL; return; } diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 014ddf672165..6123a60d9a4d 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -427,7 +427,12 @@ static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp) __sync_rcu_exp_select_node_cpus(rewp); } -static inline bool rcu_gp_par_worker_started(void) +static inline bool rcu_exp_worker_started(void) +{ + return !!READ_ONCE(rcu_exp_gp_kworker); +} + +static inline bool rcu_exp_par_worker_started(void) { return !!READ_ONCE(rcu_exp_par_gp_kworker); } @@ -477,7 +482,12 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) __sync_rcu_exp_select_node_cpus(rewp); } -static inline bool rcu_gp_par_worker_started(void) +static inline bool rcu_exp_worker_started(void) +{ + return !!READ_ONCE(rcu_gp_wq); +} + +static inline bool rcu_exp_par_worker_started(void) { return !!READ_ONCE(rcu_par_gp_wq); } @@ -540,7 +550,7 @@ static void sync_rcu_exp_select_cpus(void) rnp->exp_need_flush = false; if (!READ_ONCE(rnp->expmask)) continue; /* Avoid early boot non-existent wq. */ - if (!rcu_gp_par_worker_started() || + if (!rcu_exp_par_worker_started() || rcu_scheduler_active != RCU_SCHEDULER_RUNNING || rcu_is_last_leaf_node(rnp)) { /* No worker started yet or last leaf, do direct call. */ @@ -955,7 +965,7 @@ static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp) */ void synchronize_rcu_expedited(void) { - bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT); + bool use_worker; unsigned long flags; struct rcu_exp_work rew; struct rcu_node *rnp; @@ -966,6 +976,9 @@ void synchronize_rcu_expedited(void) lock_is_held(&rcu_sched_lock_map), "Illegal synchronize_rcu_expedited() in RCU read-side critical section"); + use_worker = (rcu_scheduler_active != RCU_SCHEDULER_INIT) && + rcu_exp_worker_started(); + /* Is the state is such that the call is a grace period? */ if (rcu_blocking_is_gp()) { // Note well that this code runs with !PREEMPT && !SMP. @@ -995,7 +1008,7 @@ void synchronize_rcu_expedited(void) return; /* Someone else did our work for us. */ /* Ensure that load happens before action based on it. */ - if (unlikely(boottime)) { + if (unlikely(!use_worker)) { /* Direct call during scheduler init and early_initcalls(). */ rcu_exp_sel_wait_wake(s); } else { @@ -1013,7 +1026,7 @@ void synchronize_rcu_expedited(void) /* Let the next expedited grace period start. 
*/ mutex_unlock(&rcu_state.exp_mutex); - if (likely(!boottime)) + if (likely(use_worker)) synchronize_rcu_expedited_destroy_work(&rew); } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); -- cgit v1.2.3 From 7836b270607676ed1c0c6a4a840a2ede9437a6a1 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 12 Jan 2024 16:46:17 +0100 Subject: rcu: s/boost_kthread_mutex/kthread_mutex This mutex is currently protecting per node boost kthreads creation and affinity setting across CPU hotplug operations. Since the expedited kworkers will soon be split per node as well, they will be subject to the same concurrency constraints against hotplug. Therefore their creation and affinity tuning operations will be grouped with those of boost kthreads and then rely on the same mutex. To prepare for that, generalize its name. Signed-off-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/tree.c | 2 +- kernel/rcu/tree.h | 2 +- kernel/rcu/tree_plugin.h | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f2c10d351b59..cdb80835c469 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -4918,7 +4918,7 @@ static void __init rcu_init_one(void) init_waitqueue_head(&rnp->exp_wq[2]); init_waitqueue_head(&rnp->exp_wq[3]); spin_lock_init(&rnp->exp_lock); - mutex_init(&rnp->boost_kthread_mutex); + mutex_init(&rnp->kthread_mutex); raw_spin_lock_init(&rnp->exp_poll_lock); rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED; INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index e9821a8422db..13e7b0d907ab 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -113,7 +113,7 @@ struct rcu_node { /* side effect, not as a lock. */ unsigned long boost_time; /* When to start boosting (jiffies). */ - struct mutex boost_kthread_mutex; + struct mutex kthread_mutex; /* Exclusion for thread spawning and affinity */ /* manipulation. */ struct task_struct *boost_kthread_task; diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 41021080ad25..0d307674915c 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1195,7 +1195,7 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) struct sched_param sp; struct task_struct *t; - mutex_lock(&rnp->boost_kthread_mutex); + mutex_lock(&rnp->kthread_mutex); if (rnp->boost_kthread_task || !rcu_scheduler_fully_active) goto out; @@ -1212,7 +1212,7 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ out: - mutex_unlock(&rnp->boost_kthread_mutex); + mutex_unlock(&rnp->kthread_mutex); } /* @@ -1224,7 +1224,7 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) * no outgoing CPU. If there are no CPUs left in the affinity set, * this function allows the kthread to execute on any CPU. * - * Any future concurrent calls are serialized via ->boost_kthread_mutex. + * Any future concurrent calls are serialized via ->kthread_mutex. 
*/ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) { @@ -1237,7 +1237,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) return; if (!zalloc_cpumask_var(&cm, GFP_KERNEL)) return; - mutex_lock(&rnp->boost_kthread_mutex); + mutex_lock(&rnp->kthread_mutex); mask = rcu_rnp_online_cpus(rnp); for_each_leaf_node_possible_cpu(rnp, cpu) if ((mask & leaf_node_cpu_bit(rnp, cpu)) && @@ -1250,7 +1250,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) cpumask_clear_cpu(outgoingcpu, cm); } set_cpus_allowed_ptr(t, cm); - mutex_unlock(&rnp->boost_kthread_mutex); + mutex_unlock(&rnp->kthread_mutex); free_cpumask_var(cm); } -- cgit v1.2.3 From c19e5d3b497a3036f800edf751dc7814e3e887e1 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 12 Jan 2024 16:46:18 +0100 Subject: rcu/exp: Move expedited kthread worker creation functions above rcutree_prepare_cpu() The expedited kthread worker performing the per node initialization is going to be split into per node kthreads. As such, the future per node kthread creation will need to be called from CPU hotplug callbacks instead of an initcall, right beside the per node boost kthread creation. To prepare for that, move the kthread worker creation above rcutree_prepare_cpu() as a first step to make the review smoother for the upcoming modifications. No intended functional change. Signed-off-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/tree.c | 96 +++++++++++++++++++++++++++---------------------------- 1 file changed, 48 insertions(+), 48 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index cdb80835c469..657ac12f9e27 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -4394,6 +4394,54 @@ rcu_boot_init_percpu_data(int cpu) rcu_boot_init_nocb_percpu_data(rdp); } +#ifdef CONFIG_RCU_EXP_KTHREAD +struct kthread_worker *rcu_exp_gp_kworker; +struct kthread_worker *rcu_exp_par_gp_kworker; + +static void __init rcu_start_exp_gp_kworkers(void) +{ + const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker"; + const char *gp_kworker_name = "rcu_exp_gp_kthread_worker"; + struct sched_param param = { .sched_priority = kthread_prio }; + + rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name); + if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) { + pr_err("Failed to create %s!\n", gp_kworker_name); + rcu_exp_gp_kworker = NULL; + return; + } + + rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name); + if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) { + pr_err("Failed to create %s!\n", par_gp_kworker_name); + rcu_exp_par_gp_kworker = NULL; + kthread_destroy_worker(rcu_exp_gp_kworker); + rcu_exp_gp_kworker = NULL; + return; + } + + sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param); + sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO, + &param); +} + +static inline void rcu_alloc_par_gp_wq(void) +{ +} +#else /* !CONFIG_RCU_EXP_KTHREAD */ +struct workqueue_struct *rcu_par_gp_wq; + +static void __init rcu_start_exp_gp_kworkers(void) +{ +} + +static inline void rcu_alloc_par_gp_wq(void) +{ + rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0); + WARN_ON(!rcu_par_gp_wq); +} +#endif /* CONFIG_RCU_EXP_KTHREAD */ + /* * Invoked early in the CPU-online process, when pretty much all services * are available. The incoming CPU is not present.
@@ -4730,54 +4778,6 @@ static int rcu_pm_notify(struct notifier_block *self, return NOTIFY_OK; } -#ifdef CONFIG_RCU_EXP_KTHREAD -struct kthread_worker *rcu_exp_gp_kworker; -struct kthread_worker *rcu_exp_par_gp_kworker; - -static void __init rcu_start_exp_gp_kworkers(void) -{ - const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker"; - const char *gp_kworker_name = "rcu_exp_gp_kthread_worker"; - struct sched_param param = { .sched_priority = kthread_prio }; - - rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name); - if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) { - pr_err("Failed to create %s!\n", gp_kworker_name); - rcu_exp_gp_kworker = NULL; - return; - } - - rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name); - if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) { - pr_err("Failed to create %s!\n", par_gp_kworker_name); - rcu_exp_par_gp_kworker = NULL; - kthread_destroy_worker(rcu_exp_gp_kworker); - rcu_exp_gp_kworker = NULL; - return; - } - - sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param); - sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO, - &param); -} - -static inline void rcu_alloc_par_gp_wq(void) -{ -} -#else /* !CONFIG_RCU_EXP_KTHREAD */ -struct workqueue_struct *rcu_par_gp_wq; - -static void __init rcu_start_exp_gp_kworkers(void) -{ -} - -static inline void rcu_alloc_par_gp_wq(void) -{ - rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0); - WARN_ON(!rcu_par_gp_wq); -} -#endif /* CONFIG_RCU_EXP_KTHREAD */ - /* * Spawn the kthreads that handle RCU's grace periods. */ -- cgit v1.2.3 From 8e5e621566485a3e160c0d8bfba206cb1d6b980d Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 12 Jan 2024 16:46:19 +0100 Subject: rcu/exp: Make parallel exp gp kworker per rcu node When CONFIG_RCU_EXP_KTHREAD=n, the expedited grace period per node initialization is performed in parallel via workqueues (one work per node). However in CONFIG_RCU_EXP_KTHREAD=y, this per node initialization is performed by a single kworker serializing each node initialization (one work for all nodes). The second part is certainly less scalable and efficient beyond a single leaf node. To improve this, expand this single kworker into per-node kworkers. This new layout is eventually intended to remove the workqueues based implementation since it will essentially now become duplicate code. Signed-off-by: Frederic Weisbecker Reviewed-by: Paul E.
McKenney Signed-off-by: Boqun Feng --- kernel/rcu/rcu.h | 1 - kernel/rcu/tree.c | 61 ++++++++++++++++++++++++++++++++---------------- kernel/rcu/tree.h | 3 +++ kernel/rcu/tree_exp.h | 10 ++++---- kernel/rcu/tree_plugin.h | 10 +++----- 5 files changed, 52 insertions(+), 33 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index f94f65877f2b..6beaf70d629f 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -625,7 +625,6 @@ void rcu_force_quiescent_state(void); extern struct workqueue_struct *rcu_gp_wq; #ifdef CONFIG_RCU_EXP_KTHREAD extern struct kthread_worker *rcu_exp_gp_kworker; -extern struct kthread_worker *rcu_exp_par_gp_kworker; #else /* !CONFIG_RCU_EXP_KTHREAD */ extern struct workqueue_struct *rcu_par_gp_wq; #endif /* CONFIG_RCU_EXP_KTHREAD */ diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 657ac12f9e27..398c099d45d9 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -4396,33 +4396,39 @@ rcu_boot_init_percpu_data(int cpu) #ifdef CONFIG_RCU_EXP_KTHREAD struct kthread_worker *rcu_exp_gp_kworker; -struct kthread_worker *rcu_exp_par_gp_kworker; -static void __init rcu_start_exp_gp_kworkers(void) +static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp) { - const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker"; - const char *gp_kworker_name = "rcu_exp_gp_kthread_worker"; + struct kthread_worker *kworker; + const char *name = "rcu_exp_par_gp_kthread_worker/%d"; struct sched_param param = { .sched_priority = kthread_prio }; + int rnp_index = rnp - rcu_get_root(); - rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name); - if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) { - pr_err("Failed to create %s!\n", gp_kworker_name); - rcu_exp_gp_kworker = NULL; + if (rnp->exp_kworker) + return; + + kworker = kthread_create_worker(0, name, rnp_index); + if (IS_ERR_OR_NULL(kworker)) { + pr_err("Failed to create par gp kworker on %d/%d\n", + rnp->grplo, rnp->grphi); return; } + WRITE_ONCE(rnp->exp_kworker, kworker); + sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param); +} - rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name); - if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) { - pr_err("Failed to create %s!\n", par_gp_kworker_name); - rcu_exp_par_gp_kworker = NULL; - kthread_destroy_worker(rcu_exp_gp_kworker); +static void __init rcu_start_exp_gp_kworker(void) +{ + const char *name = "rcu_exp_gp_kthread_worker"; + struct sched_param param = { .sched_priority = kthread_prio }; + + rcu_exp_gp_kworker = kthread_create_worker(0, name); + if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) { + pr_err("Failed to create %s!\n", name); rcu_exp_gp_kworker = NULL; return; } - sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param); - sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO, - &param); + sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param); } static inline void rcu_alloc_par_gp_wq(void) @@ -4431,7 +4437,11 @@ static inline void rcu_alloc_par_gp_wq(void) #else /* !CONFIG_RCU_EXP_KTHREAD */ struct workqueue_struct *rcu_par_gp_wq; -static void __init rcu_start_exp_gp_kworkers(void) +static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp) +{ +} + +static void __init rcu_start_exp_gp_kworker(void) { } @@ -4442,6 +4452,17 @@ static inline void rcu_alloc_par_gp_wq(void) } #endif /* CONFIG_RCU_EXP_KTHREAD */ +static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp) +{ + if ((IS_ENABLED(CONFIG_RCU_EXP_KTHREAD) || + IS_ENABLED(CONFIG_RCU_BOOST)) && rcu_scheduler_fully_active) { + mutex_lock(&rnp->kthread_mutex); + rcu_spawn_one_boost_kthread(rnp); +
rcu_spawn_exp_par_gp_kworker(rnp); + mutex_unlock(&rnp->kthread_mutex); + } +} + /* * Invoked early in the CPU-online process, when pretty much all services * are available. The incoming CPU is not present. @@ -4490,7 +4511,7 @@ int rcutree_prepare_cpu(unsigned int cpu) rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - rcu_spawn_one_boost_kthread(rnp); + rcu_spawn_rnp_kthreads(rnp); rcu_spawn_cpu_nocb_kthread(cpu); WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1); @@ -4812,10 +4833,10 @@ static int __init rcu_spawn_gp_kthread(void) * due to rcu_scheduler_fully_active. */ rcu_spawn_cpu_nocb_kthread(smp_processor_id()); - rcu_spawn_one_boost_kthread(rdp->mynode); + rcu_spawn_rnp_kthreads(rdp->mynode); rcu_spawn_core_kthreads(); /* Create kthread worker for expedited GPs */ - rcu_start_exp_gp_kworkers(); + rcu_start_exp_gp_kworker(); return 0; } early_initcall(rcu_spawn_gp_kthread); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 13e7b0d907ab..e173808f486f 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -72,6 +72,9 @@ struct rcu_node { /* Online CPUs for next expedited GP. */ /* Any CPU that has ever been online will */ /* have its bit set. */ + struct kthread_worker *exp_kworker; + /* Workers performing per node expedited GP */ + /* initialization. */ unsigned long cbovldmask; /* CPUs experiencing callback overload. */ unsigned long ffmask; /* Fully functional CPUs. */ diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 6123a60d9a4d..0318a8a062d5 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -432,9 +432,9 @@ static inline bool rcu_exp_worker_started(void) return !!READ_ONCE(rcu_exp_gp_kworker); } -static inline bool rcu_exp_par_worker_started(void) +static inline bool rcu_exp_par_worker_started(struct rcu_node *rnp) { - return !!READ_ONCE(rcu_exp_par_gp_kworker); + return !!READ_ONCE(rnp->exp_kworker); } static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp) @@ -445,7 +445,7 @@ static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp) * another work item on the same kthread worker can result in * deadlock. */ - kthread_queue_work(rcu_exp_par_gp_kworker, &rnp->rew.rew_work); + kthread_queue_work(READ_ONCE(rnp->exp_kworker), &rnp->rew.rew_work); } static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp) @@ -487,7 +487,7 @@ static inline bool rcu_exp_worker_started(void) return !!READ_ONCE(rcu_gp_wq); } -static inline bool rcu_exp_par_worker_started(void) +static inline bool rcu_exp_par_worker_started(struct rcu_node *rnp) { return !!READ_ONCE(rcu_par_gp_wq); } @@ -550,7 +550,7 @@ static void sync_rcu_exp_select_cpus(void) rnp->exp_need_flush = false; if (!READ_ONCE(rnp->expmask)) continue; /* Avoid early boot non-existent wq. */ - if (!rcu_exp_par_worker_started() || + if (!rcu_exp_par_worker_started(rnp) || rcu_scheduler_active != RCU_SCHEDULER_RUNNING || rcu_is_last_leaf_node(rnp)) { /* No worker started yet or last leaf, do direct call. 
*/ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 0d307674915c..09bdd36ca9ff 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1195,14 +1195,13 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) struct sched_param sp; struct task_struct *t; - mutex_lock(&rnp->kthread_mutex); - if (rnp->boost_kthread_task || !rcu_scheduler_fully_active) - goto out; + if (rnp->boost_kthread_task) + return; t = kthread_create(rcu_boost_kthread, (void *)rnp, "rcub/%d", rnp_index); if (WARN_ON_ONCE(IS_ERR(t))) - goto out; + return; raw_spin_lock_irqsave_rcu_node(rnp, flags); rnp->boost_kthread_task = t; @@ -1210,9 +1209,6 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) sp.sched_priority = kthread_prio; sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ - - out: - mutex_unlock(&rnp->kthread_mutex); } /* -- cgit v1.2.3 From b67cffcbbf9dc759d95d330a5af5d1480af2b1f1 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 12 Jan 2024 16:46:20 +0100 Subject: rcu/exp: Handle parallel exp gp kworkers affinity Affine the parallel expedited gp kworkers to their respective RCU node in order to make them close to the cache their are playing with. This reuses the boost kthreads machinery that probe into CPU hotplug operations such that the kthreads become/stay affine to their respective node as soon/long as they contain online CPUs. Otherwise and if the current CPU going down was the last online on the leaf node, the related kthread is affine to the housekeeping CPUs. In the long run, this affinity VS CPU hotplug operation game should probably be implemented at the generic kthread level. Signed-off-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney [boqun: s/* rcu_boost_task/*rcu_boost_task as reported by checkpatch] Signed-off-by: Boqun Feng --- kernel/rcu/tree.c | 79 +++++++++++++++++++++++++++++++++++++++++++++--- kernel/rcu/tree_plugin.h | 42 +++---------------------- 2 files changed, 78 insertions(+), 43 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 398c099d45d9..312c4c5d4509 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -145,7 +145,7 @@ static int rcu_scheduler_fully_active __read_mostly; static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, unsigned long gps, unsigned long flags); -static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); +static struct task_struct *rcu_boost_task(struct rcu_node *rnp); static void invoke_rcu_core(void); static void rcu_report_exp_rdp(struct rcu_data *rdp); static void sync_sched_exp_online_cleanup(int cpu); @@ -4417,6 +4417,16 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp) sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, ¶m); } +static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp) +{ + struct kthread_worker *kworker = READ_ONCE(rnp->exp_kworker); + + if (!kworker) + return NULL; + + return kworker->task; +} + static void __init rcu_start_exp_gp_kworker(void) { const char *name = "rcu_exp_gp_kthread_worker"; @@ -4441,6 +4451,11 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp) { } +static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp) +{ + return NULL; +} + static void __init rcu_start_exp_gp_kworker(void) { } @@ -4519,13 +4534,67 @@ int rcutree_prepare_cpu(unsigned int cpu) } /* - * Update RCU priority boot kthread affinity for CPU-hotplug changes. 
+ * Update kthreads affinity during CPU-hotplug changes. + * + * Set the per-rcu_node kthread's affinity to cover all CPUs that are + * served by the rcu_node in question. The CPU hotplug lock is still + * held, so the value of rnp->qsmaskinit will be stable. + * + * We don't include outgoingcpu in the affinity set, use -1 if there is + * no outgoing CPU. If there are no CPUs left in the affinity set, + * this function allows the kthread to execute on any CPU. + * + * Any future concurrent calls are serialized via ->kthread_mutex. */ -static void rcutree_affinity_setting(unsigned int cpu, int outgoing) +static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu) { - struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); + cpumask_var_t cm; + unsigned long mask; + struct rcu_data *rdp; + struct rcu_node *rnp; + struct task_struct *task_boost, *task_exp; + + if (!IS_ENABLED(CONFIG_RCU_EXP_KTHREAD) && !IS_ENABLED(CONFIG_RCU_BOOST)) + return; + + rdp = per_cpu_ptr(&rcu_data, cpu); + rnp = rdp->mynode; + + task_boost = rcu_boost_task(rnp); + task_exp = rcu_exp_par_gp_task(rnp); + + /* + * If CPU is the boot one, those tasks are created later from early + * initcall since kthreadd must be created first. + */ + if (!task_boost && !task_exp) + return; + + if (!zalloc_cpumask_var(&cm, GFP_KERNEL)) + return; + + mutex_lock(&rnp->kthread_mutex); + mask = rcu_rnp_online_cpus(rnp); + for_each_leaf_node_possible_cpu(rnp, cpu) + if ((mask & leaf_node_cpu_bit(rnp, cpu)) && + cpu != outgoingcpu) + cpumask_set_cpu(cpu, cm); + cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU)); + if (cpumask_empty(cm)) { + cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU)); + if (outgoingcpu >= 0) + cpumask_clear_cpu(outgoingcpu, cm); + } + + if (task_exp) + set_cpus_allowed_ptr(task_exp, cm); + + if (task_boost) + set_cpus_allowed_ptr(task_boost, cm); + + mutex_unlock(&rnp->kthread_mutex); - rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); + free_cpumask_var(cm); } /* diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 09bdd36ca9ff..36a8b5dbf5b5 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1211,43 +1211,9 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ } -/* - * Set the per-rcu_node kthread's affinity to cover all CPUs that are - * served by the rcu_node in question. The CPU hotplug lock is still - * held, so the value of rnp->qsmaskinit will be stable. - * - * We don't include outgoingcpu in the affinity set, use -1 if there is - * no outgoing CPU. If there are no CPUs left in the affinity set, - * this function allows the kthread to execute on any CPU. - * - * Any future concurrent calls are serialized via ->kthread_mutex. 
- */ -static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) +static struct task_struct *rcu_boost_task(struct rcu_node *rnp) { - struct task_struct *t = rnp->boost_kthread_task; - unsigned long mask; - cpumask_var_t cm; - int cpu; - - if (!t) - return; - if (!zalloc_cpumask_var(&cm, GFP_KERNEL)) - return; - mutex_lock(&rnp->kthread_mutex); - mask = rcu_rnp_online_cpus(rnp); - for_each_leaf_node_possible_cpu(rnp, cpu) - if ((mask & leaf_node_cpu_bit(rnp, cpu)) && - cpu != outgoingcpu) - cpumask_set_cpu(cpu, cm); - cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU)); - if (cpumask_empty(cm)) { - cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU)); - if (outgoingcpu >= 0) - cpumask_clear_cpu(outgoingcpu, cm); - } - set_cpus_allowed_ptr(t, cm); - mutex_unlock(&rnp->kthread_mutex); - free_cpumask_var(cm); + return READ_ONCE(rnp->boost_kthread_task); } #else /* #ifdef CONFIG_RCU_BOOST */ @@ -1266,10 +1232,10 @@ static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) { } -static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) +static struct task_struct *rcu_boost_task(struct rcu_node *rnp) { + return NULL; } - #endif /* #else #ifdef CONFIG_RCU_BOOST */ /* -- cgit v1.2.3 From 23da2ad64dbe9f3fab10af90484fe41e144337b1 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 12 Jan 2024 16:46:21 +0100 Subject: rcu/exp: Remove rcu_par_gp_wq TREE04 running on short iterations can produce writer stalls of the following kind: ??? Writer stall state RTWS_EXP_SYNC(4) g3968 f0x0 ->state 0x2 cpu 0 task:rcu_torture_wri state:D stack:14568 pid:83 ppid:2 flags:0x00004000 Call Trace: __schedule+0x2de/0x850 ? trace_event_raw_event_rcu_exp_funnel_lock+0x6d/0xb0 schedule+0x4f/0x90 synchronize_rcu_expedited+0x430/0x670 ? __pfx_autoremove_wake_function+0x10/0x10 ? __pfx_synchronize_rcu_expedited+0x10/0x10 do_rtws_sync.constprop.0+0xde/0x230 rcu_torture_writer+0x4b4/0xcd0 ? __pfx_rcu_torture_writer+0x10/0x10 kthread+0xc7/0xf0 ? __pfx_kthread+0x10/0x10 ret_from_fork+0x2f/0x50 ? __pfx_kthread+0x10/0x10 ret_from_fork_asm+0x1b/0x30 Waiting for an expedited grace period and polling for an expedited grace period both are operations that internally rely on the same workqueue performing necessary asynchronous work. However, a dependency chain is involved between those two operations, as depicted below: ====== CPU 0 ======= ====== CPU 1 ======= synchronize_rcu_expedited() exp_funnel_lock() mutex_lock(&rcu_state.exp_mutex); start_poll_synchronize_rcu_expedited queue_work(rcu_gp_wq, &rnp->exp_poll_wq); synchronize_rcu_expedited_queue_work() queue_work(rcu_gp_wq, &rew->rew_work); wait_event() // A, wait for &rew->rew_work completion mutex_unlock() // B //======> switch to kworker sync_rcu_do_polled_gp() { synchronize_rcu_expedited() exp_funnel_lock() mutex_lock(&rcu_state.exp_mutex); // C, wait B .... } // D Since workqueues are usually implemented on top of several kworkers handling the queue concurrently, the above situation wouldn't deadlock most of the time because A then doesn't depend on D. But in case of memory stress, a single kworker may end up handling alone all the works in a serialized way. In that case the above layout becomes a problem because A then waits for D, closing a circular dependency: A -> D -> C -> B -> A This however only happens when CONFIG_RCU_EXP_KTHREAD=n. 
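To make the failure shape concrete, here is a hedged, self-contained illustration in plain C with POSIX threads, not kernel code, and with every name in it invented for the demo (polled_work and wake_work stand in for sync_rcu_do_polled_gp() and &rew->rew_work). A single worker thread drains a two-slot queue in order; the main thread holds a mutex (B pending) while waiting for its queued work (A), but the work queued ahead of it needs that same mutex (C), so the program deadlocks exactly like the A -> D -> C -> B -> A cycle above:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t exp_mutex = PTHREAD_MUTEX_INITIALIZER; /* rcu_state.exp_mutex stand-in */

/* Two-slot FIFO drained by exactly one worker, like a starved kworker. */
static void (*queue[2])(void);
static int head, tail;
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t qcond = PTHREAD_COND_INITIALIZER;

static int wake_done;
static pthread_mutex_t dlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t dcond = PTHREAD_COND_INITIALIZER;

static void queue_work(void (*fn)(void))
{
	pthread_mutex_lock(&qlock);
	queue[tail++] = fn;
	pthread_cond_signal(&qcond);
	pthread_mutex_unlock(&qlock);
}

static void *worker(void *arg)
{
	for (;;) {
		void (*fn)(void);

		pthread_mutex_lock(&qlock);
		while (head == tail)
			pthread_cond_wait(&qcond, &qlock);
		fn = queue[head++];
		pthread_mutex_unlock(&qlock);
		fn(); /* strictly one work at a time: polled_work blocks wake_work */
	}
	return NULL;
}

static void wake_work(void) /* counterpart of &rew->rew_work (D) */
{
	pthread_mutex_lock(&dlock);
	wake_done = 1;
	pthread_cond_signal(&dcond);
	pthread_mutex_unlock(&dlock);
}

static void polled_work(void) /* counterpart of sync_rcu_do_polled_gp() */
{
	pthread_mutex_lock(&exp_mutex); /* C: waits for B */
	pthread_mutex_unlock(&exp_mutex);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_mutex_lock(&exp_mutex); /* exp_funnel_lock() */
	queue_work(polled_work); /* queued first, so the lone worker runs it first */
	queue_work(wake_work);
	pthread_mutex_lock(&dlock);
	while (!wake_done) /* A: wait_event(), never satisfied */
		pthread_cond_wait(&dcond, &dlock);
	pthread_mutex_unlock(&dlock);
	pthread_mutex_unlock(&exp_mutex); /* B: never reached */
	puts("no deadlock (a second worker would get here)");
	return 0;
}

With two or more workers, wake_work would run concurrently and complete the wait, which is why the deadlock only bites when memory stress leaves a single kworker serializing the queue.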
Indeed synchronize_rcu_expedited() is otherwise implemented on top of a kthread worker while polling still relies on rcu_gp_wq workqueue, breaking the above circular dependency chain. Fix this by making the expedited grace period always rely on the kthread worker. The workqueue based implementation is essentially a duplicate anyway now that the per-node initialization is performed by per-node kthread workers. Meanwhile the CONFIG_RCU_EXP_KTHREAD switch is still kept around to manage the scheduler policy of these kthread workers. Reported-by: Anna-Maria Behnsen Reported-by: Thomas Gleixner Suggested-by: Joel Fernandes Suggested-by: Paul E. McKenney Suggested-by: Neeraj upadhyay Signed-off-by: Frederic Weisbecker Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/rcu.h | 4 --- kernel/rcu/tree.c | 40 +++++----------------------- kernel/rcu/tree.h | 6 +---- kernel/rcu/tree_exp.h | 73 +--------------------------------------------------- 4 files changed, 8 insertions(+), 115 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 6beaf70d629f..99032b9cb667 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -623,11 +623,7 @@ int rcu_get_gp_kthreads_prio(void); void rcu_fwd_progress_check(unsigned long j); void rcu_force_quiescent_state(void); extern struct workqueue_struct *rcu_gp_wq; -#ifdef CONFIG_RCU_EXP_KTHREAD extern struct kthread_worker *rcu_exp_gp_kworker; -#else /* !CONFIG_RCU_EXP_KTHREAD */ -extern struct workqueue_struct *rcu_par_gp_wq; -#endif /* CONFIG_RCU_EXP_KTHREAD */ void rcu_gp_slow_register(atomic_t *rgssp); void rcu_gp_slow_unregister(atomic_t *rgssp); #endif /* #else #ifdef CONFIG_TINY_RCU */ diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 312c4c5d4509..9591c22408a1 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -4394,7 +4394,6 @@ rcu_boot_init_percpu_data(int cpu) rcu_boot_init_nocb_percpu_data(rdp); } -#ifdef CONFIG_RCU_EXP_KTHREAD struct kthread_worker *rcu_exp_gp_kworker; static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp) @@ -4414,7 +4413,9 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp) return; } WRITE_ONCE(rnp->exp_kworker, kworker); - sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param); + + if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD)) + sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param); } static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp) @@ -4438,39 +4439,14 @@ static void __init rcu_start_exp_gp_kworker(void) rcu_exp_gp_kworker = NULL; return; } - sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param); -} - -static inline void rcu_alloc_par_gp_wq(void) -{ -} -#else /* !CONFIG_RCU_EXP_KTHREAD */ -struct workqueue_struct *rcu_par_gp_wq; - -static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp) -{ -} - -static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp) -{ - return NULL; -} - -static void __init rcu_start_exp_gp_kworker(void) -{ -} -static inline void rcu_alloc_par_gp_wq(void) -{ - rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0); - WARN_ON(!rcu_par_gp_wq); + if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD)) + sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param); } -#endif /* CONFIG_RCU_EXP_KTHREAD */ static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp) { - if ((IS_ENABLED(CONFIG_RCU_EXP_KTHREAD) || - IS_ENABLED(CONFIG_RCU_BOOST)) && rcu_scheduler_fully_active) { + if (rcu_scheduler_fully_active) { mutex_lock(&rnp->kthread_mutex); rcu_spawn_one_boost_kthread(rnp);
rcu_spawn_exp_par_gp_kworker(rnp); @@ -4554,9 +4530,6 @@ static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu) struct rcu_node *rnp; struct task_struct *task_boost, *task_exp; - if (!IS_ENABLED(CONFIG_RCU_EXP_KTHREAD) && !IS_ENABLED(CONFIG_RCU_BOOST)) - return; - rdp = per_cpu_ptr(&rcu_data, cpu); rnp = rdp->mynode; @@ -5245,7 +5218,6 @@ void __init rcu_init(void) /* Create workqueue for Tree SRCU and for expedited GPs. */ rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0); WARN_ON(!rcu_gp_wq); - rcu_alloc_par_gp_wq(); /* Fill in default value for rcutree.qovld boot parameter. */ /* -After- the rcu_node ->lock fields are initialized! */ diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index e173808f486f..f35e47f24d80 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -21,14 +21,10 @@ #include "rcu_segcblist.h" -/* Communicate arguments to a workqueue handler. */ +/* Communicate arguments to a kthread worker handler. */ struct rcu_exp_work { unsigned long rew_s; -#ifdef CONFIG_RCU_EXP_KTHREAD struct kthread_work rew_work; -#else - struct work_struct rew_work; -#endif /* CONFIG_RCU_EXP_KTHREAD */ }; /* RCU's kthread states for tracing. */ diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 0318a8a062d5..6b83537480b1 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -418,7 +418,6 @@ retry_ipi: static void rcu_exp_sel_wait_wake(unsigned long s); -#ifdef CONFIG_RCU_EXP_KTHREAD static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp) { struct rcu_exp_work *rewp = @@ -470,69 +469,6 @@ static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work); } -static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew) -{ -} -#else /* !CONFIG_RCU_EXP_KTHREAD */ -static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) -{ - struct rcu_exp_work *rewp = - container_of(wp, struct rcu_exp_work, rew_work); - - __sync_rcu_exp_select_node_cpus(rewp); -} - -static inline bool rcu_exp_worker_started(void) -{ - return !!READ_ONCE(rcu_gp_wq); -} - -static inline bool rcu_exp_par_worker_started(struct rcu_node *rnp) -{ - return !!READ_ONCE(rcu_par_gp_wq); -} - -static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp) -{ - int cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1); - - INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); - /* If all offline, queue the work on an unbound CPU. */ - if (unlikely(cpu > rnp->grphi - rnp->grplo)) - cpu = WORK_CPU_UNBOUND; - else - cpu += rnp->grplo; - queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); -} - -static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp) -{ - flush_work(&rnp->rew.rew_work); -} - -/* - * Work-queue handler to drive an expedited grace period forward. - */ -static void wait_rcu_exp_gp(struct work_struct *wp) -{ - struct rcu_exp_work *rewp; - - rewp = container_of(wp, struct rcu_exp_work, rew_work); - rcu_exp_sel_wait_wake(rewp->rew_s); -} - -static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew) -{ - INIT_WORK_ONSTACK(&rew->rew_work, wait_rcu_exp_gp); - queue_work(rcu_gp_wq, &rew->rew_work); -} - -static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew) -{ - destroy_work_on_stack(&rew->rew_work); -} -#endif /* CONFIG_RCU_EXP_KTHREAD */ - /* * Select the nodes that the upcoming expedited grace period needs * to wait for. 
@@ -965,7 +901,6 @@ static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp) */ void synchronize_rcu_expedited(void) { - bool use_worker; unsigned long flags; struct rcu_exp_work rew; struct rcu_node *rnp; @@ -976,9 +911,6 @@ void synchronize_rcu_expedited(void) lock_is_held(&rcu_sched_lock_map), "Illegal synchronize_rcu_expedited() in RCU read-side critical section"); - use_worker = (rcu_scheduler_active != RCU_SCHEDULER_INIT) && - rcu_exp_worker_started(); - /* Is the state is such that the call is a grace period? */ if (rcu_blocking_is_gp()) { // Note well that this code runs with !PREEMPT && !SMP. @@ -1008,7 +940,7 @@ void synchronize_rcu_expedited(void) return; /* Someone else did our work for us. */ /* Ensure that load happens before action based on it. */ - if (unlikely(!use_worker)) { + if (unlikely((rcu_scheduler_active == RCU_SCHEDULER_INIT) || !rcu_exp_worker_started())) { /* Direct call during scheduler init and early_initcalls(). */ rcu_exp_sel_wait_wake(s); } else { @@ -1025,9 +957,6 @@ void synchronize_rcu_expedited(void) /* Let the next expedited grace period start. */ mutex_unlock(&rcu_state.exp_mutex); - - if (likely(use_worker)) - synchronize_rcu_expedited_destroy_work(&rew); } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); -- cgit v1.2.3 From 3b239b308e94ce6c65f6646d251edb737b82e716 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 4 Dec 2023 20:34:58 -0800 Subject: context_tracking: Fix kerneldoc headers for __ct_user_{enter,exit}() Document the "state" parameter of both of these functions. Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202312041922.YZCcEPYD-lkp@intel.com/ Signed-off-by: Paul E. McKenney Tested-by: Randy Dunlap Acked-by: Randy Dunlap Cc: Frederic Weisbecker Signed-off-by: Boqun Feng --- kernel/context_tracking.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c index 6ef0b35fc28c..70ae70d03823 100644 --- a/kernel/context_tracking.c +++ b/kernel/context_tracking.c @@ -458,6 +458,8 @@ static __always_inline void context_tracking_recursion_exit(void) * __ct_user_enter - Inform the context tracking that the CPU is going * to enter user or guest space mode. * + * @state: userspace context-tracking state to enter. + * * This function must be called right before we switch from the kernel * to user or guest space, when it's guaranteed the remaining kernel * instructions to execute won't use any RCU read side critical section @@ -595,6 +597,8 @@ NOKPROBE_SYMBOL(user_enter_callable); * __ct_user_exit - Inform the context tracking that the CPU is * exiting user or guest mode and entering the kernel. * + * @state: userspace context-tracking state being exited from. + * * This function must be called after we entered the kernel from user or * guest space before any use of RCU read side critical section. This * potentially include any high level kernel code like syscalls, exceptions, -- cgit v1.2.3 From 499d7e7e83d25fcf0fa1a8c0be6857a84cbf6a4a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 15 Nov 2023 14:11:26 -0500 Subject: rcu: Rename jiffies_till_flush to jiffies_lazy_flush The variable name jiffies_till_flush is too generic and therefore: * It may shadow a global variable * It doesn't say what it operates on Make the name more precise, along with the related APIs. Reviewed-by: Joel Fernandes (Google) Signed-off-by: Frederic Weisbecker Reviewed-by: Paul E.
McKenney Signed-off-by: Boqun Feng --- kernel/rcu/rcu.h | 8 ++++---- kernel/rcu/rcuscale.c | 6 +++--- kernel/rcu/tree_nocb.h | 22 +++++++++++----------- 3 files changed, 18 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index f94f65877f2b..dcfb666f2499 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -543,11 +543,11 @@ enum rcutorture_type { }; #if defined(CONFIG_RCU_LAZY) -unsigned long rcu_lazy_get_jiffies_till_flush(void); -void rcu_lazy_set_jiffies_till_flush(unsigned long j); +unsigned long rcu_get_jiffies_lazy_flush(void); +void rcu_set_jiffies_lazy_flush(unsigned long j); #else -static inline unsigned long rcu_lazy_get_jiffies_till_flush(void) { return 0; } -static inline void rcu_lazy_set_jiffies_till_flush(unsigned long j) { } +static inline unsigned long rcu_get_jiffies_lazy_flush(void) { return 0; } +static inline void rcu_set_jiffies_lazy_flush(unsigned long j) { } #endif #if defined(CONFIG_TREE_RCU) diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index ffdb30495e3c..8db4fedaaa1e 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -764,9 +764,9 @@ kfree_scale_init(void) if (kfree_by_call_rcu) { /* do a test to check the timeout. */ - orig_jif = rcu_lazy_get_jiffies_till_flush(); + orig_jif = rcu_get_jiffies_lazy_flush(); - rcu_lazy_set_jiffies_till_flush(2 * HZ); + rcu_set_jiffies_lazy_flush(2 * HZ); rcu_barrier(); jif_start = jiffies; @@ -775,7 +775,7 @@ kfree_scale_init(void) smp_cond_load_relaxed(&rcu_lazy_test1_cb_called, VAL == 1); - rcu_lazy_set_jiffies_till_flush(orig_jif); + rcu_set_jiffies_lazy_flush(orig_jif); if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start < 2 * HZ)) { pr_alert("ERROR: call_rcu() CBs are not being lazy as expected!\n"); diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index 4efbf7333d4e..aecef51166c7 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -256,6 +256,7 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force) return __wake_nocb_gp(rdp_gp, rdp, force, flags); } +#ifdef CONFIG_RCU_LAZY /* * LAZY_FLUSH_JIFFIES decides the maximum amount of time that * can elapse before lazy callbacks are flushed. Lazy callbacks @@ -264,21 +265,20 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force) * left unsubmitted to RCU after those many jiffies. */ #define LAZY_FLUSH_JIFFIES (10 * HZ) -static unsigned long jiffies_till_flush = LAZY_FLUSH_JIFFIES; +static unsigned long jiffies_lazy_flush = LAZY_FLUSH_JIFFIES; -#ifdef CONFIG_RCU_LAZY // To be called only from test code. 
-void rcu_lazy_set_jiffies_till_flush(unsigned long jif) +void rcu_set_jiffies_lazy_flush(unsigned long jif) { - jiffies_till_flush = jif; + jiffies_lazy_flush = jif; } -EXPORT_SYMBOL(rcu_lazy_set_jiffies_till_flush); +EXPORT_SYMBOL(rcu_set_jiffies_lazy_flush); -unsigned long rcu_lazy_get_jiffies_till_flush(void) +unsigned long rcu_get_jiffies_lazy_flush(void) { - return jiffies_till_flush; + return jiffies_lazy_flush; } -EXPORT_SYMBOL(rcu_lazy_get_jiffies_till_flush); +EXPORT_SYMBOL(rcu_get_jiffies_lazy_flush); #endif /* @@ -299,7 +299,7 @@ static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype, */ if (waketype == RCU_NOCB_WAKE_LAZY && rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) { - mod_timer(&rdp_gp->nocb_timer, jiffies + jiffies_till_flush); + mod_timer(&rdp_gp->nocb_timer, jiffies + rcu_get_jiffies_lazy_flush()); WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype); } else if (waketype == RCU_NOCB_WAKE_BYPASS) { mod_timer(&rdp_gp->nocb_timer, jiffies + 2); @@ -482,7 +482,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, // flush ->nocb_bypass to ->cblist. if ((ncbs && !bypass_is_lazy && j != READ_ONCE(rdp->nocb_bypass_first)) || (ncbs && bypass_is_lazy && - (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + jiffies_till_flush))) || + (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()))) || ncbs >= qhimark) { rcu_nocb_lock(rdp); *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); @@ -723,7 +723,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp) lazy_ncbs = READ_ONCE(rdp->lazy_len); if (bypass_ncbs && (lazy_ncbs == bypass_ncbs) && - (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + jiffies_till_flush) || + (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()) || bypass_ncbs > 2 * qhimark)) { flush_bypass = true; } else if (bypass_ncbs && (lazy_ncbs != bypass_ncbs) && -- cgit v1.2.3 From 7f66f099de4dc4b1a66a3f94e6db16409924a6f8 Mon Sep 17 00:00:00 2001 From: Qais Yousef Date: Sun, 3 Dec 2023 01:12:52 +0000 Subject: rcu: Provide a boot time parameter to control lazy RCU To allow more flexible arrangements while still providing a single kernel for distros, provide a boot time parameter to enable/disable lazy RCU. Specify: rcutree.enable_rcu_lazy=[y|1|n|0] This also requires rcu_nocbs=all at boot time to enable/disable lazy RCU. To disable it by default at build time when CONFIG_RCU_LAZY=y, the new CONFIG_RCU_LAZY_DEFAULT_OFF can be used. Signed-off-by: Qais Yousef (Google) Tested-by: Andrea Righi Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/Kconfig | 13 +++++++++++++ kernel/rcu/tree.c | 7 ++++++- 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig index bdd7eadb33d8..e7d2dd267593 100644 --- a/kernel/rcu/Kconfig +++ b/kernel/rcu/Kconfig @@ -314,6 +314,19 @@ config RCU_LAZY To save power, batch RCU callbacks and flush after delay, memory pressure, or callback list growing too big. + Requires rcu_nocbs=all to be set. + + Use rcutree.enable_rcu_lazy=0 to turn it off at boot time. + +config RCU_LAZY_DEFAULT_OFF + bool "Turn RCU lazy invocation off by default" + depends on RCU_LAZY + default n + help + Allows building the kernel with CONFIG_RCU_LAZY=y yet keep it default + off. Boot time param rcutree.enable_rcu_lazy=1 can be used to switch + it back on.
+ config RCU_DOUBLE_CHECK_CB_TIME bool "RCU callback-batch backup time check" depends on RCU_EXPERT diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b2bccfd37c38..41c50a6c607e 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2753,6 +2753,9 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in) } #ifdef CONFIG_RCU_LAZY +static bool enable_rcu_lazy __read_mostly = !IS_ENABLED(CONFIG_RCU_LAZY_DEFAULT_OFF); +module_param(enable_rcu_lazy, bool, 0444); + /** * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and * flush all lazy callbacks (including the new one) to the main ->cblist while @@ -2778,6 +2781,8 @@ void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func) __call_rcu_common(head, func, false); } EXPORT_SYMBOL_GPL(call_rcu_hurry); +#else +#define enable_rcu_lazy false #endif /** @@ -2826,7 +2831,7 @@ EXPORT_SYMBOL_GPL(call_rcu_hurry); */ void call_rcu(struct rcu_head *head, rcu_callback_t func) { - __call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY)); + __call_rcu_common(head, func, enable_rcu_lazy); } EXPORT_SYMBOL_GPL(call_rcu); -- cgit v1.2.3 From 67050837ec14fc20a26b237ce965c50c85a318b7 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Wed, 27 Dec 2023 12:47:38 -0500 Subject: srcu: Improve comments about acceleration leak The comments added in commit 1ef990c4b36b ("srcu: No need to advance/accelerate if no callback enqueued") are a bit confusing. The comments are describing a scenario for code that was moved and is no longer the way it was (snapshot after advancing). Improve the code comments to reflect this and also document why acceleration can never fail. Cc: Frederic Weisbecker Cc: Neeraj Upadhyay Reviewed-by: Frederic Weisbecker Signed-off-by: Joel Fernandes (Google) Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/srcutree.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 0351a4e83529..e4d673fc30f4 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -1234,11 +1234,20 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp, if (rhp) rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); /* - * The snapshot for acceleration must be taken _before_ the read of the - * current gp sequence used for advancing, otherwise advancing may fail - * and acceleration may then fail too. + * It's crucial to capture the snapshot 's' for acceleration before + * reading the current gp_seq that is used for advancing. This is + * essential because if the acceleration snapshot is taken after a + * failed advancement attempt, there's a risk that a grace period may + * conclude and a new one may start in the interim. If the snapshot is + * captured after this sequence of events, the acceleration snapshot 's' + * could be excessively advanced, leading to acceleration failure. + * In such a scenario, an 'acceleration leak' can occur, where new + * callbacks become indefinitely stuck in the RCU_NEXT_TAIL segment. + * Also note that encountering advancing failures is a normal + * occurrence when the grace period for RCU_WAIT_TAIL is in progress. * - * This could happen if: + * To see this, consider the following events which occur if + * rcu_seq_snap() were to be called after advance: * * 1) The RCU_WAIT_TAIL segment has callbacks (gp_num = X + 4) and the * RCU_NEXT_READY_TAIL also has callbacks (gp_num = X + 8). 
@@ -1264,6 +1273,13 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp, if (rhp) { rcu_segcblist_advance(&sdp->srcu_cblist, rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq)); + /* + * Acceleration can never fail because the base current gp_seq + * used for acceleration is <= the value of gp_seq used for + * advancing. This means that RCU_NEXT_TAIL segment will + * always be able to be emptied by the acceleration into the + * RCU_NEXT_READY_TAIL or RCU_WAIT_TAIL segments. + */ WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s)); } if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { -- cgit v1.2.3 From fd2a749d3f4f7ff0129af1a2c2685faca407ea54 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 3 Jan 2024 10:59:25 -0800 Subject: rcutorture: Suppress rtort_pipe_count warnings until after stalls Currently, if rcu_torture_writer() sees fewer than ten grace periods having elapsed during a call to stutter_wait() that actually waited, the rtort_pipe_count warning is emitted. This has worked well for a long time. Except that the rcutorture TREE07 scenario now does a short-term 14-second RCU CPU stall, which can most definitely cause false-positive rtort_pipe_count warnings. This commit therefore changes rcu_torture_writer() to compute the full expected holdoff and stall duration, and to refuse to report any rtort_pipe_count warnings until after all stalls have completed. Signed-off-by: Paul E. McKenney Cc: Frederic Weisbecker Signed-off-by: Boqun Feng --- kernel/rcu/rcutorture.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 7567ca8e743c..45d6b4c3d199 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1368,9 +1368,13 @@ rcu_torture_writer(void *arg) struct rcu_torture *rp; struct rcu_torture *old_rp; static DEFINE_TORTURE_RANDOM(rand); + unsigned long stallsdone = jiffies; bool stutter_waited; unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE]; + // If a new stall test is added, this must be adjusted. + if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu) + stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) * HZ; VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); if (!can_expedite) pr_alert("%s" TORTURE_FLAG @@ -1576,11 +1580,11 @@ rcu_torture_writer(void *arg) !atomic_read(&rcu_fwd_cb_nodelay) && !cur_ops->slow_gps && !torture_must_stop() && - boot_ended) + boot_ended && + time_after(jiffies, stallsdone)) for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) if (list_empty(&rcu_tortures[i].rtort_free) && - rcu_access_pointer(rcu_torture_current) != - &rcu_tortures[i]) { + rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) { tracing_off(); show_rcu_gp_kthreads(); WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); @@ -2441,7 +2445,8 @@ static struct notifier_block rcu_torture_stall_block = { /* * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then - * induces a CPU stall for the time specified by stall_cpu. + * induces a CPU stall for the time specified by stall_cpu. If a new + * stall test is added, stallsdone in rcu_torture_writer() must be adjusted.
*/ static int rcu_torture_stall(void *args) { -- cgit v1.2.3 From c90e3ecc91584558d24c82940a3651fdfc174be0 Mon Sep 17 00:00:00 2001 From: Onkarnath Date: Thu, 11 Jan 2024 14:57:22 +0530 Subject: rcu/sync: remove unused rcu_sync_enter_start function With commit 6a010a49b63a ("cgroup: Make !percpu threadgroup_rwsem operations optional"), the usage of rcu_sync_enter_start() was removed, so this function can also be removed. In the words of Oleg Nesterov: __rcu_sync_enter(wait => false) is a better alternative if someone needs rcu_sync_enter_start() again. Link: https://lore.kernel.org/all/20220725121208.GB28662@redhat.com/ Signed-off-by: Onkarnath Signed-off-by: Maninder Singh Acked-by: Oleg Nesterov Acked-by: Tejun Heo Reviewed-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/sync.c | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c index e550f97779b8..86df878a2fee 100644 --- a/kernel/rcu/sync.c +++ b/kernel/rcu/sync.c @@ -24,22 +24,6 @@ void rcu_sync_init(struct rcu_sync *rsp) init_waitqueue_head(&rsp->gp_wait); } -/** - * rcu_sync_enter_start - Force readers onto slow path for multiple updates - * @rsp: Pointer to rcu_sync structure to use for synchronization - * - * Must be called after rcu_sync_init() and before first use. - * - * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}() - * pairs turn into NO-OPs. - */ -void rcu_sync_enter_start(struct rcu_sync *rsp) -{ - rsp->gp_count++; - rsp->gp_state = GP_PASSED; -} - - static void rcu_sync_func(struct rcu_head *rhp); static void rcu_sync_call(struct rcu_sync *rsp) -- cgit v1.2.3 From 2f34d7337d98f3eae7bd3d1270efaf9d8a17cfc6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Feb 2024 08:33:55 -1000 Subject: workqueue: Fix queue_work_on() with BH workqueues When queue_work_on() is used to queue a BH work item on a remote CPU, the work item is queued on that CPU but kick_pool() raises softirq on the local CPU. This leads to stalls as the work item won't be executed until something else on the remote CPU schedules a BH work item or tasklet locally. Fix it by bouncing the softirq raise to the target CPU using per-cpu irq_work. Signed-off-by: Tejun Heo Fixes: 4cb1ef64609f ("workqueue: Implement BH workqueues to eventually replace tasklets") --- kernel/workqueue.c | 41 ++++++++++++++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4950bfc2cdcc..04e35dbe6799 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -54,6 +54,7 @@ #include #include #include +#include <linux/irq_work.h> #include "workqueue_internal.h" @@ -457,6 +458,10 @@ static bool wq_debug_force_rr_cpu = false; #endif module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644); +/* to raise softirq for the BH worker pools on other CPUs */ +static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS], + bh_pool_irq_works); + /* the BH worker pools */ static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], bh_worker_pools); @@ -1197,6 +1202,13 @@ static bool assign_work(struct work_struct *work, struct worker *worker, return true; } +static struct irq_work *bh_pool_irq_work(struct worker_pool *pool) +{ + int high = pool->attrs->nice == HIGHPRI_NICE_LEVEL ?
1 : 0; + + return &per_cpu(bh_pool_irq_works, pool->cpu)[high]; +} + /** * kick_pool - wake up an idle worker if necessary * @pool: pool to kick @@ -1215,10 +1227,15 @@ static bool kick_pool(struct worker_pool *pool) return false; if (pool->flags & POOL_BH) { - if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) - raise_softirq_irqoff(HI_SOFTIRQ); - else - raise_softirq_irqoff(TASKLET_SOFTIRQ); + if (likely(pool->cpu == smp_processor_id())) { + if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) + raise_softirq_irqoff(HI_SOFTIRQ); + else + raise_softirq_irqoff(TASKLET_SOFTIRQ); + } else { + irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu); + } + return true; } @@ -7367,6 +7384,16 @@ static inline void wq_watchdog_init(void) { } #endif /* CONFIG_WQ_WATCHDOG */ +static void bh_pool_kick_normal(struct irq_work *irq_work) +{ + raise_softirq_irqoff(TASKLET_SOFTIRQ); +} + +static void bh_pool_kick_highpri(struct irq_work *irq_work) +{ + raise_softirq_irqoff(HI_SOFTIRQ); +} + static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask) { if (!cpumask_intersects(wq_unbound_cpumask, mask)) { @@ -7408,6 +7435,8 @@ void __init workqueue_init_early(void) { struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM]; int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; + void (*irq_work_fns[2])(struct irq_work *) = { bh_pool_kick_normal, + bh_pool_kick_highpri }; int i, cpu; BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); @@ -7455,8 +7484,10 @@ void __init workqueue_init_early(void) i = 0; for_each_bh_worker_pool(pool, cpu) { - init_cpu_worker_pool(pool, cpu, std_nice[i++]); + init_cpu_worker_pool(pool, cpu, std_nice[i]); pool->flags |= POOL_BH; + init_irq_work(bh_pool_irq_work(pool), irq_work_fns[i]); + i++; } i = 0; -- cgit v1.2.3 From de1ff306dcf4546d6a8863b1f956335e0d3fbb9b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 27 Jan 2024 21:47:30 +0530 Subject: genirq/irqdomain: Remove the param count restriction from select() Now that the GIC-v3 callback can handle invocation with a fwspec parameter count of 0 lift the restriction in the core code and invoke select() unconditionally when the domain provides it. Preparatory change for per device MSI domains. Signed-off-by: Thomas Gleixner Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240127161753.114685-3-apatel@ventanamicro.com --- kernel/irq/irqdomain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 0bdef4fe925b..8fee37918195 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -448,7 +448,7 @@ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, */ mutex_lock(&irq_domain_mutex); list_for_each_entry(h, &irq_domain_list, link) { - if (h->ops->select && fwspec->param_count) + if (h->ops->select) rc = h->ops->select(h, fwspec, bus_token); else if (h->ops->match) rc = h->ops->match(h, to_of_node(fwnode), bus_token); -- cgit v1.2.3 From 9c78c1a85c04bdfbccc5a50588e001087d942b08 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 27 Jan 2024 21:47:35 +0530 Subject: genirq/msi: Provide optional translation op irq_create_fwspec_mapping() requires translation of the firmware spec to a hardware interrupt number and the trigger type information. Wired interrupts which are connected to a wire to MSI bridge, like MBIGEN are allocated that way. 
So far MBIGEN provides a regular irqdomain which then hooks backwards into the MSI infrastructure. That's an unholy mess and will be replaced with per device MSI domains which are regular MSI domains. Interrupts on MSI domains are not supported by irq_create_fwspec_mapping(), but for making the wire to MSI bridges sane it makes sense to provide a special allocation/free interface in the MSI infrastructure. That avoids the backdoors into the core MSI allocation code and just shares all the regular MSI infrastructure. Provide an optional translation callback in msi_domain_ops which can be utilized by these wire to MSI bridges. No other MSI domain should provide a translation callback. The default translation callback of the MSI irqdomains will warn when it is invoked on a non-prepared MSI domain. Signed-off-by: Thomas Gleixner Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240127161753.114685-8-apatel@ventanamicro.com --- kernel/irq/msi.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'kernel') diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 79b4a58ba9c3..c0e73788e878 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -726,11 +726,26 @@ static void msi_domain_free(struct irq_domain *domain, unsigned int virq, irq_domain_free_irqs_top(domain, virq, nr_irqs); } +static int msi_domain_translate(struct irq_domain *domain, struct irq_fwspec *fwspec, + irq_hw_number_t *hwirq, unsigned int *type) +{ + struct msi_domain_info *info = domain->host_data; + + /* + * This will catch allocations through the regular irqdomain path except + * for MSI domains which really support this, e.g. MBIGEN. + */ + if (!info->ops->msi_translate) + return -ENOTSUPP; + return info->ops->msi_translate(domain, fwspec, hwirq, type); +} + static const struct irq_domain_ops msi_domain_ops = { .alloc = msi_domain_alloc, .free = msi_domain_free, .activate = msi_domain_activate, .deactivate = msi_domain_deactivate, + .translate = msi_domain_translate, }; static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info, -- cgit v1.2.3 From 3095cc0d5b2c246ddfcb18f54ed5557640224b6a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 27 Jan 2024 21:47:36 +0530 Subject: genirq/msi: Split msi_domain_alloc_irq_at() In preparation for providing a special allocation function for wired interrupts which are connected to a wire to MSI bridge, split the inner workings of msi_domain_alloc_irq_at() out into a helper function so the code can be shared. No functional change. Signed-off-by: Thomas Gleixner Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240127161753.114685-9-apatel@ventanamicro.com --- kernel/irq/msi.c | 76 ++++++++++++++++++++++++++++++++------------------------ 1 file changed, 43 insertions(+), 33 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index c0e73788e878..8d463901c864 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -1446,34 +1446,10 @@ int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int return msi_domain_alloc_locked(dev, &ctrl); } -/** - * msi_domain_alloc_irq_at - Allocate an interrupt from a MSI interrupt domain at - * a given index - or at the next free index - * - * @dev: Pointer to device struct of the device for which the interrupts - * are allocated - * @domid: Id of the interrupt domain to operate on - * @index: Index for allocation. 
If @index == %MSI_ANY_INDEX the allocation - * uses the next free index. - * @affdesc: Optional pointer to an interrupt affinity descriptor structure - * @icookie: Optional pointer to a domain specific per instance cookie. If - * non-NULL the content of the cookie is stored in msi_desc::data. - * Must be NULL for MSI-X allocations - * - * This requires a MSI interrupt domain which lets the core code manage the - * MSI descriptors. - * - * Return: struct msi_map - * - * On success msi_map::index contains the allocated index number and - * msi_map::virq the corresponding Linux interrupt number - * - * On failure msi_map::index contains the error code and msi_map::virq - * is %0. - */ -struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index, - const struct irq_affinity_desc *affdesc, - union msi_instance_cookie *icookie) +static struct msi_map __msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, + unsigned int index, + const struct irq_affinity_desc *affdesc, + union msi_instance_cookie *icookie) { struct msi_ctrl ctrl = { .domid = domid, .nirqs = 1, }; struct irq_domain *domain; @@ -1481,17 +1457,16 @@ struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, u struct msi_desc *desc; int ret; - msi_lock_descs(dev); domain = msi_get_device_domain(dev, domid); if (!domain) { map.index = -ENODEV; - goto unlock; + return map; } desc = msi_alloc_desc(dev, 1, affdesc); if (!desc) { map.index = -ENOMEM; - goto unlock; + return map; } if (icookie) @@ -1500,7 +1475,7 @@ struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, u ret = msi_insert_desc(dev, desc, domid, index); if (ret) { map.index = ret; - goto unlock; + return map; } ctrl.first = ctrl.last = desc->msi_index; @@ -1513,7 +1488,42 @@ struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, u map.index = desc->msi_index; map.virq = desc->irq; } -unlock: + return map; +} + +/** + * msi_domain_alloc_irq_at - Allocate an interrupt from a MSI interrupt domain at + * a given index - or at the next free index + * + * @dev: Pointer to device struct of the device for which the interrupts + * are allocated + * @domid: Id of the interrupt domain to operate on + * @index: Index for allocation. If @index == %MSI_ANY_INDEX the allocation + * uses the next free index. + * @affdesc: Optional pointer to an interrupt affinity descriptor structure + * @icookie: Optional pointer to a domain specific per instance cookie. If + * non-NULL the content of the cookie is stored in msi_desc::data. + * Must be NULL for MSI-X allocations + * + * This requires a MSI interrupt domain which lets the core code manage the + * MSI descriptors. + * + * Return: struct msi_map + * + * On success msi_map::index contains the allocated index number and + * msi_map::virq the corresponding Linux interrupt number + * + * On failure msi_map::index contains the error code and msi_map::virq + * is %0. 
+ */ +struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index, + const struct irq_affinity_desc *affdesc, + union msi_instance_cookie *icookie) +{ + struct msi_map map; + + msi_lock_descs(dev); + map = __msi_domain_alloc_irq_at(dev, domid, index, affdesc, icookie); msi_unlock_descs(dev); return map; } -- cgit v1.2.3 From 9d1c58c8004653b37721dd7b16f4360216778c94 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 27 Jan 2024 21:47:38 +0530 Subject: genirq/msi: Optionally use dev->fwnode for device domain To support wire to MSI domains via the MSI infrastructure it is required to use the firmware node of the device which implements this for creating the MSI domain. Otherwise the existing firmware match mechanisms to find the correct irqdomain for a wired interrupt which is connected to a wire to MSI bridge would fail. This cannot be used for the general case because not all devices provide firmware nodes and all regular per device MSI domains are directly associated to the device and do not have to be searched for. Signed-off-by: Thomas Gleixner Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240127161753.114685-11-apatel@ventanamicro.com --- kernel/irq/msi.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 8d463901c864..5289fc2c7630 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -960,9 +960,9 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid, void *chip_data) { struct irq_domain *domain, *parent = dev->msi.domain; - const struct msi_parent_ops *pops; + struct fwnode_handle *fwnode, *fwnalloced = NULL; struct msi_domain_template *bundle; - struct fwnode_handle *fwnode; + const struct msi_parent_ops *pops; if (!irq_domain_is_msi_parent(parent)) return false; @@ -985,7 +985,19 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid, pops->prefix ? : "", bundle->chip.name, dev_name(dev)); bundle->chip.name = bundle->name; - fwnode = irq_domain_alloc_named_fwnode(bundle->name); + /* + * Using the device firmware node is required for wire to MSI + * device domains so that the existing firmware results in a domain + * match. + * All other device domains like PCI/MSI use the named firmware + * node as they are not guaranteed to have a fwnode. They are never + * looked up and always handled in the context of the device. + */ + if (bundle->info.flags & MSI_FLAG_USE_DEV_FWNODE) + fwnode = dev->fwnode; + else + fwnode = fwnalloced = irq_domain_alloc_named_fwnode(bundle->name); + if (!fwnode) goto free_bundle; @@ -1012,7 +1024,7 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid, fail: msi_unlock_descs(dev); free_fwnode: - irq_domain_free_fwnode(fwnode); + irq_domain_free_fwnode(fwnalloced); free_bundle: kfree(bundle); return false; -- cgit v1.2.3 From 0ee1578b00bcf5ef8e7955f0c6f02a624443eb29 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 27 Jan 2024 21:47:39 +0530 Subject: genirq/msi: Provide allocation/free functions for "wired" MSI interrupts To support wire to MSI bridges proper in the MSI core infrastructure it is required to have separate allocation/free interfaces which can be invoked from the regular irqdomain allocation/free functions.
The mechanism for allocation is: - Allocate the next free MSI descriptor index in the domain - Store the hardware interrupt number and the trigger type which was extracted by the irqdomain core from the firmware spec in the MSI descriptor device cookie so it can be retrieved by the underlying interrupt domain and interrupt chip - Use the regular MSI allocation mechanism for the newly allocated index which returns a fully initialized Linux interrupt on success This works because: - the domains have a fixed size - each hardware interrupt is only allocated once - the underlying domain does not care about the MSI index it only cares about the hardware interrupt number and the trigger type The free function looks up the MSI index in the MSI descriptor of the provided Linux interrupt number and uses the regular index based free functions of the MSI core. Signed-off-by: Thomas Gleixner Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240127161753.114685-12-apatel@ventanamicro.com --- kernel/irq/msi.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) (limited to 'kernel') diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 5289fc2c7630..07e9daaf0657 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -1540,6 +1540,50 @@ struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, u return map; } +/** + * msi_device_domain_alloc_wired - Allocate a "wired" interrupt on @domain + * @domain: The domain to allocate on + * @hwirq: The hardware interrupt number to allocate for + * @type: The interrupt type + * + * This weirdness supports wire to MSI controllers like MBIGEN. + * + * @hwirq is the hardware interrupt number which is handed in from + * irq_create_fwspec_mapping(). As the wire to MSI domain is sparse, but + * sized in firmware, the hardware interrupt number cannot be used as MSI + * index. For the underlying irq chip the MSI index is irrelevant and + * all it needs is the hardware interrupt number. + * + * To handle this the MSI index is allocated with MSI_ANY_INDEX and the + * hardware interrupt number is stored along with the type information in + * msi_desc::cookie so the underlying interrupt chip and domain code can + * retrieve it. + * + * Return: The Linux interrupt number (> 0) or an error code + */ +int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, + unsigned int type) +{ + unsigned int domid = MSI_DEFAULT_DOMAIN; + union msi_instance_cookie icookie = { }; + struct device *dev = domain->dev; + struct msi_map map = { }; + + if (WARN_ON_ONCE(!dev || domain->bus_token != DOMAIN_BUS_WIRED_TO_MSI)) + return -EINVAL; + + icookie.value = ((u64)type << 32) | hwirq; + + msi_lock_descs(dev); + if (WARN_ON_ONCE(msi_get_device_domain(dev, domid) != domain)) + map.index = -EINVAL; + else + map = __msi_domain_alloc_irq_at(dev, domid, MSI_ANY_INDEX, NULL, &icookie); + msi_unlock_descs(dev); + + return map.index >= 0 ? map.virq : map.index; +} + static void __msi_domain_free_irqs(struct device *dev, struct irq_domain *domain, struct msi_ctrl *ctrl) { @@ -1665,6 +1709,30 @@ void msi_domain_free_irqs_all(struct device *dev, unsigned int domid) msi_unlock_descs(dev); } +/** + * msi_device_domain_free_wired - Free a wired interrupt in @domain + * @domain: The domain to free the interrupt on + * @virq: The Linux interrupt number to free + * + * This is the counterpart of msi_device_domain_alloc_wired() for the + * weird wired to MSI converting domains.
+ */ +void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq) +{ + struct msi_desc *desc = irq_get_msi_desc(virq); + struct device *dev = domain->dev; + + if (WARN_ON_ONCE(!dev || !desc || domain->bus_token != DOMAIN_BUS_WIRED_TO_MSI)) + return; + + msi_lock_descs(dev); + if (!WARN_ON_ONCE(msi_get_device_domain(dev, MSI_DEFAULT_DOMAIN) != domain)) { + msi_domain_free_irqs_range_locked(dev, MSI_DEFAULT_DOMAIN, desc->msi_index, + desc->msi_index); + } + msi_unlock_descs(dev); +} + /** * msi_get_domain_info - Get the MSI interrupt domain info for @domain * @domain: The interrupt domain to retrieve data from -- cgit v1.2.3 From e49312fe09df36cc4eae0cd6e1b08b563a91e1bc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 27 Jan 2024 21:47:40 +0530 Subject: genirq/irqdomain: Reroute device MSI create_mapping Reroute interrupt allocation in irq_create_fwspec_mapping() if the domain is a MSI device domain. This is required to convert the support for wire to MSI bridges to per device MSI domains. Signed-off-by: Thomas Gleixner Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240127161753.114685-13-apatel@ventanamicro.com --- kernel/irq/irqdomain.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 8fee37918195..aeb41655d6de 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -29,6 +29,7 @@ static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc, const struct irq_affinity_desc *affinity); static void irq_domain_check_hierarchy(struct irq_domain *domain); +static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq); struct irqchip_fwid { struct fwnode_handle fwnode; @@ -858,8 +859,13 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) } if (irq_domain_is_hierarchy(domain)) { - virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE, - fwspec, false, NULL); + if (irq_domain_is_msi_device(domain)) { + mutex_unlock(&domain->root->mutex); + virq = msi_device_domain_alloc_wired(domain, hwirq, type); + mutex_lock(&domain->root->mutex); + } else + virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE, + fwspec, false, NULL); if (virq <= 0) { virq = 0; goto out; @@ -914,7 +920,7 @@ void irq_dispose_mapping(unsigned int virq) return; if (irq_domain_is_hierarchy(domain)) { - irq_domain_free_irqs(virq, 1); + irq_domain_free_one_irq(domain, virq); } else { irq_domain_disassociate(domain, virq); irq_free_desc(virq); @@ -1755,6 +1761,14 @@ void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) irq_free_descs(virq, nr_irqs); } +static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq) +{ + if (irq_domain_is_msi_device(domain)) + msi_device_domain_free_wired(domain, virq); + else + irq_domain_free_irqs(virq, 1); +} + /** * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain * @domain: Domain below which interrupts must be allocated @@ -1907,9 +1921,9 @@ static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base, return -EINVAL; } -static void irq_domain_check_hierarchy(struct irq_domain *domain) -{ -} +static void irq_domain_check_hierarchy(struct irq_domain *domain) { } +static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq) { } + #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ #ifdef 
CONFIG_GENERIC_IRQ_DEBUGFS -- cgit v1.2.3 From 9bbe13a5d414a7f8208dba64b54d2b6e4f7086bd Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 27 Jan 2024 21:47:41 +0530 Subject: genirq/msi: Provide MSI_FLAG_PARENT_PM_DEV Some platform-MSI implementations require that power management is redirected to the underlying interrupt chip device. To make this work with per device MSI domains provide a new feature flag and let the core code handle the setup of dev->pm_dev when set during device MSI domain creation. Signed-off-by: Thomas Gleixner Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240127161753.114685-14-apatel@ventanamicro.com --- kernel/irq/msi.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 07e9daaf0657..f90952ebc494 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -845,8 +845,11 @@ static struct irq_domain *__msi_create_irq_domain(struct fwnode_handle *fwnode, domain = irq_domain_create_hierarchy(parent, flags | IRQ_DOMAIN_FLAG_MSI, 0, fwnode, &msi_domain_ops, info); - if (domain) + if (domain) { irq_domain_update_bus_token(domain, info->bus_token); + if (info->flags & MSI_FLAG_PARENT_PM_DEV) + domain->pm_dev = parent->pm_dev; + } return domain; } -- cgit v1.2.3 From 8cec3dd9e5930c82c6bd0af3fdb3a36bcd428310 Mon Sep 17 00:00:00 2001 From: Shrikanth Hegde Date: Fri, 16 Feb 2024 11:44:33 +0530 Subject: sched/core: Simplify code by removing duplicate #ifdefs There are a few cases of nested #ifdefs in the scheduler code that can be simplified: #ifdef DEFINE_A ...code block... #ifdef DEFINE_A <-- This is a duplicate. ...code block... #endif #else #ifndef DEFINE_A <-- This is also a duplicate. ...code block... #endif #endif More details about the script and methods used to find these code patterns can be found at: https://lore.kernel.org/all/20240118080326.13137-1-sshegde@linux.ibm.com/ No change in functionality intended. [ mingo: Clarified the changelog. ] Signed-off-by: Shrikanth Hegde Signed-off-by: Ingo Molnar Reviewed-by: Vincent Guittot Link: https://lore.kernel.org/r/20240216061433.535522-1-sshegde@linux.ibm.com --- kernel/sched/core.c | 4 +--- kernel/sched/fair.c | 2 -- 2 files changed, 1 insertion(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9116bcc90346..a76c7095f736 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1792,7 +1792,6 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css); #endif #ifdef CONFIG_SYSCTL -#ifdef CONFIG_UCLAMP_TASK #ifdef CONFIG_UCLAMP_TASK_GROUP static void uclamp_update_root_tg(void) { @@ -1898,7 +1897,6 @@ undo: return result; } #endif -#endif static int uclamp_validate(struct task_struct *p, const struct sched_attr *attr) @@ -2065,7 +2063,7 @@ static void __init init_uclamp(void) } } -#else /* CONFIG_UCLAMP_TASK */ +#else /* !CONFIG_UCLAMP_TASK */ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } static inline int uclamp_validate(struct task_struct *p, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 533547e3c90a..8e30e2bb77a0 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -10182,10 +10182,8 @@ static int idle_cpu_without(int cpu, struct task_struct *p) * be computed and tested before calling idle_cpu_without().
*/ -#ifdef CONFIG_SMP if (rq->ttwu_pending) return 0; -#endif return 1; } -- cgit v1.2.3 From fd0a68a2337b79a7bd4dad5e7d9dc726828527af Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 15 Feb 2024 19:10:01 -1000 Subject: workqueue, irq_work: Build fix for !CONFIG_IRQ_WORK 2f34d7337d98 ("workqueue: Fix queue_work_on() with BH workqueues") added irq_work usage to workqueue; however, it turns out irq_work is actually optional and the change breaks build on configuration which doesn't have CONFIG_IRQ_WORK enabled. Fix build by making workqueue use irq_work only when CONFIG_SMP and enabling CONFIG_IRQ_WORK when CONFIG_SMP is set. It's reasonable to argue that it may be better to just always enable it. However, this still saves a small bit of memory for tiny UP configs and also the least amount of change, so, for now, let's keep it conditional. Verified to do the right thing for x86_64 allnoconfig and defconfig, and aarch64 allnoconfig, allnoconfig + prink disable (SMP but nothing selects IRQ_WORK) and a modified aarch64 Kconfig where !SMP and nothing selects IRQ_WORK. v2: `depends on SMP` leads to Kconfig warnings when CONFIG_IRQ_WORK is selected by something else when !CONFIG_SMP. Use `def_bool y if SMP` instead. Signed-off-by: Tejun Heo Reported-by: Naresh Kamboju Tested-by: Anders Roxell Fixes: 2f34d7337d98 ("workqueue: Fix queue_work_on() with BH workqueues") Cc: Stephen Rothwell --- kernel/workqueue.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 04e35dbe6799..6ae441e13804 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1209,6 +1209,20 @@ static struct irq_work *bh_pool_irq_work(struct worker_pool *pool) return &per_cpu(bh_pool_irq_works, pool->cpu)[high]; } +static void kick_bh_pool(struct worker_pool *pool) +{ +#ifdef CONFIG_SMP + if (unlikely(pool->cpu != smp_processor_id())) { + irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu); + return; + } +#endif + if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) + raise_softirq_irqoff(HI_SOFTIRQ); + else + raise_softirq_irqoff(TASKLET_SOFTIRQ); +} + /** * kick_pool - wake up an idle worker if necessary * @pool: pool to kick @@ -1227,15 +1241,7 @@ static bool kick_pool(struct worker_pool *pool) return false; if (pool->flags & POOL_BH) { - if (likely(pool->cpu == smp_processor_id())) { - if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) - raise_softirq_irqoff(HI_SOFTIRQ); - else - raise_softirq_irqoff(TASKLET_SOFTIRQ); - } else { - irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu); - } - + kick_bh_pool(pool); return true; } -- cgit v1.2.3 From ca2768bbf5c48d8c048877dfbceafcebc3f06fa6 Mon Sep 17 00:00:00 2001 From: Anna-Maria Behnsen Date: Tue, 23 Jan 2024 17:46:56 +0100 Subject: hrtimers: Update formatting of documentation Documentation of functions lacks the annotations which are used by kernel-doc and *.rst to make appearance in rendered documents more user-friendly. Use those annotations to improve user-friendliness. While at it prevent duplication of comments and use a reference instead. 
Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240123164702.55612-3-anna-maria@linutronix.de --- kernel/time/hrtimer.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 760793998cdd..4c8dd633ab4a 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -1021,21 +1021,23 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) } /** - * hrtimer_forward - forward the timer expiry + * hrtimer_forward() - forward the timer expiry * @timer: hrtimer to forward * @now: forward past this time * @interval: the interval to forward * * Forward the timer expiry so it will expire in the future. - * Returns the number of overruns. * - * Can be safely called from the callback function of @timer. If - * called from other contexts @timer must neither be enqueued nor - * running the callback and the caller needs to take care of - * serialization. + * .. note:: + * This only updates the timer expiry value and does not requeue the timer. - * Note: This only updates the timer expiry value and does not requeue - * the timer. + * There is also a variant of the function hrtimer_forward_now(). + * + * Context: Can be safely called from the callback function of @timer. If called + * from other contexts @timer must neither be enqueued nor running the + * callback and the caller needs to take care of serialization. + * + * Return: The number of overruns is returned. */ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) { -- cgit v1.2.3 From f365d05506150398fe6b035918d6fd8b62f35b9f Mon Sep 17 00:00:00 2001 From: Anna-Maria Behnsen Date: Tue, 23 Jan 2024 17:46:57 +0100 Subject: tick/sched: Add function description for tick_nohz_next_event() The return value of tick_nohz_next_event() is not obvious at first glance. Add a kernel-doc compatible function description which also covers return values. Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240123164702.55612-4-anna-maria@linutronix.de --- kernel/time/tick-sched.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 01fb50c1b17e..7c9efe3d9d56 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -799,6 +799,16 @@ static inline bool local_timer_softirq_pending(void) return local_softirq_pending() & BIT(TIMER_SOFTIRQ); } +/** + * tick_nohz_next_event() - return the clock monotonic based next event + * @ts: pointer to tick_sched struct + * @cpu: CPU number + * + * Return: + * *%0 - When the next event is a maximum of TICK_NSEC in the future + * and the tick is not stopped yet + * *%next_event - Next event based on clock monotonic + */ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) { u64 basemono, next_tick, delta, expires; -- cgit v1.2.3 From 892abd357183bc663d6984d10c62f94b40bfc375 Mon Sep 17 00:00:00 2001 From: Anna-Maria Behnsen Date: Tue, 23 Jan 2024 17:46:58 +0100 Subject: timers: Add struct member description for timer_base The timer_base struct lacks descriptions of its members. Important struct member information is sprinkled in comments or in code all over the place. Collect the information and write a struct description to keep track of the most important information in a single place.
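The same kernel-doc mechanism covers structures via @member tags; a minimal sketch of the format used by the patch below (hypothetical struct, for illustration only, with a userspace lock type standing in for the kernel's):

#include <pthread.h>
#include <stdbool.h>

/**
 * struct example_base - Short summary of the structure
 * @lock:    Lock protecting all other members
 * @pending: Tells whether any timer is pending; only reliable
 *           while @lock is held
 */
struct example_base {
	pthread_mutex_t lock;
	bool pending;
};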
Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240123164702.55612-5-anna-maria@linutronix.de --- kernel/time/timer.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) (limited to 'kernel') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 352b161113cd..d44dba1d4af0 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -196,6 +196,51 @@ EXPORT_SYMBOL(jiffies_64); # define BASE_DEF 0 #endif +/** + * struct timer_base - Per CPU timer base (number of bases depends on config) + * @lock: Lock protecting the timer_base + * @running_timer: When expiring timers, the lock is dropped. To make + * sure not to race against deleting/modifying a + * currently running timer, the pointer is set to the + * timer which is currently expiring. If no timer is + * running, the pointer is NULL. + * @expiry_lock: PREEMPT_RT only: Lock is taken in softirq around + * timer expiry callback execution and when trying to + * delete a running timer when the first attempt was + * not successful. It prevents priority inversion + * when the callback was preempted on a remote CPU and + * a caller tries to delete the running timer. It also + * prevents a live lock when the task which tries to + * delete a timer preempted the softirq thread which + * is running the timer callback function. + * @timer_waiters: PREEMPT_RT only: Tells whether there is a waiter + * waiting for the end of the timer callback function + * execution. + * @clk: clock of the timer base; is updated before enqueue + * of a timer; during expiry, it is 1 offset ahead of + * jiffies to avoid endless requeuing to current + * jiffies + * @next_expiry: expiry value of the first timer; it is updated when + * finding the next timer and during enqueue; the + * value is not valid when next_expiry_recalc is set + * @cpu: Number of the CPU the timer base belongs to + * @next_expiry_recalc: States whether a recalculation of next_expiry is + * required. The value is set to true when a timer was + * deleted. + * @is_idle: Is set when the timer_base is idle. It is triggered by NOHZ + * code. This state is only used in the standard + * base. Deferrable timers, which are enqueued remotely, + * never wake up an idle CPU, so there is no need to support + * it for this base. + * @timers_pending: Is set when a timer is pending in the base. It is only + * reliable when next_expiry_recalc is not set. + * @pending_map: bitmap of the timer wheel; each bit reflects a + * bucket of the wheel. When a bit is set, at least a + * single timer is enqueued in the related bucket. + * @vectors: Array of lists; each array member reflects a bucket + * of the timer wheel. The list contains all timers + * which are enqueued into a specific bucket. + */ struct timer_base { raw_spinlock_t lock; struct timer_list *running_timer; -- cgit v1.2.3 From c99303a2d2a25ba467ebf75d3e446b58c7e7df3a Mon Sep 17 00:00:00 2001 From: Crystal Wood Date: Mon, 22 Jan 2024 17:53:53 -0600 Subject: genirq: Wake interrupt threads immediately when changing affinity The affinity setting of interrupt threads happens in the context of the thread when the thread is woken up by a hard interrupt. As this can be an arbitrarily long time after changing the affinity, the thread can become runnable on an isolated CPU and cause isolation disruption. Avoid this by checking the set affinity request in wait_for_interrupt() and waking the threads immediately when the affinity is modified.
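The core of the change is moving the affinity check inside the wait loop so a pending request is honored before the thread blocks again. A rough userspace analogy (illustrative only; all names here are made up and the blocking is elided):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool affinity_requested;

static void apply_affinity(void)
{
	puts("affinity change applied before blocking");
}

static void wait_for_event(void)
{
	for (int i = 0; i < 2; i++) {
		/* honor a pending affinity request before going to sleep */
		if (atomic_exchange(&affinity_requested, false))
			apply_affinity();
		/* block here until woken (elided in this sketch) */
	}
}

int main(void)
{
	atomic_store(&affinity_requested, true);
	wait_for_event();
	return 0;
}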
Note that this is of the most benefit on systems where the interrupt affinity itself does not need to be deferred to the interrupt handler, but even where that's not the case, the total disruption will be less. Signed-off-by: Crystal Wood Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240122235353.15235-1-crwood@redhat.com --- kernel/irq/manage.c | 109 ++++++++++++++++++++++++++-------------------------- 1 file changed, 55 insertions(+), 54 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 1782f90cd8c6..ad3eaf2ab959 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -192,10 +192,14 @@ void irq_set_thread_affinity(struct irq_desc *desc) struct irqaction *action; for_each_action_of_desc(desc, action) { - if (action->thread) + if (action->thread) { set_bit(IRQTF_AFFINITY, &action->thread_flags); - if (action->secondary && action->secondary->thread) + wake_up_process(action->thread); + } + if (action->secondary && action->secondary->thread) { set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags); + wake_up_process(action->secondary->thread); + } } } @@ -1049,10 +1053,57 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id) return IRQ_NONE; } -static int irq_wait_for_interrupt(struct irqaction *action) +#ifdef CONFIG_SMP +/* + * Check whether we need to change the affinity of the interrupt thread. + */ +static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) +{ + cpumask_var_t mask; + bool valid = false; + + if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) + return; + + __set_current_state(TASK_RUNNING); + + /* + * In case we are out of memory we set IRQTF_AFFINITY again and + * try again next time + */ + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { + set_bit(IRQTF_AFFINITY, &action->thread_flags); + return; + } + + raw_spin_lock_irq(&desc->lock); + /* + * This code is triggered unconditionally. Check the affinity + * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. + */ + if (cpumask_available(desc->irq_common_data.affinity)) { + const struct cpumask *m; + + m = irq_data_get_effective_affinity_mask(&desc->irq_data); + cpumask_copy(mask, m); + valid = true; + } + raw_spin_unlock_irq(&desc->lock); + + if (valid) + set_cpus_allowed_ptr(current, mask); + free_cpumask_var(mask); +} +#else +static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } +#endif + +static int irq_wait_for_interrupt(struct irq_desc *desc, + struct irqaction *action) { for (;;) { set_current_state(TASK_INTERRUPTIBLE); + irq_thread_check_affinity(desc, action); if (kthread_should_stop()) { /* may need to run one last time */ @@ -1129,52 +1180,6 @@ out_unlock: chip_bus_sync_unlock(desc); } -#ifdef CONFIG_SMP -/* - * Check whether we need to change the affinity of the interrupt thread. - */ -static void -irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) -{ - cpumask_var_t mask; - bool valid = true; - - if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) - return; - - /* - * In case we are out of memory we set IRQTF_AFFINITY again and - * try again next time - */ - if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { - set_bit(IRQTF_AFFINITY, &action->thread_flags); - return; - } - - raw_spin_lock_irq(&desc->lock); - /* - * This code is triggered unconditionally. Check the affinity - * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
- */ - if (cpumask_available(desc->irq_common_data.affinity)) { - const struct cpumask *m; - - m = irq_data_get_effective_affinity_mask(&desc->irq_data); - cpumask_copy(mask, m); - } else { - valid = false; - } - raw_spin_unlock_irq(&desc->lock); - - if (valid) - set_cpus_allowed_ptr(current, mask); - free_cpumask_var(mask); -} -#else -static inline void -irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } -#endif - /* * Interrupts which are not explicitly requested as threaded * interrupts rely on the implicit bh/preempt disable of the hard irq @@ -1312,13 +1317,9 @@ static int irq_thread(void *data) init_task_work(&on_exit_work, irq_thread_dtor); task_work_add(current, &on_exit_work, TWA_NONE); - irq_thread_check_affinity(desc, action); - - while (!irq_wait_for_interrupt(action)) { + while (!irq_wait_for_interrupt(desc, action)) { irqreturn_t action_ret; - irq_thread_check_affinity(desc, action); - action_ret = handler_fn(desc, action); if (action_ret == IRQ_WAKE_THREAD) irq_wake_secondary(desc, action); -- cgit v1.2.3 From 84dccadd3e2a3f1a373826ad71e5ced5e76b0c00 Mon Sep 17 00:00:00 2001 From: Peter Hilber Date: Mon, 18 Dec 2023 08:38:39 +0100 Subject: timekeeping: Fix cross-timestamp interpolation on counter wrap cycle_between() decides whether get_device_system_crosststamp() will interpolate for older counter readings. cycle_between() yields wrong results for a counter wrap-around where after < before < test, and for the case after < test < before. Fix the comparison logic. Fixes: 2c756feb18d9 ("time: Add history to cross timestamp interface supporting slower devices") Signed-off-by: Peter Hilber Signed-off-by: Thomas Gleixner Acked-by: John Stultz Link: https://lore.kernel.org/r/20231218073849.35294-2-peter.hilber@opensynergy.com --- kernel/time/timekeeping.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 266d02809dbb..8f35455b6250 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1186,7 +1186,7 @@ static bool cycle_between(u64 before, u64 test, u64 after) { if (test > before && test < after) return true; - if (test < before && before > after) + if (before > after && (test > before || test < after)) return true; return false; } -- cgit v1.2.3 From 87a41130881995f82f7adbafbfeddaebfb35f0ef Mon Sep 17 00:00:00 2001 From: Peter Hilber Date: Mon, 18 Dec 2023 08:38:40 +0100 Subject: timekeeping: Fix cross-timestamp interpolation corner case decision The cycle_between() helper checks if parameter test is in the open interval (before, after). Colloquially speaking, this also applies to the counter wrap-around special case before > after. get_device_system_crosststamp() currently uses cycle_between() at the first call site to decide whether to interpolate for older counter readings. get_device_system_crosststamp() has the following problem with cycle_between() testing against an open interval: Assume that, by chance, cycles == tk->tkr_mono.cycle_last (in the following, "cycle_last" for brevity). Then, cycle_between() at the first call site, with effective argument values cycle_between(cycle_last, cycles, now), returns false, enabling interpolation. During interpolation, get_device_system_crosststamp() will then call cycle_between() at the second call site (if a history_begin was supplied). 
The effective argument values are cycle_between(history_begin->cycles, cycles, cycles), since system_counterval.cycles == interval_start == cycles, per the assumption. Due to the test against the open interval, cycle_between() returns false again. This causes get_device_system_crosststamp() to return -EINVAL. This failure should be avoided, since get_device_system_crosststamp() works both when cycles follows cycle_last (no interpolation), and when cycles precedes cycle_last (interpolation). For the case cycles == cycle_last, interpolation is actually unneeded. Fix this by changing cycle_between() into timestamp_in_interval(), which now checks against the closed interval, rather than the open interval. This changes the get_device_system_crosststamp() behavior for three corner cases: 1. Bypass interpolation in the case cycles == tk->tkr_mono.cycle_last, fixing the problem described above. 2. At the first timestamp_in_interval() call site, cycles == now no longer causes failure. 3. At the second timestamp_in_interval() call site, history_begin->cycles == system_counterval.cycles no longer causes failure. adjust_historical_crosststamp() also works for this corner case, where partial_history_cycles == total_history_cycles. These behavioral changes should not cause any problems. Fixes: 2c756feb18d9 ("time: Add history to cross timestamp interface supporting slower devices") Signed-off-by: Peter Hilber Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20231218073849.35294-3-peter.hilber@opensynergy.com --- kernel/time/timekeeping.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 8f35455b6250..4e9f2f88c9d6 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1180,13 +1180,15 @@ static int adjust_historical_crosststamp(struct system_time_snapshot *history, } /* - * cycle_between - true if test occurs chronologically between before and after + * timestamp_in_interval - true if ts is chronologically in [start, end] + * + * True if ts occurs chronologically at or after start, and before or at end. 
*/ -static bool cycle_between(u64 before, u64 test, u64 after) +static bool timestamp_in_interval(u64 start, u64 end, u64 ts) { - if (test > before && test < after) + if (ts >= start && ts <= end) return true; - if (before > after && (test > before || test < after)) + if (start > end && (ts >= start || ts <= end)) return true; return false; } @@ -1246,7 +1248,7 @@ int get_device_system_crosststamp(int (*get_time_fn) */ now = tk_clock_read(&tk->tkr_mono); interval_start = tk->tkr_mono.cycle_last; - if (!cycle_between(interval_start, cycles, now)) { + if (!timestamp_in_interval(interval_start, now, cycles)) { clock_was_set_seq = tk->clock_was_set_seq; cs_was_changed_seq = tk->cs_was_changed_seq; cycles = interval_start; @@ -1277,13 +1279,13 @@ int get_device_system_crosststamp(int (*get_time_fn) bool discontinuity; /* - * Check that the counter value occurs after the provided + * Check that the counter value is not before the provided * history reference and that the history doesn't cross a * clocksource change */ if (!history_begin || - !cycle_between(history_begin->cycles, - system_counterval.cycles, cycles) || + !timestamp_in_interval(history_begin->cycles, + cycles, system_counterval.cycles) || history_begin->cs_was_changed_seq != cs_was_changed_seq) return -EINVAL; partial_history_cycles = cycles - system_counterval.cycles; -- cgit v1.2.3 From 14274d0bd31b4debf28284604589f596ad2e99f2 Mon Sep 17 00:00:00 2001 From: Peter Hilber Date: Mon, 18 Dec 2023 08:38:41 +0100 Subject: timekeeping: Fix cross-timestamp interpolation for non-x86 So far, get_device_system_crosststamp() unconditionally passes system_counterval.cycles to timekeeping_cycles_to_ns(). But when interpolating system time (do_interp == true), system_counterval.cycles is before tkr_mono.cycle_last, contrary to the timekeeping_cycles_to_ns() expectations. On x86, CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE mitigates this when interpolating by setting delta to 0. With delta == 0, xtstamp->sys_monoraw and xtstamp->sys_realtime are then set to the last update time, as implicitly expected by adjust_historical_crosststamp(). On other architectures, the resulting nonsense xtstamp->sys_monoraw and xtstamp->sys_realtime corrupt the xtstamp (ts) adjustment in adjust_historical_crosststamp(). Fix this by deriving xtstamp->sys_monoraw and xtstamp->sys_realtime from the last update time when interpolating, by using the local variable "cycles". The local variable already has the right value when interpolating, unlike system_counterval.cycles.
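Both the comparison fix and the closed-interval change hinge on the check behaving correctly across a counter wrap. A standalone copy of the fixed helper can be exercised in userspace like this (uint64_t standing in for the kernel's u64; the test values are made up):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool timestamp_in_interval(uint64_t start, uint64_t end, uint64_t ts)
{
	if (ts >= start && ts <= end)
		return true;
	if (start > end && (ts >= start || ts <= end))
		return true;
	return false;
}

int main(void)
{
	/* no wrap: [100, 200] is closed at both ends */
	assert(timestamp_in_interval(100, 200, 100));
	assert(timestamp_in_interval(100, 200, 200));
	assert(!timestamp_in_interval(100, 200, 201));

	/* wrap-around: the interval [UINT64_MAX - 5, 5] crosses zero */
	assert(timestamp_in_interval(UINT64_MAX - 5, 5, UINT64_MAX));
	assert(timestamp_in_interval(UINT64_MAX - 5, 5, 3));
	assert(!timestamp_in_interval(UINT64_MAX - 5, 5, 50));
	return 0;
}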
Fixes: 2c756feb18d9 ("time: Add history to cross timestamp interface supporting slower devices") Signed-off-by: Peter Hilber Signed-off-by: Thomas Gleixner Acked-by: John Stultz Link: https://lore.kernel.org/r/20231218073849.35294-4-peter.hilber@opensynergy.com --- kernel/time/timekeeping.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 4e9f2f88c9d6..8aab7ed41490 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1261,10 +1261,8 @@ int get_device_system_crosststamp(int (*get_time_fn) tk_core.timekeeper.offs_real); base_raw = tk->tkr_raw.base; - nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, - system_counterval.cycles); - nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, - system_counterval.cycles); + nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles); + nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles); } while (read_seqcount_retry(&tk_core.seq, seq)); xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real); -- cgit v1.2.3 From da92df490eeab7a97a3390ff32e0ae091e0dc2eb Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 5 Feb 2024 13:01:19 +0300 Subject: cpu: Mark cpu_possible_mask as __ro_after_init cpu_possible_mask is by definition "cpus which could be hotplugged without reboot". It's a property which is fixed after the kernel enumerates the hardware configuration. Signed-off-by: Alexey Dobriyan Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/41cd78af-92a3-4f23-8c7a-4316a04a66d8@p183 --- kernel/cpu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index ad7d0b00bce9..7b36b3a4e336 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -3106,10 +3106,10 @@ const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; EXPORT_SYMBOL(cpu_all_bits); #ifdef CONFIG_INIT_ALL_POSSIBLE -struct cpumask __cpu_possible_mask __read_mostly +struct cpumask __cpu_possible_mask __ro_after_init = {CPU_BITS_ALL}; #else -struct cpumask __cpu_possible_mask __read_mostly; +struct cpumask __cpu_possible_mask __ro_after_init; #endif EXPORT_SYMBOL(__cpu_possible_mask); -- cgit v1.2.3 From 5aa3c0cf5bba6437c9e63a56f684f61de8b503d6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 20 Feb 2024 11:47:31 +0000 Subject: genirq/irqdomain: Don't call ops->select for DOMAIN_BUS_ANY tokens Users of the IRQCHIP_PLATFORM_DRIVER_{BEGIN,END} helpers rely on a fwspec containing only the fwnode (and crucially a number of parameters set to 0) together with a DOMAIN_BUS_ANY token to check whether a parent irqchip has probed and registered a domain. Since de1ff306dcf4 ("genirq/irqdomain: Remove the param count restriction from select()"), ops->select() is called unconditionally, meaning that irqchips implementing select() now need to handle ANY as a match. Instead of adding more esoteric checks to the individual drivers, add that condition to irq_find_matching_fwspec(), and let it handle the corner case, as per the comment in the function. This restores the functionality of the above helpers.
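The matching rule after the fix can be modeled in isolation; the types below are simplified stand-ins for the kernel's irq_domain machinery, not its real API:

#include <stdbool.h>
#include <stdio.h>

enum bus_token { DOMAIN_BUS_ANY, DOMAIN_BUS_WIRED };

struct domain_ops {
	bool (*select)(enum bus_token token);
	bool (*match)(enum bus_token token);
};

/* A strict select() that would reject the ANY token outright. */
static bool strict_select(enum bus_token token)
{
	return token == DOMAIN_BUS_WIRED;
}

/* Fallback match: the fwnode comparison has already succeeded. */
static bool generic_match(enum bus_token token)
{
	(void)token;
	return true;
}

/* ops->select() is only consulted for a specific token; DOMAIN_BUS_ANY
 * falls back to the plain match, so "is any domain registered?" probes
 * keep working. */
static bool domain_matches(const struct domain_ops *ops, enum bus_token token)
{
	if (ops->select && token != DOMAIN_BUS_ANY)
		return ops->select(token);
	return ops->match(token);
}

int main(void)
{
	struct domain_ops ops = { strict_select, generic_match };

	printf("ANY:   %d\n", domain_matches(&ops, DOMAIN_BUS_ANY));   /* 1 */
	printf("WIRED: %d\n", domain_matches(&ops, DOMAIN_BUS_WIRED)); /* 1 */
	return 0;
}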
Fixes: de1ff306dcf4 ("genirq/irqdomain: Remove the param count restriction from select()") Reported-by: Dmitry Baryshkov Reported-by: Biju Das Signed-off-by: Marc Zyngier Signed-off-by: Thomas Gleixner Tested-by: Dmitry Baryshkov Tested-by: Biju Das Link: https://lore.kernel.org/r/20240220114731.1898534-1-maz@kernel.org Link: https://lore.kernel.org/r/20240219-gic-fix-child-domain-v1-1-09f8fd2d9a8f@linaro.org --- kernel/irq/irqdomain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index aeb41655d6de..3dd1c871e091 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -449,7 +449,7 @@ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, */ mutex_lock(&irq_domain_mutex); list_for_each_entry(h, &irq_domain_list, link) { - if (h->ops->select) + if (h->ops->select && bus_token != DOMAIN_BUS_ANY) rc = h->ops->select(h, fwspec, bus_token); else if (h->ops->match) rc = h->ops->match(h, to_of_node(fwnode), bus_token); -- cgit v1.2.3 From c7a40c49af920fbad2ab6795b6587308ad69de9f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:13 -1000 Subject: workqueue: Cosmetic changes Reorder some global declarations and adjust comments and whitespaces for clarity and consistency. No functional changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 6ae441e13804..b280caf81fb2 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -376,8 +376,6 @@ struct workqueue_struct { struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */ }; -static struct kmem_cache *pwq_cache; - /* * Each pod type describes how CPUs should be grouped for unbound workqueues. * See the comment above workqueue_attrs->affn_scope. @@ -389,20 +387,15 @@ struct wq_pod_type { int *cpu_pod; /* cpu -> pod */ }; -static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES]; -static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE; - static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = { - [WQ_AFFN_DFL] = "default", - [WQ_AFFN_CPU] = "cpu", - [WQ_AFFN_SMT] = "smt", - [WQ_AFFN_CACHE] = "cache", - [WQ_AFFN_NUMA] = "numa", - [WQ_AFFN_SYSTEM] = "system", + [WQ_AFFN_DFL] = "default", + [WQ_AFFN_CPU] = "cpu", + [WQ_AFFN_SMT] = "smt", + [WQ_AFFN_CACHE] = "cache", + [WQ_AFFN_NUMA] = "numa", + [WQ_AFFN_SYSTEM] = "system", }; -static bool wq_topo_initialized __read_mostly = false; - /* * Per-cpu work items which run for longer than the following threshold are * automatically considered CPU intensive and excluded from concurrency @@ -418,6 +411,12 @@ static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT); module_param_named(power_efficient, wq_power_efficient, bool, 0444); static bool wq_online; /* can kworkers be created yet? */ +static bool wq_topo_initialized __read_mostly = false; + +static struct kmem_cache *pwq_cache; + +static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES]; +static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE; /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */ static struct workqueue_attrs *wq_update_pod_attrs_buf; @@ -2231,7 +2230,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, */ lockdep_assert_irqs_disabled(); - /* * For a draining wq, only works from the same workqueue are * allowed. 
The __WQ_DESTROYING helps to spot the issue that @@ -4121,8 +4119,8 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) local_irq_restore(flags); /* - * This allows canceling during early boot. We know that @work - * isn't executing. + * Skip __flush_work() during early boot when we know that @work isn't + * executing. This allows canceling during early boot. */ if (wq_online) __flush_work(work, true); -- cgit v1.2.3 From d355001fa9370df8fdd6fca0e9ed77063615c7da Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:13 -1000 Subject: workqueue: Use rcu_read_lock_any_held() instead of rcu_read_lock_held() The different flavors of RCU read critical sections have been unified. Let's update the locking assertion macros accordingly to avoid requiring unnecessary explicit rcu_read_[un]lock() calls. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b280caf81fb2..87750e70b638 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -515,12 +515,12 @@ static void show_one_worker_pool(struct worker_pool *pool); #include #define assert_rcu_or_pool_mutex() \ - RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() && \ !lockdep_is_held(&wq_pool_mutex), \ "RCU or wq_pool_mutex should be held") #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ - RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() && \ !lockdep_is_held(&wq->mutex) && \ !lockdep_is_held(&wq_pool_mutex), \ "RCU, wq->mutex or wq_pool_mutex should be held") -- cgit v1.2.3 From c5140688d19a4579f7b01e6ca4b6e5f5d23d3d4d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: workqueue: Rename __cancel_work_timer() to __cancel_work_sync() __cancel_work_timer() is used to implement cancel_work_sync() and cancel_delayed_work_sync(), similarly to how __cancel_work() is used to implement cancel_work() and cancel_delayed_work(). ie. The _timer part of the name is a complete misnomer. The difference from __cancel_work() is the fact that it syncs against work item execution, not whether it handles timers. Let's rename it to the less confusing __cancel_work_sync(). No functional change.
Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 87750e70b638..7e2af79bfa62 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4075,7 +4075,7 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *k return autoremove_wake_function(wait, mode, sync, key); } -static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) +static bool __cancel_work_sync(struct work_struct *work, bool is_dwork) { static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); unsigned long flags; @@ -4159,7 +4159,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) */ bool cancel_work_sync(struct work_struct *work) { - return __cancel_work_timer(work, false); + return __cancel_work_sync(work, false); } EXPORT_SYMBOL_GPL(cancel_work_sync); @@ -4264,7 +4264,7 @@ EXPORT_SYMBOL(cancel_delayed_work); */ bool cancel_delayed_work_sync(struct delayed_work *dwork) { - return __cancel_work_timer(&dwork->work, true); + return __cancel_work_sync(&dwork->work, true); } EXPORT_SYMBOL(cancel_delayed_work_sync); -- cgit v1.2.3 From cdc6e4b329bc82676886a758a940b2b6987c2109 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: workqueue: Reorganize flush and cancel[_sync] functions They are currently a bit disorganized with flush and cancel functions mixed. Reorganize them so that flush functions come first, cancel next and cancel_sync last. This way, we won't have to add prototypes for internal functions for the planned disable/enable support. This is pure code reorganization. No functional changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 136 ++++++++++++++++++++++++++--------------------------- 1 file changed, 68 insertions(+), 68 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 7e2af79bfa62..962061dca05c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4061,6 +4061,65 @@ bool flush_work(struct work_struct *work) } EXPORT_SYMBOL_GPL(flush_work); +/** + * flush_delayed_work - wait for a dwork to finish executing the last queueing + * @dwork: the delayed work to flush + * + * Delayed timer is cancelled and the pending work is queued for + * immediate execution. Like flush_work(), this function only + * considers the last queueing instance of @dwork. + * + * Return: + * %true if flush_work() waited for the work to finish execution, + * %false if it was already idle. + */ +bool flush_delayed_work(struct delayed_work *dwork) +{ + local_irq_disable(); + if (del_timer_sync(&dwork->timer)) + __queue_work(dwork->cpu, dwork->wq, &dwork->work); + local_irq_enable(); + return flush_work(&dwork->work); +} +EXPORT_SYMBOL(flush_delayed_work); + +/** + * flush_rcu_work - wait for a rwork to finish executing the last queueing + * @rwork: the rcu work to flush + * + * Return: + * %true if flush_rcu_work() waited for the work to finish execution, + * %false if it was already idle.
+ */ +bool flush_rcu_work(struct rcu_work *rwork) +{ + if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { + rcu_barrier(); + flush_work(&rwork->work); + return true; + } else { + return flush_work(&rwork->work); + } +} +EXPORT_SYMBOL(flush_rcu_work); + +static bool __cancel_work(struct work_struct *work, bool is_dwork) +{ + unsigned long flags; + int ret; + + do { + ret = try_to_grab_pending(work, is_dwork, &flags); + } while (unlikely(ret == -EAGAIN)); + + if (unlikely(ret < 0)) + return false; + + set_work_pool_and_clear_pending(work, get_work_pool_id(work)); + local_irq_restore(flags); + return ret; +} + struct cwt_wait { wait_queue_entry_t wait; struct work_struct *work; @@ -4139,6 +4198,15 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork) return ret; } +/* + * See cancel_delayed_work() + */ +bool cancel_work(struct work_struct *work) +{ + return __cancel_work(work, false); +} +EXPORT_SYMBOL(cancel_work); + /** * cancel_work_sync - cancel a work and wait for it to finish * @work: the work to cancel @@ -4163,74 +4231,6 @@ bool cancel_work_sync(struct work_struct *work) } EXPORT_SYMBOL_GPL(cancel_work_sync); -/** - * flush_delayed_work - wait for a dwork to finish executing the last queueing - * @dwork: the delayed work to flush - * - * Delayed timer is cancelled and the pending work is queued for - * immediate execution. Like flush_work(), this function only - * considers the last queueing instance of @dwork. - * - * Return: - * %true if flush_work() waited for the work to finish execution, - * %false if it was already idle. - */ -bool flush_delayed_work(struct delayed_work *dwork) -{ - local_irq_disable(); - if (del_timer_sync(&dwork->timer)) - __queue_work(dwork->cpu, dwork->wq, &dwork->work); - local_irq_enable(); - return flush_work(&dwork->work); -} -EXPORT_SYMBOL(flush_delayed_work); - -/** - * flush_rcu_work - wait for a rwork to finish executing the last queueing - * @rwork: the rcu work to flush - * - * Return: - * %true if flush_rcu_work() waited for the work to finish execution, - * %false if it was already idle. - */ -bool flush_rcu_work(struct rcu_work *rwork) -{ - if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { - rcu_barrier(); - flush_work(&rwork->work); - return true; - } else { - return flush_work(&rwork->work); - } -} -EXPORT_SYMBOL(flush_rcu_work); - -static bool __cancel_work(struct work_struct *work, bool is_dwork) -{ - unsigned long flags; - int ret; - - do { - ret = try_to_grab_pending(work, is_dwork, &flags); - } while (unlikely(ret == -EAGAIN)); - - if (unlikely(ret < 0)) - return false; - - set_work_pool_and_clear_pending(work, get_work_pool_id(work)); - local_irq_restore(flags); - return ret; -} - -/* - * See cancel_delayed_work() - */ -bool cancel_work(struct work_struct *work) -{ - return __cancel_work(work, false); -} -EXPORT_SYMBOL(cancel_work); - /** * cancel_delayed_work - cancel a delayed work * @dwork: delayed_work to cancel -- cgit v1.2.3 From c26e2f2e2fcfb73996fa025a0d3b5695017d65b5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: workqueue: Use variable name irq_flags for saving local irq flags Using the generic term `flags` for irq flags is conventional but can be confusing as there's quite a bit of code dealing with work flags which involves some subtleties. Let's use a more explicit name `irq_flags` for local irq flags. No functional changes. 
Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 76 +++++++++++++++++++++++++++--------------------------- 1 file changed, 38 insertions(+), 38 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 962061dca05c..b590d93d054b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2029,7 +2029,7 @@ out_put: * try_to_grab_pending - steal work item from worklist and disable irq * @work: work item to steal * @is_dwork: @work is a delayed_work - * @flags: place to store irq state + * @irq_flags: place to store irq state * * Try to grab PENDING bit of @work. This function can handle @work in any * stable state - idle, on timer or on worklist. @@ -2051,17 +2051,17 @@ out_put: * irqsafe, ensures that we return -EAGAIN for finite short period of time. * * On successful return, >= 0, irq is disabled and the caller is - * responsible for releasing it using local_irq_restore(*@flags). + * responsible for releasing it using local_irq_restore(*@irq_flags). * * This function is safe to call from any context including IRQ handler. */ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, - unsigned long *flags) + unsigned long *irq_flags) { struct worker_pool *pool; struct pool_workqueue *pwq; - local_irq_save(*flags); + local_irq_save(*irq_flags); /* try to steal the timer if it exists */ if (is_dwork) { @@ -2136,7 +2136,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, raw_spin_unlock(&pool->lock); fail: rcu_read_unlock(); - local_irq_restore(*flags); + local_irq_restore(*irq_flags); if (work_is_canceling(work)) return -ENOENT; cpu_relax(); @@ -2344,16 +2344,16 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) { bool ret = false; - unsigned long flags; + unsigned long irq_flags; - local_irq_save(flags); + local_irq_save(irq_flags); if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { __queue_work(cpu, wq, work); ret = true; } - local_irq_restore(flags); + local_irq_restore(irq_flags); return ret; } EXPORT_SYMBOL(queue_work_on); @@ -2410,7 +2410,7 @@ static int select_numa_node_cpu(int node) bool queue_work_node(int node, struct workqueue_struct *wq, struct work_struct *work) { - unsigned long flags; + unsigned long irq_flags; bool ret = false; /* @@ -2424,7 +2424,7 @@ bool queue_work_node(int node, struct workqueue_struct *wq, */ WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)); - local_irq_save(flags); + local_irq_save(irq_flags); if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { int cpu = select_numa_node_cpu(node); @@ -2433,7 +2433,7 @@ bool queue_work_node(int node, struct workqueue_struct *wq, ret = true; } - local_irq_restore(flags); + local_irq_restore(irq_flags); return ret; } EXPORT_SYMBOL_GPL(queue_work_node); @@ -2503,17 +2503,17 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, { struct work_struct *work = &dwork->work; bool ret = false; - unsigned long flags; + unsigned long irq_flags; /* read the comment in __queue_work() */ - local_irq_save(flags); + local_irq_save(irq_flags); if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { __queue_delayed_work(cpu, wq, dwork, delay); ret = true; } - local_irq_restore(flags); + local_irq_restore(irq_flags); return ret; } EXPORT_SYMBOL(queue_delayed_work_on); @@ -2539,16 +2539,16 @@ EXPORT_SYMBOL(queue_delayed_work_on); bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) { - 
unsigned long flags; + unsigned long irq_flags; int ret; do { - ret = try_to_grab_pending(&dwork->work, true, &flags); + ret = try_to_grab_pending(&dwork->work, true, &irq_flags); } while (unlikely(ret == -EAGAIN)); if (likely(ret >= 0)) { __queue_delayed_work(cpu, wq, dwork, delay); - local_irq_restore(flags); + local_irq_restore(irq_flags); } /* -ENOENT from try_to_grab_pending() becomes %true */ @@ -4105,18 +4105,18 @@ EXPORT_SYMBOL(flush_rcu_work); static bool __cancel_work(struct work_struct *work, bool is_dwork) { - unsigned long flags; + unsigned long irq_flags; int ret; do { - ret = try_to_grab_pending(work, is_dwork, &flags); + ret = try_to_grab_pending(work, is_dwork, &irq_flags); } while (unlikely(ret == -EAGAIN)); if (unlikely(ret < 0)) return false; set_work_pool_and_clear_pending(work, get_work_pool_id(work)); - local_irq_restore(flags); + local_irq_restore(irq_flags); return ret; } @@ -4137,11 +4137,11 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *k static bool __cancel_work_sync(struct work_struct *work, bool is_dwork) { static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); - unsigned long flags; + unsigned long irq_flags; int ret; do { - ret = try_to_grab_pending(work, is_dwork, &flags); + ret = try_to_grab_pending(work, is_dwork, &irq_flags); /* * If someone else is already canceling, wait for it to * finish. flush_work() doesn't work for PREEMPT_NONE @@ -4175,7 +4175,7 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork) /* tell other tasks trying to grab @work to back off */ mark_work_canceling(work); - local_irq_restore(flags); + local_irq_restore(irq_flags); /* * Skip __flush_work() during early boot when we know that @work isn't @@ -5381,15 +5381,15 @@ static void wq_adjust_max_active(struct workqueue_struct *wq) activated = false; for_each_pwq(pwq, wq) { - unsigned long flags; + unsigned long irq_flags; /* can be called during early boot w/ irq disabled */ - raw_spin_lock_irqsave(&pwq->pool->lock, flags); + raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags); if (pwq_activate_first_inactive(pwq, true)) { activated = true; kick_pool(pwq->pool); } - raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); + raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags); } } while (activated); } @@ -5762,7 +5762,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested); unsigned int work_busy(struct work_struct *work) { struct worker_pool *pool; - unsigned long flags; + unsigned long irq_flags; unsigned int ret = 0; if (work_pending(work)) @@ -5771,10 +5771,10 @@ unsigned int work_busy(struct work_struct *work) rcu_read_lock(); pool = get_work_pool(work); if (pool) { - raw_spin_lock_irqsave(&pool->lock, flags); + raw_spin_lock_irqsave(&pool->lock, irq_flags); if (find_worker_executing_work(pool, work)) ret |= WORK_BUSY_RUNNING; - raw_spin_unlock_irqrestore(&pool->lock, flags); + raw_spin_unlock_irqrestore(&pool->lock, irq_flags); } rcu_read_unlock(); @@ -6006,7 +6006,7 @@ void show_one_workqueue(struct workqueue_struct *wq) { struct pool_workqueue *pwq; bool idle = true; - unsigned long flags; + unsigned long irq_flags; for_each_pwq(pwq, wq) { if (!pwq_is_empty(pwq)) { @@ -6020,7 +6020,7 @@ void show_one_workqueue(struct workqueue_struct *wq) pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); for_each_pwq(pwq, wq) { - raw_spin_lock_irqsave(&pwq->pool->lock, flags); + raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags); if (!pwq_is_empty(pwq)) { /* * Defer printing to avoid deadlocks in console @@ -6031,7 +6031,7 @@ void 
show_one_workqueue(struct workqueue_struct *wq) show_pwq(pwq); printk_deferred_exit(); } - raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); + raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags); /* * We could be printing a lot from atomic context, e.g. * sysrq-t -> show_all_workqueues(). Avoid triggering @@ -6050,10 +6050,10 @@ static void show_one_worker_pool(struct worker_pool *pool) { struct worker *worker; bool first = true; - unsigned long flags; + unsigned long irq_flags; unsigned long hung = 0; - raw_spin_lock_irqsave(&pool->lock, flags); + raw_spin_lock_irqsave(&pool->lock, irq_flags); if (pool->nr_workers == pool->nr_idle) goto next_pool; @@ -6081,7 +6081,7 @@ static void show_one_worker_pool(struct worker_pool *pool) pr_cont("\n"); printk_deferred_exit(); next_pool: - raw_spin_unlock_irqrestore(&pool->lock, flags); + raw_spin_unlock_irqrestore(&pool->lock, irq_flags); /* * We could be printing a lot from atomic context, e.g. * sysrq-t -> show_all_workqueues(). Avoid triggering @@ -7212,10 +7212,10 @@ static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; static void show_cpu_pool_hog(struct worker_pool *pool) { struct worker *worker; - unsigned long flags; + unsigned long irq_flags; int bkt; - raw_spin_lock_irqsave(&pool->lock, flags); + raw_spin_lock_irqsave(&pool->lock, irq_flags); hash_for_each(pool->busy_hash, bkt, worker, hentry) { if (task_is_running(worker->task)) { @@ -7233,7 +7233,7 @@ static void show_cpu_pool_hog(struct worker_pool *pool) } } - raw_spin_unlock_irqrestore(&pool->lock, flags); + raw_spin_unlock_irqrestore(&pool->lock, irq_flags); } static void show_cpu_pools_hogs(void) -- cgit v1.2.3 From c5f5b9422a49e9bc1c2f992135592ed921ac18e5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: workqueue: Introduce work_cancel_flags The cancel path used bool @is_dwork to distinguish canceling a regular work and a delayed one. The planned disable/enable support will need passing around another flag in the code path. As passing them around with bools will be confusing, let's introduce named flags to pass around in the cancel path. WORK_CANCEL_DELAYED replaces @is_dwork. No functional changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b590d93d054b..317c85f051b0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -96,6 +96,10 @@ enum worker_flags { WORKER_UNBOUND | WORKER_REBOUND, }; +enum work_cancel_flags { + WORK_CANCEL_DELAYED = 1 << 0, /* canceling a delayed_work */ +}; + enum wq_internal_consts { NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */ @@ -2028,7 +2032,7 @@ out_put: /** * try_to_grab_pending - steal work item from worklist and disable irq * @work: work item to steal - * @is_dwork: @work is a delayed_work + * @cflags: %WORK_CANCEL_ flags * @irq_flags: place to store irq state * * Try to grab PENDING bit of @work. This function can handle @work in any @@ -2055,7 +2059,7 @@ out_put: * * This function is safe to call from any context including IRQ handler. 
*/ -static int try_to_grab_pending(struct work_struct *work, bool is_dwork, +static int try_to_grab_pending(struct work_struct *work, u32 cflags, unsigned long *irq_flags) { struct worker_pool *pool; @@ -2064,7 +2068,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, local_irq_save(*irq_flags); /* try to steal the timer if it exists */ - if (is_dwork) { + if (cflags & WORK_CANCEL_DELAYED) { struct delayed_work *dwork = to_delayed_work(work); /* @@ -2543,7 +2547,8 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, int ret; do { - ret = try_to_grab_pending(&dwork->work, true, &irq_flags); + ret = try_to_grab_pending(&dwork->work, WORK_CANCEL_DELAYED, + &irq_flags); } while (unlikely(ret == -EAGAIN)); if (likely(ret >= 0)) { @@ -4103,13 +4108,13 @@ bool flush_rcu_work(struct rcu_work *rwork) } EXPORT_SYMBOL(flush_rcu_work); -static bool __cancel_work(struct work_struct *work, bool is_dwork) +static bool __cancel_work(struct work_struct *work, u32 cflags) { unsigned long irq_flags; int ret; do { - ret = try_to_grab_pending(work, is_dwork, &irq_flags); + ret = try_to_grab_pending(work, cflags, &irq_flags); } while (unlikely(ret == -EAGAIN)); if (unlikely(ret < 0)) @@ -4134,14 +4139,14 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *k return autoremove_wake_function(wait, mode, sync, key); } -static bool __cancel_work_sync(struct work_struct *work, bool is_dwork) +static bool __cancel_work_sync(struct work_struct *work, u32 cflags) { static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); unsigned long irq_flags; int ret; do { - ret = try_to_grab_pending(work, is_dwork, &irq_flags); + ret = try_to_grab_pending(work, cflags, &irq_flags); /* * If someone else is already canceling, wait for it to * finish. flush_work() doesn't work for PREEMPT_NONE @@ -4203,7 +4208,7 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork) */ bool cancel_work(struct work_struct *work) { - return __cancel_work(work, false); + return __cancel_work(work, 0); } EXPORT_SYMBOL(cancel_work); @@ -4227,7 +4232,7 @@ EXPORT_SYMBOL(cancel_work); */ bool cancel_work_sync(struct work_struct *work) { - return __cancel_work_sync(work, false); + return __cancel_work_sync(work, 0); } EXPORT_SYMBOL_GPL(cancel_work_sync); @@ -4249,7 +4254,7 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); */ bool cancel_delayed_work(struct delayed_work *dwork) { - return __cancel_work(&dwork->work, true); + return __cancel_work(&dwork->work, WORK_CANCEL_DELAYED); } EXPORT_SYMBOL(cancel_delayed_work); @@ -4264,7 +4269,7 @@ EXPORT_SYMBOL(cancel_delayed_work); */ bool cancel_delayed_work_sync(struct delayed_work *dwork) { - return __cancel_work_sync(&dwork->work, true); + return __cancel_work_sync(&dwork->work, WORK_CANCEL_DELAYED); } EXPORT_SYMBOL(cancel_delayed_work_sync); -- cgit v1.2.3 From e9a8e01f9b133c145dd125021ec47c006d108af4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: workqueue: Clean up enum work_bits and related constants The bits of work->data are used for a few different purposes. How the bits are used is determined by enum work_bits. The planned disable/enable support will add another use, so let's clean it up a bit in preparation. - Let WORK_STRUCT_*_BIT's values be determined by enum definition order. - Delimit different bit sections the same way using SHIFT and BITS values. - Rename __WORK_OFFQ_CANCELING to WORK_OFFQ_CANCELING_BIT for consistency.
- Introduce WORK_STRUCT_PWQ_SHIFT and replace WORK_STRUCT_FLAG_MASK and WORK_STRUCT_WQ_DATA_MASK with WORK_STRUCT_PWQ_MASK for clarity. - Improve documentation. No functional changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 317c85f051b0..7c6915e23c5c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -247,7 +247,7 @@ enum pool_workqueue_stats { }; /* - * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS + * The per-pool workqueue. While queued, bits below WORK_PWQ_SHIFT * of work_struct->data are used for flags and the remaining high bits * point to the pwq; thus, pwqs need to be aligned at two's power of the * number of flag bits. @@ -294,7 +294,7 @@ struct pool_workqueue { */ struct kthread_work release_work; struct rcu_head rcu; -} __aligned(1 << WORK_STRUCT_FLAG_BITS); +} __aligned(1 << WORK_STRUCT_PWQ_SHIFT); /* * Structure used to wait for workqueue flush. @@ -843,7 +843,7 @@ static void clear_work_data(struct work_struct *work) static inline struct pool_workqueue *work_struct_pwq(unsigned long data) { - return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK); + return (struct pool_workqueue *)(data & WORK_STRUCT_PWQ_MASK); } static struct pool_workqueue *get_work_pwq(struct work_struct *work) @@ -4851,7 +4851,7 @@ static void pwq_release_workfn(struct kthread_work *work) static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, struct worker_pool *pool) { - BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); + BUG_ON((unsigned long)pwq & ~WORK_STRUCT_PWQ_MASK); memset(pwq, 0, sizeof(*pwq)); -- cgit v1.2.3 From 978b8409eab15aa733ae3a79c9b5158d34cd3fb7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: workqueue: Factor out work_grab_pending() from __cancel_work_sync() The planned disable/enable support will need the same logic. Let's factor it out. No functional changes. v2: Update function comment to include @irq_flags. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 132 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 80 insertions(+), 52 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 7c6915e23c5c..3b606eb5d6e3 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -484,6 +484,12 @@ static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; /* I: attributes used when instantiating ordered pools on demand */ static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS]; +/* + * Used to synchronize multiple cancel_sync attempts on the same work item. See + * work_grab_pending() and __cancel_work_sync(). + */ +static DECLARE_WAIT_QUEUE_HEAD(wq_cancel_waitq); + /* * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
Bounce to a dedicated kthread @@ -2147,6 +2153,75 @@ fail: return -EAGAIN; } +struct cwt_wait { + wait_queue_entry_t wait; + struct work_struct *work; +}; + +static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) +{ + struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); + + if (cwait->work != key) + return 0; + return autoremove_wake_function(wait, mode, sync, key); +} + +/** + * work_grab_pending - steal work item from worklist and disable irq + * @work: work item to steal + * @cflags: %WORK_CANCEL_ flags + * @irq_flags: place to store IRQ state + * + * Grab PENDING bit of @work. @work can be in any stable state - idle, on timer + * or on worklist. + * + * Must be called in process context. IRQ is disabled on return with IRQ state + * stored in *@irq_flags. The caller is responsible for re-enabling it using + * local_irq_restore(). + * + * Returns %true if @work was pending. %false if idle. + */ +static bool work_grab_pending(struct work_struct *work, u32 cflags, + unsigned long *irq_flags) +{ + struct cwt_wait cwait; + int ret; + + might_sleep(); +repeat: + ret = try_to_grab_pending(work, cflags, irq_flags); + if (likely(ret >= 0)) + return ret; + if (ret != -ENOENT) + goto repeat; + + /* + * Someone is already canceling. Wait for it to finish. flush_work() + * doesn't work for PREEMPT_NONE because we may get woken up between + * @work's completion and the other canceling task resuming and clearing + * CANCELING - flush_work() will return false immediately as @work is no + * longer busy, try_to_grab_pending() will return -ENOENT as @work is + * still being canceled and the other canceling task won't be able to + * clear CANCELING as we're hogging the CPU. + * + * Let's wait for completion using a waitqueue. As this may lead to the + * thundering herd problem, use a custom wake function which matches + * @work along with exclusive wait and wakeup. + */ + init_wait(&cwait.wait); + cwait.wait.func = cwt_wakefn; + cwait.work = work; + + prepare_to_wait_exclusive(&wq_cancel_waitq, &cwait.wait, + TASK_UNINTERRUPTIBLE); + if (work_is_canceling(work)) + schedule(); + finish_wait(&wq_cancel_waitq, &cwait.wait); + + goto repeat; +} + /** * insert_work - insert a work into a pool * @pwq: pwq @work belongs to @@ -4125,60 +4200,13 @@ static bool __cancel_work(struct work_struct *work, u32 cflags) return ret; } -struct cwt_wait { - wait_queue_entry_t wait; - struct work_struct *work; -}; - -static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) -{ - struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); - - if (cwait->work != key) - return 0; - return autoremove_wake_function(wait, mode, sync, key); -} - static bool __cancel_work_sync(struct work_struct *work, u32 cflags) { - static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); unsigned long irq_flags; - int ret; - - do { - ret = try_to_grab_pending(work, cflags, &irq_flags); - /* - * If someone else is already canceling, wait for it to - * finish. flush_work() doesn't work for PREEMPT_NONE - * because we may get scheduled between @work's completion - * and the other canceling task resuming and clearing - * CANCELING - flush_work() will return false immediately - * as @work is no longer busy, try_to_grab_pending() will - * return -ENOENT as @work is still being canceled and the - * other canceling task won't be able to clear CANCELING as - * we're hogging the CPU. - * - * Let's wait for completion using a waitqueue. 
As this - * may lead to the thundering herd problem, use a custom - * wake function which matches @work along with exclusive - * wait and wakeup. - */ - if (unlikely(ret == -ENOENT)) { - struct cwt_wait cwait; - - init_wait(&cwait.wait); - cwait.wait.func = cwt_wakefn; - cwait.work = work; - - prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, - TASK_UNINTERRUPTIBLE); - if (work_is_canceling(work)) - schedule(); - finish_wait(&cancel_waitq, &cwait.wait); - } - } while (unlikely(ret < 0)); + bool ret; - /* tell other tasks trying to grab @work to back off */ + /* claim @work and tell other tasks trying to grab @work to back off */ + ret = work_grab_pending(work, cflags, &irq_flags); mark_work_canceling(work); local_irq_restore(irq_flags); @@ -4197,8 +4225,8 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags) * visible there. */ smp_mb(); - if (waitqueue_active(&cancel_waitq)) - __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); + if (waitqueue_active(&wq_cancel_waitq)) + __wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work); return ret; } -- cgit v1.2.3 From afe928c1dc611bec155d834020e0631e026aeb8a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:14 -1000 Subject: workqueue: Remove clear_work_data() clear_work_data() is only used in one place and immediately followed by smp_mb(), making it equivalent to set_work_pool_and_clear_pending() w/ WORK_OFFQ_POOL_NONE for @pool_id. Drop it. No functional changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 3b606eb5d6e3..4f9c85f7c57a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -763,10 +763,9 @@ static int work_next_color(int color) * contain the pointer to the queued pwq. Once execution starts, the flag * is cleared and the high bits contain OFFQ flags and pool ID. * - * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling() - * and clear_work_data() can be used to set the pwq, pool or clear - * work->data. These functions should only be called while the work is - * owned - ie. while the PENDING bit is set. + * set_work_pwq(), set_work_pool_and_clear_pending() and mark_work_canceling() + * can be used to set the pwq, pool or clear work->data. These functions should + * only be called while the work is owned - ie. while the PENDING bit is set. * * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq * corresponding to a work. Pool is available once the work has been @@ -841,12 +840,6 @@ static void set_work_pool_and_clear_pending(struct work_struct *work, smp_mb(); } -static void clear_work_data(struct work_struct *work) -{ - smp_wmb(); /* see set_work_pool_and_clear_pending() */ - set_work_data(work, WORK_STRUCT_NO_POOL, 0); -} - static inline struct pool_workqueue *work_struct_pwq(unsigned long data) { return (struct pool_workqueue *)(data & WORK_STRUCT_PWQ_MASK); @@ -4217,14 +4210,13 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags) if (wq_online) __flush_work(work, true); - clear_work_data(work); - /* - * Paired with prepare_to_wait() above so that either - * waitqueue_active() is visible here or !work_is_canceling() is - * visible there. + * smp_mb() at the end of set_work_pool_and_clear_pending() is paired + * with prepare_to_wait() above so that either waitqueue_active() is + * visible here or !work_is_canceling() is visible there. 
*/ - smp_mb(); + set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE); + if (waitqueue_active(&wq_cancel_waitq)) __wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work); -- cgit v1.2.3 From bccdc1faafaf32e00d6e4dddca1ded64e3272189 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 20 Feb 2024 19:36:15 -1000 Subject: workqueue: Make @flags handling consistent across set_work_data() and friends - set_work_data() takes a separate @flags argument but just ORs it to @data. This is more confusing than helpful. Just take @data. - Use the name @flags consistently and add the parameter to set_work_pool_and_{keep|clear}_pending(). This will be used by the planned disable/enable support. No functional changes. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- kernel/workqueue.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4f9c85f7c57a..65a27be81452 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -777,29 +777,28 @@ static int work_next_color(int color) * but stay off timer and worklist for arbitrarily long and nobody should * try to steal the PENDING bit. */ -static inline void set_work_data(struct work_struct *work, unsigned long data, - unsigned long flags) +static inline void set_work_data(struct work_struct *work, unsigned long data) { WARN_ON_ONCE(!work_pending(work)); - atomic_long_set(&work->data, data | flags | work_static(work)); + atomic_long_set(&work->data, data | work_static(work)); } static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, - unsigned long extra_flags) + unsigned long flags) { - set_work_data(work, (unsigned long)pwq, - WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags); + set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING | + WORK_STRUCT_PWQ | flags); } static void set_work_pool_and_keep_pending(struct work_struct *work, - int pool_id) + int pool_id, unsigned long flags) { - set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, - WORK_STRUCT_PENDING); + set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) | + WORK_STRUCT_PENDING | flags); } static void set_work_pool_and_clear_pending(struct work_struct *work, - int pool_id) + int pool_id, unsigned long flags) { /* * The following wmb is paired with the implied mb in @@ -808,7 +807,8 @@ static void set_work_pool_and_clear_pending(struct work_struct *work, * owner. */ smp_wmb(); - set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); + set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) | + flags); /* * The following mb guarantees that previous clear of a PENDING bit * will not be reordered with any speculative LOADS or STORES from @@ -909,7 +909,7 @@ static void mark_work_canceling(struct work_struct *work) unsigned long pool_id = get_work_pool_id(work); pool_id <<= WORK_OFFQ_POOL_SHIFT; - set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); + set_work_data(work, pool_id | WORK_STRUCT_PENDING | WORK_OFFQ_CANCELING); } static bool work_is_canceling(struct work_struct *work) @@ -2127,7 +2127,7 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags, * this destroys work->data needed by the next step, stash it. 
*/
 	work_data = *work_data_bits(work);
-	set_work_pool_and_keep_pending(work, pool->id);
+	set_work_pool_and_keep_pending(work, pool->id, 0);
 
 	/* must be the last step, see the function comment */
 	pwq_dec_nr_in_flight(pwq, work_data);
@@ -3205,7 +3205,7 @@ __acquires(&pool->lock)
 	 * PENDING and queued state changes happen together while IRQ is
 	 * disabled.
 	 */
-	set_work_pool_and_clear_pending(work, pool->id);
+	set_work_pool_and_clear_pending(work, pool->id, 0);
 
 	pwq->stats[PWQ_STAT_STARTED]++;
 	raw_spin_unlock_irq(&pool->lock);
@@ -4188,7 +4188,7 @@ static bool __cancel_work(struct work_struct *work, u32 cflags)
 	if (unlikely(ret < 0))
 		return false;
 
-	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
+	set_work_pool_and_clear_pending(work, get_work_pool_id(work), 0);
 	local_irq_restore(irq_flags);
 	return ret;
 }
@@ -4215,7 +4215,7 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
 	 * with prepare_to_wait() above so that either waitqueue_active() is
 	 * visible here or !work_is_canceling() is visible there.
 	 */
-	set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE);
+	set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE, 0);
 
 	if (waitqueue_active(&wq_cancel_waitq))
 		__wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work);
-- 
cgit v1.2.3 

From e1fb1dc08e73466830612bcf2f9f72180965c9ba Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Fri, 9 Feb 2024 15:49:45 +0100
Subject: pidfd: allow to override signal scope in pidfd_send_signal()

Right now we determine the scope of the signal based on the type of
pidfd. There are use-cases where it's useful to override the scope of
the signal. For example in [1]. Add flags to determine the scope of the
signal:

(1) PIDFD_SIGNAL_THREAD: send signal to specific thread referenced by @pidfd
(2) PIDFD_SIGNAL_THREAD_GROUP: send signal to thread-group of @pidfd
(3) PIDFD_SIGNAL_PROCESS_GROUP: send signal to process-group of @pidfd

Since we now allow specifying PIDFD_SIGNAL_PROCESS_GROUP for
pidfd_send_signal() to send signals to process groups we need to adjust
the check restricting si_code emulation by userspace to account for
PIDTYPE_PGID.
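[ Illustration, not part of this patch: a userspace sketch of the new
  scope flags. It assumes the PIDFD_SIGNAL_* constants are exposed via
  <linux/pidfd.h> and invokes the raw syscall, since libc may not
  provide a wrapper; kill_pidfd_pgrp() is a hypothetical helper name.

	#include <linux/pidfd.h>
	#include <signal.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Send SIGTERM to the whole process group behind @pidfd,
	 * overriding the scope the pidfd type alone would imply. */
	static int kill_pidfd_pgrp(int pidfd)
	{
		return syscall(SYS_pidfd_send_signal, pidfd, SIGTERM,
			       NULL, PIDFD_SIGNAL_PROCESS_GROUP);
	}
]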
Reviewed-by: Oleg Nesterov Link: https://github.com/systemd/systemd/issues/31093 [1] Link: https://lore.kernel.org/r/20240210-chihuahua-hinzog-3945b6abd44a@brauner Link: https://lore.kernel.org/r/20240214123655.GB16265@redhat.com Signed-off-by: Christian Brauner --- kernel/signal.c | 46 +++++++++++++++++++++++++++++++++++++--------- 1 file changed, 37 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index 8b8169623850..bdca529f0f7b 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1905,16 +1905,19 @@ int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno, return send_sig_info(info.si_signo, &info, t); } -int kill_pgrp(struct pid *pid, int sig, int priv) +static int kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp) { int ret; - read_lock(&tasklist_lock); - ret = __kill_pgrp_info(sig, __si_special(priv), pid); + ret = __kill_pgrp_info(sig, info, pgrp); read_unlock(&tasklist_lock); - return ret; } + +int kill_pgrp(struct pid *pid, int sig, int priv) +{ + return kill_pgrp_info(sig, __si_special(priv), pid); +} EXPORT_SYMBOL(kill_pgrp); int kill_pid(struct pid *pid, int sig, int priv) @@ -3873,6 +3876,10 @@ static struct pid *pidfd_to_pid(const struct file *file) return tgid_pidfd_to_pid(file); } +#define PIDFD_SEND_SIGNAL_FLAGS \ + (PIDFD_SIGNAL_THREAD | PIDFD_SIGNAL_THREAD_GROUP | \ + PIDFD_SIGNAL_PROCESS_GROUP) + /** * sys_pidfd_send_signal - Signal a process through a pidfd * @pidfd: file descriptor of the process @@ -3897,7 +3904,11 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, enum pid_type type; /* Enforce flags be set to 0 until we add an extension. */ - if (flags) + if (flags & ~PIDFD_SEND_SIGNAL_FLAGS) + return -EINVAL; + + /* Ensure that only a single signal scope determining flag is set. */ + if (hweight32(flags & PIDFD_SEND_SIGNAL_FLAGS) > 1) return -EINVAL; f = fdget(pidfd); @@ -3915,10 +3926,24 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, if (!access_pidfd_pidns(pid)) goto err; - if (f.file->f_flags & PIDFD_THREAD) + switch (flags) { + case 0: + /* Infer scope from the type of pidfd. */ + if (f.file->f_flags & PIDFD_THREAD) + type = PIDTYPE_PID; + else + type = PIDTYPE_TGID; + break; + case PIDFD_SIGNAL_THREAD: type = PIDTYPE_PID; - else + break; + case PIDFD_SIGNAL_THREAD_GROUP: type = PIDTYPE_TGID; + break; + case PIDFD_SIGNAL_PROCESS_GROUP: + type = PIDTYPE_PGID; + break; + } if (info) { ret = copy_siginfo_from_user_any(&kinfo, info); @@ -3931,14 +3956,17 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, /* Only allow sending arbitrary signals to yourself. */ ret = -EPERM; - if ((task_pid(current) != pid) && + if ((task_pid(current) != pid || type > PIDTYPE_TGID) && (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) goto err; } else { prepare_kill_siginfo(sig, &kinfo, type); } - ret = kill_pid_info_type(sig, &kinfo, pid, type); + if (type == PIDTYPE_PGID) + ret = kill_pgrp_info(sig, &kinfo, pid); + else + ret = kill_pid_info_type(sig, &kinfo, pid, type); err: fdput(f); return ret; -- cgit v1.2.3 From e0a1284b293bdf91a68a6d1a0479ad476d0d8ec2 Mon Sep 17 00:00:00 2001 From: David Gow Date: Wed, 21 Feb 2024 17:27:17 +0800 Subject: time/kunit: Use correct format specifier 'days' is a s64 (from div_s64), and so should use a %lld specifier. This was found by extending KUnit's assertion macros to use gcc's __printf attribute. 
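[ Illustration, not part of this patch: the underlying rule as a minimal
  in-kernel sketch. The kernel defines s64 as 'long long' on all
  architectures, so %lld is the only specifier that survives gcc's
  format checking:

	s64 days = div_s64(secs, 86400);

	/* %ld would trigger a -Wformat warning once the assertion
	 * macros are annotated with __printf */
	pr_info("elapsed days: %lld\n", days);
]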
Fixes: 276010551664 ("time: Improve performance of time64_to_tm()")
Signed-off-by: David Gow
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/r/20240221092728.1281499-5-davidgow@google.com
--- kernel/time/time_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/time_test.c b/kernel/time/time_test.c index ca058c8af6ba..3e5d422dd15c 100644 --- a/kernel/time/time_test.c +++ b/kernel/time/time_test.c @@ -73,7 +73,7 @@ static void time64_to_tm_test_date_range(struct kunit *test) days = div_s64(secs, 86400); - #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %ld", \ + #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %lld", \ year, month, mdday, yday, days KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG); -- cgit v1.2.3 

From 2ed08e4bc53298db3f87b528cd804cb0cce066a9 Mon Sep 17 00:00:00 2001
From: Feng Tang
Date: Wed, 21 Feb 2024 14:08:59 +0800
Subject: clocksource: Scale the watchdog read retries automatically

On an 8-socket server the TSC is wrongly marked as 'unstable' and disabled during boot time on about one out of 120 boot attempts: clocksource: timekeeping watchdog on CPU227: wd-tsc-wd excessive read-back delay of 153560ns vs. limit of 125000ns, wd-wd read-back delay only 11440ns, attempt 3, marking tsc unstable tsc: Marking TSC unstable due to clocksource watchdog TSC found unstable after boot, most likely due to broken BIOS. Use 'tsc=unstable'. sched_clock: Marking unstable (119294969739, 159204297)<-(125446229205, -5992055152) clocksource: Checking clocksource tsc synchronization from CPU 319 to CPUs 0,99,136,180,210,542,601,896. clocksource: Switched to clocksource hpet The reason is that for platforms with a large number of CPUs, there are sporadic big or huge read latencies while reading the watchdog/clocksource during boot or when the system is under stress workload, and the frequency and maximum value of the latency go up with the number of online CPUs. The current code already has logic to detect and filter such high latency cases by reading the watchdog twice and checking the two deltas. Due to the randomness of the latency, there is a low probability that the first delta (latency) is big, but the second delta is small and looks valid. The watchdog code retries the readouts by default twice, which is not necessarily sufficient for systems with a large number of CPUs. There is a command line parameter 'max_cswd_read_retries' which allows increasing the number of retries, but that's not user-friendly as it needs to be tweaked per system. As the number of required retries is proportional to the number of online CPUs, this parameter can be calculated at runtime. Scale and enlarge the number of retries according to the number of online CPUs and remove the command line parameter completely. [ tglx: Massaged change log and comments ] Signed-off-by: Feng Tang Signed-off-by: Thomas Gleixner Tested-by: Jin Wang Tested-by: Paul E. McKenney Reviewed-by: Waiman Long Reviewed-by: Paul E.
McKenney Link: https://lore.kernel.org/r/20240221060859.1027450-1-feng.tang@intel.com --- kernel/time/clocksource-wdtest.c | 13 +++++++------ kernel/time/clocksource.c | 10 ++++------ 2 files changed, 11 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/time/clocksource-wdtest.c b/kernel/time/clocksource-wdtest.c index df922f49d171..d06185e054ea 100644 --- a/kernel/time/clocksource-wdtest.c +++ b/kernel/time/clocksource-wdtest.c @@ -104,8 +104,8 @@ static void wdtest_ktime_clocksource_reset(void) static int wdtest_func(void *arg) { unsigned long j1, j2; + int i, max_retries; char *s; - int i; schedule_timeout_uninterruptible(holdoff * HZ); @@ -139,18 +139,19 @@ static int wdtest_func(void *arg) WARN_ON_ONCE(time_before(j2, j1 + NSEC_PER_USEC)); /* Verify tsc-like stability with various numbers of errors injected. */ - for (i = 0; i <= max_cswd_read_retries + 1; i++) { - if (i <= 1 && i < max_cswd_read_retries) + max_retries = clocksource_get_max_watchdog_retry(); + for (i = 0; i <= max_retries + 1; i++) { + if (i <= 1 && i < max_retries) s = ""; - else if (i <= max_cswd_read_retries) + else if (i <= max_retries) s = ", expect message"; else s = ", expect clock skew"; - pr_info("--- Watchdog with %dx error injection, %lu retries%s.\n", i, max_cswd_read_retries, s); + pr_info("--- Watchdog with %dx error injection, %d retries%s.\n", i, max_retries, s); WRITE_ONCE(wdtest_ktime_read_ndelays, i); schedule_timeout_uninterruptible(2 * HZ); WARN_ON_ONCE(READ_ONCE(wdtest_ktime_read_ndelays)); - WARN_ON_ONCE((i <= max_cswd_read_retries) != + WARN_ON_ONCE((i <= max_retries) != !(clocksource_wdtest_ktime.flags & CLOCK_SOURCE_UNSTABLE)); wdtest_ktime_clocksource_reset(); } diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 4ef06651ad07..e5b260aa0e02 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -210,9 +210,6 @@ void clocksource_mark_unstable(struct clocksource *cs) spin_unlock_irqrestore(&watchdog_lock, flags); } -ulong max_cswd_read_retries = 2; -module_param(max_cswd_read_retries, ulong, 0644); -EXPORT_SYMBOL_GPL(max_cswd_read_retries); static int verify_n_cpus = 8; module_param(verify_n_cpus, int, 0644); @@ -224,11 +221,12 @@ enum wd_read_status { static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow) { - unsigned int nretries; + unsigned int nretries, max_retries; u64 wd_end, wd_end2, wd_delta; int64_t wd_delay, wd_seq_delay; - for (nretries = 0; nretries <= max_cswd_read_retries; nretries++) { + max_retries = clocksource_get_max_watchdog_retry(); + for (nretries = 0; nretries <= max_retries; nretries++) { local_irq_disable(); *wdnow = watchdog->read(watchdog); *csnow = cs->read(cs); @@ -240,7 +238,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift); if (wd_delay <= WATCHDOG_MAX_SKEW) { - if (nretries > 1 || nretries >= max_cswd_read_retries) { + if (nretries > 1 || nretries >= max_retries) { pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n", smp_processor_id(), watchdog->name, nretries); } -- cgit v1.2.3 From 266e95786452d97f42dcb9a881bba223584b9648 Mon Sep 17 00:00:00 2001 From: Max Kellermann Date: Thu, 22 Feb 2024 12:47:27 +0100 Subject: cpu: Remove stray semicolon This syntax error was introduced by commit da92df490eea ("cpu: Mark cpu_possible_mask as __ro_after_init"). 
Fixes: da92df490eea ("cpu: Mark cpu_possible_mask as __ro_after_init") Signed-off-by: Max Kellermann Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240222114727.1144588-1-max.kellermann@ionos.com --- kernel/cpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index 7b36b3a4e336..cc4a8068747c 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -3106,7 +3106,7 @@ const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; EXPORT_SYMBOL(cpu_all_bits); #ifdef CONFIG_INIT_ALL_POSSIBLE -struct cpumask __cpu_possible_mask __ro_after_init; +struct cpumask __cpu_possible_mask __ro_after_init = {CPU_BITS_ALL}; #else struct cpumask __cpu_possible_mask __ro_after_init; -- cgit v1.2.3 From bebed6649e85fe55275bd20104ec8e0bdff1bf54 Mon Sep 17 00:00:00 2001 From: Anna-Maria Behnsen Date: Wed, 21 Feb 2024 10:05:29 +0100 Subject: timers: Restructure get_next_timer_interrupt() get_next_timer_interrupt() contains two parts for the next timer interrupt calculation. Those two parts are separated by forwarding the base clock. But the second part does not depend on the forwarded base clock. Therefore restructure get_next_timer_interrupt() to keep things together which belong together. No functional change. Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-2-anna-maria@linutronix.de --- kernel/time/timer.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index d44dba1d4af0..316ded68fa06 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1982,12 +1982,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) if (base->next_expiry_recalc) next_expiry_recalc(base); - /* - * We have a fresh next event. Check whether we can forward the - * base. - */ - __forward_timer_base(base, basej); - if (base->timers_pending) { nextevt = base->next_expiry; @@ -2005,6 +1999,12 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) base->next_expiry = nextevt; } + /* + * We have a fresh next event. Check whether we can forward the + * base. + */ + __forward_timer_base(base, basej); + /* * Base is idle if the next event is more than a tick away. * -- cgit v1.2.3 From 39ed699fb660c65cef4759c041763c75e0948425 Mon Sep 17 00:00:00 2001 From: Anna-Maria Behnsen Date: Wed, 21 Feb 2024 10:05:30 +0100 Subject: timers: Split out get next timer interrupt Split out get_next_timer_interrupt() to be able to extend it and make it reusable for other call sites. No functional change. Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-3-anna-maria@linutronix.de --- kernel/time/timer.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 316ded68fa06..9f0cdba4afe0 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1956,15 +1956,7 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires) return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC; } -/** - * get_next_timer_interrupt - return the time (clock mono) of the next timer - * @basej: base time jiffies - * @basem: base time clock monotonic - * - * Returns the tick aligned clock monotonic time of the next pending - * timer or KTIME_MAX if no timer is pending. 
- */
-u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 	unsigned long nextevt = basej + NEXT_TIMER_MAX_DELTA;
@@ -2023,6 +2015,19 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 	return cmp_next_hrtimer_event(basem, expires);
 }
 
+/**
+ * get_next_timer_interrupt() - return the time (clock mono) of the next timer
+ * @basej: base time jiffies
+ * @basem: base time clock monotonic
+ *
+ * Returns the tick aligned clock monotonic time of the next pending
+ * timer or KTIME_MAX if no timer is pending.
+ */
+u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+{
+	return __get_next_timer_interrupt(basej, basem);
+}
+
 /**
  * timer_clear_idle - Clear the idle state of the timer base
  *
-- 
cgit v1.2.3 

From e2e1d724e948c87a31c18c34c6b6a193a9b2a0f0 Mon Sep 17 00:00:00 2001
From: Anna-Maria Behnsen
Date: Wed, 21 Feb 2024 10:05:31 +0100
Subject: timers: Move marking timer bases idle into tick_nohz_stop_tick()

The timer base is marked idle when get_next_timer_interrupt() is executed. But the decision whether the tick will be stopped and whether the system is able to go idle is done later. When the timer base is marked idle and a new first timer is enqueued remotely, an IPI is raised even though it is not required, since the tick is not stopped and the timer base is evaluated again at the next tick anyway. To prevent this, the timer base is marked idle in tick_nohz_stop_tick() and get_next_timer_interrupt() is streamlined by only looking for the next timer interrupt. All other work is postponed to timer_base_try_to_set_idle() which is called by tick_nohz_stop_tick(). timer_base_try_to_set_idle() never resets timer_base::is_idle state. This is done when the tick is restarted via tick_nohz_restart_sched_tick(). With this, tick_sched::tick_stopped and timer_base::is_idle are always in sync. So there is no longer the need to execute timer_clear_idle() in tick_nohz_idle_retain_tick(). This was required before, as tick_nohz_next_event() set timer_base::is_idle even if the tick would not be stopped. Now timer_clear_idle() is only executed when the timer base is idle, so the check whether the timer base is idle is no longer required either. While at it, fix some nearby whitespace damage as well.
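[ Illustration, not part of this patch: the resulting tick-stop flow in
  condensed form, using the interfaces introduced here; see the hunks
  below for the real code.

	/* tick_nohz_stop_tick(), simplified */
	bool timer_idle;
	u64 expires;

	expires = timer_base_try_to_set_idle(basejiff, basemono, &timer_idle);
	if (!timer_idle)
		return;		/* base not marked idle, retain the tick */

	/* ... stop the tick; the idle marking is undone only when the
	 * tick is restarted, via timer_clear_idle() */
]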
Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-4-anna-maria@linutronix.de --- kernel/time/tick-internal.h | 1 + kernel/time/tick-sched.c | 40 +++++++++++++++++++++--------- kernel/time/timer.c | 60 +++++++++++++++++++++++++++++++-------------- 3 files changed, 71 insertions(+), 30 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 481b7ab65e2c..47df30b871e4 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -163,6 +163,7 @@ static inline void timers_update_nohz(void) { } DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases); extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem); +u64 timer_base_try_to_set_idle(unsigned long basej, u64 basem, bool *idle); void timer_clear_idle(void); #define CLOCK_SET_WALL \ diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 7c9efe3d9d56..344b904f520f 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -859,11 +859,6 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) */ delta = next_tick - basemono; if (delta <= (u64)TICK_NSEC) { - /* - * Tell the timer code that the base is not idle, i.e. undo - * the effect of get_next_timer_interrupt(): - */ - timer_clear_idle(); /* * We've not stopped the tick yet, and there's a timer in the * next period, so no point in stopping it either, bail. @@ -899,12 +894,38 @@ out: static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); + unsigned long basejiff = ts->last_jiffies; u64 basemono = ts->timer_expires_base; - u64 expires = ts->timer_expires; + bool timer_idle; + u64 expires; /* Make sure we won't be trying to stop it twice in a row. */ ts->timer_expires_base = 0; + /* + * Now the tick should be stopped definitely - so the timer base needs + * to be marked idle as well to not miss a newly queued timer. + */ + expires = timer_base_try_to_set_idle(basejiff, basemono, &timer_idle); + if (expires > ts->timer_expires) { + /* + * This path could only happen when the first timer was removed + * between calculating the possible sleep length and now (when + * high resolution mode is not active, timer could also be a + * hrtimer). + * + * We have to stick to the original calculated expiry value to + * not stop the tick for too long with a shallow C-state (which + * was programmed by cpuidle because of an early next expiration + * value). + */ + expires = ts->timer_expires; + } + + /* If the timer base is not idle, retain the not yet stopped tick. */ + if (!timer_idle) + return; + /* * If this CPU is the one which updates jiffies, then give up * the assignment and let it be taken by the CPU which runs @@ -1001,7 +1022,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) touch_softlockup_watchdog_sched(); /* Cancel the scheduled timer and restore the tick: */ - ts->tick_stopped = 0; + ts->tick_stopped = 0; tick_nohz_restart(ts, now); } @@ -1157,11 +1178,6 @@ void tick_nohz_idle_stop_tick(void) void tick_nohz_idle_retain_tick(void) { tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched)); - /* - * Undo the effect of get_next_timer_interrupt() called from - * tick_nohz_next_event(). 
- */ - timer_clear_idle(); } /** diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 9f0cdba4afe0..a4b8a58d05e5 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1956,19 +1956,22 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires) return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC; } -static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem) +static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, + bool *idle) { struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); unsigned long nextevt = basej + NEXT_TIMER_MAX_DELTA; u64 expires = KTIME_MAX; - bool was_idle; /* * Pretend that there is no timer pending if the cpu is offline. * Possible pending timers will be migrated later to an active cpu. */ - if (cpu_is_offline(smp_processor_id())) + if (cpu_is_offline(smp_processor_id())) { + if (idle) + *idle = true; return expires; + } raw_spin_lock(&base->lock); if (base->next_expiry_recalc) @@ -1998,17 +2001,26 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem) __forward_timer_base(base, basej); /* - * Base is idle if the next event is more than a tick away. - * - * If the base is marked idle then any timer add operation must forward - * the base clk itself to keep granularity small. This idle logic is - * only maintained for the BASE_STD base, deferrable timers may still - * see large granularity skew (by design). + * Set base->is_idle only when caller is timer_base_try_to_set_idle() */ - was_idle = base->is_idle; - base->is_idle = time_after(nextevt, basej + 1); - if (was_idle != base->is_idle) - trace_timer_base_idle(base->is_idle, base->cpu); + if (idle) { + /* + * Base is idle if the next event is more than a tick away. + * + * If the base is marked idle then any timer add operation must + * forward the base clk itself to keep granularity small. This + * idle logic is only maintained for the BASE_STD base, + * deferrable timers may still see large granularity skew (by + * design). + */ + if (!base->is_idle) { + if (time_after(nextevt, basej + 1)) { + base->is_idle = true; + trace_timer_base_idle(true, base->cpu); + } + } + *idle = base->is_idle; + } raw_spin_unlock(&base->lock); @@ -2025,7 +2037,21 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem) */ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) { - return __get_next_timer_interrupt(basej, basem); + return __get_next_timer_interrupt(basej, basem, NULL); +} + +/** + * timer_base_try_to_set_idle() - Try to set the idle state of the timer bases + * @basej: base time jiffies + * @basem: base time clock monotonic + * @idle: pointer to store the value of timer_base->is_idle + * + * Returns the tick aligned clock monotonic time of the next pending + * timer or KTIME_MAX if no timer is pending. + */ +u64 timer_base_try_to_set_idle(unsigned long basej, u64 basem, bool *idle) +{ + return __get_next_timer_interrupt(basej, basem, idle); } /** @@ -2043,10 +2069,8 @@ void timer_clear_idle(void) * sending the IPI a few instructions smaller for the cost of taking * the lock in the exit from idle path. 
*/
-	if (base->is_idle) {
-		base->is_idle = false;
-		trace_timer_base_idle(false, smp_processor_id());
-	}
+	base->is_idle = false;
+	trace_timer_base_idle(false, smp_processor_id());
 }
 #endif
-- 
cgit v1.2.3 

From 73129cf4b69cd1aaa3dd5eb7900a9c349773f5ae Mon Sep 17 00:00:00 2001
From: Anna-Maria Behnsen
Date: Wed, 21 Feb 2024 10:05:32 +0100
Subject: timers: Optimization for timer_base_try_to_set_idle()

When the tick is stopped, the timer base's is_idle flag is set as well. When reentering timer_base_try_to_set_idle() with the tick stopped, there is no need to check whether the timer base needs to be set idle again. If a timer was enqueued in the meantime, this is already handled by the tick_nohz_next_event() call which was executed before tick_nohz_stop_tick().

Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-5-anna-maria@linutronix.de --- kernel/time/tick-sched.c | 2 +- kernel/time/timer.c | 11 ++++++++--- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 344b904f520f..f6b613380229 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -896,7 +896,7 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); unsigned long basejiff = ts->last_jiffies; u64 basemono = ts->timer_expires_base; - bool timer_idle; + bool timer_idle = ts->tick_stopped; u64 expires; diff --git a/kernel/time/timer.c b/kernel/time/timer.c index a4b8a58d05e5..74cfe21f8fd9 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -2044,13 +2044,18 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) * timer_base_try_to_set_idle() - Try to set the idle state of the timer bases * @basej: base time jiffies * @basem: base time clock monotonic - * @idle: pointer to store the value of timer_base->is_idle + * @idle: pointer to store the value of timer_base->is_idle on return; + * *idle contains the information whether tick was already stopped * - * Returns the tick aligned clock monotonic time of the next pending - * timer or KTIME_MAX if no timer is pending. + * Returns the tick aligned clock monotonic time of the next pending timer or + * KTIME_MAX if no timer is pending. When tick was already stopped KTIME_MAX is + * returned as well. */ u64 timer_base_try_to_set_idle(unsigned long basej, u64 basem, bool *idle) { + if (*idle) + return KTIME_MAX; + return __get_next_timer_interrupt(basej, basem, idle); } -- cgit v1.2.3 

From 8e7e247f64a1e0fee430aba28d9108f7598eb237 Mon Sep 17 00:00:00 2001
From: Anna-Maria Behnsen
Date: Wed, 21 Feb 2024 10:05:33 +0100
Subject: timers: Introduce add_timer() variants which modify timer flags

A timer might be used as a pinned timer (using add_timer_on()) and later on as a non-pinned timer using add_timer(). When the "NOHZ timer pull at expiry model" is in place, the TIMER_PINNED flag is required to be used whenever a timer needs to expire on a dedicated CPU. Otherwise the flag must not be set if expiration on a dedicated CPU is not required. add_timer_on()'s behavior will be changed during the preparation patches for the "NOHZ timer pull at expiry model" to unconditionally set the TIMER_PINNED flag. To be able to clear/set the flag when queueing a timer, two variants of add_timer() are introduced.
This is a preparatory step and has no functional change.

Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-6-anna-maria@linutronix.de --- kernel/time/timer.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) (limited to 'kernel') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 74cfe21f8fd9..bef8cb8e7266 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1290,6 +1290,40 @@ void add_timer(struct timer_list *timer) } EXPORT_SYMBOL(add_timer); +/** + * add_timer_local() - Start a timer on the local CPU + * @timer: The timer to be started + * + * Same as add_timer() except that the timer flag TIMER_PINNED is set. + * + * See add_timer() for further details. + */ +void add_timer_local(struct timer_list *timer) +{ + if (WARN_ON_ONCE(timer_pending(timer))) + return; + timer->flags |= TIMER_PINNED; + __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); +} +EXPORT_SYMBOL(add_timer_local); + +/** + * add_timer_global() - Start a timer without TIMER_PINNED flag set + * @timer: The timer to be started + * + * Same as add_timer() except that the timer flag TIMER_PINNED is unset. + * + * See add_timer() for further details. + */ +void add_timer_global(struct timer_list *timer) +{ + if (WARN_ON_ONCE(timer_pending(timer))) + return; + timer->flags &= ~TIMER_PINNED; + __mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING); +} +EXPORT_SYMBOL(add_timer_global); + /** * add_timer_on - Start a timer on a particular CPU * @timer: The timer to be started -- cgit v1.2.3 

From c0e8c5b59949e8b8b004481c99e102df606fc312 Mon Sep 17 00:00:00 2001
From: Anna-Maria Behnsen
Date: Wed, 21 Feb 2024 10:05:34 +0100
Subject: workqueue: Use global variant for add_timer()

The implementation of the NOHZ pull at expiry model will change the timer bases per CPU. Timers that have to expire on a specific CPU require the TIMER_PINNED flag. If the CPU doesn't matter, the TIMER_PINNED flag must be dropped. This is required for call sites which use the timer alternately as a pinned and a non-pinned timer, like workqueues do. Therefore use add_timer_global() in __queue_delayed_work() for non-bound delayed work to make sure the TIMER_PINNED flag is dropped. Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Acked-by: Tejun Heo Link: https://lore.kernel.org/r/20240221090548.36600-7-anna-maria@linutronix.de --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 7b482a26d741..78eaea2e5d72 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1961,7 +1961,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, if (unlikely(cpu != WORK_CPU_UNBOUND)) add_timer_on(timer, cpu); else - add_timer(timer); + add_timer_global(timer); } /** -- cgit v1.2.3 

From aae55e9fb8fc73893f86165f0d84a33b7080902a Mon Sep 17 00:00:00 2001
From: Anna-Maria Behnsen
Date: Wed, 21 Feb 2024 10:05:35 +0100
Subject: timers: Make sure TIMER_PINNED flag is set in add_timer_on()

When adding a timer to the timer wheel using add_timer_on(), it is an implicitly pinned timer. With the timer pull at expiry time model in place, the TIMER_PINNED flag is required to make sure timers end up in the proper base. Set the TIMER_PINNED flag unconditionally when add_timer_on() is executed.
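[ Illustration, not part of this patch: the intended alternating usage
  once both changes are in place; @t and @cpu are placeholders.

	/* first round: expiry must happen on @cpu, add_timer_on() now
	 * sets TIMER_PINNED implicitly */
	t->expires = jiffies + HZ;
	add_timer_on(t, cpu);

	/* after expiry, re-arm without placement constraint: this
	 * drops TIMER_PINNED again */
	t->expires = jiffies + HZ;
	add_timer_global(t);
]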
Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-8-anna-maria@linutronix.de --- kernel/time/timer.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index bef8cb8e7266..121f5b99ea21 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1329,7 +1329,10 @@ EXPORT_SYMBOL(add_timer_global); * @timer: The timer to be started * @cpu: The CPU to start it on * - * Same as add_timer() except that it starts the timer on the given CPU. + * Same as add_timer() except that it starts the timer on the given CPU and + * the TIMER_PINNED flag is set. When timer shouldn't be a pinned timer in + * the next round, add_timer_global() should be used instead as it unsets + * the TIMER_PINNED flag. * * See add_timer() for further details. */ @@ -1343,6 +1346,9 @@ void add_timer_on(struct timer_list *timer, int cpu) if (WARN_ON_ONCE(timer_pending(timer))) return; + /* Make sure timer flags have TIMER_PINNED flag set */ + timer->flags |= TIMER_PINNED; + new_base = get_timer_cpu_base(timer->flags, cpu); /* -- cgit v1.2.3 

From af68cb3fc736e13e66fc8202ea94c3aff7299f39 Mon Sep 17 00:00:00 2001
From: Anna-Maria Behnsen
Date: Wed, 21 Feb 2024 10:05:36 +0100
Subject: timers: Simplify code in run_local_timers()

The way the logic for raising a softirq is implemented right now is readable for two timer bases. When increasing the number of timer bases, the code gets harder to read. With the introduction of the timer migration hierarchy, there will be three timer bases. Therefore restructure the code to use a loop. No functional change. Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-9-anna-maria@linutronix.de --- kernel/time/timer.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 121f5b99ea21..9c8443d4fe33 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -2180,16 +2180,14 @@ static void run_local_timers(void) struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); hrtimer_run_queues(); - /* Raise the softirq only if required. */ - if (time_before(jiffies, base->next_expiry)) { - if (!IS_ENABLED(CONFIG_NO_HZ_COMMON)) - return; - /* CPU is awake, so check the deferrable base. */ - base++; - if (time_before(jiffies, base->next_expiry)) + + for (int i = 0; i < NR_BASES; i++, base++) { + /* Raise the softirq only if required. */ + if (time_after_eq(jiffies, base->next_expiry)) { + raise_softirq(TIMER_SOFTIRQ); return; + } } - raise_softirq(TIMER_SOFTIRQ); } /* -- cgit v1.2.3 

From 9f6a3c602c235c3cccbe673fb7e1cca30ca4be0d Mon Sep 17 00:00:00 2001
From: Anna-Maria Behnsen
Date: Wed, 21 Feb 2024 10:05:37 +0100
Subject: timers: Split next timer interrupt logic

Split the logic for getting the next timer interrupt (no matter whether it is recalculated or already stored in base->next_expiry) into a separate function named next_timer_interrupt(). Make it available to local call sites only. No functional change.
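[ Illustration, not part of this patch: the contract of the new helper,
  condensed from the hunk below.

	/* with base->lock held: returns base->next_expiry, recalculated
	 * if necessary; an empty base reports basej + NEXT_TIMER_MAX_DELTA
	 * instead of a stale expiry value */
	nextevt = next_timer_interrupt(base, basej);
]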
Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-10-anna-maria@linutronix.de --- kernel/time/timer.c | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 9c8443d4fe33..748f4b72fb6d 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1996,12 +1996,29 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires) return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC; } +static unsigned long next_timer_interrupt(struct timer_base *base, + unsigned long basej) +{ + if (base->next_expiry_recalc) + next_expiry_recalc(base); + + /* + * Move next_expiry for the empty base into the future to prevent an + * unnecessary raise of the timer softirq when the next_expiry value + * will be reached even if there is no timer pending. + */ + if (!base->timers_pending) + base->next_expiry = basej + NEXT_TIMER_MAX_DELTA; + + return base->next_expiry; +} + static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, bool *idle) { struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); - unsigned long nextevt = basej + NEXT_TIMER_MAX_DELTA; u64 expires = KTIME_MAX; + unsigned long nextevt; /* * Pretend that there is no timer pending if the cpu is offline. @@ -2014,24 +2031,13 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, } raw_spin_lock(&base->lock); - if (base->next_expiry_recalc) - next_expiry_recalc(base); + nextevt = next_timer_interrupt(base, basej); if (base->timers_pending) { - nextevt = base->next_expiry; - /* If we missed a tick already, force 0 delta */ if (time_before(nextevt, basej)) nextevt = basej; expires = basem + (u64)(nextevt - basej) * TICK_NSEC; - } else { - /* - * Move next_expiry for the empty base into the future to - * prevent a unnecessary raise of the timer softirq when the - * next_expiry value will be reached even if there is no timer - * pending. - */ - base->next_expiry = nextevt; } /* -- cgit v1.2.3 From 83a665dc99a7b0721fa1e02fb60d2a1789ccd371 Mon Sep 17 00:00:00 2001 From: Anna-Maria Behnsen Date: Wed, 21 Feb 2024 10:05:38 +0100 Subject: timers: Keep the pinned timers separate from the others Separate the storage space for pinned timers. Deferrable timers (doesn't matter if pinned or non pinned) are still enqueued into their own base. This is preparatory work for changing the NOHZ timer placement from a push at enqueue time to a pull at expiry time model. Originally-by: Richard Cochran (linutronix GmbH) Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-11-anna-maria@linutronix.de --- kernel/time/timer.c | 85 +++++++++++++++++++++++++++++++++++------------------ 1 file changed, 56 insertions(+), 29 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 748f4b72fb6d..aabd13675723 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -187,12 +187,18 @@ EXPORT_SYMBOL(jiffies_64); #define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH) #ifdef CONFIG_NO_HZ_COMMON -# define NR_BASES 2 -# define BASE_STD 0 -# define BASE_DEF 1 +/* + * If multiple bases need to be locked, use the base ordering for lock + * nesting, i.e. lowest number first. 
+ */ +# define NR_BASES 3 +# define BASE_LOCAL 0 +# define BASE_GLOBAL 1 +# define BASE_DEF 2 #else # define NR_BASES 1 -# define BASE_STD 0 +# define BASE_LOCAL 0 +# define BASE_GLOBAL 0 # define BASE_DEF 0 #endif @@ -944,7 +950,10 @@ static int detach_if_pending(struct timer_list *timer, struct timer_base *base, static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu) { - struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu); + int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL; + struct timer_base *base; + + base = per_cpu_ptr(&timer_bases[index], cpu); /* * If the timer is deferrable and NO_HZ_COMMON is set then we need @@ -957,7 +966,10 @@ static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu) static inline struct timer_base *get_timer_this_cpu_base(u32 tflags) { - struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); + int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL; + struct timer_base *base; + + base = this_cpu_ptr(&timer_bases[index]); /* * If the timer is deferrable and NO_HZ_COMMON is set then we need @@ -2006,6 +2018,9 @@ static unsigned long next_timer_interrupt(struct timer_base *base, * Move next_expiry for the empty base into the future to prevent an * unnecessary raise of the timer softirq when the next_expiry value * will be reached even if there is no timer pending. + * + * This update is also required to make timer_base::next_expiry values + * easy comparable to find out which base holds the first pending timer. */ if (!base->timers_pending) base->next_expiry = basej + NEXT_TIMER_MAX_DELTA; @@ -2016,9 +2031,10 @@ static unsigned long next_timer_interrupt(struct timer_base *base, static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, bool *idle) { - struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); + unsigned long nextevt, nextevt_local, nextevt_global; + struct timer_base *base_local, *base_global; u64 expires = KTIME_MAX; - unsigned long nextevt; + bool local_first; /* * Pretend that there is no timer pending if the cpu is offline. @@ -2030,10 +2046,20 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, return expires; } - raw_spin_lock(&base->lock); - nextevt = next_timer_interrupt(base, basej); + base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]); + base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]); - if (base->timers_pending) { + raw_spin_lock(&base_local->lock); + raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING); + + nextevt_local = next_timer_interrupt(base_local, basej); + nextevt_global = next_timer_interrupt(base_global, basej); + + local_first = time_before_eq(nextevt_local, nextevt_global); + + nextevt = local_first ? nextevt_local : nextevt_global; + + if (base_local->timers_pending || base_global->timers_pending) { /* If we missed a tick already, force 0 delta */ if (time_before(nextevt, basej)) nextevt = basej; @@ -2044,31 +2070,31 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, * We have a fresh next event. Check whether we can forward the * base. */ - __forward_timer_base(base, basej); + __forward_timer_base(base_local, basej); + __forward_timer_base(base_global, basej); /* * Set base->is_idle only when caller is timer_base_try_to_set_idle() */ if (idle) { /* - * Base is idle if the next event is more than a tick away. + * Bases are idle if the next event is more than a tick away. 
* * If the base is marked idle then any timer add operation must * forward the base clk itself to keep granularity small. This - * idle logic is only maintained for the BASE_STD base, - * deferrable timers may still see large granularity skew (by - * design). + * idle logic is only maintained for the BASE_LOCAL and + * BASE_GLOBAL base, deferrable timers may still see large + * granularity skew (by design). */ - if (!base->is_idle) { - if (time_after(nextevt, basej + 1)) { - base->is_idle = true; - trace_timer_base_idle(true, base->cpu); - } + if (!base_local->is_idle && time_after(nextevt, basej + 1)) { + base_local->is_idle = base_global->is_idle = true; + trace_timer_base_idle(true, base_local->cpu); } - *idle = base->is_idle; + *idle = base_local->is_idle; } - raw_spin_unlock(&base->lock); + raw_spin_unlock(&base_global->lock); + raw_spin_unlock(&base_local->lock); return cmp_next_hrtimer_event(basem, expires); } @@ -2112,15 +2138,14 @@ u64 timer_base_try_to_set_idle(unsigned long basej, u64 basem, bool *idle) */ void timer_clear_idle(void) { - struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); - /* * We do this unlocked. The worst outcome is a remote enqueue sending * a pointless IPI, but taking the lock would just make the window for * sending the IPI a few instructions smaller for the cost of taking * the lock in the exit from idle path. */ - base->is_idle = false; + __this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false); + __this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false); trace_timer_base_idle(false, smp_processor_id()); } #endif @@ -2171,11 +2196,13 @@ static inline void __run_timers(struct timer_base *base) */ static __latent_entropy void run_timer_softirq(struct softirq_action *h) { - struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_LOCAL]); __run_timers(base); - if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) + if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) { + __run_timers(this_cpu_ptr(&timer_bases[BASE_GLOBAL])); __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); + } } /* @@ -2183,7 +2210,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) */ static void run_local_timers(void) { - struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_LOCAL]); hrtimer_run_queues(); -- cgit v1.2.3 From 21927fc89e5feebc1f7cbf50bb58b81d776a62b4 Mon Sep 17 00:00:00 2001 From: Anna-Maria Behnsen Date: Wed, 21 Feb 2024 10:05:39 +0100 Subject: timers: Retrieve next expiry of pinned/non-pinned timers separately For the conversion of the NOHZ timer placement to a pull at expiry time model it's required to have separate expiry times for the pinned and the non-pinned (movable) timers. Therefore struct timer_events is introduced. 
No functional change Originally-by: Richard Cochran (linutronix GmbH) Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-12-anna-maria@linutronix.de --- kernel/time/timer.c | 35 +++++++++++++++++++++++++++++++---- 1 file changed, 31 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index aabd13675723..38becd2facee 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -266,6 +266,11 @@ struct timer_base { static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]); +struct timer_events { + u64 local; + u64 global; +}; + #ifdef CONFIG_NO_HZ_COMMON static DEFINE_STATIC_KEY_FALSE(timers_nohz_active); @@ -2031,10 +2036,11 @@ static unsigned long next_timer_interrupt(struct timer_base *base, static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, bool *idle) { + struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX }; unsigned long nextevt, nextevt_local, nextevt_global; struct timer_base *base_local, *base_global; - u64 expires = KTIME_MAX; bool local_first; + u64 expires; /* * Pretend that there is no timer pending if the cpu is offline. @@ -2043,7 +2049,7 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, if (cpu_is_offline(smp_processor_id())) { if (idle) *idle = true; - return expires; + return tevt.local; } base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]); @@ -2059,13 +2065,32 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, nextevt = local_first ? nextevt_local : nextevt_global; - if (base_local->timers_pending || base_global->timers_pending) { + /* + * If the @nextevt is at max. one tick away, use @nextevt and store + * it in the local expiry value. The next global event is irrelevant in + * this case and can be left as KTIME_MAX. + */ + if (time_before_eq(nextevt, basej + 1)) { /* If we missed a tick already, force 0 delta */ if (time_before(nextevt, basej)) nextevt = basej; - expires = basem + (u64)(nextevt - basej) * TICK_NSEC; + tevt.local = basem + (u64)(nextevt - basej) * TICK_NSEC; + goto forward; } + /* + * Update tevt.* values: + * + * If the local queue expires first, then the global event can be + * ignored. If the global queue is empty, nothing to do either. + */ + if (!local_first && base_global->timers_pending) + tevt.global = basem + (u64)(nextevt_global - basej) * TICK_NSEC; + + if (base_local->timers_pending) + tevt.local = basem + (u64)(nextevt_local - basej) * TICK_NSEC; + +forward: /* * We have a fresh next event. Check whether we can forward the * base. @@ -2096,6 +2121,8 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, raw_spin_unlock(&base_global->lock); raw_spin_unlock(&base_local->lock); + expires = min_t(u64, tevt.local, tevt.global); + return cmp_next_hrtimer_event(basem, expires); } -- cgit v1.2.3 From 70b4cf84f3acd9e72c9ea9064d82577b6f29a60b Mon Sep 17 00:00:00 2001 From: Anna-Maria Behnsen Date: Wed, 21 Feb 2024 10:05:40 +0100 Subject: timers: Split out "get next timer interrupt" functionality The functionality for getting the next timer interrupt in get_next_timer_interrupt() is split into a separate function fetch_next_timer_interrupt() to be usable by other call sites. This is preparatory work for the conversion of the NOHZ timer placement to a pull at expiry time model. No functional change. 
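[ Illustration, not part of this patch: usage of the split-out helper in
  condensed form, based on the hunks below.

	struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };

	/* both timer base locks held */
	nextevt = fetch_next_timer_interrupt(basej, basem, base_local,
					     base_global, &tevt);
	expires = min_t(u64, tevt.local, tevt.global);
]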
Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-13-anna-maria@linutronix.de --- kernel/time/timer.c | 64 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 38 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 38becd2facee..b10e97c995a7 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -2033,30 +2033,13 @@ static unsigned long next_timer_interrupt(struct timer_base *base, return base->next_expiry; } -static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, - bool *idle) +static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem, + struct timer_base *base_local, + struct timer_base *base_global, + struct timer_events *tevt) { - struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX }; unsigned long nextevt, nextevt_local, nextevt_global; - struct timer_base *base_local, *base_global; bool local_first; - u64 expires; - - /* - * Pretend that there is no timer pending if the cpu is offline. - * Possible pending timers will be migrated later to an active cpu. - */ - if (cpu_is_offline(smp_processor_id())) { - if (idle) - *idle = true; - return tevt.local; - } - - base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]); - base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]); - - raw_spin_lock(&base_local->lock); - raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING); nextevt_local = next_timer_interrupt(base_local, basej); nextevt_global = next_timer_interrupt(base_global, basej); @@ -2074,8 +2057,8 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, /* If we missed a tick already, force 0 delta */ if (time_before(nextevt, basej)) nextevt = basej; - tevt.local = basem + (u64)(nextevt - basej) * TICK_NSEC; - goto forward; + tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC; + return nextevt; } /* @@ -2085,12 +2068,41 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, * ignored. If the global queue is empty, nothing to do either. */ if (!local_first && base_global->timers_pending) - tevt.global = basem + (u64)(nextevt_global - basej) * TICK_NSEC; + tevt->global = basem + (u64)(nextevt_global - basej) * TICK_NSEC; if (base_local->timers_pending) - tevt.local = basem + (u64)(nextevt_local - basej) * TICK_NSEC; + tevt->local = basem + (u64)(nextevt_local - basej) * TICK_NSEC; + + return nextevt; +} + +static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, + bool *idle) +{ + struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX }; + struct timer_base *base_local, *base_global; + unsigned long nextevt; + u64 expires; + + /* + * Pretend that there is no timer pending if the cpu is offline. + * Possible pending timers will be migrated later to an active cpu. + */ + if (cpu_is_offline(smp_processor_id())) { + if (idle) + *idle = true; + return tevt.local; + } + + base_local = this_cpu_ptr(&timer_bases[BASE_LOCAL]); + base_global = this_cpu_ptr(&timer_bases[BASE_GLOBAL]); + + raw_spin_lock(&base_local->lock); + raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING); + + nextevt = fetch_next_timer_interrupt(basej, basem, base_local, + base_global, &tevt); -forward: /* * We have a fresh next event. Check whether we can forward the * base. 
-- cgit v1.2.3 From f73d9257ff3c2f415e8c342a91b7f5acfc3ce512 Mon Sep 17 00:00:00 2001 From: Anna-Maria Behnsen Date: Wed, 21 Feb 2024 10:05:41 +0100 Subject: timers: Add get next timer interrupt functionality for remote CPUs To prepare for the conversion of the NOHZ timer placement to a pull at expiry time model it's required to have functionality available getting the next timer interrupt on a remote CPU. Locking of the timer bases and getting the information for the next timer interrupt functionality is split into separate functions. This is required to be compliant with lock ordering when the new model is in place. Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-14-anna-maria@linutronix.de --- kernel/time/tick-internal.h | 10 +++++ kernel/time/timer.c | 95 ++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 100 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 47df30b871e4..8b0c28edbd09 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -8,6 +8,11 @@ #include "timekeeping.h" #include "tick-sched.h" +struct timer_events { + u64 local; + u64 global; +}; + #ifdef CONFIG_GENERIC_CLOCKEVENTS # define TICK_DO_TIMER_NONE -1 @@ -154,6 +159,11 @@ extern unsigned long tick_nohz_active; extern void timers_update_nohz(void); # ifdef CONFIG_SMP extern struct static_key_false timers_migration_enabled; +extern void fetch_next_timer_interrupt_remote(unsigned long basej, u64 basem, + struct timer_events *tevt, + unsigned int cpu); +extern void timer_lock_remote_bases(unsigned int cpu); +extern void timer_unlock_remote_bases(unsigned int cpu); # endif #else /* CONFIG_NO_HZ_COMMON */ static inline void timers_update_nohz(void) { } diff --git a/kernel/time/timer.c b/kernel/time/timer.c index b10e97c995a7..bc074c04784f 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -266,11 +266,6 @@ struct timer_base { static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]); -struct timer_events { - u64 local; - u64 global; -}; - #ifdef CONFIG_NO_HZ_COMMON static DEFINE_STATIC_KEY_FALSE(timers_nohz_active); @@ -2058,6 +2053,21 @@ static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem, if (time_before(nextevt, basej)) nextevt = basej; tevt->local = basem + (u64)(nextevt - basej) * TICK_NSEC; + + /* + * This is required for the remote check only but it doesn't + * hurt, when it is done for both call sites: + * + * * The remote callers will only take care of the global timers + * as local timers will be handled by CPU itself. When not + * updating tevt->global with the already missed first global + * timer, it is possible that it will be missed completely. + * + * * The local callers will ignore the tevt->global anyway, when + * nextevt is max. one tick away. + */ + if (!local_first) + tevt->global = tevt->local; return nextevt; } @@ -2076,6 +2086,81 @@ static unsigned long fetch_next_timer_interrupt(unsigned long basej, u64 basem, return nextevt; } +# ifdef CONFIG_SMP +/** + * fetch_next_timer_interrupt_remote() - Store next timers into @tevt + * @basej: base time jiffies + * @basem: base time clock monotonic + * @tevt: Pointer to the storage for the expiry values + * @cpu: Remote CPU + * + * Stores the next pending local and global timer expiry values in the + * struct pointed to by @tevt. If a queue is empty the corresponding + * field is set to KTIME_MAX. 
If local event expires before global + * event, global event is set to KTIME_MAX as well. + * + * Caller needs to make sure timer base locks are held (use + * timer_lock_remote_bases() for this purpose). + */ +void fetch_next_timer_interrupt_remote(unsigned long basej, u64 basem, + struct timer_events *tevt, + unsigned int cpu) +{ + struct timer_base *base_local, *base_global; + + /* Preset local / global events */ + tevt->local = tevt->global = KTIME_MAX; + + base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu); + base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu); + + lockdep_assert_held(&base_local->lock); + lockdep_assert_held(&base_global->lock); + + fetch_next_timer_interrupt(basej, basem, base_local, base_global, tevt); +} + +/** + * timer_unlock_remote_bases - unlock timer bases of cpu + * @cpu: Remote CPU + * + * Unlocks the remote timer bases. + */ +void timer_unlock_remote_bases(unsigned int cpu) + __releases(timer_bases[BASE_LOCAL]->lock) + __releases(timer_bases[BASE_GLOBAL]->lock) +{ + struct timer_base *base_local, *base_global; + + base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu); + base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu); + + raw_spin_unlock(&base_global->lock); + raw_spin_unlock(&base_local->lock); +} + +/** + * timer_lock_remote_bases - lock timer bases of cpu + * @cpu: Remote CPU + * + * Locks the remote timer bases. + */ +void timer_lock_remote_bases(unsigned int cpu) + __acquires(timer_bases[BASE_LOCAL]->lock) + __acquires(timer_bases[BASE_GLOBAL]->lock) +{ + struct timer_base *base_local, *base_global; + + base_local = per_cpu_ptr(&timer_bases[BASE_LOCAL], cpu); + base_global = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu); + + lockdep_assert_irqs_disabled(); + + raw_spin_lock(&base_local->lock); + raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING); +} +# endif /* CONFIG_SMP */ + static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, bool *idle) { -- cgit v1.2.3 From 90f5df66c86c259d61aff84e16f519fe1424e1ef Mon Sep 17 00:00:00 2001 From: "Richard Cochran (linutronix GmbH)" Date: Wed, 21 Feb 2024 10:05:42 +0100 Subject: timers: Restructure internal locking Move the locking out from __run_timers() to the call sites, so the protected section can be extended at the call site. Preparatory work for changing the NOHZ timer placement to a pull at expiry time model. No functional change. 
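The resulting call-site shape, sketched here for reference (it matches
the hunk below: __run_timers() keeps only the dispatch loop, while the
new __run_timer_base() takes over the early-out check and the locking):

	static void __run_timer_base(struct timer_base *base)
	{
		/* Nothing expired yet, nothing to do */
		if (time_before(jiffies, base->next_expiry))
			return;

		timer_base_lock_expiry(base);
		raw_spin_lock_irq(&base->lock);
		__run_timers(base);	/* now runs with base->lock held */
		raw_spin_unlock_irq(&base->lock);
		timer_base_unlock_expiry(base);
	}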
Signed-off-by: Richard Cochran (linutronix GmbH)
Signed-off-by: Anna-Maria Behnsen
Signed-off-by: Thomas Gleixner
Reviewed-by: Frederic Weisbecker
Link: https://lore.kernel.org/r/20240221090548.36600-15-anna-maria@linutronix.de
---
 kernel/time/timer.c | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)

(limited to 'kernel')

diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index bc074c04784f..51053af64023 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -2283,11 +2283,7 @@ static inline void __run_timers(struct timer_base *base)
 	struct hlist_head heads[LVL_DEPTH];
 	int levels;

-	if (time_before(jiffies, base->next_expiry))
-		return;
-
-	timer_base_lock_expiry(base);
-	raw_spin_lock_irq(&base->lock);
+	lockdep_assert_held(&base->lock);

 	while (time_after_eq(jiffies, base->clk) &&
 	       time_after_eq(jiffies, base->next_expiry)) {
@@ -2311,21 +2307,36 @@ static inline void __run_timers(struct timer_base *base)
 		while (levels--)
 			expire_timers(base, heads + levels);
 	}
+}
+
+static void __run_timer_base(struct timer_base *base)
+{
+	if (time_before(jiffies, base->next_expiry))
+		return;
+
+	timer_base_lock_expiry(base);
+	raw_spin_lock_irq(&base->lock);
+	__run_timers(base);
 	raw_spin_unlock_irq(&base->lock);
 	timer_base_unlock_expiry(base);
 }

+static void run_timer_base(int index)
+{
+	struct timer_base *base = this_cpu_ptr(&timer_bases[index]);
+
+	__run_timer_base(base);
+}
+
 /*
  * This function runs timers and the timer-tq in bottom half context.
  */
 static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 {
-	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_LOCAL]);
-
-	__run_timers(base);
+	run_timer_base(BASE_LOCAL);
 	if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) {
-		__run_timers(this_cpu_ptr(&timer_bases[BASE_GLOBAL]));
-		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
+		run_timer_base(BASE_GLOBAL);
+		run_timer_base(BASE_DEF);
 	}
 }
-- cgit v1.2.3


From 89f01e10c99a5ca2ff88b545fad763d360cdbfc8 Mon Sep 17 00:00:00 2001
From: Anna-Maria Behnsen
Date: Wed, 21 Feb 2024 10:05:43 +0100
Subject: timers: Check if timers base is handled already

Due to the conversion of the NOHZ timer placement to a pull at expiry
time model, the per CPU timer bases with non pinned timers are no longer
handled only by the local CPU. In case a remote CPU has already expired
the non pinned timers base of the local CPU, nothing more needs to be
done by the local CPU.

A check at the beginning of the expire timers routine is required,
because the timer base lock is dropped before executing the timer
callback function.

This is preparatory work and has no functional impact right now.
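A sketch of the interleaving this check covers (hypothetical timeline;
the CPU roles are illustrative only). Because the base lock is dropped
around the timer callback, the local softirq can acquire the lock while
a remote CPU is still expiring this base:

	/*
	 *  CPU A (remote migrator)         CPU B (owner of the base)
	 *  -----------------------         -------------------------
	 *  lock base->lock
	 *  base->running_timer = timer
	 *  unlock base->lock
	 *  run the callback                timer softirq raised
	 *                                  lock base->lock
	 *                                  __run_timers(base)
	 *                                    base->running_timer is set,
	 *                                    so the base is handled already:
	 *                                    return without expiring anything
	 */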
Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-16-anna-maria@linutronix.de --- kernel/time/timer.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 51053af64023..4420cdf59e8c 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -2285,6 +2285,9 @@ static inline void __run_timers(struct timer_base *base) lockdep_assert_held(&base->lock); + if (base->running_timer) + return; + while (time_after_eq(jiffies, base->clk) && time_after_eq(jiffies, base->next_expiry)) { levels = collect_expired_timers(base, heads); -- cgit v1.2.3 From 4c532939aa2e9345ee346bc69d3d12d56d5aa9aa Mon Sep 17 00:00:00 2001 From: "Richard Cochran (linutronix GmbH)" Date: Wed, 21 Feb 2024 10:05:44 +0100 Subject: tick/sched: Split out jiffies update helper function The logic to get the time of the last jiffies update will be needed by the timer pull model as well. Move the code into a global function in anticipation of the new caller. No functional change. Signed-off-by: Richard Cochran (linutronix GmbH) Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-17-anna-maria@linutronix.de --- kernel/time/tick-internal.h | 1 + kernel/time/tick-sched.c | 26 +++++++++++++++++++------- 2 files changed, 20 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 8b0c28edbd09..ccf39befde85 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -157,6 +157,7 @@ static inline void tick_nohz_init(void) { } #ifdef CONFIG_NO_HZ_COMMON extern unsigned long tick_nohz_active; extern void timers_update_nohz(void); +extern u64 get_jiffies_update(unsigned long *basej); # ifdef CONFIG_SMP extern struct static_key_false timers_migration_enabled; extern void fetch_next_timer_interrupt_remote(unsigned long basej, u64 basem, diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index f6b613380229..417bb7f880ca 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -799,6 +799,24 @@ static inline bool local_timer_softirq_pending(void) return local_softirq_pending() & BIT(TIMER_SOFTIRQ); } +/* + * Read jiffies and the time when jiffies were updated last + */ +u64 get_jiffies_update(unsigned long *basej) +{ + unsigned long basejiff; + unsigned int seq; + u64 basemono; + + do { + seq = read_seqcount_begin(&jiffies_seq); + basemono = last_jiffies_update; + basejiff = jiffies; + } while (read_seqcount_retry(&jiffies_seq, seq)); + *basej = basejiff; + return basemono; +} + /** * tick_nohz_next_event() - return the clock monotonic based next event * @ts: pointer to tick_sched struct @@ -813,14 +831,8 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) { u64 basemono, next_tick, delta, expires; unsigned long basejiff; - unsigned int seq; - /* Read jiffies and the time when jiffies were updated last */ - do { - seq = read_seqcount_begin(&jiffies_seq); - basemono = last_jiffies_update; - basejiff = jiffies; - } while (read_seqcount_retry(&jiffies_seq, seq)); + basemono = get_jiffies_update(&basejiff); ts->last_jiffies = basejiff; ts->timer_expires_base = basemono; -- cgit v1.2.3 From 57e95a5c4117dc6a67dc416d82079c02dab7e983 Mon Sep 17 00:00:00 2001 From: Anna-Maria Behnsen Date: Wed, 21 Feb 2024 10:05:45 +0100 Subject: timers: Introduce 
function to check timer base is_idle flag

To prepare for the conversion of the NOHZ timer placement to a pull at
expiry time model, it's required to have a function that returns the
value of the timer base is_idle flag, so that the hierarchy state can be
kept in sync with the timer base state when a CPU comes online.

No functional change.

Signed-off-by: Anna-Maria Behnsen
Signed-off-by: Thomas Gleixner
Reviewed-by: Frederic Weisbecker
Link: https://lore.kernel.org/r/20240221090548.36600-18-anna-maria@linutronix.de
---
 kernel/time/tick-internal.h |  1 +
 kernel/time/timer.c         | 10 ++++++++++
 2 files changed, 11 insertions(+)

(limited to 'kernel')

diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index ccf39befde85..7e3090109e33 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -165,6 +165,7 @@ extern void fetch_next_timer_interrupt_remote(unsigned long basej, u64 basem,
 					      unsigned int cpu);
 extern void timer_lock_remote_bases(unsigned int cpu);
 extern void timer_unlock_remote_bases(unsigned int cpu);
+extern bool timer_base_is_idle(void);
 # endif
 #else /* CONFIG_NO_HZ_COMMON */
 static inline void timers_update_nohz(void) { }
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 4420cdf59e8c..e02ac4607985 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -2159,6 +2159,16 @@ void timer_lock_remote_bases(unsigned int cpu)
 	raw_spin_lock(&base_local->lock);
 	raw_spin_lock_nested(&base_global->lock, SINGLE_DEPTH_NESTING);
 }
+
+/**
+ * timer_base_is_idle() - Return whether timer base is set idle
+ *
+ * Returns the is_idle value of the local timer base.
+ */
+bool timer_base_is_idle(void)
+{
+	return __this_cpu_read(timer_bases[BASE_LOCAL].is_idle);
+}
-- cgit v1.2.3


From 7ee988770326fca440472200c3eb58935fe712f6 Mon Sep 17 00:00:00 2001
From: Anna-Maria Behnsen
Date: Thu, 22 Feb 2024 11:37:10 +0100
Subject: timers: Implement the hierarchical pull model

Placing timers at enqueue time on a target CPU based on dubious
heuristics does not make any sense:

 1) Most timer wheel timers are canceled or rearmed before they expire.

 2) The heuristics to predict which CPU will be busy when the timer
    expires are wrong by definition.

So placing the timers at enqueue wastes precious cycles.

The proper solution to this problem is to always queue the timers on the
local CPU and allow the non pinned timers to be pulled onto a busy CPU at
expiry time.

Therefore split the timer storage into local pinned and global timers:
Local pinned timers are always expired on the CPU on which they have been
queued. Global timers can be expired on any CPU.

As long as a CPU is busy it expires both local and global timers. When a
CPU goes idle it arms for the first expiring local timer. If the first
expiring pinned (local) timer is before the first expiring movable timer,
then no action is required because the CPU will wake up before the first
movable timer expires. If the first expiring movable timer is before the
first expiring pinned (local) timer, then this timer is queued into an
idle timerqueue and eventually expired by another active CPU.

To avoid global locking the timerqueues are implemented as a hierarchy.
The lowest level of the hierarchy holds the CPUs. The CPUs are organized
into groups of 8, which are separated per node. If more than one CPU
group exists, then a second level in the hierarchy collects the groups.
Depending on the size of the system, more than 2 levels are required.
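As a back-of-envelope illustration (not part of the patch; the constant 8
is TMIGR_CHILDREN_PER_GROUP in the code below, and tmigr_levels() is a
made-up helper name):

	/* Levels needed so that a single top level group spans nr_cpus */
	static unsigned int tmigr_levels(unsigned int nr_cpus)
	{
		unsigned int lvl = 1;
		unsigned long span = 8;	/* TMIGR_CHILDREN_PER_GROUP */

		while (span < nr_cpus) {
			span *= 8;
			lvl++;
		}
		return lvl;
	}

With 8 children per group, two levels cover up to 64 CPUs and a third
level is needed beyond that; the per node separation can add groups (and
thus levels) earlier.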
Each group has a "migrator" which checks the timerqueue during the tick
for remote expirable timers.

If the last CPU in a group goes idle it reports the first expiring event
in the group up to the next group(s) in the hierarchy. If the last CPU in
the whole system goes idle it arms its timer for the first system wide
expiring timer to ensure that no timer event is missed.

Signed-off-by: Anna-Maria Behnsen
Signed-off-by: Thomas Gleixner
Reviewed-by: Frederic Weisbecker
Link: https://lore.kernel.org/r/20240222103710.32582-1-anna-maria@linutronix.de
---
 kernel/time/Makefile          |    3 +
 kernel/time/tick-internal.h   |    1 +
 kernel/time/timer.c           |  113 ++-
 kernel/time/timer_migration.c | 1761 +++++++++++++++++++++++++++++++++++++++++
 kernel/time/timer_migration.h |  140 ++++
 5 files changed, 2010 insertions(+), 8 deletions(-)
 create mode 100644 kernel/time/timer_migration.c
 create mode 100644 kernel/time/timer_migration.h

(limited to 'kernel')

diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 7e875e63ff3b..4af2a264a160 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -17,6 +17,9 @@ endif
 obj-$(CONFIG_GENERIC_SCHED_CLOCK)	+= sched_clock.o
 obj-$(CONFIG_TICK_ONESHOT)		+= tick-oneshot.o tick-sched.o
 obj-$(CONFIG_LEGACY_TIMER_TICK)		+= tick-legacy.o
+ifeq ($(CONFIG_SMP),y)
+ obj-$(CONFIG_NO_HZ_COMMON)		+= timer_migration.o
+endif
 obj-$(CONFIG_HAVE_GENERIC_VDSO)		+= vsyscall.o
 obj-$(CONFIG_DEBUG_FS)			+= timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY)		+= test_udelay.o
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 7e3090109e33..a3243c4ac45f 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -166,6 +166,7 @@ extern void fetch_next_timer_interrupt_remote(unsigned long basej, u64 basem,
 extern void timer_lock_remote_bases(unsigned int cpu);
 extern void timer_unlock_remote_bases(unsigned int cpu);
 extern bool timer_base_is_idle(void);
+extern void timer_expire_remote(unsigned int cpu);
 # endif
 #else /* CONFIG_NO_HZ_COMMON */
 static inline void timers_update_nohz(void) { }
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index e02ac4607985..3ed135c8de43 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -53,6 +53,7 @@
 #include

 #include "tick-internal.h"
+#include "timer_migration.h"

 #define CREATE_TRACE_POINTS
 #include
@@ -2169,6 +2170,64 @@ bool timer_base_is_idle(void)
 {
 	return __this_cpu_read(timer_bases[BASE_LOCAL].is_idle);
 }
+
+static void __run_timer_base(struct timer_base *base);
+
+/**
+ * timer_expire_remote() - expire global timers of cpu
+ * @cpu:	Remote CPU
+ *
+ * Expire timers of global base of remote CPU.
+ */
+void timer_expire_remote(unsigned int cpu)
+{
+	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_GLOBAL], cpu);
+
+	__run_timer_base(base);
+}
+
+static void timer_use_tmigr(unsigned long basej, u64 basem,
+			    unsigned long *nextevt, bool *tick_stop_path,
+			    bool timer_base_idle, struct timer_events *tevt)
+{
+	u64 next_tmigr;
+
+	if (timer_base_idle)
+		next_tmigr = tmigr_cpu_new_timer(tevt->global);
+	else if (tick_stop_path)
+		next_tmigr = tmigr_cpu_deactivate(tevt->global);
+	else
+		next_tmigr = tmigr_quick_check(tevt->global);
+
+	/*
+	 * If the CPU is the last one going idle in the timer migration
+	 * hierarchy, make sure the CPU will wake up in time to handle remote
+	 * timers. next_tmigr == KTIME_MAX if other CPUs are still active.
+	 */
+	if (next_tmigr < tevt->local) {
+		u64 tmp;
+
+		/* If we missed a tick already, force 0 delta */
+		if (next_tmigr < basem)
+			next_tmigr = basem;
+
+		tmp = div_u64(next_tmigr - basem, TICK_NSEC);
+
+		*nextevt = basej + (unsigned long)tmp;
+		tevt->local = next_tmigr;
+	}
+}
+# else
+static void timer_use_tmigr(unsigned long basej, u64 basem,
+			    unsigned long *nextevt, bool *tick_stop_path,
+			    bool timer_base_idle, struct timer_events *tevt)
+{
+	/*
+	 * Make sure first event is written into tevt->local to not miss a
+	 * timer on !SMP systems.
+	 */
+	tevt->local = min_t(u64, tevt->local, tevt->global);
+}
 # endif /* CONFIG_SMP */

 static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
@@ -2177,7 +2236,7 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
 	struct timer_events tevt = { .local = KTIME_MAX, .global = KTIME_MAX };
 	struct timer_base *base_local, *base_global;
 	unsigned long nextevt;
-	u64 expires;
+	bool idle_is_possible;

 	/*
 	 * Pretend that there is no timer pending if the cpu is offline.
@@ -2198,6 +2257,22 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
 	nextevt = fetch_next_timer_interrupt(basej, basem, base_local,
 					     base_global, &tevt);

+	/*
+	 * If the next event is only one jiffie ahead there is no need to call
+	 * timer migration hierarchy related functions. The value for the next
+	 * global timer in the @tevt struct then equals KTIME_MAX. This is also
+	 * true when the timer base is idle.
+	 *
+	 * The proper timer migration hierarchy function depends on the callsite
+	 * and whether the timer base is idle or not. @nextevt will be updated
+	 * when this CPU needs to handle the first timer migration hierarchy
+	 * event. See timer_use_tmigr() for detailed information.
+	 */
+	idle_is_possible = time_after(nextevt, basej + 1);
+	if (idle_is_possible)
+		timer_use_tmigr(basej, basem, &nextevt, idle,
+				base_local->is_idle, &tevt);
+
 	/*
 	 * We have a fresh next event. Check whether we can forward the
 	 * base.
@@ -2210,7 +2285,10 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
 	 */
 	if (idle) {
 		/*
-		 * Bases are idle if the next event is more than a tick away.
+		 * Bases are idle if the next event is more than a tick
+		 * away. Caution: @nextevt could have changed by enqueueing a
+		 * global timer into the timer migration hierarchy. Therefore a
+		 * new check is required here.
 		 *
 		 * If the base is marked idle then any timer add operation must
 		 * forward the base clk itself to keep granularity small. This
@@ -2223,14 +2301,23 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
 			trace_timer_base_idle(true, base_local->cpu);
 		}
 		*idle = base_local->is_idle;
+
+		/*
+		 * When the timer base is not set idle, undo the effect of
+		 * tmigr_cpu_deactivate() to prevent inconsistent states - active
+		 * timer base but inactive timer migration hierarchy.
+		 *
+		 * When the timer base was already marked idle, nothing will be
+		 * changed here.
+		 */
+		if (!base_local->is_idle && idle_is_possible)
+			tmigr_cpu_activate();
 	}

 	raw_spin_unlock(&base_global->lock);
 	raw_spin_unlock(&base_local->lock);

-	expires = min_t(u64, tevt.local, tevt.global);
-
-	return cmp_next_hrtimer_event(basem, expires);
+	return cmp_next_hrtimer_event(basem, tevt.local);
 }

 /**
@@ -2238,8 +2325,11 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem,
  * @basej:	base time jiffies
  * @basem:	base time clock monotonic
  *
- * Returns the tick aligned clock monotonic time of the next pending
- * timer or KTIME_MAX if no timer is pending.
+ * Returns the tick aligned clock monotonic time of the next pending timer or
+ * KTIME_MAX if no timer is pending. If the timer of the global base was queued
+ * into the timer migration hierarchy, the first global timer is not taken into
+ * account. If it was the last CPU of the timer migration hierarchy going idle,
+ * the first global event is taken into account.
  */
 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
@@ -2281,6 +2371,9 @@ void timer_clear_idle(void)
 	__this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
 	__this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false);
 	trace_timer_base_idle(false, smp_processor_id());
+
+	/* Activate without holding the timer_base->lock */
+	tmigr_cpu_activate();
 }
 #endif
@@ -2350,6 +2443,9 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 	if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) {
 		run_timer_base(BASE_GLOBAL);
 		run_timer_base(BASE_DEF);
+
+		if (is_timers_nohz_active())
+			tmigr_handle_remote();
 	}
 }
@@ -2364,7 +2460,8 @@ static void run_local_timers(void)
 	for (int i = 0; i < NR_BASES; i++, base++) {
 		/* Raise the softirq only if required. */
-		if (time_after_eq(jiffies, base->next_expiry)) {
+		if (time_after_eq(jiffies, base->next_expiry) ||
+		    (i == BASE_DEF && tmigr_requires_handle_remote())) {
 			raise_softirq(TIMER_SOFTIRQ);
 			return;
 		}
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
new file mode 100644
index 000000000000..23cb6ea3d44e
--- /dev/null
+++ b/kernel/time/timer_migration.c
@@ -0,0 +1,1761 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Infrastructure for migratable timers
+ *
+ * Copyright(C) 2022 linutronix GmbH
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "timer_migration.h"
+#include "tick-internal.h"
+
+/*
+ * The timer migration mechanism is built on a hierarchy of groups. The
+ * lowest level group contains CPUs, the next level contains groups of CPU
+ * groups and so forth. The CPU groups are kept per node so for the normal
+ * case lock contention won't happen across nodes. Depending on the number
+ * of CPUs per node even the next level might be kept as groups of CPU
+ * groups per node and only the levels above cross the node topology.
+ *
+ * Example topology for a two node system with 24 CPUs each.
+ *
+ * LVL 2                           [GRP2:0]
+ *                              GRP1:0 = GRP1:M
+ *
+ * LVL 1            [GRP1:0]                      [GRP1:1]
+ *               GRP0:0 - GRP0:2               GRP0:3 - GRP0:5
+ *
+ * LVL 0  [GRP0:0]  [GRP0:1]  [GRP0:2]  [GRP0:3]  [GRP0:4]  [GRP0:5]
+ * CPUS     0-7       8-15      16-23     24-31     32-39     40-47
+ *
+ * The groups hold a timer queue of events sorted by expiry time. These
+ * queues are updated when CPUs go idle. When they come out of idle, the
+ * ignore flag of their events is set.
+ *
+ * Each group has a designated migrator CPU/group as long as a CPU/group is
+ * active in the group. This designated role is necessary to prevent all
+ * active CPUs in a group from trying to migrate expired timers from other
+ * CPUs, which would result in massive lock bouncing.
+ *
+ * When a CPU is awake, it checks in its own timer tick the group
+ * hierarchy up to the point where it is assigned the migrator role or, if
+ * no CPU is active, it also checks the groups where no migrator is set
+ * (TMIGR_NONE).
+ *
+ * If it finds expired timers in one of the group queues it pulls them over
+ * from the idle CPU and runs the timer function. After that it updates the
+ * group and the parent groups if required.
+ *
+ * CPUs which go idle arm their CPU local timer hardware for the next local
+ * (pinned) timer event. If the next migratable timer expires after the
+ * next local timer or the CPU has no migratable timer pending then the
+ * CPU does not queue an event in the LVL0 group. If the next migratable
+ * timer expires before the next local timer then the CPU queues that timer
+ * in the LVL0 group. In both cases the CPU marks itself idle in the LVL0
+ * group.
+ *
+ * When a CPU comes out of idle and when a group has at least a single
+ * active child, the ignore flag of the tmigr_event is set. This indicates
+ * that the event is ignored even if it is still enqueued in the parent
+ * group's timer queue. It will be removed when touching the timer queue
+ * the next time. This spares locking in the active path as the lock
+ * protects (after setup) only event information. For more information
+ * about locking, please read the section "Locking rules".
+ *
+ * If the CPU is the migrator of the group then it delegates that role to
+ * the next active CPU in the group or sets migrator to TMIGR_NONE when
+ * there is no active CPU in the group. This delegation needs to be
+ * propagated up the hierarchy so hand over from other leaves can happen at
+ * all hierarchy levels w/o doing a search.
+ *
+ * When the last CPU in the system goes idle, then it drops all migrator
+ * duties up to the top level of the hierarchy (LVL2 in the example). It
+ * then has to make sure that it arms its own local hardware timer for
+ * the earliest event in the system.
+ *
+ *
+ * Lifetime rules:
+ * ---------------
+ *
+ * The groups are built up at init time or when CPUs come online. They are
+ * not destroyed when a group becomes empty due to offlining. The group
+ * just won't participate in the hierarchy management anymore. Destroying
+ * groups would result in interesting race conditions which would just make
+ * the whole mechanism slow and complex.
+ *
+ *
+ * Locking rules:
+ * --------------
+ *
+ * For setting up new groups and handling events it's required to lock both
+ * child and parent group. The lock ordering is always bottom up. This also
+ * includes the per CPU locks in struct tmigr_cpu. For updating the migrator and
+ * active CPU/group information atomic_try_cmpxchg() is used instead and only
+ * the per CPU tmigr_cpu->lock is held.
+ *
+ * During the setup of groups tmigr_level_list is required. It is protected by
+ * @tmigr_mutex.
+ *
+ * When @timer_base->lock as well as tmigr related locks are required, the lock
+ * ordering is: first @timer_base->lock, afterwards tmigr related locks.
+ *
+ *
+ * Protection of the tmigr group state information:
+ * ------------------------------------------------
+ *
+ * The state information with the list of active children and migrator needs to
+ * be protected by a sequence counter. It prevents a race when updates in child
+ * groups are propagated in changed order. The state update is performed
+ * lockless and group wise.
The following scenario describes what happens + * without updating the sequence counter: + * + * Therefore, let's take three groups and four CPUs (CPU2 and CPU3 as well + * as GRP0:1 will not change during the scenario): + * + * LVL 1 [GRP1:0] + * migrator = GRP0:1 + * active = GRP0:0, GRP0:1 + * / \ + * LVL 0 [GRP0:0] [GRP0:1] + * migrator = CPU0 migrator = CPU2 + * active = CPU0 active = CPU2 + * / \ / \ + * CPUs 0 1 2 3 + * active idle active idle + * + * + * 1. CPU0 goes idle. As the update is performed group wise, in the first step + * only GRP0:0 is updated. The update of GRP1:0 is pending as CPU0 has to + * walk the hierarchy. + * + * LVL 1 [GRP1:0] + * migrator = GRP0:1 + * active = GRP0:0, GRP0:1 + * / \ + * LVL 0 [GRP0:0] [GRP0:1] + * --> migrator = TMIGR_NONE migrator = CPU2 + * --> active = active = CPU2 + * / \ / \ + * CPUs 0 1 2 3 + * --> idle idle active idle + * + * 2. While CPU0 goes idle and continues to update the state, CPU1 comes out of + * idle. CPU1 updates GRP0:0. The update for GRP1:0 is pending as CPU1 also + * has to walk the hierarchy. Both CPUs (CPU0 and CPU1) now walk the + * hierarchy to perform the needed update from their point of view. The + * currently visible state looks the following: + * + * LVL 1 [GRP1:0] + * migrator = GRP0:1 + * active = GRP0:0, GRP0:1 + * / \ + * LVL 0 [GRP0:0] [GRP0:1] + * --> migrator = CPU1 migrator = CPU2 + * --> active = CPU1 active = CPU2 + * / \ / \ + * CPUs 0 1 2 3 + * idle --> active active idle + * + * 3. Here is the race condition: CPU1 managed to propagate its changes (from + * step 2) through the hierarchy to GRP1:0 before CPU0 (step 1) did. The + * active members of GRP1:0 remain unchanged after the update since it is + * still valid from CPU1 current point of view: + * + * LVL 1 [GRP1:0] + * --> migrator = GRP0:1 + * --> active = GRP0:0, GRP0:1 + * / \ + * LVL 0 [GRP0:0] [GRP0:1] + * migrator = CPU1 migrator = CPU2 + * active = CPU1 active = CPU2 + * / \ / \ + * CPUs 0 1 2 3 + * idle active active idle + * + * 4. Now CPU0 finally propagates its changes (from step 1) to GRP1:0. + * + * LVL 1 [GRP1:0] + * --> migrator = GRP0:1 + * --> active = GRP0:1 + * / \ + * LVL 0 [GRP0:0] [GRP0:1] + * migrator = CPU1 migrator = CPU2 + * active = CPU1 active = CPU2 + * / \ / \ + * CPUs 0 1 2 3 + * idle active active idle + * + * + * The race of CPU0 vs. CPU1 led to an inconsistent state in GRP1:0. CPU1 is + * active and is correctly listed as active in GRP0:0. However GRP1:0 does not + * have GRP0:0 listed as active, which is wrong. The sequence counter has been + * added to avoid inconsistent states during updates. The state is updated + * atomically only if all members, including the sequence counter, match the + * expected value (compare-and-exchange). + * + * Looking back at the previous example with the addition of the sequence + * counter: The update as performed by CPU0 in step 4 will fail. CPU1 changed + * the sequence number during the update in step 3 so the expected old value (as + * seen by CPU0 before starting the walk) does not match. + * + * Prevent race between new event and last CPU going inactive + * ---------------------------------------------------------- + * + * When the last CPU is going idle and there is a concurrent update of a new + * first global timer of an idle CPU, the group and child states have to be read + * while holding the lock in tmigr_update_events(). The following scenario shows + * what happens, when this is not done. + * + * 1. 
Only CPU2 is active: + * + * LVL 1 [GRP1:0] + * migrator = GRP0:1 + * active = GRP0:1 + * next_expiry = KTIME_MAX + * / \ + * LVL 0 [GRP0:0] [GRP0:1] + * migrator = TMIGR_NONE migrator = CPU2 + * active = active = CPU2 + * next_expiry = KTIME_MAX next_expiry = KTIME_MAX + * / \ / \ + * CPUs 0 1 2 3 + * idle idle active idle + * + * 2. Now CPU 2 goes idle (and has no global timer, that has to be handled) and + * propagates that to GRP0:1: + * + * LVL 1 [GRP1:0] + * migrator = GRP0:1 + * active = GRP0:1 + * next_expiry = KTIME_MAX + * / \ + * LVL 0 [GRP0:0] [GRP0:1] + * migrator = TMIGR_NONE --> migrator = TMIGR_NONE + * active = --> active = + * next_expiry = KTIME_MAX next_expiry = KTIME_MAX + * / \ / \ + * CPUs 0 1 2 3 + * idle idle --> idle idle + * + * 3. Now the idle state is propagated up to GRP1:0. As this is now the last + * child going idle in top level group, the expiry of the next group event + * has to be handed back to make sure no event is lost. As there is no event + * enqueued, KTIME_MAX is handed back to CPU2. + * + * LVL 1 [GRP1:0] + * --> migrator = TMIGR_NONE + * --> active = + * next_expiry = KTIME_MAX + * / \ + * LVL 0 [GRP0:0] [GRP0:1] + * migrator = TMIGR_NONE migrator = TMIGR_NONE + * active = active = + * next_expiry = KTIME_MAX next_expiry = KTIME_MAX + * / \ / \ + * CPUs 0 1 2 3 + * idle idle --> idle idle + * + * 4. CPU 0 has a new timer queued from idle and it expires at TIMER0. CPU0 + * propagates that to GRP0:0: + * + * LVL 1 [GRP1:0] + * migrator = TMIGR_NONE + * active = + * next_expiry = KTIME_MAX + * / \ + * LVL 0 [GRP0:0] [GRP0:1] + * migrator = TMIGR_NONE migrator = TMIGR_NONE + * active = active = + * --> next_expiry = TIMER0 next_expiry = KTIME_MAX + * / \ / \ + * CPUs 0 1 2 3 + * idle idle idle idle + * + * 5. GRP0:0 is not active, so the new timer has to be propagated to + * GRP1:0. Therefore the GRP1:0 state has to be read. When the stalled value + * (from step 2) is read, the timer is enqueued into GRP1:0, but nothing is + * handed back to CPU0, as it seems that there is still an active child in + * top level group. + * + * LVL 1 [GRP1:0] + * migrator = TMIGR_NONE + * active = + * --> next_expiry = TIMER0 + * / \ + * LVL 0 [GRP0:0] [GRP0:1] + * migrator = TMIGR_NONE migrator = TMIGR_NONE + * active = active = + * next_expiry = TIMER0 next_expiry = KTIME_MAX + * / \ / \ + * CPUs 0 1 2 3 + * idle idle idle idle + * + * This is prevented by reading the state when holding the lock (when a new + * timer has to be propagated from idle path):: + * + * CPU2 (tmigr_inactive_up()) CPU0 (tmigr_new_timer_up()) + * -------------------------- --------------------------- + * // step 3: + * cmpxchg(&GRP1:0->state); + * tmigr_update_events() { + * spin_lock(&GRP1:0->lock); + * // ... update events ... + * // hand back first expiry when GRP1:0 is idle + * spin_unlock(&GRP1:0->lock); + * // ^^^ release state modification + * } + * tmigr_update_events() { + * spin_lock(&GRP1:0->lock) + * // ^^^ acquire state modification + * group_state = atomic_read(&GRP1:0->state) + * // .... update events ... + * // hand back first expiry when GRP1:0 is idle + * spin_unlock(&GRP1:0->lock) <3> + * // ^^^ makes state visible for other + * // callers of tmigr_new_timer_up() + * } + * + * When CPU0 grabs the lock directly after cmpxchg, the first timer is reported + * back to CPU0 and also later on to CPU2. So no timer is missed. A concurrent + * update of the group state from active path is no problem, as the upcoming CPU + * will take care of the group events. 
+ * + * Required event and timerqueue update after a remote expiry: + * ----------------------------------------------------------- + * + * After expiring timers of a remote CPU, a walk through the hierarchy and + * update of events and timerqueues is required. It is obviously needed if there + * is a 'new' global timer but also if there is no new global timer but the + * remote CPU is still idle. + * + * 1. CPU0 and CPU1 are idle and have both a global timer expiring at the same + * time. So both have an event enqueued in the timerqueue of GRP0:0. CPU3 is + * also idle and has no global timer pending. CPU2 is the only active CPU and + * thus also the migrator: + * + * LVL 1 [GRP1:0] + * migrator = GRP0:1 + * active = GRP0:1 + * --> timerqueue = evt-GRP0:0 + * / \ + * LVL 0 [GRP0:0] [GRP0:1] + * migrator = TMIGR_NONE migrator = CPU2 + * active = active = CPU2 + * groupevt.ignore = false groupevt.ignore = true + * groupevt.cpu = CPU0 groupevt.cpu = + * timerqueue = evt-CPU0, timerqueue = + * evt-CPU1 + * / \ / \ + * CPUs 0 1 2 3 + * idle idle active idle + * + * 2. CPU2 starts to expire remote timers. It starts with LVL0 group + * GRP0:1. There is no event queued in the timerqueue, so CPU2 continues with + * the parent of GRP0:1: GRP1:0. In GRP1:0 it dequeues the first event. It + * looks at tmigr_event::cpu struct member and expires the pending timer(s) + * of CPU0. + * + * LVL 1 [GRP1:0] + * migrator = GRP0:1 + * active = GRP0:1 + * --> timerqueue = + * / \ + * LVL 0 [GRP0:0] [GRP0:1] + * migrator = TMIGR_NONE migrator = CPU2 + * active = active = CPU2 + * groupevt.ignore = false groupevt.ignore = true + * --> groupevt.cpu = CPU0 groupevt.cpu = + * timerqueue = evt-CPU0, timerqueue = + * evt-CPU1 + * / \ / \ + * CPUs 0 1 2 3 + * idle idle active idle + * + * 3. Some work has to be done after expiring the timers of CPU0. If we stop + * here, then CPU1's pending global timer(s) will not expire in time and the + * timerqueue of GRP0:0 has still an event for CPU0 enqueued which has just + * been processed. So it is required to walk the hierarchy from CPU0's point + * of view and update it accordingly. CPU0's event will be removed from the + * timerqueue because it has no pending timer. If CPU0 would have a timer + * pending then it has to expire after CPU1's first timer because all timers + * from this period were just expired. Either way CPU1's event will be first + * in GRP0:0's timerqueue and therefore set in the CPU field of the group + * event which is then enqueued in GRP1:0's timerqueue as GRP0:0 is still not + * active: + * + * LVL 1 [GRP1:0] + * migrator = GRP0:1 + * active = GRP0:1 + * --> timerqueue = evt-GRP0:0 + * / \ + * LVL 0 [GRP0:0] [GRP0:1] + * migrator = TMIGR_NONE migrator = CPU2 + * active = active = CPU2 + * groupevt.ignore = false groupevt.ignore = true + * --> groupevt.cpu = CPU1 groupevt.cpu = + * --> timerqueue = evt-CPU1 timerqueue = + * / \ / \ + * CPUs 0 1 2 3 + * idle idle active idle + * + * Now CPU2 (migrator) will continue step 2 at GRP1:0 and will expire the + * timer(s) of CPU1. + * + * The hierarchy walk in step 3 can be skipped if the migrator notices that a + * CPU of GRP0:0 is active again. The CPU will mark GRP0:0 active and take care + * of the group as migrator and any needed updates within the hierarchy. 
+ */ + +static DEFINE_MUTEX(tmigr_mutex); +static struct list_head *tmigr_level_list __read_mostly; + +static unsigned int tmigr_hierarchy_levels __read_mostly; +static unsigned int tmigr_crossnode_level __read_mostly; + +static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu); + +#define TMIGR_NONE 0xFF +#define BIT_CNT 8 + +static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc) +{ + return !(tmc->tmgroup && tmc->online); +} + +/* + * Returns true, when @childmask corresponds to the group migrator or when the + * group is not active - so no migrator is set. + */ +static bool tmigr_check_migrator(struct tmigr_group *group, u8 childmask) +{ + union tmigr_state s; + + s.state = atomic_read(&group->migr_state); + + if ((s.migrator == childmask) || (s.migrator == TMIGR_NONE)) + return true; + + return false; +} + +static bool tmigr_check_migrator_and_lonely(struct tmigr_group *group, u8 childmask) +{ + bool lonely, migrator = false; + unsigned long active; + union tmigr_state s; + + s.state = atomic_read(&group->migr_state); + + if ((s.migrator == childmask) || (s.migrator == TMIGR_NONE)) + migrator = true; + + active = s.active; + lonely = bitmap_weight(&active, BIT_CNT) <= 1; + + return (migrator && lonely); +} + +static bool tmigr_check_lonely(struct tmigr_group *group) +{ + unsigned long active; + union tmigr_state s; + + s.state = atomic_read(&group->migr_state); + + active = s.active; + + return bitmap_weight(&active, BIT_CNT) <= 1; +} + +typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, void *); + +static void __walk_groups(up_f up, void *data, + struct tmigr_cpu *tmc) +{ + struct tmigr_group *child = NULL, *group = tmc->tmgroup; + + do { + WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels); + + if (up(group, child, data)) + break; + + child = group; + group = group->parent; + } while (group); +} + +static void walk_groups(up_f up, void *data, struct tmigr_cpu *tmc) +{ + lockdep_assert_held(&tmc->lock); + + __walk_groups(up, data, tmc); +} + +/** + * struct tmigr_walk - data required for walking the hierarchy + * @nextexp: Next CPU event expiry information which is handed into + * the timer migration code by the timer code + * (get_next_timer_interrupt()) + * @firstexp: Contains the first event expiry information when last + * active CPU of hierarchy is on the way to idle to make + * sure CPU will be back in time. + * @evt: Pointer to tmigr_event which needs to be queued (of idle + * child group) + * @childmask: childmask of child group + * @remote: Is set, when the new timer path is executed in + * tmigr_handle_remote_cpu() + */ +struct tmigr_walk { + u64 nextexp; + u64 firstexp; + struct tmigr_event *evt; + u8 childmask; + bool remote; +}; + +/** + * struct tmigr_remote_data - data required for remote expiry hierarchy walk + * @basej: timer base in jiffies + * @now: timer base monotonic + * @firstexp: returns expiry of the first timer in the idle timer + * migration hierarchy to make sure the timer is handled in + * time; it is stored in the per CPU tmigr_cpu struct of + * CPU which expires remote timers + * @childmask: childmask of child group + * @check: is set if there is the need to handle remote timers; + * required in tmigr_requires_handle_remote() only + * @tmc_active: this flag indicates, whether the CPU which triggers + * the hierarchy walk is !idle in the timer migration + * hierarchy. When the CPU is idle and the whole hierarchy is + * idle, only the first event of the top level has to be + * considered. 
+ */
+struct tmigr_remote_data {
+	unsigned long basej;
+	u64 now;
+	u64 firstexp;
+	u8 childmask;
+	bool check;
+	bool tmc_active;
+};
+
+/*
+ * Returns the next event of the timerqueue @group->events
+ *
+ * Removes timers with the ignore flag set and updates next_expiry of the
+ * group. Values of the group event are updated in tmigr_update_events() only.
+ */
+static struct tmigr_event *tmigr_next_groupevt(struct tmigr_group *group)
+{
+	struct timerqueue_node *node = NULL;
+	struct tmigr_event *evt = NULL;
+
+	lockdep_assert_held(&group->lock);
+
+	WRITE_ONCE(group->next_expiry, KTIME_MAX);
+
+	while ((node = timerqueue_getnext(&group->events))) {
+		evt = container_of(node, struct tmigr_event, nextevt);
+
+		if (!evt->ignore) {
+			WRITE_ONCE(group->next_expiry, evt->nextevt.expires);
+			return evt;
+		}
+
+		/*
+		 * Remove the next timers with the ignore flag, because the
+		 * group lock is held anyway
+		 */
+		if (!timerqueue_del(&group->events, node))
+			break;
+	}
+
+	return NULL;
+}
+
+/*
+ * Return the next event (with an expiry equal to or before @now)
+ *
+ * The returned event is also removed from the queue.
+ */
+static struct tmigr_event *tmigr_next_expired_groupevt(struct tmigr_group *group,
+						       u64 now)
+{
+	struct tmigr_event *evt = tmigr_next_groupevt(group);
+
+	if (!evt || now < evt->nextevt.expires)
+		return NULL;
+
+	/*
+	 * The event is ready to expire. Remove it and update next group event.
+	 */
+	timerqueue_del(&group->events, &evt->nextevt);
+	tmigr_next_groupevt(group);
+
+	return evt;
+}
+
+static u64 tmigr_next_groupevt_expires(struct tmigr_group *group)
+{
+	struct tmigr_event *evt;
+
+	evt = tmigr_next_groupevt(group);
+
+	if (!evt)
+		return KTIME_MAX;
+	else
+		return evt->nextevt.expires;
+}
+
+static bool tmigr_active_up(struct tmigr_group *group,
+			    struct tmigr_group *child,
+			    void *ptr)
+{
+	union tmigr_state curstate, newstate;
+	struct tmigr_walk *data = ptr;
+	bool walk_done;
+	u8 childmask;
+
+	childmask = data->childmask;
+	/*
+	 * No memory barrier is required here in contrast to
+	 * tmigr_inactive_up(), as the group state change does not depend on the
+	 * child state.
+	 */
+	curstate.state = atomic_read(&group->migr_state);
+
+	do {
+		newstate = curstate;
+		walk_done = true;
+
+		if (newstate.migrator == TMIGR_NONE) {
+			newstate.migrator = childmask;
+
+			/* Changes need to be propagated */
+			walk_done = false;
+		}
+
+		newstate.active |= childmask;
+		newstate.seq++;
+
+	} while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state));
+
+	if ((walk_done == false) && group->parent)
+		data->childmask = group->childmask;
+
+	/*
+	 * The group is active (again). The group event might still be queued
+	 * in the parent group's timerqueue but can now be handled by the
+	 * migrator of this group. Therefore the ignore flag for the group event
+	 * is updated to reflect this.
+	 *
+	 * The update of the ignore flag in the active path is done lockless. In
+	 * the worst case the migrator of the parent group observes the change
+	 * too late and remotely expires all events belonging to this group. The
+	 * lock is held while updating the ignore flag in the idle path. So this
+	 * state change will not be lost.
+	 */
+	group->groupevt.ignore = true;
+
+	return walk_done;
+}
+
+static void __tmigr_cpu_activate(struct tmigr_cpu *tmc)
+{
+	struct tmigr_walk data;
+
+	data.childmask = tmc->childmask;
+
+	tmc->cpuevt.ignore = true;
+	WRITE_ONCE(tmc->wakeup, KTIME_MAX);
+
+	walk_groups(&tmigr_active_up, &data, tmc);
+}
+
+/**
+ * tmigr_cpu_activate() - set this CPU active in timer migration hierarchy
+ *
+ * Call site timer_clear_idle() is called with interrupts disabled.
+ */
+void tmigr_cpu_activate(void)
+{
+	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+
+	if (tmigr_is_not_available(tmc))
+		return;
+
+	if (WARN_ON_ONCE(!tmc->idle))
+		return;
+
+	raw_spin_lock(&tmc->lock);
+	tmc->idle = false;
+	__tmigr_cpu_activate(tmc);
+	raw_spin_unlock(&tmc->lock);
+}
+
+/*
+ * Returns true if there is nothing to be propagated to the next level
+ *
+ * @data->firstexp is set to the expiry of the first global event of the (top
+ * level of the) hierarchy, but only when the hierarchy is completely idle.
+ *
+ * The child and group states need to be read under the lock to prevent a race
+ * against a concurrent tmigr_inactive_up() run when the last CPU goes idle. See
+ * also section "Prevent race between new event and last CPU going inactive" in
+ * the documentation at the top.
+ *
+ * This is the only place where the group event expiry value is set.
+ */
+static
+bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child,
+			 struct tmigr_walk *data)
+{
+	struct tmigr_event *evt, *first_childevt;
+	union tmigr_state childstate, groupstate;
+	bool remote = data->remote;
+	bool walk_done = false;
+	u64 nextexp;
+
+	if (child) {
+		raw_spin_lock(&child->lock);
+		raw_spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
+
+		childstate.state = atomic_read(&child->migr_state);
+		groupstate.state = atomic_read(&group->migr_state);
+
+		if (childstate.active) {
+			walk_done = true;
+			goto unlock;
+		}
+
+		first_childevt = tmigr_next_groupevt(child);
+		nextexp = child->next_expiry;
+		evt = &child->groupevt;
+
+		evt->ignore = (nextexp == KTIME_MAX) ? true : false;
+	} else {
+		nextexp = data->nextexp;
+
+		first_childevt = evt = data->evt;
+
+		/*
+		 * Walking the hierarchy is required in any case when a
+		 * remote expiry was done before. This ensures that already
+		 * queued events in non active groups are not lost (see section
+		 * "Required event and timerqueue update after a remote
+		 * expiry" in the documentation at the top).
+		 *
+		 * The two call sites which are executed without a preceding
+		 * remote expiry are not prevented from propagating changes
+		 * through the hierarchy by the return:
+		 * - When entering this path by tmigr_new_timer(), @evt->ignore
+		 *   is never set.
+		 * - tmigr_inactive_up() takes care of the propagation by
+		 *   itself and ignores the return value. But an immediate
+		 *   return is required because nothing has to be done in this
+		 *   level as the event could be ignored.
+		 */
+		if (evt->ignore && !remote)
+			return true;
+
+		raw_spin_lock(&group->lock);
+
+		childstate.state = 0;
+		groupstate.state = atomic_read(&group->migr_state);
+	}
+
+	/*
+	 * If the child event is already queued in the group, remove it from the
+	 * queue only when the expiry time has changed or when the event could
+	 * be ignored.
+ */ + if (timerqueue_node_queued(&evt->nextevt)) { + if ((evt->nextevt.expires == nextexp) && !evt->ignore) + goto check_toplvl; + + if (!timerqueue_del(&group->events, &evt->nextevt)) + WRITE_ONCE(group->next_expiry, KTIME_MAX); + } + + if (evt->ignore) { + /* + * When the next child event could be ignored (nextexp is + * KTIME_MAX) and there was no remote timer handling before or + * the group is already active, there is no need to walk the + * hierarchy even if there is a parent group. + * + * The other way round: even if the event could be ignored, but + * if a remote timer handling was executed before and the group + * is not active, walking the hierarchy is required to not miss + * an enqueued timer in the non active group. The enqueued timer + * of the group needs to be propagated to a higher level to + * ensure it is handled. + */ + if (!remote || groupstate.active) + walk_done = true; + } else { + evt->nextevt.expires = nextexp; + evt->cpu = first_childevt->cpu; + + if (timerqueue_add(&group->events, &evt->nextevt)) + WRITE_ONCE(group->next_expiry, nextexp); + } + +check_toplvl: + if (!group->parent && (groupstate.migrator == TMIGR_NONE)) { + walk_done = true; + + /* + * Nothing to do when update was done during remote timer + * handling. First timer in top level group which needs to be + * handled when top level group is not active, is calculated + * directly in tmigr_handle_remote_up(). + */ + if (remote) + goto unlock; + + /* + * The top level group is idle and it has to be ensured the + * global timers are handled in time. (This could be optimized + * by keeping track of the last global scheduled event and only + * arming it on the CPU if the new event is earlier. Not sure if + * its worth the complexity.) + */ + data->firstexp = tmigr_next_groupevt_expires(group); + } + +unlock: + raw_spin_unlock(&group->lock); + + if (child) + raw_spin_unlock(&child->lock); + + return walk_done; +} + +static bool tmigr_new_timer_up(struct tmigr_group *group, + struct tmigr_group *child, + void *ptr) +{ + struct tmigr_walk *data = ptr; + + return tmigr_update_events(group, child, data); +} + +/* + * Returns the expiry of the next timer that needs to be handled. KTIME_MAX is + * returned, if an active CPU will handle all the timer migration hierarchy + * timers. + */ +static u64 tmigr_new_timer(struct tmigr_cpu *tmc, u64 nextexp) +{ + struct tmigr_walk data = { .nextexp = nextexp, + .firstexp = KTIME_MAX, + .evt = &tmc->cpuevt }; + + lockdep_assert_held(&tmc->lock); + + if (tmc->remote) + return KTIME_MAX; + + tmc->cpuevt.ignore = false; + data.remote = false; + + walk_groups(&tmigr_new_timer_up, &data, tmc); + + /* If there is a new first global event, make sure it is handled */ + return data.firstexp; +} + +static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now, + unsigned long jif) +{ + struct timer_events tevt; + struct tmigr_walk data; + struct tmigr_cpu *tmc; + + tmc = per_cpu_ptr(&tmigr_cpu, cpu); + + raw_spin_lock_irq(&tmc->lock); + + /* + * If the remote CPU is offline then the timers have been migrated to + * another CPU. + * + * If tmigr_cpu::remote is set, at the moment another CPU already + * expires the timers of the remote CPU. + * + * If tmigr_event::ignore is set, then the CPU returns from idle and + * takes care of its timers. + * + * If the next event expires in the future, then the event has been + * updated and there are no timers to expire right now. The CPU which + * updated the event takes care when hierarchy is completely + * idle. 
Otherwise the migrator does it as the event is enqueued. + */ + if (!tmc->online || tmc->remote || tmc->cpuevt.ignore || + now < tmc->cpuevt.nextevt.expires) { + raw_spin_unlock_irq(&tmc->lock); + return; + } + + tmc->remote = true; + WRITE_ONCE(tmc->wakeup, KTIME_MAX); + + /* Drop the lock to allow the remote CPU to exit idle */ + raw_spin_unlock_irq(&tmc->lock); + + if (cpu != smp_processor_id()) + timer_expire_remote(cpu); + + /* + * Lock ordering needs to be preserved - timer_base locks before tmigr + * related locks (see section "Locking rules" in the documentation at + * the top). During fetching the next timer interrupt, also tmc->lock + * needs to be held. Otherwise there is a possible race window against + * the CPU itself when it comes out of idle, updates the first timer in + * the hierarchy and goes back to idle. + * + * timer base locks are dropped as fast as possible: After checking + * whether the remote CPU went offline in the meantime and after + * fetching the next remote timer interrupt. Dropping the locks as fast + * as possible keeps the locking region small and prevents holding + * several (unnecessary) locks during walking the hierarchy for updating + * the timerqueue and group events. + */ + local_irq_disable(); + timer_lock_remote_bases(cpu); + raw_spin_lock(&tmc->lock); + + /* + * When the CPU went offline in the meantime, no hierarchy walk has to + * be done for updating the queued events, because the walk was + * already done during marking the CPU offline in the hierarchy. + * + * When the CPU is no longer idle, the CPU takes care of the timers and + * also of the timers in the hierarchy. + * + * (See also section "Required event and timerqueue update after a + * remote expiry" in the documentation at the top) + */ + if (!tmc->online || !tmc->idle) { + timer_unlock_remote_bases(cpu); + goto unlock; + } + + /* next event of CPU */ + fetch_next_timer_interrupt_remote(jif, now, &tevt, cpu); + timer_unlock_remote_bases(cpu); + + data.nextexp = tevt.global; + data.firstexp = KTIME_MAX; + data.evt = &tmc->cpuevt; + data.remote = true; + + /* + * The update is done even when there is no 'new' global timer pending + * on the remote CPU (see section "Required event and timerqueue update + * after a remote expiry" in the documentation at the top) + */ + walk_groups(&tmigr_new_timer_up, &data, tmc); + +unlock: + tmc->remote = false; + raw_spin_unlock_irq(&tmc->lock); +} + +static bool tmigr_handle_remote_up(struct tmigr_group *group, + struct tmigr_group *child, + void *ptr) +{ + struct tmigr_remote_data *data = ptr; + struct tmigr_event *evt; + unsigned long jif; + u8 childmask; + u64 now; + + jif = data->basej; + now = data->now; + + childmask = data->childmask; + +again: + /* + * Handle the group only if @childmask is the migrator or if the + * group has no migrator. Otherwise the group is active and is + * handled by its own migrator. + */ + if (!tmigr_check_migrator(group, childmask)) + return true; + + raw_spin_lock_irq(&group->lock); + + evt = tmigr_next_expired_groupevt(group, now); + + if (evt) { + unsigned int remote_cpu = evt->cpu; + + raw_spin_unlock_irq(&group->lock); + + tmigr_handle_remote_cpu(remote_cpu, now, jif); + + /* check if there is another event, that needs to be handled */ + goto again; + } + + /* + * Update of childmask for the next level and keep track of the expiry + * of the first event that needs to be handled (group->next_expiry was + * updated by tmigr_next_expired_groupevt(), next was set by + * tmigr_handle_remote_cpu()). 
+ */ + data->childmask = group->childmask; + data->firstexp = group->next_expiry; + + raw_spin_unlock_irq(&group->lock); + + return false; +} + +/** + * tmigr_handle_remote() - Handle global timers of remote idle CPUs + * + * Called from the timer soft interrupt with interrupts enabled. + */ +void tmigr_handle_remote(void) +{ + struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); + struct tmigr_remote_data data; + + if (tmigr_is_not_available(tmc)) + return; + + data.childmask = tmc->childmask; + data.firstexp = KTIME_MAX; + + /* + * NOTE: This is a doubled check because the migrator test will be done + * in tmigr_handle_remote_up() anyway. Keep this check to speed up the + * return when nothing has to be done. + */ + if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask)) + return; + + data.now = get_jiffies_update(&data.basej); + + /* + * Update @tmc->wakeup only at the end and do not reset @tmc->wakeup to + * KTIME_MAX. Even if tmc->lock is not held during the whole remote + * handling, tmc->wakeup is fine to be stale as it is called in + * interrupt context and tick_nohz_next_event() is executed in interrupt + * exit path only after processing the last pending interrupt. + */ + + __walk_groups(&tmigr_handle_remote_up, &data, tmc); + + raw_spin_lock_irq(&tmc->lock); + WRITE_ONCE(tmc->wakeup, data.firstexp); + raw_spin_unlock_irq(&tmc->lock); +} + +static bool tmigr_requires_handle_remote_up(struct tmigr_group *group, + struct tmigr_group *child, + void *ptr) +{ + struct tmigr_remote_data *data = ptr; + u8 childmask; + + childmask = data->childmask; + + /* + * Handle the group only if the child is the migrator or if the group + * has no migrator. Otherwise the group is active and is handled by its + * own migrator. + */ + if (!tmigr_check_migrator(group, childmask)) + return true; + + /* + * When there is a parent group and the CPU which triggered the + * hierarchy walk is not active, proceed the walk to reach the top level + * group before reading the next_expiry value. + */ + if (group->parent && !data->tmc_active) + goto out; + + /* + * The lock is required on 32bit architectures to read the variable + * consistently with a concurrent writer. On 64bit the lock is not + * required because the read operation is not split and so it is always + * consistent. + */ + if (IS_ENABLED(CONFIG_64BIT)) { + data->firstexp = READ_ONCE(group->next_expiry); + if (data->now >= data->firstexp) { + data->check = true; + return true; + } + } else { + raw_spin_lock(&group->lock); + data->firstexp = group->next_expiry; + if (data->now >= group->next_expiry) { + data->check = true; + raw_spin_unlock(&group->lock); + return true; + } + raw_spin_unlock(&group->lock); + } + +out: + /* Update of childmask for the next level */ + data->childmask = group->childmask; + return false; +} + +/** + * tmigr_requires_handle_remote() - Check the need of remote timer handling + * + * Must be called with interrupts disabled. + */ +bool tmigr_requires_handle_remote(void) +{ + struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); + struct tmigr_remote_data data; + unsigned long jif; + bool ret = false; + + if (tmigr_is_not_available(tmc)) + return ret; + + data.now = get_jiffies_update(&jif); + data.childmask = tmc->childmask; + data.firstexp = KTIME_MAX; + data.tmc_active = !tmc->idle; + data.check = false; + + /* + * If the CPU is active, walk the hierarchy to check whether a remote + * expiry is required. + * + * Check is done lockless as interrupts are disabled and @tmc->idle is + * set only by the local CPU. 
+ */ + if (!tmc->idle) { + __walk_groups(&tmigr_requires_handle_remote_up, &data, tmc); + + return data.check; + } + + /* + * When the CPU is idle, compare @tmc->wakeup with @data.now. The lock + * is required on 32bit architectures to read the variable consistently + * with a concurrent writer. On 64bit the lock is not required because + * the read operation is not split and so it is always consistent. + */ + if (IS_ENABLED(CONFIG_64BIT)) { + if (data.now >= READ_ONCE(tmc->wakeup)) + return true; + } else { + raw_spin_lock(&tmc->lock); + if (data.now >= tmc->wakeup) + ret = true; + raw_spin_unlock(&tmc->lock); + } + + return ret; +} + +/** + * tmigr_cpu_new_timer() - enqueue next global timer into hierarchy (idle tmc) + * @nextexp: Next expiry of global timer (or KTIME_MAX if not) + * + * The CPU is already deactivated in the timer migration + * hierarchy. tick_nohz_get_sleep_length() calls tick_nohz_next_event() + * and thereby the timer idle path is executed once more. @tmc->wakeup + * holds the first timer, when the timer migration hierarchy is + * completely idle. + * + * Returns the first timer that needs to be handled by this CPU or KTIME_MAX if + * nothing needs to be done. + */ +u64 tmigr_cpu_new_timer(u64 nextexp) +{ + struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu); + u64 ret; + + if (tmigr_is_not_available(tmc)) + return nextexp; + + raw_spin_lock(&tmc->lock); + + ret = READ_ONCE(tmc->wakeup); + if (nextexp != KTIME_MAX) { + if (nextexp != tmc->cpuevt.nextevt.expires || + tmc->cpuevt.ignore) { + ret = tmigr_new_timer(tmc, nextexp); + } + } + /* + * Make sure the reevaluation of timers in idle path will not miss an + * event. + */ + WRITE_ONCE(tmc->wakeup, ret); + + raw_spin_unlock(&tmc->lock); + return ret; +} + +static bool tmigr_inactive_up(struct tmigr_group *group, + struct tmigr_group *child, + void *ptr) +{ + union tmigr_state curstate, newstate, childstate; + struct tmigr_walk *data = ptr; + bool walk_done; + u8 childmask; + + childmask = data->childmask; + childstate.state = 0; + + /* + * The memory barrier is paired with the cmpxchg() in tmigr_active_up() + * to make sure the updates of child and group states are ordered. The + * ordering is mandatory, as the group state change depends on the child + * state. + */ + curstate.state = atomic_read_acquire(&group->migr_state); + + for (;;) { + if (child) + childstate.state = atomic_read(&child->migr_state); + + newstate = curstate; + walk_done = true; + + /* Reset active bit when the child is no longer active */ + if (!childstate.active) + newstate.active &= ~childmask; + + if (newstate.migrator == childmask) { + /* + * Find a new migrator for the group, because the child + * group is idle! + */ + if (!childstate.active) { + unsigned long new_migr_bit, active = newstate.active; + + new_migr_bit = find_first_bit(&active, BIT_CNT); + + if (new_migr_bit != BIT_CNT) { + newstate.migrator = BIT(new_migr_bit); + } else { + newstate.migrator = TMIGR_NONE; + + /* Changes need to be propagated */ + walk_done = false; + } + } + } + + newstate.seq++; + + WARN_ON_ONCE((newstate.migrator != TMIGR_NONE) && !(newstate.active)); + + if (atomic_try_cmpxchg(&group->migr_state, &curstate.state, + newstate.state)) + break; + + /* + * The memory barrier is paired with the cmpxchg() in + * tmigr_active_up() to make sure the updates of child and group + * states are ordered. It is required only when the above + * try_cmpxchg() fails. 
+	 */
+		smp_mb__after_atomic();
+	}
+
+	data->remote = false;
+
+	/* Event Handling */
+	tmigr_update_events(group, child, data);
+
+	if (group->parent && (walk_done == false))
+		data->childmask = group->childmask;
+
+	/*
+	 * data->firstexp was set by tmigr_update_events() and contains the
+	 * expiry of the first global event which needs to be handled. It
+	 * differs from KTIME_MAX if:
+	 * - group is the top level group and
+	 * - group is idle (which means CPU was the last active CPU in the
+	 *   hierarchy) and
+	 * - there is a pending event in the hierarchy
+	 */
+	WARN_ON_ONCE(data->firstexp != KTIME_MAX && group->parent);
+
+	return walk_done;
+}
+
+static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp)
+{
+	struct tmigr_walk data = { .nextexp = nextexp,
+				   .firstexp = KTIME_MAX,
+				   .evt = &tmc->cpuevt,
+				   .childmask = tmc->childmask };
+
+	/*
+	 * If nextexp is KTIME_MAX, the CPU event will be ignored because the
+	 * local timer expires before the global timer, no global timer is set
+	 * or CPU goes offline.
+	 */
+	if (nextexp != KTIME_MAX)
+		tmc->cpuevt.ignore = false;
+
+	walk_groups(&tmigr_inactive_up, &data, tmc);
+	return data.firstexp;
+}
+
+/**
+ * tmigr_cpu_deactivate() - Put current CPU into inactive state
+ * @nextexp:	The next global timer expiry of the current CPU
+ *
+ * Must be called with interrupts disabled.
+ *
+ * Return: the next event expiry of the current CPU or the next event expiry
+ * from the hierarchy if this CPU is the top level migrator or the hierarchy is
+ * completely idle.
+ */
+u64 tmigr_cpu_deactivate(u64 nextexp)
+{
+	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+	u64 ret;
+
+	if (tmigr_is_not_available(tmc))
+		return nextexp;
+
+	raw_spin_lock(&tmc->lock);
+
+	ret = __tmigr_cpu_deactivate(tmc, nextexp);
+
+	tmc->idle = true;
+
+	/*
+	 * Make sure the reevaluation of timers in idle path will not miss an
+	 * event.
+	 */
+	WRITE_ONCE(tmc->wakeup, ret);
+
+	raw_spin_unlock(&tmc->lock);
+	return ret;
+}
+
+/**
+ * tmigr_quick_check() - Quick forecast of next tmigr event when CPU wants to
+ *			 go idle
+ * @nextevt:	The next global timer expiry of the current CPU
+ *
+ * Return:
+ * * KTIME_MAX		- when it is probable that nothing has to be done (the
+ *			  CPU is not the only one in the level 0 group; or it is
+ *			  the only one in the level 0 group, but more than a
+ *			  single group is active on the way to the top level)
+ * * nextevt		- when the CPU is offline and has to handle the timer on
+ *			  its own, or when on the way to the top only a single
+ *			  child is active in every group but @nextevt is before
+ *			  the next_expiry of the top level group
+ * * next_expiry (top)	- the next_expiry value of the top level group, when on
+ *			  the way to the top only a single child is active in
+ *			  every group and @nextevt is after this value.
+ */
+u64 tmigr_quick_check(u64 nextevt)
+{
+	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+	struct tmigr_group *group = tmc->tmgroup;
+
+	if (tmigr_is_not_available(tmc))
+		return nextevt;
+
+	if (WARN_ON_ONCE(tmc->idle))
+		return nextevt;
+
+	if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->childmask))
+		return KTIME_MAX;
+
+	do {
+		if (!tmigr_check_lonely(group)) {
+			return KTIME_MAX;
+		} else if (!group->parent) {
+			u64 first_global = READ_ONCE(group->next_expiry);
+
+			return min_t(u64, nextevt, first_global);
+		}
+		group = group->parent;
+	} while (group);
+
+	return KTIME_MAX;
+}
+
+static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
+			     int node)
+{
+	union tmigr_state s;
+
+	raw_spin_lock_init(&group->lock);
+
+	group->level = lvl;
+	group->numa_node = lvl < tmigr_crossnode_level ? node : NUMA_NO_NODE;
+
+	group->num_children = 0;
+
+	s.migrator = TMIGR_NONE;
+	s.active = 0;
+	s.seq = 0;
+	atomic_set(&group->migr_state, s.state);
+
+	timerqueue_init_head(&group->events);
+	timerqueue_init(&group->groupevt.nextevt);
+	group->groupevt.nextevt.expires = KTIME_MAX;
+	WRITE_ONCE(group->next_expiry, KTIME_MAX);
+	group->groupevt.ignore = true;
+}
+
+static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node,
+					   unsigned int lvl)
+{
+	struct tmigr_group *tmp, *group = NULL;
+
+	lockdep_assert_held(&tmigr_mutex);
+
+	/* Try to attach to an existing group first */
+	list_for_each_entry(tmp, &tmigr_level_list[lvl], list) {
+		/*
+		 * If @lvl is below the cross NUMA node level, check whether
+		 * this group belongs to the same NUMA node.
+		 */
+		if (lvl < tmigr_crossnode_level && tmp->numa_node != node)
+			continue;
+
+		/* Capacity left? */
+		if (tmp->num_children >= TMIGR_CHILDREN_PER_GROUP)
+			continue;
+
+		/*
+		 * TODO: A possible further improvement: Make sure that all CPU
+		 * siblings end up in the same group of the lowest level of the
+		 * hierarchy. Relying on the topology sibling mask would be a
+		 * reasonable solution.
+		 */
+
+		group = tmp;
+		break;
+	}
+
+	if (group)
+		return group;
+
+	/* Allocate and set up a new group */
+	group = kzalloc_node(sizeof(*group), GFP_KERNEL, node);
+	if (!group)
+		return ERR_PTR(-ENOMEM);
+
+	tmigr_init_group(group, lvl, node);
+
+	/* Setup successful. Add it to the hierarchy */
+	list_add(&group->list, &tmigr_level_list[lvl]);
+	return group;
+}
+
+static void tmigr_connect_child_parent(struct tmigr_group *child,
+				       struct tmigr_group *parent)
+{
+	union tmigr_state childstate;
+
+	raw_spin_lock_irq(&child->lock);
+	raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
+
+	child->parent = parent;
+	child->childmask = BIT(parent->num_children++);
+
+	raw_spin_unlock(&parent->lock);
+	raw_spin_unlock_irq(&child->lock);
+
+	/*
+	 * To prevent inconsistent states, active children need to be active in
+	 * the new parent as well. Inactive children are already marked inactive
+	 * in the parent group:
+	 *
+	 * * When new groups were created by tmigr_setup_groups() starting from
+	 *   the lowest level (and no higher than one level below the current
+	 *   top level), they are not active. They will be set active when
+	 *   the new online CPU comes active.
+	 *
+	 * * But if a new group above the current top level is required, it is
+	 *   mandatory to propagate the active state of the already existing
+	 *   child to the new parent. So tmigr_connect_child_parent() is
+	 *   executed with the formerly top level group (child) and the newly
+	 *   created group (parent).
+	 */
+	childstate.state = atomic_read(&child->migr_state);
+	if (childstate.migrator != TMIGR_NONE) {
+		struct tmigr_walk data;
+
+		data.childmask = child->childmask;
+
+		/*
+		 * Only one new level can be created at a time. When connecting
+		 * the child to the parent and setting the child active while
+		 * the parent is inactive, the parent needs to be the uppermost
+		 * level. Otherwise something went wrong!
+		 */
+		WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent);
+	}
+}
+
+static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
+{
+	struct tmigr_group *group, *child, **stack;
+	int top = 0, err = 0, i = 0;
+	struct list_head *lvllist;
+
+	stack = kcalloc(tmigr_hierarchy_levels, sizeof(*stack), GFP_KERNEL);
+	if (!stack)
+		return -ENOMEM;
+
+	do {
+		group = tmigr_get_group(cpu, node, i);
+		if (IS_ERR(group)) {
+			err = PTR_ERR(group);
+			break;
+		}
+
+		top = i;
+		stack[i++] = group;
+
+		/*
+		 * When fewer CPUs of a system are booted than CPUs are
+		 * available, not all calculated hierarchy levels are required.
+		 *
+		 * The loop is aborted as soon as the highest level, which might
+		 * be different from tmigr_hierarchy_levels, contains only a
+		 * single group.
+		 */
+		if (group->parent || i == tmigr_hierarchy_levels ||
+		    (list_empty(&tmigr_level_list[i]) &&
+		     list_is_singular(&tmigr_level_list[i - 1])))
+			break;
+
+	} while (i < tmigr_hierarchy_levels);
+
+	do {
+		group = stack[--i];
+
+		if (err < 0) {
+			list_del(&group->list);
+			kfree(group);
+			continue;
+		}
+
+		WARN_ON_ONCE(i != group->level);
+
+		/*
+		 * Update tmc -> group / child -> group connection
+		 */
+		if (i == 0) {
+			struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+
+			raw_spin_lock_irq(&group->lock);
+
+			tmc->tmgroup = group;
+			tmc->childmask = BIT(group->num_children++);
+
+			raw_spin_unlock_irq(&group->lock);
+
+			/* There are no children that need to be connected */
+			continue;
+		} else {
+			child = stack[i - 1];
+			tmigr_connect_child_parent(child, group);
+		}
+
+		/* check if uppermost level was newly created */
+		if (top != i)
+			continue;
+
+		WARN_ON_ONCE(top == 0);
+
+		lvllist = &tmigr_level_list[top];
+		if (group->num_children == 1 && list_is_singular(lvllist)) {
+			lvllist = &tmigr_level_list[top - 1];
+			list_for_each_entry(child, lvllist, list) {
+				if (child->parent)
+					continue;
+
+				tmigr_connect_child_parent(child, group);
+			}
+		}
+	} while (i > 0);
+
+	kfree(stack);
+
+	return err;
+}
+
+static int tmigr_add_cpu(unsigned int cpu)
+{
+	int node = cpu_to_node(cpu);
+	int ret;
+
+	mutex_lock(&tmigr_mutex);
+	ret = tmigr_setup_groups(cpu, node);
+	mutex_unlock(&tmigr_mutex);
+
+	return ret;
+}
+
+static int tmigr_cpu_online(unsigned int cpu)
+{
+	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+	int ret;
+
+	/* First online attempt? Initialize CPU data */
+	if (!tmc->tmgroup) {
+		raw_spin_lock_init(&tmc->lock);
+
+		ret = tmigr_add_cpu(cpu);
+		if (ret < 0)
+			return ret;
+
+		if (tmc->childmask == 0)
+			return -EINVAL;
+
+		timerqueue_init(&tmc->cpuevt.nextevt);
+		tmc->cpuevt.nextevt.expires = KTIME_MAX;
+		tmc->cpuevt.ignore = true;
+		tmc->cpuevt.cpu = cpu;
+
+		tmc->remote = false;
+		WRITE_ONCE(tmc->wakeup, KTIME_MAX);
+	}
+	raw_spin_lock_irq(&tmc->lock);
+	tmc->idle = timer_base_is_idle();
+	if (!tmc->idle)
+		__tmigr_cpu_activate(tmc);
+	tmc->online = true;
+	raw_spin_unlock_irq(&tmc->lock);
+	return 0;
+}
+
+/*
+ * tmigr_trigger_active() - trigger a CPU to become active again
+ *
+ * This function is executed on a CPU which is part of cpu_online_mask, when the
+ * last active CPU in the hierarchy is offlining.
With this, it is ensured that
+ * the other CPU is active and takes over the migrator duty.
+ */
+static long tmigr_trigger_active(void *unused)
+{
+	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+
+	WARN_ON_ONCE(!tmc->online || tmc->idle);
+
+	return 0;
+}
+
+static int tmigr_cpu_offline(unsigned int cpu)
+{
+	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+	int migrator;
+	u64 firstexp;
+
+	raw_spin_lock_irq(&tmc->lock);
+	tmc->online = false;
+	WRITE_ONCE(tmc->wakeup, KTIME_MAX);
+
+	/*
+	 * The CPU has to handle the local events on its own, when on the way
+	 * to offline; therefore the nextevt value is set to KTIME_MAX
+	 */
+	firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
+	raw_spin_unlock_irq(&tmc->lock);
+
+	if (firstexp != KTIME_MAX) {
+		migrator = cpumask_any_but(cpu_online_mask, cpu);
+		work_on_cpu(migrator, tmigr_trigger_active, NULL);
+	}
+
+	return 0;
+}
+
+static int __init tmigr_init(void)
+{
+	unsigned int cpulvl, nodelvl, cpus_per_node, i;
+	unsigned int nnodes = num_possible_nodes();
+	unsigned int ncpus = num_possible_cpus();
+	int ret = -ENOMEM;
+
+	BUILD_BUG_ON_NOT_POWER_OF_2(TMIGR_CHILDREN_PER_GROUP);
+
+	/* Nothing to do if running on UP */
+	if (ncpus == 1)
+		return 0;
+
+	/*
+	 * Calculate the required hierarchy levels. Unfortunately there is no
+	 * reliable information available, unless all possible CPUs have been
+	 * brought up and all NUMA nodes are populated.
+	 *
+	 * Estimate the number of levels with the number of possible nodes and
+	 * the number of possible CPUs. Assume CPUs are spread evenly across
+	 * nodes. We cannot rely on cpumask_of_node() because it only works for
+	 * online CPUs.
+	 */
+	cpus_per_node = DIV_ROUND_UP(ncpus, nnodes);
+
+	/* Calculate the hierarchy levels required to hold the CPUs of a node */
+	cpulvl = DIV_ROUND_UP(order_base_2(cpus_per_node),
+			      ilog2(TMIGR_CHILDREN_PER_GROUP));
+
+	/* Calculate the extra levels to connect all nodes */
+	nodelvl = DIV_ROUND_UP(order_base_2(nnodes),
+			       ilog2(TMIGR_CHILDREN_PER_GROUP));
+
+	tmigr_hierarchy_levels = cpulvl + nodelvl;
+
+	/*
+	 * If a NUMA node spawns more than one CPU level group then the next
+	 * level(s) of the hierarchy contain groups which handle all CPU groups
+	 * of the same NUMA node. The level above goes across NUMA nodes. Store
+	 * this information for the setup code to decide in which level node
+	 * matching is no longer required.
+	 */
+	tmigr_crossnode_level = cpulvl;
+
+	tmigr_level_list = kcalloc(tmigr_hierarchy_levels, sizeof(struct list_head), GFP_KERNEL);
+	if (!tmigr_level_list)
+		goto err;
+
+	for (i = 0; i < tmigr_hierarchy_levels; i++)
+		INIT_LIST_HEAD(&tmigr_level_list[i]);
+
+	pr_info("Timer migration: %d hierarchy levels; %d children per group;"
+		" %d crossnode level\n",
+		tmigr_hierarchy_levels, TMIGR_CHILDREN_PER_GROUP,
+		tmigr_crossnode_level);
+
+	ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
+				tmigr_cpu_online, tmigr_cpu_offline);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	pr_err("Timer migration setup failed\n");
+	return ret;
+}
+late_initcall(tmigr_init);
diff --git a/kernel/time/timer_migration.h b/kernel/time/timer_migration.h
new file mode 100644
index 000000000000..6c37d94a37d9
--- /dev/null
+++ b/kernel/time/timer_migration.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _KERNEL_TIME_MIGRATION_H
+#define _KERNEL_TIME_MIGRATION_H
+
+/* Per group capacity. Must be a power of 2!
*/ +#define TMIGR_CHILDREN_PER_GROUP 8 + +/** + * struct tmigr_event - a timer event associated to a CPU + * @nextevt: The node to enqueue an event in the parent group queue + * @cpu: The CPU to which this event belongs + * @ignore: Hint whether the event could be ignored; it is set when + * CPU or group is active; + */ +struct tmigr_event { + struct timerqueue_node nextevt; + unsigned int cpu; + bool ignore; +}; + +/** + * struct tmigr_group - timer migration hierarchy group + * @lock: Lock protecting the event information and group hierarchy + * information during setup + * @parent: Pointer to the parent group + * @groupevt: Next event of the group which is only used when the + * group is !active. The group event is then queued into + * the parent timer queue. + * Ignore bit of @groupevt is set when the group is active. + * @next_expiry: Base monotonic expiry time of the next event of the + * group; It is used for the racy lockless check whether a + * remote expiry is required; it is always reliable + * @events: Timer queue for child events queued in the group + * @migr_state: State of the group (see union tmigr_state) + * @level: Hierarchy level of the group; Required during setup + * @numa_node: Required for setup only to make sure CPU and low level + * group information is NUMA local. It is set to NUMA node + * as long as the group level is per NUMA node (level < + * tmigr_crossnode_level); otherwise it is set to + * NUMA_NO_NODE + * @num_children: Counter of group children to make sure the group is only + * filled with TMIGR_CHILDREN_PER_GROUP; Required for setup + * only + * @childmask: childmask of the group in the parent group; is set + * during setup and will never change; can be read + * lockless + * @list: List head that is added to the per level + * tmigr_level_list; is required during setup when a + * new group needs to be connected to the existing + * hierarchy groups + */ +struct tmigr_group { + raw_spinlock_t lock; + struct tmigr_group *parent; + struct tmigr_event groupevt; + u64 next_expiry; + struct timerqueue_head events; + atomic_t migr_state; + unsigned int level; + int numa_node; + unsigned int num_children; + u8 childmask; + struct list_head list; +}; + +/** + * struct tmigr_cpu - timer migration per CPU group + * @lock: Lock protecting the tmigr_cpu group information + * @online: Indicates whether the CPU is online; In deactivate path + * it is required to know whether the migrator in the top + * level group is to be set offline, while a timer is + * pending. Then another online CPU needs to be notified to + * take over the migrator role. Furthermore the information + * is required in CPU hotplug path as the CPU is able to go + * idle before the timer migration hierarchy hotplug AP is + * reached. During this phase, the CPU has to handle the + * global timers on its own and must not act as a migrator. + * @idle: Indicates whether the CPU is idle in the timer migration + * hierarchy + * @remote: Is set when timers of the CPU are expired remotely + * @tmgroup: Pointer to the parent group + * @childmask: childmask of tmigr_cpu in the parent group + * @wakeup: Stores the first timer when the timer migration + * hierarchy is completely idle and remote expiry was done; + * is returned to timer code in the idle path and is only + * used in idle path. 
+ * @cpuevt:		CPU event which could be enqueued into the parent group
+ */
+struct tmigr_cpu {
+	raw_spinlock_t		lock;
+	bool			online;
+	bool			idle;
+	bool			remote;
+	struct tmigr_group	*tmgroup;
+	u8			childmask;
+	u64			wakeup;
+	struct tmigr_event	cpuevt;
+};
+
+/**
+ * union tmigr_state - state of tmigr_group
+ * @state:	Combined version of the state - only used for atomic
+ *		read/cmpxchg function
+ * @struct:	Split version of the state - only use the struct members to
+ *		update information to stay independent of endianness
+ */
+union tmigr_state {
+	u32 state;
+	/**
+	 * struct - split state of tmigr_group
+	 * @active:	Contains each childmask bit of the active children
+	 * @migrator:	Contains childmask of the child which is migrator
+	 * @seq:	Sequence counter needs to be increased when an update
+	 *		to the tmigr_state is done. It prevents a race when
+	 *		updates in the child groups are propagated in changed
+	 *		order. Detailed information about the scenario is
+	 *		given in the documentation at the beginning of
+	 *		timer_migration.c.
+	 */
+	struct {
+		u8	active;
+		u8	migrator;
+		u16	seq;
+	} __packed;
+};

+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern void tmigr_handle_remote(void);
+extern bool tmigr_requires_handle_remote(void);
+extern void tmigr_cpu_activate(void);
+extern u64 tmigr_cpu_deactivate(u64 nextevt);
+extern u64 tmigr_cpu_new_timer(u64 nextevt);
+extern u64 tmigr_quick_check(u64 nextevt);
+#else
+static inline void tmigr_handle_remote(void) { }
+static inline bool tmigr_requires_handle_remote(void) { return false; }
+static inline void tmigr_cpu_activate(void) { }
+#endif
+
+#endif
-- cgit v1.2.3

From 36e40df35d2c1891fe58241640c7c95de4aa739b Mon Sep 17 00:00:00 2001
From: Anna-Maria Behnsen
Date: Thu, 22 Feb 2024 11:34:03 +0100
Subject: timer_migration: Add tracepoints

The timer pull logic needs proper debugging aids. Add tracepoints so the
hierarchical idle machinery can be diagnosed.

Signed-off-by: Anna-Maria Behnsen
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/r/20240222103403.31923-1-anna-maria@linutronix.de
---
 kernel/time/timer_migration.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

(limited to 'kernel')

diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 23cb6ea3d44e..d85aa2afb969 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -14,6 +14,9 @@
 #include "timer_migration.h"
 #include "tick-internal.h"

+#define CREATE_TRACE_POINTS
+#include <trace/events/timer_migration.h>
+
 /*
  * The timer migration mechanism is built on a hierarchy of groups.
The * lowest level group contains CPUs, the next level groups of CPU groups @@ -663,6 +666,8 @@ static bool tmigr_active_up(struct tmigr_group *group, */ group->groupevt.ignore = true; + trace_tmigr_group_set_cpu_active(group, newstate, childmask); + return walk_done; } @@ -672,6 +677,8 @@ static void __tmigr_cpu_activate(struct tmigr_cpu *tmc) data.childmask = tmc->childmask; + trace_tmigr_cpu_active(tmc); + tmc->cpuevt.ignore = true; WRITE_ONCE(tmc->wakeup, KTIME_MAX); @@ -829,6 +836,9 @@ check_toplvl: data->firstexp = tmigr_next_groupevt_expires(group); } + trace_tmigr_update_events(child, group, childstate, groupstate, + nextexp); + unlock: raw_spin_unlock(&group->lock); @@ -863,6 +873,8 @@ static u64 tmigr_new_timer(struct tmigr_cpu *tmc, u64 nextexp) if (tmc->remote) return KTIME_MAX; + trace_tmigr_cpu_new_timer(tmc); + tmc->cpuevt.ignore = false; data.remote = false; @@ -904,6 +916,8 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now, return; } + trace_tmigr_handle_remote_cpu(tmc); + tmc->remote = true; WRITE_ONCE(tmc->wakeup, KTIME_MAX); @@ -984,6 +998,7 @@ static bool tmigr_handle_remote_up(struct tmigr_group *group, childmask = data->childmask; + trace_tmigr_handle_remote(group); again: /* * Handle the group only if @childmask is the migrator or if the @@ -1206,6 +1221,7 @@ u64 tmigr_cpu_new_timer(u64 nextexp) */ WRITE_ONCE(tmc->wakeup, ret); + trace_tmigr_cpu_new_timer_idle(tmc, nextexp); raw_spin_unlock(&tmc->lock); return ret; } @@ -1298,6 +1314,8 @@ static bool tmigr_inactive_up(struct tmigr_group *group, */ WARN_ON_ONCE(data->firstexp != KTIME_MAX && group->parent); + trace_tmigr_group_set_cpu_inactive(group, newstate, childmask); + return walk_done; } @@ -1350,6 +1368,7 @@ u64 tmigr_cpu_deactivate(u64 nextexp) */ WRITE_ONCE(tmc->wakeup, ret); + trace_tmigr_cpu_idle(tmc, nextexp); raw_spin_unlock(&tmc->lock); return ret; } @@ -1467,6 +1486,7 @@ static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node, /* Setup successful. Add it to the hierarchy */ list_add(&group->list, &tmigr_level_list[lvl]); + trace_tmigr_group_set(group); return group; } @@ -1484,6 +1504,8 @@ static void tmigr_connect_child_parent(struct tmigr_group *child, raw_spin_unlock(&parent->lock); raw_spin_unlock_irq(&child->lock); + trace_tmigr_connect_child_parent(child); + /* * To prevent inconsistent states, active children need to be active in * the new parent as well. 
Inactive children are already marked inactive @@ -1575,6 +1597,8 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node) raw_spin_unlock_irq(&group->lock); + trace_tmigr_connect_cpu_parent(tmc); + /* There are no children that need to be connected */ continue; } else { @@ -1642,6 +1666,7 @@ static int tmigr_cpu_online(unsigned int cpu) WRITE_ONCE(tmc->wakeup, KTIME_MAX); } raw_spin_lock_irq(&tmc->lock); + trace_tmigr_cpu_online(tmc); tmc->idle = timer_base_is_idle(); if (!tmc->idle) __tmigr_cpu_activate(tmc); @@ -1681,6 +1706,7 @@ static int tmigr_cpu_offline(unsigned int cpu) * offline; Therefore nextevt value is set to KTIME_MAX */ firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX); + trace_tmigr_cpu_offline(tmc); raw_spin_unlock_irq(&tmc->lock); if (firstexp != KTIME_MAX) { -- cgit v1.2.3 From b2cf7507e18649a30512515ec0ca89f26b2c2d0f Mon Sep 17 00:00:00 2001 From: Anna-Maria Behnsen Date: Wed, 21 Feb 2024 10:05:48 +0100 Subject: timers: Always queue timers on the local CPU The timer pull model is in place so we can remove the heuristics which try to guess the best target CPU at enqueue/modification time. All non pinned timers are queued on the local CPU in the separate storage and eventually pulled at expiry time to a remote CPU. Originally-by: Richard Cochran (linutronix GmbH) Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Reviewed-by: Frederic Weisbecker Link: https://lore.kernel.org/r/20240221090548.36600-21-anna-maria@linutronix.de --- kernel/time/timer.c | 36 +++++++++++++++--------------------- 1 file changed, 15 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 3ed135c8de43..4f4930da6448 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -635,11 +635,16 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer) /* * We might have to IPI the remote CPU if the base is idle and the - * timer is not deferrable. If the other CPU is on the way to idle - * then it can't set base->is_idle as we hold the base lock: + * timer is pinned. If it is a non pinned timer, it is only queued + * on the remote CPU, when timer was running during queueing. Then + * everything is handled by remote CPU anyway. If the other CPU is + * on the way to idle then it can't set base->is_idle as we hold + * the base lock: */ - if (base->is_idle) + if (base->is_idle) { + WARN_ON_ONCE(!(timer->flags & TIMER_PINNED)); wake_up_nohz_cpu(base->cpu); + } } /* @@ -986,17 +991,6 @@ static inline struct timer_base *get_timer_base(u32 tflags) return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK); } -static inline struct timer_base * -get_target_base(struct timer_base *base, unsigned tflags) -{ -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) - if (static_branch_likely(&timers_migration_enabled) && - !(tflags & TIMER_PINNED)) - return get_timer_cpu_base(tflags, get_nohz_timer_target()); -#endif - return get_timer_this_cpu_base(tflags); -} - static inline void __forward_timer_base(struct timer_base *base, unsigned long basej) { @@ -1151,7 +1145,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option if (!ret && (options & MOD_TIMER_PENDING_ONLY)) goto out_unlock; - new_base = get_target_base(base, timer->flags); + new_base = get_timer_this_cpu_base(timer->flags); if (base != new_base) { /* @@ -2297,7 +2291,7 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, * granularity skew (by design). 
 */
	if (!base_local->is_idle && time_after(nextevt, basej + 1)) {
-		base_local->is_idle = base_global->is_idle = true;
+		base_local->is_idle = true;
 		trace_timer_base_idle(true, base_local->cpu);
 	}
 	*idle = base_local->is_idle;
@@ -2363,13 +2357,13 @@ u64 timer_base_try_to_set_idle(unsigned long basej, u64 basem, bool *idle)
 void timer_clear_idle(void)
 {
 	/*
-	 * We do this unlocked. The worst outcome is a remote enqueue sending
-	 * a pointless IPI, but taking the lock would just make the window for
-	 * sending the IPI a few instructions smaller for the cost of taking
-	 * the lock in the exit from idle path.
+	 * We do this unlocked. The worst outcome is a remote pinned timer
+	 * enqueue sending a pointless IPI, but taking the lock would just
+	 * make the window for sending the IPI a few instructions smaller
+	 * for the cost of taking the lock in the exit from idle
+	 * path. Required for BASE_LOCAL only.
 	 */
 	__this_cpu_write(timer_bases[BASE_LOCAL].is_idle, false);
-	__this_cpu_write(timer_bases[BASE_GLOBAL].is_idle, false);
 	trace_timer_base_idle(false, smp_processor_id());

 	/* Activate without holding the timer_base->lock */
-- cgit v1.2.3

From ccdec92198df0c91f45a68f971771b6b0c1ba02d Mon Sep 17 00:00:00 2001
From: Xuewen Yan
Date: Thu, 22 Feb 2024 15:28:08 +0800
Subject: workqueue: Control intensive warning threshold through cmdline

When CONFIG_WQ_CPU_INTENSIVE_REPORT is set, the kernel will repeatedly
report work functions which violate intensive_threshold_us. Currently
the warning is only triggered once the violation count exceeds 4 and is
a power of 2.

However, even a single long work execution can delay other work items
for a long time and cause problems. In order to control the warning
threshold freely, add a boot argument so that the user can choose the
threshold at which the warning is printed. At the same time, keep the
exponential backoff to prevent reporting too much.

By default, the warning threshold is 4.

tj: Updated kernel-parameters.txt description.

Signed-off-by: Xuewen Yan
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

(limited to 'kernel')

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 65a27be81452..38783e3a60bb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -409,6 +409,10 @@ static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
  */
 static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
 module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
+#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
+static unsigned int wq_cpu_intensive_warning_thresh = 4;
+module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh, uint, 0644);
+#endif

 /* see the comment above the definition of WQ_POWER_EFFICIENT */
 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
@@ -1327,11 +1331,13 @@ restart:
 		u64 cnt;

 		/*
-		 * Start reporting from the fourth time and back off
+		 * Start reporting from the warning_thresh and back off
 		 * exponentially.
 */
 		cnt = atomic64_inc_return_relaxed(&ent->cnt);
-		if (cnt >= 4 && is_power_of_2(cnt))
+		if (wq_cpu_intensive_warning_thresh &&
+		    cnt >= wq_cpu_intensive_warning_thresh &&
+		    is_power_of_2(cnt + 1 - wq_cpu_intensive_warning_thresh))
 			printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
 					ent->func, wq_cpu_intensive_thresh_us,
 					atomic64_read(&ent->cnt));
@@ -1360,10 +1366,12 @@ restart:
 	ent = &wci_ents[wci_nr_ents++];
 	ent->func = func;
-	atomic64_set(&ent->cnt, 1);
+	atomic64_set(&ent->cnt, 0);
 	hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);

 	raw_spin_unlock(&wci_lock);
+
+	goto restart;
 }

 #else	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */
-- cgit v1.2.3

From 56c2cb10120894be40c40a9bf0ce798da14c50f6 Mon Sep 17 00:00:00 2001
From: Costa Shulyupin
Date: Thu, 22 Feb 2024 22:08:56 +0200
Subject: hrtimer: Select housekeeping CPU during migration

During CPU-down hotplug, hrtimers may migrate to isolated CPUs,
compromising CPU isolation.

Address this issue by masking valid CPUs for hrtimers using
housekeeping_cpumask(HK_TYPE_TIMER).

Suggested-by: Waiman Long
Signed-off-by: Costa Shulyupin
Signed-off-by: Thomas Gleixner
Reviewed-by: Waiman Long
Link: https://lore.kernel.org/r/20240222200856.569036-1-costa.shul@redhat.com
---
 kernel/time/hrtimer.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 5a98b35b0576..1fd106af747d 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -38,6 +38,7 @@
 #include
 #include
 #include
+#include <linux/sched/isolation.h>
 #include
 #include
 #include
@@ -2225,8 +2226,8 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 int hrtimers_cpu_dying(unsigned int dying_cpu)
 {
+	int i, ncpu = cpumask_any_and(cpu_active_mask, housekeeping_cpumask(HK_TYPE_TIMER));
 	struct hrtimer_cpu_base *old_base, *new_base;
-	int i, ncpu = cpumask_first(cpu_active_mask);

 	tick_cancel_sched_timer(dying_cpu);
-- cgit v1.2.3

From 5b98d210ac1e4eb35abfbd940df50dec10ae81e1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?=
Date: Thu, 22 Feb 2024 15:09:58 +0530
Subject: genirq/matrix: Dynamic bitmap allocation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

A future user of the matrix allocator does not know the size of the
matrix bitmaps at compile time.

To avoid wasting memory on unnecessarily large bitmaps, size the bitmap
at matrix allocation time.
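As a rough illustration only (not the kernel's irq_matrix code), the allocation pattern the patch moves to can be sketched in plain C: one runtime-sized allocation whose trailing flexible array holds both bitmaps back to back, with the second bitmap aliased into the tail. The struct and helper names below are invented for the sketch.

/* Userspace sketch of runtime-sized, back-to-back bitmaps. */
#include <stdlib.h>
#include <limits.h>

#define BITS_TO_LONGS(n) \
	(((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

struct bitmap_pair {
	unsigned int nbits;
	unsigned long *second_map;	/* points into map[] */
	unsigned long map[];		/* flexible array, sized at allocation time */
};

static struct bitmap_pair *bitmap_pair_alloc(unsigned int nbits)
{
	unsigned int size = BITS_TO_LONGS(nbits);
	struct bitmap_pair *bp;

	/* One zeroed allocation covers the struct plus both bitmaps */
	bp = calloc(1, sizeof(*bp) + 2 * size * sizeof(unsigned long));
	if (!bp)
		return NULL;

	bp->nbits = nbits;
	/* The second bitmap starts right after the first one */
	bp->second_map = &bp->map[size];
	return bp;
}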
Signed-off-by: Björn Töpel Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240222094006.1030709-11-apatel@ventanamicro.com --- kernel/irq/matrix.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 75d0ae490e29..8f222d1cccec 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -8,8 +8,6 @@ #include #include -#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS)) - struct cpumap { unsigned int available; unsigned int allocated; @@ -17,8 +15,8 @@ struct cpumap { unsigned int managed_allocated; bool initialized; bool online; - unsigned long alloc_map[IRQ_MATRIX_SIZE]; - unsigned long managed_map[IRQ_MATRIX_SIZE]; + unsigned long *managed_map; + unsigned long alloc_map[]; }; struct irq_matrix { @@ -32,8 +30,8 @@ struct irq_matrix { unsigned int total_allocated; unsigned int online_maps; struct cpumap __percpu *maps; - unsigned long scratch_map[IRQ_MATRIX_SIZE]; - unsigned long system_map[IRQ_MATRIX_SIZE]; + unsigned long *system_map; + unsigned long scratch_map[]; }; #define CREATE_TRACE_POINTS @@ -50,24 +48,32 @@ __init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits, unsigned int alloc_start, unsigned int alloc_end) { + unsigned int cpu, matrix_size = BITS_TO_LONGS(matrix_bits); struct irq_matrix *m; - if (matrix_bits > IRQ_MATRIX_BITS) - return NULL; - - m = kzalloc(sizeof(*m), GFP_KERNEL); + m = kzalloc(struct_size(m, scratch_map, matrix_size * 2), GFP_KERNEL); if (!m) return NULL; + m->system_map = &m->scratch_map[matrix_size]; + m->matrix_bits = matrix_bits; m->alloc_start = alloc_start; m->alloc_end = alloc_end; m->alloc_size = alloc_end - alloc_start; - m->maps = alloc_percpu(*m->maps); + m->maps = __alloc_percpu(struct_size(m->maps, alloc_map, matrix_size * 2), + __alignof__(*m->maps)); if (!m->maps) { kfree(m); return NULL; } + + for_each_possible_cpu(cpu) { + struct cpumap *cm = per_cpu_ptr(m->maps, cpu); + + cm->managed_map = &cm->alloc_map[matrix_size]; + } + return m; } -- cgit v1.2.3 From b361c9027b4e4159e7bcca4eb64fd26507c19994 Mon Sep 17 00:00:00 2001 From: Qais Yousef Date: Fri, 23 Feb 2024 15:57:48 +0000 Subject: sched: Add a new function to compare if two cpus have the same capacity The new helper function is needed to help blk-mq check if it needs to dispatch the softirq on another CPU to match the performance level the IO requester is running at. This is important on HMP systems where not all CPUs have the same compute capacity. 
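As a usage illustration: the helper's name and semantics come from this patch, but the call site below is a simplified, hypothetical stand-in for the blk-mq completion decision, not its actual code.

/*
 * Kernel-context sketch: decide whether a completion raised on this
 * CPU may simply run locally on behalf of the CPU that issued the IO.
 */
static bool can_complete_locally(int issuer_cpu)
{
	int this_cpu = raw_smp_processor_id();

	/*
	 * CPUs with equal capacity can complete work for each other
	 * without changing the performance level the issuer sees.
	 */
	return cpus_equal_capacity(this_cpu, issuer_cpu);
}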
Signed-off-by: Qais Yousef Reviewed-by: Bart Van Assche Link: https://lore.kernel.org/r/20240223155749.2958009-2-qyousef@layalina.io Signed-off-by: Jens Axboe --- kernel/sched/core.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 083f2258182d..540f229700b6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3955,6 +3955,17 @@ void wake_up_if_idle(int cpu) } } +bool cpus_equal_capacity(int this_cpu, int that_cpu) +{ + if (!sched_asym_cpucap_active()) + return true; + + if (this_cpu == that_cpu) + return true; + + return arch_scale_cpu_capacity(this_cpu) == arch_scale_cpu_capacity(that_cpu); +} + bool cpus_share_cache(int this_cpu, int that_cpu) { if (this_cpu == that_cpu) -- cgit v1.2.3 From 4379f91172f39d999919c8e8b2b5e1d665d8972d Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Tue, 23 Jan 2024 14:26:23 +0100 Subject: power: port block device access to file Link: https://lore.kernel.org/r/20240123-vfs-bdev-file-v2-6-adbd023e19cc@kernel.org Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Christian Brauner --- kernel/power/swap.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 6053ddddaf65..692f12fe60c1 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -222,7 +222,7 @@ int swsusp_swap_in_use(void) */ static unsigned short root_swap = 0xffff; -static struct bdev_handle *hib_resume_bdev_handle; +static struct file *hib_resume_bdev_file; struct hib_bio_batch { atomic_t count; @@ -276,7 +276,7 @@ static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr, struct bio *bio; int error = 0; - bio = bio_alloc(hib_resume_bdev_handle->bdev, 1, opf, + bio = bio_alloc(file_bdev(hib_resume_bdev_file), 1, opf, GFP_NOIO | __GFP_HIGH); bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); @@ -357,14 +357,14 @@ static int swsusp_swap_check(void) return res; root_swap = res; - hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device, + hib_resume_bdev_file = bdev_file_open_by_dev(swsusp_resume_device, BLK_OPEN_WRITE, NULL, NULL); - if (IS_ERR(hib_resume_bdev_handle)) - return PTR_ERR(hib_resume_bdev_handle); + if (IS_ERR(hib_resume_bdev_file)) + return PTR_ERR(hib_resume_bdev_file); - res = set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE); + res = set_blocksize(file_bdev(hib_resume_bdev_file), PAGE_SIZE); if (res < 0) - bdev_release(hib_resume_bdev_handle); + fput(hib_resume_bdev_file); return res; } @@ -1523,10 +1523,10 @@ int swsusp_check(bool exclusive) void *holder = exclusive ? 
&swsusp_holder : NULL; int error; - hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device, + hib_resume_bdev_file = bdev_file_open_by_dev(swsusp_resume_device, BLK_OPEN_READ, holder, NULL); - if (!IS_ERR(hib_resume_bdev_handle)) { - set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE); + if (!IS_ERR(hib_resume_bdev_file)) { + set_blocksize(file_bdev(hib_resume_bdev_file), PAGE_SIZE); clear_page(swsusp_header); error = hib_submit_io(REQ_OP_READ, swsusp_resume_block, swsusp_header, NULL); @@ -1551,11 +1551,11 @@ int swsusp_check(bool exclusive) put: if (error) - bdev_release(hib_resume_bdev_handle); + fput(hib_resume_bdev_file); else pr_debug("Image signature found, resuming\n"); } else { - error = PTR_ERR(hib_resume_bdev_handle); + error = PTR_ERR(hib_resume_bdev_file); } if (error) @@ -1570,12 +1570,12 @@ put: void swsusp_close(void) { - if (IS_ERR(hib_resume_bdev_handle)) { + if (IS_ERR(hib_resume_bdev_file)) { pr_debug("Image device not initialised\n"); return; } - bdev_release(hib_resume_bdev_handle); + fput(hib_resume_bdev_file); } /** -- cgit v1.2.3 From bfe93930ea1ea3c6c115a7d44af6e4fea609067e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 5 Feb 2024 13:08:22 -0800 Subject: rcu-tasks: Add data to eliminate RCU-tasks/do_exit() deadlocks Holding a mutex across synchronize_rcu_tasks() and acquiring that same mutex in code called from do_exit() after its call to exit_tasks_rcu_start() but before its call to exit_tasks_rcu_stop() results in deadlock. This is by design, because tasks that are far enough into do_exit() are no longer present on the tasks list, making it a bit difficult for RCU Tasks to find them, let alone wait on them to do a voluntary context switch. However, such deadlocks are becoming more frequent. In addition, lockdep currently does not detect such deadlocks and they can be difficult to reproduce. In addition, if a task voluntarily context switches during that time (for example, if it blocks acquiring a mutex), then this task is in an RCU Tasks quiescent state. And with some adjustments, RCU Tasks could just as well take advantage of that fact. This commit therefore adds the data structures that will be needed to rely on these quiescent states and to eliminate these deadlocks. Link: https://lore.kernel.org/all/20240118021842.290665-1-chenzhongjin@huawei.com/ Reported-by: Chen Zhongjin Reported-by: Yang Jihong Signed-off-by: Paul E. McKenney Tested-by: Yang Jihong Tested-by: Chen Zhongjin Reviewed-by: Frederic Weisbecker Signed-off-by: Boqun Feng --- kernel/rcu/tasks.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 732ad5b39946..b7d5f2757053 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -32,6 +32,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); * @rtp_irq_work: IRQ work queue for deferred wakeups. * @barrier_q_head: RCU callback for barrier operation. * @rtp_blkd_tasks: List of tasks blocked as readers. + * @rtp_exit_list: List of tasks in the latter portion of do_exit(). * @cpu: CPU number corresponding to this entry. * @rtpp: Pointer to the rcu_tasks structure. */ @@ -46,6 +47,7 @@ struct rcu_tasks_percpu { struct irq_work rtp_irq_work; struct rcu_head barrier_q_head; struct list_head rtp_blkd_tasks; + struct list_head rtp_exit_list; int cpu; struct rcu_tasks *rtpp; }; -- cgit v1.2.3 From 30ef09635b9ed3ebca4f677495332a2e444a5cda Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Thu, 22 Feb 2024 12:29:54 -0800 Subject: rcu-tasks: Initialize callback lists at rcu_init() time In order for RCU Tasks to reliably maintain per-CPU lists of exiting tasks, those lists must be initialized before it is possible for tasks to exit, especially given that the boot CPU is not necessarily CPU 0 (an example being, powerpc kexec() kernels). And at the time that rcu_init_tasks_generic() is called, a task could potentially exit, unconventional though that sort of thing might be. This commit therefore moves the calls to cblist_init_generic() from functions called from rcu_init_tasks_generic() to a new function named tasks_cblist_init_generic() that is invoked from rcu_init(). This constituted a bug in a commit that never went to mainline, so there is no need for any backporting to -stable. Reported-by: Frederic Weisbecker Signed-off-by: Paul E. McKenney Signed-off-by: Boqun Feng --- kernel/rcu/rcu.h | 6 ++++++ kernel/rcu/tasks.h | 24 ++++++++++++++++++------ kernel/rcu/tiny.c | 1 + kernel/rcu/tree.c | 2 ++ 4 files changed, 27 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index f94f65877f2b..ef63ea59c8b6 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -528,6 +528,12 @@ struct task_struct *get_rcu_tasks_gp_kthread(void); struct task_struct *get_rcu_tasks_rude_gp_kthread(void); #endif // # ifdef CONFIG_TASKS_RUDE_RCU +#ifdef CONFIG_TASKS_RCU_GENERIC +void tasks_cblist_init_generic(void); +#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ +static inline void tasks_cblist_init_generic(void) { } +#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ + #define RCU_SCHEDULER_INACTIVE 0 #define RCU_SCHEDULER_INIT 1 #define RCU_SCHEDULER_RUNNING 2 diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index b7d5f2757053..6961a1b5b783 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -242,7 +242,6 @@ static const char *tasks_gp_state_getname(struct rcu_tasks *rtp) static void cblist_init_generic(struct rcu_tasks *rtp) { int cpu; - unsigned long flags; int lim; int shift; @@ -268,10 +267,8 @@ static void cblist_init_generic(struct rcu_tasks *rtp) WARN_ON_ONCE(!rtpcp); if (cpu) raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock)); - local_irq_save(flags); // serialize initialization if (rcu_segcblist_empty(&rtpcp->cblist)) rcu_segcblist_init(&rtpcp->cblist); - local_irq_restore(flags); INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq); rtpcp->cpu = cpu; rtpcp->rtpp = rtp; @@ -1120,7 +1117,6 @@ module_param(rcu_tasks_lazy_ms, int, 0444); static int __init rcu_spawn_tasks_kthread(void) { - cblist_init_generic(&rcu_tasks); rcu_tasks.gp_sleep = HZ / 10; rcu_tasks.init_fract = HZ / 10; if (rcu_tasks_lazy_ms >= 0) @@ -1284,7 +1280,6 @@ module_param(rcu_tasks_rude_lazy_ms, int, 0444); static int __init rcu_spawn_tasks_rude_kthread(void) { - cblist_init_generic(&rcu_tasks_rude); rcu_tasks_rude.gp_sleep = HZ / 10; if (rcu_tasks_rude_lazy_ms >= 0) rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms); @@ -1916,7 +1911,6 @@ module_param(rcu_tasks_trace_lazy_ms, int, 0444); static int __init rcu_spawn_tasks_trace_kthread(void) { - cblist_init_generic(&rcu_tasks_trace); if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { rcu_tasks_trace.gp_sleep = HZ / 10; rcu_tasks_trace.init_fract = HZ / 10; @@ -2088,6 +2082,24 @@ late_initcall(rcu_tasks_verify_schedule_work); static void rcu_tasks_initiate_self_tests(void) { } #endif /* #else #ifdef CONFIG_PROVE_RCU */ +void __init tasks_cblist_init_generic(void) +{ + 
lockdep_assert_irqs_disabled(); + WARN_ON(num_online_cpus() > 1); + +#ifdef CONFIG_TASKS_RCU + cblist_init_generic(&rcu_tasks); +#endif + +#ifdef CONFIG_TASKS_RUDE_RCU + cblist_init_generic(&rcu_tasks_rude); +#endif + +#ifdef CONFIG_TASKS_TRACE_RCU + cblist_init_generic(&rcu_tasks_trace); +#endif +} + void __init rcu_init_tasks_generic(void) { #ifdef CONFIG_TASKS_RCU diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index fec804b79080..705c0d16850a 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -261,4 +261,5 @@ void __init rcu_init(void) { open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); rcu_early_boot_tests(); + tasks_cblist_init_generic(); } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b2bccfd37c38..ba9137f39d14 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -5165,6 +5165,8 @@ void __init rcu_init(void) (void)start_poll_synchronize_rcu_expedited(); rcu_test_sync_prims(); + + tasks_cblist_init_generic(); } #include "tree_stall.h" -- cgit v1.2.3 From 46faf9d8e1d52e4a91c382c6c72da6bd8e68297b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 5 Feb 2024 13:10:19 -0800 Subject: rcu-tasks: Initialize data to eliminate RCU-tasks/do_exit() deadlocks Holding a mutex across synchronize_rcu_tasks() and acquiring that same mutex in code called from do_exit() after its call to exit_tasks_rcu_start() but before its call to exit_tasks_rcu_stop() results in deadlock. This is by design, because tasks that are far enough into do_exit() are no longer present on the tasks list, making it a bit difficult for RCU Tasks to find them, let alone wait on them to do a voluntary context switch. However, such deadlocks are becoming more frequent. In addition, lockdep currently does not detect such deadlocks and they can be difficult to reproduce. In addition, if a task voluntarily context switches during that time (for example, if it blocks acquiring a mutex), then this task is in an RCU Tasks quiescent state. And with some adjustments, RCU Tasks could just as well take advantage of that fact. This commit therefore initializes the data structures that will be needed to rely on these quiescent states and to eliminate these deadlocks. Link: https://lore.kernel.org/all/20240118021842.290665-1-chenzhongjin@huawei.com/ Reported-by: Chen Zhongjin Reported-by: Yang Jihong Signed-off-by: Paul E. 
McKenney Tested-by: Yang Jihong Tested-by: Chen Zhongjin Reviewed-by: Frederic Weisbecker Signed-off-by: Boqun Feng --- kernel/fork.c | 1 + kernel/rcu/tasks.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 0d944e92a43f..af7203be1d2d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1976,6 +1976,7 @@ static inline void rcu_copy_process(struct task_struct *p) p->rcu_tasks_holdout = false; INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); p->rcu_tasks_idle_cpu = -1; + INIT_LIST_HEAD(&p->rcu_tasks_exit_list); #endif /* #ifdef CONFIG_TASKS_RCU */ #ifdef CONFIG_TASKS_TRACE_RCU p->trc_reader_nesting = 0; diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 6961a1b5b783..edd14fee48c5 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -274,6 +274,8 @@ static void cblist_init_generic(struct rcu_tasks *rtp) rtpcp->rtpp = rtp; if (!rtpcp->rtp_blkd_tasks.next) INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); + if (!rtpcp->rtp_exit_list.next) + INIT_LIST_HEAD(&rtpcp->rtp_exit_list); } pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name, -- cgit v1.2.3 From 6b70399f9ef3809f6e308fd99dd78b072c1bd05c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 2 Feb 2024 11:28:45 -0800 Subject: rcu-tasks: Maintain lists to eliminate RCU-tasks/do_exit() deadlocks This commit continues the elimination of deadlocks involving do_exit() and RCU tasks by causing exit_tasks_rcu_start() to add the current task to a per-CPU list and causing exit_tasks_rcu_stop() to remove the current task from whatever list it is on. These lists will be used to track tasks that are exiting, while still accounting for any RCU-tasks quiescent states that these tasks pass though. [ paulmck: Apply Frederic Weisbecker feedback. ] Link: https://lore.kernel.org/all/20240118021842.290665-1-chenzhongjin@huawei.com/ Reported-by: Chen Zhongjin Reported-by: Yang Jihong Signed-off-by: Paul E. McKenney Tested-by: Yang Jihong Tested-by: Chen Zhongjin Reviewed-by: Frederic Weisbecker Signed-off-by: Boqun Feng --- kernel/rcu/tasks.h | 43 +++++++++++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index edd14fee48c5..9e4122497b9f 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1147,25 +1147,48 @@ struct task_struct *get_rcu_tasks_gp_kthread(void) EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread); /* - * Contribute to protect against tasklist scan blind spot while the - * task is exiting and may be removed from the tasklist. See - * corresponding synchronize_srcu() for further details. + * Protect against tasklist scan blind spot while the task is exiting and + * may be removed from the tasklist. Do this by adding the task to yet + * another list. + * + * Note that the task will remove itself from this list, so there is no + * need for get_task_struct(), except in the case where rcu_tasks_pertask() + * adds it to the holdout list, in which case rcu_tasks_pertask() supplies + * the needed get_task_struct(). 
*/ -void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu) +void exit_tasks_rcu_start(void) { - current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu); + unsigned long flags; + struct rcu_tasks_percpu *rtpcp; + struct task_struct *t = current; + + WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list)); + preempt_disable(); + rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu); + t->rcu_tasks_exit_cpu = smp_processor_id(); + raw_spin_lock_irqsave_rcu_node(rtpcp, flags); + if (!rtpcp->rtp_exit_list.next) + INIT_LIST_HEAD(&rtpcp->rtp_exit_list); + list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list); + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); + preempt_enable(); } /* - * Contribute to protect against tasklist scan blind spot while the - * task is exiting and may be removed from the tasklist. See - * corresponding synchronize_srcu() for further details. + * Remove the task from the "yet another list" because do_exit() is now + * non-preemptible, allowing synchronize_rcu() to wait beyond this point. */ -void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu) +void exit_tasks_rcu_stop(void) { + unsigned long flags; + struct rcu_tasks_percpu *rtpcp; struct task_struct *t = current; - __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx); + WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list)); + rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu); + raw_spin_lock_irqsave_rcu_node(rtpcp, flags); + list_del_init(&t->rcu_tasks_exit_list); + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); } /* -- cgit v1.2.3 From 1612160b91272f5b1596f499584d6064bf5be794 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 2 Feb 2024 11:49:06 -0800 Subject: rcu-tasks: Eliminate deadlocks involving do_exit() and RCU tasks Holding a mutex across synchronize_rcu_tasks() and acquiring that same mutex in code called from do_exit() after its call to exit_tasks_rcu_start() but before its call to exit_tasks_rcu_stop() results in deadlock. This is by design, because tasks that are far enough into do_exit() are no longer present on the tasks list, making it a bit difficult for RCU Tasks to find them, let alone wait on them to do a voluntary context switch. However, such deadlocks are becoming more frequent. In addition, lockdep currently does not detect such deadlocks and they can be difficult to reproduce. In addition, if a task voluntarily context switches during that time (for example, if it blocks acquiring a mutex), then this task is in an RCU Tasks quiescent state. And with some adjustments, RCU Tasks could just as well take advantage of that fact. This commit therefore eliminates these deadlock by replacing the SRCU-based wait for do_exit() completion with per-CPU lists of tasks currently exiting. A given task will be on one of these per-CPU lists for the same period of time that this task would previously have been in the previous SRCU read-side critical section. These lists enable RCU Tasks to find the tasks that have already been removed from the tasks list, but that must nevertheless be waited upon. The RCU Tasks grace period gathers any of these do_exit() tasks that it must wait on, and adds them to the list of holdouts. Per-CPU locking and get_task_struct() are used to synchronize addition to and removal from these lists. Link: https://lore.kernel.org/all/20240118021842.290665-1-chenzhongjin@huawei.com/ Reported-by: Chen Zhongjin Reported-by: Yang Jihong Signed-off-by: Paul E. 
McKenney Tested-by: Yang Jihong Tested-by: Chen Zhongjin Reviewed-by: Frederic Weisbecker Signed-off-by: Boqun Feng --- kernel/rcu/tasks.h | 44 ++++++++++++++++++++++++++++---------------- 1 file changed, 28 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 9e4122497b9f..c61dc92537db 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -146,8 +146,6 @@ static struct rcu_tasks rt_name = \ } #ifdef CONFIG_TASKS_RCU -/* Track exiting tasks in order to allow them to be waited for. */ -DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu); /* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). */ static void tasks_rcu_exit_srcu_stall(struct timer_list *unused); @@ -852,10 +850,12 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) // number of voluntary context switches, and add that task to the // holdout list. // rcu_tasks_postscan(): -// Invoke synchronize_srcu() to ensure that all tasks that were -// in the process of exiting (and which thus might not know to -// synchronize with this RCU Tasks grace period) have completed -// exiting. +// Gather per-CPU lists of tasks in do_exit() to ensure that all +// tasks that were in the process of exiting (and which thus might +// not know to synchronize with this RCU Tasks grace period) have +// completed exiting. The synchronize_rcu() in rcu_tasks_postgp() +// will take care of any tasks stuck in the non-preemptible region +// of do_exit() following its call to exit_tasks_rcu_stop(). // check_all_holdout_tasks(), repeatedly until holdout list is empty: // Scans the holdout list, attempting to identify a quiescent state // for each task on the list. If there is a quiescent state, the @@ -868,8 +868,10 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) // with interrupts disabled. // // For each exiting task, the exit_tasks_rcu_start() and -// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU -// read-side critical sections waited for by rcu_tasks_postscan(). +// exit_tasks_rcu_finish() functions add and remove, respectively, the +// current task to a per-CPU list of tasks that rcu_tasks_postscan() must +// wait on. This is necessary because rcu_tasks_postscan() must wait on +// tasks that have already been removed from the global list of tasks. // // Pre-grace-period update-side code is ordered before the grace // via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code @@ -933,9 +935,13 @@ static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) } } +void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func); +DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); + /* Processing between scanning taskslist and draining the holdout list. */ static void rcu_tasks_postscan(struct list_head *hop) { + int cpu; int rtsi = READ_ONCE(rcu_task_stall_info); if (!IS_ENABLED(CONFIG_TINY_RCU)) { @@ -949,9 +955,9 @@ static void rcu_tasks_postscan(struct list_head *hop) * this, divide the fragile exit path part in two intersecting * read side critical sections: * - * 1) An _SRCU_ read side starting before calling exit_notify(), - * which may remove the task from the tasklist, and ending after - * the final preempt_disable() call in do_exit(). + * 1) A task_struct list addition before calling exit_notify(), + * which may remove the task from the tasklist, with the + * removal after the final preempt_disable() call in do_exit(). 
* * 2) An _RCU_ read side starting with the final preempt_disable() * call in do_exit() and ending with the final call to schedule() @@ -960,7 +966,17 @@ static void rcu_tasks_postscan(struct list_head *hop) * This handles the part 1). And postgp will handle part 2) with a * call to synchronize_rcu(). */ - synchronize_srcu(&tasks_rcu_exit_srcu); + + for_each_possible_cpu(cpu) { + struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu); + struct task_struct *t; + + raw_spin_lock_irq_rcu_node(rtpcp); + list_for_each_entry(t, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) + if (list_empty(&t->rcu_tasks_holdout_list)) + rcu_tasks_pertask(t, hop); + raw_spin_unlock_irq_rcu_node(rtpcp); + } if (!IS_ENABLED(CONFIG_TINY_RCU)) del_timer_sync(&tasks_rcu_exit_srcu_stall_timer); @@ -1028,7 +1044,6 @@ static void rcu_tasks_postgp(struct rcu_tasks *rtp) * * In addition, this synchronize_rcu() waits for exiting tasks * to complete their final preempt_disable() region of execution, - * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu), * enforcing the whole region before tasklist removal until * the final schedule() with TASK_DEAD state to be an RCU TASKS * read side critical section. @@ -1036,9 +1051,6 @@ static void rcu_tasks_postgp(struct rcu_tasks *rtp) synchronize_rcu(); } -void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func); -DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); - static void tasks_rcu_exit_srcu_stall(struct timer_list *unused) { #ifndef CONFIG_TINY_RCU -- cgit v1.2.3 From 0bb11a372fc8d7006b4d0f42a2882939747bdbff Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 1 Feb 2024 06:10:26 -0800 Subject: rcu-tasks: Maintain real-time response in rcu_tasks_postscan() The current code will scan the entirety of each per-CPU list of exiting tasks in ->rtp_exit_list with interrupts disabled. This is normally just fine, because each CPU typically won't have very many tasks in this state. However, if a large number of tasks block late in do_exit(), these lists could be arbitrarily long. Low probability, perhaps, but it really could happen. This commit therefore occasionally re-enables interrupts while traversing these lists, inserting a dummy element to hold the current place in the list. In kernels built with CONFIG_PREEMPT_RT=y, this re-enabling happens after each list element is processed, otherwise every one-to-two jiffies. [ paulmck: Apply Frederic Weisbecker feedback. ] Link: https://lore.kernel.org/all/ZdeI_-RfdLR8jlsm@localhost.localdomain/ Signed-off-by: Paul E. 
McKenney Cc: Thomas Gleixner Cc: Sebastian Siewior Cc: Anna-Maria Behnsen Cc: Steven Rostedt Signed-off-by: Boqun Feng --- kernel/rcu/tasks.h | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index c61dc92537db..147b5945d67a 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -968,13 +968,33 @@ static void rcu_tasks_postscan(struct list_head *hop) */ for_each_possible_cpu(cpu) { + unsigned long j = jiffies + 1; struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu); struct task_struct *t; + struct task_struct *t1; + struct list_head tmp; raw_spin_lock_irq_rcu_node(rtpcp); - list_for_each_entry(t, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) + list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) { if (list_empty(&t->rcu_tasks_holdout_list)) rcu_tasks_pertask(t, hop); + + // RT kernels need frequent pauses, otherwise + // pause at least once per pair of jiffies. + if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j)) + continue; + + // Keep our place in the list while pausing. + // Nothing else traverses this list, so adding a + // bare list_head is OK. + list_add(&tmp, &t->rcu_tasks_exit_list); + raw_spin_unlock_irq_rcu_node(rtpcp); + cond_resched(); // For CONFIG_PREEMPT=n kernels + raw_spin_lock_irq_rcu_node(rtpcp); + t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list); + list_del(&tmp); + j = jiffies + 1; + } raw_spin_unlock_irq_rcu_node(rtpcp); } -- cgit v1.2.3 From ffb7e01c4e654d5c8bf2ce2a4830b826fa1f149e Mon Sep 17 00:00:00 2001 From: Peng Liu Date: Sun, 25 Feb 2024 23:54:53 +0100 Subject: tick/nohz: Remove duplicate between tick_nohz_switch_to_nohz() and tick_setup_sched_timer() The ts->sched_timer initialization work of tick_nohz_switch_to_nohz() is almost the same as that of tick_setup_sched_timer(), so adjust the latter to get it reused by tick_nohz_switch_to_nohz(). This also makes the low resolution mode sched_timer benefit from the tick skew boot option. Signed-off-by: Peng Liu Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-2-frederic@kernel.org --- kernel/time/hrtimer.c | 2 +- kernel/time/tick-sched.c | 39 ++++++++++++++++++--------------------- kernel/time/tick-sched.h | 2 +- 3 files changed, 20 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 1fd106af747d..95f1f351dcd9 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -747,7 +747,7 @@ static void hrtimer_switch_to_hres(void) base->hres_active = 1; hrtimer_resolution = HIGH_RES_NSEC; - tick_setup_sched_timer(); + tick_setup_sched_timer(NOHZ_MODE_HIGHRES); /* "Retrigger" the interrupt to get things going */ retrigger_next_event(NULL); } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 417bb7f880ca..d4901654148d 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1471,9 +1471,6 @@ static inline void tick_nohz_activate(struct tick_sched *ts, int mode) */ static void tick_nohz_switch_to_nohz(void) { - struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); - ktime_t next; - if (!tick_nohz_enabled) return; @@ -1482,16 +1479,9 @@ static void tick_nohz_switch_to_nohz(void) /* * Recycle the hrtimer in 'ts', so we can share the - * hrtimer_forward_now() function with the highres code. + * highres code. 
*/ - hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); - /* Get the next period */ - next = tick_init_jiffy_update(); - - hrtimer_set_expires(&ts->sched_timer, next); - hrtimer_forward_now(&ts->sched_timer, TICK_NSEC); - tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); - tick_nohz_activate(ts, NOHZ_MODE_LOWRES); + tick_setup_sched_timer(NOHZ_MODE_LOWRES); } static inline void tick_nohz_irq_enter(void) @@ -1570,7 +1560,11 @@ static enum hrtimer_restart tick_nohz_highres_handler(struct hrtimer *timer) return HRTIMER_RESTART; } +#else +#define tick_nohz_highres_handler NULL +#endif /* CONFIG_HIGH_RES_TIMERS */ +#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS static int sched_skew_tick; static int __init skew_tick(char *str) @@ -1583,15 +1577,17 @@ early_param("skew_tick", skew_tick); /** * tick_setup_sched_timer - setup the tick emulation timer + * @mode: tick_nohz_mode to setup for */ -void tick_setup_sched_timer(void) +void tick_setup_sched_timer(int mode) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); - ktime_t now = ktime_get(); /* Emulate tick processing via per-CPU hrtimers: */ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); - ts->sched_timer.function = tick_nohz_highres_handler; + + if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && mode == NOHZ_MODE_HIGHRES) + ts->sched_timer.function = tick_nohz_highres_handler; /* Get the next period (per-CPU) */ hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); @@ -1604,13 +1600,14 @@ void tick_setup_sched_timer(void) hrtimer_add_expires_ns(&ts->sched_timer, offset); } - hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); - hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); - tick_nohz_activate(ts, NOHZ_MODE_HIGHRES); + hrtimer_forward_now(&ts->sched_timer, TICK_NSEC); + if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && mode == NOHZ_MODE_HIGHRES) + hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); + else + tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); + tick_nohz_activate(ts, mode); } -#endif /* HIGH_RES_TIMERS */ -#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS void tick_cancel_sched_timer(int cpu) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); @@ -1632,7 +1629,7 @@ void tick_cancel_sched_timer(int cpu) ts->idle_calls = idle_calls; ts->idle_sleeps = idle_sleeps; } -#endif +#endif /* CONFIG_NO_HZ_COMMON || CONFIG_HIGH_RES_TIMERS */ /* * Async notification about clocksource changes diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index 5ed5a9d41d5a..35808bbb8a47 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h @@ -102,7 +102,7 @@ struct tick_sched { extern struct tick_sched *tick_get_tick_sched(int cpu); -extern void tick_setup_sched_timer(void); +extern void tick_setup_sched_timer(int mode); #if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS extern void tick_cancel_sched_timer(int cpu); #else -- cgit v1.2.3 From 37263ba0c44b13b2025398ac81919df9f0a91368 Mon Sep 17 00:00:00 2001 From: Peng Liu Date: Sun, 25 Feb 2024 23:54:54 +0100 Subject: tick/nohz: Remove duplicate between lowres and highres handlers tick_nohz_lowres_handler() does the same work as tick_nohz_highres_handler() plus the clockevent device reprogramming, so make the former reuse the latter and rename it accordingly. 
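[ Editor's note: a minimal sketch of the delegation pattern this patch applies. The names shared_tick_handler() and my_lowres_handler() are hypothetical; the patch itself does this with tick_nohz_handler() and tick_nohz_lowres_handler(). The point is that only the explicit clockevent reprogramming stays low-res specific: ]

	static struct hrtimer sched_timer;	/* stand-in for ts->sched_timer */

	/* Common tick work shared by the low-res and high-res modes. */
	static enum hrtimer_restart shared_tick_handler(struct hrtimer *timer)
	{
		/* ... tick bookkeeping elided ... */
		hrtimer_forward_now(timer, TICK_NSEC);
		return HRTIMER_RESTART;
	}

	/* Low-res mode only adds the explicit clockevent reprogramming. */
	static void my_lowres_handler(struct clock_event_device *dev)
	{
		dev->next_event = KTIME_MAX;
		if (shared_tick_handler(&sched_timer) == HRTIMER_RESTART)
			tick_program_event(hrtimer_get_expires(&sched_timer), 1);
	}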
Signed-off-by: Peng Liu Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-3-frederic@kernel.org --- kernel/time/tick-sched.c | 96 ++++++++++++++++++------------------------------ 1 file changed, 36 insertions(+), 60 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index d4901654148d..88c992f48126 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -255,6 +255,40 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) update_process_times(user_mode(regs)); profile_tick(CPU_PROFILING); } + +/* + * We rearm the timer until we get disabled by the idle code. + * Called with interrupts disabled. + */ +static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer) +{ + struct tick_sched *ts = container_of(timer, struct tick_sched, sched_timer); + struct pt_regs *regs = get_irq_regs(); + ktime_t now = ktime_get(); + + tick_sched_do_timer(ts, now); + + /* + * Do not call when we are not in IRQ context and have + * no valid 'regs' pointer + */ + if (regs) + tick_sched_handle(ts, regs); + else + ts->next_tick = 0; + + /* + * In dynticks mode, tick reprogram is deferred: + * - to the idle task if in dynticks-idle + * - to IRQ exit if in full-dynticks. + */ + if (unlikely(ts->tick_stopped)) + return HRTIMER_NORESTART; + + hrtimer_forward(timer, now, TICK_NSEC); + + return HRTIMER_RESTART; +} #endif #ifdef CONFIG_NO_HZ_FULL @@ -1429,31 +1463,15 @@ void tick_nohz_idle_exit(void) * at the clockevent level. hrtimer can't be used instead, because its * infrastructure actually relies on the tick itself as a backend in * low-resolution mode (see hrtimer_run_queues()). - * - * This low-resolution handler still makes use of some hrtimer APIs meanwhile - * for convenience with expiration calculation and forwarding. */ static void tick_nohz_lowres_handler(struct clock_event_device *dev) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); - struct pt_regs *regs = get_irq_regs(); - ktime_t now = ktime_get(); dev->next_event = KTIME_MAX; - tick_sched_do_timer(ts, now); - tick_sched_handle(ts, regs); - - /* - * In dynticks mode, tick reprogram is deferred: - * - to the idle task if in dynticks-idle - * - to IRQ exit if in full-dynticks. - */ - if (likely(!ts->tick_stopped)) { - hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); + if (likely(tick_nohz_handler(&ts->sched_timer) == HRTIMER_RESTART)) tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); - } - } static inline void tick_nohz_activate(struct tick_sched *ts, int mode) @@ -1522,48 +1540,6 @@ void tick_irq_enter(void) tick_nohz_irq_enter(); } -/* - * High resolution timer specific code - */ -#ifdef CONFIG_HIGH_RES_TIMERS -/* - * We rearm the timer until we get disabled by the idle code. - * Called with interrupts disabled. - */ -static enum hrtimer_restart tick_nohz_highres_handler(struct hrtimer *timer) -{ - struct tick_sched *ts = - container_of(timer, struct tick_sched, sched_timer); - struct pt_regs *regs = get_irq_regs(); - ktime_t now = ktime_get(); - - tick_sched_do_timer(ts, now); - - /* - * Do not call when we are not in IRQ context and have - * no valid 'regs' pointer - */ - if (regs) - tick_sched_handle(ts, regs); - else - ts->next_tick = 0; - - /* - * In dynticks mode, tick reprogram is deferred: - * - to the idle task if in dynticks-idle - * - to IRQ exit if in full-dynticks. 
- */ - if (unlikely(ts->tick_stopped)) - return HRTIMER_NORESTART; - - hrtimer_forward(timer, now, TICK_NSEC); - - return HRTIMER_RESTART; -} -#else -#define tick_nohz_highres_handler NULL -#endif /* CONFIG_HIGH_RES_TIMERS */ - #if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS static int sched_skew_tick; @@ -1587,7 +1563,7 @@ void tick_setup_sched_timer(int mode) hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && mode == NOHZ_MODE_HIGHRES) - ts->sched_timer.function = tick_nohz_highres_handler; + ts->sched_timer.function = tick_nohz_handler; /* Get the next period (per-CPU) */ hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); -- cgit v1.2.3 From 3aedb7fcd88af25c215be098bf8ecb9ae8cb60ab Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:54:55 +0100 Subject: tick/sched: Remove useless oneshot ifdeffery tick-sched.c is only built when CONFIG_TICK_ONESHOT=y, which is selected only if CONFIG_NO_HZ_COMMON=y or CONFIG_HIGH_RES_TIMERS=y. Therefore the related ifdeffery in this file is needless and can be removed. Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-4-frederic@kernel.org --- kernel/time/tick-sched.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 88c992f48126..27aaecb2e50c 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -43,7 +43,6 @@ struct tick_sched *tick_get_tick_sched(int cpu) return &per_cpu(tick_cpu_sched, cpu); } -#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS) /* * The time when the last jiffy update happened. Write access must hold * jiffies_lock and jiffies_seq.
tick_nohz_next_event() needs to get a @@ -289,7 +288,6 @@ static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer) return HRTIMER_RESTART; } -#endif #ifdef CONFIG_NO_HZ_FULL cpumask_var_t tick_nohz_full_mask; @@ -635,7 +633,7 @@ void __init tick_nohz_init(void) pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n", cpumask_pr_args(tick_nohz_full_mask)); } -#endif +#endif /* #ifdef CONFIG_NO_HZ_FULL */ /* * NOHZ - aka dynamic tick functionality @@ -1540,7 +1538,6 @@ void tick_irq_enter(void) tick_nohz_irq_enter(); } -#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS static int sched_skew_tick; static int __init skew_tick(char *str) @@ -1605,7 +1602,6 @@ void tick_cancel_sched_timer(int cpu) ts->idle_calls = idle_calls; ts->idle_sleeps = idle_sleeps; } -#endif /* CONFIG_NO_HZ_COMMON || CONFIG_HIGH_RES_TIMERS */ /* * Async notification about clocksource changes -- cgit v1.2.3 From 27dc08096ce498ec8b87fb12ce4b9932c8111898 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:54:56 +0100 Subject: tick: Use IS_ENABLED() whenever possible Avoid ifdeffery where it can be converted to IS_ENABLED(). Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-5-frederic@kernel.org --- kernel/time/tick-common.c | 4 +--- kernel/time/tick-sched.c | 14 +++++--------- 2 files changed, 6 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index e9138cd7a0f5..0084e1ae2583 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -111,15 +111,13 @@ void tick_handle_periodic(struct clock_event_device *dev) tick_periodic(cpu); -#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON) /* * The cpu might have transitioned to HIGHRES or NOHZ mode via * update_process_times() -> run_local_timers() -> * hrtimer_run_queues(). */ - if (dev->event_handler != tick_handle_periodic) + if (IS_ENABLED(CONFIG_TICK_ONESHOT) && dev->event_handler != tick_handle_periodic) return; -#endif if (!clockevent_state_oneshot(dev)) return; diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 27aaecb2e50c..4e34967edc0d 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -186,7 +186,6 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) { int cpu = smp_processor_id(); -#ifdef CONFIG_NO_HZ_COMMON /* * Check if the do_timer duty was dropped. We don't care about * concurrency: This happens only when the CPU in charge went * into a long sleep. If two CPUs happen to assign themselves to * this duty, then the jiffies update is still serialized by * jiffies_lock. * * If nohz_full is enabled, this should not happen because the * 'tick_do_timer_cpu' CPU never relinquishes.
*/ - if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) { + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && + unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) { #ifdef CONFIG_NO_HZ_FULL WARN_ON_ONCE(tick_nohz_full_running); #endif tick_do_timer_cpu = cpu; } -#endif /* Check if jiffies need an update */ if (tick_do_timer_cpu == cpu) @@ -230,7 +229,6 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) { -#ifdef CONFIG_NO_HZ_COMMON /* * When we are idle and the tick is stopped, we have to touch * the watchdog as we might not schedule for a really long @@ -239,7 +237,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) * idle" jiffy stamp so the idle accounting adjustment we do * when we go busy again does not account too many ticks. */ - if (ts->tick_stopped) { + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && ts->tick_stopped) { touch_softlockup_watchdog_sched(); if (is_idle_task(current)) ts->idle_jiffies++; @@ -250,7 +248,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) */ ts->next_tick = 0; } -#endif + update_process_times(user_mode(regs)); profile_tick(CPU_PROFILING); } @@ -1587,10 +1585,8 @@ void tick_cancel_sched_timer(int cpu) ktime_t idle_sleeptime, iowait_sleeptime; unsigned long idle_calls, idle_sleeps; -# ifdef CONFIG_HIGH_RES_TIMERS - if (ts->sched_timer.base) + if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && ts->sched_timer.base) hrtimer_cancel(&ts->sched_timer); -# endif idle_sleeptime = ts->idle_sleeptime; iowait_sleeptime = ts->iowait_sleeptime; -- cgit v1.2.3 From 3650f49bfb954119a33613672abe4ca3bbbf6243 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:54:57 +0100 Subject: tick/sched: Rename tick_nohz_stop_sched_tick() to tick_nohz_full_stop_tick() tick_nohz_stop_sched_tick() is only about NOHZ_full and not about dynticks-idle. Reflect that in the function name to avoid confusion. Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-6-frederic@kernel.org --- kernel/time/tick-sched.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 4e34967edc0d..9f75f5621965 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1040,7 +1040,7 @@ static void tick_nohz_retain_tick(struct tick_sched *ts) } #ifdef CONFIG_NO_HZ_FULL -static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu) +static void tick_nohz_full_stop_tick(struct tick_sched *ts, int cpu) { if (tick_nohz_next_event(ts, cpu)) tick_nohz_stop_tick(ts, cpu); @@ -1075,7 +1075,7 @@ static void __tick_nohz_full_update_tick(struct tick_sched *ts, int cpu = smp_processor_id(); if (can_stop_full_tick(cpu, ts)) - tick_nohz_stop_sched_tick(ts, cpu); + tick_nohz_full_stop_tick(ts, cpu); else if (ts->tick_stopped) tick_nohz_restart_sched_tick(ts, now); #endif -- cgit v1.2.3 From 60313c21c33abc08108bdd60390fa89563977e64 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:54:58 +0100 Subject: tick/sched: Don't clear ts::next_tick again in can_stop_idle_tick() The tick sched structure is already cleared from tick_cancel_sched_timer(), so there is no need to clear that field again. 
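[ Editor's note: on the IS_ENABLED() conversions in the patch above: unlike an #ifdef block, both branches remain visible to the compiler, so the disabled path is still parsed and type-checked before being folded away as dead code. A generic sketch with a hypothetical config symbol and helpers: ]

	static bool my_feature_active(int cpu);
	static void my_feature_work(int cpu);

	static void my_periodic_check(int cpu)
	{
		/*
		 * When CONFIG_MY_FEATURE (hypothetical) is disabled, the
		 * condition folds to false at compile time and the call is
		 * eliminated, but the code can no longer silently bit-rot.
		 */
		if (IS_ENABLED(CONFIG_MY_FEATURE) && my_feature_active(cpu))
			my_feature_work(cpu);
	}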
Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-7-frederic@kernel.org --- kernel/time/tick-sched.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 9f75f5621965..b17895de26b9 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1143,11 +1143,6 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) if (unlikely(!cpu_online(cpu))) { if (cpu == tick_do_timer_cpu) tick_do_timer_cpu = TICK_DO_TIMER_NONE; - /* - * Make sure the CPU doesn't get fooled by obsolete tick - * deadline if it comes back online later. - */ - ts->next_tick = 0; return false; } -- cgit v1.2.3 From 3ad6eb0683a1edbb4bb117b85d61f17a879155a1 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:54:59 +0100 Subject: tick: Start centralizing tick related CPU hotplug operations During the CPU offlining process, the various timer tick features are shut down from scattered places, sometimes from teardown callbacks on stop machine, sometimes through explicit calls, sometimes from the control CPU after the CPU died. The reason why these shutdown operations are spread around is not always clear and it makes the tick lifecycle hard to follow. The tick should be shut down in order from highest to lowest level: On stop machine from the dying CPU (high-level): 1) Hand-over the timekeeping duty (tick_handover_do_timer()) 2) Cancel the tick implementation called by the clockevent callback (tick_cancel_sched_timer()) 3) Shutdown broadcasting (tick_offline_cpu() / tick_broadcast_offline()) On stop machine from the dying CPU (low-level): 4) Shutdown clockevents drivers (CPUHP_AP_*_TIMER_STARTING states) From the control CPU after the CPU died (low-level): 5) Shutdown/unregister/cleanup clockevents for the dead CPU (tick_cleanup_dead_cpu()) Instead the current order is 2, 4 (both from CPU hotplug states), then 1 and 3 through direct calls. This layout and order don't make much sense. The operations 1, 2, 3 should be gathered together and in order. Sort this situation with creating a new TICK shut-down CPU hotplug state and start with introducing the timekeeping duty hand-over there. The state must precede hrtimers migration because the tick hrtimer will be stopped from it in a further patch. Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-8-frederic@kernel.org --- kernel/cpu.c | 8 +++++--- kernel/time/tick-common.c | 17 +++++++++++------ 2 files changed, 16 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index e6ec3ba4950b..263508073da8 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1324,8 +1324,6 @@ static int take_cpu_down(void *_param) */ cpuhp_invoke_callback_range_nofail(false, cpu, st, target); - /* Give up timekeeping duties */ - tick_handover_do_timer(); /* Remove CPU from timer broadcasting */ tick_offline_cpu(cpu); /* Park the stopper thread */ @@ -2205,7 +2203,11 @@ static struct cpuhp_step cpuhp_hp_states[] = { .startup.single = NULL, .teardown.single = hrtimers_cpu_dying, }, - + [CPUHP_AP_TICK_DYING] = { + .name = "tick:dying", + .startup.single = NULL, + .teardown.single = tick_cpu_dying, + }, /* Entry state on starting. Interrupts enabled from here on. 
Transient * state for synchronsization */ [CPUHP_AP_ONLINE] = { diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 0084e1ae2583..a89ef450fda7 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -397,15 +397,20 @@ EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control); #ifdef CONFIG_HOTPLUG_CPU /* - * Transfer the do_timer job away from a dying cpu. - * - * Called with interrupts disabled. No locking required. If - * tick_do_timer_cpu is owned by this cpu, nothing can change it. + * Stop the tick and transfer the timekeeping job away from a dying cpu. */ -void tick_handover_do_timer(void) +int tick_cpu_dying(unsigned int dying_cpu) { - if (tick_do_timer_cpu == smp_processor_id()) + /* + * If the current CPU is the timekeeper, it's the only one that + * can safely hand over its duty. Also all online CPUs are in + * stop machine, guaranteed not to be idle, therefore it's safe + * to pick any online successor. + */ + if (tick_do_timer_cpu == dying_cpu) tick_do_timer_cpu = cpumask_first(cpu_online_mask); + + return 0; } /* -- cgit v1.2.3 From f04e51220ad5cf35540f67f3ca15c8617c1f0bef Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:55:00 +0100 Subject: tick: Move tick cancellation up to CPUHP_AP_TICK_DYING The tick hrtimer is cancelled right before hrtimers are migrated. This is done from the hrtimer subsystem even though it shouldn't know about its actual users. Move instead the tick hrtimer cancellation to the relevant CPU hotplug state that aims at centralizing high level tick shutdown operations so that the related flow is easy to follow. Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-9-frederic@kernel.org --- kernel/time/hrtimer.c | 2 -- kernel/time/tick-common.c | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 95f1f351dcd9..3e95474199ac 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -2229,8 +2229,6 @@ int hrtimers_cpu_dying(unsigned int dying_cpu) int i, ncpu = cpumask_any_and(cpu_active_mask, housekeeping_cpumask(HK_TYPE_TIMER)); struct hrtimer_cpu_base *old_base, *new_base; - tick_cancel_sched_timer(dying_cpu); - old_base = this_cpu_ptr(&hrtimer_bases); new_base = &per_cpu(hrtimer_bases, ncpu); diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index a89ef450fda7..b4af8c743b73 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -410,6 +410,8 @@ int tick_cpu_dying(unsigned int dying_cpu) if (tick_do_timer_cpu == dying_cpu) tick_do_timer_cpu = cpumask_first(cpu_online_mask); + tick_cancel_sched_timer(dying_cpu); + return 0; } -- cgit v1.2.3 From ef8969bb552c1c75e997a42d3e2c576b6ed4025a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:55:01 +0100 Subject: tick: Move broadcast cancellation up to CPUHP_AP_TICK_DYING The broadcast shutdown code is executed through a random explicit call within stop machine from the outgoing CPU. However the tick broadcast is middleware between the tick callback and the clocksource, therefore it makes more sense to shut it down after the tick callback and before the clocksource drivers. Move it instead to the common tick shutdown CPU hotplug state where related operations can be ordered from highest to lowest level.
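[ Editor's note: a schematic sketch of the teardown-callback shape used by the CPUHP_AP_TICK_DYING state discussed above. The state name CPUHP_AP_EXAMPLE_DYING and the callback are hypothetical; the real state is registered statically in cpuhp_hp_states[] with tick_cpu_dying() as its teardown, as the earlier diff shows: ]

	static int my_dying_cb(unsigned int dying_cpu)
	{
		/*
		 * Runs on the outgoing CPU within stop machine, IRQs
		 * disabled: hand over the timekeeping duty, cancel the
		 * tick, then shut down broadcast, highest to lowest level.
		 */
		return 0;
	}

	/* Hypothetical cpuhp_hp_states[] entry, mirroring "tick:dying": */
	[CPUHP_AP_EXAMPLE_DYING] = {
		.name			= "example:dying",
		.startup.single		= NULL,		/* nothing on bring-up */
		.teardown.single	= my_dying_cb,	/* runs on CPU down */
	},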
Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-10-frederic@kernel.org --- kernel/cpu.c | 2 -- kernel/time/tick-common.c | 3 +++ kernel/time/tick-internal.h | 2 ++ 3 files changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index 263508073da8..5a8ad4f5ccf3 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1324,8 +1324,6 @@ static int take_cpu_down(void *_param) */ cpuhp_invoke_callback_range_nofail(false, cpu, st, target); - /* Remove CPU from timer broadcasting */ - tick_offline_cpu(cpu); /* Park the stopper thread */ stop_machine_park(cpu); return 0; diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index b4af8c743b73..522414089c0d 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -412,6 +412,9 @@ int tick_cpu_dying(unsigned int dying_cpu) tick_cancel_sched_timer(dying_cpu); + /* Remove CPU from timer broadcasting */ + tick_offline_cpu(dying_cpu); + return 0; } diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index a3243c4ac45f..5f2105e637bd 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -142,8 +142,10 @@ static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_ #endif /* !(BROADCAST && ONESHOT) */ #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU) +extern void tick_offline_cpu(unsigned int cpu); extern void tick_broadcast_offline(unsigned int cpu); #else +static inline void tick_offline_cpu(unsigned int cpu) { } static inline void tick_broadcast_offline(unsigned int cpu) { } #endif -- cgit v1.2.3 From d9b1865c86aec7c515db5718e4820106c2c12db3 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:55:02 +0100 Subject: tick: Assume the tick can't be stopped in NOHZ_MODE_INACTIVE mode The full-nohz update function checks if the nohz mode is active before proceeding. It considers one exception though: if the tick is already stopped even though the nohz mode is inactive, it still moves on in order to update/restart the tick if needed. However in order for the tick to be stopped, the nohz_mode has to be either NOHZ_MODE_LOWRES or NOHZ_MODE_HIGHRES. Therefore it doesn't make sense to test if the tick is stopped before verifying NOHZ_MODE_INACTIVE mode. Remove the needless related condition. Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-11-frederic@kernel.org --- kernel/time/tick-sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index b17895de26b9..b79f5403433b 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1086,7 +1086,7 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts) if (!tick_nohz_full_cpu(smp_processor_id())) return; - if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE) + if (ts->nohz_mode == NOHZ_MODE_INACTIVE) return; __tick_nohz_full_update_tick(ts, ktime_get()); -- cgit v1.2.3 From 3ce74f1a8566dbbc9774f85fb0ce781fe290fd32 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:55:03 +0100 Subject: tick: Move got_idle_tick away from common flags tick_nohz_idle_got_tick() is called by cpuidle_reflect() within the idle loop with interrupts enabled. 
This function modifies the struct tick_sched's bitfield "got_idle_tick". However this bitfield is stored within the same mask as other bitfields that can be modified from interrupts. Fortunately, so far it looks like the only race that can happen is that, while writing ->got_idle_tick to 0, an interrupt fires and writes the ->idle_active field to 0. It's then possible that the interrupted write to ->got_idle_tick writes the old value of ->idle_active back to 1. However if that happens, the worst possible outcome is that the time spent between that interrupt and the upcoming call to tick_nohz_idle_exit() is accounted as idle, which is a negligible quantity. Still, all the bitfield writes within this struct tick_sched's shadow mask should be IRQ-safe. Therefore move this bitfield out to its own storage to avoid further surprises. Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-12-frederic@kernel.org --- kernel/time/tick-sched.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index 35808bbb8a47..3b555e0fa937 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h @@ -61,7 +61,6 @@ struct tick_sched { unsigned int tick_stopped : 1; unsigned int idle_active : 1; unsigned int do_timer_last : 1; - unsigned int got_idle_tick : 1; /* Tick handling: jiffies stall check */ unsigned int stalled_jiffies; @@ -73,6 +72,7 @@ struct tick_sched { ktime_t next_tick; unsigned long idle_jiffies; ktime_t idle_waketime; + unsigned int got_idle_tick; /* Idle entry */ seqcount_t idle_sleeptime_seq; -- cgit v1.2.3 From a478ffb2ae234ee1ece2b84719762c54d304e2c7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:55:04 +0100 Subject: tick: Move individual bit features to debuggable mask accesses The individual bitfields of struct tick_sched must be modified from IRQs disabled places, otherwise local modifications can race due to them sharing the same memory storage. The recent move of the "got_idle_tick" bitfield to its own storage shows that the use of these bitfields, as pretty as they look, can be just as error prone. To avoid future issues of this kind and to make sure that those bitfields are safely accessed, move those flags to an explicit mask along with a mutator function performing the basic IRQs disabled sanity check.
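[ Editor's note: the mutator pattern described above, reduced to a sketch with hypothetical names; the patch below implements it as tick_sched_flag_set()/_clear()/_test() with lockdep_assert_irqs_disabled() as the sanity check: ]

	#define MY_FLAG_BUSY	BIT(0)
	#define MY_FLAG_STOPPED	BIT(1)

	struct my_state {
		unsigned long flags;	/* replaces racy adjacent bitfields */
	};

	static inline void my_flag_set(struct my_state *s, unsigned long flag)
	{
		lockdep_assert_irqs_disabled();	/* writers must run with IRQs off */
		s->flags |= flag;
	}

	static inline int my_flag_test(struct my_state *s, unsigned long flag)
	{
		return !!(s->flags & flag);	/* lockless readers stay allowed */
	}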
Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-13-frederic@kernel.org --- kernel/time/tick-sched.c | 88 ++++++++++++++++++++++++++++++------------------ kernel/time/tick-sched.h | 23 ++++++++----- kernel/time/timer_list.c | 5 ++- 3 files changed, 73 insertions(+), 43 deletions(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index b79f5403433b..4aa7ce04a72c 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -180,6 +180,26 @@ static ktime_t tick_init_jiffy_update(void) return period; } +static inline int tick_sched_flag_test(struct tick_sched *ts, + unsigned long flag) +{ + return !!(ts->flags & flag); +} + +static inline void tick_sched_flag_set(struct tick_sched *ts, + unsigned long flag) +{ + lockdep_assert_irqs_disabled(); + ts->flags |= flag; +} + +static inline void tick_sched_flag_clear(struct tick_sched *ts, + unsigned long flag) +{ + lockdep_assert_irqs_disabled(); + ts->flags &= ~flag; +} + #define MAX_STALLED_JIFFIES 5 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) @@ -223,7 +243,7 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) } } - if (ts->inidle) + if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) ts->got_idle_tick = 1; } @@ -237,7 +257,8 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs) * idle" jiffy stamp so the idle accounting adjustment we do * when we go busy again does not account too many ticks. */ - if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && ts->tick_stopped) { + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && + tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { touch_softlockup_watchdog_sched(); if (is_idle_task(current)) ts->idle_jiffies++; @@ -279,7 +300,7 @@ static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer) * - to the idle task if in dynticks-idle * - to IRQ exit if in full-dynticks. 
*/ - if (unlikely(ts->tick_stopped)) + if (unlikely(tick_sched_flag_test(ts, TS_FLAG_STOPPED))) return HRTIMER_NORESTART; hrtimer_forward(timer, now, TICK_NSEC); @@ -559,7 +580,7 @@ void __tick_nohz_task_switch(void) ts = this_cpu_ptr(&tick_cpu_sched); - if (ts->tick_stopped) { + if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { if (atomic_read(¤t->tick_dep_mask) || atomic_read(¤t->signal->tick_dep_mask)) tick_nohz_full_kick(); @@ -656,14 +677,14 @@ bool tick_nohz_tick_stopped(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); - return ts->tick_stopped; + return tick_sched_flag_test(ts, TS_FLAG_STOPPED); } bool tick_nohz_tick_stopped_cpu(int cpu) { struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu); - return ts->tick_stopped; + return tick_sched_flag_test(ts, TS_FLAG_STOPPED); } /** @@ -693,7 +714,7 @@ static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now) { ktime_t delta; - if (WARN_ON_ONCE(!ts->idle_active)) + if (WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE))) return; delta = ktime_sub(now, ts->idle_entrytime); @@ -705,7 +726,7 @@ static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now) ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); ts->idle_entrytime = now; - ts->idle_active = 0; + tick_sched_flag_clear(ts, TS_FLAG_IDLE_ACTIVE); write_seqcount_end(&ts->idle_sleeptime_seq); sched_clock_idle_wakeup_event(); @@ -715,7 +736,7 @@ static void tick_nohz_start_idle(struct tick_sched *ts) { write_seqcount_begin(&ts->idle_sleeptime_seq); ts->idle_entrytime = ktime_get(); - ts->idle_active = 1; + tick_sched_flag_set(ts, TS_FLAG_IDLE_ACTIVE); write_seqcount_end(&ts->idle_sleeptime_seq); sched_clock_idle_sleep_event(); @@ -737,7 +758,7 @@ static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime, do { seq = read_seqcount_begin(&ts->idle_sleeptime_seq); - if (ts->idle_active && compute_delta) { + if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE) && compute_delta) { ktime_t delta = ktime_sub(now, ts->idle_entrytime); idle = ktime_add(*sleeptime, delta); @@ -905,7 +926,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) * We've not stopped the tick yet, and there's a timer in the * next period, so no point in stopping it either, bail. */ - if (!ts->tick_stopped) { + if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { ts->timer_expires = 0; goto out; } @@ -918,7 +939,8 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) */ delta = timekeeping_max_deferment(); if (cpu != tick_do_timer_cpu && - (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last)) + (tick_do_timer_cpu != TICK_DO_TIMER_NONE || + !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST))) delta = KTIME_MAX; /* Calculate the next expiry time */ @@ -938,7 +960,7 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); unsigned long basejiff = ts->last_jiffies; u64 basemono = ts->timer_expires_base; - bool timer_idle = ts->tick_stopped; + bool timer_idle = tick_sched_flag_test(ts, TS_FLAG_STOPPED); u64 expires; /* Make sure we won't be trying to stop it twice in a row. 
*/ @@ -978,13 +1000,13 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) */ if (cpu == tick_do_timer_cpu) { tick_do_timer_cpu = TICK_DO_TIMER_NONE; - ts->do_timer_last = 1; + tick_sched_flag_set(ts, TS_FLAG_DO_TIMER_LAST); } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { - ts->do_timer_last = 0; + tick_sched_flag_clear(ts, TS_FLAG_DO_TIMER_LAST); } /* Skip reprogram of event if it's not changed */ - if (ts->tick_stopped && (expires == ts->next_tick)) { + if (tick_sched_flag_test(ts, TS_FLAG_STOPPED) && (expires == ts->next_tick)) { /* Sanity check: make sure clockevent is actually programmed */ if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer)) return; @@ -1002,12 +1024,12 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) * call we save the current tick time, so we can restart the * scheduler tick in tick_nohz_restart_sched_tick(). */ - if (!ts->tick_stopped) { + if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { calc_load_nohz_start(); quiet_vmstat(); ts->last_tick = hrtimer_get_expires(&ts->sched_timer); - ts->tick_stopped = 1; + tick_sched_flag_set(ts, TS_FLAG_STOPPED); trace_tick_stop(1, TICK_DEP_MASK_NONE); } @@ -1064,7 +1086,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) touch_softlockup_watchdog_sched(); /* Cancel the scheduled timer and restore the tick: */ - ts->tick_stopped = 0; + tick_sched_flag_clear(ts, TS_FLAG_STOPPED); tick_nohz_restart(ts, now); } @@ -1076,7 +1098,7 @@ static void __tick_nohz_full_update_tick(struct tick_sched *ts, if (can_stop_full_tick(cpu, ts)) tick_nohz_full_stop_tick(ts, cpu); - else if (ts->tick_stopped) + else if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) tick_nohz_restart_sched_tick(ts, now); #endif } @@ -1196,14 +1218,14 @@ void tick_nohz_idle_stop_tick(void) ts->idle_calls++; if (expires > 0LL) { - int was_stopped = ts->tick_stopped; + int was_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED); tick_nohz_stop_tick(ts, cpu); ts->idle_sleeps++; ts->idle_expires = expires; - if (!was_stopped && ts->tick_stopped) { + if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { ts->idle_jiffies = ts->last_jiffies; nohz_balance_enter_idle(cpu); } @@ -1234,7 +1256,7 @@ void tick_nohz_idle_enter(void) WARN_ON_ONCE(ts->timer_expires_base); - ts->inidle = 1; + tick_sched_flag_set(ts, TS_FLAG_INIDLE); tick_nohz_start_idle(ts); local_irq_enable(); @@ -1263,7 +1285,7 @@ void tick_nohz_irq_exit(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); - if (ts->inidle) + if (tick_sched_flag_test(ts, TS_FLAG_INIDLE)) tick_nohz_start_idle(ts); else tick_nohz_full_update_tick(ts); @@ -1317,7 +1339,7 @@ ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) ktime_t now = ts->idle_entrytime; ktime_t next_event; - WARN_ON_ONCE(!ts->inidle); + WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE)); *delta_next = ktime_sub(dev->next_event, now); @@ -1389,7 +1411,7 @@ void tick_nohz_idle_restart_tick(void) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); - if (ts->tick_stopped) { + if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) { ktime_t now = ktime_get(); tick_nohz_restart_sched_tick(ts, now); tick_nohz_account_idle_time(ts, now); @@ -1430,12 +1452,12 @@ void tick_nohz_idle_exit(void) local_irq_disable(); - WARN_ON_ONCE(!ts->inidle); + WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE)); WARN_ON_ONCE(ts->timer_expires_base); - ts->inidle = 0; - idle_active = ts->idle_active; - tick_stopped = ts->tick_stopped; + tick_sched_flag_clear(ts, 
TS_FLAG_INIDLE); + idle_active = tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE); + tick_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED); if (idle_active || tick_stopped) now = ktime_get(); @@ -1498,10 +1520,10 @@ static inline void tick_nohz_irq_enter(void) struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); ktime_t now; - if (!ts->idle_active && !ts->tick_stopped) + if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED | TS_FLAG_IDLE_ACTIVE)) return; now = ktime_get(); - if (ts->idle_active) + if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE)) tick_nohz_stop_idle(ts, now); /* * If all CPUs are idle we may need to update a stale jiffies value. @@ -1510,7 +1532,7 @@ static inline void tick_nohz_irq_enter(void) * rare case (typically stop machine). So we must make sure we have a * last resort. */ - if (ts->tick_stopped) + if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) tick_nohz_update_jiffies(now); } diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index 3b555e0fa937..07a4c0144c47 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h @@ -20,14 +20,22 @@ enum tick_nohz_mode { NOHZ_MODE_HIGHRES, }; +/* The CPU is in the tick idle mode */ +#define TS_FLAG_INIDLE BIT(0) +/* The idle tick has been stopped */ +#define TS_FLAG_STOPPED BIT(1) +/* + * Indicator that the CPU is actively in the tick idle mode; + * it is reset during irq handling phases. + */ +#define TS_FLAG_IDLE_ACTIVE BIT(2) +/* CPU was the last one doing do_timer before going idle */ +#define TS_FLAG_DO_TIMER_LAST BIT(3) + /** * struct tick_sched - sched tick emulation and no idle tick control/stats * - * @inidle: Indicator that the CPU is in the tick idle mode - * @tick_stopped: Indicator that the idle tick has been stopped - * @idle_active: Indicator that the CPU is actively in the tick idle mode; - * it is reset during irq handling phases. - * @do_timer_last: CPU was the last one doing do_timer before going idle + * @flags: State flags gathering the TS_FLAG_* features * @got_idle_tick: Tick timer function has run with @inidle set * @stalled_jiffies: Number of stalled jiffies detected across ticks * @last_tick_jiffies: Value of jiffies seen on last tick @@ -57,10 +65,7 @@ enum tick_nohz_mode { */ struct tick_sched { /* Common flags */ - unsigned int inidle : 1; - unsigned int tick_stopped : 1; - unsigned int idle_active : 1; - unsigned int do_timer_last : 1; + unsigned long flags; /* Tick handling: jiffies stall check */ unsigned int stalled_jiffies; diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index ed7d6ad694fb..38f81d836fc5 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -147,11 +147,14 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now) # define P_ns(x) \ SEQ_printf(m, " .%-15s: %Lu nsecs\n", #x, \ (unsigned long long)(ktime_to_ns(ts->x))) +# define P_flag(x, f) \ + SEQ_printf(m, " .%-15s: %d\n", #x, !!(ts->flags & (f))) + { struct tick_sched *ts = tick_get_tick_sched(cpu); P(nohz_mode); P_ns(last_tick); - P(tick_stopped); + P_flag(tick_stopped, TS_FLAG_STOPPED); P(idle_jiffies); P(idle_calls); P(idle_sleeps); -- cgit v1.2.3 From 7988e5ae2be70b110db9d4b8ec163bd42e67d3be Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:55:05 +0100 Subject: tick: Split nohz and highres features from nohz_mode The nohz mode field tells about low resolution nohz mode or high resolution nohz mode but it doesn't tell about high resolution non-nohz mode. 
In order to retrieve the latter state, tick_cancel_sched_timer() must fiddle with struct hrtimer's internals to guess if the tick has been initialized in high resolution. Move instead the nohz mode field information into the tick flags and provide two new bits: one to know if the tick is in nohz mode and another one to know if the tick is in high resolution. The combination of those two flags provides all the needed informations to determine which of the three tick modes is running. Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-14-frederic@kernel.org --- kernel/time/hrtimer.c | 2 +- kernel/time/tick-sched.c | 32 +++++++++++++++++--------------- kernel/time/tick-sched.h | 13 +++++-------- kernel/time/timer_list.c | 5 +++-- 4 files changed, 26 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 3e95474199ac..70625dff62ce 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -747,7 +747,7 @@ static void hrtimer_switch_to_hres(void) base->hres_active = 1; hrtimer_resolution = HIGH_RES_NSEC; - tick_setup_sched_timer(NOHZ_MODE_HIGHRES); + tick_setup_sched_timer(true); /* "Retrigger" the interrupt to get things going */ retrigger_next_event(NULL); } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 4aa7ce04a72c..dcb9f0394182 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -831,7 +831,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) /* Forward the time to expire in the future */ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); - if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { + if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) { hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); } else { @@ -1040,14 +1040,14 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) * the tick timer. */ if (unlikely(expires == KTIME_MAX)) { - if (ts->nohz_mode == NOHZ_MODE_HIGHRES) + if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) hrtimer_cancel(&ts->sched_timer); else tick_program_event(KTIME_MAX, 1); return; } - if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { + if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) { hrtimer_start(&ts->sched_timer, expires, HRTIMER_MODE_ABS_PINNED_HARD); } else { @@ -1108,7 +1108,7 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts) if (!tick_nohz_full_cpu(smp_processor_id())) return; - if (ts->nohz_mode == NOHZ_MODE_INACTIVE) + if (!tick_sched_flag_test(ts, TS_FLAG_NOHZ)) return; __tick_nohz_full_update_tick(ts, ktime_get()); @@ -1168,7 +1168,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) return false; } - if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) + if (unlikely(!tick_sched_flag_test(ts, TS_FLAG_NOHZ))) return false; if (need_resched()) @@ -1487,11 +1487,11 @@ static void tick_nohz_lowres_handler(struct clock_event_device *dev) tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); } -static inline void tick_nohz_activate(struct tick_sched *ts, int mode) +static inline void tick_nohz_activate(struct tick_sched *ts) { if (!tick_nohz_enabled) return; - ts->nohz_mode = mode; + tick_sched_flag_set(ts, TS_FLAG_NOHZ); /* One update is enough */ if (!test_and_set_bit(0, &tick_nohz_active)) timers_update_nohz(); @@ -1512,7 +1512,7 @@ static void tick_nohz_switch_to_nohz(void) * Recycle the hrtimer in 'ts', so we can share the * highres code. 
*/ - tick_setup_sched_timer(NOHZ_MODE_LOWRES); + tick_setup_sched_timer(false); } static inline void tick_nohz_irq_enter(void) @@ -1540,7 +1540,7 @@ static inline void tick_nohz_irq_enter(void) static inline void tick_nohz_switch_to_nohz(void) { } static inline void tick_nohz_irq_enter(void) { } -static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { } +static inline void tick_nohz_activate(struct tick_sched *ts) { } #endif /* CONFIG_NO_HZ_COMMON */ @@ -1567,15 +1567,17 @@ early_param("skew_tick", skew_tick); * tick_setup_sched_timer - setup the tick emulation timer * @mode: tick_nohz_mode to setup for */ -void tick_setup_sched_timer(int mode) +void tick_setup_sched_timer(bool hrtimer) { struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); /* Emulate tick processing via per-CPU hrtimers: */ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); - if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && mode == NOHZ_MODE_HIGHRES) + if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && hrtimer) { + tick_sched_flag_set(ts, TS_FLAG_HIGHRES); ts->sched_timer.function = tick_nohz_handler; + } /* Get the next period (per-CPU) */ hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); @@ -1589,11 +1591,11 @@ void tick_setup_sched_timer(int mode) } hrtimer_forward_now(&ts->sched_timer, TICK_NSEC); - if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && mode == NOHZ_MODE_HIGHRES) + if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && hrtimer) hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); else tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); - tick_nohz_activate(ts, mode); + tick_nohz_activate(ts); } void tick_cancel_sched_timer(int cpu) @@ -1602,7 +1604,7 @@ void tick_cancel_sched_timer(int cpu) ktime_t idle_sleeptime, iowait_sleeptime; unsigned long idle_calls, idle_sleeps; - if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && ts->sched_timer.base) + if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) hrtimer_cancel(&ts->sched_timer); idle_sleeptime = ts->idle_sleeptime; @@ -1652,7 +1654,7 @@ int tick_check_oneshot_change(int allow_nohz) if (!test_and_clear_bit(0, &ts->check_clocks)) return 0; - if (ts->nohz_mode != NOHZ_MODE_INACTIVE) + if (tick_sched_flag_test(ts, TS_FLAG_NOHZ)) return 0; if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available()) diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index 07a4c0144c47..bbe72a078985 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h @@ -14,12 +14,6 @@ struct tick_device { enum tick_device_mode mode; }; -enum tick_nohz_mode { - NOHZ_MODE_INACTIVE, - NOHZ_MODE_LOWRES, - NOHZ_MODE_HIGHRES, -}; - /* The CPU is in the tick idle mode */ #define TS_FLAG_INIDLE BIT(0) /* The idle tick has been stopped */ @@ -31,6 +25,10 @@ enum tick_nohz_mode { #define TS_FLAG_IDLE_ACTIVE BIT(2) /* CPU was the last one doing do_timer before going idle */ #define TS_FLAG_DO_TIMER_LAST BIT(3) +/* NO_HZ is enabled */ +#define TS_FLAG_NOHZ BIT(4) +/* High resolution tick mode */ +#define TS_FLAG_HIGHRES BIT(5) /** * struct tick_sched - sched tick emulation and no idle tick control/stats @@ -84,7 +82,6 @@ struct tick_sched { ktime_t idle_entrytime; /* Tick stop */ - enum tick_nohz_mode nohz_mode; unsigned long last_jiffies; u64 timer_expires_base; u64 timer_expires; @@ -107,7 +104,7 @@ struct tick_sched { extern struct tick_sched *tick_get_tick_sched(int cpu); -extern void tick_setup_sched_timer(int mode); +extern void tick_setup_sched_timer(bool hrtimer); #if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS 
extern void tick_cancel_sched_timer(int cpu); #else diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 38f81d836fc5..1c311c46da50 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -152,7 +152,8 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now) { struct tick_sched *ts = tick_get_tick_sched(cpu); - P(nohz_mode); + P_flag(nohz, TS_FLAG_NOHZ); + P_flag(highres, TS_FLAG_HIGHRES); P_ns(last_tick); P_flag(tick_stopped, TS_FLAG_STOPPED); P(idle_jiffies); @@ -259,7 +260,7 @@ static void timer_list_show_tickdevices_header(struct seq_file *m) static inline void timer_list_header(struct seq_file *m, u64 now) { - SEQ_printf(m, "Timer List Version: v0.9\n"); + SEQ_printf(m, "Timer List Version: v0.10\n"); SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES); SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now); SEQ_printf(m, "\n"); -- cgit v1.2.3 From 3f69d04e146c6e14ccdd4e7b37d93f789229202a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:55:06 +0100 Subject: tick: Shut down low-res tick from dying CPU The timekeeping duty is handed over from the outgoing CPU within stop machine. This works well if CONFIG_NO_HZ_COMMON=n or the tick is in high-res mode. However in low-res dynticks mode, the tick isn't cancelled until the clockevent is shut down, which can happen later. The tick may therefore fire again once IRQs are re-enabled on stop machine and until IRQs are disabled for good upon the last call to idle. That's so many opportunities for a timekeeper to go idle and the outgoing CPU to take over that duty. This is why tick_nohz_idle_stop_tick() is called one last time on idle if the CPU is seen offline: so that the timekeeping duty is handed over again in case the CPU has re-taken the duty. This means there are two timekeeping handovers on CPU down hotplug with different undocumented constraints and purposes: 1) A handover on stop machine for !dynticks || highres. All online CPUs are guaranteed to be non-idle and the timekeeping duty can be safely handed-over. The hrtimer tick is cancelled so it is guaranteed that in dynticks mode the outgoing CPU won't take again the duty. 2) A handover on last idle call for dynticks && lowres. Setting the duty to TICK_DO_TIMER_NONE makes sure that a CPU will take over the timekeeping. Prepare for consolidating the handover to a single place (the first one) with shutting down the low-res tick as well from tick_cancel_sched_timer() as well. This will simplify the handover and unify the tick cancellation between high-res and low-res. 
Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-14-frederic@kernel.org --- kernel/time/hrtimer.c | 2 +- kernel/time/tick-sched.c | 32 +++++++++++++++++--------------- kernel/time/tick-sched.h | 13 +++++-------- kernel/time/timer_list.c | 5 +++-- 4 files changed, 26 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 3e95474199ac..70625dff62ce 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -747,7 +747,7 @@ static void hrtimer_switch_to_hres(void) base->hres_active = 1; hrtimer_resolution = HIGH_RES_NSEC; - tick_setup_sched_timer(NOHZ_MODE_HIGHRES); + tick_setup_sched_timer(true); /* "Retrigger" the interrupt to get things going */ retrigger_next_event(NULL); } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 4aa7ce04a72c..dcb9f0394182 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -831,7 +831,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) /* Forward the time to expire in the future */ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); - if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { + if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) { hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); } else { @@ -1040,14 +1040,14 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) * the tick timer. */ if (unlikely(expires == KTIME_MAX)) { - if (ts->nohz_mode == NOHZ_MODE_HIGHRES) + if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) hrtimer_cancel(&ts->sched_timer); else tick_program_event(KTIME_MAX, 1); return; } - if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { + if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) { hrtimer_start(&ts->sched_timer, expires, HRTIMER_MODE_ABS_PINNED_HARD); } else { @@ -1108,7 +1108,7 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts) if (!tick_nohz_full_cpu(smp_processor_id())) return; - if (ts->nohz_mode == NOHZ_MODE_INACTIVE) + if (!tick_sched_flag_test(ts, TS_FLAG_NOHZ)) return; @@ -1168,7 +1168,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) return false; } - if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) + if (unlikely(!tick_sched_flag_test(ts, TS_FLAG_NOHZ))) return false; if (need_resched()) @@ -1487,11 +1487,11 @@ static void tick_nohz_lowres_handler(struct clock_event_device *dev) tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); } -static inline void tick_nohz_activate(struct tick_sched *ts, int mode) +static inline void tick_nohz_activate(struct tick_sched *ts) { if (!tick_nohz_enabled) return; - ts->nohz_mode = mode; + tick_sched_flag_set(ts, TS_FLAG_NOHZ); /* One update is enough */ if (!test_and_set_bit(0, &tick_nohz_active)) timers_update_nohz(); @@ -1512,7 +1512,7 @@ static void tick_nohz_switch_to_nohz(void) * Recycle the hrtimer in 'ts', so we can share the * highres code.
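[ Editor's note: a condensed, hypothetical view of the two handover paths described in the "Shut down low-res tick from dying CPU" patch below. Path 1 corresponds to tick_cpu_dying() from the earlier patches; path 2 is the idle-path handover that the following patches remove: ]

	/* Path 1: on the dying CPU, inside stop machine (!dynticks || highres). */
	if (tick_do_timer_cpu == dying_cpu)
		tick_do_timer_cpu = cpumask_first(cpu_online_mask); /* any online successor */

	/* Path 2: last idle call (dynticks && lowres). */
	if (cpu == tick_do_timer_cpu)
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;	/* next ticking CPU takes over */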
+ */ + if (tick_sched_flag_test(ts, TS_FLAG_NOHZ)) + dev->event_handler = clockevents_handle_noop; idle_sleeptime = ts->idle_sleeptime; iowait_sleeptime = ts->iowait_sleeptime; diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index bbe72a078985..58d8d1c49dd3 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h @@ -106,9 +106,9 @@ extern struct tick_sched *tick_get_tick_sched(int cpu); extern void tick_setup_sched_timer(bool hrtimer); #if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS -extern void tick_cancel_sched_timer(int cpu); +extern void tick_sched_timer_dying(int cpu); #else -static inline void tick_cancel_sched_timer(int cpu) { } +static inline void tick_sched_timer_dying(int cpu) { } #endif #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST -- cgit v1.2.3 From 500f8f9bced86f0c0f2482773bd64a1b7ec9c4e1 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:55:07 +0100 Subject: tick: Assume timekeeping is correctly handed over upon last offline idle call The timekeeping duty is handed over from the outgoing CPU on stop machine, then the oneshot tick is stopped right after. Therefore it's guaranteed that the current CPU isn't the timekeeper upon its last call to idle. Besides, calling tick_nohz_idle_stop_tick() while the dying CPU goes into idle suggests that the tick is going to be stopped while it is actually stopped already from the appropriate CPU hotplug state. Remove the confusing call and the obsolete case handling and convert it to a sanity check that verifies the above assumption. Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-16-frederic@kernel.org --- kernel/cpu.c | 1 + kernel/sched/idle.c | 1 - kernel/time/tick-common.c | 4 ++++ kernel/time/tick-sched.c | 13 +------------ 4 files changed, 6 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index 5a8ad4f5ccf3..7e84a7b0675e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1399,6 +1399,7 @@ void cpuhp_report_idle_dead(void) struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); BUG_ON(st->state != CPUHP_AP_OFFLINE); + tick_assert_timekeeping_handover(); rcutree_report_cpu_dead(); st->state = CPUHP_AP_IDLE_DEAD; /* diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 31231925f1ec..b15d40cad7ea 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -291,7 +291,6 @@ static void do_idle(void) local_irq_disable(); if (cpu_is_offline(cpu)) { - tick_nohz_idle_stop_tick(); cpuhp_report_idle_dead(); arch_cpu_idle_dead(); } diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 9cd09eea06d6..fb0fdec8719a 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -396,6 +396,10 @@ int tick_broadcast_oneshot_control(enum tick_broadcast_state state) EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control); #ifdef CONFIG_HOTPLUG_CPU +void tick_assert_timekeeping_handover(void) +{ + WARN_ON_ONCE(tick_do_timer_cpu == smp_processor_id()); +} /* * Stop the tick and transfer the timekeeping job away from a dying cpu. 
 */ diff --git a/kernel/time/tick-sched.c index 89d16b8ea2c4..269e21590df5 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1160,18 +1160,7 @@ static bool report_idle_softirq(void) static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) { - /* - * If this CPU is offline and it is the one which updates - * jiffies, then give up the assignment and let it be taken by - * the CPU which runs the tick timer next. If we don't drop - * this here, the jiffies might be stale and do_timer() never - * gets invoked. - */ - if (unlikely(!cpu_online(cpu))) { - if (cpu == tick_do_timer_cpu) - tick_do_timer_cpu = TICK_DO_TIMER_NONE; - return false; - } + WARN_ON_ONCE(cpu_is_offline(cpu)); if (unlikely(!tick_sched_flag_test(ts, TS_FLAG_NOHZ))) return false; -- cgit v1.2.3 From 19b344a91ff79f65be377825635bcf5f5bb8df67 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 25 Feb 2024 23:55:08 +0100 Subject: timers: Assert no next dyntick timer look-up while CPU is offline The next timer (re-)evaluation, with the purpose of entering/updating the dyntick mode, can happen from 3 sites and none of them are relevant while the CPU is offline: 1) The idle loop: a) From the quick check helping the cpuidle governor to heuristically predict the best C-state. b) While stopping the tick. But if the CPU is offline, the tick has been cancelled and there is consequently no need to further stop the tick. 2) Remote expiry: when a CPU remotely expires global timers on behalf of another CPU, the latter target's next timer is re-evaluated afterwards. However remote expiry doesn't happen on offline CPUs. 3) IRQ exit: in nohz_full mode, the tick is (re-)evaluated on IRQ exit. But full dynticks is disabled on offline CPUs. Therefore it is safe to assume that no next dyntick timer lookup can be performed on offline CPUs. Assert this expectation to report any surprise. Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240225225508.11587-17-frederic@kernel.org --- kernel/time/timer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 4f4930da6448..e69e75d3858c 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -2233,10 +2233,10 @@ static inline u64 __get_next_timer_interrupt(unsigned long basej, u64 basem, bool idle_is_possible; /* - * Pretend that there is no timer pending if the cpu is offline. - * Possible pending timers will be migrated later to an active cpu. + * When the CPU is offline, the tick is cancelled and nothing is supposed + * to try to stop it. */ - if (cpu_is_offline(smp_processor_id())) { + if (WARN_ON_ONCE(cpu_is_offline(smp_processor_id()))) { if (idle) *idle = true; return tevt.local; -- cgit v1.2.3 From 4c8a49854130da0117a0fdb858551824919a2389 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 27 Feb 2024 09:58:15 +0100 Subject: smp: Avoid 'setup_max_cpus' namespace collision/shadowing bringup_nonboot_cpus() gets passed the 'setup_max_cpus' variable in init/main.c - which is also the name of the function's parameter, shadowing it. To reduce confusion and to allow the 'setup_max_cpus' value to be #defined in the header, use the 'max_cpus' name for the function parameter.
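[ Editor's note: an illustrative sketch of the shadowing problem; only the
  names mirror the commit, the declarations are schematic:

	unsigned int setup_max_cpus;		/* global, init/main.c */

	/* Before: the parameter hides the global of the same name inside the
	 * function, and a "#define setup_max_cpus 0" in a header would break
	 * this declaration outright. */
	void bringup_nonboot_cpus(unsigned int setup_max_cpus);

	/* After: distinct names, no shadowing, #define-safe. */
	void bringup_nonboot_cpus(unsigned int max_cpus);
]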
Signed-off-by: Ingo Molnar Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org --- kernel/cpu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index e6ec3ba4950b..023ddf8d625a 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1909,14 +1909,14 @@ static bool __init cpuhp_bringup_cpus_parallel(unsigned int ncpus) static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; } #endif /* CONFIG_HOTPLUG_PARALLEL */ -void __init bringup_nonboot_cpus(unsigned int setup_max_cpus) +void __init bringup_nonboot_cpus(unsigned int max_cpus) { /* Try parallel bringup optimization if enabled */ - if (cpuhp_bringup_cpus_parallel(setup_max_cpus)) + if (cpuhp_bringup_cpus_parallel(max_cpus)) return; /* Full per CPU serialized bringup */ - cpuhp_bringup_mask(cpu_present_mask, setup_max_cpus, CPUHP_ONLINE); + cpuhp_bringup_mask(cpu_present_mask, max_cpus, CPUHP_ONLINE); } #ifdef CONFIG_PM_SLEEP_SMP -- cgit v1.2.3 From 133e267ef4a26d19c93996a874714e9f3f8c70aa Mon Sep 17 00:00:00 2001 From: David Gow Date: Wed, 21 Feb 2024 17:27:17 +0800 Subject: time: test: Fix incorrect format specifier 'days' is an s64 (from div_s64), and so should use a %lld specifier. This was found by extending KUnit's assertion macros to use gcc's __printf attribute. Fixes: 276010551664 ("time: Improve performance of time64_to_tm()") Signed-off-by: David Gow Tested-by: Guenter Roeck Reviewed-by: Justin Stitt Signed-off-by: Shuah Khan --- kernel/time/time_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/time_test.c b/kernel/time/time_test.c index ca058c8af6ba..3e5d422dd15c 100644 --- a/kernel/time/time_test.c +++ b/kernel/time/time_test.c @@ -73,7 +73,7 @@ static void time64_to_tm_test_date_range(struct kunit *test) days = div_s64(secs, 86400); - #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %ld", \ + #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %lld", \ year, month, mdday, yday, days KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG); -- cgit v1.2.3 From ca4bc2e07b716509fd279d2b449bb42f4263a9c8 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 22 Feb 2024 10:05:37 -0500 Subject: locking/qspinlock: Fix 'wait_early' set but not used warning When CONFIG_LOCK_EVENT_COUNTS is off, the wait_early variable will be set but not used. This is expected. Recent compilers will not generate wait_early code in this case. Add the __maybe_unused attribute to wait_early to suppress this W=1 warning.
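[ Editor's note: the warning pattern in minimal form; the config symbol and
  helpers here are made up for illustration:

	extern bool do_slow_path(void);
	extern void count_event(void);

	static void demo(void)
	{
		bool __maybe_unused hit;	/* only read when CONFIG_DEMO_STATS=y */

		hit = do_slow_path();
	#ifdef CONFIG_DEMO_STATS
		if (hit)
			count_event();
	#endif
		/* Without __maybe_unused, a W=1 build with CONFIG_DEMO_STATS=n
		 * warns that 'hit' is set but not used. */
	}
]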
Reported-by: kernel test robot Signed-off-by: Waiman Long Signed-off-by: Ingo Molnar Reviewed-by: Boqun Feng Cc: Linus Torvalds Link: https://lore.kernel.org/r/20240222150540.79981-2-longman@redhat.com Closes: https://lore.kernel.org/oe-kbuild-all/202312260422.f4pK3f9m-lkp@intel.com/ --- kernel/locking/qspinlock_paravirt.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 6a0184e9c234..ae2b12f68b90 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -294,8 +294,8 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev) { struct pv_node *pn = (struct pv_node *)node; struct pv_node *pp = (struct pv_node *)prev; + bool __maybe_unused wait_early; int loop; - bool wait_early; for (;;) { for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) { -- cgit v1.2.3 From d566c78659eccf085f905fd266fc461de92eaa8f Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 22 Feb 2024 10:05:39 -0500 Subject: locking/rwsem: Clarify that RWSEM_READER_OWNED is just a hint Clarify in the comments that the RWSEM_READER_OWNED bit in the owner field is just a hint, not an authoritative state of the rwsem. Signed-off-by: Waiman Long Signed-off-by: Ingo Molnar Reviewed-by: Boqun Feng Link: https://lore.kernel.org/r/20240222150540.79981-4-longman@redhat.com --- kernel/locking/rwsem.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 2340b6d90ec6..c6d17aee4209 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -35,7 +35,7 @@ /* * The least significant 2 bits of the owner value has the following * meanings when set. - * - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers + * - Bit 0: RWSEM_READER_OWNED - rwsem may be owned by readers (just a hint) * - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock * * When the rwsem is reader-owned and a spinning writer has timed out, @@ -1002,8 +1002,8 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat /* * To prevent a constant stream of readers from starving a sleeping - * waiter, don't attempt optimistic lock stealing if the lock is - * currently owned by readers. + * writer, don't attempt optimistic lock stealing if the lock is + * very likely owned by readers. */ if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) && (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED)) -- cgit v1.2.3 From f3e3620f1a97fcd02a5f3606fa63888dbcffd82c Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 8 Nov 2023 13:53:22 -0800 Subject: locking/percpu-rwsem: Trigger contention tracepoints only if contended We mistakenly always fire lock contention tracepoints in the writer path, while it should be conditional on the trylock result. 
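[ Editor's note: the fix below is an instance of a common idiom: remember
  whether the slow path was actually taken and emit the paired end event
  only then. Schematically, with a hypothetical lock API:

	bool contended = false;

	if (!my_trylock(lock)) {		/* fast path failed ... */
		trace_contention_begin(lock, flags);
		my_lock_slowpath(lock);		/* ... so contention is real */
		contended = true;
	}
	/* ... critical section ... */
	if (contended)
		trace_contention_end(lock, 0);
]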
Signed-off-by: Namhyung Kim Signed-off-by: Ingo Molnar Reviewed-by: Waiman Long Link: https://lore.kernel.org/r/20231108215322.2845536-1-namhyung@kernel.org --- kernel/locking/percpu-rwsem.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c index 185bd1c906b0..6083883c4fe0 100644 --- a/kernel/locking/percpu-rwsem.c +++ b/kernel/locking/percpu-rwsem.c @@ -223,9 +223,10 @@ static bool readers_active_check(struct percpu_rw_semaphore *sem) void __sched percpu_down_write(struct percpu_rw_semaphore *sem) { + bool contended = false; + might_sleep(); rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); - trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_WRITE); /* Notify readers to take the slow path. */ rcu_sync_enter(&sem->rss); @@ -234,8 +235,11 @@ void __sched percpu_down_write(struct percpu_rw_semaphore *sem) * Try set sem->block; this provides writer-writer exclusion. * Having sem->block set makes new readers block. */ - if (!__percpu_down_write_trylock(sem)) + if (!__percpu_down_write_trylock(sem)) { + trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_WRITE); percpu_rwsem_wait(sem, /* .reader = */ false); + contended = true; + } /* smp_mb() implied by __percpu_down_write_trylock() on success -- D matches A */ @@ -247,7 +251,8 @@ void __sched percpu_down_write(struct percpu_rw_semaphore *sem) /* Wait for all active readers to complete. */ rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE); - trace_contention_end(sem, 0); + if (contended) + trace_contention_end(sem, 0); } EXPORT_SYMBOL_GPL(percpu_down_write); -- cgit v1.2.3 From 8b936fc1d84f7d70009ea34d66bbf6c54c09fae7 Mon Sep 17 00:00:00 2001 From: Shrikanth Hegde Date: Mon, 1 Jan 2024 21:16:23 +0530 Subject: sched/fair: Use existing helper functions to access ->avg_rt and ->avg_dl There are helper functions called cpu_util_dl() and cpu_util_rt() which give the average utilization of DL and RT respectively. But there are a few places in code where access to these variables is open-coded. Instead use the helper function so that code becomes simpler and easier to maintain later on. No functional changes intended. Signed-off-by: Shrikanth Hegde Signed-off-by: Ingo Molnar Reviewed-by: Vincent Guittot Link: https://lore.kernel.org/r/20240101154624.100981-2-sshegde@linux.vnet.ibm.com --- kernel/sched/fair.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8e30e2bb77a0..127e727fb7b4 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9237,10 +9237,10 @@ static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) static inline bool others_have_blocked(struct rq *rq) { - if (READ_ONCE(rq->avg_rt.util_avg)) + if (cpu_util_rt(rq)) return true; - if (READ_ONCE(rq->avg_dl.util_avg)) + if (cpu_util_dl(rq)) return true; if (thermal_load_avg(rq)) @@ -9506,8 +9506,8 @@ static unsigned long scale_rt_capacity(int cpu) * avg_thermal.load_avg tracks thermal pressure and the weighted * average uses the actual delta max capacity(load). 
*/ - used = READ_ONCE(rq->avg_rt.util_avg); - used += READ_ONCE(rq->avg_dl.util_avg); + used = cpu_util_rt(rq); + used += cpu_util_dl(rq); used += thermal_load_avg(rq); if (unlikely(used >= max)) -- cgit v1.2.3 From a6965b31888501f889261a6783f0de6afff84f8d Mon Sep 17 00:00:00 2001 From: Shrikanth Hegde Date: Mon, 1 Jan 2024 21:16:24 +0530 Subject: sched/fair: Add READ_ONCE() and use existing helper function to access ->avg_irq Use existing helper function cpu_util_irq() instead of open-coding access to ->avg_irq. During review it was noted that ->avg_irq could be updated by a different CPU than the one which is trying to access it. ->avg_irq is updated with WRITE_ONCE(), use READ_ONCE to access it in order to avoid any compiler optimizations. Signed-off-by: Shrikanth Hegde Signed-off-by: Ingo Molnar Reviewed-by: Vincent Guittot Link: https://lore.kernel.org/r/20240101154624.100981-3-sshegde@linux.vnet.ibm.com --- kernel/sched/fair.c | 4 +--- kernel/sched/sched.h | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 127e727fb7b4..ba3633940f6f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9246,10 +9246,8 @@ static inline bool others_have_blocked(struct rq *rq) if (thermal_load_avg(rq)) return true; -#ifdef CONFIG_HAVE_SCHED_AVG_IRQ - if (READ_ONCE(rq->avg_irq.util_avg)) + if (cpu_util_irq(rq)) return true; -#endif return false; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 001fe047bd5d..d2242679239e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3136,7 +3136,7 @@ static inline bool uclamp_rq_is_idle(struct rq *rq) #ifdef CONFIG_HAVE_SCHED_AVG_IRQ static inline unsigned long cpu_util_irq(struct rq *rq) { - return rq->avg_irq.util_avg; + return READ_ONCE(rq->avg_irq.util_avg); } static inline -- cgit v1.2.3 From 8aeaffef8c6eceab0e1498486fdd4f3dc3b7066c Mon Sep 17 00:00:00 2001 From: Keisuke Nishimura Date: Wed, 10 Jan 2024 14:17:06 +0100 Subject: sched/fair: Take the scheduling domain into account in select_idle_smt() When picking a CPU on task wakeup, select_idle_smt() has to take into account the scheduling domain of @target. This is because the "isolcpus" kernel command line option can remove CPUs from the domain to isolate them from other SMT siblings. This fix checks if the candidate CPU is in the target scheduling domain. Commit: df3cb4ea1fb6 ("sched/fair: Fix wrong cpu selecting from isolated domain") ... originally introduced this fix by adding the check of the scheduling domain in the loop. However, commit: 3e6efe87cd5cc ("sched/fair: Remove redundant check in select_idle_smt()") ... accidentally removed the check. Bring it back. Fixes: 3e6efe87cd5c ("sched/fair: Remove redundant check in select_idle_smt()") Signed-off-by: Keisuke Nishimura Signed-off-by: Julia Lawall Signed-off-by: Ingo Molnar Reviewed-by: Vincent Guittot Link: https://lore.kernel.org/r/20240110131707.437301-1-keisuke.nishimura@inria.fr --- kernel/sched/fair.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ba3633940f6f..005f6d31e8f6 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7311,13 +7311,19 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu /* * Scan the local SMT mask for idle CPUs. 
*/ -static int select_idle_smt(struct task_struct *p, int target) +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) { int cpu; for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) { if (cpu == target) continue; + /* + * Check if the CPU is in the LLC scheduling domain of @target. + * Due to isolcpus, there is no guarantee that all the siblings are in the domain. + */ + if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) + continue; if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) return cpu; } @@ -7341,7 +7347,7 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma return __select_idle_cpu(core, p); } -static inline int select_idle_smt(struct task_struct *p, int target) +static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) { return -1; } @@ -7591,7 +7597,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) has_idle_core = test_idle_cores(target); if (!has_idle_core && cpus_share_cache(prev, target)) { - i = select_idle_smt(p, prev); + i = select_idle_smt(p, sd, prev); if ((unsigned int)i < nr_cpumask_bits) return i; } -- cgit v1.2.3 From 23d04d8c6b8ec339057264659b7834027f3e6a63 Mon Sep 17 00:00:00 2001 From: Keisuke Nishimura Date: Wed, 10 Jan 2024 14:17:07 +0100 Subject: sched/fair: Take the scheduling domain into account in select_idle_core() When picking a CPU on task wakeup, select_idle_core() has to take into account the scheduling domain where the function looks for the CPU. This is because the "isolcpus" kernel command line option can remove CPUs from the domain to isolate them from other SMT siblings. This change replaces the set of CPUs allowed to run the task from p->cpus_ptr by the intersection of p->cpus_ptr and sched_domain_span(sd) which is stored in the 'cpus' argument provided by select_idle_cpu(). Fixes: 9fe1f127b913 ("sched/fair: Merge select_idle_core/cpu()") Signed-off-by: Keisuke Nishimura Signed-off-by: Julia Lawall Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20240110131707.437301-2-keisuke.nishimura@inria.fr --- kernel/sched/fair.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 005f6d31e8f6..352222d09c90 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7289,7 +7289,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu if (!available_idle_cpu(cpu)) { idle = false; if (*idle_cpu == -1) { - if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) { + if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, cpus)) { *idle_cpu = cpu; break; } @@ -7297,7 +7297,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu } break; } - if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr)) + if (*idle_cpu == -1 && cpumask_test_cpu(cpu, cpus)) *idle_cpu = cpu; } -- cgit v1.2.3 From 9dfbc26d27aaf0f5958c5972188f16fe977e5af5 Mon Sep 17 00:00:00 2001 From: David Vernet Date: Mon, 5 Feb 2024 22:39:19 -0600 Subject: sched/fair: Remove unnecessary goto in update_sd_lb_stats() In update_sd_lb_stats(), when we're iterating over the sched groups that comprise a sched domain, we're skipping the call to update_sd_pick_busiest() for the sched group that contains the local / destination CPU. We use a goto to skip the call, but we could just as easily check !local_group, as there's no other logic that we need to skip with the goto. 
Let's remove the goto, and check for !local_group in the if statement instead. Signed-off-by: David Vernet Signed-off-by: Ingo Molnar Reviewed-by: Vincent Guittot Reviewed-by: Valentin Schneider Link: https://lore.kernel.org/r/20240206043921.850302-2-void@manifault.com --- kernel/sched/fair.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 352222d09c90..41dda5311770 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -10580,16 +10580,11 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd update_sg_lb_stats(env, sds, sg, sgs, &sg_status); - if (local_group) - goto next_group; - - - if (update_sd_pick_busiest(env, sds, sg, sgs)) { + if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) { sds->busiest = sg; sds->busiest_stat = *sgs; } -next_group: /* Now, start updating sd_lb_stats */ sds->total_load += sgs->group_load; sds->total_capacity += sgs->group_capacity; -- cgit v1.2.3 From 7f1a7229718d788f26a711374da83adc2689837f Mon Sep 17 00:00:00 2001 From: David Vernet Date: Mon, 5 Feb 2024 22:39:20 -0600 Subject: sched/fair: Do strict inequality check for busiest misfit task group In update_sd_pick_busiest(), when comparing two sched groups that are both of type group_misfit_task, we currently consider the new group as busier than the current busiest group even if the new group has the same misfit task load as the current busiest group. We can avoid some unnecessary writes if we instead only consider the newest group to be the busiest if it has a higher load than the current busiest. This matches the behavior of other group types where we compare load, such as two groups that are both overloaded. Let's update the group_misfit_task type comparison to also only update the busiest group in the event of strict inequality. Signed-off-by: David Vernet Signed-off-by: Ingo Molnar Reviewed-by: Vincent Guittot Reviewed-by: Valentin Schneider Link: https://lore.kernel.org/r/20240206043921.850302-3-void@manifault.com --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 41dda5311770..448520f4fe83 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -10032,7 +10032,7 @@ static bool update_sd_pick_busiest(struct lb_env *env, * If we have more than one misfit sg go with the biggest * misfit. */ - if (sgs->group_misfit_task_load < busiest->group_misfit_task_load) + if (sgs->group_misfit_task_load <= busiest->group_misfit_task_load) return false; break; -- cgit v1.2.3 From 7e9f7d17fe6c23432fda9a3648a858b7589cb4aa Mon Sep 17 00:00:00 2001 From: David Vernet Date: Mon, 5 Feb 2024 22:39:21 -0600 Subject: sched/fair: Simplify the update_sd_pick_busiest() logic When comparing the current struct sched_group with the yet-busiest domain in update_sd_pick_busiest(), if the two groups have the same group type, we're currently doing a bit of unnecessary work for any group >= group_misfit_task. We're comparing the two groups, and then returning only if false (the group in question is not the busiest). Otherwise, we break out, do an extra unnecessary conditional check that's vacuously false for any group type > group_fully_busy, and then always return true. Let's just return directly in the switch statement instead. 
This doesn't change the size of vmlinux with llvm 17 (not surprising given that all of this is inlined in load_balance()), but it does shrink load_balance() by 88 bytes on x86. Given that it also improves readability, this seems worth doing. Signed-off-by: David Vernet Signed-off-by: Ingo Molnar Reviewed-by: Vincent Guittot Reviewed-by: Valentin Schneider Link: https://lore.kernel.org/r/20240206043921.850302-4-void@manifault.com --- kernel/sched/fair.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 448520f4fe83..51fe17f10ef8 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -10010,9 +10010,7 @@ static bool update_sd_pick_busiest(struct lb_env *env, switch (sgs->group_type) { case group_overloaded: /* Select the overloaded group with highest avg_load. */ - if (sgs->avg_load <= busiest->avg_load) - return false; - break; + return sgs->avg_load > busiest->avg_load; case group_imbalanced: /* @@ -10023,18 +10021,14 @@ static bool update_sd_pick_busiest(struct lb_env *env, case group_asym_packing: /* Prefer to move from lowest priority CPU's work */ - if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu)) - return false; - break; + return sched_asym_prefer(sds->busiest->asym_prefer_cpu, sg->asym_prefer_cpu); case group_misfit_task: /* * If we have more than one misfit sg go with the biggest * misfit. */ - if (sgs->group_misfit_task_load <= busiest->group_misfit_task_load) - return false; - break; + return sgs->group_misfit_task_load > busiest->group_misfit_task_load; case group_smt_balance: /* -- cgit v1.2.3 From d654c8ddde84b9d1a30a40917e588b5a1e53dada Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Sat, 10 Feb 2024 19:39:19 +0800 Subject: sched/topology: Remove duplicate descriptions from TOPOLOGY_SD_FLAGS These flags are already documented in include/linux/sched/sd_flags.h. Also, add missing SD_CLUSTER and keep the comment on SD_ASYM_PACKING as it is a special case. Suggested-by: Ricardo Neri Signed-off-by: Alex Shi Signed-off-by: Ingo Molnar Reviewed-by: Ricardo Neri Reviewed-by: Valentin Schneider Reviewed-by: Vincent Guittot Link: https://lore.kernel.org/r/20240210113924.1130448-1-alexs@kernel.org --- kernel/sched/topology.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 10d1391e7416..0b33f7b05d21 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1551,11 +1551,12 @@ static struct cpumask ***sched_domains_numa_masks; * * These flags are purely descriptive of the topology and do not prescribe * behaviour. Behaviour is artificial and mapped in the below sd_init() - * function: + * function. For details, see include/linux/sched/sd_flags.h. * - * SD_SHARE_CPUCAPACITY - describes SMT topologies - * SD_SHARE_PKG_RESOURCES - describes shared caches - * SD_NUMA - describes NUMA topologies + * SD_SHARE_CPUCAPACITY + * SD_SHARE_PKG_RESOURCES + * SD_CLUSTER + * SD_NUMA * * Odd one out, which beside describing the topology has a quirk also * prescribes the desired behaviour that goes along with it: -- cgit v1.2.3 From 5a64983731566f3b102b4ed12445b8a1b2f46a46 Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Sat, 10 Feb 2024 19:39:20 +0800 Subject: sched/fair: Remove unused parameter from sched_asym() The 'sds' argument is not used in the sched_asym() function anymore, remove it. 
Fixes: c9ca07886aaa ("sched/fair: Do not even the number of busy CPUs via asym_packing") Signed-off-by: Alex Shi Signed-off-by: Ingo Molnar Reviewed-by: Ricardo Neri Reviewed-by: Valentin Schneider Reviewed-by: Vincent Guittot Link: https://lore.kernel.org/r/20240210113924.1130448-2-alexs@kernel.org --- kernel/sched/fair.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 51fe17f10ef8..300d1bfe6d2b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9753,7 +9753,6 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu) /** * sched_asym - Check if the destination CPU can do asym_packing load balance * @env: The load balancing environment - * @sds: Load-balancing data with statistics of the local group * @sgs: Load-balancing statistics of the candidate busiest group * @group: The candidate busiest group * @@ -9772,8 +9771,7 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu) * otherwise. */ static inline bool -sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs, - struct sched_group *group) +sched_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group) { /* Ensure that the whole local core is idle, if applicable. */ if (!sched_use_asym_prio(env->sd, env->dst_cpu)) @@ -9944,7 +9942,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, /* Check if dst CPU is idle and preferred to this group */ if (!local_group && env->sd->flags & SD_ASYM_PACKING && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running && - sched_asym(env, sds, sgs, group)) { + sched_asym(env, sgs, group)) { sgs->group_asym_packing = 1; } -- cgit v1.2.3 From 45de20623475049c424bc0b89f42efca54995edd Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Sat, 10 Feb 2024 19:39:21 +0800 Subject: sched/fair: Rework sched_use_asym_prio() and sched_asym_prefer() sched_use_asym_prio() and sched_asym_prefer() are used together in various places. Consolidate them into a single function sched_asym(). The existing sched_asym() function is only used when collecting statistics of a scheduling group. Rename it as sched_group_asym(), and remove the obsolete function description. This makes the code easier to read. No functional changes. Signed-off-by: Alex Shi Signed-off-by: Ingo Molnar Tested-by: Ricardo Neri Reviewed-by: Ricardo Neri Reviewed-by: Vincent Guittot Link: https://lore.kernel.org/r/20240210113924.1130448-3-alexs@kernel.org --- kernel/sched/fair.c | 45 ++++++++++++++++++++------------------------- 1 file changed, 20 insertions(+), 25 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 300d1bfe6d2b..475e2ca66b63 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9750,8 +9750,18 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu) return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu); } +static inline bool sched_asym(struct sched_domain *sd, int dst_cpu, int src_cpu) +{ + /* + * First check if @dst_cpu can do asym_packing load balance. Only do it + * if it has higher priority than @src_cpu. 
+ */ + return sched_use_asym_prio(sd, dst_cpu) && + sched_asym_prefer(dst_cpu, src_cpu); +} + /** - * sched_asym - Check if the destination CPU can do asym_packing load balance + * sched_group_asym - Check if the destination CPU can do asym_packing balance * @env: The load balancing environment * @sgs: Load-balancing statistics of the candidate busiest group * @group: The candidate busiest group @@ -9759,34 +9769,21 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu) * @env::dst_cpu can do asym_packing if it has higher priority than the * preferred CPU of @group. * - * SMT is a special case. If we are balancing load between cores, @env::dst_cpu - * can do asym_packing balance only if all its SMT siblings are idle. Also, it - * can only do it if @group is an SMT group and has exactly on busy CPU. Larger - * imbalances in the number of CPUS are dealt with in find_busiest_group(). - * - * If we are balancing load within an SMT core, or at PKG domain level, always - * proceed. - * * Return: true if @env::dst_cpu can do with asym_packing load balance. False * otherwise. */ static inline bool -sched_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group) +sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group) { - /* Ensure that the whole local core is idle, if applicable. */ - if (!sched_use_asym_prio(env->sd, env->dst_cpu)) - return false; - /* - * CPU priorities does not make sense for SMT cores with more than one + * CPU priorities do not make sense for SMT cores with more than one * busy sibling. */ - if (group->flags & SD_SHARE_CPUCAPACITY) { - if (sgs->group_weight - sgs->idle_cpus != 1) - return false; - } + if ((group->flags & SD_SHARE_CPUCAPACITY) && + (sgs->group_weight - sgs->idle_cpus != 1)) + return false; - return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu); + return sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu); } /* One group has more than one SMT CPU while the other group does not */ @@ -9942,7 +9939,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, /* Check if dst CPU is idle and preferred to this group */ if (!local_group && env->sd->flags & SD_ASYM_PACKING && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running && - sched_asym(env, sgs, group)) { + sched_group_asym(env, sgs, group)) { sgs->group_asym_packing = 1; } @@ -11028,8 +11025,7 @@ static struct rq *find_busiest_queue(struct lb_env *env, * SMT cores with more than one busy sibling. */ if ((env->sd->flags & SD_ASYM_PACKING) && - sched_use_asym_prio(env->sd, i) && - sched_asym_prefer(i, env->dst_cpu) && + sched_asym(env->sd, i, env->dst_cpu) && nr_running == 1) continue; @@ -11899,8 +11895,7 @@ static void nohz_balancer_kick(struct rq *rq) * preferred CPU must be idle. */ for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) { - if (sched_use_asym_prio(sd, i) && - sched_asym_prefer(i, cpu)) { + if (sched_asym(sd, i, cpu)) { flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; goto unlock; } -- cgit v1.2.3 From fbc449864e0d2ee2c16f3af2d1e9093b9b8d7ad0 Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Sat, 10 Feb 2024 19:39:22 +0800 Subject: sched/fair: Check the SD_ASYM_PACKING flag in sched_use_asym_prio() sched_use_asym_prio() checks whether CPU priorities should be used. It makes sense to check for the SD_ASYM_PACKING() inside the function. Since both sched_asym() and sched_group_asym() use sched_use_asym_prio(), remove the now superfluous checks for the flag in various places. 
Signed-off-by: Alex Shi Signed-off-by: Ingo Molnar Tested-by: Ricardo Neri Reviewed-by: Ricardo Neri Reviewed-by: Vincent Guittot Link: https://lore.kernel.org/r/20240210113924.1130448-4-alexs@kernel.org --- kernel/sched/fair.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 475e2ca66b63..39781a666c08 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9744,6 +9744,9 @@ group_type group_classify(unsigned int imbalance_pct, */ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu) { + if (!(sd->flags & SD_ASYM_PACKING)) + return false; + if (!sched_smt_active()) return true; @@ -9937,11 +9940,9 @@ static inline void update_sg_lb_stats(struct lb_env *env, sgs->group_weight = group->group_weight; /* Check if dst CPU is idle and preferred to this group */ - if (!local_group && env->sd->flags & SD_ASYM_PACKING && - env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running && - sched_group_asym(env, sgs, group)) { + if (!local_group && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running && + sched_group_asym(env, sgs, group)) sgs->group_asym_packing = 1; - } /* Check for loaded SMT group to be balanced to dst CPU */ if (!local_group && smt_balance(env, sgs, group)) @@ -11024,9 +11025,7 @@ static struct rq *find_busiest_queue(struct lb_env *env, * If balancing between cores, let lower priority CPUs help * SMT cores with more than one busy sibling. */ - if ((env->sd->flags & SD_ASYM_PACKING) && - sched_asym(env->sd, i, env->dst_cpu) && - nr_running == 1) + if (sched_asym(env->sd, i, env->dst_cpu) && nr_running == 1) continue; switch (env->migration_type) { @@ -11122,8 +11121,7 @@ asym_active_balance(struct lb_env *env) * the lower priority @env::dst_cpu help it. Do not follow * CPU priority. */ - return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && - sched_use_asym_prio(env->sd, env->dst_cpu) && + return env->idle != CPU_NOT_IDLE && sched_use_asym_prio(env->sd, env->dst_cpu) && (sched_asym_prefer(env->dst_cpu, env->src_cpu) || !sched_use_asym_prio(env->sd, env->src_cpu)); } -- cgit v1.2.3 From 54de442747037485da1fc4eca9636287a61e97e3 Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Sat, 10 Feb 2024 19:39:23 +0800 Subject: sched/topology: Rename SD_SHARE_PKG_RESOURCES to SD_SHARE_LLC SD_SHARE_PKG_RESOURCES is a bit of a misnomer: its naming suggests that it's sharing all 'package resources' - while in reality it's specifically for sharing the LLC only. Rename it to SD_SHARE_LLC to reduce confusion. [ mingo: Rewrote the confusing changelog as well. ] Suggested-by: Valentin Schneider Signed-off-by: Alex Shi Signed-off-by: Ingo Molnar Reviewed-by: Valentin Schneider Reviewed-by: Ricardo Neri Reviewed-by: Barry Song Link: https://lore.kernel.org/r/20240210113924.1130448-5-alexs@kernel.org --- kernel/sched/fair.c | 2 +- kernel/sched/topology.c | 28 ++++++++++++++-------------- 2 files changed, 15 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 39781a666c08..6a16129f9a5c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -10678,7 +10678,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s */ if (local->group_type == group_has_spare) { if ((busiest->group_type > group_fully_busy) && - !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) { + !(env->sd->flags & SD_SHARE_LLC)) { /* * If busiest is overloaded, try to fill spare * capacity. 
This might end up creating spare capacity diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 0b33f7b05d21..99ea5986038c 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -657,13 +657,13 @@ static void destroy_sched_domains(struct sched_domain *sd) } /* - * Keep a special pointer to the highest sched_domain that has - * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this - * allows us to avoid some pointer chasing select_idle_sibling(). + * Keep a special pointer to the highest sched_domain that has SD_SHARE_LLC set + * (Last Level Cache Domain) for this allows us to avoid some pointer chasing + * select_idle_sibling(). * - * Also keep a unique ID per domain (we use the first CPU number in - * the cpumask of the domain), this allows us to quickly tell if - * two CPUs are in the same cache domain, see cpus_share_cache(). + * Also keep a unique ID per domain (we use the first CPU number in the cpumask + * of the domain), this allows us to quickly tell if two CPUs are in the same + * cache domain, see cpus_share_cache(). */ DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc); DEFINE_PER_CPU(int, sd_llc_size); @@ -684,7 +684,7 @@ static void update_top_cache_domain(int cpu) int id = cpu; int size = 1; - sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); + sd = highest_flag_domain(cpu, SD_SHARE_LLC); if (sd) { id = cpumask_first(sched_domain_span(sd)); size = cpumask_weight(sched_domain_span(sd)); @@ -1554,7 +1554,7 @@ static struct cpumask ***sched_domains_numa_masks; * function. For details, see include/linux/sched/sd_flags.h. * * SD_SHARE_CPUCAPACITY - * SD_SHARE_PKG_RESOURCES + * SD_SHARE_LLC * SD_CLUSTER * SD_NUMA * @@ -1566,7 +1566,7 @@ static struct cpumask ***sched_domains_numa_masks; #define TOPOLOGY_SD_FLAGS \ (SD_SHARE_CPUCAPACITY | \ SD_CLUSTER | \ - SD_SHARE_PKG_RESOURCES | \ + SD_SHARE_LLC | \ SD_NUMA | \ SD_ASYM_PACKING) @@ -1609,7 +1609,7 @@ sd_init(struct sched_domain_topology_level *tl, | 0*SD_BALANCE_WAKE | 1*SD_WAKE_AFFINE | 0*SD_SHARE_CPUCAPACITY - | 0*SD_SHARE_PKG_RESOURCES + | 0*SD_SHARE_LLC | 0*SD_SERIALIZE | 1*SD_PREFER_SIBLING | 0*SD_NUMA @@ -1646,7 +1646,7 @@ sd_init(struct sched_domain_topology_level *tl, if (sd->flags & SD_SHARE_CPUCAPACITY) { sd->imbalance_pct = 110; - } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { + } else if (sd->flags & SD_SHARE_LLC) { sd->imbalance_pct = 117; sd->cache_nice_tries = 1; @@ -1671,7 +1671,7 @@ sd_init(struct sched_domain_topology_level *tl, * For all levels sharing cache; connect a sched_domain_shared * instance. 
*/ - if (sd->flags & SD_SHARE_PKG_RESOURCES) { + if (sd->flags & SD_SHARE_LLC) { sd->shared = *per_cpu_ptr(sdd->sds, sd_id); atomic_inc(&sd->shared->ref); atomic_set(&sd->shared->nr_busy_cpus, sd_weight); @@ -2446,8 +2446,8 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { struct sched_domain *child = sd->child; - if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child && - (child->flags & SD_SHARE_PKG_RESOURCES)) { + if (!(sd->flags & SD_SHARE_LLC) && child && + (child->flags & SD_SHARE_LLC)) { struct sched_domain __rcu *top_p; unsigned int nr_llcs; -- cgit v1.2.3 From 50f4f2d197e194ec0356962b99ca2b72e9a37bc8 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Mon, 12 Feb 2024 16:00:50 +0100 Subject: pidfd: move struct pidfd_fops Move the pidfd file operations over to their own file in preparation of implementing pidfs and to isolate them from other mostly unrelated functionality in other files. Link: https://lore.kernel.org/r/20240213-vfs-pidfd_fs-v1-1-f863f58cfce1@kernel.org Signed-off-by: Christian Brauner --- kernel/fork.c | 110 ---------------------------------------------------------- 1 file changed, 110 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 3f22ec90c5c6..662a61f340ce 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1993,116 +1993,6 @@ struct pid *pidfd_pid(const struct file *file) return ERR_PTR(-EBADF); } -static int pidfd_release(struct inode *inode, struct file *file) -{ - struct pid *pid = file->private_data; - - file->private_data = NULL; - put_pid(pid); - return 0; -} - -#ifdef CONFIG_PROC_FS -/** - * pidfd_show_fdinfo - print information about a pidfd - * @m: proc fdinfo file - * @f: file referencing a pidfd - * - * Pid: - * This function will print the pid that a given pidfd refers to in the - * pid namespace of the procfs instance. - * If the pid namespace of the process is not a descendant of the pid - * namespace of the procfs instance 0 will be shown as its pid. This is - * similar to calling getppid() on a process whose parent is outside of - * its pid namespace. - * - * NSpid: - * If pid namespaces are supported then this function will also print - * the pid of a given pidfd refers to for all descendant pid namespaces - * starting from the current pid namespace of the instance, i.e. the - * Pid field and the first entry in the NSpid field will be identical. - * If the pid namespace of the process is not a descendant of the pid - * namespace of the procfs instance 0 will be shown as its first NSpid - * entry and no others will be shown. - * Note that this differs from the Pid and NSpid fields in - * /proc//status where Pid and NSpid are always shown relative to - * the pid namespace of the procfs instance. The difference becomes - * obvious when sending around a pidfd between pid namespaces from a - * different branch of the tree, i.e. 
where no ancestral relation is - * present between the pid namespaces: - * - create two new pid namespaces ns1 and ns2 in the initial pid - * namespace (also take care to create new mount namespaces in the - * new pid namespace and mount procfs) - * - create a process with a pidfd in ns1 - * - send pidfd from ns1 to ns2 - * - read /proc/self/fdinfo/ and observe that both Pid and NSpid - * have exactly one entry, which is 0 - */ -static void pidfd_show_fdinfo(struct seq_file *m, struct file *f) -{ - struct pid *pid = f->private_data; - struct pid_namespace *ns; - pid_t nr = -1; - - if (likely(pid_has_task(pid, PIDTYPE_PID))) { - ns = proc_pid_ns(file_inode(m->file)->i_sb); - nr = pid_nr_ns(pid, ns); - } - - seq_put_decimal_ll(m, "Pid:\t", nr); - -#ifdef CONFIG_PID_NS - seq_put_decimal_ll(m, "\nNSpid:\t", nr); - if (nr > 0) { - int i; - - /* If nr is non-zero it means that 'pid' is valid and that - * ns, i.e. the pid namespace associated with the procfs - * instance, is in the pid namespace hierarchy of pid. - * Start at one below the already printed level. - */ - for (i = ns->level + 1; i <= pid->level; i++) - seq_put_decimal_ll(m, "\t", pid->numbers[i].nr); - } -#endif - seq_putc(m, '\n'); -} -#endif - -/* - * Poll support for process exit notification. - */ -static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts) -{ - struct pid *pid = file->private_data; - bool thread = file->f_flags & PIDFD_THREAD; - struct task_struct *task; - __poll_t poll_flags = 0; - - poll_wait(file, &pid->wait_pidfd, pts); - /* - * Depending on PIDFD_THREAD, inform pollers when the thread - * or the whole thread-group exits. - */ - rcu_read_lock(); - task = pid_task(pid, PIDTYPE_PID); - if (!task) - poll_flags = EPOLLIN | EPOLLRDNORM | EPOLLHUP; - else if (task->exit_state && (thread || thread_group_empty(task))) - poll_flags = EPOLLIN | EPOLLRDNORM; - rcu_read_unlock(); - - return poll_flags; -} - -const struct file_operations pidfd_fops = { - .release = pidfd_release, - .poll = pidfd_poll, -#ifdef CONFIG_PROC_FS - .show_fdinfo = pidfd_show_fdinfo, -#endif -}; - /** * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd * @pid: the struct pid for which to create a pidfd -- cgit v1.2.3 From 66f40b926dd249f74334a22162c09e7ec1ec5b07 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Tue, 27 Feb 2024 19:58:01 -0500 Subject: cgroup/cpuset: Fix a memory leak in update_exclusive_cpumask() Fix a possible memory leak in update_exclusive_cpumask() by moving the alloc_cpumasks() down after the validate_change() check which can fail and still before the temporary cpumasks are needed. 
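[ Editor's note: schematically, the leak comes from allocating ahead of a
  check that can bail out; a simplified sketch, not the exact cpuset code:

	/* Leaky ordering: tmp is never freed on the early return. */
	if (alloc_cpumasks(NULL, &tmp))
		return -ENOMEM;
	retval = validate_change(cs, trialcs);
	if (retval)
		return retval;			/* tmp leaks here */

	/* Fixed ordering: allocate only after the fallible check. */
	retval = validate_change(cs, trialcs);
	if (retval)
		return retval;
	if (alloc_cpumasks(NULL, &tmp))
		return -ENOMEM;
]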
Fixes: e2ffe502ba45 ("cgroup/cpuset: Add cpuset.cpus.exclusive for v2") Reported-and-tested-by: Mirsad Todorovac Closes: https://lore.kernel.org/lkml/14915689-27a3-4cd8-80d2-9c30d0c768b6@alu.unizg.hr Signed-off-by: Waiman Long Signed-off-by: Tejun Heo Cc: stable@vger.kernel.org # v6.7+ --- kernel/cgroup/cpuset.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index ba36c073304a..7260f095802a 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -2598,9 +2598,6 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus)) return 0; - if (alloc_cpumasks(NULL, &tmp)) - return -ENOMEM; - if (*buf) compute_effective_exclusive_cpumask(trialcs, NULL); @@ -2615,6 +2612,9 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, if (retval) return retval; + if (alloc_cpumasks(NULL, &tmp)) + return -ENOMEM; + if (old_prs) { if (cpumask_empty(trialcs->effective_xcpus)) { invalidate = true; -- cgit v1.2.3 From a184d9835a0a689261ea6a4a8dbc18173a031b77 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 28 Feb 2024 13:38:41 +0100 Subject: tick/sched: Fix build failure for CONFIG_NO_HZ_COMMON=n In configurations with CONFIG_TICK_ONESHOT but no CONFIG_NO_HZ or CONFIG_HIGH_RES_TIMERS, tick_sched_timer_dying() is stubbed out, but still defined as a global function as well: kernel/time/tick-sched.c:1599:6: error: redefinition of 'tick_sched_timer_dying' 1599 | void tick_sched_timer_dying(int cpu) | ^ kernel/time/tick-sched.h:111:20: note: previous definition is here 111 | static inline void tick_sched_timer_dying(int cpu) { } | ^ This configuration only appears with ARM CONFIG_ARCH_BCM_MOBILE, which should not actually select CONFIG_TICK_ONESHOT. Adjust the #ifdef for the stub to match the condition for building the tick-sched.c file, for consistency with the definition and to avoid the build regression. Fixes: 3aedb7fcd88a ("tick/sched: Remove useless oneshot ifdeffery") Signed-off-by: Arnd Bergmann Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240228123850.3499024-1-arnd@kernel.org --- kernel/time/tick-sched.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index 58d8d1c49dd3..e11c4dc65bcb 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h @@ -105,7 +105,7 @@ struct tick_sched { extern struct tick_sched *tick_get_tick_sched(int cpu); extern void tick_setup_sched_timer(bool hrtimer); -#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS +#if defined CONFIG_TICK_ONESHOT extern void tick_sched_timer_dying(int cpu); #else static inline void tick_sched_timer_dying(int cpu) { } -- cgit v1.2.3 From 3ab67a9ce82ff22447b1dad53b49a91d1abbf1ff Mon Sep 17 00:00:00 2001 From: Xiongwei Song Date: Thu, 29 Feb 2024 22:20:07 +0800 Subject: cgroup/cpuset: Mark memory_spread_slab as obsolete We've removed the SLAB allocator, cpuset_do_slab_mem_spread() and SLAB_MEM_SPREAD, so memory_spread_slab is a no-op now. We can mark memory_spread_slab as obsolete in case someone still wants to use it after cpuset_do_slab_mem_spread() has been removed. For more details, please check [1]. [1] https://lore.kernel.org/lkml/32bc1403-49da-445a-8c00-9686a3b0d6a3@redhat.com/T/#m8e292e21b00f95a4bb8086371fa7387fa4ea8f60 tj: Description and cosmetic updates.
Signed-off-by: Xiongwei Song Acked-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/cgroup/cpuset.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index ba36c073304a..c940cf01b148 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -3897,6 +3897,7 @@ static struct cftype legacy_files[] = { }, { + /* obsolete, may be removed in the future */ .name = "memory_spread_slab", .read_u64 = cpuset_read_u64, .write_u64 = cpuset_write_u64, -- cgit v1.2.3 From 25125a4762835d62ba1e540c1351d447fc1f6c7c Mon Sep 17 00:00:00 2001 From: Kamalesh Babulal Date: Thu, 29 Feb 2024 15:41:14 +0530 Subject: cgroup/cpuset: Fix retval in update_cpumask() update_cpumask() checks the newly requested cpumask by calling validate_change(), which returns an error on passing an invalid set of cpu(s). Independent of the error returned, update_cpumask() always returns zero, suppressing the error and returning success to the user on writing an invalid cpu range for a cpuset. Fix it by returning retval instead, which is returned by validate_change(). Fixes: 99fe36ba6fc1 ("cgroup/cpuset: Improve temporary cpumasks handling") Signed-off-by: Kamalesh Babulal Reviewed-by: Waiman Long Cc: stable@vger.kernel.org # v6.6+ Signed-off-by: Tejun Heo --- kernel/cgroup/cpuset.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 7260f095802a..927bef3a598a 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -2562,7 +2562,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, update_partition_sd_lb(cs, old_prs); out_free: free_cpumasks(NULL, &tmp); - return 0; + return retval; } /** -- cgit v1.2.3 From 1acd92d95fa24edca8f0292b21870025da93e24f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 26 Feb 2024 15:38:55 -1000 Subject: workqueue: Drain BH work items on hot-unplugged CPUs Boqun pointed out that workqueues aren't handling BH work items on offlined CPUs. Unlike tasklets, which transfer out the pending tasks at CPUHP_SOFTIRQ_DEAD, BH workqueue would just leave them pending which is problematic. Note that this behavior is specific to BH workqueues as the non-BH per-CPU workers just become unbound when the CPU goes offline. This patch fixes the issue by draining the pending BH work items from an offlined CPU at CPUHP_SOFTIRQ_DEAD. Because work items carry more context, it's not as easy to transfer the pending work items from one pool to another. Instead, run the offlined pools' BH work items on an online CPU. Note that this assumes that no further BH work items will be queued on the offlined CPUs. This assumption is shared with tasklet and should be fine for conversions. However, this issue also exists for per-CPU workqueues which will just keep executing work items queued after CPU offline on unbound workers and workqueue should reject per-CPU and BH work items queued on offline CPUs. This will be addressed separately later.
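[ Editor's note: the drain below builds on the standard on-stack work plus
  completion idiom for running a payload synchronously in another context;
  in generic form, with a hypothetical payload:

	struct sync_work {
		struct work_struct work;
		struct completion done;
	};

	static void sync_workfn(struct work_struct *work)
	{
		struct sync_work *sw = container_of(work, struct sync_work, work);

		/* ... perform the payload on the executing CPU ... */
		complete(&sw->done);
	}

	static void run_synchronously(struct workqueue_struct *wq)
	{
		struct sync_work sw;

		INIT_WORK(&sw.work, sync_workfn);
		init_completion(&sw.done);
		queue_work(wq, &sw.work);
		/* sw stays valid on our stack until the handler completes. */
		wait_for_completion(&sw.done);
	}
]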
Signed-off-by: Tejun Heo Reported-and-reviewed-by: Boqun Feng Link: http://lkml.kernel.org/r/Zdvw0HdSXcU3JZ4g@boqun-archlinux --- kernel/softirq.c | 2 ++ kernel/workqueue.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 90 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/softirq.c b/kernel/softirq.c index 547d282548a8..b315b21fb28c 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -932,6 +932,8 @@ static void run_ksoftirqd(unsigned int cpu) #ifdef CONFIG_HOTPLUG_CPU static int takeover_tasklets(unsigned int cpu) { + workqueue_softirq_dead(cpu); + /* CPU is dead, so no lock needed. */ local_irq_disable(); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 38783e3a60bb..a60eb65955e7 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -81,6 +81,7 @@ enum worker_pool_flags { POOL_BH = 1 << 0, /* is a BH pool */ POOL_MANAGER_ACTIVE = 1 << 1, /* being managed */ POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ + POOL_BH_DRAINING = 1 << 3, /* draining after CPU offline */ }; enum worker_flags { @@ -1218,7 +1219,9 @@ static struct irq_work *bh_pool_irq_work(struct worker_pool *pool) static void kick_bh_pool(struct worker_pool *pool) { #ifdef CONFIG_SMP - if (unlikely(pool->cpu != smp_processor_id())) { + /* see drain_dead_softirq_workfn() for BH_DRAINING */ + if (unlikely(pool->cpu != smp_processor_id() && + !(pool->flags & POOL_BH_DRAINING))) { irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu); return; } @@ -3155,6 +3158,7 @@ __acquires(&pool->lock) struct worker_pool *pool = worker->pool; unsigned long work_data; int lockdep_start_depth, rcu_start_depth; + bool bh_draining = pool->flags & POOL_BH_DRAINING; #ifdef CONFIG_LOCKDEP /* * It is permissible to free the struct work_struct from @@ -3220,7 +3224,9 @@ __acquires(&pool->lock) rcu_start_depth = rcu_preempt_depth(); lockdep_start_depth = lockdep_depth(current); - lock_map_acquire(&pwq->wq->lockdep_map); + /* see drain_dead_softirq_workfn() */ + if (!bh_draining) + lock_map_acquire(&pwq->wq->lockdep_map); lock_map_acquire(&lockdep_map); /* * Strictly speaking we should mark the invariant state without holding @@ -3253,7 +3259,8 @@ __acquires(&pool->lock) trace_workqueue_execute_end(work, worker->current_func); pwq->stats[PWQ_STAT_COMPLETED]++; lock_map_release(&lockdep_map); - lock_map_release(&pwq->wq->lockdep_map); + if (!bh_draining) + lock_map_release(&pwq->wq->lockdep_map); if (unlikely((worker->task && in_atomic()) || lockdep_depth(current) != lockdep_start_depth || @@ -3615,6 +3622,84 @@ void workqueue_softirq_action(bool highpri) bh_worker(list_first_entry(&pool->workers, struct worker, node)); } +struct wq_drain_dead_softirq_work { + struct work_struct work; + struct worker_pool *pool; + struct completion done; +}; + +static void drain_dead_softirq_workfn(struct work_struct *work) +{ + struct wq_drain_dead_softirq_work *dead_work = + container_of(work, struct wq_drain_dead_softirq_work, work); + struct worker_pool *pool = dead_work->pool; + bool repeat; + + /* + * @pool's CPU is dead and we want to execute its still pending work + * items from this BH work item which is running on a different CPU. As + * its CPU is dead, @pool can't be kicked and, as work execution path + * will be nested, a lockdep annotation needs to be suppressed. Mark + * @pool with %POOL_BH_DRAINING for the special treatments. 
 + */ + raw_spin_lock_irq(&pool->lock); + pool->flags |= POOL_BH_DRAINING; + raw_spin_unlock_irq(&pool->lock); + + bh_worker(list_first_entry(&pool->workers, struct worker, node)); + + raw_spin_lock_irq(&pool->lock); + pool->flags &= ~POOL_BH_DRAINING; + repeat = need_more_worker(pool); + raw_spin_unlock_irq(&pool->lock); + + /* + * bh_worker() might hit consecutive execution limit and bail. If there + * still are pending work items, reschedule self and return so that we + * don't hog this CPU's BH. + */ + if (repeat) { + if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) + queue_work(system_bh_highpri_wq, work); + else + queue_work(system_bh_wq, work); + } else { + complete(&dead_work->done); + } +} + +/* + * @cpu is dead. Drain the remaining BH work items on the current CPU. It's + * possible to allocate dead_work per CPU and avoid flushing. However, then we + * have to worry about draining overlapping with CPU coming back online or + * nesting (one CPU's dead_work queued on another CPU which is also dead and so + * on). Let's keep it simple and drain them synchronously. These are BH work + * items which shouldn't be requeued on the same pool. Shouldn't take long. + */ +void workqueue_softirq_dead(unsigned int cpu) +{ + int i; + + for (i = 0; i < NR_STD_WORKER_POOLS; i++) { + struct worker_pool *pool = &per_cpu(bh_worker_pools, cpu)[i]; + struct wq_drain_dead_softirq_work dead_work; + + if (!need_more_worker(pool)) + continue; + + INIT_WORK(&dead_work.work, drain_dead_softirq_workfn); + dead_work.pool = pool; + init_completion(&dead_work.done); + + if (pool->attrs->nice == HIGHPRI_NICE_LEVEL) + queue_work(system_bh_highpri_wq, &dead_work.work); + else + queue_work(system_bh_wq, &dead_work.work); + + wait_for_completion(&dead_work.done); + } +} + /** * check_flush_dependency - check for flush dependency sanity * @target_wq: workqueue being flushed -- cgit v1.2.3 From 6572786006fa96ad2c35bb31757f1f861298093b Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Fri, 1 Mar 2024 09:18:24 +0900 Subject: fprobe: Fix to allocate entry_data_size buffer with rethook instances Fix to allocate fprobe::entry_data_size buffer with rethook instances. If fprobe doesn't allocate an entry_data_size buffer for each rethook instance, the fprobe entry handler can overrun the buffer when storing entry data.
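[ Editor's note: the underlying bug is the classic trailing-payload sizing
  mistake; a sketch with an illustrative struct, not the actual rethook node:

	struct node {
		struct list_head list;
		char entry_data[];	/* entry_data_size bytes must follow */
	};

	struct node *node;

	/* Buggy: only the header is allocated, so any store into
	 * entry_data[] lands past the end of the object. */
	node = kzalloc(sizeof(*node), GFP_KERNEL);

	/* Fixed: account for the per-instance payload as well. */
	node = kzalloc(sizeof(*node) + entry_data_size, GFP_KERNEL);
]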
Link: https://lore.kernel.org/all/170920576727.107552.638161246679734051.stgit@devnote2/ Reported-by: Jiri Olsa Closes: https://lore.kernel.org/all/Zd9eBn2FTQzYyg7L@krava/ Fixes: 4bbd93455659 ("kprobes: kretprobe scalability improvement") Cc: stable@vger.kernel.org Tested-by: Jiri Olsa Signed-off-by: Masami Hiramatsu (Google) --- kernel/trace/fprobe.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c index 6cd2a4e3afb8..9ff018245840 100644 --- a/kernel/trace/fprobe.c +++ b/kernel/trace/fprobe.c @@ -189,9 +189,6 @@ static int fprobe_init_rethook(struct fprobe *fp, int num) { int size; - if (num <= 0) - return -EINVAL; - if (!fp->exit_handler) { fp->rethook = NULL; return 0; @@ -199,15 +196,16 @@ static int fprobe_init_rethook(struct fprobe *fp, int num) /* Initialize rethook if needed */ if (fp->nr_maxactive) - size = fp->nr_maxactive; + num = fp->nr_maxactive; else - size = num * num_possible_cpus() * 2; - if (size <= 0) + num *= num_possible_cpus() * 2; + if (num <= 0) return -EINVAL; + size = sizeof(struct fprobe_rethook_node) + fp->entry_data_size; + /* Initialize rethook */ - fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler, - sizeof(struct fprobe_rethook_node), size); + fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler, size, num); if (IS_ERR(fp->rethook)) return PTR_ERR(fp->rethook); -- cgit v1.2.3 From cb12fd8e0dabb9a1c8aef55a6a41e2c255fcdf4b Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Mon, 12 Feb 2024 16:32:38 +0100 Subject: pidfd: add pidfs This moves pidfds from the anonymous inode infrastructure to a tiny pseudo filesystem. This has been on my todo for quite a while as it will unblock further work that we weren't able to do simply because of the very justified limitations of anonymous inodes. Moving pidfds to a tiny pseudo filesystem allows: * statx() on pidfds becomes useful for the first time. * pidfds can be compared simply via statx() and then comparing inode numbers. * pidfds have unique inode numbers for the system lifetime. * struct pid is now stashed in inode->i_private instead of file->private_data. This means it is now possible to introduce concepts that operate on a process once all file descriptors have been closed. A concrete example is kill-on-last-close. * file->private_data is freed up for per-file options for pidfds. * Each struct pid will refer to a different inode but the same struct pid will refer to the same inode if it's opened multiple times. In contrast to now where each struct pid refers to the same inode. Even if we were to move to anon_inode_create_getfile() which creates new inodes we'd still be associating the same struct pid with multiple different inodes. The tiny pseudo filesystem is not visible anywhere in userspace exactly like e.g., pipefs and sockfs. There's no lookup, there's no complex inode operations, nothing. Dentries and inodes are always deleted when the last pidfd is closed. We allocate a new inode for each struct pid and we reuse that inode for all pidfds. We use iget_locked() to find that inode again based on the inode number which isn't recycled. We allocate a new dentry for each pidfd that uses the same inode. That is similar to anonymous inodes which reuse the same inode for thousands of dentries. For pidfds we're talking way less than that. There usually won't be a lot of concurrent openers of the same struct pid. They can probably often be counted on two hands. 
I know that systemd does use separate pidfd for the same struct pid for various complex process tracking issues. So I think with that things actually become way simpler. Especially because we don't have to care about lookup. Dentries and inodes continue to be always deleted. The code is entirely optional and fairly small. If it's not selected we fallback to anonymous inodes. Heavily inspired by nsfs which uses a similar stashing mechanism just for namespaces. Link: https://lore.kernel.org/r/20240213-vfs-pidfd_fs-v1-2-f863f58cfce1@kernel.org Signed-off-by: Christian Brauner --- kernel/fork.c | 13 ++----------- kernel/nsproxy.c | 2 +- kernel/pid.c | 11 +++++++++++ 3 files changed, 14 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 662a61f340ce..2f839c290dcf 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -102,6 +102,7 @@ #include #include #include +#include #include #include @@ -1985,14 +1986,6 @@ static inline void rcu_copy_process(struct task_struct *p) #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ } -struct pid *pidfd_pid(const struct file *file) -{ - if (file->f_op == &pidfd_fops) - return file->private_data; - - return ERR_PTR(-EBADF); -} - /** * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd * @pid: the struct pid for which to create a pidfd @@ -2030,13 +2023,11 @@ static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **re if (pidfd < 0) return pidfd; - pidfd_file = anon_inode_getfile("[pidfd]", &pidfd_fops, pid, - flags | O_RDWR); + pidfd_file = pidfs_alloc_file(pid, flags | O_RDWR); if (IS_ERR(pidfd_file)) { put_unused_fd(pidfd); return PTR_ERR(pidfd_file); } - get_pid(pid); /* held by pidfd_file now */ /* * anon_inode_getfile() ignores everything outside of the * O_ACCMODE | O_NONBLOCK mask, set PIDFD_THREAD manually. diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index 15781acaac1c..6ec3deec68c2 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c @@ -573,7 +573,7 @@ SYSCALL_DEFINE2(setns, int, fd, int, flags) if (proc_ns_file(f.file)) err = validate_ns(&nsset, ns); else - err = validate_nsset(&nsset, f.file->private_data); + err = validate_nsset(&nsset, pidfd_pid(f.file)); if (!err) { commit_nsset(&nsset); perf_event_namespaces(current); diff --git a/kernel/pid.c b/kernel/pid.c index c1d940fbd314..581cc34341fd 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -65,6 +66,13 @@ int pid_max = PID_MAX_DEFAULT; int pid_max_min = RESERVED_PIDS + 1; int pid_max_max = PID_MAX_LIMIT; +#ifdef CONFIG_FS_PID +/* + * Pseudo filesystems start inode numbering after one. We use Reserved + * PIDs as a natural offset. + */ +static u64 pidfs_ino = RESERVED_PIDS; +#endif /* * PID-map pages start out as NULL, they get allocated upon @@ -272,6 +280,9 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid, spin_lock_irq(&pidmap_lock); if (!(ns->pid_allocated & PIDNS_ADDING)) goto out_unlock; +#ifdef CONFIG_FS_PID + pid->ino = ++pidfs_ino; +#endif for ( ; upid >= pid->numbers; --upid) { /* Make the PID visible to find_pid_ns. 
*/ idr_replace(&upid->ns->idr, pid, upid->nr); -- cgit v1.2.3
From b28ddcc32d8fa3e20745b3a47dff863fe0376d79 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Mon, 19 Feb 2024 16:30:57 +0100 Subject: pidfs: convert to path_from_stashed() helper Moving pidfds from the anonymous inode infrastructure to a separate tiny in-kernel filesystem similar to sockfs, pipefs, and anon_inodefs causes SELinux denials, and thus various userspace components that make heavy use of pidfds fail, as pidfds used anon_inode_getfile(), which isn't subject to any LSM hooks. But dentry_open() is, and that would cause regressions. The failures that are seen are SELinux denials. But the core failure is dbus-broker. That cascades into other services failing that depend on dbus-broker. For example, when dbus-broker fails to start, polkit and all the others that depend on dbus-broker won't be able to work. The reason dbus-broker fails is that it doesn't handle failures for SO_PEERPIDFD correctly. Last kernel release we introduced SO_PEERPIDFD (and SCM_PIDFD). SO_PEERPIDFD allows dbus-broker, polkit, and others to receive a pidfd for the peer of an AF_UNIX socket. This is the first time in the history of Linux that we can safely authenticate clients in a race-free manner. dbus-broker immediately made use of this but messed up the error checking: it only allowed EINVAL as a valid failure for SO_PEERPIDFD. That's obviously problematic, not just because of LSM denials but also because of seccomp denials that would prevent SO_PEERPIDFD from working, or any other new error code. So this is catching a flawed implementation in dbus-broker as well. It has to fall back to the old pid-based authentication when SO_PEERPIDFD doesn't work, no matter the reason, otherwise it'll always risk such failures. So overall that LSM denial should not have caused dbus-broker to fail. It can never assume that a feature released only one kernel release ago, like SO_PEERPIDFD, is available. So, the next fix, separate from the SELinux policy update, is to try and fix dbus-broker at [3]. That should make it into Fedora as well. In addition the SELinux reference policy should also be updated. See [4] for that. If SELinux is in enforcing mode and it encounters anything it doesn't know about, it will deny it by default. And the policy is entirely in userspace, including declaring new types for stuff like nsfs or pidfs to allow it. For now we continue to raise S_PRIVATE on the inode if it's a pidfs inode, which means things behave exactly like before.
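The dbus-broker lesson generalises: treat any getsockopt() failure as "feature unavailable" and fall back, rather than special-casing EINVAL. A minimal sketch, assuming headers that define SO_PEERPIDFD (v6.5+) and a connected AF_UNIX socket:

    #define _GNU_SOURCE
    #include <sys/socket.h>

    /* Returns the peer's pidfd, or -1 meaning "fall back to the old
     * SO_PEERCRED/pid-based authentication". */
    static int get_peer_pidfd(int sock)
    {
            int pidfd = -1;
            socklen_t len = sizeof(pidfd);

            /* Any error, whether EINVAL, ENOPROTOOPT, or an LSM or
             * seccomp denial on an older or locked-down kernel, means
             * "no pidfd available". Failing hard here is exactly the
             * mistake described above. */
            if (getsockopt(sock, SOL_SOCKET, SO_PEERPIDFD, &pidfd, &len) < 0)
                    return -1;
            return pidfd;
    }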
Link: https://bugzilla.redhat.com/show_bug.cgi?id=2265630 Link: https://github.com/fedora-selinux/selinux-policy/pull/2050 Link: https://github.com/bus1/dbus-broker/pull/343 [3] Link: https://github.com/SELinuxProject/refpolicy/pull/762 [4] Reported-by: Nathan Chancellor Link: https://lore.kernel.org/r/20240222190334.GA412503@dev-arch.thelio-3990X Link: https://lore.kernel.org/r/20240218-neufahrzeuge-brauhaus-fb0eb6459771@brauner Signed-off-by: Christian Brauner --- kernel/pid.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/pid.c b/kernel/pid.c index 581cc34341fd..99a0c5eb24b8 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -281,6 +281,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid, if (!(ns->pid_allocated & PIDNS_ADDING)) goto out_unlock; #ifdef CONFIG_FS_PID + pid->stashed = NULL; pid->ino = ++pidfs_ino; #endif for ( ; upid >= pid->numbers; --upid) { -- cgit v1.2.3 From ce3576ebd62d99f79c1dc98824e2ef6d6ab68434 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Wed, 24 Jan 2024 11:49:53 +0100 Subject: locking/rtmutex: Use try_cmpxchg_relaxed() in mark_rt_mutex_waiters() Use try_cmpxchg() instead of cmpxchg(*ptr, old, new) == old. The x86 CMPXCHG instruction returns success in the ZF flag, so this change saves a compare after CMPXCHG (and related move instruction in front of CMPXCHG). Also, try_cmpxchg() implicitly assigns old *ptr value to "old" when CMPXCHG fails. There is no need to re-read the value in the loop. Note that the value from *ptr should be read using READ_ONCE() to prevent the compiler from merging, refetching or reordering the read. No functional change intended. Signed-off-by: Uros Bizjak Signed-off-by: Ingo Molnar Cc: Peter Zijlstra Cc: Waiman Long Cc: Will Deacon Cc: Thomas Gleixner Cc: Linus Torvalds Cc: Paul E. McKenney Link: https://lore.kernel.org/r/20240124104953.612063-1-ubizjak@gmail.com --- kernel/locking/rtmutex.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 4a10e8c16fd2..88d08eeb8bc0 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -237,12 +237,13 @@ static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, */ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) { - unsigned long owner, *p = (unsigned long *) &lock->owner; + unsigned long *p = (unsigned long *) &lock->owner; + unsigned long owner, new; + owner = READ_ONCE(*p); do { - owner = *p; - } while (cmpxchg_relaxed(p, owner, - owner | RT_MUTEX_HAS_WAITERS) != owner); + new = owner | RT_MUTEX_HAS_WAITERS; + } while (!try_cmpxchg_relaxed(p, &owner, new)); /* * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE -- cgit v1.2.3 From 2be2a197ff6c3a659ab9285e1d88cbdc609ac6de Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 29 Feb 2024 15:23:36 +0100 Subject: sched/idle: Conditionally handle tick broadcast in default_idle_call() The x86 architecture has an idle routine for AMD CPUs which are affected by erratum 400. On the affected CPUs the local APIC timer stops in the C1E halt state. It therefore requires tick broadcasting. The invocation of tick_broadcast_enter()/exit() from this function violates the RCU constraints because it can end up in lockdep or tracing, which rightfully triggers a warning. tick_broadcast_enter()/exit() must be invoked before ct_cpuidle_enter() and after ct_cpuidle_exit() in default_idle_call(). 
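The try_cmpxchg() shape used in mark_rt_mutex_waiters() above has a direct userspace analogue in C11 atomics, whose compare-exchange likewise writes the freshly observed value back into the expected slot on failure, so the loop needs no separate re-read. A sketch (not kernel code):

    #include <stdatomic.h>

    /* Userspace analogue of the mark_rt_mutex_waiters() loop. */
    static void set_bit_relaxed(atomic_ulong *owner_p, unsigned long bit)
    {
            unsigned long owner =
                    atomic_load_explicit(owner_p, memory_order_relaxed);

            while (!atomic_compare_exchange_weak_explicit(owner_p, &owner,
                                                          owner | bit,
                                                          memory_order_relaxed,
                                                          memory_order_relaxed))
                    ;       /* 'owner' now holds the current value */
    }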
Add a static branch conditional invocation of tick_broadcast_enter()/exit() into this function to allow x86 to replace the AMD-specific idle code. It's guarded by a config switch which will be selected by x86. Otherwise it's a NOOP. Reported-by: Borislav Petkov Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20240229142248.266708822@linutronix.de --- kernel/sched/idle.c | 21 +++++++++++++++++++++ kernel/time/Kconfig | 5 +++++ 2 files changed, 26 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 31231925f1ec..31ad81153295 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -81,6 +81,25 @@ void __weak arch_cpu_idle(void) cpu_idle_force_poll = 1; } +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE +DEFINE_STATIC_KEY_FALSE(arch_needs_tick_broadcast); + +static inline void cond_tick_broadcast_enter(void) +{ + if (static_branch_unlikely(&arch_needs_tick_broadcast)) + tick_broadcast_enter(); +} + +static inline void cond_tick_broadcast_exit(void) +{ + if (static_branch_unlikely(&arch_needs_tick_broadcast)) + tick_broadcast_exit(); +} +#else +static inline void cond_tick_broadcast_enter(void) { } +static inline void cond_tick_broadcast_exit(void) { } +#endif + /** * default_idle_call - Default CPU idle routine. * @@ -90,6 +109,7 @@ void __cpuidle default_idle_call(void) { instrumentation_begin(); if (!current_clr_polling_and_test()) { + cond_tick_broadcast_enter(); trace_cpu_idle(1, smp_processor_id()); stop_critical_timings(); @@ -99,6 +119,7 @@ void __cpuidle default_idle_call(void) start_critical_timings(); trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); + cond_tick_broadcast_exit(); } local_irq_enable(); instrumentation_end(); diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index bae8f11070be..fc3b1a06c981 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig @@ -39,6 +39,11 @@ config GENERIC_CLOCKEVENTS_BROADCAST bool depends on GENERIC_CLOCKEVENTS +# Handle broadcast in default_idle_call() +config GENERIC_CLOCKEVENTS_BROADCAST_IDLE + bool + depends on GENERIC_CLOCKEVENTS_BROADCAST + # Automatically adjust the min. reprogramming time for # clock event device config GENERIC_CLOCKEVENTS_MIN_ADJUST -- cgit v1.2.3
From e9a8e5a587ca55fec6c58e4881742705d45bee54 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Thu, 22 Feb 2024 17:41:20 +0200 Subject: bpf: check bpf_func_state->callback_depth when pruning states When comparing current and cached states, the verifier should consider bpf_func_state->callback_depth. The current state cannot be pruned against a cached state when the current state has more iterations left than the cached state; it has more iterations left when its callback_depth is smaller. Below is an example illustrating this bug, minimized from mailing list discussion [0] (assume that BPF_F_TEST_STATE_FREQ is set). The example is not a safe program: if loop_cb point (1) is followed by loop_cb point (2), then division by zero is possible at point (4).

    struct ctx { __u64 a; __u64 b; __u64 c; };

    static void loop_cb(int i, struct ctx *ctx)
    {
            /* assume that generated code is "fallthrough-first":
             * if ... == 1 goto
             * if ... == 2 goto
             *
             */
            switch (bpf_get_prandom_u32()) {
            case 1:  /* 1 */ ctx->a = 42; return 0; break;
            case 2:  /* 2 */ ctx->b = 42; return 0; break;
            default: /* 3 */ ctx->c = 42; return 0; break;
            }
    }

    SEC("tc")
    __failure __flag(BPF_F_TEST_STATE_FREQ)
    int test(struct __sk_buff *skb)
    {
            struct ctx ctx = { 7, 7, 7 };

            bpf_loop(2, loop_cb, &ctx, 0);                   /* 0 */
            /* assume generated checks are in-order: .a first */
            if (ctx.a == 42 && ctx.b == 42 && ctx.c == 7)
                    asm volatile("r0 /= 0;":::"r0");         /* 4 */
            return 0;
    }

Prior to this commit the verifier built the following checkpoint tree for this example:

    .------------------------------------- Checkpoint / State name
    |    .-------------------------------- Code point number
    |    |   .---------------------------- Stack state {ctx.a,ctx.b,ctx.c}
    |    |   |              .------------------- Callback depth in frame #0
    v    v   v              v
       - (0) {7P,7P,7},depth=0
         - (3) {7P,7P,7},depth=1
           - (0) {7P,7P,42},depth=1
             - (3) {7P,7,42},depth=2
               - (0) {7P,7,42},depth=2     loop terminates because of depth limit
                 - (4) {7P,7,42},depth=0   predicted false, ctx.a marked precise
                 - (6) exit
    (a)      - (2) {7P,7,42},depth=2
               - (0) {7P,42,42},depth=2    loop terminates because of depth limit
                 - (4) {7P,42,42},depth=0  predicted false, ctx.a marked precise
                 - (6) exit
    (b)      - (1) {7P,7P,42},depth=2
               - (0) {42P,7P,42},depth=2   loop terminates because of depth limit
                 - (4) {42P,7P,42},depth=0 predicted false, ctx.{a,b} marked precise
                 - (6) exit
         - (2) {7P,7,7},depth=1
               considered safe, pruned using checkpoint (a)
    (c)  - (1) {7P,7P,7},depth=1
               considered safe, pruned using checkpoint (b)

Here checkpoint (b) has a callback_depth of 2, meaning that it would never reach state {42,42,7}, while checkpoint (c) has a callback_depth of 1 and thus could still explore the state {42,42,7} if not pruned prematurely. This commit forbids such premature pruning, allowing the verifier to explore the state sub-tree starting at (c):

    (c)  - (1) {7,7,7P},depth=1
           - (0) {42P,7,7P},depth=1
             ...
             - (2) {42,7,7},depth=2
               - (0) {42,42,7},depth=2     loop terminates because of depth limit
                 - (4) {42,42,7},depth=0   predicted true, ctx.{a,b,c} marked precise
                 - (5) division by zero

[0] https://lore.kernel.org/bpf/9b251840-7cb8-4d17-bd23-1fc8071d8eef@linux.dev/ Fixes: bb124da69c47 ("bpf: keep track of max number of bpf_loop callback iterations") Suggested-by: Yonghong Song Signed-off-by: Eduard Zingerman Acked-by: Yonghong Song Link: https://lore.kernel.org/r/20240222154121.6991-2-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index b263f093ee76..ddea9567f755 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -16602,6 +16602,9 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat { int i; + if (old->callback_depth > cur->callback_depth) + return false; + for (i = 0; i < MAX_BPF_REG; i++) if (!regsafe(env, &old->regs[i], &cur->regs[i], &env->idmap_scratch, exact)) -- cgit v1.2.3
From 2487007aa3b9fafbd2cb14068f49791ce1d7ede5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Tue, 5 Mar 2024 22:31:32 +0100 Subject: cpumap: Zero-initialise xdp_rxq_info struct before running XDP program MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When running an XDP program that is attached to a cpumap entry, we don't initialise the xdp_rxq_info data structure being used in the xdp_buff that backs the XDP program invocation.
Tobias noticed that this leads to random values being returned as the xdp_md->rx_queue_index value for XDP programs running in a cpumap. This means we're basically returning the contents of uninitialised memory, which is bad. Fix this by zero-initialising the rxq data structure before running the XDP program. Fixes: 9216477449f3 ("bpf: cpumap: Add the possibility to attach an eBPF program to cpumap") Reported-by: Tobias Böhm Signed-off-by: Toke Høiland-Jørgensen Link: https://lore.kernel.org/r/20240305213132.11955-1-toke@redhat.com Signed-off-by: Martin KaFai Lau --- kernel/bpf/cpumap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 8a0bb80fe48a..ef82ffc90cbe 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -178,7 +178,7 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, void **frames, int n, struct xdp_cpumap_stats *stats) { - struct xdp_rxq_info rxq; + struct xdp_rxq_info rxq = {}; struct xdp_buff xdp; int i, nframes = 0; -- cgit v1.2.3
From 8ca1836769d758e4fbf5851bb81e181c52193f5d Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 5 Mar 2024 01:28:22 +0100 Subject: timer/migration: Fix quick check reporting late expiry When a CPU is the last active in the hierarchy and it tries to enter idle, the quick check looking up the next event towards cpuidle heuristics may report a too-late expiry, such as in the following scenario:

                              [GRP1:0]
                           migrator = NONE
                           active   = NONE
                           nextevt  = T0:0, T0:1
                            /              \
                 [GRP0:0]                   [GRP0:1]
              migrator = NONE            migrator = NONE
              active   = NONE            active   = NONE
              nextevt  = T0, T1          nextevt  = T2
               /         \                /         \
              0           1              2           3
             idle        idle           idle        idle

0) The whole system is idle, and CPU 0 was the last migrator. CPU 0 has a timer (T0), CPU 1 has a timer (T1) and CPU 2 has a timer (T2). The expire order is T0 < T1 < T2.

                              [GRP1:0]
                           migrator = GRP0:0
                           active   = GRP0:0
                           nextevt  = T0:0(i), T0:1
                            /              \
                 [GRP0:0]                   [GRP0:1]
              migrator = CPU0            migrator = NONE
              active   = CPU0            active   = NONE
              nextevt  = T0(i), T1       nextevt  = T2
               /         \                /         \
              0           1              2           3
            active       idle           idle        idle

1) CPU 0 becomes active. The (i) means a now ignored timer.

                              [GRP1:0]
                           migrator = GRP0:0
                           active   = GRP0:0
                           nextevt  = T0:1
                            /              \
                 [GRP0:0]                   [GRP0:1]
              migrator = CPU0            migrator = NONE
              active   = CPU0            active   = NONE
              nextevt  = T1              nextevt  = T2
               /         \                /         \
              0           1              2           3
            active       idle           idle        idle

2) CPU 0 handles remote. No timer actually expired but ignored timers have been cleaned out and their sibling's timers haven't been propagated. As a result the top level's next event is T2 and not T1.

3) CPU 0 tries to enter idle without any global timer enqueued and calls tmigr_quick_check(). The expiry of T2 is returned instead of the expiry of T1.

When the quick check returns an expiry that is too late, the cpuidle governor may pick a C-state that is too deep. This may result in undesired CPU wakeup latency if the next timer is actually close enough. Fix this by assuming that expiries aren't sorted top-down while performing the quick check, and pick up instead the earliest expiry encountered while walking up the hierarchy.
Fixes: 7ee988770326 ("timers: Implement the hierarchical pull model") Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240305002822.18130-1-frederic@kernel.org --- kernel/time/timer_migration.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c index d85aa2afb969..8f49b6b96dfd 100644 --- a/kernel/time/timer_migration.c +++ b/kernel/time/timer_migration.c @@ -1385,11 +1385,11 @@ u64 tmigr_cpu_deactivate(u64 nextexp) * single group active on the way to top level) * * nextevt - when CPU is offline and has to handle timer on his own * or when on the way to top in every group only a single - * child is active and but @nextevt is before next_expiry - * of top level group - * * next_expiry (top) - value of top level group, when on the way to top in - * every group only a single child is active and @nextevt - * is after this value active child. + * child is active but @nextevt is before the lowest + * next_expiry encountered while walking up to top level. + * * next_expiry - value of lowest expiry encountered while walking groups + * if only a single child is active on each and @nextevt + * is after this lowest expiry. */ u64 tmigr_quick_check(u64 nextevt) { @@ -1408,10 +1408,16 @@ u64 tmigr_quick_check(u64 nextevt) do { if (!tmigr_check_lonely(group)) { return KTIME_MAX; - } else if (!group->parent) { - u64 first_global = READ_ONCE(group->next_expiry); - - return min_t(u64, nextevt, first_global); + } else { + /* + * Since current CPU is active, events may not be sorted + * from bottom to the top because the CPU's event is ignored + * up to the top and its sibling's events not propagated upwards. + * Thus keep track of the lowest observed expiry. + */ + nextevt = min_t(u64, nextevt, READ_ONCE(group->next_expiry)); + if (!group->parent) + return nextevt; } group = group->parent; } while (group); -- cgit v1.2.3
From 5efd3e2aef91d2d812290dcb25b2058e6f3f532c Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Mon, 4 Mar 2024 17:43:41 -0500 Subject: tracing: Remove precision vsnprintf() check from print event This reverts 60be76eeabb3d ("tracing: Add size check when printing trace_marker output"). The only reason the precision check was added was a bug that miscalculated the write size of the string into the ring buffer and truncated it, removing the terminating nul byte. Reading the trace then crashed the kernel. But that was a bug introduced during development and should never happen in practice. If anything, the precision can hide bugs where a string in the ring buffer isn't nul-terminated, as it will then never be checked.
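For readers unfamiliar with the precision specifier being removed, a minimal userspace sketch of what "%.*s" does and why it can mask a missing nul terminator:

    #include <stdio.h>

    int main(void)
    {
            char buf[4] = { 'a', 'b', 'c', 'd' };   /* NOT nul-terminated */

            /* The precision caps the read at 4 bytes, so the missing
             * '\0' goes unnoticed and "abcd" prints cleanly... */
            printf("%.*s\n", 4, buf);

            /* ...whereas a plain "%s" would read past the end of buf,
             * exposing the bug instead of hiding it. */
            return 0;
    }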
Link: https://lore.kernel.org/all/C7E7AF1A-D30F-4D18-B8E5-AF1EF58004F5@linux.ibm.com/ Link: https://lore.kernel.org/linux-trace-kernel/20240227125706.04279ac2@gandalf.local.home Link: https://lore.kernel.org/all/20240302111244.3a1674be@gandalf.local.home/ Link: https://lore.kernel.org/linux-trace-kernel/20240304174341.2a561d9f@gandalf.local.home Cc: Masami Hiramatsu Cc: Linus Torvalds Fixes: 60be76eeabb3d ("tracing: Add size check when printing trace_marker output") Reported-by: Sachin Sant Tested-by: Sachin Sant Reviewed-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace_output.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 3e7fa44dc2b2..d8b302d01083 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -1587,12 +1587,11 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter, { struct print_entry *field; struct trace_seq *s = &iter->seq; - int max = iter->ent_size - offsetof(struct print_entry, buf); trace_assign_type(field, iter->ent); seq_print_ip_sym(s, field->ip, flags); - trace_seq_printf(s, ": %.*s", max, field->buf); + trace_seq_printf(s, ": %s", field->buf); return trace_handle_return(s); } @@ -1601,11 +1600,10 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags, struct trace_event *event) { struct print_entry *field; - int max = iter->ent_size - offsetof(struct print_entry, buf); trace_assign_type(field, iter->ent); - trace_seq_printf(&iter->seq, "# %lx %.*s", field->ip, max, field->buf); + trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf); return trace_handle_return(&iter->seq); } -- cgit v1.2.3
From 095fe4891282be510af0db1b03587b512c0de31d Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Mon, 4 Mar 2024 22:34:33 -0500 Subject: tracing: Limit trace_marker writes to just 4K Limit the max print event of trace_marker to just a 4K string size. This must also be less than the amount that can be held by a trace_seq along with the text that comes before the output (like the task name, PID, CPU, state, etc), as trace_seq is made to handle large events (some greater than 4K). Make the max size of a trace_marker write event 4K, which is guaranteed to fit in the trace_seq buffer.
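A minimal userspace sketch of the new behaviour (paths assume tracefs is mounted in the usual place and the program runs with sufficient privileges): oversized writes are clamped rather than rejected, so expect a short count of at most 4096 bytes.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            char msg[8192];
            int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);

            if (fd < 0)
                    return 1;

            memset(msg, 'x', sizeof(msg));
            msg[sizeof(msg) - 1] = '\n';

            /* With this patch the kernel clamps the count to 4K, so
             * the recorded marker holds at most 4096 bytes and write()
             * reports a short count. */
            ssize_t n = write(fd, msg, sizeof(msg));
            printf("wrote %zd of %zu bytes\n", n, sizeof(msg));
            close(fd);
            return 0;
    }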
Link: https://lore.kernel.org/linux-trace-kernel/20240304223433.4ba47dff@gandalf.local.home Suggested-by: Linus Torvalds Reviewed-by: Mathieu Desnoyers Reviewed-by: Masami Hiramatsu (Google) Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8198bfc54b58..d16b95ca58a7 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -7293,6 +7293,8 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp) return 0; } +#define TRACE_MARKER_MAX_SIZE 4096 + static ssize_t tracing_mark_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) @@ -7320,6 +7322,9 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, if ((ssize_t)cnt < 0) return -EINVAL; + if (cnt > TRACE_MARKER_MAX_SIZE) + cnt = TRACE_MARKER_MAX_SIZE; + meta_size = sizeof(*entry) + 2; /* add '\0' and possible '\n' */ again: size = cnt + meta_size; @@ -7328,11 +7333,6 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, if (cnt < FAULTED_SIZE) size += FAULTED_SIZE - cnt; - if (size > TRACE_SEQ_BUFFER_SIZE) { - cnt -= size - TRACE_SEQ_BUFFER_SIZE; - goto again; - } - buffer = tr->array_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, tracing_gen_ctx()); -- cgit v1.2.3 From b3594573681b53316ec0365332681a30463edfd6 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Fri, 8 Mar 2024 15:24:03 -0500 Subject: ring-buffer: Fix waking up ring buffer readers A task can wait on a ring buffer for when it fills up to a specific watermark. The writer will check the minimum watermark that waiters are waiting for and if the ring buffer is past that, it will wake up all the waiters. The waiters are in a wait loop, and will first check if a signal is pending and then check if the ring buffer is at the desired level where it should break out of the loop. If a file that uses a ring buffer closes, and there's threads waiting on the ring buffer, it needs to wake up those threads. To do this, a "wait_index" was used. Before entering the wait loop, the waiter will read the wait_index. On wakeup, it will check if the wait_index is different than when it entered the loop, and will exit the loop if it is. The waker will only need to update the wait_index before waking up the waiters. This had a couple of bugs. One trivial one and one broken by design. The trivial bug was that the waiter checked the wait_index after the schedule() call. It had to be checked between the prepare_to_wait() and the schedule() which it was not. The main bug is that the first check to set the default wait_index will always be outside the prepare_to_wait() and the schedule(). That's because the ring_buffer_wait() doesn't have enough context to know if it should break out of the loop. The loop itself is not needed, because all the callers to the ring_buffer_wait() also has their own loop, as the callers have a better sense of what the context is to decide whether to break out of the loop or not. Just have the ring_buffer_wait() block once, and if it gets woken up, exit the function and let the callers decide what to do next. 
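A condensed sketch of the resulting calling convention (condition_met() and shutting_down() are hypothetical stand-ins for a caller's real exit conditions): the wait primitive blocks once, and the loop, if any, lives in the caller.

    /* caller-side pattern; the helpers are hypothetical */
    while (!condition_met()) {
            int ret = ring_buffer_wait(buffer, cpu, full);

            if (ret)                /* e.g. -EINTR */
                    break;
            if (shutting_down())    /* woken by ring_buffer_wake_waiters() */
                    break;
            /* otherwise: spurious or partial wakeup, re-check and re-wait */
    }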
Link: https://lore.kernel.org/all/CAHk-=whs5MdtNjzFkTyaUy=vHi=qwWgPi0JgTe6OYUYMNSRZfg@mail.gmail.com/ Link: https://lore.kernel.org/linux-trace-kernel/20240308202431.792933613@goodmis.org Cc: stable@vger.kernel.org Cc: Masami Hiramatsu Cc: Mark Rutland Cc: Mathieu Desnoyers Cc: Andrew Morton Cc: Linus Torvalds Cc: linke li Cc: Rabin Vincent Fixes: e30f53aad2202 ("tracing: Do not busy wait in buffer splice") Signed-off-by: Steven Rostedt (Google) --- kernel/trace/ring_buffer.c | 139 ++++++++++++++++++++++----------------------- 1 file changed, 68 insertions(+), 71 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 0699027b4f4c..3400f11286e3 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -384,7 +384,6 @@ struct rb_irq_work { struct irq_work work; wait_queue_head_t waiters; wait_queue_head_t full_waiters; - long wait_index; bool waiters_pending; bool full_waiters_pending; bool wakeup_full; @@ -798,14 +797,40 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) rbwork = &cpu_buffer->irq_work; } - rbwork->wait_index++; - /* make sure the waiters see the new index */ - smp_wmb(); - /* This can be called in any context */ irq_work_queue(&rbwork->work); } +static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full) +{ + struct ring_buffer_per_cpu *cpu_buffer; + bool ret = false; + + /* Reads of all CPUs always waits for any data */ + if (cpu == RING_BUFFER_ALL_CPUS) + return !ring_buffer_empty(buffer); + + cpu_buffer = buffer->buffers[cpu]; + + if (!ring_buffer_empty_cpu(buffer, cpu)) { + unsigned long flags; + bool pagebusy; + + if (!full) + return true; + + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); + pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; + ret = !pagebusy && full_hit(buffer, cpu, full); + + if (!cpu_buffer->shortest_full || + cpu_buffer->shortest_full > full) + cpu_buffer->shortest_full = full; + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); + } + return ret; +} + /** * ring_buffer_wait - wait for input to the ring buffer * @buffer: buffer to wait on @@ -821,7 +846,6 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) struct ring_buffer_per_cpu *cpu_buffer; DEFINE_WAIT(wait); struct rb_irq_work *work; - long wait_index; int ret = 0; /* @@ -840,81 +864,54 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) work = &cpu_buffer->irq_work; } - wait_index = READ_ONCE(work->wait_index); - - while (true) { - if (full) - prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE); - else - prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); - - /* - * The events can happen in critical sections where - * checking a work queue can cause deadlocks. - * After adding a task to the queue, this flag is set - * only to notify events to try to wake up the queue - * using irq_work. - * - * We don't clear it even if the buffer is no longer - * empty. The flag only causes the next event to run - * irq_work to do the work queue wake up. The worse - * that can happen if we race with !trace_empty() is that - * an event will cause an irq_work to try to wake up - * an empty queue. - * - * There's no reason to protect this flag either, as - * the work queue and irq_work logic will do the necessary - * synchronization for the wake ups. The only thing - * that is necessary is that the wake up happens after - * a task has been queued. It's OK for spurious wake ups. 
- */ - if (full) - work->full_waiters_pending = true; - else - work->waiters_pending = true; - - if (signal_pending(current)) { - ret = -EINTR; - break; - } - - if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) - break; - - if (cpu != RING_BUFFER_ALL_CPUS && - !ring_buffer_empty_cpu(buffer, cpu)) { - unsigned long flags; - bool pagebusy; - bool done; - - if (!full) - break; - - raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); - pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; - done = !pagebusy && full_hit(buffer, cpu, full); + if (full) + prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE); + else + prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); - if (!cpu_buffer->shortest_full || - cpu_buffer->shortest_full > full) - cpu_buffer->shortest_full = full; - raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); - if (done) - break; - } + /* + * The events can happen in critical sections where + * checking a work queue can cause deadlocks. + * After adding a task to the queue, this flag is set + * only to notify events to try to wake up the queue + * using irq_work. + * + * We don't clear it even if the buffer is no longer + * empty. The flag only causes the next event to run + * irq_work to do the work queue wake up. The worse + * that can happen if we race with !trace_empty() is that + * an event will cause an irq_work to try to wake up + * an empty queue. + * + * There's no reason to protect this flag either, as + * the work queue and irq_work logic will do the necessary + * synchronization for the wake ups. The only thing + * that is necessary is that the wake up happens after + * a task has been queued. It's OK for spurious wake ups. + */ + if (full) + work->full_waiters_pending = true; + else + work->waiters_pending = true; - schedule(); + if (rb_watermark_hit(buffer, cpu, full)) + goto out; - /* Make sure to see the new wait index */ - smp_rmb(); - if (wait_index != work->wait_index) - break; + if (signal_pending(current)) { + ret = -EINTR; + goto out; } + schedule(); + out: if (full) finish_wait(&work->full_waiters, &wait); else finish_wait(&work->waiters, &wait); + if (!ret && !rb_watermark_hit(buffer, cpu, full) && signal_pending(current)) + ret = -EINTR; + return ret; } -- cgit v1.2.3
From 68282dd930ea38b068ce2c109d12405f40df3f93 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Fri, 8 Mar 2024 15:24:04 -0500 Subject: ring-buffer: Fix resetting of shortest_full The "shortest_full" variable is used to keep track of the waiter that is waiting for the smallest amount on the ring buffer before being woken up. When a task waits on the ring buffer, it passes in a "full" value that is a percentage. 0 means wake up on any data. 1-100 means wake up from 1% to 100% full buffer. As all waiters are on the same wait queue, the wake up happens for the waiter with the smallest percentage. The problem is that the shortest_full on the cpu_buffer that stores the smallest amount doesn't get reset when all the waiters are woken up. It does get reset when the ring buffer is reset (echo > /sys/kernel/tracing/trace). This means that tasks may be woken up more often than they want to be. Instead, have the shortest_full field get reset just before waking up all the tasks. If the tasks wait again, they will update the shortest_full before sleeping.
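Condensed from the patch below, the invariant reads roughly as follows (locking elided here; the actual fix holds reader_lock around both sides): shortest_full is the minimum watermark over the current set of sleepers, so the waker must clear it when it empties that set.

    /* waiter side: register my watermark before sleeping */
    if (!cpu_buffer->shortest_full || cpu_buffer->shortest_full > full)
            cpu_buffer->shortest_full = full;

    /* waker side (the fix): every sleeper is being woken, so the
     * minimum no longer describes anyone; reset it and let tasks
     * that sleep again re-register their watermark. */
    cpu_buffer->shortest_full = 0;
    wake_up_all(&rbwork->full_waiters);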
Also add locking around setting of shortest_full in the poll logic, and change "work" to "rbwork" to match the variable name for rb_irq_work structures that are used in other places. Link: https://lore.kernel.org/linux-trace-kernel/20240308202431.948914369@goodmis.org Cc: stable@vger.kernel.org Cc: Masami Hiramatsu Cc: Mark Rutland Cc: Mathieu Desnoyers Cc: Andrew Morton Cc: Linus Torvalds Cc: linke li Cc: Rabin Vincent Fixes: 2c2b0a78b3739 ("ring-buffer: Add percentage of ring buffer full to wake up reader") Signed-off-by: Steven Rostedt (Google) --- kernel/trace/ring_buffer.c | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 3400f11286e3..aa332ace108b 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -755,8 +755,19 @@ static void rb_wake_up_waiters(struct irq_work *work) wake_up_all(&rbwork->waiters); if (rbwork->full_waiters_pending || rbwork->wakeup_full) { + /* Only cpu_buffer sets the above flags */ + struct ring_buffer_per_cpu *cpu_buffer = + container_of(rbwork, struct ring_buffer_per_cpu, irq_work); + + /* Called from interrupt context */ + raw_spin_lock(&cpu_buffer->reader_lock); rbwork->wakeup_full = false; rbwork->full_waiters_pending = false; + + /* Waking up all waiters, they will reset the shortest full */ + cpu_buffer->shortest_full = 0; + raw_spin_unlock(&cpu_buffer->reader_lock); + wake_up_all(&rbwork->full_waiters); } } @@ -934,28 +945,33 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, struct file *filp, poll_table *poll_table, int full) { struct ring_buffer_per_cpu *cpu_buffer; - struct rb_irq_work *work; + struct rb_irq_work *rbwork; if (cpu == RING_BUFFER_ALL_CPUS) { - work = &buffer->irq_work; + rbwork = &buffer->irq_work; full = 0; } else { if (!cpumask_test_cpu(cpu, buffer->cpumask)) return EPOLLERR; cpu_buffer = buffer->buffers[cpu]; - work = &cpu_buffer->irq_work; + rbwork = &cpu_buffer->irq_work; } if (full) { - poll_wait(filp, &work->full_waiters, poll_table); - work->full_waiters_pending = true; + unsigned long flags; + + poll_wait(filp, &rbwork->full_waiters, poll_table); + + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); + rbwork->full_waiters_pending = true; if (!cpu_buffer->shortest_full || cpu_buffer->shortest_full > full) cpu_buffer->shortest_full = full; + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); } else { - poll_wait(filp, &work->waiters, poll_table); - work->waiters_pending = true; + poll_wait(filp, &rbwork->waiters, poll_table); + rbwork->waiters_pending = true; } /* -- cgit v1.2.3
From e5d7c1916562f0e856eb3d6f569629fcd535fed2 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Fri, 8 Mar 2024 15:24:05 -0500 Subject: tracing: Use .flush() call to wake up readers The .release() function does not get called until all readers of a file descriptor are finished. If a thread is blocked on reading a file descriptor in ring_buffer_wait(), and another thread closes the file descriptor, it will not wake up the other thread as ring_buffer_wake_waiters() is called by .release(), and that will not get called until the .read() is finished. The issue originally showed up in trace-cmd, but the readers are actually other processes with their own file descriptors. So calling close() would wake up the other tasks because they are blocked on another descriptor than the one that was closed. But other wake-ups solve that issue.
When a thread is blocked on a read, it can still hang even when another thread closed its descriptor. This is what the .flush() callback is for. Have the .flush() wake up the readers. Link: https://lore.kernel.org/linux-trace-kernel/20240308202432.107909457@goodmis.org Cc: stable@vger.kernel.org Cc: Masami Hiramatsu Cc: Mark Rutland Cc: Mathieu Desnoyers Cc: Andrew Morton Cc: Linus Torvalds Cc: linke li Cc: Rabin Vincent Fixes: f3ddb74ad0790 ("tracing: Wake up ring buffer waiters on closing of the file") Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d16b95ca58a7..c9c898307348 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -8393,6 +8393,20 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, return size; } +static int tracing_buffers_flush(struct file *file, fl_owner_t id) +{ + struct ftrace_buffer_info *info = file->private_data; + struct trace_iterator *iter = &info->iter; + + iter->wait_index++; + /* Make sure the waiters see the new wait_index */ + smp_wmb(); + + ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); + + return 0; +} + static int tracing_buffers_release(struct inode *inode, struct file *file) { struct ftrace_buffer_info *info = file->private_data; @@ -8404,12 +8418,6 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) __trace_array_put(iter->tr); - iter->wait_index++; - /* Make sure the waiters see the new wait_index */ - smp_wmb(); - - ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); - if (info->spare) ring_buffer_free_read_page(iter->array_buffer->buffer, info->spare_cpu, info->spare); @@ -8625,6 +8633,7 @@ static const struct file_operations tracing_buffers_fops = { .read = tracing_buffers_read, .poll = tracing_buffers_poll, .release = tracing_buffers_release, + .flush = tracing_buffers_flush, .splice_read = tracing_buffers_splice_read, .unlocked_ioctl = tracing_buffers_ioctl, .llseek = no_llseek, -- cgit v1.2.3
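The .flush()/.release() distinction driving this fix: .flush() runs on every close() of a file descriptor, while .release() runs only when the last reference to the open file (including dup()ed and inherited descriptors) goes away. A minimal userspace demonstration (the path is an arbitrary readable file, assumed to exist):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/etc/hostname", O_RDONLY);
            int dupfd;
            char c;

            if (fd < 0)
                    return 1;
            dupfd = dup(fd);        /* second reference to the same struct file */

            close(fd);              /* triggers .flush(), NOT .release() */

            /* The open file description is still alive through dupfd,
             * so the driver's .release() has not run yet... */
            if (read(dupfd, &c, 1) == 1)
                    printf("still readable: %c\n", c);

            close(dupfd);           /* last reference: now .release() runs */
            return 0;
    }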