From fc5ff53d2aa088713870cd684b160ee95c018520 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Dec 2025 21:25:17 +0800 Subject: workqueue: Make send_mayday() take a PWQ argument directly Make send_mayday() operate on a PWQ directly instead of taking a work item, so that rescuer_thread() now calls send_mayday(pwq) instead of open-coding the mayday list manipulation. Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- kernel/workqueue.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 253311af47c6..f8371aa54dca 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2976,9 +2976,8 @@ static void idle_cull_fn(struct work_struct *work) reap_dying_workers(&cull_list); } -static void send_mayday(struct work_struct *work) +static void send_mayday(struct pool_workqueue *pwq) { - struct pool_workqueue *pwq = get_work_pwq(work); struct workqueue_struct *wq = pwq->wq; lockdep_assert_held(&wq_mayday_lock); @@ -3016,7 +3015,7 @@ static void pool_mayday_timeout(struct timer_list *t) * rescuers. */ list_for_each_entry(work, &pool->worklist, entry) - send_mayday(work); + send_mayday(get_work_pwq(work)); } raw_spin_unlock(&wq_mayday_lock); @@ -3538,13 +3537,7 @@ repeat: */ if (pwq->nr_active && need_to_create_worker(pool)) { raw_spin_lock(&wq_mayday_lock); - /* - * Queue iff somebody else hasn't queued it already. 
- */ - if (list_empty(&pwq->mayday_node)) { - get_pwq(pwq); - list_add_tail(&pwq->mayday_node, &wq->maydays); - } + send_mayday(pwq); raw_spin_unlock(&wq_mayday_lock); } } -- cgit v1.2.3 From e5a30c303b07a4d6083e0f7f051b53add6d93c5d Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Dec 2025 21:25:18 +0800 Subject: workqueue: Process rescuer work items one-by-one using a cursor Previously, the rescuer scanned for all matching work items at once and processed them within a single rescuer thread, which could cause one blocking work item to stall all others. Make the rescuer process work items one-by-one instead of slurping all matches in a single pass. Break the rescuer loop after finding and processing the first matching work item, then restart the search to pick up the next. This gives normal worker threads a chance to process other items which gives them the opportunity to be processed instead of waiting on the rescuer's queue and prevents a blocking work item from stalling the rest once memory pressure is relieved. Introduce a dummy cursor work item to avoid potentially O(N^2) rescans of the work list. The marker records the resume position for the next scan, eliminating redundant traversals. Also introduce RESCUER_BATCH to control the maximum number of work items the rescuer processes in each turn, and move on to other PWQs when the limit is reached. 
Cc: ying chen Reported-by: ying chen Fixes: e22bee782b3b ("workqueue: implement concurrency managed dynamic worker pool") Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- kernel/workqueue.c | 75 ++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 59 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f8371aa54dca..7f9225936cd9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -117,6 +117,8 @@ enum wq_internal_consts { MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ CREATE_COOLDOWN = HZ, /* time to breath after fail */ + RESCUER_BATCH = 16, /* process items per turn */ + /* * Rescue workers are used only on emergencies and shared by * all cpus. Give MIN_NICE. @@ -286,6 +288,7 @@ struct pool_workqueue { struct list_head pending_node; /* LN: node on wq_node_nr_active->pending_pwqs */ struct list_head pwqs_node; /* WR: node on wq->pwqs */ struct list_head mayday_node; /* MD: node on wq->maydays */ + struct work_struct mayday_cursor; /* L: cursor on pool->worklist */ u64 stats[PWQ_NR_STATS]; @@ -1120,6 +1123,12 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool, return NULL; } +static void mayday_cursor_func(struct work_struct *work) +{ + /* should not be processed, only for marking position */ + BUG(); +} + /** * move_linked_works - move linked works to a list * @work: start of series of works to be scheduled @@ -1182,6 +1191,16 @@ static bool assign_work(struct work_struct *work, struct worker *worker, lockdep_assert_held(&pool->lock); + /* The cursor work should not be processed */ + if (unlikely(work->func == mayday_cursor_func)) { + /* only worker_thread() can possibly take this branch */ + WARN_ON_ONCE(worker->rescue_wq); + if (nextp) + *nextp = list_next_entry(work, entry); + list_del_init(&work->entry); + return false; + } + /* * A single work shouldn't be executed concurrently by multiple workers. 
* __queue_work() ensures that @work doesn't jump to a different pool @@ -3439,22 +3458,30 @@ sleep: static bool assign_rescuer_work(struct pool_workqueue *pwq, struct worker *rescuer) { struct worker_pool *pool = pwq->pool; + struct work_struct *cursor = &pwq->mayday_cursor; struct work_struct *work, *n; /* need rescue? */ if (!pwq->nr_active || !need_to_create_worker(pool)) return false; - /* - * Slurp in all works issued via this workqueue and - * process'em. - */ - list_for_each_entry_safe(work, n, &pool->worklist, entry) { - if (get_work_pwq(work) == pwq && assign_work(work, rescuer, &n)) + /* search from the start or cursor if available */ + if (list_empty(&cursor->entry)) + work = list_first_entry(&pool->worklist, struct work_struct, entry); + else + work = list_next_entry(cursor, entry); + + /* find the next work item to rescue */ + list_for_each_entry_safe_from(work, n, &pool->worklist, entry) { + if (get_work_pwq(work) == pwq && assign_work(work, rescuer, &n)) { pwq->stats[PWQ_STAT_RESCUED]++; + /* put the cursor for next search */ + list_move_tail(&cursor->entry, &n->entry); + return true; + } } - return !list_empty(&rescuer->scheduled); + return false; } /** @@ -3511,6 +3538,7 @@ repeat: struct pool_workqueue *pwq = list_first_entry(&wq->maydays, struct pool_workqueue, mayday_node); struct worker_pool *pool = pwq->pool; + unsigned int count = 0; __set_current_state(TASK_RUNNING); list_del_init(&pwq->mayday_node); @@ -3523,25 +3551,27 @@ repeat: WARN_ON_ONCE(!list_empty(&rescuer->scheduled)); - if (assign_rescuer_work(pwq, rescuer)) { + while (assign_rescuer_work(pwq, rescuer)) { process_scheduled_works(rescuer); /* - * The above execution of rescued work items could - * have created more to rescue through - * pwq_activate_first_inactive() or chained - * queueing. Let's put @pwq back on mayday list so - * that such back-to-back work items, which may be - * being used to relieve memory pressure, don't - * incur MAYDAY_INTERVAL delay inbetween. 
+ * If the per-turn work item limit is reached and other + * PWQs are in mayday, requeue mayday for this PWQ and + * let the rescuer handle the other PWQs first. */ - if (pwq->nr_active && need_to_create_worker(pool)) { + if (++count > RESCUER_BATCH && !list_empty(&pwq->wq->maydays) && + pwq->nr_active && need_to_create_worker(pool)) { raw_spin_lock(&wq_mayday_lock); send_mayday(pwq); raw_spin_unlock(&wq_mayday_lock); + break; } } + /* The cursor can not be left behind without the rescuer watching it. */ + if (!list_empty(&pwq->mayday_cursor.entry) && list_empty(&pwq->mayday_node)) + list_del_init(&pwq->mayday_cursor.entry); + /* * Leave this pool. Notify regular workers; otherwise, we end up * with 0 concurrency and stalling the execution. @@ -5160,6 +5190,19 @@ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, INIT_LIST_HEAD(&pwq->pwqs_node); INIT_LIST_HEAD(&pwq->mayday_node); kthread_init_work(&pwq->release_work, pwq_release_workfn); + + /* + * Set the dummy cursor work with valid function and get_work_pwq(). + * + * The cursor work should only be in the pwq->pool->worklist, and + * should not be treated as a processable work item. + * + * WORK_STRUCT_PENDING and WORK_STRUCT_INACTIVE just make it less + * surprise for kernel debugging tools and reviewers. + */ + INIT_WORK(&pwq->mayday_cursor, mayday_cursor_func); + atomic_long_set(&pwq->mayday_cursor.data, (unsigned long)pwq | + WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | WORK_STRUCT_INACTIVE); } /* sync @pwq with the current state of its associated wq and link it */ -- cgit v1.2.3 From 51cd2d2decf365a248ddc304b7aa6f0cadc748c3 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Mon, 8 Dec 2025 21:25:19 +0800 Subject: workqueue: Process extra works in rescuer on memory pressure Make the rescuer process more work on the last pwq when there are no more to rescue for the whole workqueue to help the regular workers in case it is a temporary memory pressure relief and to reduce relapse. 
Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- kernel/workqueue.c | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 7f9225936cd9..64fe81f30e85 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3461,10 +3461,37 @@ static bool assign_rescuer_work(struct pool_workqueue *pwq, struct worker *rescu struct work_struct *cursor = &pwq->mayday_cursor; struct work_struct *work, *n; - /* need rescue? */ - if (!pwq->nr_active || !need_to_create_worker(pool)) + /* have work items to rescue? */ + if (!pwq->nr_active) return false; + /* need rescue? */ + if (!need_to_create_worker(pool)) { + /* + * The pool has idle workers and doesn't need the rescuer, so it + * could simply return false here. + * + * However, the memory pressure might not be fully relieved. + * In PERCPU pool with concurrency enabled, having idle workers + * does not necessarily mean memory pressure is gone; it may + * simply mean regular workers have woken up, completed their + * work, and gone idle again due to concurrency limits. + * + * In this case, those working workers may later sleep again, + * the pool may run out of idle workers, and it will have to + * allocate new ones and wait for the timer to send mayday, + * causing unnecessary delay - especially if memory pressure + * was never resolved throughout. + * + * Do more work if memory pressure is still on to reduce + * relapse, using (pool->flags & POOL_MANAGER_ACTIVE), though + * not precisely, unless there are other PWQs needing help. 
+ */ + if (!(pool->flags & POOL_MANAGER_ACTIVE) || + !list_empty(&pwq->wq->maydays)) + return false; + } + /* search from the start or cursor if available */ if (list_empty(&cursor->entry)) work = list_first_entry(&pool->worklist, struct work_struct, entry); -- cgit v1.2.3 From 32d572e39031920691abfada68cdb19ad44b4eeb Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Tue, 3 Feb 2026 09:01:17 -0800 Subject: workqueue: add CONFIG_BOOTPARAM_WQ_STALL_PANIC option Add a kernel config option to set the default value of workqueue.panic_on_stall, similar to CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC, CONFIG_BOOTPARAM_HARDLOCKUP_PANIC and CONFIG_BOOTPARAM_HUNG_TASK_PANIC. This allows setting the number of workqueue stalls before triggering a kernel panic at build time, which is useful for high-availability systems that need consistent panic-on-stall, in other words, those servers which run with CONFIG_BOOTPARAM_*_PANIC=y already. The default remains 0 (disabled). Setting it to 1 will panic on the first stall, and higher values will panic after that many stall warnings. The value can still be overridden at runtime via the workqueue.panic_on_stall boot parameter or sysfs. 
Signed-off-by: Breno Leitao Signed-off-by: Tejun Heo --- kernel/workqueue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 64fe81f30e85..2e7fd46fce17 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -7568,7 +7568,7 @@ static struct timer_list wq_watchdog_timer; static unsigned long wq_watchdog_touched = INITIAL_JIFFIES; static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; -static unsigned int wq_panic_on_stall; +static unsigned int wq_panic_on_stall = CONFIG_BOOTPARAM_WQ_STALL_PANIC; module_param_named(panic_on_stall, wq_panic_on_stall, uint, 0644); /* -- cgit v1.2.3 From f84c9dd34e8dce3fb42598344da711573b383626 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Fri, 6 Feb 2026 03:18:01 -0800 Subject: workqueue: add time-based panic for stalls Add a new module parameter 'panic_on_stall_time' that triggers a panic when a workqueue stall persists for longer than the specified duration in seconds. Unlike 'panic_on_stall' which counts accumulated stall events, this parameter triggers based on the duration of a single continuous stall. This is useful for catching truly stuck workqueues rather than accumulating transient stalls. Usage: workqueue.panic_on_stall_time=120 This would panic if any workqueue pool has been stalled for 120 seconds or more. The stall duration is measured from the workqueue's last progress (pool_ts) which accounts for legitimate system stalls. 
Signed-off-by: Breno Leitao Signed-off-by: Tejun Heo --- kernel/workqueue.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2e7fd46fce17..68e664d7dbec 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -7571,6 +7571,10 @@ static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; static unsigned int wq_panic_on_stall = CONFIG_BOOTPARAM_WQ_STALL_PANIC; module_param_named(panic_on_stall, wq_panic_on_stall, uint, 0644); +static unsigned int wq_panic_on_stall_time; +module_param_named(panic_on_stall_time, wq_panic_on_stall_time, uint, 0644); +MODULE_PARM_DESC(panic_on_stall_time, "Panic if stall exceeds this many seconds (0=disabled)"); + /* * Show workers that might prevent the processing of pending work items. * The only candidates are CPU-bound workers in the running state. @@ -7622,7 +7626,12 @@ static void show_cpu_pools_hogs(void) rcu_read_unlock(); } -static void panic_on_wq_watchdog(void) +/* + * It triggers a panic in two scenarios: when the total number of stalls + * exceeds a threshold, and when a stall lasts longer than + * wq_panic_on_stall_time + */ +static void panic_on_wq_watchdog(unsigned int stall_time_sec) { static unsigned int wq_stall; @@ -7630,6 +7639,8 @@ static void panic_on_wq_watchdog(void) wq_stall++; BUG_ON(wq_stall >= wq_panic_on_stall); } + + BUG_ON(wq_panic_on_stall_time && stall_time_sec >= wq_panic_on_stall_time); } static void wq_watchdog_reset_touched(void) @@ -7644,10 +7655,12 @@ static void wq_watchdog_reset_touched(void) static void wq_watchdog_timer_fn(struct timer_list *unused) { unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; + unsigned int max_stall_time = 0; bool lockup_detected = false; bool cpu_pool_stall = false; unsigned long now = jiffies; struct worker_pool *pool; + unsigned int stall_time; int pi; if (!thresh) @@ -7681,14 +7694,15 @@ static void 
wq_watchdog_timer_fn(struct timer_list *unused) /* did we stall? */ if (time_after(now, ts + thresh)) { lockup_detected = true; + stall_time = jiffies_to_msecs(now - pool_ts) / 1000; + max_stall_time = max(max_stall_time, stall_time); if (pool->cpu >= 0 && !(pool->flags & POOL_BH)) { pool->cpu_stall = true; cpu_pool_stall = true; } pr_emerg("BUG: workqueue lockup - pool"); pr_cont_pool_info(pool); - pr_cont(" stuck for %us!\n", - jiffies_to_msecs(now - pool_ts) / 1000); + pr_cont(" stuck for %us!\n", stall_time); } @@ -7701,7 +7715,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused) show_cpu_pools_hogs(); if (lockup_detected) - panic_on_wq_watchdog(); + panic_on_wq_watchdog(max_stall_time); wq_watchdog_reset_touched(); mod_timer(&wq_watchdog_timer, jiffies + thresh); -- cgit v1.2.3 From 9cb8b0f289560728dbb8b88158e7a957e2e90a14 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Fri, 6 Feb 2026 03:18:02 -0800 Subject: workqueue: replace BUG_ON with panic in panic_on_wq_watchdog Replace BUG_ON() with panic() in panic_on_wq_watchdog(). This is not a bug condition but a deliberate forced panic requested by the user via module parameters to crash the system for debugging purposes. Using panic() instead of BUG_ON() makes this intent clearer and provides more informative output about which threshold was exceeded and the actual values, making it easier to diagnose the stall condition from crash dumps. 
Signed-off-by: Breno Leitao Signed-off-by: Tejun Heo --- kernel/workqueue.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 68e664d7dbec..e6c249f2fb46 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -7637,10 +7637,14 @@ static void panic_on_wq_watchdog(unsigned int stall_time_sec) if (wq_panic_on_stall) { wq_stall++; - BUG_ON(wq_stall >= wq_panic_on_stall); + if (wq_stall >= wq_panic_on_stall) + panic("workqueue: %u stall(s) exceeded threshold %u\n", + wq_stall, wq_panic_on_stall); } - BUG_ON(wq_panic_on_stall_time && stall_time_sec >= wq_panic_on_stall_time); + if (wq_panic_on_stall_time && stall_time_sec >= wq_panic_on_stall_time) + panic("workqueue: stall lasted %us, exceeding threshold %us\n", + stall_time_sec, wq_panic_on_stall_time); } static void wq_watchdog_reset_touched(void) -- cgit v1.2.3