summaryrefslogtreecommitdiff
path: root/kernel/workqueue.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--kernel/workqueue.c39
1 files changed, 35 insertions, 4 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b77119d71641..c6ea96d5b716 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1849,8 +1849,20 @@ static void unplug_oldest_pwq(struct workqueue_struct *wq)
raw_spin_lock_irq(&pwq->pool->lock);
if (pwq->plugged) {
pwq->plugged = false;
- if (pwq_activate_first_inactive(pwq, true))
+ if (pwq_activate_first_inactive(pwq, true)) {
+ /*
+ * While plugged, queueing skips activation which
+ * includes bumping the nr_active count and adding the
+ * pwq to nna->pending_pwqs if the count can't be
+ * obtained. We need to restore both for the pwq being
+ * unplugged. The first call activates the first
+ * inactive work item and the second, if there are more
+ * inactive, puts the pwq on pending_pwqs.
+ */
+ pwq_activate_first_inactive(pwq, false);
+
kick_pool(pwq->pool);
+ }
}
raw_spin_unlock_irq(&pwq->pool->lock);
}
@@ -7699,8 +7711,29 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
else
ts = touched;
- /* did we stall? */
+ /*
+ * Did we stall?
+ *
* Do a lockless check first so as not to disturb the system.
+ *
+ * Prevent false positives by double checking the timestamp
+ * under pool->lock. The lock makes sure that the check reads
+ * an updated pool->last_progress_ts when this CPU saw
+ * an already updated pool->worklist above. It seems better
+ * than adding another barrier into __queue_work() which
+ * is a hotter path.
+ */
if (time_after(now, ts + thresh)) {
+ scoped_guard(raw_spinlock_irqsave, &pool->lock) {
+ pool_ts = pool->last_progress_ts;
+ if (time_after(pool_ts, touched))
+ ts = pool_ts;
+ else
+ ts = touched;
+ }
+ if (!time_after(now, ts + thresh))
+ continue;
+
lockup_detected = true;
stall_time = jiffies_to_msecs(now - pool_ts) / 1000;
max_stall_time = max(max_stall_time, stall_time);
@@ -7712,8 +7745,6 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
pr_cont_pool_info(pool);
pr_cont(" stuck for %us!\n", stall_time);
}
-
-
}
if (lockup_detected)