From 3a46c9833c1fad3b4a91bbbeb856810c7e1d8e47 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 17 May 2023 17:02:08 -1000
Subject: workqueue: Re-order struct worker fields

struct worker was laid out with the intent that all fields that are
modified for each work item execution are in the first cacheline.
However, this hasn't been true for a while with the addition of
->last_func. Let's just collect hot fields together at the top.

Move ->sleeping into the hole after ->current_color and move ->last_func
right below. While at it, drop the cacheline comment which isn't useful
anymore.

Signed-off-by: Tejun Heo
Cc: Lai Jiangshan
---
 kernel/workqueue_internal.h | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

(limited to 'kernel/workqueue_internal.h')

diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index e00b1204a8e9..0600f04ceeb2 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -32,9 +32,12 @@ struct worker {
 	work_func_t		current_func;	/* L: current_work's fn */
 	struct pool_workqueue	*current_pwq;	/* L: current_work's pwq */
 	unsigned int		current_color;	/* L: current_work's color */
-	struct list_head	scheduled;	/* L: scheduled works */
+	int			sleeping;	/* None */
+
+	/* used by the scheduler to determine a worker's last known identity */
+	work_func_t		last_func;	/* L: last work's fn */
 
-	/* 64 bytes boundary on 64bit, 32 on 32bit */
+	struct list_head	scheduled;	/* L: scheduled works */
 
 	struct task_struct	*task;		/* I: worker task */
 	struct worker_pool	*pool;		/* A: the associated pool */
@@ -45,7 +48,6 @@ struct worker {
 	unsigned long		last_active;	/* L: last active timestamp */
 	unsigned int		flags;		/* X: flags */
 	int			id;		/* I: worker id */
-	int			sleeping;	/* None */
 
 	/*
 	 * Opaque string set with work_set_desc(). Printed out with task
@@ -55,9 +57,6 @@ struct worker {
 
 	/* used only by rescuers to point to the target workqueue */
 	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
-
-	/* used by the scheduler to determine a worker's last known identity */
-	work_func_t		last_func;
 };
 
 /**
--
cgit v1.2.3
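As an illustration of the layout idea above (not taken from the patch), here is a minimal userspace sketch: a simplified stand-in for struct worker with the per-execution hot fields grouped at the top, plus a static assertion that they end within the first cache line. The struct name, field types, and the 64-byte line size are assumptions made only for this example.

/*
 * Simplified, userspace stand-in for struct worker; illustrative only.
 * Hot fields that are touched on every work item execution sit at the
 * top; a static assert checks they fit in the first (assumed 64-byte)
 * cache line.
 */
#include <stddef.h>
#include <stdio.h>

typedef void (*work_func_t)(void *);

struct fake_worker {
	/* hot: modified for each work item execution */
	void		*current_work;
	work_func_t	current_func;
	void		*current_pwq;
	unsigned int	current_color;
	int		sleeping;
	work_func_t	last_func;

	/* colder fields follow */
	void		*task;
	void		*pool;
	unsigned long	last_active;
	unsigned int	flags;
	int		id;
};

_Static_assert(offsetof(struct fake_worker, last_func) + sizeof(work_func_t) <= 64,
	       "hot fields spill out of the first cache line");

int main(void)
{
	printf("hot fields end at byte %zu\n",
	       offsetof(struct fake_worker, last_func) + sizeof(work_func_t));
	return 0;
}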
From bdf8b9bfc131864f0fcef268b34123acfb6a1b59 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 17 May 2023 17:02:08 -1000
Subject: workqueue: Improve locking rule description for worker fields

* Some worker fields are modified only by the worker itself while holding
  pool->lock, making them safe to read from self, from IRQ context if the
  CPU is running the worker, or while holding pool->lock. Add the 'K'
  locking rule for them.

* worker->sleeping is currently marked "None" which isn't very
  descriptive. It's used only by the worker itself. Add the 'S' locking
  rule for it.

A future patch will depend on the 'K' rule to access worker->current_*
from the scheduler ticks.

Signed-off-by: Tejun Heo
---
 kernel/workqueue_internal.h | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

(limited to 'kernel/workqueue_internal.h')

diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 0600f04ceeb2..c2455be7b4c2 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -28,14 +28,15 @@ struct worker {
 		struct hlist_node	hentry;	/* L: while busy */
 	};
 
-	struct work_struct	*current_work;	/* L: work being processed */
-	work_func_t		current_func;	/* L: current_work's fn */
-	struct pool_workqueue	*current_pwq;	/* L: current_work's pwq */
-	unsigned int		current_color;	/* L: current_work's color */
-	int			sleeping;	/* None */
+	struct work_struct	*current_work;	/* K: work being processed and its */
+	work_func_t		current_func;	/* K: function */
+	struct pool_workqueue	*current_pwq;	/* K: pwq */
+	unsigned int		current_color;	/* K: color */
+
+	int			sleeping;	/* S: is worker sleeping? */
 
 	/* used by the scheduler to determine a worker's last known identity */
-	work_func_t		last_func;	/* L: last work's fn */
+	work_func_t		last_func;	/* K: last work's fn */
 
 	struct list_head	scheduled;	/* L: scheduled works */
 
@@ -45,7 +46,7 @@ struct worker {
 	struct list_head	node;		/* A: anchored at pool->workers */
 						/* A: runs through worker->node */
 
-	unsigned long		last_active;	/* L: last active timestamp */
+	unsigned long		last_active;	/* K: last active timestamp */
 	unsigned int		flags;		/* X: flags */
 	int			id;		/* I: worker id */
 
--
cgit v1.2.3
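To make the 'K' rule concrete, here is a small userspace analogue (a sketch, not kernel code): a field that only its owning thread writes, and only while holding the pool lock, can be read by that owner without taking the lock, while any other thread must hold the lock. The pthread-based types and names are invented for the example, and the kernel rule additionally covers reads from IRQ context on the CPU running the worker, which this sketch does not model.

/* Userspace analogue of the 'K' locking rule; types and names are illustrative only. */
#include <pthread.h>
#include <stdio.h>

struct pool {
	pthread_mutex_t lock;
};

struct worker {
	struct pool	*pool;
	pthread_t	owner;			/* thread that owns this worker */
	void		(*current_func)(void);	/* 'K': owner writes under pool->lock */
};

/* Only the owning worker updates the field, and only with pool->lock held. */
static void worker_set_current(struct worker *w, void (*fn)(void))
{
	pthread_mutex_lock(&w->pool->lock);
	w->current_func = fn;
	pthread_mutex_unlock(&w->pool->lock);
}

/* The owner may read its own field locklessly; anyone else takes the lock. */
static void (*worker_peek_current(struct worker *w))(void)
{
	void (*fn)(void);

	if (pthread_equal(pthread_self(), w->owner)) {
		fn = w->current_func;
	} else {
		pthread_mutex_lock(&w->pool->lock);
		fn = w->current_func;
		pthread_mutex_unlock(&w->pool->lock);
	}
	return fn;
}

static void demo_work(void) { }

int main(void)
{
	struct pool p = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct worker w = { .pool = &p, .owner = pthread_self() };

	worker_set_current(&w, demo_work);
	printf("owner sees demo_work: %d\n", worker_peek_current(&w) == demo_work);
	return 0;
}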
From 616db8779b1e3f93075df691432cccc5ef3c3ba0 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 17 May 2023 17:02:08 -1000
Subject: workqueue: Automatically mark CPU-hogging work items CPU_INTENSIVE

If a per-cpu work item hogs the CPU, it can prevent other work items from
starting through concurrency management. A per-cpu workqueue which intends
to host such CPU-hogging work items can choose to not participate in
concurrency management by setting %WQ_CPU_INTENSIVE; however, this can be
error-prone and difficult to debug when missed.

This patch adds an automatic CPU-usage-based detection. If a
concurrency-managed work item consumes more CPU time than the threshold
(10ms by default) continuously without intervening sleeps, wq_worker_tick(),
which is called from scheduler_tick(), will detect the condition and
automatically mark it CPU_INTENSIVE.

The mechanism isn't foolproof:

* Detection depends on the tick hitting the work item. Getting preempted at
  the right timings may allow a violating work item to evade detection at
  least temporarily.

* nohz_full CPUs may not be running ticks and thus can fail detection.

* Even when detection is working, the 10ms detection delays can add up if
  many CPU-hogging work items are queued at the same time.

However, in the vast majority of cases, this should be able to detect
violations reliably and provide reasonable protection with a small increase
in code complexity.

If some work items trigger this condition repeatedly, the bigger problem
likely is the CPU being saturated with such per-cpu work items and the
solution would be making them UNBOUND. The next patch will add a debug
mechanism to help spot such cases.

v4: Documentation for workqueue.cpu_intensive_thresh_us added to
    kernel-parameters.txt.

v3: Switch to use wq_worker_tick() instead of hooking into preemptions as
    suggested by Peter.

v2: Lai pointed out that wq_worker_stopping() also needs to be called from
    preemption and rtlock paths and an earlier patch was updated
    accordingly. This patch adds a comment describing the risk of infinite
    recursions and how they're avoided.

Signed-off-by: Tejun Heo
Acked-by: Peter Zijlstra
Cc: Linus Torvalds
Cc: Lai Jiangshan
---
 kernel/workqueue_internal.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'kernel/workqueue_internal.h')

diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index c2455be7b4c2..6b1d66e28269 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -31,6 +31,7 @@ struct worker {
 	struct work_struct	*current_work;	/* K: work being processed and its */
 	work_func_t		current_func;	/* K: function */
 	struct pool_workqueue	*current_pwq;	/* K: pwq */
+	u64			current_at;	/* K: runtime at start or last wakeup */
 	unsigned int		current_color;	/* K: color */
 
 	int			sleeping;	/* S: is worker sleeping? */
@@ -76,6 +77,7 @@ static inline struct worker *current_wq_worker(void)
  */
 void wq_worker_running(struct task_struct *task);
 void wq_worker_sleeping(struct task_struct *task);
+void wq_worker_tick(struct task_struct *task);
 work_func_t wq_worker_last_func(struct task_struct *task);
 
 #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
--
cgit v1.2.3
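To summarize the detection logic described above, here is a self-contained userspace sketch, not the kernel implementation: the worker records its accumulated runtime when a work item starts (the current_at field added by this patch); each tick compares the runtime consumed since then against the threshold and, once it is exceeded, the worker is flagged CPU-intensive so it stops blocking concurrency management. The constant and structure names below are invented for the example; the real threshold is the workqueue.cpu_intensive_thresh_us parameter mentioned above (10ms by default).

/*
 * Illustration only: simulate threshold-based CPU_INTENSIVE detection.
 * Names and the struct layout are invented for this example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CPU_INTENSIVE_THRESH_NS	(10ULL * 1000 * 1000)	/* 10ms default */

struct sim_worker {
	uint64_t sum_exec_runtime;	/* total CPU time consumed, in ns */
	uint64_t current_at;		/* runtime when the current item started */
	bool	 cpu_intensive;		/* excluded from concurrency management? */
};

/* Called when the worker picks up a new work item. */
static void sim_work_start(struct sim_worker *w)
{
	w->current_at = w->sum_exec_runtime;
	w->cpu_intensive = false;
}

/* Called from the periodic tick while the work item is still running. */
static void sim_worker_tick(struct sim_worker *w)
{
	if (w->cpu_intensive)
		return;
	if (w->sum_exec_runtime - w->current_at < CPU_INTENSIVE_THRESH_NS)
		return;
	/* would correspond to marking the worker CPU_INTENSIVE in the kernel */
	w->cpu_intensive = true;
}

int main(void)
{
	struct sim_worker w = { 0 };
	const uint64_t tick_ns = 4 * 1000 * 1000;	/* 4ms tick, i.e. HZ=250 */

	sim_work_start(&w);
	for (int i = 0; i < 5; i++) {
		w.sum_exec_runtime += tick_ns;	/* the item keeps hogging the CPU */
		sim_worker_tick(&w);
		printf("after %d ms: cpu_intensive=%d\n",
		       (int)((w.sum_exec_runtime - w.current_at) / 1000000),
		       w.cpu_intensive);
	}
	return 0;
}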