From fda6add9243867486f8cd456d7b05395d2132e0a Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Mon, 16 Jun 2025 13:59:20 -0400
Subject: workqueue: Basic memory allocation profiling support

Hook alloc_workqueue() and alloc_workqueue_attrs() so that they're
accounted to the callsite. Since we're doing allocations on behalf of
another subsystem, this helps when using memory allocation profiling to
check for leaks.

Cc: Tejun Heo
Cc: Lai Jiangshan
Signed-off-by: Kent Overstreet
Signed-off-by: Tejun Heo
---
 include/linux/workqueue.h | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

(limited to 'include/linux/workqueue.h')

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 6e30f275da77..e907c9bb840c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -6,6 +6,7 @@
 #ifndef _LINUX_WORKQUEUE_H
 #define _LINUX_WORKQUEUE_H
 
+#include <linux/alloc_tag.h>
 #include <linux/timer.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
@@ -505,7 +506,8 @@ void workqueue_softirq_dead(unsigned int cpu);
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 __printf(1, 4) struct workqueue_struct *
-alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
+alloc_workqueue_noprof(const char *fmt, unsigned int flags, int max_active, ...);
+#define alloc_workqueue(...) alloc_hooks(alloc_workqueue_noprof(__VA_ARGS__))
 
 #ifdef CONFIG_LOCKDEP
 /**
@@ -544,8 +546,8 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 #define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...)	\
-	alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),	\
-				    1, lockdep_map, ##args)
+	alloc_hooks(alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),\
+				    1, lockdep_map, ##args))
 #endif
 
 /**
@@ -577,7 +579,9 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
-struct workqueue_attrs *alloc_workqueue_attrs(void);
+struct workqueue_attrs *alloc_workqueue_attrs_noprof(void);
+#define alloc_workqueue_attrs(...) alloc_hooks(alloc_workqueue_attrs_noprof(__VA_ARGS__))
+
 void free_workqueue_attrs(struct workqueue_attrs *attrs);
 int apply_workqueue_attrs(struct workqueue_struct *wq,
 			  const struct workqueue_attrs *attrs);
-- 
cgit v1.2.3
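A quick illustration of what this change means for callers (a hypothetical module, not part of the patch): alloc_workqueue() is now a macro around alloc_workqueue_noprof(), so with CONFIG_MEM_ALLOC_PROFILING enabled the backing allocation is attributed to the call site (e.g. visible in /proc/allocinfo) rather than to kernel/workqueue.c, while the call itself is unchanged.

/* Hypothetical caller, assuming CONFIG_MEM_ALLOC_PROFILING=y. */
#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *example_wq;

static int example_setup(void)
{
	/*
	 * The call site is unchanged; the alloc_hooks() wrapper charges
	 * the workqueue allocation to this line instead of to the
	 * workqueue core.
	 */
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;

	return 0;
}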
From 128ea9f6ccfb6960293ae4212f4f97165e42222d Mon Sep 17 00:00:00 2001
From: Marco Crivellari
Date: Sat, 14 Jun 2025 15:35:29 +0200
Subject: workqueue: Add system_percpu_wq and system_dfl_wq

Currently, if a user enqueues a work item using schedule_delayed_work(),
the workqueue used is "system_wq" (a per-CPU wq), while
queue_delayed_work() uses WORK_CPU_UNBOUND (used when a CPU is not
specified). The same applies to schedule_work(), which uses system_wq,
and queue_work(), which again makes use of WORK_CPU_UNBOUND.

This lack of consistency cannot be addressed without refactoring the
API.

system_wq is a per-CPU workqueue, yet nothing in its name tells about
that CPU affinity constraint, which is very often not required by
users. Make it clear by adding a system_percpu_wq.

system_unbound_wq should be the default workqueue so as not to enforce
locality constraints for random work whenever it's not required. Add
system_dfl_wq to encourage its use when unbound work should be used.

Suggested-by: Tejun Heo
Signed-off-by: Marco Crivellari
Signed-off-by: Tejun Heo
---
 include/linux/workqueue.h | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

(limited to 'include/linux/workqueue.h')

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index b0dc957c3e56..74b0042709cd 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -427,7 +427,7 @@ enum wq_consts {
 /*
  * System-wide workqueues which are always present.
  *
- * system_wq is the one used by schedule[_delayed]_work[_on]().
+ * system_percpu_wq is the one used by schedule[_delayed]_work[_on]().
  * Multi-CPU multi-threaded. There are users which expect relatively
  * short queue flush time. Don't queue works which can run for too
  * long.
@@ -438,7 +438,7 @@ enum wq_consts {
  * system_long_wq is similar to system_wq but may host long running
  * works. Queue flushing might take relatively long.
  *
- * system_unbound_wq is unbound workqueue. Workers are not bound to
+ * system_dfl_wq is unbound workqueue. Workers are not bound to
  * any specific CPU, not concurrency managed, and all queued works are
  * executed immediately as long as max_active limit is not reached and
  * resources are available.
@@ -455,10 +455,12 @@ enum wq_consts {
  * system_bh[_highpri]_wq are convenience interface to softirq. BH work items
  * are executed in the queueing CPU's BH context in the queueing order.
  */
-extern struct workqueue_struct *system_wq;
+extern struct workqueue_struct *system_wq; /* use system_percpu_wq, this will be removed */
+extern struct workqueue_struct *system_percpu_wq;
 extern struct workqueue_struct *system_highpri_wq;
 extern struct workqueue_struct *system_long_wq;
 extern struct workqueue_struct *system_unbound_wq;
+extern struct workqueue_struct *system_dfl_wq;
 extern struct workqueue_struct *system_freezable_wq;
 extern struct workqueue_struct *system_power_efficient_wq;
 extern struct workqueue_struct *system_freezable_power_efficient_wq;
-- 
cgit v1.2.3
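As a rough usage sketch (hypothetical callers, not from the patch), the new symbols let queueing sites state their intent explicitly instead of silently inheriting system_wq's per-CPU behavior:

#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
	/* work body */
}

static DECLARE_WORK(percpu_work, example_fn);
static DECLARE_WORK(unbound_work, example_fn);

static void example_queue(void)
{
	/* Locality genuinely matters: say so with the per-CPU system wq. */
	queue_work(system_percpu_wq, &percpu_work);

	/* No locality requirement: prefer the unbound default. */
	queue_work(system_dfl_wq, &unbound_work);
}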
From 930c2ea566aff59e962c50b2421d5fcc3b98b8be Mon Sep 17 00:00:00 2001
From: Marco Crivellari
Date: Sat, 14 Jun 2025 15:35:30 +0200
Subject: workqueue: Add new WQ_PERCPU flag

Currently, if a user enqueues a work item using schedule_delayed_work(),
the workqueue used is "system_wq" (a per-CPU wq), while
queue_delayed_work() uses WORK_CPU_UNBOUND (used when a CPU is not
specified). The same applies to schedule_work(), which uses system_wq,
and queue_work(), which again makes use of WORK_CPU_UNBOUND.

This lack of consistency cannot be addressed without refactoring the
API.

This patch adds a new WQ_PERCPU flag to explicitly request the use of
the per-CPU behavior. Both flags coexist for one release cycle to allow
callers to transition their calls. Once migration is complete,
WQ_UNBOUND can be removed and unbound will become the implicit default.

tj: Merged doc patch.

Suggested-by: Tejun Heo
Signed-off-by: Marco Crivellari
Signed-off-by: Tejun Heo
---
 include/linux/workqueue.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux/workqueue.h')

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 74b0042709cd..f19072605faa 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -401,6 +401,7 @@ enum wq_flags {
 	 * http://thread.gmane.org/gmane.linux.kernel/1480396
 	 */
 	WQ_POWER_EFFICIENT	= 1 << 7,
+	WQ_PERCPU		= 1 << 8, /* bound to a specific cpu */
 
 	__WQ_DESTROYING		= 1 << 15, /* internal: workqueue is destroying */
 	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
-- 
cgit v1.2.3

From fc2898ea793a48bc4b74b61cde2d8656f20efdf4 Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert"
Date: Mon, 23 Jun 2025 01:30:49 +0100
Subject: workqueue: Remove unused work_on_cpu_safe

The last use of the work_on_cpu_safe() macro was removed recently by
commit 9cda46babdfe ("crypto: n2 - remove Niagara2 SPU driver").

Remove it, and the work_on_cpu_safe_key() function it calls.

Signed-off-by: Dr. David Alan Gilbert
Signed-off-by: Tejun Heo
---
 include/linux/workqueue.h | 13 -------------
 1 file changed, 13 deletions(-)

(limited to 'include/linux/workqueue.h')

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 83d158bb2791..45d5dd470ff6 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -847,19 +847,6 @@ long work_on_cpu_key(int cpu, long (*fn)(void *),
 	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
 })
 
-long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
-			  void *arg, struct lock_class_key *key);
-
-/*
- * A new key is defined for each caller to make sure the work
- * associated with the function doesn't share its locking class.
- */
-#define work_on_cpu_safe(_cpu, _fn, _arg)			\
-({								\
-	static struct lock_class_key __key;			\
-								\
-	work_on_cpu_safe_key(_cpu, _fn, _arg, &__key);		\
-})
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_FREEZER
-- 
cgit v1.2.3
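For context, a minimal (hypothetical) caller of the surviving work_on_cpu() interface; the removed work_on_cpu_safe() was essentially the same call wrapped in cpus_read_lock(), so any remaining user handles CPU hotplug itself:

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static long example_fn(void *arg)
{
	/* Runs synchronously in process context on the chosen CPU. */
	return 0;
}

static long example_run_on(int cpu)
{
	long ret;

	/* Keep 'cpu' from going away while the work runs. */
	cpus_read_lock();
	ret = cpu_online(cpu) ? work_on_cpu(cpu, example_fn, NULL) : -ENODEV;
	cpus_read_unlock();

	return ret;
}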