From cee22a15052faa817e3ec8985a28154d3fabc7aa Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 8 Apr 2013 16:45:40 +0530 Subject: workqueues: Introduce new flag WQ_POWER_EFFICIENT for power oriented workqueues Workqueues can be performance or power-oriented. Currently, most workqueues are bound to the CPU they were created on. This gives good performance (due to cache effects) at the cost of potentially waking up otherwise idle cores (Idle from scheduler's perspective. Which may or may not be physically idle) just to process some work. To save power, we can allow the work to be rescheduled on a core that is already awake. Workqueues created with the WQ_UNBOUND flag will allow some power savings. However, we don't change the default behaviour of the system. To enable power-saving behaviour, a new config option CONFIG_WQ_POWER_EFFICIENT needs to be turned on. This option can also be overridden by the workqueue.power_efficient boot parameter. tj: Updated config description and comments. Renamed CONFIG_WQ_POWER_EFFICIENT to CONFIG_WQ_POWER_EFFICIENT_DEFAULT. Signed-off-by: Viresh Kumar Reviewed-by: Amit Kucheria Signed-off-by: Tejun Heo --- kernel/workqueue.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'kernel/workqueue.c') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4aa9f5bc6b2d..8068d97ce141 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -272,6 +272,15 @@ static cpumask_var_t *wq_numa_possible_cpumask; static bool wq_disable_numa; module_param_named(disable_numa, wq_disable_numa, bool, 0444); +/* see the comment above the definition of WQ_POWER_EFFICIENT */ +#ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT +static bool wq_power_efficient = true; +#else +static bool wq_power_efficient; +#endif + +module_param_named(power_efficient, wq_power_efficient, bool, 0444); + static bool wq_numa_enabled; /* unbound NUMA affinity enabled */ /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */ @@ -4085,6 +4094,10 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, struct workqueue_struct *wq; struct pool_workqueue *pwq; + /* see the comment above the definition of WQ_POWER_EFFICIENT */ + if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) + flags |= WQ_UNBOUND; + /* allocate wq and format name */ if (flags & WQ_UNBOUND) tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]); -- cgit v1.2.3 From 0668106ca3865ba945e155097fb042bf66d364d3 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 24 Apr 2013 17:12:54 +0530 Subject: workqueue: Add system wide power_efficient workqueues This patch adds system wide workqueues aligned towards power saving. This is done by allocating them with WQ_UNBOUND flag if 'wq_power_efficient' is set to 'true'. tj: updated comments a bit. 
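For illustration only (not part of either patch): a minimal sketch of how a driver might opt in to the new flag. The "mydrv" names and the work function are invented. The queue behaves like a normal per-CPU workqueue by default and is silently turned into an unbound one only when wq_power_efficient is true, i.e. CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y or workqueue.power_efficient=1 on the kernel command line.

/* hypothetical driver-side usage; "mydrv" and mydrv_work_fn() are invented */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *mydrv_wq;

static void mydrv_work_fn(struct work_struct *work)
{
	/* the deferred work itself would go here */
}
static DECLARE_WORK(mydrv_work, mydrv_work_fn);

static int __init mydrv_init(void)
{
	/*
	 * Per-CPU (cache-friendly) by default; degrades to WQ_UNBOUND, and
	 * therefore to any already-awake CPU, only when wq_power_efficient
	 * is enabled as described in the patch above.
	 */
	mydrv_wq = alloc_workqueue("mydrv", WQ_POWER_EFFICIENT, 0);
	if (!mydrv_wq)
		return -ENOMEM;

	queue_work(mydrv_wq, &mydrv_work);
	return 0;
}
module_init(mydrv_init);

Code that does not need a dedicated queue can instead target the system_power_efficient_wq or system_freezable_power_efficient_wq added by the patch below.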
Signed-off-by: Viresh Kumar Signed-off-by: Tejun Heo --- kernel/workqueue.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'kernel/workqueue.c') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8068d97ce141..16ca2d3dd29f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -314,6 +314,10 @@ struct workqueue_struct *system_unbound_wq __read_mostly; EXPORT_SYMBOL_GPL(system_unbound_wq); struct workqueue_struct *system_freezable_wq __read_mostly; EXPORT_SYMBOL_GPL(system_freezable_wq); +struct workqueue_struct *system_power_efficient_wq __read_mostly; +EXPORT_SYMBOL_GPL(system_power_efficient_wq); +struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; +EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); static int worker_thread(void *__worker); static void copy_workqueue_attrs(struct workqueue_attrs *to, @@ -4987,8 +4991,15 @@ static int __init init_workqueues(void) WQ_UNBOUND_MAX_ACTIVE); system_freezable_wq = alloc_workqueue("events_freezable", WQ_FREEZABLE, 0); + system_power_efficient_wq = alloc_workqueue("events_power_efficient", + WQ_POWER_EFFICIENT, 0); + system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", + WQ_FREEZABLE | WQ_POWER_EFFICIENT, + 0); BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || - !system_unbound_wq || !system_freezable_wq); + !system_unbound_wq || !system_freezable_wq || + !system_power_efficient_wq || + !system_freezable_power_efficient_wq); return 0; } early_initcall(init_workqueues); -- cgit v1.2.3 From 0db0628d90125193280eabb501c94feaf48fa9ab Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Wed, 19 Jun 2013 14:53:51 -0400 Subject: kernel: delete __cpuinit usage from all core kernel files The __cpuinit type of throwaway sections might have made sense some time ago when RAM was more constrained, but now the savings do not offset the cost and complications. For example, the fix in commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time") is a good example of the nasty type of bugs that can be created with improper use of the various __init prefixes. After a discussion on LKML[1] it was decided that cpuinit should go the way of devinit and be phased out. Once all the users are gone, we can then finally remove the macros themselves from linux/init.h. This removes all the uses of the __cpuinit macros from C files in the core kernel directories (kernel, init, lib, mm, and include) that don't really have a specific maintainer. [1] https://lkml.org/lkml/2013/5/20/589 Signed-off-by: Paul Gortmaker --- kernel/workqueue.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel/workqueue.c') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f02c4a4a0c3c..0b72e816b8d0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4644,7 +4644,7 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) * Workqueues should be brought up before normal priority CPU notifiers. * This will be registered high priority CPU notifier. */ -static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, +static int workqueue_cpu_up_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -4697,7 +4697,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, * Workqueues should be brought down after normal priority CPU notifiers. * This will be registered as low priority CPU notifier. 
*/ -static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb, +static int workqueue_cpu_down_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { -- cgit v1.2.3 From c2fda509667b0fda4372a237f5a59ea4570b1627 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 24 Jul 2013 18:31:42 +0800 Subject: workqueue: allow work_on_cpu() to be called recursively If @fn calls work_on_cpu() again, lockdep will complain: > [ INFO: possible recursive locking detected ] > 3.11.0-rc1-lockdep-fix-a #6 Not tainted > --------------------------------------------- > kworker/0:1/142 is trying to acquire lock: > ((&wfc.work)){+.+.+.}, at: [] flush_work+0x0/0xb0 > > but task is already holding lock: > ((&wfc.work)){+.+.+.}, at: [] process_one_work+0x169/0x610 > > other info that might help us debug this: > Possible unsafe locking scenario: > > CPU0 > ---- > lock((&wfc.work)); > lock((&wfc.work)); > > *** DEADLOCK *** This is a false-positive lockdep report. In this situation, the two "wfc"s of the two work_on_cpu() calls are different; both live on the stack, so flush_work() cannot actually deadlock. To fix this, we need to avoid the lockdep checking in this case, so we introduce an internal __flush_work() which skips the lockdep annotation. tj: Minor comment adjustment. Signed-off-by: Lai Jiangshan Reported-by: "Srivatsa S. Bhat" Reported-by: Alexander Duyck Signed-off-by: Tejun Heo --- kernel/workqueue.c | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) (limited to 'kernel/workqueue.c') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f02c4a4a0c3c..55f5f0afcd0d 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2817,6 +2817,19 @@ already_gone: return false; } +static bool __flush_work(struct work_struct *work) +{ + struct wq_barrier barr; + + if (start_flush_work(work, &barr)) { + wait_for_completion(&barr.done); + destroy_work_on_stack(&barr.work); + return true; + } else { + return false; + } +} + /** * flush_work - wait for a work to finish executing the last queueing instance * @work: the work to flush * @@ -2830,18 +2843,10 @@ already_gone: */ bool flush_work(struct work_struct *work) { - struct wq_barrier barr; - lock_map_acquire(&work->lockdep_map); lock_map_release(&work->lockdep_map); - if (start_flush_work(work, &barr)) { - wait_for_completion(&barr.done); - destroy_work_on_stack(&barr.work); - return true; - } else { - return false; - } + return __flush_work(work); } EXPORT_SYMBOL_GPL(flush_work); @@ -4756,7 +4761,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg) INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); schedule_work_on(cpu, &wfc.work); - flush_work(&wfc.work); + + /* + * The work item is on-stack and can't lead to deadlock through + * flushing. Use __flush_work() to avoid spurious lockdep warnings + * when work_on_cpu()s are nested. + */ + __flush_work(&wfc.work); + return wfc.ret; } EXPORT_SYMBOL_GPL(work_on_cpu); -- cgit v1.2.3 From 2865a8fb44cc32420407362cbda80c10fa09c6b2 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 1 Aug 2013 09:56:36 +0800 Subject: workqueue: copy workqueue_attrs with all fields $echo '0' > /sys/bus/workqueue/devices/xxx/numa $cat /sys/bus/workqueue/devices/xxx/numa I got 1. It should be 0; the reason is that copy_workqueue_attrs(), called from apply_workqueue_attrs(), doesn't copy the no_numa field. Fix it by making copy_workqueue_attrs() copy ->no_numa too.
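To make the work_on_cpu() nesting described in the lockdep patch above concrete, here is a hypothetical sketch (all function names invented, CPU numbers arbitrary); it is the pattern that produced the false positive which __flush_work() now sidesteps.

/* hypothetical illustration of nested work_on_cpu(); not from the patch */
#include <linux/workqueue.h>

static long inner_fn(void *arg)
{
	/* e.g. a register or MSR access that must run on a given CPU */
	return 0;
}

static long outer_fn(void *arg)
{
	/*
	 * Already running inside a kworker via work_on_cpu().  The nested
	 * call flushes a second, distinct on-stack work item; lockdep used
	 * to flag this as recursive locking because both items share the
	 * same lock class, even though no real deadlock is possible.
	 */
	return work_on_cpu(0, inner_fn, NULL);
}

static long run_query(void)
{
	return work_on_cpu(1, outer_fn, NULL);
}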
This would also make get_unbound_pool() set a pool's ->no_numa attribute according to the workqueue attributes used when the pool was created. While harmless, as ->no_numa isn't a pool attribute, this is a bit confusing. Clear it explicitly. tj: Updated description and comments a bit. Signed-off-by: Shaohua Li Signed-off-by: Tejun Heo Cc: stable@vger.kernel.org --- kernel/workqueue.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'kernel/workqueue.c') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 55f5f0afcd0d..726adc84b3ca 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3416,6 +3416,12 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to, { to->nice = from->nice; cpumask_copy(to->cpumask, from->cpumask); + /* + * Unlike hash and equality test, this function doesn't ignore + * ->no_numa as it is used for both pool and wq attrs. Instead, + * get_unbound_pool() explicitly clears ->no_numa after copying. + */ + to->no_numa = from->no_numa; } /* hash value of the content of @attr */ @@ -3583,6 +3589,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ copy_workqueue_attrs(pool->attrs, attrs); + /* + * no_numa isn't a worker_pool attribute, always clear it. See + * 'struct workqueue_attrs' comments for detail. + */ + pool->attrs->no_numa = false; + /* if cpumask is contained inside a NUMA node, we belong to that node */ if (wq_numa_enabled) { for_each_node(node) { -- cgit v1.2.3 From d185af300fe43c130083851ca918ea2bb9600f0f Mon Sep 17 00:00:00 2001 From: Yacine Belkadi Date: Wed, 31 Jul 2013 14:59:24 -0700 Subject: workqueue: fix some scripts/kernel-doc warnings When building the htmldocs (in verbose mode), scripts/kernel-doc reports the following type of warning: Warning(kernel/workqueue.c:653): No description found for return value of 'get_work_pool' Fix them by: - Using "Return:" sections to introduce descriptions of return values - Adding some missing descriptions Signed-off-by: Yacine Belkadi Signed-off-by: Jiri Kosina --- kernel/workqueue.c | 107 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 66 insertions(+), 41 deletions(-) (limited to 'kernel/workqueue.c') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0b72e816b8d0..7f01a3eeaf95 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -540,6 +540,8 @@ static int worker_pool_assign_id(struct worker_pool *pool) * This must be called either with pwq_lock held or sched RCU read locked. * If the pwq needs to be used beyond the locking in effect, the caller is * responsible for guaranteeing that the pwq stays online. + * + * Return: The unbound pool_workqueue for @node. */ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, int node) @@ -638,8 +640,6 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work) * get_work_pool - return the worker_pool a given work was associated with * @work: the work item of interest * - * Return the worker_pool @work was last associated with. %NULL if none. * - * Pools are created and destroyed under wq_pool_mutex, and allows read * access under sched-RCU read lock. As such, this function should be * called under wq_pool_mutex or with preemption disabled. @@ -648,6 +648,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work) * mentioned locking is in effect.
If the returned pool needs to be used * beyond the critical section, the caller is responsible for ensuring the * returned pool is and stays online. + * + * Return: The worker_pool @work was last associated with. %NULL if none. */ static struct worker_pool *get_work_pool(struct work_struct *work) { @@ -671,7 +673,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work) * get_work_pool_id - return the worker pool ID a given work is associated with * @work: the work item of interest * - * Return the worker_pool ID @work was last associated with. + * Return: The worker_pool ID @work was last associated with. * %WORK_OFFQ_POOL_NONE if none. */ static int get_work_pool_id(struct work_struct *work) @@ -830,7 +832,7 @@ void wq_worker_waking_up(struct task_struct *task, int cpu) * CONTEXT: * spin_lock_irq(rq->lock) * - * RETURNS: + * Return: * Worker task on @cpu to wake up, %NULL if none. */ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu) @@ -965,8 +967,8 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) * CONTEXT: * spin_lock_irq(pool->lock). * - * RETURNS: - * Pointer to worker which is executing @work if found, NULL + * Return: + * Pointer to worker which is executing @work if found, %NULL * otherwise. */ static struct worker *find_worker_executing_work(struct worker_pool *pool, @@ -1154,14 +1156,16 @@ out_put: * @flags: place to store irq state * * Try to grab PENDING bit of @work. This function can handle @work in any - * stable state - idle, on timer or on worklist. Return values are + * stable state - idle, on timer or on worklist. * + * Return: * 1 if @work was pending and we successfully stole PENDING * 0 if @work was idle and we claimed PENDING * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry * -ENOENT if someone else is canceling @work, this state may persist * for arbitrarily long * + * Note: * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting * interrupted while holding PENDING and @work off queue, irq must be * disabled on entry. This, combined with delayed_work->timer being @@ -1403,10 +1407,10 @@ retry: * @wq: workqueue to use * @work: work to queue * - * Returns %false if @work was already on a queue, %true otherwise. - * * We queue the work to a specific CPU, the caller must ensure it * can't go away. + * + * Return: %false if @work was already on a queue, %true otherwise. */ bool queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) @@ -1476,7 +1480,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, * @dwork: work to queue * @delay: number of jiffies to wait before queueing * - * Returns %false if @work was already on a queue, %true otherwise. If + * Return: %false if @work was already on a queue, %true otherwise. If * @delay is zero and @dwork is idle, it will be scheduled for immediate * execution. */ @@ -1512,7 +1516,7 @@ EXPORT_SYMBOL(queue_delayed_work_on); * zero, @work is guaranteed to be scheduled immediately regardless of its * current state. * - * Returns %false if @dwork was idle and queued, %true if @dwork was + * Return: %false if @dwork was idle and queued, %true if @dwork was * pending and its timer was modified. * * This function is safe to call from any context including IRQ handler. @@ -1627,7 +1631,7 @@ static void worker_leave_idle(struct worker *worker) * Might sleep. Called without any lock but returns with pool->lock * held. 
* - * RETURNS: + * Return: * %true if the associated pool is online (@worker is successfully * bound), %false if offline. */ @@ -1688,7 +1692,7 @@ static struct worker *alloc_worker(void) * CONTEXT: * Might sleep. Does GFP_KERNEL allocations. * - * RETURNS: + * Return: * Pointer to the newly created worker. */ static struct worker *create_worker(struct worker_pool *pool) @@ -1788,6 +1792,8 @@ static void start_worker(struct worker *worker) * @pool: the target pool * * Grab the managership of @pool and create and start a new worker for it. + * + * Return: 0 on success. A negative error code otherwise. */ static int create_and_start_worker(struct worker_pool *pool) { @@ -1932,7 +1938,7 @@ static void pool_mayday_timeout(unsigned long __pool) * multiple times. Does GFP_KERNEL allocations. Called only from * manager. * - * RETURNS: + * Return: * %false if no action was taken and pool->lock stayed locked, %true * otherwise. */ @@ -1989,7 +1995,7 @@ restart: * spin_lock_irq(pool->lock) which may be released and regrabbed * multiple times. Called only from manager. * - * RETURNS: + * Return: * %false if no action was taken and pool->lock stayed locked, %true * otherwise. */ @@ -2032,7 +2038,7 @@ static bool maybe_destroy_workers(struct worker_pool *pool) * spin_lock_irq(pool->lock) which may be released and regrabbed * multiple times. Does GFP_KERNEL allocations. * - * RETURNS: + * Return: * spin_lock_irq(pool->lock) which may be released and regrabbed * multiple times. Does GFP_KERNEL allocations. */ @@ -2246,6 +2252,8 @@ static void process_scheduled_works(struct worker *worker) * work items regardless of their specific target workqueue. The only * exception is work items which belong to workqueues with a rescuer which * will be explained in rescuer_thread(). + * + * Return: 0 */ static int worker_thread(void *__worker) { @@ -2344,6 +2352,8 @@ sleep: * those works so that forward progress can be guaranteed. * * This should happen rarely. + * + * Return: 0 */ static int rescuer_thread(void *__rescuer) { @@ -2516,7 +2526,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq, * CONTEXT: * mutex_lock(wq->mutex). * - * RETURNS: + * Return: * %true if @flush_color >= 0 and there's something to flush. %false * otherwise. */ @@ -2824,7 +2834,7 @@ already_gone: * Wait until @work has finished execution. @work is guaranteed to be idle * on return if it hasn't been requeued since flush started. * - * RETURNS: + * Return: * %true if flush_work() waited for the work to finish execution, * %false if it was already idle. */ @@ -2884,7 +2894,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) * The caller must ensure that the workqueue on which @work was last * queued can't be destroyed before this function returns. * - * RETURNS: + * Return: * %true if @work was pending, %false otherwise. */ bool cancel_work_sync(struct work_struct *work) @@ -2901,7 +2911,7 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); * immediate execution. Like flush_work(), this function only * considers the last queueing instance of @dwork. * - * RETURNS: + * Return: * %true if flush_work() waited for the work to finish execution, * %false if it was already idle. */ @@ -2919,11 +2929,15 @@ EXPORT_SYMBOL(flush_delayed_work); * cancel_delayed_work - cancel a delayed work * @dwork: delayed_work to cancel * - * Kill off a pending delayed_work. Returns %true if @dwork was pending - * and canceled; %false if wasn't pending. 
Note that the work callback - * function may still be running on return, unless it returns %true and the - * work doesn't re-arm itself. Explicitly flush or use - * cancel_delayed_work_sync() to wait on it. + * Kill off a pending delayed_work. + * + * Return: %true if @dwork was pending and canceled; %false if it wasn't + * pending. + * + * Note: + * The work callback function may still be running on return, unless + * it returns %true and the work doesn't re-arm itself. Explicitly flush or + * use cancel_delayed_work_sync() to wait on it. * * This function is safe to call from any context including IRQ handler. */ @@ -2952,7 +2966,7 @@ EXPORT_SYMBOL(cancel_delayed_work); * * This is cancel_work_sync() for delayed works. * - * RETURNS: + * Return: * %true if @dwork was pending, %false otherwise. */ bool cancel_delayed_work_sync(struct delayed_work *dwork) @@ -2969,7 +2983,7 @@ EXPORT_SYMBOL(cancel_delayed_work_sync); * system workqueue and blocks until all CPUs have completed. * schedule_on_each_cpu() is very slow. * - * RETURNS: + * Return: * 0 on success, -errno on failure. */ int schedule_on_each_cpu(work_func_t func) @@ -3037,7 +3051,7 @@ EXPORT_SYMBOL(flush_scheduled_work); * Executes the function immediately if process context is available, * otherwise schedules the function for delayed execution. * - * Returns: 0 - function was executed + * Return: 0 - function was executed * 1 - function was scheduled for execution */ int execute_in_process_context(work_func_t fn, struct execute_work *ew) @@ -3294,7 +3308,7 @@ static void wq_device_release(struct device *dev) * apply_workqueue_attrs() may race against userland updating the * attributes. * - * Returns 0 on success, -errno on failure. + * Return: 0 on success, -errno on failure. */ int workqueue_sysfs_register(struct workqueue_struct *wq) { @@ -3387,7 +3401,9 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs) * @gfp_mask: allocation mask to use * * Allocate a new workqueue_attrs, initialize with default settings and - * return it. Returns NULL on failure. + * return it. + * + * Return: The allocated new workqueue_attr on success. %NULL on failure. */ struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) { @@ -3440,7 +3456,8 @@ static bool wqattrs_equal(const struct workqueue_attrs *a, * @pool: worker_pool to initialize * * Initiailize a newly zalloc'd @pool. It also allocates @pool->attrs. - * Returns 0 on success, -errno on failure. Even on failure, all fields + * + * Return: 0 on success, -errno on failure. Even on failure, all fields * inside @pool proper are initialized and put_unbound_pool() can be called * on @pool safely to release it. */ @@ -3547,9 +3564,12 @@ static void put_unbound_pool(struct worker_pool *pool) * Obtain a worker_pool which has the same attributes as @attrs, bump the * reference count and return it. If there already is a matching * worker_pool, it will be used; otherwise, this function attempts to - * create a new one. On failure, returns NULL. + * create a new one. * * Should be called with wq_pool_mutex held. + * + * Return: On success, a worker_pool with the same attributes as @attrs. + * On failure, %NULL. */ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) { @@ -3779,9 +3799,7 @@ static void free_unbound_pwq(struct pool_workqueue *pwq) * * Calculate the cpumask a workqueue with @attrs should use on @node. If * @cpu_going_down is >= 0, that cpu is considered offline during - * calculation. The result is stored in @cpumask. 
This function returns - * %true if the resulting @cpumask is different from @attrs->cpumask, - * %false if equal. + * calculation. The result is stored in @cpumask. * * If NUMA affinity is not enabled, @attrs->cpumask is always used. If * enabled and @node has online CPUs requested by @attrs, the returned @@ -3790,6 +3808,9 @@ static void free_unbound_pwq(struct pool_workqueue *pwq) * * The caller is responsible for ensuring that the cpumask of @node stays * stable. + * + * Return: %true if the resulting @cpumask is different from @attrs->cpumask, + * %false if equal. */ static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node, int cpu_going_down, cpumask_t *cpumask) @@ -3843,8 +3864,9 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, * items finish. Note that a work item which repeatedly requeues itself * back-to-back will stay on its current pwq. * - * Performs GFP_KERNEL allocations. Returns 0 on success and -errno on - * failure. + * Performs GFP_KERNEL allocations. + * + * Return: 0 on success and -errno on failure. */ int apply_workqueue_attrs(struct workqueue_struct *wq, const struct workqueue_attrs *attrs) @@ -4312,6 +4334,8 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active); * * Determine whether %current is a workqueue rescuer. Can be used from * work functions to determine whether it's being run off the rescuer task. + * + * Return: %true if %current is a workqueue rescuer. %false otherwise. */ bool current_is_workqueue_rescuer(void) { @@ -4335,7 +4359,7 @@ bool current_is_workqueue_rescuer(void) * workqueue being congested on one CPU doesn't mean the workqueue is also * contested on other CPUs / NUMA nodes. * - * RETURNS: + * Return: * %true if congested, %false otherwise. */ bool workqueue_congested(int cpu, struct workqueue_struct *wq) @@ -4368,7 +4392,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested); * synchronization around this function and the test result is * unreliable and only useful as advisory hints or for debugging. * - * RETURNS: + * Return: * OR'd bitmask of WORK_BUSY_* bits. */ unsigned int work_busy(struct work_struct *work) @@ -4746,9 +4770,10 @@ static void work_for_cpu_fn(struct work_struct *work) * @fn: the function to run * @arg: the function arg * - * This will return the value @fn returns. * It is up to the caller to ensure that the cpu doesn't go offline. * The caller must not hold any locks which would prevent @fn from completing. + * + * Return: The value @fn returns. */ long work_on_cpu(int cpu, long (*fn)(void *), void *arg) { @@ -4813,7 +4838,7 @@ void freeze_workqueues_begin(void) * CONTEXT: * Grabs and releases wq_pool_mutex. * - * RETURNS: + * Return: * %true if some freezable workqueues are still busy. %false if freezing * is complete. */ -- cgit v1.2.3 From b11895c45899daff094610f6cdbf7611d74ae2a6 Mon Sep 17 00:00:00 2001 From: Libin Date: Wed, 21 Aug 2013 08:50:39 +0800 Subject: workqueue: Comment correction in file header No functional change. There are two worker pools for each cpu in current implementation (one for normal work items and the other for high priority ones). tj: Whitespace adjustments. Signed-off-by: Libin Signed-off-by: Tejun Heo --- kernel/workqueue.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel/workqueue.c') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f02c4a4a0c3c..eebd9a66c044 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -16,9 +16,10 @@ * * This is the generic async execution mechanism. 
Work items as are * executed in process context. The worker pool is shared and - * automatically managed. There is one worker pool for each CPU and - * one extra for works which are better served by workers which are - * not bound to any specific CPU. + * automatically managed. There are two worker pools for each CPU (one for + * normal work items and the other for high priority ones) and some extra + * pools for workqueues which are not bound to any specific CPU - the + * number of these backing pools is dynamic. * * Please read Documentation/workqueue.txt for details. */ -- cgit v1.2.3 From 2d498db9814c6f3a79b708c8867c7ffcf7b5e2fc Mon Sep 17 00:00:00 2001 From: Libin Date: Wed, 21 Aug 2013 08:50:40 +0800 Subject: workqueue: Fix manage_workers() RETURNS description No functional change. The comment of function manage_workers() RETURNS description is obvious wrong, same as the CONTEXT. Fix it. Signed-off-by: Libin Signed-off-by: Tejun Heo --- kernel/workqueue.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel/workqueue.c') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index eebd9a66c044..10f655ec8de6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2034,8 +2034,11 @@ static bool maybe_destroy_workers(struct worker_pool *pool) * multiple times. Does GFP_KERNEL allocations. * * RETURNS: - * spin_lock_irq(pool->lock) which may be released and regrabbed - * multiple times. Does GFP_KERNEL allocations. + * %false if the pool don't need management and the caller can safely start + * processing works, %true indicates that the function released pool->lock + * and reacquired it to perform some management function and that the + * conditions that the caller verified while holding the lock before + * calling the function might no longer be true. */ static bool manage_workers(struct worker *worker) { -- cgit v1.2.3 From 1a6661dafd2528d03d0eaed898ad596816dfe738 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 23 Aug 2013 14:24:41 -0700 Subject: workqueue: convert bus code to use dev_groups The dev_attrs field of struct bus_type is going away soon, dev_groups should be used instead. This converts the workqueue bus code to use the correct field. 
Acked-by: Tejun Heo Signed-off-by: Greg Kroah-Hartman --- kernel/workqueue.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) (limited to 'kernel/workqueue.c') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0b72e816b8d0..d1b5f0662651 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3081,25 +3081,26 @@ static struct workqueue_struct *dev_to_wq(struct device *dev) return wq_dev->wq; } -static ssize_t wq_per_cpu_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct workqueue_struct *wq = dev_to_wq(dev); return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); } +static DEVICE_ATTR_RO(per_cpu); -static ssize_t wq_max_active_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t max_active_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct workqueue_struct *wq = dev_to_wq(dev); return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); } -static ssize_t wq_max_active_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t max_active_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) { struct workqueue_struct *wq = dev_to_wq(dev); int val; @@ -3110,12 +3111,14 @@ static ssize_t wq_max_active_store(struct device *dev, workqueue_set_max_active(wq, val); return count; } +static DEVICE_ATTR_RW(max_active); -static struct device_attribute wq_sysfs_attrs[] = { - __ATTR(per_cpu, 0444, wq_per_cpu_show, NULL), - __ATTR(max_active, 0644, wq_max_active_show, wq_max_active_store), - __ATTR_NULL, +static struct attribute *wq_sysfs_attrs[] = { + &dev_attr_per_cpu.attr, + &dev_attr_max_active.attr, + NULL, }; +ATTRIBUTE_GROUPS(wq_sysfs); static ssize_t wq_pool_ids_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -3265,7 +3268,7 @@ static struct device_attribute wq_sysfs_unbound_attrs[] = { static struct bus_type wq_subsys = { .name = "workqueue", - .dev_attrs = wq_sysfs_attrs, + .dev_groups = wq_sysfs_groups, }; static int __init wq_sysfs_init(void) -- cgit v1.2.3 From b22ce2785d97423846206cceec4efee0c4afd980 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 28 Aug 2013 17:33:37 -0400 Subject: workqueue: cond_resched() after processing each work item If !PREEMPT, a kworker running work items back to back can hog CPU. This becomes dangerous when a self-requeueing work item which is waiting for something to happen races against stop_machine. Such self-requeueing work item would requeue itself indefinitely hogging the kworker and CPU it's running on while stop_machine would wait for that CPU to enter stop_machine while preventing anything else from happening on all other CPUs. The two would deadlock. Jamie Liu reports that this deadlock scenario exists around scsi_requeue_run_queue() and libata port multiplier support, where one port may exclude command processing from other ports. With the right timing, scsi_requeue_run_queue() can end up requeueing itself trying to execute an IO which is asked to be retried while another device has an exclusive access, which in turn can't make forward progress due to stop_machine. Fix it by invoking cond_resched() after executing each work item. 
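For illustration (not part of the patch), this is the kind of self-requeueing work item the description above is worried about; retry_needed() and poll_fn() are invented stand-ins for driver-specific logic.

/* hypothetical self-requeueing work item; names are invented */
#include <linux/workqueue.h>

static bool retry_needed(void)
{
	/* stand-in for a driver-specific "is the hardware ready yet?" test */
	return false;
}

static void poll_fn(struct work_struct *work)
{
	if (!retry_needed())
		return;

	/*
	 * Requeue immediately.  On a !PREEMPT kernel the same kworker can
	 * pick this item right back up; the cond_resched() added to
	 * process_one_work() below is what lets other tasks, including the
	 * stop_machine threads, run on this CPU between executions.
	 */
	schedule_work(work);
}
static DECLARE_WORK(poll_work, poll_fn);

A driver would kick this off once with schedule_work(&poll_work); without the cond_resched() the requeue loop could monopolize the kworker's CPU indefinitely.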
Signed-off-by: Tejun Heo Reported-by: Jamie Liu References: http://thread.gmane.org/gmane.linux.kernel/1552567 Cc: stable@vger.kernel.org -- kernel/workqueue.c | 9 +++++++++ 1 file changed, 9 insertions(+) --- kernel/workqueue.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'kernel/workqueue.c') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 7f5d4be22034..e93f7b9067d8 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2201,6 +2201,15 @@ __acquires(&pool->lock) dump_stack(); } + /* + * The following prevents a kworker from hogging CPU on !PREEMPT + * kernels, where a requeueing work item waiting for something to + * happen could deadlock with stop_machine as such work item could + * indefinitely requeue itself while all other CPUs are trapped in + * stop_machine. + */ + cond_resched(); + spin_lock_irq(&pool->lock); /* clear cpu intensive status */ -- cgit v1.2.3
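The dev_groups conversion a few patches above follows a generic pattern worth spelling out once; a minimal sketch for a hypothetical "foo" bus (all names invented), showing how DEVICE_ATTR_RO()/DEVICE_ATTR_RW() plus ATTRIBUTE_GROUPS() replace an open-coded array of struct device_attribute.

/* hypothetical bus using dev_groups; the "foo" names are invented */
#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "ok\n");
}
static DEVICE_ATTR_RO(state);		/* defines dev_attr_state */

static struct attribute *foo_dev_attrs[] = {
	&dev_attr_state.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo_dev);		/* defines foo_dev_groups[] */

static struct bus_type foo_bus = {
	.name		= "foo",
	.dev_groups	= foo_dev_groups,	/* replaces the old .dev_attrs */
};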