-rw-r--r--  include/linux/workqueue.h |  2
-rw-r--r--  kernel/workqueue.c        | 46
2 files changed, 48 insertions, 0 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 14d47120682b..5c158c477ac7 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -201,6 +201,8 @@ extern int keventd_up(void);
 extern void init_workqueues(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
+extern int flush_work(struct work_struct *work);
+
 extern int cancel_work_sync(struct work_struct *work);
 
 /*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d9a2d65cc63e..ee41cf857d55 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -423,6 +423,52 @@ void flush_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
+/**
+ * flush_work - block until a work_struct's callback has terminated
+ * @work: the work which is to be flushed
+ *
+ * It is expected that, prior to calling flush_work(), the caller has
+ * arranged for the work to not be requeued, otherwise it doesn't make
+ * sense to use this function.
+ */
+int flush_work(struct work_struct *work)
+{
+        struct cpu_workqueue_struct *cwq;
+        struct list_head *prev;
+        struct wq_barrier barr;
+
+        might_sleep();
+        cwq = get_wq_data(work);
+        if (!cwq)
+                return 0;
+
+        prev = NULL;
+        spin_lock_irq(&cwq->lock);
+        if (!list_empty(&work->entry)) {
+                /*
+                 * See the comment near try_to_grab_pending()->smp_rmb().
+                 * If it was re-queued under us we are not going to wait.
+                 */
+                smp_rmb();
+                if (unlikely(cwq != get_wq_data(work)))
+                        goto out;
+                prev = &work->entry;
+        } else {
+                if (cwq->current_work != work)
+                        goto out;
+                prev = &cwq->worklist;
+        }
+        insert_wq_barrier(cwq, &barr, prev->next);
+out:
+        spin_unlock_irq(&cwq->lock);
+        if (!prev)
+                return 0;
+
+        wait_for_completion(&barr.done);
+        return 1;
+}
+EXPORT_SYMBOL_GPL(flush_work);
+
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
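
Below is a purely hypothetical caller sketch, not part of the patch: the foo_* names and the interrupt-driven setup are invented to illustrate the precondition stated in the kerneldoc, namely that the caller must first make sure the work can no longer be requeued before calling flush_work().

/*
 * Hypothetical usage sketch (names invented): shut off the source of
 * queueing first, then flush the work item.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct foo_dev {
        int                     irq;
        struct work_struct      work;
};

static void foo_work_fn(struct work_struct *work)
{
        struct foo_dev *dev = container_of(work, struct foo_dev, work);

        /* process the deferred event; this work does not requeue itself */
        pr_info("foo: handling event for irq %d\n", dev->irq);
}

static irqreturn_t foo_irq(int irq, void *data)
{
        struct foo_dev *dev = data;

        schedule_work(&dev->work);      /* defer the heavy lifting */
        return IRQ_HANDLED;
}

static int foo_probe(struct foo_dev *dev)
{
        INIT_WORK(&dev->work, foo_work_fn);
        return request_irq(dev->irq, foo_irq, 0, "foo", dev);
}

static void foo_remove(struct foo_dev *dev)
{
        /*
         * free_irq() guarantees the handler is not running and will not
         * run again, so the work can no longer be requeued ...
         */
        free_irq(dev->irq, dev);

        /*
         * ... and flush_work() then waits for an already queued or
         * currently running instance to finish.
         */
        flush_work(&dev->work);
}

The waiting itself reuses the wq_barrier machinery that flush_workqueue() already relies on: a barrier work item is inserted right after the flushed work (or at the head of the worklist if the work is currently running), and its completion, barr.done, is what flush_work() sleeps on. Unlike cancel_work_sync(), flush_work() does not take the work item back; it only waits, returning 1 if it had to wait and 0 if the work was neither queued nor running.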