Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--  include/linux/workqueue.h  |  45
1 file changed, 6 insertions(+), 39 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 594521ba0d43..1b22c42e9c2d 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -177,20 +177,10 @@ struct execute_work {
#define DECLARE_DEFERRABLE_WORK(n, f) \
struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
-/*
- * initialize a work item's function pointer
- */
-#define PREPARE_WORK(_work, _func) \
- do { \
- (_work)->func = (_func); \
- } while (0)
-
-#define PREPARE_DELAYED_WORK(_work, _func) \
- PREPARE_WORK(&(_work)->work, (_func))
-
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
+extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
return *work_data_bits(work) & WORK_STRUCT_STATIC;
@@ -198,6 +188,7 @@ static inline unsigned int work_static(struct work_struct *work)
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
+static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
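
The new destroy_delayed_work_on_stack() pairs with INIT_DELAYED_WORK_ONSTACK() the same way destroy_work_on_stack() pairs with INIT_WORK_ONSTACK(), telling debugobjects that an on-stack object is about to go out of scope (it is a no-op without CONFIG_DEBUG_OBJECTS_WORK). A minimal usage sketch, not part of this patch; my_timeout_fn and my_wait_with_timeout are hypothetical names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void my_timeout_fn(struct work_struct *work)
{
        /* hypothetical callback body */
}

static void my_wait_with_timeout(void)
{
        struct delayed_work dwork;

        INIT_DELAYED_WORK_ONSTACK(&dwork, my_timeout_fn);
        schedule_delayed_work(&dwork, HZ);
        flush_delayed_work(&dwork);
        /* let debugobjects know the on-stack item is going away */
        destroy_delayed_work_on_stack(&dwork);
}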
@@ -217,7 +208,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
INIT_LIST_HEAD(&(_work)->entry); \
- PREPARE_WORK((_work), (_func)); \
+ (_work)->func = (_func); \
} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack) \
@@ -225,7 +216,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
INIT_LIST_HEAD(&(_work)->entry); \
- PREPARE_WORK((_work), (_func)); \
+ (_work)->func = (_func); \
} while (0)
#endif
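
With PREPARE_WORK() and PREPARE_DELAYED_WORK() gone, __INIT_WORK() simply open-codes the function-pointer assignment, and a caller that used the removed macros to re-point an idle work item's callback would re-run INIT_WORK() instead. A rough migration sketch, assuming hypothetical my_dev and my_other_fn; it is only valid while the item is idle (neither pending nor running):

#include <linux/workqueue.h>

struct my_dev {                         /* hypothetical driver context */
        struct work_struct work;
};

static void my_other_fn(struct work_struct *work)
{
        /* hypothetical callback body */
}

static void my_switch_callback(struct my_dev *dev)
{
        /* was: PREPARE_WORK(&dev->work, my_other_fn); */
        INIT_WORK(&dev->work, my_other_fn);
}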
@@ -295,17 +286,11 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
* Documentation/workqueue.txt.
*/
enum {
- /*
- * All wqs are now non-reentrant making the following flag
- * meaningless. Will be removed.
- */
- WQ_NON_REENTRANT = 1 << 0, /* DEPRECATED */
-
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
WQ_FREEZABLE = 1 << 2, /* freeze during suspend */
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
WQ_HIGHPRI = 1 << 4, /* high priority */
- WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */
+ WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
WQ_SYSFS = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
/*
@@ -419,10 +404,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
static struct lock_class_key __key; \
const char *__lock_name; \
\
- if (__builtin_constant_p(fmt)) \
- __lock_name = (fmt); \
- else \
- __lock_name = #fmt; \
+ __lock_name = #fmt#args; \
\
__alloc_workqueue_key((fmt), (flags), (max_active), \
&__key, __lock_name, ##args); \
@@ -605,21 +587,6 @@ static inline bool keventd_up(void)
return system_wq != NULL;
}
-/*
- * Like above, but uses del_timer() instead of del_timer_sync(). This means,
- * if it returns 0 the timer function may be running and the queueing is in
- * progress.
- */
-static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
-{
- bool ret;
-
- ret = del_timer(&work->timer);
- if (ret)
- work_clear_pending(&work->work);
- return ret;
-}
-
/* used to be different but now identical to flush_work(), deprecated */
static inline bool __deprecated flush_work_sync(struct work_struct *work)
{