/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

struct work_struct {
	atomic_long_t data;
#define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
#define WORK_STRUCT_STATIC 1		/* static initializer (debugobjects) */
#define WORK_STRUCT_FLAG_MASK (3UL)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)
#define WORK_DATA_STATIC_INIT()	ATOMIC_LONG_INIT(2)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
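
/*
 * Example usage (illustrative sketch; my_poll_fn and the 5 * HZ period
 * are hypothetical): a delayed_work's handler receives a plain
 * work_struct pointer, and to_delayed_work() recovers the container so
 * the handler can re-arm itself.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		... do the periodic work ...
 *		schedule_delayed_work(dwork, 5 * HZ);
 *	}
 */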

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry = { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
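
/*
 * Example usage (illustrative sketch; the item and function names are
 * hypothetical): DECLARE_WORK() builds a fully initialized, statically
 * allocated work item, so it can be handed to schedule_work() with no
 * runtime setup.
 *
 *	static void flush_log_fn(struct work_struct *unused)
 *	{
 *		... write buffered log data out ...
 *	}
 *	static DECLARE_WORK(flush_log_work, flush_log_fn);
 *
 *	(from interrupt or process context:)
 *	schedule_work(&flush_log_work);
 */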

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & (1 << WORK_STRUCT_STATIC);
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)			\
	do {							\
		static struct lock_class_key __key;		\
								\
		__init_work((_work), _onstack);			\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);		\
		PREPARE_WORK((_work), (_func));			\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)			\
	do {							\
		__init_work((_work), _onstack);			\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
		INIT_LIST_HEAD(&(_work)->entry);		\
		PREPARE_WORK((_work), (_func));			\
	} while (0)
#endif

#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ON_STACK(_work, _func)			\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)
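
/*
 * Example usage (illustrative sketch; struct my_dev and my_dev_irq_fn
 * are hypothetical): a work item embedded in a dynamically allocated
 * object must be set up at runtime with INIT_WORK(), and the handler
 * can recover its container with container_of().
 *
 *	struct my_dev {
 *		struct work_struct irq_work;
 *	};
 *
 *	static void my_dev_irq_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  irq_work);
 *		... process dev ...
 *	}
 *
 *	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *	INIT_WORK(&dev->irq_work, my_dev_irq_fn);
 */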

#define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
	do {							\
		INIT_WORK_ON_STACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)
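
/*
 * Example usage (illustrative sketch; do_sync_fn is hypothetical): the
 * _ON_STACK variants tell debugobjects that the item lives in an
 * automatic variable.  Such an item must be waited for before it goes
 * out of scope, and destroy_work_on_stack() pairs with the init when
 * CONFIG_DEBUG_OBJECTS_WORK is enabled.
 *
 *	struct work_struct barr;
 *
 *	INIT_WORK_ON_STACK(&barr, do_sync_fn);
 *	schedule_work(&barr);
 *	flush_work(&barr);
 *	destroy_work_on_stack(&barr);
 */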

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work)					\
	test_bit(WORK_STRUCT_PENDING, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently pending
 * @w: The work item in question
 */
#define delayed_work_pending(w)					\
	work_pending(&(w)->work)
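
/*
 * Example usage (illustrative sketch; dev->stats_work is hypothetical):
 * a pending check is inherently racy against the workqueue machinery,
 * but serves as a cheap hint, e.g. to avoid needlessly pushing a
 * periodic item further out.
 *
 *	if (!delayed_work_pending(&dev->stats_work))
 *		schedule_delayed_work(&dev->stats_work, HZ);
 */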

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work)				\
	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))

extern struct workqueue_struct *
__create_workqueue_key(const char *name, int singlethread, int freezeable,
		       struct lock_class_key *key, const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define __create_workqueue(name, singlethread, freezeable)	\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(name))				\
		__lock_name = (name);				\
	else							\
		__lock_name = #name;				\
								\
	__create_workqueue_key((name), (singlethread),		\
			       (freezeable), &__key,		\
			       __lock_name);			\
})
#else
#define __create_workqueue(name, singlethread, freezeable)	\
	__create_workqueue_key((name), (singlethread), (freezeable), \
			       NULL, NULL)
#endif

#define create_workqueue(name) __create_workqueue((name), 0, 0)
#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1)
#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)

extern void destroy_workqueue(struct workqueue_struct *wq);
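
/*
 * Example usage (illustrative sketch; "mydrv" is a made-up name): a
 * driver typically creates its workqueue on load and destroys it on
 * unload; destroy_workqueue() drains remaining work items first.
 *
 *	static struct workqueue_struct *mydrv_wq;
 *
 *	mydrv_wq = create_singlethread_workqueue("mydrv");
 *	if (!mydrv_wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(mydrv_wq);
 */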

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
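
/*
 * Example usage (illustrative sketch, continuing the hypothetical
 * mydrv_wq above): the queue_*() calls return nonzero if the item was
 * newly queued and 0 if it was already pending; @delay is in jiffies.
 *
 *	queue_work(mydrv_wq, &dev->irq_work);
 *	queue_delayed_work(mydrv_wq, &dev->stats_work,
 *			   msecs_to_jiffies(500));
 */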

extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
extern void flush_delayed_work(struct delayed_work *work);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int current_is_keventd(void);
extern int keventd_up(void);
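
/*
 * Example usage (illustrative sketch; drain_local_fn is hypothetical):
 * schedule_on_each_cpu() runs @func on every online CPU and waits for
 * all of them to finish, so it must be called from process context.
 *
 *	static void drain_local_fn(struct work_struct *unused)
 *	{
 *		... operate on this CPU's per-cpu state ...
 *	}
 *
 *	int err = schedule_on_each_cpu(drain_local_fn);
 */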

extern void init_workqueues(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);
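
/*
 * Example usage (illustrative sketch; struct my_obj and my_release_fn
 * are hypothetical): execute_in_process_context() calls @fn directly
 * when the caller is already in process context, and otherwise defers
 * it through the embedded execute_work item.
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *	};
 *
 *	static void my_release_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj,
 *						  ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_release_fn, &obj->ew);
 */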

extern int flush_work(struct work_struct *work);

extern int cancel_work_sync(struct work_struct *work);
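
/*
 * Example usage (illustrative sketch): on teardown, cancel_work_sync()
 * both cancels a pending item and waits for a running instance of it to
 * finish, so the work function cannot touch freed memory afterwards.
 *
 *	cancel_work_sync(&dev->irq_work);
 *	kfree(dev);
 */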

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
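
/*
 * Example usage (illustrative sketch, following the comment above): if
 * cancel_delayed_work() returns 0 the handler may already be queued or
 * running, so wait for it explicitly when that matters.
 *
 *	if (!cancel_delayed_work(&dev->stats_work))
 *		cancel_work_sync(&dev->stats_work.work);
 */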

/*
 * Like above, but uses del_timer() instead of del_timer_sync().  This means
 * that if it returns 0, the timer function may be running and the queueing
 * is in progress.
 */
static inline int __cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

extern int cancel_delayed_work_sync(struct delayed_work *work);

/* Obsolete. Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
					struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

/* Obsolete. Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_work(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
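
/*
 * Example usage (illustrative sketch; read_msr_fn and msr_addr are
 * hypothetical): work_on_cpu() runs @fn on the given CPU in process
 * context and returns its long result; on !SMP builds it simply calls
 * @fn inline.
 *
 *	static long read_msr_fn(void *arg)
 *	{
 *		...
 *		return value;
 *	}
 *
 *	ret = work_on_cpu(2, read_msr_fn, &msr_addr);
 */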
#endif /* _LINUX_WORKQUEUE_H */