/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(unsigned long __data);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color, used for work items which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_END		= NR_CPUS + 1,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};

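/*
 * Illustrative sketch (not part of the original header) of the data word
 * layout implied by the constants above: the low WORK_STRUCT_FLAG_BITS
 * bits hold the flags; the remaining bits hold the pwq pointer when
 * WORK_STRUCT_PWQ is set, otherwise the last pool ID starting at
 * WORK_OFFQ_POOL_SHIFT.  For example, reading back the last pool of an
 * off-queue work item looks roughly like:
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *
 *	if (!(data & WORK_STRUCT_PWQ))
 *		pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 */
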
struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

/*
 * A struct for workqueue attributes.  This can be used to change
 * attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	int		nice;		/* nice level */
	cpumask_var_t	cpumask;	/* allowed CPUs */
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

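/*
 * A common usage pattern (sketch only; "my_device", "my_timeout" and the
 * handler are hypothetical names): embed a delayed_work in a driver
 * structure and recover the container from the work callback.
 *
 *	struct my_device {
 *		struct delayed_work my_timeout;
 *	};
 *
 *	static void my_timeout_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_device *dev = container_of(dwork, struct my_device,
 *						     my_timeout);
 *		...
 *	}
 */
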
struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     0, (unsigned long)&(n),		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

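/*
 * Usage sketch for the static initializers above ("my_work", "my_dwork"
 * and "my_work_fn" are hypothetical names):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work ran\n");
 *	}
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);
 *
 *	schedule_work(&my_work);		// queue for execution now
 *	schedule_delayed_work(&my_dwork, HZ);	// queue after ~1 second
 */
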
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)					\
	do {								\
		(_work)->func = (_func);				\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)				\
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	do {								\
		__INIT_WORK((_work), (_func), 0);			\
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	do {								\
		__INIT_WORK((_work), (_func), 1);			\
	} while (0)

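/*
 * Runtime initialization sketch (hypothetical "my_dev" structure and
 * handler): work items embedded in dynamically allocated objects are set
 * up with INIT_WORK() before first use.
 *
 *	struct my_dev {
 *		struct work_struct reset_work;
 *	};
 *
 *	static void my_reset_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  reset_work);
 *		...
 *	}
 *
 *	INIT_WORK(&dev->reset_work, my_reset_fn);
 *	...
 *	queue_work(system_wq, &dev->reset_work);
 */
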
#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__setup_timer(&(_work)->timer, delayed_work_timer_fn,	\
			      (unsigned long)(_work),			\
			      (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__setup_timer_on_stack(&(_work)->timer,			\
				       delayed_work_timer_fn,		\
				       (unsigned long)(_work),		\
				       (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

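/*
 * Sketch (hypothetical names): a delayed work item initialized at runtime
 * and armed with queue_delayed_work(); INIT_DEFERRABLE_WORK() is used the
 * same way, but its timer will not wake an otherwise idle CPU.
 *
 *	INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
 *	queue_delayed_work(system_wq, &dev->poll_work, msecs_to_jiffies(500));
 */
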
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

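/*
 * Sketch: the pending tests above are typically used to avoid redundant
 * queueing or to decide whether a cancel is needed ("dev" is hypothetical):
 *
 *	if (!delayed_work_pending(&dev->poll_work))
 *		queue_delayed_work(system_wq, &dev->poll_work, HZ);
 */
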
/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* CPU intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;

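/*
 * Sketch: most users simply queue onto one of the system workqueues
 * (the work items below are hypothetical):
 *
 *	queue_work(system_wq, &short_work);		// short-lived items
 *	queue_work(system_long_wq, &slow_work);		// may run for a while
 *	queue_work(system_unbound_wq, &hog_work);	// no CPU locality needed
 */
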
static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
{
	return system_wq;
}

static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
{
	return system_freezable_wq;
}

/* equivalent to system_wq and system_freezable_wq, deprecated */
#define system_nrt_wq			__system_nrt_wq()
#define system_nrt_freezable_wq		__system_nrt_freezable_wq()

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	if (__builtin_constant_p(fmt))					\
		__lock_name = (fmt);					\
	else								\
		__lock_name = #fmt;					\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif

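/*
 * Sketch (hypothetical driver): allocating a dedicated workqueue with
 * WQ_* flags and the default max_active, then tearing it down:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("mydrv_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */
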
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  It is
 * implemented as an unbound workqueue with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

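/*
 * Sketch: an ordered workqueue runs items strictly one at a time in
 * queueing order, which is useful when work items must not run
 * concurrently (hypothetical name):
 *
 *	wq = alloc_ordered_workqueue("mydrv_ordered", WQ_MEM_RECLAIM);
 */
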
#define create_workqueue(name)						\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)				\
	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)				\
	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);

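/*
 * Sketch: restricting an unbound workqueue to a CPU set and nice level via
 * workqueue_attrs (error handling elided, names hypothetical):
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *
 *	if (attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, cpumask_of(2));
 *		apply_workqueue_attrs(wq, attrs);
 *		free_workqueue_attrs(attrs);
 *	}
 */
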
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

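/*
 * Sketch of a typical teardown sequence: make sure nothing is pending or
 * running before freeing the object that embeds the work items
 * (hypothetical fields):
 *
 *	cancel_delayed_work_sync(&dev->poll_work);
 *	cancel_work_sync(&dev->reset_work);
 *	kfree(dev);
 */
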
extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

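/*
 * Sketch: mod_delayed_work() is convenient for debouncing; every call
 * pushes the execution time back, so the handler runs once, @delay after
 * the last event (hypothetical names):
 *
 *	mod_delayed_work(system_wq, &dev->flush_work, msecs_to_jiffies(100));
 */
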
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

/**
 * keventd_up - is workqueue initialized yet?
 */
static inline bool keventd_up(void)
{
	return system_wq != NULL;
}

/*
 * Like cancel_delayed_work() above, but uses del_timer() instead of
 * del_timer_sync().  This means that if it returns %false, the timer
 * function may still be running and the queueing may be in progress.
 */
static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

/* used to be different but now identical to flush_work(), deprecated */
static inline bool __deprecated flush_work_sync(struct work_struct *work)
{
	return flush_work(work);
}

/* used to be different but now identical to flush_delayed_work(), deprecated */
static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
{
	return flush_delayed_work(dwork);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

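/*
 * Sketch: work_on_cpu() runs @fn synchronously on @cpu and returns its
 * result; on !SMP builds it simply calls the function directly
 * (the callback below is hypothetical):
 *
 *	static long my_percpu_read(void *arg)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	ret = work_on_cpu(2, my_percpu_read, NULL);
 */
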
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif /* CONFIG_SYSFS */

#endif