/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is no color used for works which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
	 * only modifies how :c:func:`apply_workqueue_attrs` selects pools and thus
	 * doesn't participate in pool hash calculations or equality comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

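/*
 * Example (illustrative sketch only, not part of the API above): a
 * statically declared work item bound to a handler.  The names my_work and
 * my_work_fn are hypothetical.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 * The item can then be submitted, e.g. from an interrupt handler, with
 * schedule_work(&my_work) or queue_work(wq, &my_work).
 */
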
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))

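/*
 * Example (illustrative sketch only): embedding work items in a driver
 * structure and initializing them at runtime.  struct my_dev, my_tx_fn and
 * my_poll_fn are hypothetical names.
 *
 *	struct my_dev {
 *		struct work_struct tx_work;
 *		struct delayed_work poll_work;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		INIT_WORK(&dev->tx_work, my_tx_fn);
 *		INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
 *	}
 *
 * A handler recovers the containing structure with container_of(), going
 * through to_delayed_work() for the delayed case:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(to_delayed_work(work),
 *						  struct my_dev, poll_work);
 *		...
 *	}
 */
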
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU, breaking the idleness, which
	 * in turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are found to contribute
	 * significantly to power consumption are marked with this flag, and
	 * enabling the power_efficient mode leads to noticeable power
	 * saving at the cost of a small performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
struct workqueue_struct *alloc_workqueue(const char *fmt,
					 unsigned int flags,
					 int max_active, ...);

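/*
 * Example (illustrative sketch only): allocating and using a dedicated
 * workqueue.  "my_driver", my_wq and my_work are hypothetical names.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_driver", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);
 *
 * Passing 0 as @max_active selects the default (WQ_DFL_ACTIVE).
 */
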
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

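/*
 * Example (illustrative sketch only): adjusting the attributes of an
 * unbound workqueue.  my_wq and my_cpumask are hypothetical.
 *
 *	struct workqueue_attrs *attrs;
 *	int ret;
 *
 *	attrs = alloc_workqueue_attrs();
 *	if (!attrs)
 *		return -ENOMEM;
 *	attrs->nice = -5;
 *	cpumask_copy(attrs->cpumask, my_cpumask);
 *	ret = apply_workqueue_attrs(my_wq, attrs);
 *	free_workqueue_attrs(attrs);
 *	return ret;
 */
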
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

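/*
 * Example (illustrative sketch only): queue_rcu_work() queues ->work on @wq
 * once an RCU grace period has elapsed, which is useful for deferring the
 * release of objects that RCU readers may still be traversing.  struct
 * my_obj (containing an rwork member) and my_free_fn are hypothetical.
 *
 *	static void my_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, rwork);
 *		kfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->rwork, my_free_fn);
 *	queue_rcu_work(system_wq, &obj->rwork);
 */
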
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties: If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

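/*
 * Example (illustrative sketch only): mod_delayed_work() is convenient for
 * debouncing - each call pushes the execution time out again, so the handler
 * only runs once events have stopped arriving for the delay period.
 * dev->flush_work and FLUSH_DELAY_MS are hypothetical.  On every incoming
 * event:
 *
 *	mod_delayed_work(system_wq, &dev->flush_work,
 *			 msecs_to_jiffies(FLUSH_DELAY_MS));
 */
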
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

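/*
 * Example (illustrative sketch only): rather than flushing the whole
 * kernel-global workqueue on teardown, wait for just your own items.
 * struct my_dev and its tx_work/poll_work members are hypothetical.
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->tx_work);
 *		cancel_delayed_work_sync(&dev->poll_work);
 *	}
 */
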
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

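/*
 * Example (illustrative sketch only): work_on_cpu() runs @fn synchronously
 * in process context on @cpu and returns its long result.  read_cpu_state,
 * target_cpu and data are hypothetical.
 *
 *	static long read_cpu_state(void *arg)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	ret = work_on_cpu(target_cpu, read_cpu_state, &data);
 *
 * work_on_cpu_safe() is the variant to prefer when @cpu might be unplugged
 * concurrently; it holds the CPU hotplug lock around the call.
 */
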
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);

#endif