]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - kernel/workqueue.c
powerpc/iommu/powernv: Get rid of set_iommu_table_base_and_group
[mirror_ubuntu-zesty-kernel.git] / kernel / workqueue.c
1 /*
2 * kernel/workqueue.c - generic async execution with shared worker pool
3 *
4 * Copyright (C) 2002 Ingo Molnar
5 *
6 * Derived from the taskqueue/keventd code by:
7 * David Woodhouse <dwmw2@infradead.org>
8 * Andrew Morton
9 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
10 * Theodore Ts'o <tytso@mit.edu>
11 *
12 * Made to use alloc_percpu by Christoph Lameter.
13 *
14 * Copyright (C) 2010 SUSE Linux Products GmbH
15 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
16 *
17 * This is the generic async execution mechanism. Work items as are
18 * executed in process context. The worker pool is shared and
19 * automatically managed. There are two worker pools for each CPU (one for
20 * normal work items and the other for high priority ones) and some extra
21 * pools for workqueues which are not bound to any specific CPU - the
22 * number of these backing pools is dynamic.
23 *
24 * Please read Documentation/workqueue.txt for details.
25 */
26
27 #include <linux/export.h>
28 #include <linux/kernel.h>
29 #include <linux/sched.h>
30 #include <linux/init.h>
31 #include <linux/signal.h>
32 #include <linux/completion.h>
33 #include <linux/workqueue.h>
34 #include <linux/slab.h>
35 #include <linux/cpu.h>
36 #include <linux/notifier.h>
37 #include <linux/kthread.h>
38 #include <linux/hardirq.h>
39 #include <linux/mempolicy.h>
40 #include <linux/freezer.h>
41 #include <linux/kallsyms.h>
42 #include <linux/debug_locks.h>
43 #include <linux/lockdep.h>
44 #include <linux/idr.h>
45 #include <linux/jhash.h>
46 #include <linux/hashtable.h>
47 #include <linux/rculist.h>
48 #include <linux/nodemask.h>
49 #include <linux/moduleparam.h>
50 #include <linux/uaccess.h>
51
52 #include "workqueue_internal.h"
53
54 enum {
55 /*
56 * worker_pool flags
57 *
58 * A bound pool is either associated or disassociated with its CPU.
59 * While associated (!DISASSOCIATED), all workers are bound to the
60 * CPU and none has %WORKER_UNBOUND set and concurrency management
61 * is in effect.
62 *
63 * While DISASSOCIATED, the cpu may be offline and all workers have
64 * %WORKER_UNBOUND set and concurrency management disabled, and may
65 * be executing on any CPU. The pool behaves as an unbound one.
66 *
67 * Note that DISASSOCIATED should be flipped only while holding
68 * attach_mutex to avoid changing binding state while
69 * worker_attach_to_pool() is in progress.
70 */
71 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
72
73 /* worker flags */
74 WORKER_DIE = 1 << 1, /* die die die */
75 WORKER_IDLE = 1 << 2, /* is idle */
76 WORKER_PREP = 1 << 3, /* preparing to run works */
77 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
78 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
79 WORKER_REBOUND = 1 << 8, /* worker was rebound */
80
81 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
82 WORKER_UNBOUND | WORKER_REBOUND,
83
84 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
85
86 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
87 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
88
89 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
90 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
91
92 MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
93 /* call for help after 10ms
94 (min two ticks) */
95 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
96 CREATE_COOLDOWN = HZ, /* time to breath after fail */
97
98 /*
99 * Rescue workers are used only on emergencies and shared by
100 * all cpus. Give MIN_NICE.
101 */
102 RESCUER_NICE_LEVEL = MIN_NICE,
103 HIGHPRI_NICE_LEVEL = MIN_NICE,
104
105 WQ_NAME_LEN = 24,
106 };
107
108 /*
109 * Structure fields follow one of the following exclusion rules.
110 *
111 * I: Modifiable by initialization/destruction paths and read-only for
112 * everyone else.
113 *
114 * P: Preemption protected. Disabling preemption is enough and should
115 * only be modified and accessed from the local cpu.
116 *
117 * L: pool->lock protected. Access with pool->lock held.
118 *
119 * X: During normal operation, modification requires pool->lock and should
120 * be done only from local cpu. Either disabling preemption on local
121 * cpu or grabbing pool->lock is enough for read access. If
122 * POOL_DISASSOCIATED is set, it's identical to L.
123 *
124 * A: pool->attach_mutex protected.
125 *
126 * PL: wq_pool_mutex protected.
127 *
128 * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
129 *
130 * WQ: wq->mutex protected.
131 *
132 * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
133 *
134 * MD: wq_mayday_lock protected.
135 */
136
137 /* struct worker is defined in workqueue_internal.h */
138
139 struct worker_pool {
140 spinlock_t lock; /* the pool lock */
141 int cpu; /* I: the associated cpu */
142 int node; /* I: the associated node ID */
143 int id; /* I: pool ID */
144 unsigned int flags; /* X: flags */
145
146 struct list_head worklist; /* L: list of pending works */
147 int nr_workers; /* L: total number of workers */
148
149 /* nr_idle includes the ones off idle_list for rebinding */
150 int nr_idle; /* L: currently idle ones */
151
152 struct list_head idle_list; /* X: list of idle workers */
153 struct timer_list idle_timer; /* L: worker idle timeout */
154 struct timer_list mayday_timer; /* L: SOS timer for workers */
155
156 /* a workers is either on busy_hash or idle_list, or the manager */
157 DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
158 /* L: hash of busy workers */
159
160 /* see manage_workers() for details on the two manager mutexes */
161 struct mutex manager_arb; /* manager arbitration */
162 struct worker *manager; /* L: purely informational */
163 struct mutex attach_mutex; /* attach/detach exclusion */
164 struct list_head workers; /* A: attached workers */
165 struct completion *detach_completion; /* all workers detached */
166
167 struct ida worker_ida; /* worker IDs for task name */
168
169 struct workqueue_attrs *attrs; /* I: worker attributes */
170 struct hlist_node hash_node; /* PL: unbound_pool_hash node */
171 int refcnt; /* PL: refcnt for unbound pools */
172
173 /*
174 * The current concurrency level. As it's likely to be accessed
175 * from other CPUs during try_to_wake_up(), put it in a separate
176 * cacheline.
177 */
178 atomic_t nr_running ____cacheline_aligned_in_smp;
179
180 /*
181 * Destruction of pool is sched-RCU protected to allow dereferences
182 * from get_work_pool().
183 */
184 struct rcu_head rcu;
185 } ____cacheline_aligned_in_smp;
186
187 /*
188 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
189 * of work_struct->data are used for flags and the remaining high bits
190 * point to the pwq; thus, pwqs need to be aligned at two's power of the
191 * number of flag bits.
192 */
193 struct pool_workqueue {
194 struct worker_pool *pool; /* I: the associated pool */
195 struct workqueue_struct *wq; /* I: the owning workqueue */
196 int work_color; /* L: current color */
197 int flush_color; /* L: flushing color */
198 int refcnt; /* L: reference count */
199 int nr_in_flight[WORK_NR_COLORS];
200 /* L: nr of in_flight works */
201 int nr_active; /* L: nr of active works */
202 int max_active; /* L: max active works */
203 struct list_head delayed_works; /* L: delayed works */
204 struct list_head pwqs_node; /* WR: node on wq->pwqs */
205 struct list_head mayday_node; /* MD: node on wq->maydays */
206
207 /*
208 * Release of unbound pwq is punted to system_wq. See put_pwq()
209 * and pwq_unbound_release_workfn() for details. pool_workqueue
210 * itself is also sched-RCU protected so that the first pwq can be
211 * determined without grabbing wq->mutex.
212 */
213 struct work_struct unbound_release_work;
214 struct rcu_head rcu;
215 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
216
217 /*
218 * Structure used to wait for workqueue flush.
219 */
220 struct wq_flusher {
221 struct list_head list; /* WQ: list of flushers */
222 int flush_color; /* WQ: flush color waiting for */
223 struct completion done; /* flush completion */
224 };
225
226 struct wq_device;
227
228 /*
229 * The externally visible workqueue. It relays the issued work items to
230 * the appropriate worker_pool through its pool_workqueues.
231 */
232 struct workqueue_struct {
233 struct list_head pwqs; /* WR: all pwqs of this wq */
234 struct list_head list; /* PR: list of all workqueues */
235
236 struct mutex mutex; /* protects this wq */
237 int work_color; /* WQ: current work color */
238 int flush_color; /* WQ: current flush color */
239 atomic_t nr_pwqs_to_flush; /* flush in progress */
240 struct wq_flusher *first_flusher; /* WQ: first flusher */
241 struct list_head flusher_queue; /* WQ: flush waiters */
242 struct list_head flusher_overflow; /* WQ: flush overflow list */
243
244 struct list_head maydays; /* MD: pwqs requesting rescue */
245 struct worker *rescuer; /* I: rescue worker */
246
247 int nr_drainers; /* WQ: drain in progress */
248 int saved_max_active; /* WQ: saved pwq max_active */
249
250 struct workqueue_attrs *unbound_attrs; /* WQ: only for unbound wqs */
251 struct pool_workqueue *dfl_pwq; /* WQ: only for unbound wqs */
252
253 #ifdef CONFIG_SYSFS
254 struct wq_device *wq_dev; /* I: for sysfs interface */
255 #endif
256 #ifdef CONFIG_LOCKDEP
257 struct lockdep_map lockdep_map;
258 #endif
259 char name[WQ_NAME_LEN]; /* I: workqueue name */
260
261 /*
262 * Destruction of workqueue_struct is sched-RCU protected to allow
263 * walking the workqueues list without grabbing wq_pool_mutex.
264 * This is used to dump all workqueues from sysrq.
265 */
266 struct rcu_head rcu;
267
268 /* hot fields used during command issue, aligned to cacheline */
269 unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
270 struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
271 struct pool_workqueue __rcu *numa_pwq_tbl[]; /* FR: unbound pwqs indexed by node */
272 };
273
274 static struct kmem_cache *pwq_cache;
275
276 static cpumask_var_t *wq_numa_possible_cpumask;
277 /* possible CPUs of each node */
278
279 static bool wq_disable_numa;
280 module_param_named(disable_numa, wq_disable_numa, bool, 0444);
281
282 /* see the comment above the definition of WQ_POWER_EFFICIENT */
283 #ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
284 static bool wq_power_efficient = true;
285 #else
286 static bool wq_power_efficient;
287 #endif
288
289 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
290
291 static bool wq_numa_enabled; /* unbound NUMA affinity enabled */
292
293 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
294 static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
295
296 static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
297 static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
298
299 static LIST_HEAD(workqueues); /* PR: list of all workqueues */
300 static bool workqueue_freezing; /* PL: have wqs started freezing? */
301
302 /* the per-cpu worker pools */
303 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
304 cpu_worker_pools);
305
306 static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
307
308 /* PL: hash of all unbound pools keyed by pool->attrs */
309 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
310
311 /* I: attributes used when instantiating standard unbound pools on demand */
312 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
313
314 /* I: attributes used when instantiating ordered pools on demand */
315 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
316
317 struct workqueue_struct *system_wq __read_mostly;
318 EXPORT_SYMBOL(system_wq);
319 struct workqueue_struct *system_highpri_wq __read_mostly;
320 EXPORT_SYMBOL_GPL(system_highpri_wq);
321 struct workqueue_struct *system_long_wq __read_mostly;
322 EXPORT_SYMBOL_GPL(system_long_wq);
323 struct workqueue_struct *system_unbound_wq __read_mostly;
324 EXPORT_SYMBOL_GPL(system_unbound_wq);
325 struct workqueue_struct *system_freezable_wq __read_mostly;
326 EXPORT_SYMBOL_GPL(system_freezable_wq);
327 struct workqueue_struct *system_power_efficient_wq __read_mostly;
328 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
329 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
330 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
331
332 static int worker_thread(void *__worker);
333 static void copy_workqueue_attrs(struct workqueue_attrs *to,
334 const struct workqueue_attrs *from);
335 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
336
337 #define CREATE_TRACE_POINTS
338 #include <trace/events/workqueue.h>
339
340 #define assert_rcu_or_pool_mutex() \
341 rcu_lockdep_assert(rcu_read_lock_sched_held() || \
342 lockdep_is_held(&wq_pool_mutex), \
343 "sched RCU or wq_pool_mutex should be held")
344
345 #define assert_rcu_or_wq_mutex(wq) \
346 rcu_lockdep_assert(rcu_read_lock_sched_held() || \
347 lockdep_is_held(&wq->mutex), \
348 "sched RCU or wq->mutex should be held")
349
350 #define for_each_cpu_worker_pool(pool, cpu) \
351 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
352 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
353 (pool)++)
354
355 /**
356 * for_each_pool - iterate through all worker_pools in the system
357 * @pool: iteration cursor
358 * @pi: integer used for iteration
359 *
360 * This must be called either with wq_pool_mutex held or sched RCU read
361 * locked. If the pool needs to be used beyond the locking in effect, the
362 * caller is responsible for guaranteeing that the pool stays online.
363 *
364 * The if/else clause exists only for the lockdep assertion and can be
365 * ignored.
366 */
367 #define for_each_pool(pool, pi) \
368 idr_for_each_entry(&worker_pool_idr, pool, pi) \
369 if (({ assert_rcu_or_pool_mutex(); false; })) { } \
370 else
371
372 /**
373 * for_each_pool_worker - iterate through all workers of a worker_pool
374 * @worker: iteration cursor
375 * @pool: worker_pool to iterate workers of
376 *
377 * This must be called with @pool->attach_mutex.
378 *
379 * The if/else clause exists only for the lockdep assertion and can be
380 * ignored.
381 */
382 #define for_each_pool_worker(worker, pool) \
383 list_for_each_entry((worker), &(pool)->workers, node) \
384 if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
385 else
386
387 /**
388 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
389 * @pwq: iteration cursor
390 * @wq: the target workqueue
391 *
392 * This must be called either with wq->mutex held or sched RCU read locked.
393 * If the pwq needs to be used beyond the locking in effect, the caller is
394 * responsible for guaranteeing that the pwq stays online.
395 *
396 * The if/else clause exists only for the lockdep assertion and can be
397 * ignored.
398 */
399 #define for_each_pwq(pwq, wq) \
400 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
401 if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
402 else
403
404 #ifdef CONFIG_DEBUG_OBJECTS_WORK
405
406 static struct debug_obj_descr work_debug_descr;
407
408 static void *work_debug_hint(void *addr)
409 {
410 return ((struct work_struct *) addr)->func;
411 }
412
413 /*
414 * fixup_init is called when:
415 * - an active object is initialized
416 */
417 static int work_fixup_init(void *addr, enum debug_obj_state state)
418 {
419 struct work_struct *work = addr;
420
421 switch (state) {
422 case ODEBUG_STATE_ACTIVE:
423 cancel_work_sync(work);
424 debug_object_init(work, &work_debug_descr);
425 return 1;
426 default:
427 return 0;
428 }
429 }
430
431 /*
432 * fixup_activate is called when:
433 * - an active object is activated
434 * - an unknown object is activated (might be a statically initialized object)
435 */
436 static int work_fixup_activate(void *addr, enum debug_obj_state state)
437 {
438 struct work_struct *work = addr;
439
440 switch (state) {
441
442 case ODEBUG_STATE_NOTAVAILABLE:
443 /*
444 * This is not really a fixup. The work struct was
445 * statically initialized. We just make sure that it
446 * is tracked in the object tracker.
447 */
448 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
449 debug_object_init(work, &work_debug_descr);
450 debug_object_activate(work, &work_debug_descr);
451 return 0;
452 }
453 WARN_ON_ONCE(1);
454 return 0;
455
456 case ODEBUG_STATE_ACTIVE:
457 WARN_ON(1);
458
459 default:
460 return 0;
461 }
462 }
463
464 /*
465 * fixup_free is called when:
466 * - an active object is freed
467 */
468 static int work_fixup_free(void *addr, enum debug_obj_state state)
469 {
470 struct work_struct *work = addr;
471
472 switch (state) {
473 case ODEBUG_STATE_ACTIVE:
474 cancel_work_sync(work);
475 debug_object_free(work, &work_debug_descr);
476 return 1;
477 default:
478 return 0;
479 }
480 }
481
482 static struct debug_obj_descr work_debug_descr = {
483 .name = "work_struct",
484 .debug_hint = work_debug_hint,
485 .fixup_init = work_fixup_init,
486 .fixup_activate = work_fixup_activate,
487 .fixup_free = work_fixup_free,
488 };
489
490 static inline void debug_work_activate(struct work_struct *work)
491 {
492 debug_object_activate(work, &work_debug_descr);
493 }
494
495 static inline void debug_work_deactivate(struct work_struct *work)
496 {
497 debug_object_deactivate(work, &work_debug_descr);
498 }
499
500 void __init_work(struct work_struct *work, int onstack)
501 {
502 if (onstack)
503 debug_object_init_on_stack(work, &work_debug_descr);
504 else
505 debug_object_init(work, &work_debug_descr);
506 }
507 EXPORT_SYMBOL_GPL(__init_work);
508
509 void destroy_work_on_stack(struct work_struct *work)
510 {
511 debug_object_free(work, &work_debug_descr);
512 }
513 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
514
515 void destroy_delayed_work_on_stack(struct delayed_work *work)
516 {
517 destroy_timer_on_stack(&work->timer);
518 debug_object_free(&work->work, &work_debug_descr);
519 }
520 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
521
522 #else
523 static inline void debug_work_activate(struct work_struct *work) { }
524 static inline void debug_work_deactivate(struct work_struct *work) { }
525 #endif
526
527 /**
528 * worker_pool_assign_id - allocate ID and assing it to @pool
529 * @pool: the pool pointer of interest
530 *
531 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
532 * successfully, -errno on failure.
533 */
534 static int worker_pool_assign_id(struct worker_pool *pool)
535 {
536 int ret;
537
538 lockdep_assert_held(&wq_pool_mutex);
539
540 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
541 GFP_KERNEL);
542 if (ret >= 0) {
543 pool->id = ret;
544 return 0;
545 }
546 return ret;
547 }
548
549 /**
550 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
551 * @wq: the target workqueue
552 * @node: the node ID
553 *
554 * This must be called either with pwq_lock held or sched RCU read locked.
555 * If the pwq needs to be used beyond the locking in effect, the caller is
556 * responsible for guaranteeing that the pwq stays online.
557 *
558 * Return: The unbound pool_workqueue for @node.
559 */
560 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
561 int node)
562 {
563 assert_rcu_or_wq_mutex(wq);
564 return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
565 }
566
567 static unsigned int work_color_to_flags(int color)
568 {
569 return color << WORK_STRUCT_COLOR_SHIFT;
570 }
571
572 static int get_work_color(struct work_struct *work)
573 {
574 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
575 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
576 }
577
578 static int work_next_color(int color)
579 {
580 return (color + 1) % WORK_NR_COLORS;
581 }
582
583 /*
584 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
585 * contain the pointer to the queued pwq. Once execution starts, the flag
586 * is cleared and the high bits contain OFFQ flags and pool ID.
587 *
588 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
589 * and clear_work_data() can be used to set the pwq, pool or clear
590 * work->data. These functions should only be called while the work is
591 * owned - ie. while the PENDING bit is set.
592 *
593 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
594 * corresponding to a work. Pool is available once the work has been
595 * queued anywhere after initialization until it is sync canceled. pwq is
596 * available only while the work item is queued.
597 *
598 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
599 * canceled. While being canceled, a work item may have its PENDING set
600 * but stay off timer and worklist for arbitrarily long and nobody should
601 * try to steal the PENDING bit.
602 */
603 static inline void set_work_data(struct work_struct *work, unsigned long data,
604 unsigned long flags)
605 {
606 WARN_ON_ONCE(!work_pending(work));
607 atomic_long_set(&work->data, data | flags | work_static(work));
608 }
609
610 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
611 unsigned long extra_flags)
612 {
613 set_work_data(work, (unsigned long)pwq,
614 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
615 }
616
617 static void set_work_pool_and_keep_pending(struct work_struct *work,
618 int pool_id)
619 {
620 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
621 WORK_STRUCT_PENDING);
622 }
623
624 static void set_work_pool_and_clear_pending(struct work_struct *work,
625 int pool_id)
626 {
627 /*
628 * The following wmb is paired with the implied mb in
629 * test_and_set_bit(PENDING) and ensures all updates to @work made
630 * here are visible to and precede any updates by the next PENDING
631 * owner.
632 */
633 smp_wmb();
634 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
635 }
636
637 static void clear_work_data(struct work_struct *work)
638 {
639 smp_wmb(); /* see set_work_pool_and_clear_pending() */
640 set_work_data(work, WORK_STRUCT_NO_POOL, 0);
641 }
642
643 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
644 {
645 unsigned long data = atomic_long_read(&work->data);
646
647 if (data & WORK_STRUCT_PWQ)
648 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
649 else
650 return NULL;
651 }
652
653 /**
654 * get_work_pool - return the worker_pool a given work was associated with
655 * @work: the work item of interest
656 *
657 * Pools are created and destroyed under wq_pool_mutex, and allows read
658 * access under sched-RCU read lock. As such, this function should be
659 * called under wq_pool_mutex or with preemption disabled.
660 *
661 * All fields of the returned pool are accessible as long as the above
662 * mentioned locking is in effect. If the returned pool needs to be used
663 * beyond the critical section, the caller is responsible for ensuring the
664 * returned pool is and stays online.
665 *
666 * Return: The worker_pool @work was last associated with. %NULL if none.
667 */
668 static struct worker_pool *get_work_pool(struct work_struct *work)
669 {
670 unsigned long data = atomic_long_read(&work->data);
671 int pool_id;
672
673 assert_rcu_or_pool_mutex();
674
675 if (data & WORK_STRUCT_PWQ)
676 return ((struct pool_workqueue *)
677 (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
678
679 pool_id = data >> WORK_OFFQ_POOL_SHIFT;
680 if (pool_id == WORK_OFFQ_POOL_NONE)
681 return NULL;
682
683 return idr_find(&worker_pool_idr, pool_id);
684 }
685
686 /**
687 * get_work_pool_id - return the worker pool ID a given work is associated with
688 * @work: the work item of interest
689 *
690 * Return: The worker_pool ID @work was last associated with.
691 * %WORK_OFFQ_POOL_NONE if none.
692 */
693 static int get_work_pool_id(struct work_struct *work)
694 {
695 unsigned long data = atomic_long_read(&work->data);
696
697 if (data & WORK_STRUCT_PWQ)
698 return ((struct pool_workqueue *)
699 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
700
701 return data >> WORK_OFFQ_POOL_SHIFT;
702 }
703
704 static void mark_work_canceling(struct work_struct *work)
705 {
706 unsigned long pool_id = get_work_pool_id(work);
707
708 pool_id <<= WORK_OFFQ_POOL_SHIFT;
709 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
710 }
711
712 static bool work_is_canceling(struct work_struct *work)
713 {
714 unsigned long data = atomic_long_read(&work->data);
715
716 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
717 }
718
719 /*
720 * Policy functions. These define the policies on how the global worker
721 * pools are managed. Unless noted otherwise, these functions assume that
722 * they're being called with pool->lock held.
723 */
724
725 static bool __need_more_worker(struct worker_pool *pool)
726 {
727 return !atomic_read(&pool->nr_running);
728 }
729
730 /*
731 * Need to wake up a worker? Called from anything but currently
732 * running workers.
733 *
734 * Note that, because unbound workers never contribute to nr_running, this
735 * function will always return %true for unbound pools as long as the
736 * worklist isn't empty.
737 */
738 static bool need_more_worker(struct worker_pool *pool)
739 {
740 return !list_empty(&pool->worklist) && __need_more_worker(pool);
741 }
742
743 /* Can I start working? Called from busy but !running workers. */
744 static bool may_start_working(struct worker_pool *pool)
745 {
746 return pool->nr_idle;
747 }
748
749 /* Do I need to keep working? Called from currently running workers. */
750 static bool keep_working(struct worker_pool *pool)
751 {
752 return !list_empty(&pool->worklist) &&
753 atomic_read(&pool->nr_running) <= 1;
754 }
755
756 /* Do we need a new worker? Called from manager. */
757 static bool need_to_create_worker(struct worker_pool *pool)
758 {
759 return need_more_worker(pool) && !may_start_working(pool);
760 }
761
762 /* Do we have too many workers and should some go away? */
763 static bool too_many_workers(struct worker_pool *pool)
764 {
765 bool managing = mutex_is_locked(&pool->manager_arb);
766 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
767 int nr_busy = pool->nr_workers - nr_idle;
768
769 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
770 }
771
772 /*
773 * Wake up functions.
774 */
775
776 /* Return the first idle worker. Safe with preemption disabled */
777 static struct worker *first_idle_worker(struct worker_pool *pool)
778 {
779 if (unlikely(list_empty(&pool->idle_list)))
780 return NULL;
781
782 return list_first_entry(&pool->idle_list, struct worker, entry);
783 }
784
785 /**
786 * wake_up_worker - wake up an idle worker
787 * @pool: worker pool to wake worker from
788 *
789 * Wake up the first idle worker of @pool.
790 *
791 * CONTEXT:
792 * spin_lock_irq(pool->lock).
793 */
794 static void wake_up_worker(struct worker_pool *pool)
795 {
796 struct worker *worker = first_idle_worker(pool);
797
798 if (likely(worker))
799 wake_up_process(worker->task);
800 }
801
802 /**
803 * wq_worker_waking_up - a worker is waking up
804 * @task: task waking up
805 * @cpu: CPU @task is waking up to
806 *
807 * This function is called during try_to_wake_up() when a worker is
808 * being awoken.
809 *
810 * CONTEXT:
811 * spin_lock_irq(rq->lock)
812 */
813 void wq_worker_waking_up(struct task_struct *task, int cpu)
814 {
815 struct worker *worker = kthread_data(task);
816
817 if (!(worker->flags & WORKER_NOT_RUNNING)) {
818 WARN_ON_ONCE(worker->pool->cpu != cpu);
819 atomic_inc(&worker->pool->nr_running);
820 }
821 }
822
823 /**
824 * wq_worker_sleeping - a worker is going to sleep
825 * @task: task going to sleep
826 * @cpu: CPU in question, must be the current CPU number
827 *
828 * This function is called during schedule() when a busy worker is
829 * going to sleep. Worker on the same cpu can be woken up by
830 * returning pointer to its task.
831 *
832 * CONTEXT:
833 * spin_lock_irq(rq->lock)
834 *
835 * Return:
836 * Worker task on @cpu to wake up, %NULL if none.
837 */
838 struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
839 {
840 struct worker *worker = kthread_data(task), *to_wakeup = NULL;
841 struct worker_pool *pool;
842
843 /*
844 * Rescuers, which may not have all the fields set up like normal
845 * workers, also reach here, let's not access anything before
846 * checking NOT_RUNNING.
847 */
848 if (worker->flags & WORKER_NOT_RUNNING)
849 return NULL;
850
851 pool = worker->pool;
852
853 /* this can only happen on the local cpu */
854 if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu))
855 return NULL;
856
857 /*
858 * The counterpart of the following dec_and_test, implied mb,
859 * worklist not empty test sequence is in insert_work().
860 * Please read comment there.
861 *
862 * NOT_RUNNING is clear. This means that we're bound to and
863 * running on the local cpu w/ rq lock held and preemption
864 * disabled, which in turn means that none else could be
865 * manipulating idle_list, so dereferencing idle_list without pool
866 * lock is safe.
867 */
868 if (atomic_dec_and_test(&pool->nr_running) &&
869 !list_empty(&pool->worklist))
870 to_wakeup = first_idle_worker(pool);
871 return to_wakeup ? to_wakeup->task : NULL;
872 }
873
874 /**
875 * worker_set_flags - set worker flags and adjust nr_running accordingly
876 * @worker: self
877 * @flags: flags to set
878 *
879 * Set @flags in @worker->flags and adjust nr_running accordingly.
880 *
881 * CONTEXT:
882 * spin_lock_irq(pool->lock)
883 */
884 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
885 {
886 struct worker_pool *pool = worker->pool;
887
888 WARN_ON_ONCE(worker->task != current);
889
890 /* If transitioning into NOT_RUNNING, adjust nr_running. */
891 if ((flags & WORKER_NOT_RUNNING) &&
892 !(worker->flags & WORKER_NOT_RUNNING)) {
893 atomic_dec(&pool->nr_running);
894 }
895
896 worker->flags |= flags;
897 }
898
899 /**
900 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
901 * @worker: self
902 * @flags: flags to clear
903 *
904 * Clear @flags in @worker->flags and adjust nr_running accordingly.
905 *
906 * CONTEXT:
907 * spin_lock_irq(pool->lock)
908 */
909 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
910 {
911 struct worker_pool *pool = worker->pool;
912 unsigned int oflags = worker->flags;
913
914 WARN_ON_ONCE(worker->task != current);
915
916 worker->flags &= ~flags;
917
918 /*
919 * If transitioning out of NOT_RUNNING, increment nr_running. Note
920 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask
921 * of multiple flags, not a single flag.
922 */
923 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
924 if (!(worker->flags & WORKER_NOT_RUNNING))
925 atomic_inc(&pool->nr_running);
926 }
927
928 /**
929 * find_worker_executing_work - find worker which is executing a work
930 * @pool: pool of interest
931 * @work: work to find worker for
932 *
933 * Find a worker which is executing @work on @pool by searching
934 * @pool->busy_hash which is keyed by the address of @work. For a worker
935 * to match, its current execution should match the address of @work and
936 * its work function. This is to avoid unwanted dependency between
937 * unrelated work executions through a work item being recycled while still
938 * being executed.
939 *
940 * This is a bit tricky. A work item may be freed once its execution
941 * starts and nothing prevents the freed area from being recycled for
942 * another work item. If the same work item address ends up being reused
943 * before the original execution finishes, workqueue will identify the
944 * recycled work item as currently executing and make it wait until the
945 * current execution finishes, introducing an unwanted dependency.
946 *
947 * This function checks the work item address and work function to avoid
948 * false positives. Note that this isn't complete as one may construct a
949 * work function which can introduce dependency onto itself through a
950 * recycled work item. Well, if somebody wants to shoot oneself in the
951 * foot that badly, there's only so much we can do, and if such deadlock
952 * actually occurs, it should be easy to locate the culprit work function.
953 *
954 * CONTEXT:
955 * spin_lock_irq(pool->lock).
956 *
957 * Return:
958 * Pointer to worker which is executing @work if found, %NULL
959 * otherwise.
960 */
961 static struct worker *find_worker_executing_work(struct worker_pool *pool,
962 struct work_struct *work)
963 {
964 struct worker *worker;
965
966 hash_for_each_possible(pool->busy_hash, worker, hentry,
967 (unsigned long)work)
968 if (worker->current_work == work &&
969 worker->current_func == work->func)
970 return worker;
971
972 return NULL;
973 }
974
975 /**
976 * move_linked_works - move linked works to a list
977 * @work: start of series of works to be scheduled
978 * @head: target list to append @work to
979 * @nextp: out paramter for nested worklist walking
980 *
981 * Schedule linked works starting from @work to @head. Work series to
982 * be scheduled starts at @work and includes any consecutive work with
983 * WORK_STRUCT_LINKED set in its predecessor.
984 *
985 * If @nextp is not NULL, it's updated to point to the next work of
986 * the last scheduled work. This allows move_linked_works() to be
987 * nested inside outer list_for_each_entry_safe().
988 *
989 * CONTEXT:
990 * spin_lock_irq(pool->lock).
991 */
992 static void move_linked_works(struct work_struct *work, struct list_head *head,
993 struct work_struct **nextp)
994 {
995 struct work_struct *n;
996
997 /*
998 * Linked worklist will always end before the end of the list,
999 * use NULL for list head.
1000 */
1001 list_for_each_entry_safe_from(work, n, NULL, entry) {
1002 list_move_tail(&work->entry, head);
1003 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1004 break;
1005 }
1006
1007 /*
1008 * If we're already inside safe list traversal and have moved
1009 * multiple works to the scheduled queue, the next position
1010 * needs to be updated.
1011 */
1012 if (nextp)
1013 *nextp = n;
1014 }
1015
1016 /**
1017 * get_pwq - get an extra reference on the specified pool_workqueue
1018 * @pwq: pool_workqueue to get
1019 *
1020 * Obtain an extra reference on @pwq. The caller should guarantee that
1021 * @pwq has positive refcnt and be holding the matching pool->lock.
1022 */
1023 static void get_pwq(struct pool_workqueue *pwq)
1024 {
1025 lockdep_assert_held(&pwq->pool->lock);
1026 WARN_ON_ONCE(pwq->refcnt <= 0);
1027 pwq->refcnt++;
1028 }
1029
1030 /**
1031 * put_pwq - put a pool_workqueue reference
1032 * @pwq: pool_workqueue to put
1033 *
1034 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its
1035 * destruction. The caller should be holding the matching pool->lock.
1036 */
1037 static void put_pwq(struct pool_workqueue *pwq)
1038 {
1039 lockdep_assert_held(&pwq->pool->lock);
1040 if (likely(--pwq->refcnt))
1041 return;
1042 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1043 return;
1044 /*
1045 * @pwq can't be released under pool->lock, bounce to
1046 * pwq_unbound_release_workfn(). This never recurses on the same
1047 * pool->lock as this path is taken only for unbound workqueues and
1048 * the release work item is scheduled on a per-cpu workqueue. To
1049 * avoid lockdep warning, unbound pool->locks are given lockdep
1050 * subclass of 1 in get_unbound_pool().
1051 */
1052 schedule_work(&pwq->unbound_release_work);
1053 }
1054
1055 /**
1056 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1057 * @pwq: pool_workqueue to put (can be %NULL)
1058 *
1059 * put_pwq() with locking. This function also allows %NULL @pwq.
1060 */
1061 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1062 {
1063 if (pwq) {
1064 /*
1065 * As both pwqs and pools are sched-RCU protected, the
1066 * following lock operations are safe.
1067 */
1068 spin_lock_irq(&pwq->pool->lock);
1069 put_pwq(pwq);
1070 spin_unlock_irq(&pwq->pool->lock);
1071 }
1072 }
1073
1074 static void pwq_activate_delayed_work(struct work_struct *work)
1075 {
1076 struct pool_workqueue *pwq = get_work_pwq(work);
1077
1078 trace_workqueue_activate_work(work);
1079 move_linked_works(work, &pwq->pool->worklist, NULL);
1080 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1081 pwq->nr_active++;
1082 }
1083
1084 static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
1085 {
1086 struct work_struct *work = list_first_entry(&pwq->delayed_works,
1087 struct work_struct, entry);
1088
1089 pwq_activate_delayed_work(work);
1090 }
1091
1092 /**
1093 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1094 * @pwq: pwq of interest
1095 * @color: color of work which left the queue
1096 *
1097 * A work either has completed or is removed from pending queue,
1098 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1099 *
1100 * CONTEXT:
1101 * spin_lock_irq(pool->lock).
1102 */
1103 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1104 {
1105 /* uncolored work items don't participate in flushing or nr_active */
1106 if (color == WORK_NO_COLOR)
1107 goto out_put;
1108
1109 pwq->nr_in_flight[color]--;
1110
1111 pwq->nr_active--;
1112 if (!list_empty(&pwq->delayed_works)) {
1113 /* one down, submit a delayed one */
1114 if (pwq->nr_active < pwq->max_active)
1115 pwq_activate_first_delayed(pwq);
1116 }
1117
1118 /* is flush in progress and are we at the flushing tip? */
1119 if (likely(pwq->flush_color != color))
1120 goto out_put;
1121
1122 /* are there still in-flight works? */
1123 if (pwq->nr_in_flight[color])
1124 goto out_put;
1125
1126 /* this pwq is done, clear flush_color */
1127 pwq->flush_color = -1;
1128
1129 /*
1130 * If this was the last pwq, wake up the first flusher. It
1131 * will handle the rest.
1132 */
1133 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1134 complete(&pwq->wq->first_flusher->done);
1135 out_put:
1136 put_pwq(pwq);
1137 }
1138
1139 /**
1140 * try_to_grab_pending - steal work item from worklist and disable irq
1141 * @work: work item to steal
1142 * @is_dwork: @work is a delayed_work
1143 * @flags: place to store irq state
1144 *
1145 * Try to grab PENDING bit of @work. This function can handle @work in any
1146 * stable state - idle, on timer or on worklist.
1147 *
1148 * Return:
1149 * 1 if @work was pending and we successfully stole PENDING
1150 * 0 if @work was idle and we claimed PENDING
1151 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
1152 * -ENOENT if someone else is canceling @work, this state may persist
1153 * for arbitrarily long
1154 *
1155 * Note:
1156 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
1157 * interrupted while holding PENDING and @work off queue, irq must be
1158 * disabled on entry. This, combined with delayed_work->timer being
1159 * irqsafe, ensures that we return -EAGAIN for finite short period of time.
1160 *
1161 * On successful return, >= 0, irq is disabled and the caller is
1162 * responsible for releasing it using local_irq_restore(*@flags).
1163 *
1164 * This function is safe to call from any context including IRQ handler.
1165 */
1166 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1167 unsigned long *flags)
1168 {
1169 struct worker_pool *pool;
1170 struct pool_workqueue *pwq;
1171
1172 local_irq_save(*flags);
1173
1174 /* try to steal the timer if it exists */
1175 if (is_dwork) {
1176 struct delayed_work *dwork = to_delayed_work(work);
1177
1178 /*
1179 * dwork->timer is irqsafe. If del_timer() fails, it's
1180 * guaranteed that the timer is not queued anywhere and not
1181 * running on the local CPU.
1182 */
1183 if (likely(del_timer(&dwork->timer)))
1184 return 1;
1185 }
1186
1187 /* try to claim PENDING the normal way */
1188 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1189 return 0;
1190
1191 /*
1192 * The queueing is in progress, or it is already queued. Try to
1193 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1194 */
1195 pool = get_work_pool(work);
1196 if (!pool)
1197 goto fail;
1198
1199 spin_lock(&pool->lock);
1200 /*
1201 * work->data is guaranteed to point to pwq only while the work
1202 * item is queued on pwq->wq, and both updating work->data to point
1203 * to pwq on queueing and to pool on dequeueing are done under
1204 * pwq->pool->lock. This in turn guarantees that, if work->data
1205 * points to pwq which is associated with a locked pool, the work
1206 * item is currently queued on that pool.
1207 */
1208 pwq = get_work_pwq(work);
1209 if (pwq && pwq->pool == pool) {
1210 debug_work_deactivate(work);
1211
1212 /*
1213 * A delayed work item cannot be grabbed directly because
1214 * it might have linked NO_COLOR work items which, if left
1215 * on the delayed_list, will confuse pwq->nr_active
1216 * management later on and cause stall. Make sure the work
1217 * item is activated before grabbing.
1218 */
1219 if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1220 pwq_activate_delayed_work(work);
1221
1222 list_del_init(&work->entry);
1223 pwq_dec_nr_in_flight(pwq, get_work_color(work));
1224
1225 /* work->data points to pwq iff queued, point to pool */
1226 set_work_pool_and_keep_pending(work, pool->id);
1227
1228 spin_unlock(&pool->lock);
1229 return 1;
1230 }
1231 spin_unlock(&pool->lock);
1232 fail:
1233 local_irq_restore(*flags);
1234 if (work_is_canceling(work))
1235 return -ENOENT;
1236 cpu_relax();
1237 return -EAGAIN;
1238 }
1239
1240 /**
1241 * insert_work - insert a work into a pool
1242 * @pwq: pwq @work belongs to
1243 * @work: work to insert
1244 * @head: insertion point
1245 * @extra_flags: extra WORK_STRUCT_* flags to set
1246 *
1247 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
1248 * work_struct flags.
1249 *
1250 * CONTEXT:
1251 * spin_lock_irq(pool->lock).
1252 */
1253 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1254 struct list_head *head, unsigned int extra_flags)
1255 {
1256 struct worker_pool *pool = pwq->pool;
1257
1258 /* we own @work, set data and link */
1259 set_work_pwq(work, pwq, extra_flags);
1260 list_add_tail(&work->entry, head);
1261 get_pwq(pwq);
1262
1263 /*
1264 * Ensure either wq_worker_sleeping() sees the above
1265 * list_add_tail() or we see zero nr_running to avoid workers lying
1266 * around lazily while there are works to be processed.
1267 */
1268 smp_mb();
1269
1270 if (__need_more_worker(pool))
1271 wake_up_worker(pool);
1272 }
1273
1274 /*
1275 * Test whether @work is being queued from another work executing on the
1276 * same workqueue.
1277 */
1278 static bool is_chained_work(struct workqueue_struct *wq)
1279 {
1280 struct worker *worker;
1281
1282 worker = current_wq_worker();
1283 /*
1284 * Return %true iff I'm a worker execuing a work item on @wq. If
1285 * I'm @worker, it's safe to dereference it without locking.
1286 */
1287 return worker && worker->current_pwq->wq == wq;
1288 }
1289
1290 static void __queue_work(int cpu, struct workqueue_struct *wq,
1291 struct work_struct *work)
1292 {
1293 struct pool_workqueue *pwq;
1294 struct worker_pool *last_pool;
1295 struct list_head *worklist;
1296 unsigned int work_flags;
1297 unsigned int req_cpu = cpu;
1298
1299 /*
1300 * While a work item is PENDING && off queue, a task trying to
1301 * steal the PENDING will busy-loop waiting for it to either get
1302 * queued or lose PENDING. Grabbing PENDING and queueing should
1303 * happen with IRQ disabled.
1304 */
1305 WARN_ON_ONCE(!irqs_disabled());
1306
1307 debug_work_activate(work);
1308
1309 /* if draining, only works from the same workqueue are allowed */
1310 if (unlikely(wq->flags & __WQ_DRAINING) &&
1311 WARN_ON_ONCE(!is_chained_work(wq)))
1312 return;
1313 retry:
1314 if (req_cpu == WORK_CPU_UNBOUND)
1315 cpu = raw_smp_processor_id();
1316
1317 /* pwq which will be used unless @work is executing elsewhere */
1318 if (!(wq->flags & WQ_UNBOUND))
1319 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1320 else
1321 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1322
1323 /*
1324 * If @work was previously on a different pool, it might still be
1325 * running there, in which case the work needs to be queued on that
1326 * pool to guarantee non-reentrancy.
1327 */
1328 last_pool = get_work_pool(work);
1329 if (last_pool && last_pool != pwq->pool) {
1330 struct worker *worker;
1331
1332 spin_lock(&last_pool->lock);
1333
1334 worker = find_worker_executing_work(last_pool, work);
1335
1336 if (worker && worker->current_pwq->wq == wq) {
1337 pwq = worker->current_pwq;
1338 } else {
1339 /* meh... not running there, queue here */
1340 spin_unlock(&last_pool->lock);
1341 spin_lock(&pwq->pool->lock);
1342 }
1343 } else {
1344 spin_lock(&pwq->pool->lock);
1345 }
1346
1347 /*
1348 * pwq is determined and locked. For unbound pools, we could have
1349 * raced with pwq release and it could already be dead. If its
1350 * refcnt is zero, repeat pwq selection. Note that pwqs never die
1351 * without another pwq replacing it in the numa_pwq_tbl or while
1352 * work items are executing on it, so the retrying is guaranteed to
1353 * make forward-progress.
1354 */
1355 if (unlikely(!pwq->refcnt)) {
1356 if (wq->flags & WQ_UNBOUND) {
1357 spin_unlock(&pwq->pool->lock);
1358 cpu_relax();
1359 goto retry;
1360 }
1361 /* oops */
1362 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1363 wq->name, cpu);
1364 }
1365
1366 /* pwq determined, queue */
1367 trace_workqueue_queue_work(req_cpu, pwq, work);
1368
1369 if (WARN_ON(!list_empty(&work->entry))) {
1370 spin_unlock(&pwq->pool->lock);
1371 return;
1372 }
1373
1374 pwq->nr_in_flight[pwq->work_color]++;
1375 work_flags = work_color_to_flags(pwq->work_color);
1376
1377 if (likely(pwq->nr_active < pwq->max_active)) {
1378 trace_workqueue_activate_work(work);
1379 pwq->nr_active++;
1380 worklist = &pwq->pool->worklist;
1381 } else {
1382 work_flags |= WORK_STRUCT_DELAYED;
1383 worklist = &pwq->delayed_works;
1384 }
1385
1386 insert_work(pwq, work, worklist, work_flags);
1387
1388 spin_unlock(&pwq->pool->lock);
1389 }
1390
1391 /**
1392 * queue_work_on - queue work on specific cpu
1393 * @cpu: CPU number to execute work on
1394 * @wq: workqueue to use
1395 * @work: work to queue
1396 *
1397 * We queue the work to a specific CPU, the caller must ensure it
1398 * can't go away.
1399 *
1400 * Return: %false if @work was already on a queue, %true otherwise.
1401 */
1402 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1403 struct work_struct *work)
1404 {
1405 bool ret = false;
1406 unsigned long flags;
1407
1408 local_irq_save(flags);
1409
1410 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1411 __queue_work(cpu, wq, work);
1412 ret = true;
1413 }
1414
1415 local_irq_restore(flags);
1416 return ret;
1417 }
1418 EXPORT_SYMBOL(queue_work_on);
1419
1420 void delayed_work_timer_fn(unsigned long __data)
1421 {
1422 struct delayed_work *dwork = (struct delayed_work *)__data;
1423
1424 /* should have been called from irqsafe timer with irq already off */
1425 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1426 }
1427 EXPORT_SYMBOL(delayed_work_timer_fn);
1428
1429 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1430 struct delayed_work *dwork, unsigned long delay)
1431 {
1432 struct timer_list *timer = &dwork->timer;
1433 struct work_struct *work = &dwork->work;
1434
1435 WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
1436 timer->data != (unsigned long)dwork);
1437 WARN_ON_ONCE(timer_pending(timer));
1438 WARN_ON_ONCE(!list_empty(&work->entry));
1439
1440 /*
1441 * If @delay is 0, queue @dwork->work immediately. This is for
1442 * both optimization and correctness. The earliest @timer can
1443 * expire is on the closest next tick and delayed_work users depend
1444 * on that there's no such delay when @delay is 0.
1445 */
1446 if (!delay) {
1447 __queue_work(cpu, wq, &dwork->work);
1448 return;
1449 }
1450
1451 timer_stats_timer_set_start_info(&dwork->timer);
1452
1453 dwork->wq = wq;
1454 dwork->cpu = cpu;
1455 timer->expires = jiffies + delay;
1456
1457 if (unlikely(cpu != WORK_CPU_UNBOUND))
1458 add_timer_on(timer, cpu);
1459 else
1460 add_timer(timer);
1461 }
1462
1463 /**
1464 * queue_delayed_work_on - queue work on specific CPU after delay
1465 * @cpu: CPU number to execute work on
1466 * @wq: workqueue to use
1467 * @dwork: work to queue
1468 * @delay: number of jiffies to wait before queueing
1469 *
1470 * Return: %false if @work was already on a queue, %true otherwise. If
1471 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1472 * execution.
1473 */
1474 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1475 struct delayed_work *dwork, unsigned long delay)
1476 {
1477 struct work_struct *work = &dwork->work;
1478 bool ret = false;
1479 unsigned long flags;
1480
1481 /* read the comment in __queue_work() */
1482 local_irq_save(flags);
1483
1484 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1485 __queue_delayed_work(cpu, wq, dwork, delay);
1486 ret = true;
1487 }
1488
1489 local_irq_restore(flags);
1490 return ret;
1491 }
1492 EXPORT_SYMBOL(queue_delayed_work_on);
1493
1494 /**
1495 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1496 * @cpu: CPU number to execute work on
1497 * @wq: workqueue to use
1498 * @dwork: work to queue
1499 * @delay: number of jiffies to wait before queueing
1500 *
1501 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1502 * modify @dwork's timer so that it expires after @delay. If @delay is
1503 * zero, @work is guaranteed to be scheduled immediately regardless of its
1504 * current state.
1505 *
1506 * Return: %false if @dwork was idle and queued, %true if @dwork was
1507 * pending and its timer was modified.
1508 *
1509 * This function is safe to call from any context including IRQ handler.
1510 * See try_to_grab_pending() for details.
1511 */
1512 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1513 struct delayed_work *dwork, unsigned long delay)
1514 {
1515 unsigned long flags;
1516 int ret;
1517
1518 do {
1519 ret = try_to_grab_pending(&dwork->work, true, &flags);
1520 } while (unlikely(ret == -EAGAIN));
1521
1522 if (likely(ret >= 0)) {
1523 __queue_delayed_work(cpu, wq, dwork, delay);
1524 local_irq_restore(flags);
1525 }
1526
1527 /* -ENOENT from try_to_grab_pending() becomes %true */
1528 return ret;
1529 }
1530 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
1531
1532 /**
1533 * worker_enter_idle - enter idle state
1534 * @worker: worker which is entering idle state
1535 *
1536 * @worker is entering idle state. Update stats and idle timer if
1537 * necessary.
1538 *
1539 * LOCKING:
1540 * spin_lock_irq(pool->lock).
1541 */
1542 static void worker_enter_idle(struct worker *worker)
1543 {
1544 struct worker_pool *pool = worker->pool;
1545
1546 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1547 WARN_ON_ONCE(!list_empty(&worker->entry) &&
1548 (worker->hentry.next || worker->hentry.pprev)))
1549 return;
1550
1551 /* can't use worker_set_flags(), also called from create_worker() */
1552 worker->flags |= WORKER_IDLE;
1553 pool->nr_idle++;
1554 worker->last_active = jiffies;
1555
1556 /* idle_list is LIFO */
1557 list_add(&worker->entry, &pool->idle_list);
1558
1559 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1560 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1561
1562 /*
1563 * Sanity check nr_running. Because wq_unbind_fn() releases
1564 * pool->lock between setting %WORKER_UNBOUND and zapping
1565 * nr_running, the warning may trigger spuriously. Check iff
1566 * unbind is not in progress.
1567 */
1568 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1569 pool->nr_workers == pool->nr_idle &&
1570 atomic_read(&pool->nr_running));
1571 }
1572
1573 /**
1574 * worker_leave_idle - leave idle state
1575 * @worker: worker which is leaving idle state
1576 *
1577 * @worker is leaving idle state. Update stats.
1578 *
1579 * LOCKING:
1580 * spin_lock_irq(pool->lock).
1581 */
1582 static void worker_leave_idle(struct worker *worker)
1583 {
1584 struct worker_pool *pool = worker->pool;
1585
1586 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1587 return;
1588 worker_clr_flags(worker, WORKER_IDLE);
1589 pool->nr_idle--;
1590 list_del_init(&worker->entry);
1591 }
1592
1593 static struct worker *alloc_worker(int node)
1594 {
1595 struct worker *worker;
1596
1597 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1598 if (worker) {
1599 INIT_LIST_HEAD(&worker->entry);
1600 INIT_LIST_HEAD(&worker->scheduled);
1601 INIT_LIST_HEAD(&worker->node);
1602 /* on creation a worker is in !idle && prep state */
1603 worker->flags = WORKER_PREP;
1604 }
1605 return worker;
1606 }
1607
1608 /**
1609 * worker_attach_to_pool() - attach a worker to a pool
1610 * @worker: worker to be attached
1611 * @pool: the target pool
1612 *
1613 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
1614 * cpu-binding of @worker are kept coordinated with the pool across
1615 * cpu-[un]hotplugs.
1616 */
1617 static void worker_attach_to_pool(struct worker *worker,
1618 struct worker_pool *pool)
1619 {
1620 mutex_lock(&pool->attach_mutex);
1621
1622 /*
1623 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1624 * online CPUs. It'll be re-applied when any of the CPUs come up.
1625 */
1626 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1627
1628 /*
1629 * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
1630 * stable across this function. See the comments above the
1631 * flag definition for details.
1632 */
1633 if (pool->flags & POOL_DISASSOCIATED)
1634 worker->flags |= WORKER_UNBOUND;
1635
1636 list_add_tail(&worker->node, &pool->workers);
1637
1638 mutex_unlock(&pool->attach_mutex);
1639 }
1640
1641 /**
1642 * worker_detach_from_pool() - detach a worker from its pool
1643 * @worker: worker which is attached to its pool
1644 * @pool: the pool @worker is attached to
1645 *
1646 * Undo the attaching which had been done in worker_attach_to_pool(). The
1647 * caller worker shouldn't access to the pool after detached except it has
1648 * other reference to the pool.
1649 */
1650 static void worker_detach_from_pool(struct worker *worker,
1651 struct worker_pool *pool)
1652 {
1653 struct completion *detach_completion = NULL;
1654
1655 mutex_lock(&pool->attach_mutex);
1656 list_del(&worker->node);
1657 if (list_empty(&pool->workers))
1658 detach_completion = pool->detach_completion;
1659 mutex_unlock(&pool->attach_mutex);
1660
1661 /* clear leftover flags without pool->lock after it is detached */
1662 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1663
1664 if (detach_completion)
1665 complete(detach_completion);
1666 }
1667
1668 /**
1669 * create_worker - create a new workqueue worker
1670 * @pool: pool the new worker will belong to
1671 *
1672 * Create and start a new worker which is attached to @pool.
1673 *
1674 * CONTEXT:
1675 * Might sleep. Does GFP_KERNEL allocations.
1676 *
1677 * Return:
1678 * Pointer to the newly created worker.
1679 */
1680 static struct worker *create_worker(struct worker_pool *pool)
1681 {
1682 struct worker *worker = NULL;
1683 int id = -1;
1684 char id_buf[16];
1685
1686 /* ID is needed to determine kthread name */
1687 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1688 if (id < 0)
1689 goto fail;
1690
1691 worker = alloc_worker(pool->node);
1692 if (!worker)
1693 goto fail;
1694
1695 worker->pool = pool;
1696 worker->id = id;
1697
1698 if (pool->cpu >= 0)
1699 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1700 pool->attrs->nice < 0 ? "H" : "");
1701 else
1702 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1703
1704 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1705 "kworker/%s", id_buf);
1706 if (IS_ERR(worker->task))
1707 goto fail;
1708
1709 set_user_nice(worker->task, pool->attrs->nice);
1710
1711 /* prevent userland from meddling with cpumask of workqueue workers */
1712 worker->task->flags |= PF_NO_SETAFFINITY;
1713
1714 /* successful, attach the worker to the pool */
1715 worker_attach_to_pool(worker, pool);
1716
1717 /* start the newly created worker */
1718 spin_lock_irq(&pool->lock);
1719 worker->pool->nr_workers++;
1720 worker_enter_idle(worker);
1721 wake_up_process(worker->task);
1722 spin_unlock_irq(&pool->lock);
1723
1724 return worker;
1725
1726 fail:
1727 if (id >= 0)
1728 ida_simple_remove(&pool->worker_ida, id);
1729 kfree(worker);
1730 return NULL;
1731 }
1732
1733 /**
1734 * destroy_worker - destroy a workqueue worker
1735 * @worker: worker to be destroyed
1736 *
1737 * Destroy @worker and adjust @pool stats accordingly. The worker should
1738 * be idle.
1739 *
1740 * CONTEXT:
1741 * spin_lock_irq(pool->lock).
1742 */
1743 static void destroy_worker(struct worker *worker)
1744 {
1745 struct worker_pool *pool = worker->pool;
1746
1747 lockdep_assert_held(&pool->lock);
1748
1749 /* sanity check frenzy */
1750 if (WARN_ON(worker->current_work) ||
1751 WARN_ON(!list_empty(&worker->scheduled)) ||
1752 WARN_ON(!(worker->flags & WORKER_IDLE)))
1753 return;
1754
1755 pool->nr_workers--;
1756 pool->nr_idle--;
1757
1758 list_del_init(&worker->entry);
1759 worker->flags |= WORKER_DIE;
1760 wake_up_process(worker->task);
1761 }
1762
1763 static void idle_worker_timeout(unsigned long __pool)
1764 {
1765 struct worker_pool *pool = (void *)__pool;
1766
1767 spin_lock_irq(&pool->lock);
1768
1769 while (too_many_workers(pool)) {
1770 struct worker *worker;
1771 unsigned long expires;
1772
1773 /* idle_list is kept in LIFO order, check the last one */
1774 worker = list_entry(pool->idle_list.prev, struct worker, entry);
1775 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1776
1777 if (time_before(jiffies, expires)) {
1778 mod_timer(&pool->idle_timer, expires);
1779 break;
1780 }
1781
1782 destroy_worker(worker);
1783 }
1784
1785 spin_unlock_irq(&pool->lock);
1786 }
1787
1788 static void send_mayday(struct work_struct *work)
1789 {
1790 struct pool_workqueue *pwq = get_work_pwq(work);
1791 struct workqueue_struct *wq = pwq->wq;
1792
1793 lockdep_assert_held(&wq_mayday_lock);
1794
1795 if (!wq->rescuer)
1796 return;
1797
1798 /* mayday mayday mayday */
1799 if (list_empty(&pwq->mayday_node)) {
1800 /*
1801 * If @pwq is for an unbound wq, its base ref may be put at
1802 * any time due to an attribute change. Pin @pwq until the
1803 * rescuer is done with it.
1804 */
1805 get_pwq(pwq);
1806 list_add_tail(&pwq->mayday_node, &wq->maydays);
1807 wake_up_process(wq->rescuer->task);
1808 }
1809 }
1810
1811 static void pool_mayday_timeout(unsigned long __pool)
1812 {
1813 struct worker_pool *pool = (void *)__pool;
1814 struct work_struct *work;
1815
1816 spin_lock_irq(&pool->lock);
1817 spin_lock(&wq_mayday_lock); /* for wq->maydays */
1818
1819 if (need_to_create_worker(pool)) {
1820 /*
1821 * We've been trying to create a new worker but
1822 * haven't been successful. We might be hitting an
1823 * allocation deadlock. Send distress signals to
1824 * rescuers.
1825 */
1826 list_for_each_entry(work, &pool->worklist, entry)
1827 send_mayday(work);
1828 }
1829
1830 spin_unlock(&wq_mayday_lock);
1831 spin_unlock_irq(&pool->lock);
1832
1833 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1834 }
1835
1836 /**
1837 * maybe_create_worker - create a new worker if necessary
1838 * @pool: pool to create a new worker for
1839 *
1840 * Create a new worker for @pool if necessary. @pool is guaranteed to
1841 * have at least one idle worker on return from this function. If
1842 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1843 * sent to all rescuers with works scheduled on @pool to resolve
1844 * possible allocation deadlock.
1845 *
1846 * On return, need_to_create_worker() is guaranteed to be %false and
1847 * may_start_working() %true.
1848 *
1849 * LOCKING:
1850 * spin_lock_irq(pool->lock) which may be released and regrabbed
1851 * multiple times. Does GFP_KERNEL allocations. Called only from
1852 * manager.
1853 */
1854 static void maybe_create_worker(struct worker_pool *pool)
1855 __releases(&pool->lock)
1856 __acquires(&pool->lock)
1857 {
1858 restart:
1859 spin_unlock_irq(&pool->lock);
1860
1861 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1862 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1863
1864 while (true) {
1865 if (create_worker(pool) || !need_to_create_worker(pool))
1866 break;
1867
1868 schedule_timeout_interruptible(CREATE_COOLDOWN);
1869
1870 if (!need_to_create_worker(pool))
1871 break;
1872 }
1873
1874 del_timer_sync(&pool->mayday_timer);
1875 spin_lock_irq(&pool->lock);
1876 /*
1877 * This is necessary even after a new worker was just successfully
1878 * created as @pool->lock was dropped and the new worker might have
1879 * already become busy.
1880 */
1881 if (need_to_create_worker(pool))
1882 goto restart;
1883 }
1884
1885 /**
1886 * manage_workers - manage worker pool
1887 * @worker: self
1888 *
1889 * Assume the manager role and manage the worker pool @worker belongs
1890 * to. At any given time, there can be only zero or one manager per
1891 * pool. The exclusion is handled automatically by this function.
1892 *
1893 * The caller can safely start processing works on false return. On
1894 * true return, it's guaranteed that need_to_create_worker() is false
1895 * and may_start_working() is true.
1896 *
1897 * CONTEXT:
1898 * spin_lock_irq(pool->lock) which may be released and regrabbed
1899 * multiple times. Does GFP_KERNEL allocations.
1900 *
1901 * Return:
1902 * %false if the pool doesn't need management and the caller can safely
1903 * start processing works, %true if management function was performed and
1904 * the conditions that the caller verified before calling the function may
1905 * no longer be true.
1906 */
1907 static bool manage_workers(struct worker *worker)
1908 {
1909 struct worker_pool *pool = worker->pool;
1910
1911 /*
1912 * Anyone who successfully grabs manager_arb wins the arbitration
1913 * and becomes the manager. mutex_trylock() on pool->manager_arb
1914 * failure while holding pool->lock reliably indicates that someone
1915 * else is managing the pool and the worker which failed trylock
1916 * can proceed to executing work items. This means that anyone
1917 * grabbing manager_arb is responsible for actually performing
1918 * manager duties. If manager_arb is grabbed and released without
1919 * actual management, the pool may stall indefinitely.
1920 */
1921 if (!mutex_trylock(&pool->manager_arb))
1922 return false;
1923 pool->manager = worker;
1924
1925 maybe_create_worker(pool);
1926
1927 pool->manager = NULL;
1928 mutex_unlock(&pool->manager_arb);
1929 return true;
1930 }
1931
1932 /**
1933 * process_one_work - process single work
1934 * @worker: self
1935 * @work: work to process
1936 *
1937 * Process @work. This function contains all the logics necessary to
1938 * process a single work including synchronization against and
1939 * interaction with other workers on the same cpu, queueing and
1940 * flushing. As long as context requirement is met, any worker can
1941 * call this function to process a work.
1942 *
1943 * CONTEXT:
1944 * spin_lock_irq(pool->lock) which is released and regrabbed.
1945 */
1946 static void process_one_work(struct worker *worker, struct work_struct *work)
1947 __releases(&pool->lock)
1948 __acquires(&pool->lock)
1949 {
1950 struct pool_workqueue *pwq = get_work_pwq(work);
1951 struct worker_pool *pool = worker->pool;
1952 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
1953 int work_color;
1954 struct worker *collision;
1955 #ifdef CONFIG_LOCKDEP
1956 /*
1957 * It is permissible to free the struct work_struct from
1958 * inside the function that is called from it, this we need to
1959 * take into account for lockdep too. To avoid bogus "held
1960 * lock freed" warnings as well as problems when looking into
1961 * work->lockdep_map, make a copy and use that here.
1962 */
1963 struct lockdep_map lockdep_map;
1964
1965 lockdep_copy_map(&lockdep_map, &work->lockdep_map);
1966 #endif
1967 /* ensure we're on the correct CPU */
1968 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1969 raw_smp_processor_id() != pool->cpu);
1970
1971 /*
1972 * A single work shouldn't be executed concurrently by
1973 * multiple workers on a single cpu. Check whether anyone is
1974 * already processing the work. If so, defer the work to the
1975 * currently executing one.
1976 */
1977 collision = find_worker_executing_work(pool, work);
1978 if (unlikely(collision)) {
1979 move_linked_works(work, &collision->scheduled, NULL);
1980 return;
1981 }
1982
1983 /* claim and dequeue */
1984 debug_work_deactivate(work);
1985 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
1986 worker->current_work = work;
1987 worker->current_func = work->func;
1988 worker->current_pwq = pwq;
1989 work_color = get_work_color(work);
1990
1991 list_del_init(&work->entry);
1992
1993 /*
1994 * CPU intensive works don't participate in concurrency management.
1995 * They're the scheduler's responsibility. This takes @worker out
1996 * of concurrency management and the next code block will chain
1997 * execution of the pending work items.
1998 */
1999 if (unlikely(cpu_intensive))
2000 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2001
2002 /*
2003 * Wake up another worker if necessary. The condition is always
2004 * false for normal per-cpu workers since nr_running would always
2005 * be >= 1 at this point. This is used to chain execution of the
2006 * pending work items for WORKER_NOT_RUNNING workers such as the
2007 * UNBOUND and CPU_INTENSIVE ones.
2008 */
2009 if (need_more_worker(pool))
2010 wake_up_worker(pool);
2011
2012 /*
2013 * Record the last pool and clear PENDING which should be the last
2014 * update to @work. Also, do this inside @pool->lock so that
2015 * PENDING and queued state changes happen together while IRQ is
2016 * disabled.
2017 */
2018 set_work_pool_and_clear_pending(work, pool->id);
2019
2020 spin_unlock_irq(&pool->lock);
2021
2022 lock_map_acquire_read(&pwq->wq->lockdep_map);
2023 lock_map_acquire(&lockdep_map);
2024 trace_workqueue_execute_start(work);
2025 worker->current_func(work);
2026 /*
2027 * While we must be careful to not use "work" after this, the trace
2028 * point will only record its address.
2029 */
2030 trace_workqueue_execute_end(work);
2031 lock_map_release(&lockdep_map);
2032 lock_map_release(&pwq->wq->lockdep_map);
2033
2034 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2035 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2036 " last function: %pf\n",
2037 current->comm, preempt_count(), task_pid_nr(current),
2038 worker->current_func);
2039 debug_show_held_locks(current);
2040 dump_stack();
2041 }
2042
2043 /*
2044 * The following prevents a kworker from hogging CPU on !PREEMPT
2045 * kernels, where a requeueing work item waiting for something to
2046 * happen could deadlock with stop_machine as such work item could
2047 * indefinitely requeue itself while all other CPUs are trapped in
2048 * stop_machine. At the same time, report a quiescent RCU state so
2049 * the same condition doesn't freeze RCU.
2050 */
2051 cond_resched_rcu_qs();
2052
2053 spin_lock_irq(&pool->lock);
2054
2055 /* clear cpu intensive status */
2056 if (unlikely(cpu_intensive))
2057 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2058
2059 /* we're done with it, release */
2060 hash_del(&worker->hentry);
2061 worker->current_work = NULL;
2062 worker->current_func = NULL;
2063 worker->current_pwq = NULL;
2064 worker->desc_valid = false;
2065 pwq_dec_nr_in_flight(pwq, work_color);
2066 }
2067
2068 /**
2069 * process_scheduled_works - process scheduled works
2070 * @worker: self
2071 *
2072 * Process all scheduled works. Please note that the scheduled list
2073 * may change while processing a work, so this function repeatedly
2074 * fetches a work from the top and executes it.
2075 *
2076 * CONTEXT:
2077 * spin_lock_irq(pool->lock) which may be released and regrabbed
2078 * multiple times.
2079 */
2080 static void process_scheduled_works(struct worker *worker)
2081 {
2082 while (!list_empty(&worker->scheduled)) {
2083 struct work_struct *work = list_first_entry(&worker->scheduled,
2084 struct work_struct, entry);
2085 process_one_work(worker, work);
2086 }
2087 }
2088
2089 /**
2090 * worker_thread - the worker thread function
2091 * @__worker: self
2092 *
2093 * The worker thread function. All workers belong to a worker_pool -
2094 * either a per-cpu one or dynamic unbound one. These workers process all
2095 * work items regardless of their specific target workqueue. The only
2096 * exception is work items which belong to workqueues with a rescuer which
2097 * will be explained in rescuer_thread().
2098 *
2099 * Return: 0
2100 */
2101 static int worker_thread(void *__worker)
2102 {
2103 struct worker *worker = __worker;
2104 struct worker_pool *pool = worker->pool;
2105
2106 /* tell the scheduler that this is a workqueue worker */
2107 worker->task->flags |= PF_WQ_WORKER;
2108 woke_up:
2109 spin_lock_irq(&pool->lock);
2110
2111 /* am I supposed to die? */
2112 if (unlikely(worker->flags & WORKER_DIE)) {
2113 spin_unlock_irq(&pool->lock);
2114 WARN_ON_ONCE(!list_empty(&worker->entry));
2115 worker->task->flags &= ~PF_WQ_WORKER;
2116
2117 set_task_comm(worker->task, "kworker/dying");
2118 ida_simple_remove(&pool->worker_ida, worker->id);
2119 worker_detach_from_pool(worker, pool);
2120 kfree(worker);
2121 return 0;
2122 }
2123
2124 worker_leave_idle(worker);
2125 recheck:
2126 /* no more worker necessary? */
2127 if (!need_more_worker(pool))
2128 goto sleep;
2129
2130 /* do we need to manage? */
2131 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2132 goto recheck;
2133
2134 /*
2135 * ->scheduled list can only be filled while a worker is
2136 * preparing to process a work or actually processing it.
2137 * Make sure nobody diddled with it while I was sleeping.
2138 */
2139 WARN_ON_ONCE(!list_empty(&worker->scheduled));
2140
2141 /*
2142 * Finish PREP stage. We're guaranteed to have at least one idle
2143 * worker or that someone else has already assumed the manager
2144 * role. This is where @worker starts participating in concurrency
2145 * management if applicable and concurrency management is restored
2146 * after being rebound. See rebind_workers() for details.
2147 */
2148 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2149
2150 do {
2151 struct work_struct *work =
2152 list_first_entry(&pool->worklist,
2153 struct work_struct, entry);
2154
2155 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2156 /* optimization path, not strictly necessary */
2157 process_one_work(worker, work);
2158 if (unlikely(!list_empty(&worker->scheduled)))
2159 process_scheduled_works(worker);
2160 } else {
2161 move_linked_works(work, &worker->scheduled, NULL);
2162 process_scheduled_works(worker);
2163 }
2164 } while (keep_working(pool));
2165
2166 worker_set_flags(worker, WORKER_PREP);
2167 sleep:
2168 /*
2169 * pool->lock is held and there's no work to process and no need to
2170 * manage, sleep. Workers are woken up only while holding
2171 * pool->lock or from local cpu, so setting the current state
2172 * before releasing pool->lock is enough to prevent losing any
2173 * event.
2174 */
2175 worker_enter_idle(worker);
2176 __set_current_state(TASK_INTERRUPTIBLE);
2177 spin_unlock_irq(&pool->lock);
2178 schedule();
2179 goto woke_up;
2180 }
2181
2182 /**
2183 * rescuer_thread - the rescuer thread function
2184 * @__rescuer: self
2185 *
2186 * Workqueue rescuer thread function. There's one rescuer for each
2187 * workqueue which has WQ_MEM_RECLAIM set.
2188 *
2189 * Regular work processing on a pool may block trying to create a new
2190 * worker which uses GFP_KERNEL allocation which has slight chance of
2191 * developing into deadlock if some works currently on the same queue
2192 * need to be processed to satisfy the GFP_KERNEL allocation. This is
2193 * the problem rescuer solves.
2194 *
2195 * When such condition is possible, the pool summons rescuers of all
2196 * workqueues which have works queued on the pool and let them process
2197 * those works so that forward progress can be guaranteed.
2198 *
2199 * This should happen rarely.
2200 *
2201 * Return: 0
2202 */
2203 static int rescuer_thread(void *__rescuer)
2204 {
2205 struct worker *rescuer = __rescuer;
2206 struct workqueue_struct *wq = rescuer->rescue_wq;
2207 struct list_head *scheduled = &rescuer->scheduled;
2208 bool should_stop;
2209
2210 set_user_nice(current, RESCUER_NICE_LEVEL);
2211
2212 /*
2213 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
2214 * doesn't participate in concurrency management.
2215 */
2216 rescuer->task->flags |= PF_WQ_WORKER;
2217 repeat:
2218 set_current_state(TASK_INTERRUPTIBLE);
2219
2220 /*
2221 * By the time the rescuer is requested to stop, the workqueue
2222 * shouldn't have any work pending, but @wq->maydays may still have
2223 * pwq(s) queued. This can happen by non-rescuer workers consuming
2224 * all the work items before the rescuer got to them. Go through
2225 * @wq->maydays processing before acting on should_stop so that the
2226 * list is always empty on exit.
2227 */
2228 should_stop = kthread_should_stop();
2229
2230 /* see whether any pwq is asking for help */
2231 spin_lock_irq(&wq_mayday_lock);
2232
2233 while (!list_empty(&wq->maydays)) {
2234 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2235 struct pool_workqueue, mayday_node);
2236 struct worker_pool *pool = pwq->pool;
2237 struct work_struct *work, *n;
2238
2239 __set_current_state(TASK_RUNNING);
2240 list_del_init(&pwq->mayday_node);
2241
2242 spin_unlock_irq(&wq_mayday_lock);
2243
2244 worker_attach_to_pool(rescuer, pool);
2245
2246 spin_lock_irq(&pool->lock);
2247 rescuer->pool = pool;
2248
2249 /*
2250 * Slurp in all works issued via this workqueue and
2251 * process'em.
2252 */
2253 WARN_ON_ONCE(!list_empty(scheduled));
2254 list_for_each_entry_safe(work, n, &pool->worklist, entry)
2255 if (get_work_pwq(work) == pwq)
2256 move_linked_works(work, scheduled, &n);
2257
2258 if (!list_empty(scheduled)) {
2259 process_scheduled_works(rescuer);
2260
2261 /*
2262 * The above execution of rescued work items could
2263 * have created more to rescue through
2264 * pwq_activate_first_delayed() or chained
2265 * queueing. Let's put @pwq back on mayday list so
2266 * that such back-to-back work items, which may be
2267 * being used to relieve memory pressure, don't
2268 * incur MAYDAY_INTERVAL delay inbetween.
2269 */
2270 if (need_to_create_worker(pool)) {
2271 spin_lock(&wq_mayday_lock);
2272 get_pwq(pwq);
2273 list_move_tail(&pwq->mayday_node, &wq->maydays);
2274 spin_unlock(&wq_mayday_lock);
2275 }
2276 }
2277
2278 /*
2279 * Put the reference grabbed by send_mayday(). @pool won't
2280 * go away while we're still attached to it.
2281 */
2282 put_pwq(pwq);
2283
2284 /*
2285 * Leave this pool. If need_more_worker() is %true, notify a
2286 * regular worker; otherwise, we end up with 0 concurrency
2287 * and stalling the execution.
2288 */
2289 if (need_more_worker(pool))
2290 wake_up_worker(pool);
2291
2292 rescuer->pool = NULL;
2293 spin_unlock_irq(&pool->lock);
2294
2295 worker_detach_from_pool(rescuer, pool);
2296
2297 spin_lock_irq(&wq_mayday_lock);
2298 }
2299
2300 spin_unlock_irq(&wq_mayday_lock);
2301
2302 if (should_stop) {
2303 __set_current_state(TASK_RUNNING);
2304 rescuer->task->flags &= ~PF_WQ_WORKER;
2305 return 0;
2306 }
2307
2308 /* rescuers should never participate in concurrency management */
2309 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2310 schedule();
2311 goto repeat;
2312 }
2313
2314 struct wq_barrier {
2315 struct work_struct work;
2316 struct completion done;
2317 struct task_struct *task; /* purely informational */
2318 };
2319
2320 static void wq_barrier_func(struct work_struct *work)
2321 {
2322 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2323 complete(&barr->done);
2324 }
2325
2326 /**
2327 * insert_wq_barrier - insert a barrier work
2328 * @pwq: pwq to insert barrier into
2329 * @barr: wq_barrier to insert
2330 * @target: target work to attach @barr to
2331 * @worker: worker currently executing @target, NULL if @target is not executing
2332 *
2333 * @barr is linked to @target such that @barr is completed only after
2334 * @target finishes execution. Please note that the ordering
2335 * guarantee is observed only with respect to @target and on the local
2336 * cpu.
2337 *
2338 * Currently, a queued barrier can't be canceled. This is because
2339 * try_to_grab_pending() can't determine whether the work to be
2340 * grabbed is at the head of the queue and thus can't clear LINKED
2341 * flag of the previous work while there must be a valid next work
2342 * after a work with LINKED flag set.
2343 *
2344 * Note that when @worker is non-NULL, @target may be modified
2345 * underneath us, so we can't reliably determine pwq from @target.
2346 *
2347 * CONTEXT:
2348 * spin_lock_irq(pool->lock).
2349 */
2350 static void insert_wq_barrier(struct pool_workqueue *pwq,
2351 struct wq_barrier *barr,
2352 struct work_struct *target, struct worker *worker)
2353 {
2354 struct list_head *head;
2355 unsigned int linked = 0;
2356
2357 /*
2358 * debugobject calls are safe here even with pool->lock locked
2359 * as we know for sure that this will not trigger any of the
2360 * checks and call back into the fixup functions where we
2361 * might deadlock.
2362 */
2363 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2364 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2365 init_completion(&barr->done);
2366 barr->task = current;
2367
2368 /*
2369 * If @target is currently being executed, schedule the
2370 * barrier to the worker; otherwise, put it after @target.
2371 */
2372 if (worker)
2373 head = worker->scheduled.next;
2374 else {
2375 unsigned long *bits = work_data_bits(target);
2376
2377 head = target->entry.next;
2378 /* there can already be other linked works, inherit and set */
2379 linked = *bits & WORK_STRUCT_LINKED;
2380 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2381 }
2382
2383 debug_work_activate(&barr->work);
2384 insert_work(pwq, &barr->work, head,
2385 work_color_to_flags(WORK_NO_COLOR) | linked);
2386 }
2387
2388 /**
2389 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2390 * @wq: workqueue being flushed
2391 * @flush_color: new flush color, < 0 for no-op
2392 * @work_color: new work color, < 0 for no-op
2393 *
2394 * Prepare pwqs for workqueue flushing.
2395 *
2396 * If @flush_color is non-negative, flush_color on all pwqs should be
2397 * -1. If no pwq has in-flight commands at the specified color, all
2398 * pwq->flush_color's stay at -1 and %false is returned. If any pwq
2399 * has in flight commands, its pwq->flush_color is set to
2400 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2401 * wakeup logic is armed and %true is returned.
2402 *
2403 * The caller should have initialized @wq->first_flusher prior to
2404 * calling this function with non-negative @flush_color. If
2405 * @flush_color is negative, no flush color update is done and %false
2406 * is returned.
2407 *
2408 * If @work_color is non-negative, all pwqs should have the same
2409 * work_color which is previous to @work_color and all will be
2410 * advanced to @work_color.
2411 *
2412 * CONTEXT:
2413 * mutex_lock(wq->mutex).
2414 *
2415 * Return:
2416 * %true if @flush_color >= 0 and there's something to flush. %false
2417 * otherwise.
2418 */
2419 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2420 int flush_color, int work_color)
2421 {
2422 bool wait = false;
2423 struct pool_workqueue *pwq;
2424
2425 if (flush_color >= 0) {
2426 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2427 atomic_set(&wq->nr_pwqs_to_flush, 1);
2428 }
2429
2430 for_each_pwq(pwq, wq) {
2431 struct worker_pool *pool = pwq->pool;
2432
2433 spin_lock_irq(&pool->lock);
2434
2435 if (flush_color >= 0) {
2436 WARN_ON_ONCE(pwq->flush_color != -1);
2437
2438 if (pwq->nr_in_flight[flush_color]) {
2439 pwq->flush_color = flush_color;
2440 atomic_inc(&wq->nr_pwqs_to_flush);
2441 wait = true;
2442 }
2443 }
2444
2445 if (work_color >= 0) {
2446 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2447 pwq->work_color = work_color;
2448 }
2449
2450 spin_unlock_irq(&pool->lock);
2451 }
2452
2453 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2454 complete(&wq->first_flusher->done);
2455
2456 return wait;
2457 }
2458
2459 /**
2460 * flush_workqueue - ensure that any scheduled work has run to completion.
2461 * @wq: workqueue to flush
2462 *
2463 * This function sleeps until all work items which were queued on entry
2464 * have finished execution, but it is not livelocked by new incoming ones.
2465 */
2466 void flush_workqueue(struct workqueue_struct *wq)
2467 {
2468 struct wq_flusher this_flusher = {
2469 .list = LIST_HEAD_INIT(this_flusher.list),
2470 .flush_color = -1,
2471 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2472 };
2473 int next_color;
2474
2475 lock_map_acquire(&wq->lockdep_map);
2476 lock_map_release(&wq->lockdep_map);
2477
2478 mutex_lock(&wq->mutex);
2479
2480 /*
2481 * Start-to-wait phase
2482 */
2483 next_color = work_next_color(wq->work_color);
2484
2485 if (next_color != wq->flush_color) {
2486 /*
2487 * Color space is not full. The current work_color
2488 * becomes our flush_color and work_color is advanced
2489 * by one.
2490 */
2491 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2492 this_flusher.flush_color = wq->work_color;
2493 wq->work_color = next_color;
2494
2495 if (!wq->first_flusher) {
2496 /* no flush in progress, become the first flusher */
2497 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2498
2499 wq->first_flusher = &this_flusher;
2500
2501 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2502 wq->work_color)) {
2503 /* nothing to flush, done */
2504 wq->flush_color = next_color;
2505 wq->first_flusher = NULL;
2506 goto out_unlock;
2507 }
2508 } else {
2509 /* wait in queue */
2510 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2511 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2512 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2513 }
2514 } else {
2515 /*
2516 * Oops, color space is full, wait on overflow queue.
2517 * The next flush completion will assign us
2518 * flush_color and transfer to flusher_queue.
2519 */
2520 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2521 }
2522
2523 mutex_unlock(&wq->mutex);
2524
2525 wait_for_completion(&this_flusher.done);
2526
2527 /*
2528 * Wake-up-and-cascade phase
2529 *
2530 * First flushers are responsible for cascading flushes and
2531 * handling overflow. Non-first flushers can simply return.
2532 */
2533 if (wq->first_flusher != &this_flusher)
2534 return;
2535
2536 mutex_lock(&wq->mutex);
2537
2538 /* we might have raced, check again with mutex held */
2539 if (wq->first_flusher != &this_flusher)
2540 goto out_unlock;
2541
2542 wq->first_flusher = NULL;
2543
2544 WARN_ON_ONCE(!list_empty(&this_flusher.list));
2545 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2546
2547 while (true) {
2548 struct wq_flusher *next, *tmp;
2549
2550 /* complete all the flushers sharing the current flush color */
2551 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2552 if (next->flush_color != wq->flush_color)
2553 break;
2554 list_del_init(&next->list);
2555 complete(&next->done);
2556 }
2557
2558 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2559 wq->flush_color != work_next_color(wq->work_color));
2560
2561 /* this flush_color is finished, advance by one */
2562 wq->flush_color = work_next_color(wq->flush_color);
2563
2564 /* one color has been freed, handle overflow queue */
2565 if (!list_empty(&wq->flusher_overflow)) {
2566 /*
2567 * Assign the same color to all overflowed
2568 * flushers, advance work_color and append to
2569 * flusher_queue. This is the start-to-wait
2570 * phase for these overflowed flushers.
2571 */
2572 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2573 tmp->flush_color = wq->work_color;
2574
2575 wq->work_color = work_next_color(wq->work_color);
2576
2577 list_splice_tail_init(&wq->flusher_overflow,
2578 &wq->flusher_queue);
2579 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2580 }
2581
2582 if (list_empty(&wq->flusher_queue)) {
2583 WARN_ON_ONCE(wq->flush_color != wq->work_color);
2584 break;
2585 }
2586
2587 /*
2588 * Need to flush more colors. Make the next flusher
2589 * the new first flusher and arm pwqs.
2590 */
2591 WARN_ON_ONCE(wq->flush_color == wq->work_color);
2592 WARN_ON_ONCE(wq->flush_color != next->flush_color);
2593
2594 list_del_init(&next->list);
2595 wq->first_flusher = next;
2596
2597 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2598 break;
2599
2600 /*
2601 * Meh... this color is already done, clear first
2602 * flusher and repeat cascading.
2603 */
2604 wq->first_flusher = NULL;
2605 }
2606
2607 out_unlock:
2608 mutex_unlock(&wq->mutex);
2609 }
2610 EXPORT_SYMBOL_GPL(flush_workqueue);
2611
2612 /**
2613 * drain_workqueue - drain a workqueue
2614 * @wq: workqueue to drain
2615 *
2616 * Wait until the workqueue becomes empty. While draining is in progress,
2617 * only chain queueing is allowed. IOW, only currently pending or running
2618 * work items on @wq can queue further work items on it. @wq is flushed
2619 * repeatedly until it becomes empty. The number of flushing is detemined
2620 * by the depth of chaining and should be relatively short. Whine if it
2621 * takes too long.
2622 */
2623 void drain_workqueue(struct workqueue_struct *wq)
2624 {
2625 unsigned int flush_cnt = 0;
2626 struct pool_workqueue *pwq;
2627
2628 /*
2629 * __queue_work() needs to test whether there are drainers, is much
2630 * hotter than drain_workqueue() and already looks at @wq->flags.
2631 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
2632 */
2633 mutex_lock(&wq->mutex);
2634 if (!wq->nr_drainers++)
2635 wq->flags |= __WQ_DRAINING;
2636 mutex_unlock(&wq->mutex);
2637 reflush:
2638 flush_workqueue(wq);
2639
2640 mutex_lock(&wq->mutex);
2641
2642 for_each_pwq(pwq, wq) {
2643 bool drained;
2644
2645 spin_lock_irq(&pwq->pool->lock);
2646 drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2647 spin_unlock_irq(&pwq->pool->lock);
2648
2649 if (drained)
2650 continue;
2651
2652 if (++flush_cnt == 10 ||
2653 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2654 pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2655 wq->name, flush_cnt);
2656
2657 mutex_unlock(&wq->mutex);
2658 goto reflush;
2659 }
2660
2661 if (!--wq->nr_drainers)
2662 wq->flags &= ~__WQ_DRAINING;
2663 mutex_unlock(&wq->mutex);
2664 }
2665 EXPORT_SYMBOL_GPL(drain_workqueue);
2666
2667 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2668 {
2669 struct worker *worker = NULL;
2670 struct worker_pool *pool;
2671 struct pool_workqueue *pwq;
2672
2673 might_sleep();
2674
2675 local_irq_disable();
2676 pool = get_work_pool(work);
2677 if (!pool) {
2678 local_irq_enable();
2679 return false;
2680 }
2681
2682 spin_lock(&pool->lock);
2683 /* see the comment in try_to_grab_pending() with the same code */
2684 pwq = get_work_pwq(work);
2685 if (pwq) {
2686 if (unlikely(pwq->pool != pool))
2687 goto already_gone;
2688 } else {
2689 worker = find_worker_executing_work(pool, work);
2690 if (!worker)
2691 goto already_gone;
2692 pwq = worker->current_pwq;
2693 }
2694
2695 insert_wq_barrier(pwq, barr, work, worker);
2696 spin_unlock_irq(&pool->lock);
2697
2698 /*
2699 * If @max_active is 1 or rescuer is in use, flushing another work
2700 * item on the same workqueue may lead to deadlock. Make sure the
2701 * flusher is not running on the same workqueue by verifying write
2702 * access.
2703 */
2704 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
2705 lock_map_acquire(&pwq->wq->lockdep_map);
2706 else
2707 lock_map_acquire_read(&pwq->wq->lockdep_map);
2708 lock_map_release(&pwq->wq->lockdep_map);
2709
2710 return true;
2711 already_gone:
2712 spin_unlock_irq(&pool->lock);
2713 return false;
2714 }
2715
2716 /**
2717 * flush_work - wait for a work to finish executing the last queueing instance
2718 * @work: the work to flush
2719 *
2720 * Wait until @work has finished execution. @work is guaranteed to be idle
2721 * on return if it hasn't been requeued since flush started.
2722 *
2723 * Return:
2724 * %true if flush_work() waited for the work to finish execution,
2725 * %false if it was already idle.
2726 */
2727 bool flush_work(struct work_struct *work)
2728 {
2729 struct wq_barrier barr;
2730
2731 lock_map_acquire(&work->lockdep_map);
2732 lock_map_release(&work->lockdep_map);
2733
2734 if (start_flush_work(work, &barr)) {
2735 wait_for_completion(&barr.done);
2736 destroy_work_on_stack(&barr.work);
2737 return true;
2738 } else {
2739 return false;
2740 }
2741 }
2742 EXPORT_SYMBOL_GPL(flush_work);
2743
2744 struct cwt_wait {
2745 wait_queue_t wait;
2746 struct work_struct *work;
2747 };
2748
2749 static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
2750 {
2751 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
2752
2753 if (cwait->work != key)
2754 return 0;
2755 return autoremove_wake_function(wait, mode, sync, key);
2756 }
2757
2758 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2759 {
2760 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
2761 unsigned long flags;
2762 int ret;
2763
2764 do {
2765 ret = try_to_grab_pending(work, is_dwork, &flags);
2766 /*
2767 * If someone else is already canceling, wait for it to
2768 * finish. flush_work() doesn't work for PREEMPT_NONE
2769 * because we may get scheduled between @work's completion
2770 * and the other canceling task resuming and clearing
2771 * CANCELING - flush_work() will return false immediately
2772 * as @work is no longer busy, try_to_grab_pending() will
2773 * return -ENOENT as @work is still being canceled and the
2774 * other canceling task won't be able to clear CANCELING as
2775 * we're hogging the CPU.
2776 *
2777 * Let's wait for completion using a waitqueue. As this
2778 * may lead to the thundering herd problem, use a custom
2779 * wake function which matches @work along with exclusive
2780 * wait and wakeup.
2781 */
2782 if (unlikely(ret == -ENOENT)) {
2783 struct cwt_wait cwait;
2784
2785 init_wait(&cwait.wait);
2786 cwait.wait.func = cwt_wakefn;
2787 cwait.work = work;
2788
2789 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
2790 TASK_UNINTERRUPTIBLE);
2791 if (work_is_canceling(work))
2792 schedule();
2793 finish_wait(&cancel_waitq, &cwait.wait);
2794 }
2795 } while (unlikely(ret < 0));
2796
2797 /* tell other tasks trying to grab @work to back off */
2798 mark_work_canceling(work);
2799 local_irq_restore(flags);
2800
2801 flush_work(work);
2802 clear_work_data(work);
2803
2804 /*
2805 * Paired with prepare_to_wait() above so that either
2806 * waitqueue_active() is visible here or !work_is_canceling() is
2807 * visible there.
2808 */
2809 smp_mb();
2810 if (waitqueue_active(&cancel_waitq))
2811 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
2812
2813 return ret;
2814 }
2815
2816 /**
2817 * cancel_work_sync - cancel a work and wait for it to finish
2818 * @work: the work to cancel
2819 *
2820 * Cancel @work and wait for its execution to finish. This function
2821 * can be used even if the work re-queues itself or migrates to
2822 * another workqueue. On return from this function, @work is
2823 * guaranteed to be not pending or executing on any CPU.
2824 *
2825 * cancel_work_sync(&delayed_work->work) must not be used for
2826 * delayed_work's. Use cancel_delayed_work_sync() instead.
2827 *
2828 * The caller must ensure that the workqueue on which @work was last
2829 * queued can't be destroyed before this function returns.
2830 *
2831 * Return:
2832 * %true if @work was pending, %false otherwise.
2833 */
2834 bool cancel_work_sync(struct work_struct *work)
2835 {
2836 return __cancel_work_timer(work, false);
2837 }
2838 EXPORT_SYMBOL_GPL(cancel_work_sync);
2839
2840 /**
2841 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2842 * @dwork: the delayed work to flush
2843 *
2844 * Delayed timer is cancelled and the pending work is queued for
2845 * immediate execution. Like flush_work(), this function only
2846 * considers the last queueing instance of @dwork.
2847 *
2848 * Return:
2849 * %true if flush_work() waited for the work to finish execution,
2850 * %false if it was already idle.
2851 */
2852 bool flush_delayed_work(struct delayed_work *dwork)
2853 {
2854 local_irq_disable();
2855 if (del_timer_sync(&dwork->timer))
2856 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
2857 local_irq_enable();
2858 return flush_work(&dwork->work);
2859 }
2860 EXPORT_SYMBOL(flush_delayed_work);
2861
2862 /**
2863 * cancel_delayed_work - cancel a delayed work
2864 * @dwork: delayed_work to cancel
2865 *
2866 * Kill off a pending delayed_work.
2867 *
2868 * Return: %true if @dwork was pending and canceled; %false if it wasn't
2869 * pending.
2870 *
2871 * Note:
2872 * The work callback function may still be running on return, unless
2873 * it returns %true and the work doesn't re-arm itself. Explicitly flush or
2874 * use cancel_delayed_work_sync() to wait on it.
2875 *
2876 * This function is safe to call from any context including IRQ handler.
2877 */
2878 bool cancel_delayed_work(struct delayed_work *dwork)
2879 {
2880 unsigned long flags;
2881 int ret;
2882
2883 do {
2884 ret = try_to_grab_pending(&dwork->work, true, &flags);
2885 } while (unlikely(ret == -EAGAIN));
2886
2887 if (unlikely(ret < 0))
2888 return false;
2889
2890 set_work_pool_and_clear_pending(&dwork->work,
2891 get_work_pool_id(&dwork->work));
2892 local_irq_restore(flags);
2893 return ret;
2894 }
2895 EXPORT_SYMBOL(cancel_delayed_work);
2896
2897 /**
2898 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
2899 * @dwork: the delayed work cancel
2900 *
2901 * This is cancel_work_sync() for delayed works.
2902 *
2903 * Return:
2904 * %true if @dwork was pending, %false otherwise.
2905 */
2906 bool cancel_delayed_work_sync(struct delayed_work *dwork)
2907 {
2908 return __cancel_work_timer(&dwork->work, true);
2909 }
2910 EXPORT_SYMBOL(cancel_delayed_work_sync);
2911
2912 /**
2913 * schedule_on_each_cpu - execute a function synchronously on each online CPU
2914 * @func: the function to call
2915 *
2916 * schedule_on_each_cpu() executes @func on each online CPU using the
2917 * system workqueue and blocks until all CPUs have completed.
2918 * schedule_on_each_cpu() is very slow.
2919 *
2920 * Return:
2921 * 0 on success, -errno on failure.
2922 */
2923 int schedule_on_each_cpu(work_func_t func)
2924 {
2925 int cpu;
2926 struct work_struct __percpu *works;
2927
2928 works = alloc_percpu(struct work_struct);
2929 if (!works)
2930 return -ENOMEM;
2931
2932 get_online_cpus();
2933
2934 for_each_online_cpu(cpu) {
2935 struct work_struct *work = per_cpu_ptr(works, cpu);
2936
2937 INIT_WORK(work, func);
2938 schedule_work_on(cpu, work);
2939 }
2940
2941 for_each_online_cpu(cpu)
2942 flush_work(per_cpu_ptr(works, cpu));
2943
2944 put_online_cpus();
2945 free_percpu(works);
2946 return 0;
2947 }
2948
2949 /**
2950 * flush_scheduled_work - ensure that any scheduled work has run to completion.
2951 *
2952 * Forces execution of the kernel-global workqueue and blocks until its
2953 * completion.
2954 *
2955 * Think twice before calling this function! It's very easy to get into
2956 * trouble if you don't take great care. Either of the following situations
2957 * will lead to deadlock:
2958 *
2959 * One of the work items currently on the workqueue needs to acquire
2960 * a lock held by your code or its caller.
2961 *
2962 * Your code is running in the context of a work routine.
2963 *
2964 * They will be detected by lockdep when they occur, but the first might not
2965 * occur very often. It depends on what work items are on the workqueue and
2966 * what locks they need, which you have no control over.
2967 *
2968 * In most situations flushing the entire workqueue is overkill; you merely
2969 * need to know that a particular work item isn't queued and isn't running.
2970 * In such cases you should use cancel_delayed_work_sync() or
2971 * cancel_work_sync() instead.
2972 */
2973 void flush_scheduled_work(void)
2974 {
2975 flush_workqueue(system_wq);
2976 }
2977 EXPORT_SYMBOL(flush_scheduled_work);
2978
2979 /**
2980 * execute_in_process_context - reliably execute the routine with user context
2981 * @fn: the function to execute
2982 * @ew: guaranteed storage for the execute work structure (must
2983 * be available when the work executes)
2984 *
2985 * Executes the function immediately if process context is available,
2986 * otherwise schedules the function for delayed execution.
2987 *
2988 * Return: 0 - function was executed
2989 * 1 - function was scheduled for execution
2990 */
2991 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
2992 {
2993 if (!in_interrupt()) {
2994 fn(&ew->work);
2995 return 0;
2996 }
2997
2998 INIT_WORK(&ew->work, fn);
2999 schedule_work(&ew->work);
3000
3001 return 1;
3002 }
3003 EXPORT_SYMBOL_GPL(execute_in_process_context);
3004
3005 /**
3006 * free_workqueue_attrs - free a workqueue_attrs
3007 * @attrs: workqueue_attrs to free
3008 *
3009 * Undo alloc_workqueue_attrs().
3010 */
3011 void free_workqueue_attrs(struct workqueue_attrs *attrs)
3012 {
3013 if (attrs) {
3014 free_cpumask_var(attrs->cpumask);
3015 kfree(attrs);
3016 }
3017 }
3018
3019 /**
3020 * alloc_workqueue_attrs - allocate a workqueue_attrs
3021 * @gfp_mask: allocation mask to use
3022 *
3023 * Allocate a new workqueue_attrs, initialize with default settings and
3024 * return it.
3025 *
3026 * Return: The allocated new workqueue_attr on success. %NULL on failure.
3027 */
3028 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
3029 {
3030 struct workqueue_attrs *attrs;
3031
3032 attrs = kzalloc(sizeof(*attrs), gfp_mask);
3033 if (!attrs)
3034 goto fail;
3035 if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
3036 goto fail;
3037
3038 cpumask_copy(attrs->cpumask, cpu_possible_mask);
3039 return attrs;
3040 fail:
3041 free_workqueue_attrs(attrs);
3042 return NULL;
3043 }
3044
3045 static void copy_workqueue_attrs(struct workqueue_attrs *to,
3046 const struct workqueue_attrs *from)
3047 {
3048 to->nice = from->nice;
3049 cpumask_copy(to->cpumask, from->cpumask);
3050 /*
3051 * Unlike hash and equality test, this function doesn't ignore
3052 * ->no_numa as it is used for both pool and wq attrs. Instead,
3053 * get_unbound_pool() explicitly clears ->no_numa after copying.
3054 */
3055 to->no_numa = from->no_numa;
3056 }
3057
3058 /* hash value of the content of @attr */
3059 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3060 {
3061 u32 hash = 0;
3062
3063 hash = jhash_1word(attrs->nice, hash);
3064 hash = jhash(cpumask_bits(attrs->cpumask),
3065 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3066 return hash;
3067 }
3068
3069 /* content equality test */
3070 static bool wqattrs_equal(const struct workqueue_attrs *a,
3071 const struct workqueue_attrs *b)
3072 {
3073 if (a->nice != b->nice)
3074 return false;
3075 if (!cpumask_equal(a->cpumask, b->cpumask))
3076 return false;
3077 return true;
3078 }
3079
3080 /**
3081 * init_worker_pool - initialize a newly zalloc'd worker_pool
3082 * @pool: worker_pool to initialize
3083 *
3084 * Initiailize a newly zalloc'd @pool. It also allocates @pool->attrs.
3085 *
3086 * Return: 0 on success, -errno on failure. Even on failure, all fields
3087 * inside @pool proper are initialized and put_unbound_pool() can be called
3088 * on @pool safely to release it.
3089 */
3090 static int init_worker_pool(struct worker_pool *pool)
3091 {
3092 spin_lock_init(&pool->lock);
3093 pool->id = -1;
3094 pool->cpu = -1;
3095 pool->node = NUMA_NO_NODE;
3096 pool->flags |= POOL_DISASSOCIATED;
3097 INIT_LIST_HEAD(&pool->worklist);
3098 INIT_LIST_HEAD(&pool->idle_list);
3099 hash_init(pool->busy_hash);
3100
3101 init_timer_deferrable(&pool->idle_timer);
3102 pool->idle_timer.function = idle_worker_timeout;
3103 pool->idle_timer.data = (unsigned long)pool;
3104
3105 setup_timer(&pool->mayday_timer, pool_mayday_timeout,
3106 (unsigned long)pool);
3107
3108 mutex_init(&pool->manager_arb);
3109 mutex_init(&pool->attach_mutex);
3110 INIT_LIST_HEAD(&pool->workers);
3111
3112 ida_init(&pool->worker_ida);
3113 INIT_HLIST_NODE(&pool->hash_node);
3114 pool->refcnt = 1;
3115
3116 /* shouldn't fail above this point */
3117 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
3118 if (!pool->attrs)
3119 return -ENOMEM;
3120 return 0;
3121 }
3122
3123 static void rcu_free_wq(struct rcu_head *rcu)
3124 {
3125 struct workqueue_struct *wq =
3126 container_of(rcu, struct workqueue_struct, rcu);
3127
3128 if (!(wq->flags & WQ_UNBOUND))
3129 free_percpu(wq->cpu_pwqs);
3130 else
3131 free_workqueue_attrs(wq->unbound_attrs);
3132
3133 kfree(wq->rescuer);
3134 kfree(wq);
3135 }
3136
3137 static void rcu_free_pool(struct rcu_head *rcu)
3138 {
3139 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3140
3141 ida_destroy(&pool->worker_ida);
3142 free_workqueue_attrs(pool->attrs);
3143 kfree(pool);
3144 }
3145
3146 /**
3147 * put_unbound_pool - put a worker_pool
3148 * @pool: worker_pool to put
3149 *
3150 * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
3151 * safe manner. get_unbound_pool() calls this function on its failure path
3152 * and this function should be able to release pools which went through,
3153 * successfully or not, init_worker_pool().
3154 *
3155 * Should be called with wq_pool_mutex held.
3156 */
3157 static void put_unbound_pool(struct worker_pool *pool)
3158 {
3159 DECLARE_COMPLETION_ONSTACK(detach_completion);
3160 struct worker *worker;
3161
3162 lockdep_assert_held(&wq_pool_mutex);
3163
3164 if (--pool->refcnt)
3165 return;
3166
3167 /* sanity checks */
3168 if (WARN_ON(!(pool->cpu < 0)) ||
3169 WARN_ON(!list_empty(&pool->worklist)))
3170 return;
3171
3172 /* release id and unhash */
3173 if (pool->id >= 0)
3174 idr_remove(&worker_pool_idr, pool->id);
3175 hash_del(&pool->hash_node);
3176
3177 /*
3178 * Become the manager and destroy all workers. Grabbing
3179 * manager_arb prevents @pool's workers from blocking on
3180 * attach_mutex.
3181 */
3182 mutex_lock(&pool->manager_arb);
3183
3184 spin_lock_irq(&pool->lock);
3185 while ((worker = first_idle_worker(pool)))
3186 destroy_worker(worker);
3187 WARN_ON(pool->nr_workers || pool->nr_idle);
3188 spin_unlock_irq(&pool->lock);
3189
3190 mutex_lock(&pool->attach_mutex);
3191 if (!list_empty(&pool->workers))
3192 pool->detach_completion = &detach_completion;
3193 mutex_unlock(&pool->attach_mutex);
3194
3195 if (pool->detach_completion)
3196 wait_for_completion(pool->detach_completion);
3197
3198 mutex_unlock(&pool->manager_arb);
3199
3200 /* shut down the timers */
3201 del_timer_sync(&pool->idle_timer);
3202 del_timer_sync(&pool->mayday_timer);
3203
3204 /* sched-RCU protected to allow dereferences from get_work_pool() */
3205 call_rcu_sched(&pool->rcu, rcu_free_pool);
3206 }
3207
3208 /**
3209 * get_unbound_pool - get a worker_pool with the specified attributes
3210 * @attrs: the attributes of the worker_pool to get
3211 *
3212 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3213 * reference count and return it. If there already is a matching
3214 * worker_pool, it will be used; otherwise, this function attempts to
3215 * create a new one.
3216 *
3217 * Should be called with wq_pool_mutex held.
3218 *
3219 * Return: On success, a worker_pool with the same attributes as @attrs.
3220 * On failure, %NULL.
3221 */
3222 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3223 {
3224 u32 hash = wqattrs_hash(attrs);
3225 struct worker_pool *pool;
3226 int node;
3227
3228 lockdep_assert_held(&wq_pool_mutex);
3229
3230 /* do we already have a matching pool? */
3231 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3232 if (wqattrs_equal(pool->attrs, attrs)) {
3233 pool->refcnt++;
3234 return pool;
3235 }
3236 }
3237
3238 /* nope, create a new one */
3239 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
3240 if (!pool || init_worker_pool(pool) < 0)
3241 goto fail;
3242
3243 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
3244 copy_workqueue_attrs(pool->attrs, attrs);
3245
3246 /*
3247 * no_numa isn't a worker_pool attribute, always clear it. See
3248 * 'struct workqueue_attrs' comments for detail.
3249 */
3250 pool->attrs->no_numa = false;
3251
3252 /* if cpumask is contained inside a NUMA node, we belong to that node */
3253 if (wq_numa_enabled) {
3254 for_each_node(node) {
3255 if (cpumask_subset(pool->attrs->cpumask,
3256 wq_numa_possible_cpumask[node])) {
3257 pool->node = node;
3258 break;
3259 }
3260 }
3261 }
3262
3263 if (worker_pool_assign_id(pool) < 0)
3264 goto fail;
3265
3266 /* create and start the initial worker */
3267 if (!create_worker(pool))
3268 goto fail;
3269
3270 /* install */
3271 hash_add(unbound_pool_hash, &pool->hash_node, hash);
3272
3273 return pool;
3274 fail:
3275 if (pool)
3276 put_unbound_pool(pool);
3277 return NULL;
3278 }
3279
3280 static void rcu_free_pwq(struct rcu_head *rcu)
3281 {
3282 kmem_cache_free(pwq_cache,
3283 container_of(rcu, struct pool_workqueue, rcu));
3284 }
3285
3286 /*
3287 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3288 * and needs to be destroyed.
3289 */
3290 static void pwq_unbound_release_workfn(struct work_struct *work)
3291 {
3292 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3293 unbound_release_work);
3294 struct workqueue_struct *wq = pwq->wq;
3295 struct worker_pool *pool = pwq->pool;
3296 bool is_last;
3297
3298 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3299 return;
3300
3301 mutex_lock(&wq->mutex);
3302 list_del_rcu(&pwq->pwqs_node);
3303 is_last = list_empty(&wq->pwqs);
3304 mutex_unlock(&wq->mutex);
3305
3306 mutex_lock(&wq_pool_mutex);
3307 put_unbound_pool(pool);
3308 mutex_unlock(&wq_pool_mutex);
3309
3310 call_rcu_sched(&pwq->rcu, rcu_free_pwq);
3311
3312 /*
3313 * If we're the last pwq going away, @wq is already dead and no one
3314 * is gonna access it anymore. Schedule RCU free.
3315 */
3316 if (is_last)
3317 call_rcu_sched(&wq->rcu, rcu_free_wq);
3318 }
3319
3320 /**
3321 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3322 * @pwq: target pool_workqueue
3323 *
3324 * If @pwq isn't freezing, set @pwq->max_active to the associated
3325 * workqueue's saved_max_active and activate delayed work items
3326 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
3327 */
3328 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3329 {
3330 struct workqueue_struct *wq = pwq->wq;
3331 bool freezable = wq->flags & WQ_FREEZABLE;
3332
3333 /* for @wq->saved_max_active */
3334 lockdep_assert_held(&wq->mutex);
3335
3336 /* fast exit for non-freezable wqs */
3337 if (!freezable && pwq->max_active == wq->saved_max_active)
3338 return;
3339
3340 spin_lock_irq(&pwq->pool->lock);
3341
3342 /*
3343 * During [un]freezing, the caller is responsible for ensuring that
3344 * this function is called at least once after @workqueue_freezing
3345 * is updated and visible.
3346 */
3347 if (!freezable || !workqueue_freezing) {
3348 pwq->max_active = wq->saved_max_active;
3349
3350 while (!list_empty(&pwq->delayed_works) &&
3351 pwq->nr_active < pwq->max_active)
3352 pwq_activate_first_delayed(pwq);
3353
3354 /*
3355 * Need to kick a worker after thawed or an unbound wq's
3356 * max_active is bumped. It's a slow path. Do it always.
3357 */
3358 wake_up_worker(pwq->pool);
3359 } else {
3360 pwq->max_active = 0;
3361 }
3362
3363 spin_unlock_irq(&pwq->pool->lock);
3364 }
3365
3366 /* initialize newly alloced @pwq which is associated with @wq and @pool */
3367 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3368 struct worker_pool *pool)
3369 {
3370 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3371
3372 memset(pwq, 0, sizeof(*pwq));
3373
3374 pwq->pool = pool;
3375 pwq->wq = wq;
3376 pwq->flush_color = -1;
3377 pwq->refcnt = 1;
3378 INIT_LIST_HEAD(&pwq->delayed_works);
3379 INIT_LIST_HEAD(&pwq->pwqs_node);
3380 INIT_LIST_HEAD(&pwq->mayday_node);
3381 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3382 }
3383
3384 /* sync @pwq with the current state of its associated wq and link it */
3385 static void link_pwq(struct pool_workqueue *pwq)
3386 {
3387 struct workqueue_struct *wq = pwq->wq;
3388
3389 lockdep_assert_held(&wq->mutex);
3390
3391 /* may be called multiple times, ignore if already linked */
3392 if (!list_empty(&pwq->pwqs_node))
3393 return;
3394
3395 /* set the matching work_color */
3396 pwq->work_color = wq->work_color;
3397
3398 /* sync max_active to the current setting */
3399 pwq_adjust_max_active(pwq);
3400
3401 /* link in @pwq */
3402 list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3403 }
3404
3405 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3406 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3407 const struct workqueue_attrs *attrs)
3408 {
3409 struct worker_pool *pool;
3410 struct pool_workqueue *pwq;
3411
3412 lockdep_assert_held(&wq_pool_mutex);
3413
3414 pool = get_unbound_pool(attrs);
3415 if (!pool)
3416 return NULL;
3417
3418 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3419 if (!pwq) {
3420 put_unbound_pool(pool);
3421 return NULL;
3422 }
3423
3424 init_pwq(pwq, wq, pool);
3425 return pwq;
3426 }
3427
3428 /* undo alloc_unbound_pwq(), used only in the error path */
3429 static void free_unbound_pwq(struct pool_workqueue *pwq)
3430 {
3431 lockdep_assert_held(&wq_pool_mutex);
3432
3433 if (pwq) {
3434 put_unbound_pool(pwq->pool);
3435 kmem_cache_free(pwq_cache, pwq);
3436 }
3437 }
3438
3439 /**
3440 * wq_calc_node_mask - calculate a wq_attrs' cpumask for the specified node
3441 * @attrs: the wq_attrs of interest
3442 * @node: the target NUMA node
3443 * @cpu_going_down: if >= 0, the CPU to consider as offline
3444 * @cpumask: outarg, the resulting cpumask
3445 *
3446 * Calculate the cpumask a workqueue with @attrs should use on @node. If
3447 * @cpu_going_down is >= 0, that cpu is considered offline during
3448 * calculation. The result is stored in @cpumask.
3449 *
3450 * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
3451 * enabled and @node has online CPUs requested by @attrs, the returned
3452 * cpumask is the intersection of the possible CPUs of @node and
3453 * @attrs->cpumask.
3454 *
3455 * The caller is responsible for ensuring that the cpumask of @node stays
3456 * stable.
3457 *
3458 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3459 * %false if equal.
3460 */
3461 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3462 int cpu_going_down, cpumask_t *cpumask)
3463 {
3464 if (!wq_numa_enabled || attrs->no_numa)
3465 goto use_dfl;
3466
3467 /* does @node have any online CPUs @attrs wants? */
3468 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3469 if (cpu_going_down >= 0)
3470 cpumask_clear_cpu(cpu_going_down, cpumask);
3471
3472 if (cpumask_empty(cpumask))
3473 goto use_dfl;
3474
3475 /* yeap, return possible CPUs in @node that @attrs wants */
3476 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3477 return !cpumask_equal(cpumask, attrs->cpumask);
3478
3479 use_dfl:
3480 cpumask_copy(cpumask, attrs->cpumask);
3481 return false;
3482 }
3483
3484 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3485 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3486 int node,
3487 struct pool_workqueue *pwq)
3488 {
3489 struct pool_workqueue *old_pwq;
3490
3491 lockdep_assert_held(&wq->mutex);
3492
3493 /* link_pwq() can handle duplicate calls */
3494 link_pwq(pwq);
3495
3496 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3497 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3498 return old_pwq;
3499 }
3500
3501 /**
3502 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
3503 * @wq: the target workqueue
3504 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
3505 *
3506 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
3507 * machines, this function maps a separate pwq to each NUMA node with
3508 * possibles CPUs in @attrs->cpumask so that work items are affine to the
3509 * NUMA node it was issued on. Older pwqs are released as in-flight work
3510 * items finish. Note that a work item which repeatedly requeues itself
3511 * back-to-back will stay on its current pwq.
3512 *
3513 * Performs GFP_KERNEL allocations.
3514 *
3515 * Return: 0 on success and -errno on failure.
3516 */
3517 int apply_workqueue_attrs(struct workqueue_struct *wq,
3518 const struct workqueue_attrs *attrs)
3519 {
3520 struct workqueue_attrs *new_attrs, *tmp_attrs;
3521 struct pool_workqueue **pwq_tbl, *dfl_pwq;
3522 int node, ret;
3523
3524 /* only unbound workqueues can change attributes */
3525 if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
3526 return -EINVAL;
3527
3528 /* creating multiple pwqs breaks ordering guarantee */
3529 if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
3530 return -EINVAL;
3531
3532 pwq_tbl = kzalloc(nr_node_ids * sizeof(pwq_tbl[0]), GFP_KERNEL);
3533 new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3534 tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3535 if (!pwq_tbl || !new_attrs || !tmp_attrs)
3536 goto enomem;
3537
3538 /* make a copy of @attrs and sanitize it */
3539 copy_workqueue_attrs(new_attrs, attrs);
3540 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3541
3542 /*
3543 * We may create multiple pwqs with differing cpumasks. Make a
3544 * copy of @new_attrs which will be modified and used to obtain
3545 * pools.
3546 */
3547 copy_workqueue_attrs(tmp_attrs, new_attrs);
3548
3549 /*
3550 * CPUs should stay stable across pwq creations and installations.
3551 * Pin CPUs, determine the target cpumask for each node and create
3552 * pwqs accordingly.
3553 */
3554 get_online_cpus();
3555
3556 mutex_lock(&wq_pool_mutex);
3557
3558 /*
3559 * If something goes wrong during CPU up/down, we'll fall back to
3560 * the default pwq covering whole @attrs->cpumask. Always create
3561 * it even if we don't use it immediately.
3562 */
3563 dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3564 if (!dfl_pwq)
3565 goto enomem_pwq;
3566
3567 for_each_node(node) {
3568 if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) {
3569 pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3570 if (!pwq_tbl[node])
3571 goto enomem_pwq;
3572 } else {
3573 dfl_pwq->refcnt++;
3574 pwq_tbl[node] = dfl_pwq;
3575 }
3576 }
3577
3578 mutex_unlock(&wq_pool_mutex);
3579
3580 /* all pwqs have been created successfully, let's install'em */
3581 mutex_lock(&wq->mutex);
3582
3583 copy_workqueue_attrs(wq->unbound_attrs, new_attrs);
3584
3585 /* save the previous pwq and install the new one */
3586 for_each_node(node)
3587 pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]);
3588
3589 /* @dfl_pwq might not have been used, ensure it's linked */
3590 link_pwq(dfl_pwq);
3591 swap(wq->dfl_pwq, dfl_pwq);
3592
3593 mutex_unlock(&wq->mutex);
3594
3595 /* put the old pwqs */
3596 for_each_node(node)
3597 put_pwq_unlocked(pwq_tbl[node]);
3598 put_pwq_unlocked(dfl_pwq);
3599
3600 put_online_cpus();
3601 ret = 0;
3602 /* fall through */
3603 out_free:
3604 free_workqueue_attrs(tmp_attrs);
3605 free_workqueue_attrs(new_attrs);
3606 kfree(pwq_tbl);
3607 return ret;
3608
3609 enomem_pwq:
3610 free_unbound_pwq(dfl_pwq);
3611 for_each_node(node)
3612 if (pwq_tbl && pwq_tbl[node] != dfl_pwq)
3613 free_unbound_pwq(pwq_tbl[node]);
3614 mutex_unlock(&wq_pool_mutex);
3615 put_online_cpus();
3616 enomem:
3617 ret = -ENOMEM;
3618 goto out_free;
3619 }
3620
3621 /**
3622 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
3623 * @wq: the target workqueue
3624 * @cpu: the CPU coming up or going down
3625 * @online: whether @cpu is coming up or going down
3626 *
3627 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
3628 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of
3629 * @wq accordingly.
3630 *
3631 * If NUMA affinity can't be adjusted due to memory allocation failure, it
3632 * falls back to @wq->dfl_pwq which may not be optimal but is always
3633 * correct.
3634 *
3635 * Note that when the last allowed CPU of a NUMA node goes offline for a
3636 * workqueue with a cpumask spanning multiple nodes, the workers which were
3637 * already executing the work items for the workqueue will lose their CPU
3638 * affinity and may execute on any CPU. This is similar to how per-cpu
3639 * workqueues behave on CPU_DOWN. If a workqueue user wants strict
3640 * affinity, it's the user's responsibility to flush the work item from
3641 * CPU_DOWN_PREPARE.
3642 */
3643 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
3644 bool online)
3645 {
3646 int node = cpu_to_node(cpu);
3647 int cpu_off = online ? -1 : cpu;
3648 struct pool_workqueue *old_pwq = NULL, *pwq;
3649 struct workqueue_attrs *target_attrs;
3650 cpumask_t *cpumask;
3651
3652 lockdep_assert_held(&wq_pool_mutex);
3653
3654 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND))
3655 return;
3656
3657 /*
3658 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
3659 * Let's use a preallocated one. The following buf is protected by
3660 * CPU hotplug exclusion.
3661 */
3662 target_attrs = wq_update_unbound_numa_attrs_buf;
3663 cpumask = target_attrs->cpumask;
3664
3665 mutex_lock(&wq->mutex);
3666 if (wq->unbound_attrs->no_numa)
3667 goto out_unlock;
3668
3669 copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
3670 pwq = unbound_pwq_by_node(wq, node);
3671
3672 /*
3673 * Let's determine what needs to be done. If the target cpumask is
3674 * different from wq's, we need to compare it to @pwq's and create
3675 * a new one if they don't match. If the target cpumask equals
3676 * wq's, the default pwq should be used.
3677 */
3678 if (wq_calc_node_cpumask(wq->unbound_attrs, node, cpu_off, cpumask)) {
3679 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
3680 goto out_unlock;
3681 } else {
3682 goto use_dfl_pwq;
3683 }
3684
3685 mutex_unlock(&wq->mutex);
3686
3687 /* create a new pwq */
3688 pwq = alloc_unbound_pwq(wq, target_attrs);
3689 if (!pwq) {
3690 pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
3691 wq->name);
3692 mutex_lock(&wq->mutex);
3693 goto use_dfl_pwq;
3694 }
3695
3696 /*
3697 * Install the new pwq. As this function is called only from CPU
3698 * hotplug callbacks and applying a new attrs is wrapped with
3699 * get/put_online_cpus(), @wq->unbound_attrs couldn't have changed
3700 * inbetween.
3701 */
3702 mutex_lock(&wq->mutex);
3703 old_pwq = numa_pwq_tbl_install(wq, node, pwq);
3704 goto out_unlock;
3705
3706 use_dfl_pwq:
3707 spin_lock_irq(&wq->dfl_pwq->pool->lock);
3708 get_pwq(wq->dfl_pwq);
3709 spin_unlock_irq(&wq->dfl_pwq->pool->lock);
3710 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
3711 out_unlock:
3712 mutex_unlock(&wq->mutex);
3713 put_pwq_unlocked(old_pwq);
3714 }
3715
3716 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
3717 {
3718 bool highpri = wq->flags & WQ_HIGHPRI;
3719 int cpu, ret;
3720
3721 if (!(wq->flags & WQ_UNBOUND)) {
3722 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
3723 if (!wq->cpu_pwqs)
3724 return -ENOMEM;
3725
3726 for_each_possible_cpu(cpu) {
3727 struct pool_workqueue *pwq =
3728 per_cpu_ptr(wq->cpu_pwqs, cpu);
3729 struct worker_pool *cpu_pools =
3730 per_cpu(cpu_worker_pools, cpu);
3731
3732 init_pwq(pwq, wq, &cpu_pools[highpri]);
3733
3734 mutex_lock(&wq->mutex);
3735 link_pwq(pwq);
3736 mutex_unlock(&wq->mutex);
3737 }
3738 return 0;
3739 } else if (wq->flags & __WQ_ORDERED) {
3740 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
3741 /* there should only be single pwq for ordering guarantee */
3742 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
3743 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
3744 "ordering guarantee broken for workqueue %s\n", wq->name);
3745 return ret;
3746 } else {
3747 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
3748 }
3749 }
3750
3751 static int wq_clamp_max_active(int max_active, unsigned int flags,
3752 const char *name)
3753 {
3754 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
3755
3756 if (max_active < 1 || max_active > lim)
3757 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
3758 max_active, name, 1, lim);
3759
3760 return clamp_val(max_active, 1, lim);
3761 }
3762
3763 struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3764 unsigned int flags,
3765 int max_active,
3766 struct lock_class_key *key,
3767 const char *lock_name, ...)
3768 {
3769 size_t tbl_size = 0;
3770 va_list args;
3771 struct workqueue_struct *wq;
3772 struct pool_workqueue *pwq;
3773
3774 /* see the comment above the definition of WQ_POWER_EFFICIENT */
3775 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
3776 flags |= WQ_UNBOUND;
3777
3778 /* allocate wq and format name */
3779 if (flags & WQ_UNBOUND)
3780 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
3781
3782 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
3783 if (!wq)
3784 return NULL;
3785
3786 if (flags & WQ_UNBOUND) {
3787 wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3788 if (!wq->unbound_attrs)
3789 goto err_free_wq;
3790 }
3791
3792 va_start(args, lock_name);
3793 vsnprintf(wq->name, sizeof(wq->name), fmt, args);
3794 va_end(args);
3795
3796 max_active = max_active ?: WQ_DFL_ACTIVE;
3797 max_active = wq_clamp_max_active(max_active, flags, wq->name);
3798
3799 /* init wq */
3800 wq->flags = flags;
3801 wq->saved_max_active = max_active;
3802 mutex_init(&wq->mutex);
3803 atomic_set(&wq->nr_pwqs_to_flush, 0);
3804 INIT_LIST_HEAD(&wq->pwqs);
3805 INIT_LIST_HEAD(&wq->flusher_queue);
3806 INIT_LIST_HEAD(&wq->flusher_overflow);
3807 INIT_LIST_HEAD(&wq->maydays);
3808
3809 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
3810 INIT_LIST_HEAD(&wq->list);
3811
3812 if (alloc_and_link_pwqs(wq) < 0)
3813 goto err_free_wq;
3814
3815 /*
3816 * Workqueues which may be used during memory reclaim should
3817 * have a rescuer to guarantee forward progress.
3818 */
3819 if (flags & WQ_MEM_RECLAIM) {
3820 struct worker *rescuer;
3821
3822 rescuer = alloc_worker(NUMA_NO_NODE);
3823 if (!rescuer)
3824 goto err_destroy;
3825
3826 rescuer->rescue_wq = wq;
3827 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
3828 wq->name);
3829 if (IS_ERR(rescuer->task)) {
3830 kfree(rescuer);
3831 goto err_destroy;
3832 }
3833
3834 wq->rescuer = rescuer;
3835 rescuer->task->flags |= PF_NO_SETAFFINITY;
3836 wake_up_process(rescuer->task);
3837 }
3838
3839 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
3840 goto err_destroy;
3841
3842 /*
3843 * wq_pool_mutex protects global freeze state and workqueues list.
3844 * Grab it, adjust max_active and add the new @wq to workqueues
3845 * list.
3846 */
3847 mutex_lock(&wq_pool_mutex);
3848
3849 mutex_lock(&wq->mutex);
3850 for_each_pwq(pwq, wq)
3851 pwq_adjust_max_active(pwq);
3852 mutex_unlock(&wq->mutex);
3853
3854 list_add_tail_rcu(&wq->list, &workqueues);
3855
3856 mutex_unlock(&wq_pool_mutex);
3857
3858 return wq;
3859
3860 err_free_wq:
3861 free_workqueue_attrs(wq->unbound_attrs);
3862 kfree(wq);
3863 return NULL;
3864 err_destroy:
3865 destroy_workqueue(wq);
3866 return NULL;
3867 }
3868 EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
3869
3870 /**
3871 * destroy_workqueue - safely terminate a workqueue
3872 * @wq: target workqueue
3873 *
3874 * Safely destroy a workqueue. All work currently pending will be done first.
3875 */
3876 void destroy_workqueue(struct workqueue_struct *wq)
3877 {
3878 struct pool_workqueue *pwq;
3879 int node;
3880
3881 /* drain it before proceeding with destruction */
3882 drain_workqueue(wq);
3883
3884 /* sanity checks */
3885 mutex_lock(&wq->mutex);
3886 for_each_pwq(pwq, wq) {
3887 int i;
3888
3889 for (i = 0; i < WORK_NR_COLORS; i++) {
3890 if (WARN_ON(pwq->nr_in_flight[i])) {
3891 mutex_unlock(&wq->mutex);
3892 return;
3893 }
3894 }
3895
3896 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
3897 WARN_ON(pwq->nr_active) ||
3898 WARN_ON(!list_empty(&pwq->delayed_works))) {
3899 mutex_unlock(&wq->mutex);
3900 return;
3901 }
3902 }
3903 mutex_unlock(&wq->mutex);
3904
3905 /*
3906 * wq list is used to freeze wq, remove from list after
3907 * flushing is complete in case freeze races us.
3908 */
3909 mutex_lock(&wq_pool_mutex);
3910 list_del_rcu(&wq->list);
3911 mutex_unlock(&wq_pool_mutex);
3912
3913 workqueue_sysfs_unregister(wq);
3914
3915 if (wq->rescuer)
3916 kthread_stop(wq->rescuer->task);
3917
3918 if (!(wq->flags & WQ_UNBOUND)) {
3919 /*
3920 * The base ref is never dropped on per-cpu pwqs. Directly
3921 * schedule RCU free.
3922 */
3923 call_rcu_sched(&wq->rcu, rcu_free_wq);
3924 } else {
3925 /*
3926 * We're the sole accessor of @wq at this point. Directly
3927 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
3928 * @wq will be freed when the last pwq is released.
3929 */
3930 for_each_node(node) {
3931 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3932 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
3933 put_pwq_unlocked(pwq);
3934 }
3935
3936 /*
3937 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is
3938 * put. Don't access it afterwards.
3939 */
3940 pwq = wq->dfl_pwq;
3941 wq->dfl_pwq = NULL;
3942 put_pwq_unlocked(pwq);
3943 }
3944 }
3945 EXPORT_SYMBOL_GPL(destroy_workqueue);
3946
3947 /**
3948 * workqueue_set_max_active - adjust max_active of a workqueue
3949 * @wq: target workqueue
3950 * @max_active: new max_active value.
3951 *
3952 * Set max_active of @wq to @max_active.
3953 *
3954 * CONTEXT:
3955 * Don't call from IRQ context.
3956 */
3957 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3958 {
3959 struct pool_workqueue *pwq;
3960
3961 /* disallow meddling with max_active for ordered workqueues */
3962 if (WARN_ON(wq->flags & __WQ_ORDERED))
3963 return;
3964
3965 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3966
3967 mutex_lock(&wq->mutex);
3968
3969 wq->saved_max_active = max_active;
3970
3971 for_each_pwq(pwq, wq)
3972 pwq_adjust_max_active(pwq);
3973
3974 mutex_unlock(&wq->mutex);
3975 }
3976 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
3977
3978 /**
3979 * current_is_workqueue_rescuer - is %current workqueue rescuer?
3980 *
3981 * Determine whether %current is a workqueue rescuer. Can be used from
3982 * work functions to determine whether it's being run off the rescuer task.
3983 *
3984 * Return: %true if %current is a workqueue rescuer. %false otherwise.
3985 */
3986 bool current_is_workqueue_rescuer(void)
3987 {
3988 struct worker *worker = current_wq_worker();
3989
3990 return worker && worker->rescue_wq;
3991 }
3992
3993 /**
3994 * workqueue_congested - test whether a workqueue is congested
3995 * @cpu: CPU in question
3996 * @wq: target workqueue
3997 *
3998 * Test whether @wq's cpu workqueue for @cpu is congested. There is
3999 * no synchronization around this function and the test result is
4000 * unreliable and only useful as advisory hints or for debugging.
4001 *
4002 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4003 * Note that both per-cpu and unbound workqueues may be associated with
4004 * multiple pool_workqueues which have separate congested states. A
4005 * workqueue being congested on one CPU doesn't mean the workqueue is also
4006 * contested on other CPUs / NUMA nodes.
4007 *
4008 * Return:
4009 * %true if congested, %false otherwise.
4010 */
4011 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4012 {
4013 struct pool_workqueue *pwq;
4014 bool ret;
4015
4016 rcu_read_lock_sched();
4017
4018 if (cpu == WORK_CPU_UNBOUND)
4019 cpu = smp_processor_id();
4020
4021 if (!(wq->flags & WQ_UNBOUND))
4022 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4023 else
4024 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4025
4026 ret = !list_empty(&pwq->delayed_works);
4027 rcu_read_unlock_sched();
4028
4029 return ret;
4030 }
4031 EXPORT_SYMBOL_GPL(workqueue_congested);
4032
4033 /**
4034 * work_busy - test whether a work is currently pending or running
4035 * @work: the work to be tested
4036 *
4037 * Test whether @work is currently pending or running. There is no
4038 * synchronization around this function and the test result is
4039 * unreliable and only useful as advisory hints or for debugging.
4040 *
4041 * Return:
4042 * OR'd bitmask of WORK_BUSY_* bits.
4043 */
4044 unsigned int work_busy(struct work_struct *work)
4045 {
4046 struct worker_pool *pool;
4047 unsigned long flags;
4048 unsigned int ret = 0;
4049
4050 if (work_pending(work))
4051 ret |= WORK_BUSY_PENDING;
4052
4053 local_irq_save(flags);
4054 pool = get_work_pool(work);
4055 if (pool) {
4056 spin_lock(&pool->lock);
4057 if (find_worker_executing_work(pool, work))
4058 ret |= WORK_BUSY_RUNNING;
4059 spin_unlock(&pool->lock);
4060 }
4061 local_irq_restore(flags);
4062
4063 return ret;
4064 }
4065 EXPORT_SYMBOL_GPL(work_busy);
4066
4067 /**
4068 * set_worker_desc - set description for the current work item
4069 * @fmt: printf-style format string
4070 * @...: arguments for the format string
4071 *
4072 * This function can be called by a running work function to describe what
4073 * the work item is about. If the worker task gets dumped, this
4074 * information will be printed out together to help debugging. The
4075 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4076 */
4077 void set_worker_desc(const char *fmt, ...)
4078 {
4079 struct worker *worker = current_wq_worker();
4080 va_list args;
4081
4082 if (worker) {
4083 va_start(args, fmt);
4084 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4085 va_end(args);
4086 worker->desc_valid = true;
4087 }
4088 }
4089
4090 /**
4091 * print_worker_info - print out worker information and description
4092 * @log_lvl: the log level to use when printing
4093 * @task: target task
4094 *
4095 * If @task is a worker and currently executing a work item, print out the
4096 * name of the workqueue being serviced and worker description set with
4097 * set_worker_desc() by the currently executing work item.
4098 *
4099 * This function can be safely called on any task as long as the
4100 * task_struct itself is accessible. While safe, this function isn't
4101 * synchronized and may print out mixups or garbages of limited length.
4102 */
4103 void print_worker_info(const char *log_lvl, struct task_struct *task)
4104 {
4105 work_func_t *fn = NULL;
4106 char name[WQ_NAME_LEN] = { };
4107 char desc[WORKER_DESC_LEN] = { };
4108 struct pool_workqueue *pwq = NULL;
4109 struct workqueue_struct *wq = NULL;
4110 bool desc_valid = false;
4111 struct worker *worker;
4112
4113 if (!(task->flags & PF_WQ_WORKER))
4114 return;
4115
4116 /*
4117 * This function is called without any synchronization and @task
4118 * could be in any state. Be careful with dereferences.
4119 */
4120 worker = probe_kthread_data(task);
4121
4122 /*
4123 * Carefully copy the associated workqueue's workfn and name. Keep
4124 * the original last '\0' in case the original contains garbage.
4125 */
4126 probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4127 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4128 probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4129 probe_kernel_read(name, wq->name, sizeof(name) - 1);
4130
4131 /* copy worker description */
4132 probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
4133 if (desc_valid)
4134 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4135
4136 if (fn || name[0] || desc[0]) {
4137 printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
4138 if (desc[0])
4139 pr_cont(" (%s)", desc);
4140 pr_cont("\n");
4141 }
4142 }
4143
4144 static void pr_cont_pool_info(struct worker_pool *pool)
4145 {
4146 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4147 if (pool->node != NUMA_NO_NODE)
4148 pr_cont(" node=%d", pool->node);
4149 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4150 }
4151
4152 static void pr_cont_work(bool comma, struct work_struct *work)
4153 {
4154 if (work->func == wq_barrier_func) {
4155 struct wq_barrier *barr;
4156
4157 barr = container_of(work, struct wq_barrier, work);
4158
4159 pr_cont("%s BAR(%d)", comma ? "," : "",
4160 task_pid_nr(barr->task));
4161 } else {
4162 pr_cont("%s %pf", comma ? "," : "", work->func);
4163 }
4164 }
4165
4166 static void show_pwq(struct pool_workqueue *pwq)
4167 {
4168 struct worker_pool *pool = pwq->pool;
4169 struct work_struct *work;
4170 struct worker *worker;
4171 bool has_in_flight = false, has_pending = false;
4172 int bkt;
4173
4174 pr_info(" pwq %d:", pool->id);
4175 pr_cont_pool_info(pool);
4176
4177 pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active,
4178 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4179
4180 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4181 if (worker->current_pwq == pwq) {
4182 has_in_flight = true;
4183 break;
4184 }
4185 }
4186 if (has_in_flight) {
4187 bool comma = false;
4188
4189 pr_info(" in-flight:");
4190 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4191 if (worker->current_pwq != pwq)
4192 continue;
4193
4194 pr_cont("%s %d%s:%pf", comma ? "," : "",
4195 task_pid_nr(worker->task),
4196 worker == pwq->wq->rescuer ? "(RESCUER)" : "",
4197 worker->current_func);
4198 list_for_each_entry(work, &worker->scheduled, entry)
4199 pr_cont_work(false, work);
4200 comma = true;
4201 }
4202 pr_cont("\n");
4203 }
4204
4205 list_for_each_entry(work, &pool->worklist, entry) {
4206 if (get_work_pwq(work) == pwq) {
4207 has_pending = true;
4208 break;
4209 }
4210 }
4211 if (has_pending) {
4212 bool comma = false;
4213
4214 pr_info(" pending:");
4215 list_for_each_entry(work, &pool->worklist, entry) {
4216 if (get_work_pwq(work) != pwq)
4217 continue;
4218
4219 pr_cont_work(comma, work);
4220 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4221 }
4222 pr_cont("\n");
4223 }
4224
4225 if (!list_empty(&pwq->delayed_works)) {
4226 bool comma = false;
4227
4228 pr_info(" delayed:");
4229 list_for_each_entry(work, &pwq->delayed_works, entry) {
4230 pr_cont_work(comma, work);
4231 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4232 }
4233 pr_cont("\n");
4234 }
4235 }
4236
4237 /**
4238 * show_workqueue_state - dump workqueue state
4239 *
4240 * Called from a sysrq handler and prints out all busy workqueues and
4241 * pools.
4242 */
4243 void show_workqueue_state(void)
4244 {
4245 struct workqueue_struct *wq;
4246 struct worker_pool *pool;
4247 unsigned long flags;
4248 int pi;
4249
4250 rcu_read_lock_sched();
4251
4252 pr_info("Showing busy workqueues and worker pools:\n");
4253
4254 list_for_each_entry_rcu(wq, &workqueues, list) {
4255 struct pool_workqueue *pwq;
4256 bool idle = true;
4257
4258 for_each_pwq(pwq, wq) {
4259 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
4260 idle = false;
4261 break;
4262 }
4263 }
4264 if (idle)
4265 continue;
4266
4267 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4268
4269 for_each_pwq(pwq, wq) {
4270 spin_lock_irqsave(&pwq->pool->lock, flags);
4271 if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4272 show_pwq(pwq);
4273 spin_unlock_irqrestore(&pwq->pool->lock, flags);
4274 }
4275 }
4276
4277 for_each_pool(pool, pi) {
4278 struct worker *worker;
4279 bool first = true;
4280
4281 spin_lock_irqsave(&pool->lock, flags);
4282 if (pool->nr_workers == pool->nr_idle)
4283 goto next_pool;
4284
4285 pr_info("pool %d:", pool->id);
4286 pr_cont_pool_info(pool);
4287 pr_cont(" workers=%d", pool->nr_workers);
4288 if (pool->manager)
4289 pr_cont(" manager: %d",
4290 task_pid_nr(pool->manager->task));
4291 list_for_each_entry(worker, &pool->idle_list, entry) {
4292 pr_cont(" %s%d", first ? "idle: " : "",
4293 task_pid_nr(worker->task));
4294 first = false;
4295 }
4296 pr_cont("\n");
4297 next_pool:
4298 spin_unlock_irqrestore(&pool->lock, flags);
4299 }
4300
4301 rcu_read_unlock_sched();
4302 }
4303
4304 /*
4305 * CPU hotplug.
4306 *
4307 * There are two challenges in supporting CPU hotplug. Firstly, there
4308 * are a lot of assumptions on strong associations among work, pwq and
4309 * pool which make migrating pending and scheduled works very
4310 * difficult to implement without impacting hot paths. Secondly,
4311 * worker pools serve mix of short, long and very long running works making
4312 * blocked draining impractical.
4313 *
4314 * This is solved by allowing the pools to be disassociated from the CPU
4315 * running as an unbound one and allowing it to be reattached later if the
4316 * cpu comes back online.
4317 */
4318
4319 static void wq_unbind_fn(struct work_struct *work)
4320 {
4321 int cpu = smp_processor_id();
4322 struct worker_pool *pool;
4323 struct worker *worker;
4324
4325 for_each_cpu_worker_pool(pool, cpu) {
4326 mutex_lock(&pool->attach_mutex);
4327 spin_lock_irq(&pool->lock);
4328
4329 /*
4330 * We've blocked all attach/detach operations. Make all workers
4331 * unbound and set DISASSOCIATED. Before this, all workers
4332 * except for the ones which are still executing works from
4333 * before the last CPU down must be on the cpu. After
4334 * this, they may become diasporas.
4335 */
4336 for_each_pool_worker(worker, pool)
4337 worker->flags |= WORKER_UNBOUND;
4338
4339 pool->flags |= POOL_DISASSOCIATED;
4340
4341 spin_unlock_irq(&pool->lock);
4342 mutex_unlock(&pool->attach_mutex);
4343
4344 /*
4345 * Call schedule() so that we cross rq->lock and thus can
4346 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4347 * This is necessary as scheduler callbacks may be invoked
4348 * from other cpus.
4349 */
4350 schedule();
4351
4352 /*
4353 * Sched callbacks are disabled now. Zap nr_running.
4354 * After this, nr_running stays zero and need_more_worker()
4355 * and keep_working() are always true as long as the
4356 * worklist is not empty. This pool now behaves as an
4357 * unbound (in terms of concurrency management) pool which
4358 * are served by workers tied to the pool.
4359 */
4360 atomic_set(&pool->nr_running, 0);
4361
4362 /*
4363 * With concurrency management just turned off, a busy
4364 * worker blocking could lead to lengthy stalls. Kick off
4365 * unbound chain execution of currently pending work items.
4366 */
4367 spin_lock_irq(&pool->lock);
4368 wake_up_worker(pool);
4369 spin_unlock_irq(&pool->lock);
4370 }
4371 }
4372
4373 /**
4374 * rebind_workers - rebind all workers of a pool to the associated CPU
4375 * @pool: pool of interest
4376 *
4377 * @pool->cpu is coming online. Rebind all workers to the CPU.
4378 */
4379 static void rebind_workers(struct worker_pool *pool)
4380 {
4381 struct worker *worker;
4382
4383 lockdep_assert_held(&pool->attach_mutex);
4384
4385 /*
4386 * Restore CPU affinity of all workers. As all idle workers should
4387 * be on the run-queue of the associated CPU before any local
4388 * wake-ups for concurrency management happen, restore CPU affinty
4389 * of all workers first and then clear UNBOUND. As we're called
4390 * from CPU_ONLINE, the following shouldn't fail.
4391 */
4392 for_each_pool_worker(worker, pool)
4393 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4394 pool->attrs->cpumask) < 0);
4395
4396 spin_lock_irq(&pool->lock);
4397 pool->flags &= ~POOL_DISASSOCIATED;
4398
4399 for_each_pool_worker(worker, pool) {
4400 unsigned int worker_flags = worker->flags;
4401
4402 /*
4403 * A bound idle worker should actually be on the runqueue
4404 * of the associated CPU for local wake-ups targeting it to
4405 * work. Kick all idle workers so that they migrate to the
4406 * associated CPU. Doing this in the same loop as
4407 * replacing UNBOUND with REBOUND is safe as no worker will
4408 * be bound before @pool->lock is released.
4409 */
4410 if (worker_flags & WORKER_IDLE)
4411 wake_up_process(worker->task);
4412
4413 /*
4414 * We want to clear UNBOUND but can't directly call
4415 * worker_clr_flags() or adjust nr_running. Atomically
4416 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
4417 * @worker will clear REBOUND using worker_clr_flags() when
4418 * it initiates the next execution cycle thus restoring
4419 * concurrency management. Note that when or whether
4420 * @worker clears REBOUND doesn't affect correctness.
4421 *
4422 * ACCESS_ONCE() is necessary because @worker->flags may be
4423 * tested without holding any lock in
4424 * wq_worker_waking_up(). Without it, NOT_RUNNING test may
4425 * fail incorrectly leading to premature concurrency
4426 * management operations.
4427 */
4428 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4429 worker_flags |= WORKER_REBOUND;
4430 worker_flags &= ~WORKER_UNBOUND;
4431 ACCESS_ONCE(worker->flags) = worker_flags;
4432 }
4433
4434 spin_unlock_irq(&pool->lock);
4435 }
4436
4437 /**
4438 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
4439 * @pool: unbound pool of interest
4440 * @cpu: the CPU which is coming up
4441 *
4442 * An unbound pool may end up with a cpumask which doesn't have any online
4443 * CPUs. When a worker of such pool get scheduled, the scheduler resets
4444 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
4445 * online CPU before, cpus_allowed of all its workers should be restored.
4446 */
4447 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4448 {
4449 static cpumask_t cpumask;
4450 struct worker *worker;
4451
4452 lockdep_assert_held(&pool->attach_mutex);
4453
4454 /* is @cpu allowed for @pool? */
4455 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4456 return;
4457
4458 /* is @cpu the only online CPU? */
4459 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
4460 if (cpumask_weight(&cpumask) != 1)
4461 return;
4462
4463 /* as we're called from CPU_ONLINE, the following shouldn't fail */
4464 for_each_pool_worker(worker, pool)
4465 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4466 pool->attrs->cpumask) < 0);
4467 }
4468
4469 /*
4470 * Workqueues should be brought up before normal priority CPU notifiers.
4471 * This will be registered high priority CPU notifier.
4472 */
4473 static int workqueue_cpu_up_callback(struct notifier_block *nfb,
4474 unsigned long action,
4475 void *hcpu)
4476 {
4477 int cpu = (unsigned long)hcpu;
4478 struct worker_pool *pool;
4479 struct workqueue_struct *wq;
4480 int pi;
4481
4482 switch (action & ~CPU_TASKS_FROZEN) {
4483 case CPU_UP_PREPARE:
4484 for_each_cpu_worker_pool(pool, cpu) {
4485 if (pool->nr_workers)
4486 continue;
4487 if (!create_worker(pool))
4488 return NOTIFY_BAD;
4489 }
4490 break;
4491
4492 case CPU_DOWN_FAILED:
4493 case CPU_ONLINE:
4494 mutex_lock(&wq_pool_mutex);
4495
4496 for_each_pool(pool, pi) {
4497 mutex_lock(&pool->attach_mutex);
4498
4499 if (pool->cpu == cpu)
4500 rebind_workers(pool);
4501 else if (pool->cpu < 0)
4502 restore_unbound_workers_cpumask(pool, cpu);
4503
4504 mutex_unlock(&pool->attach_mutex);
4505 }
4506
4507 /* update NUMA affinity of unbound workqueues */
4508 list_for_each_entry(wq, &workqueues, list)
4509 wq_update_unbound_numa(wq, cpu, true);
4510
4511 mutex_unlock(&wq_pool_mutex);
4512 break;
4513 }
4514 return NOTIFY_OK;
4515 }
4516
4517 /*
4518 * Workqueues should be brought down after normal priority CPU notifiers.
4519 * This will be registered as low priority CPU notifier.
4520 */
4521 static int workqueue_cpu_down_callback(struct notifier_block *nfb,
4522 unsigned long action,
4523 void *hcpu)
4524 {
4525 int cpu = (unsigned long)hcpu;
4526 struct work_struct unbind_work;
4527 struct workqueue_struct *wq;
4528
4529 switch (action & ~CPU_TASKS_FROZEN) {
4530 case CPU_DOWN_PREPARE:
4531 /* unbinding per-cpu workers should happen on the local CPU */
4532 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
4533 queue_work_on(cpu, system_highpri_wq, &unbind_work);
4534
4535 /* update NUMA affinity of unbound workqueues */
4536 mutex_lock(&wq_pool_mutex);
4537 list_for_each_entry(wq, &workqueues, list)
4538 wq_update_unbound_numa(wq, cpu, false);
4539 mutex_unlock(&wq_pool_mutex);
4540
4541 /* wait for per-cpu unbinding to finish */
4542 flush_work(&unbind_work);
4543 destroy_work_on_stack(&unbind_work);
4544 break;
4545 }
4546 return NOTIFY_OK;
4547 }
4548
4549 #ifdef CONFIG_SMP
4550
4551 struct work_for_cpu {
4552 struct work_struct work;
4553 long (*fn)(void *);
4554 void *arg;
4555 long ret;
4556 };
4557
4558 static void work_for_cpu_fn(struct work_struct *work)
4559 {
4560 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
4561
4562 wfc->ret = wfc->fn(wfc->arg);
4563 }
4564
4565 /**
4566 * work_on_cpu - run a function in user context on a particular cpu
4567 * @cpu: the cpu to run on
4568 * @fn: the function to run
4569 * @arg: the function arg
4570 *
4571 * It is up to the caller to ensure that the cpu doesn't go offline.
4572 * The caller must not hold any locks which would prevent @fn from completing.
4573 *
4574 * Return: The value @fn returns.
4575 */
4576 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4577 {
4578 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
4579
4580 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4581 schedule_work_on(cpu, &wfc.work);
4582 flush_work(&wfc.work);
4583 destroy_work_on_stack(&wfc.work);
4584 return wfc.ret;
4585 }
4586 EXPORT_SYMBOL_GPL(work_on_cpu);
4587 #endif /* CONFIG_SMP */
4588
4589 #ifdef CONFIG_FREEZER
4590
4591 /**
4592 * freeze_workqueues_begin - begin freezing workqueues
4593 *
4594 * Start freezing workqueues. After this function returns, all freezable
4595 * workqueues will queue new works to their delayed_works list instead of
4596 * pool->worklist.
4597 *
4598 * CONTEXT:
4599 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4600 */
4601 void freeze_workqueues_begin(void)
4602 {
4603 struct workqueue_struct *wq;
4604 struct pool_workqueue *pwq;
4605
4606 mutex_lock(&wq_pool_mutex);
4607
4608 WARN_ON_ONCE(workqueue_freezing);
4609 workqueue_freezing = true;
4610
4611 list_for_each_entry(wq, &workqueues, list) {
4612 mutex_lock(&wq->mutex);
4613 for_each_pwq(pwq, wq)
4614 pwq_adjust_max_active(pwq);
4615 mutex_unlock(&wq->mutex);
4616 }
4617
4618 mutex_unlock(&wq_pool_mutex);
4619 }
4620
4621 /**
4622 * freeze_workqueues_busy - are freezable workqueues still busy?
4623 *
4624 * Check whether freezing is complete. This function must be called
4625 * between freeze_workqueues_begin() and thaw_workqueues().
4626 *
4627 * CONTEXT:
4628 * Grabs and releases wq_pool_mutex.
4629 *
4630 * Return:
4631 * %true if some freezable workqueues are still busy. %false if freezing
4632 * is complete.
4633 */
4634 bool freeze_workqueues_busy(void)
4635 {
4636 bool busy = false;
4637 struct workqueue_struct *wq;
4638 struct pool_workqueue *pwq;
4639
4640 mutex_lock(&wq_pool_mutex);
4641
4642 WARN_ON_ONCE(!workqueue_freezing);
4643
4644 list_for_each_entry(wq, &workqueues, list) {
4645 if (!(wq->flags & WQ_FREEZABLE))
4646 continue;
4647 /*
4648 * nr_active is monotonically decreasing. It's safe
4649 * to peek without lock.
4650 */
4651 rcu_read_lock_sched();
4652 for_each_pwq(pwq, wq) {
4653 WARN_ON_ONCE(pwq->nr_active < 0);
4654 if (pwq->nr_active) {
4655 busy = true;
4656 rcu_read_unlock_sched();
4657 goto out_unlock;
4658 }
4659 }
4660 rcu_read_unlock_sched();
4661 }
4662 out_unlock:
4663 mutex_unlock(&wq_pool_mutex);
4664 return busy;
4665 }
4666
4667 /**
4668 * thaw_workqueues - thaw workqueues
4669 *
4670 * Thaw workqueues. Normal queueing is restored and all collected
4671 * frozen works are transferred to their respective pool worklists.
4672 *
4673 * CONTEXT:
4674 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4675 */
4676 void thaw_workqueues(void)
4677 {
4678 struct workqueue_struct *wq;
4679 struct pool_workqueue *pwq;
4680
4681 mutex_lock(&wq_pool_mutex);
4682
4683 if (!workqueue_freezing)
4684 goto out_unlock;
4685
4686 workqueue_freezing = false;
4687
4688 /* restore max_active and repopulate worklist */
4689 list_for_each_entry(wq, &workqueues, list) {
4690 mutex_lock(&wq->mutex);
4691 for_each_pwq(pwq, wq)
4692 pwq_adjust_max_active(pwq);
4693 mutex_unlock(&wq->mutex);
4694 }
4695
4696 out_unlock:
4697 mutex_unlock(&wq_pool_mutex);
4698 }
4699 #endif /* CONFIG_FREEZER */
4700
4701 #ifdef CONFIG_SYSFS
4702 /*
4703 * Workqueues with WQ_SYSFS flag set is visible to userland via
4704 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
4705 * following attributes.
4706 *
4707 * per_cpu RO bool : whether the workqueue is per-cpu or unbound
4708 * max_active RW int : maximum number of in-flight work items
4709 *
4710 * Unbound workqueues have the following extra attributes.
4711 *
4712 * id RO int : the associated pool ID
4713 * nice RW int : nice value of the workers
4714 * cpumask RW mask : bitmask of allowed CPUs for the workers
4715 */
4716 struct wq_device {
4717 struct workqueue_struct *wq;
4718 struct device dev;
4719 };
4720
4721 static struct workqueue_struct *dev_to_wq(struct device *dev)
4722 {
4723 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
4724
4725 return wq_dev->wq;
4726 }
4727
4728 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
4729 char *buf)
4730 {
4731 struct workqueue_struct *wq = dev_to_wq(dev);
4732
4733 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
4734 }
4735 static DEVICE_ATTR_RO(per_cpu);
4736
4737 static ssize_t max_active_show(struct device *dev,
4738 struct device_attribute *attr, char *buf)
4739 {
4740 struct workqueue_struct *wq = dev_to_wq(dev);
4741
4742 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
4743 }
4744
4745 static ssize_t max_active_store(struct device *dev,
4746 struct device_attribute *attr, const char *buf,
4747 size_t count)
4748 {
4749 struct workqueue_struct *wq = dev_to_wq(dev);
4750 int val;
4751
4752 if (sscanf(buf, "%d", &val) != 1 || val <= 0)
4753 return -EINVAL;
4754
4755 workqueue_set_max_active(wq, val);
4756 return count;
4757 }
4758 static DEVICE_ATTR_RW(max_active);
4759
4760 static struct attribute *wq_sysfs_attrs[] = {
4761 &dev_attr_per_cpu.attr,
4762 &dev_attr_max_active.attr,
4763 NULL,
4764 };
4765 ATTRIBUTE_GROUPS(wq_sysfs);
4766
4767 static ssize_t wq_pool_ids_show(struct device *dev,
4768 struct device_attribute *attr, char *buf)
4769 {
4770 struct workqueue_struct *wq = dev_to_wq(dev);
4771 const char *delim = "";
4772 int node, written = 0;
4773
4774 rcu_read_lock_sched();
4775 for_each_node(node) {
4776 written += scnprintf(buf + written, PAGE_SIZE - written,
4777 "%s%d:%d", delim, node,
4778 unbound_pwq_by_node(wq, node)->pool->id);
4779 delim = " ";
4780 }
4781 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
4782 rcu_read_unlock_sched();
4783
4784 return written;
4785 }
4786
4787 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
4788 char *buf)
4789 {
4790 struct workqueue_struct *wq = dev_to_wq(dev);
4791 int written;
4792
4793 mutex_lock(&wq->mutex);
4794 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
4795 mutex_unlock(&wq->mutex);
4796
4797 return written;
4798 }
4799
4800 /* prepare workqueue_attrs for sysfs store operations */
4801 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
4802 {
4803 struct workqueue_attrs *attrs;
4804
4805 attrs = alloc_workqueue_attrs(GFP_KERNEL);
4806 if (!attrs)
4807 return NULL;
4808
4809 mutex_lock(&wq->mutex);
4810 copy_workqueue_attrs(attrs, wq->unbound_attrs);
4811 mutex_unlock(&wq->mutex);
4812 return attrs;
4813 }
4814
4815 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
4816 const char *buf, size_t count)
4817 {
4818 struct workqueue_struct *wq = dev_to_wq(dev);
4819 struct workqueue_attrs *attrs;
4820 int ret;
4821
4822 attrs = wq_sysfs_prep_attrs(wq);
4823 if (!attrs)
4824 return -ENOMEM;
4825
4826 if (sscanf(buf, "%d", &attrs->nice) == 1 &&
4827 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
4828 ret = apply_workqueue_attrs(wq, attrs);
4829 else
4830 ret = -EINVAL;
4831
4832 free_workqueue_attrs(attrs);
4833 return ret ?: count;
4834 }
4835
4836 static ssize_t wq_cpumask_show(struct device *dev,
4837 struct device_attribute *attr, char *buf)
4838 {
4839 struct workqueue_struct *wq = dev_to_wq(dev);
4840 int written;
4841
4842 mutex_lock(&wq->mutex);
4843 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
4844 cpumask_pr_args(wq->unbound_attrs->cpumask));
4845 mutex_unlock(&wq->mutex);
4846 return written;
4847 }
4848
4849 static ssize_t wq_cpumask_store(struct device *dev,
4850 struct device_attribute *attr,
4851 const char *buf, size_t count)
4852 {
4853 struct workqueue_struct *wq = dev_to_wq(dev);
4854 struct workqueue_attrs *attrs;
4855 int ret;
4856
4857 attrs = wq_sysfs_prep_attrs(wq);
4858 if (!attrs)
4859 return -ENOMEM;
4860
4861 ret = cpumask_parse(buf, attrs->cpumask);
4862 if (!ret)
4863 ret = apply_workqueue_attrs(wq, attrs);
4864
4865 free_workqueue_attrs(attrs);
4866 return ret ?: count;
4867 }
4868
4869 static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
4870 char *buf)
4871 {
4872 struct workqueue_struct *wq = dev_to_wq(dev);
4873 int written;
4874
4875 mutex_lock(&wq->mutex);
4876 written = scnprintf(buf, PAGE_SIZE, "%d\n",
4877 !wq->unbound_attrs->no_numa);
4878 mutex_unlock(&wq->mutex);
4879
4880 return written;
4881 }
4882
4883 static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
4884 const char *buf, size_t count)
4885 {
4886 struct workqueue_struct *wq = dev_to_wq(dev);
4887 struct workqueue_attrs *attrs;
4888 int v, ret;
4889
4890 attrs = wq_sysfs_prep_attrs(wq);
4891 if (!attrs)
4892 return -ENOMEM;
4893
4894 ret = -EINVAL;
4895 if (sscanf(buf, "%d", &v) == 1) {
4896 attrs->no_numa = !v;
4897 ret = apply_workqueue_attrs(wq, attrs);
4898 }
4899
4900 free_workqueue_attrs(attrs);
4901 return ret ?: count;
4902 }
4903
4904 static struct device_attribute wq_sysfs_unbound_attrs[] = {
4905 __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
4906 __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
4907 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
4908 __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
4909 __ATTR_NULL,
4910 };
4911
4912 static struct bus_type wq_subsys = {
4913 .name = "workqueue",
4914 .dev_groups = wq_sysfs_groups,
4915 };
4916
4917 static int __init wq_sysfs_init(void)
4918 {
4919 return subsys_virtual_register(&wq_subsys, NULL);
4920 }
4921 core_initcall(wq_sysfs_init);
4922
4923 static void wq_device_release(struct device *dev)
4924 {
4925 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
4926
4927 kfree(wq_dev);
4928 }
4929
4930 /**
4931 * workqueue_sysfs_register - make a workqueue visible in sysfs
4932 * @wq: the workqueue to register
4933 *
4934 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
4935 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
4936 * which is the preferred method.
4937 *
4938 * Workqueue user should use this function directly iff it wants to apply
4939 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
4940 * apply_workqueue_attrs() may race against userland updating the
4941 * attributes.
4942 *
4943 * Return: 0 on success, -errno on failure.
4944 */
4945 int workqueue_sysfs_register(struct workqueue_struct *wq)
4946 {
4947 struct wq_device *wq_dev;
4948 int ret;
4949
4950 /*
4951 * Adjusting max_active or creating new pwqs by applyting
4952 * attributes breaks ordering guarantee. Disallow exposing ordered
4953 * workqueues.
4954 */
4955 if (WARN_ON(wq->flags & __WQ_ORDERED))
4956 return -EINVAL;
4957
4958 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
4959 if (!wq_dev)
4960 return -ENOMEM;
4961
4962 wq_dev->wq = wq;
4963 wq_dev->dev.bus = &wq_subsys;
4964 wq_dev->dev.init_name = wq->name;
4965 wq_dev->dev.release = wq_device_release;
4966
4967 /*
4968 * unbound_attrs are created separately. Suppress uevent until
4969 * everything is ready.
4970 */
4971 dev_set_uevent_suppress(&wq_dev->dev, true);
4972
4973 ret = device_register(&wq_dev->dev);
4974 if (ret) {
4975 kfree(wq_dev);
4976 wq->wq_dev = NULL;
4977 return ret;
4978 }
4979
4980 if (wq->flags & WQ_UNBOUND) {
4981 struct device_attribute *attr;
4982
4983 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
4984 ret = device_create_file(&wq_dev->dev, attr);
4985 if (ret) {
4986 device_unregister(&wq_dev->dev);
4987 wq->wq_dev = NULL;
4988 return ret;
4989 }
4990 }
4991 }
4992
4993 dev_set_uevent_suppress(&wq_dev->dev, false);
4994 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
4995 return 0;
4996 }
4997
4998 /**
4999 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
5000 * @wq: the workqueue to unregister
5001 *
5002 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
5003 */
5004 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
5005 {
5006 struct wq_device *wq_dev = wq->wq_dev;
5007
5008 if (!wq->wq_dev)
5009 return;
5010
5011 wq->wq_dev = NULL;
5012 device_unregister(&wq_dev->dev);
5013 }
5014 #else /* CONFIG_SYSFS */
5015 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
5016 #endif /* CONFIG_SYSFS */
5017
5018 static void __init wq_numa_init(void)
5019 {
5020 cpumask_var_t *tbl;
5021 int node, cpu;
5022
5023 if (num_possible_nodes() <= 1)
5024 return;
5025
5026 if (wq_disable_numa) {
5027 pr_info("workqueue: NUMA affinity support disabled\n");
5028 return;
5029 }
5030
5031 wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
5032 BUG_ON(!wq_update_unbound_numa_attrs_buf);
5033
5034 /*
5035 * We want masks of possible CPUs of each node which isn't readily
5036 * available. Build one from cpu_to_node() which should have been
5037 * fully initialized by now.
5038 */
5039 tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
5040 BUG_ON(!tbl);
5041
5042 for_each_node(node)
5043 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
5044 node_online(node) ? node : NUMA_NO_NODE));
5045
5046 for_each_possible_cpu(cpu) {
5047 node = cpu_to_node(cpu);
5048 if (WARN_ON(node == NUMA_NO_NODE)) {
5049 pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5050 /* happens iff arch is bonkers, let's just proceed */
5051 return;
5052 }
5053 cpumask_set_cpu(cpu, tbl[node]);
5054 }
5055
5056 wq_numa_possible_cpumask = tbl;
5057 wq_numa_enabled = true;
5058 }
5059
5060 static int __init init_workqueues(void)
5061 {
5062 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5063 int i, cpu;
5064
5065 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5066
5067 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
5068
5069 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
5070 hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
5071
5072 wq_numa_init();
5073
5074 /* initialize CPU pools */
5075 for_each_possible_cpu(cpu) {
5076 struct worker_pool *pool;
5077
5078 i = 0;
5079 for_each_cpu_worker_pool(pool, cpu) {
5080 BUG_ON(init_worker_pool(pool));
5081 pool->cpu = cpu;
5082 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
5083 pool->attrs->nice = std_nice[i++];
5084 pool->node = cpu_to_node(cpu);
5085
5086 /* alloc pool ID */
5087 mutex_lock(&wq_pool_mutex);
5088 BUG_ON(worker_pool_assign_id(pool));
5089 mutex_unlock(&wq_pool_mutex);
5090 }
5091 }
5092
5093 /* create the initial worker */
5094 for_each_online_cpu(cpu) {
5095 struct worker_pool *pool;
5096
5097 for_each_cpu_worker_pool(pool, cpu) {
5098 pool->flags &= ~POOL_DISASSOCIATED;
5099 BUG_ON(!create_worker(pool));
5100 }
5101 }
5102
5103 /* create default unbound and ordered wq attrs */
5104 for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5105 struct workqueue_attrs *attrs;
5106
5107 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5108 attrs->nice = std_nice[i];
5109 unbound_std_wq_attrs[i] = attrs;
5110
5111 /*
5112 * An ordered wq should have only one pwq as ordering is
5113 * guaranteed by max_active which is enforced by pwqs.
5114 * Turn off NUMA so that dfl_pwq is used for all nodes.
5115 */
5116 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5117 attrs->nice = std_nice[i];
5118 attrs->no_numa = true;
5119 ordered_wq_attrs[i] = attrs;
5120 }
5121
5122 system_wq = alloc_workqueue("events", 0, 0);
5123 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
5124 system_long_wq = alloc_workqueue("events_long", 0, 0);
5125 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
5126 WQ_UNBOUND_MAX_ACTIVE);
5127 system_freezable_wq = alloc_workqueue("events_freezable",
5128 WQ_FREEZABLE, 0);
5129 system_power_efficient_wq = alloc_workqueue("events_power_efficient",
5130 WQ_POWER_EFFICIENT, 0);
5131 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
5132 WQ_FREEZABLE | WQ_POWER_EFFICIENT,
5133 0);
5134 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
5135 !system_unbound_wq || !system_freezable_wq ||
5136 !system_power_efficient_wq ||
5137 !system_freezable_power_efficient_wq);
5138 return 0;
5139 }
5140 early_initcall(init_workqueues);