workqueue: Add RCU annotation for pwq list walk
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kernel/workqueue.c - generic async execution with shared worker pool
4 *
5 * Copyright (C) 2002 Ingo Molnar
6 *
7 * Derived from the taskqueue/keventd code by:
8 * David Woodhouse <dwmw2@infradead.org>
9 * Andrew Morton
10 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
11 * Theodore Ts'o <tytso@mit.edu>
12 *
13 * Made to use alloc_percpu by Christoph Lameter.
14 *
15 * Copyright (C) 2010 SUSE Linux Products GmbH
16 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
17 *
18  * This is the generic async execution mechanism. Work items are
19 * executed in process context. The worker pool is shared and
20 * automatically managed. There are two worker pools for each CPU (one for
21 * normal work items and the other for high priority ones) and some extra
22 * pools for workqueues which are not bound to any specific CPU - the
23 * number of these backing pools is dynamic.
24 *
25 * Please read Documentation/core-api/workqueue.rst for details.
26 */
27
28 #include <linux/export.h>
29 #include <linux/kernel.h>
30 #include <linux/sched.h>
31 #include <linux/init.h>
32 #include <linux/signal.h>
33 #include <linux/completion.h>
34 #include <linux/workqueue.h>
35 #include <linux/slab.h>
36 #include <linux/cpu.h>
37 #include <linux/notifier.h>
38 #include <linux/kthread.h>
39 #include <linux/hardirq.h>
40 #include <linux/mempolicy.h>
41 #include <linux/freezer.h>
42 #include <linux/debug_locks.h>
43 #include <linux/lockdep.h>
44 #include <linux/idr.h>
45 #include <linux/jhash.h>
46 #include <linux/hashtable.h>
47 #include <linux/rculist.h>
48 #include <linux/nodemask.h>
49 #include <linux/moduleparam.h>
50 #include <linux/uaccess.h>
51 #include <linux/sched/isolation.h>
52 #include <linux/nmi.h>
53
54 #include "workqueue_internal.h"
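
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the basic client-side usage of the mechanism described in the header
 * comment above.  The work function and init hook are hypothetical.
 */
static void example_work_fn(struct work_struct *work)
{
	pr_info("example work item ran in process context\n");
}

static DECLARE_WORK(example_work, example_work_fn);

static int __init example_init(void)
{
	/* hand the item to the shared worker pools via the system workqueue */
	schedule_work(&example_work);
	return 0;
}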
55
56 enum {
57 /*
58 * worker_pool flags
59 *
60  * A bound pool is either associated with or disassociated from its CPU.
61 * While associated (!DISASSOCIATED), all workers are bound to the
62 * CPU and none has %WORKER_UNBOUND set and concurrency management
63 * is in effect.
64 *
65 * While DISASSOCIATED, the cpu may be offline and all workers have
66 * %WORKER_UNBOUND set and concurrency management disabled, and may
67 * be executing on any CPU. The pool behaves as an unbound one.
68 *
69 * Note that DISASSOCIATED should be flipped only while holding
70 * wq_pool_attach_mutex to avoid changing binding state while
71 * worker_attach_to_pool() is in progress.
72 */
73 POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
74 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
75
76 /* worker flags */
77 WORKER_DIE = 1 << 1, /* die die die */
78 WORKER_IDLE = 1 << 2, /* is idle */
79 WORKER_PREP = 1 << 3, /* preparing to run works */
80 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
81 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
82 WORKER_REBOUND = 1 << 8, /* worker was rebound */
83
84 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
85 WORKER_UNBOUND | WORKER_REBOUND,
86
87 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
88
89 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
90 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
91
92 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
93 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
94
95 MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
96 /* call for help after 10ms
97 (min two ticks) */
98 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
99  CREATE_COOLDOWN = HZ, /* time to breathe after fail */
100
101 /*
102 * Rescue workers are used only on emergencies and shared by
103 * all cpus. Give MIN_NICE.
104 */
105 RESCUER_NICE_LEVEL = MIN_NICE,
106 HIGHPRI_NICE_LEVEL = MIN_NICE,
107
108 WQ_NAME_LEN = 24,
109 };
110
111 /*
112 * Structure fields follow one of the following exclusion rules.
113 *
114 * I: Modifiable by initialization/destruction paths and read-only for
115 * everyone else.
116 *
117 * P: Preemption protected. Disabling preemption is enough and should
118 * only be modified and accessed from the local cpu.
119 *
120 * L: pool->lock protected. Access with pool->lock held.
121 *
122 * X: During normal operation, modification requires pool->lock and should
123 * be done only from local cpu. Either disabling preemption on local
124 * cpu or grabbing pool->lock is enough for read access. If
125 * POOL_DISASSOCIATED is set, it's identical to L.
126 *
127 * A: wq_pool_attach_mutex protected.
128 *
129 * PL: wq_pool_mutex protected.
130 *
131 * PR: wq_pool_mutex protected for writes. RCU protected for reads.
132 *
133 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
134 *
135 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
136 * RCU for reads.
137 *
138 * WQ: wq->mutex protected.
139 *
140 * WR: wq->mutex protected for writes. RCU protected for reads.
141 *
142 * MD: wq_mayday_lock protected.
143 */
144
145 /* struct worker is defined in workqueue_internal.h */
146
147 struct worker_pool {
148 spinlock_t lock; /* the pool lock */
149 int cpu; /* I: the associated cpu */
150 int node; /* I: the associated node ID */
151 int id; /* I: pool ID */
152 unsigned int flags; /* X: flags */
153
154 unsigned long watchdog_ts; /* L: watchdog timestamp */
155
156 struct list_head worklist; /* L: list of pending works */
157
158 int nr_workers; /* L: total number of workers */
159 int nr_idle; /* L: currently idle workers */
160
161 struct list_head idle_list; /* X: list of idle workers */
162 struct timer_list idle_timer; /* L: worker idle timeout */
163 struct timer_list mayday_timer; /* L: SOS timer for workers */
164
165  /* a worker is either on busy_hash or idle_list, or is the manager */
166 DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
167 /* L: hash of busy workers */
168
169 struct worker *manager; /* L: purely informational */
170 struct list_head workers; /* A: attached workers */
171 struct completion *detach_completion; /* all workers detached */
172
173 struct ida worker_ida; /* worker IDs for task name */
174
175 struct workqueue_attrs *attrs; /* I: worker attributes */
176 struct hlist_node hash_node; /* PL: unbound_pool_hash node */
177 int refcnt; /* PL: refcnt for unbound pools */
178
179 /*
180 * The current concurrency level. As it's likely to be accessed
181 * from other CPUs during try_to_wake_up(), put it in a separate
182 * cacheline.
183 */
184 atomic_t nr_running ____cacheline_aligned_in_smp;
185
186 /*
187 * Destruction of pool is RCU protected to allow dereferences
188 * from get_work_pool().
189 */
190 struct rcu_head rcu;
191 } ____cacheline_aligned_in_smp;
192
193 /*
194 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
195 * of work_struct->data are used for flags and the remaining high bits
196 * point to the pwq; thus, pwqs need to be aligned at two's power of the
197 * number of flag bits.
198 */
199 struct pool_workqueue {
200 struct worker_pool *pool; /* I: the associated pool */
201 struct workqueue_struct *wq; /* I: the owning workqueue */
202 int work_color; /* L: current color */
203 int flush_color; /* L: flushing color */
204 int refcnt; /* L: reference count */
205 int nr_in_flight[WORK_NR_COLORS];
206 /* L: nr of in_flight works */
207 int nr_active; /* L: nr of active works */
208 int max_active; /* L: max active works */
209 struct list_head delayed_works; /* L: delayed works */
210 struct list_head pwqs_node; /* WR: node on wq->pwqs */
211 struct list_head mayday_node; /* MD: node on wq->maydays */
212
213 /*
214 * Release of unbound pwq is punted to system_wq. See put_pwq()
215 * and pwq_unbound_release_workfn() for details. pool_workqueue
216 * itself is also RCU protected so that the first pwq can be
217 * determined without grabbing wq->mutex.
218 */
219 struct work_struct unbound_release_work;
220 struct rcu_head rcu;
221 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
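
/*
 * Illustrative sketch (editorial addition): because pool_workqueues are
 * aligned to 1 << WORK_STRUCT_FLAG_BITS, a queued work item's data word can
 * hold the pwq pointer and the flag bits at the same time.  This mirrors
 * what set_work_pwq() and get_work_pwq() below actually do.
 */
static inline struct pool_workqueue *example_data_to_pwq(unsigned long data)
{
	/* low WORK_STRUCT_FLAG_BITS carry flags, the rest is the pointer */
	if (data & WORK_STRUCT_PWQ)
		return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
	return NULL;
}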
222
223 /*
224 * Structure used to wait for workqueue flush.
225 */
226 struct wq_flusher {
227 struct list_head list; /* WQ: list of flushers */
228 int flush_color; /* WQ: flush color waiting for */
229 struct completion done; /* flush completion */
230 };
231
232 struct wq_device;
233
234 /*
235 * The externally visible workqueue. It relays the issued work items to
236 * the appropriate worker_pool through its pool_workqueues.
237 */
238 struct workqueue_struct {
239 struct list_head pwqs; /* WR: all pwqs of this wq */
240 struct list_head list; /* PR: list of all workqueues */
241
242 struct mutex mutex; /* protects this wq */
243 int work_color; /* WQ: current work color */
244 int flush_color; /* WQ: current flush color */
245 atomic_t nr_pwqs_to_flush; /* flush in progress */
246 struct wq_flusher *first_flusher; /* WQ: first flusher */
247 struct list_head flusher_queue; /* WQ: flush waiters */
248 struct list_head flusher_overflow; /* WQ: flush overflow list */
249
250 struct list_head maydays; /* MD: pwqs requesting rescue */
251 struct worker *rescuer; /* I: rescue worker */
252
253 int nr_drainers; /* WQ: drain in progress */
254 int saved_max_active; /* WQ: saved pwq max_active */
255
256 struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */
257 struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */
258
259 #ifdef CONFIG_SYSFS
260 struct wq_device *wq_dev; /* I: for sysfs interface */
261 #endif
262 #ifdef CONFIG_LOCKDEP
263 char *lock_name;
264 struct lock_class_key key;
265 struct lockdep_map lockdep_map;
266 #endif
267 char name[WQ_NAME_LEN]; /* I: workqueue name */
268
269 /*
270 * Destruction of workqueue_struct is RCU protected to allow walking
271 * the workqueues list without grabbing wq_pool_mutex.
272 * This is used to dump all workqueues from sysrq.
273 */
274 struct rcu_head rcu;
275
276 /* hot fields used during command issue, aligned to cacheline */
277 unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
278 struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
279 struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
280 };
281
282 static struct kmem_cache *pwq_cache;
283
284 static cpumask_var_t *wq_numa_possible_cpumask;
285 /* possible CPUs of each node */
286
287 static bool wq_disable_numa;
288 module_param_named(disable_numa, wq_disable_numa, bool, 0444);
289
290 /* see the comment above the definition of WQ_POWER_EFFICIENT */
291 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
292 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
293
294 static bool wq_online; /* can kworkers be created yet? */
295
296 static bool wq_numa_enabled; /* unbound NUMA affinity enabled */
297
298 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
299 static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
300
301 static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
302 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
303 static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
304 static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
305
306 static LIST_HEAD(workqueues); /* PR: list of all workqueues */
307 static bool workqueue_freezing; /* PL: have wqs started freezing? */
308
309 /* PL: allowable cpus for unbound wqs and work items */
310 static cpumask_var_t wq_unbound_cpumask;
311
312 /* CPU where unbound work was last round robin scheduled from this CPU */
313 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
314
315 /*
316 * Local execution of unbound work items is no longer guaranteed. The
317 * following always forces round-robin CPU selection on unbound work items
318 * to uncover usages which depend on it.
319 */
320 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
321 static bool wq_debug_force_rr_cpu = true;
322 #else
323 static bool wq_debug_force_rr_cpu = false;
324 #endif
325 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
326
327 /* the per-cpu worker pools */
328 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
329
330 static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
331
332 /* PL: hash of all unbound pools keyed by pool->attrs */
333 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
334
335 /* I: attributes used when instantiating standard unbound pools on demand */
336 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
337
338 /* I: attributes used when instantiating ordered pools on demand */
339 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
340
341 struct workqueue_struct *system_wq __read_mostly;
342 EXPORT_SYMBOL(system_wq);
343 struct workqueue_struct *system_highpri_wq __read_mostly;
344 EXPORT_SYMBOL_GPL(system_highpri_wq);
345 struct workqueue_struct *system_long_wq __read_mostly;
346 EXPORT_SYMBOL_GPL(system_long_wq);
347 struct workqueue_struct *system_unbound_wq __read_mostly;
348 EXPORT_SYMBOL_GPL(system_unbound_wq);
349 struct workqueue_struct *system_freezable_wq __read_mostly;
350 EXPORT_SYMBOL_GPL(system_freezable_wq);
351 struct workqueue_struct *system_power_efficient_wq __read_mostly;
352 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
353 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
354 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
355
356 static int worker_thread(void *__worker);
357 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
358
359 #define CREATE_TRACE_POINTS
360 #include <trace/events/workqueue.h>
361
362 #define assert_rcu_or_pool_mutex() \
363 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
364 !lockdep_is_held(&wq_pool_mutex), \
365 "RCU or wq_pool_mutex should be held")
366
367 #define assert_rcu_or_wq_mutex(wq) \
368 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
369 !lockdep_is_held(&wq->mutex), \
370 "RCU or wq->mutex should be held")
371
372 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
373 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
374 !lockdep_is_held(&wq->mutex) && \
375 !lockdep_is_held(&wq_pool_mutex), \
376 "RCU, wq->mutex or wq_pool_mutex should be held")
377
378 #define for_each_cpu_worker_pool(pool, cpu) \
379 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
380 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
381 (pool)++)
382
383 /**
384 * for_each_pool - iterate through all worker_pools in the system
385 * @pool: iteration cursor
386 * @pi: integer used for iteration
387 *
388 * This must be called either with wq_pool_mutex held or RCU read
389 * locked. If the pool needs to be used beyond the locking in effect, the
390 * caller is responsible for guaranteeing that the pool stays online.
391 *
392 * The if/else clause exists only for the lockdep assertion and can be
393 * ignored.
394 */
395 #define for_each_pool(pool, pi) \
396 idr_for_each_entry(&worker_pool_idr, pool, pi) \
397 if (({ assert_rcu_or_pool_mutex(); false; })) { } \
398 else
399
400 /**
401 * for_each_pool_worker - iterate through all workers of a worker_pool
402 * @worker: iteration cursor
403 * @pool: worker_pool to iterate workers of
404 *
405 * This must be called with wq_pool_attach_mutex.
406 *
407 * The if/else clause exists only for the lockdep assertion and can be
408 * ignored.
409 */
410 #define for_each_pool_worker(worker, pool) \
411 list_for_each_entry((worker), &(pool)->workers, node) \
412 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
413 else
414
415 /**
416 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
417 * @pwq: iteration cursor
418 * @wq: the target workqueue
419 *
420 * This must be called either with wq->mutex held or RCU read locked.
421 * If the pwq needs to be used beyond the locking in effect, the caller is
422 * responsible for guaranteeing that the pwq stays online.
423 *
424 * The if/else clause exists only for the lockdep assertion and can be
425 * ignored.
426 */
427 #define for_each_pwq(pwq, wq) \
428 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
429 lockdep_is_held(&wq->mutex)) \
430 if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
431 else
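
/*
 * Illustrative sketch (editorial addition): walking a workqueue's pwqs with
 * for_each_pwq().  Callers that don't hold wq->mutex must be inside an RCU
 * read-side critical section; the helper name is hypothetical.
 */
static inline int example_count_pwqs(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;
	int n = 0;

	rcu_read_lock();
	for_each_pwq(pwq, wq)
		n++;
	rcu_read_unlock();

	return n;
}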
432
433 #ifdef CONFIG_DEBUG_OBJECTS_WORK
434
435 static struct debug_obj_descr work_debug_descr;
436
437 static void *work_debug_hint(void *addr)
438 {
439 return ((struct work_struct *) addr)->func;
440 }
441
442 static bool work_is_static_object(void *addr)
443 {
444 struct work_struct *work = addr;
445
446 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
447 }
448
449 /*
450 * fixup_init is called when:
451 * - an active object is initialized
452 */
453 static bool work_fixup_init(void *addr, enum debug_obj_state state)
454 {
455 struct work_struct *work = addr;
456
457 switch (state) {
458 case ODEBUG_STATE_ACTIVE:
459 cancel_work_sync(work);
460 debug_object_init(work, &work_debug_descr);
461 return true;
462 default:
463 return false;
464 }
465 }
466
467 /*
468 * fixup_free is called when:
469 * - an active object is freed
470 */
471 static bool work_fixup_free(void *addr, enum debug_obj_state state)
472 {
473 struct work_struct *work = addr;
474
475 switch (state) {
476 case ODEBUG_STATE_ACTIVE:
477 cancel_work_sync(work);
478 debug_object_free(work, &work_debug_descr);
479 return true;
480 default:
481 return false;
482 }
483 }
484
485 static struct debug_obj_descr work_debug_descr = {
486 .name = "work_struct",
487 .debug_hint = work_debug_hint,
488 .is_static_object = work_is_static_object,
489 .fixup_init = work_fixup_init,
490 .fixup_free = work_fixup_free,
491 };
492
493 static inline void debug_work_activate(struct work_struct *work)
494 {
495 debug_object_activate(work, &work_debug_descr);
496 }
497
498 static inline void debug_work_deactivate(struct work_struct *work)
499 {
500 debug_object_deactivate(work, &work_debug_descr);
501 }
502
503 void __init_work(struct work_struct *work, int onstack)
504 {
505 if (onstack)
506 debug_object_init_on_stack(work, &work_debug_descr);
507 else
508 debug_object_init(work, &work_debug_descr);
509 }
510 EXPORT_SYMBOL_GPL(__init_work);
511
512 void destroy_work_on_stack(struct work_struct *work)
513 {
514 debug_object_free(work, &work_debug_descr);
515 }
516 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
517
518 void destroy_delayed_work_on_stack(struct delayed_work *work)
519 {
520 destroy_timer_on_stack(&work->timer);
521 debug_object_free(&work->work, &work_debug_descr);
522 }
523 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
524
525 #else
526 static inline void debug_work_activate(struct work_struct *work) { }
527 static inline void debug_work_deactivate(struct work_struct *work) { }
528 #endif
529
530 /**
531  * worker_pool_assign_id - allocate ID and assign it to @pool
532 * @pool: the pool pointer of interest
533 *
534 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
535 * successfully, -errno on failure.
536 */
537 static int worker_pool_assign_id(struct worker_pool *pool)
538 {
539 int ret;
540
541 lockdep_assert_held(&wq_pool_mutex);
542
543 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
544 GFP_KERNEL);
545 if (ret >= 0) {
546 pool->id = ret;
547 return 0;
548 }
549 return ret;
550 }
551
552 /**
553 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
554 * @wq: the target workqueue
555 * @node: the node ID
556 *
557 * This must be called with any of wq_pool_mutex, wq->mutex or RCU
558 * read locked.
559 * If the pwq needs to be used beyond the locking in effect, the caller is
560 * responsible for guaranteeing that the pwq stays online.
561 *
562 * Return: The unbound pool_workqueue for @node.
563 */
564 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
565 int node)
566 {
567 assert_rcu_or_wq_mutex_or_pool_mutex(wq);
568
569 /*
570 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
571 * delayed item is pending. The plan is to keep CPU -> NODE
572 * mapping valid and stable across CPU on/offlines. Once that
573 * happens, this workaround can be removed.
574 */
575 if (unlikely(node == NUMA_NO_NODE))
576 return wq->dfl_pwq;
577
578 return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
579 }
580
581 static unsigned int work_color_to_flags(int color)
582 {
583 return color << WORK_STRUCT_COLOR_SHIFT;
584 }
585
586 static int get_work_color(struct work_struct *work)
587 {
588 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
589 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
590 }
591
592 static int work_next_color(int color)
593 {
594 return (color + 1) % WORK_NR_COLORS;
595 }
596
597 /*
598  * While queued, %WORK_STRUCT_PWQ is set and the non-flag bits of a work's data
599 * contain the pointer to the queued pwq. Once execution starts, the flag
600 * is cleared and the high bits contain OFFQ flags and pool ID.
601 *
602 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
603 * and clear_work_data() can be used to set the pwq, pool or clear
604 * work->data. These functions should only be called while the work is
605 * owned - ie. while the PENDING bit is set.
606 *
607 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
608 * corresponding to a work. Pool is available once the work has been
609 * queued anywhere after initialization until it is sync canceled. pwq is
610 * available only while the work item is queued.
611 *
612 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
613 * canceled. While being canceled, a work item may have its PENDING set
614 * but stay off timer and worklist for arbitrarily long and nobody should
615 * try to steal the PENDING bit.
616 */
617 static inline void set_work_data(struct work_struct *work, unsigned long data,
618 unsigned long flags)
619 {
620 WARN_ON_ONCE(!work_pending(work));
621 atomic_long_set(&work->data, data | flags | work_static(work));
622 }
623
624 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
625 unsigned long extra_flags)
626 {
627 set_work_data(work, (unsigned long)pwq,
628 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
629 }
630
631 static void set_work_pool_and_keep_pending(struct work_struct *work,
632 int pool_id)
633 {
634 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
635 WORK_STRUCT_PENDING);
636 }
637
638 static void set_work_pool_and_clear_pending(struct work_struct *work,
639 int pool_id)
640 {
641 /*
642 * The following wmb is paired with the implied mb in
643 * test_and_set_bit(PENDING) and ensures all updates to @work made
644 * here are visible to and precede any updates by the next PENDING
645 * owner.
646 */
647 smp_wmb();
648 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
649 /*
650 * The following mb guarantees that previous clear of a PENDING bit
651 * will not be reordered with any speculative LOADS or STORES from
652 * work->current_func, which is executed afterwards. This possible
653 * reordering can lead to a missed execution on attempt to queue
654 * the same @work. E.g. consider this case:
655 *
656 * CPU#0 CPU#1
657 * ---------------------------- --------------------------------
658 *
659 * 1 STORE event_indicated
660 * 2 queue_work_on() {
661 * 3 test_and_set_bit(PENDING)
662 * 4 } set_..._and_clear_pending() {
663 * 5 set_work_data() # clear bit
664 * 6 smp_mb()
665 * 7 work->current_func() {
666 * 8 LOAD event_indicated
667 * }
668 *
669 * Without an explicit full barrier speculative LOAD on line 8 can
670 * be executed before CPU#0 does STORE on line 1. If that happens,
671 * CPU#0 observes the PENDING bit is still set and new execution of
672  * a @work is not queued, in the hope that CPU#1 will eventually
673 * finish the queued @work. Meanwhile CPU#1 does not see
674 * event_indicated is set, because speculative LOAD was executed
675 * before actual STORE.
676 */
677 smp_mb();
678 }
679
680 static void clear_work_data(struct work_struct *work)
681 {
682 smp_wmb(); /* see set_work_pool_and_clear_pending() */
683 set_work_data(work, WORK_STRUCT_NO_POOL, 0);
684 }
685
686 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
687 {
688 unsigned long data = atomic_long_read(&work->data);
689
690 if (data & WORK_STRUCT_PWQ)
691 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
692 else
693 return NULL;
694 }
695
696 /**
697 * get_work_pool - return the worker_pool a given work was associated with
698 * @work: the work item of interest
699 *
700  * Pools are created and destroyed under wq_pool_mutex, and allow read
701 * access under RCU read lock. As such, this function should be
702 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
703 *
704 * All fields of the returned pool are accessible as long as the above
705 * mentioned locking is in effect. If the returned pool needs to be used
706 * beyond the critical section, the caller is responsible for ensuring the
707 * returned pool is and stays online.
708 *
709 * Return: The worker_pool @work was last associated with. %NULL if none.
710 */
711 static struct worker_pool *get_work_pool(struct work_struct *work)
712 {
713 unsigned long data = atomic_long_read(&work->data);
714 int pool_id;
715
716 assert_rcu_or_pool_mutex();
717
718 if (data & WORK_STRUCT_PWQ)
719 return ((struct pool_workqueue *)
720 (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
721
722 pool_id = data >> WORK_OFFQ_POOL_SHIFT;
723 if (pool_id == WORK_OFFQ_POOL_NONE)
724 return NULL;
725
726 return idr_find(&worker_pool_idr, pool_id);
727 }
728
729 /**
730 * get_work_pool_id - return the worker pool ID a given work is associated with
731 * @work: the work item of interest
732 *
733 * Return: The worker_pool ID @work was last associated with.
734 * %WORK_OFFQ_POOL_NONE if none.
735 */
736 static int get_work_pool_id(struct work_struct *work)
737 {
738 unsigned long data = atomic_long_read(&work->data);
739
740 if (data & WORK_STRUCT_PWQ)
741 return ((struct pool_workqueue *)
742 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
743
744 return data >> WORK_OFFQ_POOL_SHIFT;
745 }
746
747 static void mark_work_canceling(struct work_struct *work)
748 {
749 unsigned long pool_id = get_work_pool_id(work);
750
751 pool_id <<= WORK_OFFQ_POOL_SHIFT;
752 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
753 }
754
755 static bool work_is_canceling(struct work_struct *work)
756 {
757 unsigned long data = atomic_long_read(&work->data);
758
759 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
760 }
761
762 /*
763 * Policy functions. These define the policies on how the global worker
764 * pools are managed. Unless noted otherwise, these functions assume that
765 * they're being called with pool->lock held.
766 */
767
768 static bool __need_more_worker(struct worker_pool *pool)
769 {
770 return !atomic_read(&pool->nr_running);
771 }
772
773 /*
774 * Need to wake up a worker? Called from anything but currently
775 * running workers.
776 *
777 * Note that, because unbound workers never contribute to nr_running, this
778 * function will always return %true for unbound pools as long as the
779 * worklist isn't empty.
780 */
781 static bool need_more_worker(struct worker_pool *pool)
782 {
783 return !list_empty(&pool->worklist) && __need_more_worker(pool);
784 }
785
786 /* Can I start working? Called from busy but !running workers. */
787 static bool may_start_working(struct worker_pool *pool)
788 {
789 return pool->nr_idle;
790 }
791
792 /* Do I need to keep working? Called from currently running workers. */
793 static bool keep_working(struct worker_pool *pool)
794 {
795 return !list_empty(&pool->worklist) &&
796 atomic_read(&pool->nr_running) <= 1;
797 }
798
799 /* Do we need a new worker? Called from manager. */
800 static bool need_to_create_worker(struct worker_pool *pool)
801 {
802 return need_more_worker(pool) && !may_start_working(pool);
803 }
804
805 /* Do we have too many workers and should some go away? */
806 static bool too_many_workers(struct worker_pool *pool)
807 {
808 bool managing = pool->flags & POOL_MANAGER_ACTIVE;
809 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
810 int nr_busy = pool->nr_workers - nr_idle;
811
812 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
813 }
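
/*
 * Worked example (editorial addition): with MAX_IDLE_WORKERS_RATIO == 4 and
 * nr_busy == 8, the check stays false while nr_idle <= 3 ((3 - 2) * 4 = 4,
 * which is < 8) and turns true at nr_idle == 4 ((4 - 2) * 4 = 8 >= 8).  In
 * other words, idle workers beyond the first two are "too many" once they
 * reach a quarter of the busy count.
 */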
814
815 /*
816 * Wake up functions.
817 */
818
819 /* Return the first idle worker. Safe with preemption disabled */
820 static struct worker *first_idle_worker(struct worker_pool *pool)
821 {
822 if (unlikely(list_empty(&pool->idle_list)))
823 return NULL;
824
825 return list_first_entry(&pool->idle_list, struct worker, entry);
826 }
827
828 /**
829 * wake_up_worker - wake up an idle worker
830 * @pool: worker pool to wake worker from
831 *
832 * Wake up the first idle worker of @pool.
833 *
834 * CONTEXT:
835 * spin_lock_irq(pool->lock).
836 */
837 static void wake_up_worker(struct worker_pool *pool)
838 {
839 struct worker *worker = first_idle_worker(pool);
840
841 if (likely(worker))
842 wake_up_process(worker->task);
843 }
844
845 /**
846 * wq_worker_running - a worker is running again
847 * @task: task waking up
848 *
849 * This function is called when a worker returns from schedule()
850 */
851 void wq_worker_running(struct task_struct *task)
852 {
853 struct worker *worker = kthread_data(task);
854
855 if (!worker->sleeping)
856 return;
857 if (!(worker->flags & WORKER_NOT_RUNNING))
858 atomic_inc(&worker->pool->nr_running);
859 worker->sleeping = 0;
860 }
861
862 /**
863 * wq_worker_sleeping - a worker is going to sleep
864 * @task: task going to sleep
865 *
866 * This function is called from schedule() when a busy worker is
867 * going to sleep.
868 */
869 void wq_worker_sleeping(struct task_struct *task)
870 {
871 struct worker *next, *worker = kthread_data(task);
872 struct worker_pool *pool;
873
874 /*
875 * Rescuers, which may not have all the fields set up like normal
876 * workers, also reach here, let's not access anything before
877 * checking NOT_RUNNING.
878 */
879 if (worker->flags & WORKER_NOT_RUNNING)
880 return;
881
882 pool = worker->pool;
883
884 if (WARN_ON_ONCE(worker->sleeping))
885 return;
886
887 worker->sleeping = 1;
888 spin_lock_irq(&pool->lock);
889
890 /*
891 * The counterpart of the following dec_and_test, implied mb,
892 * worklist not empty test sequence is in insert_work().
893 * Please read comment there.
894 *
895 * NOT_RUNNING is clear. This means that we're bound to and
896 * running on the local cpu w/ rq lock held and preemption
897  * disabled, which in turn means that nobody else could be
898 * manipulating idle_list, so dereferencing idle_list without pool
899 * lock is safe.
900 */
901 if (atomic_dec_and_test(&pool->nr_running) &&
902 !list_empty(&pool->worklist)) {
903 next = first_idle_worker(pool);
904 if (next)
905 wake_up_process(next->task);
906 }
907 spin_unlock_irq(&pool->lock);
908 }
909
910 /**
911 * wq_worker_last_func - retrieve worker's last work function
912 * @task: Task to retrieve last work function of.
913 *
914 * Determine the last function a worker executed. This is called from
915 * the scheduler to get a worker's last known identity.
916 *
917 * CONTEXT:
918 * spin_lock_irq(rq->lock)
919 *
920 * This function is called during schedule() when a kworker is going
921 * to sleep. It's used by psi to identify aggregation workers during
922  * dequeuing, to allow periodic aggregation to shut off when that
923 * worker is the last task in the system or cgroup to go to sleep.
924 *
925 * As this function doesn't involve any workqueue-related locking, it
926 * only returns stable values when called from inside the scheduler's
927 * queuing and dequeuing paths, when @task, which must be a kworker,
928 * is guaranteed to not be processing any works.
929 *
930 * Return:
931 * The last work function %current executed as a worker, NULL if it
932 * hasn't executed any work yet.
933 */
934 work_func_t wq_worker_last_func(struct task_struct *task)
935 {
936 struct worker *worker = kthread_data(task);
937
938 return worker->last_func;
939 }
940
941 /**
942 * worker_set_flags - set worker flags and adjust nr_running accordingly
943 * @worker: self
944 * @flags: flags to set
945 *
946 * Set @flags in @worker->flags and adjust nr_running accordingly.
947 *
948 * CONTEXT:
949 * spin_lock_irq(pool->lock)
950 */
951 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
952 {
953 struct worker_pool *pool = worker->pool;
954
955 WARN_ON_ONCE(worker->task != current);
956
957 /* If transitioning into NOT_RUNNING, adjust nr_running. */
958 if ((flags & WORKER_NOT_RUNNING) &&
959 !(worker->flags & WORKER_NOT_RUNNING)) {
960 atomic_dec(&pool->nr_running);
961 }
962
963 worker->flags |= flags;
964 }
965
966 /**
967 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
968 * @worker: self
969 * @flags: flags to clear
970 *
971 * Clear @flags in @worker->flags and adjust nr_running accordingly.
972 *
973 * CONTEXT:
974 * spin_lock_irq(pool->lock)
975 */
976 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
977 {
978 struct worker_pool *pool = worker->pool;
979 unsigned int oflags = worker->flags;
980
981 WARN_ON_ONCE(worker->task != current);
982
983 worker->flags &= ~flags;
984
985 /*
986 * If transitioning out of NOT_RUNNING, increment nr_running. Note
987 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask
988 * of multiple flags, not a single flag.
989 */
990 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
991 if (!(worker->flags & WORKER_NOT_RUNNING))
992 atomic_inc(&pool->nr_running);
993 }
994
995 /**
996 * find_worker_executing_work - find worker which is executing a work
997 * @pool: pool of interest
998 * @work: work to find worker for
999 *
1000 * Find a worker which is executing @work on @pool by searching
1001 * @pool->busy_hash which is keyed by the address of @work. For a worker
1002 * to match, its current execution should match the address of @work and
1003 * its work function. This is to avoid unwanted dependency between
1004 * unrelated work executions through a work item being recycled while still
1005 * being executed.
1006 *
1007 * This is a bit tricky. A work item may be freed once its execution
1008 * starts and nothing prevents the freed area from being recycled for
1009 * another work item. If the same work item address ends up being reused
1010 * before the original execution finishes, workqueue will identify the
1011 * recycled work item as currently executing and make it wait until the
1012 * current execution finishes, introducing an unwanted dependency.
1013 *
1014 * This function checks the work item address and work function to avoid
1015 * false positives. Note that this isn't complete as one may construct a
1016 * work function which can introduce dependency onto itself through a
1017 * recycled work item. Well, if somebody wants to shoot oneself in the
1018 * foot that badly, there's only so much we can do, and if such deadlock
1019 * actually occurs, it should be easy to locate the culprit work function.
1020 *
1021 * CONTEXT:
1022 * spin_lock_irq(pool->lock).
1023 *
1024 * Return:
1025 * Pointer to worker which is executing @work if found, %NULL
1026 * otherwise.
1027 */
1028 static struct worker *find_worker_executing_work(struct worker_pool *pool,
1029 struct work_struct *work)
1030 {
1031 struct worker *worker;
1032
1033 hash_for_each_possible(pool->busy_hash, worker, hentry,
1034 (unsigned long)work)
1035 if (worker->current_work == work &&
1036 worker->current_func == work->func)
1037 return worker;
1038
1039 return NULL;
1040 }
1041
1042 /**
1043 * move_linked_works - move linked works to a list
1044 * @work: start of series of works to be scheduled
1045 * @head: target list to append @work to
1046 * @nextp: out parameter for nested worklist walking
1047 *
1048 * Schedule linked works starting from @work to @head. Work series to
1049 * be scheduled starts at @work and includes any consecutive work with
1050 * WORK_STRUCT_LINKED set in its predecessor.
1051 *
1052 * If @nextp is not NULL, it's updated to point to the next work of
1053 * the last scheduled work. This allows move_linked_works() to be
1054 * nested inside outer list_for_each_entry_safe().
1055 *
1056 * CONTEXT:
1057 * spin_lock_irq(pool->lock).
1058 */
1059 static void move_linked_works(struct work_struct *work, struct list_head *head,
1060 struct work_struct **nextp)
1061 {
1062 struct work_struct *n;
1063
1064 /*
1065 * Linked worklist will always end before the end of the list,
1066 * use NULL for list head.
1067 */
1068 list_for_each_entry_safe_from(work, n, NULL, entry) {
1069 list_move_tail(&work->entry, head);
1070 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1071 break;
1072 }
1073
1074 /*
1075 * If we're already inside safe list traversal and have moved
1076 * multiple works to the scheduled queue, the next position
1077 * needs to be updated.
1078 */
1079 if (nextp)
1080 *nextp = n;
1081 }
1082
1083 /**
1084 * get_pwq - get an extra reference on the specified pool_workqueue
1085 * @pwq: pool_workqueue to get
1086 *
1087 * Obtain an extra reference on @pwq. The caller should guarantee that
1088 * @pwq has positive refcnt and be holding the matching pool->lock.
1089 */
1090 static void get_pwq(struct pool_workqueue *pwq)
1091 {
1092 lockdep_assert_held(&pwq->pool->lock);
1093 WARN_ON_ONCE(pwq->refcnt <= 0);
1094 pwq->refcnt++;
1095 }
1096
1097 /**
1098 * put_pwq - put a pool_workqueue reference
1099 * @pwq: pool_workqueue to put
1100 *
1101 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its
1102 * destruction. The caller should be holding the matching pool->lock.
1103 */
1104 static void put_pwq(struct pool_workqueue *pwq)
1105 {
1106 lockdep_assert_held(&pwq->pool->lock);
1107 if (likely(--pwq->refcnt))
1108 return;
1109 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1110 return;
1111 /*
1112 * @pwq can't be released under pool->lock, bounce to
1113 * pwq_unbound_release_workfn(). This never recurses on the same
1114 * pool->lock as this path is taken only for unbound workqueues and
1115 * the release work item is scheduled on a per-cpu workqueue. To
1116 * avoid lockdep warning, unbound pool->locks are given lockdep
1117 * subclass of 1 in get_unbound_pool().
1118 */
1119 schedule_work(&pwq->unbound_release_work);
1120 }
1121
1122 /**
1123 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1124 * @pwq: pool_workqueue to put (can be %NULL)
1125 *
1126 * put_pwq() with locking. This function also allows %NULL @pwq.
1127 */
1128 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1129 {
1130 if (pwq) {
1131 /*
1132 * As both pwqs and pools are RCU protected, the
1133 * following lock operations are safe.
1134 */
1135 spin_lock_irq(&pwq->pool->lock);
1136 put_pwq(pwq);
1137 spin_unlock_irq(&pwq->pool->lock);
1138 }
1139 }
1140
1141 static void pwq_activate_delayed_work(struct work_struct *work)
1142 {
1143 struct pool_workqueue *pwq = get_work_pwq(work);
1144
1145 trace_workqueue_activate_work(work);
1146 if (list_empty(&pwq->pool->worklist))
1147 pwq->pool->watchdog_ts = jiffies;
1148 move_linked_works(work, &pwq->pool->worklist, NULL);
1149 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1150 pwq->nr_active++;
1151 }
1152
1153 static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
1154 {
1155 struct work_struct *work = list_first_entry(&pwq->delayed_works,
1156 struct work_struct, entry);
1157
1158 pwq_activate_delayed_work(work);
1159 }
1160
1161 /**
1162 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1163 * @pwq: pwq of interest
1164 * @color: color of work which left the queue
1165 *
1166  * A work item has either completed or been removed from the pending queue;
1167 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1168 *
1169 * CONTEXT:
1170 * spin_lock_irq(pool->lock).
1171 */
1172 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1173 {
1174 /* uncolored work items don't participate in flushing or nr_active */
1175 if (color == WORK_NO_COLOR)
1176 goto out_put;
1177
1178 pwq->nr_in_flight[color]--;
1179
1180 pwq->nr_active--;
1181 if (!list_empty(&pwq->delayed_works)) {
1182 /* one down, submit a delayed one */
1183 if (pwq->nr_active < pwq->max_active)
1184 pwq_activate_first_delayed(pwq);
1185 }
1186
1187 /* is flush in progress and are we at the flushing tip? */
1188 if (likely(pwq->flush_color != color))
1189 goto out_put;
1190
1191 /* are there still in-flight works? */
1192 if (pwq->nr_in_flight[color])
1193 goto out_put;
1194
1195 /* this pwq is done, clear flush_color */
1196 pwq->flush_color = -1;
1197
1198 /*
1199 * If this was the last pwq, wake up the first flusher. It
1200 * will handle the rest.
1201 */
1202 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1203 complete(&pwq->wq->first_flusher->done);
1204 out_put:
1205 put_pwq(pwq);
1206 }
1207
1208 /**
1209 * try_to_grab_pending - steal work item from worklist and disable irq
1210 * @work: work item to steal
1211 * @is_dwork: @work is a delayed_work
1212 * @flags: place to store irq state
1213 *
1214 * Try to grab PENDING bit of @work. This function can handle @work in any
1215 * stable state - idle, on timer or on worklist.
1216 *
1217 * Return:
1218 * 1 if @work was pending and we successfully stole PENDING
1219 * 0 if @work was idle and we claimed PENDING
1220 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
1221 * -ENOENT if someone else is canceling @work, this state may persist
1222 * for arbitrarily long
1223 *
1224 * Note:
1225 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
1226 * interrupted while holding PENDING and @work off queue, irq must be
1227 * disabled on entry. This, combined with delayed_work->timer being
1228  * irqsafe, ensures that we return -EAGAIN for a finite short period of time.
1229 *
1230 * On successful return, >= 0, irq is disabled and the caller is
1231 * responsible for releasing it using local_irq_restore(*@flags).
1232 *
1233 * This function is safe to call from any context including IRQ handler.
1234 */
1235 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1236 unsigned long *flags)
1237 {
1238 struct worker_pool *pool;
1239 struct pool_workqueue *pwq;
1240
1241 local_irq_save(*flags);
1242
1243 /* try to steal the timer if it exists */
1244 if (is_dwork) {
1245 struct delayed_work *dwork = to_delayed_work(work);
1246
1247 /*
1248 * dwork->timer is irqsafe. If del_timer() fails, it's
1249 * guaranteed that the timer is not queued anywhere and not
1250 * running on the local CPU.
1251 */
1252 if (likely(del_timer(&dwork->timer)))
1253 return 1;
1254 }
1255
1256 /* try to claim PENDING the normal way */
1257 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1258 return 0;
1259
1260 rcu_read_lock();
1261 /*
1262 * The queueing is in progress, or it is already queued. Try to
1263 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1264 */
1265 pool = get_work_pool(work);
1266 if (!pool)
1267 goto fail;
1268
1269 spin_lock(&pool->lock);
1270 /*
1271 * work->data is guaranteed to point to pwq only while the work
1272 * item is queued on pwq->wq, and both updating work->data to point
1273 * to pwq on queueing and to pool on dequeueing are done under
1274 * pwq->pool->lock. This in turn guarantees that, if work->data
1275 * points to pwq which is associated with a locked pool, the work
1276 * item is currently queued on that pool.
1277 */
1278 pwq = get_work_pwq(work);
1279 if (pwq && pwq->pool == pool) {
1280 debug_work_deactivate(work);
1281
1282 /*
1283 * A delayed work item cannot be grabbed directly because
1284 * it might have linked NO_COLOR work items which, if left
1285 * on the delayed_list, will confuse pwq->nr_active
1286 * management later on and cause stall. Make sure the work
1287 * item is activated before grabbing.
1288 */
1289 if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1290 pwq_activate_delayed_work(work);
1291
1292 list_del_init(&work->entry);
1293 pwq_dec_nr_in_flight(pwq, get_work_color(work));
1294
1295 /* work->data points to pwq iff queued, point to pool */
1296 set_work_pool_and_keep_pending(work, pool->id);
1297
1298 spin_unlock(&pool->lock);
1299 rcu_read_unlock();
1300 return 1;
1301 }
1302 spin_unlock(&pool->lock);
1303 fail:
1304 rcu_read_unlock();
1305 local_irq_restore(*flags);
1306 if (work_is_canceling(work))
1307 return -ENOENT;
1308 cpu_relax();
1309 return -EAGAIN;
1310 }
1311
1312 /**
1313 * insert_work - insert a work into a pool
1314 * @pwq: pwq @work belongs to
1315 * @work: work to insert
1316 * @head: insertion point
1317 * @extra_flags: extra WORK_STRUCT_* flags to set
1318 *
1319 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
1320 * work_struct flags.
1321 *
1322 * CONTEXT:
1323 * spin_lock_irq(pool->lock).
1324 */
1325 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1326 struct list_head *head, unsigned int extra_flags)
1327 {
1328 struct worker_pool *pool = pwq->pool;
1329
1330 /* we own @work, set data and link */
1331 set_work_pwq(work, pwq, extra_flags);
1332 list_add_tail(&work->entry, head);
1333 get_pwq(pwq);
1334
1335 /*
1336 * Ensure either wq_worker_sleeping() sees the above
1337 * list_add_tail() or we see zero nr_running to avoid workers lying
1338 * around lazily while there are works to be processed.
1339 */
1340 smp_mb();
1341
1342 if (__need_more_worker(pool))
1343 wake_up_worker(pool);
1344 }
1345
1346 /*
1347 * Test whether @work is being queued from another work executing on the
1348 * same workqueue.
1349 */
1350 static bool is_chained_work(struct workqueue_struct *wq)
1351 {
1352 struct worker *worker;
1353
1354 worker = current_wq_worker();
1355 /*
1356 * Return %true iff I'm a worker executing a work item on @wq. If
1357 * I'm @worker, it's safe to dereference it without locking.
1358 */
1359 return worker && worker->current_pwq->wq == wq;
1360 }
1361
1362 /*
1363 * When queueing an unbound work item to a wq, prefer local CPU if allowed
1364 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
1365 * avoid perturbing sensitive tasks.
1366 */
1367 static int wq_select_unbound_cpu(int cpu)
1368 {
1369 static bool printed_dbg_warning;
1370 int new_cpu;
1371
1372 if (likely(!wq_debug_force_rr_cpu)) {
1373 if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1374 return cpu;
1375 } else if (!printed_dbg_warning) {
1376 pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
1377 printed_dbg_warning = true;
1378 }
1379
1380 if (cpumask_empty(wq_unbound_cpumask))
1381 return cpu;
1382
1383 new_cpu = __this_cpu_read(wq_rr_cpu_last);
1384 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1385 if (unlikely(new_cpu >= nr_cpu_ids)) {
1386 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1387 if (unlikely(new_cpu >= nr_cpu_ids))
1388 return cpu;
1389 }
1390 __this_cpu_write(wq_rr_cpu_last, new_cpu);
1391
1392 return new_cpu;
1393 }
1394
1395 static void __queue_work(int cpu, struct workqueue_struct *wq,
1396 struct work_struct *work)
1397 {
1398 struct pool_workqueue *pwq;
1399 struct worker_pool *last_pool;
1400 struct list_head *worklist;
1401 unsigned int work_flags;
1402 unsigned int req_cpu = cpu;
1403
1404 /*
1405 * While a work item is PENDING && off queue, a task trying to
1406 * steal the PENDING will busy-loop waiting for it to either get
1407 * queued or lose PENDING. Grabbing PENDING and queueing should
1408 * happen with IRQ disabled.
1409 */
1410 lockdep_assert_irqs_disabled();
1411
1412 debug_work_activate(work);
1413
1414 /* if draining, only works from the same workqueue are allowed */
1415 if (unlikely(wq->flags & __WQ_DRAINING) &&
1416 WARN_ON_ONCE(!is_chained_work(wq)))
1417 return;
1418 rcu_read_lock();
1419 retry:
1420 if (req_cpu == WORK_CPU_UNBOUND)
1421 cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1422
1423 /* pwq which will be used unless @work is executing elsewhere */
1424 if (!(wq->flags & WQ_UNBOUND))
1425 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1426 else
1427 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1428
1429 /*
1430 * If @work was previously on a different pool, it might still be
1431 * running there, in which case the work needs to be queued on that
1432 * pool to guarantee non-reentrancy.
1433 */
1434 last_pool = get_work_pool(work);
1435 if (last_pool && last_pool != pwq->pool) {
1436 struct worker *worker;
1437
1438 spin_lock(&last_pool->lock);
1439
1440 worker = find_worker_executing_work(last_pool, work);
1441
1442 if (worker && worker->current_pwq->wq == wq) {
1443 pwq = worker->current_pwq;
1444 } else {
1445 /* meh... not running there, queue here */
1446 spin_unlock(&last_pool->lock);
1447 spin_lock(&pwq->pool->lock);
1448 }
1449 } else {
1450 spin_lock(&pwq->pool->lock);
1451 }
1452
1453 /*
1454 * pwq is determined and locked. For unbound pools, we could have
1455 * raced with pwq release and it could already be dead. If its
1456 * refcnt is zero, repeat pwq selection. Note that pwqs never die
1457 * without another pwq replacing it in the numa_pwq_tbl or while
1458 * work items are executing on it, so the retrying is guaranteed to
1459 * make forward-progress.
1460 */
1461 if (unlikely(!pwq->refcnt)) {
1462 if (wq->flags & WQ_UNBOUND) {
1463 spin_unlock(&pwq->pool->lock);
1464 cpu_relax();
1465 goto retry;
1466 }
1467 /* oops */
1468 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1469 wq->name, cpu);
1470 }
1471
1472 /* pwq determined, queue */
1473 trace_workqueue_queue_work(req_cpu, pwq, work);
1474
1475 if (WARN_ON(!list_empty(&work->entry)))
1476 goto out;
1477
1478 pwq->nr_in_flight[pwq->work_color]++;
1479 work_flags = work_color_to_flags(pwq->work_color);
1480
1481 if (likely(pwq->nr_active < pwq->max_active)) {
1482 trace_workqueue_activate_work(work);
1483 pwq->nr_active++;
1484 worklist = &pwq->pool->worklist;
1485 if (list_empty(worklist))
1486 pwq->pool->watchdog_ts = jiffies;
1487 } else {
1488 work_flags |= WORK_STRUCT_DELAYED;
1489 worklist = &pwq->delayed_works;
1490 }
1491
1492 insert_work(pwq, work, worklist, work_flags);
1493
1494 out:
1495 spin_unlock(&pwq->pool->lock);
1496 rcu_read_unlock();
1497 }
1498
1499 /**
1500 * queue_work_on - queue work on specific cpu
1501 * @cpu: CPU number to execute work on
1502 * @wq: workqueue to use
1503 * @work: work to queue
1504 *
1505 * We queue the work to a specific CPU, the caller must ensure it
1506 * can't go away.
1507 *
1508 * Return: %false if @work was already on a queue, %true otherwise.
1509 */
1510 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1511 struct work_struct *work)
1512 {
1513 bool ret = false;
1514 unsigned long flags;
1515
1516 local_irq_save(flags);
1517
1518 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1519 __queue_work(cpu, wq, work);
1520 ret = true;
1521 }
1522
1523 local_irq_restore(flags);
1524 return ret;
1525 }
1526 EXPORT_SYMBOL(queue_work_on);
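
/*
 * Illustrative sketch (editorial addition): pinning a work item to a
 * specific CPU with queue_work_on().  The caller must ensure the CPU can't
 * go away; the work function and helper below are hypothetical.
 */
static void example_percpu_work_fn(struct work_struct *work)
{
	pr_info("bound work ran on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(example_percpu_work, example_percpu_work_fn);

static void example_kick_cpu(int cpu)
{
	queue_work_on(cpu, system_wq, &example_percpu_work);
}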
1527
1528 /**
1529 * workqueue_select_cpu_near - Select a CPU based on NUMA node
1530 * @node: NUMA node ID that we want to select a CPU from
1531 *
1532 * This function will attempt to find a "random" cpu available on a given
1533 * node. If there are no CPUs available on the given node it will return
1534 * WORK_CPU_UNBOUND indicating that we should just schedule to any
1535 * available CPU if we need to schedule this work.
1536 */
1537 static int workqueue_select_cpu_near(int node)
1538 {
1539 int cpu;
1540
1541 /* No point in doing this if NUMA isn't enabled for workqueues */
1542 if (!wq_numa_enabled)
1543 return WORK_CPU_UNBOUND;
1544
1545 /* Delay binding to CPU if node is not valid or online */
1546 if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1547 return WORK_CPU_UNBOUND;
1548
1549 /* Use local node/cpu if we are already there */
1550 cpu = raw_smp_processor_id();
1551 if (node == cpu_to_node(cpu))
1552 return cpu;
1553
1554  /* Use "random", otherwise known as "first", online CPU of node */
1555 cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1556
1557 /* If CPU is valid return that, otherwise just defer */
1558 return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1559 }
1560
1561 /**
1562 * queue_work_node - queue work on a "random" cpu for a given NUMA node
1563 * @node: NUMA node that we are targeting the work for
1564 * @wq: workqueue to use
1565 * @work: work to queue
1566 *
1567 * We queue the work to a "random" CPU within a given NUMA node. The basic
1568 * idea here is to provide a way to somehow associate work with a given
1569 * NUMA node.
1570 *
1571 * This function will only make a best effort attempt at getting this onto
1572 * the right NUMA node. If no node is requested or the requested node is
1573 * offline then we just fall back to standard queue_work behavior.
1574 *
1575 * Currently the "random" CPU ends up being the first available CPU in the
1576 * intersection of cpu_online_mask and the cpumask of the node, unless we
1577 * are running on the node. In that case we just use the current CPU.
1578 *
1579 * Return: %false if @work was already on a queue, %true otherwise.
1580 */
1581 bool queue_work_node(int node, struct workqueue_struct *wq,
1582 struct work_struct *work)
1583 {
1584 unsigned long flags;
1585 bool ret = false;
1586
1587 /*
1588 * This current implementation is specific to unbound workqueues.
1589 * Specifically we only return the first available CPU for a given
1590 * node instead of cycling through individual CPUs within the node.
1591 *
1592 * If this is used with a per-cpu workqueue then the logic in
1593 * workqueue_select_cpu_near would need to be updated to allow for
1594 * some round robin type logic.
1595 */
1596 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
1597
1598 local_irq_save(flags);
1599
1600 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1601 int cpu = workqueue_select_cpu_near(node);
1602
1603 __queue_work(cpu, wq, work);
1604 ret = true;
1605 }
1606
1607 local_irq_restore(flags);
1608 return ret;
1609 }
1610 EXPORT_SYMBOL_GPL(queue_work_node);
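
/*
 * Illustrative sketch (editorial addition): expressing a NUMA-node
 * preference with queue_work_node().  The workqueue must be unbound
 * (WQ_UNBOUND); the helper and parameters are hypothetical.
 */
static void example_queue_near_node(struct workqueue_struct *unbound_wq,
				    struct work_struct *work, int node)
{
	/* falls back to any available CPU if @node has none online */
	queue_work_node(node, unbound_wq, work);
}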
1611
1612 void delayed_work_timer_fn(struct timer_list *t)
1613 {
1614 struct delayed_work *dwork = from_timer(dwork, t, timer);
1615
1616 /* should have been called from irqsafe timer with irq already off */
1617 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1618 }
1619 EXPORT_SYMBOL(delayed_work_timer_fn);
1620
1621 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1622 struct delayed_work *dwork, unsigned long delay)
1623 {
1624 struct timer_list *timer = &dwork->timer;
1625 struct work_struct *work = &dwork->work;
1626
1627 WARN_ON_ONCE(!wq);
1628 WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
1629 WARN_ON_ONCE(timer_pending(timer));
1630 WARN_ON_ONCE(!list_empty(&work->entry));
1631
1632 /*
1633 * If @delay is 0, queue @dwork->work immediately. This is for
1634 * both optimization and correctness. The earliest @timer can
1635 * expire is on the closest next tick and delayed_work users depend
1636 * on that there's no such delay when @delay is 0.
1637 */
1638 if (!delay) {
1639 __queue_work(cpu, wq, &dwork->work);
1640 return;
1641 }
1642
1643 dwork->wq = wq;
1644 dwork->cpu = cpu;
1645 timer->expires = jiffies + delay;
1646
1647 if (unlikely(cpu != WORK_CPU_UNBOUND))
1648 add_timer_on(timer, cpu);
1649 else
1650 add_timer(timer);
1651 }
1652
1653 /**
1654 * queue_delayed_work_on - queue work on specific CPU after delay
1655 * @cpu: CPU number to execute work on
1656 * @wq: workqueue to use
1657 * @dwork: work to queue
1658 * @delay: number of jiffies to wait before queueing
1659 *
1660 * Return: %false if @work was already on a queue, %true otherwise. If
1661 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1662 * execution.
1663 */
1664 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1665 struct delayed_work *dwork, unsigned long delay)
1666 {
1667 struct work_struct *work = &dwork->work;
1668 bool ret = false;
1669 unsigned long flags;
1670
1671 /* read the comment in __queue_work() */
1672 local_irq_save(flags);
1673
1674 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1675 __queue_delayed_work(cpu, wq, dwork, delay);
1676 ret = true;
1677 }
1678
1679 local_irq_restore(flags);
1680 return ret;
1681 }
1682 EXPORT_SYMBOL(queue_delayed_work_on);
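
/*
 * Illustrative sketch (editorial addition): arming a delayed work item.
 * The handler, the one second delay and the helper are hypothetical.
 */
static void example_timeout_fn(struct work_struct *work)
{
	pr_info("delayed work fired\n");
}

static DECLARE_DELAYED_WORK(example_timeout_work, example_timeout_fn);

static void example_arm_timeout(void)
{
	/* runs ~1s from now on whichever CPU the workqueue picks */
	queue_delayed_work(system_wq, &example_timeout_work,
			   msecs_to_jiffies(1000));
}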
1683
1684 /**
1685 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1686 * @cpu: CPU number to execute work on
1687 * @wq: workqueue to use
1688 * @dwork: work to queue
1689 * @delay: number of jiffies to wait before queueing
1690 *
1691 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1692 * modify @dwork's timer so that it expires after @delay. If @delay is
1693 * zero, @dwork is guaranteed to be scheduled immediately regardless of its
1694 * current state.
1695 *
1696 * Return: %false if @dwork was idle and queued, %true if @dwork was
1697 * pending and its timer was modified.
1698 *
1699 * This function is safe to call from any context including IRQ handlers.
1700 * See try_to_grab_pending() for details.
1701 */
1702 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1703 struct delayed_work *dwork, unsigned long delay)
1704 {
1705 unsigned long flags;
1706 int ret;
1707
1708 do {
1709 ret = try_to_grab_pending(&dwork->work, true, &flags);
1710 } while (unlikely(ret == -EAGAIN));
1711
1712 if (likely(ret >= 0)) {
1713 __queue_delayed_work(cpu, wq, dwork, delay);
1714 local_irq_restore(flags);
1715 }
1716
1717 /* -ENOENT from try_to_grab_pending() becomes %true */
1718 return ret;
1719 }
1720 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
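
/*
 * Usage sketch (illustrative only, compiled out): (re)arming a timeout with
 * mod_delayed_work_on() regardless of whether it is already pending, e.g.
 * pushing a watchdog further into the future. The ex_timeout_* names are
 * made up for this example.
 */
#if 0
static void ex_timeout_fn(struct work_struct *work)
{
	/* handle the expired timeout */
}

static DECLARE_DELAYED_WORK(ex_timeout_work, ex_timeout_fn);

static void ex_kick_timeout(void)
{
	/*
	 * Unlike queue_delayed_work_on(), this also works while the work is
	 * already queued: the existing timer is simply pushed out.
	 */
	mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &ex_timeout_work,
			    msecs_to_jiffies(500));
}
#endif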
1721
1722 static void rcu_work_rcufn(struct rcu_head *rcu)
1723 {
1724 struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
1725
1726 /* read the comment in __queue_work() */
1727 local_irq_disable();
1728 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
1729 local_irq_enable();
1730 }
1731
1732 /**
1733 * queue_rcu_work - queue work after an RCU grace period
1734 * @wq: workqueue to use
1735 * @rwork: work to queue
1736 *
1737 * Return: %false if @rwork was already pending, %true otherwise. Note
1738 * that a full RCU grace period is guaranteed only after a %true return.
1739 * While @rwork is guaranteed to be executed after a %false return, the
1740 * execution may happen before a full RCU grace period has passed.
1741 */
1742 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
1743 {
1744 struct work_struct *work = &rwork->work;
1745
1746 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1747 rwork->wq = wq;
1748 call_rcu(&rwork->rcu, rcu_work_rcufn);
1749 return true;
1750 }
1751
1752 return false;
1753 }
1754 EXPORT_SYMBOL(queue_rcu_work);
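
/*
 * Usage sketch (illustrative only, compiled out): deferring the freeing of an
 * object until after an RCU grace period with queue_rcu_work(). The ex_obj
 * structure and its callers are made up for this example.
 */
#if 0
struct ex_obj {
	struct rcu_work rwork;
	/* payload that readers may still reference until a grace period */
};

static void ex_obj_free_fn(struct work_struct *work)
{
	struct ex_obj *obj = container_of(to_rcu_work(work), struct ex_obj, rwork);

	kfree(obj);
}

static void ex_obj_schedule_free(struct ex_obj *obj)
{
	INIT_RCU_WORK(&obj->rwork, ex_obj_free_fn);
	/* ex_obj_free_fn() runs in process context after a grace period */
	queue_rcu_work(system_wq, &obj->rwork);
}
#endif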
1755
1756 /**
1757 * worker_enter_idle - enter idle state
1758 * @worker: worker which is entering idle state
1759 *
1760 * @worker is entering idle state. Update stats and idle timer if
1761 * necessary.
1762 *
1763 * LOCKING:
1764 * spin_lock_irq(pool->lock).
1765 */
1766 static void worker_enter_idle(struct worker *worker)
1767 {
1768 struct worker_pool *pool = worker->pool;
1769
1770 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1771 WARN_ON_ONCE(!list_empty(&worker->entry) &&
1772 (worker->hentry.next || worker->hentry.pprev)))
1773 return;
1774
1775 /* can't use worker_set_flags(), also called from create_worker() */
1776 worker->flags |= WORKER_IDLE;
1777 pool->nr_idle++;
1778 worker->last_active = jiffies;
1779
1780 /* idle_list is LIFO */
1781 list_add(&worker->entry, &pool->idle_list);
1782
1783 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1784 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1785
1786 /*
1787 * Sanity check nr_running. Because unbind_workers() releases
1788 * pool->lock between setting %WORKER_UNBOUND and zapping
1789 * nr_running, the warning may trigger spuriously. Check iff
1790 * unbind is not in progress.
1791 */
1792 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1793 pool->nr_workers == pool->nr_idle &&
1794 atomic_read(&pool->nr_running));
1795 }
1796
1797 /**
1798 * worker_leave_idle - leave idle state
1799 * @worker: worker which is leaving idle state
1800 *
1801 * @worker is leaving idle state. Update stats.
1802 *
1803 * LOCKING:
1804 * spin_lock_irq(pool->lock).
1805 */
1806 static void worker_leave_idle(struct worker *worker)
1807 {
1808 struct worker_pool *pool = worker->pool;
1809
1810 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1811 return;
1812 worker_clr_flags(worker, WORKER_IDLE);
1813 pool->nr_idle--;
1814 list_del_init(&worker->entry);
1815 }
1816
1817 static struct worker *alloc_worker(int node)
1818 {
1819 struct worker *worker;
1820
1821 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1822 if (worker) {
1823 INIT_LIST_HEAD(&worker->entry);
1824 INIT_LIST_HEAD(&worker->scheduled);
1825 INIT_LIST_HEAD(&worker->node);
1826 /* on creation a worker is in !idle && prep state */
1827 worker->flags = WORKER_PREP;
1828 }
1829 return worker;
1830 }
1831
1832 /**
1833 * worker_attach_to_pool() - attach a worker to a pool
1834 * @worker: worker to be attached
1835 * @pool: the target pool
1836 *
1837 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
1838 * cpu-binding of @worker are kept coordinated with the pool across
1839 * cpu-[un]hotplugs.
1840 */
1841 static void worker_attach_to_pool(struct worker *worker,
1842 struct worker_pool *pool)
1843 {
1844 mutex_lock(&wq_pool_attach_mutex);
1845
1846 /*
1847 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1848 * online CPUs. It'll be re-applied when any of the CPUs come up.
1849 */
1850 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1851
1852 /*
1853 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
1854 * stable across this function. See the comments above the flag
1855 * definition for details.
1856 */
1857 if (pool->flags & POOL_DISASSOCIATED)
1858 worker->flags |= WORKER_UNBOUND;
1859
1860 list_add_tail(&worker->node, &pool->workers);
1861 worker->pool = pool;
1862
1863 mutex_unlock(&wq_pool_attach_mutex);
1864 }
1865
1866 /**
1867 * worker_detach_from_pool() - detach a worker from its pool
1868 * @worker: worker which is attached to its pool
1869 *
1870 * Undo the attaching which had been done in worker_attach_to_pool(). The
1871 * caller worker shouldn't access the pool after detaching unless it has
1872 * another reference to the pool.
1873 */
1874 static void worker_detach_from_pool(struct worker *worker)
1875 {
1876 struct worker_pool *pool = worker->pool;
1877 struct completion *detach_completion = NULL;
1878
1879 mutex_lock(&wq_pool_attach_mutex);
1880
1881 list_del(&worker->node);
1882 worker->pool = NULL;
1883
1884 if (list_empty(&pool->workers))
1885 detach_completion = pool->detach_completion;
1886 mutex_unlock(&wq_pool_attach_mutex);
1887
1888 /* clear leftover flags without pool->lock after it is detached */
1889 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1890
1891 if (detach_completion)
1892 complete(detach_completion);
1893 }
1894
1895 /**
1896 * create_worker - create a new workqueue worker
1897 * @pool: pool the new worker will belong to
1898 *
1899 * Create and start a new worker which is attached to @pool.
1900 *
1901 * CONTEXT:
1902 * Might sleep. Does GFP_KERNEL allocations.
1903 *
1904 * Return:
1905 * Pointer to the newly created worker.
1906 */
1907 static struct worker *create_worker(struct worker_pool *pool)
1908 {
1909 struct worker *worker = NULL;
1910 int id = -1;
1911 char id_buf[16];
1912
1913 /* ID is needed to determine kthread name */
1914 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1915 if (id < 0)
1916 goto fail;
1917
1918 worker = alloc_worker(pool->node);
1919 if (!worker)
1920 goto fail;
1921
1922 worker->id = id;
1923
1924 if (pool->cpu >= 0)
1925 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1926 pool->attrs->nice < 0 ? "H" : "");
1927 else
1928 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1929
1930 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1931 "kworker/%s", id_buf);
1932 if (IS_ERR(worker->task))
1933 goto fail;
1934
1935 set_user_nice(worker->task, pool->attrs->nice);
1936 kthread_bind_mask(worker->task, pool->attrs->cpumask);
1937
1938 /* successful, attach the worker to the pool */
1939 worker_attach_to_pool(worker, pool);
1940
1941 /* start the newly created worker */
1942 spin_lock_irq(&pool->lock);
1943 worker->pool->nr_workers++;
1944 worker_enter_idle(worker);
1945 wake_up_process(worker->task);
1946 spin_unlock_irq(&pool->lock);
1947
1948 return worker;
1949
1950 fail:
1951 if (id >= 0)
1952 ida_simple_remove(&pool->worker_ida, id);
1953 kfree(worker);
1954 return NULL;
1955 }
1956
1957 /**
1958 * destroy_worker - destroy a workqueue worker
1959 * @worker: worker to be destroyed
1960 *
1961 * Destroy @worker and adjust @pool stats accordingly. The worker should
1962 * be idle.
1963 *
1964 * CONTEXT:
1965 * spin_lock_irq(pool->lock).
1966 */
1967 static void destroy_worker(struct worker *worker)
1968 {
1969 struct worker_pool *pool = worker->pool;
1970
1971 lockdep_assert_held(&pool->lock);
1972
1973 /* sanity check frenzy */
1974 if (WARN_ON(worker->current_work) ||
1975 WARN_ON(!list_empty(&worker->scheduled)) ||
1976 WARN_ON(!(worker->flags & WORKER_IDLE)))
1977 return;
1978
1979 pool->nr_workers--;
1980 pool->nr_idle--;
1981
1982 list_del_init(&worker->entry);
1983 worker->flags |= WORKER_DIE;
1984 wake_up_process(worker->task);
1985 }
1986
1987 static void idle_worker_timeout(struct timer_list *t)
1988 {
1989 struct worker_pool *pool = from_timer(pool, t, idle_timer);
1990
1991 spin_lock_irq(&pool->lock);
1992
1993 while (too_many_workers(pool)) {
1994 struct worker *worker;
1995 unsigned long expires;
1996
1997 /* idle_list is kept in LIFO order, check the last one */
1998 worker = list_entry(pool->idle_list.prev, struct worker, entry);
1999 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2000
2001 if (time_before(jiffies, expires)) {
2002 mod_timer(&pool->idle_timer, expires);
2003 break;
2004 }
2005
2006 destroy_worker(worker);
2007 }
2008
2009 spin_unlock_irq(&pool->lock);
2010 }
2011
2012 static void send_mayday(struct work_struct *work)
2013 {
2014 struct pool_workqueue *pwq = get_work_pwq(work);
2015 struct workqueue_struct *wq = pwq->wq;
2016
2017 lockdep_assert_held(&wq_mayday_lock);
2018
2019 if (!wq->rescuer)
2020 return;
2021
2022 /* mayday mayday mayday */
2023 if (list_empty(&pwq->mayday_node)) {
2024 /*
2025 * If @pwq is for an unbound wq, its base ref may be put at
2026 * any time due to an attribute change. Pin @pwq until the
2027 * rescuer is done with it.
2028 */
2029 get_pwq(pwq);
2030 list_add_tail(&pwq->mayday_node, &wq->maydays);
2031 wake_up_process(wq->rescuer->task);
2032 }
2033 }
2034
2035 static void pool_mayday_timeout(struct timer_list *t)
2036 {
2037 struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2038 struct work_struct *work;
2039
2040 spin_lock_irq(&pool->lock);
2041 spin_lock(&wq_mayday_lock); /* for wq->maydays */
2042
2043 if (need_to_create_worker(pool)) {
2044 /*
2045 * We've been trying to create a new worker but
2046 * haven't been successful. We might be hitting an
2047 * allocation deadlock. Send distress signals to
2048 * rescuers.
2049 */
2050 list_for_each_entry(work, &pool->worklist, entry)
2051 send_mayday(work);
2052 }
2053
2054 spin_unlock(&wq_mayday_lock);
2055 spin_unlock_irq(&pool->lock);
2056
2057 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2058 }
2059
2060 /**
2061 * maybe_create_worker - create a new worker if necessary
2062 * @pool: pool to create a new worker for
2063 *
2064 * Create a new worker for @pool if necessary. @pool is guaranteed to
2065 * have at least one idle worker on return from this function. If
2066 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
2067 * sent to all rescuers with works scheduled on @pool to resolve
2068 * possible allocation deadlock.
2069 *
2070 * On return, need_to_create_worker() is guaranteed to be %false and
2071 * may_start_working() %true.
2072 *
2073 * LOCKING:
2074 * spin_lock_irq(pool->lock) which may be released and regrabbed
2075 * multiple times. Does GFP_KERNEL allocations. Called only from
2076 * manager.
2077 */
2078 static void maybe_create_worker(struct worker_pool *pool)
2079 __releases(&pool->lock)
2080 __acquires(&pool->lock)
2081 {
2082 restart:
2083 spin_unlock_irq(&pool->lock);
2084
2085 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
2086 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2087
2088 while (true) {
2089 if (create_worker(pool) || !need_to_create_worker(pool))
2090 break;
2091
2092 schedule_timeout_interruptible(CREATE_COOLDOWN);
2093
2094 if (!need_to_create_worker(pool))
2095 break;
2096 }
2097
2098 del_timer_sync(&pool->mayday_timer);
2099 spin_lock_irq(&pool->lock);
2100 /*
2101 * This is necessary even after a new worker was just successfully
2102 * created as @pool->lock was dropped and the new worker might have
2103 * already become busy.
2104 */
2105 if (need_to_create_worker(pool))
2106 goto restart;
2107 }
2108
2109 /**
2110 * manage_workers - manage worker pool
2111 * @worker: self
2112 *
2113 * Assume the manager role and manage the worker pool @worker belongs
2114 * to. At any given time, there can be only zero or one manager per
2115 * pool. The exclusion is handled automatically by this function.
2116 *
2117 * The caller can safely start processing works on false return. On
2118 * true return, it's guaranteed that need_to_create_worker() is false
2119 * and may_start_working() is true.
2120 *
2121 * CONTEXT:
2122 * spin_lock_irq(pool->lock) which may be released and regrabbed
2123 * multiple times. Does GFP_KERNEL allocations.
2124 *
2125 * Return:
2126 * %false if the pool doesn't need management and the caller can safely
2127 * start processing works, %true if management function was performed and
2128 * the conditions that the caller verified before calling the function may
2129 * no longer be true.
2130 */
2131 static bool manage_workers(struct worker *worker)
2132 {
2133 struct worker_pool *pool = worker->pool;
2134
2135 if (pool->flags & POOL_MANAGER_ACTIVE)
2136 return false;
2137
2138 pool->flags |= POOL_MANAGER_ACTIVE;
2139 pool->manager = worker;
2140
2141 maybe_create_worker(pool);
2142
2143 pool->manager = NULL;
2144 pool->flags &= ~POOL_MANAGER_ACTIVE;
2145 wake_up(&wq_manager_wait);
2146 return true;
2147 }
2148
2149 /**
2150 * process_one_work - process single work
2151 * @worker: self
2152 * @work: work to process
2153 *
2154 * Process @work. This function contains all the logic necessary to
2155 * process a single work item, including synchronization against and
2156 * interaction with other workers on the same cpu, queueing and
2157 * flushing. As long as the context requirement is met, any worker can
2158 * call this function to process a work item.
2159 *
2160 * CONTEXT:
2161 * spin_lock_irq(pool->lock) which is released and regrabbed.
2162 */
2163 static void process_one_work(struct worker *worker, struct work_struct *work)
2164 __releases(&pool->lock)
2165 __acquires(&pool->lock)
2166 {
2167 struct pool_workqueue *pwq = get_work_pwq(work);
2168 struct worker_pool *pool = worker->pool;
2169 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2170 int work_color;
2171 struct worker *collision;
2172 #ifdef CONFIG_LOCKDEP
2173 /*
2174 * It is permissible to free the struct work_struct from
2175 * inside the function that is called from it; we need to take
2176 * that into account for lockdep too. To avoid bogus "held
2177 * lock freed" warnings as well as problems when looking into
2178 * work->lockdep_map, make a copy and use that here.
2179 */
2180 struct lockdep_map lockdep_map;
2181
2182 lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2183 #endif
2184 /* ensure we're on the correct CPU */
2185 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2186 raw_smp_processor_id() != pool->cpu);
2187
2188 /*
2189 * A single work shouldn't be executed concurrently by
2190 * multiple workers on a single cpu. Check whether anyone is
2191 * already processing the work. If so, defer the work to the
2192 * currently executing one.
2193 */
2194 collision = find_worker_executing_work(pool, work);
2195 if (unlikely(collision)) {
2196 move_linked_works(work, &collision->scheduled, NULL);
2197 return;
2198 }
2199
2200 /* claim and dequeue */
2201 debug_work_deactivate(work);
2202 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2203 worker->current_work = work;
2204 worker->current_func = work->func;
2205 worker->current_pwq = pwq;
2206 work_color = get_work_color(work);
2207
2208 /*
2209 * Record wq name for cmdline and debug reporting, may get
2210 * overridden through set_worker_desc().
2211 */
2212 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2213
2214 list_del_init(&work->entry);
2215
2216 /*
2217 * CPU intensive works don't participate in concurrency management.
2218 * They're the scheduler's responsibility. This takes @worker out
2219 * of concurrency management and the next code block will chain
2220 * execution of the pending work items.
2221 */
2222 if (unlikely(cpu_intensive))
2223 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2224
2225 /*
2226 * Wake up another worker if necessary. The condition is always
2227 * false for normal per-cpu workers since nr_running would always
2228 * be >= 1 at this point. This is used to chain execution of the
2229 * pending work items for WORKER_NOT_RUNNING workers such as the
2230 * UNBOUND and CPU_INTENSIVE ones.
2231 */
2232 if (need_more_worker(pool))
2233 wake_up_worker(pool);
2234
2235 /*
2236 * Record the last pool and clear PENDING which should be the last
2237 * update to @work. Also, do this inside @pool->lock so that
2238 * PENDING and queued state changes happen together while IRQ is
2239 * disabled.
2240 */
2241 set_work_pool_and_clear_pending(work, pool->id);
2242
2243 spin_unlock_irq(&pool->lock);
2244
2245 lock_map_acquire(&pwq->wq->lockdep_map);
2246 lock_map_acquire(&lockdep_map);
2247 /*
2248 * Strictly speaking we should mark the invariant state without holding
2249 * any locks, that is, before these two lock_map_acquire()'s.
2250 *
2251 * However, that would result in:
2252 *
2253 * A(W1)
2254 * WFC(C)
2255 * A(W1)
2256 * C(C)
2257 *
2258 * Which would create W1->C->W1 dependencies, even though there is no
2259 * actual deadlock possible. There are two solutions, using a
2260 * read-recursive acquire on the work(queue) 'locks', but this will then
2261 * hit the lockdep limitation on recursive locks, or simply discard
2262 * these locks.
2263 *
2264 * AFAICT there is no possible deadlock scenario between the
2265 * flush_work() and complete() primitives (except for single-threaded
2266 * workqueues), so hiding them isn't a problem.
2267 */
2268 lockdep_invariant_state(true);
2269 trace_workqueue_execute_start(work);
2270 worker->current_func(work);
2271 /*
2272 * While we must be careful to not use "work" after this, the trace
2273 * point will only record its address.
2274 */
2275 trace_workqueue_execute_end(work);
2276 lock_map_release(&lockdep_map);
2277 lock_map_release(&pwq->wq->lockdep_map);
2278
2279 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2280 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2281 " last function: %ps\n",
2282 current->comm, preempt_count(), task_pid_nr(current),
2283 worker->current_func);
2284 debug_show_held_locks(current);
2285 dump_stack();
2286 }
2287
2288 /*
2289 * The following prevents a kworker from hogging CPU on !PREEMPT
2290 * kernels, where a requeueing work item waiting for something to
2291 * happen could deadlock with stop_machine as such work item could
2292 * indefinitely requeue itself while all other CPUs are trapped in
2293 * stop_machine. At the same time, report a quiescent RCU state so
2294 * the same condition doesn't freeze RCU.
2295 */
2296 cond_resched();
2297
2298 spin_lock_irq(&pool->lock);
2299
2300 /* clear cpu intensive status */
2301 if (unlikely(cpu_intensive))
2302 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2303
2304 /* tag the worker for identification in schedule() */
2305 worker->last_func = worker->current_func;
2306
2307 /* we're done with it, release */
2308 hash_del(&worker->hentry);
2309 worker->current_work = NULL;
2310 worker->current_func = NULL;
2311 worker->current_pwq = NULL;
2312 pwq_dec_nr_in_flight(pwq, work_color);
2313 }
2314
2315 /**
2316 * process_scheduled_works - process scheduled works
2317 * @worker: self
2318 *
2319 * Process all scheduled works. Please note that the scheduled list
2320 * may change while processing a work, so this function repeatedly
2321 * fetches a work from the top and executes it.
2322 *
2323 * CONTEXT:
2324 * spin_lock_irq(pool->lock) which may be released and regrabbed
2325 * multiple times.
2326 */
2327 static void process_scheduled_works(struct worker *worker)
2328 {
2329 while (!list_empty(&worker->scheduled)) {
2330 struct work_struct *work = list_first_entry(&worker->scheduled,
2331 struct work_struct, entry);
2332 process_one_work(worker, work);
2333 }
2334 }
2335
2336 static void set_pf_worker(bool val)
2337 {
2338 mutex_lock(&wq_pool_attach_mutex);
2339 if (val)
2340 current->flags |= PF_WQ_WORKER;
2341 else
2342 current->flags &= ~PF_WQ_WORKER;
2343 mutex_unlock(&wq_pool_attach_mutex);
2344 }
2345
2346 /**
2347 * worker_thread - the worker thread function
2348 * @__worker: self
2349 *
2350 * The worker thread function. All workers belong to a worker_pool -
2351 * either a per-cpu one or dynamic unbound one. These workers process all
2352 * work items regardless of their specific target workqueue. The only
2353 * exception is work items which belong to workqueues with a rescuer which
2354 * will be explained in rescuer_thread().
2355 *
2356 * Return: 0
2357 */
2358 static int worker_thread(void *__worker)
2359 {
2360 struct worker *worker = __worker;
2361 struct worker_pool *pool = worker->pool;
2362
2363 /* tell the scheduler that this is a workqueue worker */
2364 set_pf_worker(true);
2365 woke_up:
2366 spin_lock_irq(&pool->lock);
2367
2368 /* am I supposed to die? */
2369 if (unlikely(worker->flags & WORKER_DIE)) {
2370 spin_unlock_irq(&pool->lock);
2371 WARN_ON_ONCE(!list_empty(&worker->entry));
2372 set_pf_worker(false);
2373
2374 set_task_comm(worker->task, "kworker/dying");
2375 ida_simple_remove(&pool->worker_ida, worker->id);
2376 worker_detach_from_pool(worker);
2377 kfree(worker);
2378 return 0;
2379 }
2380
2381 worker_leave_idle(worker);
2382 recheck:
2383 /* no more worker necessary? */
2384 if (!need_more_worker(pool))
2385 goto sleep;
2386
2387 /* do we need to manage? */
2388 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2389 goto recheck;
2390
2391 /*
2392 * ->scheduled list can only be filled while a worker is
2393 * preparing to process a work or actually processing it.
2394 * Make sure nobody diddled with it while I was sleeping.
2395 */
2396 WARN_ON_ONCE(!list_empty(&worker->scheduled));
2397
2398 /*
2399 * Finish PREP stage. We're guaranteed to have at least one idle
2400 * worker or that someone else has already assumed the manager
2401 * role. This is where @worker starts participating in concurrency
2402 * management if applicable and concurrency management is restored
2403 * after being rebound. See rebind_workers() for details.
2404 */
2405 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2406
2407 do {
2408 struct work_struct *work =
2409 list_first_entry(&pool->worklist,
2410 struct work_struct, entry);
2411
2412 pool->watchdog_ts = jiffies;
2413
2414 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2415 /* optimization path, not strictly necessary */
2416 process_one_work(worker, work);
2417 if (unlikely(!list_empty(&worker->scheduled)))
2418 process_scheduled_works(worker);
2419 } else {
2420 move_linked_works(work, &worker->scheduled, NULL);
2421 process_scheduled_works(worker);
2422 }
2423 } while (keep_working(pool));
2424
2425 worker_set_flags(worker, WORKER_PREP);
2426 sleep:
2427 /*
2428 * pool->lock is held and there's no work to process and no need to
2429 * manage, sleep. Workers are woken up only while holding
2430 * pool->lock or from local cpu, so setting the current state
2431 * before releasing pool->lock is enough to prevent losing any
2432 * event.
2433 */
2434 worker_enter_idle(worker);
2435 __set_current_state(TASK_IDLE);
2436 spin_unlock_irq(&pool->lock);
2437 schedule();
2438 goto woke_up;
2439 }
2440
2441 /**
2442 * rescuer_thread - the rescuer thread function
2443 * @__rescuer: self
2444 *
2445 * Workqueue rescuer thread function. There's one rescuer for each
2446 * workqueue which has WQ_MEM_RECLAIM set.
2447 *
2448 * Regular work processing on a pool may block trying to create a new
2449 * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2450 * developing into a deadlock if some works currently on the same queue
2451 * need to be processed to satisfy the GFP_KERNEL allocation. This is
2452 * the problem the rescuer solves.
2453 *
2454 * When such a condition is possible, the pool summons the rescuers of all
2455 * workqueues which have works queued on the pool and lets them process
2456 * those works so that forward progress can be guaranteed.
2457 *
2458 * This should happen rarely.
2459 *
2460 * Return: 0
2461 */
2462 static int rescuer_thread(void *__rescuer)
2463 {
2464 struct worker *rescuer = __rescuer;
2465 struct workqueue_struct *wq = rescuer->rescue_wq;
2466 struct list_head *scheduled = &rescuer->scheduled;
2467 bool should_stop;
2468
2469 set_user_nice(current, RESCUER_NICE_LEVEL);
2470
2471 /*
2472 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
2473 * doesn't participate in concurrency management.
2474 */
2475 set_pf_worker(true);
2476 repeat:
2477 set_current_state(TASK_IDLE);
2478
2479 /*
2480 * By the time the rescuer is requested to stop, the workqueue
2481 * shouldn't have any work pending, but @wq->maydays may still have
2482 * pwq(s) queued. This can happen when non-rescuer workers consume
2483 * all the work items before the rescuer gets to them. Go through
2484 * @wq->maydays processing before acting on should_stop so that the
2485 * list is always empty on exit.
2486 */
2487 should_stop = kthread_should_stop();
2488
2489 /* see whether any pwq is asking for help */
2490 spin_lock_irq(&wq_mayday_lock);
2491
2492 while (!list_empty(&wq->maydays)) {
2493 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2494 struct pool_workqueue, mayday_node);
2495 struct worker_pool *pool = pwq->pool;
2496 struct work_struct *work, *n;
2497 bool first = true;
2498
2499 __set_current_state(TASK_RUNNING);
2500 list_del_init(&pwq->mayday_node);
2501
2502 spin_unlock_irq(&wq_mayday_lock);
2503
2504 worker_attach_to_pool(rescuer, pool);
2505
2506 spin_lock_irq(&pool->lock);
2507
2508 /*
2509 * Slurp in all works issued via this workqueue and
2510 * process'em.
2511 */
2512 WARN_ON_ONCE(!list_empty(scheduled));
2513 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2514 if (get_work_pwq(work) == pwq) {
2515 if (first)
2516 pool->watchdog_ts = jiffies;
2517 move_linked_works(work, scheduled, &n);
2518 }
2519 first = false;
2520 }
2521
2522 if (!list_empty(scheduled)) {
2523 process_scheduled_works(rescuer);
2524
2525 /*
2526 * The above execution of rescued work items could
2527 * have created more to rescue through
2528 * pwq_activate_first_delayed() or chained
2529 * queueing. Let's put @pwq back on mayday list so
2530 * that such back-to-back work items, which may be
2531 * being used to relieve memory pressure, don't
2532 * incur a MAYDAY_INTERVAL delay in between.
2533 */
2534 if (need_to_create_worker(pool)) {
2535 spin_lock(&wq_mayday_lock);
2536 /*
2537 * Queue iff we aren't racing destruction
2538 * and somebody else hasn't queued it already.
2539 */
2540 if (wq->rescuer && list_empty(&pwq->mayday_node)) {
2541 get_pwq(pwq);
2542 list_add_tail(&pwq->mayday_node, &wq->maydays);
2543 }
2544 spin_unlock(&wq_mayday_lock);
2545 }
2546 }
2547
2548 /*
2549 * Put the reference grabbed by send_mayday(). @pool won't
2550 * go away while we're still attached to it.
2551 */
2552 put_pwq(pwq);
2553
2554 /*
2555 * Leave this pool. If need_more_worker() is %true, notify a
2556 * regular worker; otherwise, we end up with 0 concurrency
2557 * and stalling the execution.
2558 */
2559 if (need_more_worker(pool))
2560 wake_up_worker(pool);
2561
2562 spin_unlock_irq(&pool->lock);
2563
2564 worker_detach_from_pool(rescuer);
2565
2566 spin_lock_irq(&wq_mayday_lock);
2567 }
2568
2569 spin_unlock_irq(&wq_mayday_lock);
2570
2571 if (should_stop) {
2572 __set_current_state(TASK_RUNNING);
2573 set_pf_worker(false);
2574 return 0;
2575 }
2576
2577 /* rescuers should never participate in concurrency management */
2578 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2579 schedule();
2580 goto repeat;
2581 }
2582
2583 /**
2584 * check_flush_dependency - check for flush dependency sanity
2585 * @target_wq: workqueue being flushed
2586 * @target_work: work item being flushed (NULL for workqueue flushes)
2587 *
2588 * %current is trying to flush the whole @target_wq or @target_work on it.
2589 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2590 * reclaiming memory or running on a workqueue which doesn't have
2591 * %WQ_MEM_RECLAIM, as that can break the forward-progress guarantee, leading to
2592 * a deadlock.
2593 */
2594 static void check_flush_dependency(struct workqueue_struct *target_wq,
2595 struct work_struct *target_work)
2596 {
2597 work_func_t target_func = target_work ? target_work->func : NULL;
2598 struct worker *worker;
2599
2600 if (target_wq->flags & WQ_MEM_RECLAIM)
2601 return;
2602
2603 worker = current_wq_worker();
2604
2605 WARN_ONCE(current->flags & PF_MEMALLOC,
2606 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
2607 current->pid, current->comm, target_wq->name, target_func);
2608 WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2609 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2610 "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
2611 worker->current_pwq->wq->name, worker->current_func,
2612 target_wq->name, target_func);
2613 }
2614
2615 struct wq_barrier {
2616 struct work_struct work;
2617 struct completion done;
2618 struct task_struct *task; /* purely informational */
2619 };
2620
2621 static void wq_barrier_func(struct work_struct *work)
2622 {
2623 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2624 complete(&barr->done);
2625 }
2626
2627 /**
2628 * insert_wq_barrier - insert a barrier work
2629 * @pwq: pwq to insert barrier into
2630 * @barr: wq_barrier to insert
2631 * @target: target work to attach @barr to
2632 * @worker: worker currently executing @target, NULL if @target is not executing
2633 *
2634 * @barr is linked to @target such that @barr is completed only after
2635 * @target finishes execution. Please note that the ordering
2636 * guarantee is observed only with respect to @target and on the local
2637 * cpu.
2638 *
2639 * Currently, a queued barrier can't be canceled. This is because
2640 * try_to_grab_pending() can't determine whether the work to be
2641 * grabbed is at the head of the queue and thus can't clear LINKED
2642 * flag of the previous work while there must be a valid next work
2643 * after a work with LINKED flag set.
2644 *
2645 * Note that when @worker is non-NULL, @target may be modified
2646 * underneath us, so we can't reliably determine pwq from @target.
2647 *
2648 * CONTEXT:
2649 * spin_lock_irq(pool->lock).
2650 */
2651 static void insert_wq_barrier(struct pool_workqueue *pwq,
2652 struct wq_barrier *barr,
2653 struct work_struct *target, struct worker *worker)
2654 {
2655 struct list_head *head;
2656 unsigned int linked = 0;
2657
2658 /*
2659 * debugobject calls are safe here even with pool->lock locked
2660 * as we know for sure that this will not trigger any of the
2661 * checks and call back into the fixup functions where we
2662 * might deadlock.
2663 */
2664 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2665 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2666
2667 init_completion_map(&barr->done, &target->lockdep_map);
2668
2669 barr->task = current;
2670
2671 /*
2672 * If @target is currently being executed, schedule the
2673 * barrier to the worker; otherwise, put it after @target.
2674 */
2675 if (worker)
2676 head = worker->scheduled.next;
2677 else {
2678 unsigned long *bits = work_data_bits(target);
2679
2680 head = target->entry.next;
2681 /* there can already be other linked works, inherit and set */
2682 linked = *bits & WORK_STRUCT_LINKED;
2683 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2684 }
2685
2686 debug_work_activate(&barr->work);
2687 insert_work(pwq, &barr->work, head,
2688 work_color_to_flags(WORK_NO_COLOR) | linked);
2689 }
2690
2691 /**
2692 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2693 * @wq: workqueue being flushed
2694 * @flush_color: new flush color, < 0 for no-op
2695 * @work_color: new work color, < 0 for no-op
2696 *
2697 * Prepare pwqs for workqueue flushing.
2698 *
2699 * If @flush_color is non-negative, flush_color on all pwqs should be
2700 * -1. If no pwq has in-flight commands at the specified color, all
2701 * pwq->flush_color's stay at -1 and %false is returned. If any pwq
2702 * has in-flight commands, its pwq->flush_color is set to
2703 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2704 * wakeup logic is armed and %true is returned.
2705 *
2706 * The caller should have initialized @wq->first_flusher prior to
2707 * calling this function with non-negative @flush_color. If
2708 * @flush_color is negative, no flush color update is done and %false
2709 * is returned.
2710 *
2711 * If @work_color is non-negative, all pwqs should have the same
2712 * work_color which is previous to @work_color and all will be
2713 * advanced to @work_color.
2714 *
2715 * CONTEXT:
2716 * mutex_lock(wq->mutex).
2717 *
2718 * Return:
2719 * %true if @flush_color >= 0 and there's something to flush. %false
2720 * otherwise.
2721 */
2722 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2723 int flush_color, int work_color)
2724 {
2725 bool wait = false;
2726 struct pool_workqueue *pwq;
2727
2728 if (flush_color >= 0) {
2729 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2730 atomic_set(&wq->nr_pwqs_to_flush, 1);
2731 }
2732
2733 for_each_pwq(pwq, wq) {
2734 struct worker_pool *pool = pwq->pool;
2735
2736 spin_lock_irq(&pool->lock);
2737
2738 if (flush_color >= 0) {
2739 WARN_ON_ONCE(pwq->flush_color != -1);
2740
2741 if (pwq->nr_in_flight[flush_color]) {
2742 pwq->flush_color = flush_color;
2743 atomic_inc(&wq->nr_pwqs_to_flush);
2744 wait = true;
2745 }
2746 }
2747
2748 if (work_color >= 0) {
2749 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2750 pwq->work_color = work_color;
2751 }
2752
2753 spin_unlock_irq(&pool->lock);
2754 }
2755
2756 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2757 complete(&wq->first_flusher->done);
2758
2759 return wait;
2760 }
2761
2762 /**
2763 * flush_workqueue - ensure that any scheduled work has run to completion.
2764 * @wq: workqueue to flush
2765 *
2766 * This function sleeps until all work items which were queued on entry
2767 * have finished execution, but it is not livelocked by new incoming ones.
2768 */
2769 void flush_workqueue(struct workqueue_struct *wq)
2770 {
2771 struct wq_flusher this_flusher = {
2772 .list = LIST_HEAD_INIT(this_flusher.list),
2773 .flush_color = -1,
2774 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
2775 };
2776 int next_color;
2777
2778 if (WARN_ON(!wq_online))
2779 return;
2780
2781 lock_map_acquire(&wq->lockdep_map);
2782 lock_map_release(&wq->lockdep_map);
2783
2784 mutex_lock(&wq->mutex);
2785
2786 /*
2787 * Start-to-wait phase
2788 */
2789 next_color = work_next_color(wq->work_color);
2790
2791 if (next_color != wq->flush_color) {
2792 /*
2793 * Color space is not full. The current work_color
2794 * becomes our flush_color and work_color is advanced
2795 * by one.
2796 */
2797 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2798 this_flusher.flush_color = wq->work_color;
2799 wq->work_color = next_color;
2800
2801 if (!wq->first_flusher) {
2802 /* no flush in progress, become the first flusher */
2803 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2804
2805 wq->first_flusher = &this_flusher;
2806
2807 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2808 wq->work_color)) {
2809 /* nothing to flush, done */
2810 wq->flush_color = next_color;
2811 wq->first_flusher = NULL;
2812 goto out_unlock;
2813 }
2814 } else {
2815 /* wait in queue */
2816 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2817 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2818 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2819 }
2820 } else {
2821 /*
2822 * Oops, color space is full, wait on overflow queue.
2823 * The next flush completion will assign us
2824 * flush_color and transfer to flusher_queue.
2825 */
2826 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2827 }
2828
2829 check_flush_dependency(wq, NULL);
2830
2831 mutex_unlock(&wq->mutex);
2832
2833 wait_for_completion(&this_flusher.done);
2834
2835 /*
2836 * Wake-up-and-cascade phase
2837 *
2838 * First flushers are responsible for cascading flushes and
2839 * handling overflow. Non-first flushers can simply return.
2840 */
2841 if (wq->first_flusher != &this_flusher)
2842 return;
2843
2844 mutex_lock(&wq->mutex);
2845
2846 /* we might have raced, check again with mutex held */
2847 if (wq->first_flusher != &this_flusher)
2848 goto out_unlock;
2849
2850 wq->first_flusher = NULL;
2851
2852 WARN_ON_ONCE(!list_empty(&this_flusher.list));
2853 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2854
2855 while (true) {
2856 struct wq_flusher *next, *tmp;
2857
2858 /* complete all the flushers sharing the current flush color */
2859 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2860 if (next->flush_color != wq->flush_color)
2861 break;
2862 list_del_init(&next->list);
2863 complete(&next->done);
2864 }
2865
2866 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2867 wq->flush_color != work_next_color(wq->work_color));
2868
2869 /* this flush_color is finished, advance by one */
2870 wq->flush_color = work_next_color(wq->flush_color);
2871
2872 /* one color has been freed, handle overflow queue */
2873 if (!list_empty(&wq->flusher_overflow)) {
2874 /*
2875 * Assign the same color to all overflowed
2876 * flushers, advance work_color and append to
2877 * flusher_queue. This is the start-to-wait
2878 * phase for these overflowed flushers.
2879 */
2880 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2881 tmp->flush_color = wq->work_color;
2882
2883 wq->work_color = work_next_color(wq->work_color);
2884
2885 list_splice_tail_init(&wq->flusher_overflow,
2886 &wq->flusher_queue);
2887 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2888 }
2889
2890 if (list_empty(&wq->flusher_queue)) {
2891 WARN_ON_ONCE(wq->flush_color != wq->work_color);
2892 break;
2893 }
2894
2895 /*
2896 * Need to flush more colors. Make the next flusher
2897 * the new first flusher and arm pwqs.
2898 */
2899 WARN_ON_ONCE(wq->flush_color == wq->work_color);
2900 WARN_ON_ONCE(wq->flush_color != next->flush_color);
2901
2902 list_del_init(&next->list);
2903 wq->first_flusher = next;
2904
2905 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2906 break;
2907
2908 /*
2909 * Meh... this color is already done, clear first
2910 * flusher and repeat cascading.
2911 */
2912 wq->first_flusher = NULL;
2913 }
2914
2915 out_unlock:
2916 mutex_unlock(&wq->mutex);
2917 }
2918 EXPORT_SYMBOL(flush_workqueue);
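
/*
 * Usage sketch (illustrative only, compiled out): waiting for everything
 * already queued on a caller-owned workqueue before tearing shared state
 * down. The ex_flush_wq name is made up for this example.
 */
#if 0
static void ex_quiesce(struct workqueue_struct *ex_flush_wq)
{
	/*
	 * Returns once every work item queued on ex_flush_wq before this
	 * call has finished; items queued afterwards are not waited for.
	 */
	flush_workqueue(ex_flush_wq);
}
#endif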
2919
2920 /**
2921 * drain_workqueue - drain a workqueue
2922 * @wq: workqueue to drain
2923 *
2924 * Wait until the workqueue becomes empty. While draining is in progress,
2925 * only chain queueing is allowed. IOW, only currently pending or running
2926 * work items on @wq can queue further work items on it. @wq is flushed
2927 * repeatedly until it becomes empty. The number of flushes is determined
2928 * by the depth of chaining and should be relatively small. Whine if it
2929 * takes too long.
2930 */
2931 void drain_workqueue(struct workqueue_struct *wq)
2932 {
2933 unsigned int flush_cnt = 0;
2934 struct pool_workqueue *pwq;
2935
2936 /*
2937 * __queue_work() needs to test whether there are drainers; it is much
2938 * hotter than drain_workqueue() and already looks at @wq->flags.
2939 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
2940 */
2941 mutex_lock(&wq->mutex);
2942 if (!wq->nr_drainers++)
2943 wq->flags |= __WQ_DRAINING;
2944 mutex_unlock(&wq->mutex);
2945 reflush:
2946 flush_workqueue(wq);
2947
2948 mutex_lock(&wq->mutex);
2949
2950 for_each_pwq(pwq, wq) {
2951 bool drained;
2952
2953 spin_lock_irq(&pwq->pool->lock);
2954 drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2955 spin_unlock_irq(&pwq->pool->lock);
2956
2957 if (drained)
2958 continue;
2959
2960 if (++flush_cnt == 10 ||
2961 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2962 pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2963 wq->name, flush_cnt);
2964
2965 mutex_unlock(&wq->mutex);
2966 goto reflush;
2967 }
2968
2969 if (!--wq->nr_drainers)
2970 wq->flags &= ~__WQ_DRAINING;
2971 mutex_unlock(&wq->mutex);
2972 }
2973 EXPORT_SYMBOL_GPL(drain_workqueue);
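
/*
 * Usage sketch (illustrative only, compiled out): draining a workqueue whose
 * work items may requeue themselves, while keeping the workqueue itself
 * alive. The ex_drain_wq name is made up for this example.
 */
#if 0
static void ex_stop_requeueing(struct workqueue_struct *ex_drain_wq)
{
	/*
	 * Unlike flush_workqueue(), this also waits out chained requeues;
	 * while draining, only already pending or running items on
	 * ex_drain_wq may queue further work on it.
	 */
	drain_workqueue(ex_drain_wq);
}
#endif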
2974
2975 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2976 bool from_cancel)
2977 {
2978 struct worker *worker = NULL;
2979 struct worker_pool *pool;
2980 struct pool_workqueue *pwq;
2981
2982 might_sleep();
2983
2984 rcu_read_lock();
2985 pool = get_work_pool(work);
2986 if (!pool) {
2987 rcu_read_unlock();
2988 return false;
2989 }
2990
2991 spin_lock_irq(&pool->lock);
2992 /* see the comment in try_to_grab_pending() with the same code */
2993 pwq = get_work_pwq(work);
2994 if (pwq) {
2995 if (unlikely(pwq->pool != pool))
2996 goto already_gone;
2997 } else {
2998 worker = find_worker_executing_work(pool, work);
2999 if (!worker)
3000 goto already_gone;
3001 pwq = worker->current_pwq;
3002 }
3003
3004 check_flush_dependency(pwq->wq, work);
3005
3006 insert_wq_barrier(pwq, barr, work, worker);
3007 spin_unlock_irq(&pool->lock);
3008
3009 /*
3010 * Force a lock recursion deadlock when using flush_work() inside a
3011 * single-threaded or rescuer equipped workqueue.
3012 *
3013 * For single threaded workqueues the deadlock happens when the work
3014 * is after the work issuing the flush_work(). For rescuer equipped
3015 * workqueues the deadlock happens when the rescuer stalls, blocking
3016 * forward progress.
3017 */
3018 if (!from_cancel &&
3019 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
3020 lock_map_acquire(&pwq->wq->lockdep_map);
3021 lock_map_release(&pwq->wq->lockdep_map);
3022 }
3023 rcu_read_unlock();
3024 return true;
3025 already_gone:
3026 spin_unlock_irq(&pool->lock);
3027 rcu_read_unlock();
3028 return false;
3029 }
3030
3031 static bool __flush_work(struct work_struct *work, bool from_cancel)
3032 {
3033 struct wq_barrier barr;
3034
3035 if (WARN_ON(!wq_online))
3036 return false;
3037
3038 if (WARN_ON(!work->func))
3039 return false;
3040
3041 if (!from_cancel) {
3042 lock_map_acquire(&work->lockdep_map);
3043 lock_map_release(&work->lockdep_map);
3044 }
3045
3046 if (start_flush_work(work, &barr, from_cancel)) {
3047 wait_for_completion(&barr.done);
3048 destroy_work_on_stack(&barr.work);
3049 return true;
3050 } else {
3051 return false;
3052 }
3053 }
3054
3055 /**
3056 * flush_work - wait for a work to finish executing the last queueing instance
3057 * @work: the work to flush
3058 *
3059 * Wait until @work has finished execution. @work is guaranteed to be idle
3060 * on return if it hasn't been requeued since flush started.
3061 *
3062 * Return:
3063 * %true if flush_work() waited for the work to finish execution,
3064 * %false if it was already idle.
3065 */
3066 bool flush_work(struct work_struct *work)
3067 {
3068 return __flush_work(work, false);
3069 }
3070 EXPORT_SYMBOL_GPL(flush_work);
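
/*
 * Usage sketch (illustrative only, compiled out): waiting for one specific
 * work item with flush_work() before reading the result it produces. The
 * ex_calc_* names are made up for this example.
 */
#if 0
static int ex_calc_result;

static void ex_calc_fn(struct work_struct *work)
{
	ex_calc_result = 42;	/* stand-in for a real computation */
}

static DECLARE_WORK(ex_calc_work, ex_calc_fn);

static int ex_calc_sync(void)
{
	schedule_work(&ex_calc_work);
	flush_work(&ex_calc_work);	/* ex_calc_fn() has finished here */
	return ex_calc_result;
}
#endif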
3071
3072 struct cwt_wait {
3073 wait_queue_entry_t wait;
3074 struct work_struct *work;
3075 };
3076
3077 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
3078 {
3079 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
3080
3081 if (cwait->work != key)
3082 return 0;
3083 return autoremove_wake_function(wait, mode, sync, key);
3084 }
3085
3086 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
3087 {
3088 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
3089 unsigned long flags;
3090 int ret;
3091
3092 do {
3093 ret = try_to_grab_pending(work, is_dwork, &flags);
3094 /*
3095 * If someone else is already canceling, wait for it to
3096 * finish. flush_work() doesn't work for PREEMPT_NONE
3097 * because we may get scheduled between @work's completion
3098 * and the other canceling task resuming and clearing
3099 * CANCELING - flush_work() will return false immediately
3100 * as @work is no longer busy, try_to_grab_pending() will
3101 * return -ENOENT as @work is still being canceled and the
3102 * other canceling task won't be able to clear CANCELING as
3103 * we're hogging the CPU.
3104 *
3105 * Let's wait for completion using a waitqueue. As this
3106 * may lead to the thundering herd problem, use a custom
3107 * wake function which matches @work along with exclusive
3108 * wait and wakeup.
3109 */
3110 if (unlikely(ret == -ENOENT)) {
3111 struct cwt_wait cwait;
3112
3113 init_wait(&cwait.wait);
3114 cwait.wait.func = cwt_wakefn;
3115 cwait.work = work;
3116
3117 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3118 TASK_UNINTERRUPTIBLE);
3119 if (work_is_canceling(work))
3120 schedule();
3121 finish_wait(&cancel_waitq, &cwait.wait);
3122 }
3123 } while (unlikely(ret < 0));
3124
3125 /* tell other tasks trying to grab @work to back off */
3126 mark_work_canceling(work);
3127 local_irq_restore(flags);
3128
3129 /*
3130 * This allows canceling during early boot. We know that @work
3131 * isn't executing.
3132 */
3133 if (wq_online)
3134 __flush_work(work, true);
3135
3136 clear_work_data(work);
3137
3138 /*
3139 * Paired with prepare_to_wait() above so that either
3140 * waitqueue_active() is visible here or !work_is_canceling() is
3141 * visible there.
3142 */
3143 smp_mb();
3144 if (waitqueue_active(&cancel_waitq))
3145 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3146
3147 return ret;
3148 }
3149
3150 /**
3151 * cancel_work_sync - cancel a work and wait for it to finish
3152 * @work: the work to cancel
3153 *
3154 * Cancel @work and wait for its execution to finish. This function
3155 * can be used even if the work re-queues itself or migrates to
3156 * another workqueue. On return from this function, @work is
3157 * guaranteed to be not pending or executing on any CPU.
3158 *
3159 * cancel_work_sync(&delayed_work->work) must not be used for
3160 * delayed_work items. Use cancel_delayed_work_sync() instead.
3161 *
3162 * The caller must ensure that the workqueue on which @work was last
3163 * queued can't be destroyed before this function returns.
3164 *
3165 * Return:
3166 * %true if @work was pending, %false otherwise.
3167 */
3168 bool cancel_work_sync(struct work_struct *work)
3169 {
3170 return __cancel_work_timer(work, false);
3171 }
3172 EXPORT_SYMBOL_GPL(cancel_work_sync);
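
/*
 * Usage sketch (illustrative only, compiled out): the usual teardown pattern,
 * cancelling a possibly requeueing work item before freeing the object it
 * operates on. The ex_dev structure is made up for this example.
 */
#if 0
struct ex_dev {
	struct work_struct irq_work;
	/* ... */
};

static void ex_dev_destroy(struct ex_dev *dev)
{
	/*
	 * After this returns, irq_work is neither pending nor running
	 * anywhere, so it is safe to free @dev.
	 */
	cancel_work_sync(&dev->irq_work);
	kfree(dev);
}
#endif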
3173
3174 /**
3175 * flush_delayed_work - wait for a dwork to finish executing the last queueing instance
3176 * @dwork: the delayed work to flush
3177 *
3178 * Delayed timer is cancelled and the pending work is queued for
3179 * immediate execution. Like flush_work(), this function only
3180 * considers the last queueing instance of @dwork.
3181 *
3182 * Return:
3183 * %true if flush_work() waited for the work to finish execution,
3184 * %false if it was already idle.
3185 */
3186 bool flush_delayed_work(struct delayed_work *dwork)
3187 {
3188 local_irq_disable();
3189 if (del_timer_sync(&dwork->timer))
3190 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
3191 local_irq_enable();
3192 return flush_work(&dwork->work);
3193 }
3194 EXPORT_SYMBOL(flush_delayed_work);
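
/*
 * Usage sketch (illustrative only, compiled out): forcing a pending delayed
 * work to run now and waiting for it, e.g. pushing out buffered state on
 * suspend. The ex_fs structure is made up for this example.
 */
#if 0
struct ex_fs {
	struct delayed_work writeback;
};

static void ex_fs_suspend(struct ex_fs *fs)
{
	/* cancel the timer, queue the work immediately and wait for it */
	flush_delayed_work(&fs->writeback);
}
#endif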
3195
3196 /**
3197 * flush_rcu_work - wait for a rwork to finish executing the last queueing instance
3198 * @rwork: the rcu work to flush
3199 *
3200 * Return:
3201 * %true if flush_rcu_work() waited for the work to finish execution,
3202 * %false if it was already idle.
3203 */
3204 bool flush_rcu_work(struct rcu_work *rwork)
3205 {
3206 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
3207 rcu_barrier();
3208 flush_work(&rwork->work);
3209 return true;
3210 } else {
3211 return flush_work(&rwork->work);
3212 }
3213 }
3214 EXPORT_SYMBOL(flush_rcu_work);
3215
3216 static bool __cancel_work(struct work_struct *work, bool is_dwork)
3217 {
3218 unsigned long flags;
3219 int ret;
3220
3221 do {
3222 ret = try_to_grab_pending(work, is_dwork, &flags);
3223 } while (unlikely(ret == -EAGAIN));
3224
3225 if (unlikely(ret < 0))
3226 return false;
3227
3228 set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3229 local_irq_restore(flags);
3230 return ret;
3231 }
3232
3233 /**
3234 * cancel_delayed_work - cancel a delayed work
3235 * @dwork: delayed_work to cancel
3236 *
3237 * Kill off a pending delayed_work.
3238 *
3239 * Return: %true if @dwork was pending and canceled; %false if it wasn't
3240 * pending.
3241 *
3242 * Note:
3243 * The work callback function may still be running on return, unless
3244 * it returns %true and the work doesn't re-arm itself. Explicitly flush or
3245 * use cancel_delayed_work_sync() to wait on it.
3246 *
3247 * This function is safe to call from any context including IRQ handlers.
3248 */
3249 bool cancel_delayed_work(struct delayed_work *dwork)
3250 {
3251 return __cancel_work(&dwork->work, true);
3252 }
3253 EXPORT_SYMBOL(cancel_delayed_work);
3254
3255 /**
3256 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3257 * @dwork: the delayed work to cancel
3258 *
3259 * This is cancel_work_sync() for delayed works.
3260 *
3261 * Return:
3262 * %true if @dwork was pending, %false otherwise.
3263 */
3264 bool cancel_delayed_work_sync(struct delayed_work *dwork)
3265 {
3266 return __cancel_work_timer(&dwork->work, true);
3267 }
3268 EXPORT_SYMBOL(cancel_delayed_work_sync);
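
/*
 * Usage sketch (illustrative only, compiled out): the two cancel flavours
 * side by side. cancel_delayed_work() may return while the callback is still
 * running; cancel_delayed_work_sync() may sleep but guarantees it is done.
 * The ex_poll_* helper names are made up for this example.
 */
#if 0
static void ex_poll_stop(struct delayed_work *poll)
{
	/* usable from atomic context: stop a future expiry, don't wait */
	cancel_delayed_work(poll);
}

static void ex_poll_teardown(struct delayed_work *poll)
{
	/* process context only: wait until the callback can no longer run */
	cancel_delayed_work_sync(poll);
}
#endif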
3269
3270 /**
3271 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3272 * @func: the function to call
3273 *
3274 * schedule_on_each_cpu() executes @func on each online CPU using the
3275 * system workqueue and blocks until all CPUs have completed.
3276 * schedule_on_each_cpu() is very slow.
3277 *
3278 * Return:
3279 * 0 on success, -errno on failure.
3280 */
3281 int schedule_on_each_cpu(work_func_t func)
3282 {
3283 int cpu;
3284 struct work_struct __percpu *works;
3285
3286 works = alloc_percpu(struct work_struct);
3287 if (!works)
3288 return -ENOMEM;
3289
3290 get_online_cpus();
3291
3292 for_each_online_cpu(cpu) {
3293 struct work_struct *work = per_cpu_ptr(works, cpu);
3294
3295 INIT_WORK(work, func);
3296 schedule_work_on(cpu, work);
3297 }
3298
3299 for_each_online_cpu(cpu)
3300 flush_work(per_cpu_ptr(works, cpu));
3301
3302 put_online_cpus();
3303 free_percpu(works);
3304 return 0;
3305 }
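
/*
 * Usage sketch (illustrative only, compiled out): running a function once on
 * every online CPU and waiting for all of them with schedule_on_each_cpu().
 * The ex_drain_* names are made up for this example; note that this helper
 * is slow and holds off CPU hotplug for its duration.
 */
#if 0
static void ex_drain_cpu_cache_fn(struct work_struct *work)
{
	/* executes on the CPU this per-cpu work item was scheduled for */
}

static int ex_drain_all_cpu_caches(void)
{
	return schedule_on_each_cpu(ex_drain_cpu_cache_fn);
}
#endif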
3306
3307 /**
3308 * execute_in_process_context - reliably execute the routine with user context
3309 * @fn: the function to execute
3310 * @ew: guaranteed storage for the execute work structure (must
3311 * be available when the work executes)
3312 *
3313 * Executes the function immediately if process context is available,
3314 * otherwise schedules the function for delayed execution.
3315 *
3316 * Return: 0 - function was executed
3317 * 1 - function was scheduled for execution
3318 */
3319 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3320 {
3321 if (!in_interrupt()) {
3322 fn(&ew->work);
3323 return 0;
3324 }
3325
3326 INIT_WORK(&ew->work, fn);
3327 schedule_work(&ew->work);
3328
3329 return 1;
3330 }
3331 EXPORT_SYMBOL_GPL(execute_in_process_context);
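
/*
 * Usage sketch (illustrative only, compiled out): releasing an object either
 * inline (when already in process context) or via the system workqueue (when
 * called from interrupt context) using execute_in_process_context(). The
 * ex_ref structure is made up for this example.
 */
#if 0
struct ex_ref {
	struct execute_work ew;
	/* ... */
};

static void ex_ref_release_fn(struct work_struct *work)
{
	struct ex_ref *ref = container_of(work, struct ex_ref, ew.work);

	kfree(ref);
}

static void ex_ref_put(struct ex_ref *ref)
{
	/* @ref->ew must stay valid until the callback has run */
	execute_in_process_context(ex_ref_release_fn, &ref->ew);
}
#endif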
3332
3333 /**
3334 * free_workqueue_attrs - free a workqueue_attrs
3335 * @attrs: workqueue_attrs to free
3336 *
3337 * Undo alloc_workqueue_attrs().
3338 */
3339 void free_workqueue_attrs(struct workqueue_attrs *attrs)
3340 {
3341 if (attrs) {
3342 free_cpumask_var(attrs->cpumask);
3343 kfree(attrs);
3344 }
3345 }
3346
3347 /**
3348 * alloc_workqueue_attrs - allocate a workqueue_attrs
3349 *
3350 * Allocate a new workqueue_attrs, initialize with default settings and
3351 * return it.
3352 *
3353 * Return: The newly allocated workqueue_attrs on success. %NULL on failure.
3354 */
3355 struct workqueue_attrs *alloc_workqueue_attrs(void)
3356 {
3357 struct workqueue_attrs *attrs;
3358
3359 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
3360 if (!attrs)
3361 goto fail;
3362 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
3363 goto fail;
3364
3365 cpumask_copy(attrs->cpumask, cpu_possible_mask);
3366 return attrs;
3367 fail:
3368 free_workqueue_attrs(attrs);
3369 return NULL;
3370 }
3371
3372 static void copy_workqueue_attrs(struct workqueue_attrs *to,
3373 const struct workqueue_attrs *from)
3374 {
3375 to->nice = from->nice;
3376 cpumask_copy(to->cpumask, from->cpumask);
3377 /*
3378 * Unlike hash and equality test, this function doesn't ignore
3379 * ->no_numa as it is used for both pool and wq attrs. Instead,
3380 * get_unbound_pool() explicitly clears ->no_numa after copying.
3381 */
3382 to->no_numa = from->no_numa;
3383 }
3384
3385 /* hash value of the content of @attr */
3386 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3387 {
3388 u32 hash = 0;
3389
3390 hash = jhash_1word(attrs->nice, hash);
3391 hash = jhash(cpumask_bits(attrs->cpumask),
3392 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3393 return hash;
3394 }
3395
3396 /* content equality test */
3397 static bool wqattrs_equal(const struct workqueue_attrs *a,
3398 const struct workqueue_attrs *b)
3399 {
3400 if (a->nice != b->nice)
3401 return false;
3402 if (!cpumask_equal(a->cpumask, b->cpumask))
3403 return false;
3404 return true;
3405 }
3406
3407 /**
3408 * init_worker_pool - initialize a newly zalloc'd worker_pool
3409 * @pool: worker_pool to initialize
3410 *
3411 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
3412 *
3413 * Return: 0 on success, -errno on failure. Even on failure, all fields
3414 * inside @pool proper are initialized and put_unbound_pool() can be called
3415 * on @pool safely to release it.
3416 */
3417 static int init_worker_pool(struct worker_pool *pool)
3418 {
3419 spin_lock_init(&pool->lock);
3420 pool->id = -1;
3421 pool->cpu = -1;
3422 pool->node = NUMA_NO_NODE;
3423 pool->flags |= POOL_DISASSOCIATED;
3424 pool->watchdog_ts = jiffies;
3425 INIT_LIST_HEAD(&pool->worklist);
3426 INIT_LIST_HEAD(&pool->idle_list);
3427 hash_init(pool->busy_hash);
3428
3429 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3430
3431 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
3432
3433 INIT_LIST_HEAD(&pool->workers);
3434
3435 ida_init(&pool->worker_ida);
3436 INIT_HLIST_NODE(&pool->hash_node);
3437 pool->refcnt = 1;
3438
3439 /* shouldn't fail above this point */
3440 pool->attrs = alloc_workqueue_attrs();
3441 if (!pool->attrs)
3442 return -ENOMEM;
3443 return 0;
3444 }
3445
3446 #ifdef CONFIG_LOCKDEP
3447 static void wq_init_lockdep(struct workqueue_struct *wq)
3448 {
3449 char *lock_name;
3450
3451 lockdep_register_key(&wq->key);
3452 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
3453 if (!lock_name)
3454 lock_name = wq->name;
3455
3456 wq->lock_name = lock_name;
3457 lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
3458 }
3459
3460 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3461 {
3462 lockdep_unregister_key(&wq->key);
3463 }
3464
3465 static void wq_free_lockdep(struct workqueue_struct *wq)
3466 {
3467 if (wq->lock_name != wq->name)
3468 kfree(wq->lock_name);
3469 }
3470 #else
3471 static void wq_init_lockdep(struct workqueue_struct *wq)
3472 {
3473 }
3474
3475 static void wq_unregister_lockdep(struct workqueue_struct *wq)
3476 {
3477 }
3478
3479 static void wq_free_lockdep(struct workqueue_struct *wq)
3480 {
3481 }
3482 #endif
3483
3484 static void rcu_free_wq(struct rcu_head *rcu)
3485 {
3486 struct workqueue_struct *wq =
3487 container_of(rcu, struct workqueue_struct, rcu);
3488
3489 wq_free_lockdep(wq);
3490
3491 if (!(wq->flags & WQ_UNBOUND))
3492 free_percpu(wq->cpu_pwqs);
3493 else
3494 free_workqueue_attrs(wq->unbound_attrs);
3495
3496 kfree(wq->rescuer);
3497 kfree(wq);
3498 }
3499
3500 static void rcu_free_pool(struct rcu_head *rcu)
3501 {
3502 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3503
3504 ida_destroy(&pool->worker_ida);
3505 free_workqueue_attrs(pool->attrs);
3506 kfree(pool);
3507 }
3508
3509 /**
3510 * put_unbound_pool - put a worker_pool
3511 * @pool: worker_pool to put
3512 *
3513 * Put @pool. If its refcnt reaches zero, it gets destroyed in an RCU-safe
3514 * manner. get_unbound_pool() calls this function on its failure path
3515 * and this function should be able to release pools which went through,
3516 * successfully or not, init_worker_pool().
3517 *
3518 * Should be called with wq_pool_mutex held.
3519 */
3520 static void put_unbound_pool(struct worker_pool *pool)
3521 {
3522 DECLARE_COMPLETION_ONSTACK(detach_completion);
3523 struct worker *worker;
3524
3525 lockdep_assert_held(&wq_pool_mutex);
3526
3527 if (--pool->refcnt)
3528 return;
3529
3530 /* sanity checks */
3531 if (WARN_ON(!(pool->cpu < 0)) ||
3532 WARN_ON(!list_empty(&pool->worklist)))
3533 return;
3534
3535 /* release id and unhash */
3536 if (pool->id >= 0)
3537 idr_remove(&worker_pool_idr, pool->id);
3538 hash_del(&pool->hash_node);
3539
3540 /*
3541 * Become the manager and destroy all workers. This prevents
3542 * @pool's workers from blocking on attach_mutex. We're the last
3543 * manager and @pool gets freed with the flag set.
3544 */
3545 spin_lock_irq(&pool->lock);
3546 wait_event_lock_irq(wq_manager_wait,
3547 !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
3548 pool->flags |= POOL_MANAGER_ACTIVE;
3549
3550 while ((worker = first_idle_worker(pool)))
3551 destroy_worker(worker);
3552 WARN_ON(pool->nr_workers || pool->nr_idle);
3553 spin_unlock_irq(&pool->lock);
3554
3555 mutex_lock(&wq_pool_attach_mutex);
3556 if (!list_empty(&pool->workers))
3557 pool->detach_completion = &detach_completion;
3558 mutex_unlock(&wq_pool_attach_mutex);
3559
3560 if (pool->detach_completion)
3561 wait_for_completion(pool->detach_completion);
3562
3563 /* shut down the timers */
3564 del_timer_sync(&pool->idle_timer);
3565 del_timer_sync(&pool->mayday_timer);
3566
3567 /* RCU protected to allow dereferences from get_work_pool() */
3568 call_rcu(&pool->rcu, rcu_free_pool);
3569 }
3570
3571 /**
3572 * get_unbound_pool - get a worker_pool with the specified attributes
3573 * @attrs: the attributes of the worker_pool to get
3574 *
3575 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3576 * reference count and return it. If there already is a matching
3577 * worker_pool, it will be used; otherwise, this function attempts to
3578 * create a new one.
3579 *
3580 * Should be called with wq_pool_mutex held.
3581 *
3582 * Return: On success, a worker_pool with the same attributes as @attrs.
3583 * On failure, %NULL.
3584 */
3585 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3586 {
3587 u32 hash = wqattrs_hash(attrs);
3588 struct worker_pool *pool;
3589 int node;
3590 int target_node = NUMA_NO_NODE;
3591
3592 lockdep_assert_held(&wq_pool_mutex);
3593
3594 /* do we already have a matching pool? */
3595 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3596 if (wqattrs_equal(pool->attrs, attrs)) {
3597 pool->refcnt++;
3598 return pool;
3599 }
3600 }
3601
3602 /* if cpumask is contained inside a NUMA node, we belong to that node */
3603 if (wq_numa_enabled) {
3604 for_each_node(node) {
3605 if (cpumask_subset(attrs->cpumask,
3606 wq_numa_possible_cpumask[node])) {
3607 target_node = node;
3608 break;
3609 }
3610 }
3611 }
3612
3613 /* nope, create a new one */
3614 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3615 if (!pool || init_worker_pool(pool) < 0)
3616 goto fail;
3617
3618 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
3619 copy_workqueue_attrs(pool->attrs, attrs);
3620 pool->node = target_node;
3621
3622 /*
3623 * no_numa isn't a worker_pool attribute, always clear it. See
3624 * 'struct workqueue_attrs' comments for detail.
3625 */
3626 pool->attrs->no_numa = false;
3627
3628 if (worker_pool_assign_id(pool) < 0)
3629 goto fail;
3630
3631 /* create and start the initial worker */
3632 if (wq_online && !create_worker(pool))
3633 goto fail;
3634
3635 /* install */
3636 hash_add(unbound_pool_hash, &pool->hash_node, hash);
3637
3638 return pool;
3639 fail:
3640 if (pool)
3641 put_unbound_pool(pool);
3642 return NULL;
3643 }
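
/*
 * Illustrative sketch of the pool refcounting rules: get_unbound_pool() and
 * put_unbound_pool() must both be called under wq_pool_mutex, and every
 * successful get needs a matching put.  'use_pool()' below is hypothetical;
 * the real caller, alloc_unbound_pwq(), keeps the reference for as long as
 * the pwq exists.
 *
 *	mutex_lock(&wq_pool_mutex);
 *	pool = get_unbound_pool(attrs);
 *	if (pool) {
 *		use_pool(pool);
 *		put_unbound_pool(pool);		// drop the reference again
 *	}
 *	mutex_unlock(&wq_pool_mutex);
 */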
3644
3645 static void rcu_free_pwq(struct rcu_head *rcu)
3646 {
3647 kmem_cache_free(pwq_cache,
3648 container_of(rcu, struct pool_workqueue, rcu));
3649 }
3650
3651 /*
3652 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3653 * and needs to be destroyed.
3654 */
3655 static void pwq_unbound_release_workfn(struct work_struct *work)
3656 {
3657 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3658 unbound_release_work);
3659 struct workqueue_struct *wq = pwq->wq;
3660 struct worker_pool *pool = pwq->pool;
3661 bool is_last;
3662
3663 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3664 return;
3665
3666 mutex_lock(&wq->mutex);
3667 list_del_rcu(&pwq->pwqs_node);
3668 is_last = list_empty(&wq->pwqs);
3669 mutex_unlock(&wq->mutex);
3670
3671 mutex_lock(&wq_pool_mutex);
3672 put_unbound_pool(pool);
3673 mutex_unlock(&wq_pool_mutex);
3674
3675 call_rcu(&pwq->rcu, rcu_free_pwq);
3676
3677 /*
3678 * If we're the last pwq going away, @wq is already dead and no one
3679 * is gonna access it anymore. Schedule RCU free.
3680 */
3681 if (is_last) {
3682 wq_unregister_lockdep(wq);
3683 call_rcu(&wq->rcu, rcu_free_wq);
3684 }
3685 }
3686
3687 /**
3688 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3689 * @pwq: target pool_workqueue
3690 *
3691 * If @pwq isn't freezing, set @pwq->max_active to the associated
3692 * workqueue's saved_max_active and activate delayed work items
3693 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
3694 */
3695 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3696 {
3697 struct workqueue_struct *wq = pwq->wq;
3698 bool freezable = wq->flags & WQ_FREEZABLE;
3699 unsigned long flags;
3700
3701 /* for @wq->saved_max_active */
3702 lockdep_assert_held(&wq->mutex);
3703
3704 /* fast exit for non-freezable wqs */
3705 if (!freezable && pwq->max_active == wq->saved_max_active)
3706 return;
3707
3708 /* this function can be called during early boot w/ irq disabled */
3709 spin_lock_irqsave(&pwq->pool->lock, flags);
3710
3711 /*
3712 * During [un]freezing, the caller is responsible for ensuring that
3713 * this function is called at least once after @workqueue_freezing
3714 * is updated and visible.
3715 */
3716 if (!freezable || !workqueue_freezing) {
3717 pwq->max_active = wq->saved_max_active;
3718
3719 while (!list_empty(&pwq->delayed_works) &&
3720 pwq->nr_active < pwq->max_active)
3721 pwq_activate_first_delayed(pwq);
3722
3723 /*
3724 * Need to kick a worker after a thaw or when an unbound wq's
3725 * max_active is bumped. It's a slow path. Do it always.
3726 */
3727 wake_up_worker(pwq->pool);
3728 } else {
3729 pwq->max_active = 0;
3730 }
3731
3732 spin_unlock_irqrestore(&pwq->pool->lock, flags);
3733 }
3734
3735 /* initialize newly alloced @pwq which is associated with @wq and @pool */
3736 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3737 struct worker_pool *pool)
3738 {
3739 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3740
3741 memset(pwq, 0, sizeof(*pwq));
3742
3743 pwq->pool = pool;
3744 pwq->wq = wq;
3745 pwq->flush_color = -1;
3746 pwq->refcnt = 1;
3747 INIT_LIST_HEAD(&pwq->delayed_works);
3748 INIT_LIST_HEAD(&pwq->pwqs_node);
3749 INIT_LIST_HEAD(&pwq->mayday_node);
3750 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3751 }
3752
3753 /* sync @pwq with the current state of its associated wq and link it */
3754 static void link_pwq(struct pool_workqueue *pwq)
3755 {
3756 struct workqueue_struct *wq = pwq->wq;
3757
3758 lockdep_assert_held(&wq->mutex);
3759
3760 /* may be called multiple times, ignore if already linked */
3761 if (!list_empty(&pwq->pwqs_node))
3762 return;
3763
3764 /* set the matching work_color */
3765 pwq->work_color = wq->work_color;
3766
3767 /* sync max_active to the current setting */
3768 pwq_adjust_max_active(pwq);
3769
3770 /* link in @pwq */
3771 list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3772 }
3773
3774 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3775 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3776 const struct workqueue_attrs *attrs)
3777 {
3778 struct worker_pool *pool;
3779 struct pool_workqueue *pwq;
3780
3781 lockdep_assert_held(&wq_pool_mutex);
3782
3783 pool = get_unbound_pool(attrs);
3784 if (!pool)
3785 return NULL;
3786
3787 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3788 if (!pwq) {
3789 put_unbound_pool(pool);
3790 return NULL;
3791 }
3792
3793 init_pwq(pwq, wq, pool);
3794 return pwq;
3795 }
3796
3797 /**
3798 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3799 * @attrs: the wq_attrs of the default pwq of the target workqueue
3800 * @node: the target NUMA node
3801 * @cpu_going_down: if >= 0, the CPU to consider as offline
3802 * @cpumask: outarg, the resulting cpumask
3803 *
3804 * Calculate the cpumask a workqueue with @attrs should use on @node. If
3805 * @cpu_going_down is >= 0, that cpu is considered offline during
3806 * calculation. The result is stored in @cpumask.
3807 *
3808 * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
3809 * enabled and @node has online CPUs requested by @attrs, the returned
3810 * cpumask is the intersection of the possible CPUs of @node and
3811 * @attrs->cpumask.
3812 *
3813 * The caller is responsible for ensuring that the cpumask of @node stays
3814 * stable.
3815 *
3816 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3817 * %false if equal.
3818 */
3819 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3820 int cpu_going_down, cpumask_t *cpumask)
3821 {
3822 if (!wq_numa_enabled || attrs->no_numa)
3823 goto use_dfl;
3824
3825 /* does @node have any online CPUs @attrs wants? */
3826 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3827 if (cpu_going_down >= 0)
3828 cpumask_clear_cpu(cpu_going_down, cpumask);
3829
3830 if (cpumask_empty(cpumask))
3831 goto use_dfl;
3832
3833 /* yeap, return possible CPUs in @node that @attrs wants */
3834 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3835
3836 if (cpumask_empty(cpumask)) {
3837 pr_warn_once("WARNING: workqueue cpumask: online intersect > "
3838 "possible intersect\n");
3839 return false;
3840 }
3841
3842 return !cpumask_equal(cpumask, attrs->cpumask);
3843
3844 use_dfl:
3845 cpumask_copy(cpumask, attrs->cpumask);
3846 return false;
3847 }
3848
3849 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3850 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3851 int node,
3852 struct pool_workqueue *pwq)
3853 {
3854 struct pool_workqueue *old_pwq;
3855
3856 lockdep_assert_held(&wq_pool_mutex);
3857 lockdep_assert_held(&wq->mutex);
3858
3859 /* link_pwq() can handle duplicate calls */
3860 link_pwq(pwq);
3861
3862 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3863 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3864 return old_pwq;
3865 }
3866
3867 /* context to store the prepared attrs & pwqs before applying */
3868 struct apply_wqattrs_ctx {
3869 struct workqueue_struct *wq; /* target workqueue */
3870 struct workqueue_attrs *attrs; /* attrs to apply */
3871 struct list_head list; /* queued for batching commit */
3872 struct pool_workqueue *dfl_pwq;
3873 struct pool_workqueue *pwq_tbl[];
3874 };
3875
3876 /* free the resources after success or abort */
3877 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
3878 {
3879 if (ctx) {
3880 int node;
3881
3882 for_each_node(node)
3883 put_pwq_unlocked(ctx->pwq_tbl[node]);
3884 put_pwq_unlocked(ctx->dfl_pwq);
3885
3886 free_workqueue_attrs(ctx->attrs);
3887
3888 kfree(ctx);
3889 }
3890 }
3891
3892 /* allocate the attrs and pwqs for later installation */
3893 static struct apply_wqattrs_ctx *
3894 apply_wqattrs_prepare(struct workqueue_struct *wq,
3895 const struct workqueue_attrs *attrs)
3896 {
3897 struct apply_wqattrs_ctx *ctx;
3898 struct workqueue_attrs *new_attrs, *tmp_attrs;
3899 int node;
3900
3901 lockdep_assert_held(&wq_pool_mutex);
3902
3903 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
3904
3905 new_attrs = alloc_workqueue_attrs();
3906 tmp_attrs = alloc_workqueue_attrs();
3907 if (!ctx || !new_attrs || !tmp_attrs)
3908 goto out_free;
3909
3910 /*
3911 * Calculate the attrs of the default pwq.
3912 * If the user configured cpumask doesn't overlap with the
3913 * wq_unbound_cpumask, we fall back to the wq_unbound_cpumask.
3914 */
3915 copy_workqueue_attrs(new_attrs, attrs);
3916 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
3917 if (unlikely(cpumask_empty(new_attrs->cpumask)))
3918 cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
3919
3920 /*
3921 * We may create multiple pwqs with differing cpumasks. Make a
3922 * copy of @new_attrs which will be modified and used to obtain
3923 * pools.
3924 */
3925 copy_workqueue_attrs(tmp_attrs, new_attrs);
3926
3927 /*
3928 * If something goes wrong during CPU up/down, we'll fall back to
3929 * the default pwq covering the whole @attrs->cpumask. Always create
3930 * it even if we don't use it immediately.
3931 */
3932 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3933 if (!ctx->dfl_pwq)
3934 goto out_free;
3935
3936 for_each_node(node) {
3937 if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
3938 ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3939 if (!ctx->pwq_tbl[node])
3940 goto out_free;
3941 } else {
3942 ctx->dfl_pwq->refcnt++;
3943 ctx->pwq_tbl[node] = ctx->dfl_pwq;
3944 }
3945 }
3946
3947 /* save the user configured attrs and sanitize it. */
3948 copy_workqueue_attrs(new_attrs, attrs);
3949 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3950 ctx->attrs = new_attrs;
3951
3952 ctx->wq = wq;
3953 free_workqueue_attrs(tmp_attrs);
3954 return ctx;
3955
3956 out_free:
3957 free_workqueue_attrs(tmp_attrs);
3958 free_workqueue_attrs(new_attrs);
3959 apply_wqattrs_cleanup(ctx);
3960 return NULL;
3961 }
3962
3963 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
3964 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
3965 {
3966 int node;
3967
3968 /* all pwqs have been created successfully, let's install'em */
3969 mutex_lock(&ctx->wq->mutex);
3970
3971 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
3972
3973 /* save the previous pwq and install the new one */
3974 for_each_node(node)
3975 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
3976 ctx->pwq_tbl[node]);
3977
3978 /* @dfl_pwq might not have been used, ensure it's linked */
3979 link_pwq(ctx->dfl_pwq);
3980 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
3981
3982 mutex_unlock(&ctx->wq->mutex);
3983 }
3984
3985 static void apply_wqattrs_lock(void)
3986 {
3987 /* CPUs should stay stable across pwq creations and installations */
3988 get_online_cpus();
3989 mutex_lock(&wq_pool_mutex);
3990 }
3991
3992 static void apply_wqattrs_unlock(void)
3993 {
3994 mutex_unlock(&wq_pool_mutex);
3995 put_online_cpus();
3996 }
3997
3998 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
3999 const struct workqueue_attrs *attrs)
4000 {
4001 struct apply_wqattrs_ctx *ctx;
4002
4003 /* only unbound workqueues can change attributes */
4004 if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
4005 return -EINVAL;
4006
4007 /* creating multiple pwqs breaks ordering guarantee */
4008 if (!list_empty(&wq->pwqs)) {
4009 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4010 return -EINVAL;
4011
4012 wq->flags &= ~__WQ_ORDERED;
4013 }
4014
4015 ctx = apply_wqattrs_prepare(wq, attrs);
4016 if (!ctx)
4017 return -ENOMEM;
4018
4019 /* the ctx has been prepared successfully, let's commit it */
4020 apply_wqattrs_commit(ctx);
4021 apply_wqattrs_cleanup(ctx);
4022
4023 return 0;
4024 }
4025
4026 /**
4027 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
4028 * @wq: the target workqueue
4029 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
4030 *
4031 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
4032 * machines, this function maps a separate pwq to each NUMA node with
4033 * possible CPUs in @attrs->cpumask so that work items are affine to the
4034 * NUMA node it was issued on. Older pwqs are released as in-flight work
4035 * items finish. Note that a work item which repeatedly requeues itself
4036 * back-to-back will stay on its current pwq.
4037 *
4038 * Performs GFP_KERNEL allocations.
4039 *
4040 * Assumes caller has CPU hotplug read exclusion, i.e. get_online_cpus().
4041 *
4042 * Return: 0 on success and -errno on failure.
4043 */
4044 int apply_workqueue_attrs(struct workqueue_struct *wq,
4045 const struct workqueue_attrs *attrs)
4046 {
4047 int ret;
4048
4049 lockdep_assert_cpus_held();
4050
4051 mutex_lock(&wq_pool_mutex);
4052 ret = apply_workqueue_attrs_locked(wq, attrs);
4053 mutex_unlock(&wq_pool_mutex);
4054
4055 return ret;
4056 }
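
/*
 * Illustrative usage sketch ('my_unbound_wq' and 'my_mask' are
 * hypothetical): confine an unbound workqueue to a chosen cpumask and
 * nice level.  The hotplug read lock satisfies the requirement documented
 * above.
 *
 *	struct workqueue_attrs *attrs;
 *	int ret;
 *
 *	attrs = alloc_workqueue_attrs();
 *	if (!attrs)
 *		return -ENOMEM;
 *	attrs->nice = -5;
 *	cpumask_copy(attrs->cpumask, my_mask);
 *
 *	get_online_cpus();
 *	ret = apply_workqueue_attrs(my_unbound_wq, attrs);
 *	put_online_cpus();
 *
 *	free_workqueue_attrs(attrs);
 *	return ret;
 */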
4057
4058 /**
4059 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
4060 * @wq: the target workqueue
4061 * @cpu: the CPU coming up or going down
4062 * @online: whether @cpu is coming up or going down
4063 *
4064 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
4065 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of
4066 * @wq accordingly.
4067 *
4068 * If NUMA affinity can't be adjusted due to memory allocation failure, it
4069 * falls back to @wq->dfl_pwq which may not be optimal but is always
4070 * correct.
4071 *
4072 * Note that when the last allowed CPU of a NUMA node goes offline for a
4073 * workqueue with a cpumask spanning multiple nodes, the workers which were
4074 * already executing the work items for the workqueue will lose their CPU
4075 * affinity and may execute on any CPU. This is similar to how per-cpu
4076 * workqueues behave on CPU_DOWN. If a workqueue user wants strict
4077 * affinity, it's the user's responsibility to flush the work item from
4078 * CPU_DOWN_PREPARE.
4079 */
4080 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
4081 bool online)
4082 {
4083 int node = cpu_to_node(cpu);
4084 int cpu_off = online ? -1 : cpu;
4085 struct pool_workqueue *old_pwq = NULL, *pwq;
4086 struct workqueue_attrs *target_attrs;
4087 cpumask_t *cpumask;
4088
4089 lockdep_assert_held(&wq_pool_mutex);
4090
4091 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
4092 wq->unbound_attrs->no_numa)
4093 return;
4094
4095 /*
4096 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4097 * Let's use a preallocated one. The following buf is protected by
4098 * CPU hotplug exclusion.
4099 */
4100 target_attrs = wq_update_unbound_numa_attrs_buf;
4101 cpumask = target_attrs->cpumask;
4102
4103 copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
4104 pwq = unbound_pwq_by_node(wq, node);
4105
4106 /*
4107 * Let's determine what needs to be done. If the target cpumask is
4108 * different from the default pwq's, we need to compare it to @pwq's
4109 * and create a new one if they don't match. If the target cpumask
4110 * equals the default pwq's, the default pwq should be used.
4111 */
4112 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
4113 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
4114 return;
4115 } else {
4116 goto use_dfl_pwq;
4117 }
4118
4119 /* create a new pwq */
4120 pwq = alloc_unbound_pwq(wq, target_attrs);
4121 if (!pwq) {
4122 pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
4123 wq->name);
4124 goto use_dfl_pwq;
4125 }
4126
4127 /* Install the new pwq. */
4128 mutex_lock(&wq->mutex);
4129 old_pwq = numa_pwq_tbl_install(wq, node, pwq);
4130 goto out_unlock;
4131
4132 use_dfl_pwq:
4133 mutex_lock(&wq->mutex);
4134 spin_lock_irq(&wq->dfl_pwq->pool->lock);
4135 get_pwq(wq->dfl_pwq);
4136 spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4137 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4138 out_unlock:
4139 mutex_unlock(&wq->mutex);
4140 put_pwq_unlocked(old_pwq);
4141 }
4142
4143 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4144 {
4145 bool highpri = wq->flags & WQ_HIGHPRI;
4146 int cpu, ret;
4147
4148 if (!(wq->flags & WQ_UNBOUND)) {
4149 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4150 if (!wq->cpu_pwqs)
4151 return -ENOMEM;
4152
4153 for_each_possible_cpu(cpu) {
4154 struct pool_workqueue *pwq =
4155 per_cpu_ptr(wq->cpu_pwqs, cpu);
4156 struct worker_pool *cpu_pools =
4157 per_cpu(cpu_worker_pools, cpu);
4158
4159 init_pwq(pwq, wq, &cpu_pools[highpri]);
4160
4161 mutex_lock(&wq->mutex);
4162 link_pwq(pwq);
4163 mutex_unlock(&wq->mutex);
4164 }
4165 return 0;
4166 }
4167
4168 get_online_cpus();
4169 if (wq->flags & __WQ_ORDERED) {
4170 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4171 /* there should only be a single pwq for the ordering guarantee */
4172 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4173 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4174 "ordering guarantee broken for workqueue %s\n", wq->name);
4175 } else {
4176 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4177 }
4178 put_online_cpus();
4179
4180 return ret;
4181 }
4182
4183 static int wq_clamp_max_active(int max_active, unsigned int flags,
4184 const char *name)
4185 {
4186 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
4187
4188 if (max_active < 1 || max_active > lim)
4189 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4190 max_active, name, 1, lim);
4191
4192 return clamp_val(max_active, 1, lim);
4193 }
4194
4195 /*
4196 * Workqueues which may be used during memory reclaim should have a rescuer
4197 * to guarantee forward progress.
4198 */
4199 static int init_rescuer(struct workqueue_struct *wq)
4200 {
4201 struct worker *rescuer;
4202 int ret;
4203
4204 if (!(wq->flags & WQ_MEM_RECLAIM))
4205 return 0;
4206
4207 rescuer = alloc_worker(NUMA_NO_NODE);
4208 if (!rescuer)
4209 return -ENOMEM;
4210
4211 rescuer->rescue_wq = wq;
4212 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
4213 ret = PTR_ERR_OR_ZERO(rescuer->task);
4214 if (ret) {
4215 kfree(rescuer);
4216 return ret;
4217 }
4218
4219 wq->rescuer = rescuer;
4220 kthread_bind_mask(rescuer->task, cpu_possible_mask);
4221 wake_up_process(rescuer->task);
4222
4223 return 0;
4224 }
4225
4226 __printf(1, 4)
4227 struct workqueue_struct *alloc_workqueue(const char *fmt,
4228 unsigned int flags,
4229 int max_active, ...)
4230 {
4231 size_t tbl_size = 0;
4232 va_list args;
4233 struct workqueue_struct *wq;
4234 struct pool_workqueue *pwq;
4235
4236 /*
4237 * Unbound && max_active == 1 used to imply ordered, which is no
4238 * longer the case on NUMA machines due to per-node pools. While
4239 * alloc_ordered_workqueue() is the right way to create an ordered
4240 * workqueue, keep the previous behavior to avoid subtle breakages
4241 * on NUMA.
4242 */
4243 if ((flags & WQ_UNBOUND) && max_active == 1)
4244 flags |= __WQ_ORDERED;
4245
4246 /* see the comment above the definition of WQ_POWER_EFFICIENT */
4247 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4248 flags |= WQ_UNBOUND;
4249
4250 /* allocate wq and format name */
4251 if (flags & WQ_UNBOUND)
4252 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
4253
4254 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4255 if (!wq)
4256 return NULL;
4257
4258 if (flags & WQ_UNBOUND) {
4259 wq->unbound_attrs = alloc_workqueue_attrs();
4260 if (!wq->unbound_attrs)
4261 goto err_free_wq;
4262 }
4263
4264 va_start(args, max_active);
4265 vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4266 va_end(args);
4267
4268 max_active = max_active ?: WQ_DFL_ACTIVE;
4269 max_active = wq_clamp_max_active(max_active, flags, wq->name);
4270
4271 /* init wq */
4272 wq->flags = flags;
4273 wq->saved_max_active = max_active;
4274 mutex_init(&wq->mutex);
4275 atomic_set(&wq->nr_pwqs_to_flush, 0);
4276 INIT_LIST_HEAD(&wq->pwqs);
4277 INIT_LIST_HEAD(&wq->flusher_queue);
4278 INIT_LIST_HEAD(&wq->flusher_overflow);
4279 INIT_LIST_HEAD(&wq->maydays);
4280
4281 wq_init_lockdep(wq);
4282 INIT_LIST_HEAD(&wq->list);
4283
4284 if (alloc_and_link_pwqs(wq) < 0)
4285 goto err_unreg_lockdep;
4286
4287 if (wq_online && init_rescuer(wq) < 0)
4288 goto err_destroy;
4289
4290 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4291 goto err_destroy;
4292
4293 /*
4294 * wq_pool_mutex protects global freeze state and workqueues list.
4295 * Grab it, adjust max_active and add the new @wq to workqueues
4296 * list.
4297 */
4298 mutex_lock(&wq_pool_mutex);
4299
4300 mutex_lock(&wq->mutex);
4301 for_each_pwq(pwq, wq)
4302 pwq_adjust_max_active(pwq);
4303 mutex_unlock(&wq->mutex);
4304
4305 list_add_tail_rcu(&wq->list, &workqueues);
4306
4307 mutex_unlock(&wq_pool_mutex);
4308
4309 return wq;
4310
4311 err_unreg_lockdep:
4312 wq_unregister_lockdep(wq);
4313 wq_free_lockdep(wq);
4314 err_free_wq:
4315 free_workqueue_attrs(wq->unbound_attrs);
4316 kfree(wq);
4317 return NULL;
4318 err_destroy:
4319 destroy_workqueue(wq);
4320 return NULL;
4321 }
4322 EXPORT_SYMBOL_GPL(alloc_workqueue);
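
/*
 * Illustrative usage sketch ('mydrv_wq' and 'my_work' are hypothetical):
 * a typical driver allocates its workqueue once, queues work items on it
 * and tears it down with destroy_workqueue() below, which drains pending
 * work first.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("mydrv_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	queue_work(wq, &my_work);
 *	...
 *	destroy_workqueue(wq);
 */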
4323
4324 /**
4325 * destroy_workqueue - safely terminate a workqueue
4326 * @wq: target workqueue
4327 *
4328 * Safely destroy a workqueue. All work currently pending will be done first.
4329 */
4330 void destroy_workqueue(struct workqueue_struct *wq)
4331 {
4332 struct pool_workqueue *pwq;
4333 int node;
4334
4335 /*
4336 * Remove it from sysfs first so that sanity check failure doesn't
4337 * lead to sysfs name conflicts.
4338 */
4339 workqueue_sysfs_unregister(wq);
4340
4341 /* drain it before proceeding with destruction */
4342 drain_workqueue(wq);
4343
4344 /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
4345 if (wq->rescuer) {
4346 struct worker *rescuer = wq->rescuer;
4347
4348 /* this prevents new queueing */
4349 spin_lock_irq(&wq_mayday_lock);
4350 wq->rescuer = NULL;
4351 spin_unlock_irq(&wq_mayday_lock);
4352
4353 /* rescuer will empty maydays list before exiting */
4354 kthread_stop(rescuer->task);
4355 kfree(rescuer);
4356 }
4357
4358 /* sanity checks */
4359 mutex_lock(&wq->mutex);
4360 for_each_pwq(pwq, wq) {
4361 int i;
4362
4363 for (i = 0; i < WORK_NR_COLORS; i++) {
4364 if (WARN_ON(pwq->nr_in_flight[i])) {
4365 mutex_unlock(&wq->mutex);
4366 show_workqueue_state();
4367 return;
4368 }
4369 }
4370
4371 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
4372 WARN_ON(pwq->nr_active) ||
4373 WARN_ON(!list_empty(&pwq->delayed_works))) {
4374 mutex_unlock(&wq->mutex);
4375 show_workqueue_state();
4376 return;
4377 }
4378 }
4379 mutex_unlock(&wq->mutex);
4380
4381 /*
4382 * The wq list is used to freeze wqs; remove @wq from the list after
4383 * flushing is complete in case a freeze races us.
4384 */
4385 mutex_lock(&wq_pool_mutex);
4386 list_del_rcu(&wq->list);
4387 mutex_unlock(&wq_pool_mutex);
4388
4389 if (!(wq->flags & WQ_UNBOUND)) {
4390 wq_unregister_lockdep(wq);
4391 /*
4392 * The base ref is never dropped on per-cpu pwqs. Directly
4393 * schedule RCU free.
4394 */
4395 call_rcu(&wq->rcu, rcu_free_wq);
4396 } else {
4397 /*
4398 * We're the sole accessor of @wq at this point. Directly
4399 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4400 * @wq will be freed when the last pwq is released.
4401 */
4402 for_each_node(node) {
4403 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4404 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4405 put_pwq_unlocked(pwq);
4406 }
4407
4408 /*
4409 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is
4410 * put. Don't access it afterwards.
4411 */
4412 pwq = wq->dfl_pwq;
4413 wq->dfl_pwq = NULL;
4414 put_pwq_unlocked(pwq);
4415 }
4416 }
4417 EXPORT_SYMBOL_GPL(destroy_workqueue);
4418
4419 /**
4420 * workqueue_set_max_active - adjust max_active of a workqueue
4421 * @wq: target workqueue
4422 * @max_active: new max_active value.
4423 *
4424 * Set max_active of @wq to @max_active.
4425 *
4426 * CONTEXT:
4427 * Don't call from IRQ context.
4428 */
4429 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4430 {
4431 struct pool_workqueue *pwq;
4432
4433 /* disallow meddling with max_active for ordered workqueues */
4434 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4435 return;
4436
4437 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4438
4439 mutex_lock(&wq->mutex);
4440
4441 wq->flags &= ~__WQ_ORDERED;
4442 wq->saved_max_active = max_active;
4443
4444 for_each_pwq(pwq, wq)
4445 pwq_adjust_max_active(pwq);
4446
4447 mutex_unlock(&wq->mutex);
4448 }
4449 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
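
/*
 * Illustrative example ('my_wq' is hypothetical): bump the concurrency
 * limit of a non-ordered workqueue at runtime, e.g. in response to a
 * module parameter.  Must not be called from IRQ context.
 *
 *	workqueue_set_max_active(my_wq, 16);	// clamped to [1, WQ_MAX_ACTIVE]
 */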
4450
4451 /**
4452 * current_work - retrieve %current task's work struct
4453 *
4454 * Determine if %current task is a workqueue worker and what it's working on.
4455 * Useful to find out the context that the %current task is running in.
4456 *
4457 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4458 */
4459 struct work_struct *current_work(void)
4460 {
4461 struct worker *worker = current_wq_worker();
4462
4463 return worker ? worker->current_work : NULL;
4464 }
4465 EXPORT_SYMBOL(current_work);
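
/*
 * Illustrative example ('my_work' is hypothetical): a code path which may
 * run both from a work item and from other contexts can use current_work()
 * to avoid deadlocking on itself.
 *
 *	if (current_work() != &my_work)
 *		cancel_work_sync(&my_work);	// safe: not cancelling ourselves
 */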
4466
4467 /**
4468 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4469 *
4470 * Determine whether %current is a workqueue rescuer. Can be used from
4471 * work functions to determine whether it's being run off the rescuer task.
4472 *
4473 * Return: %true if %current is a workqueue rescuer. %false otherwise.
4474 */
4475 bool current_is_workqueue_rescuer(void)
4476 {
4477 struct worker *worker = current_wq_worker();
4478
4479 return worker && worker->rescue_wq;
4480 }
4481
4482 /**
4483 * workqueue_congested - test whether a workqueue is congested
4484 * @cpu: CPU in question
4485 * @wq: target workqueue
4486 *
4487 * Test whether @wq's cpu workqueue for @cpu is congested. There is
4488 * no synchronization around this function and the test result is
4489 * unreliable and only useful as advisory hints or for debugging.
4490 *
4491 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4492 * Note that both per-cpu and unbound workqueues may be associated with
4493 * multiple pool_workqueues which have separate congested states. A
4494 * workqueue being congested on one CPU doesn't mean the workqueue is also
4495 * congested on other CPUs / NUMA nodes.
4496 *
4497 * Return:
4498 * %true if congested, %false otherwise.
4499 */
4500 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4501 {
4502 struct pool_workqueue *pwq;
4503 bool ret;
4504
4505 rcu_read_lock();
4506 preempt_disable();
4507
4508 if (cpu == WORK_CPU_UNBOUND)
4509 cpu = smp_processor_id();
4510
4511 if (!(wq->flags & WQ_UNBOUND))
4512 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4513 else
4514 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4515
4516 ret = !list_empty(&pwq->delayed_works);
4517 preempt_enable();
4518 rcu_read_unlock();
4519
4520 return ret;
4521 }
4522 EXPORT_SYMBOL_GPL(workqueue_congested);
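
/*
 * Illustrative example ('my_wq' and 'my_work' are hypothetical): use the
 * result only as an advisory hint, e.g. to shed optional background work.
 *
 *	if (!workqueue_congested(WORK_CPU_UNBOUND, my_wq))
 *		queue_work(my_wq, &my_work);
 */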
4523
4524 /**
4525 * work_busy - test whether a work is currently pending or running
4526 * @work: the work to be tested
4527 *
4528 * Test whether @work is currently pending or running. There is no
4529 * synchronization around this function and the test result is
4530 * unreliable and only useful as advisory hints or for debugging.
4531 *
4532 * Return:
4533 * OR'd bitmask of WORK_BUSY_* bits.
4534 */
4535 unsigned int work_busy(struct work_struct *work)
4536 {
4537 struct worker_pool *pool;
4538 unsigned long flags;
4539 unsigned int ret = 0;
4540
4541 if (work_pending(work))
4542 ret |= WORK_BUSY_PENDING;
4543
4544 rcu_read_lock();
4545 pool = get_work_pool(work);
4546 if (pool) {
4547 spin_lock_irqsave(&pool->lock, flags);
4548 if (find_worker_executing_work(pool, work))
4549 ret |= WORK_BUSY_RUNNING;
4550 spin_unlock_irqrestore(&pool->lock, flags);
4551 }
4552 rcu_read_unlock();
4553
4554 return ret;
4555 }
4556 EXPORT_SYMBOL_GPL(work_busy);
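
/*
 * Illustrative debugging example ('my_work' is hypothetical); the answer
 * may already be stale by the time it is printed.
 *
 *	unsigned int busy = work_busy(&my_work);
 *
 *	pr_debug("my_work:%s%s\n",
 *		 busy & WORK_BUSY_PENDING ? " pending" : "",
 *		 busy & WORK_BUSY_RUNNING ? " running" : "");
 */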
4557
4558 /**
4559 * set_worker_desc - set description for the current work item
4560 * @fmt: printf-style format string
4561 * @...: arguments for the format string
4562 *
4563 * This function can be called by a running work function to describe what
4564 * the work item is about. If the worker task gets dumped, this
4565 * information will be printed out with the dump to help debugging. The
4566 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4567 */
4568 void set_worker_desc(const char *fmt, ...)
4569 {
4570 struct worker *worker = current_wq_worker();
4571 va_list args;
4572
4573 if (worker) {
4574 va_start(args, fmt);
4575 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4576 va_end(args);
4577 }
4578 }
4579 EXPORT_SYMBOL_GPL(set_worker_desc);
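
/*
 * Illustrative example ('struct my_ctx' and 'my_work_fn' are hypothetical):
 * a work function can annotate the worker with what it is operating on so
 * that task dumps are more informative, similar to how the writeback code
 * tags its workers with the backing device name.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *
 *		set_worker_desc("mydrv %s", ctx->name);
 *		...
 *	}
 */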
4580
4581 /**
4582 * print_worker_info - print out worker information and description
4583 * @log_lvl: the log level to use when printing
4584 * @task: target task
4585 *
4586 * If @task is a worker and currently executing a work item, print out the
4587 * name of the workqueue being serviced and worker description set with
4588 * set_worker_desc() by the currently executing work item.
4589 *
4590 * This function can be safely called on any task as long as the
4591 * task_struct itself is accessible. While safe, this function isn't
4592 * synchronized and may print out mixed-up or garbage data of limited length.
4593 */
4594 void print_worker_info(const char *log_lvl, struct task_struct *task)
4595 {
4596 work_func_t *fn = NULL;
4597 char name[WQ_NAME_LEN] = { };
4598 char desc[WORKER_DESC_LEN] = { };
4599 struct pool_workqueue *pwq = NULL;
4600 struct workqueue_struct *wq = NULL;
4601 struct worker *worker;
4602
4603 if (!(task->flags & PF_WQ_WORKER))
4604 return;
4605
4606 /*
4607 * This function is called without any synchronization and @task
4608 * could be in any state. Be careful with dereferences.
4609 */
4610 worker = kthread_probe_data(task);
4611
4612 /*
4613 * Carefully copy the associated workqueue's workfn, name and desc.
4614 * Keep the original last '\0' in case the original is garbage.
4615 */
4616 probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4617 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4618 probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4619 probe_kernel_read(name, wq->name, sizeof(name) - 1);
4620 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4621
4622 if (fn || name[0] || desc[0]) {
4623 printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
4624 if (strcmp(name, desc))
4625 pr_cont(" (%s)", desc);
4626 pr_cont("\n");
4627 }
4628 }
4629
4630 static void pr_cont_pool_info(struct worker_pool *pool)
4631 {
4632 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4633 if (pool->node != NUMA_NO_NODE)
4634 pr_cont(" node=%d", pool->node);
4635 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4636 }
4637
4638 static void pr_cont_work(bool comma, struct work_struct *work)
4639 {
4640 if (work->func == wq_barrier_func) {
4641 struct wq_barrier *barr;
4642
4643 barr = container_of(work, struct wq_barrier, work);
4644
4645 pr_cont("%s BAR(%d)", comma ? "," : "",
4646 task_pid_nr(barr->task));
4647 } else {
4648 pr_cont("%s %ps", comma ? "," : "", work->func);
4649 }
4650 }
4651
4652 static void show_pwq(struct pool_workqueue *pwq)
4653 {
4654 struct worker_pool *pool = pwq->pool;
4655 struct work_struct *work;
4656 struct worker *worker;
4657 bool has_in_flight = false, has_pending = false;
4658 int bkt;
4659
4660 pr_info(" pwq %d:", pool->id);
4661 pr_cont_pool_info(pool);
4662
4663 pr_cont(" active=%d/%d refcnt=%d%s\n",
4664 pwq->nr_active, pwq->max_active, pwq->refcnt,
4665 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4666
4667 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4668 if (worker->current_pwq == pwq) {
4669 has_in_flight = true;
4670 break;
4671 }
4672 }
4673 if (has_in_flight) {
4674 bool comma = false;
4675
4676 pr_info(" in-flight:");
4677 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4678 if (worker->current_pwq != pwq)
4679 continue;
4680
4681 pr_cont("%s %d%s:%ps", comma ? "," : "",
4682 task_pid_nr(worker->task),
4683 worker == pwq->wq->rescuer ? "(RESCUER)" : "",
4684 worker->current_func);
4685 list_for_each_entry(work, &worker->scheduled, entry)
4686 pr_cont_work(false, work);
4687 comma = true;
4688 }
4689 pr_cont("\n");
4690 }
4691
4692 list_for_each_entry(work, &pool->worklist, entry) {
4693 if (get_work_pwq(work) == pwq) {
4694 has_pending = true;
4695 break;
4696 }
4697 }
4698 if (has_pending) {
4699 bool comma = false;
4700
4701 pr_info(" pending:");
4702 list_for_each_entry(work, &pool->worklist, entry) {
4703 if (get_work_pwq(work) != pwq)
4704 continue;
4705
4706 pr_cont_work(comma, work);
4707 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4708 }
4709 pr_cont("\n");
4710 }
4711
4712 if (!list_empty(&pwq->delayed_works)) {
4713 bool comma = false;
4714
4715 pr_info(" delayed:");
4716 list_for_each_entry(work, &pwq->delayed_works, entry) {
4717 pr_cont_work(comma, work);
4718 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4719 }
4720 pr_cont("\n");
4721 }
4722 }
4723
4724 /**
4725 * show_workqueue_state - dump workqueue state
4726 *
4727 * Called from a sysrq handler or try_to_freeze_tasks() and prints out
4728 * all busy workqueues and pools.
4729 */
4730 void show_workqueue_state(void)
4731 {
4732 struct workqueue_struct *wq;
4733 struct worker_pool *pool;
4734 unsigned long flags;
4735 int pi;
4736
4737 rcu_read_lock();
4738
4739 pr_info("Showing busy workqueues and worker pools:\n");
4740
4741 list_for_each_entry_rcu(wq, &workqueues, list) {
4742 struct pool_workqueue *pwq;
4743 bool idle = true;
4744
4745 for_each_pwq(pwq, wq) {
4746 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
4747 idle = false;
4748 break;
4749 }
4750 }
4751 if (idle)
4752 continue;
4753
4754 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4755
4756 for_each_pwq(pwq, wq) {
4757 spin_lock_irqsave(&pwq->pool->lock, flags);
4758 if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4759 show_pwq(pwq);
4760 spin_unlock_irqrestore(&pwq->pool->lock, flags);
4761 /*
4762 * We could be printing a lot from atomic context, e.g.
4763 * sysrq-t -> show_workqueue_state(). Avoid triggering
4764 * hard lockup.
4765 */
4766 touch_nmi_watchdog();
4767 }
4768 }
4769
4770 for_each_pool(pool, pi) {
4771 struct worker *worker;
4772 bool first = true;
4773
4774 spin_lock_irqsave(&pool->lock, flags);
4775 if (pool->nr_workers == pool->nr_idle)
4776 goto next_pool;
4777
4778 pr_info("pool %d:", pool->id);
4779 pr_cont_pool_info(pool);
4780 pr_cont(" hung=%us workers=%d",
4781 jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
4782 pool->nr_workers);
4783 if (pool->manager)
4784 pr_cont(" manager: %d",
4785 task_pid_nr(pool->manager->task));
4786 list_for_each_entry(worker, &pool->idle_list, entry) {
4787 pr_cont(" %s%d", first ? "idle: " : "",
4788 task_pid_nr(worker->task));
4789 first = false;
4790 }
4791 pr_cont("\n");
4792 next_pool:
4793 spin_unlock_irqrestore(&pool->lock, flags);
4794 /*
4795 * We could be printing a lot from atomic context, e.g.
4796 * sysrq-t -> show_workqueue_state(). Avoid triggering
4797 * hard lockup.
4798 */
4799 touch_nmi_watchdog();
4800 }
4801
4802 rcu_read_unlock();
4803 }
4804
4805 /* used to show worker information through /proc/PID/{comm,stat,status} */
4806 void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
4807 {
4808 int off;
4809
4810 /* always show the actual comm */
4811 off = strscpy(buf, task->comm, size);
4812 if (off < 0)
4813 return;
4814
4815 /* stabilize PF_WQ_WORKER and worker pool association */
4816 mutex_lock(&wq_pool_attach_mutex);
4817
4818 if (task->flags & PF_WQ_WORKER) {
4819 struct worker *worker = kthread_data(task);
4820 struct worker_pool *pool = worker->pool;
4821
4822 if (pool) {
4823 spin_lock_irq(&pool->lock);
4824 /*
4825 * ->desc tracks information (wq name or
4826 * set_worker_desc()) for the latest execution. If
4827 * current, prepend '+', otherwise '-'.
4828 */
4829 if (worker->desc[0] != '\0') {
4830 if (worker->current_work)
4831 scnprintf(buf + off, size - off, "+%s",
4832 worker->desc);
4833 else
4834 scnprintf(buf + off, size - off, "-%s",
4835 worker->desc);
4836 }
4837 spin_unlock_irq(&pool->lock);
4838 }
4839 }
4840
4841 mutex_unlock(&wq_pool_attach_mutex);
4842 }
4843
4844 #ifdef CONFIG_SMP
4845
4846 /*
4847 * CPU hotplug.
4848 *
4849 * There are two challenges in supporting CPU hotplug. Firstly, there
4850 * are a lot of assumptions on strong associations among work, pwq and
4851 * pool which make migrating pending and scheduled works very
4852 * difficult to implement without impacting hot paths. Secondly,
4853 * worker pools serve a mix of short, long and very long running works, making
4854 * blocked draining impractical.
4855 *
4856 * This is solved by allowing the pools to be disassociated from the CPU,
4857 * running as unbound ones, and allowing them to be reattached later if the
4858 * CPU comes back online.
4859 */
4860
4861 static void unbind_workers(int cpu)
4862 {
4863 struct worker_pool *pool;
4864 struct worker *worker;
4865
4866 for_each_cpu_worker_pool(pool, cpu) {
4867 mutex_lock(&wq_pool_attach_mutex);
4868 spin_lock_irq(&pool->lock);
4869
4870 /*
4871 * We've blocked all attach/detach operations. Make all workers
4872 * unbound and set DISASSOCIATED. Before this, all workers
4873 * except for the ones which are still executing works from
4874 * before the last CPU down must be on the cpu. After
4875 * this, they may become diasporas.
4876 */
4877 for_each_pool_worker(worker, pool)
4878 worker->flags |= WORKER_UNBOUND;
4879
4880 pool->flags |= POOL_DISASSOCIATED;
4881
4882 spin_unlock_irq(&pool->lock);
4883 mutex_unlock(&wq_pool_attach_mutex);
4884
4885 /*
4886 * Call schedule() so that we cross rq->lock and thus can
4887 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4888 * This is necessary as scheduler callbacks may be invoked
4889 * from other cpus.
4890 */
4891 schedule();
4892
4893 /*
4894 * Sched callbacks are disabled now. Zap nr_running.
4895 * After this, nr_running stays zero and need_more_worker()
4896 * and keep_working() are always true as long as the
4897 * worklist is not empty. This pool now behaves as an
4898 * unbound (in terms of concurrency management) pool which
4899 * is served by workers tied to the pool.
4900 */
4901 atomic_set(&pool->nr_running, 0);
4902
4903 /*
4904 * With concurrency management just turned off, a busy
4905 * worker blocking could lead to lengthy stalls. Kick off
4906 * unbound chain execution of currently pending work items.
4907 */
4908 spin_lock_irq(&pool->lock);
4909 wake_up_worker(pool);
4910 spin_unlock_irq(&pool->lock);
4911 }
4912 }
4913
4914 /**
4915 * rebind_workers - rebind all workers of a pool to the associated CPU
4916 * @pool: pool of interest
4917 *
4918 * @pool->cpu is coming online. Rebind all workers to the CPU.
4919 */
4920 static void rebind_workers(struct worker_pool *pool)
4921 {
4922 struct worker *worker;
4923
4924 lockdep_assert_held(&wq_pool_attach_mutex);
4925
4926 /*
4927 * Restore CPU affinity of all workers. As all idle workers should
4928 * be on the run-queue of the associated CPU before any local
4929 * wake-ups for concurrency management happen, restore CPU affinity
4930 * of all workers first and then clear UNBOUND. As we're called
4931 * from CPU_ONLINE, the following shouldn't fail.
4932 */
4933 for_each_pool_worker(worker, pool)
4934 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4935 pool->attrs->cpumask) < 0);
4936
4937 spin_lock_irq(&pool->lock);
4938
4939 pool->flags &= ~POOL_DISASSOCIATED;
4940
4941 for_each_pool_worker(worker, pool) {
4942 unsigned int worker_flags = worker->flags;
4943
4944 /*
4945 * A bound idle worker should actually be on the runqueue
4946 * of the associated CPU for local wake-ups targeting it to
4947 * work. Kick all idle workers so that they migrate to the
4948 * associated CPU. Doing this in the same loop as
4949 * replacing UNBOUND with REBOUND is safe as no worker will
4950 * be bound before @pool->lock is released.
4951 */
4952 if (worker_flags & WORKER_IDLE)
4953 wake_up_process(worker->task);
4954
4955 /*
4956 * We want to clear UNBOUND but can't directly call
4957 * worker_clr_flags() or adjust nr_running. Atomically
4958 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
4959 * @worker will clear REBOUND using worker_clr_flags() when
4960 * it initiates the next execution cycle thus restoring
4961 * concurrency management. Note that when or whether
4962 * @worker clears REBOUND doesn't affect correctness.
4963 *
4964 * WRITE_ONCE() is necessary because @worker->flags may be
4965 * tested without holding any lock in
4966 * wq_worker_running(). Without it, NOT_RUNNING test may
4967 * fail incorrectly leading to premature concurrency
4968 * management operations.
4969 */
4970 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4971 worker_flags |= WORKER_REBOUND;
4972 worker_flags &= ~WORKER_UNBOUND;
4973 WRITE_ONCE(worker->flags, worker_flags);
4974 }
4975
4976 spin_unlock_irq(&pool->lock);
4977 }
4978
4979 /**
4980 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
4981 * @pool: unbound pool of interest
4982 * @cpu: the CPU which is coming up
4983 *
4984 * An unbound pool may end up with a cpumask which doesn't have any online
4985 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
4986 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
4987 * online CPU before, cpus_allowed of all its workers should be restored.
4988 */
4989 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4990 {
4991 static cpumask_t cpumask;
4992 struct worker *worker;
4993
4994 lockdep_assert_held(&wq_pool_attach_mutex);
4995
4996 /* is @cpu allowed for @pool? */
4997 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4998 return;
4999
5000 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
5001
5002 /* as we're called from CPU_ONLINE, the following shouldn't fail */
5003 for_each_pool_worker(worker, pool)
5004 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
5005 }
5006
5007 int workqueue_prepare_cpu(unsigned int cpu)
5008 {
5009 struct worker_pool *pool;
5010
5011 for_each_cpu_worker_pool(pool, cpu) {
5012 if (pool->nr_workers)
5013 continue;
5014 if (!create_worker(pool))
5015 return -ENOMEM;
5016 }
5017 return 0;
5018 }
5019
5020 int workqueue_online_cpu(unsigned int cpu)
5021 {
5022 struct worker_pool *pool;
5023 struct workqueue_struct *wq;
5024 int pi;
5025
5026 mutex_lock(&wq_pool_mutex);
5027
5028 for_each_pool(pool, pi) {
5029 mutex_lock(&wq_pool_attach_mutex);
5030
5031 if (pool->cpu == cpu)
5032 rebind_workers(pool);
5033 else if (pool->cpu < 0)
5034 restore_unbound_workers_cpumask(pool, cpu);
5035
5036 mutex_unlock(&wq_pool_attach_mutex);
5037 }
5038
5039 /* update NUMA affinity of unbound workqueues */
5040 list_for_each_entry(wq, &workqueues, list)
5041 wq_update_unbound_numa(wq, cpu, true);
5042
5043 mutex_unlock(&wq_pool_mutex);
5044 return 0;
5045 }
5046
5047 int workqueue_offline_cpu(unsigned int cpu)
5048 {
5049 struct workqueue_struct *wq;
5050
5051 /* unbinding per-cpu workers should happen on the local CPU */
5052 if (WARN_ON(cpu != smp_processor_id()))
5053 return -1;
5054
5055 unbind_workers(cpu);
5056
5057 /* update NUMA affinity of unbound workqueues */
5058 mutex_lock(&wq_pool_mutex);
5059 list_for_each_entry(wq, &workqueues, list)
5060 wq_update_unbound_numa(wq, cpu, false);
5061 mutex_unlock(&wq_pool_mutex);
5062
5063 return 0;
5064 }
5065
5066 struct work_for_cpu {
5067 struct work_struct work;
5068 long (*fn)(void *);
5069 void *arg;
5070 long ret;
5071 };
5072
5073 static void work_for_cpu_fn(struct work_struct *work)
5074 {
5075 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5076
5077 wfc->ret = wfc->fn(wfc->arg);
5078 }
5079
5080 /**
5081 * work_on_cpu - run a function in thread context on a particular cpu
5082 * @cpu: the cpu to run on
5083 * @fn: the function to run
5084 * @arg: the function arg
5085 *
5086 * It is up to the caller to ensure that the cpu doesn't go offline.
5087 * The caller must not hold any locks which would prevent @fn from completing.
5088 *
5089 * Return: The value @fn returns.
5090 */
5091 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
5092 {
5093 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
5094
5095 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
5096 schedule_work_on(cpu, &wfc.work);
5097 flush_work(&wfc.work);
5098 destroy_work_on_stack(&wfc.work);
5099 return wfc.ret;
5100 }
5101 EXPORT_SYMBOL_GPL(work_on_cpu);
5102
5103 /**
5104 * work_on_cpu_safe - run a function in thread context on a particular cpu
5105 * @cpu: the cpu to run on
5106 * @fn: the function to run
5107 * @arg: the function argument
5108 *
5109 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
5110 * any locks which would prevent @fn from completing.
5111 *
5112 * Return: The value @fn returns.
5113 */
5114 long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
5115 {
5116 long ret = -ENODEV;
5117
5118 get_online_cpus();
5119 if (cpu_online(cpu))
5120 ret = work_on_cpu(cpu, fn, arg);
5121 put_online_cpus();
5122 return ret;
5123 }
5124 EXPORT_SYMBOL_GPL(work_on_cpu_safe);
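
/*
 * Illustrative example ('cpu_probe_fn' is hypothetical): run a short
 * function on a specific CPU and collect its return value;
 * work_on_cpu_safe() takes the hotplug lock on the caller's behalf.
 *
 *	static long cpu_probe_fn(void *arg)
 *	{
 *		return raw_smp_processor_id();	// executes on the target CPU
 *	}
 *
 *	long cpu = work_on_cpu_safe(3, cpu_probe_fn, NULL);
 */
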
5125 #endif /* CONFIG_SMP */
5126
5127 #ifdef CONFIG_FREEZER
5128
5129 /**
5130 * freeze_workqueues_begin - begin freezing workqueues
5131 *
5132 * Start freezing workqueues. After this function returns, all freezable
5133 * workqueues will queue new works to their delayed_works list instead of
5134 * pool->worklist.
5135 *
5136 * CONTEXT:
5137 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5138 */
5139 void freeze_workqueues_begin(void)
5140 {
5141 struct workqueue_struct *wq;
5142 struct pool_workqueue *pwq;
5143
5144 mutex_lock(&wq_pool_mutex);
5145
5146 WARN_ON_ONCE(workqueue_freezing);
5147 workqueue_freezing = true;
5148
5149 list_for_each_entry(wq, &workqueues, list) {
5150 mutex_lock(&wq->mutex);
5151 for_each_pwq(pwq, wq)
5152 pwq_adjust_max_active(pwq);
5153 mutex_unlock(&wq->mutex);
5154 }
5155
5156 mutex_unlock(&wq_pool_mutex);
5157 }
5158
5159 /**
5160 * freeze_workqueues_busy - are freezable workqueues still busy?
5161 *
5162 * Check whether freezing is complete. This function must be called
5163 * between freeze_workqueues_begin() and thaw_workqueues().
5164 *
5165 * CONTEXT:
5166 * Grabs and releases wq_pool_mutex.
5167 *
5168 * Return:
5169 * %true if some freezable workqueues are still busy. %false if freezing
5170 * is complete.
5171 */
5172 bool freeze_workqueues_busy(void)
5173 {
5174 bool busy = false;
5175 struct workqueue_struct *wq;
5176 struct pool_workqueue *pwq;
5177
5178 mutex_lock(&wq_pool_mutex);
5179
5180 WARN_ON_ONCE(!workqueue_freezing);
5181
5182 list_for_each_entry(wq, &workqueues, list) {
5183 if (!(wq->flags & WQ_FREEZABLE))
5184 continue;
5185 /*
5186 * nr_active is monotonically decreasing. It's safe
5187 * to peek without lock.
5188 */
5189 rcu_read_lock();
5190 for_each_pwq(pwq, wq) {
5191 WARN_ON_ONCE(pwq->nr_active < 0);
5192 if (pwq->nr_active) {
5193 busy = true;
5194 rcu_read_unlock();
5195 goto out_unlock;
5196 }
5197 }
5198 rcu_read_unlock();
5199 }
5200 out_unlock:
5201 mutex_unlock(&wq_pool_mutex);
5202 return busy;
5203 }
5204
5205 /**
5206 * thaw_workqueues - thaw workqueues
5207 *
5208 * Thaw workqueues. Normal queueing is restored and all collected
5209 * frozen works are transferred to their respective pool worklists.
5210 *
5211 * CONTEXT:
5212 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5213 */
5214 void thaw_workqueues(void)
5215 {
5216 struct workqueue_struct *wq;
5217 struct pool_workqueue *pwq;
5218
5219 mutex_lock(&wq_pool_mutex);
5220
5221 if (!workqueue_freezing)
5222 goto out_unlock;
5223
5224 workqueue_freezing = false;
5225
5226 /* restore max_active and repopulate worklist */
5227 list_for_each_entry(wq, &workqueues, list) {
5228 mutex_lock(&wq->mutex);
5229 for_each_pwq(pwq, wq)
5230 pwq_adjust_max_active(pwq);
5231 mutex_unlock(&wq->mutex);
5232 }
5233
5234 out_unlock:
5235 mutex_unlock(&wq_pool_mutex);
5236 }
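
/*
 * Illustrative sketch of the freezer protocol as driven by the
 * suspend/hibernation core: begin, poll until no freezable work is in
 * flight, then thaw on resume or failure.  Real callers bound the polling
 * loop by a timeout.
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	...					// system image is taken here
 *	thaw_workqueues();
 */
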
5237 #endif /* CONFIG_FREEZER */
5238
5239 static int workqueue_apply_unbound_cpumask(void)
5240 {
5241 LIST_HEAD(ctxs);
5242 int ret = 0;
5243 struct workqueue_struct *wq;
5244 struct apply_wqattrs_ctx *ctx, *n;
5245
5246 lockdep_assert_held(&wq_pool_mutex);
5247
5248 list_for_each_entry(wq, &workqueues, list) {
5249 if (!(wq->flags & WQ_UNBOUND))
5250 continue;
5251 /* creating multiple pwqs breaks ordering guarantee */
5252 if (wq->flags & __WQ_ORDERED)
5253 continue;
5254
5255 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
5256 if (!ctx) {
5257 ret = -ENOMEM;
5258 break;
5259 }
5260
5261 list_add_tail(&ctx->list, &ctxs);
5262 }
5263
5264 list_for_each_entry_safe(ctx, n, &ctxs, list) {
5265 if (!ret)
5266 apply_wqattrs_commit(ctx);
5267 apply_wqattrs_cleanup(ctx);
5268 }
5269
5270 return ret;
5271 }
5272
5273 /**
5274 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5275 * @cpumask: the cpumask to set
5276 *
5277 * The low-level workqueues cpumask is a global cpumask that limits
5278 * the affinity of all unbound workqueues. This function checks @cpumask,
5279 * applies it to all unbound workqueues and updates all of their pwqs.
5280 *
5281 * Return: 0 - Success
5282 * -EINVAL - Invalid @cpumask
5283 * -ENOMEM - Failed to allocate memory for attrs or pwqs.
5284 */
5285 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5286 {
5287 int ret = -EINVAL;
5288 cpumask_var_t saved_cpumask;
5289
5290 if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
5291 return -ENOMEM;
5292
5293 /*
5294 * Not excluding isolated cpus on purpose.
5295 * If the user wishes to include them, we allow that.
5296 */
5297 cpumask_and(cpumask, cpumask, cpu_possible_mask);
5298 if (!cpumask_empty(cpumask)) {
5299 apply_wqattrs_lock();
5300
5301 /* save the old wq_unbound_cpumask. */
5302 cpumask_copy(saved_cpumask, wq_unbound_cpumask);
5303
5304 /* update wq_unbound_cpumask at first and apply it to wqs. */
5305 cpumask_copy(wq_unbound_cpumask, cpumask);
5306 ret = workqueue_apply_unbound_cpumask();
5307
5308 /* restore the wq_unbound_cpumask when failed. */
5309 if (ret < 0)
5310 cpumask_copy(wq_unbound_cpumask, saved_cpumask);
5311
5312 apply_wqattrs_unlock();
5313 }
5314
5315 free_cpumask_var(saved_cpumask);
5316 return ret;
5317 }
5318
5319 #ifdef CONFIG_SYSFS
5320 /*
5321 * Workqueues with the WQ_SYSFS flag set are visible to userland via
5322 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
5323 * following attributes.
5324 *
5325 * per_cpu RO bool : whether the workqueue is per-cpu or unbound
5326 * max_active RW int : maximum number of in-flight work items
5327 *
5328 * Unbound workqueues have the following extra attributes.
5329 *
5330 * pool_ids RO int : the associated pool IDs for each node
5331 * nice RW int : nice value of the workers
5332 * cpumask RW mask : bitmask of allowed CPUs for the workers
5333 * numa RW bool : whether to enable NUMA affinity
5334 */
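
/*
 * Illustrative example ("mydrv_wq" is hypothetical): opting a workqueue
 * into this interface only requires the WQ_SYSFS flag; the attributes
 * above then appear under /sys/bus/workqueue/devices/mydrv_wq/.
 *
 *	wq = alloc_workqueue("mydrv_wq", WQ_UNBOUND | WQ_SYSFS, 0);
 */
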
5335 struct wq_device {
5336 struct workqueue_struct *wq;
5337 struct device dev;
5338 };
5339
5340 static struct workqueue_struct *dev_to_wq(struct device *dev)
5341 {
5342 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5343
5344 return wq_dev->wq;
5345 }
5346
5347 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5348 char *buf)
5349 {
5350 struct workqueue_struct *wq = dev_to_wq(dev);
5351
5352 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5353 }
5354 static DEVICE_ATTR_RO(per_cpu);
5355
5356 static ssize_t max_active_show(struct device *dev,
5357 struct device_attribute *attr, char *buf)
5358 {
5359 struct workqueue_struct *wq = dev_to_wq(dev);
5360
5361 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5362 }
5363
5364 static ssize_t max_active_store(struct device *dev,
5365 struct device_attribute *attr, const char *buf,
5366 size_t count)
5367 {
5368 struct workqueue_struct *wq = dev_to_wq(dev);
5369 int val;
5370
5371 if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5372 return -EINVAL;
5373
5374 workqueue_set_max_active(wq, val);
5375 return count;
5376 }
5377 static DEVICE_ATTR_RW(max_active);
5378
5379 static struct attribute *wq_sysfs_attrs[] = {
5380 &dev_attr_per_cpu.attr,
5381 &dev_attr_max_active.attr,
5382 NULL,
5383 };
5384 ATTRIBUTE_GROUPS(wq_sysfs);
5385
5386 static ssize_t wq_pool_ids_show(struct device *dev,
5387 struct device_attribute *attr, char *buf)
5388 {
5389 struct workqueue_struct *wq = dev_to_wq(dev);
5390 const char *delim = "";
5391 int node, written = 0;
5392
5393 get_online_cpus();
5394 rcu_read_lock();
5395 for_each_node(node) {
5396 written += scnprintf(buf + written, PAGE_SIZE - written,
5397 "%s%d:%d", delim, node,
5398 unbound_pwq_by_node(wq, node)->pool->id);
5399 delim = " ";
5400 }
5401 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5402 rcu_read_unlock();
5403 put_online_cpus();
5404
5405 return written;
5406 }
5407
5408 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5409 char *buf)
5410 {
5411 struct workqueue_struct *wq = dev_to_wq(dev);
5412 int written;
5413
5414 mutex_lock(&wq->mutex);
5415 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5416 mutex_unlock(&wq->mutex);
5417
5418 return written;
5419 }
5420
5421 /* prepare workqueue_attrs for sysfs store operations */
5422 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
5423 {
5424 struct workqueue_attrs *attrs;
5425
5426 lockdep_assert_held(&wq_pool_mutex);
5427
5428 attrs = alloc_workqueue_attrs();
5429 if (!attrs)
5430 return NULL;
5431
5432 copy_workqueue_attrs(attrs, wq->unbound_attrs);
5433 return attrs;
5434 }
5435
5436 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
5437 const char *buf, size_t count)
5438 {
5439 struct workqueue_struct *wq = dev_to_wq(dev);
5440 struct workqueue_attrs *attrs;
5441 int ret = -ENOMEM;
5442
5443 apply_wqattrs_lock();
5444
5445 attrs = wq_sysfs_prep_attrs(wq);
5446 if (!attrs)
5447 goto out_unlock;
5448
5449 if (sscanf(buf, "%d", &attrs->nice) == 1 &&
5450 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
5451 ret = apply_workqueue_attrs_locked(wq, attrs);
5452 else
5453 ret = -EINVAL;
5454
5455 out_unlock:
5456 apply_wqattrs_unlock();
5457 free_workqueue_attrs(attrs);
5458 return ret ?: count;
5459 }
5460
5461 static ssize_t wq_cpumask_show(struct device *dev,
5462 struct device_attribute *attr, char *buf)
5463 {
5464 struct workqueue_struct *wq = dev_to_wq(dev);
5465 int written;
5466
5467 mutex_lock(&wq->mutex);
5468 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5469 cpumask_pr_args(wq->unbound_attrs->cpumask));
5470 mutex_unlock(&wq->mutex);
5471 return written;
5472 }
5473
5474 static ssize_t wq_cpumask_store(struct device *dev,
5475 struct device_attribute *attr,
5476 const char *buf, size_t count)
5477 {
5478 struct workqueue_struct *wq = dev_to_wq(dev);
5479 struct workqueue_attrs *attrs;
5480 int ret = -ENOMEM;
5481
5482 apply_wqattrs_lock();
5483
5484 attrs = wq_sysfs_prep_attrs(wq);
5485 if (!attrs)
5486 goto out_unlock;
5487
5488 ret = cpumask_parse(buf, attrs->cpumask);
5489 if (!ret)
5490 ret = apply_workqueue_attrs_locked(wq, attrs);
5491
5492 out_unlock:
5493 apply_wqattrs_unlock();
5494 free_workqueue_attrs(attrs);
5495 return ret ?: count;
5496 }
5497
5498 static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5499 char *buf)
5500 {
5501 struct workqueue_struct *wq = dev_to_wq(dev);
5502 int written;
5503
5504 mutex_lock(&wq->mutex);
5505 written = scnprintf(buf, PAGE_SIZE, "%d\n",
5506 !wq->unbound_attrs->no_numa);
5507 mutex_unlock(&wq->mutex);
5508
5509 return written;
5510 }
5511
5512 static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5513 const char *buf, size_t count)
5514 {
5515 struct workqueue_struct *wq = dev_to_wq(dev);
5516 struct workqueue_attrs *attrs;
5517 int v, ret = -ENOMEM;
5518
5519 apply_wqattrs_lock();
5520
5521 attrs = wq_sysfs_prep_attrs(wq);
5522 if (!attrs)
5523 goto out_unlock;
5524
5525 ret = -EINVAL;
5526 if (sscanf(buf, "%d", &v) == 1) {
5527 attrs->no_numa = !v;
5528 ret = apply_workqueue_attrs_locked(wq, attrs);
5529 }
5530
5531 out_unlock:
5532 apply_wqattrs_unlock();
5533 free_workqueue_attrs(attrs);
5534 return ret ?: count;
5535 }
5536
5537 static struct device_attribute wq_sysfs_unbound_attrs[] = {
5538 __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
5539 __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
5540 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
5541 __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
5542 __ATTR_NULL,
5543 };
5544
5545 static struct bus_type wq_subsys = {
5546 .name = "workqueue",
5547 .dev_groups = wq_sysfs_groups,
5548 };
5549
5550 static ssize_t wq_unbound_cpumask_show(struct device *dev,
5551 struct device_attribute *attr, char *buf)
5552 {
5553 int written;
5554
5555 mutex_lock(&wq_pool_mutex);
5556 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5557 cpumask_pr_args(wq_unbound_cpumask));
5558 mutex_unlock(&wq_pool_mutex);
5559
5560 return written;
5561 }
5562
5563 static ssize_t wq_unbound_cpumask_store(struct device *dev,
5564 struct device_attribute *attr, const char *buf, size_t count)
5565 {
5566 cpumask_var_t cpumask;
5567 int ret;
5568
5569 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5570 return -ENOMEM;
5571
5572 ret = cpumask_parse(buf, cpumask);
5573 if (!ret)
5574 ret = workqueue_set_unbound_cpumask(cpumask);
5575
5576 free_cpumask_var(cpumask);
5577 return ret ? ret : count;
5578 }
5579
5580 static struct device_attribute wq_sysfs_cpumask_attr =
5581 __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5582 wq_unbound_cpumask_store);
5583
5584 static int __init wq_sysfs_init(void)
5585 {
5586 int err;
5587
5588 err = subsys_virtual_register(&wq_subsys, NULL);
5589 if (err)
5590 return err;
5591
5592 return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
5593 }
5594 core_initcall(wq_sysfs_init);
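
/*
 * Example (illustrative sketch): with the subsystem registered above, the
 * global unbound cpumask is expected to appear as a device attribute of
 * the virtual workqueue subsystem, e.g.
 *
 *	# cat /sys/devices/virtual/workqueue/cpumask
 *	# echo 0-3 > /sys/devices/virtual/workqueue/cpumask
 *
 * The "0-3" CPU list is just an example value.
 */
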
5595
5596 static void wq_device_release(struct device *dev)
5597 {
5598 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5599
5600 kfree(wq_dev);
5601 }
5602
5603 /**
5604 * workqueue_sysfs_register - make a workqueue visible in sysfs
5605 * @wq: the workqueue to register
5606 *
5607 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
5608 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set,
5609 * which is the preferred method.
5610 *
5611 * A workqueue user should use this function directly only if it wants to
5612 * apply workqueue_attrs before making the workqueue visible in sysfs;
5613 * otherwise, apply_workqueue_attrs() may race against userland updating the
5614 * attributes.
5615 *
5616 * Return: 0 on success, -errno on failure.
5617 */
5618 int workqueue_sysfs_register(struct workqueue_struct *wq)
5619 {
5620 struct wq_device *wq_dev;
5621 int ret;
5622
5623 /*
5624 * Adjusting max_active or creating new pwqs by applying
5625 * attributes breaks the ordering guarantee. Disallow exposing ordered
5626 * workqueues.
5627 */
5628 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5629 return -EINVAL;
5630
5631 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
5632 if (!wq_dev)
5633 return -ENOMEM;
5634
5635 wq_dev->wq = wq;
5636 wq_dev->dev.bus = &wq_subsys;
5637 wq_dev->dev.release = wq_device_release;
5638 dev_set_name(&wq_dev->dev, "%s", wq->name);
5639
5640 /*
5641 * unbound_attrs are created separately. Suppress uevent until
5642 * everything is ready.
5643 */
5644 dev_set_uevent_suppress(&wq_dev->dev, true);
5645
5646 ret = device_register(&wq_dev->dev);
5647 if (ret) {
5648 put_device(&wq_dev->dev);
5649 wq->wq_dev = NULL;
5650 return ret;
5651 }
5652
5653 if (wq->flags & WQ_UNBOUND) {
5654 struct device_attribute *attr;
5655
5656 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
5657 ret = device_create_file(&wq_dev->dev, attr);
5658 if (ret) {
5659 device_unregister(&wq_dev->dev);
5660 wq->wq_dev = NULL;
5661 return ret;
5662 }
5663 }
5664 }
5665
5666 dev_set_uevent_suppress(&wq_dev->dev, false);
5667 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
5668 return 0;
5669 }
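
/*
 * Example (illustrative sketch): a user that wants workqueue_attrs applied
 * before the workqueue becomes visible could follow the pattern described
 * in the comment above - allocate without WQ_SYSFS, apply the attributes,
 * then register. "foo_wq" and the nice value are hypothetical.
 *
 *	foo_wq = alloc_workqueue("foo", WQ_UNBOUND, 0);
 *	attrs = alloc_workqueue_attrs();
 *	if (foo_wq && attrs) {
 *		attrs->nice = -5;
 *		if (!apply_workqueue_attrs(foo_wq, attrs))
 *			workqueue_sysfs_register(foo_wq);
 *	}
 *	free_workqueue_attrs(attrs);
 */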
5670
5671 /**
5672 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
5673 * @wq: the workqueue to unregister
5674 *
5675 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
5676 */
5677 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
5678 {
5679 struct wq_device *wq_dev = wq->wq_dev;
5680
5681 if (!wq->wq_dev)
5682 return;
5683
5684 wq->wq_dev = NULL;
5685 device_unregister(&wq_dev->dev);
5686 }
5687 #else /* CONFIG_SYSFS */
5688 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
5689 #endif /* CONFIG_SYSFS */
5690
5691 /*
5692 * Workqueue watchdog.
5693 *
5694 * A stall may be caused by various bugs - a missing WQ_MEM_RECLAIM, an
5695 * illegal flush dependency, or a concurrency-managed work item stuck RUNNING
5696 * indefinitely. Workqueue stalls can be very difficult to debug as the
5697 * usual warning mechanisms don't trigger and internal workqueue state is
5698 * largely opaque.
5699 *
5700 * Workqueue watchdog monitors all worker pools periodically and dumps
5701 * state if some pools failed to make forward progress for a while, where
5702 * forward progress is defined as the first item on ->worklist changing.
5703 *
5704 * This mechanism is controlled through the kernel parameter
5705 * "workqueue.watchdog_thresh" which can be updated at runtime through the
5706 * corresponding sysfs parameter file.
5707 */
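
/*
 * Example (illustrative sketch): the threshold is expressed in seconds.
 * It can be set at boot, e.g.
 *
 *	workqueue.watchdog_thresh=60
 *
 * on the kernel command line, or changed at runtime via the module
 * parameter file, e.g.
 *
 *	# echo 60 > /sys/module/workqueue/parameters/watchdog_thresh
 *
 * Writing 0 disables the watchdog; 60 is just an example value.
 */
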
5708 #ifdef CONFIG_WQ_WATCHDOG
5709
5710 static unsigned long wq_watchdog_thresh = 30;
5711 static struct timer_list wq_watchdog_timer;
5712
5713 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
5714 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
5715
5716 static void wq_watchdog_reset_touched(void)
5717 {
5718 int cpu;
5719
5720 wq_watchdog_touched = jiffies;
5721 for_each_possible_cpu(cpu)
5722 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5723 }
5724
5725 static void wq_watchdog_timer_fn(struct timer_list *unused)
5726 {
5727 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
5728 bool lockup_detected = false;
5729 struct worker_pool *pool;
5730 int pi;
5731
5732 if (!thresh)
5733 return;
5734
5735 rcu_read_lock();
5736
5737 for_each_pool(pool, pi) {
5738 unsigned long pool_ts, touched, ts;
5739
5740 if (list_empty(&pool->worklist))
5741 continue;
5742
5743 /* get the latest of pool and touched timestamps */
5744 pool_ts = READ_ONCE(pool->watchdog_ts);
5745 touched = READ_ONCE(wq_watchdog_touched);
5746
5747 if (time_after(pool_ts, touched))
5748 ts = pool_ts;
5749 else
5750 ts = touched;
5751
5752 if (pool->cpu >= 0) {
5753 unsigned long cpu_touched =
5754 READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
5755 pool->cpu));
5756 if (time_after(cpu_touched, ts))
5757 ts = cpu_touched;
5758 }
5759
5760 /* did we stall? */
5761 if (time_after(jiffies, ts + thresh)) {
5762 lockup_detected = true;
5763 pr_emerg("BUG: workqueue lockup - pool");
5764 pr_cont_pool_info(pool);
5765 pr_cont(" stuck for %us!\n",
5766 jiffies_to_msecs(jiffies - pool_ts) / 1000);
5767 }
5768 }
5769
5770 rcu_read_unlock();
5771
5772 if (lockup_detected)
5773 show_workqueue_state();
5774
5775 wq_watchdog_reset_touched();
5776 mod_timer(&wq_watchdog_timer, jiffies + thresh);
5777 }
5778
5779 notrace void wq_watchdog_touch(int cpu)
5780 {
5781 if (cpu >= 0)
5782 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5783 else
5784 wq_watchdog_touched = jiffies;
5785 }
5786
5787 static void wq_watchdog_set_thresh(unsigned long thresh)
5788 {
5789 wq_watchdog_thresh = 0;
5790 del_timer_sync(&wq_watchdog_timer);
5791
5792 if (thresh) {
5793 wq_watchdog_thresh = thresh;
5794 wq_watchdog_reset_touched();
5795 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
5796 }
5797 }
5798
5799 static int wq_watchdog_param_set_thresh(const char *val,
5800 const struct kernel_param *kp)
5801 {
5802 unsigned long thresh;
5803 int ret;
5804
5805 ret = kstrtoul(val, 0, &thresh);
5806 if (ret)
5807 return ret;
5808
5809 if (system_wq)
5810 wq_watchdog_set_thresh(thresh);
5811 else
5812 wq_watchdog_thresh = thresh;
5813
5814 return 0;
5815 }
5816
5817 static const struct kernel_param_ops wq_watchdog_thresh_ops = {
5818 .set = wq_watchdog_param_set_thresh,
5819 .get = param_get_ulong,
5820 };
5821
5822 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
5823 0644);
5824
5825 static void wq_watchdog_init(void)
5826 {
5827 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
5828 wq_watchdog_set_thresh(wq_watchdog_thresh);
5829 }
5830
5831 #else /* CONFIG_WQ_WATCHDOG */
5832
5833 static inline void wq_watchdog_init(void) { }
5834
5835 #endif /* CONFIG_WQ_WATCHDOG */
5836
5837 static void __init wq_numa_init(void)
5838 {
5839 cpumask_var_t *tbl;
5840 int node, cpu;
5841
5842 if (num_possible_nodes() <= 1)
5843 return;
5844
5845 if (wq_disable_numa) {
5846 pr_info("workqueue: NUMA affinity support disabled\n");
5847 return;
5848 }
5849
5850 wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
5851 BUG_ON(!wq_update_unbound_numa_attrs_buf);
5852
5853 /*
5854 * We want masks of possible CPUs for each node, which aren't readily
5855 * available. Build them from cpu_to_node(), which should have been
5856 * fully initialized by now.
5857 */
5858 tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
5859 BUG_ON(!tbl);
5860
5861 for_each_node(node)
5862 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
5863 node_online(node) ? node : NUMA_NO_NODE));
5864
5865 for_each_possible_cpu(cpu) {
5866 node = cpu_to_node(cpu);
5867 if (WARN_ON(node == NUMA_NO_NODE)) {
5868 pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5869 /* happens iff arch is bonkers, let's just proceed */
5870 return;
5871 }
5872 cpumask_set_cpu(cpu, tbl[node]);
5873 }
5874
5875 wq_numa_possible_cpumask = tbl;
5876 wq_numa_enabled = true;
5877 }
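
/*
 * Example (illustrative sketch): on a hypothetical two-node machine with
 * CPUs 0-3 on node 0 and CPUs 4-7 on node 1, the table built above would
 * contain
 *
 *	wq_numa_possible_cpumask[0] == 0-3
 *	wq_numa_possible_cpumask[1] == 4-7
 *
 * which is later used to compute per-node pwq cpumasks for unbound
 * workqueues.
 */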
5878
5879 /**
5880 * workqueue_init_early - early init for workqueue subsystem
5881 *
5882 * This is the first half of two-staged workqueue subsystem initialization
5883 * and invoked as soon as the bare basics - memory allocation, cpumasks and
5884 * idr are up. It sets up all the data structures and system workqueues
5885 * and allows early boot code to create workqueues and queue/cancel work
5886 * items. Actual work item execution starts only after kthreads can be
5887 * created and scheduled right before early initcalls.
5888 */
5889 int __init workqueue_init_early(void)
5890 {
5891 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5892 int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
5893 int i, cpu;
5894
5895 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5896
5897 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
5898 cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
5899
5900 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
5901
5902 /* initialize CPU pools */
5903 for_each_possible_cpu(cpu) {
5904 struct worker_pool *pool;
5905
5906 i = 0;
5907 for_each_cpu_worker_pool(pool, cpu) {
5908 BUG_ON(init_worker_pool(pool));
5909 pool->cpu = cpu;
5910 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
5911 pool->attrs->nice = std_nice[i++];
5912 pool->node = cpu_to_node(cpu);
5913
5914 /* alloc pool ID */
5915 mutex_lock(&wq_pool_mutex);
5916 BUG_ON(worker_pool_assign_id(pool));
5917 mutex_unlock(&wq_pool_mutex);
5918 }
5919 }
5920
5921 /* create default unbound and ordered wq attrs */
5922 for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5923 struct workqueue_attrs *attrs;
5924
5925 BUG_ON(!(attrs = alloc_workqueue_attrs()));
5926 attrs->nice = std_nice[i];
5927 unbound_std_wq_attrs[i] = attrs;
5928
5929 /*
5930 * An ordered wq should have only one pwq as ordering is
5931 * guaranteed by max_active which is enforced by pwqs.
5932 * Turn off NUMA so that dfl_pwq is used for all nodes.
5933 */
5934 BUG_ON(!(attrs = alloc_workqueue_attrs()));
5935 attrs->nice = std_nice[i];
5936 attrs->no_numa = true;
5937 ordered_wq_attrs[i] = attrs;
5938 }
5939
5940 system_wq = alloc_workqueue("events", 0, 0);
5941 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
5942 system_long_wq = alloc_workqueue("events_long", 0, 0);
5943 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
5944 WQ_UNBOUND_MAX_ACTIVE);
5945 system_freezable_wq = alloc_workqueue("events_freezable",
5946 WQ_FREEZABLE, 0);
5947 system_power_efficient_wq = alloc_workqueue("events_power_efficient",
5948 WQ_POWER_EFFICIENT, 0);
5949 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
5950 WQ_FREEZABLE | WQ_POWER_EFFICIENT,
5951 0);
5952 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
5953 !system_unbound_wq || !system_freezable_wq ||
5954 !system_power_efficient_wq ||
5955 !system_freezable_power_efficient_wq);
5956
5957 return 0;
5958 }
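
/*
 * Example (illustrative sketch): once the above has run, early boot code
 * may already create and queue work items, e.g.
 *
 *	static void foo_fn(struct work_struct *work);
 *	static DECLARE_WORK(foo_work, foo_fn);
 *
 *	queue_work(system_wq, &foo_work);
 *
 * The item only starts executing after workqueue_init() below has created
 * the initial kworkers; foo_fn/foo_work are placeholder names.
 */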
5959
5960 /**
5961 * workqueue_init - bring workqueue subsystem fully online
5962 *
5963 * This is the latter half of two-staged workqueue subsystem initialization
5964 * and invoked as soon as kthreads can be created and scheduled.
5965 * Workqueues have been created and work items queued on them, but there
5966 * are no kworkers executing the work items yet. Populate the worker pools
5967 * with the initial workers and enable future kworker creations.
5968 */
5969 int __init workqueue_init(void)
5970 {
5971 struct workqueue_struct *wq;
5972 struct worker_pool *pool;
5973 int cpu, bkt;
5974
5975 /*
5976 * It'd be simpler to initialize NUMA in workqueue_init_early() but
5977 * the CPU to node mapping may not be available that early on some
5978 * archs such as power and arm64. As the per-cpu pools created
5979 * previously could be missing their node hint, and unbound pools their
5980 * NUMA affinity, fix them up.
5981 *
5982 * Also, while iterating workqueues, create rescuers if requested.
5983 */
5984 wq_numa_init();
5985
5986 mutex_lock(&wq_pool_mutex);
5987
5988 for_each_possible_cpu(cpu) {
5989 for_each_cpu_worker_pool(pool, cpu) {
5990 pool->node = cpu_to_node(cpu);
5991 }
5992 }
5993
5994 list_for_each_entry(wq, &workqueues, list) {
5995 wq_update_unbound_numa(wq, smp_processor_id(), true);
5996 WARN(init_rescuer(wq),
5997 "workqueue: failed to create early rescuer for %s",
5998 wq->name);
5999 }
6000
6001 mutex_unlock(&wq_pool_mutex);
6002
6003 /* create the initial workers */
6004 for_each_online_cpu(cpu) {
6005 for_each_cpu_worker_pool(pool, cpu) {
6006 pool->flags &= ~POOL_DISASSOCIATED;
6007 BUG_ON(!create_worker(pool));
6008 }
6009 }
6010
6011 hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
6012 BUG_ON(!create_worker(pool));
6013
6014 wq_online = true;
6015 wq_watchdog_init();
6016
6017 return 0;
6018 }