fs/io-wq.c (mirror_ubuntu-jammy-kernel.git)
io-wq: remove worker to owner tw dependency
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Basic worker thread pool for io_uring
4 *
5 * Copyright (C) 2019 Jens Axboe
6 *
7 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/errno.h>
11 #include <linux/sched/signal.h>
12 #include <linux/percpu.h>
13 #include <linux/slab.h>
14 #include <linux/rculist_nulls.h>
15 #include <linux/cpu.h>
16 #include <linux/tracehook.h>
17 #include <uapi/linux/io_uring.h>
18
19 #include "io-wq.h"
20
21 #define WORKER_IDLE_TIMEOUT (5 * HZ)
22
23 enum {
24 IO_WORKER_F_UP = 1, /* up and active */
25 IO_WORKER_F_RUNNING = 2, /* account as running */
26 IO_WORKER_F_FREE = 4, /* worker on free list */
27 IO_WORKER_F_BOUND = 8, /* is doing bounded work */
28 };
29
30 enum {
31 IO_WQ_BIT_EXIT = 0, /* wq exiting */
32 };
33
34 enum {
35 IO_ACCT_STALLED_BIT = 0, /* stalled on hash */
36 };
37
38 /*
39 * One for each thread in a wqe pool
40 */
41 struct io_worker {
42 refcount_t ref;
43 unsigned flags;
44 struct hlist_nulls_node nulls_node;
45 struct list_head all_list;
46 struct task_struct *task;
47 struct io_wqe *wqe;
48
49 struct io_wq_work *cur_work;
50 spinlock_t lock;
51
52 struct completion ref_done;
53
54 unsigned long create_state;
55 struct callback_head create_work;
56 int create_index;
57
58 union {
59 struct rcu_head rcu;
60 struct work_struct work;
61 };
62 };
63
64 #if BITS_PER_LONG == 64
65 #define IO_WQ_HASH_ORDER 6
66 #else
67 #define IO_WQ_HASH_ORDER 5
68 #endif
69
70 #define IO_WQ_NR_HASH_BUCKETS (1u << IO_WQ_HASH_ORDER)
71
72 struct io_wqe_acct {
73 unsigned nr_workers;
74 unsigned max_workers;
75 int index;
76 atomic_t nr_running;
77 struct io_wq_work_list work_list;
78 unsigned long flags;
79 };
80
81 enum {
82 IO_WQ_ACCT_BOUND,
83 IO_WQ_ACCT_UNBOUND,
84 IO_WQ_ACCT_NR,
85 };
86
87 /*
88 * Per-node worker thread pool
89 */
90 struct io_wqe {
91 raw_spinlock_t lock;
92 struct io_wqe_acct acct[2];
93
94 int node;
95
96 struct hlist_nulls_head free_list;
97 struct list_head all_list;
98
99 struct wait_queue_entry wait;
100
101 struct io_wq *wq;
102 struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
103
104 cpumask_var_t cpu_mask;
105 };
106
107 /*
108 * Per io_wq state
109 */
110 struct io_wq {
111 unsigned long state;
112
113 free_work_fn *free_work;
114 io_wq_work_fn *do_work;
115
116 struct io_wq_hash *hash;
117
118 atomic_t worker_refs;
119 struct completion worker_done;
120
121 struct hlist_node cpuhp_node;
122
123 struct task_struct *task;
124
125 struct io_wqe *wqes[];
126 };
127
128 static enum cpuhp_state io_wq_online;
129
130 struct io_cb_cancel_data {
131 work_cancel_fn *fn;
132 void *data;
133 int nr_running;
134 int nr_pending;
135 bool cancel_all;
136 };
137
138 static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
139 static void io_wqe_dec_running(struct io_worker *worker);
140 static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
141 struct io_wqe_acct *acct,
142 struct io_cb_cancel_data *match);
143 static void create_worker_cb(struct callback_head *cb);
144
145 static bool io_worker_get(struct io_worker *worker)
146 {
147 return refcount_inc_not_zero(&worker->ref);
148 }
149
150 static void io_worker_release(struct io_worker *worker)
151 {
152 if (refcount_dec_and_test(&worker->ref))
153 complete(&worker->ref_done);
154 }
155
156 static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
157 {
158 return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
159 }
160
161 static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
162 struct io_wq_work *work)
163 {
164 return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
165 }
166
167 static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
168 {
169 return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
170 }
171
172 static void io_worker_ref_put(struct io_wq *wq)
173 {
174 if (atomic_dec_and_test(&wq->worker_refs))
175 complete(&wq->worker_done);
176 }
177
178 static void io_worker_cancel_cb(struct io_worker *worker)
179 {
180 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
181 struct io_wqe *wqe = worker->wqe;
182 struct io_wq *wq = wqe->wq;
183
184 atomic_dec(&acct->nr_running);
185 raw_spin_lock(&worker->wqe->lock);
186 acct->nr_workers--;
187 raw_spin_unlock(&worker->wqe->lock);
188 io_worker_ref_put(wq);
189 clear_bit_unlock(0, &worker->create_state);
190 io_worker_release(worker);
191 }
192
193 static bool io_task_worker_match(struct callback_head *cb, void *data)
194 {
195 struct io_worker *worker;
196
197 if (cb->func != create_worker_cb)
198 return false;
199 worker = container_of(cb, struct io_worker, create_work);
200 return worker == data;
201 }
202
203 static void io_worker_exit(struct io_worker *worker)
204 {
205 struct io_wqe *wqe = worker->wqe;
206 struct io_wq *wq = wqe->wq;
207
208 while (1) {
209 struct callback_head *cb = task_work_cancel_match(wq->task,
210 io_task_worker_match, worker);
211
212 if (!cb)
213 break;
214 io_worker_cancel_cb(worker);
215 }
216
217 if (refcount_dec_and_test(&worker->ref))
218 complete(&worker->ref_done);
219 wait_for_completion(&worker->ref_done);
220
221 raw_spin_lock(&wqe->lock);
222 if (worker->flags & IO_WORKER_F_FREE)
223 hlist_nulls_del_rcu(&worker->nulls_node);
224 list_del_rcu(&worker->all_list);
225 preempt_disable();
226 io_wqe_dec_running(worker);
227 worker->flags = 0;
228 current->flags &= ~PF_IO_WORKER;
229 preempt_enable();
230 raw_spin_unlock(&wqe->lock);
231
232 kfree_rcu(worker, rcu);
233 io_worker_ref_put(wqe->wq);
234 do_exit(0);
235 }
236
237 static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
238 {
239 if (!wq_list_empty(&acct->work_list) &&
240 !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
241 return true;
242 return false;
243 }
244
245 /*
246 * Check the free list for an available worker. If one isn't available,
247 * the caller must create one.
248 */
249 static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
250 struct io_wqe_acct *acct)
251 __must_hold(RCU)
252 {
253 struct hlist_nulls_node *n;
254 struct io_worker *worker;
255
256 /*
257 * Iterate free_list and see if we can find an idle worker to
258 * activate. If a given worker is on the free_list but in the process
259 * of exiting, keep trying.
260 */
261 hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
262 if (!io_worker_get(worker))
263 continue;
264 if (io_wqe_get_acct(worker) != acct) {
265 io_worker_release(worker);
266 continue;
267 }
268 if (wake_up_process(worker->task)) {
269 io_worker_release(worker);
270 return true;
271 }
272 io_worker_release(worker);
273 }
274
275 return false;
276 }
277
278 /*
279 * We need a worker. If we find a free one, we're good. If not, and we're
280 * below the max number of workers, create one.
281 */
282 static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
283 {
284 /*
285 * Most likely an attempt to queue unbounded work on an io_wq that
286 * wasn't setup with any unbounded workers.
287 */
288 if (unlikely(!acct->max_workers))
289 pr_warn_once("io-wq is not configured for unbound workers");
290
291 raw_spin_lock(&wqe->lock);
292 if (acct->nr_workers >= acct->max_workers) {
293 raw_spin_unlock(&wqe->lock);
294 return true;
295 }
296 acct->nr_workers++;
297 raw_spin_unlock(&wqe->lock);
298 atomic_inc(&acct->nr_running);
299 atomic_inc(&wqe->wq->worker_refs);
300 return create_io_worker(wqe->wq, wqe, acct->index);
301 }
302
303 static void io_wqe_inc_running(struct io_worker *worker)
304 {
305 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
306
307 atomic_inc(&acct->nr_running);
308 }
309
310 static void create_worker_cb(struct callback_head *cb)
311 {
312 struct io_worker *worker;
313 struct io_wq *wq;
314 struct io_wqe *wqe;
315 struct io_wqe_acct *acct;
316 bool do_create = false;
317
318 worker = container_of(cb, struct io_worker, create_work);
319 wqe = worker->wqe;
320 wq = wqe->wq;
321 acct = &wqe->acct[worker->create_index];
322 raw_spin_lock(&wqe->lock);
323 if (acct->nr_workers < acct->max_workers) {
324 acct->nr_workers++;
325 do_create = true;
326 }
327 raw_spin_unlock(&wqe->lock);
328 if (do_create) {
329 create_io_worker(wq, wqe, worker->create_index);
330 } else {
331 atomic_dec(&acct->nr_running);
332 io_worker_ref_put(wq);
333 }
334 clear_bit_unlock(0, &worker->create_state);
335 io_worker_release(worker);
336 }
337
338 static bool io_queue_worker_create(struct io_worker *worker,
339 struct io_wqe_acct *acct,
340 task_work_func_t func)
341 {
342 struct io_wqe *wqe = worker->wqe;
343 struct io_wq *wq = wqe->wq;
344
345 /* raced with exit, just ignore create call */
346 if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
347 goto fail;
348 if (!io_worker_get(worker))
349 goto fail;
350 /*
351 * create_state manages ownership of create_work/index. We should
352 * only need one entry per worker, as the worker going to sleep
353 * will trigger the condition, and waking will clear it once it
354 * runs the task_work.
355 */
356 if (test_bit(0, &worker->create_state) ||
357 test_and_set_bit_lock(0, &worker->create_state))
358 goto fail_release;
359
360 init_task_work(&worker->create_work, func);
361 worker->create_index = acct->index;
362 if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL))
363 return true;
364 clear_bit_unlock(0, &worker->create_state);
365 fail_release:
366 io_worker_release(worker);
367 fail:
368 atomic_dec(&acct->nr_running);
369 io_worker_ref_put(wq);
370 return false;
371 }
372
373 static void io_wqe_dec_running(struct io_worker *worker)
374 __must_hold(wqe->lock)
375 {
376 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
377 struct io_wqe *wqe = worker->wqe;
378
379 if (!(worker->flags & IO_WORKER_F_UP))
380 return;
381
382 if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) {
383 atomic_inc(&acct->nr_running);
384 atomic_inc(&wqe->wq->worker_refs);
385 io_queue_worker_create(worker, acct, create_worker_cb);
386 }
387 }
388
389 /*
390 * Worker will start processing some work. Move it to the busy list, if
391 * it's currently on the freelist
392 */
393 static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
394 struct io_wq_work *work)
395 __must_hold(wqe->lock)
396 {
397 if (worker->flags & IO_WORKER_F_FREE) {
398 worker->flags &= ~IO_WORKER_F_FREE;
399 hlist_nulls_del_init_rcu(&worker->nulls_node);
400 }
401 }
402
403 /*
404 * No work, worker going to sleep. Put it back on the free list if it
405 * isn't there already, so io_wqe_activate_free_worker() can find and
406 * wake it when new work is queued. Called with wqe->lock held and with
407 * the task state already set to TASK_INTERRUPTIBLE, so the worker is
408 * woken either by new work or by the idle timeout expiring.
409 */
410 static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
411 __must_hold(wqe->lock)
412 {
413 if (!(worker->flags & IO_WORKER_F_FREE)) {
414 worker->flags |= IO_WORKER_F_FREE;
415 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
416 }
417 }
418
419 static inline unsigned int io_get_work_hash(struct io_wq_work *work)
420 {
421 return work->flags >> IO_WQ_HASH_SHIFT;
422 }
423
424 static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
425 {
426 struct io_wq *wq = wqe->wq;
427
428 spin_lock_irq(&wq->hash->wait.lock);
429 if (list_empty(&wqe->wait.entry)) {
430 __add_wait_queue(&wq->hash->wait, &wqe->wait);
431 if (!test_bit(hash, &wq->hash->map)) {
432 __set_current_state(TASK_RUNNING);
433 list_del_init(&wqe->wait.entry);
434 }
435 }
436 spin_unlock_irq(&wq->hash->wait.lock);
437 }
438
439 static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
440 struct io_worker *worker)
441 __must_hold(wqe->lock)
442 {
443 struct io_wq_work_node *node, *prev;
444 struct io_wq_work *work, *tail;
445 unsigned int stall_hash = -1U;
446 struct io_wqe *wqe = worker->wqe;
447
448 wq_list_for_each(node, prev, &acct->work_list) {
449 unsigned int hash;
450
451 work = container_of(node, struct io_wq_work, list);
452
453 /* not hashed, can run anytime */
454 if (!io_wq_is_hashed(work)) {
455 wq_list_del(&acct->work_list, node, prev);
456 return work;
457 }
458
459 hash = io_get_work_hash(work);
460 /* all items with this hash lie in [work, tail] */
461 tail = wqe->hash_tail[hash];
462
463 /* hashed, can run if not already running */
464 if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
465 wqe->hash_tail[hash] = NULL;
466 wq_list_cut(&acct->work_list, &tail->list, prev);
467 return work;
468 }
469 if (stall_hash == -1U)
470 stall_hash = hash;
471 /* fast forward to a next hash, for-each will fix up @prev */
472 node = &tail->list;
473 }
474
475 if (stall_hash != -1U) {
476 /*
477 * Set this before dropping the lock to avoid racing with new
478 * work being added and clearing the stalled bit.
479 */
480 set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
481 raw_spin_unlock(&wqe->lock);
482 io_wait_on_hash(wqe, stall_hash);
483 raw_spin_lock(&wqe->lock);
484 }
485
486 return NULL;
487 }
488
489 static bool io_flush_signals(void)
490 {
491 if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
492 __set_current_state(TASK_RUNNING);
493 tracehook_notify_signal();
494 return true;
495 }
496 return false;
497 }
498
499 static void io_assign_current_work(struct io_worker *worker,
500 struct io_wq_work *work)
501 {
502 if (work) {
503 io_flush_signals();
504 cond_resched();
505 }
506
507 spin_lock(&worker->lock);
508 worker->cur_work = work;
509 spin_unlock(&worker->lock);
510 }
511
512 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
513
514 static void io_worker_handle_work(struct io_worker *worker)
515 __releases(wqe->lock)
516 {
517 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
518 struct io_wqe *wqe = worker->wqe;
519 struct io_wq *wq = wqe->wq;
520 bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
521
522 do {
523 struct io_wq_work *work;
524 get_next:
525 /*
526 * If we got some work, mark us as busy. If we didn't, but
527 * the list isn't empty, it means we stalled on hashed work.
528 * Mark us stalled so we don't keep looking for work when we
529 * can't make progress, any work completion or insertion will
530 * clear the stalled flag.
531 */
532 work = io_get_next_work(acct, worker);
533 if (work)
534 __io_worker_busy(wqe, worker, work);
535
536 raw_spin_unlock(&wqe->lock);
537 if (!work)
538 break;
539 io_assign_current_work(worker, work);
540 __set_current_state(TASK_RUNNING);
541
542 /* handle a whole dependent link */
543 do {
544 struct io_wq_work *next_hashed, *linked;
545 unsigned int hash = io_get_work_hash(work);
546
547 next_hashed = wq_next_work(work);
548
549 if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
550 work->flags |= IO_WQ_WORK_CANCEL;
551 wq->do_work(work);
552 io_assign_current_work(worker, NULL);
553
554 linked = wq->free_work(work);
555 work = next_hashed;
556 if (!work && linked && !io_wq_is_hashed(linked)) {
557 work = linked;
558 linked = NULL;
559 }
560 io_assign_current_work(worker, work);
561 if (linked)
562 io_wqe_enqueue(wqe, linked);
563
564 if (hash != -1U && !next_hashed) {
565 clear_bit(hash, &wq->hash->map);
566 clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
567 if (wq_has_sleeper(&wq->hash->wait))
568 wake_up(&wq->hash->wait);
569 raw_spin_lock(&wqe->lock);
570 /* skip unnecessary unlock-lock wqe->lock */
571 if (!work)
572 goto get_next;
573 raw_spin_unlock(&wqe->lock);
574 }
575 } while (work);
576
577 raw_spin_lock(&wqe->lock);
578 } while (1);
579 }
580
581 static int io_wqe_worker(void *data)
582 {
583 struct io_worker *worker = data;
584 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
585 struct io_wqe *wqe = worker->wqe;
586 struct io_wq *wq = wqe->wq;
587 bool last_timeout = false;
588 char buf[TASK_COMM_LEN];
589
590 worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
591
592 snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
593 set_task_comm(current, buf);
594
595 while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
596 long ret;
597
598 set_current_state(TASK_INTERRUPTIBLE);
599 loop:
600 raw_spin_lock(&wqe->lock);
601 if (io_acct_run_queue(acct)) {
602 io_worker_handle_work(worker);
603 goto loop;
604 }
605 /* timed out, exit unless we're the last worker */
606 if (last_timeout && acct->nr_workers > 1) {
607 acct->nr_workers--;
608 raw_spin_unlock(&wqe->lock);
609 __set_current_state(TASK_RUNNING);
610 break;
611 }
612 last_timeout = false;
613 __io_worker_idle(wqe, worker);
614 raw_spin_unlock(&wqe->lock);
615 if (io_flush_signals())
616 continue;
617 ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
618 if (signal_pending(current)) {
619 struct ksignal ksig;
620
621 if (!get_signal(&ksig))
622 continue;
623 break;
624 }
625 last_timeout = !ret;
626 }
627
628 if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
629 raw_spin_lock(&wqe->lock);
630 io_worker_handle_work(worker);
631 }
632
633 io_worker_exit(worker);
634 return 0;
635 }
636
637 /*
638 * Called when a worker is scheduled in. Mark us as currently running.
639 */
640 void io_wq_worker_running(struct task_struct *tsk)
641 {
642 struct io_worker *worker = tsk->pf_io_worker;
643
644 if (!worker)
645 return;
646 if (!(worker->flags & IO_WORKER_F_UP))
647 return;
648 if (worker->flags & IO_WORKER_F_RUNNING)
649 return;
650 worker->flags |= IO_WORKER_F_RUNNING;
651 io_wqe_inc_running(worker);
652 }
653
654 /*
655 * Called when worker is going to sleep. If there are no workers currently
656 * running and we have work pending, wake up a free one or create a new one.
657 */
658 void io_wq_worker_sleeping(struct task_struct *tsk)
659 {
660 struct io_worker *worker = tsk->pf_io_worker;
661
662 if (!worker)
663 return;
664 if (!(worker->flags & IO_WORKER_F_UP))
665 return;
666 if (!(worker->flags & IO_WORKER_F_RUNNING))
667 return;
668
669 worker->flags &= ~IO_WORKER_F_RUNNING;
670
671 raw_spin_lock(&worker->wqe->lock);
672 io_wqe_dec_running(worker);
673 raw_spin_unlock(&worker->wqe->lock);
674 }
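
/*
 * Illustrative sketch, not part of this file: for tasks flagged
 * PF_IO_WORKER the scheduler core calls these two hooks around blocking
 * (the real call sites are sched_submit_work() and sched_update_worker()
 * in kernel/sched/core.c). A simplified, hypothetical equivalent:
 */
static inline void example_sched_block_notify(struct task_struct *tsk)
{
	/* worker is about to block: account it as sleeping, maybe spin up a replacement */
	if (tsk->flags & PF_IO_WORKER)
		io_wq_worker_sleeping(tsk);
}

static inline void example_sched_wakeup_notify(struct task_struct *tsk)
{
	/* worker is running again: account it as active */
	if (tsk->flags & PF_IO_WORKER)
		io_wq_worker_running(tsk);
}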
675
676 static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
677 struct task_struct *tsk)
678 {
679 tsk->pf_io_worker = worker;
680 worker->task = tsk;
681 set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
682 tsk->flags |= PF_NO_SETAFFINITY;
683
684 raw_spin_lock(&wqe->lock);
685 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
686 list_add_tail_rcu(&worker->all_list, &wqe->all_list);
687 worker->flags |= IO_WORKER_F_FREE;
688 raw_spin_unlock(&wqe->lock);
689 wake_up_new_task(tsk);
690 }
691
692 static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
693 {
694 return true;
695 }
696
697 static inline bool io_should_retry_thread(long err)
698 {
699 switch (err) {
700 case -EAGAIN:
701 case -ERESTARTSYS:
702 case -ERESTARTNOINTR:
703 case -ERESTARTNOHAND:
704 return true;
705 default:
706 return false;
707 }
708 }
709
710 static void create_worker_cont(struct callback_head *cb)
711 {
712 struct io_worker *worker;
713 struct task_struct *tsk;
714 struct io_wqe *wqe;
715
716 worker = container_of(cb, struct io_worker, create_work);
717 clear_bit_unlock(0, &worker->create_state);
718 wqe = worker->wqe;
719 tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
720 if (!IS_ERR(tsk)) {
721 io_init_new_worker(wqe, worker, tsk);
722 io_worker_release(worker);
723 return;
724 } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
725 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
726
727 atomic_dec(&acct->nr_running);
728 raw_spin_lock(&wqe->lock);
729 acct->nr_workers--;
730 if (!acct->nr_workers) {
731 struct io_cb_cancel_data match = {
732 .fn = io_wq_work_match_all,
733 .cancel_all = true,
734 };
735
736 while (io_acct_cancel_pending_work(wqe, acct, &match))
737 raw_spin_lock(&wqe->lock);
738 }
739 raw_spin_unlock(&wqe->lock);
740 io_worker_ref_put(wqe->wq);
741 kfree(worker);
742 return;
743 }
744
745 /* re-create attempts grab a new worker ref, drop the existing one */
746 io_worker_release(worker);
747 schedule_work(&worker->work);
748 }
749
750 static void io_workqueue_create(struct work_struct *work)
751 {
752 struct io_worker *worker = container_of(work, struct io_worker, work);
753 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
754
755 if (!io_queue_worker_create(worker, acct, create_worker_cont)) {
756 clear_bit_unlock(0, &worker->create_state);
757 io_worker_release(worker);
758 kfree(worker);
759 }
760 }
761
762 static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
763 {
764 struct io_wqe_acct *acct = &wqe->acct[index];
765 struct io_worker *worker;
766 struct task_struct *tsk;
767
768 __set_current_state(TASK_RUNNING);
769
770 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
771 if (!worker) {
772 fail:
773 atomic_dec(&acct->nr_running);
774 raw_spin_lock(&wqe->lock);
775 acct->nr_workers--;
776 raw_spin_unlock(&wqe->lock);
777 io_worker_ref_put(wq);
778 return false;
779 }
780
781 refcount_set(&worker->ref, 1);
782 worker->wqe = wqe;
783 spin_lock_init(&worker->lock);
784 init_completion(&worker->ref_done);
785
786 if (index == IO_WQ_ACCT_BOUND)
787 worker->flags |= IO_WORKER_F_BOUND;
788
789 tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
790 if (!IS_ERR(tsk)) {
791 io_init_new_worker(wqe, worker, tsk);
792 } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
793 kfree(worker);
794 goto fail;
795 } else {
796 INIT_WORK(&worker->work, io_workqueue_create);
797 schedule_work(&worker->work);
798 }
799
800 return true;
801 }
802
803 /*
804 * Iterate the passed in list and call the specific function for each
805 * worker that isn't exiting
806 */
807 static bool io_wq_for_each_worker(struct io_wqe *wqe,
808 bool (*func)(struct io_worker *, void *),
809 void *data)
810 {
811 struct io_worker *worker;
812 bool ret = false;
813
814 list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
815 if (io_worker_get(worker)) {
816 /* no task if node is/was offline */
817 if (worker->task)
818 ret = func(worker, data);
819 io_worker_release(worker);
820 if (ret)
821 break;
822 }
823 }
824
825 return ret;
826 }
827
828 static bool io_wq_worker_wake(struct io_worker *worker, void *data)
829 {
830 set_notify_signal(worker->task);
831 wake_up_process(worker->task);
832 return false;
833 }
834
835 static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
836 {
837 struct io_wq *wq = wqe->wq;
838
839 do {
840 work->flags |= IO_WQ_WORK_CANCEL;
841 wq->do_work(work);
842 work = wq->free_work(work);
843 } while (work);
844 }
845
846 static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
847 {
848 struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
849 unsigned int hash;
850 struct io_wq_work *tail;
851
852 if (!io_wq_is_hashed(work)) {
853 append:
854 wq_list_add_tail(&work->list, &acct->work_list);
855 return;
856 }
857
858 hash = io_get_work_hash(work);
859 tail = wqe->hash_tail[hash];
860 wqe->hash_tail[hash] = work;
861 if (!tail)
862 goto append;
863
864 wq_list_add_after(&work->list, &tail->list, &acct->work_list);
865 }
866
867 static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
868 {
869 return work == data;
870 }
871
872 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
873 {
874 struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
875 unsigned work_flags = work->flags;
876 bool do_create;
877
878 /*
879 * If io-wq is exiting for this task, or if the request has explicitly
880 * been marked as one that should not get executed, cancel it here.
881 */
882 if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
883 (work->flags & IO_WQ_WORK_CANCEL)) {
884 io_run_cancel(work, wqe);
885 return;
886 }
887
888 raw_spin_lock(&wqe->lock);
889 io_wqe_insert_work(wqe, work);
890 clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
891
892 rcu_read_lock();
893 do_create = !io_wqe_activate_free_worker(wqe, acct);
894 rcu_read_unlock();
895
896 raw_spin_unlock(&wqe->lock);
897
898 if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
899 !atomic_read(&acct->nr_running))) {
900 bool did_create;
901
902 did_create = io_wqe_create_worker(wqe, acct);
903 if (likely(did_create))
904 return;
905
906 raw_spin_lock(&wqe->lock);
907 /* fatal condition, failed to create the first worker */
908 if (!acct->nr_workers) {
909 struct io_cb_cancel_data match = {
910 .fn = io_wq_work_match_item,
911 .data = work,
912 .cancel_all = false,
913 };
914
915 if (io_acct_cancel_pending_work(wqe, acct, &match))
916 raw_spin_lock(&wqe->lock);
917 }
918 raw_spin_unlock(&wqe->lock);
919 }
920 }
921
922 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
923 {
924 struct io_wqe *wqe = wq->wqes[numa_node_id()];
925
926 io_wqe_enqueue(wqe, work);
927 }
928
929 /*
930 * Work items that hash to the same value will not be done in parallel.
931 * Used to limit concurrent writes, generally hashed by inode.
932 */
933 void io_wq_hash_work(struct io_wq_work *work, void *val)
934 {
935 unsigned int bit;
936
937 bit = hash_ptr(val, IO_WQ_HASH_ORDER);
938 work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
939 }
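
/*
 * Illustrative sketch, not part of this file: a hypothetical caller that
 * serializes work items targeting the same object (e.g. writes against one
 * inode) by hashing on the object's address before queueing. Items in the
 * same hash bucket run one at a time; unrelated work stays concurrent.
 */
static inline void example_enqueue_serialized(struct io_wq *wq,
					      struct io_wq_work *work,
					      void *object)
{
	io_wq_hash_work(work, object);
	io_wq_enqueue(wq, work);
}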
940
941 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
942 {
943 struct io_cb_cancel_data *match = data;
944
945 /*
946 * Hold the lock to avoid ->cur_work going out of scope, caller
947 * may dereference the passed in work.
948 */
949 spin_lock(&worker->lock);
950 if (worker->cur_work &&
951 match->fn(worker->cur_work, match->data)) {
952 set_notify_signal(worker->task);
953 match->nr_running++;
954 }
955 spin_unlock(&worker->lock);
956
957 return match->nr_running && !match->cancel_all;
958 }
959
960 static inline void io_wqe_remove_pending(struct io_wqe *wqe,
961 struct io_wq_work *work,
962 struct io_wq_work_node *prev)
963 {
964 struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
965 unsigned int hash = io_get_work_hash(work);
966 struct io_wq_work *prev_work = NULL;
967
968 if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
969 if (prev)
970 prev_work = container_of(prev, struct io_wq_work, list);
971 if (prev_work && io_get_work_hash(prev_work) == hash)
972 wqe->hash_tail[hash] = prev_work;
973 else
974 wqe->hash_tail[hash] = NULL;
975 }
976 wq_list_del(&acct->work_list, &work->list, prev);
977 }
978
979 static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
980 struct io_wqe_acct *acct,
981 struct io_cb_cancel_data *match)
982 __releases(wqe->lock)
983 {
984 struct io_wq_work_node *node, *prev;
985 struct io_wq_work *work;
986
987 wq_list_for_each(node, prev, &acct->work_list) {
988 work = container_of(node, struct io_wq_work, list);
989 if (!match->fn(work, match->data))
990 continue;
991 io_wqe_remove_pending(wqe, work, prev);
992 raw_spin_unlock(&wqe->lock);
993 io_run_cancel(work, wqe);
994 match->nr_pending++;
995 /* not safe to continue after unlock */
996 return true;
997 }
998
999 return false;
1000 }
1001
1002 static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
1003 struct io_cb_cancel_data *match)
1004 {
1005 int i;
1006 retry:
1007 raw_spin_lock(&wqe->lock);
1008 for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1009 struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);
1010
1011 if (io_acct_cancel_pending_work(wqe, acct, match)) {
1012 if (match->cancel_all)
1013 goto retry;
1014 return;
1015 }
1016 }
1017 raw_spin_unlock(&wqe->lock);
1018 }
1019
1020 static void io_wqe_cancel_running_work(struct io_wqe *wqe,
1021 struct io_cb_cancel_data *match)
1022 {
1023 rcu_read_lock();
1024 io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
1025 rcu_read_unlock();
1026 }
1027
1028 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
1029 void *data, bool cancel_all)
1030 {
1031 struct io_cb_cancel_data match = {
1032 .fn = cancel,
1033 .data = data,
1034 .cancel_all = cancel_all,
1035 };
1036 int node;
1037
1038 /*
1039 * First check pending list, if we're lucky we can just remove it
1040 * from there. CANCEL_OK means that the work is returned as-new,
1041 * no completion will be posted for it.
1042 */
1043 for_each_node(node) {
1044 struct io_wqe *wqe = wq->wqes[node];
1045
1046 io_wqe_cancel_pending_work(wqe, &match);
1047 if (match.nr_pending && !match.cancel_all)
1048 return IO_WQ_CANCEL_OK;
1049 }
1050
1051 /*
1052 * Now check if a free (going busy) or busy worker has the work
1053 * currently running. If we find it there, we'll return CANCEL_RUNNING
1054 * as an indication that we attempted to signal cancellation. The
1055 * completion will run normally in this case.
1056 */
1057 for_each_node(node) {
1058 struct io_wqe *wqe = wq->wqes[node];
1059
1060 io_wqe_cancel_running_work(wqe, &match);
1061 if (match.nr_running && !match.cancel_all)
1062 return IO_WQ_CANCEL_RUNNING;
1063 }
1064
1065 if (match.nr_running)
1066 return IO_WQ_CANCEL_RUNNING;
1067 if (match.nr_pending)
1068 return IO_WQ_CANCEL_OK;
1069 return IO_WQ_CANCEL_NOTFOUND;
1070 }
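
/*
 * Illustrative sketch, not part of this file: cancelling one specific work
 * item by pointer identity through io_wq_cancel_cb(). With cancel_all set
 * to false the walk stops at the first match; IO_WQ_CANCEL_OK means the
 * item was still pending and has been removed, IO_WQ_CANCEL_RUNNING means
 * a worker was signalled instead.
 */
static inline bool example_match_work_ptr(struct io_wq_work *work, void *data)
{
	return work == data;
}

static inline enum io_wq_cancel example_cancel_one(struct io_wq *wq,
						   struct io_wq_work *work)
{
	return io_wq_cancel_cb(wq, example_match_work_ptr, work, false);
}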
1071
1072 static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
1073 int sync, void *key)
1074 {
1075 struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
1076 int i;
1077
1078 list_del_init(&wait->entry);
1079
1080 rcu_read_lock();
1081 for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1082 struct io_wqe_acct *acct = &wqe->acct[i];
1083
1084 if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
1085 io_wqe_activate_free_worker(wqe, acct);
1086 }
1087 rcu_read_unlock();
1088 return 1;
1089 }
1090
1091 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
1092 {
1093 int ret, node, i;
1094 struct io_wq *wq;
1095
1096 if (WARN_ON_ONCE(!data->free_work || !data->do_work))
1097 return ERR_PTR(-EINVAL);
1098 if (WARN_ON_ONCE(!bounded))
1099 return ERR_PTR(-EINVAL);
1100
1101 wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
1102 if (!wq)
1103 return ERR_PTR(-ENOMEM);
1104 ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1105 if (ret)
1106 goto err_wq;
1107
1108 refcount_inc(&data->hash->refs);
1109 wq->hash = data->hash;
1110 wq->free_work = data->free_work;
1111 wq->do_work = data->do_work;
1112
1113 ret = -ENOMEM;
1114 for_each_node(node) {
1115 struct io_wqe *wqe;
1116 int alloc_node = node;
1117
1118 if (!node_online(alloc_node))
1119 alloc_node = NUMA_NO_NODE;
1120 wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
1121 if (!wqe)
1122 goto err;
1123 if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
1124 goto err;
1125 cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
1126 wq->wqes[node] = wqe;
1127 wqe->node = alloc_node;
1128 wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
1129 wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
1130 task_rlimit(current, RLIMIT_NPROC);
1131 INIT_LIST_HEAD(&wqe->wait.entry);
1132 wqe->wait.func = io_wqe_hash_wake;
1133 for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1134 struct io_wqe_acct *acct = &wqe->acct[i];
1135
1136 acct->index = i;
1137 atomic_set(&acct->nr_running, 0);
1138 INIT_WQ_LIST(&acct->work_list);
1139 }
1140 wqe->wq = wq;
1141 raw_spin_lock_init(&wqe->lock);
1142 INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
1143 INIT_LIST_HEAD(&wqe->all_list);
1144 }
1145
1146 wq->task = get_task_struct(data->task);
1147 atomic_set(&wq->worker_refs, 1);
1148 init_completion(&wq->worker_done);
1149 return wq;
1150 err:
1151 io_wq_put_hash(data->hash);
1152 cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1153 for_each_node(node) {
1154 if (!wq->wqes[node])
1155 continue;
1156 free_cpumask_var(wq->wqes[node]->cpu_mask);
1157 kfree(wq->wqes[node]);
1158 }
1159 err_wq:
1160 kfree(wq);
1161 return ERR_PTR(ret);
1162 }
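
/*
 * Illustrative sketch, not part of this file: a hypothetical io-wq user
 * setting up a queue bound to the current task, roughly mirroring what
 * io_uring does in io_init_wq_offload(). The free_work/do_work callbacks
 * are assumed to be supplied by that user; only the hash and task wiring
 * is shown here.
 */
static inline struct io_wq *example_create_wq(unsigned int bounded,
					      free_work_fn *free_work,
					      io_wq_work_fn *do_work)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return ERR_PTR(-ENOMEM);
	refcount_set(&hash->refs, 1);
	init_waitqueue_head(&hash->wait);

	data.hash = hash;
	data.task = current;
	data.free_work = free_work;
	data.do_work = do_work;

	/* "bounded" caps bound workers; the unbound limit defaults to RLIMIT_NPROC */
	return io_wq_create(bounded, &data);
}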
1163
1164 static bool io_task_work_match(struct callback_head *cb, void *data)
1165 {
1166 struct io_worker *worker;
1167
1168 if (cb->func != create_worker_cb && cb->func != create_worker_cont)
1169 return false;
1170 worker = container_of(cb, struct io_worker, create_work);
1171 return worker->wqe->wq == data;
1172 }
1173
1174 void io_wq_exit_start(struct io_wq *wq)
1175 {
1176 set_bit(IO_WQ_BIT_EXIT, &wq->state);
1177 }
1178
1179 static void io_wq_exit_workers(struct io_wq *wq)
1180 {
1181 struct callback_head *cb;
1182 int node;
1183
1184 if (!wq->task)
1185 return;
1186
1187 while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
1188 struct io_worker *worker;
1189
1190 worker = container_of(cb, struct io_worker, create_work);
1191 io_worker_cancel_cb(worker);
1192 }
1193
1194 rcu_read_lock();
1195 for_each_node(node) {
1196 struct io_wqe *wqe = wq->wqes[node];
1197
1198 io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
1199 }
1200 rcu_read_unlock();
1201 io_worker_ref_put(wq);
1202 wait_for_completion(&wq->worker_done);
1203
1204 for_each_node(node) {
1205 spin_lock_irq(&wq->hash->wait.lock);
1206 list_del_init(&wq->wqes[node]->wait.entry);
1207 spin_unlock_irq(&wq->hash->wait.lock);
1208 }
1209 put_task_struct(wq->task);
1210 wq->task = NULL;
1211 }
1212
1213 static void io_wq_destroy(struct io_wq *wq)
1214 {
1215 int node;
1216
1217 cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1218
1219 for_each_node(node) {
1220 struct io_wqe *wqe = wq->wqes[node];
1221 struct io_cb_cancel_data match = {
1222 .fn = io_wq_work_match_all,
1223 .cancel_all = true,
1224 };
1225 io_wqe_cancel_pending_work(wqe, &match);
1226 free_cpumask_var(wqe->cpu_mask);
1227 kfree(wqe);
1228 }
1229 io_wq_put_hash(wq->hash);
1230 kfree(wq);
1231 }
1232
1233 void io_wq_put_and_exit(struct io_wq *wq)
1234 {
1235 WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));
1236
1237 io_wq_exit_workers(wq);
1238 io_wq_destroy(wq);
1239 }
1240
1241 struct online_data {
1242 unsigned int cpu;
1243 bool online;
1244 };
1245
1246 static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
1247 {
1248 struct online_data *od = data;
1249
1250 if (od->online)
1251 cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
1252 else
1253 cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
1254 return false;
1255 }
1256
1257 static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
1258 {
1259 struct online_data od = {
1260 .cpu = cpu,
1261 .online = online
1262 };
1263 int i;
1264
1265 rcu_read_lock();
1266 for_each_node(i)
1267 io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
1268 rcu_read_unlock();
1269 return 0;
1270 }
1271
1272 static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
1273 {
1274 struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
1275
1276 return __io_wq_cpu_online(wq, cpu, true);
1277 }
1278
1279 static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
1280 {
1281 struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
1282
1283 return __io_wq_cpu_online(wq, cpu, false);
1284 }
1285
1286 int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
1287 {
1288 int i;
1289
1290 rcu_read_lock();
1291 for_each_node(i) {
1292 struct io_wqe *wqe = wq->wqes[i];
1293
1294 if (mask)
1295 cpumask_copy(wqe->cpu_mask, mask);
1296 else
1297 cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
1298 }
1299 rcu_read_unlock();
1300 return 0;
1301 }
1302
1303 /*
1304 * Set the max number of bound and unbound workers. The old values are
1305 * returned through new_count; a zero entry leaves that limit unchanged.
1306 */
1307 int io_wq_max_workers(struct io_wq *wq, int *new_count)
1308 {
1309 int i, node, prev = 0;
1310
1311 BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND);
1312 BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
1313 BUILD_BUG_ON((int) IO_WQ_ACCT_NR != 2);
1314
1315 for (i = 0; i < 2; i++) {
1316 if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
1317 new_count[i] = task_rlimit(current, RLIMIT_NPROC);
1318 }
1319
1320 rcu_read_lock();
1321 for_each_node(node) {
1322 struct io_wqe *wqe = wq->wqes[node];
1323 struct io_wqe_acct *acct;
1324
1325 raw_spin_lock(&wqe->lock);
1326 for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1327 acct = &wqe->acct[i];
1328 prev = max_t(int, acct->max_workers, prev);
1329 if (new_count[i])
1330 acct->max_workers = new_count[i];
1331 new_count[i] = prev;
1332 }
1333 raw_spin_unlock(&wqe->lock);
1334 }
1335 rcu_read_unlock();
1336 return 0;
1337 }
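
/*
 * Illustrative sketch, not part of this file: reading the current worker
 * limits without changing them. A zero entry keeps the old limit, and the
 * old values are written back into the array; this is the same interface
 * io_uring drives for IORING_REGISTER_IOWQ_MAX_WORKERS.
 */
static inline void example_query_max_workers(struct io_wq *wq, int counts[2])
{
	counts[IO_WQ_BOUND] = 0;
	counts[IO_WQ_UNBOUND] = 0;
	io_wq_max_workers(wq, counts);
}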
1338
1339 static __init int io_wq_init(void)
1340 {
1341 int ret;
1342
1343 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
1344 io_wq_cpu_online, io_wq_cpu_offline);
1345 if (ret < 0)
1346 return ret;
1347 io_wq_online = ret;
1348 return 0;
1349 }
1350 subsys_initcall(io_wq_init);