// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/tracehook.h>

#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_FIXED	= 8,	/* static idle worker */
	IO_WORKER_F_BOUND	= 16,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct completion ref_done;

	struct rcu_head rcu;
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
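
/*
 * 2^6 == 64 buckets on 64-bit (2^5 == 32 on 32-bit), so the per-bucket
 * "hash currently running" state fits in the single unsigned long bitmap
 * that test_and_set_bit()/clear_bit() operate on below (assuming
 * io_wq_hash::map stays one word, which is how this file uses it).
 */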

struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		raw_spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
};

/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct io_wq_hash *hash;

	refcount_t refs;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	struct task_struct *task;
};

static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}

static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
{
	return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
}
161
162 static void io_worker_ref_put(struct io_wq *wq)
163 {
164 if (atomic_dec_and_test(&wq->worker_refs))
165 complete(&wq->worker_done);
166 }
167
168 static void io_worker_exit(struct io_worker *worker)
169 {
170 struct io_wqe *wqe = worker->wqe;
171 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
172 unsigned flags;
173
174 if (refcount_dec_and_test(&worker->ref))
175 complete(&worker->ref_done);
176 wait_for_completion(&worker->ref_done);
177
178 preempt_disable();
179 current->flags &= ~PF_IO_WORKER;
180 flags = worker->flags;
181 worker->flags = 0;
182 if (flags & IO_WORKER_F_RUNNING)
183 atomic_dec(&acct->nr_running);
184 worker->flags = 0;
185 preempt_enable();
186
187 raw_spin_lock_irq(&wqe->lock);
188 if (flags & IO_WORKER_F_FREE)
189 hlist_nulls_del_rcu(&worker->nulls_node);
190 list_del_rcu(&worker->all_list);
191 acct->nr_workers--;
192 raw_spin_unlock_irq(&wqe->lock);
193
194 kfree_rcu(worker, rcu);
195 io_worker_ref_put(wqe->wq);
196 do_exit(0);
197 }
198
199 static inline bool io_wqe_run_queue(struct io_wqe *wqe)
200 __must_hold(wqe->lock)
201 {
202 if (!wq_list_empty(&wqe->work_list) &&
203 !(wqe->flags & IO_WQE_FLAG_STALLED))
204 return true;
205 return false;
206 }
207
208 /*
209 * Check head of free list for an available worker. If one isn't available,
210 * caller must create one.
211 */
212 static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
213 __must_hold(RCU)
214 {
215 struct hlist_nulls_node *n;
216 struct io_worker *worker;
217
218 n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
219 if (is_a_nulls(n))
220 return false;
221
222 worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
223 if (io_worker_get(worker)) {
224 wake_up_process(worker->task);
225 io_worker_release(worker);
226 return true;
227 }
228
229 return false;
230 }
231
232 /*
233 * We need a worker. If we find a free one, we're good. If not, and we're
234 * below the max number of workers, create one.
235 */
236 static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
237 {
238 bool ret;
239
240 /*
241 * Most likely an attempt to queue unbounded work on an io_wq that
242 * wasn't setup with any unbounded workers.
243 */
244 WARN_ON_ONCE(!acct->max_workers);
245
246 rcu_read_lock();
247 ret = io_wqe_activate_free_worker(wqe);
248 rcu_read_unlock();
249
250 if (!ret && acct->nr_workers < acct->max_workers) {
251 atomic_inc(&acct->nr_running);
252 atomic_inc(&wqe->wq->worker_refs);
253 create_io_worker(wqe->wq, wqe, acct->index);
254 }
255 }
256
257 static void io_wqe_inc_running(struct io_worker *worker)
258 {
259 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
260
261 atomic_inc(&acct->nr_running);
262 }
263
264 struct create_worker_data {
265 struct callback_head work;
266 struct io_wqe *wqe;
267 int index;
268 };
269
270 static void create_worker_cb(struct callback_head *cb)
271 {
272 struct create_worker_data *cwd;
273 struct io_wq *wq;
274
275 cwd = container_of(cb, struct create_worker_data, work);
276 wq = cwd->wqe->wq;
277 create_io_worker(wq, cwd->wqe, cwd->index);
278 kfree(cwd);
279 }
280
281 static void io_queue_worker_create(struct io_wqe *wqe, struct io_wqe_acct *acct)
282 {
283 struct create_worker_data *cwd;
284 struct io_wq *wq = wqe->wq;
285
286 /* raced with exit, just ignore create call */
287 if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
288 goto fail;
289
290 cwd = kmalloc(sizeof(*cwd), GFP_ATOMIC);
291 if (cwd) {
292 init_task_work(&cwd->work, create_worker_cb);
293 cwd->wqe = wqe;
294 cwd->index = acct->index;
295 if (!task_work_add(wq->task, &cwd->work, TWA_SIGNAL))
296 return;
297
298 kfree(cwd);
299 }
300 fail:
301 atomic_dec(&acct->nr_running);
302 io_worker_ref_put(wq);
303 }
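
/*
 * Worker creation can be requested from contexts that cannot sleep (see
 * io_wqe_dec_running() below, which runs under wqe->lock with interrupts
 * off), while create_io_worker() itself may block. Hence the GFP_ATOMIC
 * allocation and task_work_add() above: the actual thread creation is
 * deferred to the io_wq owner task via create_worker_cb().
 */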

static void io_wqe_dec_running(struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe)) {
		atomic_inc(&acct->nr_running);
		atomic_inc(&wqe->wq->worker_refs);
		io_queue_worker_create(wqe, acct);
	}
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	BUILD_BUG_ON((IO_WQ_ACCT_UNBOUND ^ IO_WQ_ACCT_BOUND) != 1);

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		int index = work_bound ? IO_WQ_ACCT_UNBOUND : IO_WQ_ACCT_BOUND;
		io_wqe_dec_running(worker);
		worker->flags ^= IO_WORKER_F_BOUND;
		wqe->acct[index].nr_workers--;
		wqe->acct[index ^ 1].nr_workers++;
		io_wqe_inc_running(worker);
	}
}

/*
 * No work, worker going to sleep. Move to the free list so that
 * io_wqe_activate_free_worker() can find and wake it again.
 */
static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
{
	struct io_wq *wq = wqe->wq;

	spin_lock(&wq->hash->wait.lock);
	if (list_empty(&wqe->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wqe->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wqe->wait.entry);
		}
	}
	spin_unlock(&wq->hash->wait.lock);
}
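
/*
 * Hash stall protocol: if io_get_next_work() below finds every runnable
 * item serialised behind a hash bucket that is already running, the wqe
 * parks itself on wq->hash->wait via io_wait_on_hash(). Whoever finishes
 * the last item for that bucket clears the bit and wakes the waitqueue
 * (see io_worker_handle_work()), which fires io_wqe_hash_wake() and kicks
 * a free worker on the stalled wqe.
 */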

static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;

	wq_list_for_each(node, prev, &wqe->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&wqe->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wqe->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&wqe->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		raw_spin_unlock(&wqe->lock);
		io_wait_on_hash(wqe, stall_hash);
		raw_spin_lock(&wqe->lock);
	}

	return NULL;
}
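
/*
 * Example state of the queue (for illustration only):
 *
 *	work_list:   A(hash 3) -> B(hash 3) -> C(unhashed) -> D(hash 7)
 *	hash_tail:   [3] = B, [7] = D
 *
 * io_get_next_work() first tries bucket 3: if it is not already running,
 * it claims the bit, cuts the whole [A, B] span out with wq_list_cut() and
 * returns A; if bucket 3 is busy, it records it as the stall hash, skips
 * past B, and hands out the unhashed C instead.
 */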

static bool io_flush_signals(void)
{
	if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
		__set_current_state(TASK_RUNNING);
		tracehook_notify_signal();
		return true;
	}
	return false;
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_flush_signals();
		cond_resched();
	}

	spin_lock_irq(&worker->lock);
	worker->cur_work = work;
	spin_unlock_irq(&worker->lock);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);

static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		raw_spin_unlock_irq(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				clear_bit(hash, &wq->hash->map);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
				raw_spin_lock_irq(&wqe->lock);
				wqe->flags &= ~IO_WQE_FLAG_STALLED;
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				raw_spin_unlock_irq(&wqe->lock);
			}
		} while (work);

		raw_spin_lock_irq(&wqe->lock);
	} while (1);
}
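
/*
 * Note on the dependent-link loop above: wq->free_work() may hand back a
 * linked follow-up request. An unhashed linked item is picked up directly
 * by this worker when no hashed successor is pending; otherwise the link
 * is re-enqueued through io_wqe_enqueue() and queues like any other work.
 */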

static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
loop:
		raw_spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			io_worker_handle_work(worker);
			goto loop;
		}
		__io_worker_idle(wqe, worker);
		raw_spin_unlock_irq(&wqe->lock);
		if (io_flush_signals())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		if (ret)
			continue;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		raw_spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			raw_spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	raw_spin_lock_irq(&worker->wqe->lock);
	io_wqe_dec_running(worker);
	raw_spin_unlock_irq(&worker->wqe->lock);
}
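
/*
 * io_wq_worker_running() and io_wq_worker_sleeping() are the scheduler's
 * hooks into this pool: the core scheduler calls them as PF_IO_WORKER
 * tasks wake up and block, which is what keeps acct->nr_running in step
 * with the number of workers actually making forward progress.
 */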

static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		goto fail;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (IS_ERR(tsk)) {
		kfree(worker);
fail:
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
		return;
	}

	tsk->pf_io_worker = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, cpumask_of_node(wqe->node));
	tsk->flags |= PF_NO_SETAFFINITY;

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	raw_spin_unlock_irq(&wqe->lock);
	wake_up_new_task(tsk);
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}

static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}

static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}

static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &wqe->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	int work_flags;
	unsigned long flags;

	if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
		io_run_cancel(work, wqe);
		return;
	}

	work_flags = work->flags;
	raw_spin_lock_irqsave(&wqe->lock, flags);
	io_wqe_insert_work(wqe, work);
	wqe->flags &= ~IO_WQE_FLAG_STALLED;
	raw_spin_unlock_irqrestore(&wqe->lock, flags);

	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))
		io_wqe_wake_worker(wqe, acct);
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
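
/*
 * Usage sketch (illustrative; the real call sites live in io_uring): a
 * submitter that must serialise buffered writes to one file can do
 *
 *	io_wq_hash_work(&req->work, file_inode(req->file));
 *	io_wq_enqueue(wq, &req->work);
 *
 * Requests hashing to the same bucket then run strictly one after another,
 * while work for other inodes proceeds in parallel.
 */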

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;
	unsigned long flags;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock_irqsave(&worker->lock, flags);
	if (worker->cur_work &&
	    match->fn(worker->cur_work, match->data)) {
		set_notify_signal(worker->task);
		match->nr_running++;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return match->nr_running && !match->cancel_all;
}

static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}
	wq_list_del(&wqe->work_list, &work->list, prev);
}

static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;

retry:
	raw_spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock_irqrestore(&wqe->lock, flags);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		if (!match->cancel_all)
			return;

		/* not safe to continue after unlock */
		goto retry;
	}
	raw_spin_unlock_irqrestore(&wqe->lock, flags);
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
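
/*
 * Minimal cancel callback, for illustration: match one work item by pointer
 * and stop at the first hit.
 *
 *	static bool match_one(struct io_wq_work *work, void *data)
 *	{
 *		return work == data;
 *	}
 *
 *	ret = io_wq_cancel_cb(wq, match_one, work, false);
 *
 * With cancel_all == false, a pending match is cancelled and reported as
 * IO_WQ_CANCEL_OK without ever poking the running workers.
 */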

static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			    int sync, void *key)
{
	struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);

	list_del_init(&wait->entry);

	rcu_read_lock();
	io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();
	return 1;
}

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes)
		goto err_wq;

	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wqes;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].index = IO_WQ_ACCT_BOUND;
		wqe->acct[IO_WQ_ACCT_UNBOUND].index = IO_WQ_ACCT_UNBOUND;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wait.func = io_wqe_hash_wake;
		INIT_LIST_HEAD(&wqe->wait.entry);
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	wq->task = get_task_struct(data->task);
	refcount_set(&wq->refs, 1);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node)
		kfree(wq->wqes[node]);
err_wqes:
	kfree(wq->wqes);
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}
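
/*
 * Setup sketch (illustrative, not the io_uring call site verbatim): the
 * owner fills in an io_wq_data with a shared hash, the owning task and the
 * two work callbacks, then sizes the bounded pool:
 *
 *	struct io_wq_data data = {
 *		.hash		= hash,
 *		.task		= current,
 *		.free_work	= my_free_work,
 *		.do_work	= my_do_work,
 *	};
 *	struct io_wq *wq = io_wq_create(bounded, &data);
 *
 * free_work and do_work must both be set (checked at the top of
 * io_wq_create()), and the unbounded side is capped by RLIMIT_NPROC rather
 * than by the bounded argument.
 */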

static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct create_worker_data *cwd;

	if (cb->func != create_worker_cb)
		return false;
	cwd = container_of(cb, struct create_worker_data, work);
	return cwd->wqe->wq == data;
}

static void io_wq_exit_workers(struct io_wq *wq)
{
	struct callback_head *cb;
	int node;

	set_bit(IO_WQ_BIT_EXIT, &wq->state);

	if (!wq->task)
		return;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct create_worker_data *cwd;

		cwd = container_of(cb, struct create_worker_data, work);
		atomic_dec(&cwd->wqe->acct[cwd->index].nr_running);
		io_worker_ref_put(wq);
		kfree(cwd);
	}

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
		spin_lock_irq(&wq->hash->wait.lock);
		list_del_init(&wq->wqes[node]->wait.entry);
		spin_unlock_irq(&wq->hash->wait.lock);
	}
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);
	put_task_struct(wq->task);
	wq->task = NULL;
}

static void io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	io_wq_exit_workers(wq);

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_cb_cancel_data match = {
			.fn		= io_wq_work_match_all,
			.cancel_all	= true,
		};
		io_wqe_cancel_pending_work(wqe, &match);
		kfree(wqe);
	}
	io_wq_put_hash(wq->hash);
	kfree(wq->wqes);
	kfree(wq);
}

void io_wq_put(struct io_wq *wq)
{
	if (refcount_dec_and_test(&wq->refs))
		io_wq_destroy(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
	io_wq_exit_workers(wq);
	io_wq_put(wq);
}

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	set_cpus_allowed_ptr(worker->task, cpumask_of_node(worker->wqe->node));

	return false;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL);
	rcu_read_unlock();
	return 0;
}

static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, NULL);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}
subsys_initcall(io_wq_init);