// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/tracehook.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
};
/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct completion ref_done;

	unsigned long create_state;
	struct callback_head create_work;
	int create_index;

	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};
#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
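
/*
 * Note (summarising io_wq_hash_work()/io_get_work_hash() below): the hash
 * bucket index lives in the upper bits of work->flags, above
 * IO_WQ_HASH_SHIFT, with IO_WQ_WORK_HASHED marking the work as hashed. The
 * IO_WQ_* flag definitions themselves come from io-wq.h.
 */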
enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
	IO_WQ_ACCT_NR,
};

struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
	struct io_wq_work_list work_list;
	unsigned long flags;
};
/*
 * Per-node worker thread pool
 */
struct io_wqe {
	raw_spinlock_t lock;
	struct io_wqe_acct acct[2];

	int node;

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

	cpumask_var_t cpu_mask;
};
/*
 * Per io_wq state
 */
struct io_wq {
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct io_wq_hash *hash;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	struct task_struct *task;

	struct io_wqe *wqes[];
};

static enum cpuhp_state io_wq_online;
struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
static void io_wqe_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);
static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}
static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}
static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
{
	return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}
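
/*
 * Worker accounting is split into two classes: bound workers, capped by the
 * "bounded" value handed to io_wq_create(), and unbound workers, capped by
 * RLIMIT_NPROC (see io_wq_create() below). Work carrying IO_WQ_WORK_UNBOUND
 * is charged to the unbound class, everything else to the bound class.
 */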
static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
}
static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
}
static void io_worker_ref_put(struct io_wq *wq)
{
	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
}
static void io_worker_cancel_cb(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	atomic_dec(&acct->nr_running);
	raw_spin_lock(&worker->wqe->lock);
	acct->nr_workers--;
	raw_spin_unlock(&worker->wqe->lock);
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}
static bool io_task_worker_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker == data;
}
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	while (1) {
		struct callback_head *cb = task_work_cancel_match(wq->task,
						io_task_worker_match, worker);

		if (!cb)
			break;
		io_worker_cancel_cb(worker);
	}

	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
	wait_for_completion(&worker->ref_done);

	raw_spin_lock(&wqe->lock);
	if (worker->flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	io_wqe_dec_running(worker);
	current->flags &= ~PF_IO_WORKER;
	raw_spin_unlock(&wqe->lock);

	kfree_rcu(worker, rcu);
	io_worker_ref_put(wqe->wq);
}
static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
{
	if (!wq_list_empty(&acct->work_list) &&
	    !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
		return true;

	return false;
}
/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
					struct io_wqe_acct *acct)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	/*
	 * Iterate free_list and see if we can find an idle worker to
	 * activate. If a given worker is on the free_list but in the process
	 * of exiting, keep trying.
	 */
	hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		if (io_wqe_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		if (wake_up_process(worker->task)) {
			io_worker_release(worker);
			return true;
		}
		io_worker_release(worker);
	}

	return false;
}
/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers >= acct->max_workers) {
		raw_spin_unlock(&wqe->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wqe->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wqe->wq->worker_refs);
	return create_io_worker(wqe->wq, wqe, acct->index);
}
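
/*
 * Note on accounting: the caller above bumps both acct->nr_running and
 * wq->worker_refs before attempting the create; the failure paths in
 * create_io_worker()/create_worker_cont() below are responsible for
 * dropping them again (and for decrementing nr_workers).
 */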
static void io_wqe_inc_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	atomic_inc(&acct->nr_running);
}
static void create_worker_cb(struct callback_head *cb)
{
	struct io_worker *worker;
	struct io_wq *wq;
	struct io_wqe *wqe;
	struct io_wqe_acct *acct;
	bool do_create = false;

	worker = container_of(cb, struct io_worker, create_work);
	wqe = worker->wqe;
	wq = wqe->wq;
	acct = &wqe->acct[worker->create_index];
	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers < acct->max_workers) {
		acct->nr_workers++;
		do_create = true;
	}
	raw_spin_unlock(&wqe->lock);
	if (do_create) {
		create_io_worker(wq, wqe, worker->create_index);
	} else {
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
	}
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}
static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wqe_acct *acct,
				   task_work_func_t func)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	/* raced with exit, just ignore create call */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		goto fail;
	if (!io_worker_get(worker))
		goto fail;
	/*
	 * create_state manages ownership of create_work/index. We should
	 * only need one entry per worker, as the worker going to sleep
	 * will trigger the condition, and waking will clear it once it
	 * runs the task_work.
	 */
	if (test_bit(0, &worker->create_state) ||
	    test_and_set_bit_lock(0, &worker->create_state))
		goto fail_release;

	atomic_inc(&wq->worker_refs);
	init_task_work(&worker->create_work, func);
	worker->create_index = acct->index;
	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
		/*
		 * EXIT may have been set after checking it above, check after
		 * adding the task_work and remove any creation item if it is
		 * now set. wq exit does that too, but we can have added this
		 * work item after we canceled in io_wq_exit_workers().
		 */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
			io_wq_cancel_tw_create(wq);
		io_worker_ref_put(wq);
		return true;
	}
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	atomic_dec(&acct->nr_running);
	io_worker_ref_put(wq);
	return false;
}
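
/*
 * Worker creation is deferred to the io_uring owning task via task_work with
 * TWA_SIGNAL: the actual create_io_thread() happens in create_worker_cb() or
 * create_worker_cont() when wq->task runs its task_work, not in the context
 * that noticed the need for a new worker.
 */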
static void io_wqe_dec_running(struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;

	if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) {
		atomic_inc(&acct->nr_running);
		atomic_inc(&wqe->wq->worker_refs);
		raw_spin_unlock(&wqe->lock);
		io_queue_worker_create(worker, acct, create_worker_cb);
		raw_spin_lock(&wqe->lock);
	}
}
/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}
}
/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}
}
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}
static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
{
	struct io_wq *wq = wqe->wq;
	bool ret = false;

	spin_lock_irq(&wq->hash->wait.lock);
	if (list_empty(&wqe->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wqe->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wqe->wait.entry);
			ret = true;
		}
	}
	spin_unlock_irq(&wq->hash->wait.lock);
	return ret;
}
static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
					   struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;
	struct io_wqe *wqe = worker->wqe;

	wq_list_for_each(node, prev, &acct->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&acct->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wqe->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&acct->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		bool unstalled;

		/*
		 * Set this before dropping the lock to avoid racing with new
		 * work being added and clearing the stalled bit.
		 */
		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
		raw_spin_unlock(&wqe->lock);
		unstalled = io_wait_on_hash(wqe, stall_hash);
		raw_spin_lock(&wqe->lock);
		if (unstalled) {
			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
			if (wq_has_sleeper(&wqe->wq->hash->wait))
				wake_up(&wqe->wq->hash->wait);
		}
	}

	return NULL;
}
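
/*
 * If every pending item hashes to a bucket that is already running
 * somewhere, the accounting class is marked IO_ACCT_STALLED_BIT and the wqe
 * wait entry is parked on wq->hash->wait; io_wqe_hash_wake() clears the
 * stall once a bucket is released, so workers don't spin on work they
 * cannot start yet.
 */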
static bool io_flush_signals(void)
{
	if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
		__set_current_state(TASK_RUNNING);
		tracehook_notify_signal();
		return true;
	}

	return false;
}
static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	spin_lock(&worker->lock);
	worker->cur_work = work;
	spin_unlock(&worker->lock);
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(acct, worker);
		if (work)
			__io_worker_busy(wqe, worker, work);

		raw_spin_unlock(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				/* serialize hash clear with wake_up() */
				spin_lock_irq(&wq->hash->wait.lock);
				clear_bit(hash, &wq->hash->map);
				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
				spin_unlock_irq(&wq->hash->wait.lock);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
				raw_spin_lock(&wqe->lock);
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				raw_spin_unlock(&wqe->lock);
			}
		} while (work);

		raw_spin_lock(&wqe->lock);
	} while (1);
}
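
/*
 * The inner loop above walks a dependent link: wq->free_work() hands back
 * the next request in the chain, an unhashed linked item can be executed
 * inline by the same worker, and anything else is pushed back through
 * io_wqe_enqueue() so hashing and ordering constraints still apply.
 */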
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool last_timeout = false;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
loop:
		raw_spin_lock(&wqe->lock);
		if (io_acct_run_queue(acct)) {
			io_worker_handle_work(worker);
			goto loop;
		}
		/* timed out, exit unless we're the last worker */
		if (last_timeout && acct->nr_workers > 1) {
			acct->nr_workers--;
			raw_spin_unlock(&wqe->lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		last_timeout = false;
		__io_worker_idle(wqe, worker);
		raw_spin_unlock(&wqe->lock);
		if (io_flush_signals())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		last_timeout = !ret;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		raw_spin_lock(&wqe->lock);
		io_worker_handle_work(worker);
	}

	io_worker_exit(worker);
	return 0;
}
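
/*
 * Idle behaviour: a worker that finds no work sleeps for
 * WORKER_IDLE_TIMEOUT; if the timeout expires and it is not the only worker
 * left in its accounting class, it exits rather than lingering, while the
 * last worker stays so the class is never left empty while work may arrive.
 */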
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(worker);
}
/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	raw_spin_lock(&worker->wqe->lock);
	io_wqe_dec_running(worker);
	raw_spin_unlock(&worker->wqe->lock);
}
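
/*
 * io_wq_worker_running() and io_wq_worker_sleeping() are the scheduler
 * hooks for PF_IO_WORKER tasks (invoked from kernel/sched/core.c when an io
 * worker blocks or wakes), which is how nr_running tracks truly-running
 * workers and how a blocking worker can trigger creation of a replacement.
 */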
static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
			       struct task_struct *tsk)
{
	tsk->pf_io_worker = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
	tsk->flags |= PF_NO_SETAFFINITY;

	raw_spin_lock(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	raw_spin_unlock(&wqe->lock);
	wake_up_new_task(tsk);
}
static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}
static inline bool io_should_retry_thread(long err)
{
	/*
	 * Prevent perpetual task_work retry, if the task (or its group) is
	 * exiting.
	 */
	if (fatal_signal_pending(current))
		return false;

	switch (err) {
	case -EAGAIN:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
		return true;
	default:
		return false;
	}
}
static void create_worker_cont(struct callback_head *cb)
{
	struct io_worker *worker;
	struct task_struct *tsk;
	struct io_wqe *wqe;

	worker = container_of(cb, struct io_worker, create_work);
	clear_bit_unlock(0, &worker->create_state);
	wqe = worker->wqe;
	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
		io_worker_release(worker);
		return;
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		struct io_wqe_acct *acct = io_wqe_get_acct(worker);

		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_all,
				.cancel_all	= true,
			};

			while (io_acct_cancel_pending_work(wqe, acct, &match))
				raw_spin_lock(&wqe->lock);
		}
		raw_spin_unlock(&wqe->lock);
		io_worker_ref_put(wqe->wq);
		kfree(worker);
		return;
	}

	/* re-create attempts grab a new worker ref, drop the existing one */
	io_worker_release(worker);
	schedule_work(&worker->work);
}
static void io_workqueue_create(struct work_struct *work)
{
	struct io_worker *worker = container_of(work, struct io_worker, work);
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	if (!io_queue_worker_create(worker, acct, create_worker_cont))
		kfree(worker);
}
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker) {
fail:
		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		raw_spin_unlock(&wqe->lock);
		io_worker_ref_put(wq);
		return false;
	}

	refcount_set(&worker->ref, 1);
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;

	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		kfree(worker);
		goto fail;
	} else {
		INIT_WORK(&worker->work, io_workqueue_create);
		schedule_work(&worker->work);
	}

	return true;
}
/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}
static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}
static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}
static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &acct->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}
static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
	return work == data;
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned work_flags = work->flags;
	bool do_create;

	/*
	 * If io-wq is exiting for this task, or if the request has explicitly
	 * been marked as one that should not get executed, cancel it here.
	 */
	if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
	    (work->flags & IO_WQ_WORK_CANCEL)) {
		io_run_cancel(work, wqe);
		return;
	}

	raw_spin_lock(&wqe->lock);
	io_wqe_insert_work(wqe, work);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);

	do_create = !io_wqe_activate_free_worker(wqe, acct);

	raw_spin_unlock(&wqe->lock);

	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))) {
		bool did_create;

		did_create = io_wqe_create_worker(wqe, acct);
		if (likely(did_create))
			return;

		raw_spin_lock(&wqe->lock);
		/* fatal condition, failed to create the first worker */
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_item,
				.data		= work,
				.cancel_all	= false,
			};

			if (io_acct_cancel_pending_work(wqe, acct, &match))
				raw_spin_lock(&wqe->lock);
		}
		raw_spin_unlock(&wqe->lock);
	}
}
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}
/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
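
/*
 * Illustrative only: a caller that wants buffered writes to the same file
 * serialized would hash on the inode, along the lines of
 *
 *	io_wq_hash_work(&req->work, file_inode(req->file));
 *
 * "req" and its members are io_uring-side names, not part of this file.
 */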
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock(&worker->lock);
	if (worker->cur_work &&
	    match->fn(worker->cur_work, match->data)) {
		set_notify_signal(worker->task);
		match->nr_running++;
	}
	spin_unlock(&worker->lock);

	return match->nr_running && !match->cancel_all;
}
static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}

	wq_list_del(&acct->work_list, &work->list, prev);
}
static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match)
	__releases(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	wq_list_for_each(node, prev, &acct->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock(&wqe->lock);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		/* not safe to continue after unlock */
		return true;
	}

	return false;
}
static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	int i;
retry:
	raw_spin_lock(&wqe->lock);
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);

		if (io_acct_cancel_pending_work(wqe, acct, match)) {
			if (match->cancel_all)
				goto retry;
			return;
		}
	}
	raw_spin_unlock(&wqe->lock);
}
static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			    int sync, void *key)
{
	struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
	int i;

	list_del_init(&wait->entry);

	rcu_read_lock();
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = &wqe->acct[i];

		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
			io_wqe_activate_free_worker(wqe, acct);
	}
	rcu_read_unlock();
	return 1;
}
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret, node, i;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wq;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
			goto err;
		cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		INIT_LIST_HEAD(&wqe->wait.entry);
		wqe->wait.func = io_wqe_hash_wake;
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			struct io_wqe_acct *acct = &wqe->acct[i];

			acct->index = i;
			atomic_set(&acct->nr_running, 0);
			INIT_WQ_LIST(&acct->work_list);
		}
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	wq->task = get_task_struct(data->task);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node) {
		if (!wq->wqes[node])
			continue;
		free_cpumask_var(wq->wqes[node]->cpu_mask);
		kfree(wq->wqes[node]);
	}
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}
static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker->wqe->wq == data;
}
void io_wq_exit_start(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
}
static void io_wq_cancel_tw_create(struct io_wq *wq)
{
	struct callback_head *cb;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct io_worker *worker;

		worker = container_of(cb, struct io_worker, create_work);
		io_worker_cancel_cb(worker);
	}
}
static void io_wq_exit_workers(struct io_wq *wq)
{
	int node;

	if (!wq->task)
		return;

	io_wq_cancel_tw_create(wq);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
	}
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);

	for_each_node(node) {
		spin_lock_irq(&wq->hash->wait.lock);
		list_del_init(&wq->wqes[node]->wait.entry);
		spin_unlock_irq(&wq->hash->wait.lock);
	}
	put_task_struct(wq->task);
	wq->task = NULL;
}
static void io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_cb_cancel_data match = {
			.fn		= io_wq_work_match_all,
			.cancel_all	= true,
		};

		io_wqe_cancel_pending_work(wqe, &match);
		free_cpumask_var(wqe->cpu_mask);
		kfree(wqe);
	}
	io_wq_put_hash(wq->hash);
	kfree(wq);
}
void io_wq_put_and_exit(struct io_wq *wq)
{
	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_exit_workers(wq);
	io_wq_destroy(wq);
}
struct online_data {
	unsigned int cpu;
	bool online;
};
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct online_data *od = data;

	if (od->online)
		cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
	else
		cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
	return false;
}
static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
	struct online_data od = {
		.cpu = cpu,
		.online = online
	};
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
	rcu_read_unlock();
	return 0;
}
static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, true);
}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, false);
}
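
/*
 * These two callbacks are registered through the dynamic CPU hotplug state
 * set up in io_wq_init() below; each io_wq adds itself as an instance via
 * cpuhp_state_add_instance_nocalls() in io_wq_create(), so worker affinity
 * masks follow CPUs as they come online or go offline.
 */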
int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
{
	int i;

	rcu_read_lock();
	for_each_node(i) {
		struct io_wqe *wqe = wq->wqes[i];

		if (mask)
			cpumask_copy(wqe->cpu_mask, mask);
		else
			cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
	}
	rcu_read_unlock();
	return 0;
}
/*
 * Set max number of unbounded workers, returns old value. If new_count is 0,
 * then just return the old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
	int prev[IO_WQ_ACCT_NR];
	bool first_node = true;
	int i, node;

	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);

	for (i = 0; i < 2; i++) {
		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
	}

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		prev[i] = 0;

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_wqe_acct *acct;

		raw_spin_lock(&wqe->lock);
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			acct = &wqe->acct[i];
			if (first_node)
				prev[i] = max_t(int, acct->max_workers, prev[i]);
			if (new_count[i])
				acct->max_workers = new_count[i];
		}
		raw_spin_unlock(&wqe->lock);
		first_node = false;
	}
	rcu_read_unlock();

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		new_count[i] = prev[i];

	return 0;
}
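
/*
 * This is the backend for io_uring's IORING_REGISTER_IOWQ_MAX_WORKERS
 * registration opcode: new_count[] carries the requested bound/unbound caps
 * in and the previous values out, with 0 meaning "leave unchanged".
 */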
static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}

subsys_initcall(io_wq_init);