// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/tracehook.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
};
/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct completion ref_done;

	unsigned long create_state;
	struct callback_head create_work;
	int create_index;

	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};
#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
	struct io_wq_work_list work_list;
	unsigned long flags;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
	IO_WQ_ACCT_NR,
};
/*
 * Per-node worker thread pool
 */
struct io_wqe {
	raw_spinlock_t lock;
	struct io_wqe_acct acct[2];

	int node;

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

	cpumask_var_t cpu_mask;
};
/*
 * Per io_wq state
 */
struct io_wq {
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct io_wq_hash *hash;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	struct task_struct *task;

	struct io_wqe *wqes[];
};

static enum cpuhp_state io_wq_online;
struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
static void io_wqe_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}

static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
{
	return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
}

static void io_worker_ref_put(struct io_wq *wq)
{
	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
}
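
/*
 * Undo the accounting and references taken for a queued worker-creation
 * request that is being cancelled before it had a chance to run.
 */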
static void io_worker_cancel_cb(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	atomic_dec(&acct->nr_running);
	raw_spin_lock(&worker->wqe->lock);
	acct->nr_workers--;
	raw_spin_unlock(&worker->wqe->lock);
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}
static bool io_task_worker_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker == data;
}
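
/*
 * Final teardown of a worker: cancel any still-queued create task_work for
 * it, wait for the last reference to drop, unlink it from the free and
 * all-worker lists, and drop the wq worker reference.
 */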
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	while (1) {
		struct callback_head *cb = task_work_cancel_match(wq->task,
						io_task_worker_match, worker);

		if (!cb)
			break;
		io_worker_cancel_cb(worker);
	}

	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
	wait_for_completion(&worker->ref_done);

	raw_spin_lock(&wqe->lock);
	if (worker->flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	io_wqe_dec_running(worker);
	worker->flags = 0;
	current->flags &= ~PF_IO_WORKER;
	raw_spin_unlock(&wqe->lock);

	kfree_rcu(worker, rcu);
	io_worker_ref_put(wqe->wq);
	do_exit(0);
}
static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
{
	if (!wq_list_empty(&acct->work_list) &&
	    !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
		return true;

	return false;
}
/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
					struct io_wqe_acct *acct)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	/*
	 * Iterate free_list and see if we can find an idle worker to
	 * activate. If a given worker is on the free_list but in the process
	 * of exiting, keep trying.
	 */
	hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		if (io_wqe_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		if (wake_up_process(worker->task)) {
			io_worker_release(worker);
			return true;
		}
		io_worker_release(worker);
	}

	return false;
}
/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers >= acct->max_workers) {
		raw_spin_unlock(&wqe->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wqe->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wqe->wq->worker_refs);
	return create_io_worker(wqe->wq, wqe, acct->index);
}
static void io_wqe_inc_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	atomic_inc(&acct->nr_running);
}
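
/*
 * task_work callback that actually creates a new worker, provided the
 * accounting limits still allow it; otherwise it undoes the counts and
 * references taken when the creation request was queued.
 */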
static void create_worker_cb(struct callback_head *cb)
{
	struct io_worker *worker;
	struct io_wq *wq;
	struct io_wqe *wqe;
	struct io_wqe_acct *acct;
	bool do_create = false;

	worker = container_of(cb, struct io_worker, create_work);
	wqe = worker->wqe;
	wq = wqe->wq;
	acct = &wqe->acct[worker->create_index];
	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers < acct->max_workers) {
		acct->nr_workers++;
		do_create = true;
	}
	raw_spin_unlock(&wqe->lock);
	if (do_create) {
		create_io_worker(wq, wqe, worker->create_index);
	} else {
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
	}
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}
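
/*
 * Queue worker creation via task_work on the io_wq owner task. The
 * create_state bit serializes use of create_work/create_index per worker,
 * so only one creation request can be in flight for a given worker.
 */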
static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wqe_acct *acct,
				   task_work_func_t func)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	/* raced with exit, just ignore create call */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		goto fail;
	if (!io_worker_get(worker))
		goto fail;
	/*
	 * create_state manages ownership of create_work/index. We should
	 * only need one entry per worker, as the worker going to sleep
	 * will trigger the condition, and waking will clear it once it
	 * runs the task_work.
	 */
	if (test_bit(0, &worker->create_state) ||
	    test_and_set_bit_lock(0, &worker->create_state))
		goto fail_release;

	init_task_work(&worker->create_work, func);
	worker->create_index = acct->index;
	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL))
		return true;

	clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	atomic_dec(&acct->nr_running);
	io_worker_ref_put(wq);
	return false;
}
static void io_wqe_dec_running(struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;

	if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) {
		atomic_inc(&acct->nr_running);
		atomic_inc(&wqe->wq->worker_refs);
		io_queue_worker_create(worker, acct, create_worker_cb);
	}
}
/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}
}
/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}
}
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}
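
/*
 * Register this wqe on the hash wait queue so it is woken when the hash bit
 * it stalled on is cleared. If the bit already cleared while we were taking
 * the wait lock, back out again and stay runnable.
 */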
static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
{
	struct io_wq *wq = wqe->wq;

	spin_lock_irq(&wq->hash->wait.lock);
	if (list_empty(&wqe->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wqe->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wqe->wait.entry);
		}
	}
	spin_unlock_irq(&wq->hash->wait.lock);
}
static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
					   struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;
	struct io_wqe *wqe = worker->wqe;

	wq_list_for_each(node, prev, &acct->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&acct->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wqe->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&acct->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		/*
		 * Set this before dropping the lock to avoid racing with new
		 * work being added and clearing the stalled bit.
		 */
		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
		raw_spin_unlock(&wqe->lock);
		io_wait_on_hash(wqe, stall_hash);
		raw_spin_lock(&wqe->lock);
	}

	return NULL;
}
static bool io_flush_signals(void)
{
	if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
		__set_current_state(TASK_RUNNING);
		tracehook_notify_signal();
		return true;
	}
	return false;
}
static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	spin_lock(&worker->lock);
	worker->cur_work = work;
	spin_unlock(&worker->lock);
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
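
/*
 * Main work processing loop: pull the next runnable item off the accounting
 * list and run it, including any dependent link, managing the hash map bits
 * for serialised (hashed) work along the way. Called with wqe->lock held and
 * returns with it dropped.
 */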
static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(acct, worker);
		if (work)
			__io_worker_busy(wqe, worker, work);

		raw_spin_unlock(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				clear_bit(hash, &wq->hash->map);
				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
				raw_spin_lock(&wqe->lock);
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				raw_spin_unlock(&wqe->lock);
			}
		} while (work);

		raw_spin_lock(&wqe->lock);
	} while (1);
}
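
/*
 * Thread function for an io-wq worker. Loops handling pending work until it
 * times out idle (and is not the last worker for its accounting class) or
 * the wq is being torn down.
 */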
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool last_timeout = false;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
loop:
		raw_spin_lock(&wqe->lock);
		if (io_acct_run_queue(acct)) {
			io_worker_handle_work(worker);
			goto loop;
		}
		/* timed out, exit unless we're the last worker */
		if (last_timeout && acct->nr_workers > 1) {
			acct->nr_workers--;
			raw_spin_unlock(&wqe->lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		last_timeout = false;
		__io_worker_idle(wqe, worker);
		raw_spin_unlock(&wqe->lock);
		if (io_flush_signals())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		last_timeout = !ret;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		raw_spin_lock(&wqe->lock);
		io_worker_handle_work(worker);
	}

	io_worker_exit(worker);
	return 0;
}
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(worker);
}
/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	raw_spin_lock(&worker->wqe->lock);
	io_wqe_dec_running(worker);
	raw_spin_unlock(&worker->wqe->lock);
}
static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
			       struct task_struct *tsk)
{
	tsk->pf_io_worker = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
	tsk->flags |= PF_NO_SETAFFINITY;

	raw_spin_lock(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	raw_spin_unlock(&wqe->lock);
	wake_up_new_task(tsk);
}
static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}
static inline bool io_should_retry_thread(long err)
{
	switch (err) {
	case -EAGAIN:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
		return true;
	default:
		return false;
	}
}
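
/*
 * task_work continuation for a deferred worker-creation attempt: retry
 * forking the worker thread, and if the error is not retryable, unwind the
 * accounting (cancelling pending work if this would have been the last
 * worker for the accounting class).
 */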
static void create_worker_cont(struct callback_head *cb)
{
	struct io_worker *worker;
	struct task_struct *tsk;
	struct io_wqe *wqe;

	worker = container_of(cb, struct io_worker, create_work);
	clear_bit_unlock(0, &worker->create_state);
	wqe = worker->wqe;
	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
		io_worker_release(worker);
		return;
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		struct io_wqe_acct *acct = io_wqe_get_acct(worker);

		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_all,
				.cancel_all	= true,
			};

			while (io_acct_cancel_pending_work(wqe, acct, &match))
				raw_spin_lock(&wqe->lock);
		}
		raw_spin_unlock(&wqe->lock);
		io_worker_ref_put(wqe->wq);
		kfree(worker);
		return;
	}

	/* re-create attempts grab a new worker ref, drop the existing one */
	io_worker_release(worker);
	schedule_work(&worker->work);
}
static void io_workqueue_create(struct work_struct *work)
{
	struct io_worker *worker = container_of(work, struct io_worker, work);
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	if (!io_queue_worker_create(worker, acct, create_worker_cont)) {
		clear_bit_unlock(0, &worker->create_state);
		io_worker_release(worker);
		kfree(worker);
	}
}
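
/*
 * Allocate and start a new worker for the given accounting index. On fork
 * failure, either retry asynchronously via the system workqueue or roll the
 * worker accounting back.
 */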
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker) {
fail:
		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		raw_spin_unlock(&wqe->lock);
		io_worker_ref_put(wq);
		return false;
	}

	refcount_set(&worker->ref, 1);
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;

	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		kfree(worker);
		goto fail;
	} else {
		INIT_WORK(&worker->work, io_workqueue_create);
		schedule_work(&worker->work);
	}

	return true;
}
/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}
static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}
static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}
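
/*
 * Insert work into the pending list. Hashed work is chained behind the
 * current tail for its hash bucket so items with the same hash run in order.
 */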
static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &acct->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}
static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
	return work == data;
}
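
/*
 * Queue work on this wqe and make sure someone will run it: wake a free
 * worker if one exists, otherwise create a new one if the limits allow it.
 */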
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned work_flags = work->flags;
	bool do_create;

	/*
	 * If io-wq is exiting for this task, or if the request has explicitly
	 * been marked as one that should not get executed, cancel it here.
	 */
	if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
	    (work->flags & IO_WQ_WORK_CANCEL)) {
		io_run_cancel(work, wqe);
		return;
	}

	raw_spin_lock(&wqe->lock);
	io_wqe_insert_work(wqe, work);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);

	rcu_read_lock();
	do_create = !io_wqe_activate_free_worker(wqe, acct);
	rcu_read_unlock();

	raw_spin_unlock(&wqe->lock);

	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))) {
		bool did_create;

		did_create = io_wqe_create_worker(wqe, acct);
		if (likely(did_create))
			return;

		raw_spin_lock(&wqe->lock);
		/* fatal condition, failed to create the first worker */
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_item,
				.data		= work,
				.cancel_all	= false,
			};

			if (io_acct_cancel_pending_work(wqe, acct, &match))
				raw_spin_lock(&wqe->lock);
		}
		raw_spin_unlock(&wqe->lock);
	}
}
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}
/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock(&worker->lock);
	if (worker->cur_work &&
	    match->fn(worker->cur_work, match->data)) {
		set_notify_signal(worker->task);
		match->nr_running++;
	}
	spin_unlock(&worker->lock);

	return match->nr_running && !match->cancel_all;
}
static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}
	wq_list_del(&acct->work_list, &work->list, prev);
}
static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match)
	__releases(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	wq_list_for_each(node, prev, &acct->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock(&wqe->lock);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		/* not safe to continue after unlock */
		return true;
	}

	return false;
}
static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	int i;
retry:
	raw_spin_lock(&wqe->lock);
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);

		if (io_acct_cancel_pending_work(wqe, acct, match)) {
			if (match->cancel_all)
				goto retry;
			return;
		}
	}
	raw_spin_unlock(&wqe->lock);
}
static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
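
/*
 * Wait queue callback for the hash map: a hash bit was cleared, so kick any
 * accounting class that had stalled on hashed work.
 */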
static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			    int sync, void *key)
{
	struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
	int i;

	list_del_init(&wait->entry);

	rcu_read_lock();
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = &wqe->acct[i];

		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
			io_wqe_activate_free_worker(wqe, acct);
	}
	rcu_read_unlock();
	return 1;
}
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret, node, i;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wq;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
			goto err;
		cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		INIT_LIST_HEAD(&wqe->wait.entry);
		wqe->wait.func = io_wqe_hash_wake;
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			struct io_wqe_acct *acct = &wqe->acct[i];

			acct->index = i;
			atomic_set(&acct->nr_running, 0);
			INIT_WQ_LIST(&acct->work_list);
		}
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	wq->task = get_task_struct(data->task);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node) {
		if (!wq->wqes[node])
			continue;
		free_cpumask_var(wq->wqes[node]->cpu_mask);
		kfree(wq->wqes[node]);
	}
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}
static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker->wqe->wq == data;
}
void io_wq_exit_start(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
}
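
/*
 * Tear down all workers: cancel queued worker-creation requests, wake every
 * worker so it notices IO_WQ_BIT_EXIT, and wait until the last worker
 * reference is dropped.
 */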
static void io_wq_exit_workers(struct io_wq *wq)
{
	struct callback_head *cb;
	int node;

	if (!wq->task)
		return;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct io_worker *worker;

		worker = container_of(cb, struct io_worker, create_work);
		io_worker_cancel_cb(worker);
	}

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
	}
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);

	for_each_node(node) {
		spin_lock_irq(&wq->hash->wait.lock);
		list_del_init(&wq->wqes[node]->wait.entry);
		spin_unlock_irq(&wq->hash->wait.lock);
	}
	put_task_struct(wq->task);
	wq->task = NULL;
}
static void io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_cb_cancel_data match = {
			.fn		= io_wq_work_match_all,
			.cancel_all	= true,
		};
		io_wqe_cancel_pending_work(wqe, &match);
		free_cpumask_var(wqe->cpu_mask);
		kfree(wqe);
	}
	io_wq_put_hash(wq->hash);
	kfree(wq);
}
void io_wq_put_and_exit(struct io_wq *wq)
{
	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_exit_workers(wq);
	io_wq_destroy(wq);
}
struct online_data {
	unsigned int cpu;
	bool online;
};

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct online_data *od = data;

	if (od->online)
		cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
	else
		cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
	return false;
}
static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
	struct online_data od = {
		.cpu = cpu,
		.online = online
	};
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
	rcu_read_unlock();
	return 0;
}
static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, true);
}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, false);
}
int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
{
	int i;

	rcu_read_lock();
	for_each_node(i) {
		struct io_wqe *wqe = wq->wqes[i];

		if (mask)
			cpumask_copy(wqe->cpu_mask, mask);
		else
			cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
	}
	rcu_read_unlock();

	return 0;
}
/*
 * Set max number of unbounded workers, returns old value. If new_count is 0,
 * then just return the old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
	int i, node, prev = 0;

	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);

	for (i = 0; i < 2; i++) {
		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
	}

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_wqe_acct *acct;

		raw_spin_lock(&wqe->lock);
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			acct = &wqe->acct[i];
			prev = max_t(int, acct->max_workers, prev);
			if (new_count[i])
				acct->max_workers = new_count[i];
			new_count[i] = prev;
		}
		raw_spin_unlock(&wqe->lock);
	}
	rcu_read_unlock();
	return 0;
}
static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;

	io_wq_online = ret;
	return 0;
}

subsys_initcall(io_wq_init);