// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/tracehook.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#define WORKER_IDLE_TIMEOUT     (5 * HZ)

enum {
        IO_WORKER_F_UP          = 1,    /* up and active */
        IO_WORKER_F_RUNNING     = 2,    /* account as running */
        IO_WORKER_F_FREE        = 4,    /* worker on free list */
        IO_WORKER_F_BOUND       = 8,    /* is doing bounded work */
};

enum {
        IO_WQ_BIT_EXIT          = 0,    /* wq exiting */
};

enum {
        IO_ACCT_STALLED_BIT     = 0,    /* stalled on hash */
};
/*
 * One for each thread in a wqe pool
 */
struct io_worker {
        refcount_t ref;
        unsigned flags;
        struct hlist_nulls_node nulls_node;
        struct list_head all_list;
        struct task_struct *task;
        struct io_wqe *wqe;

        struct io_wq_work *cur_work;
        spinlock_t lock;

        struct completion ref_done;

        unsigned long create_state;
        struct callback_head create_work;
        int create_index;

        union {
                struct rcu_head rcu;
                struct work_struct work;
        };
};
#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER        6
#else
#define IO_WQ_HASH_ORDER        5
#endif

#define IO_WQ_NR_HASH_BUCKETS   (1u << IO_WQ_HASH_ORDER)
struct io_wqe_acct {
        unsigned nr_workers;
        unsigned max_workers;
        int index;
        atomic_t nr_running;
        struct io_wq_work_list work_list;
        unsigned long flags;
};

enum {
        IO_WQ_ACCT_BOUND,
        IO_WQ_ACCT_UNBOUND,
        IO_WQ_ACCT_NR,
};
/*
 * Per-node worker thread pool
 */
struct io_wqe {
        raw_spinlock_t lock;
        struct io_wqe_acct acct[2];

        int node;

        struct hlist_nulls_head free_list;
        struct list_head all_list;

        struct wait_queue_entry wait;

        struct io_wq *wq;
        struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

        cpumask_var_t cpu_mask;
};
/*
 * Per io_wq state
 */
struct io_wq {
        unsigned long state;

        free_work_fn *free_work;
        io_wq_work_fn *do_work;

        struct io_wq_hash *hash;

        atomic_t worker_refs;
        struct completion worker_done;

        struct hlist_node cpuhp_node;

        struct task_struct *task;

        struct io_wqe *wqes[];
};
static enum cpuhp_state io_wq_online;
struct io_cb_cancel_data {
        work_cancel_fn *fn;
        void *data;
        int nr_running;
        int nr_pending;
        bool cancel_all;
};
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
static void io_wqe_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
                                        struct io_wqe_acct *acct,
                                        struct io_cb_cancel_data *match);
static bool io_worker_get(struct io_worker *worker)
{
        return refcount_inc_not_zero(&worker->ref);
}
static void io_worker_release(struct io_worker *worker)
{
        if (refcount_dec_and_test(&worker->ref))
                complete(&worker->ref_done);
}
static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
{
        return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}
static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
                                                   struct io_wq_work *work)
{
        return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
}
static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
        return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
}
static void io_worker_ref_put(struct io_wq *wq)
{
        if (atomic_dec_and_test(&wq->worker_refs))
                complete(&wq->worker_done);
}
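/*
 * Two reference counts are in play here: worker->ref pins an individual
 * io_worker (its release completes ref_done), while wq->worker_refs counts
 * live workers per io_wq and completes wq->worker_done so that
 * io_wq_exit_workers() can wait for all of them to go away.
 */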
static void io_worker_exit(struct io_worker *worker)
{
        struct io_wqe *wqe = worker->wqe;

        if (refcount_dec_and_test(&worker->ref))
                complete(&worker->ref_done);
        wait_for_completion(&worker->ref_done);

        raw_spin_lock(&wqe->lock);
        if (worker->flags & IO_WORKER_F_FREE)
                hlist_nulls_del_rcu(&worker->nulls_node);
        list_del_rcu(&worker->all_list);
        io_wqe_dec_running(worker);
        current->flags &= ~PF_IO_WORKER;
        raw_spin_unlock(&wqe->lock);

        kfree_rcu(worker, rcu);
        io_worker_ref_put(wqe->wq);
        do_exit(0);
}
static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
{
        if (!wq_list_empty(&acct->work_list) &&
            !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
                return true;

        return false;
}
/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
                                        struct io_wqe_acct *acct)
        __must_hold(RCU)
{
        struct hlist_nulls_node *n;
        struct io_worker *worker;

        /*
         * Iterate free_list and see if we can find an idle worker to
         * activate. If a given worker is on the free_list but in the process
         * of exiting, keep trying.
         */
        hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
                if (!io_worker_get(worker))
                        continue;
                if (io_wqe_get_acct(worker) != acct) {
                        io_worker_release(worker);
                        continue;
                }
                if (wake_up_process(worker->task)) {
                        io_worker_release(worker);
                        return true;
                }
                io_worker_release(worker);
        }

        return false;
}
/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
        /*
         * Most likely an attempt to queue unbounded work on an io_wq that
         * wasn't setup with any unbounded workers.
         */
        if (unlikely(!acct->max_workers))
                pr_warn_once("io-wq is not configured for unbound workers");

        raw_spin_lock(&wqe->lock);
        if (acct->nr_workers >= acct->max_workers) {
                raw_spin_unlock(&wqe->lock);
                return true;
        }
        acct->nr_workers++;
        raw_spin_unlock(&wqe->lock);
        atomic_inc(&acct->nr_running);
        atomic_inc(&wqe->wq->worker_refs);
        return create_io_worker(wqe->wq, wqe, acct->index);
}
static void io_wqe_inc_running(struct io_worker *worker)
{
        struct io_wqe_acct *acct = io_wqe_get_acct(worker);

        atomic_inc(&acct->nr_running);
}
static void create_worker_cb(struct callback_head *cb)
{
        struct io_worker *worker;
        struct io_wq *wq;
        struct io_wqe *wqe;
        struct io_wqe_acct *acct;
        bool do_create = false;

        worker = container_of(cb, struct io_worker, create_work);
        wqe = worker->wqe;
        wq = wqe->wq;
        acct = &wqe->acct[worker->create_index];
        raw_spin_lock(&wqe->lock);
        if (acct->nr_workers < acct->max_workers) {
                acct->nr_workers++;
                do_create = true;
        }
        raw_spin_unlock(&wqe->lock);
        if (do_create) {
                create_io_worker(wq, wqe, worker->create_index);
        } else {
                atomic_dec(&acct->nr_running);
                io_worker_ref_put(wq);
        }
        clear_bit_unlock(0, &worker->create_state);
        io_worker_release(worker);
}
static bool io_queue_worker_create(struct io_worker *worker,
                                   struct io_wqe_acct *acct,
                                   task_work_func_t func)
{
        struct io_wqe *wqe = worker->wqe;
        struct io_wq *wq = wqe->wq;

        /* raced with exit, just ignore create call */
        if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
                goto fail;
        if (!io_worker_get(worker))
                goto fail;
        /*
         * create_state manages ownership of create_work/index. We should
         * only need one entry per worker, as the worker going to sleep
         * will trigger the condition, and waking will clear it once it
         * runs the task_work.
         */
        if (test_bit(0, &worker->create_state) ||
            test_and_set_bit_lock(0, &worker->create_state))
                goto fail_release;

        init_task_work(&worker->create_work, func);
        worker->create_index = acct->index;
        if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL))
                return true;

        clear_bit_unlock(0, &worker->create_state);
fail_release:
        io_worker_release(worker);
fail:
        atomic_dec(&acct->nr_running);
        io_worker_ref_put(wq);
        return false;
}
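/*
 * Taken together with io_wqe_dec_running() below: when the last running
 * worker for an acct is about to sleep and work is still queued, a create
 * request is added as TWA_SIGNAL task_work on wq->task. That task then runs
 * create_worker_cb()/create_worker_cont() in its own context and forks the
 * new worker via create_io_thread(), so worker creation never has to happen
 * from the blocking worker itself.
 */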
static void io_wqe_dec_running(struct io_worker *worker)
        __must_hold(wqe->lock)
{
        struct io_wqe_acct *acct = io_wqe_get_acct(worker);
        struct io_wqe *wqe = worker->wqe;

        if (!(worker->flags & IO_WORKER_F_UP))
                return;

        if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) {
                atomic_inc(&acct->nr_running);
                atomic_inc(&wqe->wq->worker_refs);
                io_queue_worker_create(worker, acct, create_worker_cb);
        }
}
/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
                             struct io_wq_work *work)
        __must_hold(wqe->lock)
{
        if (worker->flags & IO_WORKER_F_FREE) {
                worker->flags &= ~IO_WORKER_F_FREE;
                hlist_nulls_del_init_rcu(&worker->nulls_node);
        }
}
/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
        __must_hold(wqe->lock)
{
        if (!(worker->flags & IO_WORKER_F_FREE)) {
                worker->flags |= IO_WORKER_F_FREE;
                hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
        }
}
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
        return work->flags >> IO_WQ_HASH_SHIFT;
}
static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
{
        struct io_wq *wq = wqe->wq;

        spin_lock_irq(&wq->hash->wait.lock);
        if (list_empty(&wqe->wait.entry)) {
                __add_wait_queue(&wq->hash->wait, &wqe->wait);
                if (!test_bit(hash, &wq->hash->map)) {
                        __set_current_state(TASK_RUNNING);
                        list_del_init(&wqe->wait.entry);
                }
        }
        spin_unlock_irq(&wq->hash->wait.lock);
}
static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
                                           struct io_worker *worker)
        __must_hold(wqe->lock)
{
        struct io_wq_work_node *node, *prev;
        struct io_wq_work *work, *tail;
        unsigned int stall_hash = -1U;
        struct io_wqe *wqe = worker->wqe;

        wq_list_for_each(node, prev, &acct->work_list) {
                unsigned int hash;

                work = container_of(node, struct io_wq_work, list);

                /* not hashed, can run anytime */
                if (!io_wq_is_hashed(work)) {
                        wq_list_del(&acct->work_list, node, prev);
                        return work;
                }

                hash = io_get_work_hash(work);
                /* all items with this hash lie in [work, tail] */
                tail = wqe->hash_tail[hash];

                /* hashed, can run if not already running */
                if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
                        wqe->hash_tail[hash] = NULL;
                        wq_list_cut(&acct->work_list, &tail->list, prev);
                        return work;
                }
                if (stall_hash == -1U)
                        stall_hash = hash;
                /* fast forward to a next hash, for-each will fix up @prev */
                node = &tail->list;
        }

        if (stall_hash != -1U) {
                /*
                 * Set this before dropping the lock to avoid racing with new
                 * work being added and clearing the stalled bit.
                 */
                set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
                raw_spin_unlock(&wqe->lock);
                io_wait_on_hash(wqe, stall_hash);
                raw_spin_lock(&wqe->lock);
        }

        return NULL;
}
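/*
 * Hashed work serialization: only one work item per hash bucket runs at a
 * time (tracked in wq->hash->map). When every remaining item hashes to a
 * bucket that is already running, the acct is marked IO_ACCT_STALLED_BIT and
 * the wqe waits on wq->hash->wait; completion of hashed work or
 * io_wqe_hash_wake() clears the stall and kicks the queue again.
 */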
static bool io_flush_signals(void)
{
        if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
                __set_current_state(TASK_RUNNING);
                tracehook_notify_signal();
                return true;
        }
        return false;
}
static void io_assign_current_work(struct io_worker *worker,
                                   struct io_wq_work *work)
{
        spin_lock(&worker->lock);
        worker->cur_work = work;
        spin_unlock(&worker->lock);
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
static void io_worker_handle_work(struct io_worker *worker)
        __releases(wqe->lock)
{
        struct io_wqe_acct *acct = io_wqe_get_acct(worker);
        struct io_wqe *wqe = worker->wqe;
        struct io_wq *wq = wqe->wq;
        bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

        do {
                struct io_wq_work *work;
get_next:
                /*
                 * If we got some work, mark us as busy. If we didn't, but
                 * the list isn't empty, it means we stalled on hashed work.
                 * Mark us stalled so we don't keep looking for work when we
                 * can't make progress, any work completion or insertion will
                 * clear the stalled flag.
                 */
                work = io_get_next_work(acct, worker);
                if (work)
                        __io_worker_busy(wqe, worker, work);

                raw_spin_unlock(&wqe->lock);
                if (!work)
                        break;
                io_assign_current_work(worker, work);
                __set_current_state(TASK_RUNNING);

                /* handle a whole dependent link */
                do {
                        struct io_wq_work *next_hashed, *linked;
                        unsigned int hash = io_get_work_hash(work);

                        next_hashed = wq_next_work(work);

                        if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
                                work->flags |= IO_WQ_WORK_CANCEL;
                        wq->do_work(work);
                        io_assign_current_work(worker, NULL);

                        linked = wq->free_work(work);
                        work = next_hashed;
                        if (!work && linked && !io_wq_is_hashed(linked)) {
                                work = linked;
                                linked = NULL;
                        }
                        io_assign_current_work(worker, work);
                        if (linked)
                                io_wqe_enqueue(wqe, linked);

                        if (hash != -1U && !next_hashed) {
                                clear_bit(hash, &wq->hash->map);
                                clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
                                if (wq_has_sleeper(&wq->hash->wait))
                                        wake_up(&wq->hash->wait);
                                raw_spin_lock(&wqe->lock);
                                /* skip unnecessary unlock-lock wqe->lock */
                                if (!work)
                                        goto get_next;
                                raw_spin_unlock(&wqe->lock);
                        }
                } while (work);

                raw_spin_lock(&wqe->lock);
        } while (1);
}
static int io_wqe_worker(void *data)
{
        struct io_worker *worker = data;
        struct io_wqe_acct *acct = io_wqe_get_acct(worker);
        struct io_wqe *wqe = worker->wqe;
        struct io_wq *wq = wqe->wq;
        bool last_timeout = false;
        char buf[TASK_COMM_LEN];

        worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

        snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
        set_task_comm(current, buf);

        while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
                long ret;

                set_current_state(TASK_INTERRUPTIBLE);
loop:
                raw_spin_lock(&wqe->lock);
                if (io_acct_run_queue(acct)) {
                        io_worker_handle_work(worker);
                        goto loop;
                }
                /* timed out, exit unless we're the last worker */
                if (last_timeout && acct->nr_workers > 1) {
                        acct->nr_workers--;
                        raw_spin_unlock(&wqe->lock);
                        __set_current_state(TASK_RUNNING);
                        break;
                }
                last_timeout = false;
                __io_worker_idle(wqe, worker);
                raw_spin_unlock(&wqe->lock);
                if (io_flush_signals())
                        continue;
                ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
                if (signal_pending(current)) {
                        struct ksignal ksig;

                        if (!get_signal(&ksig))
                                continue;
                        break;
                }
                last_timeout = !ret;
        }

        if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
                raw_spin_lock(&wqe->lock);
                io_worker_handle_work(worker);
        }

        io_worker_exit(worker);
        return 0;
}
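/*
 * Worker lifetime: an idle worker sleeps for WORKER_IDLE_TIMEOUT (5s) at a
 * time and exits after an idle period, unless it is the last worker left for
 * its acct (the exit path above only triggers when acct->nr_workers > 1).
 */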
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
        struct io_worker *worker = tsk->pf_io_worker;

        if (!worker)
                return;
        if (!(worker->flags & IO_WORKER_F_UP))
                return;
        if (worker->flags & IO_WORKER_F_RUNNING)
                return;
        worker->flags |= IO_WORKER_F_RUNNING;
        io_wqe_inc_running(worker);
}
/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
        struct io_worker *worker = tsk->pf_io_worker;

        if (!worker)
                return;
        if (!(worker->flags & IO_WORKER_F_UP))
                return;
        if (!(worker->flags & IO_WORKER_F_RUNNING))
                return;

        worker->flags &= ~IO_WORKER_F_RUNNING;

        raw_spin_lock(&worker->wqe->lock);
        io_wqe_dec_running(worker);
        raw_spin_unlock(&worker->wqe->lock);
}
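/*
 * io_wq_worker_running() and io_wq_worker_sleeping() are not called from
 * within io-wq itself: the scheduler invokes them for PF_IO_WORKER tasks when
 * such a task is woken or blocks, which is what keeps the nr_running
 * accounting in sync with actual worker activity.
 */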
static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
                               struct task_struct *tsk)
{
        tsk->pf_io_worker = worker;
        worker->task = tsk;
        set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
        tsk->flags |= PF_NO_SETAFFINITY;

        raw_spin_lock(&wqe->lock);
        hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
        list_add_tail_rcu(&worker->all_list, &wqe->all_list);
        worker->flags |= IO_WORKER_F_FREE;
        raw_spin_unlock(&wqe->lock);
        wake_up_new_task(tsk);
}
static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
        return true;
}

static inline bool io_should_retry_thread(long err)
{
        switch (err) {
        case -ERESTARTNOINTR:
        case -ERESTARTNOHAND:
                return true;
        default:
                return false;
        }
}

static void create_worker_cont(struct callback_head *cb)
{
        struct io_worker *worker;
        struct task_struct *tsk;
        struct io_wqe *wqe;

        worker = container_of(cb, struct io_worker, create_work);
        clear_bit_unlock(0, &worker->create_state);
        wqe = worker->wqe;
        tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
        if (!IS_ERR(tsk)) {
                io_init_new_worker(wqe, worker, tsk);
                io_worker_release(worker);
                return;
        } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
                struct io_wqe_acct *acct = io_wqe_get_acct(worker);

                atomic_dec(&acct->nr_running);
                raw_spin_lock(&wqe->lock);
                acct->nr_workers--;
                if (!acct->nr_workers) {
                        struct io_cb_cancel_data match = {
                                .fn             = io_wq_work_match_all,
                                .cancel_all     = true,
                        };

                        while (io_acct_cancel_pending_work(wqe, acct, &match))
                                raw_spin_lock(&wqe->lock);
                }
                raw_spin_unlock(&wqe->lock);
                io_worker_ref_put(wqe->wq);
                kfree(worker);
                return;
        }

        /* re-create attempts grab a new worker ref, drop the existing one */
        io_worker_release(worker);
        schedule_work(&worker->work);
}
static void io_workqueue_create(struct work_struct *work)
{
        struct io_worker *worker = container_of(work, struct io_worker, work);
        struct io_wqe_acct *acct = io_wqe_get_acct(worker);

        if (!io_queue_worker_create(worker, acct, create_worker_cont)) {
                clear_bit_unlock(0, &worker->create_state);
                io_worker_release(worker);
        }
}
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
        struct io_wqe_acct *acct = &wqe->acct[index];
        struct io_worker *worker;
        struct task_struct *tsk;

        __set_current_state(TASK_RUNNING);

        worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
        if (!worker) {
fail:
                atomic_dec(&acct->nr_running);
                raw_spin_lock(&wqe->lock);
                acct->nr_workers--;
                raw_spin_unlock(&wqe->lock);
                io_worker_ref_put(wq);
                return false;
        }

        refcount_set(&worker->ref, 1);
        worker->wqe = wqe;
        spin_lock_init(&worker->lock);
        init_completion(&worker->ref_done);

        if (index == IO_WQ_ACCT_BOUND)
                worker->flags |= IO_WORKER_F_BOUND;

        tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
        if (!IS_ERR(tsk)) {
                io_init_new_worker(wqe, worker, tsk);
        } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
                kfree(worker);
                goto fail;
        } else {
                INIT_WORK(&worker->work, io_workqueue_create);
                schedule_work(&worker->work);
        }

        return true;
}
/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
                                  bool (*func)(struct io_worker *, void *),
                                  void *data)
{
        struct io_worker *worker;
        bool ret = false;

        list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
                if (io_worker_get(worker)) {
                        /* no task if node is/was offline */
                        if (worker->task)
                                ret = func(worker, data);
                        io_worker_release(worker);
                        if (ret)
                                break;
                }
        }

        return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
        set_notify_signal(worker->task);
        wake_up_process(worker->task);
        return false;
}
static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
        struct io_wq *wq = wqe->wq;

        do {
                work->flags |= IO_WQ_WORK_CANCEL;
                wq->do_work(work);
                work = wq->free_work(work);
        } while (work);
}

static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
        struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
        unsigned int hash;
        struct io_wq_work *tail;

        if (!io_wq_is_hashed(work)) {
append:
                wq_list_add_tail(&work->list, &acct->work_list);
                return;
        }

        hash = io_get_work_hash(work);
        tail = wqe->hash_tail[hash];
        wqe->hash_tail[hash] = work;
        if (!tail)
                goto append;

        wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}

static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
        return work == data;
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
        struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
        unsigned work_flags = work->flags;
        bool do_create;

        /*
         * If io-wq is exiting for this task, or if the request has explicitly
         * been marked as one that should not get executed, cancel it here.
         */
        if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
            (work->flags & IO_WQ_WORK_CANCEL)) {
                io_run_cancel(work, wqe);
                return;
        }

        raw_spin_lock(&wqe->lock);
        io_wqe_insert_work(wqe, work);
        clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);

        rcu_read_lock();
        do_create = !io_wqe_activate_free_worker(wqe, acct);
        rcu_read_unlock();

        raw_spin_unlock(&wqe->lock);

        if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
            !atomic_read(&acct->nr_running))) {
                bool did_create;

                did_create = io_wqe_create_worker(wqe, acct);
                if (likely(did_create))
                        return;

                raw_spin_lock(&wqe->lock);
                /* fatal condition, failed to create the first worker */
                if (!acct->nr_workers) {
                        struct io_cb_cancel_data match = {
                                .fn             = io_wq_work_match_item,
                                .data           = work,
                                .cancel_all     = false,
                        };

                        if (io_acct_cancel_pending_work(wqe, acct, &match))
                                raw_spin_lock(&wqe->lock);
                }
                raw_spin_unlock(&wqe->lock);
        }
}
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
        struct io_wqe *wqe = wq->wqes[numa_node_id()];

        io_wqe_enqueue(wqe, work);
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
        unsigned int bit;

        bit = hash_ptr(val, IO_WQ_HASH_ORDER);
        work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
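/*
 * A sketch of typical caller usage (not part of this file): io_uring hashes
 * buffered writes on the inode, so writes to the same file are serialized
 * while writes to different files still run in parallel:
 *
 *      io_wq_hash_work(&req->work, file_inode(req->file));
 *      io_wq_enqueue(wq, &req->work);
 */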
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
        struct io_cb_cancel_data *match = data;

        /*
         * Hold the lock to avoid ->cur_work going out of scope, caller
         * may dereference the passed in work.
         */
        spin_lock(&worker->lock);
        if (worker->cur_work &&
            match->fn(worker->cur_work, match->data)) {
                set_notify_signal(worker->task);
                match->nr_running++;
        }
        spin_unlock(&worker->lock);

        return match->nr_running && !match->cancel_all;
}

static inline void io_wqe_remove_pending(struct io_wqe *wqe,
                                         struct io_wq_work *work,
                                         struct io_wq_work_node *prev)
{
        struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
        unsigned int hash = io_get_work_hash(work);
        struct io_wq_work *prev_work = NULL;

        if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
                if (prev)
                        prev_work = container_of(prev, struct io_wq_work, list);
                if (prev_work && io_get_work_hash(prev_work) == hash)
                        wqe->hash_tail[hash] = prev_work;
                else
                        wqe->hash_tail[hash] = NULL;
        }
        wq_list_del(&acct->work_list, &work->list, prev);
}
static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
                                        struct io_wqe_acct *acct,
                                        struct io_cb_cancel_data *match)
        __releases(wqe->lock)
{
        struct io_wq_work_node *node, *prev;
        struct io_wq_work *work;

        wq_list_for_each(node, prev, &acct->work_list) {
                work = container_of(node, struct io_wq_work, list);
                if (!match->fn(work, match->data))
                        continue;
                io_wqe_remove_pending(wqe, work, prev);
                raw_spin_unlock(&wqe->lock);
                io_run_cancel(work, wqe);
                match->nr_pending++;
                /* not safe to continue after unlock */
                return true;
        }

        return false;
}

static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
                                       struct io_cb_cancel_data *match)
{
        int i;
retry:
        raw_spin_lock(&wqe->lock);
        for (i = 0; i < IO_WQ_ACCT_NR; i++) {
                struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);

                if (io_acct_cancel_pending_work(wqe, acct, match)) {
                        if (match->cancel_all)
                                goto retry;
                        return;
                }
        }
        raw_spin_unlock(&wqe->lock);
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
                                       struct io_cb_cancel_data *match)
{
        rcu_read_lock();
        io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
        rcu_read_unlock();
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
                                  void *data, bool cancel_all)
{
        struct io_cb_cancel_data match = {
                .fn             = cancel,
                .data           = data,
                .cancel_all     = cancel_all,
        };
        int node;

        /*
         * First check pending list, if we're lucky we can just remove it
         * from there. CANCEL_OK means that the work is returned as-new,
         * no completion will be posted for it.
         */
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];

                io_wqe_cancel_pending_work(wqe, &match);
                if (match.nr_pending && !match.cancel_all)
                        return IO_WQ_CANCEL_OK;
        }

        /*
         * Now check if a free (going busy) or busy worker has the work
         * currently running. If we find it there, we'll return CANCEL_RUNNING
         * as an indication that we attempt to signal cancellation. The
         * completion will run normally in this case.
         */
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];

                io_wqe_cancel_running_work(wqe, &match);
                if (match.nr_running && !match.cancel_all)
                        return IO_WQ_CANCEL_RUNNING;
        }

        if (match.nr_running)
                return IO_WQ_CANCEL_RUNNING;
        if (match.nr_pending)
                return IO_WQ_CANCEL_OK;
        return IO_WQ_CANCEL_NOTFOUND;
}
static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
                            int sync, void *key)
{
        struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
        int i;

        list_del_init(&wait->entry);

        rcu_read_lock();
        for (i = 0; i < IO_WQ_ACCT_NR; i++) {
                struct io_wqe_acct *acct = &wqe->acct[i];

                if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
                        io_wqe_activate_free_worker(wqe, acct);
        }
        rcu_read_unlock();
        return 1;
}
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
        int ret, node, i;
        struct io_wq *wq;

        if (WARN_ON_ONCE(!data->free_work || !data->do_work))
                return ERR_PTR(-EINVAL);
        if (WARN_ON_ONCE(!bounded))
                return ERR_PTR(-EINVAL);

        wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
        if (!wq)
                return ERR_PTR(-ENOMEM);
        ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
        if (ret)
                goto err_wq;

        refcount_inc(&data->hash->refs);
        wq->hash = data->hash;
        wq->free_work = data->free_work;
        wq->do_work = data->do_work;

        ret = -ENOMEM;
        for_each_node(node) {
                struct io_wqe *wqe;
                int alloc_node = node;

                if (!node_online(alloc_node))
                        alloc_node = NUMA_NO_NODE;
                wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
                if (!wqe)
                        goto err;
                if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
                        goto err;
                cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
                wq->wqes[node] = wqe;
                wqe->node = alloc_node;
                wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
                wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
                                        task_rlimit(current, RLIMIT_NPROC);
                INIT_LIST_HEAD(&wqe->wait.entry);
                wqe->wait.func = io_wqe_hash_wake;
                for (i = 0; i < IO_WQ_ACCT_NR; i++) {
                        struct io_wqe_acct *acct = &wqe->acct[i];

                        acct->index = i;
                        atomic_set(&acct->nr_running, 0);
                        INIT_WQ_LIST(&acct->work_list);
                }
                wqe->wq = wq;
                raw_spin_lock_init(&wqe->lock);
                INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
                INIT_LIST_HEAD(&wqe->all_list);
        }

        wq->task = get_task_struct(data->task);
        atomic_set(&wq->worker_refs, 1);
        init_completion(&wq->worker_done);
        return wq;
err:
        io_wq_put_hash(data->hash);
        cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
        for_each_node(node) {
                if (!wq->wqes[node])
                        continue;
                free_cpumask_var(wq->wqes[node]->cpu_mask);
                kfree(wq->wqes[node]);
        }
err_wq:
        kfree(wq);
        return ERR_PTR(ret);
}
static bool io_task_work_match(struct callback_head *cb, void *data)
{
        struct io_worker *worker;

        if (cb->func != create_worker_cb && cb->func != create_worker_cont)
                return false;
        worker = container_of(cb, struct io_worker, create_work);
        return worker->wqe->wq == data;
}

void io_wq_exit_start(struct io_wq *wq)
{
        set_bit(IO_WQ_BIT_EXIT, &wq->state);
}
static void io_wq_exit_workers(struct io_wq *wq)
{
        struct callback_head *cb;
        int node;

        if (!wq->task)
                return;

        while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
                struct io_worker *worker;
                struct io_wqe_acct *acct;

                worker = container_of(cb, struct io_worker, create_work);
                acct = io_wqe_get_acct(worker);
                atomic_dec(&acct->nr_running);
                raw_spin_lock(&worker->wqe->lock);
                acct->nr_workers--;
                raw_spin_unlock(&worker->wqe->lock);
                io_worker_ref_put(wq);
                clear_bit_unlock(0, &worker->create_state);
                io_worker_release(worker);
        }

        rcu_read_lock();
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];

                io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
        }
        rcu_read_unlock();
        io_worker_ref_put(wq);
        wait_for_completion(&wq->worker_done);

        for_each_node(node) {
                spin_lock_irq(&wq->hash->wait.lock);
                list_del_init(&wq->wqes[node]->wait.entry);
                spin_unlock_irq(&wq->hash->wait.lock);
        }
        put_task_struct(wq->task);
        wq->task = NULL;
}
static void io_wq_destroy(struct io_wq *wq)
{
        int node;

        cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];
                struct io_cb_cancel_data match = {
                        .fn             = io_wq_work_match_all,
                        .cancel_all     = true,
                };

                io_wqe_cancel_pending_work(wqe, &match);
                free_cpumask_var(wqe->cpu_mask);
                kfree(wqe);
        }
        io_wq_put_hash(wq->hash);
        kfree(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
        WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

        io_wq_exit_workers(wq);
        io_wq_destroy(wq);
}
struct online_data {
        unsigned int cpu;
        bool online;
};

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
        struct online_data *od = data;

        if (od->online)
                cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
        else
                cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
        return false;
}
static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
        struct online_data od = {
                .cpu = cpu,
                .online = online
        };
        int i;

        rcu_read_lock();
        for_each_node(i)
                io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
        rcu_read_unlock();
        return 0;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

        return __io_wq_cpu_online(wq, cpu, true);
}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
        struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

        return __io_wq_cpu_online(wq, cpu, false);
}
int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
{
        int i;

        rcu_read_lock();
        for_each_node(i) {
                struct io_wqe *wqe = wq->wqes[i];

                if (mask)
                        cpumask_copy(wqe->cpu_mask, mask);
                else
                        cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
        }
        rcu_read_unlock();
        return 0;
}
/*
 * Set max number of unbounded workers, returns old value. If new_count is 0,
 * then just return the old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
        int i, node, prev = 0;

        BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
        BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
        BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);

        for (i = 0; i < 2; i++) {
                if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
                        new_count[i] = task_rlimit(current, RLIMIT_NPROC);
        }

        rcu_read_lock();
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];
                struct io_wqe_acct *acct;

                raw_spin_lock(&wqe->lock);
                for (i = 0; i < IO_WQ_ACCT_NR; i++) {
                        acct = &wqe->acct[i];
                        prev = max_t(int, acct->max_workers, prev);
                        if (new_count[i])
                                acct->max_workers = new_count[i];
                        new_count[i] = prev;
                }
                raw_spin_unlock(&wqe->lock);
        }
        rcu_read_unlock();
        return 0;
}
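/*
 * io_wq_max_workers() is the backend for the IORING_REGISTER_IOWQ_MAX_WORKERS
 * registration opcode: new_count[] carries the requested bounded/unbounded
 * limits on entry and is updated with the previous values on return.
 */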
static __init int io_wq_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
                                        io_wq_cpu_online, io_wq_cpu_offline);
        if (ret < 0)
                return ret;
        io_wq_online = ret;
        return 0;
}

subsys_initcall(io_wq_init);