// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/tracehook.h>

#include "../kernel/sched/sched.h"
#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_FIXED	= 8,	/* static idle worker */
	IO_WORKER_F_BOUND	= 16,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct completion ref_done;

	struct rcu_head rcu;
};

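/*
 * Lifetime note: each worker holds one reference on itself. io_worker_exit()
 * drops that base reference and then waits on ref_done, so temporary
 * references taken via io_worker_get() keep the worker alive until the
 * matching io_worker_release() completes ref_done.
 */
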
#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		raw_spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
};

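/*
 * All per-node queueing state lives in struct io_wqe. The embedded lock
 * protects work_list, flags, hash_tail[] and the nr_workers counts, and is
 * taken with interrupts disabled on both the queueing and worker paths.
 */
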
/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct task_struct *manager;

	struct io_wq_hash *hash;

	refcount_t refs;
	struct completion exited;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	pid_t task_pid;
};

static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match);

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];

	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;

	if (worker->flags & IO_WORKER_F_BOUND)
		return &wqe->acct[IO_WQ_ACCT_BOUND];

	return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}

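/*
 * Accounting note: IO_WQ_ACCT_BOUND covers workers running bounded work
 * (capped by the "bounded" argument to io_wq_create()), IO_WQ_ACCT_UNBOUND
 * covers unbounded work (capped by RLIMIT_NPROC, see io_wq_create()). A
 * work item selects its class with IO_WQ_WORK_UNBOUND in work->flags.
 */
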
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	unsigned flags;

	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
	wait_for_completion(&worker->ref_done);

	current->flags &= ~PF_IO_WORKER;
	flags = worker->flags;
	if (flags & IO_WORKER_F_RUNNING)
		atomic_dec(&acct->nr_running);

	raw_spin_lock_irq(&wqe->lock);
	if (flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	raw_spin_unlock_irq(&wqe->lock);

	kfree_rcu(worker, rcu);
	if (atomic_dec_and_test(&wqe->wq->worker_refs))
		complete(&wqe->wq->worker_done);
}

static inline bool io_wqe_run_queue(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	if (!wq_list_empty(&wqe->work_list) &&
	    !(wqe->flags & IO_WQE_FLAG_STALLED))
		return true;

	return false;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
	if (is_a_nulls(n))
		return false;

	worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
	if (io_worker_get(worker)) {
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	bool ret;

	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	WARN_ON_ONCE(!acct->max_workers);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret && acct->nr_workers < acct->max_workers)
		wake_up_process(wqe->wq->manager);
}

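/*
 * Note that this path never creates a worker itself: it either wakes an
 * idle one off the free_list or kicks the manager task, which does the
 * actual creation from io_wq_check_workers().
 */
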
static void io_wqe_inc_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
		io_wqe_wake_worker(wqe, acct);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist.
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		io_wqe_dec_running(worker);
		if (work_bound) {
			worker->flags |= IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
		} else {
			worker->flags &= ~IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
		}
		io_wqe_inc_running(worker);
	}
}

/*
 * No work, worker going to sleep. Move it to the freelist if it isn't
 * already there.
 */
static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

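/*
 * The hash value is stored in the upper bits of work->flags by
 * io_wq_hash_work(); this helper simply extracts it again.
 */
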
static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
{
	struct io_wq *wq = wqe->wq;

	spin_lock(&wq->hash->wait.lock);
	if (list_empty(&wqe->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wqe->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wqe->wait.entry);
		}
	}
	spin_unlock(&wq->hash->wait.lock);
}

static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;

	wq_list_for_each(node, prev, &wqe->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&wqe->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wqe->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&wqe->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		raw_spin_unlock(&wqe->lock);
		io_wait_on_hash(wqe, stall_hash);
		raw_spin_lock(&wqe->lock);
	}

	return NULL;
}

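/*
 * Serialisation of hashed work: wq->hash->map has one bit per hash bucket.
 * A bucket's bit is set here when its chain is handed to a worker and
 * cleared again in io_worker_handle_work() when the chain is done, which
 * also wakes anyone parked in io_wait_on_hash() above.
 */
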
static bool io_flush_signals(void)
{
	if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
		__set_current_state(TASK_RUNNING);
		tracehook_notify_signal();
		return true;
	}
	return false;
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	spin_lock_irq(&worker->lock);
	worker->cur_work = work;
	spin_unlock_irq(&worker->lock);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);

static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		raw_spin_unlock_irq(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				clear_bit(hash, &wq->hash->map);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
				raw_spin_lock_irq(&wqe->lock);
				wqe->flags &= ~IO_WQE_FLAG_STALLED;
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				raw_spin_unlock_irq(&wqe->lock);
			}
		} while (work);

		raw_spin_lock_irq(&wqe->lock);
	} while (1);
}

static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
	io_wqe_inc_running(worker);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task_pid);
	set_task_comm(current, buf);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
loop:
		raw_spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			io_worker_handle_work(worker);
			goto loop;
		}
		__io_worker_idle(wqe, worker);
		raw_spin_unlock_irq(&wqe->lock);
		if (io_flush_signals())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		if (ret)
			continue;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		raw_spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			raw_spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}

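/*
 * Worker threads are named "iou-wrk-<pid>" after the creating task's pid.
 * An idle worker times out after WORKER_IDLE_TIMEOUT unless it is the
 * fixed worker (the first bound worker created), which only exits when
 * the wq itself is shutting down.
 */
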
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * create one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	raw_spin_lock_irq(&worker->wqe->lock);
	io_wqe_dec_running(worker);
	raw_spin_unlock_irq(&worker->wqe->lock);
}

static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		return false;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	atomic_inc(&wq->worker_refs);

	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (IS_ERR(tsk)) {
		if (atomic_dec_and_test(&wq->worker_refs))
			complete(&wq->worker_done);
		kfree(worker);
		return false;
	}

	tsk->pf_io_worker = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, cpumask_of_node(wqe->node));
	tsk->flags |= PF_NO_SETAFFINITY;

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	raw_spin_unlock_irq(&wqe->lock);
	wake_up_new_task(tsk);
	return true;
}

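/*
 * Workers are real threads created with create_io_thread(); they are
 * pinned to the CPUs of their io_wqe's NUMA node, and PF_NO_SETAFFINITY
 * keeps userspace from changing that affinity afterwards.
 */
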
static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = &wqe->acct[index];

	if (acct->nr_workers && test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state))
		return false;
	/* if we have available workers or no work, no need */
	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
		return false;
	return acct->nr_workers < acct->max_workers;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}

static void io_wq_check_workers(struct io_wq *wq)
{
	int node;

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		bool fork_worker[2] = { false, false };

		if (!node_online(node))
			continue;

		raw_spin_lock_irq(&wqe->lock);
		if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
			fork_worker[IO_WQ_ACCT_BOUND] = true;
		if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
			fork_worker[IO_WQ_ACCT_UNBOUND] = true;
		raw_spin_unlock_irq(&wqe->lock);
		if (fork_worker[IO_WQ_ACCT_BOUND])
			create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
		if (fork_worker[IO_WQ_ACCT_UNBOUND])
			create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
	}
}

static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}

static void io_wq_cancel_pending(struct io_wq *wq)
{
	struct io_cb_cancel_data match = {
		.fn		= io_wq_work_match_all,
		.cancel_all	= true,
	};
	int node;

	for_each_node(node)
		io_wqe_cancel_pending_work(wq->wqes[node], &match);
}

/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
	struct io_wq *wq = data;
	char buf[TASK_COMM_LEN];
	int node;

	snprintf(buf, sizeof(buf), "iou-mgr-%d", wq->task_pid);
	set_task_comm(current, buf);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		io_wq_check_workers(wq);
		schedule_timeout(HZ);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			set_bit(IO_WQ_BIT_EXIT, &wq->state);
		}
	} while (!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_check_workers(wq);

	rcu_read_lock();
	for_each_node(node)
		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
	rcu_read_unlock();

	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
	wait_for_completion(&wq->worker_done);

	spin_lock_irq(&wq->hash->wait.lock);
	for_each_node(node)
		list_del_init(&wq->wqes[node]->wait.entry);
	spin_unlock_irq(&wq->hash->wait.lock);

	io_wq_cancel_pending(wq);
	complete(&wq->exited);
	do_exit(0);
}

static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}

static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &wqe->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
}

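/*
 * Hashed work is kept adjacent in work_list: hash_tail[hash] always points
 * at the last queued item with that hash, so new items are inserted right
 * after it and io_get_next_work() can splice the whole [work, tail] run
 * out in one go.
 */
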
static int io_wq_fork_manager(struct io_wq *wq)
{
	struct task_struct *tsk;

	if (wq->manager)
		return 0;

	WARN_ON_ONCE(test_bit(IO_WQ_BIT_EXIT, &wq->state));

	init_completion(&wq->worker_done);
	atomic_set(&wq->worker_refs, 1);
	tsk = create_io_thread(io_wq_manager, wq, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		wq->manager = get_task_struct(tsk);
		wake_up_new_task(tsk);
		return 0;
	}

	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);

	return PTR_ERR(tsk);
}

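/*
 * The manager is normally started from io_wq_create(); io_wqe_enqueue()
 * calls this again only to retry in case that earlier creation failed
 * (e.g. after exec), hence the early return when a manager already exists.
 */
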
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	int work_flags;
	unsigned long flags;

	/* Can only happen if manager creation fails after exec */
	if (io_wq_fork_manager(wqe->wq) ||
	    test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
		io_run_cancel(work, wqe);
		return;
	}

	work_flags = work->flags;
	raw_spin_lock_irqsave(&wqe->lock, flags);
	io_wqe_insert_work(wqe, work);
	wqe->flags &= ~IO_WQE_FLAG_STALLED;
	raw_spin_unlock_irqrestore(&wqe->lock, flags);

	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))
		io_wqe_wake_worker(wqe, acct);
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}

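/*
 * Illustrative use (not a call site in this file; names are hypothetical):
 * a caller that wants writes to one file serialised would hash the work by
 * its inode before queueing it, roughly:
 *
 *	io_wq_hash_work(&req->work, file_inode(req->file));
 *	io_wq_enqueue(wq, &req->work);
 *
 * See the io_uring core for the real call sites.
 */
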
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;
	unsigned long flags;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock_irqsave(&worker->lock, flags);
	if (worker->cur_work &&
	    match->fn(worker->cur_work, match->data)) {
		set_notify_signal(worker->task);
		match->nr_running++;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return match->nr_running && !match->cancel_all;
}

static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}

	wq_list_del(&wqe->work_list, &work->list, prev);
}

static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;

retry:
	raw_spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock_irqrestore(&wqe->lock, flags);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		if (!match->cancel_all)
			return;

		/* not safe to continue after unlock */
		goto retry;
	}
	raw_spin_unlock_irqrestore(&wqe->lock, flags);
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}

static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			    int sync, void *key)
{
	struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
	int ret;

	list_del_init(&wait->entry);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret)
		wake_up_process(wqe->wq->manager);

	return 1;
}

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes)
		goto err_wq;

	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wqes;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wait.func = io_wqe_hash_wake;
		INIT_LIST_HEAD(&wqe->wait.entry);
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	wq->task_pid = current->pid;
	init_completion(&wq->exited);
	refcount_set(&wq->refs, 1);

	ret = io_wq_fork_manager(wq);
	if (!ret)
		return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node)
		kfree(wq->wqes[node]);
err_wqes:
	kfree(wq->wqes);
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}

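/*
 * Setup sketch (illustrative, not a call site in this file): the creator
 * fills in an io_wq_data with the shared hash state and the two callbacks,
 * then caps the bounded worker count:
 *
 *	struct io_wq_data data = {
 *		.hash		= hash,
 *		.free_work	= my_free_work,
 *		.do_work	= my_do_work,
 *	};
 *	struct io_wq *wq = io_wq_create(bounded, &data);
 *
 * "my_free_work"/"my_do_work" are placeholder names; io_uring supplies its
 * own callbacks here.
 */
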
static void io_wq_destroy_manager(struct io_wq *wq)
{
	if (wq->manager) {
		wake_up_process(wq->manager);
		wait_for_completion(&wq->exited);
		put_task_struct(wq->manager);
		wq->manager = NULL;
	}
}

static void io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	io_wq_destroy_manager(wq);

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_cb_cancel_data match = {
			.fn		= io_wq_work_match_all,
			.cancel_all	= true,
		};
		io_wqe_cancel_pending_work(wqe, &match);
		kfree(wqe);
	}
	io_wq_put_hash(wq->hash);
	kfree(wq->wqes);
	kfree(wq);
}

void io_wq_put(struct io_wq *wq)
{
	if (refcount_dec_and_test(&wq->refs))
		io_wq_destroy(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	io_wq_destroy_manager(wq);
	io_wq_put(wq);
}

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct task_struct *task = worker->task;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(task, &rf);
	do_set_cpus_allowed(task, cpumask_of_node(worker->wqe->node));
	task->flags |= PF_NO_SETAFFINITY;
	task_rq_unlock(rq, task, &rf);
	return false;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL);
	rcu_read_unlock();
	return 0;
}

static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, NULL);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}

subsys_initcall(io_wq_init);