// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
#include <linux/fs_struct.h>
#include <linux/task_work.h>
#include <linux/blk-cgroup.h>
#include <linux/audit.h>
#include <linux/cpu.h>

#include "../kernel/sched/sched.h"
#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)
enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_FIXED	= 8,	/* static idle worker */
	IO_WORKER_F_BOUND	= 16,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
	IO_WQ_BIT_CANCEL	= 1,	/* cancel work on list */
	IO_WQ_BIT_ERROR		= 2,	/* error on setup */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};
/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct rcu_head rcu;
	struct mm_struct *mm;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
	const struct cred *cur_creds;
	const struct cred *saved_creds;
	struct files_struct *restore_files;
	struct nsproxy *restore_nsproxy;
	struct fs_struct *restore_fs;
};
#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};
/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		raw_spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned long hash_map;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
};
/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct task_struct *manager;
	struct user_struct *user;
	refcount_t refs;
	struct completion done;

	struct hlist_node cpuhp_node;

	refcount_t use_refs;
};

static enum cpuhp_state io_wq_online;
static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		wake_up_process(worker->task);
}
/*
 * Note: drops the wqe->lock if returning true! The caller must re-acquire
 * the lock in that case. Some callers need to restart handling if this
 * happens, so we can't just re-acquire the lock on behalf of the caller.
 */
static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
{
	bool dropped_lock = false;

	if (worker->saved_creds) {
		revert_creds(worker->saved_creds);
		worker->cur_creds = worker->saved_creds = NULL;
	}

	if (current->files != worker->restore_files) {
		__acquire(&wqe->lock);
		raw_spin_unlock_irq(&wqe->lock);
		dropped_lock = true;

		task_lock(current);
		current->files = worker->restore_files;
		current->nsproxy = worker->restore_nsproxy;
		task_unlock(current);
	}

	if (current->fs != worker->restore_fs)
		current->fs = worker->restore_fs;

	/*
	 * If we have an active mm, we need to drop the wq lock before unusing
	 * it. If we do, return true and let the caller retry the idle loop.
	 */
	if (worker->mm) {
		if (!dropped_lock) {
			__acquire(&wqe->lock);
			raw_spin_unlock_irq(&wqe->lock);
			dropped_lock = true;
		}
		__set_current_state(TASK_RUNNING);
		kthread_unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}

#ifdef CONFIG_BLK_CGROUP
	if (worker->blkcg_css) {
		kthread_associate_blkcg(NULL);
		worker->blkcg_css = NULL;
	}
#endif
	if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	return dropped_lock;
}
static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];

	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
						  struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_BOUND)
		return &wqe->acct[IO_WQ_ACCT_BOUND];

	return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}
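
/*
 * A worker is going away: wait for our last reference to drop, undo the
 * running and unbound-process accounting, unhook from the free and all
 * lists, and drop our reference on the wq (completing ->done if we were
 * the last one).
 */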
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	/*
	 * If we're not at zero, someone else is holding a brief reference
	 * to the worker. Wait for that to go away.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!refcount_dec_and_test(&worker->ref))
		schedule();
	__set_current_state(TASK_RUNNING);

	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	if (worker->flags & IO_WORKER_F_RUNNING)
		atomic_dec(&acct->nr_running);
	if (!(worker->flags & IO_WORKER_F_BOUND))
		atomic_dec(&wqe->wq->user->processes);
	worker->flags = 0;
	preempt_enable();

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	if (__io_worker_unuse(wqe, worker)) {
		__release(&wqe->lock);
		raw_spin_lock_irq(&wqe->lock);
	}
	acct->nr_workers--;
	raw_spin_unlock_irq(&wqe->lock);

	kfree_rcu(worker, rcu);
	if (refcount_dec_and_test(&wqe->wq->refs))
		complete(&wqe->wq->done);
}
static inline bool io_wqe_run_queue(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	if (!wq_list_empty(&wqe->work_list) &&
	    !(wqe->flags & IO_WQE_FLAG_STALLED))
		return true;

	return false;
}
/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
	if (is_a_nulls(n))
		return false;

	worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
	if (io_worker_get(worker)) {
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}
/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	bool ret;

	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	WARN_ON_ONCE(!acct->max_workers);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret && acct->nr_workers < acct->max_workers)
		wake_up_process(wqe->wq->manager);
}
static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
		io_wqe_wake_worker(wqe, acct);
}
static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
{
	allow_kernel_signal(SIGINT);

	current->flags |= PF_IO_WORKER;

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
	worker->restore_files = current->files;
	worker->restore_nsproxy = current->nsproxy;
	worker->restore_fs = current->fs;
	io_wqe_inc_running(wqe, worker);
}
/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		io_wqe_dec_running(wqe, worker);
		if (work_bound) {
			worker->flags |= IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
			atomic_dec(&wqe->wq->user->processes);
		} else {
			worker->flags &= ~IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
			atomic_inc(&wqe->wq->user->processes);
		}
		io_wqe_inc_running(wqe, worker);
	}
}
/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}

	return __io_worker_unuse(wqe, worker);
}
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}
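
/*
 * Grab the next piece of work that is allowed to run. Unhashed work can run
 * any time; hashed work only runs if nothing with the same hash is already
 * executing, in which case the whole [work, tail] hash chain is spliced out
 * of the list in one go.
 */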
static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int hash;

	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&wqe->work_list, node, prev);
			return work;
		}

		/* hashed, can run if not already running */
		hash = io_get_work_hash(work);
		if (!(wqe->hash_map & BIT(hash))) {
			wqe->hash_map |= BIT(hash);
			/* all items with this hash lie in [work, tail] */
			tail = wqe->hash_tail[hash];
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&wqe->work_list, &tail->list, prev);
			return work;
		}
	}

	return NULL;
}
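
/*
 * Switch the worker kthread over to the mm attached to the work item,
 * dropping any previously attached mm first. If the mm can't be pinned,
 * mark the work cancelled so it fails instead of running with the wrong
 * address space.
 */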
static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
{
	if (worker->mm) {
		kthread_unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}

	if (mmget_not_zero(work->identity->mm)) {
		kthread_use_mm(work->identity->mm);
		worker->mm = work->identity->mm;
		return;
	}

	/* failed grabbing mm, ensure work gets cancelled */
	work->flags |= IO_WQ_WORK_CANCEL;
}
static inline void io_wq_switch_blkcg(struct io_worker *worker,
				      struct io_wq_work *work)
{
#ifdef CONFIG_BLK_CGROUP
	if (!(work->flags & IO_WQ_WORK_BLKCG))
		return;
	if (work->identity->blkcg_css != worker->blkcg_css) {
		kthread_associate_blkcg(work->identity->blkcg_css);
		worker->blkcg_css = work->identity->blkcg_css;
	}
#endif
}
static void io_wq_switch_creds(struct io_worker *worker,
			       struct io_wq_work *work)
{
	const struct cred *old_creds = override_creds(work->identity->creds);

	worker->cur_creds = work->identity->creds;
	if (worker->saved_creds)
		put_cred(old_creds); /* creds set by previous switch */
	else
		worker->saved_creds = old_creds;
}
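
/*
 * Take on the identity (files, fs, mm, creds, fsize limit, blkcg, audit
 * info) that the submitting task attached to this work item, so the request
 * executes as if issued from that task.
 */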
static void io_impersonate_work(struct io_worker *worker,
				struct io_wq_work *work)
{
	if ((work->flags & IO_WQ_WORK_FILES) &&
	    current->files != work->identity->files) {
		task_lock(current);
		current->files = work->identity->files;
		current->nsproxy = work->identity->nsproxy;
		task_unlock(current);
	}
	if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs)
		current->fs = work->identity->fs;
	if ((work->flags & IO_WQ_WORK_MM) && work->identity->mm != worker->mm)
		io_wq_switch_mm(worker, work);
	if ((work->flags & IO_WQ_WORK_CREDS) &&
	    worker->cur_creds != work->identity->creds)
		io_wq_switch_creds(worker, work);
	if (work->flags & IO_WQ_WORK_FSIZE)
		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->identity->fsize;
	else if (current->signal->rlim[RLIMIT_FSIZE].rlim_cur != RLIM_INFINITY)
		current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	io_wq_switch_blkcg(worker, work);
#ifdef CONFIG_AUDIT
	current->loginuid = work->identity->loginuid;
	current->sessionid = work->identity->sessionid;
#endif
}
static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		/* flush pending signals before assigning new work */
		if (signal_pending(current))
			flush_signals(current);
		cond_resched();
	}

#ifdef CONFIG_AUDIT
	current->loginuid = KUIDT_INIT(AUDIT_UID_UNSET);
	current->sessionid = AUDIT_SID_UNSET;
#endif

	spin_lock_irq(&worker->lock);
	worker->cur_work = work;
	spin_unlock_irq(&worker->lock);
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
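
/*
 * Main work-processing loop for a worker: pull work off the wqe list,
 * impersonate the submitter, run the whole dependent link, and clear the
 * hash/stall state once a hashed chain completes.
 */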
static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *work;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		raw_spin_unlock_irq(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *old_work, *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);
			io_impersonate_work(worker, work);
			/*
			 * OK to set IO_WQ_WORK_CANCEL even for uncancellable
			 * work, the worker function will do the right thing.
			 */
			if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
				work->flags |= IO_WQ_WORK_CANCEL;

			old_work = work;
			linked = wq->do_work(work);

			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			wq->free_work(old_work);

			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				raw_spin_lock_irq(&wqe->lock);
				wqe->hash_map &= ~BIT_ULL(hash);
				wqe->flags &= ~IO_WQE_FLAG_STALLED;
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				raw_spin_unlock_irq(&wqe->lock);
			}
		} while (work);

		raw_spin_lock_irq(&wqe->lock);
	} while (1);
}
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	io_worker_start(wqe, worker);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		set_current_state(TASK_INTERRUPTIBLE);
loop:
		raw_spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			__set_current_state(TASK_RUNNING);
			io_worker_handle_work(worker);
			goto loop;
		}
		/* drops the lock on success, retry */
		if (__io_worker_idle(wqe, worker)) {
			__release(&wqe->lock);
			goto loop;
		}
		raw_spin_unlock_irq(&wqe->lock);
		if (signal_pending(current))
			flush_signals(current);
		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
			continue;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		raw_spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			raw_spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(wqe, worker);
}
/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * create one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	raw_spin_lock_irq(&wqe->lock);
	io_wqe_dec_running(wqe, worker);
	raw_spin_unlock_irq(&wqe->lock);
}
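
/*
 * Create a new worker kthread for the given accounting class and node, put
 * it on the free list, and wake it up. Returns false if allocation or
 * thread creation fails.
 */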
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		return false;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);

	worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
				"io_wqe_worker-%d/%d", index, wqe->node);
	if (IS_ERR(worker->task)) {
		kfree(worker);
		return false;
	}
	kthread_bind_mask(worker->task, cpumask_of_node(wqe->node));

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	raw_spin_unlock_irq(&wqe->lock);

	if (index == IO_WQ_ACCT_UNBOUND)
		atomic_inc(&wq->user->processes);

	refcount_inc(&wq->refs);
	wake_up_process(worker->task);
	return true;
}
static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = &wqe->acct[index];

	/* if we have available workers or no work, no need */
	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
		return false;
	return acct->nr_workers < acct->max_workers;
}
static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
{
	send_sig(SIGINT, worker->task, 1);
	return false;
}
/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	wake_up_process(worker->task);
	return false;
}
/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
	struct io_wq *wq = data;
	int node;

	/* create fixed workers */
	refcount_set(&wq->refs, 1);
	for_each_node(node) {
		if (!node_online(node))
			continue;
		if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
			continue;
		set_bit(IO_WQ_BIT_ERROR, &wq->state);
		set_bit(IO_WQ_BIT_EXIT, &wq->state);
		goto out;
	}

	complete(&wq->done);

	while (!kthread_should_stop()) {
		if (current->task_works)
			task_work_run();

		for_each_node(node) {
			struct io_wqe *wqe = wq->wqes[node];
			bool fork_worker[2] = { false, false };

			if (!node_online(node))
				continue;

			raw_spin_lock_irq(&wqe->lock);
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
				fork_worker[IO_WQ_ACCT_BOUND] = true;
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
			raw_spin_unlock_irq(&wqe->lock);
			if (fork_worker[IO_WQ_ACCT_BOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
			if (fork_worker[IO_WQ_ACCT_UNBOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (current->task_works)
		task_work_run();

out:
	if (refcount_dec_and_test(&wq->refs)) {
		complete(&wq->done);
		return 0;
	}
	/* if ERROR is set and we get here, we have workers to wake */
	if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
		rcu_read_lock();
		for_each_node(node)
			io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
		rcu_read_unlock();
	}
	return 0;
}
static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
			    struct io_wq_work *work)
{
	bool free_worker;

	if (!(work->flags & IO_WQ_WORK_UNBOUND))
		return true;
	if (atomic_read(&acct->nr_running))
		return true;

	rcu_read_lock();
	free_worker = !hlist_nulls_empty(&wqe->free_list);
	rcu_read_unlock();
	if (free_worker)
		return true;

	if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
	    !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
		return false;

	return true;
}
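
/*
 * Run a work item (and its whole dependent link) in cancelled mode,
 * freeing each item as it completes.
 */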
static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *old_work = work;

		work->flags |= IO_WQ_WORK_CANCEL;
		work = wq->do_work(work);
		wq->free_work(old_work);
	} while (work);
}
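
/*
 * Append work to the wqe list. Hashed work is chained after the current
 * tail for its hash bucket so that items with the same hash stay
 * contiguous and can be spliced out together by io_get_next_work().
 */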
static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &wqe->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	int work_flags;
	unsigned long flags;

	/*
	 * Do early check to see if we need a new unbound worker, and if we do,
	 * if we're allowed to do so. This isn't 100% accurate as there's a
	 * gap between this check and incrementing the value, but that's OK.
	 * It's close enough to not be an issue, fork() has the same delay.
	 */
	if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
		io_run_cancel(work, wqe);
		return;
	}

	work_flags = work->flags;
	raw_spin_lock_irqsave(&wqe->lock, flags);
	io_wqe_insert_work(wqe, work);
	wqe->flags &= ~IO_WQE_FLAG_STALLED;
	raw_spin_unlock_irqrestore(&wqe->lock, flags);

	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))
		io_wqe_wake_worker(wqe, acct);
}
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}
/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
void io_wq_cancel_all(struct io_wq *wq)
{
	int node;

	set_bit(IO_WQ_BIT_CANCEL, &wq->state);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
	}
	rcu_read_unlock();
}
{
960 static bool io_wq_worker_cancel(struct io_worker
*worker
, void *data
)
962 struct io_cb_cancel_data
*match
= data
;
966 * Hold the lock to avoid ->cur_work going out of scope, caller
967 * may dereference the passed in work.
969 spin_lock_irqsave(&worker
->lock
, flags
);
970 if (worker
->cur_work
&&
971 !(worker
->cur_work
->flags
& IO_WQ_WORK_NO_CANCEL
) &&
972 match
->fn(worker
->cur_work
, match
->data
)) {
973 send_sig(SIGINT
, worker
->task
, 1);
976 spin_unlock_irqrestore(&worker
->lock
, flags
);
978 return match
->nr_running
&& !match
->cancel_all
;
static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}
	wq_list_del(&wqe->work_list, &work->list, prev);
}
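
/*
 * Cancel work still sitting on the pending list. Each match is removed
 * under the lock, then run in cancelled mode with the lock dropped; a
 * single match returns immediately, while cancel_all restarts the scan
 * from the top since the list may have changed.
 */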
static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;

retry:
	raw_spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock_irqrestore(&wqe->lock, flags);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		if (!match->cancel_all)
			return;

		/* not safe to continue after unlock */
		goto retry;
	}
	raw_spin_unlock_irqrestore(&wqe->lock, flags);
}
static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
{
	return work == data;
}

enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
	return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
}
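
/*
 * Set up a new io_wq: allocate the per-node wqe structures, register for
 * CPU hotplug callbacks, and start the manager thread, which in turn
 * creates the fixed bound workers.
 */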
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes)
		goto err_wq;

	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wqes;

	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	/* caller must already hold a reference to this */
	wq->user = data->user;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		if (wq->user) {
			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		}
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	init_completion(&wq->done);

	wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
	if (!IS_ERR(wq->manager)) {
		wake_up_process(wq->manager);
		wait_for_completion(&wq->done);
		if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
			ret = -ENOMEM;
			goto err;
		}
		refcount_set(&wq->use_refs, 1);
		reinit_completion(&wq->done);
		return wq;
	}

	ret = PTR_ERR(wq->manager);
	complete(&wq->done);
err:
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node)
		kfree(wq->wqes[node]);
err_wqes:
	kfree(wq->wqes);
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
{
	if (data->free_work != wq->free_work || data->do_work != wq->do_work)
		return false;

	return refcount_inc_not_zero(&wq->use_refs);
}
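
/*
 * Tear down an io_wq: stop the manager, wake all workers so they notice
 * IO_WQ_BIT_EXIT and exit, wait for the final reference to be dropped,
 * then free the per-node structures.
 */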
static void __io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (wq->manager)
		kthread_stop(wq->manager);

	rcu_read_lock();
	for_each_node(node)
		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
	rcu_read_unlock();

	wait_for_completion(&wq->done);

	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
}
void io_wq_destroy(struct io_wq *wq)
{
	if (refcount_dec_and_test(&wq->use_refs))
		__io_wq_destroy(wq);
}
struct task_struct *io_wq_get_task(struct io_wq *wq)
{
	return wq->manager;
}
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct task_struct *task = worker->task;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(task, &rf);
	do_set_cpus_allowed(task, cpumask_of_node(worker->wqe->node));
	task->flags |= PF_NO_SETAFFINITY;
	task_rq_unlock(rq, task, &rf);
	return false;
}
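
/*
 * CPU hotplug callback: when a CPU comes online, re-apply each worker's
 * NUMA node affinity mask.
 */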
static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL);
	rcu_read_unlock();
	return 0;
}
static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, NULL);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}

subsys_initcall(io_wq_init);