// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
#include <linux/fs_struct.h>
#include <linux/task_work.h>

#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_EXITING	= 8,	/* worker exiting */
	IO_WORKER_F_FIXED	= 16,	/* static idle worker */
	IO_WORKER_F_BOUND	= 32,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
	IO_WQ_BIT_CANCEL	= 1,	/* cancel work on list */
	IO_WQ_BIT_ERROR		= 2,	/* error on setup */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct rcu_head rcu;
	struct mm_struct *mm;
	const struct cred *cur_creds;
	const struct cred *saved_creds;
	struct files_struct *restore_files;
	struct fs_struct *restore_fs;
};

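/*
 * Hashed work is tracked in a per-wqe bitmap (io_wqe->hash_map), a single
 * unsigned long, so the bucket count is sized to the word size: 64 buckets
 * on 64-bit, 32 on 32-bit.
 */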
#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

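/*
 * Bound work (IO_WQ_WORK_UNBOUND not set) and unbound work are accounted
 * separately: bound workers are capped by the 'bounded' argument given to
 * io_wq_create(), while unbound workers are capped by RLIMIT_NPROC and
 * charged against the submitting user's process count.
 */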
struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned long hash_map;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
};

/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct task_struct *manager;
	struct user_struct *user;
	refcount_t refs;
	struct completion done;

	refcount_t use_refs;
};

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

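/*
 * Pairs with io_worker_exit(): an exiting worker sleeps until the last
 * reference is dropped, so the final put must wake it up.
 */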
static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		wake_up_process(worker->task);
}

/*
 * Note: drops the wqe->lock if returning true! The caller must re-acquire
 * the lock in that case. Some callers need to restart handling if this
 * happens, so we can't just re-acquire the lock on behalf of the caller.
 */
static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
{
	bool dropped_lock = false;

	if (worker->saved_creds) {
		revert_creds(worker->saved_creds);
		worker->cur_creds = worker->saved_creds = NULL;
	}

	if (current->files != worker->restore_files) {
		__acquire(&wqe->lock);
		spin_unlock_irq(&wqe->lock);
		dropped_lock = true;

		task_lock(current);
		current->files = worker->restore_files;
		task_unlock(current);
	}

	if (current->fs != worker->restore_fs)
		current->fs = worker->restore_fs;

	/*
	 * If we have an active mm, we need to drop the wq lock before unusing
	 * it. If we do, return true and let the caller retry the idle loop.
	 */
	if (worker->mm) {
		if (!dropped_lock) {
			__acquire(&wqe->lock);
			spin_unlock_irq(&wqe->lock);
			dropped_lock = true;
		}
		__set_current_state(TASK_RUNNING);
		kthread_unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}

	return dropped_lock;
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];

	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
						  struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_BOUND)
		return &wqe->acct[IO_WQ_ACCT_BOUND];

	return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}

static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
	unsigned nr_workers;

	/*
	 * If we're not at zero, someone else is holding a brief reference
	 * to the worker. Wait for that to go away.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!refcount_dec_and_test(&worker->ref))
		schedule();
	__set_current_state(TASK_RUNNING);

	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	if (worker->flags & IO_WORKER_F_RUNNING)
		atomic_dec(&acct->nr_running);
	if (!(worker->flags & IO_WORKER_F_BOUND))
		atomic_dec(&wqe->wq->user->processes);
	worker->flags = 0;
	preempt_enable();

	spin_lock_irq(&wqe->lock);
	hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	if (__io_worker_unuse(wqe, worker)) {
		__release(&wqe->lock);
		spin_lock_irq(&wqe->lock);
	}
	acct->nr_workers--;
	nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
	spin_unlock_irq(&wqe->lock);

	/* all workers gone, wq exit can proceed */
	if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
		complete(&wqe->wq->done);

	kfree_rcu(worker, rcu);
}

static inline bool io_wqe_run_queue(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	if (!wq_list_empty(&wqe->work_list) &&
	    !(wqe->flags & IO_WQE_FLAG_STALLED))
		return true;
	return false;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
	if (is_a_nulls(n))
		return false;

	worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
	if (io_worker_get(worker)) {
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	bool ret;

	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	WARN_ON_ONCE(!acct->max_workers);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret && acct->nr_workers < acct->max_workers)
		wake_up_process(wqe->wq->manager);
}

static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
		io_wqe_wake_worker(wqe, acct);
}

static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
{
	allow_kernel_signal(SIGINT);

	current->flags |= PF_IO_WORKER;

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
	worker->restore_files = current->files;
	worker->restore_fs = current->fs;
	io_wqe_inc_running(wqe, worker);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		io_wqe_dec_running(wqe, worker);
		if (work_bound) {
			worker->flags |= IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
			atomic_dec(&wqe->wq->user->processes);
		} else {
			worker->flags &= ~IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
			atomic_inc(&wqe->wq->user->processes);
		}
		io_wqe_inc_running(wqe, worker);
	}
}

/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}

	return __io_worker_unuse(wqe, worker);
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

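/*
 * Return the next runnable work item, if any. Unhashed work can run right
 * away. For hashed work, only one chain per hash bucket may run at a time:
 * if the bucket is idle, mark it busy and splice the whole contiguous
 * [work, tail] run of same-hash items out of the list in one go.
 */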
static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int hash;

	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&wqe->work_list, node, prev);
			return work;
		}

		/* hashed, can run if not already running */
		hash = io_get_work_hash(work);
		if (!(wqe->hash_map & BIT(hash))) {
			wqe->hash_map |= BIT(hash);
			/* all items with this hash lie in [work, tail] */
			tail = wqe->hash_tail[hash];
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&wqe->work_list, &tail->list, prev);
			return work;
		}
	}

	return NULL;
}

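/*
 * Adopt the mm of the task that queued this work, so that memory accesses
 * (e.g. copy_to_user()) resolve in the submitter's address space. Any
 * previously adopted mm is dropped first.
 */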
static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
{
	if (worker->mm) {
		kthread_unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}
	if (!work->mm)
		return;

	if (mmget_not_zero(work->mm)) {
		kthread_use_mm(work->mm);
		worker->mm = work->mm;
		/* hang on to this mm */
		work->mm = NULL;
		return;
	}

	/* failed grabbing mm, ensure work gets cancelled */
	work->flags |= IO_WQ_WORK_CANCEL;
}

static void io_wq_switch_creds(struct io_worker *worker,
			       struct io_wq_work *work)
{
	const struct cred *old_creds = override_creds(work->creds);

	worker->cur_creds = work->creds;
	if (worker->saved_creds)
		put_cred(old_creds); /* creds set by previous switch */
	else
		worker->saved_creds = old_creds;
}

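/*
 * Take on the identity of the task that submitted this work: its open file
 * table, fs context, mm, credentials and RLIMIT_FSIZE. Most of this is
 * undone by __io_worker_unuse() when the worker goes idle or exits.
 */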
static void io_impersonate_work(struct io_worker *worker,
				struct io_wq_work *work)
{
	if (work->files && current->files != work->files) {
		task_lock(current);
		current->files = work->files;
		task_unlock(current);
	}
	if (work->fs && current->fs != work->fs)
		current->fs = work->fs;
	if (work->mm != worker->mm)
		io_wq_switch_mm(worker, work);
	if (worker->cur_creds != work->creds)
		io_wq_switch_creds(worker, work);
	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->fsize;
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		/* flush pending signals before assigning new work */
		if (signal_pending(current))
			flush_signals(current);
		cond_resched();
	}

	spin_lock_irq(&worker->lock);
	worker->cur_work = work;
	spin_unlock_irq(&worker->lock);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);

static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *work;
		unsigned int hash;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		spin_unlock_irq(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *old_work, *next_hashed, *linked;

			next_hashed = wq_next_work(work);
			io_impersonate_work(worker, work);
			/*
			 * OK to set IO_WQ_WORK_CANCEL even for uncancellable
			 * work, the worker function will do the right thing.
			 */
			if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
				work->flags |= IO_WQ_WORK_CANCEL;

			hash = io_get_work_hash(work);
			old_work = work;
			linked = wq->do_work(work);

			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			wq->free_work(old_work);

			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				spin_lock_irq(&wqe->lock);
				wqe->hash_map &= ~BIT_ULL(hash);
				wqe->flags &= ~IO_WQE_FLAG_STALLED;
				/* dependent work is not hashed */
				hash = -1U;
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				spin_unlock_irq(&wqe->lock);
			}
		} while (work);

		spin_lock_irq(&wqe->lock);
	} while (1);
}

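/*
 * Main loop for a worker thread: run queued work while there is any, then
 * park on the free list and sleep. A worker that stays idle past
 * WORKER_IDLE_TIMEOUT exits, unless it is the fixed worker for its node.
 */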
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	io_worker_start(wqe, worker);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		set_current_state(TASK_INTERRUPTIBLE);
loop:
		spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			__set_current_state(TASK_RUNNING);
			io_worker_handle_work(worker);
			goto loop;
		}
		/* drops the lock on success, retry */
		if (__io_worker_idle(wqe, worker)) {
			__release(&wqe->lock);
			goto loop;
		}
		spin_unlock_irq(&wqe->lock);
		if (signal_pending(current))
			flush_signals(current);
		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
			continue;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(wqe, worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * set one up.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	spin_lock_irq(&wqe->lock);
	io_wqe_dec_running(wqe, worker);
	spin_unlock_irq(&wqe->lock);
}

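/*
 * Spawn a new worker kthread for the given node and accounting class. The
 * first bound worker on a node becomes the "fixed" worker, which never
 * exits on idle timeout while the wq is alive.
 */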
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		return false;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);

	worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
				"io_wqe_worker-%d/%d", index, wqe->node);
	if (IS_ERR(worker->task)) {
		kfree(worker);
		return false;
	}

	spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	spin_unlock_irq(&wqe->lock);

	if (index == IO_WQ_ACCT_UNBOUND)
		atomic_inc(&wq->user->processes);

	wake_up_process(worker->task);
	return true;
}

static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = &wqe->acct[index];

	/* if we have available workers or no work, no need */
	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
		return false;
	return acct->nr_workers < acct->max_workers;
}

/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
	struct io_wq *wq = data;
	int workers_to_create = num_possible_nodes();
	int node;

	/* create fixed workers */
	refcount_set(&wq->refs, workers_to_create);
	for_each_node(node) {
		if (!node_online(node))
			continue;
		if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
			goto err;
		workers_to_create--;
	}

	while (workers_to_create--)
		refcount_dec(&wq->refs);

	complete(&wq->done);

	while (!kthread_should_stop()) {
		if (current->task_works)
			task_work_run();

		for_each_node(node) {
			struct io_wqe *wqe = wq->wqes[node];
			bool fork_worker[2] = { false, false };

			if (!node_online(node))
				continue;

			spin_lock_irq(&wqe->lock);
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
				fork_worker[IO_WQ_ACCT_BOUND] = true;
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
			spin_unlock_irq(&wqe->lock);
			if (fork_worker[IO_WQ_ACCT_BOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
			if (fork_worker[IO_WQ_ACCT_UNBOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (current->task_works)
		task_work_run();

	return 0;
err:
	set_bit(IO_WQ_BIT_ERROR, &wq->state);
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (refcount_sub_and_test(workers_to_create, &wq->refs))
		complete(&wq->done);
	return 0;
}
752
c5def4ab
JA
753static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
754 struct io_wq_work *work)
755{
756 bool free_worker;
757
758 if (!(work->flags & IO_WQ_WORK_UNBOUND))
759 return true;
760 if (atomic_read(&acct->nr_running))
761 return true;
762
763 rcu_read_lock();
021d1cdd 764 free_worker = !hlist_nulls_empty(&wqe->free_list);
c5def4ab
JA
765 rcu_read_unlock();
766 if (free_worker)
767 return true;
768
769 if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
770 !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
771 return false;
772
773 return true;
774}
775
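/*
 * Cancel this work item in place: run it through the normal handler with
 * IO_WQ_WORK_CANCEL set, then repeat for any dependent work the handler
 * hands back.
 */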
static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *old_work = work;

		work->flags |= IO_WQ_WORK_CANCEL;
		work = wq->do_work(work);
		wq->free_work(old_work);
	} while (work);
}

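/*
 * Append the work item to the pending list. Hashed items with the same hash
 * are kept adjacent by inserting after the current tail of their bucket
 * (tracked in wqe->hash_tail), so io_get_next_work() can splice the whole
 * run out at once.
 */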
static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &wqe->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
}
808
771b53d0
JA
809static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
810{
c5def4ab 811 struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
895e2ca0 812 int work_flags;
771b53d0
JA
813 unsigned long flags;
814
c5def4ab
JA
815 /*
816 * Do early check to see if we need a new unbound worker, and if we do,
817 * if we're allowed to do so. This isn't 100% accurate as there's a
818 * gap between this check and incrementing the value, but that's OK.
819 * It's close enough to not be an issue, fork() has the same delay.
820 */
821 if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
e9fd9396 822 io_run_cancel(work, wqe);
c5def4ab
JA
823 return;
824 }
825
895e2ca0 826 work_flags = work->flags;
771b53d0 827 spin_lock_irqsave(&wqe->lock, flags);
86f3cd1b 828 io_wqe_insert_work(wqe, work);
771b53d0
JA
829 wqe->flags &= ~IO_WQE_FLAG_STALLED;
830 spin_unlock_irqrestore(&wqe->lock, flags);
831
895e2ca0
JA
832 if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
833 !atomic_read(&acct->nr_running))
c5def4ab 834 io_wqe_wake_worker(wqe, acct);
771b53d0
JA
835}
836
837void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
838{
839 struct io_wqe *wqe = wq->wqes[numa_node_id()];
840
841 io_wqe_enqueue(wqe, work);
842}
843
844/*
8766dd51
PB
845 * Work items that hash to the same value will not be done in parallel.
846 * Used to limit concurrent writes, generally hashed by inode.
771b53d0 847 */
8766dd51 848void io_wq_hash_work(struct io_wq_work *work, void *val)
771b53d0 849{
8766dd51 850 unsigned int bit;
771b53d0
JA
851
852 bit = hash_ptr(val, IO_WQ_HASH_ORDER);
853 work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
771b53d0
JA
854}
855
static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
{
	send_sig(SIGINT, worker->task, 1);
	return false;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

void io_wq_cancel_all(struct io_wq *wq)
{
	int node;

	set_bit(IO_WQ_BIT_CANCEL, &wq->state);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
	}
	rcu_read_unlock();
}

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;
	unsigned long flags;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock_irqsave(&worker->lock, flags);
	if (worker->cur_work &&
	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
	    match->fn(worker->cur_work, match->data)) {
		send_sig(SIGINT, worker->task, 1);
		match->nr_running++;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return match->nr_running && !match->cancel_all;
}

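/*
 * Walk the pending list and pull out every matching item. Cancelling an
 * item requires dropping wqe->lock, after which the list may have changed,
 * so restart the walk from the top each time when cancelling all.
 */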
static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;

retry:
	spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;

		wq_list_del(&wqe->work_list, node, prev);
		spin_unlock_irqrestore(&wqe->lock, flags);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		if (!match->cancel_all)
			return;

		/* not safe to continue after unlock */
		goto retry;
	}
	spin_unlock_irqrestore(&wqe->lock, flags);
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}

static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
{
	return work == data;
}

enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
	return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
}

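/*
 * Set up one io_wqe per NUMA node, then start the manager thread and wait
 * for it to bring up the initial (fixed) workers on each online node. On
 * setup failure the manager sets IO_WQ_BIT_ERROR and we unwind.
 */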
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes) {
		kfree(wq);
		return ERR_PTR(-ENOMEM);
	}

	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	/* caller must already hold a reference to this */
	wq->user = data->user;

	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		if (wq->user) {
			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		}
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wq = wq;
		spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	init_completion(&wq->done);

	wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
	if (!IS_ERR(wq->manager)) {
		wake_up_process(wq->manager);
		wait_for_completion(&wq->done);
		if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
			ret = -ENOMEM;
			goto err;
		}
		refcount_set(&wq->use_refs, 1);
		reinit_completion(&wq->done);
		return wq;
	}

	ret = PTR_ERR(wq->manager);
	complete(&wq->done);
err:
	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
	return ERR_PTR(ret);
}

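/*
 * Try to share an existing io_wq. Only succeeds when the new user supplies
 * the same work handlers; otherwise the caller must create its own wq.
 */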
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
{
	if (data->free_work != wq->free_work || data->do_work != wq->do_work)
		return false;

	return refcount_inc_not_zero(&wq->use_refs);
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	wake_up_process(worker->task);
	return false;
}

static void __io_wq_destroy(struct io_wq *wq)
{
	int node;

	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (wq->manager)
		kthread_stop(wq->manager);

	rcu_read_lock();
	for_each_node(node)
		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
	rcu_read_unlock();

	wait_for_completion(&wq->done);

	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
}

void io_wq_destroy(struct io_wq *wq)
{
	if (refcount_dec_and_test(&wq->use_refs))
		__io_wq_destroy(wq);
}

struct task_struct *io_wq_get_task(struct io_wq *wq)
{
	return wq->manager;
}