/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3

/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

static int __btrfs_start_workers(struct btrfs_workers *workers);

/*
 * btrfs_start_workers uses kthread_run, which can block waiting for memory
 * for a very long time.  It will actually throttle on page writeback,
 * and so it may not make progress until after our btrfs worker threads
 * process all of the pending work structs in their queue.
 *
 * This means we can't use btrfs_start_workers from inside a btrfs worker
 * thread that is used as part of cleaning dirty memory, which pretty much
 * involves all of the worker threads.
 *
 * Instead we have a helper queue that never has more than one thread,
 * where we schedule thread start operations.  This worker_start struct
 * is used to contain the work and hold a pointer to the queue that needs
 * another worker.
 */
struct worker_start {
	struct btrfs_work work;
	struct btrfs_workers *queue;
};

static void start_new_worker_func(struct btrfs_work *work)
{
	struct worker_start *start;
	start = container_of(work, struct worker_start, work);
	__btrfs_start_workers(start->queue);
	kfree(start);
}

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list) &&
		    !worker->workers->stopping) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list) &&
		    !worker->workers->stopping) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

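/*
 * check whether a queue operation asked for a new worker to be started
 * (atomic_start_pending) and, if so, hand a worker_start item to the
 * async helper queue so the new thread is created from there.
 */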
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	struct worker_start *start;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	start = kzalloc(sizeof(*start), GFP_NOFS);
	if (!start)
		return;

	start->work.func = start_new_worker_func;
	start->queue = workers;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers + workers->num_workers_starting >=
	    workers->max_workers)
		goto out;

	workers->num_workers_starting += 1;
	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
	return;

out:
	kfree(start);
	spin_unlock_irqrestore(&workers->lock, flags);
}

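/*
 * for ordered pools, run the ordered_func callbacks of all completed
 * work items in submission order.  High priority items are drained
 * before the normal ordered list.
 */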
static noinline void run_ordered_completions(struct btrfs_workers *workers,
					     struct btrfs_work *work)
{
	if (!workers->ordered)
		return;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		spin_unlock(&workers->order_lock);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
		spin_lock(&workers->order_lock);
	}

	spin_unlock(&workers->order_lock);
}

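/*
 * drop a reference on a worker, freeing it once the last reference is
 * gone.  try_worker_shutdown is called after a long idle timeout and
 * removes the worker from the pool if it is still completely idle.
 */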
static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}

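/*
 * pull the next work item for a worker, always preferring the high
 * priority queue.  The pending lists are spliced onto local list heads
 * so the worker lock is only taken when they need refilling.
 */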
static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head)) {
		cur = prio_head->next;
		goto out;
	}

	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head)) {
		cur = head->next;
		goto out;
	}

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {
			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);
			cond_resched();
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);

		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			try_to_freeze();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					set_current_state(TASK_RUNNING);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shutdown
 */
void btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	workers->stopping = 1;
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
			struct btrfs_workers *async_helper)
{
	workers->num_workers = 0;
	workers->num_workers_starting = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = async_helper;
	workers->stopping = 0;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
static int __btrfs_start_workers(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;

	worker = kzalloc(sizeof(*worker), GFP_NOFS);
	if (!worker) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&worker->pending);
	INIT_LIST_HEAD(&worker->prio_pending);
	INIT_LIST_HEAD(&worker->worker_list);
	spin_lock_init(&worker->lock);

	atomic_set(&worker->num_pending, 0);
	atomic_set(&worker->refs, 1);
	worker->workers = workers;
	worker->task = kthread_create(worker_loop, worker,
				      "btrfs-%s-%d", workers->name,
				      workers->num_workers + 1);
	if (IS_ERR(worker->task)) {
		ret = PTR_ERR(worker->task);
		goto fail;
	}

	spin_lock_irq(&workers->lock);
	if (workers->stopping) {
		spin_unlock_irq(&workers->lock);
		ret = -EINVAL;
		goto fail_kthread;
	}
	list_add_tail(&worker->worker_list, &workers->idle_list);
	worker->idle = 1;
	workers->num_workers++;
	workers->num_workers_starting--;
	WARN_ON(workers->num_workers_starting < 0);
	spin_unlock_irq(&workers->lock);

	wake_up_process(worker->task);
	return 0;

fail_kthread:
	kthread_stop(worker->task);
fail:
	kfree(worker);
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting--;
	spin_unlock_irq(&workers->lock);
	return ret;
}

int btrfs_start_workers(struct btrfs_workers *workers)
{
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting++;
	spin_unlock_irq(&workers->lock);
	return __btrfs_start_workers(workers);
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min;

	enforce_min = (workers->num_workers + workers->num_workers_starting) <
		workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;
	int ret;

	spin_lock_irqsave(&workers->lock, flags);
again:
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers + workers->num_workers_starting >=
		    workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			workers->num_workers_starting++;
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			ret = __btrfs_start_workers(workers);
			spin_lock_irqsave(&workers->lock, flags);
			if (ret)
				goto fallback;
			goto again;
		}
	}
	goto found;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
found:
	/*
	 * this makes sure the worker doesn't exit before it is placed
	 * onto a busy/idle list
	 */
	atomic_inc(&worker->num_pending);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
void btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}

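/*
 * flag a work item so the old style pools place it on the prio_pending
 * and prio_order_list lists instead of the normal ones.
 */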
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}

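/*
 * __btrfs_workqueue_struct wraps one kernel workqueue plus the list used
 * to run ordered_func callbacks in submission order.  btrfs_workqueue_struct
 * pairs a normal queue with an optional WQ_HIGHPRI queue for work flagged
 * with WORK_HIGH_PRIO_BIT.
 */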
struct __btrfs_workqueue_struct {
	struct workqueue_struct *normal_wq;
	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;
};

struct btrfs_workqueue_struct {
	struct __btrfs_workqueue_struct *normal;
	struct __btrfs_workqueue_struct *high;
};

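/*
 * allocate one __btrfs_workqueue_struct and its backing kernel workqueue.
 * The workqueue name is built from the btrfs prefix, the pool name and,
 * for WQ_HIGHPRI pools, a -high suffix.
 */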
static inline struct __btrfs_workqueue_struct
*__btrfs_alloc_workqueue(char *name, int flags, int max_active)
{
	struct __btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 max_active, "btrfs", name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 max_active, "btrfs", name);
	if (unlikely(!ret->normal_wq)) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq);

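/*
 * allocate a btrfs_workqueue_struct.  A normal queue is always created;
 * when WQ_HIGHPRI is requested a second, high priority queue is created
 * as well and used for work marked with btrfs_set_work_high_priority().
 */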
struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
						     int flags,
						     int max_active)
{
	struct btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
					      max_active);
	if (unlikely(!ret->normal)) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(name, flags, max_active);
		if (unlikely(!ret->high)) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}

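/*
 * run the ordered_func callbacks of all completed work items on this
 * queue in submission order, then drop them from the list and call
 * their ordered_free callbacks without the list lock held.
 */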
static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work_struct *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work_struct,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
	}
	spin_unlock_irqrestore(lock, flags);
}

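/*
 * normal_work_helper is the work_struct callback used for every
 * btrfs_work_struct.  It runs work->func() and then, for ordered
 * queues, marks the item done and drains the ordered list.
 */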
static void normal_work_helper(struct work_struct *arg)
{
	struct btrfs_work_struct *work;
	struct __btrfs_workqueue_struct *wq;
	int need_order = 0;

	work = container_of(arg, struct btrfs_work_struct, normal_work);
	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
}

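/*
 * initialize a btrfs_work_struct before it is queued.  ordered_func and
 * ordered_free may be NULL when completion ordering is not needed.
 */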
void btrfs_init_work(struct btrfs_work_struct *work,
		     void (*func)(struct btrfs_work_struct *),
		     void (*ordered_func)(struct btrfs_work_struct *),
		     void (*ordered_free)(struct btrfs_work_struct *))
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, normal_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

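/*
 * __btrfs_queue_work adds a work item to one underlying workqueue,
 * registering it on the ordered list first when it has an ordered_func.
 * btrfs_queue_work picks the high priority queue for work marked with
 * WORK_HIGH_PRIO_BIT and falls back to the normal queue otherwise.
 */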
static inline void __btrfs_queue_work(struct __btrfs_workqueue_struct *wq,
				      struct btrfs_work_struct *work)
{
	unsigned long flags;

	work->wq = wq;
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
		      struct btrfs_work_struct *work)
{
	struct __btrfs_workqueue_struct *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

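/*
 * tear down the underlying kernel workqueues.  btrfs_destroy_workqueue
 * handles both the normal and the optional high priority queue.
 */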
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq)
{
	destroy_workqueue(wq->normal_wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
}

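/*
 * adjust the max_active limit on the underlying kernel workqueues of
 * both the normal and, if present, the high priority queue.
 */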
void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
{
	workqueue_set_max_active(wq->normal->normal_wq, max);
	if (wq->high)
		workqueue_set_max_active(wq->high->normal_wq, max);
}

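/*
 * flag a work item as high priority so btrfs_queue_work places it on
 * the high priority queue (when one exists).
 */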
void btrfs_set_work_high_priority(struct btrfs_work_struct *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}